summary refs log tree commit diff
path: root/sys_util
diff options
context:
space:
mode:
authorAlyssa Ross <hi@alyssa.is>2020-05-08 15:27:56 +0000
committerAlyssa Ross <hi@alyssa.is>2020-05-10 02:39:28 +0000
commit2f8d50adc97cc7fca6f710bd575b4f71ccb40f6b (patch)
treefefaf2c13796f8f2fa9a13b99b09c3b40ab5966b /sys_util
parent00c41c28bbc44b37fc8dcf5d2a5b4679f2aa4297 (diff)
parent03a54abf852984f696e7a101ff9590f05ebcba5b (diff)
downloadcrosvm-2f8d50adc97cc7fca6f710bd575b4f71ccb40f6b.tar
crosvm-2f8d50adc97cc7fca6f710bd575b4f71ccb40f6b.tar.gz
crosvm-2f8d50adc97cc7fca6f710bd575b4f71ccb40f6b.tar.bz2
crosvm-2f8d50adc97cc7fca6f710bd575b4f71ccb40f6b.tar.lz
crosvm-2f8d50adc97cc7fca6f710bd575b4f71ccb40f6b.tar.xz
crosvm-2f8d50adc97cc7fca6f710bd575b4f71ccb40f6b.tar.zst
crosvm-2f8d50adc97cc7fca6f710bd575b4f71ccb40f6b.zip
Merge remote-tracking branch 'origin/master'
Diffstat (limited to 'sys_util')
-rw-r--r--sys_util/src/descriptor.rs88
-rw-r--r--sys_util/src/ioctl.rs2
-rw-r--r--sys_util/src/lib.rs5
-rw-r--r--sys_util/src/mmap.rs200
-rw-r--r--sys_util/src/shm.rs8
-rw-r--r--sys_util/src/struct_util.rs1
-rw-r--r--sys_util/src/timerfd.rs10
7 files changed, 198 insertions, 116 deletions
diff --git a/sys_util/src/descriptor.rs b/sys_util/src/descriptor.rs
new file mode 100644
index 0000000..1af72fc
--- /dev/null
+++ b/sys_util/src/descriptor.rs
@@ -0,0 +1,88 @@
+// Copyright 2020 The Chromium OS Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+use std::os::unix::io::RawFd;
+
+use crate::{errno_result, Result};
+use std::mem;
+use std::ops::Drop;
+
+pub type RawDescriptor = RawFd;
+
+/// Trait for forfeiting ownership of the current raw descriptor, and returning the raw descriptor
+pub trait IntoRawDescriptor {
+    fn into_raw_descriptor(self) -> RawDescriptor;
+}
+
+/// Trait for returning the underlying raw descriptor, without giving up ownership of the
+/// descriptor.
+pub trait AsRawDescriptor {
+    fn as_raw_descriptor(&self) -> RawDescriptor;
+}
+
+pub trait FromRawDescriptor {
+    /// # Safety
+    /// Safe only if the caller ensures nothing has access to the descriptor after passing it to
+    /// `from_raw_descriptor`
+    unsafe fn from_raw_descriptor(descriptor: RawDescriptor) -> Self;
+}
+
+/// Wraps a RawDescriptor and safely closes it when self falls out of scope.
+#[derive(Debug, PartialEq)]
+pub struct SafeDescriptor {
+    descriptor: RawDescriptor,
+}
+
+impl Drop for SafeDescriptor {
+    fn drop(&mut self) {
+        let _ = unsafe { libc::close(self.descriptor) };
+    }
+}
+
+impl AsRawDescriptor for SafeDescriptor {
+    fn as_raw_descriptor(&self) -> RawDescriptor {
+        self.descriptor
+    }
+}
+
+impl IntoRawDescriptor for SafeDescriptor {
+    fn into_raw_descriptor(self) -> RawDescriptor {
+        let descriptor = self.descriptor;
+        mem::forget(self);
+        descriptor
+    }
+}
+
+impl FromRawDescriptor for SafeDescriptor {
+    unsafe fn from_raw_descriptor(descriptor: RawDescriptor) -> Self {
+        SafeDescriptor { descriptor }
+    }
+}
+
+impl SafeDescriptor {
+    /// Clones this descriptor, internally creating a new descriptor. The new SafeDescriptor will
+    /// share the same underlying count within the kernel.
+    pub fn try_clone(&self) -> Result<SafeDescriptor> {
+        // Safe because self.as_raw_descriptor() returns a valid value
+        let copy_fd = unsafe { libc::dup(self.as_raw_descriptor()) };
+        if copy_fd < 0 {
+            return errno_result();
+        }
+        // Safe because we just successfully duplicated and this object will uniquely
+        // own the raw descriptor.
+        Ok(unsafe { SafeDescriptor::from_raw_descriptor(copy_fd) })
+    }
+}
+
+/// For use cases where a simple wrapper around a RawDescriptor is needed.
+/// This is simply a wrapper and does not manage the lifetime of the descriptor.
+/// Most usages should prefer SafeDescriptor or using a RawDescriptor directly
+#[derive(Copy, Clone, Hash, PartialEq, Eq, PartialOrd, Ord)]
+#[repr(transparent)]
+pub struct Descriptor(pub RawDescriptor);
+impl AsRawDescriptor for Descriptor {
+    fn as_raw_descriptor(&self) -> RawDescriptor {
+        self.0
+    }
+}
diff --git a/sys_util/src/ioctl.rs b/sys_util/src/ioctl.rs
index a0309ff..f8c7604 100644
--- a/sys_util/src/ioctl.rs
+++ b/sys_util/src/ioctl.rs
@@ -7,8 +7,6 @@
 use std::os::raw::*;
 use std::os::unix::io::AsRawFd;
 
-use libc;
-
 /// Raw macro to declare the expression that calculates an ioctl number
 #[macro_export]
 macro_rules! ioctl_expr {
diff --git a/sys_util/src/lib.rs b/sys_util/src/lib.rs
index 357e6b4..98c9dc6 100644
--- a/sys_util/src/lib.rs
+++ b/sys_util/src/lib.rs
@@ -14,6 +14,7 @@ pub mod ioctl;
 pub mod syslog;
 mod capabilities;
 mod clock;
+mod descriptor;
 mod errno;
 mod eventfd;
 mod file_flags;
@@ -41,8 +42,8 @@ pub use crate::affinity::*;
 pub use crate::alloc::LayoutAllocation;
 pub use crate::capabilities::drop_capabilities;
 pub use crate::clock::{Clock, FakeClock};
-use crate::errno::errno_result;
-pub use crate::errno::{Error, Result};
+pub use crate::descriptor::*;
+pub use crate::errno::{errno_result, Error, Result};
 pub use crate::eventfd::*;
 pub use crate::file_flags::*;
 pub use crate::fork::*;
diff --git a/sys_util/src/mmap.rs b/sys_util/src/mmap.rs
index 006b0a8..c6a52ea 100644
--- a/sys_util/src/mmap.rs
+++ b/sys_util/src/mmap.rs
@@ -6,10 +6,9 @@
 //! mmap object leaves scope.
 
 use std::cmp::min;
-use std::collections::BTreeMap;
 use std::fmt::{self, Display};
 use std::io;
-use std::mem::{size_of, ManuallyDrop};
+use std::mem::size_of;
 use std::os::unix::io::AsRawFd;
 use std::ptr::{copy_nonoverlapping, null_mut, read_unaligned, write_unaligned};
 
@@ -28,8 +27,6 @@ pub enum Error {
     InvalidOffset,
     /// Requested mapping is not page aligned
     NotPageAligned,
-    /// Overlapping regions
-    Overlapping(usize, usize),
     /// Requested memory range spans past the end of the region.
     InvalidRange(usize, usize, usize),
     /// `mmap` returned the given error.
@@ -49,11 +46,6 @@ impl Display for Error {
             InvalidAddress => write!(f, "requested memory out of range"),
             InvalidOffset => write!(f, "requested offset is out of range of off_t"),
             NotPageAligned => write!(f, "requested memory is not page aligned"),
-            Overlapping(offset, count) => write!(
-                f,
-                "requested memory range overlaps with existing region: offset={} size={}",
-                offset, count
-            ),
             InvalidRange(offset, count, region_size) => write!(
                 f,
                 "requested memory range spans past the end of the region: offset={} count={} region_size={}",
@@ -643,13 +635,6 @@ impl Drop for MemoryMapping {
 pub struct MemoryMappingArena {
     addr: *mut u8,
     size: usize,
-    // When doing in-place swaps of MemoryMappings, the BTreeMap returns a owned
-    // instance of the old MemoryMapping. When the old MemoryMapping falls out
-    // of scope, it calls munmap on the same region as the new MemoryMapping
-    // that was just mapped in. To avoid accidentally munmapping the new,
-    // MemoryMapping, all mappings are wrapped in a ManuallyDrop, and then
-    // "forgotten" when removed from the BTreeMap
-    maps: BTreeMap<usize, ManuallyDrop<MemoryMapping>>,
 }
 
 // Send and Sync aren't automatically inherited for the raw address pointer.
@@ -666,17 +651,7 @@ impl MemoryMappingArena {
     /// * `size` - Size of memory region in bytes.
     pub fn new(size: usize) -> Result<MemoryMappingArena> {
         // Reserve the arena's memory using an anonymous read-only mmap.
-        // The actual MemoryMapping object is forgotten, with
-        // MemoryMappingArena manually calling munmap on drop.
-        let mmap = MemoryMapping::new_protection(size, Protection::none().set_read())?;
-        let addr = mmap.as_ptr();
-        let size = mmap.size();
-        std::mem::forget(mmap);
-        Ok(MemoryMappingArena {
-            addr,
-            size,
-            maps: BTreeMap::new(),
-        })
+        MemoryMapping::new_protection(size, Protection::none().set_read()).map(From::from)
     }
 
     /// Anonymously maps `size` bytes at `offset` bytes from the start of the arena.
@@ -755,58 +730,43 @@ impl MemoryMappingArena {
         let mmap = unsafe {
             match fd {
                 Some((fd, fd_offset)) => MemoryMapping::from_fd_offset_protection_fixed(
-                    (self.addr as usize + offset) as *mut u8,
+                    self.addr.add(offset),
                     fd,
                     size,
                     fd_offset,
                     prot,
                 )?,
-                None => MemoryMapping::new_protection_fixed(
-                    (self.addr as usize + offset) as *mut u8,
-                    size,
-                    prot,
-                )?,
+                None => MemoryMapping::new_protection_fixed(self.addr.add(offset), size, prot)?,
             }
         };
 
-        self.maps.insert(offset, ManuallyDrop::new(mmap));
+        // This mapping will get automatically removed when we drop the whole arena.
+        std::mem::forget(mmap);
         Ok(())
     }
 
-    /// Removes a mapping at `offset` from the start of the arena.
-    /// Returns a boolean indicating if there was a mapping present at `offset`.
-    /// If none was present, this method is a noop.
-    pub fn remove(&mut self, offset: usize) -> Result<bool> {
-        if let Some(mmap) = self.maps.remove(&offset) {
-            // Instead of munmapping the memory map, leaving an unprotected hole
-            // in the arena, swap this mmap with an anonymous protection.
-            // This is safe since the memory mapping perfectly overlaps with an
-            // existing, known good memory mapping.
-            let mmap = unsafe {
-                MemoryMapping::new_protection_fixed(
-                    mmap.as_ptr(),
-                    mmap.size(),
-                    Protection::none().set_read(),
-                )?
-            };
-            self.maps.insert(offset, ManuallyDrop::new(mmap));
-            Ok(true)
-        } else {
-            Ok(false)
-        }
+    /// Removes `size` bytes at `offset` bytes from the start of the arena. `offset` must be page
+    /// aligned.
+    ///
+    /// # Arguments
+    /// * `offset` - Page aligned offset into the arena in bytes.
+    /// * `size` - Size of memory region in bytes.
+    pub fn remove(&mut self, offset: usize, size: usize) -> Result<()> {
+        self.try_add(offset, size, Protection::read(), None)
     }
 
-    /// Calls msync with MS_SYNC on the mapping at `offset` from the start of
-    /// the arena.
-    /// Returns a boolean indicating if there was a mapping present at `offset`.
-    /// If none was present, this method is a noop.
-    pub fn msync(&self, offset: usize) -> Result<bool> {
-        if let Some(mmap) = self.maps.get(&offset) {
-            mmap.msync()?;
-            Ok(true)
-        } else {
-            Ok(false)
+    /// Calls msync with MS_SYNC on a mapping of `size` bytes starting at `offset` from the start of
+    /// the arena. `offset` must be page aligned.
+    pub fn msync(&self, offset: usize, size: usize) -> Result<()> {
+        self.validate_range(offset, size)?;
+
+        // Safe because we've validated that this memory range is owned by this `MemoryMappingArena`.
+        let ret =
+            unsafe { libc::msync(self.addr as *mut libc::c_void, self.size(), libc::MS_SYNC) };
+        if ret == -1 {
+            return Err(Error::SystemCallFailed(errno::Error::last()));
         }
+        Ok(())
     }
 
     /// Returns a pointer to the beginning of the memory region.  Should only be
@@ -836,32 +796,26 @@ impl MemoryMappingArena {
         if end_offset > self.size {
             return Err(Error::InvalidAddress);
         }
-        // Ensure offset..offset+size doesn't overlap with existing regions
-        // Find the offset + size of the first mapping before the desired offset
-        let (prev_offset, prev_size) = match self.maps.range(..offset).rev().next() {
-            Some((offset, mmap)) => (*offset, mmap.size()),
-            None => {
-                // Empty map
-                return Ok(());
-            }
-        };
-        if offset == prev_offset {
-            // Perfectly overlapping regions are allowed
-            if size != prev_size {
-                return Err(Error::Overlapping(offset, size));
-            }
-        } else if offset < (prev_offset + prev_size) {
-            return Err(Error::Overlapping(offset, size));
-        }
-
         Ok(())
     }
 }
 
+impl From<MemoryMapping> for MemoryMappingArena {
+    fn from(mmap: MemoryMapping) -> Self {
+        let addr = mmap.as_ptr();
+        let size = mmap.size();
+
+        // Forget the original mapping because the `MemoryMappingArena` will take care of calling
+        // `munmap` when it is dropped.
+        std::mem::forget(mmap);
+        MemoryMappingArena { addr, size }
+    }
+}
+
 impl Drop for MemoryMappingArena {
     fn drop(&mut self) {
-        // This is safe because we mmap the area at addr ourselves, and nobody
-        // else is holding a reference to it.
+        // This is safe because we own this memory range, and nobody else is holding a reference to
+        // it.
         unsafe {
             libc::munmap(self.addr as *mut libc::c_void, self.size);
         }
@@ -977,22 +931,8 @@ mod tests {
     fn arena_remove() {
         let mut m = MemoryMappingArena::new(0x40000).unwrap();
         assert!(m.add_anon(0, pagesize() * 4).is_ok());
-        assert!(m.remove(0).unwrap(), true);
-        assert!(m.remove(0).unwrap(), false);
-    }
-
-    #[test]
-    fn arena_add_overlap_error() {
-        let page = pagesize();
-        let mut m = MemoryMappingArena::new(page * 4).unwrap();
-        assert!(m.add_anon(0, page * 4).is_ok());
-        let res = m.add_anon(page, page).unwrap_err();
-        match res {
-            Error::Overlapping(a, o) => {
-                assert_eq!((a, o), (page, page));
-            }
-            e => panic!("unexpected error: {}", e),
-        }
+        assert!(m.remove(0, pagesize()).is_ok());
+        assert!(m.remove(0, pagesize() * 2).is_ok());
     }
 
     #[test]
@@ -1015,4 +955,62 @@ mod tests {
             e => panic!("unexpected error: {}", e),
         }
     }
+
+    #[test]
+    fn arena_add_overlapping() {
+        let ps = pagesize();
+        let mut m =
+            MemoryMappingArena::new(12 * ps).expect("failed to create `MemoryMappingArena`");
+        m.add_anon(ps * 4, ps * 4)
+            .expect("failed to add sub-mapping");
+
+        // Overlap in the front.
+        m.add_anon(ps * 2, ps * 3)
+            .expect("failed to add front overlapping sub-mapping");
+
+        // Overlap in the back.
+        m.add_anon(ps * 7, ps * 3)
+            .expect("failed to add back overlapping sub-mapping");
+
+        // Overlap the back of the first mapping, all of the middle mapping, and the front of the
+        // last mapping.
+        m.add_anon(ps * 3, ps * 6)
+            .expect("failed to add mapping that overlaps several mappings");
+    }
+
+    #[test]
+    fn arena_remove_overlapping() {
+        let ps = pagesize();
+        let mut m =
+            MemoryMappingArena::new(12 * ps).expect("failed to create `MemoryMappingArena`");
+        m.add_anon(ps * 4, ps * 4)
+            .expect("failed to add sub-mapping");
+        m.add_anon(ps * 2, ps * 2)
+            .expect("failed to add front overlapping sub-mapping");
+        m.add_anon(ps * 8, ps * 2)
+            .expect("failed to add back overlapping sub-mapping");
+
+        // Remove the back of the first mapping and the front of the second.
+        m.remove(ps * 3, ps * 2)
+            .expect("failed to remove front overlapping mapping");
+
+        // Remove the back of the second mapping and the front of the third.
+        m.remove(ps * 7, ps * 2)
+            .expect("failed to remove back overlapping mapping");
+
+        // Remove a mapping that completely overlaps the middle mapping.
+        m.remove(ps * 5, ps * 2)
+            .expect("failed to remove fully overlapping mapping");
+    }
+
+    #[test]
+    fn arena_remove_unaligned() {
+        let ps = pagesize();
+        let mut m =
+            MemoryMappingArena::new(12 * ps).expect("failed to create `MemoryMappingArena`");
+
+        m.add_anon(0, ps).expect("failed to add mapping");
+        m.remove(0, ps - 1)
+            .expect("failed to remove unaligned mapping");
+    }
 }
diff --git a/sys_util/src/shm.rs b/sys_util/src/shm.rs
index d158230..ee5f5a3 100644
--- a/sys_util/src/shm.rs
+++ b/sys_util/src/shm.rs
@@ -5,7 +5,7 @@
 use std::ffi::{CStr, CString};
 use std::fs::{read_link, File};
 use std::io::{self, Read, Seek, SeekFrom, Write};
-use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd};
+use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
 
 use libc::{
     self, c_char, c_int, c_long, c_uint, close, fcntl, ftruncate64, off64_t, syscall, EINVAL,
@@ -136,13 +136,11 @@ impl SharedMemory {
         Ok(SharedMemory { fd: file, size: 0 })
     }
 
-    /// Constructs a `SharedMemory` instance from a file descriptor that represents shared memory.
+    /// Constructs a `SharedMemory` instance from a `File` that represents shared memory.
     ///
     /// The size of the resulting shared memory will be determined using `File::seek`. If the given
     /// file's size can not be determined this way, this will return an error.
-    pub fn from_raw_fd<T: IntoRawFd>(fd: T) -> Result<SharedMemory> {
-        // Safe because the IntoRawFd trait indicates fd has unique ownership.
-        let mut file = unsafe { File::from_raw_fd(fd.into_raw_fd()) };
+    pub fn from_file(mut file: File) -> Result<SharedMemory> {
         let file_size = file.seek(SeekFrom::End(0))?;
         Ok(SharedMemory {
             fd: file,
diff --git a/sys_util/src/struct_util.rs b/sys_util/src/struct_util.rs
index 551204e..aa2ca39 100644
--- a/sys_util/src/struct_util.rs
+++ b/sys_util/src/struct_util.rs
@@ -2,7 +2,6 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-use std;
 use std::io::Read;
 use std::mem::size_of;
 
diff --git a/sys_util/src/timerfd.rs b/sys_util/src/timerfd.rs
index 8e5d91b..f2a3ff8 100644
--- a/sys_util/src/timerfd.rs
+++ b/sys_util/src/timerfd.rs
@@ -34,7 +34,7 @@ impl TimerFd {
     /// Sets the timer to expire after `dur`.  If `interval` is not `None` it represents
     /// the period for repeated expirations after the initial expiration.  Otherwise
     /// the timer will expire just once.  Cancels any existing duration and repeating interval.
-    pub fn reset(&mut self, dur: Duration, interval: Option<Duration>) -> Result<()> {
+    pub fn reset(&self, dur: Duration, interval: Option<Duration>) -> Result<()> {
         // Safe because we are zero-initializing a struct with only primitive member fields.
         let mut spec: libc::itimerspec = unsafe { mem::zeroed() };
         spec.it_value.tv_sec = dur.as_secs() as libc::time_t;
@@ -61,7 +61,7 @@ impl TimerFd {
     /// Waits until the timer expires.  The return value represents the number of times the timer
     /// has expired since the last time `wait` was called.  If the timer has not yet expired once
     /// this call will block until it does.
-    pub fn wait(&mut self) -> Result<u64> {
+    pub fn wait(&self) -> Result<u64> {
         let mut count = 0u64;
 
         // Safe because this will only modify |buf| and we check the return value.
@@ -96,7 +96,7 @@ impl TimerFd {
     }
 
     /// Disarms the timer.
-    pub fn clear(&mut self) -> Result<()> {
+    pub fn clear(&self) -> Result<()> {
         // Safe because we are zero-initializing a struct with only primitive member fields.
         let spec: libc::itimerspec = unsafe { mem::zeroed() };
 
@@ -222,7 +222,7 @@ mod tests {
 
     #[test]
     fn one_shot() {
-        let mut tfd = TimerFd::new().expect("failed to create timerfd");
+        let tfd = TimerFd::new().expect("failed to create timerfd");
         assert_eq!(tfd.is_armed().unwrap(), false);
 
         let dur = Duration::from_millis(200);
@@ -239,7 +239,7 @@ mod tests {
 
     #[test]
     fn repeating() {
-        let mut tfd = TimerFd::new().expect("failed to create timerfd");
+        let tfd = TimerFd::new().expect("failed to create timerfd");
 
         let dur = Duration::from_millis(200);
         let interval = Duration::from_millis(100);