summary refs log tree commit diff
path: root/sys_util
diff options
context:
space:
mode:
authorDylan Reid <dgreid@chromium.org>2017-05-15 17:37:47 -0700
committerchrome-bot <chrome-bot@chromium.org>2017-05-25 22:51:14 -0700
commitd4eaa4056f84b256ccd7684d9cf7e4363b9cf413 (patch)
tree728fbb294cf5b603fe232c7e284c00ffdd9904ed /sys_util
parent37285dc09d8fe5988fb98dc20bf00fdda38b0843 (diff)
downloadcrosvm-d4eaa4056f84b256ccd7684d9cf7e4363b9cf413.tar
crosvm-d4eaa4056f84b256ccd7684d9cf7e4363b9cf413.tar.gz
crosvm-d4eaa4056f84b256ccd7684d9cf7e4363b9cf413.tar.bz2
crosvm-d4eaa4056f84b256ccd7684d9cf7e4363b9cf413.tar.lz
crosvm-d4eaa4056f84b256ccd7684d9cf7e4363b9cf413.tar.xz
crosvm-d4eaa4056f84b256ccd7684d9cf7e4363b9cf413.tar.zst
crosvm-d4eaa4056f84b256ccd7684d9cf7e4363b9cf413.zip
sys_util: Add guest_memory
Add a module for accessing guest memory.
This module will replace all the slices that are used to access it
currently as those slices aren't valid because the memory is volatile
and a volatile slice doesn't exist in rust.

Modify the existing users so they no longer depend on the deprecated slice
access.

Change-Id: Ic0e86dacf66f68bd88ed9cc197cb14e45ada891d
Signed-off-by: Dylan Reid <dgreid@chromium.org>
Reviewed-on: https://chromium-review.googlesource.com/509919
Diffstat (limited to 'sys_util')
-rw-r--r--sys_util/src/guest_address.rs137
-rw-r--r--sys_util/src/guest_memory.rs318
-rw-r--r--sys_util/src/lib.rs4
-rw-r--r--sys_util/src/mmap.rs256
4 files changed, 674 insertions, 41 deletions
diff --git a/sys_util/src/guest_address.rs b/sys_util/src/guest_address.rs
new file mode 100644
index 0000000..09b6578
--- /dev/null
+++ b/sys_util/src/guest_address.rs
@@ -0,0 +1,137 @@
+// Copyright 2017 The Chromium OS Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+//! Represents an address in the guest's memory space.
+
+use std::cmp::{Eq, Ord, Ordering, PartialEq, PartialOrd};
+use std::ops::{BitAnd, BitOr};
+
/// Represents an address in the guest's memory.
///
/// Equality, ordering, and hashing-free comparisons all operate on the raw
/// offset; the derived impls are equivalent to (and replace) hand-written
/// ones over the single `usize` field.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct GuestAddress(pub usize);

impl GuestAddress {
    /// Returns the offset from this address to the given base address.
    ///
    /// The subtraction is unchecked: callers must guarantee `base <= self`,
    /// otherwise this panics in debug builds and wraps in release builds.
    ///
    /// # Examples
    ///
    /// ```
    /// # use sys_util::GuestAddress;
    ///   let base = GuestAddress(0x100);
    ///   let addr = GuestAddress(0x150);
    ///   assert_eq!(addr.offset_from(base), 0x50usize);
    /// ```
    pub fn offset_from(&self, base: GuestAddress) -> usize {
        self.0 - base.0
    }

    /// Returns the address as a usize offset from 0x0.
    /// Use this when a raw number is needed to pass to the kernel.
    pub fn offset(&self) -> usize {
        self.0
    }

    /// Returns the result of the add or None if there is overflow.
    pub fn checked_add(&self, other: usize) -> Option<GuestAddress> {
        self.0.checked_add(other).map(GuestAddress)
    }

    /// Returns the result of the base address + the size.
    /// Only use this when `offset` is guaranteed not to overflow.
    pub fn unchecked_add(&self, offset: usize) -> GuestAddress {
        GuestAddress(self.0 + offset)
    }

    /// Returns the result of the subtraction or None if there is underflow.
    pub fn checked_sub(&self, other: usize) -> Option<GuestAddress> {
        self.0.checked_sub(other).map(GuestAddress)
    }

    /// Returns the bitwise and of the address with the given mask.
    ///
    /// NOTE(review): `mask` is truncated with `as usize`; on a 32-bit target
    /// the upper 32 bits of the mask are silently dropped — confirm intended.
    pub fn mask(&self, mask: u64) -> GuestAddress {
        GuestAddress(self.0 & mask as usize)
    }
}

impl BitAnd<u64> for GuestAddress {
    type Output = GuestAddress;

    fn bitand(self, other: u64) -> GuestAddress {
        GuestAddress(self.0 & other as usize)
    }
}

impl BitOr<u64> for GuestAddress {
    type Output = GuestAddress;

    fn bitor(self, other: u64) -> GuestAddress {
        GuestAddress(self.0 | other as usize)
    }
}
+
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn equals() {
        // Equality is symmetric and depends only on the wrapped offset.
        let x = GuestAddress(0x300);
        let y = GuestAddress(0x300);
        let z = GuestAddress(0x301);
        assert_eq!(x, y);
        assert_eq!(y, x);
        assert_ne!(x, z);
        assert_ne!(z, x);
    }

    #[test]
    fn cmp() {
        // Ordering follows the raw offset; an address never compares below itself.
        let lo = GuestAddress(0x300);
        let hi = GuestAddress(0x301);
        assert!(lo < hi);
        assert!(hi > lo);
        assert!(!(lo < lo));
    }

    #[test]
    fn mask() {
        // Bitwise AND/OR against u64 masks operate on the wrapped offset.
        let addr = GuestAddress(0x5050);
        assert_eq!(GuestAddress(0x5000), addr & 0xff00u64);
        assert_eq!(GuestAddress(0x5055), addr | 0x0005u64);
    }

    #[test]
    fn add_sub() {
        let low = GuestAddress(0x50);
        let high = GuestAddress(0x60);
        assert_eq!(Some(GuestAddress(0xb0)), low.checked_add(0x60));
        assert_eq!(0x10, high.offset_from(low));
    }

    #[test]
    fn checked_add_overflow() {
        // Near the top of the address space, small increments still fit but
        // larger ones must report overflow as None.
        let near_max = GuestAddress(0xffffffffffffff55);
        assert_eq!(Some(GuestAddress(0xffffffffffffff57)), near_max.checked_add(2));
        assert!(near_max.checked_add(0xf0).is_none());
    }
}
diff --git a/sys_util/src/guest_memory.rs b/sys_util/src/guest_memory.rs
new file mode 100644
index 0000000..0dfb709
--- /dev/null
+++ b/sys_util/src/guest_memory.rs
@@ -0,0 +1,318 @@
+// Copyright 2017 The Chromium OS Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+//! Track memory regions that are mapped to the guest VM.
+
+use std::io::{Read, Write};
+use std::result;
+use std::sync::Arc;
+
+use guest_address::GuestAddress;
+use mmap::MemoryMapping;
+
/// Errors that can occur while building or accessing guest memory.
#[derive(Clone, Debug, PartialEq)]
pub enum Error {
    /// No tracked region contains the given guest address.
    InvalidGuestAddress(GuestAddress),
    /// Creating the backing `MemoryMapping` for a region failed.
    MemoryMappingFailed,
    /// Requested regions are unsorted or overlap (must be sorted and disjoint).
    MemoryRegionOverlap,
    /// An empty list of regions was supplied to `GuestMemory::new`.
    NoMemoryRegions,
    /// Not constructed in this module — presumably reserved for callers of
    /// `with_regions`; TODO(review): confirm against users of this error.
    RegionOperationFailed,
}
/// Result alias using this module's `Error` type.
pub type Result<T> = result::Result<T, Error>;
+
/// One anonymous host mapping plus the guest-physical address it starts at.
struct MemoryRegion {
    // Backing host memory for this region.
    mapping: MemoryMapping,
    // First guest address covered by `mapping`.
    guest_base: GuestAddress,
}

/// Returns the first guest address past the end of `region`.
fn region_end(region: &MemoryRegion) -> GuestAddress {
    // unchecked_add is safe as the region bounds were checked when it was created.
    region.guest_base.unchecked_add(region.mapping.size())
}
+
/// Tracks a memory region and where it is mapped in the guest.
///
/// Cloning is cheap: clones share the same region list through the `Arc`.
#[derive(Clone)]
pub struct GuestMemory {
    // Sorted, disjoint regions; shared so clones alias the same mappings.
    regions: Arc<Vec<MemoryRegion>>,
}
+
impl GuestMemory {
    /// Creates a container for guest memory regions.
    /// Valid memory regions are specified as a Vec of (Address, Size) tuples sorted by Address.
    ///
    /// # Errors
    /// Returns `NoMemoryRegions` for an empty list, `MemoryRegionOverlap` if
    /// the ranges are unsorted or overlap, and `MemoryMappingFailed` if the
    /// backing mapping cannot be created.
    pub fn new(ranges: &[(GuestAddress, usize)]) -> Result<GuestMemory> {
        if ranges.is_empty() {
            return Err(Error::NoMemoryRegions);
        }

        let mut regions = Vec::<MemoryRegion>::new();
        for range in ranges.iter() {
            if let Some(last) = regions.last() {
                // Reject this range if the previous region's end overflows or
                // runs past this range's start; because each range must start
                // at or after the previous end, this also enforces sorting.
                if last.guest_base
                       .checked_add(last.mapping.size())
                       .map_or(true, |a| a > range.0) {
                    return Err(Error::MemoryRegionOverlap);
                }
            }

            let mapping = MemoryMapping::new(range.1).map_err(|_| Error::MemoryMappingFailed)?;
            regions.push(MemoryRegion {
                             mapping: mapping,
                             guest_base: range.0,
                         });
        }

        Ok(GuestMemory { regions: Arc::new(regions) })
    }

    /// Returns the end address of memory.
    ///
    /// # Examples
    ///
    /// ```
    /// # use sys_util::{GuestAddress, GuestMemory, MemoryMapping};
    /// # fn test_end_addr() -> Result<(), ()> {
    ///     let start_addr = GuestAddress(0x1000);
    ///     let mut gm = GuestMemory::new(&vec![(start_addr, 0x400)]).map_err(|_| ())?;
    ///     assert_eq!(start_addr.checked_add(0x400), Some(gm.end_addr()));
    ///     Ok(())
    /// # }
    /// ```
    pub fn end_addr(&self) -> GuestAddress {
        // Regions are sorted and disjoint (enforced by `new`), so the region
        // with the highest base is also the one that ends last.
        self.regions
            .iter()
            .max_by_key(|region| region.guest_base)
            .map_or(GuestAddress(0), |region| region_end(region))
    }

    /// Returns true if the given address is within the memory range available to the guest.
    ///
    /// NOTE(review): only the overall end address is checked, so an address in
    /// a hole between two non-contiguous regions is also reported as in range
    /// — confirm this is intended for sparse layouts.
    pub fn address_in_range(&self, addr: GuestAddress) -> bool {
        addr < self.end_addr()
    }

    /// Returns the address plus the offset if it is in range.
    ///
    /// Returns `None` on arithmetic overflow or if the sum reaches `end_addr`.
    pub fn checked_offset(&self, addr: GuestAddress, offset: usize) -> Option<GuestAddress> {
        addr.checked_add(offset)
            .and_then(|a| if a < self.end_addr() { Some(a) } else { None })
    }

    /// Returns the number of memory regions tracked by this `GuestMemory`.
    pub fn num_regions(&self) -> usize {
        self.regions.len()
    }

    /// Perform the specified action on each region's addresses.
    ///
    /// `cb` receives, per region: its index, guest base address, size in
    /// bytes, and host virtual address (the mapping pointer as a usize).
    /// Iteration stops at the first `Err` returned by `cb`.
    pub fn with_regions<F, E>(&self, cb: F) -> result::Result<(), E>
        where F: Fn(usize, GuestAddress, usize, usize) -> result::Result<(), E>
    {
        for (index, region) in self.regions.iter().enumerate() {
            cb(index,
               region.guest_base,
               region.mapping.size(),
               region.mapping.as_ptr() as usize)?;
        }
        Ok(())
    }

    /// Writes a slice to guest memory at the specified guest address.
    /// Returns Ok(<number of bytes written>).  The number of bytes written can
    /// be less than the length of the slice if there isn't enough room in the
    /// memory region.
    ///
    /// # Examples
    /// * Write a slice at guest address 0x200.
    ///
    /// ```
    /// # use sys_util::{GuestAddress, GuestMemory, MemoryMapping};
    /// # fn test_write_u64() -> Result<(), ()> {
    /// #   let start_addr = GuestAddress(0x1000);
    /// #   let mut gm = GuestMemory::new(&vec![(start_addr, 0x400)]).map_err(|_| ())?;
    ///     let res = gm.write_slice_at_addr(&[1,2,3,4,5], GuestAddress(0x200));
    ///     assert_eq!(Ok(5), res);
    ///     Ok(())
    /// # }
    /// ```
    pub fn write_slice_at_addr(&self, buf: &[u8], guest_addr: GuestAddress) -> Result<usize> {
        self.do_in_region(guest_addr, move |mapping, offset| {
            mapping
                .write_slice(buf, offset)
                .map_err(|_| Error::InvalidGuestAddress(guest_addr))
        })
    }

    /// Reads an object from guest memory at the given guest address.
    /// Reading from a volatile area isn't strictly safe as it could change
    /// mid-read.  However, as long as the type T is plain old data and can
    /// handle random initialization, everything will be OK.
    ///
    /// # Examples
    /// * Read a u64 from two areas of guest memory backed by separate mappings.
    ///
    /// ```
    /// # use sys_util::{GuestAddress, GuestMemory, MemoryMapping};
    /// # fn test_read_u64() -> Result<u64, ()> {
    /// #     let start_addr1 = GuestAddress(0x0);
    /// #     let start_addr2 = GuestAddress(0x400);
    /// #     let mut gm = GuestMemory::new(&vec![(start_addr1, 0x400), (start_addr2, 0x400)])
    /// #         .map_err(|_| ())?;
    ///       let num1: u64 = gm.read_obj_from_addr(GuestAddress(32)).map_err(|_| ())?;
    ///       let num2: u64 = gm.read_obj_from_addr(GuestAddress(0x400+32)).map_err(|_| ())?;
    /// #     Ok(num1 + num2)
    /// # }
    /// ```
    pub fn read_obj_from_addr<T: Copy>(&self, guest_addr: GuestAddress) -> Result<T> {
        self.do_in_region(guest_addr, |mapping, offset| {
            mapping
                .read_obj(offset)
                .map_err(|_| Error::InvalidGuestAddress(guest_addr))
        })
    }

    /// Writes an object to the memory region at the specified guest address.
    /// Returns Ok(()) if the object fits, or Err if it extends past the end.
    ///
    /// # Examples
    /// * Write a u64 at guest address 0x1100.
    ///
    /// ```
    /// # use sys_util::{GuestAddress, GuestMemory, MemoryMapping};
    /// # fn test_write_u64() -> Result<(), ()> {
    /// #   let start_addr = GuestAddress(0x1000);
    /// #   let mut gm = GuestMemory::new(&vec![(start_addr, 0x400)]).map_err(|_| ())?;
    ///     gm.write_obj_at_addr(55u64, GuestAddress(0x1100))
    ///         .map_err(|_| ())
    /// # }
    /// ```
    pub fn write_obj_at_addr<T>(&self, val: T, guest_addr: GuestAddress) -> Result<()> {
        self.do_in_region(guest_addr, move |mapping, offset| {
            mapping
                .write_obj(val, offset)
                .map_err(|_| Error::InvalidGuestAddress(guest_addr))
        })
    }

    /// Reads data from a readable object like a File and writes it to guest memory.
    ///
    /// # Arguments
    /// * `guest_addr` - Begin writing memory at this offset.
    /// * `src` - Read from `src` to memory.
    /// * `count` - Read `count` bytes from `src` to memory.
    ///
    /// # Examples
    ///
    /// * Read bytes from /dev/urandom
    ///
    /// ```
    /// # use sys_util::{GuestAddress, GuestMemory, MemoryMapping};
    /// # use std::fs::File;
    /// # use std::path::Path;
    /// # fn test_read_random() -> Result<u32, ()> {
    /// #     let start_addr = GuestAddress(0x1000);
    /// #     let gm = GuestMemory::new(&vec![(start_addr, 0x400)]).map_err(|_| ())?;
    ///       let mut file = File::open(Path::new("/dev/urandom")).map_err(|_| ())?;
    ///       let addr = GuestAddress(0x1010);
    ///       gm.read_to_memory(addr, &mut file, 128).map_err(|_| ())?;
    ///       let read_addr = addr.checked_add(8).ok_or(())?;
    ///       let rand_val: u32 = gm.read_obj_from_addr(read_addr).map_err(|_| ())?;
    /// #     Ok(rand_val)
    /// # }
    /// ```
    pub fn read_to_memory<F>(&self,
                             guest_addr: GuestAddress,
                             src: &mut F,
                             count: usize)
                             -> Result<()>
        where F: Read
    {
        self.do_in_region(guest_addr, move |mapping, offset| {
            mapping
                .read_to_memory(offset, src, count)
                .map_err(|_| Error::InvalidGuestAddress(guest_addr))
        })
    }

    /// Writes data from memory to a writable object.
    ///
    /// # Arguments
    /// * `guest_addr` - Begin reading memory from this offset.
    /// * `dst` - Write from memory to `dst`.
    /// * `count` - Read `count` bytes from memory to `dst`.
    ///
    /// # Examples
    ///
    /// * Write 128 bytes to /dev/null
    ///
    /// ```
    /// # use sys_util::{GuestAddress, GuestMemory, MemoryMapping};
    /// # use std::fs::File;
    /// # use std::path::Path;
    /// # fn test_write_null() -> Result<(), ()> {
    /// #     let start_addr = GuestAddress(0x1000);
    /// #     let gm = GuestMemory::new(&vec![(start_addr, 0x400)]).map_err(|_| ())?;
    ///       let mut file = File::open(Path::new("/dev/null")).map_err(|_| ())?;
    ///       let addr = GuestAddress(0x1010);
    ///       gm.write_from_memory(addr, &mut file, 128).map_err(|_| ())?;
    /// #     Ok(())
    /// # }
    /// ```
    pub fn write_from_memory<F>(&self,
                                guest_addr: GuestAddress,
                                dst: &mut F,
                                count: usize)
                                -> Result<()>
        where F: Write
    {
        self.do_in_region(guest_addr, move |mapping, offset| {
            mapping
                .write_from_memory(offset, dst, count)
                .map_err(|_| Error::InvalidGuestAddress(guest_addr))
        })
    }

    // Finds the single region containing `guest_addr` by linear scan and runs
    // `cb` with that region's mapping and the offset of `guest_addr` within
    // it.  An access is satisfied entirely by one region; the mapping's own
    // bounds checks reject anything that would run past the region's end.
    fn do_in_region<F, T>(&self, guest_addr: GuestAddress, cb: F) -> Result<T>
        where F: FnOnce(&MemoryMapping, usize) -> Result<T>
    {
        for region in self.regions.iter() {
            if guest_addr >= region.guest_base && guest_addr < region_end(region) {
                return cb(&region.mapping, guest_addr.offset_from(region.guest_base));
            }
        }
        Err(Error::InvalidGuestAddress(guest_addr))
    }
}
+
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn two_regions() {
        // Two sorted, disjoint ranges are accepted.
        let ranges = vec![(GuestAddress(0x0), 0x400), (GuestAddress(0x400), 0x400)];
        assert!(GuestMemory::new(&ranges).is_ok());
    }

    #[test]
    fn overlap_memory() {
        // The first range runs past the start of the second one.
        let ranges = vec![(GuestAddress(0x0), 0x2000), (GuestAddress(0x1000), 0x2000)];
        assert!(GuestMemory::new(&ranges).is_err());
    }

    #[test]
    fn test_read_u64() {
        // Round-trip a u64 through each of two separately-mapped regions.
        let ranges = vec![(GuestAddress(0x0), 0x1000), (GuestAddress(0x1000), 0x1000)];
        let gm = GuestMemory::new(&ranges).unwrap();

        let val1: u64 = 0xaa55aa55aa55aa55;
        let val2: u64 = 0x55aa55aa55aa55aa;
        gm.write_obj_at_addr(val1, GuestAddress(0x500)).unwrap();
        gm.write_obj_at_addr(val2, GuestAddress(0x1000 + 32))
            .unwrap();
        let read1: u64 = gm.read_obj_from_addr(GuestAddress(0x500)).unwrap();
        let read2: u64 = gm.read_obj_from_addr(GuestAddress(0x1000 + 32)).unwrap();
        assert_eq!(val1, read1);
        assert_eq!(val2, read2);
    }
}
diff --git a/sys_util/src/lib.rs b/sys_util/src/lib.rs
index 2c7f682..79a9691 100644
--- a/sys_util/src/lib.rs
+++ b/sys_util/src/lib.rs
@@ -9,10 +9,14 @@ extern crate libc;
 mod mmap;
 mod eventfd;
 mod errno;
+mod guest_address;
+mod guest_memory;
 mod struct_util;
 
 pub use mmap::*;
 pub use eventfd::*;
 pub use errno::{Error, Result};
 use errno::errno_result;
+pub use guest_address::*;
+pub use guest_memory::*;
 pub use struct_util::*;
diff --git a/sys_util/src/mmap.rs b/sys_util/src/mmap.rs
index 22c6753..71b8d56 100644
--- a/sys_util/src/mmap.rs
+++ b/sys_util/src/mmap.rs
@@ -6,26 +6,45 @@
 //! mmap object leaves scope.
 
 use std;
+use std::io::{Read, Write};
 use std::ptr::null_mut;
 use std::os::unix::io::AsRawFd;
-use std::sync::Arc;
-use std::sync::atomic::{AtomicUsize, Ordering};
 
 use libc;
 
-use {Result, errno_result};
+use errno;
+
+#[derive(Debug)]
+pub enum Error {
+    /// Requested memory out of range.
+    InvalidAddress,
+    /// Couldn't read from the given source.
+    ReadFromSource,
+    /// `mmap` returned the given error.
+    SystemCallFailed(errno::Error),
+    /// Writing to memory failed
+    WriteToMemory(std::io::Error),
+}
+pub type Result<T> = std::result::Result<T, Error>;
 
 /// Wraps an anonymous shared memory mapping in the current process.
 pub struct MemoryMapping {
     addr: *mut u8,
     size: usize,
-    ref_count: Arc<AtomicUsize>,
 }
 
+// Send and Sync aren't automatically inherited for the raw address pointer.
+// Accessing that pointer is only done through the stateless interface which
+// allows the object to be shared by multiple threads without a decrease in
+// safety.
 unsafe impl Send for MemoryMapping {}
+unsafe impl Sync for MemoryMapping {}
 
 impl MemoryMapping {
     /// Creates an anonymous shared mapping of `size` bytes.
+    ///
+    /// # Arguments
+    /// * `size` - Size of memory region in bytes.
     pub fn new(size: usize) -> Result<MemoryMapping> {
         // This is safe because we are creating an anonymous mapping in a place not already used by
         // any other area in this process.
@@ -37,17 +56,20 @@ impl MemoryMapping {
                        -1,
                        0)
         };
-        if addr == null_mut() {
-            return errno_result();
+        if addr.is_null() {
+            return Err(Error::SystemCallFailed(errno::Error::last()));
         }
         Ok(MemoryMapping {
                addr: addr as *mut u8,
                size: size,
-               ref_count: Arc::new(AtomicUsize::new(1)),
            })
     }
 
     /// Maps the first `size` bytes of the given `fd`.
+    ///
+    /// # Arguments
+    /// * `fd` - File descriptor to mmap from.
+    /// * `size` - Size of memory region in bytes.
     pub fn from_fd(fd: &AsRawFd, size: usize) -> Result<MemoryMapping> {
         // This is safe because we are creating a mapping in a place not already used by any other
         // area in this process.
@@ -59,63 +81,217 @@ impl MemoryMapping {
                        fd.as_raw_fd(),
                        0)
         };
-        if addr == null_mut() {
-            return errno_result();
+        if addr.is_null() {
+            return Err(Error::SystemCallFailed(errno::Error::last()));
         }
         Ok(MemoryMapping {
                addr: addr as *mut u8,
                size: size,
-               ref_count: Arc::new(AtomicUsize::new(1)),
            })
     }
 
+    /// Returns a pointer to the beginning of the memory region.  Should only be
+    /// used for passing this region to ioctls for setting guest memory.
     pub fn as_ptr(&self) -> *mut u8 {
         self.addr
     }
 
+    /// Returns the size of the memory region in bytes.
     pub fn size(&self) -> usize {
         self.size
     }
 
-    #[deprecated(note="use volatile_read with the ptr instead")]
-    pub fn as_slice(&self) -> &[u8] {
-        // This is safe because we mapped the area at addr ourselves, so this slice will not
-        // overflow. However, it is possible to alias, hence the deprecation.
-        unsafe { std::slice::from_raw_parts(self.addr, self.size) }
+    /// Writes a slice to the memory region at the specified offset.
+    /// Returns Ok(<number of bytes written>).  The number of bytes written can
+    /// be less than the length of the slice if there isn't enough room in the
+    /// memory region.
+    ///
+    /// # Examples
+    /// * Write a slice at offset 256.
+    ///
+    /// ```
+    /// #   use sys_util::MemoryMapping;
+    /// #   let mut mem_map = MemoryMapping::new(1024).unwrap();
+    ///     let res = mem_map.write_slice(&[1,2,3,4,5], 0);
+    ///     assert!(res.is_ok());
+    ///     assert_eq!(res.unwrap(), 5);
+    /// ```
+    pub fn write_slice(&self, buf: &[u8], offset: usize) -> Result<usize> {
+        if offset >= self.size {
+            return Err(Error::InvalidAddress);
+        }
+        unsafe {
+            // Guest memory can't strictly be modeled as a slice because it is
+            // volatile.  Writing to it with what compiles down to a memcpy
+            // won't hurt anything as long as we get the bounds checks right.
+            let mut slice: &mut [u8] = &mut self.as_mut_slice()[offset..];
+            Ok(slice.write(buf).map_err(Error::WriteToMemory)?)
+        }
     }
 
-    #[deprecated(note="use volatile_write with the ptr instead")]
-    pub fn as_mut_slice(&self) -> &mut [u8] {
-        // This is safe because we mapped the area at addr ourselves, so this slice will not
-        // overflow. However, it is possible to alias, hence the deprecation.
-        unsafe { std::slice::from_raw_parts_mut(self.addr, self.size) }
+    /// Writes an object to the memory region at the specified offset.
+    /// Returns Ok(()) if the object fits, or Err if it extends past the end.
+    ///
+    /// # Examples
+    /// * Write a u64 at offset 16.
+    ///
+    /// ```
+    /// #   use sys_util::MemoryMapping;
+    /// #   let mut mem_map = MemoryMapping::new(1024).unwrap();
+    ///     let res = mem_map.write_obj(55u64, 16);
+    ///     assert!(res.is_ok());
+    /// ```
+    pub fn write_obj<T>(&self, val: T, offset: usize) -> Result<()> {
+        unsafe {
+            // Guest memory can't strictly be modeled as a slice because it is
+            // volatile.  Writing to it with what compiles down to a memcpy
+            // won't hurt anything as long as we get the bounds checks right.
+            if offset + std::mem::size_of::<T>() > self.size {
+                return Err(Error::InvalidAddress);
+            }
+            std::ptr::write_volatile(&mut self.as_mut_slice()[offset..] as *mut _ as *mut T, val);
+            Ok(())
+        }
+    }
+
+    /// Reads an object from the memory region at the given offset.
+    /// Reading from a volatile area isn't strictly safe as it could change
+    /// mid-read.  However, as long as the type T is plain old data and can
+    /// handle random initialization, everything will be OK.
+    ///
+    /// # Examples
+    /// * Read a u64 written to offset 32.
+    ///
+    /// ```
+    /// #   use sys_util::MemoryMapping;
+    /// #   let mut mem_map = MemoryMapping::new(1024).unwrap();
+    ///     let res = mem_map.write_obj(55u64, 32);
+    ///     assert!(res.is_ok());
+    ///     let num: u64 = mem_map.read_obj(32).unwrap();
+    ///     assert_eq!(55, num);
+    /// ```
+    pub fn read_obj<T: Copy>(&self, offset: usize) -> Result<T> {
+        if offset + std::mem::size_of::<T>() > self.size {
+            return Err(Error::InvalidAddress);
+        }
+        unsafe {
+            // This is safe because by definition Copy types can have their bits
+            // set arbitrarily and still be valid.
+            Ok(std::ptr::read_volatile(&self.as_slice()[offset..] as *const _ as *const T))
+        }
+    }
+
+    /// Reads data from a readable object like a File and writes it to guest memory.
+    ///
+    /// # Arguments
+    /// * `mem_offset` - Begin writing memory at this offset.
+    /// * `src` - Read from `src` to memory.
+    /// * `count` - Read `count` bytes from `src` to memory.
+    ///
+    /// # Examples
+    ///
+    /// * Read bytes from /dev/urandom
+    ///
+    /// ```
+    /// # use sys_util::MemoryMapping;
+    /// # use std::fs::File;
+    /// # use std::path::Path;
+    /// # fn test_read_random() -> Result<u32, ()> {
+    /// #     let mut mem_map = MemoryMapping::new(1024).unwrap();
+    ///       let mut file = File::open(Path::new("/dev/urandom")).map_err(|_| ())?;
+    ///       mem_map.read_to_memory(32, &mut file, 128).map_err(|_| ())?;
+    ///       let rand_val: u32 =  mem_map.read_obj(40).map_err(|_| ())?;
+    /// #     Ok(rand_val)
+    /// # }
+    /// ```
+    pub fn read_to_memory<F>(&self, mem_offset: usize, src: &mut F, count: usize) -> Result<()>
+        where F: Read
+    {
+        let mem_end = mem_offset + count;
+        if mem_end > self.size() {
+            return Err(Error::InvalidAddress);
+        }
+        unsafe {
+            // It is safe to overwrite the volatile memory.  Accessing the guest
+            // memory as a mutable slice is OK because nothing assumes another
+            // thread won't change what is loaded.
+            let mut dst = &mut self.as_mut_slice()[mem_offset..mem_end];
+            if src.read_exact(dst).is_err() {
+                return Err(Error::ReadFromSource);
+            }
+        }
+        Ok(())
     }
 
-    // TODO(zachr): remove when we no longer need it, clone is sketchy
-    pub fn clone(&self) -> MemoryMapping {
-        self.ref_count.fetch_add(1, Ordering::SeqCst);
-        MemoryMapping {
-            addr: self.addr,
-            size: self.size,
-            ref_count: self.ref_count.clone(),
+    /// Writes data from memory to a writable object.
+    ///
+    /// # Arguments
+    /// * `mem_offset` - Begin reading memory from this offset.
+    /// * `dst` - Write from memory to `dst`.
+    /// * `count` - Read `count` bytes from memory to `dst`.
+    ///
+    /// # Examples
+    ///
+    /// * Write 128 bytes to /dev/null
+    ///
+    /// ```
+    /// # use sys_util::MemoryMapping;
+    /// # use std::fs::File;
+    /// # use std::path::Path;
+    /// # fn test_write_null() -> Result<(), ()> {
+    /// #     let mut mem_map = MemoryMapping::new(1024).unwrap();
+    ///       let mut file = File::open(Path::new("/dev/null")).map_err(|_| ())?;
+    ///       mem_map.write_from_memory(32, &mut file, 128).map_err(|_| ())?;
+    /// #     Ok(())
+    /// # }
+    /// ```
+    pub fn write_from_memory<F>(&self, mem_offset: usize, dst: &mut F, count: usize) -> Result<()>
+        where F: Write
+    {
+        let mem_end = match mem_offset.checked_add(count) {
+            None => return Err(Error::InvalidAddress),
+            Some(m) => m,
+        };
+        if mem_end > self.size() {
+            return Err(Error::InvalidAddress);
+        }
+        unsafe {
+            // It is safe to read from volatile memory.  Accessing the guest
+            // memory as a slice is OK because nothing assumes another thread
+            // won't change what is loaded.
+            let src = &self.as_mut_slice()[mem_offset..mem_end];
+            if dst.write_all(src).is_err() {
+                return Err(Error::ReadFromSource);
+            }
         }
+        Ok(())
+    }
+
+    unsafe fn as_slice(&self) -> &[u8] {
+        // This is safe because we mapped the area at addr ourselves, so this slice will not
+        // overflow. However, it is possible to alias.
+        std::slice::from_raw_parts(self.addr, self.size)
+    }
+
+    unsafe fn as_mut_slice(&self) -> &mut [u8] {
+        // This is safe because we mapped the area at addr ourselves, so this slice will not
+        // overflow. However, it is possible to alias.
+        std::slice::from_raw_parts_mut(self.addr, self.size)
     }
 }
 
 impl Drop for MemoryMapping {
     fn drop(&mut self) {
-        if self.ref_count.fetch_sub(1, Ordering::SeqCst) == 1 {
-            // This is safe because we mmap the area at addr ourselves, and the ref_count ensures
-            // nobody else is holding a reference to it.
-            unsafe {
-                libc::munmap(self.addr as *mut libc::c_void, self.size);
-            }
+        // This is safe because we mmap the area at addr ourselves, and nobody
+        // else is holding a reference to it.
+        unsafe {
+            libc::munmap(self.addr as *mut libc::c_void, self.size);
         }
     }
 }
 
 #[cfg(test)]
-mod test {
+mod tests {
     use super::*;
 
     #[test]
@@ -125,12 +301,10 @@ mod test {
     }
 
     #[test]
-    fn mutate_slices() {
-        let m = MemoryMapping::new(1024).unwrap();
-        assert_eq!(1024, m.size());
-        {
-            m.as_mut_slice()[128] = 55;
-        }
-        assert_eq!(m.as_slice()[128], 55);
+    fn test_write_past_end() {
+        let m = MemoryMapping::new(5).unwrap();
+        let res = m.write_slice(&[1, 2, 3, 4, 5, 6], 0);
+        assert!(res.is_ok());
+        assert_eq!(res.unwrap(), 5);
     }
 }