summary refs log tree commit diff
diff options
context:
space:
mode:
author Dylan Reid <dgreid@chromium.org> 2017-05-15 17:37:47 -0700
committer chrome-bot <chrome-bot@chromium.org> 2017-05-25 22:51:14 -0700
commitd4eaa4056f84b256ccd7684d9cf7e4363b9cf413 (patch)
tree728fbb294cf5b603fe232c7e284c00ffdd9904ed
parent37285dc09d8fe5988fb98dc20bf00fdda38b0843 (diff)
downloadcrosvm-d4eaa4056f84b256ccd7684d9cf7e4363b9cf413.tar
crosvm-d4eaa4056f84b256ccd7684d9cf7e4363b9cf413.tar.gz
crosvm-d4eaa4056f84b256ccd7684d9cf7e4363b9cf413.tar.bz2
crosvm-d4eaa4056f84b256ccd7684d9cf7e4363b9cf413.tar.lz
crosvm-d4eaa4056f84b256ccd7684d9cf7e4363b9cf413.tar.xz
crosvm-d4eaa4056f84b256ccd7684d9cf7e4363b9cf413.tar.zst
crosvm-d4eaa4056f84b256ccd7684d9cf7e4363b9cf413.zip
sys_util: Add guest_memory
Add a module for accessing guest memory.
This module will replace all the slices that are used to access it
currently as those slices aren't valid because the memory is volatile
and a volatile slice doesn't exist in rust.

Modify the existing users so they no longer depend on the deprecated slice
access.

Change-Id: Ic0e86dacf66f68bd88ed9cc197cb14e45ada891d
Signed-off-by: Dylan Reid <dgreid@chromium.org>
Reviewed-on: https://chromium-review.googlesource.com/509919
-rw-r--r--kernel_loader/src/lib.rs110
-rw-r--r--kvm/src/lib.rs170
-rw-r--r--kvm/tests/real_run_adder.rs21
-rw-r--r--sys_util/src/guest_address.rs137
-rw-r--r--sys_util/src/guest_memory.rs318
-rw-r--r--sys_util/src/lib.rs4
-rw-r--r--sys_util/src/mmap.rs256
-rw-r--r--x86_64/src/lib.rs128
-rw-r--r--x86_64/src/regs.rs140
9 files changed, 984 insertions, 300 deletions
diff --git a/kernel_loader/src/lib.rs b/kernel_loader/src/lib.rs
index 0d88fa5..89b6348 100644
--- a/kernel_loader/src/lib.rs
+++ b/kernel_loader/src/lib.rs
@@ -8,6 +8,8 @@ use std::mem;
 use std::ffi::CStr;
 use std::io::{Read, Seek, SeekFrom};
 
+use sys_util::{GuestAddress, GuestMemory};
+
 #[allow(dead_code)]
 #[allow(non_camel_case_types)]
 #[allow(non_snake_case)]
@@ -17,11 +19,12 @@ mod elf;
 #[derive(Debug, PartialEq)]
 pub enum Error {
     BigEndianElfOnLittle,
+    CommandLineCopy,
     CommandLineOverflow,
-    ImagePastRamEnd,
     InvalidElfMagicNumber,
     InvalidProgramHeaderSize,
     InvalidProgramHeaderOffset,
+    InvalidProgramHeaderAddress,
     ReadElfHeader,
     ReadKernelImage,
     ReadProgramHeader,
@@ -35,10 +38,10 @@ pub type Result<T> = std::result::Result<T, Error>;
 ///
 /// # Arguments
 ///
-/// * `guest_mem` - A u8 slice that will be partially overwritten by the kernel.
+/// * `guest_mem` - The guest memory region the kernel is written to.
 /// * `kernel_start` - The offset into `guest_mem` at which to load the kernel.
 /// * `kernel_image` - Input vmlinux image.
-pub fn load_kernel<F>(guest_mem: &mut [u8], kernel_start: usize, kernel_image: &mut F) -> Result<()>
+pub fn load_kernel<F>(guest_mem: &GuestMemory, kernel_start: GuestAddress, kernel_image: &mut F) -> Result<()>
     where F: Read + Seek
 {
     let mut ehdr: elf::Elf64_Ehdr = Default::default();
@@ -81,15 +84,12 @@ pub fn load_kernel<F>(guest_mem: &mut [u8], kernel_start: usize, kernel_image: &
             continue;
         }
 
-        let mem_offset = phdr.p_paddr as usize + kernel_start;
-        let mem_end = mem_offset + phdr.p_filesz as usize;
-        if mem_end > guest_mem.len() {
-            return Err(Error::ImagePastRamEnd);
-        }
-        let mut dst = &mut guest_mem[mem_offset..mem_end];
         kernel_image.seek(SeekFrom::Start(phdr.p_offset))
             .map_err(|_| Error::SeekKernelStart)?;
-        kernel_image.read_exact(dst)
+
+        let mem_offset = kernel_start.checked_add(phdr.p_paddr as usize)
+            .ok_or(Error::InvalidProgramHeaderAddress)?;
+        guest_mem.read_to_memory(mem_offset, kernel_image, phdr.p_filesz as usize)
             .map_err(|_| Error::ReadKernelImage)?;
     }
 
@@ -101,23 +101,23 @@ pub fn load_kernel<F>(guest_mem: &mut [u8], kernel_start: usize, kernel_image: &
 /// # Arguments
 ///
 /// * `guest_mem` - A u8 slice that will be partially overwritten by the command line.
-/// * `kernel_start` - The offset into `guest_mem` at which to load the command line.
+/// * `guest_addr` - The address in `guest_mem` at which to load the command line.
 /// * `cmdline` - The kernel command line.
-pub fn load_cmdline(guest_mem: &mut [u8], offset: usize, cmdline: &CStr) -> Result<()> {
+pub fn load_cmdline(guest_mem: &GuestMemory, guest_addr: GuestAddress, cmdline: &CStr) -> Result<()> {
     let len = cmdline.to_bytes().len();
     if len <= 0 {
         return Ok(());
     }
 
-    let end = offset + len + 1; // Extra for null termination.
-    if end > guest_mem.len() {
-        return Err(Error::CommandLineOverflow);
-    }
-    let cmdline_slice = &mut guest_mem[offset..end];
-    for (i, s) in cmdline_slice.iter_mut().enumerate() {
-        *s = cmdline.to_bytes().get(i).map_or(0, |c| (*c as u8));
+    let end = guest_addr.checked_add(len + 1)
+        .ok_or(Error::CommandLineOverflow)?; // Extra for null termination.
+    if end > guest_mem.end_addr() {
+        return Err(Error::CommandLineOverflow)?;
     }
 
+    guest_mem.write_slice_at_addr(cmdline.to_bytes_with_nul(), guest_addr)
+        .map_err(|_| Error::CommandLineCopy)?;
+
     Ok(())
 }
 
@@ -125,28 +125,46 @@ pub fn load_cmdline(guest_mem: &mut [u8], offset: usize, cmdline: &CStr) -> Resu
 mod test {
     use std::io::Cursor;
     use super::*;
+    use sys_util::{GuestAddress, GuestMemory};
+
+    const MEM_SIZE: usize = 0x8000;
+
+    fn create_guest_mem() -> GuestMemory {
+        GuestMemory::new(&vec![(GuestAddress(0x0), MEM_SIZE)]).unwrap()
+    }
 
     #[test]
     fn cmdline_overflow() {
-        let mut mem = vec![0; 50];
+        let gm = create_guest_mem();
+        let cmdline_address = GuestAddress(MEM_SIZE - 5);
         assert_eq!(Err(Error::CommandLineOverflow),
-                   load_cmdline(mem.as_mut_slice(),
-                                45,
+                   load_cmdline(&gm,
+                                cmdline_address,
                                 CStr::from_bytes_with_nul(b"12345\0").unwrap()));
     }
 
     #[test]
     fn cmdline_write_end() {
-        let mut mem = vec![0; 50];
+        let gm = create_guest_mem();
+        let mut cmdline_address = GuestAddress(45);
         assert_eq!(Ok(()),
-                   load_cmdline(mem.as_mut_slice(),
-                                45,
+                   load_cmdline(&gm,
+                                cmdline_address,
                                 CStr::from_bytes_with_nul(b"1234\0").unwrap()));
-        assert_eq!(mem[45], '1' as u8);
-        assert_eq!(mem[46], '2' as u8);
-        assert_eq!(mem[47], '3' as u8);
-        assert_eq!(mem[48], '4' as u8);
-        assert_eq!(mem[49], '\0' as u8);
+        let val: u8 = gm.read_obj_from_addr(cmdline_address).unwrap();
+        assert_eq!(val, '1' as u8);
+        cmdline_address = cmdline_address.unchecked_add(1);
+        let val: u8 = gm.read_obj_from_addr(cmdline_address).unwrap();
+        assert_eq!(val, '2' as u8);
+        cmdline_address = cmdline_address.unchecked_add(1);
+        let val: u8 = gm.read_obj_from_addr(cmdline_address).unwrap();
+        assert_eq!(val, '3' as u8);
+        cmdline_address = cmdline_address.unchecked_add(1);
+        let val: u8 = gm.read_obj_from_addr(cmdline_address).unwrap();
+        assert_eq!(val, '4' as u8);
+        cmdline_address = cmdline_address.unchecked_add(1);
+        let val: u8 = gm.read_obj_from_addr(cmdline_address).unwrap();
+        assert_eq!(val, '\0' as u8);
     }
 
     // Elf64 image that prints hello world on x86_64.
@@ -158,50 +176,42 @@ mod test {
 
     #[test]
     fn load_elf() {
+        let gm = create_guest_mem();
+        let kernel_addr = GuestAddress(0x0);
         let image = make_elf_bin();
-        let mut mem = Vec::<u8>::with_capacity(0x8000);
-        unsafe {
-            mem.set_len(0x8000);
-        }
         assert_eq!(Ok(()),
-                   load_kernel(mem.as_mut_slice(), 0x0, &mut Cursor::new(&image)));
+                   load_kernel(&gm, kernel_addr, &mut Cursor::new(&image)));
     }
 
     #[test]
     fn bad_magic() {
-        let mut mem = Vec::<u8>::with_capacity(0x8000);
-        unsafe {
-            mem.set_len(0x8000);
-        }
+        let gm = create_guest_mem();
+        let kernel_addr = GuestAddress(0x0);
         let mut bad_image = make_elf_bin();
         bad_image[0x1] = 0x33;
         assert_eq!(Err(Error::InvalidElfMagicNumber),
-                   load_kernel(mem.as_mut_slice(), 0x0, &mut Cursor::new(&bad_image)));
+                   load_kernel(&gm, kernel_addr, &mut Cursor::new(&bad_image)));
     }
 
     #[test]
     fn bad_endian() {
         // Only little endian is supported
-        let mut mem = Vec::<u8>::with_capacity(0x8000);
-        unsafe {
-            mem.set_len(0x8000);
-        }
+        let gm = create_guest_mem();
+        let kernel_addr = GuestAddress(0x0);
         let mut bad_image = make_elf_bin();
         bad_image[0x5] = 2;
         assert_eq!(Err(Error::BigEndianElfOnLittle),
-                   load_kernel(mem.as_mut_slice(), 0x0, &mut Cursor::new(&bad_image)));
+                   load_kernel(&gm, kernel_addr, &mut Cursor::new(&bad_image)));
     }
 
     #[test]
     fn bad_phoff() {
         // program header has to be past the end of the elf header
-        let mut mem = Vec::<u8>::with_capacity(0x8000);
-        unsafe {
-            mem.set_len(0x8000);
-        }
+        let gm = create_guest_mem();
+        let kernel_addr = GuestAddress(0x0);
         let mut bad_image = make_elf_bin();
         bad_image[0x20] = 0x10;
         assert_eq!(Err(Error::InvalidProgramHeaderOffset),
-                   load_kernel(mem.as_mut_slice(), 0x0, &mut Cursor::new(&bad_image)));
+                   load_kernel(&gm, kernel_addr, &mut Cursor::new(&bad_image)));
     }
 }
diff --git a/kvm/src/lib.rs b/kvm/src/lib.rs
index c440401..3b19b82 100644
--- a/kvm/src/lib.rs
+++ b/kvm/src/lib.rs
@@ -19,7 +19,7 @@ use libc::{open, O_RDWR, EINVAL, ENOSPC};
 
 use kvm_sys::*;
 
-use sys_util::{MemoryMapping, EventFd, Error, Result};
+use sys_util::{GuestAddress, GuestMemory, MemoryMapping, EventFd, Error, Result};
 
 pub use cap::*;
 
@@ -51,6 +51,23 @@ unsafe fn ioctl_with_mut_ptr<F: AsRawFd, T>(fd: &F, nr: c_ulong, arg: *mut T) ->
     libc::ioctl(fd.as_raw_fd(), nr, arg as *mut c_void)
 }
 
+unsafe fn set_user_memory_region<F: AsRawFd>(fd: &F, slot: u32, guest_addr: u64, memory_size: u64, userspace_addr: u64) -> Result<()> {
+    let region = kvm_userspace_memory_region {
+        slot: slot,
+        flags: 0,
+        guest_phys_addr: guest_addr,
+        memory_size: memory_size,
+        userspace_addr: userspace_addr,
+    };
+
+    let ret = ioctl_with_ref(fd, KVM_SET_USER_MEMORY_REGION(), &region);
+    if ret == 0 {
+        Ok(())
+    } else {
+        errno_result()
+    }
+}
+
 /// A wrapper around opening and using `/dev/kvm`.
 ///
 /// Useful for querying extensions and basic values from the KVM backend. A `Kvm` is required to
@@ -137,11 +154,6 @@ impl AsRawFd for Kvm {
     }
 }
 
-struct MemoryRegion {
-    mapping: MemoryMapping,
-    guest_addr: u64,
-}
-
 /// An address either in programmable I/O space or in memory mapped I/O space.
 pub enum IoeventAddress {
     Pio(u64),
@@ -159,20 +171,35 @@ impl Into<u64> for NoDatamatch {
 /// A wrapper around creating and using a VM.
 pub struct Vm {
     vm: File,
-    mem_regions: Vec<MemoryRegion>,
+    guest_mem: GuestMemory,
+    next_mem_slot: usize,
 }
 
 impl Vm {
     /// Constructs a new `Vm` using the given `Kvm` instance.
-    pub fn new(kvm: &Kvm) -> Result<Vm> {
+    pub fn new(kvm: &Kvm, guest_mem: GuestMemory) -> Result<Vm> {
         // Safe because we know kvm is a real kvm fd as this module is the only one that can make
         // Kvm objects.
         let ret = unsafe { ioctl(kvm, KVM_CREATE_VM()) };
         if ret >= 0 {
             // Safe because we verify the value of ret and we are the owners of the fd.
+            let vm_file = unsafe { File::from_raw_fd(ret) };
+            guest_mem.with_regions(|index, guest_addr, size, host_addr| {
+                unsafe {
+                    // Safe because the guest regions are guaranteed not to overlap.
+                    set_user_memory_region(&vm_file, index as u32,
+                        guest_addr.offset() as u64,
+                        size as u64,
+                        host_addr as u64)
+                }
+            })?;
+
+            let next_mem_slot = guest_mem.num_regions();
+
             Ok(Vm {
-                vm: unsafe { File::from_raw_fd(ret) },
-                mem_regions: Vec::new(),
+                vm: vm_file,
+                guest_mem: guest_mem,
+                next_mem_slot: next_mem_slot,
             })
         } else {
             errno_result()
@@ -181,85 +208,41 @@ impl Vm {
 
     /// Inserts the given `MemoryMapping` into the VM's address space at `guest_addr`.
     ///
-    /// This returns on the memory slot number on success. Note that memory inserted into the VM's
-    /// address space must not overlap with any other memory slot's region.
-    pub fn add_memory(&mut self, guest_addr: u64, mem: MemoryMapping) -> Result<u32> {
-        let size = mem.size() as u64;
-        let guest_start = guest_addr;
-        let guest_end = guest_start + size;
-
-        for region in self.mem_regions.iter() {
-            let region_start = region.guest_addr;
-            let region_end = region_start + region.mapping.size() as u64;
-            if guest_start < region_end && guest_end > region_start {
-                return Err(Error::new(ENOSPC))
-            }
-
+    /// Note that memory inserted into the VM's address space must not overlap
+    /// with any other memory slot's region.
+    pub fn add_device_memory(&mut self, guest_addr: GuestAddress, mem: MemoryMapping) -> Result<()> {
+        if guest_addr < self.guest_mem.end_addr() {
+            return Err(Error::new(ENOSPC));
         }
 
-        let slot = self.mem_regions.len() as u32;
-
         // Safe because we check that the given guest address is valid and has no overlaps. We also
         // know that the pointer and size are correct because the MemoryMapping interface ensures
         // this.
         unsafe {
-            self.set_user_memory_region(slot, guest_addr, size, mem.as_ptr() as u64)
-        }?;
-
-        self.mem_regions.push(MemoryRegion{
-            mapping: mem,
-            guest_addr: guest_addr,
-        });
+            set_user_memory_region(&self.vm, self.next_mem_slot as u32,
+                                        guest_addr.offset() as u64,
+                                        mem.size() as u64,
+                                        mem.as_ptr() as u64)?;
+        };
+        self.next_mem_slot += 1;
 
-        Ok(slot)
+        Ok(())
     }
 
     /// Gets a reference to the memory at the given address in the VM's address space.
-    pub fn get_memory(&self, guest_addr: u64) -> Option<&[u8]> {
-        for region in self.mem_regions.iter() {
-            if guest_addr >= region.guest_addr && guest_addr < region.guest_addr + region.mapping.size() as u64 {
-                let offset = (guest_addr - region.guest_addr) as usize;
-                return Some(&region.mapping.as_slice()[offset..])
-            }
-        }
-        None
-    }
-
-    /// Gets a mutable reference to the memory at the given address in the VM's address space.
-    pub fn get_memory_mut(&mut self, guest_addr: u64) -> Option<&mut [u8]> {
-        for region in self.mem_regions.iter_mut() {
-            if guest_addr >= region.guest_addr && guest_addr < region.guest_addr + region.mapping.size() as u64 {
-                let offset = (guest_addr - region.guest_addr) as usize;
-                return Some(&mut region.mapping.as_mut_slice()[offset..])
-            }
-        }
-        None
-    }
-
-    unsafe fn set_user_memory_region(&self, slot: u32, guest_addr: u64, memory_size: u64, userspace_addr: u64) -> Result<()> {
-        let region = kvm_userspace_memory_region {
-            slot: slot,
-            flags: 0,
-            guest_phys_addr: guest_addr,
-            memory_size: memory_size,
-            userspace_addr: userspace_addr,
-        };
-
-        let ret = ioctl_with_ref(self, KVM_SET_USER_MEMORY_REGION(), &region);
-        if ret == 0 {
-            Ok(())
-        } else {
-            errno_result()
-        }
+    pub fn get_memory(&self) -> &GuestMemory {
+        &self.guest_mem
     }
 
     /// Sets the address of the three-page region in the VM's address space.
     ///
     /// See the documentation on the KVM_SET_TSS_ADDR ioctl.
     #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
-    pub fn set_tss_addr(&self, addr: c_ulong) -> Result<()> {
+    pub fn set_tss_addr(&self, addr: GuestAddress) -> Result<()> {
         // Safe because we know that our file is a VM fd and we verify the return result.
-        let ret = unsafe { ioctl_with_val(self, KVM_SET_TSS_ADDR(), addr) };
+        let ret = unsafe {
+            ioctl_with_val(self, KVM_SET_TSS_ADDR(), addr.offset() as u64)
+        };
         if ret == 0 {
             Ok(())
         } else {
@@ -437,7 +420,8 @@ impl Vcpu {
         // the value of the fd and we own the fd.
         let vcpu = unsafe { File::from_raw_fd(vcpu_fd) };
 
-        let run_mmap = MemoryMapping::from_fd(&vcpu, run_mmap_size)?;
+        let run_mmap = MemoryMapping::from_fd(&vcpu, run_mmap_size)
+            .map_err(|_| Error::new(ENOSPC))?;
 
         Ok(Vcpu {
             vcpu: vcpu,
@@ -721,7 +705,8 @@ mod tests {
     #[test]
     fn create_vm() {
         let kvm = Kvm::new().unwrap();
-        Vm::new(&kvm).unwrap();
+        let gm = GuestMemory::new(&vec![(GuestAddress(0), 0x1000)]).unwrap();
+        Vm::new(&kvm, gm).unwrap();
     }
 
     #[test]
@@ -735,32 +720,32 @@ mod tests {
     #[test]
     fn add_memory() {
         let kvm = Kvm::new().unwrap();
-        let mut vm = Vm::new(&kvm).unwrap();
+        let gm = GuestMemory::new(&vec![(GuestAddress(0), 0x1000)]).unwrap();
+        let mut vm = Vm::new(&kvm, gm).unwrap();
         let mem_size = 0x1000;
         let mem = MemoryMapping::new(mem_size).unwrap();
-        vm.add_memory(0x1000, mem).unwrap();
+        vm.add_device_memory(GuestAddress(0x1000), mem).unwrap();
     }
 
-    #[test]
+     #[test]
     fn overlap_memory() {
         let kvm = Kvm::new().unwrap();
-        let mut vm = Vm::new(&kvm).unwrap();
+        let gm = GuestMemory::new(&vec![(GuestAddress(0), 0x10000)]).unwrap();
+        let mut vm = Vm::new(&kvm, gm).unwrap();
         let mem_size = 0x2000;
-        let mem1 = MemoryMapping::new(mem_size).unwrap();
-        let mem2 = MemoryMapping::new(mem_size).unwrap();
-        vm.add_memory(0x1000, mem1).unwrap();
-        assert!(vm.add_memory(0x2000, mem2).is_err());
+        let mem = MemoryMapping::new(mem_size).unwrap();
+        assert!(vm.add_device_memory(GuestAddress(0x2000), mem).is_err());
     }
 
     #[test]
     fn get_memory() {
         let kvm = Kvm::new().unwrap();
-        let mut vm = Vm::new(&kvm).unwrap();
-        let mem_size = 0x1000;
-        let mem = MemoryMapping::new(mem_size).unwrap();
-        mem.as_mut_slice()[0xf0] = 67;
-        vm.add_memory(0x1000, mem).unwrap();
-        assert_eq!(vm.get_memory(0x10f0).unwrap()[0], 67);
+        let gm = GuestMemory::new(&vec![(GuestAddress(0), 0x1000)]).unwrap();
+        let vm = Vm::new(&kvm, gm).unwrap();
+        let obj_addr = GuestAddress(0xf0);
+        vm.get_memory().write_obj_at_addr(67u8, obj_addr).unwrap();
+        let read_val: u8 = vm.get_memory().read_obj_from_addr(obj_addr).unwrap();
+        assert_eq!(read_val, 67u8);
     }
 
     #[test]
@@ -768,7 +753,8 @@ mod tests {
         assert_eq!(std::mem::size_of::<NoDatamatch>(), 0);
 
         let kvm = Kvm::new().unwrap();
-        let vm = Vm::new(&kvm).unwrap();
+        let gm = GuestMemory::new(&vec![(GuestAddress(0), 0x10000)]).unwrap();
+        let vm = Vm::new(&kvm, gm).unwrap();
         let evtfd = EventFd::new().unwrap();
         vm.register_ioevent(&evtfd, IoeventAddress::Pio(0xf4), NoDatamatch).unwrap();
         vm.register_ioevent(&evtfd, IoeventAddress::Mmio(0x1000), NoDatamatch).unwrap();
@@ -781,7 +767,8 @@ mod tests {
     #[test]
     fn register_irqfd() {
         let kvm = Kvm::new().unwrap();
-        let vm = Vm::new(&kvm).unwrap();
+        let gm = GuestMemory::new(&vec![(GuestAddress(0), 0x10000)]).unwrap();
+        let vm = Vm::new(&kvm, gm).unwrap();
         let evtfd1 = EventFd::new().unwrap();
         let evtfd2 = EventFd::new().unwrap();
         let evtfd3 = EventFd::new().unwrap();
@@ -794,7 +781,8 @@ mod tests {
     #[test]
     fn create_vcpu() {
         let kvm = Kvm::new().unwrap();
-        let vm = Vm::new(&kvm).unwrap();
+        let gm = GuestMemory::new(&vec![(GuestAddress(0), 0x10000)]).unwrap();
+        let vm = Vm::new(&kvm, gm).unwrap();
         Vcpu::new(0, &kvm, &vm).unwrap();
     }
 
diff --git a/kvm/tests/real_run_adder.rs b/kvm/tests/real_run_adder.rs
index aa90a19..26b1add 100644
--- a/kvm/tests/real_run_adder.rs
+++ b/kvm/tests/real_run_adder.rs
@@ -10,7 +10,7 @@ extern crate kvm;
 
 use kvm::*;
 use kvm_sys::kvm_regs;
-use sys_util::MemoryMapping;
+use sys_util::{GuestAddress, GuestMemory};
 
 #[test]
 fn test_run() {
@@ -26,14 +26,17 @@ fn test_run() {
         0xf4,             /* hlt */
     ];
 
+    let mem_size = 0x1000;
+    let load_addr = GuestAddress(0x1000);
+    let mem = GuestMemory::new(&vec![(load_addr, mem_size)]).unwrap();
+
     let kvm = Kvm::new().expect("new kvm failed");
-    let mut vm = Vm::new(&kvm).expect("new vm failed");
+    let vm = Vm::new(&kvm, mem).expect("new vm failed");
     let vcpu = Vcpu::new(0, &kvm, &vm).expect("new vcpu failed");
 
-    let mem_size = 0x1000;
-    let mem = MemoryMapping::new(mem_size).expect("new mmap failed");
-    mem.as_mut_slice()[..code.len()].copy_from_slice(&code);
-    vm.add_memory(0x1000, mem).expect("adding memory failed");
+    vm.get_memory()
+        .write_slice_at_addr(&code, load_addr)
+        .expect("Writing code to memory failed.");
 
     let mut vcpu_sregs = vcpu.get_sregs().expect("get sregs failed");
     assert_ne!(vcpu_sregs.cs.base, 0);
@@ -62,5 +65,9 @@ fn test_run() {
     }
 
     assert_eq!(out, "9\n");
-    assert_eq!(vm.get_memory(0x1000).unwrap()[0xf1], 0x13);
+    let result: u8 =
+        vm.get_memory()
+            .read_obj_from_addr(load_addr.checked_add(0xf1).unwrap())
+            .expect("Error reading the result.");
+    assert_eq!(result, 0x13);
 }
diff --git a/sys_util/src/guest_address.rs b/sys_util/src/guest_address.rs
new file mode 100644
index 0000000..09b6578
--- /dev/null
+++ b/sys_util/src/guest_address.rs
@@ -0,0 +1,137 @@
+// Copyright 2017 The Chromium OS Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+//! Represents an address in the guest's memory space.
+
+use std::cmp::{Eq, Ord, Ordering, PartialEq, PartialOrd};
+use std::ops::{BitAnd, BitOr};
+
+/// Represents an Address in the guest's memory.
+#[derive(Clone, Copy, Debug)]
+pub struct GuestAddress(pub usize);
+
+impl GuestAddress {
+    /// Returns the offset from this address to the given base address.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use sys_util::GuestAddress;
+    ///   let base = GuestAddress(0x100);
+    ///   let addr = GuestAddress(0x150);
+    ///   assert_eq!(addr.offset_from(base), 0x50usize);
+    /// ```
+    pub fn offset_from(&self, base: GuestAddress) -> usize {
+        self.0 - base.0
+    }
+
+    /// Returns the address as a usize offset from 0x0.
+    /// Use this when a raw number is needed to pass to the kernel.
+    pub fn offset(&self) -> usize {
+        self.0
+    }
+
+    /// Returns the result of the add or None if there is overflow.
+    pub fn checked_add(&self, other: usize) -> Option<GuestAddress> {
+        self.0.checked_add(other).map(GuestAddress)
+    }
+
+    /// Returns the result of the base address + the size.
+    /// Only use this when `offset` is guaranteed not to overflow.
+    pub fn unchecked_add(&self, offset: usize) -> GuestAddress {
+        GuestAddress(self.0 + offset)
+    }
+
+    /// Returns the result of the subtraction or None if there is underflow.
+    pub fn checked_sub(&self, other: usize) -> Option<GuestAddress> {
+        self.0.checked_sub(other).map(GuestAddress)
+    }
+
+    /// Returns the bitwise and of the address with the given mask.
+    pub fn mask(&self, mask: u64) -> GuestAddress {
+        GuestAddress(self.0 & mask as usize)
+    }
+}
+
+impl BitAnd<u64> for GuestAddress {
+    type Output = GuestAddress;
+
+    fn bitand(self, other: u64) -> GuestAddress {
+        GuestAddress(self.0 & other as usize)
+    }
+}
+
+impl BitOr<u64> for GuestAddress {
+    type Output = GuestAddress;
+
+    fn bitor(self, other: u64) -> GuestAddress {
+        GuestAddress(self.0 | other as usize)
+    }
+}
+
+impl PartialEq for GuestAddress {
+    fn eq(&self, other: &GuestAddress) -> bool {
+        self.0 == other.0
+    }
+}
+impl Eq for GuestAddress {}
+
+impl Ord for GuestAddress {
+    fn cmp(&self, other: &GuestAddress) -> Ordering {
+        self.0.cmp(&other.0)
+    }
+}
+
+impl PartialOrd for GuestAddress {
+    fn partial_cmp(&self, other: &GuestAddress) -> Option<Ordering> {
+        Some(self.cmp(other))
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn equals() {
+        let a = GuestAddress(0x300);
+        let b = GuestAddress(0x300);
+        let c = GuestAddress(0x301);
+        assert_eq!(a, b);
+        assert_eq!(b, a);
+        assert_ne!(a, c);
+        assert_ne!(c, a);
+    }
+
+    #[test]
+    fn cmp() {
+        let a = GuestAddress(0x300);
+        let b = GuestAddress(0x301);
+        assert!(a < b);
+        assert!(b > a);
+        assert!(!(a < a));
+    }
+
+    #[test]
+    fn mask() {
+        let a = GuestAddress(0x5050);
+        assert_eq!(GuestAddress(0x5000), a & 0xff00u64);
+        assert_eq!(GuestAddress(0x5055), a | 0x0005u64);
+    }
+
+    #[test]
+    fn add_sub() {
+        let a = GuestAddress(0x50);
+        let b = GuestAddress(0x60);
+        assert_eq!(Some(GuestAddress(0xb0)), a.checked_add(0x60));
+        assert_eq!(0x10, b.offset_from(a));
+    }
+
+    #[test]
+    fn checked_add_overflow() {
+        let a = GuestAddress(0xffffffffffffff55);
+        assert_eq!(Some(GuestAddress(0xffffffffffffff57)), a.checked_add(2));
+        assert!(a.checked_add(0xf0).is_none());
+    }
+}
diff --git a/sys_util/src/guest_memory.rs b/sys_util/src/guest_memory.rs
new file mode 100644
index 0000000..0dfb709
--- /dev/null
+++ b/sys_util/src/guest_memory.rs
@@ -0,0 +1,318 @@
+// Copyright 2017 The Chromium OS Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+//! Track memory regions that are mapped to the guest VM.
+
+use std::io::{Read, Write};
+use std::result;
+use std::sync::Arc;
+
+use guest_address::GuestAddress;
+use mmap::MemoryMapping;
+
+#[derive(Clone, Debug, PartialEq)]
+pub enum Error {
+    InvalidGuestAddress(GuestAddress),
+    MemoryMappingFailed,
+    MemoryRegionOverlap,
+    NoMemoryRegions,
+    RegionOperationFailed,
+}
+pub type Result<T> = result::Result<T, Error>;
+
+struct MemoryRegion {
+    mapping: MemoryMapping,
+    guest_base: GuestAddress,
+}
+
+fn region_end(region: &MemoryRegion) -> GuestAddress {
+    // unchecked_add is safe as the region bounds were checked when it was created.
+    region.guest_base.unchecked_add(region.mapping.size())
+}
+
+/// Tracks a memory region and where it is mapped in the guest.
+#[derive(Clone)]
+pub struct GuestMemory {
+    regions: Arc<Vec<MemoryRegion>>,
+}
+
+impl GuestMemory {
+    /// Creates a container for guest memory regions.
+    /// Valid memory regions are specified as a Vec of (Address, Size) tuples sorted by Address.
+    pub fn new(ranges: &[(GuestAddress, usize)]) -> Result<GuestMemory> {
+        if ranges.is_empty() {
+            return Err(Error::NoMemoryRegions);
+        }
+
+        let mut regions = Vec::<MemoryRegion>::new();
+        for range in ranges.iter() {
+            if let Some(last) = regions.last() {
+                if last.guest_base
+                       .checked_add(last.mapping.size())
+                       .map_or(true, |a| a > range.0) {
+                    return Err(Error::MemoryRegionOverlap);
+                }
+            }
+
+            let mapping = MemoryMapping::new(range.1).map_err(|_| Error::MemoryMappingFailed)?;
+            regions.push(MemoryRegion {
+                             mapping: mapping,
+                             guest_base: range.0,
+                         });
+        }
+
+        Ok(GuestMemory { regions: Arc::new(regions) })
+    }
+
+    /// Returns the end address of memory.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use sys_util::{GuestAddress, GuestMemory, MemoryMapping};
+    /// # fn test_end_addr() -> Result<(), ()> {
+    ///     let start_addr = GuestAddress(0x1000);
+    ///     let mut gm = GuestMemory::new(&vec![(start_addr, 0x400)]).map_err(|_| ())?;
+    ///     assert_eq!(start_addr.checked_add(0x400), Some(gm.end_addr()));
+    ///     Ok(())
+    /// # }
+    /// ```
+    pub fn end_addr(&self) -> GuestAddress {
+        self.regions
+            .iter()
+            .max_by_key(|region| region.guest_base)
+            .map_or(GuestAddress(0), |region| region_end(region))
+    }
+
+    /// Returns true if the given address is within the memory range available to the guest.
+    pub fn address_in_range(&self, addr: GuestAddress) -> bool {
+        addr < self.end_addr()
+    }
+
+    /// Returns the address plus the offset if it is in range.
+    pub fn checked_offset(&self, addr: GuestAddress, offset: usize) -> Option<GuestAddress> {
+        addr.checked_add(offset)
+            .and_then(|a| if a < self.end_addr() { Some(a) } else { None })
+    }
+
+    /// Returns the number of memory regions.
+    pub fn num_regions(&self) -> usize {
+        self.regions.len()
+    }
+
+    /// Perform the specified action on each region's addresses.
+    pub fn with_regions<F, E>(&self, cb: F) -> result::Result<(), E>
+        where F: Fn(usize, GuestAddress, usize, usize) -> result::Result<(), E>
+    {
+        for (index, region) in self.regions.iter().enumerate() {
+            cb(index,
+               region.guest_base,
+               region.mapping.size(),
+               region.mapping.as_ptr() as usize)?;
+        }
+        Ok(())
+    }
+
+    /// Writes a slice to guest memory at the specified guest address.
+    /// Returns Ok(<number of bytes written>).  The number of bytes written can
+    /// be less than the length of the slice if there isn't enough room in the
+    /// memory region.
+    ///
+    /// # Examples
+    /// * Write a slice at guest address 0x200.
+    ///
+    /// ```
+    /// # use sys_util::{GuestAddress, GuestMemory, MemoryMapping};
+    /// # fn test_write_u64() -> Result<(), ()> {
+    /// #   let start_addr = GuestAddress(0x1000);
+    /// #   let mut gm = GuestMemory::new(&vec![(start_addr, 0x400)]).map_err(|_| ())?;
+    ///     let res = gm.write_slice_at_addr(&[1,2,3,4,5], GuestAddress(0x200));
+    ///     assert_eq!(Ok(5), res);
+    ///     Ok(())
+    /// # }
+    /// ```
+    pub fn write_slice_at_addr(&self, buf: &[u8], guest_addr: GuestAddress) -> Result<usize> {
+        self.do_in_region(guest_addr, move |mapping, offset| {
+            mapping
+                .write_slice(buf, offset)
+                .map_err(|_| Error::InvalidGuestAddress(guest_addr))
+        })
+    }
+
+    /// Reads an object from guest memory at the given guest address.
+    /// Reading from a volatile area isn't strictly safe as it could change
+    /// mid-read.  However, as long as the type T is plain old data and can
+    /// handle random initialization, everything will be OK.
+    ///
+    /// # Examples
+    /// * Read a u64 from two areas of guest memory backed by separate mappings.
+    ///
+    /// ```
+    /// # use sys_util::{GuestAddress, GuestMemory, MemoryMapping};
+    /// # fn test_read_u64() -> Result<u64, ()> {
+    /// #     let start_addr1 = GuestAddress(0x0);
+    /// #     let start_addr2 = GuestAddress(0x400);
+    /// #     let mut gm = GuestMemory::new(&vec![(start_addr1, 0x400), (start_addr2, 0x400)])
+    /// #         .map_err(|_| ())?;
+    ///       let num1: u64 = gm.read_obj_from_addr(GuestAddress(32)).map_err(|_| ())?;
+    ///       let num2: u64 = gm.read_obj_from_addr(GuestAddress(0x400+32)).map_err(|_| ())?;
+    /// #     Ok(num1 + num2)
+    /// # }
+    /// ```
+    pub fn read_obj_from_addr<T: Copy>(&self, guest_addr: GuestAddress) -> Result<T> {
+        self.do_in_region(guest_addr, |mapping, offset| {
+            mapping
+                .read_obj(offset)
+                .map_err(|_| Error::InvalidGuestAddress(guest_addr))
+        })
+    }
+
+    /// Writes an object to the memory region at the specified guest address.
+    /// Returns Ok(()) if the object fits, or Err if it extends past the end.
+    ///
+    /// # Examples
+    /// * Write a u64 at guest address 0x1100.
+    ///
+    /// ```
+    /// # use sys_util::{GuestAddress, GuestMemory, MemoryMapping};
+    /// # fn test_write_u64() -> Result<(), ()> {
+    /// #   let start_addr = GuestAddress(0x1000);
+    /// #   let mut gm = GuestMemory::new(&vec![(start_addr, 0x400)]).map_err(|_| ())?;
+    ///     gm.write_obj_at_addr(55u64, GuestAddress(0x1100))
+    ///         .map_err(|_| ())
+    /// # }
+    /// ```
+    pub fn write_obj_at_addr<T>(&self, val: T, guest_addr: GuestAddress) -> Result<()> {
+        self.do_in_region(guest_addr, move |mapping, offset| {
+            mapping
+                .write_obj(val, offset)
+                .map_err(|_| Error::InvalidGuestAddress(guest_addr))
+        })
+    }
+
+    /// Reads data from a readable object like a File and writes it to guest memory.
+    ///
+    /// # Arguments
+    /// * `guest_addr` - Begin writing memory at this offset.
+    /// * `src` - Read from `src` to memory.
+    /// * `count` - Read `count` bytes from `src` to memory.
+    ///
+    /// # Examples
+    ///
+    /// * Read bytes from /dev/urandom
+    ///
+    /// ```
+    /// # use sys_util::{GuestAddress, GuestMemory, MemoryMapping};
+    /// # use std::fs::File;
+    /// # use std::path::Path;
+    /// # fn test_read_random() -> Result<u32, ()> {
+    /// #     let start_addr = GuestAddress(0x1000);
+    /// #     let gm = GuestMemory::new(&vec![(start_addr, 0x400)]).map_err(|_| ())?;
+    ///       let mut file = File::open(Path::new("/dev/urandom")).map_err(|_| ())?;
+    ///       let addr = GuestAddress(0x1010);
+    ///       gm.read_to_memory(addr, &mut file, 128).map_err(|_| ())?;
+    ///       let read_addr = addr.checked_add(8).ok_or(())?;
+    ///       let rand_val: u32 = gm.read_obj_from_addr(read_addr).map_err(|_| ())?;
+    /// #     Ok(rand_val)
+    /// # }
+    /// ```
+    pub fn read_to_memory<F>(&self,
+                             guest_addr: GuestAddress,
+                             src: &mut F,
+                             count: usize)
+                             -> Result<()>
+        where F: Read
+    {
+        self.do_in_region(guest_addr, move |mapping, offset| {
+            mapping
+                .read_to_memory(offset, src, count)
+                .map_err(|_| Error::InvalidGuestAddress(guest_addr))
+        })
+    }
+
+    /// Writes data from memory to a writable object.
+    ///
+    /// # Arguments
+    /// * `guest_addr` - Begin reading memory from this offset.
+    /// * `dst` - Write from memory to `dst`.
+    /// * `count` - Write `count` bytes from memory to `dst`.
+    ///
+    /// # Examples
+    ///
+    /// * Write 128 bytes to /dev/null
+    ///
+    /// ```
+    /// # use sys_util::{GuestAddress, GuestMemory, MemoryMapping};
+    /// # use std::fs::File;
+    /// # use std::path::Path;
+    /// # fn test_write_null() -> Result<(), ()> {
+    /// #     let start_addr = GuestAddress(0x1000);
+    /// #     let gm = GuestMemory::new(&vec![(start_addr, 0x400)]).map_err(|_| ())?;
+    ///       let mut file = File::open(Path::new("/dev/null")).map_err(|_| ())?;
+    ///       let addr = GuestAddress(0x1010);
+    ///       gm.write_from_memory(addr, &mut file, 128).map_err(|_| ())?;
+    /// #     Ok(())
+    /// # }
+    /// ```
+    pub fn write_from_memory<F>(&self,
+                                guest_addr: GuestAddress,
+                                dst: &mut F,
+                                count: usize)
+                                -> Result<()>
+        where F: Write
+    {
+        self.do_in_region(guest_addr, move |mapping, offset| {
+            mapping
+                .write_from_memory(offset, dst, count)
+                .map_err(|_| Error::InvalidGuestAddress(guest_addr))
+        })
+    }
+
+    fn do_in_region<F, T>(&self, guest_addr: GuestAddress, cb: F) -> Result<T>
+        where F: FnOnce(&MemoryMapping, usize) -> Result<T>
+    {
+        for region in self.regions.iter() {
+            if guest_addr >= region.guest_base && guest_addr < region_end(region) {
+                return cb(&region.mapping, guest_addr.offset_from(region.guest_base));
+            }
+        }
+        Err(Error::InvalidGuestAddress(guest_addr))
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn two_regions() {
+        let start_addr1 = GuestAddress(0x0);
+        let start_addr2 = GuestAddress(0x400);
+        assert!(GuestMemory::new(&vec![(start_addr1, 0x400), (start_addr2, 0x400)]).is_ok());
+    }
+
+    #[test]
+    fn overlap_memory() {
+        let start_addr1 = GuestAddress(0x0);
+        let start_addr2 = GuestAddress(0x1000);
+        assert!(GuestMemory::new(&vec![(start_addr1, 0x2000), (start_addr2, 0x2000)]).is_err());
+    }
+
+    #[test]
+    fn test_read_u64() {
+        let start_addr1 = GuestAddress(0x0);
+        let start_addr2 = GuestAddress(0x1000);
+        let gm = GuestMemory::new(&vec![(start_addr1, 0x1000), (start_addr2, 0x1000)]).unwrap();
+
+        let val1: u64 = 0xaa55aa55aa55aa55;
+        let val2: u64 = 0x55aa55aa55aa55aa;
+        gm.write_obj_at_addr(val1, GuestAddress(0x500)).unwrap();
+        gm.write_obj_at_addr(val2, GuestAddress(0x1000 + 32))
+            .unwrap();
+        let num1: u64 = gm.read_obj_from_addr(GuestAddress(0x500)).unwrap();
+        let num2: u64 = gm.read_obj_from_addr(GuestAddress(0x1000 + 32)).unwrap();
+        assert_eq!(val1, num1);
+        assert_eq!(val2, num2);
+    }
+}
diff --git a/sys_util/src/lib.rs b/sys_util/src/lib.rs
index 2c7f682..79a9691 100644
--- a/sys_util/src/lib.rs
+++ b/sys_util/src/lib.rs
@@ -9,10 +9,14 @@ extern crate libc;
 mod mmap;
 mod eventfd;
 mod errno;
+mod guest_address;
+mod guest_memory;
 mod struct_util;
 
 pub use mmap::*;
 pub use eventfd::*;
 pub use errno::{Error, Result};
 use errno::errno_result;
+pub use guest_address::*;
+pub use guest_memory::*;
 pub use struct_util::*;
diff --git a/sys_util/src/mmap.rs b/sys_util/src/mmap.rs
index 22c6753..71b8d56 100644
--- a/sys_util/src/mmap.rs
+++ b/sys_util/src/mmap.rs
@@ -6,26 +6,45 @@
 //! mmap object leaves scope.
 
 use std;
+use std::io::{Read, Write};
 use std::ptr::null_mut;
 use std::os::unix::io::AsRawFd;
-use std::sync::Arc;
-use std::sync::atomic::{AtomicUsize, Ordering};
 
 use libc;
 
-use {Result, errno_result};
+use errno;
+
+#[derive(Debug)]
+pub enum Error {
+    /// Requested memory out of range.
+    InvalidAddress,
+    /// Couldn't read from the given source.
+    ReadFromSource,
+    /// `mmap` returned the given error.
+    SystemCallFailed(errno::Error),
+    /// Writing to memory failed.
+    WriteToMemory(std::io::Error),
+}
+pub type Result<T> = std::result::Result<T, Error>;
 
 /// Wraps an anonymous shared memory mapping in the current process.
 pub struct MemoryMapping {
     addr: *mut u8,
     size: usize,
-    ref_count: Arc<AtomicUsize>,
 }
 
+// Send and Sync aren't automatically inherited for the raw address pointer.
+// Accessing that pointer is only done through the stateless interface which
+// allows the object to be shared by multiple threads without a decrease in
+// safety.
 unsafe impl Send for MemoryMapping {}
+unsafe impl Sync for MemoryMapping {}
 
 impl MemoryMapping {
     /// Creates an anonymous shared mapping of `size` bytes.
+    ///
+    /// # Arguments
+    /// * `size` - Size of memory region in bytes.
     pub fn new(size: usize) -> Result<MemoryMapping> {
         // This is safe because we are creating an anonymous mapping in a place not already used by
         // any other area in this process.
@@ -37,17 +56,20 @@ impl MemoryMapping {
                        -1,
                        0)
         };
-        if addr == null_mut() {
-            return errno_result();
+        if addr.is_null() {
+            return Err(Error::SystemCallFailed(errno::Error::last()));
         }
         Ok(MemoryMapping {
                addr: addr as *mut u8,
                size: size,
-               ref_count: Arc::new(AtomicUsize::new(1)),
            })
     }
 
     /// Maps the first `size` bytes of the given `fd`.
+    ///
+    /// # Arguments
+    /// * `fd` - File descriptor to mmap from.
+    /// * `size` - Size of memory region in bytes.
     pub fn from_fd(fd: &AsRawFd, size: usize) -> Result<MemoryMapping> {
         // This is safe because we are creating a mapping in a place not already used by any other
         // area in this process.
@@ -59,63 +81,217 @@ impl MemoryMapping {
                        fd.as_raw_fd(),
                        0)
         };
-        if addr == null_mut() {
-            return errno_result();
+        if addr.is_null() {
+            return Err(Error::SystemCallFailed(errno::Error::last()));
         }
         Ok(MemoryMapping {
                addr: addr as *mut u8,
                size: size,
-               ref_count: Arc::new(AtomicUsize::new(1)),
            })
     }
 
+    /// Returns a pointer to the beginning of the memory region.  Should only be
+    /// used for passing this region to ioctls for setting guest memory.
     pub fn as_ptr(&self) -> *mut u8 {
         self.addr
     }
 
+    /// Returns the size of the memory region in bytes.
     pub fn size(&self) -> usize {
         self.size
     }
 
-    #[deprecated(note="use volatile_read with the ptr instead")]
-    pub fn as_slice(&self) -> &[u8] {
-        // This is safe because we mapped the area at addr ourselves, so this slice will not
-        // overflow. However, it is possible to alias, hence the deprecation.
-        unsafe { std::slice::from_raw_parts(self.addr, self.size) }
+    /// Writes a slice to the memory region at the specified offset.
+    /// Returns Ok(<number of bytes written>).  The number of bytes written can
+    /// be less than the length of the slice if there isn't enough room in the
+    /// memory region.
+    ///
+    /// # Examples
+    /// * Write a slice at offset 256.
+    ///
+    /// ```
+    /// #   use sys_util::MemoryMapping;
+    /// #   let mut mem_map = MemoryMapping::new(1024).unwrap();
+    ///     let res = mem_map.write_slice(&[1,2,3,4,5], 0);
+    ///     assert!(res.is_ok());
+    ///     assert_eq!(res.unwrap(), 5);
+    /// ```
+    pub fn write_slice(&self, buf: &[u8], offset: usize) -> Result<usize> {
+        if offset >= self.size {
+            return Err(Error::InvalidAddress);
+        }
+        unsafe {
+            // Guest memory can't strictly be modeled as a slice because it is
+            // volatile.  Writing to it with what compiles down to a memcpy
+            // won't hurt anything as long as we get the bounds checks right.
+            let mut slice: &mut [u8] = &mut self.as_mut_slice()[offset..];
+            Ok(slice.write(buf).map_err(Error::WriteToMemory)?)
+        }
     }
 
-    #[deprecated(note="use volatile_write with the ptr instead")]
-    pub fn as_mut_slice(&self) -> &mut [u8] {
-        // This is safe because we mapped the area at addr ourselves, so this slice will not
-        // overflow. However, it is possible to alias, hence the deprecation.
-        unsafe { std::slice::from_raw_parts_mut(self.addr, self.size) }
+    /// Writes an object to the memory region at the specified offset.
+    /// Returns Ok(()) if the object fits, or Err if it extends past the end.
+    ///
+    /// # Examples
+    /// * Write a u64 at offset 16.
+    ///
+    /// ```
+    /// #   use sys_util::MemoryMapping;
+    /// #   let mut mem_map = MemoryMapping::new(1024).unwrap();
+    ///     let res = mem_map.write_obj(55u64, 16);
+    ///     assert!(res.is_ok());
+    /// ```
+    pub fn write_obj<T>(&self, val: T, offset: usize) -> Result<()> {
+        unsafe {
+            // Guest memory can't strictly be modeled as a slice because it is
+            // volatile.  Writing to it with what compiles down to a memcpy
+            // won't hurt anything as long as we get the bounds checks right.
+            if offset + std::mem::size_of::<T>() > self.size {
+                return Err(Error::InvalidAddress);
+            }
+            std::ptr::write_volatile(&mut self.as_mut_slice()[offset..] as *mut _ as *mut T, val);
+            Ok(())
+        }
+    }
+
+    /// Reads an object from the memory region at the given offset.
+    /// Reading from a volatile area isn't strictly safe as it could change
+    /// mid-read.  However, as long as the type T is plain old data and can
+    /// handle random initialization, everything will be OK.
+    ///
+    /// # Examples
+    /// * Read a u64 written to offset 32.
+    ///
+    /// ```
+    /// #   use sys_util::MemoryMapping;
+    /// #   let mut mem_map = MemoryMapping::new(1024).unwrap();
+    ///     let res = mem_map.write_obj(55u64, 32);
+    ///     assert!(res.is_ok());
+    ///     let num: u64 = mem_map.read_obj(32).unwrap();
+    ///     assert_eq!(55, num);
+    /// ```
+    pub fn read_obj<T: Copy>(&self, offset: usize) -> Result<T> {
+        if offset + std::mem::size_of::<T>() > self.size {
+            return Err(Error::InvalidAddress);
+        }
+        unsafe {
+            // This is safe because by definition Copy types can have their bits
+            // set arbitrarily and still be valid.
+            Ok(std::ptr::read_volatile(&self.as_slice()[offset..] as *const _ as *const T))
+        }
+    }
+
+    /// Reads data from a readable object like a File and writes it to guest memory.
+    ///
+    /// # Arguments
+    /// * `mem_offset` - Begin writing memory at this offset.
+    /// * `src` - Read from `src` to memory.
+    /// * `count` - Read `count` bytes from `src` to memory.
+    ///
+    /// # Examples
+    ///
+    /// * Read bytes from /dev/urandom
+    ///
+    /// ```
+    /// # use sys_util::MemoryMapping;
+    /// # use std::fs::File;
+    /// # use std::path::Path;
+    /// # fn test_read_random() -> Result<u32, ()> {
+    /// #     let mut mem_map = MemoryMapping::new(1024).unwrap();
+    ///       let mut file = File::open(Path::new("/dev/urandom")).map_err(|_| ())?;
+    ///       mem_map.read_to_memory(32, &mut file, 128).map_err(|_| ())?;
+    ///       let rand_val: u32 =  mem_map.read_obj(40).map_err(|_| ())?;
+    /// #     Ok(rand_val)
+    /// # }
+    /// ```
+    pub fn read_to_memory<F>(&self, mem_offset: usize, src: &mut F, count: usize) -> Result<()>
+        where F: Read
+    {
+        let mem_end = mem_offset + count;
+        if mem_end > self.size() {
+            return Err(Error::InvalidAddress);
+        }
+        unsafe {
+            // It is safe to overwrite the volatile memory.  Accessing the guest
+            // memory as a mutable slice is OK because nothing assumes another
+            // thread won't change what is loaded.
+            let mut dst = &mut self.as_mut_slice()[mem_offset..mem_end];
+            if src.read_exact(dst).is_err() {
+                return Err(Error::ReadFromSource);
+            }
+        }
+        Ok(())
     }
 
-    // TODO(zachr): remove when we no longer need it, clone is sketchy
-    pub fn clone(&self) -> MemoryMapping {
-        self.ref_count.fetch_add(1, Ordering::SeqCst);
-        MemoryMapping {
-            addr: self.addr,
-            size: self.size,
-            ref_count: self.ref_count.clone(),
+    /// Writes data from memory to a writable object.
+    ///
+    /// # Arguments
+    /// * `mem_offset` - Begin reading memory from this offset.
+    /// * `dst` - Write from memory to `dst`.
+    /// * `count` - Write `count` bytes from memory to `dst`.
+    ///
+    /// # Examples
+    ///
+    /// * Write 128 bytes to /dev/null
+    ///
+    /// ```
+    /// # use sys_util::MemoryMapping;
+    /// # use std::fs::File;
+    /// # use std::path::Path;
+    /// # fn test_write_null() -> Result<(), ()> {
+    /// #     let mut mem_map = MemoryMapping::new(1024).unwrap();
+    ///       let mut file = File::open(Path::new("/dev/null")).map_err(|_| ())?;
+    ///       mem_map.write_from_memory(32, &mut file, 128).map_err(|_| ())?;
+    /// #     Ok(())
+    /// # }
+    /// ```
+    pub fn write_from_memory<F>(&self, mem_offset: usize, dst: &mut F, count: usize) -> Result<()>
+        where F: Write
+    {
+        let mem_end = match mem_offset.checked_add(count) {
+            None => return Err(Error::InvalidAddress),
+            Some(m) => m,
+        };
+        if mem_end > self.size() {
+            return Err(Error::InvalidAddress);
+        }
+        unsafe {
+            // It is safe to read from volatile memory.  Accessing the guest
+            // memory as a slice is OK because nothing assumes another thread
+            // won't change what is loaded.
+            let src = &self.as_mut_slice()[mem_offset..mem_end];
+            if dst.write_all(src).is_err() {
+                return Err(Error::ReadFromSource);
+            }
         }
+        Ok(())
+    }
+
+    unsafe fn as_slice(&self) -> &[u8] {
+        // This is safe because we mapped the area at addr ourselves, so this slice will not
+        // overflow. However, it is possible to alias.
+        std::slice::from_raw_parts(self.addr, self.size)
+    }
+
+    unsafe fn as_mut_slice(&self) -> &mut [u8] {
+        // This is safe because we mapped the area at addr ourselves, so this slice will not
+        // overflow. However, it is possible to alias.
+        std::slice::from_raw_parts_mut(self.addr, self.size)
     }
 }
 
 impl Drop for MemoryMapping {
     fn drop(&mut self) {
-        if self.ref_count.fetch_sub(1, Ordering::SeqCst) == 1 {
-            // This is safe because we mmap the area at addr ourselves, and the ref_count ensures
-            // nobody else is holding a reference to it.
-            unsafe {
-                libc::munmap(self.addr as *mut libc::c_void, self.size);
-            }
+        // This is safe because we mmap the area at addr ourselves, and nobody
+        // else is holding a reference to it.
+        unsafe {
+            libc::munmap(self.addr as *mut libc::c_void, self.size);
         }
     }
 }
 
 #[cfg(test)]
-mod test {
+mod tests {
     use super::*;
 
     #[test]
@@ -125,12 +301,10 @@ mod test {
     }
 
     #[test]
-    fn mutate_slices() {
-        let m = MemoryMapping::new(1024).unwrap();
-        assert_eq!(1024, m.size());
-        {
-            m.as_mut_slice()[128] = 55;
-        }
-        assert_eq!(m.as_slice()[128], 55);
+    fn test_write_past_end() {
+        let m = MemoryMapping::new(5).unwrap();
+        let res = m.write_slice(&[1, 2, 3, 4, 5, 6], 0);
+        assert!(res.is_ok());
+        assert_eq!(res.unwrap(), 5);
     }
 }
diff --git a/x86_64/src/lib.rs b/x86_64/src/lib.rs
index 91945bf..c09e229 100644
--- a/x86_64/src/lib.rs
+++ b/x86_64/src/lib.rs
@@ -23,25 +23,30 @@ mod gdt;
 mod interrupts;
 mod regs;
 
-use std::io::Write;
 use std::mem;
 use std::result;
 
 use bootparam::boot_params;
 use bootparam::E820_RAM;
+use sys_util::{GuestAddress, GuestMemory};
+
+pub use regs::Error as RegError;
+pub use interrupts::Error as IntError;
 
 #[derive(Debug)]
 pub enum Error {
     /// Error configuring the VCPU.
     CpuSetup(cpuid::Error),
+    /// The kernel extends past the end of RAM
+    KernelOffsetPastEnd,
     /// Error configuring the VCPU registers.
-    RegisterConfiguration(regs::Error),
+    RegisterConfiguration(RegError),
     /// Error configuring the VCPU floating point registers.
-    FpuRegisterConfiguration(regs::Error),
+    FpuRegisterConfiguration(RegError),
     /// Error configuring the VCPU segment registers.
-    SegmentRegisterConfiguration(regs::Error),
+    SegmentRegisterConfiguration(RegError),
     /// Error configuring the VCPU local interrupt.
-    LocalIntConfiguration(interrupts::Error),
+    LocalIntConfiguration(IntError),
     /// Error writing the zero page of guest memory.
     ZeroPageSetup,
     /// The zero page extends past the end of guest_mem.
@@ -51,9 +56,33 @@ pub enum Error {
 }
 pub type Result<T> = result::Result<T, Error>;
 
-const ZERO_PAGE_OFFSET: usize = 0x7000;
 const BOOT_STACK_POINTER: usize = 0x8000;
+const MEM_32BIT_GAP_SIZE: usize = (768 << 20);
+const FIRST_ADDR_PAST_32BITS: usize = (1 << 32);
 const KERNEL_64BIT_ENTRY_OFFSET: usize = 0x200;
+const ZERO_PAGE_OFFSET: usize = 0x7000;
+
+/// Returns a Vec of the valid memory addresses.
+/// These should be used to configure the GuestMemory structure for the platform.
+/// For x86_64 all addresses are valid from the start of the kernel except a
+/// carve out at the end of 32bit address space.
+pub fn arch_memory_regions(size: usize) -> Vec<(GuestAddress, usize)> {
+    let mem_end = GuestAddress(size);
+    let first_addr_past_32bits = GuestAddress(FIRST_ADDR_PAST_32BITS);
+    let end_32bit_gap_start = GuestAddress(FIRST_ADDR_PAST_32BITS - MEM_32BIT_GAP_SIZE);
+
+    let mut regions = Vec::new();
+    if mem_end < end_32bit_gap_start {
+        regions.push((GuestAddress(0), size));
+    } else {
+        regions.push((GuestAddress(0), end_32bit_gap_start.offset()));
+        if mem_end > first_addr_past_32bits {
+            regions.push((first_addr_past_32bits, mem_end.offset_from(first_addr_past_32bits)));
+        }
+    }
+
+    regions
+}
 
 /// Configures the vcpu and should be called once per vcpu from the vcpu's thread.
 ///
@@ -64,15 +93,20 @@ const KERNEL_64BIT_ENTRY_OFFSET: usize = 0x200;
 /// * `kvm` - The /dev/kvm object that created vcpu.
 /// * `vcpu` - The VCPU object to configure.
 /// * `num_cpus` - The number of vcpus that will be given to the guest.
-pub fn configure_vcpu(guest_mem: &mut [u8],
-                      kernel_load_offset: usize,
+pub fn configure_vcpu(guest_mem: &GuestMemory,
+                      kernel_load_addr: GuestAddress,
                       kvm: &kvm::Kvm,
                       vcpu: &kvm::Vcpu,
                       num_cpus: usize)
                       -> Result<()> {
     cpuid::setup_cpuid(&kvm, &vcpu, 0, num_cpus as u64).map_err(|e| Error::CpuSetup(e))?;
     regs::setup_msrs(&vcpu).map_err(|e| Error::RegisterConfiguration(e))?;
-    regs::setup_regs(&vcpu, (kernel_load_offset + KERNEL_64BIT_ENTRY_OFFSET) as u64, BOOT_STACK_POINTER as u64, ZERO_PAGE_OFFSET as u64).map_err(|e| Error::RegisterConfiguration(e))?;
+    let kernel_end = guest_mem.checked_offset(kernel_load_addr, KERNEL_64BIT_ENTRY_OFFSET)
+        .ok_or(Error::KernelOffsetPastEnd)?;
+    regs::setup_regs(&vcpu,
+                     (kernel_end).offset() as u64,
+                     BOOT_STACK_POINTER as u64,
+                     ZERO_PAGE_OFFSET as u64).map_err(|e| Error::RegisterConfiguration(e))?;
     regs::setup_fpu(&vcpu).map_err(|e| Error::FpuRegisterConfiguration(e))?;
     regs::setup_sregs(guest_mem, &vcpu).map_err(|e| Error::SegmentRegisterConfiguration(e))?;
     interrupts::set_lint(&vcpu).map_err(|e| Error::LocalIntConfiguration(e))?;
@@ -84,12 +118,12 @@ pub fn configure_vcpu(guest_mem: &mut [u8],
 /// # Arguments
 ///
 /// * `guest_mem` - The memory to be used by the guest.
-/// * `kernel_offset` - Offset into `guest_mem` where the kernel was loaded.
-/// * `cmdline_offset` - Offset into `guest_mem` where the kernel command line was loaded.
+/// * `kernel_addr` - Address in `guest_mem` where the kernel was loaded.
+/// * `cmdline_addr` - Address in `guest_mem` where the kernel command line was loaded.
 /// * `cmdline_size` - Size of the kernel command line in bytes including the null terminator.
-pub fn configure_system(guest_mem: &mut [u8],
-                        kernel_offset: usize,
-                        cmdline_offset: usize,
+pub fn configure_system(guest_mem: &GuestMemory,
+                        kernel_addr: GuestAddress,
+                        cmdline_addr: GuestAddress,
                         cmdline_size: usize)
                         -> Result<()> {
     const EBDA_START: u64 = 0x0009fc00;
@@ -97,55 +131,44 @@ pub fn configure_system(guest_mem: &mut [u8],
     const KERNEL_HDR_MAGIC: u32 = 0x53726448;
     const KERNEL_LOADER_OTHER: u8 = 0xff;
     const KERNEL_MIN_ALIGNMENT_BYTES: u32 = 0x1000000; // Must be non-zero.
-    const KVM_32BIT_MAX_MEM_SIZE: u64 = (1 << 32);
-    const KVM_32BIT_GAP_SIZE: u64 = (768 << 20);
-    const KVM_32BIT_GAP_START: u64 = (KVM_32BIT_MAX_MEM_SIZE - KVM_32BIT_GAP_SIZE);
+    let first_addr_past_32bits = GuestAddress(FIRST_ADDR_PAST_32BITS);
+    let end_32bit_gap_start = GuestAddress(FIRST_ADDR_PAST_32BITS - MEM_32BIT_GAP_SIZE);
 
     let mut params: boot_params = Default::default();
 
     params.hdr.type_of_loader = KERNEL_LOADER_OTHER;
     params.hdr.boot_flag = KERNEL_BOOT_FLAG_MAGIC;
     params.hdr.header = KERNEL_HDR_MAGIC;
-    params.hdr.cmd_line_ptr = cmdline_offset as u32;
+    params.hdr.cmd_line_ptr = cmdline_addr.offset() as u32;
     params.hdr.cmdline_size = cmdline_size as u32;
     params.hdr.kernel_alignment = KERNEL_MIN_ALIGNMENT_BYTES;
 
     add_e820_entry(&mut params, 0, EBDA_START, E820_RAM)?;
 
-    let mem_size = guest_mem.len() as u64;
-    if mem_size < KVM_32BIT_GAP_START {
+    let mem_end = guest_mem.end_addr();
+    if mem_end < end_32bit_gap_start {
         add_e820_entry(&mut params,
-                       kernel_offset as u64,
-                       mem_size - kernel_offset as u64,
+                       kernel_addr.offset() as u64,
+                       mem_end.offset_from(kernel_addr) as u64,
                        E820_RAM)?;
     } else {
         add_e820_entry(&mut params,
-                       kernel_offset as u64,
-                       KVM_32BIT_GAP_START - kernel_offset as u64,
+                       kernel_addr.offset() as u64,
+                       end_32bit_gap_start.offset_from(kernel_addr) as u64,
                        E820_RAM)?;
-        if mem_size > KVM_32BIT_MAX_MEM_SIZE {
+        if mem_end > first_addr_past_32bits {
             add_e820_entry(&mut params,
-                           KVM_32BIT_MAX_MEM_SIZE,
-                           mem_size - KVM_32BIT_MAX_MEM_SIZE,
+                           first_addr_past_32bits.offset() as u64,
+                           mem_end.offset_from(first_addr_past_32bits) as u64,
                            E820_RAM)?;
         }
     }
 
-    let zero_page_end = ZERO_PAGE_OFFSET + mem::size_of::<boot_params>();
-    if zero_page_end as usize > guest_mem.len() {
-        return Err(Error::ZeroPagePastRamEnd);
-    }
-    let mut zero_page_slice = &mut guest_mem[ZERO_PAGE_OFFSET..zero_page_end as usize];
-    unsafe {
-        // Dereferencing the pointer to params is safe here because it is valid, it can't be
-        // destroyed after it is created at the top of this function,  and we drop it as soon as the
-        // data is written.
-        let ptr = &params as *const boot_params as *const u8;
-        let bp_slice: &[u8] = std::slice::from_raw_parts(ptr, mem::size_of::<boot_params>());
-        zero_page_slice.write_all(bp_slice)
-            .map_err(|_| Error::ZeroPageSetup)?;
-    }
-
+    let zero_page_addr = GuestAddress(ZERO_PAGE_OFFSET);
+    guest_mem.checked_offset(zero_page_addr, mem::size_of::<boot_params>())
+        .ok_or(Error::ZeroPagePastRamEnd)?;
+    guest_mem.write_obj_at_addr(params, zero_page_addr)
+        .map_err(|_| Error::ZeroPageSetup)?;
 
     Ok(())
 }
@@ -164,3 +187,24 @@ fn add_e820_entry(params: &mut boot_params, addr: u64, size: u64, mem_type: u32)
 
     Ok(())
 }
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn regions_lt_4gb() {
+        let regions = arch_memory_regions(1usize << 29);
+        assert_eq!(1, regions.len());
+        assert_eq!(GuestAddress(0), regions[0].0);
+        assert_eq!(1usize << 29, regions[0].1);
+    }
+
+    #[test]
+    fn regions_gt_4gb() {
+        let regions = arch_memory_regions((1usize << 32) + 0x8000);
+        assert_eq!(2, regions.len());
+        assert_eq!(GuestAddress(0), regions[0].0);
+        assert_eq!(GuestAddress(1usize << 32), regions[1].0);
+    }
+}
diff --git a/x86_64/src/regs.rs b/x86_64/src/regs.rs
index 268768e..ebe295c 100644
--- a/x86_64/src/regs.rs
+++ b/x86_64/src/regs.rs
@@ -2,11 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-use std::io::Cursor;
-use std::mem;
-use std::result;
-
-use byteorder::{LittleEndian, WriteBytesExt};
+use std::{mem, result};
 
 use kvm;
 use kvm_sys::kvm_fpu;
@@ -16,13 +12,26 @@ use kvm_sys::kvm_regs;
 use kvm_sys::kvm_sregs;
 use gdt;
 use sys_util;
+use sys_util::{GuestAddress, GuestMemory};
 
 #[derive(Debug)]
 pub enum Error {
+    /// Setting up MSRs failed.
     MsrIoctlFailed(sys_util::Error),
+    /// Failed to configure the FPU.
     FpuIoctlFailed(sys_util::Error),
+    /// Failed to set base registers for this CPU.
     SettingRegistersIoctl(sys_util::Error),
+    /// Failed to set sregs for this cpu.
     SRegsIoctlFailed(sys_util::Error),
+    /// Writing the GDT to RAM failed.
+    WriteGDTFailure,
+    /// Writing the IDT to RAM failed.
+    WriteIDTFailure,
+    /// Writing PML4 to RAM failed.
+    WritePML4Address,
+    /// Writing PDPTE to RAM failed.
+    WritePDPTEAddress,
 }
 pub type Result<T> = result::Result<T, Error>;
 
@@ -168,21 +177,25 @@ const BOOT_IDT_OFFSET: usize = 0x520;
 
 const BOOT_GDT_MAX: usize = 4;
 
-fn write_gdt_table(table: &[u64; BOOT_GDT_MAX], out: &mut [u8]) {
-    let mut writer = Cursor::new(&mut out[BOOT_GDT_OFFSET..
-                                      (BOOT_GDT_OFFSET + mem::size_of_val(table))]);
-    for entry in table.iter() {
-        writer.write_u64::<LittleEndian>(*entry).unwrap(); // Can't fail if the above slice worked.
+fn write_gdt_table(table: &[u64], guest_mem: &GuestMemory) -> Result<()> {
+    let boot_gdt_addr = GuestAddress(BOOT_GDT_OFFSET);
+    for (index, entry) in table.iter().enumerate() {
+        let addr = guest_mem.checked_offset(boot_gdt_addr, index * mem::size_of::<u64>())
+            .ok_or(Error::WriteGDTFailure)?;
+        guest_mem.write_obj_at_addr(*entry, addr)
+            .map_err(|_| Error::WriteGDTFailure)?;
     }
+    Ok(())
 }
 
-fn write_idt_value(val: u64, out: &mut [u8]) {
-    let mut writer = Cursor::new(&mut out[BOOT_IDT_OFFSET..
-                                      (BOOT_IDT_OFFSET + mem::size_of::<u64>())]);
-    writer.write_u64::<LittleEndian>(val).unwrap(); // Can't fail if the above slice worked.
+fn write_idt_value(val: u64, guest_mem: &GuestMemory) -> Result<()> {
+    let boot_idt_addr = GuestAddress(BOOT_IDT_OFFSET);
+    guest_mem
+        .write_obj_at_addr(val, boot_idt_addr)
+        .map_err(|_| Error::WriteIDTFailure)
 }
 
-fn configure_segments_and_sregs(mem: &mut [u8], sregs: &mut kvm_sregs) {
+fn configure_segments_and_sregs(mem: &GuestMemory, sregs: &mut kvm_sregs) -> Result<()> {
     let gdt_table: [u64; BOOT_GDT_MAX as usize] = [
         gdt::gdt_entry(0, 0, 0), // NULL
         gdt::gdt_entry(0xa09b, 0, 0xfffff), // CODE
@@ -195,11 +208,11 @@ fn configure_segments_and_sregs(mem: &mut [u8], sregs: &mut kvm_sregs) {
     let tss_seg = gdt::kvm_segment_from_gdt(gdt_table[3], 3);
 
     // Write segments
-    write_gdt_table(&gdt_table, mem);
+    write_gdt_table(&gdt_table[..], mem)?;
     sregs.gdt.base = BOOT_GDT_OFFSET as u64;
     sregs.gdt.limit = mem::size_of_val(&gdt_table) as u16 - 1;
 
-    write_idt_value(0, mem);
+    write_idt_value(0, mem)?;
     sregs.idt.base = BOOT_IDT_OFFSET as u64;
     sregs.idt.limit = mem::size_of::<u64>() as u16 - 1;
 
@@ -214,50 +227,37 @@ fn configure_segments_and_sregs(mem: &mut [u8], sregs: &mut kvm_sregs) {
     /* 64-bit protected mode */
     sregs.cr0 |= X86_CR0_PE;
     sregs.efer |= EFER_LME;
+
+    Ok(())
 }
 
-fn setup_page_tables(mem: &mut [u8], sregs: &mut kvm_sregs) {
+fn setup_page_tables(mem: &GuestMemory, sregs: &mut kvm_sregs) -> Result<()> {
     // Puts PML4 right after zero page but aligned to 4k.
-    const BOOT_PML4_OFFSET: usize = 0x9000;
-    const BOOT_PDPTE_OFFSET: usize = 0xa000;
-    const TABLE_LEN: usize = 4096;
-
-    {
-        let out_slice = &mut mem[BOOT_PML4_OFFSET..(BOOT_PML4_OFFSET + TABLE_LEN)];
-        for v in out_slice.iter_mut() {
-            *v = 0;
-        }
-        let mut writer = Cursor::new(out_slice);
-        // write_u64 Can't fail if the above slice works.
-        writer
-            .write_u64::<LittleEndian>(BOOT_PDPTE_OFFSET as u64 | 3)
-            .unwrap();
-    }
-    {
-        let out_slice = &mut mem[BOOT_PDPTE_OFFSET..(BOOT_PDPTE_OFFSET + TABLE_LEN)];
-        for v in out_slice.iter_mut() {
-            *v = 0;
-        }
-        let mut writer = Cursor::new(out_slice);
-        writer.write_u64::<LittleEndian>(0x83).unwrap(); // Can't fail if the slice works.
-    }
-    sregs.cr3 = BOOT_PML4_OFFSET as u64;
+    let boot_pml4_addr = GuestAddress(0x9000);
+    let boot_pdpte_addr = GuestAddress(0xa000);
+
+    mem.write_obj_at_addr(boot_pdpte_addr.offset() as u64 | 0x03, boot_pml4_addr)
+        .map_err(|_| Error::WritePML4Address)?;
+    mem.write_obj_at_addr(0x83u64, boot_pdpte_addr)
+        .map_err(|_| Error::WritePDPTEAddress)?;
+    sregs.cr3 = boot_pml4_addr.offset() as u64;
     sregs.cr4 |= X86_CR4_PAE;
     sregs.cr0 |= X86_CR0_PG;
+    Ok(())
 }
 
 /// Configures the segment registers and system page tables for a given CPU.
 ///
 /// # Arguments
 ///
-/// * `guest_mem` - The memory that will be passed to the guest.
+/// * `mem` - The memory that will be passed to the guest.
 /// * `vcpu_fd` - The FD returned from the KVM_CREATE_VCPU ioctl.
-pub fn setup_sregs(mem: &mut [u8], vcpu: &kvm::Vcpu) -> Result<()> {
+pub fn setup_sregs(mem: &GuestMemory, vcpu: &kvm::Vcpu) -> Result<()> {
     let mut sregs: kvm_sregs = vcpu.get_sregs()
         .map_err(|e| Error::SRegsIoctlFailed(e))?;
 
-    configure_segments_and_sregs(mem, &mut sregs);
-    setup_page_tables(mem, &mut sregs); // TODO(dgreid) - Can this be done once per system instead?
+    configure_segments_and_sregs(mem, &mut sregs)?;
+    setup_page_tables(mem, &mut sregs)?; // TODO(dgreid) - Can this be done once per system instead?
 
     vcpu.set_sregs(&sregs)
         .map_err(|e| Error::SRegsIoctlFailed(e))?;
@@ -267,25 +267,30 @@ pub fn setup_sregs(mem: &mut [u8], vcpu: &kvm::Vcpu) -> Result<()> {
 
 #[cfg(test)]
 mod tests {
-    use byteorder::{LittleEndian, ReadBytesExt};
-    use std::io::Cursor;
     use super::*;
+    use sys_util::{GuestAddress, GuestMemory};
+
+    fn create_guest_mem() -> GuestMemory {
+        GuestMemory::new(&vec![(GuestAddress(0), 0x10000)]).unwrap()
+    }
+
+    fn read_u64(gm: &GuestMemory, offset: usize) -> u64 {
+        let read_addr = GuestAddress(offset);
+        gm.read_obj_from_addr(read_addr).unwrap()
+    }
 
     #[test]
     fn segments_and_sregs() {
         let mut sregs: kvm_sregs = Default::default();
-        let mut mem_vec: Vec<u8> = Vec::with_capacity(0x10000);
-        unsafe {
-            mem_vec.set_len(0x10000);
-        }
-        configure_segments_and_sregs(mem_vec.as_mut_slice(), &mut sregs);
-        let mut reader = Cursor::new(&mem_vec.as_slice()[BOOT_GDT_OFFSET..]);
-        assert_eq!(0, reader.read_u64::<LittleEndian>().unwrap());
-        assert_eq!(0xaf9b000000ffff, reader.read_u64::<LittleEndian>().unwrap());
-        assert_eq!(0xcf93000000ffff, reader.read_u64::<LittleEndian>().unwrap());
-        assert_eq!(0x8f8b000000ffff, reader.read_u64::<LittleEndian>().unwrap());
-        let mut reader = Cursor::new(&mem_vec.as_slice()[BOOT_IDT_OFFSET..]);
-        assert_eq!(0, reader.read_u64::<LittleEndian>().unwrap());
+        let gm = create_guest_mem();
+        configure_segments_and_sregs(&gm, &mut sregs).unwrap();
+
+        assert_eq!(0x0, read_u64(&gm, BOOT_GDT_OFFSET));
+        assert_eq!(0xaf9b000000ffff, read_u64(&gm, BOOT_GDT_OFFSET + 8));
+        assert_eq!(0xcf93000000ffff, read_u64(&gm, BOOT_GDT_OFFSET + 16));
+        assert_eq!(0x8f8b000000ffff, read_u64(&gm, BOOT_GDT_OFFSET + 24));
+        assert_eq!(0x0, read_u64(&gm, BOOT_IDT_OFFSET));
+
         assert_eq!(0, sregs.cs.base);
         assert_eq!(0xfffff, sregs.ds.limit);
         assert_eq!(0x10, sregs.es.selector);
@@ -302,15 +307,12 @@ mod tests {
     #[test]
     fn page_tables() {
         let mut sregs: kvm_sregs = Default::default();
-        let mut mem_vec: Vec<u8> = Vec::with_capacity(0x10000);
-        unsafe {
-            mem_vec.set_len(0x10000);
-        }
-        setup_page_tables(mem_vec.as_mut_slice(), &mut sregs);
-        let mut reader = Cursor::new(&mem_vec.as_slice()[0x9000..]);
-        assert_eq!(0xa003, reader.read_u64::<LittleEndian>().unwrap());
-        let mut reader = Cursor::new(&mem_vec.as_slice()[0xa000..]);
-        assert_eq!(0x83, reader.read_u64::<LittleEndian>().unwrap());
+        let gm = create_guest_mem();
+        setup_page_tables(&gm, &mut sregs).unwrap();
+
+        assert_eq!(0xa003, read_u64(&gm, 0x9000));
+        assert_eq!(0x83, read_u64(&gm, 0xa000));
+
         assert_eq!(0x9000, sregs.cr3);
         assert_eq!(X86_CR4_PAE, sregs.cr4);
         assert_eq!(X86_CR0_PG, sregs.cr0);