author     Sonny Rao <sonnyrao@chromium.org>       2018-01-31 17:49:07 -0800
committer  chrome-bot <chrome-bot@chromium.org>    2018-02-01 18:08:55 -0800
commit     29cd40a1d61c0d558768e0d32f07f7b7ad84ca63 (patch)
tree       b384a0e5d29db74de61ee5c9c17355af332bf30a
parent     ad2391528f93e696807060f4fa6fb43faebced92 (diff)
crosvm: change GuestAddress to always be a u64
We want to be able to run 64-bit ARM kernels using a 32-bit build of
crosvm. To keep guest addressing consistent across host architectures,
use a u64 to represent GuestAddress.

BUG=chromium:797868
TEST=./build_test passes on all architectures
TEST=crosvm runs on caroline

Change-Id: I43bf993592caf46891e3e5e05258ab70b6bf3045
Signed-off-by: Sonny Rao <sonnyrao@chromium.org>
Reviewed-on: https://chromium-review.googlesource.com/896398
Reviewed-by: Dylan Reid <dgreid@chromium.org>
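
The heart of the change is that GuestAddress is now backed by a u64 instead of a
usize, so guest physical addresses above 4 GiB remain representable even when
crosvm itself is a 32-bit binary (where usize is only 32 bits wide). A minimal
standalone sketch of the idea; the real type in sys_util/src/guest_address.rs
carries more methods and trait impls:

    // Standalone illustration only -- not the sys_util implementation.
    #[derive(Clone, Copy, Debug, PartialEq, Eq)]
    pub struct GuestAddress(pub u64);

    impl GuestAddress {
        /// Returns the result of the add or None if there is overflow.
        pub fn checked_add(&self, other: u64) -> Option<GuestAddress> {
            self.0.checked_add(other).map(GuestAddress)
        }
    }

    fn main() {
        // A guest physical address above 4 GiB. Backed by usize this would not
        // fit when crosvm is built for a 32-bit host; backed by u64 it fits
        // everywhere.
        let addr = GuestAddress(0x1_0000_0000);
        assert_eq!(addr.checked_add(0x1000), Some(GuestAddress(0x1_0000_1000)));
    }
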
-rw-r--r--  data_model/src/volatile_memory.rs | 60
-rw-r--r--  devices/src/virtio/balloon.rs     |  4
-rw-r--r--  devices/src/virtio/block.rs       |  4
-rw-r--r--  devices/src/virtio/queue.rs       | 16
-rw-r--r--  devices/src/virtio/wl.rs          | 35
-rw-r--r--  kernel_loader/src/lib.rs          |  6
-rw-r--r--  kvm/src/lib.rs                    |  2
-rw-r--r--  kvm/tests/dirty_log.rs            |  2
-rw-r--r--  src/linux.rs                      | 12
-rw-r--r--  sys_util/src/guest_address.rs     | 22
-rw-r--r--  sys_util/src/guest_memory.rs      | 22
-rw-r--r--  sys_util/src/mmap.rs              | 10
-rw-r--r--  sys_util/src/sock_ctrl_msg.rs     |  2
-rw-r--r--  vhost/src/lib.rs                  |  8
-rw-r--r--  vm_control/src/lib.rs             |  2
-rw-r--r--  x86_64/src/lib.rs                 | 22
-rw-r--r--  x86_64/src/mptable.rs             | 44
-rw-r--r--  x86_64/src/regs.rs                |  8
18 files changed, 141 insertions, 140 deletions
diff --git a/data_model/src/volatile_memory.rs b/data_model/src/volatile_memory.rs
index 5c5881a..2a52d57 100644
--- a/data_model/src/volatile_memory.rs
+++ b/data_model/src/volatile_memory.rs
@@ -33,9 +33,9 @@ use DataInit;
 #[derive(Eq, PartialEq, Debug)]
 pub enum VolatileMemoryError {
     /// `addr` is out of bounds of the volatile memory slice.
-    OutOfBounds { addr: usize },
-    /// Taking a slice at `base` with `offset` would overflow `usize`.
-    Overflow { base: usize, offset: usize },
+    OutOfBounds { addr: u64 },
+    /// Taking a slice at `base` with `offset` would overflow `u64`.
+    Overflow { base: u64, offset: u64 },
 }
 
 impl fmt::Display for VolatileMemoryError {
@@ -61,13 +61,13 @@ type Result<T> = VolatileMemoryResult<T>;
 
 /// Convenience function for computing `base + offset` which returns
 /// `Err(VolatileMemoryError::Overflow)` instead of panicking in the case `base + offset` exceeds
-/// `usize::MAX`.
+/// `u64::MAX`.
 ///
 /// # Examples
 ///
 /// ```
 /// # use data_model::*;
-/// # fn get_slice(offset: usize, count: usize) -> VolatileMemoryResult<()> {
+/// # fn get_slice(offset: u64, count: u64) -> VolatileMemoryResult<()> {
 ///   let mem_end = calc_offset(offset, count)?;
 ///   if mem_end > 100 {
 ///       return Err(VolatileMemoryError::OutOfBounds{addr: mem_end});
@@ -75,7 +75,7 @@ type Result<T> = VolatileMemoryResult<T>;
 /// # Ok(())
 /// # }
 /// ```
-pub fn calc_offset(base: usize, offset: usize) -> Result<usize> {
+pub fn calc_offset(base: u64, offset: u64) -> Result<u64> {
     match base.checked_add(offset) {
         None => {
             Err(Error::Overflow {
@@ -91,11 +91,11 @@ pub fn calc_offset(base: usize, offset: usize) -> Result<usize> {
 pub trait VolatileMemory {
     /// Gets a slice of memory at `offset` that is `count` bytes in length and supports volatile
     /// access.
-    fn get_slice(&self, offset: usize, count: usize) -> Result<VolatileSlice>;
+    fn get_slice(&self, offset: u64, count: u64) -> Result<VolatileSlice>;
 
     /// Gets a `VolatileRef` at `offset`.
-    fn get_ref<T: DataInit>(&self, offset: usize) -> Result<VolatileRef<T>> {
-        let slice = self.get_slice(offset, size_of::<T>())?;
+    fn get_ref<T: DataInit>(&self, offset: u64) -> Result<VolatileRef<T>> {
+        let slice = self.get_slice(offset, size_of::<T>() as u64)?;
         Ok(VolatileRef {
                addr: slice.addr as *mut T,
                phantom: PhantomData,
@@ -104,12 +104,12 @@ pub trait VolatileMemory {
 }
 
 impl<'a> VolatileMemory for &'a mut [u8] {
-    fn get_slice(&self, offset: usize, count: usize) -> Result<VolatileSlice> {
+    fn get_slice(&self, offset: u64, count: u64) -> Result<VolatileSlice> {
         let mem_end = calc_offset(offset, count)?;
-        if mem_end > self.len() {
+        if mem_end > self.len() as u64 {
             return Err(Error::OutOfBounds { addr: mem_end });
         }
-        Ok(unsafe { VolatileSlice::new((self.as_ptr() as usize + offset) as *mut _, count) })
+        Ok(unsafe { VolatileSlice::new((self.as_ptr() as u64 + offset) as *mut _, count) })
     }
 }
 
@@ -117,7 +117,7 @@ impl<'a> VolatileMemory for &'a mut [u8] {
 #[derive(Debug)]
 pub struct VolatileSlice<'a> {
     addr: *mut u8,
-    size: usize,
+    size: u64,
     phantom: PhantomData<&'a u8>,
 }
 
@@ -128,7 +128,7 @@ impl<'a> VolatileSlice<'a> {
     /// and is available for the duration of the lifetime of the new `VolatileSlice`. The caller
     /// must also guarantee that all other users of the given chunk of memory are using volatile
     /// accesses.
-    pub unsafe fn new(addr: *mut u8, size: usize) -> VolatileSlice<'a> {
+    pub unsafe fn new(addr: *mut u8, size: u64) -> VolatileSlice<'a> {
         VolatileSlice {
             addr: addr,
             size: size,
@@ -142,7 +142,7 @@ impl<'a> VolatileSlice<'a> {
     }
 
     /// Gets the size of this slice.
-    pub fn size(&self) -> usize {
+    pub fn size(&self) -> u64 {
         self.size
     }
 
@@ -173,7 +173,7 @@ impl<'a> VolatileSlice<'a> {
         where T: DataInit
     {
         let mut addr = self.addr;
-        for v in buf.iter_mut().take(self.size / size_of::<T>()) {
+        for v in buf.iter_mut().take(self.size as usize / size_of::<T>()) {
             unsafe {
                 *v = read_volatile(addr as *const T);
                 addr = addr.offset(size_of::<T>() as isize);
@@ -208,7 +208,7 @@ impl<'a> VolatileSlice<'a> {
         where T: DataInit
     {
         let mut addr = self.addr;
-        for &v in buf.iter().take(self.size / size_of::<T>()) {
+        for &v in buf.iter().take(self.size as usize / size_of::<T>()) {
             unsafe {
                 write_volatile(addr as *mut T, v);
                 addr = addr.offset(size_of::<T>() as isize);
@@ -326,21 +326,21 @@ impl<'a> VolatileSlice<'a> {
     // These function are private and only used for the read/write functions. It is not valid in
     // general to take slices of volatile memory.
     unsafe fn as_slice(&self) -> &[u8] {
-        from_raw_parts(self.addr, self.size)
+        from_raw_parts(self.addr, self.size as usize)
     }
     unsafe fn as_mut_slice(&self) -> &mut [u8] {
-        from_raw_parts_mut(self.addr, self.size)
+        from_raw_parts_mut(self.addr, self.size as usize)
     }
 }
 
 impl<'a> VolatileMemory for VolatileSlice<'a> {
-    fn get_slice(&self, offset: usize, count: usize) -> Result<VolatileSlice> {
+    fn get_slice(&self, offset: u64, count: u64) -> Result<VolatileSlice> {
         let mem_end = calc_offset(offset, count)?;
         if mem_end > self.size {
             return Err(Error::OutOfBounds { addr: mem_end });
         }
         Ok(VolatileSlice {
-               addr: (self.addr as usize + offset) as *mut _,
+               addr: (self.addr as u64 + offset) as *mut _,
                size: count,
                phantom: PhantomData,
            })
@@ -396,8 +396,8 @@ impl<'a, T: DataInit> VolatileRef<'a, T> {
     ///   let v_ref = unsafe { VolatileRef::new(0 as *mut u32) };
     ///   assert_eq!(v_ref.size(), size_of::<u32>());
     /// ```
-    pub fn size(&self) -> usize {
-        size_of::<T>()
+    pub fn size(&self) -> u64 {
+        size_of::<T>() as u64
     }
 
     /// Does a volatile write of the value `v` to the address of this ref.
@@ -417,7 +417,7 @@ impl<'a, T: DataInit> VolatileRef<'a, T> {
 
     /// Converts this `T` reference to a raw slice with the same size and address.
     pub fn to_slice(&self) -> VolatileSlice<'a> {
-        unsafe { VolatileSlice::new(self.addr as *mut u8, size_of::<T>()) }
+        unsafe { VolatileSlice::new(self.addr as *mut u8, size_of::<T>() as u64) }
     }
 }
 
@@ -443,13 +443,13 @@ mod tests {
     }
 
     impl VolatileMemory for VecMem {
-        fn get_slice(&self, offset: usize, count: usize) -> Result<VolatileSlice> {
+        fn get_slice(&self, offset: u64, count: u64) -> Result<VolatileSlice> {
             let mem_end = calc_offset(offset, count)?;
-            if mem_end > self.mem.len() {
+            if mem_end > self.mem.len() as u64 {
                 return Err(Error::OutOfBounds { addr: mem_end });
             }
             Ok(unsafe {
-                   VolatileSlice::new((self.mem.as_ptr() as usize + offset) as *mut _, count)
+                   VolatileSlice::new((self.mem.as_ptr() as u64 + offset) as *mut _, count)
                })
         }
     }
@@ -490,7 +490,7 @@ mod tests {
         let v_ref = a_ref.get_ref(1).unwrap();
         v_ref.store(0x12345678u32);
         let ref_slice = v_ref.to_slice();
-        assert_eq!(v_ref.as_ptr() as usize, ref_slice.as_ptr() as usize);
+        assert_eq!(v_ref.as_ptr() as u64, ref_slice.as_ptr() as u64);
         assert_eq!(v_ref.size(), ref_slice.size());
     }
 
@@ -547,7 +547,7 @@ mod tests {
 
     #[test]
     fn slice_overflow_error() {
-        use std::usize::MAX;
+        use std::u64::MAX;
         let a = VecMem::new(1);
         let res = a.get_slice(MAX, 1).unwrap_err();
         assert_eq!(res,
@@ -567,7 +567,7 @@ mod tests {
 
     #[test]
     fn ref_overflow_error() {
-        use std::usize::MAX;
+        use std::u64::MAX;
         let a = VecMem::new(1);
         let res = a.get_ref::<u8>(MAX).unwrap_err();
         assert_eq!(res,
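
The volatile_memory.rs hunk above reworks all slice offsets and sizes as u64 and
keeps routing the arithmetic through calc_offset, so an oversized base + offset
is reported as an Overflow error rather than wrapping or panicking. A
self-contained sketch of that pattern, mirroring the names in the diff but not
taken from the data_model crate itself:

    #[derive(Debug, PartialEq)]
    enum VolatileMemoryError {
        OutOfBounds { addr: u64 },
        Overflow { base: u64, offset: u64 },
    }

    /// Computes `base + offset`, reporting overflow instead of panicking.
    fn calc_offset(base: u64, offset: u64) -> Result<u64, VolatileMemoryError> {
        base.checked_add(offset)
            .ok_or(VolatileMemoryError::Overflow { base, offset })
    }

    /// Checks that `offset + count` stays within a memory area of `len` bytes.
    fn check_slice(offset: u64, count: u64, len: u64) -> Result<u64, VolatileMemoryError> {
        let mem_end = calc_offset(offset, count)?;
        if mem_end > len {
            return Err(VolatileMemoryError::OutOfBounds { addr: mem_end });
        }
        Ok(mem_end)
    }

    fn main() {
        // In-range offsets succeed...
        assert_eq!(calc_offset(0x1000, 0x20), Ok(0x1020));
        // ...overflow is surfaced, matching the slice_overflow_error test above...
        assert_eq!(
            calc_offset(std::u64::MAX, 1),
            Err(VolatileMemoryError::Overflow { base: std::u64::MAX, offset: 1 })
        );
        // ...and a slice running past the end of memory is OutOfBounds.
        assert_eq!(
            check_slice(96, 8, 100),
            Err(VolatileMemoryError::OutOfBounds { addr: 104 })
        );
    }
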
diff --git a/devices/src/virtio/balloon.rs b/devices/src/virtio/balloon.rs
index ad87e20..5cfdbcf 100644
--- a/devices/src/virtio/balloon.rs
+++ b/devices/src/virtio/balloon.rs
@@ -73,7 +73,7 @@ impl Worker {
                 if valid_inflate_desc(&avail_desc) {
                     let num_addrs = avail_desc.len / 4;
                     'addr_loop: for i in 0..num_addrs as usize {
-                        let addr = match avail_desc.addr.checked_add(i * 4) {
+                        let addr = match avail_desc.addr.checked_add((i * 4) as u64) {
                             Some(a) => a,
                             None => break,
                         };
@@ -82,7 +82,7 @@ impl Worker {
                             Err(_) => continue,
                         };
                         let guest_address =
-                            GuestAddress((guest_input as usize) << VIRTIO_BALLOON_PFN_SHIFT);
+                            GuestAddress((guest_input as u64) << VIRTIO_BALLOON_PFN_SHIFT);
 
                         if self.mem
                             .dont_need_range(guest_address, 1 << VIRTIO_BALLOON_PFN_SHIFT)
diff --git a/devices/src/virtio/block.rs b/devices/src/virtio/block.rs
index 1fe2b6b..de55851 100644
--- a/devices/src/virtio/block.rs
+++ b/devices/src/virtio/block.rs
@@ -44,7 +44,7 @@ enum ParseError {
     /// Guest gave us bad memory addresses
     GuestMemory(GuestMemoryError),
     /// Guest gave us offsets that would have overflowed a usize.
-    CheckedOffset(GuestAddress, usize),
+    CheckedOffset(GuestAddress, u64),
     /// Guest gave us a write only descriptor that protocol says to read from.
     UnexpectedWriteOnlyDescriptor,
     /// Guest gave us a read only descriptor that protocol says to write to.
@@ -69,7 +69,7 @@ fn request_type(mem: &GuestMemory,
 }
 
 fn sector(mem: &GuestMemory, desc_addr: GuestAddress) -> result::Result<u64, ParseError> {
-    const SECTOR_OFFSET: usize = 8;
+    const SECTOR_OFFSET: u64 = 8;
     let addr = match mem.checked_offset(desc_addr, SECTOR_OFFSET) {
         Some(v) => v,
         None => return Err(ParseError::CheckedOffset(desc_addr, SECTOR_OFFSET)),
diff --git a/devices/src/virtio/queue.rs b/devices/src/virtio/queue.rs
index c02de8a..b9313f7 100644
--- a/devices/src/virtio/queue.rs
+++ b/devices/src/virtio/queue.rs
@@ -47,12 +47,12 @@ impl<'a> DescriptorChain<'a> {
             return None;
         }
 
-        let desc_head = match mem.checked_offset(desc_table, (index as usize) * 16) {
+        let desc_head = match mem.checked_offset(desc_table, (index as u64) * 16) {
             Some(a) => a,
             None => return None,
         };
         // These reads can't fail unless Guest memory is hopelessly broken.
-        let addr = GuestAddress(mem.read_obj_from_addr::<u64>(desc_head).unwrap() as usize);
+        let addr = GuestAddress(mem.read_obj_from_addr::<u64>(desc_head).unwrap() as u64);
         if mem.checked_offset(desc_head, 16).is_none() {
             return None;
         }
@@ -79,7 +79,7 @@ impl<'a> DescriptorChain<'a> {
 
     fn is_valid(&self) -> bool {
         if self.mem
-               .checked_offset(self.addr, self.len as usize)
+               .checked_offset(self.addr, self.len as u64)
                .is_none() {
             false
         } else if self.has_next() && self.next >= self.queue_size {
@@ -139,7 +139,7 @@ impl<'a, 'b> Iterator for AvailIter<'a, 'b> {
         }
 
         let offset = (4 + (self.next_index.0 % self.queue_size) * 2) as usize;
-        let avail_addr = match self.mem.checked_offset(self.avail_ring, offset) {
+        let avail_addr = match self.mem.checked_offset(self.avail_ring, offset as u64) {
             Some(a) => a,
             None => return None,
         };
@@ -219,21 +219,21 @@ impl Queue {
             error!("virtio queue with invalid size: {}", self.size);
             false
         } else if desc_table
-                      .checked_add(desc_table_size)
+                      .checked_add(desc_table_size as u64)
                       .map_or(true, |v| !mem.address_in_range(v)) {
             error!("virtio queue descriptor table goes out of bounds: start:0x{:08x} size:0x{:08x}",
                    desc_table.offset(),
                    desc_table_size);
             false
         } else if avail_ring
-                      .checked_add(avail_ring_size)
+                      .checked_add(avail_ring_size as u64)
                       .map_or(true, |v| !mem.address_in_range(v)) {
             error!("virtio queue available ring goes out of bounds: start:0x{:08x} size:0x{:08x}",
                    avail_ring.offset(),
                    avail_ring_size);
             false
         } else if used_ring
-                      .checked_add(used_ring_size)
+                      .checked_add(used_ring_size as u64)
                       .map_or(true, |v| !mem.address_in_range(v)) {
             error!("virtio queue used ring goes out of bounds: start:0x{:08x} size:0x{:08x}",
                    used_ring.offset(),
@@ -299,7 +299,7 @@ impl Queue {
 
         let used_ring = self.used_ring;
         let next_used = (self.next_used.0 % self.actual_size()) as usize;
-        let used_elem = used_ring.unchecked_add(4 + next_used * 8);
+        let used_elem = used_ring.unchecked_add((4 + next_used * 8) as u64);
 
         // These writes can't fail as we are guaranteed to be within the descriptor ring.
         mem.write_obj_at_addr(desc_index as u32, used_elem)
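
The queue bounds checks above widen the ring sizes to u64 before the
checked_add, so the descriptor table, available ring, and used ring can sit
above 4 GiB and a 32-bit build still validates them correctly. A simplified
sketch of the idiom, with a toy in-range test standing in for
GuestMemory::address_in_range:

    /// Returns true if `start + size` neither overflows u64 nor runs past the
    /// end of guest RAM; a simplified stand-in for the checks in queue.rs.
    fn ring_in_bounds(start: u64, size: usize, guest_mem_end: u64) -> bool {
        start
            .checked_add(size as u64)
            .map_or(false, |end| end <= guest_mem_end)
    }

    fn main() {
        let guest_mem_end = 1u64 << 33; // e.g. 8 GiB of guest RAM
        // 256 descriptors of 16 bytes each, placed just above the 4 GiB boundary.
        assert!(ring_in_bounds(0x1_0000_0000, 256 * 16, guest_mem_end));
        // Anything that overflows u64 or runs past the end of RAM is rejected.
        assert!(!ring_in_bounds(u64::max_value(), 16, guest_mem_end));
    }
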
diff --git a/devices/src/virtio/wl.rs b/devices/src/virtio/wl.rs
index 71fa80a..76f2709 100644
--- a/devices/src/virtio/wl.rs
+++ b/devices/src/virtio/wl.rs
@@ -90,9 +90,9 @@ fn round_to_page_size(v: u64) -> u64 {
 }
 
 fn parse_new(addr: GuestAddress, mem: &GuestMemory) -> WlResult<WlOp> {
-    const ID_OFFSET: usize = 8;
-    const FLAGS_OFFSET: usize = 12;
-    const SIZE_OFFSET: usize = 24;
+    const ID_OFFSET: u64 = 8;
+    const FLAGS_OFFSET: u64 = 12;
+    const SIZE_OFFSET: u64 = 24;
 
     let id: Le32 = mem.read_obj_from_addr(mem.checked_offset(addr, ID_OFFSET)
                                               .ok_or(WlError::CheckedOffset)?)?;
@@ -109,9 +109,9 @@ fn parse_new(addr: GuestAddress, mem: &GuestMemory) -> WlResult<WlOp> {
 }
 
 fn parse_send(addr: GuestAddress, len: u32, mem: &GuestMemory) -> WlResult<WlOp> {
-    const ID_OFFSET: usize = 8;
-    const VFD_COUNT_OFFSET: usize = 12;
-    const VFDS_OFFSET: usize = 16;
+    const ID_OFFSET: u64 = 8;
+    const VFD_COUNT_OFFSET: u64 = 12;
+    const VFDS_OFFSET: u64 = 16;
 
     let id: Le32 = mem.read_obj_from_addr(mem.checked_offset(addr, ID_OFFSET)
                                               .ok_or(WlError::CheckedOffset)?)?;
@@ -121,7 +121,7 @@ fn parse_send(addr: GuestAddress, len: u32, mem: &GuestMemory) -> WlResult<WlOp>
     let vfd_count: u32 = vfd_count.into();
     let vfds_addr = mem.checked_offset(addr, VFDS_OFFSET)
         .ok_or(WlError::CheckedOffset)?;
-    let data_addr = mem.checked_offset(vfds_addr, (vfd_count * 4) as usize)
+    let data_addr = mem.checked_offset(vfds_addr, (vfd_count * 4) as u64)
         .ok_or(WlError::CheckedOffset)?;
     Ok(WlOp::Send {
            id: id.into(),
@@ -133,7 +133,7 @@ fn parse_send(addr: GuestAddress, len: u32, mem: &GuestMemory) -> WlResult<WlOp>
 }
 
 fn parse_id(addr: GuestAddress, mem: &GuestMemory) -> WlResult<u32> {
-    const ID_OFFSET: usize = 8;
+    const ID_OFFSET: u64 = 8;
     let id: Le32 = mem.read_obj_from_addr(mem.checked_offset(addr, ID_OFFSET)
                                               .ok_or(WlError::CheckedOffset)?)?;
     Ok(id.into())
@@ -192,16 +192,17 @@ fn encode_vfd_recv(desc_mem: VolatileSlice,
     desc_mem.get_ref(0)?.store(ctrl_vfd_recv);
 
     let vfd_slice = desc_mem
-        .get_slice(size_of::<CtrlVfdRecv>(), vfd_ids.len() * size_of::<Le32>())?;
+        .get_slice(size_of::<CtrlVfdRecv>() as u64,
+                   (vfd_ids.len() * size_of::<Le32>()) as u64)?;
     for (i, &recv_vfd_id) in vfd_ids.iter().enumerate() {
         vfd_slice
-            .get_ref(size_of::<Le32>() * i)?
+            .get_ref((size_of::<Le32>() * i) as u64)?
             .store(recv_vfd_id);
     }
 
     let data_slice = desc_mem
-        .get_slice(size_of::<CtrlVfdRecv>() + vfd_ids.len() * size_of::<Le32>(),
-                   data.len())?;
+        .get_slice((size_of::<CtrlVfdRecv>() + vfd_ids.len() * size_of::<Le32>()) as u64,
+                   data.len() as u64)?;
     data_slice.copy_from(data);
 
     Ok((size_of::<CtrlVfdRecv>() + vfd_ids.len() * size_of::<Le32>() + data.len()) as u32)
@@ -615,7 +616,7 @@ impl WlState {
     }
 
     fn send(&mut self, vfd_id: u32, vfds: VolatileSlice, data: VolatileSlice) -> WlResult<WlResp> {
-        let vfd_count = vfds.size() / size_of::<Le32>();
+        let vfd_count = vfds.size() as usize / size_of::<Le32>();
         let mut vfd_ids = [Le32::from(0); VIRTWL_SEND_MAX_ALLOCS];
         vfds.copy_to(&mut vfd_ids[..]);
         let mut fds = [0; VIRTWL_SEND_MAX_ALLOCS];
@@ -665,8 +666,8 @@ impl WlState {
                 data_addr,
                 data_len,
             } => {
-                let vfd_mem = mem.get_slice(vfds_addr.0, (vfd_count as usize) * size_of::<Le32>())?;
-                let data_mem = mem.get_slice(data_addr.0, data_len as usize)?;
+                let vfd_mem = mem.get_slice(vfds_addr.0, (vfd_count as u64) * size_of::<Le32>() as u64)?;
+                let data_mem = mem.get_slice(data_addr.0, data_len as u64)?;
                 self.send(id, vfd_mem, data_mem)
             }
             WlOp::NewCtx { id } => self.new_context(id),
@@ -880,7 +881,7 @@ impl Worker {
                                         };
 
                                         let resp_mem = self.mem
-                                            .get_slice(resp_desc.addr.0, resp_desc.len as usize)
+                                            .get_slice(resp_desc.addr.0, resp_desc.len as u64)
                                             .unwrap();
                                         let used_len = encode_resp(resp_mem, resp)
                                             .unwrap_or_default();
@@ -922,7 +923,7 @@ impl Worker {
                     let (index, addr, desc_len) = self.in_desc_chains.pop_front().unwrap();
                     // This memory location is valid because it came from a queue which always
                     // checks the descriptor memory locations.
-                    let desc_mem = self.mem.get_slice(addr.0, desc_len as usize).unwrap();
+                    let desc_mem = self.mem.get_slice(addr.0, desc_len as u64).unwrap();
                     let len = match encode_resp(desc_mem, in_resp) {
                         Ok(len) => {
                             should_pop = true;
diff --git a/kernel_loader/src/lib.rs b/kernel_loader/src/lib.rs
index 3be9889..f9bd3bb 100644
--- a/kernel_loader/src/lib.rs
+++ b/kernel_loader/src/lib.rs
@@ -87,7 +87,7 @@ pub fn load_kernel<F>(guest_mem: &GuestMemory, kernel_start: GuestAddress, kerne
         kernel_image.seek(SeekFrom::Start(phdr.p_offset))
             .map_err(|_| Error::SeekKernelStart)?;
 
-        let mem_offset = kernel_start.checked_add(phdr.p_paddr as usize)
+        let mem_offset = kernel_start.checked_add(phdr.p_paddr)
             .ok_or(Error::InvalidProgramHeaderAddress)?;
         guest_mem.read_to_memory(mem_offset, kernel_image, phdr.p_filesz as usize)
             .map_err(|_| Error::ReadKernelImage)?;
@@ -109,7 +109,7 @@ pub fn load_cmdline(guest_mem: &GuestMemory, guest_addr: GuestAddress, cmdline:
         return Ok(());
     }
 
-    let end = guest_addr.checked_add(len + 1)
+    let end = guest_addr.checked_add(len as u64 + 1)
         .ok_or(Error::CommandLineOverflow)?; // Extra for null termination.
     if end > guest_mem.end_addr() {
         return Err(Error::CommandLineOverflow)?;
@@ -127,7 +127,7 @@ mod test {
     use super::*;
     use sys_util::{GuestAddress, GuestMemory};
 
-    const MEM_SIZE: usize = 0x8000;
+    const MEM_SIZE: u64 = 0x8000;
 
     fn create_guest_mem() -> GuestMemory {
         GuestMemory::new(&vec![(GuestAddress(0x0), MEM_SIZE)]).unwrap()
diff --git a/kvm/src/lib.rs b/kvm/src/lib.rs
index ef382ef..2b12261 100644
--- a/kvm/src/lib.rs
+++ b/kvm/src/lib.rs
@@ -246,7 +246,7 @@ impl Vm {
         // device memory, and there are no gaps, it follows that the lowest unused slot is 2+3=5.
         let slot = match self.mem_slot_gaps.pop() {
             Some(gap) => (-gap) as u32,
-            None => (self.device_memory.len() + self.guest_mem.num_regions()) as u32,
+            None => (self.device_memory.len() + self.guest_mem.num_regions() as usize) as u32,
         };
 
         // Safe because we check that the given guest address is valid and has no overlaps. We also
diff --git a/kvm/tests/dirty_log.rs b/kvm/tests/dirty_log.rs
index 94f854e..4ec7295 100644
--- a/kvm/tests/dirty_log.rs
+++ b/kvm/tests/dirty_log.rs
@@ -28,7 +28,7 @@ fn test_run() {
     let mmap = MemoryMapping::from_fd(&mem, mem_size as usize)
         .expect("failed to create memory mapping");
 
-    mmap.write_slice(&code[..], load_addr.offset())
+    mmap.write_slice(&code[..], load_addr.offset() as usize)
         .expect("Writing code to memory failed.");
 
     let kvm = Kvm::new().expect("new kvm failed");
diff --git a/src/linux.rs b/src/linux.rs
index b22c043..abf39f9 100644
--- a/src/linux.rs
+++ b/src/linux.rs
@@ -175,9 +175,9 @@ impl Drop for UnlinkUnixDatagram {
     }
 }
 
-const KERNEL_START_OFFSET: usize = 0x200000;
-const CMDLINE_OFFSET: usize = 0x20000;
-const CMDLINE_MAX_SIZE: usize = KERNEL_START_OFFSET - CMDLINE_OFFSET;
+const KERNEL_START_OFFSET: u64 = 0x200000;
+const CMDLINE_OFFSET: u64 = 0x20000;
+const CMDLINE_MAX_SIZE: u64 = KERNEL_START_OFFSET - CMDLINE_OFFSET;
 const BASE_DEV_MEMORY_PFN: u64 = 1u64 << 26;
 
 fn create_base_minijail(root: &Path, seccomp_policy: &Path) -> Result<Minijail> {
@@ -211,9 +211,9 @@ fn create_base_minijail(root: &Path, seccomp_policy: &Path) -> Result<Minijail>
 fn setup_memory(memory: Option<usize>) -> Result<GuestMemory> {
     let mem_size = memory.unwrap_or(256) << 20;
     #[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))]
-    let arch_mem_regions = vec![(GuestAddress(0), mem_size)];
+    let arch_mem_regions = vec![(GuestAddress(0), mem_size as u64)];
     #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
-    let arch_mem_regions = x86_64::arch_memory_regions(mem_size);
+    let arch_mem_regions = x86_64::arch_memory_regions(mem_size as u64);
     GuestMemory::new(&arch_mem_regions).map_err(Error::CreateGuestMemory)
 }
 
@@ -725,7 +725,7 @@ pub fn run_config(cfg: Config) -> Result<()> {
     let kvm = Kvm::new().map_err(Error::CreateKvm)?;
     let mut vm = setup_vm(&kvm, mem.clone())?;
 
-    let mut cmdline = kernel_cmdline::Cmdline::new(CMDLINE_MAX_SIZE);
+    let mut cmdline = kernel_cmdline::Cmdline::new(CMDLINE_MAX_SIZE as usize);
     cmdline
         .insert_str("console=ttyS0 noacpi reboot=k panic=1 pci=off")
         .unwrap();
diff --git a/sys_util/src/guest_address.rs b/sys_util/src/guest_address.rs
index 09b6578..1048c6d 100644
--- a/sys_util/src/guest_address.rs
+++ b/sys_util/src/guest_address.rs
@@ -9,7 +9,7 @@ use std::ops::{BitAnd, BitOr};
 
 /// Represents an Address in the guest's memory.
 #[derive(Clone, Copy, Debug)]
-pub struct GuestAddress(pub usize);
+pub struct GuestAddress(pub u64);
 
 impl GuestAddress {
     /// Returns the offset from this address to the given base address.
@@ -20,37 +20,37 @@ impl GuestAddress {
     /// # use sys_util::GuestAddress;
     ///   let base = GuestAddress(0x100);
     ///   let addr = GuestAddress(0x150);
-    ///   assert_eq!(addr.offset_from(base), 0x50usize);
+    ///   assert_eq!(addr.offset_from(base), 0x50u64);
     /// ```
-    pub fn offset_from(&self, base: GuestAddress) -> usize {
+    pub fn offset_from(&self, base: GuestAddress) -> u64 {
         self.0 - base.0
     }
 
-    /// Returns the address as a usize offset from 0x0.
+    /// Returns the address as a u64 offset from 0x0.
     /// Use this when a raw number is needed to pass to the kernel.
-    pub fn offset(&self) -> usize {
+    pub fn offset(&self) -> u64 {
         self.0
     }
 
     /// Returns the result of the add or None if there is overflow.
-    pub fn checked_add(&self, other: usize) -> Option<GuestAddress> {
+    pub fn checked_add(&self, other: u64) -> Option<GuestAddress> {
         self.0.checked_add(other).map(GuestAddress)
     }
 
     /// Returns the result of the base address + the size.
     /// Only use this when `offset` is guaranteed not to overflow.
-    pub fn unchecked_add(&self, offset: usize) -> GuestAddress {
+    pub fn unchecked_add(&self, offset: u64) -> GuestAddress {
         GuestAddress(self.0 + offset)
     }
 
     /// Returns the result of the subtraction of None if there is underflow.
-    pub fn checked_sub(&self, other: usize) -> Option<GuestAddress> {
+    pub fn checked_sub(&self, other: u64) -> Option<GuestAddress> {
         self.0.checked_sub(other).map(GuestAddress)
     }
 
     /// Returns the bitwise and of the address with the given mask.
     pub fn mask(&self, mask: u64) -> GuestAddress {
-        GuestAddress(self.0 & mask as usize)
+        GuestAddress(self.0 & mask as u64)
     }
 }
 
@@ -58,7 +58,7 @@ impl BitAnd<u64> for GuestAddress {
     type Output = GuestAddress;
 
     fn bitand(self, other: u64) -> GuestAddress {
-        GuestAddress(self.0 & other as usize)
+        GuestAddress(self.0 & other as u64)
     }
 }
 
@@ -66,7 +66,7 @@ impl BitOr<u64> for GuestAddress {
     type Output = GuestAddress;
 
     fn bitor(self, other: u64) -> GuestAddress {
-        GuestAddress(self.0 | other as usize)
+        GuestAddress(self.0 | other as u64)
     }
 }
 
diff --git a/sys_util/src/guest_memory.rs b/sys_util/src/guest_memory.rs
index 01892b5..f9dc716 100644
--- a/sys_util/src/guest_memory.rs
+++ b/sys_util/src/guest_memory.rs
@@ -30,7 +30,7 @@ struct MemoryRegion {
 
 fn region_end(region: &MemoryRegion) -> GuestAddress {
     // unchecked_add is safe as the region bounds were checked when it was created.
-    region.guest_base.unchecked_add(region.mapping.size())
+    region.guest_base.unchecked_add(region.mapping.size() as u64)
 }
 
 /// Tracks a memory region and where it is mapped in the guest.
@@ -42,18 +42,18 @@ pub struct GuestMemory {
 impl GuestMemory {
     /// Creates a container for guest memory regions.
     /// Valid memory regions are specified as a Vec of (Address, Size) tuples sorted by Address.
-    pub fn new(ranges: &[(GuestAddress, usize)]) -> Result<GuestMemory> {
+    pub fn new(ranges: &[(GuestAddress, u64)]) -> Result<GuestMemory> {
         let mut regions = Vec::<MemoryRegion>::new();
         for range in ranges.iter() {
             if let Some(last) = regions.last() {
                 if last.guest_base
-                       .checked_add(last.mapping.size())
+                       .checked_add(last.mapping.size() as u64)
                        .map_or(true, |a| a > range.0) {
                     return Err(Error::MemoryRegionOverlap);
                 }
             }
 
-            let mapping = MemoryMapping::new(range.1)
+            let mapping = MemoryMapping::new(range.1 as usize)
                 .map_err(Error::MemoryMappingFailed)?;
             regions.push(MemoryRegion {
                              mapping: mapping,
@@ -90,21 +90,21 @@ impl GuestMemory {
     }
 
     /// Returns the address plus the offset if it is in range.
-    pub fn checked_offset(&self, addr: GuestAddress, offset: usize) -> Option<GuestAddress> {
+    pub fn checked_offset(&self, addr: GuestAddress, offset: u64) -> Option<GuestAddress> {
         addr.checked_add(offset)
             .and_then(|a| if a < self.end_addr() { Some(a) } else { None })
     }
 
     /// Returns the size of the memory region in bytes.
-    pub fn num_regions(&self) -> usize {
-        self.regions.len()
+    pub fn num_regions(&self) -> u64 {
+        self.regions.len() as u64
     }
 
     /// Madvise away the address range in the host that is associated with the given guest range.
-    pub fn dont_need_range(&self, addr: GuestAddress, count: usize) -> Result<()> {
+    pub fn dont_need_range(&self, addr: GuestAddress, count: u64) -> Result<()> {
         self.do_in_region(addr, move |mapping, offset| {
             mapping
-                .dont_need_range(offset, count)
+                .dont_need_range(offset, count as usize)
                 .map_err(|e| Error::MemoryAccess(addr, e))
         })
     }
@@ -352,7 +352,7 @@ impl GuestMemory {
     {
         for region in self.regions.iter() {
             if guest_addr >= region.guest_base && guest_addr < region_end(region) {
-                return cb(&region.mapping, guest_addr.offset_from(region.guest_base));
+                return cb(&region.mapping, guest_addr.offset_from(region.guest_base) as usize);
             }
         }
         Err(Error::InvalidGuestAddress(guest_addr))
@@ -360,7 +360,7 @@ impl GuestMemory {
 }
 
 impl VolatileMemory for GuestMemory {
-    fn get_slice(&self, offset: usize, count: usize) -> VolatileMemoryResult<VolatileSlice> {
+    fn get_slice(&self, offset: u64, count: u64) -> VolatileMemoryResult<VolatileSlice> {
         for region in self.regions.iter() {
             if offset >= region.guest_base.0 && offset < region_end(region).0 {
                 return region
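
Note how guest_memory.rs keeps guest addresses and region sizes as u64 while
each individual host mapping is still indexed with usize: do_in_region first
finds the region containing the address and only then narrows the intra-region
offset, which is safe because a single mapping never exceeds the host address
space. A simplified, hypothetical sketch of that lookup (Region here is a
stand-in for the real MemoryRegion/MemoryMapping pair):

    struct Region {
        guest_base: u64, // guest physical address where the region starts
        size: usize,     // size of the host mmap backing it
    }

    /// Finds the region containing `guest_addr` and returns (region index,
    /// offset into that region's host mapping).
    fn host_offset(regions: &[Region], guest_addr: u64) -> Option<(usize, usize)> {
        for (i, r) in regions.iter().enumerate() {
            let end = r.guest_base + r.size as u64;
            if guest_addr >= r.guest_base && guest_addr < end {
                // Narrowing back to usize is fine here: the offset is bounded
                // by the region size, which itself fits in a usize.
                return Some((i, (guest_addr - r.guest_base) as usize));
            }
        }
        None
    }

    fn main() {
        // Two regions mirroring the x86_64 layout: RAM below the 32-bit device
        // hole and the remainder above 4 GiB.
        let regions = [
            Region { guest_base: 0, size: 0xd000_0000 },
            Region { guest_base: 0x1_0000_0000, size: 0x2000_0000 },
        ];
        assert_eq!(host_offset(&regions, 0x1_0000_1000), Some((1, 0x1000)));
        assert_eq!(host_offset(&regions, 0xf000_0000), None); // falls in the hole
    }
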
diff --git a/sys_util/src/mmap.rs b/sys_util/src/mmap.rs
index 403ae92..18891a1 100644
--- a/sys_util/src/mmap.rs
+++ b/sys_util/src/mmap.rs
@@ -346,15 +346,15 @@ impl MemoryMapping {
 }
 
 impl VolatileMemory for MemoryMapping {
-    fn get_slice(&self, offset: usize, count: usize) -> VolatileMemoryResult<VolatileSlice> {
+    fn get_slice(&self, offset: u64, count: u64) -> VolatileMemoryResult<VolatileSlice> {
         let mem_end = calc_offset(offset, count)?;
-        if mem_end > self.size {
+        if mem_end > self.size as u64 {
             return Err(VolatileMemoryError::OutOfBounds { addr: mem_end });
         }
 
         // Safe because we checked that offset + count was within our range and we only ever hand
         // out volatile accessors.
-        Ok(unsafe { VolatileSlice::new((self.addr as usize + offset) as *mut _, count) })
+        Ok(unsafe { VolatileSlice::new((self.addr as usize + offset as usize) as *mut _, count) })
     }
 }
 
@@ -435,10 +435,10 @@ mod tests {
     fn slice_overflow_error() {
         use std::usize;
         let m = MemoryMapping::new(5).unwrap();
-        let res = m.get_slice(usize::MAX, 3).unwrap_err();
+        let res = m.get_slice(std::u64::MAX, 3).unwrap_err();
         assert_eq!(res,
                    VolatileMemoryError::Overflow {
-                       base: usize::MAX,
+                       base: std::u64::MAX,
                        offset: 3,
                    });
 
diff --git a/sys_util/src/sock_ctrl_msg.rs b/sys_util/src/sock_ctrl_msg.rs
index 2d4b91c..545e1f2 100644
--- a/sys_util/src/sock_ctrl_msg.rs
+++ b/sys_util/src/sock_ctrl_msg.rs
@@ -91,7 +91,7 @@ unsafe impl<'a> IntoIovec for VolatileSlice<'a> {
     }
 
     fn size(&self) -> usize {
-        self.size()
+        self.size() as usize
     }
 }
 
diff --git a/vhost/src/lib.rs b/vhost/src/lib.rs
index 4a103a2..dc1bf81 100644
--- a/vhost/src/lib.rs
+++ b/vhost/src/lib.rs
@@ -98,7 +98,7 @@ pub trait Vhost: AsRawFd + std::marker::Sized {
 
     /// Set the guest memory mappings for vhost to use.
     fn set_mem_table(&self) -> Result<()> {
-        let num_regions = self.mem().num_regions();
+        let num_regions = self.mem().num_regions() as usize;
         let vec_size_bytes = mem::size_of::<virtio_sys::vhost_memory>() +
                              (num_regions * mem::size_of::<virtio_sys::vhost_memory_region>());
         let mut bytes: Vec<u8> = vec![0; vec_size_bytes];
@@ -175,15 +175,15 @@ pub trait Vhost: AsRawFd + std::marker::Sized {
                   (queue_size & (queue_size - 1)) != 0 {
             false
         } else if desc_addr
-                      .checked_add(desc_table_size)
+                      .checked_add(desc_table_size as u64)
                       .map_or(true, |v| !self.mem().address_in_range(v)) {
             false
         } else if avail_addr
-                      .checked_add(avail_ring_size)
+                      .checked_add(avail_ring_size as u64)
                       .map_or(true, |v| !self.mem().address_in_range(v)) {
             false
         } else if used_addr
-                      .checked_add(used_ring_size)
+                      .checked_add(used_ring_size as u64)
                       .map_or(true, |v| !self.mem().address_in_range(v)) {
             false
         } else {
diff --git a/vm_control/src/lib.rs b/vm_control/src/lib.rs
index bf363e1..6688077 100644
--- a/vm_control/src/lib.rs
+++ b/vm_control/src/lib.rs
@@ -205,7 +205,7 @@ impl VmRequest {
                 };
                 let pfn = *next_mem_pfn;
                 let slot =
-                    match vm.add_device_memory(GuestAddress((pfn << 12) as usize), mmap, false) {
+                    match vm.add_device_memory(GuestAddress(pfn << 12), mmap, false) {
                         Ok(slot) => slot,
                         Err(e) => return VmResponse::Err(e),
                     };
diff --git a/x86_64/src/lib.rs b/x86_64/src/lib.rs
index ad3ec46..30c4b13 100644
--- a/x86_64/src/lib.rs
+++ b/x86_64/src/lib.rs
@@ -89,17 +89,17 @@ pub enum Error {
 }
 pub type Result<T> = result::Result<T, Error>;
 
-const BOOT_STACK_POINTER: usize = 0x8000;
-const MEM_32BIT_GAP_SIZE: usize = (768 << 20);
-const FIRST_ADDR_PAST_32BITS: usize = (1 << 32);
-const KERNEL_64BIT_ENTRY_OFFSET: usize = 0x200;
-const ZERO_PAGE_OFFSET: usize = 0x7000;
+const BOOT_STACK_POINTER: u64 = 0x8000;
+const MEM_32BIT_GAP_SIZE: u64 = (768 << 20);
+const FIRST_ADDR_PAST_32BITS: u64 = (1 << 32);
+const KERNEL_64BIT_ENTRY_OFFSET: u64 = 0x200;
+const ZERO_PAGE_OFFSET: u64 = 0x7000;
 
 /// Returns a Vec of the valid memory addresses.
 /// These should be used to configure the GuestMemory structure for the platfrom.
 /// For x86_64 all addresses are valid from the start of the kenel except a
 /// carve out at the end of 32bit address space.
-pub fn arch_memory_regions(size: usize) -> Vec<(GuestAddress, usize)> {
+pub fn arch_memory_regions(size: u64) -> Vec<(GuestAddress, u64)> {
     let mem_end = GuestAddress(size);
     let first_addr_past_32bits = GuestAddress(FIRST_ADDR_PAST_32BITS);
     let end_32bit_gap_start = GuestAddress(FIRST_ADDR_PAST_32BITS - MEM_32BIT_GAP_SIZE);
@@ -205,7 +205,7 @@ pub fn configure_system(guest_mem: &GuestMemory,
     }
 
     let zero_page_addr = GuestAddress(ZERO_PAGE_OFFSET);
-    guest_mem.checked_offset(zero_page_addr, mem::size_of::<boot_params>())
+    guest_mem.checked_offset(zero_page_addr, mem::size_of::<boot_params>() as u64)
         .ok_or(Error::ZeroPagePastRamEnd)?;
     guest_mem.write_obj_at_addr(params, zero_page_addr)
         .map_err(|_| Error::ZeroPageSetup)?;
@@ -234,17 +234,17 @@ mod tests {
 
     #[test]
     fn regions_lt_4gb() {
-        let regions = arch_memory_regions(1usize << 29);
+        let regions = arch_memory_regions(1u64 << 29);
         assert_eq!(1, regions.len());
         assert_eq!(GuestAddress(0), regions[0].0);
-        assert_eq!(1usize << 29, regions[0].1);
+        assert_eq!(1u64 << 29, regions[0].1);
     }
 
     #[test]
     fn regions_gt_4gb() {
-        let regions = arch_memory_regions((1usize << 32) + 0x8000);
+        let regions = arch_memory_regions((1u64 << 32) + 0x8000);
         assert_eq!(2, regions.len());
         assert_eq!(GuestAddress(0), regions[0].0);
-        assert_eq!(GuestAddress(1usize << 32), regions[1].0);
+        assert_eq!(GuestAddress(1u64 << 32), regions[1].0);
     }
 }
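
The constants above can only be expressed once they are u64 (1 << 32 does not
fit in a 32-bit usize). A hypothetical re-implementation of arch_memory_regions
that is consistent with those constants and with the regions_lt_4gb /
regions_gt_4gb tests, though not copied from the real function body:

    const MEM_32BIT_GAP_SIZE: u64 = 768 << 20;
    const FIRST_ADDR_PAST_32BITS: u64 = 1 << 32;

    fn arch_memory_regions(size: u64) -> Vec<(u64, u64)> {
        let end_32bit_gap_start = FIRST_ADDR_PAST_32BITS - MEM_32BIT_GAP_SIZE;
        if size <= end_32bit_gap_start {
            // Everything fits below the device/PCI hole.
            vec![(0, size)]
        } else {
            // Split the RAM: the part below the hole, and the rest above 4 GiB.
            vec![
                (0, end_32bit_gap_start),
                (FIRST_ADDR_PAST_32BITS, size - end_32bit_gap_start),
            ]
        }
    }

    fn main() {
        assert_eq!(arch_memory_regions(1 << 29).len(), 1);
        let regions = arch_memory_regions((1u64 << 32) + 0x8000);
        assert_eq!(regions.len(), 2);
        assert_eq!(regions[1].0, 1u64 << 32);
    }
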
diff --git a/x86_64/src/mptable.rs b/x86_64/src/mptable.rs
index b74992c..fea1622 100644
--- a/x86_64/src/mptable.rs
+++ b/x86_64/src/mptable.rs
@@ -57,7 +57,7 @@ const APIC_VERSION: u8 = 0x14;
 const CPU_STEPPING: u32 = 0x600;
 const CPU_FEATURE_APIC: u32 = 0x200;
 const CPU_FEATURE_FPU: u32 = 0x001;
-const MPTABLE_START: usize = 0x400 * 639; // Last 1k of Linux's 640k base RAM.
+const MPTABLE_START: u64 = 0x400 * 639; // Last 1k of Linux's 640k base RAM.
 
 fn compute_checksum<T: Copy>(v: &T) -> u8 {
     // Safe because we are only reading the bytes within the size of the `T` reference `v`.
@@ -92,7 +92,7 @@ pub fn setup_mptable(mem: &GuestMemory, num_cpus: u8) -> Result<()> {
 
     // The checked_add here ensures the all of the following base_mp.unchecked_add's will be without
     // overflow.
-    if let Some(end_mp) = base_mp.checked_add(mp_size - 1) {
+    if let Some(end_mp) = base_mp.checked_add(mp_size as u64 - 1) {
         if !mem.address_in_range(end_mp) {
             return Err(Error::NotEnoughMemory);
         }
@@ -109,17 +109,17 @@ pub fn setup_mptable(mem: &GuestMemory, num_cpus: u8) -> Result<()> {
         mpf_intel.signature = SMP_MAGIC_IDENT;
         mpf_intel.length = 1;
         mpf_intel.specification = 4;
-        mpf_intel.physptr = (base_mp.offset() + mem::size_of::<mpf_intel>()) as u32;
+        mpf_intel.physptr = (base_mp.offset() + mem::size_of::<mpf_intel>() as u64) as u32;
         mpf_intel.checksum = mpf_intel_compute_checksum(&mpf_intel);
         mem.write_obj_at_addr(mpf_intel, base_mp)
             .map_err(|_| Error::WriteMpfIntel)?;
-        base_mp = base_mp.unchecked_add(size);
+        base_mp = base_mp.unchecked_add(size as u64);
     }
 
     // We set the location of the mpc_table here but we can't fill it out until we have the length
     // of the entire table later.
     let table_base = base_mp;
-    base_mp = base_mp.unchecked_add(mem::size_of::<mpc_table>());
+    base_mp = base_mp.unchecked_add(mem::size_of::<mpc_table>() as u64);
 
     let mut checksum: u8 = 0;
     let ioapicid: u8 = num_cpus + 1;
@@ -140,7 +140,7 @@ pub fn setup_mptable(mem: &GuestMemory, num_cpus: u8) -> Result<()> {
         mpc_cpu.featureflag = CPU_FEATURE_APIC | CPU_FEATURE_FPU;
         mem.write_obj_at_addr(mpc_cpu, base_mp)
             .map_err(|_| Error::WriteMpcCpu)?;
-        base_mp = base_mp.unchecked_add(size);
+        base_mp = base_mp.unchecked_add(size as u64);
         checksum = checksum.wrapping_add(compute_checksum(&mpc_cpu));
     }
     {
@@ -153,7 +153,7 @@ pub fn setup_mptable(mem: &GuestMemory, num_cpus: u8) -> Result<()> {
         mpc_ioapic.apicaddr = IO_APIC_DEFAULT_PHYS_BASE;
         mem.write_obj_at_addr(mpc_ioapic, base_mp)
             .map_err(|_| Error::WriteMpcIoapic)?;
-        base_mp = base_mp.unchecked_add(size);
+        base_mp = base_mp.unchecked_add(size as u64);
         checksum = checksum.wrapping_add(compute_checksum(&mpc_ioapic));
     }
     {
@@ -164,7 +164,7 @@ pub fn setup_mptable(mem: &GuestMemory, num_cpus: u8) -> Result<()> {
         mpc_bus.bustype = BUS_TYPE_ISA;
         mem.write_obj_at_addr(mpc_bus, base_mp)
             .map_err(|_| Error::WriteMpcBus)?;
-        base_mp = base_mp.unchecked_add(size);
+        base_mp = base_mp.unchecked_add(size as u64);
         checksum = checksum.wrapping_add(compute_checksum(&mpc_bus));
     }
     {
@@ -179,7 +179,7 @@ pub fn setup_mptable(mem: &GuestMemory, num_cpus: u8) -> Result<()> {
         mpc_intsrc.dstirq = 0;
         mem.write_obj_at_addr(mpc_intsrc, base_mp)
             .map_err(|_| Error::WriteMpcIntsrc)?;
-        base_mp = base_mp.unchecked_add(size);
+        base_mp = base_mp.unchecked_add(size as u64);
         checksum = checksum.wrapping_add(compute_checksum(&mpc_intsrc));
     }
     // Per kvm_setup_default_irq_routing() in kernel
@@ -195,7 +195,7 @@ pub fn setup_mptable(mem: &GuestMemory, num_cpus: u8) -> Result<()> {
         mpc_intsrc.dstirq = i;
         mem.write_obj_at_addr(mpc_intsrc, base_mp)
             .map_err(|_| Error::WriteMpcIntsrc)?;
-        base_mp = base_mp.unchecked_add(size);
+        base_mp = base_mp.unchecked_add(size as u64);
         checksum = checksum.wrapping_add(compute_checksum(&mpc_intsrc));
     }
     {
@@ -210,7 +210,7 @@ pub fn setup_mptable(mem: &GuestMemory, num_cpus: u8) -> Result<()> {
         mpc_lintsrc.destapiclint = 0;
         mem.write_obj_at_addr(mpc_lintsrc, base_mp)
             .map_err(|_| Error::WriteMpcLintsrc)?;
-        base_mp = base_mp.unchecked_add(size);
+        base_mp = base_mp.unchecked_add(size as u64);
         checksum = checksum.wrapping_add(compute_checksum(&mpc_lintsrc));
     }
     {
@@ -225,7 +225,7 @@ pub fn setup_mptable(mem: &GuestMemory, num_cpus: u8) -> Result<()> {
         mpc_lintsrc.destapiclint = 1;
         mem.write_obj_at_addr(mpc_lintsrc, base_mp)
             .map_err(|_| Error::WriteMpcLintsrc)?;
-        base_mp = base_mp.unchecked_add(size);
+        base_mp = base_mp.unchecked_add(size as u64);
         checksum = checksum.wrapping_add(compute_checksum(&mpc_lintsrc));
     }
 
@@ -269,7 +269,7 @@ mod tests {
     fn bounds_check() {
         let num_cpus = 4;
         let mem = GuestMemory::new(&[(GuestAddress(MPTABLE_START),
-                                      compute_mp_size(num_cpus))]).unwrap();
+                                      compute_mp_size(num_cpus) as u64)]).unwrap();
 
         setup_mptable(&mem, num_cpus).unwrap();
     }
@@ -278,7 +278,7 @@ mod tests {
     fn bounds_check_fails() {
         let num_cpus = 4;
         let mem = GuestMemory::new(&[(GuestAddress(MPTABLE_START),
-                                      compute_mp_size(num_cpus) - 1)]).unwrap();
+                                      (compute_mp_size(num_cpus) - 1) as u64)]).unwrap();
 
         assert!(setup_mptable(&mem, num_cpus).is_err());
     }
@@ -287,7 +287,7 @@ mod tests {
     fn mpf_intel_checksum() {
         let num_cpus = 1;
         let mem = GuestMemory::new(&[(GuestAddress(MPTABLE_START),
-                                      compute_mp_size(num_cpus))]).unwrap();
+                                      compute_mp_size(num_cpus) as u64)]).unwrap();
 
         setup_mptable(&mem, num_cpus).unwrap();
 
@@ -300,12 +300,12 @@ mod tests {
     fn mpc_table_checksum() {
         let num_cpus = 4;
         let mem = GuestMemory::new(&[(GuestAddress(MPTABLE_START),
-                                      compute_mp_size(num_cpus))]).unwrap();
+                                      compute_mp_size(num_cpus) as u64)]).unwrap();
 
         setup_mptable(&mem, num_cpus).unwrap();
 
         let mpf_intel: mpf_intel = mem.read_obj_from_addr(GuestAddress(MPTABLE_START)).unwrap();
-        let mpc_offset = GuestAddress(mpf_intel.physptr as usize);
+        let mpc_offset = GuestAddress(mpf_intel.physptr as u64);
         let mpc_table: mpc_table = mem.read_obj_from_addr(mpc_offset).unwrap();
 
         struct Sum(u8);
@@ -331,26 +331,26 @@ mod tests {
     fn cpu_entry_count() {
         const MAX_CPUS: u8 = 0xff;
         let mem = GuestMemory::new(&[(GuestAddress(MPTABLE_START),
-                                      compute_mp_size(MAX_CPUS))]).unwrap();
+                                      compute_mp_size(MAX_CPUS) as u64)]).unwrap();
 
         for i in 0..MAX_CPUS {
             setup_mptable(&mem, i).unwrap();
 
             let mpf_intel: mpf_intel = mem.read_obj_from_addr(GuestAddress(MPTABLE_START)).unwrap();
-            let mpc_offset = GuestAddress(mpf_intel.physptr as usize);
+            let mpc_offset = GuestAddress(mpf_intel.physptr as u64);
             let mpc_table: mpc_table = mem.read_obj_from_addr(mpc_offset).unwrap();
             let mpc_end = mpc_offset
-                .checked_add(mpc_table.length as usize)
+                .checked_add(mpc_table.length as u64)
                 .unwrap();
 
             let mut entry_offset = mpc_offset
-                .checked_add(mem::size_of::<mpc_table>())
+                .checked_add(mem::size_of::<mpc_table>() as u64)
                 .unwrap();
             let mut cpu_count = 0;
             while entry_offset < mpc_end {
                 let entry_type: u8 = mem.read_obj_from_addr(entry_offset).unwrap();
                 entry_offset = entry_offset
-                    .checked_add(table_entry_size(entry_type))
+                    .checked_add(table_entry_size(entry_type) as u64)
                     .unwrap();
                 assert!(entry_offset <= mpc_end);
                 if entry_type as u32 == MP_PROCESSOR {
diff --git a/x86_64/src/regs.rs b/x86_64/src/regs.rs
index cfe6de3..e364ce4 100644
--- a/x86_64/src/regs.rs
+++ b/x86_64/src/regs.rs
@@ -170,15 +170,15 @@ const X86_CR4_PAE: u64 = 0x20;
 
 const EFER_LME: u64 = 0x100;
 
-const BOOT_GDT_OFFSET: usize = 0x500;
-const BOOT_IDT_OFFSET: usize = 0x520;
+const BOOT_GDT_OFFSET: u64 = 0x500;
+const BOOT_IDT_OFFSET: u64 = 0x520;
 
 const BOOT_GDT_MAX: usize = 4;
 
 fn write_gdt_table(table: &[u64], guest_mem: &GuestMemory) -> Result<()> {
     let boot_gdt_addr = GuestAddress(BOOT_GDT_OFFSET);
     for (index, entry) in table.iter().enumerate() {
-        let addr = guest_mem.checked_offset(boot_gdt_addr, index * mem::size_of::<u64>())
+        let addr = guest_mem.checked_offset(boot_gdt_addr, (index * mem::size_of::<u64>()) as u64)
             .ok_or(Error::WriteGDTFailure)?;
         guest_mem.write_obj_at_addr(*entry, addr)
             .map_err(|_| Error::WriteGDTFailure)?;
@@ -270,7 +270,7 @@ mod tests {
         GuestMemory::new(&vec![(GuestAddress(0), 0x10000)]).unwrap()
     }
 
-    fn read_u64(gm: &GuestMemory, offset: usize) -> u64 {
+    fn read_u64(gm: &GuestMemory, offset: u64) -> u64 {
         let read_addr = GuestAddress(offset);
         gm.read_obj_from_addr(read_addr).unwrap()
     }