author    Zach Reizner <zachr@google.com>    2018-06-14 18:55:12 -0700
committer chrome-bot <chrome-bot@chromium.org>    2018-07-20 05:30:56 -0700
commit    cd2d4fe45e3e7ecc635eff2107f5b875bcd8504b (patch)
tree      b5f97ad43dfe2f920b4368a2dede9c5a194c551d /data_model
parent    59caefa5ab3d8c72af9307c0c80373172492821f (diff)
data_model: add offset, copy_to_volatile_slice, Copy to VolatileSlice
These methods are convenient for safely doing complex copies between
`VolatileSlice`s.

TEST=cargo test -p data_model
BUG=None

Change-Id: I02f446953c24ef5cbb2cebd306344b1e13556bd7
Reviewed-on: https://chromium-review.googlesource.com/1102153
Commit-Ready: Zach Reizner <zachr@chromium.org>
Tested-by: Zach Reizner <zachr@chromium.org>
Reviewed-by: Dylan Reid <dgreid@chromium.org>
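
As a quick illustration of the two new methods, here is a rough standalone sketch, modeled on the doctest added in this change. It is not part of the commit: the function name `copy_lower_half_to_upper`, the buffer size, and the error mapping are illustrative, with signatures assumed from the diff below. It copies the first 16 bytes of a 32-byte buffer over its last 16 bytes:

    extern crate data_model;
    use data_model::VolatileMemory;

    fn copy_lower_half_to_upper() -> Result<(), ()> {
        let mut mem = [0u8; 32];
        let mem_ref = &mut mem[..];
        // Volatile view over the whole 32-byte buffer.
        let vslice = mem_ref.get_slice(0, 32).map_err(|_| ())?;
        // offset(16): same backing memory, start advanced by 16 bytes, size reduced to 16 bytes.
        let upper = vslice.offset(16).map_err(|_| ())?;
        // Copies min(self.size(), slice.size()) bytes; overlapping ranges are fine because the
        // implementation uses std::ptr::copy (memmove semantics).
        vslice.copy_to_volatile_slice(upper);
        Ok(())
    }

    fn main() {
        copy_lower_half_to_upper().expect("volatile copy failed");
    }

Note that `vslice` remains usable after being passed by value to `offset`, since this change also derives `Copy` for `VolatileSlice`.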
Diffstat (limited to 'data_model')
-rw-r--r--    data_model/src/volatile_memory.rs    63
1 file changed, 60 insertions(+), 3 deletions(-)
diff --git a/data_model/src/volatile_memory.rs b/data_model/src/volatile_memory.rs
index 98528bc..d448fd1 100644
--- a/data_model/src/volatile_memory.rs
+++ b/data_model/src/volatile_memory.rs
@@ -19,13 +19,16 @@
 //! done concurrently without synchronization. With volatile access we know that the compiler has
 //! not reordered or elided the access.
 
+use std::cmp::min;
+use std::fmt;
 use std::io::Result as IoResult;
 use std::io::{Read, Write};
+use std::{isize, usize};
 use std::marker::PhantomData;
 use std::mem::size_of;
-use std::ptr::{write_volatile, read_volatile};
+use std::ptr::copy;
+use std::ptr::{write_volatile, read_volatile, null_mut};
 use std::result;
-use std::fmt;
 use std::slice::{from_raw_parts, from_raw_parts_mut};
 
 use DataInit;
@@ -114,13 +117,23 @@ impl<'a> VolatileMemory for &'a mut [u8] {
 }
 
 /// A slice of raw memory that supports volatile access.
-#[derive(Debug)]
+#[derive(Copy, Clone, Debug)]
 pub struct VolatileSlice<'a> {
     addr: *mut u8,
     size: u64,
     phantom: PhantomData<&'a u8>,
 }
 
+impl<'a> Default for VolatileSlice<'a> {
+    fn default() -> VolatileSlice<'a> {
+        VolatileSlice {
+            addr: null_mut(),
+            size: 0,
+            phantom: PhantomData,
+        }
+    }
+}
+
 impl<'a> VolatileSlice<'a> {
     /// Creates a slice of raw memory that must support volatile access.
     ///
@@ -146,6 +159,29 @@ impl<'a> VolatileSlice<'a> {
         self.size
     }
 
+    /// Creates a copy of this slice with the address increased by `count` bytes, and the size
+    /// reduced by `count` bytes.
+    pub fn offset(self, count: u64) -> Result<VolatileSlice<'a>> {
+        let new_addr = (self.addr as u64)
+            .checked_add(count)
+            .ok_or(VolatileMemoryError::Overflow {
+                       base: self.addr as u64,
+                       offset: count,
+                   })?;
+        if new_addr > usize::MAX as u64 {
+            return Err(VolatileMemoryError::Overflow {
+                           base: self.addr as u64,
+                           offset: count,
+                       })?;
+        }
+        let new_size = self.size
+            .checked_sub(count)
+            .ok_or(VolatileMemoryError::OutOfBounds { addr: new_addr })?;
+        // Safe because the memory has the same lifetime and points to a subset of the memory of the
+        // original slice.
+        unsafe { Ok(VolatileSlice::new(new_addr as *mut u8, new_size)) }
+    }
+
     /// Copies `self.size()` or `buf.len()` times the size of `T` bytes, whichever is smaller, to
     /// `buf`.
     ///
@@ -181,6 +217,27 @@ impl<'a> VolatileSlice<'a> {
         }
     }
 
+    /// Copies `self.size()` or `slice.size()` bytes, whichever is smaller, to `slice`.
+    ///
+    /// The copies happen in an undefined order.
+    /// # Examples
+    ///
+    /// ```
+    /// # use data_model::VolatileMemory;
+    /// # fn test_write_null() -> Result<(), ()> {
+    /// let mut mem = [0u8; 32];
+    /// let mem_ref = &mut mem[..];
+    /// let vslice = mem_ref.get_slice(0, 32).map_err(|_| ())?;
+    /// vslice.copy_to_volatile_slice(vslice.get_slice(16, 16).map_err(|_| ())?);
+    /// # Ok(())
+    /// # }
+    /// ```
+    pub fn copy_to_volatile_slice(&self, slice: VolatileSlice) {
+        unsafe {
+            copy(self.addr, slice.addr, min(self.size, slice.size) as usize);
+        }
+    }
+
     /// Copies `self.size()` or `buf.len()` times the size of `T` bytes, whichever is smaller, to
     /// this slice's memory.
     ///