author    Tomasz Jeznach <tjeznach@chromium.org>  2020-05-15 14:26:27 -0700
committer Commit Bot <commit-bot@chromium.org>    2020-05-26 21:48:15 +0000
commit    aa6bdd92da0eb61a285a0721e1eaf1ca9e2b8f70 (patch)
tree      bf6c962d36e9b1e3073842da1a75966507d55391
parent    e7d1221c9d5a4e23b6142ef466892ccf38cfde9c (diff)
resources: allocate_at support for address_allocator
Add support for allocating an address range at a specific location
and size. The allocation succeeds only if the requested range lies
within the pool and is free.

Also switch the address_allocator algorithm from a simple bump-pointer
scheme to a first-fit implementation over a set of free regions, to
allow memory release and reuse in the future.
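
A minimal usage sketch (illustrative, not part of the change itself;
the pool bounds match the new unit tests below, the tag strings are
made up):

    // Pool covering [0x1000, 0x1fff], default alignment 0x100.
    let mut pool = AddressAllocator::new(0x1000, 0x1000, Some(0x100)).unwrap();
    // Claim a fixed range; OutOfSpace if any part of it is taken.
    pool.allocate_at(0x1200, 0x100, Alloc::Anon(0), String::from("fixed")).unwrap();
    // First-fit reuses the free region in front of the fixed range.
    assert_eq!(pool.allocate(0x100, Alloc::Anon(1), String::from("dyn")), Ok(0x1000));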

BUG=None
TEST=cargo test -p resources && tast test vm.*

Change-Id: I58218f5a2c6a215152904cc1cf0748e842fa7374
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/platform/crosvm/+/2203297
Tested-by: Tomasz Jeznach <tjeznach@chromium.org>
Commit-Queue: Tomasz Jeznach <tjeznach@chromium.org>
Reviewed-by: Zach Reizner <zachr@chromium.org>
Reviewed-by: Daniel Verkamp <dverkamp@chromium.org>
Reviewed-by: Dylan Reid <dgreid@chromium.org>
-rw-r--r--  resources/src/address_allocator.rs | 134
1 file changed, 109 insertions(+), 25 deletions(-)
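
For orientation before the patch body: the change keeps free space as a
BTreeSet of inclusive (start, end) ranges and carves allocations out of
the first range that fits. A standalone sketch of that first-fit-and-split
step (simplified from the patch, not the exact crosvm code; the helper
name first_fit is just for this sketch, and it assumes size > 0 and a
nonzero power-of-two alignment, which the real code validates first):

    use std::collections::BTreeSet;

    // Find the first free region whose aligned base leaves room for
    // `size` bytes, carve the allocation out, and reinsert the unused
    // head and tail. Ranges are inclusive (start, end) pairs.
    fn first_fit(regions: &mut BTreeSet<(u64, u64)>, size: u64, alignment: u64) -> Option<u64> {
        let slot = *regions.iter().find(|&&(base, end)| {
            match base % alignment {
                0 => base.checked_add(size - 1),
                r => base.checked_add(size - 1 + alignment - r),
            }
            .map_or(false, |e| e <= end)
        })?;
        regions.remove(&slot);
        let start = match slot.0 % alignment {
            0 => slot.0,
            r => slot.0 + alignment - r,
        };
        let end = start + size - 1;
        if slot.0 < start {
            regions.insert((slot.0, start - 1)); // unused head stays free
        }
        if slot.1 > end {
            regions.insert((end + 1, slot.1)); // unused tail stays free
        }
        Some(start)
    }

    fn main() {
        let mut regions = BTreeSet::from([(0x1000u64, 0x1fffu64)]);
        // 0x1000 is already 0x200-aligned, so only a free tail remains.
        assert_eq!(first_fit(&mut regions, 0x100, 0x200), Some(0x1000));
        assert_eq!(regions, BTreeSet::from([(0x1100, 0x1fff)]));
    }

allocate_at in the patch follows the same remove-and-reinsert pattern,
just with a caller-supplied start address instead of a first-fit search.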
diff --git a/resources/src/address_allocator.rs b/resources/src/address_allocator.rs
index 11978ee..7bf03d7 100644
--- a/resources/src/address_allocator.rs
+++ b/resources/src/address_allocator.rs
@@ -3,7 +3,7 @@
 // found in the LICENSE file.
 
 use std::cmp;
-use std::collections::HashMap;
+use std::collections::{BTreeSet, HashMap};
 
 use crate::{Alloc, Error, Result};
 
@@ -26,11 +26,9 @@ use crate::{Alloc, Error, Result};
 /// ```
 #[derive(Debug, Eq, PartialEq)]
 pub struct AddressAllocator {
-    pool_base: u64,
-    pool_end: u64,
     alignment: u64,
-    next_addr: u64,
     allocs: HashMap<Alloc, (u64, u64, String)>,
+    regions: BTreeSet<(u64, u64)>,
 }
 
 impl AddressAllocator {
@@ -55,12 +53,12 @@ impl AddressAllocator {
         if !alignment.is_power_of_two() || alignment == 0 {
             return Err(Error::BadAlignment);
         }
+        let mut regions = BTreeSet::new();
+        regions.insert((pool_base, pool_end));
         Ok(AddressAllocator {
-            pool_base,
-            pool_end,
             alignment,
-            next_addr: pool_base,
             allocs: HashMap::new(),
+            regions,
         })
     }
 
@@ -85,32 +83,80 @@ impl AddressAllocator {
         if !alignment.is_power_of_two() {
             return Err(Error::BadAlignment);
         }
-        let align_adjust = if self.next_addr % alignment != 0 {
-            alignment - (self.next_addr % alignment)
-        } else {
-            0
-        };
-        let addr = self
-            .next_addr
-            .checked_add(align_adjust)
-            .ok_or(Error::OutOfSpace)?;
-        let end_addr = addr.checked_add(size - 1).ok_or(Error::OutOfSpace)?;
-        if end_addr > self.pool_end {
-            return Err(Error::OutOfSpace);
-        }
 
-        // TODO(dgreid): Use a smarter allocation strategy. The current strategy is just
-        // bumping this pointer, meaning it will eventually exhaust available addresses.
-        self.next_addr = end_addr.saturating_add(1);
+        // Find the first free region that satisfies the requested alignment and size.
+        match self
+            .regions
+            .iter()
+            .find(|range| {
+                match range.0 % alignment {
+                    0 => range.0.checked_add(size - 1),
+                    r => range.0.checked_add(size - 1 + alignment - r),
+                }
+                .map_or(false, |end| end <= range.1)
+            })
+            .cloned()
+        {
+            Some(slot) => {
+                self.regions.remove(&slot);
+                let start = match slot.0 % alignment {
+                    0 => slot.0,
+                    r => slot.0 + alignment - r,
+                };
+                let end = start + size - 1;
+                if slot.0 < start {
+                    self.regions.insert((slot.0, start - 1));
+                }
+                if slot.1 > end {
+                    self.regions.insert((end + 1, slot.1));
+                }
+                self.allocs.insert(alloc, (start, size, tag));
 
-        self.allocs.insert(alloc, (addr, size, tag));
-        Ok(addr)
+                Ok(start)
+            }
+            None => Err(Error::OutOfSpace),
+        }
     }
 
     pub fn allocate(&mut self, size: u64, alloc: Alloc, tag: String) -> Result<u64> {
         self.allocate_with_align(size, alloc, tag, self.alignment)
     }
 
+    /// Allocates a range of addresses at a required location from the managed
+    /// region, recording it under `tag`. Allocation alignment is not enforced.
+    /// Returns `OutOfSpace` if the requested range is not available (e.g. part
+    /// of it is already allocated).
+    pub fn allocate_at(&mut self, start: u64, size: u64, alloc: Alloc, tag: String) -> Result<()> {
+        if self.allocs.contains_key(&alloc) {
+            return Err(Error::ExistingAlloc(alloc));
+        }
+        if size == 0 {
+            return Err(Error::AllocSizeZero);
+        }
+
+        let end = start.checked_add(size - 1).ok_or(Error::OutOfSpace)?;
+        match self
+            .regions
+            .iter()
+            .find(|range| range.0 <= start && range.1 >= end)
+            .cloned()
+        {
+            Some(slot) => {
+                self.regions.remove(&slot);
+                if slot.0 < start {
+                    self.regions.insert((slot.0, start - 1));
+                }
+                if slot.1 > end {
+                    self.regions.insert((end + 1, slot.1));
+                }
+                self.allocs.insert(alloc, (start, size, tag));
+
+                Ok(())
+            }
+            None => Err(Error::OutOfSpace),
+        }
+    }
+
     /// Returns allocation associated with `alloc`, or None if no such allocation exists.
     pub fn get(&self, alloc: &Alloc) -> Option<&(u64, u64, String)> {
         self.allocs.get(alloc)
@@ -172,6 +218,44 @@ mod tests {
     }
 
     #[test]
+    fn allocate_with_special_alignment() {
+        let mut pool = AddressAllocator::new(0x1000, 0x1000, Some(0x100)).unwrap();
+        assert_eq!(
+            pool.allocate(0x10, Alloc::Anon(0), String::from("bar0")),
+            Ok(0x1000)
+        );
+        assert_eq!(
+            pool.allocate_at(0x1200, 0x100, Alloc::Anon(1), String::from("bar1")),
+            Ok(())
+        );
+        assert_eq!(
+            pool.allocate_with_align(0x800, Alloc::Anon(2), String::from("bar2"), 0x800),
+            Ok(0x1800)
+        );
+    }
+
+    #[test]
+    fn allocate_and_split_allocate_at() {
+        let mut pool = AddressAllocator::new(0x1000, 0x1000, Some(0x100)).unwrap();
+        assert_eq!(
+            pool.allocate_at(0x1200, 0x800, Alloc::Anon(0), String::from("bar0")),
+            Ok(())
+        );
+        assert_eq!(
+            pool.allocate(0x800, Alloc::Anon(1), String::from("bar1")),
+            Err(Error::OutOfSpace)
+        );
+        assert_eq!(
+            pool.allocate(0x600, Alloc::Anon(2), String::from("bar2")),
+            Ok(0x1a00)
+        );
+        assert_eq!(
+            pool.allocate(0x200, Alloc::Anon(3), String::from("bar3")),
+            Ok(0x1000)
+        );
+    }
+
+    #[test]
     fn allocate_alignment() {
         let mut pool = AddressAllocator::new(0x1000, 0x10000, Some(0x100)).unwrap();
         assert_eq!(