author    Alyssa Ross <hi@alyssa.is>  2020-06-02 03:03:26 +0000
committer Alyssa Ross <hi@alyssa.is>  2020-06-14 11:23:24 +0000
commit    28d9682698d287d14cbe67a0ed7acc1427add320
tree      669ed98d9b1388b553c8e0f0189678cc68dd4162
parent    460406d10bbfaa890d56d616b4610813da63a312
parent    4264464153a7a788ef73c5015ac8bbde5f8ebe1c
Merge remote-tracking branch 'origin/master'
Diffstat (limited to 'resources')
 resources/src/address_allocator.rs | 229
 resources/src/lib.rs               |   6
 2 files changed, 210 insertions, 25 deletions
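
The diff below replaces the allocator's bump-pointer strategy with a first-fit search over a BTreeSet of free regions, which lets released allocations be reused. A minimal usage sketch of the resulting API, with signatures as exercised by the tests further down (the `resources::` import paths assume crate-root re-exports; illustration only, not part of the commit):

    use resources::{Alloc, AddressAllocator, Result};

    fn main() -> Result<()> {
        // Pool covers 0x1000..=0x1fff with a default alignment of 0x100.
        let mut pool = AddressAllocator::new(0x1000, 0x1000, Some(0x100))?;
        // First fit from the start of the pool: returns 0x1000.
        let base = pool.allocate(0x100, Alloc::Anon(0), "bar0".to_string())?;
        assert_eq!(base, 0x1000);
        // Fixed placement: splits the free region around 0x1800..=0x18ff.
        pool.allocate_at(0x1800, 0x100, Alloc::Anon(1), "bar1".to_string())?;
        // The freed range is coalesced back into the adjacent free region.
        pool.release(Alloc::Anon(0))?;
        Ok(())
    }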
diff --git a/resources/src/address_allocator.rs b/resources/src/address_allocator.rs
index 11978ee..88f652a 100644
--- a/resources/src/address_allocator.rs
+++ b/resources/src/address_allocator.rs
@@ -3,7 +3,7 @@
 // found in the LICENSE file.
 
 use std::cmp;
-use std::collections::HashMap;
+use std::collections::{BTreeSet, HashMap};
 
 use crate::{Alloc, Error, Result};
 
@@ -26,11 +26,9 @@ use crate::{Alloc, Error, Result};
 /// ```
 #[derive(Debug, Eq, PartialEq)]
 pub struct AddressAllocator {
-    pool_base: u64,
-    pool_end: u64,
     alignment: u64,
-    next_addr: u64,
     allocs: HashMap<Alloc, (u64, u64, String)>,
+    regions: BTreeSet<(u64, u64)>,
 }
 
 impl AddressAllocator {
@@ -55,12 +53,12 @@ impl AddressAllocator {
         if !alignment.is_power_of_two() || alignment == 0 {
             return Err(Error::BadAlignment);
         }
+        let mut regions = BTreeSet::new();
+        regions.insert((pool_base, pool_end));
         Ok(AddressAllocator {
-            pool_base,
-            pool_end,
             alignment,
-            next_addr: pool_base,
             allocs: HashMap::new(),
+            regions,
         })
     }
 
@@ -85,36 +83,138 @@ impl AddressAllocator {
         if !alignment.is_power_of_two() {
             return Err(Error::BadAlignment);
         }
-        let align_adjust = if self.next_addr % alignment != 0 {
-            alignment - (self.next_addr % alignment)
-        } else {
-            0
-        };
-        let addr = self
-            .next_addr
-            .checked_add(align_adjust)
-            .ok_or(Error::OutOfSpace)?;
-        let end_addr = addr.checked_add(size - 1).ok_or(Error::OutOfSpace)?;
-        if end_addr > self.pool_end {
-            return Err(Error::OutOfSpace);
-        }
 
-        // TODO(dgreid): Use a smarter allocation strategy. The current strategy is just
-        // bumping this pointer, meaning it will eventually exhaust available addresses.
-        self.next_addr = end_addr.saturating_add(1);
+        // Find the first free region that satisfies the alignment and size.
+        match self
+            .regions
+            .iter()
+            .find(|range| {
+                match range.0 % alignment {
+                    0 => range.0.checked_add(size - 1),
+                    r => range.0.checked_add(size - 1 + alignment - r),
+                }
+                .map_or(false, |end| end <= range.1)
+            })
+            .cloned()
+        {
+            Some(slot) => {
+                self.regions.remove(&slot);
+                let start = match slot.0 % alignment {
+                    0 => slot.0,
+                    r => slot.0 + alignment - r,
+                };
+                let end = start + size - 1;
+                if slot.0 < start {
+                    self.regions.insert((slot.0, start - 1));
+                }
+                if slot.1 > end {
+                    self.regions.insert((end + 1, slot.1));
+                }
+                self.allocs.insert(alloc, (start, size, tag));
 
-        self.allocs.insert(alloc, (addr, size, tag));
-        Ok(addr)
+                Ok(start)
+            }
+            None => Err(Error::OutOfSpace),
+        }
     }
 
     pub fn allocate(&mut self, size: u64, alloc: Alloc, tag: String) -> Result<u64> {
         self.allocate_with_align(size, alloc, tag, self.alignment)
     }
 
+    /// Allocates a range of addresses at a required location from the managed
+    /// region, with an optional tag. Allocation alignment is not enforced.
+    /// Returns `OutOfSpace` if the requested range is not available (e.g. it
+    /// is already allocated with a different alloc tag).
+    pub fn allocate_at(&mut self, start: u64, size: u64, alloc: Alloc, tag: String) -> Result<()> {
+        if self.allocs.contains_key(&alloc) {
+            return Err(Error::ExistingAlloc(alloc));
+        }
+        if size == 0 {
+            return Err(Error::AllocSizeZero);
+        }
+
+        let end = start.checked_add(size - 1).ok_or(Error::OutOfSpace)?;
+        match self
+            .regions
+            .iter()
+            .find(|range| range.0 <= start && range.1 >= end)
+            .cloned()
+        {
+            Some(slot) => {
+                self.regions.remove(&slot);
+                if slot.0 < start {
+                    self.regions.insert((slot.0, start - 1));
+                }
+                if slot.1 > end {
+                    self.regions.insert((end + 1, slot.1));
+                }
+                self.allocs.insert(alloc, (start, size, tag));
+
+                Ok(())
+            }
+            None => Err(Error::OutOfSpace),
+        }
+    }
+
+    /// Releases an existing allocation back to the free pool.
+    pub fn release(&mut self, alloc: Alloc) -> Result<()> {
+        self.allocs
+            .remove(&alloc)
+            .map_or_else(|| Err(Error::BadAlloc(alloc)), |v| self.insert_at(v.0, v.1))
+    }
+
     /// Returns allocation associated with `alloc`, or None if no such allocation exists.
     pub fn get(&self, alloc: &Alloc) -> Option<&(u64, u64, String)> {
         self.allocs.get(alloc)
     }
+
+    /// Inserts a range of addresses into the pool, coalescing neighboring regions.
+    fn insert_at(&mut self, start: u64, size: u64) -> Result<()> {
+        if size == 0 {
+            return Err(Error::AllocSizeZero);
+        }
+
+        let mut slot = (start, start.checked_add(size - 1).ok_or(Error::OutOfSpace)?);
+        let mut left = None;
+        let mut right = None;
+        // Simple coalescing with a linear search over the free regions.
+        //
+        // By calculating the distance between the start and end of two
+        // regions we can detect whether they are disjoint (>1), adjacent (=1),
+        // or possibly overlapping (<1). Saturating arithmetic is used to avoid
+        // overflow. Overlap is detected when the distance is <1 at both
+        // opposite ends. The algorithm assumes all existing regions are
+        // disjoint and represented as pairs of inclusive endpoints
+        // (start, end), where end >= start.
+        for range in self.regions.iter() {
+            match (
+                slot.0.saturating_sub(range.1),
+                range.0.saturating_sub(slot.1),
+            ) {
+                (1, 0) => {
+                    left = Some(*range);
+                }
+                (0, 1) => {
+                    right = Some(*range);
+                }
+                (0, 0) => {
+                    return Err(Error::RegionOverlap { base: start, size });
+                }
+                (_, _) => (),
+            }
+        }
+        if let Some(left) = left {
+            self.regions.remove(&left);
+            slot.0 = left.0;
+        }
+        if let Some(right) = right {
+            self.regions.remove(&right);
+            slot.1 = right.1;
+        }
+        self.regions.insert(slot);
+
+        Ok(())
+    }
 }
 
 #[cfg(test)]
@@ -172,6 +272,44 @@ mod tests {
     }
 
     #[test]
+    fn allocate_with_special_alignment() {
+        let mut pool = AddressAllocator::new(0x1000, 0x1000, Some(0x100)).unwrap();
+        assert_eq!(
+            pool.allocate(0x10, Alloc::Anon(0), String::from("bar0")),
+            Ok(0x1000)
+        );
+        assert_eq!(
+            pool.allocate_at(0x1200, 0x100, Alloc::Anon(1), String::from("bar1")),
+            Ok(())
+        );
+        assert_eq!(
+            pool.allocate_with_align(0x800, Alloc::Anon(2), String::from("bar2"), 0x800),
+            Ok(0x1800)
+        );
+    }
+
+    #[test]
+    fn allocate_and_split_allocate_at() {
+        let mut pool = AddressAllocator::new(0x1000, 0x1000, Some(0x100)).unwrap();
+        assert_eq!(
+            pool.allocate_at(0x1200, 0x800, Alloc::Anon(0), String::from("bar0")),
+            Ok(())
+        );
+        assert_eq!(
+            pool.allocate(0x800, Alloc::Anon(1), String::from("bar1")),
+            Err(Error::OutOfSpace)
+        );
+        assert_eq!(
+            pool.allocate(0x600, Alloc::Anon(2), String::from("bar2")),
+            Ok(0x1a00)
+        );
+        assert_eq!(
+            pool.allocate(0x200, Alloc::Anon(3), String::from("bar3")),
+            Ok(0x1000)
+        );
+    }
+
+    #[test]
     fn allocate_alignment() {
         let mut pool = AddressAllocator::new(0x1000, 0x10000, Some(0x100)).unwrap();
         assert_eq!(
@@ -243,4 +381,45 @@ mod tests {
             .allocate_with_align(0x110, Alloc::Anon(0), String::from("bar0"), 200)
             .is_err());
     }
+
+    #[test]
+    fn allocate_with_release() {
+        let mut pool = AddressAllocator::new(0x1000, 0x1000, None).unwrap();
+        assert_eq!(
+            pool.allocate_with_align(0x100, Alloc::Anon(0), String::from("bar0"), 0x100),
+            Ok(0x1000)
+        );
+        assert!(pool.release(Alloc::Anon(0)).is_ok());
+        assert_eq!(
+            pool.allocate_with_align(0x1000, Alloc::Anon(0), String::from("bar0"), 0x100),
+            Ok(0x1000)
+        );
+    }
+
+    #[test]
+    fn coalescing_and_overlap() {
+        let mut pool = AddressAllocator::new(0x1000, 0x1000, None).unwrap();
+        assert!(pool.insert_at(0x3000, 0x1000).is_ok());
+        assert!(pool.insert_at(0x1fff, 0x20).is_err());
+        assert!(pool.insert_at(0x2ff1, 0x10).is_err());
+        assert!(pool.insert_at(0x1800, 0x1000).is_err());
+        assert!(pool.insert_at(0x2000, 0x1000).is_ok());
+        assert_eq!(
+            pool.allocate(0x3000, Alloc::Anon(0), String::from("bar0")),
+            Ok(0x1000)
+        );
+    }
+
+    #[test]
+    fn coalescing_single_addresses() {
+        let mut pool = AddressAllocator::new(0x1000, 0x1000, None).unwrap();
+        assert!(pool.insert_at(0x2001, 1).is_ok());
+        assert!(pool.insert_at(0x2003, 1).is_ok());
+        assert!(pool.insert_at(0x2000, 1).is_ok());
+        assert!(pool.insert_at(0x2002, 1).is_ok());
+        assert_eq!(
+            pool.allocate(0x1004, Alloc::Anon(0), String::from("bar0")),
+            Ok(0x1000)
+        );
+    }
 }
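
A standalone sketch of the adjacency test that insert_at() above performs against each free region (the `classify` helper is hypothetical, for illustration only; the values mirror the coalescing tests):

    /// For inclusive ranges, the saturating distance between opposite
    /// endpoints is >1 for a gap, exactly 1 for adjacency, and 0 in both
    /// directions only when the ranges overlap.
    fn classify(slot: (u64, u64), range: (u64, u64)) -> &'static str {
        match (
            slot.0.saturating_sub(range.1),
            range.0.saturating_sub(slot.1),
        ) {
            (1, 0) => "left-adjacent",  // coalesce to (range.0, slot.1)
            (0, 1) => "right-adjacent", // coalesce to (slot.0, range.1)
            (0, 0) => "overlapping",    // insert_at returns RegionOverlap
            (_, _) => "disjoint",       // no coalescing
        }
    }

    fn main() {
        let slot = (0x2000, 0x2fff);
        assert_eq!(classify(slot, (0x1000, 0x1fff)), "left-adjacent");
        assert_eq!(classify(slot, (0x3000, 0x3fff)), "right-adjacent");
        assert_eq!(classify(slot, (0x2800, 0x37ff)), "overlapping");
        assert_eq!(classify(slot, (0x4000, 0x4fff)), "disjoint");
    }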
diff --git a/resources/src/lib.rs b/resources/src/lib.rs
index cf36cc1..6195e91 100644
--- a/resources/src/lib.rs
+++ b/resources/src/lib.rs
@@ -46,6 +46,8 @@ pub enum Error {
     OutOfSpace,
     PoolOverflow { base: u64, size: u64 },
     PoolSizeZero,
+    RegionOverlap { base: u64, size: u64 },
+    BadAlloc(Alloc),
 }
 
 pub type Result<T> = std::result::Result<T, Error>;
@@ -64,6 +66,10 @@ impl Display for Error {
             OutOfSpace => write!(f, "Out of space"),
             PoolOverflow { base, size } => write!(f, "base={} + size={} overflows", base, size),
             PoolSizeZero => write!(f, "Pool cannot have size of 0"),
+            RegionOverlap { base, size } => {
+                write!(f, "Overlapping region base={} size={}", base, size)
+            }
+            BadAlloc(tag) => write!(f, "Alloc does not exist: {:?}", tag),
         }
     }
 }
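
From the public API, the new `BadAlloc` variant surfaces when releasing an unknown allocation, while `RegionOverlap` comes from the private `insert_at`. A sketch of the resulting `Display` output (decimal formatting as written above; import paths assumed, illustration only):

    use resources::{Alloc, Error};

    fn main() {
        // Prints: Alloc does not exist: Anon(7)
        println!("{}", Error::BadAlloc(Alloc::Anon(7)));
        // Prints: Overlapping region base=8192 size=4096
        println!("{}", Error::RegionOverlap { base: 0x2000, size: 0x1000 });
    }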