author     Xiong Zhang <xiong.y.zhang@intel.corp-partner.google.com>  2019-10-30 14:59:26 +0800
committer  Commit Bot <commit-bot@chromium.org>  2019-11-10 06:39:34 +0000
commit     383b3b520a76a921be17a12640b24fba1419dbbb (patch)
tree       1aea1553c2534aab73b265929cf462c0d8999a67
parent     87a3b44d9e6caa82ab521a5dd9bafdb56e8b6ed1 (diff)
resource: Rename device memory to mmio
Since the unified allocator is now used to allocate MMIO, this patch removes
the device memory name and renames device to mmio (see the usage sketch below).

BUG=chromium:992270
TEST=this patch doesn't change functionality; run build_test

Change-Id: I234b0db4b3c5de8cfee372ace5212a980564d0c7
Signed-off-by: Xiong Zhang <xiong.y.zhang@intel.corp-partner.google.com>
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/platform/crosvm/+/1895234
Tested-by: kokoro <noreply+kokoro@google.com>
Reviewed-by: Daniel Verkamp <dverkamp@chromium.org>
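
For reference, a minimal sketch of the renamed allocator API, pieced together
from the doc example and unit tests in this diff. The pool addresses mirror the
doc example; the "pmem0" tag and allocation sizes are illustrative only.

    use resources::{Alloc, MmioType, SystemAllocator};

    fn main() {
        // Build an allocator with the renamed low/high MMIO pools.
        let mut a = SystemAllocator::builder()
            .add_io_addresses(0x1000, 0x10000)
            .add_low_mmio_addresses(0x3000_0000, 0x10000)
            .add_high_mmio_addresses(0x1000_0000, 0x1000_0000)
            .create_allocator(5, false)
            .unwrap();

        // IRQs are handed out starting from the first_irq given above.
        assert_eq!(a.allocate_irq(), Some(5));

        // 32-bit PCI BARs (ac97, xhci, virtio) now come from the low pool...
        let bar0 = a
            .mmio_allocator(MmioType::Low)
            .allocate(0x100, Alloc::PciBar { bus: 0, dev: 0, bar: 0 }, "bar0".to_string())
            .unwrap();

        // ...while large mappings (pmem devices, GPU render nodes) use the high pool.
        let pmem = a
            .mmio_allocator(MmioType::High)
            .allocate(0x10_0000, Alloc::PmemDevice(0), "pmem0".to_string())
            .unwrap();

        println!("bar0 at {:#x}, pmem at {:#x}", bar0, pmem);
    }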
-rw-r--r--  aarch64/src/lib.rs                       | 10
-rw-r--r--  devices/src/pci/ac97.rs                  |  8
-rw-r--r--  devices/src/pci/vfio_pci.rs              |  2
-rw-r--r--  devices/src/usb/xhci/xhci_controller.rs  |  2
-rw-r--r--  devices/src/virtio/virtio_pci_device.rs  |  4
-rw-r--r--  kvm/src/lib.rs                           | 44
-rw-r--r--  kvm/tests/dirty_log.rs                   |  2
-rw-r--r--  kvm/tests/read_only_memory.rs            |  4
-rw-r--r--  resources/src/lib.rs                     |  8
-rw-r--r--  resources/src/system_allocator.rs        | 82
-rw-r--r--  src/linux.rs                             | 10
-rw-r--r--  src/plugin/mod.rs                        |  2
-rw-r--r--  src/plugin/process.rs                    |  2
-rw-r--r--  vm_control/src/lib.rs                    |  8
-rw-r--r--  x86_64/src/lib.rs                        | 12
15 files changed, 101 insertions, 99 deletions
diff --git a/aarch64/src/lib.rs b/aarch64/src/lib.rs
index a35963b..5beb473 100644
--- a/aarch64/src/lib.rs
+++ b/aarch64/src/lib.rs
@@ -341,7 +341,7 @@ impl AArch64 {
             }
             None => None,
         };
-        let (pci_device_base, pci_device_size) = Self::get_device_addr_base_size(mem_size);
+        let (pci_device_base, pci_device_size) = Self::get_high_mmio_base_size(mem_size);
         fdt::create_fdt(
             AARCH64_FDT_MAX_SIZE as usize,
             mem,
@@ -364,7 +364,7 @@ impl AArch64 {
         Ok(mem)
     }
 
-    fn get_device_addr_base_size(mem_size: u64) -> (u64, u64) {
+    fn get_high_mmio_base_size(mem_size: u64) -> (u64, u64) {
         let base = AARCH64_PHYS_MEM_START + mem_size;
         let size = u64::max_value() - base;
         (base, size)
@@ -383,10 +383,10 @@ impl AArch64 {
 
     /// Returns a system resource allocator.
     fn get_resource_allocator(mem_size: u64, gpu_allocation: bool) -> SystemAllocator {
-        let (device_addr_base, device_addr_size) = Self::get_device_addr_base_size(mem_size);
+        let (high_mmio_base, high_mmio_size) = Self::get_high_mmio_base_size(mem_size);
         SystemAllocator::builder()
-            .add_device_addresses(device_addr_base, device_addr_size)
-            .add_mmio_addresses(AARCH64_MMIO_BASE, AARCH64_MMIO_SIZE)
+            .add_high_mmio_addresses(high_mmio_base, high_mmio_size)
+            .add_low_mmio_addresses(AARCH64_MMIO_BASE, AARCH64_MMIO_SIZE)
             .create_allocator(AARCH64_IRQ_BASE, gpu_allocation)
             .unwrap()
     }
diff --git a/devices/src/pci/ac97.rs b/devices/src/pci/ac97.rs
index 73a5093..9a3c7e5 100644
--- a/devices/src/pci/ac97.rs
+++ b/devices/src/pci/ac97.rs
@@ -149,7 +149,7 @@ impl PciDevice for Ac97Dev {
             .expect("assign_bus_dev must be called prior to allocate_io_bars");
         let mut ranges = Vec::new();
         let mixer_regs_addr = resources
-            .mmio_allocator(MmioType::Mmio)
+            .mmio_allocator(MmioType::Low)
             .allocate_with_align(
                 MIXER_REGS_SIZE,
                 Alloc::PciBar { bus, dev, bar: 0 },
@@ -167,7 +167,7 @@ impl PciDevice for Ac97Dev {
         ranges.push((mixer_regs_addr, MIXER_REGS_SIZE));
 
         let master_regs_addr = resources
-            .mmio_allocator(MmioType::Mmio)
+            .mmio_allocator(MmioType::Low)
             .allocate_with_align(
                 MASTER_REGS_SIZE,
                 Alloc::PciBar { bus, dev, bar: 1 },
@@ -245,8 +245,8 @@ mod tests {
         let mut ac97_dev = Ac97Dev::new(mem, Box::new(DummyStreamSource::new()));
         let mut allocator = SystemAllocator::builder()
             .add_io_addresses(0x1000_0000, 0x1000_0000)
-            .add_mmio_addresses(0x2000_0000, 0x1000_0000)
-            .add_device_addresses(0x3000_0000, 0x1000_0000)
+            .add_low_mmio_addresses(0x2000_0000, 0x1000_0000)
+            .add_high_mmio_addresses(0x3000_0000, 0x1000_0000)
             .create_allocator(5, false)
             .unwrap();
         ac97_dev.assign_bus_dev(0, 0);
diff --git a/devices/src/pci/vfio_pci.rs b/devices/src/pci/vfio_pci.rs
index c10715d..2243990 100644
--- a/devices/src/pci/vfio_pci.rs
+++ b/devices/src/pci/vfio_pci.rs
@@ -502,7 +502,7 @@ impl PciDevice for VfioPciDevice {
                 size |= u64::from(low);
                 size = !size + 1;
                 let bar_addr = resources
-                    .mmio_allocator(MmioType::Mmio)
+                    .mmio_allocator(MmioType::Low)
                     .allocate_with_align(
                         size,
                         Alloc::PciBar {
diff --git a/devices/src/usb/xhci/xhci_controller.rs b/devices/src/usb/xhci/xhci_controller.rs
index dfba4a4..db06b6e 100644
--- a/devices/src/usb/xhci/xhci_controller.rs
+++ b/devices/src/usb/xhci/xhci_controller.rs
@@ -213,7 +213,7 @@ impl PciDevice for XhciController {
             .expect("assign_bus_dev must be called prior to allocate_io_bars");
         // xHCI spec 5.2.1.
         let bar0_addr = resources
-            .mmio_allocator(MmioType::Mmio)
+            .mmio_allocator(MmioType::Low)
             .allocate_with_align(
                 XHCI_BAR0_SIZE,
                 Alloc::PciBar { bus, dev, bar: 0 },
diff --git a/devices/src/virtio/virtio_pci_device.rs b/devices/src/virtio/virtio_pci_device.rs
index bd94fe3..cbe56a2 100644
--- a/devices/src/virtio/virtio_pci_device.rs
+++ b/devices/src/virtio/virtio_pci_device.rs
@@ -384,7 +384,7 @@ impl PciDevice for VirtioPciDevice {
         // Allocate one bar for the structures pointed to by the capability structures.
         let mut ranges = Vec::new();
         let settings_config_addr = resources
-            .mmio_allocator(MmioType::Mmio)
+            .mmio_allocator(MmioType::Low)
             .allocate_with_align(
                 CAPABILITY_BAR_SIZE,
                 Alloc::PciBar { bus, dev, bar: 0 },
@@ -422,7 +422,7 @@ impl PciDevice for VirtioPciDevice {
         let mut ranges = Vec::new();
         for config in self.device.get_device_bars(bus, dev) {
             let device_addr = resources
-                .mmio_allocator(MmioType::Device)
+                .mmio_allocator(MmioType::High)
                 .allocate_with_align(
                     config.get_size(),
                     Alloc::PciBar {
diff --git a/kvm/src/lib.rs b/kvm/src/lib.rs
index d742780..8d3c61f 100644
--- a/kvm/src/lib.rs
+++ b/kvm/src/lib.rs
@@ -304,7 +304,7 @@ impl PartialOrd for MemSlot {
 pub struct Vm {
     vm: File,
     guest_mem: GuestMemory,
-    device_memory: HashMap<u32, MemoryMapping>,
+    mmio_memory: HashMap<u32, MemoryMapping>,
     mmap_arenas: HashMap<u32, MemoryMappingArena>,
     mem_slot_gaps: BinaryHeap<MemSlot>,
     #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
@@ -338,7 +338,7 @@ impl Vm {
             Ok(Vm {
                 vm: vm_file,
                 guest_mem,
-                device_memory: HashMap::new(),
+                mmio_memory: HashMap::new(),
                 mmap_arenas: HashMap::new(),
                 mem_slot_gaps: BinaryHeap::new(),
                 #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
@@ -361,7 +361,7 @@ impl Vm {
         let slot = match self.mem_slot_gaps.pop() {
             Some(gap) => gap.0,
             None => {
-                (self.device_memory.len()
+                (self.mmio_memory.len()
                     + self.guest_mem.num_regions() as usize
                     + self.mmap_arenas.len()) as u32
             }
@@ -405,8 +405,8 @@ impl Vm {
 
     /// Inserts the given `MemoryMapping` into the VM's address space at `guest_addr`.
     ///
-    /// The slot that was assigned the device memory mapping is returned on success. The slot can be
-    /// given to `Vm::remove_device_memory` to remove the memory from the VM's address space and
+    /// The slot that was assigned the mmio memory mapping is returned on success. The slot can be
+    /// given to `Vm::remove_mmio_memory` to remove the memory from the VM's address space and
     /// take back ownership of `mem`.
     ///
     /// Note that memory inserted into the VM's address space must not overlap with any other memory
@@ -417,7 +417,7 @@ impl Vm {
     ///
     /// If `log_dirty_pages` is true, the slot number can be used to retrieve the pages written to
     /// by the guest with `get_dirty_log`.
-    pub fn add_device_memory(
+    pub fn add_mmio_memory(
         &mut self,
         guest_addr: GuestAddress,
         mem: MemoryMapping,
@@ -441,22 +441,22 @@ impl Vm {
                 mem.as_ptr(),
             )?
         };
-        self.device_memory.insert(slot, mem);
+        self.mmio_memory.insert(slot, mem);
 
         Ok(slot)
     }
 
-    /// Removes device memory that was previously added at the given slot.
+    /// Removes mmio memory that was previously added at the given slot.
     ///
     /// Ownership of the host memory mapping associated with the given slot is returned on success.
-    pub fn remove_device_memory(&mut self, slot: u32) -> Result<MemoryMapping> {
-        if self.device_memory.contains_key(&slot) {
-            // Safe because the slot is checked against the list of device memory slots.
+    pub fn remove_mmio_memory(&mut self, slot: u32) -> Result<MemoryMapping> {
+        if self.mmio_memory.contains_key(&slot) {
+            // Safe because the slot is checked against the list of mmio memory slots.
             unsafe {
                 self.remove_user_memory_region(slot)?;
             }
             // Safe to unwrap since map is checked to contain key
-            Ok(self.device_memory.remove(&slot).unwrap())
+            Ok(self.mmio_memory.remove(&slot).unwrap())
         } else {
             Err(Error::new(ENOENT))
         }
@@ -464,7 +464,7 @@ impl Vm {
 
     /// Inserts the given `MemoryMappingArena` into the VM's address space at `guest_addr`.
     ///
-    /// The slot that was assigned the device memory mapping is returned on success. The slot can be
+    /// The slot that was assigned the mmio memory mapping is returned on success. The slot can be
     /// given to `Vm::remove_mmap_arena` to remove the memory from the VM's address space and
     /// take back ownership of `mmap_arena`.
     ///
@@ -510,7 +510,7 @@ impl Vm {
     /// Ownership of the host memory mapping associated with the given slot is returned on success.
     pub fn remove_mmap_arena(&mut self, slot: u32) -> Result<MemoryMappingArena> {
         if self.mmap_arenas.contains_key(&slot) {
-            // Safe because the slot is checked against the list of device memory slots.
+            // Safe because the slot is checked against the list of mmio memory slots.
             unsafe {
                 self.remove_user_memory_region(slot)?;
             }
@@ -533,7 +533,7 @@ impl Vm {
     /// region `slot` represents. For example, if the size of `slot` is 16 pages, `dirty_log` must
     /// be 2 bytes or greater.
     pub fn get_dirty_log(&self, slot: u32, dirty_log: &mut [u8]) -> Result<()> {
-        match self.device_memory.get(&slot) {
+        match self.mmio_memory.get(&slot) {
             Some(mmap) => {
                 // Ensures that there are as many bytes in dirty_log as there are pages in the mmap.
                 if dirty_log_bitmap_size(mmap.size()) > dirty_log.len() {
@@ -560,7 +560,7 @@ impl Vm {
 
     /// Gets a reference to the guest memory owned by this VM.
     ///
-    /// Note that `GuestMemory` does not include any device memory that may have been added after
+    /// Note that `GuestMemory` does not include any mmio memory that may have been added after
     /// this VM was constructed.
     pub fn get_memory(&self) -> &GuestMemory {
         &self.guest_mem
@@ -1927,7 +1927,7 @@ mod tests {
         let mut vm = Vm::new(&kvm, gm).unwrap();
         let mem_size = 0x1000;
         let mem = MemoryMapping::new(mem_size).unwrap();
-        vm.add_device_memory(GuestAddress(0x1000), mem, false, false)
+        vm.add_mmio_memory(GuestAddress(0x1000), mem, false, false)
             .unwrap();
     }
 
@@ -1938,7 +1938,7 @@ mod tests {
         let mut vm = Vm::new(&kvm, gm).unwrap();
         let mem_size = 0x1000;
         let mem = MemoryMapping::new(mem_size).unwrap();
-        vm.add_device_memory(GuestAddress(0x1000), mem, true, false)
+        vm.add_mmio_memory(GuestAddress(0x1000), mem, true, false)
             .unwrap();
     }
 
@@ -1951,9 +1951,9 @@ mod tests {
         let mem = MemoryMapping::new(mem_size).unwrap();
         let mem_ptr = mem.as_ptr();
         let slot = vm
-            .add_device_memory(GuestAddress(0x1000), mem, false, false)
+            .add_mmio_memory(GuestAddress(0x1000), mem, false, false)
             .unwrap();
-        let mem = vm.remove_device_memory(slot).unwrap();
+        let mem = vm.remove_mmio_memory(slot).unwrap();
         assert_eq!(mem.size(), mem_size);
         assert_eq!(mem.as_ptr(), mem_ptr);
     }
@@ -1963,7 +1963,7 @@ mod tests {
         let kvm = Kvm::new().unwrap();
         let gm = GuestMemory::new(&vec![(GuestAddress(0), 0x1000)]).unwrap();
         let mut vm = Vm::new(&kvm, gm).unwrap();
-        assert!(vm.remove_device_memory(0).is_err());
+        assert!(vm.remove_mmio_memory(0).is_err());
     }
 
     #[test]
@@ -1974,7 +1974,7 @@ mod tests {
         let mem_size = 0x2000;
         let mem = MemoryMapping::new(mem_size).unwrap();
         assert!(vm
-            .add_device_memory(GuestAddress(0x2000), mem, false, false)
+            .add_mmio_memory(GuestAddress(0x2000), mem, false, false)
             .is_err());
     }
 
diff --git a/kvm/tests/dirty_log.rs b/kvm/tests/dirty_log.rs
index 8d8e56d..527af46 100644
--- a/kvm/tests/dirty_log.rs
+++ b/kvm/tests/dirty_log.rs
@@ -43,7 +43,7 @@ fn test_run() {
     vcpu_regs.rbx = 0x12;
     vcpu.set_regs(&vcpu_regs).expect("set regs failed");
     let slot = vm
-        .add_device_memory(
+        .add_mmio_memory(
             GuestAddress(0),
             MemoryMapping::from_fd(&mem, mem_size as usize)
                 .expect("failed to create memory mapping"),
diff --git a/kvm/tests/read_only_memory.rs b/kvm/tests/read_only_memory.rs
index 54f0acd..5aa7bcc 100644
--- a/kvm/tests/read_only_memory.rs
+++ b/kvm/tests/read_only_memory.rs
@@ -45,7 +45,7 @@ fn test_run() {
     vcpu_regs.rax = 0x66;
     vcpu_regs.rbx = 0;
     vcpu.set_regs(&vcpu_regs).expect("set regs failed");
-    vm.add_device_memory(
+    vm.add_mmio_memory(
         GuestAddress(0),
         MemoryMapping::from_fd(&mem, mem_size as usize).expect("failed to create memory mapping"),
         false,
@@ -63,7 +63,7 @@ fn test_run() {
     mmap_ro
         .write_obj(vcpu_regs.rax as u8, 0)
         .expect("failed writing data to ro memory");
-    vm.add_device_memory(
+    vm.add_mmio_memory(
         GuestAddress(vcpu_sregs.es.base),
         MemoryMapping::from_fd(&mem_ro, 0x1000).expect("failed to create memory mapping"),
         true,
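
The KVM tests above exercise the renamed Vm memory API end to end. A minimal
standalone sketch of the same calls, assuming crosvm's kvm and sys_util crates
and a host with /dev/kvm (values mirror the unit tests in kvm/src/lib.rs):

    use kvm::{Kvm, Vm};
    use sys_util::{GuestAddress, GuestMemory, MemoryMapping};

    fn main() {
        let kvm = Kvm::new().unwrap();
        let gm = GuestMemory::new(&vec![(GuestAddress(0), 0x1000)]).unwrap();
        let mut vm = Vm::new(&kvm, gm).unwrap();

        // Map 4 KiB of host memory into the guest at 0x1000
        // (read_only = false, log_dirty_pages = false).
        let mem = MemoryMapping::new(0x1000).unwrap();
        let slot = vm
            .add_mmio_memory(GuestAddress(0x1000), mem, false, false)
            .unwrap();

        // The returned slot hands the mapping back when it is removed.
        let mem = vm.remove_mmio_memory(slot).unwrap();
        assert_eq!(mem.size(), 0x1000);
    }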
diff --git a/resources/src/lib.rs b/resources/src/lib.rs
index 27f6abc..0e16786 100644
--- a/resources/src/lib.rs
+++ b/resources/src/lib.rs
@@ -44,8 +44,8 @@ pub enum Error {
     BadAlignment,
     CreateGpuAllocator(GpuAllocatorError),
     ExistingAlloc(Alloc),
-    MissingDeviceAddresses,
-    MissingMMIOAddresses,
+    MissingHighMMIOAddresses,
+    MissingLowMMIOAddresses,
     NoIoAllocator,
     OutOfSpace,
     PoolOverflow { base: u64, size: u64 },
@@ -62,8 +62,8 @@ impl Display for Error {
             BadAlignment => write!(f, "Pool alignment must be a power of 2"),
             CreateGpuAllocator(e) => write!(f, "Failed to create GPU allocator: {:?}", e),
             ExistingAlloc(tag) => write!(f, "Alloc already exists: {:?}", tag),
-            MissingDeviceAddresses => write!(f, "Device address range not specified"),
-            MissingMMIOAddresses => write!(f, "MMIO address range not specified"),
+            MissingHighMMIOAddresses => write!(f, "High MMIO address range not specified"),
+            MissingLowMMIOAddresses => write!(f, "Low MMIO address range not specified"),
             NoIoAllocator => write!(f, "No IO address range specified"),
             OutOfSpace => write!(f, "Out of space"),
             PoolOverflow { base, size } => write!(f, "base={} + size={} overflows", base, size),
diff --git a/resources/src/system_allocator.rs b/resources/src/system_allocator.rs
index b3a3062..984bc51 100644
--- a/resources/src/system_allocator.rs
+++ b/resources/src/system_allocator.rs
@@ -16,13 +16,13 @@ use crate::{Alloc, Error, Result};
 /// # use resources::{Alloc, MmioType, SystemAllocator};
 ///   if let Ok(mut a) = SystemAllocator::builder()
 ///           .add_io_addresses(0x1000, 0x10000)
-///           .add_device_addresses(0x10000000, 0x10000000)
-///           .add_mmio_addresses(0x30000000, 0x10000)
+///           .add_high_mmio_addresses(0x10000000, 0x10000000)
+///           .add_low_mmio_addresses(0x30000000, 0x10000)
 ///           .create_allocator(5, false) {
 ///       assert_eq!(a.allocate_irq(), Some(5));
 ///       assert_eq!(a.allocate_irq(), Some(6));
 ///       assert_eq!(
-///           a.mmio_allocator(MmioType::Device)
+///           a.mmio_allocator(MmioType::High)
 ///              .allocate(
 ///                  0x100,
 ///                  Alloc::PciBar { bus: 0, dev: 0, bar: 0 },
@@ -31,25 +31,25 @@ use crate::{Alloc, Error, Result};
 ///           Ok(0x10000000)
 ///       );
 ///       assert_eq!(
-///           a.mmio_allocator(MmioType::Device).get(&Alloc::PciBar { bus: 0, dev: 0, bar: 0 }),
+///           a.mmio_allocator(MmioType::High).get(&Alloc::PciBar { bus: 0, dev: 0, bar: 0 }),
 ///           Some(&(0x10000000, 0x100, "bar0".to_string()))
 ///       );
 ///   }
 /// ```
 
 /// MMIO address Type
-///    Mmio: address allocated from mmio_address_space
-///    Device: address allocated from device_address_space
+///    Low: address allocated from low_address_space
+///    High: address allocated from high_address_space
 pub enum MmioType {
-    Mmio,
-    Device,
+    Low,
+    High,
 }
 
 #[derive(Debug)]
 pub struct SystemAllocator {
     io_address_space: Option<AddressAllocator>,
-    device_address_space: AddressAllocator,
-    mmio_address_space: AddressAllocator,
+    high_mmio_address_space: AddressAllocator,
+    low_mmio_address_space: AddressAllocator,
     gpu_allocator: Option<Box<dyn GpuMemoryAllocator>>,
     next_irq: u32,
     next_anon_id: usize,
@@ -62,19 +62,19 @@ impl SystemAllocator {
     ///
     /// * `io_base` - The starting address of IO memory.
     /// * `io_size` - The size of IO memory.
-    /// * `dev_base` - The starting address of device memory.
-    /// * `dev_size` - The size of device memory.
-    /// * `mmio_base` - The starting address of MMIO space.
-    /// * `mmio_size` - The size of MMIO space.
+    /// * `high_base` - The starting address of high MMIO space.
+    /// * `high_size` - The size of high MMIO space.
+    /// * `low_base` - The starting address of low MMIO space.
+    /// * `low_size` - The size of low MMIO space.
     /// * `create_gpu_allocator` - If true, enable gpu memory allocation.
     /// * `first_irq` - The first irq number to give out.
     fn new(
         io_base: Option<u64>,
         io_size: Option<u64>,
-        dev_base: u64,
-        dev_size: u64,
-        mmio_base: u64,
-        mmio_size: u64,
+        high_base: u64,
+        high_size: u64,
+        low_base: u64,
+        low_size: u64,
         create_gpu_allocator: bool,
         first_irq: u32,
     ) -> Result<Self> {
@@ -85,8 +85,8 @@ impl SystemAllocator {
             } else {
                 None
             },
-            device_address_space: AddressAllocator::new(dev_base, dev_size, Some(page_size))?,
-            mmio_address_space: AddressAllocator::new(mmio_base, mmio_size, Some(page_size))?,
+            high_mmio_address_space: AddressAllocator::new(high_base, high_size, Some(page_size))?,
+            low_mmio_address_space: AddressAllocator::new(low_base, low_size, Some(page_size))?,
             gpu_allocator: if create_gpu_allocator {
                 gpu_allocator::create_gpu_memory_allocator().map_err(Error::CreateGpuAllocator)?
             } else {
@@ -118,10 +118,12 @@ impl SystemAllocator {
     }
 
     /// Gets an allocator to be used for MMIO allocation.
+    ///    MmioType::Low: low mmio allocator
+    ///    MmioType::High: high mmio allocator
     pub fn mmio_allocator(&mut self, mmio_type: MmioType) -> &mut AddressAllocator {
         match mmio_type {
-            MmioType::Device => &mut self.device_address_space,
-            MmioType::Mmio => &mut self.mmio_address_space,
+            MmioType::Low => &mut self.low_mmio_address_space,
+            MmioType::High => &mut self.high_mmio_address_space,
         }
     }
 
@@ -141,10 +143,10 @@ impl SystemAllocator {
 pub struct SystemAllocatorBuilder {
     io_base: Option<u64>,
     io_size: Option<u64>,
-    mmio_base: Option<u64>,
-    mmio_size: Option<u64>,
-    device_base: Option<u64>,
-    device_size: Option<u64>,
+    low_mmio_base: Option<u64>,
+    low_mmio_size: Option<u64>,
+    high_mmio_base: Option<u64>,
+    high_mmio_size: Option<u64>,
 }
 
 impl SystemAllocatorBuilder {
@@ -152,10 +154,10 @@ impl SystemAllocatorBuilder {
         SystemAllocatorBuilder {
             io_base: None,
             io_size: None,
-            mmio_base: None,
-            mmio_size: None,
-            device_base: None,
-            device_size: None,
+            low_mmio_base: None,
+            low_mmio_size: None,
+            high_mmio_base: None,
+            high_mmio_size: None,
         }
     }
 
@@ -165,15 +167,15 @@ impl SystemAllocatorBuilder {
         self
     }
 
-    pub fn add_mmio_addresses(mut self, base: u64, size: u64) -> Self {
-        self.mmio_base = Some(base);
-        self.mmio_size = Some(size);
+    pub fn add_low_mmio_addresses(mut self, base: u64, size: u64) -> Self {
+        self.low_mmio_base = Some(base);
+        self.low_mmio_size = Some(size);
         self
     }
 
-    pub fn add_device_addresses(mut self, base: u64, size: u64) -> Self {
-        self.device_base = Some(base);
-        self.device_size = Some(size);
+    pub fn add_high_mmio_addresses(mut self, base: u64, size: u64) -> Self {
+        self.high_mmio_base = Some(base);
+        self.high_mmio_size = Some(size);
         self
     }
 
@@ -185,10 +187,10 @@ impl SystemAllocatorBuilder {
         SystemAllocator::new(
             self.io_base,
             self.io_size,
-            self.device_base.ok_or(Error::MissingDeviceAddresses)?,
-            self.device_size.ok_or(Error::MissingDeviceAddresses)?,
-            self.mmio_base.ok_or(Error::MissingMMIOAddresses)?,
-            self.mmio_size.ok_or(Error::MissingMMIOAddresses)?,
+            self.high_mmio_base.ok_or(Error::MissingHighMMIOAddresses)?,
+            self.high_mmio_size.ok_or(Error::MissingHighMMIOAddresses)?,
+            self.low_mmio_base.ok_or(Error::MissingLowMMIOAddresses)?,
+            self.low_mmio_size.ok_or(Error::MissingLowMMIOAddresses)?,
             gpu_allocation,
             first_irq,
         )
diff --git a/src/linux.rs b/src/linux.rs
index 2816d55..24ef5ec 100644
--- a/src/linux.rs
+++ b/src/linux.rs
@@ -784,7 +784,7 @@ fn create_pmem_device(
     };
 
     let mapping_address = resources
-        .mmio_allocator(MmioType::Device)
+        .mmio_allocator(MmioType::High)
         .allocate_with_align(
             image_size,
             Alloc::PmemDevice(index),
@@ -794,7 +794,7 @@ fn create_pmem_device(
         )
         .map_err(Error::AllocatePmemDeviceAddress)?;
 
-    vm.add_device_memory(
+    vm.add_mmio_memory(
         GuestAddress(mapping_address),
         memory_mapping,
         /* read_only = */ disk.read_only,
@@ -1420,11 +1420,11 @@ pub fn run_config(cfg: Config) -> Result<()> {
             MemoryMapping::new_protection(RENDER_NODE_HOST_SIZE as usize, Protection::none())
                 .map_err(Error::ReserveGpuMemory)?;
 
-        // Put the non-accessible memory map into device memory so that no other devices use that
+        // Put the non-accessible memory map into high mmio so that no other devices use that
         // guest address space.
         let gpu_addr = linux
             .resources
-            .mmio_allocator(MmioType::Device)
+            .mmio_allocator(MmioType::High)
             .allocate(
                 RENDER_NODE_HOST_SIZE,
                 Alloc::GpuRenderNode,
@@ -1437,7 +1437,7 @@ pub fn run_config(cfg: Config) -> Result<()> {
         // Makes the gpu memory accessible at allocated address.
         linux
             .vm
-            .add_device_memory(
+            .add_mmio_memory(
                 GuestAddress(gpu_addr),
                 gpu_mmap,
                 /* read_only = */ false,
diff --git a/src/plugin/mod.rs b/src/plugin/mod.rs
index 012eec1..f0d6932 100644
--- a/src/plugin/mod.rs
+++ b/src/plugin/mod.rs
@@ -361,7 +361,7 @@ impl PluginObject {
                 8 => vm.unregister_ioevent(&evt, addr, Datamatch::U64(Some(datamatch as u64))),
                 _ => Err(SysError::new(EINVAL)),
             },
-            PluginObject::Memory { slot, .. } => vm.remove_device_memory(slot).and(Ok(())),
+            PluginObject::Memory { slot, .. } => vm.remove_mmio_memory(slot).and(Ok(())),
             PluginObject::IrqEvent { irq_id, evt } => vm.unregister_irqfd(&evt, irq_id),
         }
     }
diff --git a/src/plugin/process.rs b/src/plugin/process.rs
index c2d6acb..ea7a78c 100644
--- a/src/plugin/process.rs
+++ b/src/plugin/process.rs
@@ -363,7 +363,7 @@ impl Process {
         }
         let mem = MemoryMapping::from_fd_offset(&shm, length as usize, offset as usize)
             .map_err(mmap_to_sys_err)?;
-        let slot = vm.add_device_memory(GuestAddress(start), mem, read_only, dirty_log)?;
+        let slot = vm.add_mmio_memory(GuestAddress(start), mem, read_only, dirty_log)?;
         entry.insert(PluginObject::Memory {
             slot,
             length: length as usize,
diff --git a/vm_control/src/lib.rs b/vm_control/src/lib.rs
index 71bb193..eccee10 100644
--- a/vm_control/src/lib.rs
+++ b/vm_control/src/lib.rs
@@ -228,7 +228,7 @@ impl VmMemoryRequest {
                     Err(e) => VmMemoryResponse::Err(e),
                 }
             }
-            UnregisterMemory(slot) => match vm.remove_device_memory(slot) {
+            UnregisterMemory(slot) => match vm.remove_mmio_memory(slot) {
                 Ok(_) => VmMemoryResponse::Ok,
                 Err(e) => VmMemoryResponse::Err(e),
             },
@@ -404,7 +404,7 @@ fn register_memory(
     let addr = match allocation {
         Some((Alloc::PciBar { bus, dev, bar }, address)) => {
             match allocator
-                .mmio_allocator(MmioType::Device)
+                .mmio_allocator(MmioType::High)
                 .get(&Alloc::PciBar { bus, dev, bar })
             {
                 Some((start_addr, length, _)) => {
@@ -420,7 +420,7 @@ fn register_memory(
         }
         None => {
             let alloc = allocator.get_anon_alloc();
-            match allocator.mmio_allocator(MmioType::Device).allocate(
+            match allocator.mmio_allocator(MmioType::High).allocate(
                 size as u64,
                 alloc,
                 "vmcontrol_register_memory".to_string(),
@@ -432,7 +432,7 @@ fn register_memory(
         _ => return Err(SysError::new(EINVAL)),
     };
 
-    let slot = match vm.add_device_memory(GuestAddress(addr), mmap, false, false) {
+    let slot = match vm.add_mmio_memory(GuestAddress(addr), mmap, false, false) {
         Ok(v) => v,
         Err(e) => return Err(e),
     };
diff --git a/x86_64/src/lib.rs b/x86_64/src/lib.rs
index c15221e..1974ce2 100644
--- a/x86_64/src/lib.rs
+++ b/x86_64/src/lib.rs
@@ -578,12 +578,12 @@ impl X8664arch {
         Ok(None)
     }
 
-    /// This returns the start address of device memory
+    /// This returns the start address of high mmio
     ///
     /// # Arguments
     ///
     /// * mem: The memory to be used by the guest
-    fn get_dev_memory_base(mem: &GuestMemory) -> u64 {
+    fn get_high_mmio_base(mem: &GuestMemory) -> u64 {
         // Put device memory at a 2MB boundary after physical memory or 4gb, whichever is greater.
         const MB: u64 = 1 << 20;
         const GB: u64 = 1 << 30;
@@ -605,12 +605,12 @@ impl X8664arch {
 
     /// Returns a system resource allocator.
     fn get_resource_allocator(mem: &GuestMemory, gpu_allocation: bool) -> SystemAllocator {
-        const MMIO_BASE: u64 = 0xe0000000;
-        let device_addr_start = Self::get_dev_memory_base(mem);
+        const LOW_MMIO_BASE: u64 = 0xe0000000;
+        let high_mmio_start = Self::get_high_mmio_base(mem);
         SystemAllocator::builder()
             .add_io_addresses(0xc000, 0x10000)
-            .add_mmio_addresses(MMIO_BASE, 0x100000)
-            .add_device_addresses(device_addr_start, u64::max_value() - device_addr_start)
+            .add_low_mmio_addresses(LOW_MMIO_BASE, 0x100000)
+            .add_high_mmio_addresses(high_mmio_start, u64::max_value() - high_mmio_start)
             .create_allocator(X86_64_IRQ_BASE, gpu_allocation)
             .unwrap()
     }