summary refs log tree commit diff
diff options
context:
space:
mode:
authorAlyssa Ross <hi@alyssa.is>2020-05-22 01:18:42 +0000
committerAlyssa Ross <hi@alyssa.is>2020-05-22 01:18:42 +0000
commit460406d10bbfaa890d56d616b4610813da63a312 (patch)
tree889af76de40dfca7228a22a38f6b65b9562946f9
parenteb223862bd19827cc15d74b8af75b8c45a79b4d0 (diff)
parent56520c27224579640da9d3e8e4964b0f27dc9bdc (diff)
downloadcrosvm-460406d10bbfaa890d56d616b4610813da63a312.tar
crosvm-460406d10bbfaa890d56d616b4610813da63a312.tar.gz
crosvm-460406d10bbfaa890d56d616b4610813da63a312.tar.bz2
crosvm-460406d10bbfaa890d56d616b4610813da63a312.tar.lz
crosvm-460406d10bbfaa890d56d616b4610813da63a312.tar.xz
crosvm-460406d10bbfaa890d56d616b4610813da63a312.tar.zst
crosvm-460406d10bbfaa890d56d616b4610813da63a312.zip
Merge remote-tracking branch 'origin/master'
-rw-r--r--Cargo.lock12
-rw-r--r--acpi_tables/src/sdt.rs5
-rw-r--r--arch/src/android.rs10
-rw-r--r--devices/Cargo.toml2
-rw-r--r--devices/src/acpi.rs15
-rw-r--r--devices/src/irqchip/kvm/mod.rs122
-rw-r--r--devices/src/irqchip/mod.rs64
-rw-r--r--devices/src/lib.rs2
-rw-r--r--devices/src/usb/xhci/event_ring.rs5
-rw-r--r--devices/src/usb/xhci/interrupter.rs10
-rw-r--r--devices/src/virtio/block.rs4
-rw-r--r--devices/src/virtio/descriptor_utils.rs373
-rw-r--r--devices/src/virtio/fs/server.rs9
-rw-r--r--devices/src/virtio/gpu/mod.rs31
-rw-r--r--devices/src/virtio/gpu/virtio_2d_backend.rs22
-rw-r--r--devices/src/virtio/gpu/virtio_3d_backend.rs37
-rw-r--r--devices/src/virtio/gpu/virtio_gfxstream_backend.rs23
-rw-r--r--gpu_display/src/lib.rs12
-rw-r--r--hypervisor/Cargo.toml4
-rw-r--r--hypervisor/src/aarch64.rs24
-rw-r--r--hypervisor/src/caps.rs8
-rw-r--r--hypervisor/src/kvm/aarch64.rs22
-rw-r--r--hypervisor/src/kvm/mod.rs281
-rw-r--r--hypervisor/src/kvm/x86_64.rs124
-rw-r--r--hypervisor/src/lib.rs85
-rw-r--r--hypervisor/src/types/mod.rs9
-rw-r--r--hypervisor/src/types/x86.rs10
-rw-r--r--hypervisor/src/x86_64.rs52
-rw-r--r--hypervisor/tests/test_concrete.rs51
-rw-r--r--hypervisor/tests/test_concrete_aarch64.rs21
-rw-r--r--hypervisor/tests/test_concrete_x86_64.rs39
-rw-r--r--hypervisor/tests/types.rs18
-rw-r--r--kvm/src/lib.rs12
-rw-r--r--msg_socket/msg_on_socket_derive/msg_on_socket_derive.rs2
-rw-r--r--msg_socket/src/msg_on_socket.rs28
-rw-r--r--sys_util/src/descriptor.rs12
-rw-r--r--vm_control/src/lib.rs15
-rw-r--r--x86_64/src/acpi.rs41
-rw-r--r--x86_64/src/lib.rs60
39 files changed, 1289 insertions, 387 deletions
diff --git a/Cargo.lock b/Cargo.lock
index 41843ee..4df284d 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -167,6 +167,7 @@ dependencies = [
 name = "devices"
 version = "0.1.0"
 dependencies = [
+ "acpi_tables 0.1.0",
  "audio_streams 0.1.0",
  "bit_field 0.1.0",
  "bitflags 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -176,6 +177,7 @@ dependencies = [
  "gpu_buffer 0.1.0",
  "gpu_display 0.1.0",
  "gpu_renderer 0.1.0",
+ "hypervisor 0.1.0",
  "io_jail 0.1.0",
  "kvm 0.1.0",
  "kvm_sys 0.1.0",
@@ -342,6 +344,16 @@ dependencies = [
 ]
 
 [[package]]
+name = "hypervisor"
+version = "0.1.0"
+dependencies = [
+ "kvm 0.1.0",
+ "kvm_sys 0.1.0",
+ "libc 0.2.44 (registry+https://github.com/rust-lang/crates.io-index)",
+ "sys_util 0.1.0",
+]
+
+[[package]]
 name = "io_jail"
 version = "0.1.0"
 dependencies = [
diff --git a/acpi_tables/src/sdt.rs b/acpi_tables/src/sdt.rs
index e8a9ea2..0ea61a0 100644
--- a/acpi_tables/src/sdt.rs
+++ b/acpi_tables/src/sdt.rs
@@ -69,6 +69,11 @@ impl SDT {
         self.write(LENGTH_OFFSET, self.data.len() as u32);
     }
 
+    pub fn append_slice(&mut self, value: &[u8]) {
+        self.data.extend_from_slice(value);
+        self.write(LENGTH_OFFSET, self.data.len() as u32);
+    }
+
     /// Write a value at the given offset
     pub fn write<T: DataInit>(&mut self, offset: usize, value: T) {
         let value_len = std::mem::size_of::<T>();
diff --git a/arch/src/android.rs b/arch/src/android.rs
index 3d459d4..5311d3f 100644
--- a/arch/src/android.rs
+++ b/arch/src/android.rs
@@ -31,9 +31,17 @@ pub fn create_android_fdt(fdt: &mut Vec<u8>, fstab: File) -> Result<()> {
     begin_node(fdt, "firmware")?;
     begin_node(fdt, "android")?;
     property_string(fdt, "compatible", "android,firmware")?;
+
+    let (dtprop, fstab): (_, Vec<_>) = vecs.into_iter().partition(|x| x[0] == "#dt-vendor");
+    begin_node(fdt, "vendor")?;
+    for vec in dtprop {
+        let content = std::fs::read_to_string(&vec[2]).map_err(Error::FdtIoError)?;
+        property_string(fdt, &vec[1], &content);
+    }
+    end_node(fdt)?; // vendor
     begin_node(fdt, "fstab")?;
     property_string(fdt, "compatible", "android,fstab")?;
-    for vec in vecs {
+    for vec in fstab {
         let partition = &vec[1][1..];
         begin_node(fdt, partition)?;
         property_string(fdt, "compatible", &("android,".to_owned() + partition))?;
diff --git a/devices/Cargo.toml b/devices/Cargo.toml
index 629f29b..931ea36 100644
--- a/devices/Cargo.toml
+++ b/devices/Cargo.toml
@@ -12,6 +12,7 @@ x = ["gpu_display/x"]
 gfxstream = ["gpu"]
 
 [dependencies]
+acpi_tables = {path = "../acpi_tables" }
 audio_streams = "*"
 bit_field = { path = "../bit_field" }
 bitflags = "1"
@@ -21,6 +22,7 @@ enumn = { path = "../enumn" }
 gpu_buffer = { path = "../gpu_buffer", optional = true }
 gpu_display = { path = "../gpu_display", optional = true }
 gpu_renderer = { path = "../gpu_renderer", optional = true }
+hypervisor = { path = "../hypervisor" }
 io_jail = { path = "../io_jail" }
 kvm = { path = "../kvm" }
 kvm_sys = { path = "../kvm_sys" }
diff --git a/devices/src/acpi.rs b/devices/src/acpi.rs
index 990a782..1cfab35 100644
--- a/devices/src/acpi.rs
+++ b/devices/src/acpi.rs
@@ -3,6 +3,7 @@
 // found in the LICENSE file.
 
 use crate::{BusDevice, BusResumeDevice};
+use acpi_tables::{aml, aml::Aml};
 use sys_util::{error, warn, EventFd};
 
 /// ACPI PM resource for handling OS suspend/resume request
@@ -29,8 +30,7 @@ impl ACPIPMResource {
     }
 }
 
-/// the ACPI PM register's base and length.
-pub const ACPIPM_RESOURCE_BASE: u64 = 0x600;
+/// the ACPI PM register length.
 pub const ACPIPM_RESOURCE_LEN: u8 = 8;
 pub const ACPIPM_RESOURCE_EVENTBLK_LEN: u8 = 4;
 pub const ACPIPM_RESOURCE_CONTROLBLK_LEN: u8 = 2;
@@ -127,3 +127,14 @@ impl BusResumeDevice for ACPIPMResource {
         self.sleep_status = val | BITMASK_SLEEPCNT_WAKE_STATUS;
     }
 }
+
+impl Aml for ACPIPMResource {
+    fn to_aml_bytes(&self, bytes: &mut Vec<u8>) {
+        // S1
+        aml::Name::new(
+            "_S1_".into(),
+            &aml::Package::new(vec![&aml::ONE, &aml::ONE, &aml::ZERO, &aml::ZERO]),
+        )
+        .to_aml_bytes(bytes);
+    }
+}
diff --git a/devices/src/irqchip/kvm/mod.rs b/devices/src/irqchip/kvm/mod.rs
new file mode 100644
index 0000000..56794a3
--- /dev/null
+++ b/devices/src/irqchip/kvm/mod.rs
@@ -0,0 +1,122 @@
+// Copyright 2020 The Chromium OS Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+use hypervisor::kvm::{KvmVcpu, KvmVm};
+use hypervisor::IrqRoute;
+use std::sync::Arc;
+use sync::Mutex;
+use sys_util::{EventFd, Result};
+
+use crate::IrqChip;
+
+/// IrqChip implementation where the entire IrqChip is emulated by KVM.
+///
+/// This implementation will use the KVM API to create and configure the in-kernel irqchip.
+pub struct KvmKernelIrqChip {
+    _vm: KvmVm,
+    vcpus: Arc<Mutex<Vec<Option<KvmVcpu>>>>,
+}
+
+impl KvmKernelIrqChip {
+    /// Construct a new KvmKernelIrqchip.
+    pub fn new(vm: KvmVm, num_vcpus: usize) -> Result<KvmKernelIrqChip> {
+        Ok(KvmKernelIrqChip {
+            _vm: vm,
+            vcpus: Arc::new(Mutex::new((0..num_vcpus).map(|_| None).collect())),
+        })
+    }
+}
+
+/// This IrqChip only works with Kvm so we only implement it for KvmVcpu.
+impl IrqChip<KvmVcpu> for KvmKernelIrqChip {
+    /// Add a vcpu to the irq chip.
+    fn add_vcpu(&mut self, vcpu_id: usize, vcpu: KvmVcpu) -> Result<()> {
+        self.vcpus.lock().insert(vcpu_id, Some(vcpu));
+        Ok(())
+    }
+
+    /// Register an event that can trigger an interrupt for a particular GSI.
+    fn register_irq_event(
+        &mut self,
+        _irq: u32,
+        _irq_event: &EventFd,
+        _resample_event: Option<&EventFd>,
+    ) -> Result<()> {
+        unimplemented!("register_irq_event for KvmKernelIrqChip is not yet implemented");
+    }
+
+    /// Unregister an event for a particular GSI.
+    fn unregister_irq_event(
+        &mut self,
+        _irq: u32,
+        _irq_event: &EventFd,
+        _resample_event: Option<&EventFd>,
+    ) -> Result<()> {
+        unimplemented!("unregister_irq_event for KvmKernelIrqChip is not yet implemented");
+    }
+
+    /// Route an IRQ line to an interrupt controller, or to a particular MSI vector.
+    fn route_irq(&mut self, _route: IrqRoute) -> Result<()> {
+        unimplemented!("route_irq for KvmKernelIrqChip is not yet implemented");
+    }
+
+    /// Return a vector of all registered irq numbers and their associated events.  To be used by
+    /// the main thread to wait for irq events to be triggered.
+    fn irq_event_tokens(&self) -> Result<Vec<(u32, EventFd)>> {
+        unimplemented!("irq_event_tokens for KvmKernelIrqChip is not yet implemented");
+    }
+
+    /// Either assert or deassert an IRQ line.  Sends to either an interrupt controller, or does
+    /// a send_msi if the irq is associated with an MSI.
+    fn service_irq(&mut self, _irq: u32, _level: bool) -> Result<()> {
+        unimplemented!("service_irq for KvmKernelIrqChip is not yet implemented");
+    }
+
+    /// Broadcast an end of interrupt.
+    fn broadcast_eoi(&mut self, _vector: u8) -> Result<()> {
+        unimplemented!("broadcast_eoi for KvmKernelIrqChip is not yet implemented");
+    }
+
+    /// Return true if there is a pending interrupt for the specified vcpu.
+    fn interrupt_requested(&self, _vcpu_id: usize) -> bool {
+        unimplemented!("interrupt_requested for KvmKernelIrqChip is not yet implemented");
+    }
+
+    /// Check if the specified vcpu has any pending interrupts. Returns None for no interrupts,
+    /// otherwise Some(u32) should be the injected interrupt vector.
+    fn get_external_interrupt(&mut self, _vcpu_id: usize) -> Result<Option<u32>> {
+        unimplemented!("get_external_interrupt for KvmKernelIrqChip is not yet implemented");
+    }
+
+    /// Attempt to clone this IrqChip instance.
+    fn try_clone(&self) -> Result<Self> {
+        unimplemented!("try_clone for KvmKernelIrqChip is not yet implemented");
+    }
+}
+
+#[cfg(test)]
+mod tests {
+
+    use hypervisor::kvm::{Kvm, KvmVm};
+    use sys_util::GuestMemory;
+
+    use crate::irqchip::{IrqChip, KvmKernelIrqChip};
+    #[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
+    use hypervisor::VmAarch64;
+    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
+    use hypervisor::VmX86_64;
+
+    #[test]
+    fn create_kvm_kernel_irqchip() {
+        let kvm = Kvm::new().expect("failed to instantiate Kvm");
+        let mem = GuestMemory::new(&[]).unwrap();
+        let vm = KvmVm::new(&kvm, mem).expect("failed to instantiate vm");
+        let vcpu = vm.create_vcpu(0).expect("failed to instantiate vcpu");
+
+        let mut chip =
+            KvmKernelIrqChip::new(vm, 1).expect("failed to instantiate KvmKernelIrqChip");
+
+        chip.add_vcpu(0, vcpu).expect("failed to add vcpu");
+    }
+}
diff --git a/devices/src/irqchip/mod.rs b/devices/src/irqchip/mod.rs
new file mode 100644
index 0000000..a8023e9
--- /dev/null
+++ b/devices/src/irqchip/mod.rs
@@ -0,0 +1,64 @@
+// Copyright 2020 The Chromium OS Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+mod kvm;
+pub use self::kvm::KvmKernelIrqChip;
+
+use std::marker::{Send, Sized};
+
+use hypervisor::{IrqRoute, Vcpu};
+use sys_util::{EventFd, Result};
+
+/// Trait that abstracts interactions with interrupt controllers.
+///
+/// Each VM will have one IrqChip instance which is responsible for routing IRQ lines and
+/// registering IRQ events. Depending on the implementation, the IrqChip may interact with an
+/// underlying hypervisor API or emulate devices in userspace.
+///
+/// This trait is generic over a Vcpu type because some IrqChip implementations can support
+/// multiple hypervisors with a single implementation.
+pub trait IrqChip<V: Vcpu>: Send + Sized {
+    /// Add a vcpu to the irq chip.
+    fn add_vcpu(&mut self, vcpu_id: usize, vcpu: V) -> Result<()>;
+
+    /// Register an event that can trigger an interrupt for a particular GSI.
+    fn register_irq_event(
+        &mut self,
+        irq: u32,
+        irq_event: &EventFd,
+        resample_event: Option<&EventFd>,
+    ) -> Result<()>;
+
+    /// Unregister an event for a particular GSI.
+    fn unregister_irq_event(
+        &mut self,
+        irq: u32,
+        irq_event: &EventFd,
+        resample_event: Option<&EventFd>,
+    ) -> Result<()>;
+
+    /// Route an IRQ line to an interrupt controller, or to a particular MSI vector.
+    fn route_irq(&mut self, route: IrqRoute) -> Result<()>;
+
+    /// Return a vector of all registered irq numbers and their associated events.  To be used by
+    /// the main thread to wait for irq events to be triggered.
+    fn irq_event_tokens(&self) -> Result<Vec<(u32, EventFd)>>;
+
+    /// Either assert or deassert an IRQ line.  Sends to either an interrupt controller, or does
+    /// a send_msi if the irq is associated with an MSI.
+    fn service_irq(&mut self, irq: u32, level: bool) -> Result<()>;
+
+    /// Broadcast an end of interrupt.
+    fn broadcast_eoi(&mut self, vector: u8) -> Result<()>;
+
+    /// Return true if there is a pending interrupt for the specified vcpu.
+    fn interrupt_requested(&self, vcpu_id: usize) -> bool;
+
+    /// Check if the specified vcpu has any pending interrupts. Returns None for no interrupts,
+    /// otherwise Some(u32) should be the injected interrupt vector.
+    fn get_external_interrupt(&mut self, vcpu_id: usize) -> Result<Option<u32>>;
+
+    /// Attempt to clone this IrqChip instance.
+    fn try_clone(&self) -> Result<Self>;
+}
diff --git a/devices/src/lib.rs b/devices/src/lib.rs
index 01c2b46..c945e00 100644
--- a/devices/src/lib.rs
+++ b/devices/src/lib.rs
@@ -8,6 +8,7 @@ mod bus;
 mod cmos;
 mod i8042;
 mod ioapic;
+mod irqchip;
 mod pci;
 mod pic;
 mod pit;
@@ -30,6 +31,7 @@ pub use self::bus::{Bus, BusDevice, BusRange, BusResumeDevice};
 pub use self::cmos::Cmos;
 pub use self::i8042::I8042Device;
 pub use self::ioapic::{Ioapic, IOAPIC_BASE_ADDRESS, IOAPIC_MEM_LENGTH_BYTES};
+pub use self::irqchip::*;
 pub use self::pci::{
     Ac97Backend, Ac97Dev, Ac97Parameters, PciAddress, PciConfigIo, PciConfigMmio, PciDevice,
     PciDeviceError, PciInterruptPin, PciRoot, VfioPciDevice,
diff --git a/devices/src/usb/xhci/event_ring.rs b/devices/src/usb/xhci/event_ring.rs
index 1742c77..4711c46 100644
--- a/devices/src/usb/xhci/event_ring.rs
+++ b/devices/src/usb/xhci/event_ring.rs
@@ -144,11 +144,6 @@ impl EventRing {
         self.dequeue_pointer = addr;
     }
 
-    /// Get the enqueue pointer.
-    pub fn get_enqueue_pointer(&self) -> GuestAddress {
-        self.enqueue_pointer
-    }
-
     /// Check if event ring is empty.
     pub fn is_empty(&self) -> bool {
         self.enqueue_pointer == self.dequeue_pointer
diff --git a/devices/src/usb/xhci/interrupter.rs b/devices/src/usb/xhci/interrupter.rs
index c58ae59..cf79fd8 100644
--- a/devices/src/usb/xhci/interrupter.rs
+++ b/devices/src/usb/xhci/interrupter.rs
@@ -45,7 +45,6 @@ pub struct Interrupter {
     erdp: Register<u64>,
     event_handler_busy: bool,
     enabled: bool,
-    pending: bool,
     moderation_interval: u16,
     moderation_counter: u16,
     event_ring: EventRing,
@@ -61,7 +60,6 @@ impl Interrupter {
             erdp: regs.erdp.clone(),
             event_handler_busy: false,
             enabled: false,
-            pending: false,
             moderation_interval: 0,
             moderation_counter: 0,
             event_ring: EventRing::new(mem),
@@ -76,7 +74,6 @@ impl Interrupter {
     /// Add event to event ring.
     fn add_event(&mut self, trb: Trb) -> Result<()> {
         self.event_ring.add_event(trb).map_err(Error::AddEvent)?;
-        self.pending = true;
         self.interrupt_if_needed()
     }
 
@@ -169,9 +166,6 @@ impl Interrupter {
     pub fn set_event_ring_dequeue_pointer(&mut self, addr: GuestAddress) -> Result<()> {
         usb_debug!("interrupter set dequeue ptr addr {:#x}", addr.0);
         self.event_ring.set_dequeue_pointer(addr);
-        if addr == self.event_ring.get_enqueue_pointer() {
-            self.pending = false;
-        }
         self.interrupt_if_needed()
     }
 
@@ -186,7 +180,6 @@ impl Interrupter {
     pub fn interrupt(&mut self) -> Result<()> {
         usb_debug!("sending interrupt");
         self.event_handler_busy = true;
-        self.pending = false;
         self.usbsts.set_bits(USB_STS_EVENT_INTERRUPT);
         self.iman.set_bits(IMAN_INTERRUPT_PENDING);
         self.erdp.set_bits(ERDP_EVENT_HANDLER_BUSY);
@@ -194,7 +187,8 @@ impl Interrupter {
     }
 
     fn interrupt_if_needed(&mut self) -> Result<()> {
-        if self.enabled && self.pending && !self.event_handler_busy {
+        // TODO(dverkamp): re-add !self.event_handler_busy after solving https://crbug.com/1082930
+        if self.enabled && !self.event_ring.is_empty() {
             self.interrupt()?;
         }
         Ok(())
diff --git a/devices/src/virtio/block.rs b/devices/src/virtio/block.rs
index c9dda55..80d5103 100644
--- a/devices/src/virtio/block.rs
+++ b/devices/src/virtio/block.rs
@@ -267,9 +267,7 @@ impl Worker {
         let status_offset = available_bytes
             .checked_sub(1)
             .ok_or(ExecuteError::MissingStatus)?;
-        let mut status_writer = writer
-            .split_at(status_offset)
-            .map_err(ExecuteError::Descriptor)?;
+        let mut status_writer = writer.split_at(status_offset);
 
         let status = match Block::execute_request(
             &mut reader,
diff --git a/devices/src/virtio/descriptor_utils.rs b/devices/src/virtio/descriptor_utils.rs
index f90264a..d65341b 100644
--- a/devices/src/virtio/descriptor_utils.rs
+++ b/devices/src/virtio/descriptor_utils.rs
@@ -3,7 +3,8 @@
 // found in the LICENSE file.
 
 use std::cmp;
-use std::collections::VecDeque;
+use std::convert::TryInto;
+use std::ffi::c_void;
 use std::fmt::{self, Display};
 use std::io::{self, Read, Write};
 use std::iter::FromIterator;
@@ -53,8 +54,10 @@ impl std::error::Error for Error {}
 
 #[derive(Clone)]
 struct DescriptorChainConsumer<'a> {
-    buffers: VecDeque<VolatileSlice<'a>>,
+    buffers: Vec<libc::iovec>,
+    current: usize,
     bytes_consumed: usize,
+    mem: PhantomData<&'a GuestMemory>,
 }
 
 impl<'a> DescriptorChainConsumer<'a> {
@@ -62,140 +65,136 @@ impl<'a> DescriptorChainConsumer<'a> {
         // This is guaranteed not to overflow because the total length of the chain
         // is checked during all creations of `DescriptorChainConsumer` (see
         // `Reader::new()` and `Writer::new()`).
-        self.buffers
+        self.get_remaining()
             .iter()
-            .fold(0usize, |count, vs| count + vs.size() as usize)
+            .fold(0usize, |count, buf| count + buf.iov_len)
     }
 
     fn bytes_consumed(&self) -> usize {
         self.bytes_consumed
     }
 
-    /// Consumes at most `count` bytes from the `DescriptorChain`. Callers must provide a function
-    /// that takes a `&[VolatileSlice]` and returns the total number of bytes consumed. This
-    /// function guarantees that the combined length of all the slices in the `&[VolatileSlice]` is
-    /// less than or equal to `count`.
+    /// Returns all the remaining buffers in the `DescriptorChain`. Calling this function does not
+    /// consume any bytes from the `DescriptorChain`. Instead callers should use the `consume`
+    /// method to advance the `DescriptorChain`. Multiple calls to `get` with no intervening calls
+    /// to `consume` will return the same data.
+    fn get_remaining(&self) -> &[libc::iovec] {
+        &self.buffers[self.current..]
+    }
+
+    /// Consumes `count` bytes from the `DescriptorChain`. If `count` is larger than
+    /// `self.available_bytes()` then all remaining bytes in the `DescriptorChain` will be consumed.
     ///
     /// # Errors
     ///
-    /// If the provided function returns any error then no bytes are consumed from the buffer and
-    /// the error is returned to the caller.
-    fn consume<F>(&mut self, count: usize, f: F) -> io::Result<usize>
-    where
-        F: FnOnce(&[VolatileSlice]) -> io::Result<usize>,
-    {
-        let mut buflen = 0;
-        let mut bufs = Vec::with_capacity(self.buffers.len());
-        for &vs in &self.buffers {
-            if buflen >= count {
+    /// Returns an error if the total bytes consumed by this `DescriptorChainConsumer` overflows a
+    /// usize.
+    fn consume(&mut self, mut count: usize) {
+        // The implementation is adapted from `IoSlice::advance` in libstd. We can't use
+        // `get_remaining` here because then the compiler complains that `self.current` is already
+        // borrowed and doesn't allow us to modify it.  We also need to borrow the iovecs mutably.
+        let current = self.current;
+        for buf in &mut self.buffers[current..] {
+            if count == 0 {
                 break;
             }
 
-            let rem = count - buflen;
-            if (rem as u64) < vs.size() {
-                let buf = vs.sub_slice(0, rem as u64).map_err(|e| {
-                    io::Error::new(io::ErrorKind::InvalidData, Error::VolatileMemoryError(e))
-                })?;
-                bufs.push(buf);
-                buflen += rem;
+            let consumed = if count < buf.iov_len {
+                // Safe because we know that the iovec pointed to valid memory and we are adding a
+                // value that is smaller than the length of the memory.
+                buf.iov_base = unsafe { (buf.iov_base as *mut u8).add(count) as *mut c_void };
+                buf.iov_len -= count;
+                count
             } else {
-                bufs.push(vs);
-                buflen += vs.size() as usize;
-            }
+                self.current += 1;
+                buf.iov_len
+            };
+
+            // This shouldn't overflow because `consumed <= buf.iov_len` and we already verified
+            // that adding all `buf.iov_len` values will not overflow when the Reader/Writer was
+            // constructed.
+            self.bytes_consumed += consumed;
+            count -= consumed;
         }
+    }
 
-        if bufs.is_empty() {
-            return Ok(0);
-        }
+    fn split_at(&mut self, offset: usize) -> DescriptorChainConsumer<'a> {
+        let mut other = self.clone();
+        other.consume(offset);
+        other.bytes_consumed = 0;
 
-        let bytes_consumed = f(&*bufs)?;
-
-        // This can happen if a driver tricks a device into reading/writing more data than
-        // fits in a `usize`.
-        let total_bytes_consumed =
-            self.bytes_consumed
-                .checked_add(bytes_consumed)
-                .ok_or_else(|| {
-                    io::Error::new(io::ErrorKind::InvalidData, Error::DescriptorChainOverflow)
-                })?;
-
-        let mut rem = bytes_consumed;
-        while let Some(vs) = self.buffers.pop_front() {
-            if (rem as u64) < vs.size() {
-                // Split the slice and push the remainder back into the buffer list. Safe because we
-                // know that `rem` is not out of bounds due to the check and we checked the bounds
-                // on `vs` when we added it to the buffer list.
-                self.buffers.push_front(vs.offset(rem as u64).unwrap());
+        let mut rem = offset;
+        let mut end = self.current;
+        for buf in &mut self.buffers[self.current..] {
+            if rem < buf.iov_len {
+                buf.iov_len = rem;
                 break;
             }
 
-            // No need for checked math because we know that `vs.size() <= rem`.
-            rem -= vs.size() as usize;
+            end += 1;
+            rem -= buf.iov_len;
         }
 
-        self.bytes_consumed = total_bytes_consumed;
+        self.buffers.truncate(end + 1);
 
-        Ok(bytes_consumed)
+        other
     }
 
-    fn split_at(&mut self, offset: usize) -> Result<DescriptorChainConsumer<'a>> {
-        let mut rem = offset;
-        let pos = self.buffers.iter().position(|vs| {
-            if (rem as u64) < vs.size() {
-                true
-            } else {
-                rem -= vs.size() as usize;
-                false
-            }
-        });
-
-        if let Some(at) = pos {
-            let mut other = self.buffers.split_off(at);
-
-            if rem > 0 {
-                // There must be at least one element in `other` because we checked
-                // its `size` value in the call to `position` above.
-                let front = other.pop_front().expect("empty VecDeque after split");
-                self.buffers.push_back(
-                    front
-                        .sub_slice(0, rem as u64)
-                        .map_err(Error::VolatileMemoryError)?,
-                );
-                other.push_front(
-                    front
-                        .offset(rem as u64)
-                        .map_err(Error::VolatileMemoryError)?,
-                );
-            }
+    // Temporary method for converting iovecs into VolatileSlices until we can change the
+    // ReadWriteVolatile traits. The irony here is that the standard implementation of the
+    // ReadWriteVolatile traits will convert the VolatileSlices back into iovecs.
+    fn get_volatile_slices(&mut self, mut count: usize) -> Vec<VolatileSlice> {
+        let bufs = self.get_remaining();
+        let mut iovs = Vec::with_capacity(bufs.len());
+        for b in bufs {
+            // Safe because we verified during construction that the memory at `b.iov_base` is
+            // `b.iov_len` bytes long. The lifetime of the `VolatileSlice` is tied to the lifetime
+            // of this `DescriptorChainConsumer`, which is in turn tied to the lifetime of the
+            // `GuestMemory` used to create it and so the memory will be available for the duration
+            // of the `VolatileSlice`.
+            let iov = unsafe {
+                if count < b.iov_len {
+                    VolatileSlice::new(
+                        b.iov_base as *mut u8,
+                        count.try_into().expect("usize doesn't fit in u64"),
+                    )
+                } else {
+                    VolatileSlice::new(
+                        b.iov_base as *mut u8,
+                        b.iov_len.try_into().expect("usize doesn't fit in u64"),
+                    )
+                }
+            };
 
-            Ok(DescriptorChainConsumer {
-                buffers: other,
-                bytes_consumed: 0,
-            })
-        } else if rem == 0 {
-            Ok(DescriptorChainConsumer {
-                buffers: VecDeque::new(),
-                bytes_consumed: 0,
-            })
-        } else {
-            Err(Error::SplitOutOfBounds(offset))
+            count -= iov.size() as usize;
+            iovs.push(iov);
         }
+
+        iovs
     }
 
     fn get_iovec(&mut self, len: usize) -> io::Result<DescriptorIovec<'a>> {
-        let mut iovec = Vec::new();
+        let mut iovec = Vec::with_capacity(self.get_remaining().len());
+
+        let mut rem = len;
+        for buf in self.get_remaining() {
+            let iov = if rem < buf.iov_len {
+                libc::iovec {
+                    iov_base: buf.iov_base,
+                    iov_len: rem,
+                }
+            } else {
+                buf.clone()
+            };
 
-        self.consume(len, |bufs| {
-            let mut total = 0;
-            for vs in bufs {
-                iovec.push(libc::iovec {
-                    iov_base: vs.as_ptr() as *mut libc::c_void,
-                    iov_len: vs.size() as usize,
-                });
-                total += vs.size() as usize;
+            rem -= iov.iov_len;
+            iovec.push(iov);
+
+            if rem == 0 {
+                break;
             }
-            Ok(total)
-        })?;
+        }
+        self.consume(len);
 
         Ok(DescriptorIovec {
             iovec,
@@ -250,14 +249,21 @@ impl<'a> Reader<'a> {
                     .checked_add(desc.len as usize)
                     .ok_or(Error::DescriptorChainOverflow)?;
 
-                mem.get_slice(desc.addr.offset(), desc.len.into())
-                    .map_err(Error::VolatileMemoryError)
+                let vs = mem
+                    .get_slice(desc.addr.offset(), desc.len.into())
+                    .map_err(Error::VolatileMemoryError)?;
+                Ok(libc::iovec {
+                    iov_base: vs.as_ptr() as *mut c_void,
+                    iov_len: vs.size() as usize,
+                })
             })
-            .collect::<Result<VecDeque<VolatileSlice<'a>>>>()?;
+            .collect::<Result<Vec<libc::iovec>>>()?;
         Ok(Reader {
             buffer: DescriptorChainConsumer {
                 buffers,
+                current: 0,
                 bytes_consumed: 0,
+                mem: PhantomData,
             },
         })
     }
@@ -305,8 +311,10 @@ impl<'a> Reader<'a> {
         mut dst: F,
         count: usize,
     ) -> io::Result<usize> {
-        self.buffer
-            .consume(count, |bufs| dst.write_vectored_volatile(bufs))
+        let iovs = self.buffer.get_volatile_slices(count);
+        let written = dst.write_vectored_volatile(&iovs[..])?;
+        self.buffer.consume(written);
+        Ok(written)
     }
 
     /// Reads data from the descriptor chain buffer into a File at offset `off`.
@@ -319,8 +327,10 @@ impl<'a> Reader<'a> {
         count: usize,
         off: u64,
     ) -> io::Result<usize> {
-        self.buffer
-            .consume(count, |bufs| dst.write_vectored_at_volatile(bufs, off))
+        let iovs = self.buffer.get_volatile_slices(count);
+        let written = dst.write_vectored_at_volatile(&iovs[..], off)?;
+        self.buffer.consume(written);
+        Ok(written)
     }
 
     pub fn read_exact_to<F: FileReadWriteVolatile>(
@@ -382,12 +392,14 @@ impl<'a> Reader<'a> {
         self.buffer.bytes_consumed()
     }
 
-    /// Splits this `Reader` into two at the given offset in the `DescriptorChain` buffer.
-    /// After the split, `self` will be able to read up to `offset` bytes while the returned
-    /// `Reader` can read up to `available_bytes() - offset` bytes.  Returns an error if
-    /// `offset > self.available_bytes()`.
-    pub fn split_at(&mut self, offset: usize) -> Result<Reader<'a>> {
-        self.buffer.split_at(offset).map(|buffer| Reader { buffer })
+    /// Splits this `Reader` into two at the given offset in the `DescriptorChain` buffer. After the
+    /// split, `self` will be able to read up to `offset` bytes while the returned `Reader` can read
+    /// up to `available_bytes() - offset` bytes. If `offset > self.available_bytes()`, then the
+    /// returned `Reader` will not be able to read any bytes.
+    pub fn split_at(&mut self, offset: usize) -> Reader<'a> {
+        Reader {
+            buffer: self.buffer.split_at(offset),
+        }
     }
 
     /// Returns a DescriptorIovec for the next `len` bytes of the descriptor chain
@@ -399,27 +411,25 @@ impl<'a> Reader<'a> {
 
 impl<'a> io::Read for Reader<'a> {
     fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
-        self.buffer.consume(buf.len(), |bufs| {
-            let mut rem = buf;
-            let mut total = 0;
-            for vs in bufs {
-                // This is guaranteed by the implementation of `consume`.
-                debug_assert_eq!(vs.size(), cmp::min(rem.len() as u64, vs.size()));
-
-                // Safe because we have already verified that `vs` points to valid memory.
-                unsafe {
-                    copy_nonoverlapping(
-                        vs.as_ptr() as *const u8,
-                        rem.as_mut_ptr(),
-                        vs.size() as usize,
-                    );
-                }
-                let copied = vs.size() as usize;
-                rem = &mut rem[copied..];
-                total += copied;
+        let mut rem = buf;
+        let mut total = 0;
+        for b in self.buffer.get_remaining() {
+            if rem.len() == 0 {
+                break;
             }
-            Ok(total)
-        })
+
+            let count = cmp::min(rem.len(), b.iov_len);
+
+            // Safe because we have already verified that `b` points to valid memory.
+            unsafe {
+                copy_nonoverlapping(b.iov_base as *const u8, rem.as_mut_ptr(), count);
+            }
+            rem = &mut rem[count..];
+            total += count;
+        }
+
+        self.buffer.consume(total);
+        Ok(total)
     }
 }
 
@@ -450,14 +460,21 @@ impl<'a> Writer<'a> {
                     .checked_add(desc.len as usize)
                     .ok_or(Error::DescriptorChainOverflow)?;
 
-                mem.get_slice(desc.addr.offset(), desc.len.into())
-                    .map_err(Error::VolatileMemoryError)
+                let vs = mem
+                    .get_slice(desc.addr.offset(), desc.len.into())
+                    .map_err(Error::VolatileMemoryError)?;
+                Ok(libc::iovec {
+                    iov_base: vs.as_ptr() as *mut c_void,
+                    iov_len: vs.size() as usize,
+                })
             })
-            .collect::<Result<VecDeque<VolatileSlice<'a>>>>()?;
+            .collect::<Result<Vec<libc::iovec>>>()?;
         Ok(Writer {
             buffer: DescriptorChainConsumer {
                 buffers,
+                current: 0,
                 bytes_consumed: 0,
+                mem: PhantomData,
             },
         })
     }
@@ -495,8 +512,10 @@ impl<'a> Writer<'a> {
         mut src: F,
         count: usize,
     ) -> io::Result<usize> {
-        self.buffer
-            .consume(count, |bufs| src.read_vectored_volatile(bufs))
+        let iovs = self.buffer.get_volatile_slices(count);
+        let read = src.read_vectored_volatile(&iovs[..])?;
+        self.buffer.consume(read);
+        Ok(read)
     }
 
     /// Writes data to the descriptor chain buffer from a File at offset `off`.
@@ -509,8 +528,10 @@ impl<'a> Writer<'a> {
         count: usize,
         off: u64,
     ) -> io::Result<usize> {
-        self.buffer
-            .consume(count, |bufs| src.read_vectored_at_volatile(bufs, off))
+        let iovs = self.buffer.get_volatile_slices(count);
+        let read = src.read_vectored_at_volatile(&iovs[..], off)?;
+        self.buffer.consume(read);
+        Ok(read)
     }
 
     pub fn write_all_from<F: FileReadWriteVolatile>(
@@ -565,12 +586,14 @@ impl<'a> Writer<'a> {
         self.buffer.bytes_consumed()
     }
 
-    /// Splits this `Writer` into two at the given offset in the `DescriptorChain` buffer.
-    /// After the split, `self` will be able to write up to `offset` bytes while the returned
-    /// `Writer` can write up to `available_bytes() - offset` bytes.  Returns an error if
-    /// `offset > self.available_bytes()`.
-    pub fn split_at(&mut self, offset: usize) -> Result<Writer<'a>> {
-        self.buffer.split_at(offset).map(|buffer| Writer { buffer })
+    /// Splits this `Writer` into two at the given offset in the `DescriptorChain` buffer. After the
+    /// split, `self` will be able to write up to `offset` bytes while the returned `Writer` can
+    /// write up to `available_bytes() - offset` bytes. If `offset > self.available_bytes()`, then
+    /// the returned `Writer` will not be able to write any bytes.
+    pub fn split_at(&mut self, offset: usize) -> Writer<'a> {
+        Writer {
+            buffer: self.buffer.split_at(offset),
+        }
     }
 
     /// Returns a DescriptorIovec for the next `len` bytes of the descriptor chain
@@ -582,23 +605,24 @@ impl<'a> Writer<'a> {
 
 impl<'a> io::Write for Writer<'a> {
     fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
-        self.buffer.consume(buf.len(), |bufs| {
-            let mut rem = buf;
-            let mut total = 0;
-            for vs in bufs {
-                // This is guaranteed by the implementation of `consume`.
-                debug_assert_eq!(vs.size(), cmp::min(rem.len() as u64, vs.size()));
-
-                // Safe because we have already verified that `vs` points to valid memory.
-                unsafe {
-                    copy_nonoverlapping(rem.as_ptr(), vs.as_ptr(), vs.size() as usize);
-                }
-                let copied = vs.size() as usize;
-                rem = &rem[copied..];
-                total += copied;
+        let mut rem = buf;
+        let mut total = 0;
+        for b in self.buffer.get_remaining() {
+            if rem.len() == 0 {
+                break;
             }
-            Ok(total)
-        })
+
+            let count = cmp::min(rem.len(), b.iov_len);
+            // Safe because we have already verified that `vs` points to valid memory.
+            unsafe {
+                copy_nonoverlapping(rem.as_ptr(), b.iov_base as *mut u8, count);
+            }
+            rem = &rem[count..];
+            total += count;
+        }
+
+        self.buffer.consume(total);
+        Ok(total)
     }
 
     fn flush(&mut self) -> io::Result<()> {
@@ -1031,7 +1055,7 @@ mod tests {
         .expect("create_descriptor_chain failed");
         let mut reader = Reader::new(&memory, chain).expect("failed to create Reader");
 
-        let other = reader.split_at(32).expect("failed to split Reader");
+        let other = reader.split_at(32);
         assert_eq!(reader.available_bytes(), 32);
         assert_eq!(other.available_bytes(), 96);
     }
@@ -1060,7 +1084,7 @@ mod tests {
         .expect("create_descriptor_chain failed");
         let mut reader = Reader::new(&memory, chain).expect("failed to create Reader");
 
-        let other = reader.split_at(24).expect("failed to split Reader");
+        let other = reader.split_at(24);
         assert_eq!(reader.available_bytes(), 24);
         assert_eq!(other.available_bytes(), 104);
     }
@@ -1089,7 +1113,7 @@ mod tests {
         .expect("create_descriptor_chain failed");
         let mut reader = Reader::new(&memory, chain).expect("failed to create Reader");
 
-        let other = reader.split_at(128).expect("failed to split Reader");
+        let other = reader.split_at(128);
         assert_eq!(reader.available_bytes(), 128);
         assert_eq!(other.available_bytes(), 0);
     }
@@ -1118,7 +1142,7 @@ mod tests {
         .expect("create_descriptor_chain failed");
         let mut reader = Reader::new(&memory, chain).expect("failed to create Reader");
 
-        let other = reader.split_at(0).expect("failed to split Reader");
+        let other = reader.split_at(0);
         assert_eq!(reader.available_bytes(), 0);
         assert_eq!(other.available_bytes(), 128);
     }
@@ -1147,9 +1171,12 @@ mod tests {
         .expect("create_descriptor_chain failed");
         let mut reader = Reader::new(&memory, chain).expect("failed to create Reader");
 
-        if let Ok(_) = reader.split_at(256) {
-            panic!("successfully split Reader with out of bounds offset");
-        }
+        let other = reader.split_at(256);
+        assert_eq!(
+            other.available_bytes(),
+            0,
+            "Reader returned from out-of-bounds split still has available bytes"
+        );
     }
 
     #[test]
diff --git a/devices/src/virtio/fs/server.rs b/devices/src/virtio/fs/server.rs
index 33b7c98..c1af80c 100644
--- a/devices/src/virtio/fs/server.rs
+++ b/devices/src/virtio/fs/server.rs
@@ -496,10 +496,7 @@ impl<F: FileSystem + Sync> Server<F> {
         };
 
         // Split the writer into 2 pieces: one for the `OutHeader` and the rest for the data.
-        let data_writer = ZCWriter(
-            w.split_at(size_of::<OutHeader>())
-                .map_err(Error::InvalidDescriptorChain)?,
-        );
+        let data_writer = ZCWriter(w.split_at(size_of::<OutHeader>()));
 
         match self.fs.read(
             Context::from(in_header),
@@ -910,9 +907,7 @@ impl<F: FileSystem + Sync> Server<F> {
         }
 
         // Skip over enough bytes for the header.
-        let mut cursor = w
-            .split_at(size_of::<OutHeader>())
-            .map_err(Error::InvalidDescriptorChain)?;
+        let mut cursor = w.split_at(size_of::<OutHeader>());
 
         let res = if plus {
             self.fs.readdirplus(
diff --git a/devices/src/virtio/gpu/mod.rs b/devices/src/virtio/gpu/mod.rs
index b305089..2873b74 100644
--- a/devices/src/virtio/gpu/mod.rs
+++ b/devices/src/virtio/gpu/mod.rs
@@ -111,7 +111,7 @@ trait Backend {
 
     /// Constructs a backend.
     fn build(
-        possible_displays: &[DisplayBackend],
+        display: GpuDisplay,
         display_width: u32,
         display_height: u32,
         renderer_flags: RendererFlags,
@@ -345,9 +345,28 @@ impl BackendKind {
         gpu_device_socket: VmMemoryControlRequestSocket,
         pci_bar: Alloc,
     ) -> Option<Box<dyn Backend>> {
+        let mut display_opt = None;
+        for display in possible_displays {
+            match display.build() {
+                Ok(c) => {
+                    display_opt = Some(c);
+                    break;
+                }
+                Err(e) => error!("failed to open display: {}", e),
+            };
+        }
+
+        let display = match display_opt {
+            Some(d) => d,
+            None => {
+                error!("failed to open any displays");
+                return None;
+            }
+        };
+
         match self {
             BackendKind::Virtio2D => Virtio2DBackend::build(
-                possible_displays,
+                display,
                 display_width,
                 display_height,
                 renderer_flags,
@@ -356,7 +375,7 @@ impl BackendKind {
                 pci_bar,
             ),
             BackendKind::Virtio3D => Virtio3DBackend::build(
-                possible_displays,
+                display,
                 display_width,
                 display_height,
                 renderer_flags,
@@ -366,7 +385,7 @@ impl BackendKind {
             ),
             #[cfg(feature = "gfxstream")]
             BackendKind::VirtioGfxStream => VirtioGfxStreamBackend::build(
-                possible_displays,
+                display,
                 display_width,
                 display_height,
                 renderer_flags,
@@ -977,10 +996,6 @@ impl DisplayBackend {
             DisplayBackend::Stub => GpuDisplay::open_stub(),
         }
     }
-
-    fn is_x(&self) -> bool {
-        matches!(self, DisplayBackend::X(_))
-    }
 }
 
 pub struct Gpu {
diff --git a/devices/src/virtio/gpu/virtio_2d_backend.rs b/devices/src/virtio/gpu/virtio_2d_backend.rs
index a51a664..fd85bef 100644
--- a/devices/src/virtio/gpu/virtio_2d_backend.rs
+++ b/devices/src/virtio/gpu/virtio_2d_backend.rs
@@ -22,7 +22,7 @@ use vm_control::VmMemoryControlRequestSocket;
 
 use super::protocol::GpuResponse;
 pub use super::virtio_backend::{VirtioBackend, VirtioResource};
-use crate::virtio::gpu::{Backend, DisplayBackend, VIRTIO_F_VERSION_1};
+use crate::virtio::gpu::{Backend, VIRTIO_F_VERSION_1};
 use crate::virtio::resource_bridge::ResourceResponse;
 
 #[derive(Debug)]
@@ -427,7 +427,7 @@ impl Backend for Virtio2DBackend {
 
     /// Returns the underlying Backend.
     fn build(
-        possible_displays: &[DisplayBackend],
+        display: GpuDisplay,
         display_width: u32,
         display_height: u32,
         _renderer_flags: RendererFlags,
@@ -435,24 +435,6 @@ impl Backend for Virtio2DBackend {
         _gpu_device_socket: VmMemoryControlRequestSocket,
         _pci_bar: Alloc,
     ) -> Option<Box<dyn Backend>> {
-        let mut display_opt = None;
-        for display in possible_displays {
-            match display.build() {
-                Ok(c) => {
-                    display_opt = Some(c);
-                    break;
-                }
-                Err(e) => error!("failed to open display: {}", e),
-            };
-        }
-        let display = match display_opt {
-            Some(d) => d,
-            None => {
-                error!("failed to open any displays");
-                return None;
-            }
-        };
-
         Some(Box::new(Virtio2DBackend::new(
             display,
             display_width,
diff --git a/devices/src/virtio/gpu/virtio_3d_backend.rs b/devices/src/virtio/gpu/virtio_3d_backend.rs
index 692bedc..f7899be 100644
--- a/devices/src/virtio/gpu/virtio_3d_backend.rs
+++ b/devices/src/virtio/gpu/virtio_3d_backend.rs
@@ -31,8 +31,8 @@ use super::protocol::{
 };
 pub use crate::virtio::gpu::virtio_backend::{VirtioBackend, VirtioResource};
 use crate::virtio::gpu::{
-    Backend, DisplayBackend, VIRTIO_F_VERSION_1, VIRTIO_GPU_F_HOST_VISIBLE,
-    VIRTIO_GPU_F_RESOURCE_UUID, VIRTIO_GPU_F_RESOURCE_V2, VIRTIO_GPU_F_VIRGL, VIRTIO_GPU_F_VULKAN,
+    Backend, VIRTIO_F_VERSION_1, VIRTIO_GPU_F_HOST_VISIBLE, VIRTIO_GPU_F_RESOURCE_UUID,
+    VIRTIO_GPU_F_RESOURCE_V2, VIRTIO_GPU_F_VIRGL, VIRTIO_GPU_F_VULKAN,
 };
 use crate::virtio::resource_bridge::{PlaneInfo, ResourceInfo, ResourceResponse};
 
@@ -246,7 +246,7 @@ impl Backend for Virtio3DBackend {
 
     /// Returns the underlying Backend.
     fn build(
-        possible_displays: &[DisplayBackend],
+        display: GpuDisplay,
         display_width: u32,
         display_height: u32,
         renderer_flags: RendererFlags,
@@ -255,31 +255,14 @@ impl Backend for Virtio3DBackend {
         pci_bar: Alloc,
     ) -> Option<Box<dyn Backend>> {
         let mut renderer_flags = renderer_flags;
-        let mut display_opt = None;
-        for display in possible_displays {
-            match display.build() {
-                Ok(c) => {
-                    // If X11 is being used, that's an indication that the renderer should also be
-                    // using glx. Otherwise, we are likely in an enviroment in which GBM will work
-                    // for doing allocations of buffers we wish to display. TODO(zachr): this is a
-                    // heuristic (or terrible hack depending on your POV). We should do something
-                    // either smarter or more configurable.
-                    if display.is_x() {
-                        renderer_flags = RendererFlags::new().use_glx(true);
-                    }
-                    display_opt = Some(c);
-                    break;
-                }
-                Err(e) => error!("failed to open display: {}", e),
-            };
+        if display.is_x() {
+            // If X11 is being used, that's an indication that the renderer should also be
+            // using glx. Otherwise, we are likely in an enviroment in which GBM will work
+            // for doing allocations of buffers we wish to display. TODO(zachr): this is a
+            // heuristic (or terrible hack depending on your POV). We should do something
+            // either smarter or more configurable.
+            renderer_flags = RendererFlags::new().use_glx(true);
         }
-        let display = match display_opt {
-            Some(d) => d,
-            None => {
-                error!("failed to open any displays");
-                return None;
-            }
-        };
 
         if cfg!(debug_assertions) {
             let ret = unsafe { libc::dup2(libc::STDOUT_FILENO, libc::STDERR_FILENO) };
diff --git a/devices/src/virtio/gpu/virtio_gfxstream_backend.rs b/devices/src/virtio/gpu/virtio_gfxstream_backend.rs
index 2a49da8..d8ef793 100644
--- a/devices/src/virtio/gpu/virtio_gfxstream_backend.rs
+++ b/devices/src/virtio/gpu/virtio_gfxstream_backend.rs
@@ -25,7 +25,7 @@ use vm_control::VmMemoryControlRequestSocket;
 
 use super::protocol::GpuResponse;
 pub use super::virtio_backend::{VirtioBackend, VirtioResource};
-use crate::virtio::gpu::{Backend, DisplayBackend, VIRTIO_F_VERSION_1, VIRTIO_GPU_F_VIRGL};
+use crate::virtio::gpu::{Backend, VIRTIO_F_VERSION_1, VIRTIO_GPU_F_VIRGL};
 use crate::virtio::resource_bridge::ResourceResponse;
 
 // C definitions related to gfxstream
@@ -282,7 +282,7 @@ impl Backend for VirtioGfxStreamBackend {
 
     /// Returns the underlying Backend.
     fn build(
-        possible_displays: &[DisplayBackend],
+        display: GpuDisplay,
         display_width: u32,
         display_height: u32,
         _renderer_flags: RendererFlags,
@@ -290,25 +290,6 @@ impl Backend for VirtioGfxStreamBackend {
         gpu_device_socket: VmMemoryControlRequestSocket,
         pci_bar: Alloc,
     ) -> Option<Box<dyn Backend>> {
-        let mut display_opt = None;
-        for display in possible_displays {
-            match display.build() {
-                Ok(c) => {
-                    display_opt = Some(c);
-                    break;
-                }
-                Err(e) => error!("failed to open display: {}", e),
-            };
-        }
-
-        let display = match display_opt {
-            Some(d) => d,
-            None => {
-                error!("failed to open any displays");
-                return None;
-            }
-        };
-
         Some(Box::new(VirtioGfxStreamBackend::new(
             display,
             display_width,
diff --git a/gpu_display/src/lib.rs b/gpu_display/src/lib.rs
index 07358a0..dccf1c7 100644
--- a/gpu_display/src/lib.rs
+++ b/gpu_display/src/lib.rs
@@ -171,6 +171,7 @@ trait DisplayT: AsRawFd {
 /// descriptor. When the connection is readable, `dispatch_events` can be called to process it.
 pub struct GpuDisplay {
     inner: Box<dyn DisplayT>,
+    is_x: bool,
 }
 
 impl GpuDisplay {
@@ -183,7 +184,7 @@ impl GpuDisplay {
                 None => gpu_display_x::DisplayX::open_display(None)?,
             };
             let inner = Box::new(display);
-            Ok(GpuDisplay { inner })
+            Ok(GpuDisplay { inner, is_x: true })
         }
         #[cfg(not(feature = "x"))]
         Err(GpuDisplayError::Unsupported)
@@ -198,13 +199,18 @@ impl GpuDisplay {
             None => gpu_display_wl::DisplayWl::new(None)?,
         };
         let inner = Box::new(display);
-        Ok(GpuDisplay { inner })
+        Ok(GpuDisplay { inner, is_x: false })
     }
 
     pub fn open_stub() -> Result<GpuDisplay, GpuDisplayError> {
         let display = gpu_display_stub::DisplayStub::new()?;
         let inner = Box::new(display);
-        Ok(GpuDisplay { inner })
+        Ok(GpuDisplay { inner, is_x: false })
+    }
+
+    /// Return whether this display is an X display
+    pub fn is_x(&self) -> bool {
+        self.is_x
     }
 
     /// Imports a dmabuf to the compositor for use as a surface buffer and returns a handle to it.
diff --git a/hypervisor/Cargo.toml b/hypervisor/Cargo.toml
index de82105..8f07d5d 100644
--- a/hypervisor/Cargo.toml
+++ b/hypervisor/Cargo.toml
@@ -6,5 +6,7 @@ edition = "2018"
 
 [dependencies]
 libc = "*"
+kvm = { path = "../kvm" }
 kvm_sys = { path = "../kvm_sys" }
-sys_util = { path = "../sys_util" }
\ No newline at end of file
+sync = { path = "../sync" }
+sys_util = { path = "../sys_util" }
diff --git a/hypervisor/src/aarch64.rs b/hypervisor/src/aarch64.rs
new file mode 100644
index 0000000..875941b
--- /dev/null
+++ b/hypervisor/src/aarch64.rs
@@ -0,0 +1,24 @@
+// Copyright 2020 The Chromium OS Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+use crate::{Vcpu, Vm};
+use sys_util::Result;
+
+/// A wrapper for using a VM on aarch64 and getting/setting its state.
+pub trait VmAArch64: Vm {
+    type Vcpu: VcpuAArch64;
+
+    /// Create a Vcpu with the specified Vcpu ID.
+    fn create_vcpu(&self, id: usize) -> Result<Self::Vcpu>;
+}
+
+/// A wrapper around creating and using a VCPU on aarch64.
+pub trait VcpuAArch64: Vcpu {
+    /// Sets the value of register on this VCPU.
+    ///
+    /// # Arguments
+    ///
+    /// * `reg_id` - Register ID, specified in the KVM API documentation for KVM_SET_ONE_REG
+    fn set_one_reg(&self, reg_id: u64, data: u64) -> Result<()>;
+}
diff --git a/hypervisor/src/caps.rs b/hypervisor/src/caps.rs
index d088ca7..c3fe745 100644
--- a/hypervisor/src/caps.rs
+++ b/hypervisor/src/caps.rs
@@ -3,4 +3,10 @@
 // found in the LICENSE file.
 
 /// An enumeration of different hypervisor capabilities.
-pub enum HypervisorCap {}
+pub enum HypervisorCap {
+    ArmPmuV3,
+    ImmediateExit,
+    S390UserSigp,
+    TscDeadlineTimer,
+    UserMemory,
+}
diff --git a/hypervisor/src/kvm/aarch64.rs b/hypervisor/src/kvm/aarch64.rs
new file mode 100644
index 0000000..4f0398f
--- /dev/null
+++ b/hypervisor/src/kvm/aarch64.rs
@@ -0,0 +1,22 @@
+// Copyright 2020 The Chromium OS Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+use sys_util::Result;
+
+use super::{KvmVcpu, KvmVm};
+use crate::{VcpuAArch64, VmAArch64};
+
+impl VmAArch64 for KvmVm {
+    type Vcpu = KvmVcpu;
+
+    fn create_vcpu(&self, id: usize) -> Result<Self::Vcpu> {
+        self.create_kvm_vcpu(id)
+    }
+}
+
+impl VcpuAArch64 for KvmVcpu {
+    fn set_one_reg(&self, _reg_id: u64, _data: u64) -> Result<()> {
+        Ok(())
+    }
+}
diff --git a/hypervisor/src/kvm/mod.rs b/hypervisor/src/kvm/mod.rs
index 7058921..0550792 100644
--- a/hypervisor/src/kvm/mod.rs
+++ b/hypervisor/src/kvm/mod.rs
@@ -2,17 +2,69 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-use super::{CpuId, Hypervisor, HypervisorCap};
+#[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
+mod aarch64;
+#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
+mod x86_64;
+
+use std::cmp::Ordering;
+use std::collections::{BinaryHeap, HashMap};
+use std::convert::TryFrom;
+use std::ops::{Deref, DerefMut};
+use std::os::raw::{c_char, c_ulong};
+use std::os::unix::io::{AsRawFd, RawFd};
+use std::sync::Arc;
+
 use libc::{open, O_CLOEXEC, O_RDWR};
-use std::os::raw::c_char;
+
+use kvm_sys::*;
+use sync::Mutex;
 use sys_util::{
-    errno_result, AsRawDescriptor, FromRawDescriptor, RawDescriptor, Result, SafeDescriptor,
+    errno_result, ioctl, ioctl_with_ref, ioctl_with_val, AsRawDescriptor, Error, FromRawDescriptor,
+    GuestMemory, RawDescriptor, Result, SafeDescriptor,
 };
 
+use crate::{Hypervisor, HypervisorCap, MappedRegion, RunnableVcpu, Vcpu, VcpuExit, Vm};
+
+// Wrapper around KVM_SET_USER_MEMORY_REGION ioctl, which creates, modifies, or deletes a mapping
+// from guest physical to host user pages.
+//
+// Safe when the guest regions are guaranteed not to overlap.
+unsafe fn set_user_memory_region(
+    descriptor: &SafeDescriptor,
+    slot: u32,
+    read_only: bool,
+    log_dirty_pages: bool,
+    guest_addr: u64,
+    memory_size: u64,
+    userspace_addr: *mut u8,
+) -> Result<()> {
+    let mut flags = if read_only { KVM_MEM_READONLY } else { 0 };
+    if log_dirty_pages {
+        flags |= KVM_MEM_LOG_DIRTY_PAGES;
+    }
+    let region = kvm_userspace_memory_region {
+        slot,
+        flags,
+        guest_phys_addr: guest_addr,
+        memory_size,
+        userspace_addr: userspace_addr as u64,
+    };
+
+    let ret = ioctl_with_ref(descriptor, KVM_SET_USER_MEMORY_REGION(), &region);
+    if ret == 0 {
+        Ok(())
+    } else {
+        errno_result()
+    }
+}
+
 pub struct Kvm {
     kvm: SafeDescriptor,
 }
 
+type KvmCap = kvm::Cap;
+
 impl Kvm {
     /// Opens `/dev/kvm/` and returns a Kvm object on success.
     pub fn new() -> Result<Kvm> {
@@ -35,28 +87,233 @@ impl AsRawDescriptor for Kvm {
     }
 }
 
+impl AsRawFd for Kvm {
+    fn as_raw_fd(&self) -> RawFd {
+        self.kvm.as_raw_descriptor()
+    }
+}
+
 impl Hypervisor for Kvm {
-    fn check_capability(&self, _cap: &HypervisorCap) -> bool {
-        unimplemented!("check_capability for Kvm is not yet implemented");
+    fn check_capability(&self, cap: &HypervisorCap) -> bool {
+        if let Ok(kvm_cap) = KvmCap::try_from(cap) {
+            // this ioctl is safe because we know this kvm descriptor is valid,
+            // and we are copying over the kvm capability (u32) as a c_ulong value.
+            unsafe { ioctl_with_val(self, KVM_CHECK_EXTENSION(), kvm_cap as c_ulong) == 1 }
+        } else {
+            // this capability cannot be converted on this platform, so return false
+            false
+        }
+    }
+}
+
+// Used to invert the order when stored in a max-heap.
+#[derive(Copy, Clone, Eq, PartialEq)]
+struct MemSlot(u32);
+
+impl Ord for MemSlot {
+    fn cmp(&self, other: &MemSlot) -> Ordering {
+        // Notice the order is inverted so the lowest magnitude slot has the highest priority in a
+        // max-heap.
+        other.0.cmp(&self.0)
+    }
+}
+
+impl PartialOrd for MemSlot {
+    fn partial_cmp(&self, other: &MemSlot) -> Option<Ordering> {
+        Some(self.cmp(other))
+    }
+}
+
+/// A wrapper around creating and using a KVM VM.
+pub struct KvmVm {
+    vm: SafeDescriptor,
+    guest_mem: GuestMemory,
+    mem_regions: Arc<Mutex<HashMap<u32, Box<dyn MappedRegion>>>>,
+    mem_slot_gaps: Arc<Mutex<BinaryHeap<MemSlot>>>,
+}
+
+impl KvmVm {
+    /// Constructs a new `KvmVm` using the given `Kvm` instance.
+    pub fn new(kvm: &Kvm, guest_mem: GuestMemory) -> Result<KvmVm> {
+        // Safe because we know kvm is a real kvm fd as this module is the only one that can make
+        // Kvm objects.
+        let ret = unsafe { ioctl(kvm, KVM_CREATE_VM()) };
+        if ret < 0 {
+            return errno_result();
+        }
+        // Safe because we verify that ret is valid and we own the fd.
+        let vm_descriptor = unsafe { SafeDescriptor::from_raw_descriptor(ret) };
+        guest_mem.with_regions(|index, guest_addr, size, host_addr, _| {
+            unsafe {
+                // Safe because the guest regions are guaranteed not to overlap.
+                set_user_memory_region(
+                    &vm_descriptor,
+                    index as u32,
+                    false,
+                    false,
+                    guest_addr.offset() as u64,
+                    size as u64,
+                    host_addr as *mut u8,
+                )
+            }
+        })?;
+        // TODO(colindr/srichman): add default IRQ routes in IrqChip constructor or configure_vm
+        Ok(KvmVm {
+            vm: vm_descriptor,
+            guest_mem,
+            mem_regions: Arc::new(Mutex::new(HashMap::new())),
+            mem_slot_gaps: Arc::new(Mutex::new(BinaryHeap::new())),
+        })
+    }
+
+    fn create_kvm_vcpu(&self, _id: usize) -> Result<KvmVcpu> {
+        Ok(KvmVcpu {})
+    }
+}
+
+impl Vm for KvmVm {
+    fn try_clone(&self) -> Result<Self> {
+        Ok(KvmVm {
+            vm: self.vm.try_clone()?,
+            guest_mem: self.guest_mem.clone(),
+            mem_regions: self.mem_regions.clone(),
+            mem_slot_gaps: self.mem_slot_gaps.clone(),
+        })
+    }
+
+    fn get_memory(&self) -> &GuestMemory {
+        &self.guest_mem
+    }
+}
+
+impl AsRawDescriptor for KvmVm {
+    fn as_raw_descriptor(&self) -> RawDescriptor {
+        self.vm.as_raw_descriptor()
+    }
+}
+
+impl AsRawFd for KvmVm {
+    fn as_raw_fd(&self) -> RawFd {
+        self.vm.as_raw_descriptor()
+    }
+}
+
+/// A wrapper around creating and using a KVM Vcpu.
+pub struct KvmVcpu {}
+
+impl Vcpu for KvmVcpu {
+    type Runnable = RunnableKvmVcpu;
+
+    fn to_runnable(self) -> Result<Self::Runnable> {
+        Ok(RunnableKvmVcpu {
+            vcpu: self,
+            phantom: Default::default(),
+        })
+    }
+
+    fn request_interrupt_window(&self) -> Result<()> {
+        Ok(())
     }
+}
+
+/// A KvmVcpu that has a thread and can be run.
+pub struct RunnableKvmVcpu {
+    vcpu: KvmVcpu,
+
+    // vcpus must stay on the same thread once they start.
+    // Add the PhantomData pointer to ensure RunnableKvmVcpu is not `Send`.
+    phantom: std::marker::PhantomData<*mut u8>,
+}
+
+impl RunnableVcpu for RunnableKvmVcpu {
+    type Vcpu = KvmVcpu;
 
-    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
-    fn get_supported_cpuid(&self) -> Result<CpuId> {
-        unimplemented!("get_supported_cpuid for Kvm is not yet implemented");
+    fn run(&self) -> Result<VcpuExit> {
+        Ok(VcpuExit::Unknown)
     }
+}
+
+impl Deref for RunnableKvmVcpu {
+    type Target = <Self as RunnableVcpu>::Vcpu;
+
+    fn deref(&self) -> &Self::Target {
+        &self.vcpu
+    }
+}
 
-    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
-    fn get_emulated_cpuid(&self) -> Result<CpuId> {
-        unimplemented!("get_emulated_cpuid for Kvm is not yet implemented");
+impl DerefMut for RunnableKvmVcpu {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.vcpu
+    }
+}
+
+impl<'a> TryFrom<&'a HypervisorCap> for KvmCap {
+    type Error = Error;
+
+    fn try_from(cap: &'a HypervisorCap) -> Result<KvmCap> {
+        match cap {
+            HypervisorCap::ArmPmuV3 => Ok(KvmCap::ArmPmuV3),
+            HypervisorCap::ImmediateExit => Ok(KvmCap::ImmediateExit),
+            HypervisorCap::S390UserSigp => Ok(KvmCap::S390UserSigp),
+            HypervisorCap::TscDeadlineTimer => Ok(KvmCap::TscDeadlineTimer),
+            HypervisorCap::UserMemory => Ok(KvmCap::UserMemory),
+        }
     }
 }
 
 #[cfg(test)]
 mod tests {
-    use super::Kvm;
+    use super::*;
+    use std::thread;
+    use sys_util::GuestAddress;
 
     #[test]
     fn new() {
         Kvm::new().unwrap();
     }
+
+    #[test]
+    fn check_capability() {
+        let kvm = Kvm::new().unwrap();
+        assert!(kvm.check_capability(&HypervisorCap::UserMemory));
+        assert!(!kvm.check_capability(&HypervisorCap::S390UserSigp));
+    }
+
+    #[test]
+    fn create_vm() {
+        let kvm = Kvm::new().unwrap();
+        let gm = GuestMemory::new(&[(GuestAddress(0), 0x1000)]).unwrap();
+        KvmVm::new(&kvm, gm).unwrap();
+    }
+
+    #[test]
+    fn clone_vm() {
+        let kvm = Kvm::new().unwrap();
+        let gm = GuestMemory::new(&[(GuestAddress(0), 0x1000)]).unwrap();
+        let vm = KvmVm::new(&kvm, gm).unwrap();
+        vm.try_clone().unwrap();
+    }
+
+    #[test]
+    fn send_vm() {
+        let kvm = Kvm::new().unwrap();
+        let gm = GuestMemory::new(&[(GuestAddress(0), 0x1000)]).unwrap();
+        let vm = KvmVm::new(&kvm, gm).unwrap();
+        thread::spawn(move || {
+            let _vm = vm;
+        })
+        .join()
+        .unwrap();
+    }
+
+    #[test]
+    fn get_memory() {
+        let kvm = Kvm::new().unwrap();
+        let gm = GuestMemory::new(&[(GuestAddress(0), 0x1000)]).unwrap();
+        let vm = KvmVm::new(&kvm, gm).unwrap();
+        let obj_addr = GuestAddress(0xf0);
+        vm.get_memory().write_obj_at_addr(67u8, obj_addr).unwrap();
+        let read_val: u8 = vm.get_memory().read_obj_from_addr(obj_addr).unwrap();
+        assert_eq!(read_val, 67u8);
+    }
 }
diff --git a/hypervisor/src/kvm/x86_64.rs b/hypervisor/src/kvm/x86_64.rs
new file mode 100644
index 0000000..43d6c97
--- /dev/null
+++ b/hypervisor/src/kvm/x86_64.rs
@@ -0,0 +1,124 @@
+// Copyright 2020 The Chromium OS Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+use kvm_sys::*;
+use libc::E2BIG;
+use sys_util::{ioctl_with_mut_ptr, Error, Result};
+
+use super::{Kvm, KvmVcpu, KvmVm};
+use crate::{CpuId, CpuIdEntry, HypervisorX86_64, Regs, VcpuX86_64, VmX86_64};
+
+type KvmCpuId = kvm::CpuId;
+
+impl Kvm {
+    pub fn get_cpuid(&self, kind: u64) -> Result<CpuId> {
+        const KVM_MAX_ENTRIES: usize = 256;
+        self.get_cpuid_with_initial_capacity(kind, KVM_MAX_ENTRIES)
+    }
+
+    fn get_cpuid_with_initial_capacity(&self, kind: u64, initial_capacity: usize) -> Result<CpuId> {
+        let mut entries: usize = initial_capacity;
+
+        loop {
+            let mut kvm_cpuid = KvmCpuId::new(entries);
+
+            let ret = unsafe {
+                // ioctl is unsafe. The kernel is trusted not to write beyond the bounds of the
+                // memory allocated for the struct. The limit is read from nent within KvmCpuId,
+                // which is set to the allocated size above.
+                ioctl_with_mut_ptr(self, kind, kvm_cpuid.as_mut_ptr())
+            };
+            if ret < 0 {
+                let err = Error::last();
+                match err.errno() {
+                    E2BIG => {
+                        // double the available memory for cpuid entries for kvm.
+                        if let Some(val) = entries.checked_mul(2) {
+                            entries = val;
+                        } else {
+                            return Err(err);
+                        }
+                    }
+                    _ => return Err(err),
+                }
+            } else {
+                return Ok(CpuId::from(&kvm_cpuid));
+            }
+        }
+    }
+}
+
+impl<'a> From<&'a KvmCpuId> for CpuId {
+    fn from(kvm_cpuid: &'a KvmCpuId) -> CpuId {
+        let kvm_entries = kvm_cpuid.entries_slice();
+        let mut cpu_id_entries = Vec::with_capacity(kvm_entries.len());
+
+        for entry in kvm_entries {
+            let cpu_id_entry = CpuIdEntry {
+                function: entry.function,
+                index: entry.index,
+                eax: entry.eax,
+                ebx: entry.ebx,
+                ecx: entry.ecx,
+                edx: entry.edx,
+            };
+            cpu_id_entries.push(cpu_id_entry)
+        }
+        CpuId { cpu_id_entries }
+    }
+}
+
+impl HypervisorX86_64 for Kvm {
+    fn get_supported_cpuid(&self) -> Result<CpuId> {
+        self.get_cpuid(KVM_GET_SUPPORTED_CPUID())
+    }
+
+    fn get_emulated_cpuid(&self) -> Result<CpuId> {
+        self.get_cpuid(KVM_GET_EMULATED_CPUID())
+    }
+}
+
+impl VmX86_64 for KvmVm {
+    type Vcpu = KvmVcpu;
+
+    fn create_vcpu(&self, id: usize) -> Result<Self::Vcpu> {
+        self.create_kvm_vcpu(id)
+    }
+}
+
+impl VcpuX86_64 for KvmVcpu {
+    fn get_regs(&self) -> Result<Regs> {
+        Ok(Regs {})
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::Kvm;
+    use crate::HypervisorX86_64;
+    use kvm_sys::*;
+
+    #[test]
+    fn get_supported_cpuid() {
+        let hypervisor = Kvm::new().unwrap();
+        let cpuid = hypervisor.get_supported_cpuid().unwrap();
+        assert!(cpuid.cpu_id_entries.len() > 0);
+    }
+
+    #[test]
+    fn get_emulated_cpuid() {
+        let hypervisor = Kvm::new().unwrap();
+        let cpuid = hypervisor.get_emulated_cpuid().unwrap();
+        assert!(cpuid.cpu_id_entries.len() > 0);
+    }
+
+    #[test]
+    fn entries_double_on_error() {
+        let hypervisor = Kvm::new().unwrap();
+        let cpuid = hypervisor
+            .get_cpuid_with_initial_capacity(KVM_GET_SUPPORTED_CPUID(), 4)
+            .unwrap();
+        assert!(cpuid.cpu_id_entries.len() > 4);
+    }
+}
diff --git a/hypervisor/src/lib.rs b/hypervisor/src/lib.rs
index 056070b..784af8c 100644
--- a/hypervisor/src/lib.rs
+++ b/hypervisor/src/lib.rs
@@ -3,23 +3,84 @@
 // found in the LICENSE file.
 
 //! A crate for abstracting the underlying kernel hypervisor used in crosvm.
+#[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
+pub mod aarch64;
 pub mod caps;
 pub mod kvm;
-pub mod types;
+#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
+pub mod x86_64;
 
-use sys_util::Result;
+use std::ops::{Deref, DerefMut};
 
+use sys_util::{GuestMemory, Result};
+
+#[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
+pub use crate::aarch64::*;
 pub use crate::caps::*;
-pub use crate::types::*;
+#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
+pub use crate::x86_64::*;
 
-/// A trait for managing the underlying cpu information for the hypervisor and to check its capabilities.
-trait Hypervisor {
-    // Checks if a particular `HypervisorCap` is available.
+/// A trait for checking hypervisor capabilities.
+pub trait Hypervisor {
+    /// Checks if a particular `HypervisorCap` is available.
     fn check_capability(&self, cap: &HypervisorCap) -> bool;
-    // Get the system supported CPUID values.
-    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
-    fn get_supported_cpuid(&self) -> Result<CpuId>;
-    // Get the system emulated CPUID values.
-    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
-    fn get_emulated_cpuid(&self) -> Result<CpuId>;
 }
+
+/// A wrapper for using a VM and getting/setting its state.
+pub trait Vm: Send + Sized {
+    /// Makes a shallow clone of this `Vm`.
+    fn try_clone(&self) -> Result<Self>;
+
+    /// Gets the guest-mapped memory for the Vm.
+    fn get_memory(&self) -> &GuestMemory;
+}
+
+/// A wrapper around using a VCPU.
+/// `Vcpu` provides all functionality except for running. To run, `to_runnable` must be called to
+/// lock the vcpu to a thread. Then the returned `RunnableVcpu` can be used for running.
+pub trait Vcpu: Send + Sized {
+    type Runnable: RunnableVcpu;
+
+    /// Consumes `self` and returns a `RunnableVcpu`. A `RunnableVcpu` is required to run the guest.
+    fn to_runnable(self) -> Result<Self::Runnable>;
+
+    /// Request the Vcpu to exit the next time it can accept an interrupt.
+    fn request_interrupt_window(&self) -> Result<()>;
+}
+
+/// A Vcpu that has a thread and can be run. Created by calling `to_runnable` on a `Vcpu`.
+/// Implements `Deref` to a `Vcpu` so all `Vcpu` methods are usable, with the addition of the `run`
+/// function to execute the guest.
+pub trait RunnableVcpu: Deref<Target = <Self as RunnableVcpu>::Vcpu> + DerefMut {
+    type Vcpu: Vcpu;
+
+    /// Runs the VCPU until it exits, returning the reason for the exit.
+    ///
+    /// Note that the state of the VCPU and associated VM must be setup first for this to do
+    /// anything useful.
+    fn run(&self) -> Result<VcpuExit>;
+}
+
+/// A memory region in the current process that can be mapped into the guest's memory.
+///
+/// Safe when implementers guarantee `ptr`..`ptr+size` is an mmaped region owned by this object that
+/// can't be unmapped during the `MappedRegion`'s lifetime.
+pub unsafe trait MappedRegion: Send + Sync {
+    /// Returns a pointer to the beginning of the memory region. Should only be
+    /// used for passing this region to ioctls for setting guest memory.
+    fn as_ptr(&self) -> *mut u8;
+
+    /// Returns the size of the memory region in bytes.
+    fn size(&self) -> usize;
+
+    /// Flushes changes to this memory region to the backing file.
+    fn msync(&self) -> Result<()>;
+}
+
+/// A reason why a VCPU exited. One of these returns every time `Vcpu::run` is called.
+#[derive(Debug)]
+pub enum VcpuExit {
+    Unknown,
+}
+
+pub struct IrqRoute {}
diff --git a/hypervisor/src/types/mod.rs b/hypervisor/src/types/mod.rs
deleted file mode 100644
index 69fa9e4..0000000
--- a/hypervisor/src/types/mod.rs
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright 2020 The Chromium OS Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
-pub mod x86;
-
-#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
-pub use self::x86::*;
diff --git a/hypervisor/src/types/x86.rs b/hypervisor/src/types/x86.rs
deleted file mode 100644
index cd5236a..0000000
--- a/hypervisor/src/types/x86.rs
+++ /dev/null
@@ -1,10 +0,0 @@
-// Copyright 2020 The Chromium OS Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-use kvm_sys::kvm_cpuid_entry2;
-
-pub type CpuIdEntry = kvm_cpuid_entry2;
-pub struct CpuId {
-    _cpu_id_entries: Vec<CpuIdEntry>,
-}
diff --git a/hypervisor/src/x86_64.rs b/hypervisor/src/x86_64.rs
new file mode 100644
index 0000000..87f0777
--- /dev/null
+++ b/hypervisor/src/x86_64.rs
@@ -0,0 +1,52 @@
+// Copyright 2020 The Chromium OS Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+use sys_util::Result;
+
+use crate::{Hypervisor, Vcpu, Vm};
+
+/// A trait for managing cpuids for an x86_64 hypervisor and for checking its capabilities.
+pub trait HypervisorX86_64: Hypervisor {
+    /// Get the system supported CPUID values.
+    fn get_supported_cpuid(&self) -> Result<CpuId>;
+
+    /// Get the system emulated CPUID values.
+    fn get_emulated_cpuid(&self) -> Result<CpuId>;
+}
+
+/// A wrapper for using a VM on x86_64 and getting/setting its state.
+pub trait VmX86_64: Vm {
+    type Vcpu: VcpuX86_64;
+
+    /// Create a Vcpu with the specified Vcpu ID.
+    fn create_vcpu(&self, id: usize) -> Result<Self::Vcpu>;
+}
+
+/// A wrapper around creating and using a VCPU on x86_64.
+pub trait VcpuX86_64: Vcpu {
+    /// Gets the VCPU registers.
+    fn get_regs(&self) -> Result<Regs>;
+}
+
+/// A CpuId Entry contains supported feature information for the given processor.
+/// This can be modified by the hypervisor to pass additional information to the guest kernel
+/// about the hypervisor or vm. Information is returned in the eax, ebx, ecx and edx registers
+/// by the cpu for a given function and index/subfunction (passed into the cpu via the eax and ecx
+/// register respectively).
+pub struct CpuIdEntry {
+    pub function: u32,
+    pub index: u32,
+    pub eax: u32,
+    pub ebx: u32,
+    pub ecx: u32,
+    pub edx: u32,
+}
+
+/// A container for the list of cpu id entries for the hypervisor and underlying cpu.
+pub struct CpuId {
+    pub cpu_id_entries: Vec<CpuIdEntry>,
+}
+
+/// The state of a vcpu's general-purpose registers.
+pub struct Regs {}
diff --git a/hypervisor/tests/test_concrete.rs b/hypervisor/tests/test_concrete.rs
new file mode 100644
index 0000000..fd6b89f
--- /dev/null
+++ b/hypervisor/tests/test_concrete.rs
@@ -0,0 +1,51 @@
+// Copyright 2020 The Chromium OS Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// TODO: Delete these tests soon, once we start getting real implementations in place.
+
+#[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
+mod test_concrete_aarch64;
+#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
+mod test_concrete_x86_64;
+
+use sys_util::GuestMemory;
+
+use hypervisor::*;
+#[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
+use test_concrete_aarch64::*;
+#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
+use test_concrete_x86_64::*;
+
+fn run_vcpu<T, U, V>(_hypervm: &HyperVm<T, U, V>, _linux: &RunnableLinuxVm, vcpu: impl Vcpu)
+where
+    T: Hypervisor,
+    U: Vm,
+    V: Vcpu,
+{
+    let vcpu = vcpu.to_runnable().unwrap();
+    vcpu.run().unwrap();
+    vcpu.request_interrupt_window().unwrap();
+}
+
+#[test]
+fn test_concrete_types() {
+    let cfg_use_kvm = true;
+    if cfg_use_kvm {
+        let hypervisor = kvm::Kvm::new().unwrap();
+        let mem = GuestMemory::new(&[]).unwrap();
+        let vm = kvm::KvmVm::new(&hypervisor, mem).unwrap();
+        let vcpu = vm.create_vcpu(0).unwrap();
+        let mut vcpus = vec![vcpu];
+        let mut hypervm = HyperVm {
+            hypervisor,
+            vm,
+            vcpus,
+        };
+        let linux = configure_vm(&hypervm);
+        vcpus = hypervm.vcpus.split_off(0);
+        for vcpu in vcpus.into_iter() {
+            run_vcpu(&hypervm, &linux, vcpu);
+        }
+    }
+}
diff --git a/hypervisor/tests/test_concrete_aarch64.rs b/hypervisor/tests/test_concrete_aarch64.rs
new file mode 100644
index 0000000..6c58bbe
--- /dev/null
+++ b/hypervisor/tests/test_concrete_aarch64.rs
@@ -0,0 +1,21 @@
+// Copyright 2020 The Chromium OS Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#[path = "types.rs"]
+mod types;
+
+#[allow(unused_imports)]
+use hypervisor::*;
+pub use types::{HyperVm, RunnableLinuxVm};
+
+// Inline cfg won't be needed in real code, but integration tests don't have conditional includes.
+#[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
+pub fn configure_vm<T, U, V>(_hypervm: &HyperVm<T, U, V>) -> RunnableLinuxVm
+where
+    T: Hypervisor,
+    U: VmAArch64,
+    V: VcpuAArch64,
+{
+    RunnableLinuxVm {}
+}
diff --git a/hypervisor/tests/test_concrete_x86_64.rs b/hypervisor/tests/test_concrete_x86_64.rs
new file mode 100644
index 0000000..6e24b80
--- /dev/null
+++ b/hypervisor/tests/test_concrete_x86_64.rs
@@ -0,0 +1,39 @@
+// Copyright 2020 The Chromium OS Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#[path = "types.rs"]
+mod types;
+
+#[allow(unused_imports)]
+use hypervisor::*;
+pub use types::{HyperVm, RunnableLinuxVm};
+
+// Inline cfg won't be needed in real code, but integration tests don't have conditional includes.
+#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
+fn configure_vcpu<T, U, V>(
+    _hypervm: &HyperVm<T, U, V>,
+    _linux: &RunnableLinuxVm,
+    vcpu: &impl VcpuX86_64,
+) where
+    T: HypervisorX86_64,
+    U: VmX86_64,
+    V: VcpuX86_64,
+{
+    vcpu.get_regs().unwrap();
+}
+
+// Inline cfg won't be needed in real code, but integration tests don't have conditional includes.
+#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
+pub fn configure_vm<T, U, V>(hypervm: &HyperVm<T, U, V>) -> RunnableLinuxVm
+where
+    T: HypervisorX86_64,
+    U: VmX86_64,
+    V: VcpuX86_64,
+{
+    let linux = RunnableLinuxVm {};
+    for vcpu in hypervm.vcpus.iter() {
+        configure_vcpu(&hypervm, &linux, vcpu);
+    }
+    linux
+}
diff --git a/hypervisor/tests/types.rs b/hypervisor/tests/types.rs
new file mode 100644
index 0000000..d9272af
--- /dev/null
+++ b/hypervisor/tests/types.rs
@@ -0,0 +1,18 @@
+// Copyright 2020 The Chromium OS Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+use hypervisor::*;
+
+pub struct HyperVm<HyperT, VmT, VcpuT>
+where
+    HyperT: Hypervisor,
+    VmT: Vm,
+    VcpuT: Vcpu,
+{
+    pub hypervisor: HyperT,
+    pub vm: VmT,
+    pub vcpus: Vec<VcpuT>,
+}
+
+pub struct RunnableLinuxVm {}
diff --git a/kvm/src/lib.rs b/kvm/src/lib.rs
index 8b98b7c..1b06b39 100644
--- a/kvm/src/lib.rs
+++ b/kvm/src/lib.rs
@@ -2045,6 +2045,18 @@ impl CpuId {
         unsafe { self.kvm_cpuid[0].entries.as_mut_slice(nent) }
     }
 
+    /// Get the entries slice, for inspecting. To modify, use mut_entries_slice instead.
+    pub fn entries_slice(&self) -> &[kvm_cpuid_entry2] {
+        // Mapping the unsized array to a slice is unsafe because the length isn't known.  Using
+        // the length we originally allocated with eliminates the possibility of overflow.
+        let slice_size = if self.kvm_cpuid[0].nent as usize > self.allocated_len {
+            self.allocated_len
+        } else {
+            self.kvm_cpuid[0].nent as usize
+        };
+        unsafe { self.kvm_cpuid[0].entries.as_slice(slice_size) }
+    }
+
     /// Get a  pointer so it can be passed to the kernel.  Using this pointer is unsafe.
     pub fn as_ptr(&self) -> *const kvm_cpuid2 {
         &self.kvm_cpuid[0]
diff --git a/msg_socket/msg_on_socket_derive/msg_on_socket_derive.rs b/msg_socket/msg_on_socket_derive/msg_on_socket_derive.rs
index c814767..bfca732 100644
--- a/msg_socket/msg_on_socket_derive/msg_on_socket_derive.rs
+++ b/msg_socket/msg_on_socket_derive/msg_on_socket_derive.rs
@@ -96,7 +96,7 @@ fn get_struct_fields(ds: DataStruct) -> Vec<StructField> {
                 Meta::List(meta) => {
                     for nested in meta.nested {
                         match nested {
-                            NestedMeta::Meta(Meta::Path(meta_path))
+                            NestedMeta::Meta(Meta::Path(ref meta_path))
                                 if meta_path.is_ident("skip") =>
                             {
                                 skipped = true;
diff --git a/msg_socket/src/msg_on_socket.rs b/msg_socket/src/msg_on_socket.rs
index d263407..82ee9a8 100644
--- a/msg_socket/src/msg_on_socket.rs
+++ b/msg_socket/src/msg_on_socket.rs
@@ -146,30 +146,6 @@ impl MsgOnSocket for SysError {
     }
 }
 
-impl MsgOnSocket for RawFd {
-    fn fixed_size() -> Option<usize> {
-        Some(0)
-    }
-
-    fn fd_count(&self) -> usize {
-        1
-    }
-
-    unsafe fn read_from_buffer(_buffer: &[u8], fds: &[RawFd]) -> MsgResult<(Self, usize)> {
-        if fds.is_empty() {
-            return Err(MsgError::ExpectFd);
-        }
-        Ok((fds[0], 1))
-    }
-    fn write_to_buffer(&self, _buffer: &mut [u8], fds: &mut [RawFd]) -> MsgResult<usize> {
-        if fds.is_empty() {
-            return Err(MsgError::WrongFdBufferSize);
-        }
-        fds[0] = *self;
-        Ok(1)
-    }
-}
-
 impl<T: MsgOnSocket> MsgOnSocket for Option<T> {
     fn uses_fd() -> bool {
         T::uses_fd()
@@ -178,7 +154,7 @@ impl<T: MsgOnSocket> MsgOnSocket for Option<T> {
     fn msg_size(&self) -> usize {
         match self {
             Some(v) => v.msg_size() + 1,
-            None => 0,
+            None => 1,
         }
     }
 
@@ -291,7 +267,7 @@ macro_rules! rawfd_impl {
                 Ok(($type::from_raw_fd(fds[0]), 1))
             }
             fn write_to_buffer(&self, _buffer: &mut [u8], fds: &mut [RawFd]) -> MsgResult<usize> {
-                if fds.len() < 1 {
+                if fds.is_empty() {
                     return Err(MsgError::WrongFdBufferSize);
                 }
                 fds[0] = self.as_raw_fd();
diff --git a/sys_util/src/descriptor.rs b/sys_util/src/descriptor.rs
index 1af72fc..9082394 100644
--- a/sys_util/src/descriptor.rs
+++ b/sys_util/src/descriptor.rs
@@ -2,11 +2,11 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-use std::os::unix::io::RawFd;
-
-use crate::{errno_result, Result};
 use std::mem;
 use std::ops::Drop;
+use std::os::unix::io::{AsRawFd, RawFd};
+
+use crate::{errno_result, Result};
 
 pub type RawDescriptor = RawFd;
 
@@ -60,6 +60,12 @@ impl FromRawDescriptor for SafeDescriptor {
     }
 }
 
+impl AsRawFd for SafeDescriptor {
+    fn as_raw_fd(&self) -> RawFd {
+        self.as_raw_descriptor()
+    }
+}
+
 impl SafeDescriptor {
     /// Clones this descriptor, internally creating a new descriptor. The new SafeDescriptor will
     /// share the same underlying count within the kernel.
diff --git a/vm_control/src/lib.rs b/vm_control/src/lib.rs
index a1d2964..a995f3a 100644
--- a/vm_control/src/lib.rs
+++ b/vm_control/src/lib.rs
@@ -19,7 +19,7 @@ use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
 use libc::{EINVAL, EIO, ENODEV};
 
 use kvm::{IrqRoute, IrqSource, Vm};
-use msg_socket::{MsgOnSocket, MsgReceiver, MsgResult, MsgSender, MsgSocket};
+use msg_socket::{MsgError, MsgOnSocket, MsgReceiver, MsgResult, MsgSender, MsgSocket};
 use resources::{Alloc, GpuMemoryDesc, MmioType, SystemAllocator};
 use sys_util::{error, Error as SysError, EventFd, GuestAddress, MemoryMapping, MmapError, Result};
 
@@ -53,13 +53,16 @@ impl MsgOnSocket for MaybeOwnedFd {
         1usize
     }
     unsafe fn read_from_buffer(buffer: &[u8], fds: &[RawFd]) -> MsgResult<(Self, usize)> {
-        let (fd, size) = RawFd::read_from_buffer(buffer, fds)?;
-        let file = File::from_raw_fd(fd);
+        let (file, size) = File::read_from_buffer(buffer, fds)?;
         Ok((MaybeOwnedFd::Owned(file), size))
     }
-    fn write_to_buffer(&self, buffer: &mut [u8], fds: &mut [RawFd]) -> MsgResult<usize> {
-        let fd = self.as_raw_fd();
-        fd.write_to_buffer(buffer, fds)
+    fn write_to_buffer(&self, _buffer: &mut [u8], fds: &mut [RawFd]) -> MsgResult<usize> {
+        if fds.is_empty() {
+            return Err(MsgError::WrongFdBufferSize);
+        }
+
+        fds[0] = self.as_raw_fd();
+        Ok(1)
     }
 }
 
diff --git a/x86_64/src/acpi.rs b/x86_64/src/acpi.rs
index 14b21a7..c1d92ee 100644
--- a/x86_64/src/acpi.rs
+++ b/x86_64/src/acpi.rs
@@ -5,6 +5,11 @@ use acpi_tables::{rsdp::RSDP, sdt::SDT};
 use data_model::DataInit;
 use sys_util::{GuestAddress, GuestMemory};
 
+pub struct ACPIDevResource {
+    pub amls: Vec<u8>,
+    pub pm_iobase: u64,
+}
+
 #[repr(C)]
 #[derive(Clone, Copy, Default)]
 struct LocalAPIC {
@@ -65,21 +70,7 @@ const MADT_ENABLED: u32 = 1;
 // XSDT
 const XSDT_REVISION: u8 = 1;
 
-fn create_dsdt_table() -> SDT {
-    // The hex tables in this file are generated from the ASL below with:
-    // "iasl -tc <dsdt.asl>"
-    // Below is the tables represents by the pm_dsdt_data
-    // Name (_S1, Package (0x04)  // _S1_: S1 System State
-    // {
-    //     One,
-    //     One,
-    //     Zero,
-    //     Zero
-    // })
-    let pm_dsdt_data = [
-        0x08u8, 0x5F, 0x53, 0x31, 0x5f, 0x12, 0x06, 0x04, 0x01, 0x01, 0x00, 0x00,
-    ];
-
+fn create_dsdt_table(amls: Vec<u8>) -> SDT {
     let mut dsdt = SDT::new(
         *b"DSDT",
         acpi_tables::HEADER_LEN,
@@ -88,7 +79,10 @@ fn create_dsdt_table() -> SDT {
         *b"CROSVMDT",
         OEM_REVISION,
     );
-    dsdt.append(pm_dsdt_data);
+
+    if amls.len() != 0 {
+        dsdt.append_slice(amls.as_slice());
+    }
 
     dsdt
 }
@@ -102,13 +96,19 @@ fn create_dsdt_table() -> SDT {
 /// * `sci_irq` - Used to fill the FACP SCI_INTERRUPT field, which
 ///               is going to be used by the ACPI drivers to register
 ///               sci handler.
-pub fn create_acpi_tables(guest_mem: &GuestMemory, num_cpus: u8, sci_irq: u32) -> GuestAddress {
+/// * `acpi_dev_resource` - resources needed by the ACPI devices for creating tables
+pub fn create_acpi_tables(
+    guest_mem: &GuestMemory,
+    num_cpus: u8,
+    sci_irq: u32,
+    acpi_dev_resource: ACPIDevResource,
+) -> GuestAddress {
     // RSDP is at the HI RSDP WINDOW
     let rsdp_offset = GuestAddress(super::ACPI_HI_RSDP_WINDOW_BASE);
     let mut tables: Vec<u64> = Vec::new();
 
     // DSDT
-    let dsdt = create_dsdt_table();
+    let dsdt = create_dsdt_table(acpi_dev_resource.amls);
     let dsdt_offset = rsdp_offset.checked_add(RSDP::len() as u64).unwrap();
     guest_mem
         .write_at_addr(dsdt.as_slice(), dsdt_offset)
@@ -134,14 +134,13 @@ pub fn create_acpi_tables(guest_mem: &GuestMemory, num_cpus: u8, sci_irq: u32) -
     // PM1A Event Block Address
     facp.write(
         FADT_FIELD_PM1A_EVENT_BLK_ADDR,
-        devices::acpi::ACPIPM_RESOURCE_BASE as u32,
+        acpi_dev_resource.pm_iobase as u32,
     );
 
     // PM1A Control Block Address
     facp.write(
         FADT_FIELD_PM1A_CONTROL_BLK_ADDR,
-        devices::acpi::ACPIPM_RESOURCE_BASE as u32
-            + devices::acpi::ACPIPM_RESOURCE_EVENTBLK_LEN as u32,
+        acpi_dev_resource.pm_iobase as u32 + devices::acpi::ACPIPM_RESOURCE_EVENTBLK_LEN as u32,
     );
 
     // PM1 Event Block Length
diff --git a/x86_64/src/lib.rs b/x86_64/src/lib.rs
index a6a02bc..b4c4aa7 100644
--- a/x86_64/src/lib.rs
+++ b/x86_64/src/lib.rs
@@ -54,6 +54,7 @@ use std::mem;
 use std::sync::Arc;
 
 use crate::bootparam::boot_params;
+use acpi_tables::aml::Aml;
 use arch::{
     get_serial_cmdline, GetSerialCmdlineError, RunnableLinuxVm, SerialHardware, SerialParameters,
     VmComponents, VmImage,
@@ -74,6 +75,7 @@ use vm_control::VmIrqRequestSocket;
 #[sorted]
 #[derive(Debug)]
 pub enum Error {
+    AllocateIOResouce(resources::Error),
     AllocateIrq,
     CloneEventFd(sys_util::Error),
     Cmdline(kernel_cmdline::Error),
@@ -124,6 +126,7 @@ impl Display for Error {
 
         #[sorted]
         match self {
+            AllocateIOResouce(e) => write!(f, "error allocating IO resource: {}", e),
             AllocateIrq => write!(f, "error allocating a single irq"),
             CloneEventFd(e) => write!(f, "unable to clone an EventFd: {}", e),
             Cmdline(e) => write!(f, "the given kernel command line was invalid: {}", e),
@@ -216,6 +219,7 @@ fn configure_system(
     setup_data: Option<GuestAddress>,
     initrd: Option<(GuestAddress, usize)>,
     mut params: boot_params,
+    acpi_dev_resource: acpi::ACPIDevResource,
 ) -> Result<()> {
     const EBDA_START: u64 = 0x0009fc00;
     const KERNEL_BOOT_FLAG_MAGIC: u16 = 0xaa55;
@@ -279,7 +283,8 @@ fn configure_system(
         .write_obj_at_addr(params, zero_page_addr)
         .map_err(|_| Error::ZeroPageSetup)?;
 
-    let rsdp_addr = acpi::create_acpi_tables(guest_mem, num_cpus, X86_64_SCI_IRQ);
+    let rsdp_addr =
+        acpi::create_acpi_tables(guest_mem, num_cpus, X86_64_SCI_IRQ, acpi_dev_resource);
     params.acpi_rsdp_addr = rsdp_addr.0;
 
     Ok(())
@@ -423,7 +428,6 @@ impl arch::LinuxArch for X8664arch {
             exit_evt.try_clone().map_err(Error::CloneEventFd)?,
             Some(pci_bus.clone()),
             components.memory_size,
-            suspend_evt.try_clone().map_err(Error::CloneEventFd)?,
         )?;
 
         Self::setup_serial_devices(
@@ -434,6 +438,12 @@ impl arch::LinuxArch for X8664arch {
             serial_jail,
         )?;
 
+        let acpi_dev_resource = Self::setup_acpi_devices(
+            &mut io_bus,
+            &mut resources,
+            suspend_evt.try_clone().map_err(Error::CloneEventFd)?,
+        )?;
+
         let ramoops_region = match components.pstore {
             Some(pstore) => Some(
                 arch::pstore::create_memory_region(&mut vm, &mut resources, &pstore)
@@ -506,6 +516,7 @@ impl arch::LinuxArch for X8664arch {
                     components.android_fstab,
                     kernel_end,
                     params,
+                    acpi_dev_resource,
                 )?;
             }
         }
@@ -592,6 +603,7 @@ impl X8664arch {
         android_fstab: Option<File>,
         kernel_end: u64,
         params: boot_params,
+        acpi_dev_resource: acpi::ACPIDevResource,
     ) -> Result<()> {
         kernel_loader::load_cmdline(mem, GuestAddress(CMDLINE_OFFSET), cmdline)
             .map_err(Error::LoadCmdline)?;
@@ -653,6 +665,7 @@ impl X8664arch {
             setup_data,
             initrd,
             params,
+            acpi_dev_resource,
         )?;
         Ok(())
     }
@@ -754,14 +767,12 @@ impl X8664arch {
     /// * - `gsi_relay`: only valid for split IRQ chip (i.e. userspace PIT/PIC/IOAPIC)
     /// * - `exit_evt` - the event fd object which should receive exit events
     /// * - `mem_size` - the size in bytes of physical ram for the guest
-    /// * - `suspend_evt` - the event fd object which used to suspend the vm
     fn setup_io_bus(
         _vm: &mut Vm,
         gsi_relay: &mut Option<GsiRelay>,
         exit_evt: EventFd,
         pci: Option<Arc<Mutex<devices::PciConfigIo>>>,
         mem_size: u64,
-        suspend_evt: EventFd,
     ) -> Result<devices::Bus> {
         struct NoDevice;
         impl devices::BusDevice for NoDevice {
@@ -826,18 +837,53 @@ impl X8664arch {
                 .unwrap();
         }
 
-        let pm = Arc::new(Mutex::new(devices::ACPIPMResource::new(suspend_evt)));
+        Ok(io_bus)
+    }
+
+    /// Sets up the ACPI devices for this platform and
+    /// returns the resources which are used to set the ACPI tables.
+    ///
+    /// # Arguments
+    ///
+    /// * - `io_bus` the I/O bus to add the devices to
+    /// * - `resources` the SystemAllocator to allocate IO and MMIO for acpi
+    ///                devices.
+    /// * - `suspend_evt` - the event fd object which is used to suspend the vm
+    fn setup_acpi_devices(
+        io_bus: &mut devices::Bus,
+        resources: &mut SystemAllocator,
+        suspend_evt: EventFd,
+    ) -> Result<acpi::ACPIDevResource> {
+        // The AML data for the acpi devices
+        let mut amls = Vec::new();
+
+        let pm_alloc = resources.get_anon_alloc();
+        let pm_iobase = match resources.io_allocator() {
+            Some(io) => io
+                .allocate_with_align(
+                    devices::acpi::ACPIPM_RESOURCE_LEN as u64,
+                    pm_alloc,
+                    "ACPIPM".to_string(),
+                    devices::acpi::ACPIPM_RESOURCE_LEN as u64,
+                )
+                .map_err(Error::AllocateIOResouce)?,
+            None => 0x600,
+        };
+
+        let pmresource = devices::ACPIPMResource::new(suspend_evt);
+        Aml::to_aml_bytes(&pmresource, &mut amls);
+        let pm = Arc::new(Mutex::new(pmresource));
         io_bus
             .insert(
                 pm.clone(),
-                devices::acpi::ACPIPM_RESOURCE_BASE,
+                pm_iobase as u64,
                 devices::acpi::ACPIPM_RESOURCE_LEN as u64,
                 false,
             )
             .unwrap();
         io_bus.notify_on_resume(pm);
 
-        Ok(io_bus)
+        Ok(acpi::ACPIDevResource { amls, pm_iobase })
     }
 
     /// Sets up the serial devices for this platform. Returns the serial port number and serial