diff options
Diffstat (limited to 'hypervisor')
-rw-r--r-- | hypervisor/src/kvm/aarch64.rs | 18 | ||||
-rw-r--r-- | hypervisor/src/kvm/mod.rs | 196 | ||||
-rw-r--r-- | hypervisor/src/kvm/x86_64.rs | 238 | ||||
-rw-r--r-- | hypervisor/src/lib.rs | 65 | ||||
-rw-r--r-- | hypervisor/src/x86_64.rs | 2 |
5 files changed, 462 insertions, 57 deletions
diff --git a/hypervisor/src/kvm/aarch64.rs b/hypervisor/src/kvm/aarch64.rs index 4f0398f..4e5b65c 100644 --- a/hypervisor/src/kvm/aarch64.rs +++ b/hypervisor/src/kvm/aarch64.rs @@ -2,10 +2,24 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. -use sys_util::Result; +use libc::ENXIO; + +use sys_util::{Error, Result}; use super::{KvmVcpu, KvmVm}; -use crate::{VcpuAArch64, VmAArch64}; +use crate::{ClockState, VcpuAArch64, VmAArch64}; + +impl KvmVm { + /// Arch-specific implementation of `Vm::get_pvclock`. Always returns an error on AArch64. + pub fn get_pvclock_arch(&self) -> Result<ClockState> { + Err(Error::new(ENXIO)) + } + + /// Arch-specific implementation of `Vm::set_pvclock`. Always returns an error on AArch64. + pub fn set_pvclock_arch(&self, _state: &ClockState) -> Result<()> { + Err(Error::new(ENXIO)) + } +} impl VmAArch64 for KvmVm { type Vcpu = KvmVcpu; diff --git a/hypervisor/src/kvm/mod.rs b/hypervisor/src/kvm/mod.rs index 0550792..c738cfa 100644 --- a/hypervisor/src/kvm/mod.rs +++ b/hypervisor/src/kvm/mod.rs @@ -8,23 +8,23 @@ mod aarch64; mod x86_64; use std::cmp::Ordering; -use std::collections::{BinaryHeap, HashMap}; +use std::collections::{BTreeMap, BinaryHeap}; use std::convert::TryFrom; use std::ops::{Deref, DerefMut}; use std::os::raw::{c_char, c_ulong}; use std::os::unix::io::{AsRawFd, RawFd}; use std::sync::Arc; -use libc::{open, O_CLOEXEC, O_RDWR}; +use libc::{open, EFAULT, EINVAL, EIO, ENOENT, ENOSPC, EOVERFLOW, O_CLOEXEC, O_RDWR}; use kvm_sys::*; use sync::Mutex; use sys_util::{ errno_result, ioctl, ioctl_with_ref, ioctl_with_val, AsRawDescriptor, Error, FromRawDescriptor, - GuestMemory, RawDescriptor, Result, SafeDescriptor, + GuestAddress, GuestMemory, MappedRegion, MmapError, RawDescriptor, Result, SafeDescriptor, }; -use crate::{Hypervisor, HypervisorCap, MappedRegion, RunnableVcpu, Vcpu, VcpuExit, Vm}; +use crate::{ClockState, Hypervisor, HypervisorCap, RunnableVcpu, Vcpu, VcpuExit, 
Vm}; // Wrapper around KVM_SET_USER_MEMORY_REGION ioctl, which creates, modifies, or deletes a mapping // from guest physical to host user pages. @@ -128,7 +128,7 @@ impl PartialOrd for MemSlot { pub struct KvmVm { vm: SafeDescriptor, guest_mem: GuestMemory, - mem_regions: Arc<Mutex<HashMap<u32, Box<dyn MappedRegion>>>>, + mem_regions: Arc<Mutex<BTreeMap<u32, Box<dyn MappedRegion>>>>, mem_slot_gaps: Arc<Mutex<BinaryHeap<MemSlot>>>, } @@ -151,7 +151,7 @@ impl KvmVm { index as u32, false, false, - guest_addr.offset() as u64, + guest_addr.offset(), size as u64, host_addr as *mut u8, ) @@ -161,7 +161,7 @@ impl KvmVm { Ok(KvmVm { vm: vm_descriptor, guest_mem, - mem_regions: Arc::new(Mutex::new(HashMap::new())), + mem_regions: Arc::new(Mutex::new(BTreeMap::new())), mem_slot_gaps: Arc::new(Mutex::new(BinaryHeap::new())), }) } @@ -169,6 +169,34 @@ impl KvmVm { fn create_kvm_vcpu(&self, _id: usize) -> Result<KvmVcpu> { Ok(KvmVcpu {}) } + + /// Creates an in-kernel interrupt controller. + /// + /// See the documentation on the KVM_CREATE_IRQCHIP ioctl. + pub fn create_irq_chip(&self) -> Result<()> { + // Safe because we know that our file is a VM fd and we verify the return result. + let ret = unsafe { ioctl(self, KVM_CREATE_IRQCHIP()) }; + if ret == 0 { + Ok(()) + } else { + errno_result() + } + } + /// Sets the level on the given irq to 1 if `active` is true, and 0 otherwise. + pub fn set_irq_line(&self, irq: u32, active: bool) -> Result<()> { + let mut irq_level = kvm_irq_level::default(); + irq_level.__bindgen_anon_1.irq = irq; + irq_level.level = if active { 1 } else { 0 }; + + // Safe because we know that our file is a VM fd, we know the kernel will only read the + // correct amount of memory from our pointer, and we verify the return result. 
+ let ret = unsafe { ioctl_with_ref(self, KVM_IRQ_LINE(), &irq_level) }; + if ret == 0 { + Ok(()) + } else { + errno_result() + } + } } impl Vm for KvmVm { @@ -184,6 +212,83 @@ impl Vm for KvmVm { fn get_memory(&self) -> &GuestMemory { &self.guest_mem } + + fn add_memory_region( + &mut self, + guest_addr: GuestAddress, + mem: Box<dyn MappedRegion>, + read_only: bool, + log_dirty_pages: bool, + ) -> Result<u32> { + let size = mem.size() as u64; + let end_addr = guest_addr.checked_add(size).ok_or(Error::new(EOVERFLOW))?; + if self.guest_mem.range_overlap(guest_addr, end_addr) { + return Err(Error::new(ENOSPC)); + } + let mut regions = self.mem_regions.lock(); + let mut gaps = self.mem_slot_gaps.lock(); + let slot = match gaps.pop() { + Some(gap) => gap.0, + None => (regions.len() + self.guest_mem.num_regions() as usize) as u32, + }; + + // Safe because we check that the given guest address is valid and has no overlaps. We also + // know that the pointer and size are correct because the MemoryMapping interface ensures + // this. We take ownership of the memory mapping so that it won't be unmapped until the slot + // is removed. 
+ let res = unsafe { + set_user_memory_region( + &self.vm, + slot, + read_only, + log_dirty_pages, + guest_addr.offset() as u64, + size, + mem.as_ptr(), + ) + }; + + if let Err(e) = res { + gaps.push(MemSlot(slot)); + return Err(e); + } + regions.insert(slot, mem); + Ok(slot) + } + + fn msync_memory_region(&mut self, slot: u32, offset: usize, size: usize) -> Result<()> { + let mut regions = self.mem_regions.lock(); + let mem = regions.get_mut(&slot).ok_or(Error::new(ENOENT))?; + + mem.msync(offset, size).map_err(|err| match err { + MmapError::InvalidAddress => Error::new(EFAULT), + MmapError::NotPageAligned => Error::new(EINVAL), + MmapError::SystemCallFailed(e) => e, + _ => Error::new(EIO), + }) + } + + fn remove_memory_region(&mut self, slot: u32) -> Result<()> { + let mut regions = self.mem_regions.lock(); + if !regions.contains_key(&slot) { + return Err(Error::new(ENOENT)); + } + // Safe because the slot is checked against the list of memory slots. + unsafe { + set_user_memory_region(&self.vm, slot, false, false, 0, 0, std::ptr::null_mut())?; + } + self.mem_slot_gaps.lock().push(MemSlot(slot)); + regions.remove(&slot); + Ok(()) + } + + fn get_pvclock(&self) -> Result<ClockState> { + self.get_pvclock_arch() + } + + fn set_pvclock(&self, state: &ClockState) -> Result<()> { + self.set_pvclock_arch(state) + } } impl AsRawDescriptor for KvmVm { @@ -265,7 +370,7 @@ impl<'a> TryFrom<&'a HypervisorCap> for KvmCap { mod tests { use super::*; use std::thread; - use sys_util::GuestAddress; + use sys_util::{GuestAddress, MemoryMapping, MemoryMappingArena}; #[test] fn new() { @@ -316,4 +421,79 @@ mod tests { let read_val: u8 = vm.get_memory().read_obj_from_addr(obj_addr).unwrap(); assert_eq!(read_val, 67u8); } + + #[test] + fn add_memory() { + let kvm = Kvm::new().unwrap(); + let gm = + GuestMemory::new(&[(GuestAddress(0), 0x1000), (GuestAddress(0x5000), 0x5000)]).unwrap(); + let mut vm = KvmVm::new(&kvm, gm).unwrap(); + let mem_size = 0x1000; + let mem = 
MemoryMapping::new(mem_size).unwrap(); + vm.add_memory_region(GuestAddress(0x1000), Box::new(mem), false, false) + .unwrap(); + let mem = MemoryMapping::new(mem_size).unwrap(); + vm.add_memory_region(GuestAddress(0x10000), Box::new(mem), false, false) + .unwrap(); + } + + #[test] + fn add_memory_ro() { + let kvm = Kvm::new().unwrap(); + let gm = GuestMemory::new(&[(GuestAddress(0), 0x1000)]).unwrap(); + let mut vm = KvmVm::new(&kvm, gm).unwrap(); + let mem_size = 0x1000; + let mem = MemoryMapping::new(mem_size).unwrap(); + vm.add_memory_region(GuestAddress(0x1000), Box::new(mem), true, false) + .unwrap(); + } + + #[test] + fn remove_memory() { + let kvm = Kvm::new().unwrap(); + let gm = GuestMemory::new(&[(GuestAddress(0), 0x1000)]).unwrap(); + let mut vm = KvmVm::new(&kvm, gm).unwrap(); + let mem_size = 0x1000; + let mem = MemoryMapping::new(mem_size).unwrap(); + let slot = vm + .add_memory_region(GuestAddress(0x1000), Box::new(mem), false, false) + .unwrap(); + vm.remove_memory_region(slot).unwrap(); + } + + #[test] + fn remove_invalid_memory() { + let kvm = Kvm::new().unwrap(); + let gm = GuestMemory::new(&[(GuestAddress(0), 0x1000)]).unwrap(); + let mut vm = KvmVm::new(&kvm, gm).unwrap(); + assert!(vm.remove_memory_region(0).is_err()); + } + + #[test] + fn overlap_memory() { + let kvm = Kvm::new().unwrap(); + let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap(); + let mut vm = KvmVm::new(&kvm, gm).unwrap(); + let mem_size = 0x2000; + let mem = MemoryMapping::new(mem_size).unwrap(); + assert!(vm + .add_memory_region(GuestAddress(0x2000), Box::new(mem), false, false) + .is_err()); + } + + #[test] + fn sync_memory() { + let kvm = Kvm::new().unwrap(); + let gm = + GuestMemory::new(&[(GuestAddress(0), 0x1000), (GuestAddress(0x5000), 0x5000)]).unwrap(); + let mut vm = KvmVm::new(&kvm, gm).unwrap(); + let mem_size = 0x1000; + let mem = MemoryMappingArena::new(mem_size).unwrap(); + let slot = vm + .add_memory_region(GuestAddress(0x1000), Box::new(mem), 
false, false) + .unwrap(); + vm.msync_memory_region(slot, mem_size, 0).unwrap(); + assert!(vm.msync_memory_region(slot, mem_size + 1, 0).is_err()); + assert!(vm.msync_memory_region(slot + 1, mem_size, 0).is_err()); + } } diff --git a/hypervisor/src/kvm/x86_64.rs b/hypervisor/src/kvm/x86_64.rs index 06774f4..eaa34cf 100644 --- a/hypervisor/src/kvm/x86_64.rs +++ b/hypervisor/src/kvm/x86_64.rs @@ -4,14 +4,17 @@ use std::convert::TryInto; -use kvm_sys::*; use libc::E2BIG; -use sys_util::{ioctl_with_mut_ptr, Error, Result}; + +use kvm_sys::*; +use sys_util::{ + errno_result, ioctl_with_mut_ptr, ioctl_with_mut_ref, ioctl_with_ref, Error, Result, +}; use super::{Kvm, KvmVcpu, KvmVm}; use crate::{ - CpuId, CpuIdEntry, HypervisorX86_64, IoapicRedirectionTableEntry, IoapicState, LapicState, - PicState, PitChannelState, PitState, Regs, VcpuX86_64, VmX86_64, + ClockState, CpuId, CpuIdEntry, HypervisorX86_64, IoapicRedirectionTableEntry, IoapicState, + LapicState, PicSelect, PicState, PitChannelState, PitState, Regs, VcpuX86_64, VmX86_64, }; type KvmCpuId = kvm::CpuId; @@ -54,6 +57,180 @@ impl Kvm { } } +impl HypervisorX86_64 for Kvm { + fn get_supported_cpuid(&self) -> Result<CpuId> { + self.get_cpuid(KVM_GET_SUPPORTED_CPUID()) + } + + fn get_emulated_cpuid(&self) -> Result<CpuId> { + self.get_cpuid(KVM_GET_EMULATED_CPUID()) + } +} + +impl KvmVm { + /// Arch-specific implementation of `Vm::get_pvclock`. + pub fn get_pvclock_arch(&self) -> Result<ClockState> { + // Safe because we know that our file is a VM fd, we know the kernel will only write correct + // amount of memory to our pointer, and we verify the return result. + let mut clock_data: kvm_clock_data = unsafe { std::mem::zeroed() }; + let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_CLOCK(), &mut clock_data) }; + if ret == 0 { + Ok(ClockState::from(clock_data)) + } else { + errno_result() + } + } + + /// Arch-specific implementation of `Vm::set_pvclock`. 
+ pub fn set_pvclock_arch(&self, state: &ClockState) -> Result<()> { + let clock_data = kvm_clock_data::from(*state); + // Safe because we know that our file is a VM fd, we know the kernel will only read correct + // amount of memory from our pointer, and we verify the return result. + let ret = unsafe { ioctl_with_ref(self, KVM_SET_CLOCK(), &clock_data) }; + if ret == 0 { + Ok(()) + } else { + errno_result() + } + } + + /// Retrieves the state of given interrupt controller by issuing KVM_GET_IRQCHIP ioctl. + /// + /// Note that this call can only succeed after a call to `Vm::create_irq_chip`. + pub fn get_pic_state(&self, id: PicSelect) -> Result<kvm_pic_state> { + let mut irqchip_state = kvm_irqchip::default(); + irqchip_state.chip_id = id as u32; + let ret = unsafe { + // Safe because we know our file is a VM fd, we know the kernel will only write + // correct amount of memory to our pointer, and we verify the return result. + ioctl_with_mut_ref(self, KVM_GET_IRQCHIP(), &mut irqchip_state) + }; + if ret == 0 { + Ok(unsafe { + // Safe as we know that we are retrieving data related to the + // PIC (primary or secondary) and not IOAPIC. + irqchip_state.chip.pic + }) + } else { + errno_result() + } + } + + /// Sets the state of given interrupt controller by issuing KVM_SET_IRQCHIP ioctl. + /// + /// Note that this call can only succeed after a call to `Vm::create_irq_chip`. + pub fn set_pic_state(&self, id: PicSelect, state: &kvm_pic_state) -> Result<()> { + let mut irqchip_state = kvm_irqchip::default(); + irqchip_state.chip_id = id as u32; + irqchip_state.chip.pic = *state; + // Safe because we know that our file is a VM fd, we know the kernel will only read + // correct amount of memory from our pointer, and we verify the return result. + let ret = unsafe { ioctl_with_ref(self, KVM_SET_IRQCHIP(), &irqchip_state) }; + if ret == 0 { + Ok(()) + } else { + errno_result() + } + } + + /// Retrieves the state of IOAPIC by issuing KVM_GET_IRQCHIP ioctl. 
+ /// + /// Note that this call can only succeed after a call to `Vm::create_irq_chip`. + pub fn get_ioapic_state(&self) -> Result<kvm_ioapic_state> { + let mut irqchip_state = kvm_irqchip::default(); + irqchip_state.chip_id = 2; + let ret = unsafe { + // Safe because we know our file is a VM fd, we know the kernel will only write + // correct amount of memory to our pointer, and we verify the return result. + ioctl_with_mut_ref(self, KVM_GET_IRQCHIP(), &mut irqchip_state) + }; + if ret == 0 { + Ok(unsafe { + // Safe as we know that we are retrieving data related to the + // IOAPIC and not PIC. + irqchip_state.chip.ioapic + }) + } else { + errno_result() + } + } + + /// Sets the state of IOAPIC by issuing KVM_SET_IRQCHIP ioctl. + /// + /// Note that this call can only succeed after a call to `Vm::create_irq_chip`. + pub fn set_ioapic_state(&self, state: &kvm_ioapic_state) -> Result<()> { + let mut irqchip_state = kvm_irqchip::default(); + irqchip_state.chip_id = 2; + irqchip_state.chip.ioapic = *state; + // Safe because we know that our file is a VM fd, we know the kernel will only read + // correct amount of memory from our pointer, and we verify the return result. + let ret = unsafe { ioctl_with_ref(self, KVM_SET_IRQCHIP(), &irqchip_state) }; + if ret == 0 { + Ok(()) + } else { + errno_result() + } + } + + /// Creates a PIT as per the KVM_CREATE_PIT2 ioctl. + /// + /// Note that this call can only succeed after a call to `Vm::create_irq_chip`. + pub fn create_pit(&self) -> Result<()> { + let pit_config = kvm_pit_config::default(); + // Safe because we know that our file is a VM fd, we know the kernel will only read the + // correct amount of memory from our pointer, and we verify the return result. + let ret = unsafe { ioctl_with_ref(self, KVM_CREATE_PIT2(), &pit_config) }; + if ret == 0 { + Ok(()) + } else { + errno_result() + } + } + + /// Retrieves the state of PIT by issuing KVM_GET_PIT2 ioctl. 
+ /// + /// Note that this call can only succeed after a call to `Vm::create_pit`. + pub fn get_pit_state(&self) -> Result<kvm_pit_state2> { + // Safe because we know that our file is a VM fd, we know the kernel will only write + // correct amount of memory to our pointer, and we verify the return result. + let mut pit_state = unsafe { std::mem::zeroed() }; + let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_PIT2(), &mut pit_state) }; + if ret == 0 { + Ok(pit_state) + } else { + errno_result() + } + } + + /// Sets the state of PIT by issuing KVM_SET_PIT2 ioctl. + /// + /// Note that this call can only succeed after a call to `Vm::create_pit`. + pub fn set_pit_state(&self, pit_state: &kvm_pit_state2) -> Result<()> { + // Safe because we know that our file is a VM fd, we know the kernel will only read + // correct amount of memory from our pointer, and we verify the return result. + let ret = unsafe { ioctl_with_ref(self, KVM_SET_PIT2(), pit_state) }; + if ret == 0 { + Ok(()) + } else { + errno_result() + } + } +} + +impl VmX86_64 for KvmVm { + type Vcpu = KvmVcpu; + + fn create_vcpu(&self, id: usize) -> Result<Self::Vcpu> { + self.create_kvm_vcpu(id) + } +} + +impl VcpuX86_64 for KvmVcpu { + fn get_regs(&self) -> Result<Regs> { + Ok(Regs {}) + } +} + impl<'a> From<&'a KvmCpuId> for CpuId { fn from(kvm_cpuid: &'a KvmCpuId) -> CpuId { let kvm_entries = kvm_cpuid.entries_slice(); @@ -74,27 +251,22 @@ impl<'a> From<&'a KvmCpuId> for CpuId { } } -impl HypervisorX86_64 for Kvm { - fn get_supported_cpuid(&self) -> Result<CpuId> { - self.get_cpuid(KVM_GET_SUPPORTED_CPUID()) - } - - fn get_emulated_cpuid(&self) -> Result<CpuId> { - self.get_cpuid(KVM_GET_EMULATED_CPUID()) - } -} - -impl VmX86_64 for KvmVm { - type Vcpu = KvmVcpu; - - fn create_vcpu(&self, id: usize) -> Result<Self::Vcpu> { - self.create_kvm_vcpu(id) +impl From<ClockState> for kvm_clock_data { + fn from(state: ClockState) -> Self { + kvm_clock_data { + clock: state.clock, + flags: state.flags, + 
..Default::default() + } } } -impl VcpuX86_64 for KvmVcpu { - fn get_regs(&self) -> Result<Regs> { - Ok(Regs {}) +impl From<kvm_clock_data> for ClockState { + fn from(clock_data: kvm_clock_data) -> Self { + ClockState { + clock: clock_data.clock, + flags: clock_data.flags, + } } } @@ -305,15 +477,13 @@ impl From<&kvm_pit_channel_state> for PitChannelState { #[cfg(test)] mod tests { + use super::*; use crate::{ - DeliveryMode, DeliveryStatus, DestinationMode, IoapicRedirectionTableEntry, IoapicState, - LapicState, PicInitState, PicState, PitChannelState, PitRWMode, PitRWState, PitState, - TriggerMode, + DeliveryMode, DeliveryStatus, DestinationMode, HypervisorX86_64, + IoapicRedirectionTableEntry, IoapicState, LapicState, PicInitState, PicState, + PitChannelState, PitRWMode, PitRWState, PitState, TriggerMode, Vm, }; - use kvm_sys::*; - - use super::Kvm; - use crate::HypervisorX86_64; + use sys_util::{GuestAddress, GuestMemory}; #[test] fn get_supported_cpuid() { @@ -501,4 +671,14 @@ mod tests { // convert back and compare assert_eq!(state, PitState::from(&kvm_state)); } + + #[test] + fn clock_handling() { + let kvm = Kvm::new().unwrap(); + let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap(); + let vm = KvmVm::new(&kvm, gm).unwrap(); + let mut clock_data = vm.get_pvclock().unwrap(); + clock_data.clock += 1000; + vm.set_pvclock(&clock_data).unwrap(); + } } diff --git a/hypervisor/src/lib.rs b/hypervisor/src/lib.rs index 784af8c..f098720 100644 --- a/hypervisor/src/lib.rs +++ b/hypervisor/src/lib.rs @@ -12,7 +12,7 @@ pub mod x86_64; use std::ops::{Deref, DerefMut}; -use sys_util::{GuestMemory, Result}; +use sys_util::{GuestAddress, GuestMemory, MappedRegion, Result}; #[cfg(any(target_arch = "arm", target_arch = "aarch64"))] pub use crate::aarch64::*; @@ -33,6 +33,43 @@ pub trait Vm: Send + Sized { /// Gets the guest-mapped memory for the Vm. 
fn get_memory(&self) -> &GuestMemory; + + /// Inserts the given `MappedRegion` into the VM's address space at `guest_addr`. + /// + /// The slot that was assigned the memory mapping is returned on success. The slot can be given + /// to `Vm::remove_memory_region` to remove the memory from the VM's address space and take back + /// ownership of `mem_region`. + /// + /// Note that memory inserted into the VM's address space must not overlap with any other memory + /// slot's region. + /// + /// If `read_only` is true, the guest will be able to read the memory as normal, but attempts to + /// write will trigger a mmio VM exit, leaving the memory untouched. + /// + /// If `log_dirty_pages` is true, the slot number can be used to retrieve the pages written to + /// by the guest with `get_dirty_log`. + fn add_memory_region( + &mut self, + guest_addr: GuestAddress, + mem_region: Box<dyn MappedRegion>, + read_only: bool, + log_dirty_pages: bool, + ) -> Result<u32>; + + /// Does a synchronous msync of the memory mapped at `slot`, syncing `size` bytes starting at + /// `offset` from the start of the region. `offset` must be page aligned. + fn msync_memory_region(&mut self, slot: u32, offset: usize, size: usize) -> Result<()>; + + /// Removes and drops the `UserMemoryRegion` that was previously added at the given slot. + fn remove_memory_region(&mut self, slot: u32) -> Result<()>; + + /// Retrieves the current timestamp of the paravirtual clock as seen by the current guest. + /// Only works on VMs that support `VmCap::PvClock`. + fn get_pvclock(&self) -> Result<ClockState>; + + /// Sets the current timestamp of the paravirtual clock as seen by the current guest. + /// Only works on VMs that support `VmCap::PvClock`. + fn set_pvclock(&self, state: &ClockState) -> Result<()>; } /// A wrapper around using a VCPU. 
@@ -61,26 +98,20 @@ pub trait RunnableVcpu: Deref<Target = <Self as RunnableVcpu>::Vcpu> + DerefMut fn run(&self) -> Result<VcpuExit>; } -/// A memory region in the current process that can be mapped into the guest's memory. -/// -/// Safe when implementers guarantee `ptr`..`ptr+size` is an mmaped region owned by this object that -/// can't be unmapped during the `MappedRegion`'s lifetime. -pub unsafe trait MappedRegion: Send + Sync { - /// Returns a pointer to the beginning of the memory region. Should only be - /// used for passing this region to ioctls for setting guest memory. - fn as_ptr(&self) -> *mut u8; - - /// Returns the size of the memory region in bytes. - fn size(&self) -> usize; - - /// Flushes changes to this memory region to the backing file. - fn msync(&self) -> Result<()>; -} - /// A reason why a VCPU exited. One of these returns every time `Vcpu::run` is called. #[derive(Debug)] pub enum VcpuExit { Unknown, } +/// A single route for an IRQ. pub struct IrqRoute {} + +/// The state of the paravirtual clock +#[derive(Debug, Default, Copy, Clone)] +pub struct ClockState { + /// Current pv clock timestamp, as seen by the guest + pub clock: u64, + /// Hypervisor-specific feature flags for the pv clock + pub flags: u32, +} diff --git a/hypervisor/src/x86_64.rs b/hypervisor/src/x86_64.rs index c311924..859ebff 100644 --- a/hypervisor/src/x86_64.rs +++ b/hypervisor/src/x86_64.rs @@ -339,6 +339,6 @@ pub struct PitChannelState { pub bcd: bool, /// Value of the gate input pin. This only applies to channel 2. pub gate: bool, - /// Guest boot nanosecond timestamp of when the count value was loaded. + /// Nanosecond timestamp of when the count value was loaded. pub count_load_time: u64, } |