diff options
author | Matt Delco <delco@chromium.org> | 2019-09-19 10:30:41 -0700 |
---|---|---|
committer | Commit Bot <commit-bot@chromium.org> | 2019-10-31 06:18:09 +0000 |
commit | ac0b9b71d142f381d39162a1ac52c7d143700a1b (patch) | |
tree | e9a6cabc3feae921b0514354fc3e86dbb778c853 /src/plugin | |
parent | 5bff67d485f22fcbd391231dad1666cc849deb36 (diff) | |
download | crosvm-ac0b9b71d142f381d39162a1ac52c7d143700a1b.tar crosvm-ac0b9b71d142f381d39162a1ac52c7d143700a1b.tar.gz crosvm-ac0b9b71d142f381d39162a1ac52c7d143700a1b.tar.bz2 crosvm-ac0b9b71d142f381d39162a1ac52c7d143700a1b.tar.lz crosvm-ac0b9b71d142f381d39162a1ac52c7d143700a1b.tar.xz crosvm-ac0b9b71d142f381d39162a1ac52c7d143700a1b.tar.zst crosvm-ac0b9b71d142f381d39162a1ac52c7d143700a1b.zip |
crosvm: pre-cache answers to plugin get calls
This change tries to improve the performance of a plugin-based VM by adding a hint API that allows crosvm to proactively push cpu state to the plugin when certain ports for hypercalls are accessed by the VM. BUG=None TEST=build and run; observed a significant performance increase. Change-Id: I71af24ebc034095ffea42eedb9ffda0afc719cd6 Signed-off-by: Matt Delco <delco@chromium.org> Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/platform/crosvm/+/1873005 Tested-by: kokoro <noreply+kokoro@google.com> Reviewed-by: Zach Reizner <zachr@chromium.org>
Diffstat (limited to 'src/plugin')
-rw-r--r-- | src/plugin/process.rs | 32 | ||||
-rw-r--r-- | src/plugin/vcpu.rs | 87 |
2 files changed, 115 insertions, 4 deletions
diff --git a/src/plugin/process.rs b/src/plugin/process.rs index 50b4465..2869847 100644 --- a/src/plugin/process.rs +++ b/src/plugin/process.rs @@ -418,6 +418,35 @@ impl Process { vm.set_gsi_routing(&routes[..]) } + fn handle_set_call_hint(&mut self, hints: &MainRequest_SetCallHint) -> SysResult<()> { + let mut regs: Vec<CallHintDetails> = vec![]; + for hint in &hints.hints { + regs.push(CallHintDetails { + match_rax: hint.match_rax, + match_rbx: hint.match_rbx, + match_rcx: hint.match_rcx, + match_rdx: hint.match_rdx, + rax: hint.rax, + rbx: hint.rbx, + rcx: hint.rcx, + rdx: hint.rdx, + send_sregs: hint.send_sregs, + send_debugregs: hint.send_debugregs, + }); + } + match self.shared_vcpu_state.write() { + Ok(mut lock) => { + let space = match hints.space { + AddressSpace::IOPORT => IoSpace::Ioport, + AddressSpace::MMIO => IoSpace::Mmio, + }; + lock.set_hint(space, hints.address, hints.on_write, regs); + Ok(()) + } + Err(_) => Err(SysError::new(EDEADLK)), + } + } + fn handle_pause_vcpus(&self, vcpu_handles: &[JoinHandle<()>], cpu_mask: u64, user_data: u64) { for (cpu_id, (handle, per_cpu_state)) in vcpu_handles.iter().zip(&self.per_vcpu_states).enumerate() @@ -631,6 +660,9 @@ impl Process { } None => Err(SysError::new(ENODATA)), } + } else if request.has_set_call_hint() { + response.mut_set_call_hint(); + self.handle_set_call_hint(request.get_set_call_hint()) } else if request.has_dirty_log() { let dirty_log_response = response.mut_dirty_log(); match self.objects.get(&request.get_dirty_log().id) { diff --git a/src/plugin/vcpu.rs b/src/plugin/vcpu.rs index 05970f7..03d63b4 100644 --- a/src/plugin/vcpu.rs +++ b/src/plugin/vcpu.rs @@ -30,7 +30,7 @@ use sys_util::{error, LayoutAllocation}; use super::*; /// Identifier for an address space in the VM. 
-#[derive(Copy, Clone)] +#[derive(Copy, Clone, PartialEq)] pub enum IoSpace { Ioport, Mmio, @@ -137,11 +137,32 @@ fn set_vcpu_state(vcpu: &Vcpu, state_set: VcpuRequest_StateSet, state: &[u8]) -> } } +pub struct CallHintDetails { + pub match_rax: bool, + pub match_rbx: bool, + pub match_rcx: bool, + pub match_rdx: bool, + pub rax: u64, + pub rbx: u64, + pub rcx: u64, + pub rdx: u64, + pub send_sregs: bool, + pub send_debugregs: bool, +} + +pub struct CallHint { + io_space: IoSpace, + addr: u64, + on_write: bool, + regs: Vec<CallHintDetails>, +} + /// State shared by every VCPU, grouped together to make edits to the state coherent across VCPUs. #[derive(Default)] pub struct SharedVcpuState { ioport_regions: BTreeSet<Range>, mmio_regions: BTreeSet<Range>, + hint: Option<CallHint>, } impl SharedVcpuState { @@ -191,6 +212,26 @@ impl SharedVcpuState { } } + pub fn set_hint( + &mut self, + space: IoSpace, + addr: u64, + on_write: bool, + regs: Vec<CallHintDetails>, + ) { + if addr == 0 { + self.hint = None; + } else { + let hint = CallHint { + io_space: space, + addr, + on_write, + regs, + }; + self.hint = Some(hint); + } + } + fn is_reserved(&self, space: IoSpace, addr: u64) -> bool { if let Some(Range(start, len)) = self.first_before(space, addr) { let offset = addr - start; @@ -212,6 +253,28 @@ impl SharedVcpuState { None => None, } } + + fn matches_hint(&self, io_space: IoSpace, addr: u64, is_write: bool) -> bool { + if let Some(hint) = &self.hint { + return io_space == hint.io_space && addr == hint.addr && is_write == hint.on_write; + } + false + } + + fn check_hint_details(&self, regs: &kvm_regs) -> (bool, bool) { + if let Some(hint) = &self.hint { + for entry in hint.regs.iter() { + if (!entry.match_rax || entry.rax == regs.rax) + && (!entry.match_rbx || entry.rbx == regs.rbx) + && (!entry.match_rcx || entry.rcx == regs.rcx) + && (!entry.match_rdx || entry.rdx == regs.rdx) + { + return (entry.send_sregs, entry.send_debugregs); + } + } + } + (false, false) + } } /// 
State specific to a VCPU, grouped so that each `PluginVcpu` object will share a canonical @@ -338,9 +401,6 @@ impl PluginVcpu { }; let first_before_addr = vcpu_state_lock.first_before(io_space, addr); - // Drops the read lock as soon as possible, to prevent holding lock while blocked in - // `handle_until_resume`. - drop(vcpu_state_lock); match first_before_addr { Some(Range(start, len)) => { @@ -358,6 +418,25 @@ impl PluginVcpu { io.address = addr; io.is_write = data.is_write(); io.data = data.as_slice().to_vec(); + if vcpu_state_lock.matches_hint(io_space, addr, io.is_write) { + if let Ok(regs) = vcpu.get_regs() { + let (has_sregs, has_debugregs) = vcpu_state_lock.check_hint_details(®s); + io.regs = VcpuRegs(regs).as_slice().to_vec(); + if has_sregs { + if let Ok(state) = get_vcpu_state(vcpu, VcpuRequest_StateSet::SREGS) { + io.sregs = state; + } + } + if has_debugregs { + if let Ok(state) = get_vcpu_state(vcpu, VcpuRequest_StateSet::DEBUGREGS) + { + io.debugregs = state; + } + } + } + } + // don't hold lock while blocked in `handle_until_resume`. + drop(vcpu_state_lock); self.wait_reason.set(Some(wait_reason)); match self.handle_until_resume(vcpu) { |