diff options
author | Xiong Zhang <xiong.y.zhang@intel.corp-partner.google.com> | 2020-01-02 18:17:43 +0800 |
---|---|---|
committer | Commit Bot <commit-bot@chromium.org> | 2020-03-05 09:03:33 +0000 |
commit | 2f7dabbd6a0d8620e4b19b92cdae24c08e4c7ccc (patch) | |
tree | 122c4d7f3d9bf97de7f9208f74912a077fe09415 | |
parent | db4c70d2151d054b6b4df58be432e500aeafecbe (diff) | |
download | crosvm-2f7dabbd6a0d8620e4b19b92cdae24c08e4c7ccc.tar crosvm-2f7dabbd6a0d8620e4b19b92cdae24c08e4c7ccc.tar.gz crosvm-2f7dabbd6a0d8620e4b19b92cdae24c08e4c7ccc.tar.bz2 crosvm-2f7dabbd6a0d8620e4b19b92cdae24c08e4c7ccc.tar.lz crosvm-2f7dabbd6a0d8620e4b19b92cdae24c08e4c7ccc.tar.xz crosvm-2f7dabbd6a0d8620e4b19b92cdae24c08e4c7ccc.tar.zst crosvm-2f7dabbd6a0d8620e4b19b92cdae24c08e4c7ccc.zip |
Virtio: Add blk VIRTIO_RING_F_EVENT_IDX feature
The previous interrupt-suppression patch only supplied crude interrupt suppression; the VIRTIO_RING_F_EVENT_IDX feature supplies a more performant alternative: 1) the driver specifies how far the device can progress before a notification is required; 2) the device specifies how far the driver can progress before an interrupt is required. This patch adds this feature to blk. For gpu and net, this could also be added, but gpu and network performance do not improve from it. BUG=None TEST=run benchmark for blk in guest Change-Id: I73fe3f8b72a9e88fd6073890bc6ab2bee891d51d Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/platform/crosvm/+/2008341 Reviewed-by: Daniel Verkamp <dverkamp@chromium.org> Tested-by: kokoro <noreply+kokoro@google.com> Commit-Queue: Xiong Zhang <xiong.y.zhang@intel.corp-partner.google.com>
-rw-r--r-- | devices/src/virtio/block.rs | 13 | ||||
-rw-r--r-- | devices/src/virtio/net.rs | 2 | ||||
-rw-r--r-- | devices/src/virtio/queue.rs | 62 |
3 files changed, 59 insertions, 18 deletions
diff --git a/devices/src/virtio/block.rs b/devices/src/virtio/block.rs index 094586f..c9dda55 100644 --- a/devices/src/virtio/block.rs +++ b/devices/src/virtio/block.rs @@ -20,6 +20,7 @@ use sync::Mutex; use sys_util::Error as SysError; use sys_util::Result as SysResult; use sys_util::{error, info, iov_max, warn, EventFd, GuestMemory, PollContext, PollToken, TimerFd}; +use virtio_sys::virtio_ring::VIRTIO_RING_F_EVENT_IDX; use vm_control::{DiskControlCommand, DiskControlResponseSocket, DiskControlResult}; use super::{ @@ -510,6 +511,7 @@ impl Block { } let mut avail_features: u64 = 1 << VIRTIO_BLK_F_FLUSH; + avail_features |= 1 << VIRTIO_RING_F_EVENT_IDX; if read_only { avail_features |= 1 << VIRTIO_BLK_F_RO; } else { @@ -871,8 +873,8 @@ mod tests { let b = Block::new(Box::new(f), false, true, 512, None).unwrap(); // writable device should set VIRTIO_BLK_F_FLUSH + VIRTIO_BLK_F_DISCARD // + VIRTIO_BLK_F_WRITE_ZEROES + VIRTIO_F_VERSION_1 + VIRTIO_BLK_F_BLK_SIZE - // + VIRTIO_BLK_F_SEG_MAX - assert_eq!(0x100006244, b.features()); + // + VIRTIO_BLK_F_SEG_MAX + VIRTIO_RING_F_EVENT_IDX + assert_eq!(0x120006244, b.features()); } // read-write block device, non-sparse @@ -881,8 +883,8 @@ mod tests { let b = Block::new(Box::new(f), false, false, 512, None).unwrap(); // writable device should set VIRTIO_BLK_F_FLUSH // + VIRTIO_BLK_F_WRITE_ZEROES + VIRTIO_F_VERSION_1 + VIRTIO_BLK_F_BLK_SIZE - // + VIRTIO_BLK_F_SEG_MAX - assert_eq!(0x100004244, b.features()); + // + VIRTIO_BLK_F_SEG_MAX + VIRTIO_RING_F_EVENT_IDX + assert_eq!(0x120004244, b.features()); } // read-only block device @@ -891,7 +893,8 @@ mod tests { let b = Block::new(Box::new(f), true, true, 512, None).unwrap(); // read-only device should set VIRTIO_BLK_F_FLUSH and VIRTIO_BLK_F_RO // + VIRTIO_F_VERSION_1 + VIRTIO_BLK_F_BLK_SIZE + VIRTIO_BLK_F_SEG_MAX - assert_eq!(0x100000264, b.features()); + // + VIRTIO_RING_F_EVENT_IDX + assert_eq!(0x120000264, b.features()); } } diff --git a/devices/src/virtio/net.rs 
b/devices/src/virtio/net.rs index 38ba5a7..44a39ab 100644 --- a/devices/src/virtio/net.rs +++ b/devices/src/virtio/net.rs @@ -192,7 +192,7 @@ where }; if bytes_written > 0 { - self.rx_queue.pop_peeked(); + self.rx_queue.pop_peeked(&self.mem); self.rx_queue.add_used(&self.mem, index, bytes_written); needs_interrupt = true; } diff --git a/devices/src/virtio/queue.rs b/devices/src/virtio/queue.rs index e33e3ba..18ddd6d 100644 --- a/devices/src/virtio/queue.rs +++ b/devices/src/virtio/queue.rs @@ -7,6 +7,7 @@ use std::num::Wrapping; use std::sync::atomic::{fence, Ordering}; use sys_util::{error, GuestAddress, GuestMemory}; +use virtio_sys::virtio_ring::VIRTIO_RING_F_EVENT_IDX; use super::{Interrupt, VIRTIO_MSI_NO_VECTOR}; @@ -227,6 +228,7 @@ pub struct Queue { // Device feature bits accepted by the driver features: u64, + last_used: Wrapping<u16>, } impl Queue { @@ -243,6 +245,7 @@ impl Queue { next_avail: Wrapping(0), next_used: Wrapping(0), features: 0, + last_used: Wrapping(0), } } @@ -263,6 +266,7 @@ impl Queue { self.next_avail = Wrapping(0); self.next_used = Wrapping(0); self.features = 0; + self.last_used = Wrapping(0); } pub fn is_valid(&self, mem: &GuestMemory) -> bool { @@ -344,15 +348,22 @@ impl Queue { /// Remove the first available descriptor chain from the queue. /// This function should only be called immediately following `peek`. - pub fn pop_peeked(&mut self) { + pub fn pop_peeked(&mut self, mem: &GuestMemory) { self.next_avail += Wrapping(1); + if self.features & ((1u64) << VIRTIO_RING_F_EVENT_IDX) != 0 { + let avail_event_off = self + .used_ring + .unchecked_add((4 + 8 * self.actual_size()).into()); + mem.write_obj_at_addr(self.next_avail.0 as u16, avail_event_off) + .unwrap(); + } } /// If a new DescriptorHead is available, returns one and removes it from the queue. 
pub fn pop<'a>(&mut self, mem: &'a GuestMemory) -> Option<DescriptorChain<'a>> { let descriptor_chain = self.peek(mem); if descriptor_chain.is_some() { - self.pop_peeked(); + self.pop_peeked(mem); } descriptor_chain } @@ -393,28 +404,55 @@ impl Queue { /// Enable / Disable guest notify device that requests are available on /// the descriptor chain. pub fn set_notify(&mut self, mem: &GuestMemory, enable: bool) { - let mut used_flags: u16 = mem.read_obj_from_addr(self.used_ring).unwrap(); - if enable { - used_flags &= !VIRTQ_USED_F_NO_NOTIFY; + if self.features & ((1u64) << VIRTIO_RING_F_EVENT_IDX) != 0 { + let avail_index_addr = mem.checked_offset(self.avail_ring, 2).unwrap(); + let avail_index: u16 = mem.read_obj_from_addr(avail_index_addr).unwrap(); + let avail_event_off = self + .used_ring + .unchecked_add((4 + 8 * self.actual_size()).into()); + mem.write_obj_at_addr(avail_index, avail_event_off).unwrap(); } else { - used_flags |= VIRTQ_USED_F_NO_NOTIFY; + let mut used_flags: u16 = mem.read_obj_from_addr(self.used_ring).unwrap(); + if enable { + used_flags &= !VIRTQ_USED_F_NO_NOTIFY; + } else { + used_flags |= VIRTQ_USED_F_NO_NOTIFY; + } + mem.write_obj_at_addr(used_flags, self.used_ring).unwrap(); } - mem.write_obj_at_addr(used_flags, self.used_ring).unwrap(); } // Check Whether guest enable interrupt injection or not. fn available_interrupt_enabled(&self, mem: &GuestMemory) -> bool { - let avail_flags: u16 = mem.read_obj_from_addr(self.avail_ring).unwrap(); - if avail_flags & VIRTQ_AVAIL_F_NO_INTERRUPT == VIRTQ_AVAIL_F_NO_INTERRUPT { - false + if self.features & ((1u64) << VIRTIO_RING_F_EVENT_IDX) != 0 { + let used_event_off = self + .avail_ring + .unchecked_add((4 + 2 * self.actual_size()).into()); + let used_event: u16 = mem.read_obj_from_addr(used_event_off).unwrap(); + // if used_event >= self.last_used, driver handle interrupt quickly enough, new + // interrupt could be injected. 
+ // if used_event < self.last_used, driver hasn't finished the last interrupt, + // so no need to inject new interrupt. + if self.next_used - Wrapping(used_event) - Wrapping(1) < self.next_used - self.last_used + { + true + } else { + false + } } else { - true + let avail_flags: u16 = mem.read_obj_from_addr(self.avail_ring).unwrap(); + if avail_flags & VIRTQ_AVAIL_F_NO_INTERRUPT == VIRTQ_AVAIL_F_NO_INTERRUPT { + false + } else { + true + } } } /// inject interrupt into guest on this queue - pub fn trigger_interrupt(&self, mem: &GuestMemory, interrupt: &Interrupt) { + pub fn trigger_interrupt(&mut self, mem: &GuestMemory, interrupt: &Interrupt) { if self.available_interrupt_enabled(mem) { + self.last_used = self.next_used; interrupt.signal_used_queue(self.vector); } } |