author    Xiong Zhang <xiong.y.zhang@intel.corp-partner.google.com>  2019-09-05 19:29:30 +0800
committer Commit Bot <commit-bot@chromium.org>                       2019-10-25 23:59:43 +0000
commit    3185ae95dd58f556a836f9e146dfe7b8450749b2 (patch)
tree      b3eb1b569650d2a9fda7d864c622a5510e7d6b2a
parent    3530f2a2bb05f8ccf45d2fa452c9f9f4de547876 (diff)
devices: enable MSI-X for virtio-net and virtio-block devices
- signal_used_queue(): trigger an MSI-X interrupt to the guest if MSI-X is
  enabled, otherwise fall back to an INTx interrupt (see the sketch below)
- enable MSI-X on vhost-net: allocate one vhost_interrupt EventFd for every
  MSI-X vector.
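
The dispatch in signal_used_queue() boils down to the following. This is a
minimal, self-contained sketch using simplified stand-ins for crosvm's
MsixConfig and interrupt plumbing (the eventfd write is replaced by a
println), not the actual types:

use std::sync::atomic::{AtomicUsize, Ordering};

const INTERRUPT_STATUS_USED_RING: usize = 0x1;

// Simplified stand-in for crosvm's MsixConfig.
struct MsixConfig {
    enabled: bool,
}

impl MsixConfig {
    fn enabled(&self) -> bool {
        self.enabled
    }
    fn trigger(&mut self, vector: u16) {
        println!("inject MSI-X interrupt on vector {}", vector);
    }
}

struct Worker {
    msix_config: Option<MsixConfig>,
    interrupt_status: AtomicUsize,
}

impl Worker {
    // Prefer MSI-X when the guest has enabled it; otherwise fall back to the
    // legacy INTx path (set the used-ring status bit and ping the irqfd).
    fn signal_used_queue(&mut self, vector: u16) {
        if let Some(msix_config) = self.msix_config.as_mut() {
            if msix_config.enabled() {
                msix_config.trigger(vector);
                return;
            }
        }
        self.interrupt_status
            .fetch_or(INTERRUPT_STATUS_USED_RING, Ordering::SeqCst);
        println!("assert INTx via interrupt_evt");
    }
}

fn main() {
    let mut worker = Worker {
        msix_config: Some(MsixConfig { enabled: true }),
        interrupt_status: AtomicUsize::new(0),
    };
    worker.signal_used_queue(0); // the vector the guest configured for queue 0
}

Each virtqueue carries the MSI-X vector the guest programmed for it, so every
caller now passes its own queue's vector instead of sharing a single interrupt.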

Performance-wise, fio random R/W test results on the eve pixelbook:

           INTx          MSI-X      delta
fio write  8.13MiB/s    9.79MiB/s   +1.66MiB/s (+20%)
fio read   24.35MiB/s   29.3MiB/s   +4.95MiB/s (+20%)

For networking performance (TCP stream), test results on the eve pixelbook:

             INTx            MSI-X          delta
iperf3       5.93Gbits/s     6.57Gbits/s    +0.64Gbits/s (+10.7%)
iperf3 -R    5.68Gbits/s     7.37Gbits/s    +1.30Gbits/s (+22.8%)

iperf test results on a VM launched from an Ubuntu host (client sends only):

             INTx            MSI-X          delta
virtio-net   9.53Gbits/s     11.4 Gbits/s   +1.87Gbits/s (+19.5%)
vhost        28.34Gbits/s    44.43Gbits/s   +16.09Gbits/s (+56.7%)

BUG=chromium:854765
TEST=cargo test -p devices
TEST=tested virtio-net and block on Linux VM and eve pixelbook

Change-Id: Ic4952a094327e6b977f446def8209ea2f796878c
Signed-off-by: Xiong Zhang <xiong.y.zhang@intel.corp-partner.google.com>
Signed-off-by: Zide Chen <zide.chen@intel.corp-partner.google.com>
Signed-off-by: Sainath Grandhi <sainath.grandhi@intel.corp-partner.google.com>
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/platform/crosvm/+/1828340
Reviewed-by: Daniel Verkamp <dverkamp@chromium.org>
Tested-by: Daniel Verkamp <dverkamp@chromium.org>
Tested-by: kokoro <noreply+kokoro@google.com>
Commit-Queue: Daniel Verkamp <dverkamp@chromium.org>
-rw-r--r--  devices/src/pci/msix.rs             6
-rw-r--r--  devices/src/virtio/block.rs        27
-rw-r--r--  devices/src/virtio/net.rs          29
-rw-r--r--  devices/src/virtio/vhost/net.rs    21
-rw-r--r--  devices/src/virtio/vhost/vsock.rs  29
-rw-r--r--  devices/src/virtio/vhost/worker.rs 42
6 files changed, 111 insertions, 43 deletions
diff --git a/devices/src/pci/msix.rs b/devices/src/pci/msix.rs
index 6110b72..368f8b7 100644
--- a/devices/src/pci/msix.rs
+++ b/devices/src/pci/msix.rs
@@ -83,7 +83,7 @@ impl MsixConfig {
     /// Check whether the Function Mask bit in Message Control word is set or not.
     /// if 1, all of the vectors associated with the function are masked,
     /// regardless of their per-vector Mask bit states.
-    /// If 0, each vector’s Mask bit determines whether the vector is masked or not.
+    /// If 0, each vector's Mask bit determines whether the vector is masked or not.
     pub fn masked(&self) -> bool {
         self.masked
     }
@@ -201,7 +201,7 @@ impl MsixConfig {
     /// For all accesses to MSI-X Table and MSI-X PBA fields, software must use aligned full
     /// DWORD or aligned full QWORD transactions; otherwise, the result is undefined.
     ///
-    ///             DWORD3            DWORD2      DWORD1            DWORD0
+    ///   location: DWORD3            DWORD2      DWORD1            DWORD0
     ///   entry 0:  Vector Control    Msg Data    Msg Upper Addr    Msg Addr
     ///   entry 1:  Vector Control    Msg Data    Msg Upper Addr    Msg Addr
     ///   entry 2:  Vector Control    Msg Data    Msg Upper Addr    Msg Addr
@@ -253,7 +253,7 @@ impl MsixConfig {
     ///     for the memory write transaction; different MSI-X vectors have
     ///     different Message Address values
     /// Message Data: the contents of this field specifies the data driven
-    ///     on AD[31::00] during the memory write transaction’s data phase.
+    ///     on AD[31::00] during the memory write transaction's data phase.
     /// Vector Control: only bit 0 (Mask Bit) is not reserved: when this bit
     ///     is set, the function is prohibited from sending a message using
     ///     this MSI-X Table entry.
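
As background for the doc comments above: each MSI-X table entry is 16 bytes,
four DWORDs holding the message address, upper address, data, and vector
control. The struct below is only an illustrative model of that layout, not
the representation MsixConfig actually uses internally:

/// One MSI-X table entry: DWORD0 Msg Addr, DWORD1 Msg Upper Addr,
/// DWORD2 Msg Data, DWORD3 Vector Control (bit 0 is the per-vector Mask Bit).
#[derive(Default, Clone, Copy)]
struct MsixTableEntry {
    msg_addr_lo: u32,
    msg_addr_hi: u32,
    msg_data: u32,
    vector_ctl: u32,
}

impl MsixTableEntry {
    /// A vector may fire only if neither the Function Mask in the Message
    /// Control word nor this entry's per-vector Mask Bit is set.
    fn masked(&self, function_masked: bool) -> bool {
        function_masked || (self.vector_ctl & 0x1) != 0
    }
}

fn main() {
    let entry = MsixTableEntry { msg_addr_lo: 0xfee0_0000, ..Default::default() };
    println!(
        "addr 0x{:08x}{:08x} data 0x{:x} masked {}",
        entry.msg_addr_hi, entry.msg_addr_lo, entry.msg_data, entry.masked(false)
    );
    assert!(entry.masked(true));
}
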
diff --git a/devices/src/virtio/block.rs b/devices/src/virtio/block.rs
index 58e00d7..26be384 100644
--- a/devices/src/virtio/block.rs
+++ b/devices/src/virtio/block.rs
@@ -30,6 +30,7 @@ use super::{
 
 const QUEUE_SIZE: u16 = 256;
 const QUEUE_SIZES: &[u16] = &[QUEUE_SIZE];
+const NUM_MSIX_VECTORS: u16 = 2;
 const SECTOR_SHIFT: u8 = 9;
 const SECTOR_SIZE: u64 = 0x01 << SECTOR_SHIFT;
 const MAX_DISCARD_SECTORS: u32 = u32::MAX;
@@ -279,6 +280,7 @@ struct Worker {
     interrupt_status: Arc<AtomicUsize>,
     interrupt_evt: EventFd,
     interrupt_resample_evt: EventFd,
+    msix_config: Option<Arc<Mutex<MsixConfig>>>,
 }
 
 impl Worker {
@@ -381,7 +383,14 @@ impl Worker {
         DiskControlResult::Ok
     }
 
-    fn signal_used_queue(&self) {
+    fn signal_used_queue(&self, vector: u16) {
+        if let Some(msix_config) = &self.msix_config {
+            let mut msix_config = msix_config.lock();
+            if msix_config.enabled() {
+                msix_config.trigger(vector);
+                return;
+            }
+        }
         self.interrupt_status
             .fetch_or(INTERRUPT_STATUS_USED_RING as usize, Ordering::SeqCst);
         self.interrupt_evt.write(1).unwrap();
@@ -440,7 +449,6 @@ impl Worker {
                 }
             };
 
-            let mut needs_interrupt = false;
             let mut needs_config_interrupt = false;
             for event in events.iter_readable() {
                 match event.token() {
@@ -459,8 +467,9 @@ impl Worker {
                             error!("failed reading queue EventFd: {}", e);
                             break 'poll;
                         }
-                        needs_interrupt |=
-                            self.process_queue(0, &mut flush_timer, &mut flush_timer_armed);
+                        if self.process_queue(0, &mut flush_timer, &mut flush_timer_armed) {
+                            self.signal_used_queue(self.queues[0].vector);
+                        }
                     }
                     Token::ControlRequest => {
                         let req = match control_socket.recv() {
@@ -492,9 +501,6 @@ impl Worker {
                     Token::Kill => break 'poll,
                 }
             }
-            if needs_interrupt {
-                self.signal_used_queue();
-            }
             if needs_config_interrupt {
                 self.signal_config_changed();
             }
@@ -764,6 +770,10 @@ impl VirtioDevice for Block {
         TYPE_BLOCK
     }
 
+    fn msix_vectors(&self) -> u16 {
+        NUM_MSIX_VECTORS
+    }
+
     fn queue_max_sizes(&self) -> &[u16] {
         QUEUE_SIZES
     }
@@ -781,7 +791,7 @@ impl VirtioDevice for Block {
         mem: GuestMemory,
         interrupt_evt: EventFd,
         interrupt_resample_evt: EventFd,
-        _msix_config: Option<Arc<Mutex<MsixConfig>>>,
+        msix_config: Option<Arc<Mutex<MsixConfig>>>,
         status: Arc<AtomicUsize>,
         queues: Vec<Queue>,
         mut queue_evts: Vec<EventFd>,
@@ -816,6 +826,7 @@ impl VirtioDevice for Block {
                                 interrupt_status: status,
                                 interrupt_evt,
                                 interrupt_resample_evt,
+                                msix_config,
                             };
                             worker.run(queue_evts.remove(0), kill_evt, control_socket);
                         });
diff --git a/devices/src/virtio/net.rs b/devices/src/virtio/net.rs
index 4cb2741..51d2039 100644
--- a/devices/src/virtio/net.rs
+++ b/devices/src/virtio/net.rs
@@ -28,7 +28,9 @@ use super::{Queue, Reader, VirtioDevice, Writer, INTERRUPT_STATUS_USED_RING, TYP
 /// http://docs.oasis-open.org/virtio/virtio/v1.0/virtio-v1.0.html#x1-1740003
 const MAX_BUFFER_SIZE: usize = 65562;
 const QUEUE_SIZE: u16 = 256;
+const NUM_QUEUES: usize = 2;
 const QUEUE_SIZES: &[u16] = &[QUEUE_SIZE, QUEUE_SIZE];
+const NUM_MSIX_VECTORS: u16 = NUM_QUEUES as u16;
 
 #[derive(Debug)]
 pub enum NetError {
@@ -93,6 +95,7 @@ struct Worker<T: TapT> {
     interrupt_status: Arc<AtomicUsize>,
     interrupt_evt: EventFd,
     interrupt_resample_evt: EventFd,
+    msix_config: Option<Arc<Mutex<MsixConfig>>>,
     rx_buf: [u8; MAX_BUFFER_SIZE],
     rx_count: usize,
     deferred_rx: bool,
@@ -106,7 +109,14 @@ impl<T> Worker<T>
 where
     T: TapT,
 {
-    fn signal_used_queue(&self) {
+    fn signal_used_queue(&self, vector: u16) {
+        if let Some(msix_config) = &self.msix_config {
+            let mut msix_config = msix_config.lock();
+            if msix_config.enabled() {
+                msix_config.trigger(vector);
+                return;
+            }
+        }
         self.interrupt_status
             .fetch_or(INTERRUPT_STATUS_USED_RING as usize, Ordering::SeqCst);
         self.interrupt_evt.write(1).unwrap();
@@ -164,7 +174,7 @@ where
                         self.deferred_rx = true;
                         break;
                     } else if first_frame {
-                        self.signal_used_queue();
+                        self.signal_used_queue(self.rx_queue.vector);
                         first_frame = false;
                     } else {
                         needs_interrupt = true;
@@ -224,7 +234,7 @@ where
             self.tx_queue.add_used(&self.mem, index, 0);
         }
 
-        self.signal_used_queue();
+        self.signal_used_queue(self.tx_queue.vector);
     }
 
     fn run(
@@ -318,7 +328,7 @@ where
                 }
 
                 if needs_interrupt_rx {
-                    self.signal_used_queue();
+                    self.signal_used_queue(self.rx_queue.vector);
                 }
             }
         }
@@ -466,6 +476,10 @@ where
         TYPE_NET
     }
 
+    fn msix_vectors(&self) -> u16 {
+        NUM_MSIX_VECTORS
+    }
+
     fn queue_max_sizes(&self) -> &[u16] {
         QUEUE_SIZES
     }
@@ -493,13 +507,13 @@ where
         mem: GuestMemory,
         interrupt_evt: EventFd,
         interrupt_resample_evt: EventFd,
-        _msix_config: Option<Arc<Mutex<MsixConfig>>>,
+        msix_config: Option<Arc<Mutex<MsixConfig>>>,
         status: Arc<AtomicUsize>,
         mut queues: Vec<Queue>,
         mut queue_evts: Vec<EventFd>,
     ) {
-        if queues.len() != 2 || queue_evts.len() != 2 {
-            error!("net: expected 2 queues, got {}", queues.len());
+        if queues.len() != NUM_QUEUES || queue_evts.len() != NUM_QUEUES {
+            error!("net: expected {} queues, got {}", NUM_QUEUES, queues.len());
             return;
         }
 
@@ -520,6 +534,7 @@ where
                                 tap,
                                 interrupt_status: status,
                                 interrupt_evt,
+                                msix_config,
                                 interrupt_resample_evt,
                                 rx_buf: [0u8; MAX_BUFFER_SIZE],
                                 rx_count: 0,
diff --git a/devices/src/virtio/vhost/net.rs b/devices/src/virtio/vhost/net.rs
index 229b4f6..6bfc52a 100644
--- a/devices/src/virtio/vhost/net.rs
+++ b/devices/src/virtio/vhost/net.rs
@@ -25,6 +25,7 @@ use crate::virtio::{Queue, VirtioDevice, TYPE_NET};
 const QUEUE_SIZE: u16 = 256;
 const NUM_QUEUES: usize = 2;
 const QUEUE_SIZES: &[u16] = &[QUEUE_SIZE; NUM_QUEUES];
+const NUM_MSIX_VECTORS: u16 = NUM_QUEUES as u16;
 
 pub struct Net<T: TapT, U: VhostNetT<T>> {
     workers_kill_evt: Option<EventFd>,
@@ -32,7 +33,7 @@ pub struct Net<T: TapT, U: VhostNetT<T>> {
     worker_thread: Option<thread::JoinHandle<()>>,
     tap: Option<T>,
     vhost_net_handle: Option<U>,
-    vhost_interrupt: Option<EventFd>,
+    vhost_interrupt: Option<Vec<EventFd>>,
     avail_features: u64,
     acked_features: u64,
 }
@@ -84,13 +85,18 @@ where
             | 1 << virtio_sys::vhost::VIRTIO_F_NOTIFY_ON_EMPTY
             | 1 << virtio_sys::vhost::VIRTIO_F_VERSION_1;
 
+        let mut vhost_interrupt = Vec::new();
+        for _ in 0..NUM_MSIX_VECTORS {
+            vhost_interrupt.push(EventFd::new().map_err(Error::VhostIrqCreate)?);
+        }
+
         Ok(Net {
             workers_kill_evt: Some(kill_evt.try_clone().map_err(Error::CloneKillEventFd)?),
             kill_evt,
             worker_thread: None,
             tap: Some(tap),
             vhost_net_handle: Some(vhost_net_handle),
-            vhost_interrupt: Some(EventFd::new().map_err(Error::VhostIrqCreate)?),
+            vhost_interrupt: Some(vhost_interrupt),
             avail_features,
             acked_features: 0u64,
         })
@@ -132,7 +138,9 @@ where
         }
 
         if let Some(vhost_interrupt) = &self.vhost_interrupt {
-            keep_fds.push(vhost_interrupt.as_raw_fd());
+            for vhost_int in vhost_interrupt.iter() {
+                keep_fds.push(vhost_int.as_raw_fd());
+            }
         }
 
         if let Some(workers_kill_evt) = &self.workers_kill_evt {
@@ -146,6 +154,10 @@ where
         TYPE_NET
     }
 
+    fn msix_vectors(&self) -> u16 {
+        NUM_MSIX_VECTORS
+    }
+
     fn queue_max_sizes(&self) -> &[u16] {
         QUEUE_SIZES
     }
@@ -173,7 +185,7 @@ where
         _: GuestMemory,
         interrupt_evt: EventFd,
         interrupt_resample_evt: EventFd,
-        _msix_config: Option<Arc<Mutex<MsixConfig>>>,
+        msix_config: Option<Arc<Mutex<MsixConfig>>>,
         status: Arc<AtomicUsize>,
         queues: Vec<Queue>,
         queue_evts: Vec<EventFd>,
@@ -198,6 +210,7 @@ where
                                     status,
                                     interrupt_evt,
                                     interrupt_resample_evt,
+                                    msix_config,
                                     acked_features,
                                 );
                                 let activate_vqs = |handle: &U| -> Result<()> {
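
The vhost-net half of the change reduces to one pattern: allocate one call
eventfd per MSI-X vector (one per queue here) and register each as the
vring-call fd of the matching queue, so the kernel's vhost worker signals a
distinct fd per queue. A condensed sketch of that pattern follows; the real
code allocates the eventfds in Net::new() and wires them up in the worker's
activation path, and VringBackend below is a hypothetical stand-in for the
vhost handle:

use std::io;

// Hypothetical stand-ins for sys_util::EventFd and the vhost handle's
// set_vring_call().
struct EventFd;
impl EventFd {
    fn new() -> io::Result<EventFd> {
        Ok(EventFd)
    }
}

trait VringBackend {
    fn set_vring_call(&self, queue_index: usize, call_evt: &EventFd) -> io::Result<()>;
}

// Allocate one interrupt eventfd per MSI-X vector and attach each to its
// queue, instead of sharing a single eventfd across all queues.
fn setup_vhost_interrupts<B: VringBackend>(
    backend: &B,
    num_vectors: usize,
) -> io::Result<Vec<EventFd>> {
    let mut vhost_interrupt = Vec::with_capacity(num_vectors);
    for queue_index in 0..num_vectors {
        let evt = EventFd::new()?;
        backend.set_vring_call(queue_index, &evt)?;
        vhost_interrupt.push(evt);
    }
    Ok(vhost_interrupt)
}

struct FakeBackend;
impl VringBackend for FakeBackend {
    fn set_vring_call(&self, queue_index: usize, _call_evt: &EventFd) -> io::Result<()> {
        println!("queue {} gets its own call eventfd", queue_index);
        Ok(())
    }
}

fn main() -> io::Result<()> {
    let fds = setup_vhost_interrupts(&FakeBackend, 2)?;
    println!("allocated {} vhost interrupt eventfds", fds.len());
    Ok(())
}
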
diff --git a/devices/src/virtio/vhost/vsock.rs b/devices/src/virtio/vhost/vsock.rs
index b39c368..b4101eb 100644
--- a/devices/src/virtio/vhost/vsock.rs
+++ b/devices/src/virtio/vhost/vsock.rs
@@ -21,13 +21,14 @@ use crate::virtio::{copy_config, Queue, VirtioDevice, TYPE_VSOCK};
 const QUEUE_SIZE: u16 = 256;
 const NUM_QUEUES: usize = 3;
 const QUEUE_SIZES: &[u16] = &[QUEUE_SIZE; NUM_QUEUES];
+const NUM_MSIX_VECTORS: u16 = NUM_QUEUES as u16;
 
 pub struct Vsock {
     worker_kill_evt: Option<EventFd>,
     kill_evt: Option<EventFd>,
     vhost_handle: Option<VhostVsockHandle>,
     cid: u64,
-    interrupt: Option<EventFd>,
+    interrupts: Option<Vec<EventFd>>,
     avail_features: u64,
     acked_features: u64,
 }
@@ -45,12 +46,17 @@ impl Vsock {
             | 1 << virtio_sys::vhost::VIRTIO_F_ANY_LAYOUT
             | 1 << virtio_sys::vhost::VIRTIO_F_VERSION_1;
 
+        let mut interrupts = Vec::new();
+        for _ in 0..NUM_MSIX_VECTORS {
+            interrupts.push(EventFd::new().map_err(Error::VhostIrqCreate)?);
+        }
+
         Ok(Vsock {
             worker_kill_evt: Some(kill_evt.try_clone().map_err(Error::CloneKillEventFd)?),
             kill_evt: Some(kill_evt),
             vhost_handle: Some(handle),
             cid,
-            interrupt: Some(EventFd::new().map_err(Error::VhostIrqCreate)?),
+            interrupts: Some(interrupts),
             avail_features,
             acked_features: 0,
         })
@@ -62,7 +68,7 @@ impl Vsock {
             kill_evt: None,
             vhost_handle: None,
             cid,
-            interrupt: None,
+            interrupts: None,
             avail_features: features,
             acked_features: 0,
         }
@@ -93,8 +99,10 @@ impl VirtioDevice for Vsock {
             keep_fds.push(handle.as_raw_fd());
         }
 
-        if let Some(interrupt) = &self.interrupt {
-            keep_fds.push(interrupt.as_raw_fd());
+        if let Some(interrupt) = &self.interrupts {
+            for vhost_int in interrupt.iter() {
+                keep_fds.push(vhost_int.as_raw_fd());
+            }
         }
 
         if let Some(worker_kill_evt) = &self.worker_kill_evt {
@@ -108,6 +116,10 @@ impl VirtioDevice for Vsock {
         TYPE_VSOCK
     }
 
+    fn msix_vectors(&self) -> u16 {
+        NUM_MSIX_VECTORS
+    }
+
     fn queue_max_sizes(&self) -> &[u16] {
         QUEUE_SIZES
     }
@@ -140,7 +152,7 @@ impl VirtioDevice for Vsock {
         _: GuestMemory,
         interrupt_evt: EventFd,
         interrupt_resample_evt: EventFd,
-        _msix_config: Option<Arc<Mutex<MsixConfig>>>,
+        msix_config: Option<Arc<Mutex<MsixConfig>>>,
         status: Arc<AtomicUsize>,
         queues: Vec<Queue>,
         queue_evts: Vec<EventFd>,
@@ -151,7 +163,7 @@ impl VirtioDevice for Vsock {
         }
 
         if let Some(vhost_handle) = self.vhost_handle.take() {
-            if let Some(interrupt) = self.interrupt.take() {
+            if let Some(interrupts) = self.interrupts.take() {
                 if let Some(kill_evt) = self.worker_kill_evt.take() {
                     let acked_features = self.acked_features;
                     let cid = self.cid;
@@ -164,10 +176,11 @@ impl VirtioDevice for Vsock {
                             let mut worker = Worker::new(
                                 vhost_queues,
                                 vhost_handle,
-                                interrupt,
+                                interrupts,
                                 status,
                                 interrupt_evt,
                                 interrupt_resample_evt,
+                                msix_config,
                                 acked_features,
                             );
                             let activate_vqs = |handle: &VhostVsockHandle| -> Result<()> {
diff --git a/devices/src/virtio/vhost/worker.rs b/devices/src/virtio/vhost/worker.rs
index 54cbb63..26c1449 100644
--- a/devices/src/virtio/vhost/worker.rs
+++ b/devices/src/virtio/vhost/worker.rs
@@ -5,11 +5,13 @@
 use std::os::raw::c_ulonglong;
 use std::sync::atomic::{AtomicUsize, Ordering};
 use std::sync::Arc;
+use sync::Mutex;
 
 use sys_util::{EventFd, PollContext, PollToken};
 use vhost::Vhost;
 
 use super::{Error, Result};
+use crate::pci::MsixConfig;
 use crate::virtio::{Queue, INTERRUPT_STATUS_USED_RING};
 
 /// Worker that takes care of running the vhost device.  This mainly involves forwarding interrupts
@@ -19,10 +21,11 @@ use crate::virtio::{Queue, INTERRUPT_STATUS_USED_RING};
 pub struct Worker<T: Vhost> {
     queues: Vec<Queue>,
     vhost_handle: T,
-    vhost_interrupt: EventFd,
+    vhost_interrupt: Vec<EventFd>,
     interrupt_status: Arc<AtomicUsize>,
     interrupt_evt: EventFd,
     interrupt_resample_evt: EventFd,
+    msix_config: Option<Arc<Mutex<MsixConfig>>>,
     acked_features: u64,
 }
 
@@ -30,10 +33,11 @@ impl<T: Vhost> Worker<T> {
     pub fn new(
         queues: Vec<Queue>,
         vhost_handle: T,
-        vhost_interrupt: EventFd,
+        vhost_interrupt: Vec<EventFd>,
         interrupt_status: Arc<AtomicUsize>,
         interrupt_evt: EventFd,
         interrupt_resample_evt: EventFd,
+        msix_config: Option<Arc<Mutex<MsixConfig>>>,
         acked_features: u64,
     ) -> Worker<T> {
         Worker {
@@ -43,11 +47,20 @@ impl<T: Vhost> Worker<T> {
             interrupt_status,
             interrupt_evt,
             interrupt_resample_evt,
+            msix_config,
             acked_features,
         }
     }
 
-    fn signal_used_queue(&self) {
+    fn signal_used_queue(&self, vector: u16) {
+        if let Some(msix_config) = &self.msix_config {
+            let mut msix_config = msix_config.lock();
+            if msix_config.enabled() {
+                msix_config.trigger(vector);
+                return;
+            }
+        }
+
         self.interrupt_status
             .fetch_or(INTERRUPT_STATUS_USED_RING as usize, Ordering::SeqCst);
         self.interrupt_evt.write(1).unwrap();
@@ -103,7 +116,7 @@ impl<T: Vhost> Worker<T> {
                 .set_vring_base(queue_index, 0)
                 .map_err(Error::VhostSetVringBase)?;
             self.vhost_handle
-                .set_vring_call(queue_index, &self.vhost_interrupt)
+                .set_vring_call(queue_index, &self.vhost_interrupt[queue_index])
                 .map_err(Error::VhostSetVringCall)?;
             self.vhost_handle
                 .set_vring_kick(queue_index, &queue_evts[queue_index])
@@ -114,27 +127,33 @@ impl<T: Vhost> Worker<T> {
 
         #[derive(PollToken)]
         enum Token {
-            VhostIrq,
+            VhostIrqi { index: usize },
             InterruptResample,
             Kill,
         }
 
         let poll_ctx: PollContext<Token> = PollContext::build_with(&[
-            (&self.vhost_interrupt, Token::VhostIrq),
             (&self.interrupt_resample_evt, Token::InterruptResample),
             (&kill_evt, Token::Kill),
         ])
         .map_err(Error::CreatePollContext)?;
 
+        for (index, vhost_int) in self.vhost_interrupt.iter().enumerate() {
+            poll_ctx
+                .add(vhost_int, Token::VhostIrqi { index })
+                .map_err(Error::CreatePollContext)?;
+        }
+
         'poll: loop {
             let events = poll_ctx.wait().map_err(Error::PollError)?;
 
-            let mut needs_interrupt = false;
             for event in events.iter_readable() {
                 match event.token() {
-                    Token::VhostIrq => {
-                        needs_interrupt = true;
-                        self.vhost_interrupt.read().map_err(Error::VhostIrqRead)?;
+                    Token::VhostIrqi { index } => {
+                        self.vhost_interrupt[index]
+                            .read()
+                            .map_err(Error::VhostIrqRead)?;
+                        self.signal_used_queue(self.queues[index].vector);
                     }
                     Token::InterruptResample => {
                         let _ = self.interrupt_resample_evt.read();
@@ -145,9 +164,6 @@ impl<T: Vhost> Worker<T> {
                     Token::Kill => break 'poll,
                 }
             }
-            if needs_interrupt {
-                self.signal_used_queue();
-            }
         }
         Ok(())
     }
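
With one eventfd per vector, the worker's poll loop maps each readable
interrupt fd back to its queue index and signals only that queue's vector.
A simplified, synchronous sketch of that dispatch (no real PollContext,
hypothetical types only):

// Simplified stand-ins for the queue and interrupt plumbing in the worker.
struct Queue {
    vector: u16,
}

struct Worker {
    queues: Vec<Queue>,
}

impl Worker {
    fn signal_used_queue(&self, vector: u16) {
        println!("signal guest on MSI-X vector {}", vector);
    }

    // In the real worker this runs inside the poll loop: Token::VhostIrqi
    // carries the index of the vhost_interrupt fd that fired, the eventfd is
    // drained, and the matching queue's configured vector is signalled.
    fn handle_vhost_irq(&self, index: usize) {
        self.signal_used_queue(self.queues[index].vector);
    }
}

fn main() {
    let worker = Worker {
        queues: vec![Queue { vector: 0 }, Queue { vector: 1 }],
    };
    // Pretend the rx queue's interrupt fired, then the tx queue's.
    worker.handle_vhost_irq(0);
    worker.handle_vhost_irq(1);
}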