summary refs log tree commit diff
diff options
context:
space:
mode:
authorJingkui Wang <jkwang@google.com>2019-03-07 23:54:09 -0800
committerchrome-bot <chrome-bot@chromium.org>2019-03-16 15:25:22 -0700
commit0a5bf14261edd60641b75aeb60247001b5a3056f (patch)
tree9d93ee479ddd3be24fbd30e2193db37353e1e489
parentc698769b42779a8f621b0df48583c4307c596cb6 (diff)
downloadcrosvm-0a5bf14261edd60641b75aeb60247001b5a3056f.tar
crosvm-0a5bf14261edd60641b75aeb60247001b5a3056f.tar.gz
crosvm-0a5bf14261edd60641b75aeb60247001b5a3056f.tar.bz2
crosvm-0a5bf14261edd60641b75aeb60247001b5a3056f.tar.lz
crosvm-0a5bf14261edd60641b75aeb60247001b5a3056f.tar.xz
crosvm-0a5bf14261edd60641b75aeb60247001b5a3056f.tar.zst
crosvm-0a5bf14261edd60641b75aeb60247001b5a3056f.zip
usb: add ring buffer and ring buffer controller
For the ring buffer, the guest kernel is the producer and crosvm is the consumer.

CQ-DEPEND=1510817
BUG=chromium:831850
TEST=cargo test

Change-Id: Ib62d2b42de1a77ff71ca0e2a0066feacc56dddc1
Reviewed-on: https://chromium-review.googlesource.com/1510818
Commit-Ready: Jingkui Wang <jkwang@google.com>
Tested-by: kokoro <noreply+kokoro@google.com>
Reviewed-by: Zach Reizner <zachr@chromium.org>
-rw-r--r--devices/src/usb/xhci/mod.rs3
-rw-r--r--devices/src/usb/xhci/ring_buffer.rs271
-rw-r--r--devices/src/usb/xhci/ring_buffer_controller.rs345
-rw-r--r--devices/src/usb/xhci/ring_buffer_stop_cb.rs56
4 files changed, 675 insertions, 0 deletions
diff --git a/devices/src/usb/xhci/mod.rs b/devices/src/usb/xhci/mod.rs
index 57d9523..1d3ed60 100644
--- a/devices/src/usb/xhci/mod.rs
+++ b/devices/src/usb/xhci/mod.rs
@@ -5,6 +5,9 @@
 mod event_ring;
 mod interrupter;
 mod intr_resample_handler;
+mod ring_buffer;
+mod ring_buffer_controller;
+mod ring_buffer_stop_cb;
 mod scatter_gather_buffer;
 mod xhci_abi;
 mod xhci_abi_schema;
diff --git a/devices/src/usb/xhci/ring_buffer.rs b/devices/src/usb/xhci/ring_buffer.rs
new file mode 100644
index 0000000..37afe17
--- /dev/null
+++ b/devices/src/usb/xhci/ring_buffer.rs
@@ -0,0 +1,271 @@
+// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+use super::xhci_abi::{
+    AddressedTrb, Error as TrbError, LinkTrb, TransferDescriptor, Trb, TrbCast, TrbType,
+};
+use std::fmt::{self, Display};
+use std::mem::size_of;
+use sys_util::{GuestAddress, GuestMemory, GuestMemoryError};
+
+/// Errors that can occur while dequeuing TRBs from a ring buffer.
+#[derive(Debug)]
+pub enum Error {
+    /// Reading a TRB out of guest memory failed.
+    ReadGuestMemory(GuestMemoryError),
+    /// Advancing the dequeue pointer overflowed the guest address space.
+    BadDequeuePointer(GuestAddress),
+    /// Casting a raw TRB to a concrete TRB type failed.
+    CastTrb(TrbError),
+    /// Reading the chain bit of a TRB failed.
+    TrbChain(TrbError),
+}
+
+type Result<T> = std::result::Result<T, Error>;
+
+// Human-readable error messages for logging.
+impl Display for Error {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        use self::Error::*;
+
+        match self {
+            ReadGuestMemory(e) => write!(f, "cannot read guest memory: {}", e),
+            BadDequeuePointer(addr) => write!(f, "bad dequeue pointer: {}", addr),
+            CastTrb(e) => write!(f, "cannot cast trb: {}", e),
+            TrbChain(e) => write!(f, "cannot get trb chain bit: {}", e),
+        }
+    }
+}
+
+/// Ring Buffer is a segmented circular buffer in guest memory containing work items
+/// called transfer descriptors, each of which consists of one or more TRBs.
+/// Ring buffer logic is shared between transfer ring and command ring.
+/// Transfer Ring management is defined in xHCI spec 4.9.2.
+pub struct RingBuffer {
+    // Name used only for logging/Display output.
+    name: String,
+    // Guest memory the ring lives in; TRBs are read directly from it.
+    mem: GuestMemory,
+    // Guest physical address of the next TRB to consume.
+    dequeue_pointer: GuestAddress,
+    // Used to check if the ring is empty. Toggled when looping back to the beginning
+    // of the buffer.
+    consumer_cycle_state: bool,
+}
+
+// Identifies the ring by name in log output.
+impl Display for RingBuffer {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "RingBuffer `{}`", self.name)
+    }
+}
+
+// Public interfaces for Ring buffer.
+impl RingBuffer {
+    /// Create a new RingBuffer.
+    ///
+    /// The dequeue pointer starts at address 0 and the cycle state at false;
+    /// callers are expected to set both before dequeuing.
+    pub fn new(name: String, mem: GuestMemory) -> Self {
+        RingBuffer {
+            name,
+            mem,
+            dequeue_pointer: GuestAddress(0),
+            consumer_cycle_state: false,
+        }
+    }
+
+    /// Dequeue next transfer descriptor from the transfer ring.
+    ///
+    /// Returns `Ok(None)` when the ring is empty (cycle-bit mismatch) or when
+    /// the TRBs visible so far do not yet form a complete descriptor (the last
+    /// readable TRB still has its chain bit set).
+    pub fn dequeue_transfer_descriptor(&mut self) -> Result<Option<TransferDescriptor>> {
+        let mut td: TransferDescriptor = TransferDescriptor::new();
+        loop {
+            // `None` means the TRB at the dequeue pointer is not owned by the
+            // consumer yet (cycle bit mismatch); stop collecting.
+            let addressed_trb = match self.get_current_trb()? {
+                Some(t) => t,
+                None => break,
+            };
+
+            match addressed_trb.trb.trb_type() {
+                Ok(TrbType::Link) => {
+                    // Link TRBs are not part of the descriptor: follow the
+                    // segment pointer and optionally toggle the consumer cycle
+                    // state, per xHCI spec 4.9.2.
+                    let link_trb = addressed_trb
+                        .trb
+                        .cast::<LinkTrb>()
+                        .map_err(Error::CastTrb)?;
+                    self.dequeue_pointer = GuestAddress(link_trb.get_ring_segment_pointer());
+                    self.consumer_cycle_state =
+                        self.consumer_cycle_state != link_trb.get_toggle_cycle_bit();
+                    continue;
+                }
+                _ => {}
+            };
+
+            // Advance the dequeue pointer past the TRB just consumed.
+            self.dequeue_pointer = match self.dequeue_pointer.checked_add(size_of::<Trb>() as u64) {
+                Some(addr) => addr,
+                None => {
+                    return Err(Error::BadDequeuePointer(self.dequeue_pointer.clone()));
+                }
+            };
+
+            td.push(addressed_trb);
+            // A cleared chain bit marks the final TRB of a descriptor.
+            if !addressed_trb.trb.get_chain_bit().map_err(Error::TrbChain)? {
+                usb_debug!("trb chain is false returning");
+                break;
+            }
+            // NOTE(review): a malformed ring whose link TRBs form a cycle with
+            // matching cycle bits and chain bits always set would never exit
+            // this loop — TODO confirm the guest is trusted here or bound the
+            // number of TRBs per descriptor.
+        }
+        // A valid transfer descriptor contains at least one addressed trb and the last trb has
+        // chain bit == 0 (a set chain bit means the descriptor is incomplete).
+        match td.last() {
+            Some(t) => {
+                if t.trb.get_chain_bit().map_err(Error::TrbChain)? {
+                    return Ok(None);
+                }
+            }
+            None => return Ok(None),
+        }
+        Ok(Some(td))
+    }
+
+    /// Set dequeue pointer of the ring buffer.
+    pub fn set_dequeue_pointer(&mut self, addr: GuestAddress) {
+        usb_debug!("{}: set dequeue pointer {:x}", self.name.as_str(), addr.0);
+
+        self.dequeue_pointer = addr;
+    }
+
+    /// Set consumer cycle state of the ring buffer.
+    pub fn set_consumer_cycle_state(&mut self, state: bool) {
+        usb_debug!("{}: set consumer cycle state {}", self.name.as_str(), state);
+        self.consumer_cycle_state = state;
+    }
+
+    // Read trb pointed by dequeue pointer. Does not advance the dequeue pointer.
+    fn get_current_trb(&self) -> Result<Option<AddressedTrb>> {
+        let trb: Trb = self
+            .mem
+            .read_obj_from_addr(self.dequeue_pointer)
+            .map_err(Error::ReadGuestMemory)?;
+        usb_debug!("{}: trb read from memory {:?}", self.name.as_str(), trb);
+        // If cycle bit of trb does not equal consumer cycle state, the ring is empty.
+        // This trb is invalid.
+        if trb.get_cycle_bit() != self.consumer_cycle_state {
+            usb_debug!(
+                "cycle bit does not match, self cycle {}",
+                self.consumer_cycle_state
+            );
+            Ok(None)
+        } else {
+            Ok(Some(AddressedTrb {
+                trb,
+                gpa: self.dequeue_pointer.0,
+            }))
+        }
+    }
+}
+
+#[cfg(test)]
+mod test {
+    use super::*;
+    use usb::xhci::xhci_abi::*;
+
+    // Dequeues two complete TDs from a three-segment ring joined by link TRBs.
+    #[test]
+    fn ring_test_dequeue() {
+        let trb_size = size_of::<Trb>() as u64;
+        let gm = GuestMemory::new(&vec![(GuestAddress(0), 0x1000)]).unwrap();
+        let mut transfer_ring = RingBuffer::new(String::new(), gm.clone());
+
+        // Structure of ring buffer:
+        //  0x100  --> 0x200  --> 0x300
+        //  trb 1  |   trb 3  |   trb 5
+        //  trb 2  |   trb 4  |   trb 6
+        //  l trb  -   l trb  -   l trb to 0x100
+        let mut trb = NormalTrb::new();
+        trb.set_trb_type(TrbType::Normal as u8);
+        trb.set_data_buffer(1);
+        trb.set_chain(1);
+        gm.write_obj_at_addr(trb.clone(), GuestAddress(0x100))
+            .unwrap();
+
+        trb.set_data_buffer(2);
+        gm.write_obj_at_addr(trb, GuestAddress(0x100 + trb_size))
+            .unwrap();
+
+        let mut ltrb = LinkTrb::new();
+        ltrb.set_trb_type(TrbType::Link as u8);
+        ltrb.set_ring_segment_pointer(0x200);
+        gm.write_obj_at_addr(ltrb, GuestAddress(0x100 + 2 * trb_size))
+            .unwrap();
+
+        trb.set_data_buffer(3);
+        gm.write_obj_at_addr(trb, GuestAddress(0x200)).unwrap();
+
+        // Chain bit is false.
+        trb.set_data_buffer(4);
+        trb.set_chain(0);
+        gm.write_obj_at_addr(trb, GuestAddress(0x200 + 1 * trb_size))
+            .unwrap();
+
+        ltrb.set_ring_segment_pointer(0x300);
+        gm.write_obj_at_addr(ltrb, GuestAddress(0x200 + 2 * trb_size))
+            .unwrap();
+
+        trb.set_data_buffer(5);
+        trb.set_chain(1);
+        gm.write_obj_at_addr(trb, GuestAddress(0x300)).unwrap();
+
+        // Chain bit is false.
+        trb.set_data_buffer(6);
+        trb.set_chain(0);
+        gm.write_obj_at_addr(trb, GuestAddress(0x300 + 1 * trb_size))
+            .unwrap();
+
+        ltrb.set_ring_segment_pointer(0x100);
+        gm.write_obj_at_addr(ltrb, GuestAddress(0x300 + 2 * trb_size))
+            .unwrap();
+
+        transfer_ring.set_dequeue_pointer(GuestAddress(0x100));
+        transfer_ring.set_consumer_cycle_state(false);
+
+        // Read first transfer descriptor.
+        let descriptor = transfer_ring
+            .dequeue_transfer_descriptor()
+            .unwrap()
+            .unwrap();
+        assert_eq!(descriptor.len(), 4);
+        assert_eq!(descriptor[0].trb.get_parameter(), 1);
+        assert_eq!(descriptor[1].trb.get_parameter(), 2);
+        assert_eq!(descriptor[2].trb.get_parameter(), 3);
+        assert_eq!(descriptor[3].trb.get_parameter(), 4);
+
+        // Read second transfer descriptor.
+        let descriptor = transfer_ring
+            .dequeue_transfer_descriptor()
+            .unwrap()
+            .unwrap();
+        assert_eq!(descriptor.len(), 2);
+        assert_eq!(descriptor[0].trb.get_parameter(), 5);
+        assert_eq!(descriptor[1].trb.get_parameter(), 6);
+    }
+
+    // A link TRB with toggle_cycle flips the consumer cycle state, making the
+    // next segment's TRBs look unowned, so the in-progress (chained) TD is
+    // incomplete and dequeue must return None rather than a partial TD.
+    #[test]
+    fn transfer_ring_test_dequeue_failure() {
+        let trb_size = size_of::<Trb>() as u64;
+        let gm = GuestMemory::new(&vec![(GuestAddress(0), 0x1000)]).unwrap();
+        let mut transfer_ring = RingBuffer::new(String::new(), gm.clone());
+
+        let mut trb = NormalTrb::new();
+        trb.set_trb_type(TrbType::Normal as u8);
+        trb.set_data_buffer(1);
+        trb.set_chain(1);
+        gm.write_obj_at_addr(trb.clone(), GuestAddress(0x100))
+            .unwrap();
+
+        trb.set_data_buffer(2);
+        gm.write_obj_at_addr(trb, GuestAddress(0x100 + trb_size))
+            .unwrap();
+
+        let mut ltrb = LinkTrb::new();
+        ltrb.set_trb_type(TrbType::Link as u8);
+        ltrb.set_ring_segment_pointer(0x200);
+        ltrb.set_toggle_cycle(1);
+        gm.write_obj_at_addr(ltrb, GuestAddress(0x100 + 2 * trb_size))
+            .unwrap();
+
+        trb.set_data_buffer(3);
+        gm.write_obj_at_addr(trb, GuestAddress(0x200)).unwrap();
+
+        transfer_ring.set_dequeue_pointer(GuestAddress(0x100));
+        transfer_ring.set_consumer_cycle_state(false);
+
+        // Read first transfer descriptor.
+        let descriptor = transfer_ring.dequeue_transfer_descriptor().unwrap();
+        assert_eq!(descriptor.is_none(), true);
+    }
+
+}
diff --git a/devices/src/usb/xhci/ring_buffer_controller.rs b/devices/src/usb/xhci/ring_buffer_controller.rs
new file mode 100644
index 0000000..7aed102
--- /dev/null
+++ b/devices/src/usb/xhci/ring_buffer_controller.rs
@@ -0,0 +1,345 @@
+// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+use super::ring_buffer_stop_cb::RingBufferStopCallback;
+use super::xhci_abi::*;
+use std::fmt::{self, Display};
+use std::os::unix::io::RawFd;
+use std::sync::{Arc, MutexGuard};
+use sync::Mutex;
+use utils::{self, EventHandler, EventLoop};
+
+use sys_util::{Error as SysError, EventFd, GuestAddress, GuestMemory, WatchingEvents};
+
+use super::ring_buffer::RingBuffer;
+
+/// Errors that can occur while constructing a ring buffer controller.
+#[derive(Debug)]
+pub enum Error {
+    /// Registering the controller's event fd with the event loop failed.
+    AddEvent(utils::Error),
+    /// Creating the kick event fd failed.
+    CreateEventFd(SysError),
+}
+
+type Result<T> = std::result::Result<T, Error>;
+
+// Human-readable error messages for logging.
+impl Display for Error {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        use self::Error::*;
+
+        match self {
+            AddEvent(e) => write!(f, "failed to add event to event loop: {}", e),
+            CreateEventFd(e) => write!(f, "failed to create event fd: {}", e),
+        }
+    }
+}
+
+/// Lifecycle state of a RingBufferController.
+#[derive(PartialEq, Copy, Clone)]
+enum RingBufferState {
+    /// Running: RingBuffer is running, consuming transfer descriptor.
+    Running,
+    /// Stopping: Some thread requested RingBuffer stop. It will stop when current descriptor is
+    /// handled.
+    Stopping,
+    /// Stopped: RingBuffer already stopped.
+    Stopped,
+}
+
+/// TransferDescriptorHandler handles transfer descriptors. Users should implement this trait and
+/// build a ring buffer controller with the implementing struct.
+pub trait TransferDescriptorHandler {
+    /// Process the descriptor asynchronously, writing `complete_event` when done so the
+    /// controller knows to dequeue the next descriptor.
+    fn handle_transfer_descriptor(
+        &self,
+        descriptor: TransferDescriptor,
+        complete_event: EventFd,
+    ) -> std::result::Result<(), ()>;
+    /// Stop is called when trying to stop the ring buffer controller. Returns true when stop must
+    /// be performed asynchronously. This happens because the handler is handling some descriptor
+    /// asynchronously; the stop callback of the ring buffer controller must be called after the
+    /// `async` part is handled or canceled. If the TransferDescriptorHandler decides it can stop
+    /// immediately, it may return false.
+    /// For example, if a handler submitted a transfer but the transfer has not yet finished, and
+    /// the guest kernel requests to stop the ring buffer controller, the transfer descriptor
+    /// handler will return true; thus RingBufferController transitions to the Stopping state. It
+    /// will be stopped when all pending transfers complete.
+    /// On the other hand, if the handler does not have any pending transfers, it returns false.
+    fn stop(&self) -> bool {
+        true
+    }
+}
+
+/// RingBufferController owns a ring buffer. It lives on an event_loop. It will pop out transfer
+/// descriptors and let the TransferDescriptorHandler handle them.
+pub struct RingBufferController<T: 'static + TransferDescriptorHandler> {
+    // Name used for logging/Display output.
+    name: String,
+    // Current lifecycle state.
+    state: Mutex<RingBufferState>,
+    // Pending stop callbacks; cleared (and thereby invoked, by drop) once stopped.
+    stop_callback: Mutex<Vec<RingBufferStopCallback>>,
+    // The guest-memory ring this controller consumes from.
+    ring_buffer: Mutex<RingBuffer>,
+    // User-supplied handler for dequeued transfer descriptors.
+    handler: Mutex<T>,
+    // Event loop the controller is registered on; needed for deregistration on drop.
+    event_loop: Arc<EventLoop>,
+    // Kick fd: written by start() and (via a clone) by the handler on completion
+    // to schedule the next on_event() invocation.
+    event: EventFd,
+}
+
+// Identifies the controller by name in log output.
+impl<T: 'static + TransferDescriptorHandler> Display for RingBufferController<T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "RingBufferController `{}`", self.name)
+    }
+}
+
+impl<T: Send> RingBufferController<T>
+where
+    T: 'static + TransferDescriptorHandler,
+{
+    /// Create a ring buffer controller and add it to event loop.
+    ///
+    /// The controller starts in the Stopped state; callers kick it with
+    /// `start()`. The event loop holds only a weak reference to the handler,
+    /// so the returned Arc controls the controller's lifetime.
+    pub fn new_with_handler(
+        name: String,
+        mem: GuestMemory,
+        event_loop: Arc<EventLoop>,
+        handler: T,
+    ) -> Result<Arc<RingBufferController<T>>> {
+        let evt = EventFd::new().map_err(Error::CreateEventFd)?;
+        let controller = Arc::new(RingBufferController {
+            name: name.clone(),
+            state: Mutex::new(RingBufferState::Stopped),
+            stop_callback: Mutex::new(Vec::new()),
+            ring_buffer: Mutex::new(RingBuffer::new(name.clone(), mem)),
+            handler: Mutex::new(handler),
+            event_loop: event_loop.clone(),
+            event: evt,
+        });
+        let event_handler: Arc<EventHandler> = controller.clone();
+        event_loop
+            .add_event(
+                &controller.event,
+                WatchingEvents::empty().set_read(),
+                Arc::downgrade(&event_handler),
+            )
+            .map_err(Error::AddEvent)?;
+        Ok(controller)
+    }
+
+    // Convenience accessor for the ring buffer lock.
+    fn lock_ring_buffer(&self) -> MutexGuard<RingBuffer> {
+        self.ring_buffer.lock()
+    }
+
+    /// Set dequeue pointer of the internal ring buffer.
+    pub fn set_dequeue_pointer(&self, ptr: GuestAddress) {
+        usb_debug!("{}: set dequeue pointer: {:x}", self.name, ptr.0);
+        // Fast because this should only happen during xhci setup.
+        self.lock_ring_buffer().set_dequeue_pointer(ptr);
+    }
+
+    /// Set consumer cycle state.
+    pub fn set_consumer_cycle_state(&self, state: bool) {
+        usb_debug!("{}: set consumer cycle state: {}", self.name, state);
+        // Fast because this should only happen during xhci setup.
+        self.lock_ring_buffer().set_consumer_cycle_state(state);
+    }
+
+    /// Start the ring buffer.
+    ///
+    /// Transitions to Running (if not already) and writes the kick fd so that
+    /// on_event() begins draining descriptors on the event loop thread.
+    pub fn start(&self) {
+        usb_debug!("{} started", self.name);
+        let mut state = self.state.lock();
+        if *state != RingBufferState::Running {
+            *state = RingBufferState::Running;
+            if let Err(e) = self.event.write(1) {
+                // NOTE(review): message says "event ring" but this kicks the
+                // ring buffer controller — consider rewording.
+                error!("cannot start event ring: {}", e);
+            }
+        }
+    }
+
+    /// Stop the ring buffer asynchronously.
+    ///
+    /// `callback` fires (when its last clone is dropped) once the controller
+    /// has actually stopped.
+    pub fn stop(&self, callback: RingBufferStopCallback) {
+        usb_debug!("{} being stopped", self.name);
+        let mut state = self.state.lock();
+        if *state == RingBufferState::Stopped {
+            usb_debug!("{} is already stopped", self.name);
+            return;
+        }
+        if self.handler.lock().stop() {
+            // Handler has async work outstanding: defer. on_event() drops the
+            // stored callbacks (invoking them) once it reaches Stopped.
+            *state = RingBufferState::Stopping;
+            self.stop_callback.lock().push(callback);
+        } else {
+            // Immediate stop: `callback` is dropped at the end of this scope,
+            // which invokes its closure right away.
+            *state = RingBufferState::Stopped;
+        }
+    }
+}
+
+impl<T> Drop for RingBufferController<T>
+where
+    T: 'static + TransferDescriptorHandler,
+{
+    fn drop(&mut self) {
+        // Remove self from the event loop so the fd is deregistered before the
+        // owned EventFd is closed.
+        if let Err(e) = self.event_loop.remove_event_for_fd(&self.event) {
+            error!(
+                "cannot remove ring buffer controller from event loop: {}",
+                e
+            );
+        }
+    }
+}
+
+impl<T> EventHandler for RingBufferController<T>
+where
+    T: 'static + TransferDescriptorHandler + Send,
+{
+    // Runs on the event loop thread each time `self.event` is written. Handles
+    // at most one transfer descriptor per invocation; the handler's completion
+    // write to its cloned fd (or another start() kick) re-arms the next round.
+    fn on_event(&self, _fd: RawFd) -> std::result::Result<(), ()> {
+        // `self.event` triggers ring buffer controller to run, the value read is not important.
+        match self.event.read() {
+            Ok(_) => {}
+            Err(e) => {
+                error!("cannot read from event fd: {}", e);
+                return Err(());
+            }
+        }
+        let mut state = self.state.lock();
+
+        match *state {
+            RingBufferState::Stopped => return Ok(()),
+            RingBufferState::Stopping => {
+                usb_debug!("{}: stopping ring buffer controller", self.name);
+                *state = RingBufferState::Stopped;
+                // Dropping the stored callbacks invokes them, signaling stop
+                // completion to their owners.
+                self.stop_callback.lock().clear();
+                return Ok(());
+            }
+            RingBufferState::Running => {}
+        }
+
+        let transfer_descriptor = match self.lock_ring_buffer().dequeue_transfer_descriptor() {
+            Ok(t) => t,
+            Err(e) => {
+                error!("cannot dequeue transfer descriptor: {}", e);
+                return Err(());
+            }
+        };
+
+        let transfer_descriptor = match transfer_descriptor {
+            Some(t) => t,
+            None => {
+                // Ring is empty (or the descriptor is incomplete): go idle and
+                // fire any pending stop callbacks.
+                *state = RingBufferState::Stopped;
+                self.stop_callback.lock().clear();
+                return Ok(());
+            }
+        };
+
+        // The handler writes to this clone when done, re-triggering on_event.
+        let event = match self.event.try_clone() {
+            Ok(evt) => evt,
+            Err(e) => {
+                error!("cannot clone event fd: {}", e);
+                return Err(());
+            }
+        };
+        self.handler
+            .lock()
+            .handle_transfer_descriptor(transfer_descriptor, event)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use std::mem::size_of;
+    use std::sync::mpsc::{channel, Sender};
+    use utils::FailHandle;
+
+    // Forwards each normal TRB's parameter over a channel, then signals
+    // completion so the controller dequeues the next descriptor.
+    struct TestHandler {
+        sender: Sender<i32>,
+    }
+
+    impl TransferDescriptorHandler for TestHandler {
+        fn handle_transfer_descriptor(
+            &self,
+            descriptor: TransferDescriptor,
+            complete_event: EventFd,
+        ) -> std::result::Result<(), ()> {
+            for atrb in descriptor {
+                assert_eq!(atrb.trb.trb_type().unwrap(), TrbType::Normal);
+                self.sender.send(atrb.trb.get_parameter() as i32).unwrap();
+            }
+            complete_event.write(1).unwrap();
+            Ok(())
+        }
+    }
+
+    // Builds the same three-segment ring layout used by the ring_buffer tests.
+    fn setup_mem() -> GuestMemory {
+        let trb_size = size_of::<Trb>() as u64;
+        let gm = GuestMemory::new(&vec![(GuestAddress(0), 0x1000)]).unwrap();
+
+        // Structure of ring buffer:
+        //  0x100  --> 0x200  --> 0x300
+        //  trb 1  |   trb 3  |   trb 5
+        //  trb 2  |   trb 4  |   trb 6
+        //  l trb  -   l trb  -   l trb to 0x100
+        let mut trb = NormalTrb::new();
+        trb.set_trb_type(TrbType::Normal as u8);
+        trb.set_data_buffer(1);
+        trb.set_chain(1);
+        gm.write_obj_at_addr(trb.clone(), GuestAddress(0x100))
+            .unwrap();
+
+        trb.set_data_buffer(2);
+        gm.write_obj_at_addr(trb, GuestAddress(0x100 + trb_size))
+            .unwrap();
+
+        let mut ltrb = LinkTrb::new();
+        ltrb.set_trb_type(TrbType::Link as u8);
+        ltrb.set_ring_segment_pointer(0x200);
+        gm.write_obj_at_addr(ltrb, GuestAddress(0x100 + 2 * trb_size))
+            .unwrap();
+
+        trb.set_data_buffer(3);
+        gm.write_obj_at_addr(trb, GuestAddress(0x200)).unwrap();
+
+        // Chain bit is false.
+        trb.set_data_buffer(4);
+        trb.set_chain(0);
+        gm.write_obj_at_addr(trb, GuestAddress(0x200 + 1 * trb_size))
+            .unwrap();
+
+        ltrb.set_ring_segment_pointer(0x300);
+        gm.write_obj_at_addr(ltrb, GuestAddress(0x200 + 2 * trb_size))
+            .unwrap();
+
+        trb.set_data_buffer(5);
+        trb.set_chain(1);
+        gm.write_obj_at_addr(trb, GuestAddress(0x300)).unwrap();
+
+        // Chain bit is false.
+        trb.set_data_buffer(6);
+        trb.set_chain(0);
+        gm.write_obj_at_addr(trb, GuestAddress(0x300 + 1 * trb_size))
+            .unwrap();
+
+        ltrb.set_ring_segment_pointer(0x100);
+        gm.write_obj_at_addr(ltrb, GuestAddress(0x300 + 2 * trb_size))
+            .unwrap();
+        gm
+    }
+
+    #[test]
+    fn test_ring_buffer_controller() {
+        let (tx, rx) = channel();
+        let mem = setup_mem();
+        let (l, j) = EventLoop::start(None).unwrap();
+        let l = Arc::new(l);
+        let controller = RingBufferController::new_with_handler(
+            "".to_string(),
+            mem,
+            l.clone(),
+            TestHandler { sender: tx },
+        )
+        .unwrap();
+        controller.set_dequeue_pointer(GuestAddress(0x100));
+        controller.set_consumer_cycle_state(false);
+        controller.start();
+        // TRB parameters arrive in ring order across both descriptors.
+        assert_eq!(rx.recv().unwrap(), 1);
+        assert_eq!(rx.recv().unwrap(), 2);
+        assert_eq!(rx.recv().unwrap(), 3);
+        assert_eq!(rx.recv().unwrap(), 4);
+        assert_eq!(rx.recv().unwrap(), 5);
+        assert_eq!(rx.recv().unwrap(), 6);
+        l.stop();
+        j.join().unwrap();
+    }
+}
diff --git a/devices/src/usb/xhci/ring_buffer_stop_cb.rs b/devices/src/usb/xhci/ring_buffer_stop_cb.rs
new file mode 100644
index 0000000..29b3aa1
--- /dev/null
+++ b/devices/src/usb/xhci/ring_buffer_stop_cb.rs
@@ -0,0 +1,56 @@
+// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+use std::sync::{Arc, Mutex};
+
+/// RingBufferStopCallback wraps a callback. The callback will be invoked when the last instance
+/// of RingBufferStopCallback and its clones is dropped.
+///
+/// The callback might not be invoked in certain cases. Don't depend on this for safety.
+#[derive(Clone)]
+pub struct RingBufferStopCallback {
+    // Shared via Arc so clones count toward the same drop; the Mutex makes the
+    // wrapper Send + Sync even though the closure is FnMut.
+    inner: Arc<Mutex<RingBufferStopCallbackInner>>,
+}
+
+impl RingBufferStopCallback {
+    /// Create new callback from closure.
+    pub fn new<C: 'static + FnMut() + Send>(cb: C) -> RingBufferStopCallback {
+        RingBufferStopCallback {
+            inner: Arc::new(Mutex::new(RingBufferStopCallbackInner {
+                callback: Box::new(cb),
+            })),
+        }
+    }
+}
+
+// State behind the Arc; exists solely so its Drop impl runs the closure exactly
+// once, when the final clone of the wrapper goes away.
+struct RingBufferStopCallbackInner {
+    callback: Box<FnMut() + Send>,
+}
+
+impl Drop for RingBufferStopCallbackInner {
+    fn drop(&mut self) {
+        (self.callback)();
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use std::sync::{Arc, Mutex};
+
+    // Consumes (drops) one clone of the callback.
+    fn task(_: RingBufferStopCallback) {}
+
+    #[test]
+    fn simple_raii_callback() {
+        let a = Arc::new(Mutex::new(0));
+        let ac = a.clone();
+        let cb = RingBufferStopCallback::new(move || {
+            *ac.lock().unwrap() = 1;
+        });
+        task(cb.clone());
+        task(cb.clone());
+        task(cb);
+        // The closure must have run once the last clone (`cb` itself) is gone.
+        assert_eq!(*a.lock().unwrap(), 1);
+    }
+}