summary refs log tree commit diff
path: root/devices
diff options
context:
space:
mode:
authorAlyssa Ross <hi@alyssa.is>2020-04-10 13:44:58 +0000
committerAlyssa Ross <hi@alyssa.is>2020-04-10 13:45:14 +0000
commit8404e234c3428a682dfd5ee900936a8032243ba7 (patch)
tree458d0c9db4e145c1ace3ea7e50c3a50a45f020c3 /devices
parentd1ea7fc8d6b750ba75df719fb932984ab1ef5f11 (diff)
parent4ee9bffbd5722ac6602abaac6f691917add12f48 (diff)
downloadcrosvm-8404e234c3428a682dfd5ee900936a8032243ba7.tar
crosvm-8404e234c3428a682dfd5ee900936a8032243ba7.tar.gz
crosvm-8404e234c3428a682dfd5ee900936a8032243ba7.tar.bz2
crosvm-8404e234c3428a682dfd5ee900936a8032243ba7.tar.lz
crosvm-8404e234c3428a682dfd5ee900936a8032243ba7.tar.xz
crosvm-8404e234c3428a682dfd5ee900936a8032243ba7.tar.zst
crosvm-8404e234c3428a682dfd5ee900936a8032243ba7.zip
Merge remote-tracking branch 'origin/master'
Diffstat (limited to 'devices')
-rw-r--r--devices/src/pci/ac97.rs3
-rw-r--r--devices/src/pci/ac97_bus_master.rs10
-rw-r--r--devices/src/pci/vfio_pci.rs2
-rw-r--r--devices/src/virtio/balloon.rs149
-rw-r--r--devices/src/virtio/fs/passthrough.rs174
-rw-r--r--devices/src/virtio/gpu/mod.rs8
-rw-r--r--devices/src/virtio/gpu/protocol.rs38
-rw-r--r--devices/src/virtio/gpu/virtio_3d_backend.rs17
-rw-r--r--devices/src/virtio/net.rs338
-rw-r--r--devices/src/virtio/pmem.rs60
-rw-r--r--devices/src/virtio/vhost/net.rs2
11 files changed, 561 insertions, 240 deletions
diff --git a/devices/src/pci/ac97.rs b/devices/src/pci/ac97.rs
index 5f59165..d061746 100644
--- a/devices/src/pci/ac97.rs
+++ b/devices/src/pci/ac97.rs
@@ -12,7 +12,7 @@ use audio_streams::{
     shm_streams::{NullShmStreamSource, ShmStreamSource},
     StreamEffect,
 };
-use libcras::CrasClient;
+use libcras::{CrasClient, CrasClientType};
 use resources::{Alloc, MmioType, SystemAllocator};
 use sys_util::{error, EventFd, GuestMemory};
 
@@ -119,6 +119,7 @@ impl Ac97Dev {
     fn create_cras_audio_device(params: Ac97Parameters, mem: GuestMemory) -> Result<Ac97Dev> {
         let mut server =
             Box::new(CrasClient::new().map_err(|e| pci_device::Error::CreateCrasClientFailed(e))?);
+        server.set_client_type(CrasClientType::CRAS_CLIENT_TYPE_CROSVM);
         if params.capture {
             server.enable_cras_capture();
         }
diff --git a/devices/src/pci/ac97_bus_master.rs b/devices/src/pci/ac97_bus_master.rs
index 5f4ca75..809f31f 100644
--- a/devices/src/pci/ac97_bus_master.rs
+++ b/devices/src/pci/ac97_bus_master.rs
@@ -5,6 +5,7 @@
 use std;
 use std::collections::VecDeque;
 use std::convert::AsRef;
+use std::convert::TryInto;
 use std::error::Error;
 use std::fmt::{self, Display};
 use std::os::unix::io::{AsRawFd, RawFd};
@@ -106,6 +107,8 @@ type GuestMemoryResult<T> = std::result::Result<T, GuestMemoryError>;
 enum AudioError {
     // Failed to create a new stream.
     CreateStream(Box<dyn Error>),
+    // Invalid buffer offset received from the audio server.
+    InvalidBufferOffset,
     // Guest did not provide a buffer when needed.
     NoBufferAvailable,
     // Failure to read guest memory.
@@ -124,6 +127,7 @@ impl Display for AudioError {
 
         match self {
             CreateStream(e) => write!(f, "Failed to create audio stream: {}.", e),
+            InvalidBufferOffset => write!(f, "Offset > max usize"),
             NoBufferAvailable => write!(f, "No buffer was available from the Guest"),
             ReadingGuestError(e) => write!(f, "Failed to read guest memory: {}.", e),
             RespondRequest(e) => write!(f, "Failed to respond to the ServerRequest: {}", e),
@@ -610,7 +614,7 @@ fn get_buffer_offset(
     func_regs: &Ac97FunctionRegs,
     mem: &GuestMemory,
     index: u8,
-) -> GuestMemoryResult<usize> {
+) -> GuestMemoryResult<u64> {
     let descriptor_addr = func_regs.bdbar + u32::from(index) * DESCRIPTOR_LENGTH as u32;
     let buffer_addr_reg: u32 = mem
         .read_obj_from_addr(GuestAddress(u64::from(descriptor_addr)))
@@ -673,7 +677,9 @@ fn next_guest_buffer<'a>(
         return Ok(None);
     }
 
-    let offset = get_buffer_offset(func_regs, mem, index)?;
+    let offset = get_buffer_offset(func_regs, mem, index)?
+        .try_into()
+        .map_err(|_| AudioError::InvalidBufferOffset)?;
     let frames = get_buffer_samples(func_regs, mem, index)? / DEVICE_CHANNEL_COUNT;
 
     Ok(Some(GuestBuffer {
diff --git a/devices/src/pci/vfio_pci.rs b/devices/src/pci/vfio_pci.rs
index 4d5ad9e..d1b6f2b 100644
--- a/devices/src/pci/vfio_pci.rs
+++ b/devices/src/pci/vfio_pci.rs
@@ -709,7 +709,7 @@ impl VfioPciDevice {
                 let mmap_size = mmap.size;
                 let guest_map_start = bar_addr + mmap_offset;
                 let region_offset = self.device.get_region_offset(index);
-                let offset: usize = (region_offset + mmap_offset) as usize;
+                let offset = region_offset + mmap_offset;
                 if self
                     .vm_socket_mem
                     .send(&VmMemoryRequest::RegisterMmapMemory {
diff --git a/devices/src/virtio/balloon.rs b/devices/src/virtio/balloon.rs
index 35964e2..3e6d602 100644
--- a/devices/src/virtio/balloon.rs
+++ b/devices/src/virtio/balloon.rs
@@ -9,12 +9,14 @@ use std::sync::atomic::{AtomicUsize, Ordering};
 use std::sync::Arc;
 use std::thread;
 
-use data_model::{DataInit, Le32};
-use msg_socket::MsgReceiver;
+use data_model::{DataInit, Le16, Le32, Le64};
+use msg_socket::{MsgReceiver, MsgSender};
 use sys_util::{
     self, error, info, warn, EventFd, GuestAddress, GuestMemory, PollContext, PollToken,
 };
-use vm_control::{BalloonControlCommand, BalloonControlResponseSocket};
+use vm_control::{
+    BalloonControlCommand, BalloonControlResponseSocket, BalloonControlResult, BalloonStats,
+};
 
 use super::{
     copy_config, Interrupt, Queue, Reader, VirtioDevice, TYPE_BALLOON, VIRTIO_F_VERSION_1,
@@ -41,14 +43,14 @@ impl Display for BalloonError {
 }
 
 // Balloon has three virt IO queues: Inflate, Deflate, and Stats.
-// Stats is currently not used.
 const QUEUE_SIZE: u16 = 128;
-const QUEUE_SIZES: &[u16] = &[QUEUE_SIZE, QUEUE_SIZE];
+const QUEUE_SIZES: &[u16] = &[QUEUE_SIZE, QUEUE_SIZE, QUEUE_SIZE];
 
 const VIRTIO_BALLOON_PFN_SHIFT: u32 = 12;
 
 // The feature bitmap for virtio balloon
 const VIRTIO_BALLOON_F_MUST_TELL_HOST: u32 = 0; // Tell before reclaiming pages
+const VIRTIO_BALLOON_F_STATS_VQ: u32 = 1; // Stats reporting enabled
 const VIRTIO_BALLOON_F_DEFLATE_ON_OOM: u32 = 2; // Deflate balloon on OOM
 
 // virtio_balloon_config is the ballon device configuration space defined by the virtio spec.
@@ -69,11 +71,54 @@ struct BalloonConfig {
     actual_pages: AtomicUsize,
 }
 
+// The constants defining stats types in virtio_baloon_stat
+const VIRTIO_BALLOON_S_SWAP_IN: u16 = 0;
+const VIRTIO_BALLOON_S_SWAP_OUT: u16 = 1;
+const VIRTIO_BALLOON_S_MAJFLT: u16 = 2;
+const VIRTIO_BALLOON_S_MINFLT: u16 = 3;
+const VIRTIO_BALLOON_S_MEMFREE: u16 = 4;
+const VIRTIO_BALLOON_S_MEMTOT: u16 = 5;
+const VIRTIO_BALLOON_S_AVAIL: u16 = 6;
+const VIRTIO_BALLOON_S_CACHES: u16 = 7;
+const VIRTIO_BALLOON_S_HTLB_PGALLOC: u16 = 8;
+const VIRTIO_BALLOON_S_HTLB_PGFAIL: u16 = 9;
+
+// BalloonStat is used to deserialize stats from the stats_queue.
+#[derive(Copy, Clone)]
+#[repr(C, packed)]
+struct BalloonStat {
+    tag: Le16,
+    val: Le64,
+}
+// Safe because it only has data.
+unsafe impl DataInit for BalloonStat {}
+
+impl BalloonStat {
+    fn update_stats(&self, stats: &mut BalloonStats) {
+        let val = Some(self.val.to_native());
+        match self.tag.to_native() {
+            VIRTIO_BALLOON_S_SWAP_IN => stats.swap_in = val,
+            VIRTIO_BALLOON_S_SWAP_OUT => stats.swap_out = val,
+            VIRTIO_BALLOON_S_MAJFLT => stats.major_faults = val,
+            VIRTIO_BALLOON_S_MINFLT => stats.minor_faults = val,
+            VIRTIO_BALLOON_S_MEMFREE => stats.free_memory = val,
+            VIRTIO_BALLOON_S_MEMTOT => stats.total_memory = val,
+            VIRTIO_BALLOON_S_AVAIL => stats.available_memory = val,
+            VIRTIO_BALLOON_S_CACHES => stats.disk_caches = val,
+            VIRTIO_BALLOON_S_HTLB_PGALLOC => stats.hugetlb_allocations = val,
+            VIRTIO_BALLOON_S_HTLB_PGFAIL => stats.hugetlb_failures = val,
+            _ => (),
+        }
+    }
+}
+
 struct Worker {
     interrupt: Interrupt,
     mem: GuestMemory,
     inflate_queue: Queue,
     deflate_queue: Queue,
+    stats_queue: Queue,
+    stats_desc_index: Option<u16>,
     config: Arc<BalloonConfig>,
     command_socket: BalloonControlResponseSocket,
 }
@@ -100,27 +145,16 @@ impl Worker {
                         continue;
                     }
                 };
-                let data_length = reader.available_bytes();
-
-                if data_length % 4 != 0 {
-                    error!("invalid inflate buffer size: {}", data_length);
-                    queue.add_used(&self.mem, index, 0);
-                    needs_interrupt = true;
-                    continue;
-                }
-
-                let num_addrs = data_length / 4;
-                for _ in 0..num_addrs as usize {
-                    let guest_input = match reader.read_obj::<Le32>() {
-                        Ok(a) => a.to_native(),
-                        Err(err) => {
-                            error!("error while reading unused pages: {}", err);
+                for res in reader.iter::<Le32>() {
+                    let pfn = match res {
+                        Ok(pfn) => pfn,
+                        Err(e) => {
+                            error!("error while reading unused pages: {}", e);
                             break;
                         }
                     };
                     let guest_address =
-                        GuestAddress((u64::from(guest_input)) << VIRTIO_BALLOON_PFN_SHIFT);
-
+                        GuestAddress((u64::from(pfn.to_native())) << VIRTIO_BALLOON_PFN_SHIFT);
                     if self
                         .mem
                         .remove_range(guest_address, 1 << VIRTIO_BALLOON_PFN_SHIFT)
@@ -131,7 +165,6 @@ impl Worker {
                     }
                 }
             }
-
             queue.add_used(&self.mem, index, 0);
             needs_interrupt = true;
         }
@@ -139,11 +172,57 @@ impl Worker {
         needs_interrupt
     }
 
+    fn process_stats(&mut self) {
+        let queue = &mut self.stats_queue;
+        while let Some(stats_desc) = queue.pop(&self.mem) {
+            if let Some(prev_desc) = self.stats_desc_index {
+                // We shouldn't ever have an extra buffer if the driver follows
+                // the protocol, but return it if we find one.
+                warn!("balloon: driver is not compliant, more than one stats buffer received");
+                queue.add_used(&self.mem, prev_desc, 0);
+            }
+            self.stats_desc_index = Some(stats_desc.index);
+            let mut reader = match Reader::new(&self.mem, stats_desc) {
+                Ok(r) => r,
+                Err(e) => {
+                    error!("balloon: failed to create reader: {}", e);
+                    continue;
+                }
+            };
+            let mut stats: BalloonStats = Default::default();
+            for res in reader.iter::<BalloonStat>() {
+                match res {
+                    Ok(stat) => stat.update_stats(&mut stats),
+                    Err(e) => {
+                        error!("error while reading stats: {}", e);
+                        break;
+                    }
+                };
+            }
+            let actual_pages = self.config.actual_pages.load(Ordering::Relaxed) as u64;
+            let result = BalloonControlResult::Stats {
+                balloon_actual: actual_pages << VIRTIO_BALLOON_PFN_SHIFT,
+                stats,
+            };
+            if let Err(e) = self.command_socket.send(&result) {
+                warn!("failed to send stats result: {}", e);
+            }
+        }
+    }
+
+    fn request_stats(&mut self) {
+        if let Some(index) = self.stats_desc_index.take() {
+            self.stats_queue.add_used(&self.mem, index, 0);
+            self.interrupt.signal_used_queue(self.stats_queue.vector);
+        }
+    }
+
     fn run(&mut self, mut queue_evts: Vec<EventFd>, kill_evt: EventFd) {
         #[derive(PartialEq, PollToken)]
         enum Token {
             Inflate,
             Deflate,
+            Stats,
             CommandSocket,
             InterruptResample,
             Kill,
@@ -151,10 +230,12 @@ impl Worker {
 
         let inflate_queue_evt = queue_evts.remove(0);
         let deflate_queue_evt = queue_evts.remove(0);
+        let stats_queue_evt = queue_evts.remove(0);
 
         let poll_ctx: PollContext<Token> = match PollContext::build_with(&[
             (&inflate_queue_evt, Token::Inflate),
             (&deflate_queue_evt, Token::Deflate),
+            (&stats_queue_evt, Token::Stats),
             (&self.command_socket, Token::CommandSocket),
             (self.interrupt.get_resample_evt(), Token::InterruptResample),
             (&kill_evt, Token::Kill),
@@ -193,6 +274,13 @@ impl Worker {
                         }
                         needs_interrupt_deflate |= self.process_inflate_deflate(false);
                     }
+                    Token::Stats => {
+                        if let Err(e) = stats_queue_evt.read() {
+                            error!("failed reading stats queue EventFd: {}", e);
+                            break 'poll;
+                        }
+                        self.process_stats();
+                    }
                     Token::CommandSocket => {
                         if let Ok(req) = self.command_socket.recv() {
                             match req {
@@ -204,6 +292,9 @@ impl Worker {
                                     self.config.num_pages.store(num_pages, Ordering::Relaxed);
                                     self.interrupt.signal_config_changed();
                                 }
+                                BalloonControlCommand::Stats => {
+                                    self.request_stats();
+                                }
                             };
                         }
                     }
@@ -252,8 +343,10 @@ impl Balloon {
             }),
             kill_evt: None,
             worker_thread: None,
-            // TODO(dgreid) - Add stats queue feature.
-            features: 1 << VIRTIO_BALLOON_F_MUST_TELL_HOST | 1 << VIRTIO_BALLOON_F_DEFLATE_ON_OOM,
+            features: 1 << VIRTIO_BALLOON_F_MUST_TELL_HOST
+                | 1 << VIRTIO_BALLOON_F_STATS_VQ
+                | 1 << VIRTIO_BALLOON_F_DEFLATE_ON_OOM
+                | 1 << VIRTIO_F_VERSION_1,
         })
     }
 
@@ -306,9 +399,7 @@ impl VirtioDevice for Balloon {
     }
 
     fn features(&self) -> u64 {
-        1 << VIRTIO_BALLOON_F_MUST_TELL_HOST
-            | 1 << VIRTIO_BALLOON_F_DEFLATE_ON_OOM
-            | 1 << VIRTIO_F_VERSION_1
+        self.features
     }
 
     fn ack_features(&mut self, value: u64) {
@@ -345,6 +436,8 @@ impl VirtioDevice for Balloon {
                     mem,
                     inflate_queue: queues.remove(0),
                     deflate_queue: queues.remove(0),
+                    stats_queue: queues.remove(0),
+                    stats_desc_index: None,
                     command_socket,
                     config,
                 };
diff --git a/devices/src/virtio/fs/passthrough.rs b/devices/src/virtio/fs/passthrough.rs
index 419d466..d2034ba 100644
--- a/devices/src/virtio/fs/passthrough.rs
+++ b/devices/src/virtio/fs/passthrough.rs
@@ -11,7 +11,7 @@ use std::mem::{self, size_of, MaybeUninit};
 use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
 use std::str::FromStr;
 use std::sync::atomic::{AtomicBool, AtomicU64, Ordering};
-use std::sync::{Arc, RwLock};
+use std::sync::Arc;
 use std::time::Duration;
 
 use data_model::DataInit;
@@ -297,12 +297,12 @@ pub struct PassthroughFs {
     // the `O_PATH` option so they cannot be used for reading or writing any data. See the
     // documentation of the `O_PATH` flag in `open(2)` for more details on what one can and cannot
     // do with an fd opened with this flag.
-    inodes: RwLock<MultikeyBTreeMap<Inode, InodeAltKey, Arc<InodeData>>>,
+    inodes: Mutex<MultikeyBTreeMap<Inode, InodeAltKey, Arc<InodeData>>>,
     next_inode: AtomicU64,
 
     // File descriptors for open files and directories. Unlike the fds in `inodes`, these _can_ be
     // used for reading and writing data.
-    handles: RwLock<BTreeMap<Handle, Arc<HandleData>>>,
+    handles: Mutex<BTreeMap<Handle, Arc<HandleData>>>,
     next_handle: AtomicU64,
 
     // File descriptor pointing to the `/proc` directory. This is used to convert an fd from
@@ -339,10 +339,10 @@ impl PassthroughFs {
         let proc = unsafe { File::from_raw_fd(fd) };
 
         Ok(PassthroughFs {
-            inodes: RwLock::new(MultikeyBTreeMap::new()),
+            inodes: Mutex::new(MultikeyBTreeMap::new()),
             next_inode: AtomicU64::new(fuse::ROOT_ID + 1),
 
-            handles: RwLock::new(BTreeMap::new()),
+            handles: Mutex::new(BTreeMap::new()),
             next_handle: AtomicU64::new(0),
 
             proc,
@@ -356,11 +356,41 @@ impl PassthroughFs {
         vec![self.proc.as_raw_fd()]
     }
 
+    fn get_path(&self, inode: Inode) -> io::Result<CString> {
+        let data = self
+            .inodes
+            .lock()
+            .get(&inode)
+            .map(Arc::clone)
+            .ok_or_else(ebadf)?;
+
+        let mut buf = Vec::with_capacity(libc::PATH_MAX as usize);
+        buf.resize(libc::PATH_MAX as usize, 0);
+
+        let path = CString::new(format!("self/fd/{}", data.file.as_raw_fd()))
+            .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?;
+
+        // Safe because this will only modify the contents of `buf` and we check the return value.
+        let res = unsafe {
+            libc::readlinkat(
+                self.proc.as_raw_fd(),
+                path.as_ptr(),
+                buf.as_mut_ptr() as *mut libc::c_char,
+                buf.len(),
+            )
+        };
+        if res < 0 {
+            return Err(io::Error::last_os_error());
+        }
+
+        buf.resize(res as usize, 0);
+        CString::new(buf).map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))
+    }
+
     fn open_inode(&self, inode: Inode, mut flags: i32) -> io::Result<File> {
         let data = self
             .inodes
-            .read()
-            .unwrap()
+            .lock()
             .get(&inode)
             .map(Arc::clone)
             .ok_or_else(ebadf)?;
@@ -409,8 +439,7 @@ impl PassthroughFs {
     fn do_lookup(&self, parent: Inode, name: &CStr) -> io::Result<Entry> {
         let p = self
             .inodes
-            .read()
-            .unwrap()
+            .lock()
             .get(&parent)
             .map(Arc::clone)
             .ok_or_else(ebadf)?;
@@ -436,7 +465,7 @@ impl PassthroughFs {
             ino: st.st_ino,
             dev: st.st_dev,
         };
-        let data = self.inodes.read().unwrap().get_alt(&altkey).map(Arc::clone);
+        let data = self.inodes.lock().get_alt(&altkey).map(Arc::clone);
 
         let inode = if let Some(data) = data {
             // Matches with the release store in `forget`.
@@ -447,7 +476,7 @@ impl PassthroughFs {
             // into the inode list.  However, since each of those will get a unique Inode
             // value and unique file descriptors this shouldn't be that much of a problem.
             let inode = self.next_inode.fetch_add(1, Ordering::Relaxed);
-            self.inodes.write().unwrap().insert(
+            self.inodes.lock().insert(
                 inode,
                 InodeAltKey {
                     ino: st.st_ino,
@@ -489,8 +518,7 @@ impl PassthroughFs {
 
         let data = self
             .handles
-            .read()
-            .unwrap()
+            .lock()
             .get(&handle)
             .filter(|hd| hd.inode == inode)
             .map(Arc::clone)
@@ -583,7 +611,7 @@ impl PassthroughFs {
         let handle = self.next_handle.fetch_add(1, Ordering::Relaxed);
         let data = HandleData { inode, file };
 
-        self.handles.write().unwrap().insert(handle, Arc::new(data));
+        self.handles.lock().insert(handle, Arc::new(data));
 
         let mut opts = OpenOptions::empty();
         match self.cfg.cache_policy {
@@ -606,7 +634,7 @@ impl PassthroughFs {
     }
 
     fn do_release(&self, inode: Inode, handle: Handle) -> io::Result<()> {
-        let mut handles = self.handles.write().unwrap();
+        let mut handles = self.handles.lock();
 
         if let btree_map::Entry::Occupied(e) = handles.entry(handle) {
             if e.get().inode == inode {
@@ -623,8 +651,7 @@ impl PassthroughFs {
     fn do_getattr(&self, inode: Inode) -> io::Result<(libc::stat64, Duration)> {
         let data = self
             .inodes
-            .read()
-            .unwrap()
+            .lock()
             .get(&inode)
             .map(Arc::clone)
             .ok_or_else(ebadf)?;
@@ -637,8 +664,7 @@ impl PassthroughFs {
     fn do_unlink(&self, parent: Inode, name: &CStr, flags: libc::c_int) -> io::Result<()> {
         let data = self
             .inodes
-            .read()
-            .unwrap()
+            .lock()
             .get(&parent)
             .map(Arc::clone)
             .ok_or_else(ebadf)?;
@@ -655,8 +681,7 @@ impl PassthroughFs {
     fn get_encryption_policy(&self, handle: Handle) -> io::Result<IoctlReply> {
         let data = self
             .handles
-            .read()
-            .unwrap()
+            .lock()
             .get(&handle)
             .map(Arc::clone)
             .ok_or_else(ebadf)?;
@@ -679,8 +704,7 @@ impl PassthroughFs {
     fn set_encryption_policy<R: io::Read>(&self, handle: Handle, r: R) -> io::Result<IoctlReply> {
         let data = self
             .handles
-            .read()
-            .unwrap()
+            .lock()
             .get(&handle)
             .map(Arc::clone)
             .ok_or_else(ebadf)?;
@@ -766,7 +790,7 @@ impl FileSystem for PassthroughFs {
         // we want the client to be able to set all the bits in the mode.
         unsafe { libc::umask(0o000) };
 
-        let mut inodes = self.inodes.write().unwrap();
+        let mut inodes = self.inodes.lock();
 
         // Not sure why the root inode gets a refcount of 2 but that's what libfuse does.
         inodes.insert(
@@ -791,15 +815,14 @@ impl FileSystem for PassthroughFs {
     }
 
     fn destroy(&self) {
-        self.handles.write().unwrap().clear();
-        self.inodes.write().unwrap().clear();
+        self.handles.lock().clear();
+        self.inodes.lock().clear();
     }
 
     fn statfs(&self, _ctx: Context, inode: Inode) -> io::Result<libc::statvfs64> {
         let data = self
             .inodes
-            .read()
-            .unwrap()
+            .lock()
             .get(&inode)
             .map(Arc::clone)
             .ok_or_else(ebadf)?;
@@ -821,13 +844,13 @@ impl FileSystem for PassthroughFs {
     }
 
     fn forget(&self, _ctx: Context, inode: Inode, count: u64) {
-        let mut inodes = self.inodes.write().unwrap();
+        let mut inodes = self.inodes.lock();
 
         forget_one(&mut inodes, inode, count)
     }
 
     fn batch_forget(&self, _ctx: Context, requests: Vec<(Inode, u64)>) {
-        let mut inodes = self.inodes.write().unwrap();
+        let mut inodes = self.inodes.lock();
 
         for (inode, count) in requests {
             forget_one(&mut inodes, inode, count)
@@ -864,8 +887,7 @@ impl FileSystem for PassthroughFs {
         let (_uid, _gid) = set_creds(ctx.uid, ctx.gid)?;
         let data = self
             .inodes
-            .read()
-            .unwrap()
+            .lock()
             .get(&parent)
             .map(Arc::clone)
             .ok_or_else(ebadf)?;
@@ -958,8 +980,7 @@ impl FileSystem for PassthroughFs {
         let (_uid, _gid) = set_creds(ctx.uid, ctx.gid)?;
         let data = self
             .inodes
-            .read()
-            .unwrap()
+            .lock()
             .get(&parent)
             .map(Arc::clone)
             .ok_or_else(ebadf)?;
@@ -991,7 +1012,7 @@ impl FileSystem for PassthroughFs {
             file,
         };
 
-        self.handles.write().unwrap().insert(handle, Arc::new(data));
+        self.handles.lock().insert(handle, Arc::new(data));
 
         let mut opts = OpenOptions::empty();
         match self.cfg.cache_policy {
@@ -1020,8 +1041,7 @@ impl FileSystem for PassthroughFs {
     ) -> io::Result<usize> {
         let data = self
             .handles
-            .read()
-            .unwrap()
+            .lock()
             .get(&handle)
             .filter(|hd| hd.inode == inode)
             .map(Arc::clone)
@@ -1048,8 +1068,7 @@ impl FileSystem for PassthroughFs {
         let (_uid, _gid) = set_creds(ctx.uid, ctx.gid)?;
         let data = self
             .handles
-            .read()
-            .unwrap()
+            .lock()
             .get(&handle)
             .filter(|hd| hd.inode == inode)
             .map(Arc::clone)
@@ -1078,8 +1097,7 @@ impl FileSystem for PassthroughFs {
     ) -> io::Result<(libc::stat64, Duration)> {
         let inode_data = self
             .inodes
-            .read()
-            .unwrap()
+            .lock()
             .get(&inode)
             .map(Arc::clone)
             .ok_or_else(ebadf)?;
@@ -1093,8 +1111,7 @@ impl FileSystem for PassthroughFs {
         let data = if let Some(handle) = handle {
             let hd = self
                 .handles
-                .read()
-                .unwrap()
+                .lock()
                 .get(&handle)
                 .filter(|hd| hd.inode == inode)
                 .map(Arc::clone)
@@ -1222,15 +1239,13 @@ impl FileSystem for PassthroughFs {
     ) -> io::Result<()> {
         let old_inode = self
             .inodes
-            .read()
-            .unwrap()
+            .lock()
             .get(&olddir)
             .map(Arc::clone)
             .ok_or_else(ebadf)?;
         let new_inode = self
             .inodes
-            .read()
-            .unwrap()
+            .lock()
             .get(&newdir)
             .map(Arc::clone)
             .ok_or_else(ebadf)?;
@@ -1267,8 +1282,7 @@ impl FileSystem for PassthroughFs {
         let (_uid, _gid) = set_creds(ctx.uid, ctx.gid)?;
         let data = self
             .inodes
-            .read()
-            .unwrap()
+            .lock()
             .get(&parent)
             .map(Arc::clone)
             .ok_or_else(ebadf)?;
@@ -1299,15 +1313,13 @@ impl FileSystem for PassthroughFs {
     ) -> io::Result<Entry> {
         let data = self
             .inodes
-            .read()
-            .unwrap()
+            .lock()
             .get(&inode)
             .map(Arc::clone)
             .ok_or_else(ebadf)?;
         let new_inode = self
             .inodes
-            .read()
-            .unwrap()
+            .lock()
             .get(&newparent)
             .map(Arc::clone)
             .ok_or_else(ebadf)?;
@@ -1342,8 +1354,7 @@ impl FileSystem for PassthroughFs {
         let (_uid, _gid) = set_creds(ctx.uid, ctx.gid)?;
         let data = self
             .inodes
-            .read()
-            .unwrap()
+            .lock()
             .get(&parent)
             .map(Arc::clone)
             .ok_or_else(ebadf)?;
@@ -1361,8 +1372,7 @@ impl FileSystem for PassthroughFs {
     fn readlink(&self, _ctx: Context, inode: Inode) -> io::Result<Vec<u8>> {
         let data = self
             .inodes
-            .read()
-            .unwrap()
+            .lock()
             .get(&inode)
             .map(Arc::clone)
             .ok_or_else(ebadf)?;
@@ -1399,8 +1409,7 @@ impl FileSystem for PassthroughFs {
     ) -> io::Result<()> {
         let data = self
             .handles
-            .read()
-            .unwrap()
+            .lock()
             .get(&handle)
             .filter(|hd| hd.inode == inode)
             .map(Arc::clone)
@@ -1426,8 +1435,7 @@ impl FileSystem for PassthroughFs {
     fn fsync(&self, _ctx: Context, inode: Inode, datasync: bool, handle: Handle) -> io::Result<()> {
         let data = self
             .handles
-            .read()
-            .unwrap()
+            .lock()
             .get(&handle)
             .filter(|hd| hd.inode == inode)
             .map(Arc::clone)
@@ -1464,8 +1472,7 @@ impl FileSystem for PassthroughFs {
     fn access(&self, ctx: Context, inode: Inode, mask: u32) -> io::Result<()> {
         let data = self
             .inodes
-            .read()
-            .unwrap()
+            .lock()
             .get(&inode)
             .map(Arc::clone)
             .ok_or_else(ebadf)?;
@@ -1521,14 +1528,12 @@ impl FileSystem for PassthroughFs {
         value: &[u8],
         flags: u32,
     ) -> io::Result<()> {
-        // The f{set,get,remove,list}xattr functions don't work on an fd opened with `O_PATH` so we
-        // need to get a new fd.
-        let file = self.open_inode(inode, libc::O_RDONLY | libc::O_NONBLOCK)?;
+        let path = self.get_path(inode)?;
 
         // Safe because this doesn't modify any memory and we check the return value.
         let res = unsafe {
-            libc::fsetxattr(
-                file.as_raw_fd(),
+            libc::lsetxattr(
+                path.as_ptr(),
                 name.as_ptr(),
                 value.as_ptr() as *const libc::c_void,
                 value.len(),
@@ -1549,17 +1554,15 @@ impl FileSystem for PassthroughFs {
         name: &CStr,
         size: u32,
     ) -> io::Result<GetxattrReply> {
-        // The f{set,get,remove,list}xattr functions don't work on an fd opened with `O_PATH` so we
-        // need to get a new fd.
-        let file = self.open_inode(inode, libc::O_RDONLY | libc::O_NONBLOCK)?;
+        let path = self.get_path(inode)?;
 
         let mut buf = Vec::with_capacity(size as usize);
         buf.resize(size as usize, 0);
 
         // Safe because this will only modify the contents of `buf`.
         let res = unsafe {
-            libc::fgetxattr(
-                file.as_raw_fd(),
+            libc::lgetxattr(
+                path.as_ptr(),
                 name.as_ptr(),
                 buf.as_mut_ptr() as *mut libc::c_void,
                 size as libc::size_t,
@@ -1578,17 +1581,15 @@ impl FileSystem for PassthroughFs {
     }
 
     fn listxattr(&self, _ctx: Context, inode: Inode, size: u32) -> io::Result<ListxattrReply> {
-        // The f{set,get,remove,list}xattr functions don't work on an fd opened with `O_PATH` so we
-        // need to get a new fd.
-        let file = self.open_inode(inode, libc::O_RDONLY | libc::O_NONBLOCK)?;
+        let path = self.get_path(inode)?;
 
         let mut buf = Vec::with_capacity(size as usize);
         buf.resize(size as usize, 0);
 
         // Safe because this will only modify the contents of `buf`.
         let res = unsafe {
-            libc::flistxattr(
-                file.as_raw_fd(),
+            libc::llistxattr(
+                path.as_ptr(),
                 buf.as_mut_ptr() as *mut libc::c_char,
                 size as libc::size_t,
             )
@@ -1606,12 +1607,10 @@ impl FileSystem for PassthroughFs {
     }
 
     fn removexattr(&self, _ctx: Context, inode: Inode, name: &CStr) -> io::Result<()> {
-        // The f{set,get,remove,list}xattr functions don't work on an fd opened with `O_PATH` so we
-        // need to get a new fd.
-        let file = self.open_inode(inode, libc::O_RDONLY | libc::O_NONBLOCK)?;
+        let path = self.get_path(inode)?;
 
         // Safe because this doesn't modify any memory and we check the return value.
-        let res = unsafe { libc::fremovexattr(file.as_raw_fd(), name.as_ptr()) };
+        let res = unsafe { libc::lremovexattr(path.as_ptr(), name.as_ptr()) };
 
         if res == 0 {
             Ok(())
@@ -1631,8 +1630,7 @@ impl FileSystem for PassthroughFs {
     ) -> io::Result<()> {
         let data = self
             .handles
-            .read()
-            .unwrap()
+            .lock()
             .get(&handle)
             .filter(|hd| hd.inode == inode)
             .map(Arc::clone)
@@ -1714,16 +1712,14 @@ impl FileSystem for PassthroughFs {
         let (_uid, _gid) = set_creds(ctx.uid, ctx.gid)?;
         let src_data = self
             .handles
-            .read()
-            .unwrap()
+            .lock()
             .get(&handle_src)
             .filter(|hd| hd.inode == inode_src)
             .map(Arc::clone)
             .ok_or_else(ebadf)?;
         let dst_data = self
             .handles
-            .read()
-            .unwrap()
+            .lock()
             .get(&handle_dst)
             .filter(|hd| hd.inode == inode_dst)
             .map(Arc::clone)
@@ -1738,7 +1734,7 @@ impl FileSystem for PassthroughFs {
                 src,
                 &offset_src,
                 dst,
-                offset_dst,
+                &offset_dst,
                 length,
                 flags,
             )
diff --git a/devices/src/virtio/gpu/mod.rs b/devices/src/virtio/gpu/mod.rs
index 93d004f..e8e7f4a 100644
--- a/devices/src/virtio/gpu/mod.rs
+++ b/devices/src/virtio/gpu/mod.rs
@@ -182,6 +182,10 @@ trait Backend {
     /// Detaches any backing memory from the given resource, if there is any.
     fn detach_backing(&mut self, id: u32) -> GpuResponse;
 
+    fn resource_assign_uuid(&mut self, _id: u32) -> GpuResponse {
+        GpuResponse::ErrUnspec
+    }
+
     /// Updates the cursor's memory to the given id, and sets its position to the given coordinates.
     fn update_cursor(&mut self, id: u32, x: u32, y: u32) -> GpuResponse;
 
@@ -497,6 +501,10 @@ impl Frontend {
             GpuCommand::MoveCursor(info) => self
                 .backend
                 .move_cursor(info.pos.x.into(), info.pos.y.into()),
+            GpuCommand::ResourceAssignUuid(info) => {
+                let resource_id = info.resource_id.to_native();
+                self.backend.resource_assign_uuid(resource_id)
+            }
             GpuCommand::GetCapsetInfo(info) => {
                 self.backend.get_capset_info(info.capset_index.to_native())
             }
diff --git a/devices/src/virtio/gpu/protocol.rs b/devices/src/virtio/gpu/protocol.rs
index c98e289..3df6975 100644
--- a/devices/src/virtio/gpu/protocol.rs
+++ b/devices/src/virtio/gpu/protocol.rs
@@ -38,6 +38,7 @@ pub const VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING: u32 = 0x107;
 pub const VIRTIO_GPU_CMD_GET_CAPSET_INFO: u32 = 0x108;
 pub const VIRTIO_GPU_CMD_GET_CAPSET: u32 = 0x109;
 pub const VIRTIO_GPU_CMD_GET_EDID: u32 = 0x10a;
+pub const VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID: u32 = 0x10b;
 
 /* 3d commands */
 pub const VIRTIO_GPU_CMD_CTX_CREATE: u32 = 0x200;
@@ -64,6 +65,7 @@ pub const VIRTIO_GPU_RESP_OK_CAPSET_INFO: u32 = 0x1102;
 pub const VIRTIO_GPU_RESP_OK_CAPSET: u32 = 0x1103;
 pub const VIRTIO_GPU_RESP_OK_RESOURCE_PLANE_INFO: u32 = 0x1104;
 pub const VIRTIO_GPU_RESP_OK_EDID: u32 = 0x1105;
+pub const VIRTIO_GPU_RESP_OK_RESOURCE_UUID: u32 = 0x1106;
 
 /* error responses */
 pub const VIRTIO_GPU_RESP_ERR_UNSPEC: u32 = 0x1200;
@@ -109,6 +111,7 @@ pub fn virtio_gpu_cmd_str(cmd: u32) -> &'static str {
         VIRTIO_GPU_CMD_CTX_DESTROY => "VIRTIO_GPU_CMD_CTX_DESTROY",
         VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE => "VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE",
         VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE => "VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE",
+        VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID => "VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID",
         VIRTIO_GPU_CMD_RESOURCE_CREATE_3D => "VIRTIO_GPU_CMD_RESOURCE_CREATE_3D",
         VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D => "VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D",
         VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D => "VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D",
@@ -123,6 +126,7 @@ pub fn virtio_gpu_cmd_str(cmd: u32) -> &'static str {
         VIRTIO_GPU_RESP_OK_CAPSET_INFO => "VIRTIO_GPU_RESP_OK_CAPSET_INFO",
         VIRTIO_GPU_RESP_OK_CAPSET => "VIRTIO_GPU_RESP_OK_CAPSET",
         VIRTIO_GPU_RESP_OK_RESOURCE_PLANE_INFO => "VIRTIO_GPU_RESP_OK_RESOURCE_PLANE_INFO",
+        VIRTIO_GPU_RESP_OK_RESOURCE_UUID => "VIRTIO_GPU_RESP_OK_RESOURCE_UUID",
         VIRTIO_GPU_RESP_ERR_UNSPEC => "VIRTIO_GPU_RESP_ERR_UNSPEC",
         VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY => "VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY",
         VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID => "VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID",
@@ -532,6 +536,25 @@ pub struct virtio_gpu_resource_unmap {
 
 unsafe impl DataInit for virtio_gpu_resource_unmap {}
 
+#[derive(Copy, Clone, Debug, Default)]
+#[repr(C)]
+pub struct virtio_gpu_resource_assign_uuid {
+    pub hdr: virtio_gpu_ctrl_hdr,
+    pub resource_id: Le32,
+    pub padding: Le32,
+}
+
+unsafe impl DataInit for virtio_gpu_resource_assign_uuid {}
+
+#[derive(Copy, Clone, Debug, Default)]
+#[repr(C)]
+pub struct virtio_gpu_resp_resource_uuid {
+    pub hdr: virtio_gpu_ctrl_hdr,
+    pub uuid: [u8; 16],
+}
+
+unsafe impl DataInit for virtio_gpu_resp_resource_uuid {}
+
 /* simple formats for fbcon/X use */
 pub const VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM: u32 = 1;
 pub const VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM: u32 = 2;
@@ -568,6 +591,7 @@ pub enum GpuCommand {
     ResourceUnmap(virtio_gpu_resource_unmap),
     UpdateCursor(virtio_gpu_update_cursor),
     MoveCursor(virtio_gpu_update_cursor),
+    ResourceAssignUuid(virtio_gpu_resource_assign_uuid),
 }
 
 /// An error indicating something went wrong decoding a `GpuCommand`. These correspond to
@@ -637,6 +661,7 @@ impl fmt::Debug for GpuCommand {
             ResourceUnmap(_info) => f.debug_struct("ResourceUnmap").finish(),
             UpdateCursor(_info) => f.debug_struct("UpdateCursor").finish(),
             MoveCursor(_info) => f.debug_struct("MoveCursor").finish(),
+            ResourceAssignUuid(_info) => f.debug_struct("ResourceAssignUuid").finish(),
         }
     }
 }
@@ -670,6 +695,7 @@ impl GpuCommand {
             VIRTIO_GPU_CMD_RESOURCE_UNMAP => ResourceUnmap(cmd.read_obj()?),
             VIRTIO_GPU_CMD_UPDATE_CURSOR => UpdateCursor(cmd.read_obj()?),
             VIRTIO_GPU_CMD_MOVE_CURSOR => MoveCursor(cmd.read_obj()?),
+            VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID => ResourceAssignUuid(cmd.read_obj()?),
             _ => return Err(GpuCommandDecodeError::InvalidType(hdr.type_.into())),
         })
     }
@@ -701,6 +727,7 @@ impl GpuCommand {
             ResourceUnmap(info) => &info.hdr,
             UpdateCursor(info) => &info.hdr,
             MoveCursor(info) => &info.hdr,
+            ResourceAssignUuid(info) => &info.hdr,
         }
     }
 }
@@ -726,6 +753,9 @@ pub enum GpuResponse {
         format_modifier: u64,
         plane_info: Vec<GpuResponsePlaneInfo>,
     },
+    OkResourceUuid {
+        uuid: [u8; 16],
+    },
     ErrUnspec,
     ErrOutOfMemory,
     ErrInvalidScanoutId,
@@ -859,6 +889,12 @@ impl GpuResponse {
                     size_of_val(&hdr)
                 }
             }
+            GpuResponse::OkResourceUuid { uuid } => {
+                let resp_info = virtio_gpu_resp_resource_uuid { hdr, uuid };
+
+                resp.write_obj(resp_info)?;
+                size_of_val(&resp_info)
+            }
             _ => {
                 resp.write_obj(hdr)?;
                 size_of_val(&hdr)
@@ -875,6 +911,7 @@ impl GpuResponse {
             GpuResponse::OkCapsetInfo { .. } => VIRTIO_GPU_RESP_OK_CAPSET_INFO,
             GpuResponse::OkCapset(_) => VIRTIO_GPU_RESP_OK_CAPSET,
             GpuResponse::OkResourcePlaneInfo { .. } => VIRTIO_GPU_RESP_OK_RESOURCE_PLANE_INFO,
+            GpuResponse::OkResourceUuid { .. } => VIRTIO_GPU_RESP_OK_RESOURCE_UUID,
             GpuResponse::ErrUnspec => VIRTIO_GPU_RESP_ERR_UNSPEC,
             GpuResponse::ErrOutOfMemory => VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY,
             GpuResponse::ErrInvalidScanoutId => VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID,
@@ -892,6 +929,7 @@ impl GpuResponse {
             GpuResponse::OkCapsetInfo { .. } => true,
             GpuResponse::OkCapset(_) => true,
             GpuResponse::OkResourcePlaneInfo { .. } => true,
+            GpuResponse::OkResourceUuid { .. } => true,
             _ => false,
         }
     }
diff --git a/devices/src/virtio/gpu/virtio_3d_backend.rs b/devices/src/virtio/gpu/virtio_3d_backend.rs
index 3ede8a6..692bedc 100644
--- a/devices/src/virtio/gpu/virtio_3d_backend.rs
+++ b/devices/src/virtio/gpu/virtio_3d_backend.rs
@@ -497,6 +497,23 @@ impl Backend for Virtio3DBackend {
         self.base.move_cursor(x, y)
     }
 
+    /// Returns a uuid for the resource.
+    fn resource_assign_uuid(&mut self, id: u32) -> GpuResponse {
+        match self.resources.entry(id) {
+            Entry::Vacant(_) => GpuResponse::ErrInvalidResourceId,
+            Entry::Occupied(_) => {
+                // TODO(stevensd): use real uuids once the virtio wayland protocol is updated to
+                // handle more than 32 bits. For now, the virtwl driver knows that the uuid is
+                // actually just the resource id.
+                let mut uuid: [u8; 16] = [0; 16];
+                for (idx, byte) in id.to_be_bytes().iter().enumerate() {
+                    uuid[12 + idx] = *byte;
+                }
+                GpuResponse::OkResourceUuid { uuid }
+            }
+        }
+    }
+
     /// Gets the renderer's capset information associated with `index`.
     fn get_capset_info(&self, index: u32) -> GpuResponse {
         let id = match index {
diff --git a/devices/src/virtio/net.rs b/devices/src/virtio/net.rs
index 44a39ab..a15ab03 100644
--- a/devices/src/virtio/net.rs
+++ b/devices/src/virtio/net.rs
@@ -9,24 +9,25 @@ use std::net::Ipv4Addr;
 use std::os::raw::c_uint;
 use std::os::unix::io::{AsRawFd, RawFd};
 use std::result;
+use std::sync::Arc;
 use std::thread;
 
-use data_model::{DataInit, Le64};
+use data_model::{DataInit, Le16, Le64};
 use net_sys;
 use net_util::{Error as TapError, MacAddress, TapT};
 use sys_util::Error as SysError;
 use sys_util::{error, warn, EventFd, GuestMemory, PollContext, PollToken, WatchingEvents};
 use virtio_sys::virtio_net::{
     virtio_net_hdr_v1, VIRTIO_NET_CTRL_GUEST_OFFLOADS, VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET,
-    VIRTIO_NET_ERR, VIRTIO_NET_OK,
+    VIRTIO_NET_CTRL_MQ, VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, VIRTIO_NET_ERR, VIRTIO_NET_OK,
 };
 use virtio_sys::{vhost, virtio_net};
 
-use super::{DescriptorError, Interrupt, Queue, Reader, VirtioDevice, Writer, TYPE_NET};
+use super::{
+    copy_config, DescriptorError, Interrupt, Queue, Reader, VirtioDevice, Writer, TYPE_NET,
+};
 
 const QUEUE_SIZE: u16 = 256;
-const NUM_QUEUES: usize = 3;
-const QUEUE_SIZES: &[u16] = &[QUEUE_SIZE, QUEUE_SIZE, QUEUE_SIZE];
 
 #[derive(Debug)]
 pub enum NetError {
@@ -132,17 +133,27 @@ fn virtio_features_to_tap_offload(features: u64) -> c_uint {
     tap_offloads
 }
 
+#[derive(Debug, Clone, Copy, Default)]
+#[repr(C)]
+struct VirtioNetConfig {
+    mac: [u8; 6],
+    status: Le16,
+    max_vq_pairs: Le16,
+    mtu: Le16,
+}
+
+// Safe because it only has data and has no implicit padding.
+unsafe impl DataInit for VirtioNetConfig {}
+
 struct Worker<T: TapT> {
-    interrupt: Interrupt,
+    interrupt: Arc<Interrupt>,
     mem: GuestMemory,
     rx_queue: Queue,
     tx_queue: Queue,
-    ctrl_queue: Queue,
+    ctrl_queue: Option<Queue>,
     tap: T,
-    // TODO(smbarber): http://crbug.com/753630
-    // Remove once MRG_RXBUF is supported and this variable is actually used.
-    #[allow(dead_code)]
     acked_features: u64,
+    vq_pairs: u16,
     kill_evt: EventFd,
 }
 
@@ -240,7 +251,12 @@ where
     }
 
     fn process_ctrl(&mut self) -> Result<(), NetError> {
-        while let Some(desc_chain) = self.ctrl_queue.pop(&self.mem) {
+        let ctrl_queue = match self.ctrl_queue.as_mut() {
+            Some(queue) => queue,
+            None => return Ok(()),
+        };
+
+        while let Some(desc_chain) = ctrl_queue.pop(&self.mem) {
             let index = desc_chain.index;
 
             let mut reader =
@@ -259,7 +275,7 @@ where
                         );
                         let ack = VIRTIO_NET_ERR as u8;
                         writer.write_all(&[ack]).map_err(NetError::WriteAck)?;
-                        self.ctrl_queue.add_used(&self.mem, index, 0);
+                        ctrl_queue.add_used(&self.mem, index, 0);
                         continue;
                     }
                     let offloads: Le64 = reader.read_obj().map_err(NetError::ReadCtrlData)?;
@@ -270,16 +286,34 @@ where
                     let ack = VIRTIO_NET_OK as u8;
                     writer.write_all(&[ack]).map_err(NetError::WriteAck)?;
                 }
+                VIRTIO_NET_CTRL_MQ => {
+                    if ctrl_hdr.cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET as u8 {
+                        let pairs: Le16 = reader.read_obj().map_err(NetError::ReadCtrlData)?;
+                        // Validate the request and acknowledge it immediately.
+                        if self.acked_features & 1 << virtio_net::VIRTIO_NET_F_MQ == 0
+                            || pairs.to_native() != self.vq_pairs
+                        {
+                            error!("Invalid VQ_PAIRS_SET cmd, driver request pairs: {}, device vq pairs: {}",
+                                   pairs.to_native(), self.vq_pairs);
+                            let ack = VIRTIO_NET_ERR as u8;
+                            writer.write_all(&[ack]).map_err(NetError::WriteAck)?;
+                            ctrl_queue.add_used(&self.mem, index, 0);
+                            continue;
+                        }
+                        let ack = VIRTIO_NET_OK as u8;
+                        writer.write_all(&[ack]).map_err(NetError::WriteAck)?;
+                    }
+                }
                 _ => warn!(
                     "unimplemented class for VIRTIO_NET_CTRL_GUEST_OFFLOADS: {}",
                     ctrl_hdr.class
                 ),
             }
 
-            self.ctrl_queue.add_used(&self.mem, index, 0);
+            ctrl_queue.add_used(&self.mem, index, 0);
         }
 
-        self.interrupt.signal_used_queue(self.ctrl_queue.vector);
+        self.interrupt.signal_used_queue(ctrl_queue.vector);
         Ok(())
     }
 
@@ -287,7 +321,7 @@ where
         &mut self,
         rx_queue_evt: EventFd,
         tx_queue_evt: EventFd,
-        ctrl_queue_evt: EventFd,
+        ctrl_queue_evt: Option<EventFd>,
     ) -> Result<(), NetError> {
         #[derive(PollToken)]
         enum Token {
@@ -309,12 +343,20 @@ where
             (&self.tap, Token::RxTap),
             (&rx_queue_evt, Token::RxQueue),
             (&tx_queue_evt, Token::TxQueue),
-            (&ctrl_queue_evt, Token::CtrlQueue),
-            (self.interrupt.get_resample_evt(), Token::InterruptResample),
             (&self.kill_evt, Token::Kill),
         ])
         .map_err(NetError::CreatePollContext)?;
 
+        if let Some(ctrl_evt) = &ctrl_queue_evt {
+            poll_ctx
+                .add(ctrl_evt, Token::CtrlQueue)
+                .map_err(NetError::CreatePollContext)?;
+            // Let CtrlQueue's thread handle InterruptResample also.
+            poll_ctx
+                .add(self.interrupt.get_resample_evt(), Token::InterruptResample)
+                .map_err(NetError::CreatePollContext)?;
+        }
+
         let mut tap_polling_enabled = true;
         'poll: loop {
             let events = poll_ctx.wait().map_err(NetError::PollError)?;
@@ -350,8 +392,12 @@ where
                         self.process_tx();
                     }
                     Token::CtrlQueue => {
-                        if let Err(e) = ctrl_queue_evt.read() {
-                            error!("net: error reading ctrl queue EventFd: {}", e);
+                        if let Some(ctrl_evt) = &ctrl_queue_evt {
+                            if let Err(e) = ctrl_evt.read() {
+                                error!("net: error reading ctrl queue EventFd: {}", e);
+                                break 'poll;
+                            }
+                        } else {
                             break 'poll;
                         }
                         if let Err(e) = self.process_ctrl() {
@@ -374,10 +420,11 @@ where
 }
 
 pub struct Net<T: TapT> {
-    workers_kill_evt: Option<EventFd>,
-    kill_evt: EventFd,
-    worker_thread: Option<thread::JoinHandle<Worker<T>>>,
-    tap: Option<T>,
+    queue_sizes: Box<[u16]>,
+    workers_kill_evt: Vec<EventFd>,
+    kill_evts: Vec<EventFd>,
+    worker_threads: Vec<thread::JoinHandle<Worker<T>>>,
+    taps: Vec<T>,
     avail_features: u64,
     acked_features: u64,
 }
@@ -392,8 +439,10 @@ where
         ip_addr: Ipv4Addr,
         netmask: Ipv4Addr,
         mac_addr: MacAddress,
+        vq_pairs: u16,
     ) -> Result<Net<T>, NetError> {
-        let tap: T = T::new(true).map_err(NetError::TapOpen)?;
+        let multi_queue = if vq_pairs > 1 { true } else { false };
+        let tap: T = T::new(true, multi_queue).map_err(NetError::TapOpen)?;
         tap.set_ip_addr(ip_addr).map_err(NetError::TapSetIp)?;
         tap.set_netmask(netmask).map_err(NetError::TapSetNetmask)?;
         tap.set_mac_address(mac_addr)
@@ -401,18 +450,22 @@ where
 
         tap.enable().map_err(NetError::TapEnable)?;
 
-        Net::from(tap)
+        Net::from(tap, vq_pairs)
     }
 
     /// Creates a new virtio network device from a tap device that has already been
     /// configured.
-    pub fn from(tap: T) -> Result<Net<T>, NetError> {
+    pub fn from(tap: T, vq_pairs: u16) -> Result<Net<T>, NetError> {
+        let taps = tap.into_mq_taps(vq_pairs).map_err(NetError::TapOpen)?;
+
         // This would also validate a tap created by Self::new(), but that's a good thing as it
         // would ensure that any changes in the creation procedure are matched in the validation.
         // Plus we still need to set the offload and vnet_hdr_size values.
-        validate_and_configure_tap(&tap)?;
+        for tap in &taps {
+            validate_and_configure_tap(tap, vq_pairs)?;
+        }
 
-        let avail_features = 1 << virtio_net::VIRTIO_NET_F_GUEST_CSUM
+        let mut avail_features = 1 << virtio_net::VIRTIO_NET_F_GUEST_CSUM
             | 1 << virtio_net::VIRTIO_NET_F_CSUM
             | 1 << virtio_net::VIRTIO_NET_F_CTRL_VQ
             | 1 << virtio_net::VIRTIO_NET_F_CTRL_GUEST_OFFLOADS
@@ -422,27 +475,55 @@ where
             | 1 << virtio_net::VIRTIO_NET_F_HOST_UFO
             | 1 << vhost::VIRTIO_F_VERSION_1;
 
-        let kill_evt = EventFd::new().map_err(NetError::CreateKillEventFd)?;
+        if vq_pairs > 1 {
+            avail_features |= 1 << virtio_net::VIRTIO_NET_F_MQ;
+        }
+
+        let mut kill_evts: Vec<EventFd> = Vec::new();
+        let mut workers_kill_evt: Vec<EventFd> = Vec::new();
+        for _ in 0..taps.len() {
+            let kill_evt = EventFd::new().map_err(NetError::CreateKillEventFd)?;
+            let worker_kill_evt = kill_evt.try_clone().map_err(NetError::CloneKillEventFd)?;
+            kill_evts.push(kill_evt);
+            workers_kill_evt.push(worker_kill_evt);
+        }
+
         Ok(Net {
-            workers_kill_evt: Some(kill_evt.try_clone().map_err(NetError::CloneKillEventFd)?),
-            kill_evt,
-            worker_thread: None,
-            tap: Some(tap),
+            queue_sizes: vec![QUEUE_SIZE; (vq_pairs * 2 + 1) as usize].into_boxed_slice(),
+            workers_kill_evt,
+            kill_evts,
+            worker_threads: Vec::new(),
+            taps,
             avail_features,
             acked_features: 0u64,
         })
     }
+
+    fn build_config(&self) -> VirtioNetConfig {
+        let vq_pairs = self.queue_sizes.len() as u16 / 2;
+
+        VirtioNetConfig {
+            max_vq_pairs: Le16::from(vq_pairs),
+            // The other fields are only meaningful when their corresponding
+            // features are enabled; none of those features are supported yet,
+            // so leave the fields at their default values.
+            ..Default::default()
+        }
+    }
 }
 
 // Ensure that the tap interface has the correct flags and sets the offload and VNET header size
 // to the appropriate values.
-fn validate_and_configure_tap<T: TapT>(tap: &T) -> Result<(), NetError> {
+fn validate_and_configure_tap<T: TapT>(tap: &T, vq_pairs: u16) -> Result<(), NetError> {
     let flags = tap.if_flags();
-    let required_flags = [
+    let mut required_flags = vec![
         (net_sys::IFF_TAP, "IFF_TAP"),
         (net_sys::IFF_NO_PI, "IFF_NO_PI"),
         (net_sys::IFF_VNET_HDR, "IFF_VNET_HDR"),
     ];
+    if vq_pairs > 1 {
+        required_flags.push((net_sys::IFF_MULTI_QUEUE, "IFF_MULTI_QUEUE"));
+    }
     let missing_flags = required_flags
         .iter()
         .filter_map(
@@ -475,14 +556,20 @@ where
     T: TapT,
 {
     fn drop(&mut self) {
-        // Only kill the child if it claimed its eventfd.
-        if self.workers_kill_evt.is_none() {
-            // Ignore the result because there is nothing we can do about it.
-            let _ = self.kill_evt.write(1);
+        let len = self.kill_evts.len();
+        for i in 0..len {
+            // Only kill the child if it claimed its eventfd.
+            if self.workers_kill_evt.get(i).is_none() {
+                if let Some(kill_evt) = self.kill_evts.get(i) {
+                    // Ignore the result because there is nothing we can do about it.
+                    let _ = kill_evt.write(1);
+                }
+            }
         }
 
-        if let Some(worker_thread) = self.worker_thread.take() {
-            let _ = worker_thread.join();
+        let len = self.worker_threads.len();
+        for _ in 0..len {
+            let _ = self.worker_threads.remove(0).join();
         }
     }
 }
@@ -494,14 +581,16 @@ where
     fn keep_fds(&self) -> Vec<RawFd> {
         let mut keep_fds = Vec::new();
 
-        if let Some(tap) = &self.tap {
+        for tap in &self.taps {
             keep_fds.push(tap.as_raw_fd());
         }
 
-        if let Some(workers_kill_evt) = &self.workers_kill_evt {
-            keep_fds.push(workers_kill_evt.as_raw_fd());
+        for worker_kill_evt in &self.workers_kill_evt {
+            keep_fds.push(worker_kill_evt.as_raw_fd());
+        }
+        for kill_evt in &self.kill_evts {
+            keep_fds.push(kill_evt.as_raw_fd());
         }
-        keep_fds.push(self.kill_evt.as_raw_fd());
 
         keep_fds
     }
@@ -511,7 +600,7 @@ where
     }
 
     fn queue_max_sizes(&self) -> &[u16] {
-        QUEUE_SIZES
+        &self.queue_sizes
     }
 
     fn features(&self) -> u64 {
@@ -532,19 +621,21 @@ where
         self.acked_features |= v;
 
         // Set offload flags to match acked virtio features.
-        if let Err(e) = self
-            .tap
-            .as_ref()
-            .expect("missing tap in ack_features")
-            .set_offload(virtio_features_to_tap_offload(self.acked_features))
-        {
-            warn!(
-                "net: failed to set tap offload to match acked features: {}",
-                e
-            );
+        if let Some(tap) = self.taps.first() {
+            if let Err(e) = tap.set_offload(virtio_features_to_tap_offload(self.acked_features)) {
+                warn!(
+                    "net: failed to set tap offload to match acked features: {}",
+                    e
+                );
+            }
         }
     }
 
+    fn read_config(&self, offset: u64, data: &mut [u8]) {
+        let config_space = self.build_config();
+        copy_config(data, 0, config_space.as_slice(), offset);
+    }
+
     fn activate(
         &mut self,
         mem: GuestMemory,
@@ -552,75 +643,110 @@ where
         mut queues: Vec<Queue>,
         mut queue_evts: Vec<EventFd>,
     ) {
-        if queues.len() != NUM_QUEUES || queue_evts.len() != NUM_QUEUES {
-            error!("net: expected {} queues, got {}", NUM_QUEUES, queues.len());
+        if queues.len() != self.queue_sizes.len() || queue_evts.len() != self.queue_sizes.len() {
+            error!(
+                "net: expected {} queues, got {}",
+                self.queue_sizes.len(),
+                queues.len()
+            );
             return;
         }
 
-        if let Some(tap) = self.tap.take() {
-            if let Some(kill_evt) = self.workers_kill_evt.take() {
-                let acked_features = self.acked_features;
-                let worker_result =
-                    thread::Builder::new()
-                        .name("virtio_net".to_string())
-                        .spawn(move || {
-                            // First queue is rx, second is tx, third is ctrl.
-                            let rx_queue = queues.remove(0);
-                            let tx_queue = queues.remove(0);
-                            let ctrl_queue = queues.remove(0);
-                            let mut worker = Worker {
-                                interrupt,
-                                mem,
-                                rx_queue,
-                                tx_queue,
-                                ctrl_queue,
-                                tap,
-                                acked_features,
-                                kill_evt,
-                            };
-                            let rx_queue_evt = queue_evts.remove(0);
-                            let tx_queue_evt = queue_evts.remove(0);
-                            let ctrl_queue_evt = queue_evts.remove(0);
-                            let result = worker.run(rx_queue_evt, tx_queue_evt, ctrl_queue_evt);
-                            if let Err(e) = result {
-                                error!("net worker thread exited with error: {}", e);
-                            }
-                            worker
-                        });
-
-                match worker_result {
-                    Err(e) => {
-                        error!("failed to spawn virtio_net worker: {}", e);
-                        return;
-                    }
-                    Ok(join_handle) => {
-                        self.worker_thread = Some(join_handle);
+        let vq_pairs = self.queue_sizes.len() / 2;
+        if self.taps.len() != vq_pairs {
+            error!("net: expected {} taps, got {}", vq_pairs, self.taps.len());
+            return;
+        }
+        if self.workers_kill_evt.len() != vq_pairs {
+            error!(
+                "net: expected {} worker_kill_evt, got {}",
+                vq_pairs,
+                self.workers_kill_evt.len()
+            );
+            return;
+        }
+        let interrupt_arc = Arc::new(interrupt);
+        for i in 0..vq_pairs {
+            let tap = self.taps.remove(0);
+            let acked_features = self.acked_features;
+            let interrupt = interrupt_arc.clone();
+            let memory = mem.clone();
+            let kill_evt = self.workers_kill_evt.remove(0);
+            // Queues alternate between rx0, tx0, rx1, tx1, ..., rxN, txN, ctrl.
+            let rx_queue = queues.remove(0);
+            let tx_queue = queues.remove(0);
+            let ctrl_queue = if i == 0 {
+                Some(queues.remove(queues.len() - 1))
+            } else {
+                None
+            };
+            let pairs = vq_pairs as u16;
+            let rx_queue_evt = queue_evts.remove(0);
+            let tx_queue_evt = queue_evts.remove(0);
+            let ctrl_queue_evt = if i == 0 {
+                Some(queue_evts.remove(queue_evts.len() - 1))
+            } else {
+                None
+            };
+            let worker_result = thread::Builder::new()
+                .name(format!("virtio_net worker {}", i))
+                .spawn(move || {
+                    let mut worker = Worker {
+                        interrupt,
+                        mem: memory,
+                        rx_queue,
+                        tx_queue,
+                        ctrl_queue,
+                        tap,
+                        acked_features,
+                        vq_pairs: pairs,
+                        kill_evt,
+                    };
+                    let result = worker.run(rx_queue_evt, tx_queue_evt, ctrl_queue_evt);
+                    if let Err(e) = result {
+                        error!("net worker thread exited with error: {}", e);
                     }
+                    worker
+                });
+
+            match worker_result {
+                Err(e) => {
+                    error!("failed to spawn virtio_net worker: {}", e);
+                    return;
                 }
+                Ok(join_handle) => self.worker_threads.push(join_handle),
             }
         }
     }
 
     fn reset(&mut self) -> bool {
-        // Only kill the child if it claimed its eventfd.
-        if self.workers_kill_evt.is_none() && self.kill_evt.write(1).is_err() {
-            error!("{}: failed to notify the kill event", self.debug_label());
-            return false;
+        let len = self.kill_evts.len();
+        for i in 0..len {
+            // Only kill the child if it claimed its eventfd.
+            if self.workers_kill_evt.get(i).is_none() {
+                if let Some(kill_evt) = self.kill_evts.get(i) {
+                    if kill_evt.write(1).is_err() {
+                        error!("{}: failed to notify the kill event", self.debug_label());
+                        return false;
+                    }
+                }
+            }
         }
 
-        if let Some(worker_thread) = self.worker_thread.take() {
-            match worker_thread.join() {
+        let len = self.worker_threads.len();
+        for _ in 0..len {
+            match self.worker_threads.remove(0).join() {
                 Err(_) => {
                     error!("{}: failed to get back resources", self.debug_label());
                     return false;
                 }
                 Ok(worker) => {
-                    self.tap = Some(worker.tap);
-                    self.workers_kill_evt = Some(worker.kill_evt);
-                    return true;
+                    self.taps.push(worker.tap);
+                    self.workers_kill_evt.push(worker.kill_evt);
                 }
             }
         }
-        false
+
+        return true;
     }
 }
diff --git a/devices/src/virtio/pmem.rs b/devices/src/virtio/pmem.rs
index 931b037..499e110 100644
--- a/devices/src/virtio/pmem.rs
+++ b/devices/src/virtio/pmem.rs
@@ -13,6 +13,10 @@ use sys_util::{error, EventFd, GuestAddress, GuestMemory, PollContext, PollToken
 
 use data_model::{DataInit, Le32, Le64};
 
+use msg_socket::{MsgReceiver, MsgSender};
+
+use vm_control::{VmMsyncRequest, VmMsyncRequestSocket, VmMsyncResponse};
+
 use super::{
     copy_config, DescriptorChain, DescriptorError, Interrupt, Queue, Reader, VirtioDevice, Writer,
     TYPE_PMEM, VIRTIO_F_VERSION_1,
@@ -83,19 +87,38 @@ struct Worker {
     interrupt: Interrupt,
     queue: Queue,
     memory: GuestMemory,
-    disk_image: File,
+    pmem_device_socket: VmMsyncRequestSocket,
+    mapping_arena_slot: u32,
 }
 
 impl Worker {
     fn execute_request(&self, request: virtio_pmem_req) -> u32 {
         match request.type_.to_native() {
-            VIRTIO_PMEM_REQ_TYPE_FLUSH => match self.disk_image.sync_all() {
-                Ok(()) => VIRTIO_PMEM_RESP_TYPE_OK,
-                Err(e) => {
-                    error!("failed flushing disk image: {}", e);
-                    VIRTIO_PMEM_RESP_TYPE_EIO
+            VIRTIO_PMEM_REQ_TYPE_FLUSH => {
+                let request = VmMsyncRequest::MsyncArena {
+                    slot: self.mapping_arena_slot,
+                    offset: 0, // The pmem backing file is always at offset 0 in the arena.
+                };
+
+                if let Err(e) = self.pmem_device_socket.send(&request) {
+                    error!("failed to send request: {}", e);
+                    return VIRTIO_PMEM_RESP_TYPE_EIO;
+                }
+
+                match self.pmem_device_socket.recv() {
+                    Ok(response) => match response {
+                        VmMsyncResponse::Ok => VIRTIO_PMEM_RESP_TYPE_OK,
+                        VmMsyncResponse::Err(e) => {
+                            error!("failed flushing disk image: {}", e);
+                            VIRTIO_PMEM_RESP_TYPE_EIO
+                        }
+                    },
+                    Err(e) => {
+                        error!("failed to receive data: {}", e);
+                        VIRTIO_PMEM_RESP_TYPE_EIO
+                    }
                 }
-            },
+            }
             _ => {
                 error!("unknown request type: {}", request.type_.to_native());
                 VIRTIO_PMEM_RESP_TYPE_EIO
@@ -199,21 +222,27 @@ pub struct Pmem {
     worker_thread: Option<thread::JoinHandle<()>>,
     disk_image: Option<File>,
     mapping_address: GuestAddress,
+    mapping_arena_slot: u32,
     mapping_size: u64,
+    pmem_device_socket: Option<VmMsyncRequestSocket>,
 }
 
 impl Pmem {
     pub fn new(
         disk_image: File,
         mapping_address: GuestAddress,
+        mapping_arena_slot: u32,
         mapping_size: u64,
+        pmem_device_socket: Option<VmMsyncRequestSocket>,
     ) -> SysResult<Pmem> {
         Ok(Pmem {
             kill_event: None,
             worker_thread: None,
             disk_image: Some(disk_image),
             mapping_address,
+            mapping_arena_slot,
             mapping_size,
+            pmem_device_socket,
         })
     }
 }
@@ -233,11 +262,15 @@ impl Drop for Pmem {
 
 impl VirtioDevice for Pmem {
     fn keep_fds(&self) -> Vec<RawFd> {
+        let mut keep_fds = Vec::new();
         if let Some(disk_image) = &self.disk_image {
-            vec![disk_image.as_raw_fd()]
-        } else {
-            vec![]
+            keep_fds.push(disk_image.as_raw_fd());
         }
+
+        if let Some(ref pmem_device_socket) = self.pmem_device_socket {
+            keep_fds.push(pmem_device_socket.as_raw_fd());
+        }
+        keep_fds
     }
 
     fn device_type(&self) -> u32 {
@@ -274,7 +307,9 @@ impl VirtioDevice for Pmem {
         let queue = queues.remove(0);
         let queue_event = queue_events.remove(0);
 
-        if let Some(disk_image) = self.disk_image.take() {
+        let mapping_arena_slot = self.mapping_arena_slot;
+
+        if let Some(pmem_device_socket) = self.pmem_device_socket.take() {
             let (self_kill_event, kill_event) =
                 match EventFd::new().and_then(|e| Ok((e.try_clone()?, e))) {
                     Ok(v) => v,
@@ -291,8 +326,9 @@ impl VirtioDevice for Pmem {
                     let mut worker = Worker {
                         interrupt,
                         memory,
-                        disk_image,
                         queue,
+                        pmem_device_socket,
+                        mapping_arena_slot,
                     };
                     worker.run(queue_event, kill_event);
                 });
diff --git a/devices/src/virtio/vhost/net.rs b/devices/src/virtio/vhost/net.rs
index 542423a..681cfe8 100644
--- a/devices/src/virtio/vhost/net.rs
+++ b/devices/src/virtio/vhost/net.rs
@@ -53,7 +53,7 @@ where
     ) -> Result<Net<T, U>> {
         let kill_evt = EventFd::new().map_err(Error::CreateKillEventFd)?;
 
-        let tap: T = T::new(true).map_err(Error::TapOpen)?;
+        let tap: T = T::new(true, false).map_err(Error::TapOpen)?;
         tap.set_ip_addr(ip_addr).map_err(Error::TapSetIp)?;
         tap.set_netmask(netmask).map_err(Error::TapSetNetmask)?;
         tap.set_mac_address(mac_addr)