author     David Riley <davidriley@chromium.org>   2019-08-23 16:11:11 -0700
committer  Commit Bot <commit-bot@chromium.org>    2019-09-17 22:35:33 +0000
commit     bca67ae7e3f543566144b57b5f816cb95c4d674e
tree       c64b6a0cd1f1316280e8443caeb9c48adc2ccfb8
parent     e5e30a705af867d15ae92ac0dbc783ee73dd17f1
devices: gpu: Use descriptor_utils helpers for virtio processing.
Switch to using Reader/Writer, which allows buffers to be passed from
the guest as scatter-gather lists instead of requiring a single
contiguous buffer.

BUG=chromium:993452
TEST=apitrace replay

Change-Id: Ibe212cfa60eae16d70db248a2a619d272c13f540
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/platform/crosvm/+/1775365
Reviewed-by: Daniel Verkamp <dverkamp@chromium.org>
Reviewed-by: Zach Reizner <zachr@chromium.org>
Tested-by: David Riley <davidriley@chromium.org>
Tested-by: kokoro <noreply+kokoro@google.com>
Commit-Queue: David Riley <davidriley@chromium.org>
-rw-r--r--  devices/src/virtio/gpu/mod.rs       283
-rw-r--r--  devices/src/virtio/gpu/protocol.rs  157
2 files changed, 197 insertions, 243 deletions
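
The core of the change: instead of manually walking the descriptor chain and
stashing raw (addr, len) pairs in QueueDescriptor, the device now builds a
Reader over the device-readable descriptors and a Writer over the
device-writable ones, and streams requests and responses through them. Below
is a minimal sketch of that pattern, using only calls that appear in this
patch (Queue::pop, Reader::new, available_bytes, read, Writer::new, write,
Queue::add_used); service_queue and process_request are hypothetical names,
not part of this change:

use sys_util::GuestMemory;

use super::{Queue, Reader, Writer};

// Hypothetical handler: stream the request in and echo it back out,
// returning how many response bytes were written for the guest.
fn process_request(reader: &mut Reader, writer: &mut Writer) -> u32 {
    let mut buf = vec![0u8; reader.available_bytes()];
    if reader.read(&mut buf[..]).is_err() || writer.write(&buf[..]).is_err() {
        return 0;
    }
    buf.len() as u32
}

// Hypothetical queue-servicing loop in the shape process_queue() takes below.
fn service_queue(mem: &GuestMemory, queue: &mut Queue) -> bool {
    let mut needs_interrupt = false;
    while let Some(desc) = queue.pop(mem) {
        let index = desc.index;
        // One byte-stream cursor over the device-readable descriptors and one
        // over the device-writable ones; each may cover many scattered guest
        // buffers, which is what removes the contiguous-buffer requirement.
        let mut reader = Reader::new(mem, desc.clone());
        let mut writer = Writer::new(mem, desc);
        let written = process_request(&mut reader, &mut writer);
        queue.add_used(mem, index, written);
        needs_interrupt = true;
    }
    needs_interrupt
}
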
diff --git a/devices/src/virtio/gpu/mod.rs b/devices/src/virtio/gpu/mod.rs
index 48ee6cb..453a8c5 100644
--- a/devices/src/virtio/gpu/mod.rs
+++ b/devices/src/virtio/gpu/mod.rs
@@ -28,8 +28,8 @@ use gpu_display::*;
 use gpu_renderer::{Renderer, RendererFlags};
 
 use super::{
-    copy_config, resource_bridge::*, AvailIter, Queue, VirtioDevice, INTERRUPT_STATUS_USED_RING,
-    TYPE_GPU, VIRTIO_F_VERSION_1,
+    copy_config, resource_bridge::*, DescriptorChain, Queue, Reader, VirtioDevice, Writer,
+    INTERRUPT_STATUS_USED_RING, TYPE_GPU, VIRTIO_F_VERSION_1,
 };
 
 use self::backend::Backend;
@@ -43,14 +43,6 @@ use vm_control::VmMemoryControlRequestSocket;
 const QUEUE_SIZES: &[u16] = &[256, 16];
 const FENCE_POLL_MS: u64 = 1;
 
-struct QueueDescriptor {
-    index: u16,
-    addr: GuestAddress,
-    len: u32,
-    data: Option<(GuestAddress, u32)>,
-    ret: Option<(GuestAddress, u32)>,
-}
-
 struct ReturnDescriptor {
     index: u16,
     len: u32,
@@ -58,13 +50,11 @@ struct ReturnDescriptor {
 
 struct FenceDescriptor {
     fence_id: u32,
+    index: u16,
     len: u32,
-    desc: QueueDescriptor,
 }
 
 struct Frontend {
-    ctrl_descriptors: VecDeque<QueueDescriptor>,
-    cursor_descriptors: VecDeque<QueueDescriptor>,
     return_ctrl_descriptors: VecDeque<ReturnDescriptor>,
     return_cursor_descriptors: VecDeque<ReturnDescriptor>,
     fence_descriptors: Vec<FenceDescriptor>,
@@ -74,8 +64,6 @@ struct Frontend {
 impl Frontend {
     fn new(backend: Backend) -> Frontend {
         Frontend {
-            ctrl_descriptors: Default::default(),
-            cursor_descriptors: Default::default(),
             return_ctrl_descriptors: Default::default(),
             return_cursor_descriptors: Default::default(),
             fence_descriptors: Default::default(),
@@ -99,7 +87,7 @@ impl Frontend {
         &mut self,
         mem: &GuestMemory,
         cmd: GpuCommand,
-        data: Option<VolatileSlice>,
+        reader: &mut Reader,
     ) -> GpuResponse {
         self.backend.force_ctx_0();
 
@@ -133,24 +121,26 @@ impl Frontend {
                 info.offset.to_native(),
                 mem,
             ),
-            GpuCommand::ResourceAttachBacking(info) if data.is_some() => {
-                let data = data.unwrap();
-                let entry_count = info.nr_entries.to_native() as usize;
-                let mut iovecs = Vec::with_capacity(entry_count);
-                for i in 0..entry_count {
-                    if let Ok(entry_ref) =
-                        data.get_ref((i * size_of::<virtio_gpu_mem_entry>()) as u64)
-                    {
-                        let entry: virtio_gpu_mem_entry = entry_ref.load();
-                        let addr = GuestAddress(entry.addr.to_native());
-                        let len = entry.length.to_native() as usize;
-                        iovecs.push((addr, len))
-                    } else {
-                        return GpuResponse::ErrUnspec;
+            GpuCommand::ResourceAttachBacking(info) => {
+                if reader.available_bytes() != 0 {
+                    let entry_count = info.nr_entries.to_native() as usize;
+                    let mut iovecs = Vec::with_capacity(entry_count);
+                    for _ in 0..entry_count {
+                        match reader.read_obj::<virtio_gpu_mem_entry>() {
+                            Ok(entry) => {
+                                let addr = GuestAddress(entry.addr.to_native());
+                                let len = entry.length.to_native() as usize;
+                                iovecs.push((addr, len))
+                            }
+                            Err(_) => return GpuResponse::ErrUnspec,
+                        }
                     }
+                    self.backend
+                        .attach_backing(info.resource_id.to_native(), mem, iovecs)
+                } else {
+                    error!("missing data for command {:?}", cmd);
+                    GpuResponse::ErrUnspec
                 }
-                self.backend
-                    .attach_backing(info.resource_id.to_native(), mem, iovecs)
             }
             GpuCommand::ResourceDetachBacking(info) => {
                 self.backend.detach_backing(info.resource_id.to_native())
@@ -255,17 +245,14 @@ impl Frontend {
                 )
             }
             GpuCommand::CmdSubmit3d(info) => {
-                if data.is_some() {
-                    let data = data.unwrap(); // guarded by this match arm
+                if reader.available_bytes() != 0 {
                     let cmd_size = info.size.to_native() as usize;
-                    match data.get_slice(0, cmd_size as u64) {
-                        Ok(cmd_slice) => {
-                            let mut cmd_buf = vec![0; cmd_size];
-                            cmd_slice.copy_to(&mut cmd_buf[..]);
-                            self.backend
-                                .submit_command(info.hdr.ctx_id.to_native(), &mut cmd_buf[..])
-                        }
-                        Err(_) => GpuResponse::ErrInvalidParameter,
+                    let mut cmd_buf = vec![0; cmd_size];
+                    if reader.read(&mut cmd_buf[..]).is_ok() {
+                        self.backend
+                            .submit_command(info.hdr.ctx_id.to_native(), &mut cmd_buf[..])
+                    } else {
+                        GpuResponse::ErrInvalidParameter
                     }
                 } else {
                     // Silently accept empty command buffers to allow for
@@ -273,166 +260,112 @@ impl Frontend {
                     GpuResponse::OkNoData
                 }
             }
-            _ => {
-                error!("unhandled command {:?}", cmd);
-                GpuResponse::ErrUnspec
-            }
         }
     }
 
-    fn take_descriptors(
-        mem: &GuestMemory,
-        desc_iter: AvailIter,
-        descriptors: &mut VecDeque<QueueDescriptor>,
-        return_descriptors: &mut VecDeque<ReturnDescriptor>,
-    ) {
-        for desc in desc_iter {
-            if desc.len as usize >= size_of::<virtio_gpu_ctrl_hdr>() && !desc.is_write_only() {
-                let mut q_desc = QueueDescriptor {
-                    index: desc.index,
-                    addr: desc.addr,
-                    len: desc.len,
-                    data: None,
-                    ret: None,
-                };
-                if let Some(extra_desc) = desc.next_descriptor() {
-                    if extra_desc.is_write_only() {
-                        q_desc.ret = Some((extra_desc.addr, extra_desc.len));
-                    } else {
-                        q_desc.data = Some((extra_desc.addr, extra_desc.len));
-                    }
-                    if let Some(extra_desc) = extra_desc.next_descriptor() {
-                        if extra_desc.is_write_only() && q_desc.ret.is_none() {
-                            q_desc.ret = Some((extra_desc.addr, extra_desc.len));
-                        }
-                    }
+    fn validate_desc(desc: &DescriptorChain) -> bool {
+        desc.len as usize >= size_of::<virtio_gpu_ctrl_hdr>() && !desc.is_write_only()
+    }
+
+    fn process_queue(&mut self, mem: &GuestMemory, queue: &mut Queue) -> bool {
+        let mut signal_used = false;
+        while let Some(desc) = queue.pop(mem) {
+            if Frontend::validate_desc(&desc) {
+                let mut reader = Reader::new(mem, desc.clone());
+                let mut writer = Writer::new(mem, desc.clone());
+                if let Some(ret_desc) =
+                    self.process_descriptor(mem, desc.index, &mut reader, &mut writer)
+                {
+                    queue.add_used(&mem, ret_desc.index, ret_desc.len);
+                    signal_used = true;
                 }
-                descriptors.push_back(q_desc);
             } else {
                 let likely_type = mem.read_obj_from_addr(desc.addr).unwrap_or(Le32::from(0));
                 debug!(
-                    "ctrl queue bad descriptor index = {} len = {} write = {} type = {}",
+                    "queue bad descriptor index = {} len = {} write = {} type = {}",
                     desc.index,
                     desc.len,
                     desc.is_write_only(),
                     virtio_gpu_cmd_str(likely_type.to_native())
                 );
-                return_descriptors.push_back(ReturnDescriptor {
-                    index: desc.index,
-                    len: 0,
-                });
+                queue.add_used(&mem, desc.index, 0);
+                signal_used = true;
             }
         }
-    }
 
-    fn take_ctrl_descriptors(&mut self, mem: &GuestMemory, desc_iter: AvailIter) {
-        Frontend::take_descriptors(
-            mem,
-            desc_iter,
-            &mut self.ctrl_descriptors,
-            &mut self.return_ctrl_descriptors,
-        );
-    }
-
-    fn take_cursor_descriptors(&mut self, mem: &GuestMemory, desc_iter: AvailIter) {
-        Frontend::take_descriptors(
-            mem,
-            desc_iter,
-            &mut self.cursor_descriptors,
-            &mut self.return_cursor_descriptors,
-        );
+        signal_used
     }
 
     fn process_descriptor(
         &mut self,
         mem: &GuestMemory,
-        desc: QueueDescriptor,
+        desc_index: u16,
+        reader: &mut Reader,
+        writer: &mut Writer,
     ) -> Option<ReturnDescriptor> {
         let mut resp = GpuResponse::ErrUnspec;
         let mut gpu_cmd = None;
         let mut len = 0;
-        if let Ok(desc_mem) = mem.get_slice(desc.addr.offset(), desc.len as u64) {
-            match GpuCommand::decode(desc_mem) {
-                Ok(cmd) => {
-                    match desc.data {
-                        Some(data_desc) => {
-                            match mem.get_slice(data_desc.0.offset(), data_desc.1 as u64) {
-                                Ok(data_mem) => {
-                                    resp = self.process_gpu_command(mem, cmd, Some(data_mem))
-                                }
-                                Err(e) => debug!("ctrl queue invalid data descriptor: {}", e),
-                            }
-                        }
-                        None => resp = self.process_gpu_command(mem, cmd, None),
-                    }
-                    gpu_cmd = Some(cmd);
-                }
-                Err(e) => debug!("ctrl queue decode error: {}", e),
+        match GpuCommand::decode(reader) {
+            Ok(cmd) => {
+                resp = self.process_gpu_command(mem, cmd, reader);
+                gpu_cmd = Some(cmd);
             }
+            Err(e) => debug!("descriptor decode error: {}", e),
         }
         if resp.is_err() {
             debug!("{:?} -> {:?}", gpu_cmd, resp);
         }
-        if let Some(ret_desc) = desc.ret {
-            if let Ok(ret_desc_mem) = mem.get_slice(ret_desc.0.offset(), ret_desc.1 as u64) {
-                let mut fence_id = 0;
-                let mut ctx_id = 0;
-                let mut flags = 0;
-                if let Some(cmd) = gpu_cmd {
-                    let ctrl_hdr = cmd.ctrl_hdr();
-                    if ctrl_hdr.flags.to_native() & VIRTIO_GPU_FLAG_FENCE != 0 {
-                        fence_id = ctrl_hdr.fence_id.to_native();
-                        ctx_id = ctrl_hdr.ctx_id.to_native();
-                        flags = VIRTIO_GPU_FLAG_FENCE;
-
-                        let fence_resp = self.backend.create_fence(ctx_id, fence_id as u32);
-                        if fence_resp.is_err() {
-                            warn!("create_fence {} -> {:?}", fence_id, fence_resp);
-                            resp = fence_resp;
-                        }
+        if writer.available_bytes() != 0 {
+            let mut fence_id = 0;
+            let mut ctx_id = 0;
+            let mut flags = 0;
+            if let Some(cmd) = gpu_cmd {
+                let ctrl_hdr = cmd.ctrl_hdr();
+                if ctrl_hdr.flags.to_native() & VIRTIO_GPU_FLAG_FENCE != 0 {
+                    fence_id = ctrl_hdr.fence_id.to_native();
+                    ctx_id = ctrl_hdr.ctx_id.to_native();
+                    flags = VIRTIO_GPU_FLAG_FENCE;
+
+                    let fence_resp = self.backend.create_fence(ctx_id, fence_id as u32);
+                    if fence_resp.is_err() {
+                        warn!("create_fence {} -> {:?}", fence_id, fence_resp);
+                        resp = fence_resp;
                     }
                 }
+            }
 
-                // Prepare the response now, even if it is going to wait until
-                // fence is complete.
-                match resp.encode(flags, fence_id, ctx_id, ret_desc_mem) {
-                    Ok(l) => len = l,
-                    Err(e) => debug!("ctrl queue response encode error: {}", e),
-                }
-
-                if flags & VIRTIO_GPU_FLAG_FENCE != 0 {
-                    self.fence_descriptors.push(FenceDescriptor {
-                        fence_id: fence_id as u32,
-                        len,
-                        desc,
-                    });
+            // Prepare the response now, even if it is going to wait until
+            // fence is complete.
+            match resp.encode(flags, fence_id, ctx_id, writer) {
+                Ok(l) => len = l,
+                Err(e) => debug!("ctrl queue response encode error: {}", e),
+            }
 
-                    return None;
-                }
+            if flags & VIRTIO_GPU_FLAG_FENCE != 0 {
+                self.fence_descriptors.push(FenceDescriptor {
+                    fence_id: fence_id as u32,
+                    index: desc_index,
+                    len,
+                });
 
-                // No fence, respond now.
+                return None;
             }
+
+            // No fence, respond now.
         }
         Some(ReturnDescriptor {
-            index: desc.index,
+            index: desc_index,
             len,
         })
     }
 
-    fn process_ctrl(&mut self, mem: &GuestMemory) -> Option<ReturnDescriptor> {
-        self.return_ctrl_descriptors.pop_front().or_else(|| {
-            self.ctrl_descriptors
-                .pop_front()
-                .and_then(|desc| self.process_descriptor(mem, desc))
-        })
+    fn return_cursor(&mut self) -> Option<ReturnDescriptor> {
+        self.return_cursor_descriptors.pop_front()
     }
 
-    fn process_cursor(&mut self, mem: &GuestMemory) -> Option<ReturnDescriptor> {
-        self.return_cursor_descriptors.pop_front().or_else(|| {
-            self.cursor_descriptors
-                .pop_front()
-                .and_then(|desc| self.process_descriptor(mem, desc))
-        })
+    fn return_ctrl(&mut self) -> Option<ReturnDescriptor> {
+        self.return_ctrl_descriptors.pop_front()
     }
 
     fn fence_poll(&mut self) {
@@ -443,7 +376,7 @@ impl Frontend {
                 true
             } else {
                 return_descs.push_back(ReturnDescriptor {
-                    index: f_desc.desc.index,
+                    index: f_desc.index,
                     len: f_desc.len,
                 });
                 false
@@ -505,6 +438,14 @@ impl Worker {
             }
         }
 
+        // TODO(davidriley): The entire main loop processing is somewhat racy and incorrect with
+        // respect to cursor vs control queue processing.  As both currently and originally
+        // written, while the control queue is only processed/read from after the cursor queue
+        // is finished, the entire queue will be processed at that time.  The end effect of this
+        // raciness is that control queue descriptors that are issued after cursor descriptors
+        // might be handled first instead of the other way around.  In practice, the cursor queue
+        // isn't used so this isn't a huge issue.
+
         // Declare this outside the loop so we don't keep allocating and freeing the vector.
         let mut process_resource_bridge = Vec::with_capacity(self.resource_bridges.len());
         'poll: loop {
@@ -523,6 +464,7 @@ impl Worker {
                 }
             };
             let mut signal_used = false;
+            let mut ctrl_available = false;
 
             // Clear the old values and re-initialize with false.
             process_resource_bridge.clear();
@@ -532,13 +474,15 @@ impl Worker {
                 match event.token() {
                     Token::CtrlQueue => {
                         let _ = self.ctrl_evt.read();
-                        self.state
-                            .take_ctrl_descriptors(&self.mem, self.ctrl_queue.iter(&self.mem));
+                        // Set flag that control queue is available to be read, but defer reading
+                        // until rest of the events are processed.
+                        ctrl_available = true;
                     }
                     Token::CursorQueue => {
                         let _ = self.cursor_evt.read();
-                        self.state
-                            .take_cursor_descriptors(&self.mem, self.cursor_queue.iter(&self.mem));
+                        if self.state.process_queue(&self.mem, &mut self.cursor_queue) {
+                            signal_used = true;
+                        }
                     }
                     Token::Display => {
                         let close_requested = self.state.process_display();
@@ -560,14 +504,18 @@ impl Worker {
             }
 
             // All cursor commands go first because they have higher priority.
-            while let Some(desc) = self.state.process_cursor(&self.mem) {
+            while let Some(desc) = self.state.return_cursor() {
                 self.cursor_queue.add_used(&self.mem, desc.index, desc.len);
                 signal_used = true;
             }
 
+            if ctrl_available && self.state.process_queue(&self.mem, &mut self.ctrl_queue) {
+                signal_used = true;
+            }
+
             self.state.fence_poll();
 
-            while let Some(desc) = self.state.process_ctrl(&self.mem) {
+            while let Some(desc) = self.state.return_ctrl() {
                 self.ctrl_queue.add_used(&self.mem, desc.index, desc.len);
                 signal_used = true;
             }
@@ -575,6 +523,9 @@ impl Worker {
             // Process the entire control queue before the resource bridge in case a resource is
             // created or destroyed by the control queue. Processing the resource bridge first may
             // lead to a race condition.
+            // TODO(davidriley): This is still inherently racy if both the control queue request
+            // and the resource bridge request come in at the same time after the control queue is
+            // processed above and before the corresponding bridge is processed below.
             for (bridge, &should_process) in
                 self.resource_bridges.iter().zip(&process_resource_bridge)
             {
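
The protocol.rs half of the change swaps VolatileSlice for the same streaming
Reader/Writer types, with the error types following (VolatileMemoryError
becomes DescriptorError). One subtlety in the new decode(): it clones the
Reader to peek at the shared virtio_gpu_ctrl_hdr without consuming it, because
every command struct embeds that header as its first field and must be read
from offset zero. A condensed sketch of that pattern as it would sit inside
protocol.rs, with a single command shown; decode_one and DecodeError are
hypothetical stand-ins for GpuCommand::decode and GpuCommandDecodeError:

use super::super::DescriptorError;
use super::Reader;

#[derive(Debug)]
enum DecodeError {
    Memory(DescriptorError),
    InvalidType(u32),
}

impl From<DescriptorError> for DecodeError {
    fn from(e: DescriptorError) -> DecodeError {
        DecodeError::Memory(e)
    }
}

fn decode_one(cmd: &mut Reader) -> Result<GpuCommand, DecodeError> {
    // Clone the Reader so the header peek does not advance the real cursor.
    let hdr: virtio_gpu_ctrl_hdr = cmd.clone().read_obj()?;
    Ok(match hdr.type_.into() {
        // Each arm re-reads the full command struct, header included.
        VIRTIO_GPU_CMD_RESOURCE_UNREF => GpuCommand::ResourceUnref(cmd.read_obj()?),
        other => return Err(DecodeError::InvalidType(other)),
    })
}
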
diff --git a/devices/src/virtio/gpu/protocol.rs b/devices/src/virtio/gpu/protocol.rs
index 3649e10..a1b7164 100644
--- a/devices/src/virtio/gpu/protocol.rs
+++ b/devices/src/virtio/gpu/protocol.rs
@@ -11,7 +11,9 @@ use std::marker::PhantomData;
 use std::mem::{size_of, size_of_val};
 use std::str::from_utf8;
 
-use data_model::{DataInit, Le32, Le64, VolatileMemory, VolatileMemoryError, VolatileSlice};
+use super::super::DescriptorError;
+use super::{Reader, Writer};
+use data_model::{DataInit, Le32, Le64};
 
 pub const VIRTIO_GPU_F_VIRGL: u32 = 0;
 
@@ -97,7 +99,7 @@ pub fn virtio_gpu_cmd_str(cmd: u32) -> &'static str {
 
 pub const VIRTIO_GPU_FLAG_FENCE: u32 = (1 << 0);
 
-#[derive(Copy, Clone, Debug)]
+#[derive(Copy, Clone, Debug, Default)]
 #[repr(C)]
 pub struct virtio_gpu_ctrl_hdr {
     pub type_: Le32,
@@ -111,7 +113,7 @@ unsafe impl DataInit for virtio_gpu_ctrl_hdr {}
 
 /* data passed in the cursor vq */
 
-#[derive(Copy, Clone, Debug)]
+#[derive(Copy, Clone, Debug, Default)]
 #[repr(C)]
 pub struct virtio_gpu_cursor_pos {
     pub scanout_id: Le32,
@@ -123,7 +125,7 @@ pub struct virtio_gpu_cursor_pos {
 unsafe impl DataInit for virtio_gpu_cursor_pos {}
 
 /* VIRTIO_GPU_CMD_UPDATE_CURSOR, VIRTIO_GPU_CMD_MOVE_CURSOR */
-#[derive(Copy, Clone, Debug)]
+#[derive(Copy, Clone, Debug, Default)]
 #[repr(C)]
 pub struct virtio_gpu_update_cursor {
     pub hdr: virtio_gpu_ctrl_hdr,
@@ -150,7 +152,7 @@ pub struct virtio_gpu_rect {
 unsafe impl DataInit for virtio_gpu_rect {}
 
 /* VIRTIO_GPU_CMD_RESOURCE_UNREF */
-#[derive(Copy, Clone, Debug)]
+#[derive(Copy, Clone, Debug, Default)]
 #[repr(C)]
 pub struct virtio_gpu_resource_unref {
     pub hdr: virtio_gpu_ctrl_hdr,
@@ -161,7 +163,7 @@ pub struct virtio_gpu_resource_unref {
 unsafe impl DataInit for virtio_gpu_resource_unref {}
 
 /* VIRTIO_GPU_CMD_RESOURCE_CREATE_2D: create a 2d resource with a format */
-#[derive(Copy, Clone, Debug)]
+#[derive(Copy, Clone, Debug, Default)]
 #[repr(C)]
 pub struct virtio_gpu_resource_create_2d {
     pub hdr: virtio_gpu_ctrl_hdr,
@@ -174,7 +176,7 @@ pub struct virtio_gpu_resource_create_2d {
 unsafe impl DataInit for virtio_gpu_resource_create_2d {}
 
 /* VIRTIO_GPU_CMD_SET_SCANOUT */
-#[derive(Copy, Clone, Debug)]
+#[derive(Copy, Clone, Debug, Default)]
 #[repr(C)]
 pub struct virtio_gpu_set_scanout {
     pub hdr: virtio_gpu_ctrl_hdr,
@@ -186,7 +188,7 @@ pub struct virtio_gpu_set_scanout {
 unsafe impl DataInit for virtio_gpu_set_scanout {}
 
 /* VIRTIO_GPU_CMD_RESOURCE_FLUSH */
-#[derive(Copy, Clone, Debug)]
+#[derive(Copy, Clone, Debug, Default)]
 #[repr(C)]
 pub struct virtio_gpu_resource_flush {
     pub hdr: virtio_gpu_ctrl_hdr,
@@ -198,7 +200,7 @@ pub struct virtio_gpu_resource_flush {
 unsafe impl DataInit for virtio_gpu_resource_flush {}
 
 /* VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D: simple transfer to_host */
-#[derive(Copy, Clone, Debug)]
+#[derive(Copy, Clone, Debug, Default)]
 #[repr(C)]
 pub struct virtio_gpu_transfer_to_host_2d {
     pub hdr: virtio_gpu_ctrl_hdr,
@@ -210,7 +212,7 @@ pub struct virtio_gpu_transfer_to_host_2d {
 
 unsafe impl DataInit for virtio_gpu_transfer_to_host_2d {}
 
-#[derive(Copy, Clone, Debug)]
+#[derive(Copy, Clone, Debug, Default)]
 #[repr(C)]
 pub struct virtio_gpu_mem_entry {
     pub addr: Le64,
@@ -221,7 +223,7 @@ pub struct virtio_gpu_mem_entry {
 unsafe impl DataInit for virtio_gpu_mem_entry {}
 
 /* VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING */
-#[derive(Copy, Clone, Debug)]
+#[derive(Copy, Clone, Debug, Default)]
 #[repr(C)]
 pub struct virtio_gpu_resource_attach_backing {
     pub hdr: virtio_gpu_ctrl_hdr,
@@ -232,7 +234,7 @@ pub struct virtio_gpu_resource_attach_backing {
 unsafe impl DataInit for virtio_gpu_resource_attach_backing {}
 
 /* VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING */
-#[derive(Copy, Clone, Debug)]
+#[derive(Copy, Clone, Debug, Default)]
 #[repr(C)]
 pub struct virtio_gpu_resource_detach_backing {
     pub hdr: virtio_gpu_ctrl_hdr,
@@ -254,7 +256,7 @@ unsafe impl DataInit for virtio_gpu_display_one {}
 
 /* VIRTIO_GPU_RESP_OK_DISPLAY_INFO */
 const VIRTIO_GPU_MAX_SCANOUTS: usize = 16;
-#[derive(Copy, Clone, Debug)]
+#[derive(Copy, Clone, Debug, Default)]
 #[repr(C)]
 pub struct virtio_gpu_resp_display_info {
     pub hdr: virtio_gpu_ctrl_hdr,
@@ -265,7 +267,7 @@ unsafe impl DataInit for virtio_gpu_resp_display_info {}
 
 /* data passed in the control vq, 3d related */
 
-#[derive(Copy, Clone, Debug)]
+#[derive(Copy, Clone, Debug, Default)]
 #[repr(C)]
 pub struct virtio_gpu_box {
     pub x: Le32,
@@ -279,7 +281,7 @@ pub struct virtio_gpu_box {
 unsafe impl DataInit for virtio_gpu_box {}
 
 /* VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D, VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D */
-#[derive(Copy, Clone, Debug)]
+#[derive(Copy, Clone, Debug, Default)]
 #[repr(C)]
 pub struct virtio_gpu_transfer_host_3d {
     pub hdr: virtio_gpu_ctrl_hdr,
@@ -295,7 +297,7 @@ unsafe impl DataInit for virtio_gpu_transfer_host_3d {}
 
 /* VIRTIO_GPU_CMD_RESOURCE_CREATE_3D */
 pub const VIRTIO_GPU_RESOURCE_FLAG_Y_0_TOP: u32 = (1 << 0);
-#[derive(Copy, Clone, Debug)]
+#[derive(Copy, Clone, Debug, Default)]
 #[repr(C)]
 pub struct virtio_gpu_resource_create_3d {
     pub hdr: virtio_gpu_ctrl_hdr,
@@ -325,6 +327,12 @@ pub struct virtio_gpu_ctx_create {
     pub debug_name: [u8; 64],
 }
 
+impl Default for virtio_gpu_ctx_create {
+    fn default() -> Self {
+        unsafe { ::std::mem::zeroed() }
+    }
+}
+
 unsafe impl DataInit for virtio_gpu_ctx_create {}
 
 impl Clone for virtio_gpu_ctx_create {
@@ -345,7 +353,7 @@ impl fmt::Debug for virtio_gpu_ctx_create {
 }
 
 /* VIRTIO_GPU_CMD_CTX_DESTROY */
-#[derive(Copy, Clone, Debug)]
+#[derive(Copy, Clone, Debug, Default)]
 #[repr(C)]
 pub struct virtio_gpu_ctx_destroy {
     pub hdr: virtio_gpu_ctrl_hdr,
@@ -354,7 +362,7 @@ pub struct virtio_gpu_ctx_destroy {
 unsafe impl DataInit for virtio_gpu_ctx_destroy {}
 
 /* VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE, VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE */
-#[derive(Copy, Clone, Debug)]
+#[derive(Copy, Clone, Debug, Default)]
 #[repr(C)]
 pub struct virtio_gpu_ctx_resource {
     pub hdr: virtio_gpu_ctrl_hdr,
@@ -365,7 +373,7 @@ pub struct virtio_gpu_ctx_resource {
 unsafe impl DataInit for virtio_gpu_ctx_resource {}
 
 /* VIRTIO_GPU_CMD_SUBMIT_3D */
-#[derive(Copy, Clone, Debug)]
+#[derive(Copy, Clone, Debug, Default)]
 #[repr(C)]
 pub struct virtio_gpu_cmd_submit {
     pub hdr: virtio_gpu_ctrl_hdr,
@@ -380,7 +388,7 @@ pub const VIRTIO_GPU_CAPSET_VIRGL2: u32 = 2;
 pub const VIRTIO_GPU_CAPSET3: u32 = 3;
 
 /* VIRTIO_GPU_CMD_GET_CAPSET_INFO */
-#[derive(Copy, Clone, Debug)]
+#[derive(Copy, Clone, Debug, Default)]
 #[repr(C)]
 pub struct virtio_gpu_get_capset_info {
     pub hdr: virtio_gpu_ctrl_hdr,
@@ -391,7 +399,7 @@ pub struct virtio_gpu_get_capset_info {
 unsafe impl DataInit for virtio_gpu_get_capset_info {}
 
 /* VIRTIO_GPU_RESP_OK_CAPSET_INFO */
-#[derive(Copy, Clone, Debug)]
+#[derive(Copy, Clone, Debug, Default)]
 #[repr(C)]
 pub struct virtio_gpu_resp_capset_info {
     pub hdr: virtio_gpu_ctrl_hdr,
@@ -404,7 +412,7 @@ pub struct virtio_gpu_resp_capset_info {
 unsafe impl DataInit for virtio_gpu_resp_capset_info {}
 
 /* VIRTIO_GPU_CMD_GET_CAPSET */
-#[derive(Copy, Clone, Debug)]
+#[derive(Copy, Clone, Debug, Default)]
 #[repr(C)]
 pub struct virtio_gpu_get_capset {
     pub hdr: virtio_gpu_ctrl_hdr,
@@ -415,7 +423,7 @@ pub struct virtio_gpu_get_capset {
 unsafe impl DataInit for virtio_gpu_get_capset {}
 
 /* VIRTIO_GPU_RESP_OK_CAPSET */
-#[derive(Copy, Clone, Debug)]
+#[derive(Copy, Clone, Debug, Default)]
 #[repr(C)]
 pub struct virtio_gpu_resp_capset {
     pub hdr: virtio_gpu_ctrl_hdr,
@@ -425,7 +433,7 @@ pub struct virtio_gpu_resp_capset {
 unsafe impl DataInit for virtio_gpu_resp_capset {}
 
 /* VIRTIO_GPU_RESP_OK_RESOURCE_PLANE_INFO */
-#[derive(Copy, Clone, Debug)]
+#[derive(Copy, Clone, Debug, Default)]
 #[repr(C)]
 pub struct virtio_gpu_resp_resource_plane_info {
     pub hdr: virtio_gpu_ctrl_hdr,
@@ -442,7 +450,7 @@ const PLANE_INFO_MAX_COUNT: usize = 4;
 
 pub const VIRTIO_GPU_EVENT_DISPLAY: u32 = 1 << 0;
 
-#[derive(Copy, Clone, Debug)]
+#[derive(Copy, Clone, Debug, Default)]
 #[repr(C)]
 pub struct virtio_gpu_config {
     pub events_read: Le32,
@@ -493,7 +501,7 @@ pub enum GpuCommand {
 #[derive(Debug)]
 pub enum GpuCommandDecodeError {
     /// The command referenced an inaccessible area of memory.
-    Memory(VolatileMemoryError),
+    Memory(DescriptorError),
     /// The type of the command was invalid.
     InvalidType(u32),
 }
@@ -513,8 +521,8 @@ impl Display for GpuCommandDecodeError {
     }
 }
 
-impl From<VolatileMemoryError> for GpuCommandDecodeError {
-    fn from(e: VolatileMemoryError) -> GpuCommandDecodeError {
+impl From<DescriptorError> for GpuCommandDecodeError {
+    fn from(e: DescriptorError) -> GpuCommandDecodeError {
         GpuCommandDecodeError::Memory(e)
     }
 }
@@ -549,30 +557,30 @@ impl fmt::Debug for GpuCommand {
 
 impl GpuCommand {
     /// Decodes a command from the given chunk of memory.
-    pub fn decode(cmd: VolatileSlice) -> Result<GpuCommand, GpuCommandDecodeError> {
+    pub fn decode(cmd: &mut Reader) -> Result<GpuCommand, GpuCommandDecodeError> {
         use self::GpuCommand::*;
-        let hdr: virtio_gpu_ctrl_hdr = cmd.get_ref(0)?.load();
+        let hdr = cmd.clone().read_obj::<virtio_gpu_ctrl_hdr>()?;
         Ok(match hdr.type_.into() {
-            VIRTIO_GPU_CMD_GET_DISPLAY_INFO => GetDisplayInfo(cmd.get_ref(0)?.load()),
-            VIRTIO_GPU_CMD_RESOURCE_CREATE_2D => ResourceCreate2d(cmd.get_ref(0)?.load()),
-            VIRTIO_GPU_CMD_RESOURCE_UNREF => ResourceUnref(cmd.get_ref(0)?.load()),
-            VIRTIO_GPU_CMD_SET_SCANOUT => SetScanout(cmd.get_ref(0)?.load()),
-            VIRTIO_GPU_CMD_RESOURCE_FLUSH => ResourceFlush(cmd.get_ref(0)?.load()),
-            VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D => TransferToHost2d(cmd.get_ref(0)?.load()),
-            VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING => ResourceAttachBacking(cmd.get_ref(0)?.load()),
-            VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING => ResourceDetachBacking(cmd.get_ref(0)?.load()),
-            VIRTIO_GPU_CMD_GET_CAPSET_INFO => GetCapsetInfo(cmd.get_ref(0)?.load()),
-            VIRTIO_GPU_CMD_GET_CAPSET => GetCapset(cmd.get_ref(0)?.load()),
-            VIRTIO_GPU_CMD_CTX_CREATE => CtxCreate(cmd.get_ref(0)?.load()),
-            VIRTIO_GPU_CMD_CTX_DESTROY => CtxDestroy(cmd.get_ref(0)?.load()),
-            VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE => CtxAttachResource(cmd.get_ref(0)?.load()),
-            VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE => CtxDetachResource(cmd.get_ref(0)?.load()),
-            VIRTIO_GPU_CMD_RESOURCE_CREATE_3D => ResourceCreate3d(cmd.get_ref(0)?.load()),
-            VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D => TransferToHost3d(cmd.get_ref(0)?.load()),
-            VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D => TransferFromHost3d(cmd.get_ref(0)?.load()),
-            VIRTIO_GPU_CMD_SUBMIT_3D => CmdSubmit3d(cmd.get_ref(0)?.load()),
-            VIRTIO_GPU_CMD_UPDATE_CURSOR => UpdateCursor(cmd.get_ref(0)?.load()),
-            VIRTIO_GPU_CMD_MOVE_CURSOR => MoveCursor(cmd.get_ref(0)?.load()),
+            VIRTIO_GPU_CMD_GET_DISPLAY_INFO => GetDisplayInfo(cmd.read_obj()?),
+            VIRTIO_GPU_CMD_RESOURCE_CREATE_2D => ResourceCreate2d(cmd.read_obj()?),
+            VIRTIO_GPU_CMD_RESOURCE_UNREF => ResourceUnref(cmd.read_obj()?),
+            VIRTIO_GPU_CMD_SET_SCANOUT => SetScanout(cmd.read_obj()?),
+            VIRTIO_GPU_CMD_RESOURCE_FLUSH => ResourceFlush(cmd.read_obj()?),
+            VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D => TransferToHost2d(cmd.read_obj()?),
+            VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING => ResourceAttachBacking(cmd.read_obj()?),
+            VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING => ResourceDetachBacking(cmd.read_obj()?),
+            VIRTIO_GPU_CMD_GET_CAPSET_INFO => GetCapsetInfo(cmd.read_obj()?),
+            VIRTIO_GPU_CMD_GET_CAPSET => GetCapset(cmd.read_obj()?),
+            VIRTIO_GPU_CMD_CTX_CREATE => CtxCreate(cmd.read_obj()?),
+            VIRTIO_GPU_CMD_CTX_DESTROY => CtxDestroy(cmd.read_obj()?),
+            VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE => CtxAttachResource(cmd.read_obj()?),
+            VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE => CtxDetachResource(cmd.read_obj()?),
+            VIRTIO_GPU_CMD_RESOURCE_CREATE_3D => ResourceCreate3d(cmd.read_obj()?),
+            VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D => TransferToHost3d(cmd.read_obj()?),
+            VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D => TransferFromHost3d(cmd.read_obj()?),
+            VIRTIO_GPU_CMD_SUBMIT_3D => CmdSubmit3d(cmd.read_obj()?),
+            VIRTIO_GPU_CMD_UPDATE_CURSOR => UpdateCursor(cmd.read_obj()?),
+            VIRTIO_GPU_CMD_MOVE_CURSOR => MoveCursor(cmd.read_obj()?),
             _ => return Err(GpuCommandDecodeError::InvalidType(hdr.type_.into())),
         })
     }
@@ -638,7 +646,7 @@ pub enum GpuResponse {
 #[derive(Debug)]
 pub enum GpuResponseEncodeError {
     /// The response was encoded to an inaccessible area of memory.
-    Memory(VolatileMemoryError),
+    Memory(DescriptorError),
     /// More displays than are valid were in a `OkDisplayInfo`.
     TooManyDisplays(usize),
     /// More planes than are valid were in a `OkResourcePlaneInfo`.
@@ -661,8 +669,8 @@ impl Display for GpuResponseEncodeError {
     }
 }
 
-impl From<VolatileMemoryError> for GpuResponseEncodeError {
-    fn from(e: VolatileMemoryError) -> GpuResponseEncodeError {
+impl From<DescriptorError> for GpuResponseEncodeError {
+    fn from(e: DescriptorError) -> GpuResponseEncodeError {
         GpuResponseEncodeError::Memory(e)
     }
 }
@@ -674,7 +682,7 @@ impl GpuResponse {
         flags: u32,
         fence_id: u64,
         ctx_id: u32,
-        resp: VolatileSlice,
+        resp: &mut Writer,
     ) -> Result<u32, GpuResponseEncodeError> {
         let hdr = virtio_gpu_ctrl_hdr {
             type_: Le32::from(self.get_type()),
@@ -697,24 +705,22 @@ impl GpuResponse {
                     disp_mode.r.height = Le32::from(height);
                     disp_mode.enabled = Le32::from(1);
                 }
-                resp.get_ref(0)?.store(disp_info);
+                resp.write_obj(disp_info)?;
                 size_of_val(&disp_info)
             }
             GpuResponse::OkCapsetInfo { id, version, size } => {
-                resp.get_ref(0)?.store(virtio_gpu_resp_capset_info {
+                resp.write_obj(virtio_gpu_resp_capset_info {
                     hdr,
                     capset_id: Le32::from(id),
                     capset_max_version: Le32::from(version),
                     capset_max_size: Le32::from(size),
                     padding: Le32::from(0),
-                });
+                })?;
                 size_of::<virtio_gpu_resp_capset_info>()
             }
             GpuResponse::OkCapset(ref data) => {
-                resp.get_ref(0)?.store(hdr);
-                let resp_data_slice =
-                    resp.get_slice(size_of_val(&hdr) as u64, data.len() as u64)?;
-                resp_data_slice.copy_from(data);
+                resp.write_obj(hdr)?;
+                resp.write(data)?;
                 size_of_val(&hdr) + data.len()
             }
             GpuResponse::OkResourcePlaneInfo {
@@ -738,25 +744,22 @@ impl GpuResponse {
                     strides,
                     offsets,
                 };
-                match resp.get_ref(0) {
-                    Ok(resp_ref) => {
-                        resp_ref.store(plane_info);
-                        size_of_val(&plane_info)
-                    }
-                    _ => {
-                        // In case there is too little room in the response slice to store the
-                        // entire virtio_gpu_resp_resource_plane_info, convert response to a regular
-                        // VIRTIO_GPU_RESP_OK_NODATA and attempt to return that.
-                        resp.get_ref(0)?.store(virtio_gpu_ctrl_hdr {
-                            type_: Le32::from(VIRTIO_GPU_RESP_OK_NODATA),
-                            ..hdr
-                        });
-                        size_of_val(&hdr)
-                    }
+                if resp.available_bytes() >= size_of_val(&plane_info) {
+                    resp.write_obj(plane_info)?;
+                    size_of_val(&plane_info)
+                } else {
+                    // In case there is too little room in the response slice to store the
+                    // entire virtio_gpu_resp_resource_plane_info, convert response to a regular
+                    // VIRTIO_GPU_RESP_OK_NODATA and attempt to return that.
+                    resp.write_obj(virtio_gpu_ctrl_hdr {
+                        type_: Le32::from(VIRTIO_GPU_RESP_OK_NODATA),
+                        ..hdr
+                    })?;
+                    size_of_val(&hdr)
                 }
             }
             _ => {
-                resp.get_ref(0)?.store(hdr);
+                resp.write_obj(hdr)?;
                 size_of_val(&hdr)
             }
         };