author    David Reveman <reveman@chromium.org>  2018-04-22 21:42:09 -0400
committer chrome-bot <chrome-bot@chromium.org>  2018-05-16 08:34:40 -0700
commit    52ba4e5c6492d69a8ff57e87a0134c148a74a1f2 (patch)
tree      e7afc45010d50be4cb42db47314222af8d6b121d
parent    0f1770d3ef9469b23edbaaa5f977dc0bb59602c6 (diff)
virtwl: Add DMABuf allocation support.
This implements the DMABuf allocation type in the virtio wayland
device.

We attempt to locate a supported DRM device prior to engaging
the device jail. If found, the DRM device is passed to the
wayland device code and used to serve DMABuf allocations.

DMABuf support can be disabled by not providing crosvm with
access to any DRM device nodes.

The guest is expected to handle the case where DMABuf allocation
fails and fall back to standard shared memory.

This initial change uses DRM directly but is structured in a
way that would allow the allocator to be replaced by minigbm
with minimal effort.

BUG=chromium:837209
TEST=crosvm finds drm device and returns valid dmabufs to guest

Change-Id: Ic1fd776dfdfefae2d7b321d449273ef269e9cc62
Reviewed-on: https://chromium-review.googlesource.com/1034088
Commit-Ready: David Reveman <reveman@chromium.org>
Tested-by: David Reveman <reveman@chromium.org>
Reviewed-by: Zach Reizner <zachr@chromium.org>
-rw-r--r--   Cargo.lock                       1
-rw-r--r--   Cargo.toml                       1
-rw-r--r--   devices/Cargo.toml               3
-rw-r--r--   devices/src/virtio/wl.rs       158
-rw-r--r--   gpu_buffer/Cargo.toml            1
-rw-r--r--   gpu_buffer/src/lib.rs            3
-rw-r--r--   gpu_buffer/src/rendernode.rs   106
-rw-r--r--   src/linux.rs                    81
-rw-r--r--   src/main.rs                     10
-rw-r--r--   vm_control/src/lib.rs          243
10 files changed, 560 insertions, 47 deletions
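For orientation before reading the patch, the path this change adds is roughly as follows (all names taken from the diffs below):

    guest sends VIRTIO_WL_CMD_VFD_NEW_DMABUF { id, width, height, format }
      -> devices/src/virtio/wl.rs: parse_new_dmabuf() -> WlOp::NewDmabuf
         -> WlState::new_dmabuf() -> WlVfd::dmabuf(), which issues
            VmRequest::AllocateAndRegisterGpuMemory over the VM control socket
      -> src/linux.rs: GpuBufferDevice (a gpu_buffer::Device opened on a DRM
         render node) allocates a linear buffer and exports plane 0 as a dmabuf fd
      -> vm_control/src/lib.rs: register_memory() maps that fd into the guest
         physical address space and returns (pfn, slot); the stride is passed along
      -> device replies with VIRTIO_WL_RESP_VFD_NEW_DMABUF carrying pfn, size and stride

If no usable render node is found, or the wl-dmabuf feature is disabled, no allocator is installed; the allocation request then fails (ENODEV) and the guest is expected to fall back to ordinary shared-memory vfds.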
diff --git a/Cargo.lock b/Cargo.lock
index 33f2d56..1422dcf 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -139,6 +139,7 @@ name = "gpu_buffer"
 version = "0.1.0"
 dependencies = [
  "data_model 0.1.0",
+ "sys_util 0.1.0",
 ]
 
 [[package]]
diff --git a/Cargo.toml b/Cargo.toml
index dfc58f4..dead362 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -12,6 +12,7 @@ panic = 'abort'
 [features]
 plugin = ["plugin_proto", "crosvm_plugin", "protobuf"]
 default-no-sandbox = []
+wl-dmabuf = ["devices/wl-dmabuf", "gpu_buffer"]
 
 [dependencies]
 arch = { path = "arch" }
diff --git a/devices/Cargo.toml b/devices/Cargo.toml
index 54b9487..4581594 100644
--- a/devices/Cargo.toml
+++ b/devices/Cargo.toml
@@ -3,6 +3,9 @@ name = "devices"
 version = "0.1.0"
 authors = ["The Chromium OS Authors"]
 
+[features]
+wl-dmabuf = []
+
 [dependencies]
 byteorder = "*"
 data_model = { path = "../data_model" }
diff --git a/devices/src/virtio/wl.rs b/devices/src/virtio/wl.rs
index 2c34e4e..a8b2603 100644
--- a/devices/src/virtio/wl.rs
+++ b/devices/src/virtio/wl.rs
@@ -39,6 +39,8 @@ use std::fs::File;
 use std::io::{self, Seek, SeekFrom, Read};
 use std::mem::{size_of, size_of_val};
 use std::os::unix::io::{AsRawFd, RawFd};
+#[cfg(feature = "wl-dmabuf")]
+use std::os::unix::io::FromRawFd;
 use std::os::unix::net::{UnixDatagram, UnixStream};
 use std::path::{PathBuf, Path};
 use std::rc::Rc;
@@ -48,6 +50,9 @@ use std::sync::atomic::{AtomicUsize, Ordering};
 use std::thread;
 use std::time::Duration;
 
+#[cfg(feature = "wl-dmabuf")]
+use libc::dup;
+
 use data_model::*;
 use data_model::VolatileMemoryError;
 
@@ -65,8 +70,12 @@ const VIRTIO_WL_CMD_VFD_RECV: u32 = 259;
 const VIRTIO_WL_CMD_VFD_NEW_CTX: u32 = 260;
 const VIRTIO_WL_CMD_VFD_NEW_PIPE: u32 = 261;
 const VIRTIO_WL_CMD_VFD_HUP: u32 = 262;
+#[cfg(feature = "wl-dmabuf")]
+const VIRTIO_WL_CMD_VFD_NEW_DMABUF: u32 = 263;
 const VIRTIO_WL_RESP_OK: u32 = 4096;
 const VIRTIO_WL_RESP_VFD_NEW: u32 = 4097;
+#[cfg(feature = "wl-dmabuf")]
+const VIRTIO_WL_RESP_VFD_NEW_DMABUF: u32 = 4098;
 const VIRTIO_WL_RESP_ERR: u32 = 4352;
 const VIRTIO_WL_RESP_OUT_OF_MEMORY: u32 = 4353;
 const VIRTIO_WL_RESP_INVALID_ID: u32 = 4354;
@@ -126,6 +135,30 @@ fn parse_new_pipe(addr: GuestAddress, mem: &GuestMemory) -> WlResult<WlOp> {
        })
 }
 
+#[cfg(feature = "wl-dmabuf")]
+fn parse_new_dmabuf(addr: GuestAddress, mem: &GuestMemory) -> WlResult<WlOp> {
+    const ID_OFFSET: u64 = 8;
+    const WIDTH_OFFSET: u64 = 28;
+    const HEIGHT_OFFSET: u64 = 32;
+    const FORMAT_OFFSET: u64 = 36;
+
+    let id: Le32 = mem.read_obj_from_addr(mem.checked_offset(addr, ID_OFFSET)
+                                              .ok_or(WlError::CheckedOffset)?)?;
+    let width: Le32 = mem.read_obj_from_addr(mem.checked_offset(addr, WIDTH_OFFSET)
+                                                .ok_or(WlError::CheckedOffset)?)?;
+    let height: Le32 = mem.read_obj_from_addr(mem.checked_offset(addr, HEIGHT_OFFSET)
+                                                .ok_or(WlError::CheckedOffset)?)?;
+    let format: Le32 = mem.read_obj_from_addr(mem.checked_offset(addr, FORMAT_OFFSET)
+                                                .ok_or(WlError::CheckedOffset)?)?;
+    Ok(WlOp::NewDmabuf {
+           id: id.into(),
+           width: width.into(),
+           height: height.into(),
+           format: format.into(),
+       })
+}
+
+
 fn parse_send(addr: GuestAddress, len: u32, mem: &GuestMemory) -> WlResult<WlOp> {
     const ID_OFFSET: u64 = 8;
     const VFD_COUNT_OFFSET: u64 = 12;
@@ -165,6 +198,8 @@ fn parse_desc(desc: &DescriptorChain, mem: &GuestMemory) -> WlResult<WlOp> {
         VIRTIO_WL_CMD_VFD_SEND => parse_send(desc.addr, desc.len, mem),
         VIRTIO_WL_CMD_VFD_NEW_CTX => Ok(WlOp::NewCtx { id: parse_id(desc.addr, mem)? }),
         VIRTIO_WL_CMD_VFD_NEW_PIPE => parse_new_pipe(desc.addr, mem),
+        #[cfg(feature = "wl-dmabuf")]
+        VIRTIO_WL_CMD_VFD_NEW_DMABUF => parse_new_dmabuf(desc.addr, mem),
         v => Ok(WlOp::InvalidCommand { op_type: v }),
     }
 }
@@ -195,6 +230,38 @@ fn encode_vfd_new(desc_mem: VolatileSlice,
     Ok(size_of::<CtrlVfdNew>() as u32)
 }
 
+#[cfg(feature = "wl-dmabuf")]
+fn encode_vfd_new_dmabuf(desc_mem: VolatileSlice,
+                         vfd_id: u32,
+                         flags: u32,
+                         pfn: u64,
+                         size: u32,
+                         stride: u32)
+                  -> WlResult<u32> {
+    let ctrl_vfd_new_dmabuf = CtrlVfdNewDmabuf {
+        hdr: CtrlHeader {
+            type_: Le32::from(VIRTIO_WL_RESP_VFD_NEW_DMABUF),
+            flags: Le32::from(0),
+        },
+        id: Le32::from(vfd_id),
+        flags: Le32::from(flags),
+        pfn: Le64::from(pfn),
+        size: Le32::from(size),
+        width: Le32::from(0),
+        height: Le32::from(0),
+        format: Le32::from(0),
+        stride0: Le32::from(stride),
+        stride1: Le32::from(0),
+        stride2: Le32::from(0),
+        offset0: Le32::from(0),
+        offset1: Le32::from(0),
+        offset2: Le32::from(0),
+    };
+
+    desc_mem.get_ref(0)?.store(ctrl_vfd_new_dmabuf);
+    Ok(size_of::<CtrlVfdNewDmabuf>() as u32)
+}
+
 fn encode_vfd_recv(desc_mem: VolatileSlice,
                    vfd_id: u32,
                    data: &[u8],
@@ -249,6 +316,14 @@ fn encode_resp(desc_mem: VolatileSlice, resp: WlResp) -> WlResult<u32> {
             size,
             resp,
         } => encode_vfd_new(desc_mem, resp, id, flags, pfn, size),
+        #[cfg(feature = "wl-dmabuf")]
+        WlResp::VfdNewDmabuf {
+            id,
+            flags,
+            pfn,
+            size,
+            stride,
+        } => encode_vfd_new_dmabuf(desc_mem, id, flags, pfn, size, stride),
         WlResp::VfdRecv { id, data, vfds } => encode_vfd_recv(desc_mem, id, data, vfds),
         WlResp::VfdHup { id } => encode_vfd_hup(desc_mem, id),
         r => {
@@ -360,6 +435,29 @@ unsafe impl DataInit for CtrlVfdNew {}
 
 #[repr(C)]
 #[derive(Copy, Clone)]
+#[cfg(feature = "wl-dmabuf")]
+struct CtrlVfdNewDmabuf {
+    hdr: CtrlHeader,
+    id: Le32,
+    flags: Le32,
+    pfn: Le64,
+    size: Le32,
+    width: Le32,
+    height: Le32,
+    format: Le32,
+    stride0: Le32,
+    stride1: Le32,
+    stride2: Le32,
+    offset0: Le32,
+    offset1: Le32,
+    offset2: Le32,
+}
+
+#[cfg(feature = "wl-dmabuf")]
+unsafe impl DataInit for CtrlVfdNewDmabuf {}
+
+#[repr(C)]
+#[derive(Copy, Clone)]
 struct CtrlVfdRecv {
     hdr: CtrlHeader,
     id: Le32,
@@ -390,6 +488,8 @@ enum WlOp {
     },
     NewCtx { id: u32 },
     NewPipe { id: u32, flags: u32 },
+    #[cfg(feature = "wl-dmabuf")]
+    NewDmabuf { id: u32, width: u32, height: u32, format: u32 },
     InvalidCommand { op_type: u32 },
 }
 
@@ -406,6 +506,14 @@ enum WlResp<'a> {
         // is important for the `get_code` method.
         resp: bool,
     },
+    #[cfg(feature = "wl-dmabuf")]
+    VfdNewDmabuf {
+        id: u32,
+        flags: u32,
+        pfn: u64,
+        size: u32,
+        stride: u32,
+    },
     VfdRecv {
         id: u32,
         data: &'a [u8],
@@ -431,6 +539,8 @@ impl<'a> WlResp<'a> {
                     VIRTIO_WL_CMD_VFD_NEW
                 }
             }
+            #[cfg(feature = "wl-dmabuf")]
+            &WlResp::VfdNewDmabuf { .. } => VIRTIO_WL_RESP_VFD_NEW_DMABUF,
             &WlResp::VfdRecv { .. } => VIRTIO_WL_CMD_VFD_RECV,
             &WlResp::VfdHup { .. } => VIRTIO_WL_CMD_VFD_HUP,
             &WlResp::Err(_) => VIRTIO_WL_RESP_ERR,
@@ -505,6 +615,26 @@ impl WlVfd {
         }
     }
 
+    #[cfg(feature = "wl-dmabuf")]
+    fn dmabuf(vm: VmRequester, width: u32, height: u32, format: u32) -> WlResult<(WlVfd, u32)> {
+        let allocate_and_register_gpu_memory_response =
+            vm.request(VmRequest::AllocateAndRegisterGpuMemory { width: width,
+                                                                 height: height,
+                                                                 format: format })?;
+        match allocate_and_register_gpu_memory_response {
+            VmResponse::AllocateAndRegisterGpuMemory { fd, pfn, slot, stride } => {
+                let mut vfd = WlVfd::default();
+                // Duplicate FD for shared memory instance.
+                let raw_fd = unsafe { File::from_raw_fd(dup(fd.as_raw_fd())) };
+                let vfd_shm = SharedMemory::from_raw_fd(raw_fd).map_err(WlError::NewAlloc)?;
+                vfd.guest_shared_memory = Some((vfd_shm.size(), vfd_shm.into()));
+                vfd.slot = Some((slot, pfn, vm));
+                Ok((vfd, stride))
+            }
+            _ => Err(WlError::VmBadResponse),
+        }
+    }
+
     fn pipe_remote_read_local_write() -> WlResult<WlVfd> {
         let (read_pipe, write_pipe) = pipe(true).map_err(WlError::NewPipe)?;
         let mut vfd = WlVfd::default();
@@ -792,6 +922,32 @@ impl WlState {
         }
     }
 
+    #[cfg(feature = "wl-dmabuf")]
+    fn new_dmabuf(&mut self, id: u32, width: u32, height: u32, format: u32) -> WlResult<WlResp> {
+        if id & VFD_ID_HOST_MASK != 0 {
+            return Ok(WlResp::InvalidId);
+        }
+
+        match self.vfds.entry(id) {
+            Entry::Vacant(entry) => {
+                let (vfd, stride) = WlVfd::dmabuf(self.vm.clone(),
+                                                  width,
+                                                  height,
+                                                  format)?;
+                let resp = WlResp::VfdNewDmabuf {
+                    id: id,
+                    flags: 0,
+                    pfn: vfd.pfn().unwrap_or_default(),
+                    size: vfd.size().unwrap_or_default() as u32,
+                    stride: stride,
+                };
+                entry.insert(vfd);
+                Ok(resp)
+            }
+            Entry::Occupied(_) => Ok(WlResp::InvalidId),
+        }
+    }
+
     fn new_context(&mut self, id: u32) -> WlResult<WlResp> {
         if id & VFD_ID_HOST_MASK != 0 {
             return Ok(WlResp::InvalidId);
@@ -956,6 +1112,8 @@ impl WlState {
             }
             WlOp::NewCtx { id } => self.new_context(id),
             WlOp::NewPipe { id, flags } => self.new_pipe(id, flags),
+            #[cfg(feature = "wl-dmabuf")]
+            WlOp::NewDmabuf { id, width, height, format } => self.new_dmabuf(id, width, height, format),
             WlOp::InvalidCommand { op_type } => {
                 warn!("unexpected command {}", op_type);
                 Ok(WlResp::InvalidCommand)
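A note on the hard-coded offsets in parse_new_dmabuf() above: they follow from assuming the guest's ctrl_vfd_new_dmabuf request shares the CtrlVfdNewDmabuf layout (an 8-byte CtrlHeader, then id, flags, pfn, size, width, height, format). A sketch of that arithmetic, not part of the patch:

    // Assumed request layout behind parse_new_dmabuf()'s constants.
    const HDR_SIZE: u64 = 4 + 4;                          // CtrlHeader: type_ + flags
    const ID_OFFSET: u64 = HDR_SIZE;                      // 8
    const WIDTH_OFFSET: u64 = ID_OFFSET + 4 + 4 + 8 + 4;  // + id + flags + pfn + size = 28
    const HEIGHT_OFFSET: u64 = WIDTH_OFFSET + 4;          // 32
    const FORMAT_OFFSET: u64 = HEIGHT_OFFSET + 4;         // 36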
diff --git a/gpu_buffer/Cargo.toml b/gpu_buffer/Cargo.toml
index 11b47c4..239283d 100644
--- a/gpu_buffer/Cargo.toml
+++ b/gpu_buffer/Cargo.toml
@@ -5,3 +5,4 @@ authors = ["The Chromium OS Authors"]
 
 [dependencies]
 data_model = { path = "../data_model" }
+sys_util = { path = "../sys_util" }
diff --git a/gpu_buffer/src/lib.rs b/gpu_buffer/src/lib.rs
index 6d0cfa9..fb69eeb 100644
--- a/gpu_buffer/src/lib.rs
+++ b/gpu_buffer/src/lib.rs
@@ -31,7 +31,10 @@
 //! ```
 
 extern crate data_model;
+#[macro_use]
+extern crate sys_util;
 
+pub mod rendernode;
 mod raw;
 
 use std::os::raw::c_void;
diff --git a/gpu_buffer/src/rendernode.rs b/gpu_buffer/src/rendernode.rs
new file mode 100644
index 0000000..ce89a15
--- /dev/null
+++ b/gpu_buffer/src/rendernode.rs
@@ -0,0 +1,106 @@
+// Copyright 2018 The Chromium OS Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+use std::ffi::CString;
+use std::fs::{File, OpenOptions};
+use std::os::raw::{c_char, c_int, c_uint, c_ulonglong};
+use std::path::Path;
+use std::ptr::null_mut;
+
+use sys_util::ioctl_with_mut_ref;
+
+const DRM_IOCTL_BASE: c_uint = 0x64;
+
+#[repr(C)]
+#[derive(Copy, Clone)]
+struct drm_version {
+    version_major: c_int,
+    version_minor: c_int,
+    version_patchlevel: c_int,
+    name_len: c_ulonglong,
+    name: *mut c_char,
+    date_len: c_ulonglong,
+    date: *mut c_char,
+    desc_len: c_ulonglong,
+    desc: *mut c_char,
+}
+
+ioctl_iowr_nr!(DRM_IOCTL_VERSION, DRM_IOCTL_BASE, 0x0, drm_version);
+
+fn get_drm_device_name(fd: &File) -> Result<String, ()> {
+    let mut version = drm_version {
+        version_major: 0,
+        version_minor: 0,
+        version_patchlevel: 0,
+        name_len: 0,
+        name: null_mut(),
+        date_len: 0,
+        date: null_mut(),
+        desc_len: 0,
+        desc: null_mut(),
+    };
+
+    // Get the length of the device name.
+    if unsafe { ioctl_with_mut_ref(fd, DRM_IOCTL_VERSION(), &mut version) } < 0 {
+        return Err(());
+    }
+
+    // Enough bytes to hold the device name and terminating null character.
+    let mut name_bytes: Vec<u8> = vec![0; (version.name_len + 1) as usize];
+    let mut version = drm_version {
+        version_major: 0,
+        version_minor: 0,
+        version_patchlevel: 0,
+        name_len: name_bytes.len() as c_ulonglong,
+        name: name_bytes.as_mut_ptr() as *mut c_char,
+        date_len: 0,
+        date: null_mut(),
+        desc_len: 0,
+        desc: null_mut(),
+    };
+
+    // Safe as no more than name_len + 1 bytes will be written to name.
+    if unsafe { ioctl_with_mut_ref(fd, DRM_IOCTL_VERSION(), &mut version) } < 0 {
+        return Err(());
+    }
+
+    Ok(CString::new(&name_bytes[..(version.name_len as usize)])
+       .map_err(|_| ())?
+       .into_string().map_err(|_| ())?)
+}
+
+
+/// Returns a `File` for an opened render node device, filtering out devices whose
+/// driver name appears in the `undesired` list.
+pub fn open_device(undesired: &[&str]) -> Result<File, ()> {
+    const DRM_DIR_NAME: &str = "/dev/dri";
+    const DRM_MAX_MINOR: u32 = 15;
+    const RENDER_NODE_START: u32 = 128;
+
+    for n in RENDER_NODE_START..(RENDER_NODE_START + DRM_MAX_MINOR + 1) {
+        let path = Path::new(DRM_DIR_NAME).join(format!("renderD{}", n));
+
+        if let Ok(fd) = OpenOptions::new().read(true).write(true).open(path) {
+            if let Ok(name) = get_drm_device_name(&fd) {
+                if !undesired.iter().any(|item| *item == name) {
+                    return Ok(fd);
+                }
+            }
+        }
+    }
+
+    Err(())
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    #[ignore] // no access to /dev/dri
+    fn open_rendernode_device() {
+        let undesired: &[&str] = &["bad_driver", "another_bad_driver"];
+        open_device(undesired).expect("failed to open rendernode");
+    }
+}
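For context, the only caller of open_device() in this change is create_gpu_memory_allocator() in src/linux.rs (next file). A minimal usage sketch along the same lines; try_open_gpu is a hypothetical helper name, not something this patch adds:

    // Sketch: skip the software-only vgem driver and wrap whatever render node
    // remains in a gpu_buffer::Device, as src/linux.rs does below.
    fn try_open_gpu() -> Option<gpu_buffer::Device> {
        let undesired: &[&str] = &["vgem"];
        let fd = gpu_buffer::rendernode::open_device(undesired).ok()?;
        gpu_buffer::Device::new(fd).ok()
    }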
diff --git a/src/linux.rs b/src/linux.rs
index 6192478..8aae432 100644
--- a/src/linux.rs
+++ b/src/linux.rs
@@ -18,6 +18,8 @@ use std::thread::JoinHandle;
 
 use libc;
 use libc::c_int;
+#[cfg(feature = "wl-dmabuf")]
+use libc::EINVAL;
 
 use device_manager;
 use devices;
@@ -29,7 +31,9 @@ use qcow::{self, QcowFile};
 use sys_util::*;
 use sys_util;
 use vhost;
-use vm_control::VmRequest;
+use vm_control::{VmRequest, GpuMemoryAllocator};
+#[cfg(feature = "wl-dmabuf")]
+use gpu_buffer;
 
 use Config;
 use DiskType;
@@ -48,6 +52,7 @@ pub enum Error {
     CloneEventFd(sys_util::Error),
     Cmdline(kernel_cmdline::Error),
     CreateEventFd(sys_util::Error),
+    CreateGpuBufferDevice,
     CreateGuestMemory(Box<error::Error>),
     CreateIrqChip(Box<error::Error>),
     CreateKvm(sys_util::Error),
@@ -66,6 +71,7 @@ pub enum Error {
     NetDeviceNew(devices::virtio::NetError),
     NoVarEmpty,
     OpenKernel(PathBuf, io::Error),
+    OpenGpuBufferDevice,
     PollContextAdd(sys_util::Error),
     QcowDeviceCreate(qcow::Error),
     RegisterBalloon(device_manager::Error),
@@ -99,6 +105,7 @@ impl fmt::Display for Error {
             &Error::CloneEventFd(ref e) => write!(f, "failed to clone eventfd: {:?}", e),
             &Error::Cmdline(ref e) => write!(f, "the given kernel command line was invalid: {}", e),
             &Error::CreateEventFd(ref e) => write!(f, "failed to create eventfd: {:?}", e),
+            &Error::CreateGpuBufferDevice => write!(f, "failed to create GPU buffer device"),
             &Error::CreateGuestMemory(ref e) => write!(f, "failed to create guest memory: {:?}", e),
             &Error::CreateIrqChip(ref e) => {
                 write!(f, "failed to create in-kernel IRQ chip: {:?}", e)
@@ -123,6 +130,7 @@ impl fmt::Display for Error {
             &Error::OpenKernel(ref p, ref e) => {
                 write!(f, "failed to open kernel image {:?}: {}", p, e)
             }
+            &Error::OpenGpuBufferDevice => write!(f, "failed to open GPU buffer device"),
             &Error::PollContextAdd(ref e) => write!(f, "failed to add fd to poll context: {:?}", e),
             &Error::QcowDeviceCreate(ref e) => {
                 write!(f, "failed to read qcow formatted file {:?}", e)
@@ -543,6 +551,56 @@ fn run_vcpu(vcpu: Vcpu,
         .map_err(Error::SpawnVcpu)
 }
 
+#[cfg(feature = "wl-dmabuf")]
+struct GpuBufferDevice {
+    device: gpu_buffer::Device,
+}
+
+#[cfg(feature = "wl-dmabuf")]
+impl GpuMemoryAllocator for GpuBufferDevice {
+    fn allocate(&self, width: u32, height: u32, format: u32) -> sys_util::Result<(File, u32)> {
+        let buffer = match self.device.create_buffer(
+            width,
+            height,
+            gpu_buffer::Format::from(format),
+            // Linear layout is a requirement as virtio wayland guest expects
+            // Linear layout is a requirement as the virtio wayland guest expects
+            // this for CPU access to the buffer. Scanout and texturing are
+            // optional as the consumer (wayland compositor) is expected to
+            // fall back to a less efficient mechanism for presentation if
+            // necessary. In practice, linear buffers for commonly used formats
+            // will also support scanout and texturing.
+            Ok(v) => v,
+            Err(_) => return Err(sys_util::Error::new(EINVAL)),
+        };
+        // We only support the first plane. Buffers with more planes are not
+        // a problem, but additional planes will not be registered for access
+        // from the guest.
+        let fd = match buffer.export_plane_fd(0) {
+            Ok(v) => v,
+            Err(e) => return Err(sys_util::Error::new(e)),
+        };
+
+        Ok((fd, buffer.stride()))
+    }
+}
+
+#[cfg(feature = "wl-dmabuf")]
+fn create_gpu_memory_allocator() -> Result<Option<Box<GpuMemoryAllocator>>> {
+    let undesired: &[&str] = &["vgem"];
+    let fd = gpu_buffer::rendernode::open_device(undesired)
+        .map_err(|_| Error::OpenGpuBufferDevice)?;
+    let device = gpu_buffer::Device::new(fd)
+        .map_err(|_| Error::CreateGpuBufferDevice)?;
+    info!("created GPU buffer device for DMABuf allocations");
+    Ok(Some(Box::new(GpuBufferDevice { device })))
+}
+
+#[cfg(not(feature = "wl-dmabuf"))]
+fn create_gpu_memory_allocator() -> Result<Option<Box<GpuMemoryAllocator>>> {
+    Ok(None)
+}
+
 fn run_control(vm: &mut Vm,
                control_sockets: Vec<UnlinkUnixDatagram>,
                next_dev_pfn: &mut u64,
@@ -552,7 +610,8 @@ fn run_control(vm: &mut Vm,
                kill_signaled: Arc<AtomicBool>,
                vcpu_handles: Vec<JoinHandle<()>>,
                balloon_host_socket: UnixDatagram,
-               _irqchip_fd: Option<File>)
+               _irqchip_fd: Option<File>,
+               gpu_memory_allocator: Option<Box<GpuMemoryAllocator>>)
                -> Result<()> {
     const MAX_VM_FD_RECV: usize = 1;
 
@@ -638,8 +697,13 @@ fn run_control(vm: &mut Vm,
                             Ok(request) => {
                                 let mut running = true;
                                 let response =
-                                    request.execute(vm, next_dev_pfn,
-                                                    &mut running, &balloon_host_socket);
+                                    request.execute(vm,
+                                                    next_dev_pfn,
+                                                    &mut running,
+                                                    &balloon_host_socket,
+                                                    gpu_memory_allocator
+                                                        .as_ref()
+                                                        .map(|v| v.as_ref()));
                                 if let Err(e) = response.send(&mut scm, socket.as_ref()) {
                                     error!("failed to send VmResponse: {:?}", e);
                                 }
@@ -751,6 +815,12 @@ pub fn run_config(cfg: Config) -> Result<()> {
                                   &mut control_sockets,
                                   balloon_device_socket)?;
 
+    let gpu_memory_allocator = if cfg.wayland_dmabuf {
+        create_gpu_memory_allocator()?
+    } else {
+        None
+    };
+
     for param in &cfg.params {
         cmdline.insert_str(&param).map_err(Error::Cmdline)?;
     }
@@ -787,5 +857,6 @@ pub fn run_config(cfg: Config) -> Result<()> {
                 kill_signaled,
                 vcpu_handles,
                 balloon_host_socket,
-                irq_chip)
+                irq_chip,
+                gpu_memory_allocator)
 }
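The GpuMemoryAllocator trait (defined in vm_control/src/lib.rs below) is the seam that keeps the allocator swappable, per the commit message's note about minigbm. As a purely hypothetical illustration, and assuming sys_util's SharedMemory converts into its backing File the same way wl.rs relies on above, any source of linear, fd-backed buffers could be dropped in:

    use std::fs::File;
    use sys_util::SharedMemory;
    use vm_control::GpuMemoryAllocator;

    // Hypothetical stand-in allocator (not part of this change): serves requests
    // from plain shared memory instead of a DMABuf, assuming 4 bytes per pixel.
    struct FakeAllocator;

    impl GpuMemoryAllocator for FakeAllocator {
        fn allocate(&self, width: u32, height: u32, _format: u32)
                    -> sys_util::Result<(File, u32)> {
            let stride = width * 4;
            let mut shm = SharedMemory::new(None)?;
            shm.set_size(stride as u64 * height as u64)?;
            Ok((shm.into(), stride))
        }
    }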
diff --git a/src/main.rs b/src/main.rs
index f8ed715..7c248e2 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -29,6 +29,8 @@ extern crate data_model;
 extern crate plugin_proto;
 #[cfg(feature = "plugin")]
 extern crate protobuf;
+#[cfg(feature = "wl-dmabuf")]
+extern crate gpu_buffer;
 
 pub mod argument;
 pub mod linux;
@@ -71,6 +73,7 @@ pub struct Config {
     mac_address: Option<net_util::MacAddress>,
     vhost_net: bool,
     wayland_socket_path: Option<PathBuf>,
+    wayland_dmabuf: bool,
     socket_path: Option<PathBuf>,
     multiprocess: bool,
     seccomp_policy_dir: PathBuf,
@@ -92,6 +95,7 @@ impl Default for Config {
             mac_address: None,
             vhost_net: false,
             wayland_socket_path: None,
+            wayland_dmabuf: false,
             socket_path: None,
             multiprocess: !cfg!(feature = "default-no-sandbox"),
             seccomp_policy_dir: PathBuf::from(SECCOMP_POLICY_DIR),
@@ -269,6 +273,10 @@ fn set_argument(cfg: &mut Config, name: &str, value: Option<&str>) -> argument::
             }
             cfg.wayland_socket_path = Some(wayland_socket_path);
         }
+        #[cfg(feature = "wl-dmabuf")]
+        "wayland-dmabuf" => {
+            cfg.wayland_dmabuf = true
+        },
         "socket" => {
             if cfg.socket_path.is_some() {
                 return Err(argument::Error::TooManyArguments("`socket` already given".to_owned()));
@@ -363,6 +371,8 @@ fn run_vm(args: std::env::Args) -> std::result::Result<(), ()> {
           Argument::value("wayland-group",
                           "GROUP",
                           "Name of the group with access to the Wayland socket."),
+          #[cfg(feature = "wl-dmabuf")]
+          Argument::flag("wayland-dmabuf", "Enable support for DMABufs in Wayland device."),
           Argument::short_value('s',
                                 "socket",
                                 "PATH",
diff --git a/vm_control/src/lib.rs b/vm_control/src/lib.rs
index ec0522e..3837bb4 100644
--- a/vm_control/src/lib.rs
+++ b/vm_control/src/lib.rs
@@ -17,15 +17,16 @@ extern crate libc;
 extern crate sys_util;
 
 use std::fs::File;
+use std::io::{Seek, SeekFrom};
 use std::os::unix::io::{AsRawFd, RawFd};
 use std::os::unix::net::UnixDatagram;
 use std::result;
 
-use libc::{ERANGE, EINVAL};
+use libc::{ERANGE, EINVAL, ENODEV};
 
 use byteorder::{LittleEndian, WriteBytesExt};
 use data_model::{DataInit, Le32, Le64, VolatileMemory};
-use sys_util::{EventFd, Error as SysError, MmapError, MemoryMapping, Scm, GuestAddress};
+use sys_util::{EventFd, Result, Error as SysError, MmapError, MemoryMapping, Scm, GuestAddress};
 use kvm::{IoeventAddress, Vm};
 
 #[derive(Debug, PartialEq)]
@@ -80,13 +81,17 @@ pub enum VmRequest {
     RegisterMemory(MaybeOwnedFd, usize),
     /// Unregister the given memory slot that was previously registereed with `RegisterMemory`.
     UnregisterMemory(u32),
+    /// Allocate a GPU buffer of the given size and format, and register the memory into the
+    /// guest address space. The response variant is `VmResponse::AllocateAndRegisterGpuMemory`.
+    AllocateAndRegisterGpuMemory { width: u32, height: u32, format: u32 },
 }
 
 const VM_REQUEST_TYPE_EXIT: u32 = 1;
 const VM_REQUEST_TYPE_REGISTER_MEMORY: u32 = 2;
 const VM_REQUEST_TYPE_UNREGISTER_MEMORY: u32 = 3;
 const VM_REQUEST_TYPE_BALLOON_ADJUST: u32 = 4;
-const VM_REQUEST_SIZE: usize = 24;
+const VM_REQUEST_TYPE_ALLOCATE_AND_REGISTER_GPU_MEMORY: u32 = 5;
+const VM_REQUEST_SIZE: usize = 32;
 
 #[repr(C)]
 #[derive(Clone, Copy, Default)]
@@ -95,11 +100,51 @@ struct VmRequestStruct {
     slot: Le32,
     size: Le64,
     num_pages: Le32,
+    width: Le32,
+    height: Le32,
+    format: Le32,
 }
 
 // Safe because it only has data and has no implicit padding.
 unsafe impl DataInit for VmRequestStruct {}
 
+fn register_memory(vm: &mut Vm, next_mem_pfn: &mut u64, fd: &AsRawFd, size: usize) -> Result<(u64, u32)> {
+    let mmap = match MemoryMapping::from_fd(fd, size) {
+        Ok(v) => v,
+        Err(MmapError::SystemCallFailed(e)) => return Err(e),
+        _ => return Err(SysError::new(EINVAL)),
+    };
+    let pfn = *next_mem_pfn;
+    let slot =
+        match vm.add_device_memory(GuestAddress(pfn << 12), mmap, false, false) {
+            Ok(v) => v,
+            Err(e) => return Err(e),
+        };
+    // TODO(zachr): Use a smarter allocation strategy. The current strategy is just
+    // bumping this pointer, meaning the remove operation does not free any address
+    // space. Given enough allocations, device memory may run out of address space and
+    // collide with guest memory or MMIO address space. There is currently nothing in
+    // place to limit the amount of address space used by device memory.
+    *next_mem_pfn += (((size + 0x7ff) >> 12) + 1) as u64;
+
+    Ok((pfn, slot))
+}
+
+/// Trait that needs to be implemented in order to service GPU memory allocation
+/// requests. Implementations are expected to support some set of buffer sizes and
+/// formats, but are not required to support every possible combination.
+pub trait GpuMemoryAllocator {
+    /// Allocates GPU memory for a buffer of a specific size and format. The memory
+    /// layout for the returned buffer must be linear. A file handle and the stride
+    /// for the buffer are returned on success.
+    ///
+    /// # Arguments
+    /// * `width` - Width of buffer.
+    /// * `height` - Height of buffer.
+    /// * `format` - Fourcc format of buffer.
+    fn allocate(&self, width: u32, height: u32, format: u32) -> Result<(File, u32)>;
+}
+
 impl VmRequest {
     /// Receive a `VmRequest` from the given socket.
     ///
@@ -128,6 +173,12 @@ impl VmRequest {
             VM_REQUEST_TYPE_BALLOON_ADJUST => {
                 Ok(VmRequest::BalloonAdjust(req.num_pages.to_native() as i32))
             },
+            VM_REQUEST_TYPE_ALLOCATE_AND_REGISTER_GPU_MEMORY => {
+                Ok(VmRequest::AllocateAndRegisterGpuMemory { width: req.width.to_native(),
+                                                             height: req.height.to_native(),
+                                                             format: req.format.to_native()
+                    })
+            },
             _ => Err(VmControlError::InvalidType),
         }
     }
@@ -157,6 +208,12 @@ impl VmRequest {
                 req.type_ = Le32::from(VM_REQUEST_TYPE_BALLOON_ADJUST);
                 req.num_pages = Le32::from(pages as u32);
             },
+            &VmRequest::AllocateAndRegisterGpuMemory { width, height, format } => {
+                req.type_ = Le32::from(VM_REQUEST_TYPE_ALLOCATE_AND_REGISTER_GPU_MEMORY);
+                req.width = Le32::from(width as u32);
+                req.height = Le32::from(height as u32);
+                req.format = Le32::from(format as u32);
+            },
             _ => return Err(VmControlError::InvalidType),
         }
         let mut buf = [0; VM_REQUEST_SIZE];
@@ -178,7 +235,8 @@ impl VmRequest {
     /// `VmResponse` with the intended purpose of sending the response back over the  socket that
     /// received this `VmRequest`.
     pub fn execute(&self, vm: &mut Vm, next_mem_pfn: &mut u64, running: &mut bool,
-                   balloon_host_socket: &UnixDatagram) -> VmResponse {
+                   balloon_host_socket: &UnixDatagram,
+                   gpu_memory_allocator: Option<&GpuMemoryAllocator>) -> VmResponse {
         *running = true;
         match self {
             &VmRequest::Exit => {
@@ -198,26 +256,9 @@ impl VmRequest {
                 }
             }
             &VmRequest::RegisterMemory(ref fd, size) => {
-                let mmap = match MemoryMapping::from_fd(fd, size) {
-                    Ok(v) => v,
-                    Err(MmapError::SystemCallFailed(e)) => return VmResponse::Err(e),
-                    _ => return VmResponse::Err(SysError::new(EINVAL)),
-                };
-                let pfn = *next_mem_pfn;
-                let slot =
-                    match vm.add_device_memory(GuestAddress(pfn << 12), mmap, false, false) {
-                        Ok(slot) => slot,
-                        Err(e) => return VmResponse::Err(e),
-                    };
-                // TODO(zachr): Use a smarter allocation strategy. The current strategy is just
-                // bumping this pointer, meaning the remove operation does not free any address
-                // space. Given enough allocations, device memory may run out of address space and
-                // collide with guest memory or MMIO address space. There is currently nothing in
-                // place to limit the amount of address space used by device memory.
-                *next_mem_pfn += (((size + 0x7ff) >> 12) + 1) as u64;
-                VmResponse::RegisterMemory {
-                    pfn: pfn,
-                    slot: slot,
+                match register_memory(vm, next_mem_pfn, fd, size) {
+                    Ok((pfn, slot)) => VmResponse::RegisterMemory { pfn, slot },
+                    Err(e) => VmResponse::Err(e),
                 }
             }
             &VmRequest::UnregisterMemory(slot) => {
@@ -234,7 +275,31 @@ impl VmRequest {
                     Ok(_) => VmResponse::Ok,
                     Err(_) => VmResponse::Err(SysError::last()),
                 }
-            },
+            }
+            &VmRequest::AllocateAndRegisterGpuMemory {width, height, format} => {
+                let allocator = match gpu_memory_allocator {
+                    Some(v) => v,
+                    None => return VmResponse::Err(SysError::new(ENODEV)),
+                };
+                let (mut fd, stride) = match allocator.allocate(width, height, format) {
+                    Ok(v) => v,
+                    Err(e) => return VmResponse::Err(e),
+                };
+                // Determine the size of the buffer using a 0-byte seek from the end. This is
+                // preferred over `stride * height` as it is not limited to packed pixel formats.
+                let size = match fd.seek(SeekFrom::End(0)) {
+                    Ok(v) => v,
+                    Err(e) => return VmResponse::Err(SysError::from(e)),
+                };
+                match register_memory(vm, next_mem_pfn, &fd, size as usize) {
+                    Ok((pfn, slot)) => VmResponse::AllocateAndRegisterGpuMemory {
+                        fd: MaybeOwnedFd::Owned(fd),
+                        pfn,
+                        slot,
+                        stride },
+                    Err(e) => VmResponse::Err(e),
+                }
+            }
         }
     }
 }
@@ -242,7 +307,6 @@ impl VmRequest {
 /// Indication of success or failure of a `VmRequest`.
 ///
 /// Success is usually indicated `VmResponse::Ok` unless there is data associated with the response.
-#[derive(Debug, PartialEq)]
 pub enum VmResponse {
     /// Indicates the request was executed successfully.
     Ok,
@@ -251,11 +315,15 @@ pub enum VmResponse {
     /// The request to register memory into guest address space was successfully done at page frame
     /// number `pfn` and memory slot number `slot`.
     RegisterMemory { pfn: u64, slot: u32 },
+    /// The request to allocate and register GPU memory into the guest address space succeeded.
+    /// The buffer is at page frame number `pfn`, memory slot `slot`, and has the given `stride`.
+    AllocateAndRegisterGpuMemory { fd: MaybeOwnedFd, pfn: u64, slot: u32, stride: u32 },
 }
 
 const VM_RESPONSE_TYPE_OK: u32 = 1;
 const VM_RESPONSE_TYPE_ERR: u32 = 2;
 const VM_RESPONSE_TYPE_REGISTER_MEMORY: u32 = 3;
+const VM_RESPONSE_TYPE_ALLOCATE_AND_REGISTER_GPU_MEMORY: u32 = 4;
 const VM_RESPONSE_SIZE: usize = 24;
 
 #[repr(C)]
@@ -265,7 +333,7 @@ struct VmResponseStruct {
     errno: Le32,
     pfn: Le64,
     slot: Le32,
-    padding: Le32,
+    stride: Le32,
 }
 
 // Safe because it only has data and has no implicit padding.
@@ -296,6 +364,15 @@ impl VmResponse {
                        slot: resp.slot.into(),
                    })
             }
+            VM_RESPONSE_TYPE_ALLOCATE_AND_REGISTER_GPU_MEMORY => {
+                let fd = fds.pop().ok_or(VmControlError::ExpectFd)?;
+                Ok(VmResponse::AllocateAndRegisterGpuMemory {
+                       fd: MaybeOwnedFd::Owned(fd),
+                       pfn: resp.pfn.into(),
+                       slot: resp.slot.into(),
+                       stride: resp.stride.into()
+                  })
+            }
             _ => Err(VmControlError::InvalidType),
         }
     }
@@ -306,6 +383,8 @@ impl VmResponse {
     /// execution.
     pub fn send(&self, scm: &mut Scm, s: &UnixDatagram) -> VmControlResult<()> {
         let mut resp = VmResponseStruct::default();
+        let mut fd_buf = [0; 1];
+        let mut fd_len = 0;
         match self {
             &VmResponse::Ok => resp.type_ = Le32::from(VM_RESPONSE_TYPE_OK),
             &VmResponse::Err(e) => {
@@ -317,10 +396,18 @@ impl VmResponse {
                 resp.pfn = Le64::from(pfn);
                 resp.slot = Le32::from(slot);
             }
+            &VmResponse::AllocateAndRegisterGpuMemory {ref fd, pfn, slot, stride } => {
+                fd_buf[0] = fd.as_raw_fd();
+                fd_len = 1;
+                resp.type_ = Le32::from(VM_RESPONSE_TYPE_ALLOCATE_AND_REGISTER_GPU_MEMORY);
+                resp.pfn = Le64::from(pfn);
+                resp.slot = Le32::from(slot);
+                resp.stride = Le32::from(stride);
+            }
         }
         let mut buf = [0; VM_RESPONSE_SIZE];
         buf.as_mut().get_ref(0).unwrap().store(resp);
-        scm.send(s, &[buf.as_ref()], &[])
+        scm.send(s, &[buf.as_ref()], &fd_buf[..fd_len])
             .map_err(|e| VmControlError::Send(e))?;
         Ok(())
     }
@@ -419,7 +506,7 @@ mod tests {
     fn request_invalid_type() {
         let (s1, s2) = UnixDatagram::pair().expect("failed to create socket pair");
         let mut scm = Scm::new(1);
-        scm.send(&s2, &[[12; VM_RESPONSE_SIZE].as_ref()], &[])
+        scm.send(&s2, &[[12; VM_REQUEST_SIZE].as_ref()], &[])
             .unwrap();
         match VmRequest::recv(&mut scm, &s1) {
             Err(VmControlError::InvalidType) => {}
@@ -428,12 +515,33 @@ mod tests {
     }
 
     #[test]
+    fn request_allocate_and_register_gpu_memory() {
+        let (s1, s2) = UnixDatagram::pair().expect("failed to create socket pair");
+        let mut scm = Scm::new(1);
+        let gpu_width: u32 = 32;
+        let gpu_height: u32 = 32;
+        let gpu_format: u32 = 0x34325258;
+        let r = VmRequest::AllocateAndRegisterGpuMemory { width: gpu_width, height: gpu_height, format: gpu_format };
+        r.send(&mut scm, &s1).unwrap();
+        match VmRequest::recv(&mut scm, &s2).unwrap() {
+            VmRequest::AllocateAndRegisterGpuMemory {width, height, format} => {
+                assert_eq!(width, gpu_width);
+                assert_eq!(height, gpu_height);
+                assert_eq!(format, gpu_format);
+            }
+            _ => panic!("recv wrong request variant"),
+        }
+    }
+
+    #[test]
     fn resp_ok() {
         let (s1, s2) = UnixDatagram::pair().expect("failed to create socket pair");
         let mut scm = Scm::new(1);
         VmResponse::Ok.send(&mut scm, &s1).unwrap();
-        let r = VmResponse::recv(&mut scm, &s2).unwrap();
-        assert_eq!(r, VmResponse::Ok);
+        match VmResponse::recv(&mut scm, &s2).unwrap() {
+            VmResponse::Ok => {}
+            _ => panic!("recv wrong response variant"),
+        }
     }
 
     #[test]
@@ -442,18 +550,29 @@ mod tests {
         let mut scm = Scm::new(1);
         let r1 = VmResponse::Err(SysError::new(libc::EDESTADDRREQ));
         r1.send(&mut scm, &s1).unwrap();
-        let r2 = VmResponse::recv(&mut scm, &s2).unwrap();
-        assert_eq!(r1, r2);
+        match VmResponse::recv(&mut scm, &s2).unwrap() {
+            VmResponse::Err(e) => {
+                assert_eq!(e, SysError::new(libc::EDESTADDRREQ));
+            }
+            _ => panic!("recv wrong response variant"),
+        }
     }
 
     #[test]
     fn resp_memory() {
         let (s1, s2) = UnixDatagram::pair().expect("failed to create socket pair");
         let mut scm = Scm::new(1);
-        let r1 = VmResponse::RegisterMemory { pfn: 55, slot: 66 };
+        let memory_pfn = 55;
+        let memory_slot = 66;
+        let r1 = VmResponse::RegisterMemory { pfn: memory_pfn, slot: memory_slot };
         r1.send(&mut scm, &s1).unwrap();
-        let r2 = VmResponse::recv(&mut scm, &s2).unwrap();
-        assert_eq!(r1, r2);
+        match VmResponse::recv(&mut scm, &s2).unwrap() {
+            VmResponse::RegisterMemory { pfn, slot } => {
+                assert_eq!(pfn, memory_pfn);
+                assert_eq!(slot, memory_slot);
+            }
+            _ => panic!("recv wrong response variant"),
+        }
     }
 
     #[test]
@@ -461,8 +580,12 @@ mod tests {
         let (s1, _) = UnixDatagram::pair().expect("failed to create socket pair");
         let mut scm = Scm::new(1);
         s1.shutdown(Shutdown::Both).unwrap();
-        let r = VmResponse::recv(&mut scm, &s1);
-        assert_eq!(r, Err(VmControlError::BadSize(0)));
+        match VmResponse::recv(&mut scm, &s1) {
+            Err(e) => {
+                assert_eq!(e, VmControlError::BadSize(0));
+            }
+            _ => panic!("recv wrong response"),
+        }
     }
 
     #[test]
@@ -470,8 +593,12 @@ mod tests {
         let (s1, s2) = UnixDatagram::pair().expect("failed to create socket pair");
         let mut scm = Scm::new(1);
         scm.send(&s2, &[[12; 7].as_ref()], &[]).unwrap();
-        let r = VmResponse::recv(&mut scm, &s1);
-        assert_eq!(r, Err(VmControlError::BadSize(7)));
+        match VmResponse::recv(&mut scm, &s1) {
+            Err(e) => {
+                assert_eq!(e, VmControlError::BadSize(7));
+            }
+            _ => panic!("recv wrong response"),
+        }
     }
 
     #[test]
@@ -480,7 +607,39 @@ mod tests {
         let mut scm = Scm::new(1);
         scm.send(&s2, &[[12; VM_RESPONSE_SIZE].as_ref()], &[])
             .unwrap();
-        let r = VmResponse::recv(&mut scm, &s1);
-        assert_eq!(r, Err(VmControlError::InvalidType));
+        match VmResponse::recv(&mut scm, &s1) {
+            Err(e) => {
+                assert_eq!(e, VmControlError::InvalidType);
+            }
+            _ => panic!("recv wrong response"),
+        }
+    }
+
+    #[test]
+    fn resp_allocate_and_register_gpu_memory() {
+        if !kernel_has_memfd() { return; }
+        let (s1, s2) = UnixDatagram::pair().expect("failed to create socket pair");
+        let mut scm = Scm::new(1);
+        let shm_size: usize = 4096;
+        let mut shm = SharedMemory::new(None).unwrap();
+        shm.set_size(shm_size as u64).unwrap();
+        let memory_pfn = 55;
+        let memory_slot = 66;
+        let gpu_stride = 32;
+        let r1 = VmResponse::AllocateAndRegisterGpuMemory {
+            fd: MaybeOwnedFd::Borrowed(shm.as_raw_fd()),
+            pfn: memory_pfn,
+            slot: memory_slot,
+            stride: gpu_stride };
+        r1.send(&mut scm, &s1).unwrap();
+        match VmResponse::recv(&mut scm, &s2).unwrap() {
+            VmResponse::AllocateAndRegisterGpuMemory { fd, pfn, slot, stride } => {
+                assert!(fd.as_raw_fd() >= 0);
+                assert_eq!(pfn, memory_pfn);
+                assert_eq!(slot, memory_slot);
+                assert_eq!(stride, gpu_stride);
+            }
+            _ => panic!("recv wrong response variant"),
+        }
     }
 }
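Finally, a note on the wire sizes in vm_control: the three new Le32 fields are why VM_REQUEST_SIZE grows from 24 to 32, while the response simply reuses the former padding word for stride, so VM_RESPONSE_SIZE stays 24. A hypothetical size check, not part of this change, that could sit in the tests module above:

    #[test]
    fn request_and_response_struct_sizes() {
        // type_(4) + slot(4) + size(8) + num_pages(4) + width(4) + height(4) + format(4) = 32
        assert_eq!(std::mem::size_of::<VmRequestStruct>(), VM_REQUEST_SIZE);
        // type_(4) + errno(4) + pfn(8) + slot(4) + stride(4) = 24
        assert_eq!(std::mem::size_of::<VmResponseStruct>(), VM_RESPONSE_SIZE);
    }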