author    Zach Reizner <zachr@google.com>  2018-05-03 13:19:37 -0700
committer chrome-bot <chrome-bot@chromium.org>  2018-07-09 15:48:21 -0700
commit    20d71f8928e1e95f6554c87cfca12f24f9f652cf (patch)
tree      1d383fdbe0edf037f55b5b0f0100ee8a2fd94747 /gpu_display/src/lib.rs
parent    ac5a8dbe505daf95a802cc606988bc2323087a05 (diff)
gpu_display: provides wayland based output for virtio-gpu
This provides virtual display style output, useful for debugging
virtio-gpu. Although using virtio-gpu for display purposes clashes with
the more integrated virtio-wayland support, it is nonetheless helpful
for debugging virtio-gpu, and is technically required to fully implement
that device.

TEST=cargo build -p gpu_display
BUG=chromium:837073
CQ-DEPEND=CL:1096300

Change-Id: I59f895e951ef593d4119e7558168dd34223519ee
Reviewed-on: https://chromium-review.googlesource.com/1043446
Commit-Ready: Zach Reizner <zachr@chromium.org>
Tested-by: Zach Reizner <zachr@chromium.org>
Reviewed-by: Zach Reizner <zachr@chromium.org>
Diffstat (limited to 'gpu_display/src/lib.rs')
-rw-r--r--  gpu_display/src/lib.rs  378
1 file changed, 378 insertions(+), 0 deletions(-)
diff --git a/gpu_display/src/lib.rs b/gpu_display/src/lib.rs
new file mode 100644
index 0000000..7f2b775
--- /dev/null
+++ b/gpu_display/src/lib.rs
@@ -0,0 +1,378 @@
+// Copyright 2018 The Chromium OS Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+//! Crate for displaying simple surfaces and GPU buffers over wayland.
+
+extern crate data_model;
+extern crate sys_util;
+
+mod dwl;
+
+use std::cell::Cell;
+use std::collections::HashMap;
+use std::ffi::CStr;
+use std::os::unix::io::{AsRawFd, RawFd};
+use std::ptr::null_mut;
+
+use data_model::{VolatileSlice, VolatileMemory};
+use sys_util::{Error as SysError, SharedMemory, MemoryMapping, round_up_to_page_size};
+
+use dwl::*;
+
+const BUFFER_COUNT: usize = 2;
+const BYTES_PER_PIXEL: u32 = 4;
+
+/// An error generated by `GpuDisplay`.
+#[derive(Debug)]
+pub enum GpuDisplayError {
+    /// An internal allocation failed.
+    Allocate,
+    /// Connecting to the compositor failed.
+    Connect,
+    /// Creating shared memory failed.
+    CreateShm(SysError),
+    /// Setting the size of shared memory failed.
+    SetSize(SysError),
+    /// Failed to create a surface on the compositor.
+    CreateSurface,
+    /// Failed to import a buffer to the compositor.
+    FailedImport,
+    /// The surface ID is invalid.
+    InvalidSurfaceId,
+}
+
+struct DwlContext(*mut dwl_context);
+impl Drop for DwlContext {
+    fn drop(&mut self) {
+        if !self.0.is_null() {
+            // Safe given that we checked the pointer for non-null and it should always be of the
+            // correct type.
+            unsafe {
+                dwl_context_destroy(&mut self.0);
+            }
+        }
+    }
+}
+
+struct DwlDmabuf(*mut dwl_dmabuf);
+impl Drop for DwlDmabuf {
+    fn drop(&mut self) {
+        if !self.0.is_null() {
+            // Safe given that we checked the pointer for non-null and it should always be of the
+            // correct type.
+            unsafe {
+                dwl_dmabuf_destroy(&mut self.0);
+            }
+        }
+    }
+}
+
+struct DwlSurface(*mut dwl_surface);
+impl Drop for DwlSurface {
+    fn drop(&mut self) {
+        if !self.0.is_null() {
+            // Safe given that we checked the pointer for non-null and it should always be of the
+            // correct type.
+            unsafe {
+                dwl_surface_destroy(&mut self.0);
+            }
+        }
+    }
+}
+
+struct GpuDisplaySurface {
+    surface: DwlSurface,
+    buffer_size: usize,
+    buffer_index: Cell<usize>,
+    buffer_mem: MemoryMapping,
+}
+
+impl GpuDisplaySurface {
+    fn surface(&self) -> *mut dwl_surface {
+        self.surface.0
+    }
+}
+
+/// A connection to the compositor and associated collection of state.
+///
+/// The user of `GpuDisplay` can use `AsRawFd` to poll on the compositor connection's file
+/// descriptor. When the connection is readable, `dispatch_events` can be called to process it.
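+///
+/// A minimal event-loop sketch (assumes a reachable Wayland compositor; the
+/// actual poll mechanism is elided):
+///
+/// ```no_run
+/// use std::os::unix::io::AsRawFd;
+/// use gpu_display::GpuDisplay;
+///
+/// let mut display = GpuDisplay::new().expect("failed to connect to compositor");
+/// let fd = display.as_raw_fd();
+/// // Register `fd` with poll/epoll; whenever it becomes readable:
+/// display.dispatch_events();
+/// ```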
+pub struct GpuDisplay {
+    ctx: DwlContext,
+    dmabufs: HashMap<u32, DwlDmabuf>,
+    dmabuf_next_id: u32,
+    surfaces: HashMap<u32, GpuDisplaySurface>,
+    surface_next_id: u32,
+}
+
+impl GpuDisplay {
+    /// Opens a fresh connection to the compositor.
+    pub fn new() -> Result<GpuDisplay, GpuDisplayError> {
+        // The dwl_context_new call should always be safe to call, and we check its result.
+        let ctx = DwlContext(unsafe { dwl_context_new() });
+        if ctx.0.is_null() {
+            return Err(GpuDisplayError::Allocate);
+        }
+        // The dwl_context_setup call is always safe to call given that the supplied context is
+        // valid, and we check its result.
+        let setup_success = unsafe { dwl_context_setup(ctx.0) };
+        if !setup_success {
+            return Err(GpuDisplayError::Connect);
+        }
+
+        Ok(GpuDisplay {
+               ctx,
+               dmabufs: Default::default(),
+               dmabuf_next_id: 0,
+               surfaces: Default::default(),
+               surface_next_id: 0,
+           })
+    }
+
+    fn ctx(&self) -> *mut dwl_context {
+        self.ctx.0
+    }
+
+    fn get_surface(&self, surface_id: u32) -> Option<&GpuDisplaySurface> {
+        self.surfaces.get(&surface_id)
+    }
+
+    /// Imports a dmabuf to the compositor for use as a surface buffer and returns a handle to it.
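+    ///
+    /// A sketch of an import; the dmabuf fd would come from a real buffer
+    /// allocator, and `0x34325241` is the DRM fourcc for ARGB8888 ('AR24'):
+    ///
+    /// ```no_run
+    /// # use std::os::unix::io::RawFd;
+    /// # use gpu_display::GpuDisplay;
+    /// # let mut display = GpuDisplay::new().unwrap();
+    /// # let dmabuf_fd: RawFd = -1; // placeholder; use a real dmabuf fd
+    /// let import_id = display
+    ///     .import_dmabuf(dmabuf_fd, 0, 640 * 4, 0, 640, 480, 0x34325241)
+    ///     .expect("failed to import dmabuf");
+    /// ```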
+    pub fn import_dmabuf(&mut self,
+                         fd: RawFd,
+                         offset: u32,
+                         stride: u32,
+                         modifiers: u64,
+                         width: u32,
+                         height: u32,
+                         fourcc: u32)
+                         -> Result<u32, GpuDisplayError> {
+        // Safe given that the context pointer is valid. Any other invalid parameters would be
+        // rejected by dwl_context_dmabuf_new safely. We check that the resulting dmabuf is valid
+        // before filing it away.
+        let dmabuf = DwlDmabuf(unsafe {
+                                   dwl_context_dmabuf_new(self.ctx(),
+                                                          fd,
+                                                          offset,
+                                                          stride,
+                                                          modifiers,
+                                                          width,
+                                                          height,
+                                                          fourcc)
+                               });
+        if dmabuf.0.is_null() {
+            return Err(GpuDisplayError::FailedImport);
+        }
+
+        let next_id = self.dmabuf_next_id;
+        self.dmabufs.insert(next_id, dmabuf);
+        self.dmabuf_next_id += 1;
+        Ok(next_id)
+    }
+
+    /// Returns true if the previously imported dmabuf identified by the given handle is
+    /// currently in use by the compositor.
+    pub fn import_in_use(&mut self, import_id: u32) -> bool {
+        match self.dmabufs.get(&import_id) {
+            // Safe because only a valid dmabuf is used.
+            Some(dmabuf) => unsafe { dwl_dmabuf_in_use(dmabuf.0) },
+            None => {
+                debug_assert!(false, "invalid import_id {}", import_id);
+                false
+            }
+        }
+    }
+
+    /// Releases a previously imported dmabuf identified by the given handle.
+    pub fn release_import(&mut self, import_id: u32) {
+        self.dmabufs.remove(&import_id);
+    }
+
+    /// Dispatches internal events that were received from the compositor since the last call to
+    /// `dispatch_events`.
+    pub fn dispatch_events(&mut self) {
+        // Safe given that the context pointer is valid.
+        unsafe {
+            dwl_context_dispatch(self.ctx());
+        }
+    }
+
+    /// Creates a surface on the compositor as either a top level window, or child of another
+    /// surface, returning a handle to the new surface.
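+    ///
+    /// For example, a 640x480 top-level window (sketch; assumes a live
+    /// compositor connection):
+    ///
+    /// ```no_run
+    /// # use gpu_display::GpuDisplay;
+    /// # let mut display = GpuDisplay::new().unwrap();
+    /// let surface_id = display
+    ///     .create_surface(None, 640, 480)
+    ///     .expect("failed to create surface");
+    /// ```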
+    pub fn create_surface(&mut self,
+                          parent_surface_id: Option<u32>,
+                          width: u32,
+                          height: u32)
+                          -> Result<u32, GpuDisplayError> {
+        let parent_ptr = match parent_surface_id {
+            Some(id) => {
+                match self.get_surface(id).map(|p| p.surface()) {
+                    Some(ptr) => ptr,
+                    None => return Err(GpuDisplayError::InvalidSurfaceId),
+                }
+            }
+            None => null_mut(),
+        };
+        let row_size = width * BYTES_PER_PIXEL;
+        let fb_size = row_size * height;
+        let buffer_size = round_up_to_page_size(fb_size as usize * BUFFER_COUNT);
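+        // For example, with width = 640 and height = 480: row_size = 640 * 4 = 2560
+        // bytes, fb_size = 2560 * 480 = 1228800 bytes, and buffer_size rounds
+        // 2 * 1228800 = 2457600 bytes up to a page multiple (already page-aligned
+        // for 4096-byte pages). The two framebuffers live back to back in one
+        // shared memory region and are selected by buffer index.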
+        let mut buffer_shm =
+            SharedMemory::new(Some(CStr::from_bytes_with_nul(b"GpuDisplaySurface\0").unwrap()))
+                .map_err(GpuDisplayError::CreateShm)?;
+        buffer_shm
+            .set_size(buffer_size as u64)
+            .map_err(GpuDisplayError::SetSize)?;
+        let buffer_mem = MemoryMapping::from_fd(&buffer_shm, buffer_size).unwrap();
+
+        // Safe because only a valid context, parent pointer (if not None), and buffer FD are used.
+        // The returned surface is checked for validity before being filed away.
+        let surface = DwlSurface(unsafe {
+                                     dwl_context_surface_new(self.ctx(),
+                                                             parent_ptr,
+                                                             buffer_shm.as_raw_fd(),
+                                                             buffer_size,
+                                                             fb_size as usize,
+                                                             width,
+                                                             height,
+                                                             row_size)
+                                 });
+
+        if surface.0.is_null() {
+            return Err(GpuDisplayError::CreateSurface);
+        }
+
+        let next_id = self.surface_next_id;
+        self.surfaces
+            .insert(next_id,
+                    GpuDisplaySurface {
+                        surface,
+                        buffer_size: fb_size as usize,
+                        buffer_index: Cell::new(0),
+                        buffer_mem,
+                    });
+
+        self.surface_next_id += 1;
+        Ok(next_id)
+    }
+
+    /// Releases a previously created surface identified by the given handle.
+    pub fn release_surface(&mut self, surface_id: u32) {
+        self.surfaces.remove(&surface_id);
+    }
+
+    /// Gets a reference to an unused framebuffer for the identified surface.
+    pub fn framebuffer_memory(&self, surface_id: u32) -> Option<VolatileSlice> {
+        let surface = self.get_surface(surface_id)?;
+        let buffer_index = (surface.buffer_index.get() + 1) % BUFFER_COUNT;
+        surface
+            .buffer_mem
+            .get_slice((buffer_index * surface.buffer_size) as u64,
+                       surface.buffer_size as u64)
+            .ok()
+    }
+
+    /// Commits any pending state for the identified surface.
+    pub fn commit(&self, surface_id: u32) {
+        match self.get_surface(surface_id) {
+            Some(surface) => {
+                // Safe because only a valid surface is used.
+                unsafe {
+                    dwl_surface_commit(surface.surface());
+                }
+            }
+            None => debug_assert!(false, "invalid surface_id {}", surface_id),
+        }
+    }
+
+    /// Returns true if the next buffer in the buffer queue for the given surface is currently in
+    /// use.
+    ///
+    /// If the next buffer is in use, the memory returned from `framebuffer_memory` should not be
+    /// written to.
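+    ///
+    /// A sketch of one double-buffered present (pixel writing elided; assumes
+    /// the surface was created as above):
+    ///
+    /// ```no_run
+    /// # use gpu_display::GpuDisplay;
+    /// # let mut display = GpuDisplay::new().unwrap();
+    /// # let surface_id = display.create_surface(None, 640, 480).unwrap();
+    /// if !display.next_buffer_in_use(surface_id) {
+    ///     if let Some(_fb) = display.framebuffer_memory(surface_id) {
+    ///         // Write pixel data into the returned VolatileSlice here.
+    ///     }
+    ///     display.flip(surface_id);
+    /// }
+    /// ```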
+    pub fn next_buffer_in_use(&self, surface_id: u32) -> bool {
+        match self.get_surface(surface_id) {
+            Some(surface) => {
+                let next_buffer_index = (surface.buffer_index.get() + 1) % BUFFER_COUNT;
+                // Safe because only a valid surface and buffer index is used.
+                unsafe { dwl_surface_buffer_in_use(surface.surface(), next_buffer_index) }
+            }
+            None => {
+                debug_assert!(false, "invalid surface_id {}", surface_id);
+                false
+            }
+        }
+    }
+
+    /// Changes the visible contents of the identified surface to the contents of the framebuffer
+    /// last returned by `framebuffer_memory` for this surface.
+    pub fn flip(&self, surface_id: u32) {
+        match self.get_surface(surface_id) {
+            Some(surface) => {
+                surface
+                    .buffer_index
+                    .set((surface.buffer_index.get() + 1) % BUFFER_COUNT);
+                // Safe because only a valid surface and buffer index is used.
+                unsafe {
+                    dwl_surface_flip(surface.surface(), surface.buffer_index.get());
+                }
+            }
+            None => debug_assert!(false, "invalid surface_id {}", surface_id),
+        }
+    }
+
+    /// Changes the visible contents of the identified surface to that of the identified imported
+    /// buffer.
+    pub fn flip_to(&self, surface_id: u32, import_id: u32) {
+        match self.get_surface(surface_id) {
+            Some(surface) => {
+                match self.dmabufs.get(&import_id) {
+                    // Safe because only a valid surface and dmabuf is used.
+                    Some(dmabuf) => unsafe { dwl_surface_flip_to(surface.surface(), dmabuf.0) },
+                    None => debug_assert!(false, "invalid import_id {}", import_id),
+                }
+            }
+            None => debug_assert!(false, "invalid surface_id {}", surface_id),
+        }
+    }
+
+    /// Returns true if the identified top level surface has been told to close by the compositor,
+    /// and by extension the user.
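+    ///
+    /// Typically checked after dispatching compositor events (sketch):
+    ///
+    /// ```no_run
+    /// # use gpu_display::GpuDisplay;
+    /// # let mut display = GpuDisplay::new().unwrap();
+    /// # let surface_id = display.create_surface(None, 640, 480).unwrap();
+    /// loop {
+    ///     display.dispatch_events();
+    ///     if display.close_requested(surface_id) {
+    ///         display.release_surface(surface_id);
+    ///         break;
+    ///     }
+    /// }
+    /// ```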
+    pub fn close_requested(&self, surface_id: u32) -> bool {
+        match self.get_surface(surface_id) {
+            Some(surface) => {
+                // Safe because only a valid surface is used.
+                unsafe { dwl_surface_close_requested(surface.surface()) }
+            }
+            None => false,
+        }
+    }
+
+    /// Sets the position of the identified subsurface relative to its parent.
+    ///
+    /// The change in position will not be visible until `commit` is called for the parent surface.
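+    ///
+    /// For example, centering a 320x240 child inside a 640x480 parent (sketch):
+    ///
+    /// ```no_run
+    /// # use gpu_display::GpuDisplay;
+    /// # let mut display = GpuDisplay::new().unwrap();
+    /// let parent_id = display.create_surface(None, 640, 480).unwrap();
+    /// let child_id = display.create_surface(Some(parent_id), 320, 240).unwrap();
+    /// display.set_position(child_id, 160, 120);
+    /// display.commit(parent_id);
+    /// ```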
+    pub fn set_position(&self, surface_id: u32, x: u32, y: u32) {
+        match self.get_surface(surface_id) {
+            Some(surface) => {
+                // Safe because only a valid surface is used.
+                unsafe {
+                    dwl_surface_set_position(surface.surface(), x, y);
+                }
+            }
+            None => debug_assert!(false, "invalid surface_id {}", surface_id),
+        }
+    }
+}
+
+impl Drop for GpuDisplay {
+    fn drop(&mut self) {
+        // Safe given that the context pointer is valid.
+        unsafe { dwl_context_destroy(&mut self.ctx.0) }
+    }
+}
+
+impl AsRawFd for GpuDisplay {
+    fn as_raw_fd(&self) -> RawFd {
+        // Safe given that the context pointer is valid.
+        unsafe { dwl_context_fd(self.ctx.0) }
+    }
+}