author    Daniel Verkamp <dverkamp@chromium.org>    2020-02-07 11:00:55 -0800
committer Commit Bot <commit-bot@chromium.org>      2020-04-08 06:09:25 +0000
commit    e1980a9c360b04705a16434bdaf1a56161dafb56
tree      95944d7bfa87505050c2716fd764ffd1699d737e /src/linux.rs
parent    f3081b120e0934539f6f3f2c60c9ff26c801c0ea
devices: pmem: implement flush using msync()
Previously, writable pmem devices implemented the flush command using
fsync(); however, this does not guarantee that writes made through an
mmap() mapping of the file are synchronized to disk.  What we actually
need is msync() on the pmem file mapping, but the pmem child process
does not have access to that mapping, and it is not trivial to pass it
along, since the mapping is owned by the Vm object once it has been
added as an mmap_arena.
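
For reference, the distinction is between syncing the file descriptor
and syncing the mapping itself; a minimal illustration using the libc
crate (not crosvm code):

    use std::io;

    // Illustration only: per msync(2), MS_SYNC asks the kernel to write
    // back the dirty pages of the given mapping and blocks until the
    // write-back completes, which fsync() on the fd alone does not
    // guarantee for this use case.
    unsafe fn sync_mapping(addr: *mut libc::c_void, len: usize) -> io::Result<()> {
        if libc::msync(addr, len, libc::MS_SYNC) != 0 {
            return Err(io::Error::last_os_error());
        }
        Ok(())
    }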

In order to call msync() on the mapping, add a new VmControl socket so
that the pmem device can request that the main process issue an msync()
on the MemoryMappingArena identified by its slot number.
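
The vm_control side of this change is not part of the src/linux.rs diff
below; a rough sketch of the request/response types it implies is shown
here.  The MsyncArena variant name, its field, and the
Vm::msync_memory_region() helper are assumptions for illustration, not
copied from vm_control.rs:

    // Sketch only -- MsgOnSocket/MsgSocket come from the msg_socket
    // crate, SysError from sys_util.
    #[derive(MsgOnSocket, Debug)]
    pub enum VmMsyncRequest {
        /// msync() the MemoryMappingArena registered at this memory slot.
        MsyncArena { slot: u32 },
    }

    #[derive(MsgOnSocket, Debug)]
    pub enum VmMsyncResponse {
        Ok,
        Err(SysError),
    }

    // Paired by msg_socket::pair::<VmMsyncResponse, VmMsyncRequest>() below.
    pub type VmMsyncRequestSocket = MsgSocket<VmMsyncRequest, VmMsyncResponse>;
    pub type VmMsyncResponseSocket = MsgSocket<VmMsyncResponse, VmMsyncRequest>;

    impl VmMsyncRequest {
        // Runs in the main process, which still owns the arena; `vm` is the
        // hypervisor Vm (linux.vm in run_control()).
        pub fn execute(&self, vm: &mut Vm) -> VmMsyncResponse {
            match *self {
                VmMsyncRequest::MsyncArena { slot } => match vm.msync_memory_region(slot) {
                    Ok(()) => VmMsyncResponse::Ok,
                    Err(e) => VmMsyncResponse::Err(e),
                },
            }
        }
    }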

BUG=chromium:1007535
TEST=mount filesystem on /dev/pmem0 and sync; verify msync in strace

Change-Id: Id0484757c422cf81d454fd54012a12dbcc1baaf6
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/platform/crosvm/+/2044365
Reviewed-by: Stephen Barber <smbarber@chromium.org>
Tested-by: kokoro <noreply+kokoro@google.com>
Commit-Queue: Daniel Verkamp <dverkamp@chromium.org>
Diffstat (limited to 'src/linux.rs')
-rw-r--r--  src/linux.rs  67
1 file changed, 57 insertions, 10 deletions
diff --git a/src/linux.rs b/src/linux.rs
index 5bc2dcc..9dbdb5c 100644
--- a/src/linux.rs
+++ b/src/linux.rs
@@ -56,7 +56,8 @@ use vm_control::{
     BalloonControlResult, DiskControlCommand, DiskControlRequestSocket, DiskControlResponseSocket,
     DiskControlResult, UsbControlSocket, VmControlResponseSocket, VmIrqRequest, VmIrqResponse,
     VmIrqResponseSocket, VmMemoryControlRequestSocket, VmMemoryControlResponseSocket,
-    VmMemoryRequest, VmMemoryResponse, VmRunMode,
+    VmMemoryRequest, VmMemoryResponse, VmMsyncRequest, VmMsyncRequestSocket, VmMsyncResponse,
+    VmMsyncResponseSocket, VmRunMode,
 };
 
 use crate::{Config, DiskOption, Executable, SharedDir, SharedDirKind, TouchDeviceOption};
@@ -254,6 +255,7 @@ enum TaggedControlSocket {
     Vm(VmControlResponseSocket),
     VmMemory(VmMemoryControlResponseSocket),
     VmIrq(VmIrqResponseSocket),
+    VmMsync(VmMsyncResponseSocket),
 }
 
 impl AsRef<UnixSeqpacket> for TaggedControlSocket {
@@ -263,6 +265,7 @@ impl AsRef<UnixSeqpacket> for TaggedControlSocket {
             Vm(ref socket) => socket.as_ref(),
             VmMemory(ref socket) => socket.as_ref(),
             VmIrq(ref socket) => socket.as_ref(),
+            VmMsync(ref socket) => socket.as_ref(),
         }
     }
 }
@@ -874,6 +877,7 @@ fn create_pmem_device(
     resources: &mut SystemAllocator,
     disk: &DiskOption,
     index: usize,
+    pmem_device_socket: VmMsyncRequestSocket,
 ) -> DeviceResult {
     let fd = OpenOptions::new()
         .read(true)
@@ -935,16 +939,23 @@ fn create_pmem_device(
         )
         .map_err(Error::AllocatePmemDeviceAddress)?;
 
-    vm.add_mmap_arena(
+    let slot = vm
+        .add_mmap_arena(
+            GuestAddress(mapping_address),
+            arena,
+            /* read_only = */ disk.read_only,
+            /* log_dirty_pages = */ false,
+        )
+        .map_err(Error::AddPmemDeviceMemory)?;
+
+    let dev = virtio::Pmem::new(
+        fd,
         GuestAddress(mapping_address),
-        arena,
-        /* read_only = */ disk.read_only,
-        /* log_dirty_pages = */ false,
+        slot,
+        arena_size,
+        Some(pmem_device_socket),
     )
-    .map_err(Error::AddPmemDeviceMemory)?;
-
-    let dev = virtio::Pmem::new(fd, GuestAddress(mapping_address), arena_size)
-        .map_err(Error::PmemDeviceNew)?;
+    .map_err(Error::PmemDeviceNew)?;
 
     Ok(VirtioDeviceStub {
         dev: Box::new(dev) as Box<dyn VirtioDevice>,
@@ -964,6 +975,7 @@ fn create_virtio_devices(
     gpu_device_socket: VmMemoryControlRequestSocket,
     balloon_device_socket: BalloonControlResponseSocket,
     disk_device_sockets: &mut Vec<DiskControlResponseSocket>,
+    pmem_device_sockets: &mut Vec<VmMsyncRequestSocket>,
 ) -> DeviceResult<Vec<VirtioDeviceStub>> {
     let mut devs = Vec::new();
 
@@ -973,7 +985,15 @@ fn create_virtio_devices(
     }
 
     for (index, pmem_disk) in cfg.pmem_devices.iter().enumerate() {
-        devs.push(create_pmem_device(cfg, vm, resources, pmem_disk, index)?);
+        let pmem_device_socket = pmem_device_sockets.remove(0);
+        devs.push(create_pmem_device(
+            cfg,
+            vm,
+            resources,
+            pmem_disk,
+            index,
+            pmem_device_socket,
+        )?);
     }
 
     devs.push(create_rng_device(cfg)?);
@@ -1124,6 +1144,7 @@ fn create_devices(
     gpu_device_socket: VmMemoryControlRequestSocket,
     balloon_device_socket: BalloonControlResponseSocket,
     disk_device_sockets: &mut Vec<DiskControlResponseSocket>,
+    pmem_device_sockets: &mut Vec<VmMsyncRequestSocket>,
     usb_provider: HostBackendDeviceProvider,
 ) -> DeviceResult<Vec<(Box<dyn PciDevice>, Option<Minijail>)>> {
     let stubs = create_virtio_devices(
@@ -1136,6 +1157,7 @@ fn create_devices(
         gpu_device_socket,
         balloon_device_socket,
         disk_device_sockets,
+        pmem_device_sockets,
     )?;
 
     let mut pci_devices = Vec::new();
@@ -1606,6 +1628,15 @@ pub fn run_config(cfg: Config) -> Result<()> {
         disk_device_sockets.push(disk_device_socket);
     }
 
+    let mut pmem_device_sockets = Vec::new();
+    let pmem_count = cfg.pmem_devices.len();
+    for _ in 0..pmem_count {
+        let (pmem_host_socket, pmem_device_socket) =
+            msg_socket::pair::<VmMsyncResponse, VmMsyncRequest>().map_err(Error::CreateSocket)?;
+        pmem_device_sockets.push(pmem_device_socket);
+        control_sockets.push(TaggedControlSocket::VmMsync(pmem_host_socket));
+    }
+
     let (gpu_host_socket, gpu_device_socket) =
         msg_socket::pair::<VmMemoryResponse, VmMemoryRequest>().map_err(Error::CreateSocket)?;
     control_sockets.push(TaggedControlSocket::VmMemory(gpu_host_socket));
@@ -1633,6 +1664,7 @@ pub fn run_config(cfg: Config) -> Result<()> {
                 gpu_device_socket,
                 balloon_device_socket,
                 &mut disk_device_sockets,
+                &mut pmem_device_sockets,
                 usb_provider,
             )
         },
@@ -2031,6 +2063,21 @@ fn run_control(
                                     }
                                 }
                             },
+                            TaggedControlSocket::VmMsync(socket) => match socket.recv() {
+                                Ok(request) => {
+                                    let response = request.execute(&mut linux.vm);
+                                    if let Err(e) = socket.send(&response) {
+                                        error!("failed to send VmMsyncResponse: {}", e);
+                                    }
+                                }
+                                Err(e) => {
+                                    if let MsgError::BadRecvSize { actual: 0, .. } = e {
+                                        vm_control_indices_to_remove.push(index);
+                                    } else {
+                                        error!("failed to recv VmMsyncRequest: {}", e);
+                                    }
+                                }
+                            },
                         }
                     }
                 }
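
The device side that sends these requests lives outside src/linux.rs;
roughly, the pmem worker can now service a guest flush by asking the
main process to perform the msync().  A sketch, reusing the assumed
MsyncArena variant from above:

    // Illustration only: how virtio::Pmem's worker might use the
    // VmMsyncRequestSocket handed to it by create_pmem_device() above.
    fn flush_via_main_process(socket: &VmMsyncRequestSocket, slot: u32) -> Result<(), ()> {
        // Ask the main process to msync() the arena registered at `slot`.
        socket
            .send(&VmMsyncRequest::MsyncArena { slot })
            .map_err(|_| ())?;
        // Wait for the result so the guest's flush completes only after
        // the msync() has finished.
        match socket.recv() {
            Ok(VmMsyncResponse::Ok) => Ok(()),
            _ => Err(()),
        }
    }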