author    Zach Reizner <zachr@google.com>    2020-01-29 15:34:42 -0800
committer Commit Bot <commit-bot@chromium.org>    2020-02-01 08:50:21 +0000
commit    b427411b64aca7e9579408a68eb1182ec1ce1a33 (patch)
tree      1615888369de301166780c976195aac9e67f30b7
parent    8b8c01e1ad31718932491e4aee63f56109a138e2 (diff)
data_model: fix flaky observe_mutate
The original version of this test used sleeps, retries, and vague
commentary about how the test was made reliable. The new version of the
test uses real synchronization primitives.

BUG=chromium:1045426
TEST=put all cores under load;
     cargo test -p data_model

Change-Id: I7fa4ac45a9003e2ebb98c57ca6a03be17bdf65cf
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/platform/crosvm/+/2029925
Tested-by: Zach Reizner <zachr@chromium.org>
Tested-by: kokoro <noreply+kokoro@google.com>
Auto-Submit: Zach Reizner <zachr@chromium.org>
Reviewed-by: Dylan Reid <dgreid@chromium.org>
Commit-Queue: Zach Reizner <zachr@chromium.org>
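
Below is a minimal, self-contained sketch of the two-barrier handshake this
change introduces, lifted out of the test for illustration. An AtomicU8
stands in for the byte behind v_ref, since VolatileRef and VecMem are
specific to this crate. The first barrier parks the writer thread until the
main thread has observed the initial value; the second parks the main thread
until the writer's store has completed.

    use std::sync::atomic::{AtomicU8, Ordering};
    use std::sync::{Arc, Barrier};
    use std::thread::spawn;

    fn main() {
        // Stand-in for the volatile-mapped byte in the real test.
        let mem = Arc::new(AtomicU8::new(99));

        let start_barrier = Arc::new(Barrier::new(2));
        let end_barrier = Arc::new(Barrier::new(2));

        let thread_mem = mem.clone();
        let thread_start = start_barrier.clone();
        let thread_end = end_barrier.clone();
        let writer = spawn(move || {
            // Block until the main thread has checked the initial value.
            thread_start.wait();
            thread_mem.store(0, Ordering::SeqCst);
            // Signal that the store is done.
            thread_end.wait();
        });

        // The writer is still parked on start_barrier, so 99 must be visible.
        assert_eq!(mem.load(Ordering::SeqCst), 99);

        // Release the writer, then wait for its store to complete.
        start_barrier.wait();
        end_barrier.wait();

        assert_eq!(mem.load(Ordering::SeqCst), 0);
        writer.join().unwrap();
    }

Unlike the old sleep-and-retry loop, this handshake never depends on
scheduler timing, so the test passes deterministically even with all cores
under load (the TEST= scenario above).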
-rw-r--r--  data_model/src/volatile_memory.rs  |  35
1 file changed, 11 insertions(+), 24 deletions(-)
diff --git a/data_model/src/volatile_memory.rs b/data_model/src/volatile_memory.rs
index c02111e..d834f0b 100644
--- a/data_model/src/volatile_memory.rs
+++ b/data_model/src/volatile_memory.rs
@@ -401,9 +401,8 @@ impl<'a, T: DataInit> VolatileRef<'a, T> {
 mod tests {
     use super::*;
 
-    use std::sync::Arc;
-    use std::thread::{sleep, spawn};
-    use std::time::Duration;
+    use std::sync::{Arc, Barrier};
+    use std::thread::spawn;
 
     #[derive(Clone)]
     struct VecMem {
@@ -472,35 +471,23 @@ mod tests {
         let a_clone = a.clone();
         let v_ref = a.get_ref::<u8>(0).unwrap();
         v_ref.store(99);
+
+        let start_barrier = Arc::new(Barrier::new(2));
+        let thread_start_barrier = start_barrier.clone();
+        let end_barrier = Arc::new(Barrier::new(2));
+        let thread_end_barrier = end_barrier.clone();
         spawn(move || {
-            sleep(Duration::from_millis(10));
+            thread_start_barrier.wait();
             let clone_v_ref = a_clone.get_ref::<u8>(0).unwrap();
             clone_v_ref.store(0);
+            thread_end_barrier.wait();
         });
 
-        // Technically this is a race condition but we have to observe the v_ref's value changing
-        // somehow and this helps to ensure the sleep actually happens before the store rather then
-        // being reordered by the compiler.
         assert_eq!(v_ref.load(), 99);
 
-        // Granted we could have a machine that manages to perform this many volatile loads in the
-        // amount of time the spawned thread sleeps, but the most likely reason the retry limit will
-        // get reached is because v_ref.load() is not actually performing the required volatile read
-        // or v_ref.store() is not doing a volatile write. A timer based solution was avoided
-        // because that might use a syscall which could hint the optimizer to reload v_ref's pointer
-        // regardless of volatile status. Note that we use a longer retry duration for optimized
-        // builds.
-        #[cfg(debug_assertions)]
-        const RETRY_MAX: u64 = 500_000_000;
-        #[cfg(not(debug_assertions))]
-        const RETRY_MAX: u64 = 10_000_000_000;
-
-        let mut retry = 0;
-        while v_ref.load() == 99 && retry < RETRY_MAX {
-            retry += 1;
-        }
+        start_barrier.wait();
+        end_barrier.wait();
 
-        assert_ne!(retry, RETRY_MAX, "maximum retry exceeded");
         assert_eq!(v_ref.load(), 0);
     }
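
For context on what the assertions guard: the property under test is that
VolatileRef's load and store perform genuine volatile accesses the optimizer
may not elide, which is why the deleted retry-loop comment worried about
v_ref.load() not actually hitting memory. A minimal sketch of the underlying
std primitives (std::ptr directly, not this crate's API):

    use std::ptr;

    fn main() {
        let mut byte: u8 = 99;
        let p: *mut u8 = &mut byte;
        unsafe {
            // Volatile accesses may not be elided or merged by the
            // optimizer, so each call maps to a real load or store.
            ptr::write_volatile(p, 0);
            assert_eq!(ptr::read_volatile(p), 0);
        }
    }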