author    K900 <me@0upti.me>  2023-11-07 19:57:23 +0300
committer K900 <me@0upti.me>  2023-11-07 21:08:23 +0300
commit    9843bbbeee2642df74a5d4b4f94bae05cea0e2e0 (patch)
tree      dbc470c7991386c638485664bd45b5c4487e47c6
parent    223c8a6ed4c41e1449fa83d0e1feb45d5dd5910e (diff)
treewide: replace `<command> | systemd-cat` with `systemd-cat <command>`
The former swallows the wrapped command's exit code (the pipeline only reports systemd-cat's status), while the latter propagates it.
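
A minimal sketch of the difference, assuming a POSIX-like shell where `$?` is the status of the last pipeline (i.e. its rightmost command) and that systemd-cat executes the given command and returns its exit status:

    $ false | systemd-cat; echo $?   # pipeline status comes from systemd-cat, so the failure is hidden
    0
    $ systemd-cat false; echo $?     # systemd-cat runs `false` and returns its exit status
    1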
-rw-r--r--  nixos/tests/castopod.nix             |  2
-rw-r--r--  nixos/tests/hadoop/hadoop.nix        | 24
-rw-r--r--  nixos/tests/hadoop/hdfs.nix          |  4
-rw-r--r--  nixos/tests/iscsi-multipath-root.nix |  4
-rw-r--r--  nixos/tests/vaultwarden.nix          |  2
5 files changed, 18 insertions(+), 18 deletions(-)
diff --git a/nixos/tests/castopod.nix b/nixos/tests/castopod.nix
index 1d53c3e9a3e..4435ec617d4 100644
--- a/nixos/tests/castopod.nix
+++ b/nixos/tests/castopod.nix
@@ -82,6 +82,6 @@ import ./make-test-python.nix ({ pkgs, lib, ... }:
     castopod.succeed("curl -s http://localhost/cp-install | grep 'Create your Super Admin account' > /dev/null")
 
     with subtest("Create superadmin and log in"):
-        castopod.succeed("PYTHONUNBUFFERED=1 test-runner | systemd-cat -t test-runner")
+        castopod.succeed("PYTHONUNBUFFERED=1 systemd-cat -t test-runner test-runner")
   '';
 })
diff --git a/nixos/tests/hadoop/hadoop.nix b/nixos/tests/hadoop/hadoop.nix
index 0de2366b186..6162ccfd33d 100644
--- a/nixos/tests/hadoop/hadoop.nix
+++ b/nixos/tests/hadoop/hadoop.nix
@@ -176,22 +176,22 @@ import ../make-test-python.nix ({ package, ... }: {
     nn2.succeed("systemctl stop hdfs-zkfc")
 
     # Initialize zookeeper for failover controller
-    nn1.succeed("sudo -u hdfs hdfs zkfc -formatZK 2>&1 | systemd-cat")
+    nn1.succeed("sudo -u hdfs systemd-cat hdfs zkfc -formatZK")
 
     # Format NN1 and start it
-    nn1.succeed("sudo -u hdfs hadoop namenode -format 2>&1 | systemd-cat")
+    nn1.succeed("sudo -u hdfs systemd-cat hadoop namenode -format")
     nn1.succeed("systemctl start hdfs-namenode")
     nn1.wait_for_open_port(9870)
     nn1.wait_for_open_port(8022)
     nn1.wait_for_open_port(8020)
 
     # Bootstrap NN2 from NN1 and start it
-    nn2.succeed("sudo -u hdfs hdfs namenode -bootstrapStandby 2>&1 | systemd-cat")
+    nn2.succeed("sudo -u hdfs systemd-cat hdfs namenode -bootstrapStandby")
     nn2.succeed("systemctl start hdfs-namenode")
     nn2.wait_for_open_port(9870)
     nn2.wait_for_open_port(8022)
     nn2.wait_for_open_port(8020)
-    nn1.succeed("netstat -tulpne | systemd-cat")
+    nn1.succeed("systemd-cat netstat -tulpne")
 
     # Start failover controllers
     nn1.succeed("systemctl start hdfs-zkfc")
@@ -200,10 +200,10 @@ import ../make-test-python.nix ({ package, ... }: {
     # DN should have started by now, but confirm anyway
     dn1.wait_for_unit("hdfs-datanode")
     # Print states of namenodes
-    client.succeed("sudo -u hdfs hdfs haadmin -getAllServiceState | systemd-cat")
+    client.succeed("sudo -u hdfs systemd-cat hdfs haadmin -getAllServiceState")
     # Wait for cluster to exit safemode
     client.succeed("sudo -u hdfs hdfs dfsadmin -safemode wait")
-    client.succeed("sudo -u hdfs hdfs haadmin -getAllServiceState | systemd-cat")
+    client.succeed("sudo -u hdfs systemd-cat hdfs haadmin -getAllServiceState")
     # test R/W
     client.succeed("echo testfilecontents | sudo -u hdfs hdfs dfs -put - /testfile")
     assert "testfilecontents" in client.succeed("sudo -u hdfs hdfs dfs -cat /testfile")
@@ -211,7 +211,7 @@ import ../make-test-python.nix ({ package, ... }: {
     # Test NN failover
     nn1.succeed("systemctl stop hdfs-namenode")
     assert "active" in client.succeed("sudo -u hdfs hdfs haadmin -getAllServiceState")
-    client.succeed("sudo -u hdfs hdfs haadmin -getAllServiceState | systemd-cat")
+    client.succeed("sudo -u hdfs systemd-cat hdfs haadmin -getAllServiceState")
     assert "testfilecontents" in client.succeed("sudo -u hdfs hdfs dfs -cat /testfile")
 
     nn1.succeed("systemctl start hdfs-namenode")
@@ -219,7 +219,7 @@ import ../make-test-python.nix ({ package, ... }: {
     nn1.wait_for_open_port(8022)
     nn1.wait_for_open_port(8020)
     assert "standby" in client.succeed("sudo -u hdfs hdfs haadmin -getAllServiceState")
-    client.succeed("sudo -u hdfs hdfs haadmin -getAllServiceState | systemd-cat")
+    client.succeed("sudo -u hdfs systemd-cat hdfs haadmin -getAllServiceState")
 
     #### YARN tests ####
 
@@ -236,18 +236,18 @@ import ../make-test-python.nix ({ package, ... }: {
     nm1.wait_for_open_port(8042)
     nm1.wait_for_open_port(8040)
     client.wait_until_succeeds("yarn node -list | grep Nodes:1")
-    client.succeed("sudo -u yarn yarn rmadmin -getAllServiceState | systemd-cat")
-    client.succeed("sudo -u yarn yarn node -list | systemd-cat")
+    client.succeed("sudo -u yarn systemd-cat yarn rmadmin -getAllServiceState")
+    client.succeed("sudo -u yarn systemd-cat yarn node -list")
 
     # Test RM failover
     rm1.succeed("systemctl stop yarn-resourcemanager")
     assert "standby" not in client.succeed("sudo -u yarn yarn rmadmin -getAllServiceState")
-    client.succeed("sudo -u yarn yarn rmadmin -getAllServiceState | systemd-cat")
+    client.succeed("sudo -u yarn systemd-cat yarn rmadmin -getAllServiceState")
     rm1.succeed("systemctl start yarn-resourcemanager")
     rm1.wait_for_unit("yarn-resourcemanager")
     rm1.wait_for_open_port(8088)
     assert "standby" in client.succeed("sudo -u yarn yarn rmadmin -getAllServiceState")
-    client.succeed("sudo -u yarn yarn rmadmin -getAllServiceState | systemd-cat")
+    client.succeed("sudo -u yarn systemd-cat yarn rmadmin -getAllServiceState")
 
     assert "Estimated value of Pi is" in client.succeed("HADOOP_USER_NAME=hdfs yarn jar $(readlink $(which yarn) | sed -r 's~bin/yarn~share/hadoop/mapreduce/hadoop-mapreduce-examples-*.jar~g') pi 2 10")
     assert "SUCCEEDED" in client.succeed("yarn application -list -appStates FINISHED")
diff --git a/nixos/tests/hadoop/hdfs.nix b/nixos/tests/hadoop/hdfs.nix
index 429d4bf6b53..65686b37155 100644
--- a/nixos/tests/hadoop/hdfs.nix
+++ b/nixos/tests/hadoop/hdfs.nix
@@ -50,8 +50,8 @@ import ../make-test-python.nix ({ package, lib, ... }:
     namenode.wait_for_unit("hdfs-namenode")
     namenode.wait_for_unit("network.target")
     namenode.wait_for_open_port(8020)
-    namenode.succeed("ss -tulpne | systemd-cat")
-    namenode.succeed("cat /etc/hadoop*/hdfs-site.xml | systemd-cat")
+    namenode.succeed("systemd-cat ss -tulpne")
+    namenode.succeed("systemd-cat cat /etc/hadoop*/hdfs-site.xml")
     namenode.wait_for_open_port(9870)
 
     datanode.wait_for_unit("hdfs-datanode")
diff --git a/nixos/tests/iscsi-multipath-root.nix b/nixos/tests/iscsi-multipath-root.nix
index 92ae9990c94..494a539b57e 100644
--- a/nixos/tests/iscsi-multipath-root.nix
+++ b/nixos/tests/iscsi-multipath-root.nix
@@ -202,7 +202,7 @@ import ./make-test-python.nix (
       initiatorAuto.succeed("umount /mnt")
 
       initiatorAuto.succeed("systemctl restart multipathd")
-      initiatorAuto.succeed("multipath -ll | systemd-cat")
+      initiatorAuto.succeed("systemd-cat multipath -ll")
 
       # Install our RootDisk machine to 123456, the alias to the device that multipath is now managing
       initiatorAuto.succeed("mount /dev/mapper/123456 /mnt")
@@ -223,7 +223,7 @@ import ./make-test-python.nix (
       initiatorRootDisk.fail("iscsiadm -m discovery -o update -t sendtargets -p 192.168.1.3 --login")
       initiatorRootDisk.fail("iscsiadm -m discovery -o update -t sendtargets -p 192.168.2.3 --login")
       initiatorRootDisk.succeed("systemctl restart multipathd")
-      initiatorRootDisk.succeed("multipath -ll | systemd-cat")
+      initiatorRootDisk.succeed("systemd-cat multipath -ll")
 
       # Verify we can write and sync the root disk
       initiatorRootDisk.succeed("mkdir /scratch")
diff --git a/nixos/tests/vaultwarden.nix b/nixos/tests/vaultwarden.nix
index 95d00c1d8ec..486a8aeddf7 100644
--- a/nixos/tests/vaultwarden.nix
+++ b/nixos/tests/vaultwarden.nix
@@ -174,7 +174,7 @@ let
           )
 
       with subtest("use the web interface to sign up, log in, and save a password"):
-          server.succeed("PYTHONUNBUFFERED=1 test-runner | systemd-cat -t test-runner")
+          server.succeed("PYTHONUNBUFFERED=1 systemd-cat -t test-runner test-runner")
 
       with subtest("log in with the cli"):
           key = client.succeed(