Diffstat (limited to 'nixos/tests')
-rw-r--r--  nixos/tests/3proxy.nix | 8
-rw-r--r--  nixos/tests/acme.nix | 76
-rw-r--r--  nixos/tests/adguardhome.nix | 22
-rw-r--r--  nixos/tests/aesmd.nix | 4
-rw-r--r--  nixos/tests/airsonic.nix | 3
-rw-r--r--  nixos/tests/all-tests.nix | 168
-rw-r--r--  nixos/tests/alps.nix | 108
-rw-r--r--  nixos/tests/atuin.nix | 65
-rw-r--r--  nixos/tests/auth-mysql.nix | 177
-rw-r--r--  nixos/tests/bazarr.nix | 2
-rw-r--r--  nixos/tests/bitcoind.nix | 4
-rw-r--r--  nixos/tests/bootspec.nix | 144
-rw-r--r--  nixos/tests/borgbackup.nix | 20
-rw-r--r--  nixos/tests/btrbk-section-order.nix | 51
-rw-r--r--  nixos/tests/cagebreak.nix | 1
-rw-r--r--  nixos/tests/chromium.nix | 11
-rw-r--r--  nixos/tests/cinnamon.nix | 68
-rw-r--r--  nixos/tests/cloud-init-hostname.nix | 46
-rw-r--r--  nixos/tests/cloud-init.nix | 19
-rw-r--r--  nixos/tests/cockroachdb.nix | 2
-rw-r--r--  nixos/tests/common/acme/client/default.nix | 4
-rw-r--r--  nixos/tests/common/acme/server/acme.test.cert.pem | 32
-rw-r--r--  nixos/tests/common/acme/server/acme.test.key.pem | 50
-rw-r--r--  nixos/tests/common/acme/server/ca.cert.pem | 34
-rw-r--r--  nixos/tests/common/acme/server/ca.key.pem | 50
-rw-r--r--  nixos/tests/common/acme/server/default.nix | 16
-rw-r--r--  nixos/tests/common/acme/server/generate-certs.nix | 6
-rw-r--r--  nixos/tests/common/auto.nix | 6
-rw-r--r--  nixos/tests/common/ec2.nix | 2
-rw-r--r--  nixos/tests/common/resolver.nix | 6
-rw-r--r--  nixos/tests/containers-unified-hierarchy.nix | 21
-rw-r--r--  nixos/tests/convos.nix | 2
-rw-r--r--  nixos/tests/corerad.nix | 1
-rw-r--r--  nixos/tests/couchdb.nix | 2
-rw-r--r--  nixos/tests/cri-o.nix | 2
-rw-r--r--  nixos/tests/cryptpad.nix | 18
-rw-r--r--  nixos/tests/custom-ca.nix | 2
-rw-r--r--  nixos/tests/deluge.nix | 4
-rw-r--r--  nixos/tests/dhparams.nix | 138
-rw-r--r--  nixos/tests/discourse.nix | 1
-rw-r--r--  nixos/tests/dnscrypt-proxy2.nix | 8
-rw-r--r--  nixos/tests/docker-tools-cross.nix | 6
-rw-r--r--  nixos/tests/docker-tools.nix | 78
-rw-r--r--  nixos/tests/docker.nix | 1
-rw-r--r--  nixos/tests/documize.nix | 6
-rw-r--r--  nixos/tests/dolibarr.nix | 59
-rw-r--r--  nixos/tests/domination.nix | 2
-rw-r--r--  nixos/tests/ec2.nix | 2
-rw-r--r--  nixos/tests/endlessh-go.nix | 58
-rw-r--r--  nixos/tests/endlessh.nix | 43
-rw-r--r--  nixos/tests/evcc.nix | 97
-rw-r--r--  nixos/tests/firefox.nix | 19
-rw-r--r--  nixos/tests/freenet.nix | 19
-rw-r--r--  nixos/tests/freshrss.nix | 19
-rw-r--r--  nixos/tests/fscrypt.nix | 50
-rw-r--r--  nixos/tests/garage.nix | 169
-rw-r--r--  nixos/tests/ghostunnel.nix | 1
-rw-r--r--  nixos/tests/gitea.nix | 2
-rw-r--r--  nixos/tests/gitlab.nix | 2
-rw-r--r--  nixos/tests/gollum.nix | 14
-rw-r--r--  nixos/tests/grafana/basic.nix (renamed from nixos/tests/grafana.nix) | 71
-rw-r--r--  nixos/tests/grafana/default.nix | 9
-rw-r--r--  nixos/tests/grafana/provision/contact-points.yaml | 9
-rw-r--r--  nixos/tests/grafana/provision/dashboards.yaml | 6
-rw-r--r--  nixos/tests/grafana/provision/datasources.yaml | 7
-rw-r--r--  nixos/tests/grafana/provision/default.nix | 251
-rw-r--r--  nixos/tests/grafana/provision/mute-timings.yaml | 4
-rw-r--r--  nixos/tests/grafana/provision/policies.yaml | 4
-rw-r--r--  nixos/tests/grafana/provision/rules.yaml | 36
-rw-r--r--  nixos/tests/grafana/provision/templates.yaml | 5
-rw-r--r--  nixos/tests/grafana/provision/test_dashboard.json | 47
-rw-r--r--  nixos/tests/graphite.nix | 14
-rw-r--r--  nixos/tests/grocy.nix | 26
-rw-r--r--  nixos/tests/hadoop/default.nix | 1
-rw-r--r--  nixos/tests/hadoop/hbase.nix | 84
-rw-r--r--  nixos/tests/hadoop/yarn.nix | 2
-rw-r--r--  nixos/tests/hardened.nix | 9
-rw-r--r--  nixos/tests/hbase.nix | 4
-rw-r--r--  nixos/tests/hedgedoc.nix | 4
-rw-r--r--  nixos/tests/hibernate.nix | 5
-rw-r--r--  nixos/tests/home-assistant.nix | 27
-rw-r--r--  nixos/tests/hydra/common.nix | 2
-rw-r--r--  nixos/tests/installed-tests/default.nix | 9
-rw-r--r--  nixos/tests/installed-tests/flatpak-builder.nix | 3
-rw-r--r--  nixos/tests/installed-tests/flatpak.nix | 2
-rw-r--r--  nixos/tests/installed-tests/gdk-pixbuf.nix | 2
-rw-r--r--  nixos/tests/installed-tests/geocode-glib.nix | 13
-rw-r--r--  nixos/tests/installed-tests/ibus.nix | 1
-rw-r--r--  nixos/tests/installed-tests/json-glib.nix | 5
-rw-r--r--  nixos/tests/installed-tests/librsvg.nix | 9
-rw-r--r--  nixos/tests/installer-systemd-stage-1.nix | 7
-rw-r--r--  nixos/tests/installer.nix | 33
-rw-r--r--  nixos/tests/invoiceplane.nix | 16
-rw-r--r--  nixos/tests/isso.nix | 2
-rw-r--r--  nixos/tests/jenkins.nix | 19
-rw-r--r--  nixos/tests/jibri.nix | 3
-rw-r--r--  nixos/tests/jitsi-meet.nix | 3
-rw-r--r--  nixos/tests/k3s/default.nix | 9
-rw-r--r--  nixos/tests/k3s/multi-node.nix | 183
-rw-r--r--  nixos/tests/k3s/single-node.nix (renamed from nixos/tests/k3s-single-node.nix) | 26
-rw-r--r--  nixos/tests/kafka.nix | 17
-rw-r--r--  nixos/tests/kanidm.nix | 31
-rw-r--r--  nixos/tests/karma.nix | 84
-rw-r--r--  nixos/tests/kea.nix | 2
-rw-r--r--  nixos/tests/kerberos/mit.nix | 2
-rw-r--r--  nixos/tests/kernel-generic.nix | 2
-rw-r--r--  nixos/tests/keter.nix | 42
-rw-r--r--  nixos/tests/keycloak.nix | 17
-rw-r--r--  nixos/tests/komga.nix | 22
-rw-r--r--  nixos/tests/krb5/example-config.nix | 2
-rw-r--r--  nixos/tests/kthxbye.nix | 110
-rw-r--r--  nixos/tests/kubernetes/base.nix | 4
-rw-r--r--  nixos/tests/kubernetes/default.nix | 2
-rw-r--r--  nixos/tests/kubernetes/dns.nix | 14
-rw-r--r--  nixos/tests/kubernetes/e2e.nix | 40
-rw-r--r--  nixos/tests/kubernetes/rbac.nix | 6
-rw-r--r--  nixos/tests/kubo.nix (renamed from nixos/tests/ipfs.nix) | 23
-rw-r--r--  nixos/tests/ladybird.nix | 30
-rw-r--r--  nixos/tests/languagetool.nix | 19
-rw-r--r--  nixos/tests/lemmy.nix | 89
-rw-r--r--  nixos/tests/libuiohook.nix | 21
-rw-r--r--  nixos/tests/libvirtd.nix | 61
-rw-r--r--  nixos/tests/lighttpd.nix | 21
-rw-r--r--  nixos/tests/listmonk.nix | 69
-rw-r--r--  nixos/tests/logrotate.nix | 29
-rw-r--r--  nixos/tests/lorri/default.nix | 2
-rw-r--r--  nixos/tests/lxd-image-server.nix | 4
-rw-r--r--  nixos/tests/lxd.nix | 6
-rw-r--r--  nixos/tests/maddy.nix | 2
-rw-r--r--  nixos/tests/make-test-python.nix | 2
-rw-r--r--  nixos/tests/matomo.nix | 2
-rw-r--r--  nixos/tests/matrix/conduit.nix | 2
-rw-r--r--  nixos/tests/matrix/mjolnir.nix | 2
-rw-r--r--  nixos/tests/matrix/synapse.nix | 2
-rw-r--r--  nixos/tests/mediatomb.nix | 101
-rw-r--r--  nixos/tests/meilisearch.nix | 10
-rw-r--r--  nixos/tests/merecat.nix | 28
-rw-r--r--  nixos/tests/minecraft-server.nix | 3
-rw-r--r--  nixos/tests/minidlna.nix | 31
-rw-r--r--  nixos/tests/moodle.nix | 2
-rw-r--r--  nixos/tests/mosquitto.nix | 12
-rw-r--r--  nixos/tests/musescore.nix | 4
-rw-r--r--  nixos/tests/mysql/common.nix | 7
-rw-r--r--  nixos/tests/neo4j.nix | 14
-rw-r--r--  nixos/tests/netbird.nix | 21
-rw-r--r--  nixos/tests/networking-proxy.nix | 2
-rw-r--r--  nixos/tests/networking.nix | 40
-rw-r--r--  nixos/tests/nextcloud/basic.nix | 7
-rw-r--r--  nixos/tests/nextcloud/default.nix | 10
-rw-r--r--  nixos/tests/nextcloud/openssl-sse.nix | 105
-rw-r--r--  nixos/tests/nextcloud/with-declarative-redis-and-secrets.nix | 117
-rw-r--r--  nixos/tests/nextcloud/with-postgresql-and-redis.nix | 5
-rw-r--r--  nixos/tests/nginx-auth.nix | 8
-rw-r--r--  nixos/tests/nginx-etag.nix | 4
-rw-r--r--  nixos/tests/nginx-globalredirect.nix | 24
-rw-r--r--  nixos/tests/nginx-http3.nix | 3
-rw-r--r--  nixos/tests/nginx-modsecurity.nix | 2
-rw-r--r--  nixos/tests/nginx-njs.nix | 27
-rw-r--r--  nixos/tests/nginx.nix | 2
-rw-r--r--  nixos/tests/nix-ld.nix | 5
-rw-r--r--  nixos/tests/nixops/default.nix | 1
-rw-r--r--  nixos/tests/non-default-filesystems.nix | 54
-rw-r--r--  nixos/tests/nscd.nix | 141
-rw-r--r--  nixos/tests/ntfy-sh.nix | 21
-rw-r--r--  nixos/tests/oci-containers.nix | 2
-rw-r--r--  nixos/tests/openldap.nix | 232
-rw-r--r--  nixos/tests/pam/pam-file-contents.nix | 1
-rw-r--r--  nixos/tests/pam/pam-u2f.nix | 3
-rw-r--r--  nixos/tests/paperless.nix | 8
-rw-r--r--  nixos/tests/pass-secret-service.nix | 69
-rw-r--r--  nixos/tests/patroni.nix | 206
-rw-r--r--  nixos/tests/pgadmin4-standalone.nix | 2
-rw-r--r--  nixos/tests/pgadmin4.nix | 15
-rw-r--r--  nixos/tests/phosh.nix | 70
-rw-r--r--  nixos/tests/php/pcre.nix | 16
-rw-r--r--  nixos/tests/pinnwand.nix | 57
-rw-r--r--  nixos/tests/plasma-bigscreen.nix | 38
-rw-r--r--  nixos/tests/plasma5.nix | 8
-rw-r--r--  nixos/tests/please.nix | 66
-rw-r--r--  nixos/tests/podgrab.nix | 4
-rw-r--r--  nixos/tests/podman/default.nix | 73
-rw-r--r--  nixos/tests/polaris.nix | 31
-rw-r--r--  nixos/tests/postgresql.nix | 93
-rw-r--r--  nixos/tests/powerdns.nix | 4
-rw-r--r--  nixos/tests/pppd.nix | 2
-rw-r--r--  nixos/tests/printing.nix | 1
-rw-r--r--  nixos/tests/privacyidea.nix | 2
-rw-r--r--  nixos/tests/prometheus-exporters.nix | 152
-rw-r--r--  nixos/tests/pulseaudio.nix | 25
-rw-r--r--  nixos/tests/quake3.nix | 95
-rw-r--r--  nixos/tests/rabbitmq.nix | 36
-rw-r--r--  nixos/tests/resolv.nix | 46
-rw-r--r--  nixos/tests/restic.nix | 73
-rw-r--r--  nixos/tests/retroarch.nix | 4
-rw-r--r--  nixos/tests/sanoid.nix | 18
-rw-r--r--  nixos/tests/schleuder.nix | 4
-rw-r--r--  nixos/tests/seafile.nix | 12
-rw-r--r--  nixos/tests/shadow.nix | 30
-rw-r--r--  nixos/tests/signal-desktop.nix | 2
-rw-r--r--  nixos/tests/smokeping.nix | 2
-rw-r--r--  nixos/tests/sourcehut.nix | 2
-rw-r--r--  nixos/tests/spark/default.nix | 1
-rw-r--r--  nixos/tests/sqlite3-to-mysql.nix | 65
-rw-r--r--  nixos/tests/sssd-ldap.nix | 13
-rw-r--r--  nixos/tests/stratis/default.nix | 8
-rw-r--r--  nixos/tests/stratis/encryption.nix | 32
-rw-r--r--  nixos/tests/stratis/simple.nix | 39
-rw-r--r--  nixos/tests/stunnel.nix | 174
-rw-r--r--  nixos/tests/swap-partition.nix | 48
-rw-r--r--  nixos/tests/switch-test.nix | 39
-rw-r--r--  nixos/tests/sympa.nix | 2
-rw-r--r--  nixos/tests/systemd-bpf.nix | 42
-rw-r--r--  nixos/tests/systemd-confinement.nix | 2
-rw-r--r--  nixos/tests/systemd-coredump.nix | 44
-rw-r--r--  nixos/tests/systemd-cryptenroll.nix | 1
-rw-r--r--  nixos/tests/systemd-initrd-luks-fido2.nix | 45
-rw-r--r--  nixos/tests/systemd-initrd-luks-password.nix | 5
-rw-r--r--  nixos/tests/systemd-initrd-luks-tpm2.nix | 72
-rw-r--r--  nixos/tests/systemd-initrd-modprobe.nix | 17
-rw-r--r--  nixos/tests/systemd-machinectl.nix | 41
-rw-r--r--  nixos/tests/systemd-networkd-ipv6-prefix-delegation.nix | 125
-rw-r--r--  nixos/tests/systemd-no-tainted.nix | 14
-rw-r--r--  nixos/tests/systemd-nspawn.nix | 6
-rw-r--r--  nixos/tests/systemd-oomd.nix | 54
-rw-r--r--  nixos/tests/systemd-portabled.nix | 51
-rw-r--r--  nixos/tests/systemd.nix | 6
-rw-r--r--  nixos/tests/tandoor-recipes.nix | 43
-rw-r--r--  nixos/tests/tayga.nix | 235
-rw-r--r--  nixos/tests/terminal-emulators.nix | 8
-rw-r--r--  nixos/tests/thelounge.nix | 2
-rw-r--r--  nixos/tests/tmate-ssh-server.nix | 73
-rw-r--r--  nixos/tests/tracee.nix | 49
-rw-r--r--  nixos/tests/upnp.nix | 4
-rw-r--r--  nixos/tests/uptime-kuma.nix | 19
-rw-r--r--  nixos/tests/v2ray.nix | 12
-rw-r--r--  nixos/tests/varnish.nix | 55
-rw-r--r--  nixos/tests/vault-dev.nix | 35
-rw-r--r--  nixos/tests/vaultwarden.nix | 50
-rw-r--r--  nixos/tests/vector.nix | 2
-rw-r--r--  nixos/tests/vengi-tools.nix | 4
-rw-r--r--  nixos/tests/virtualbox.nix | 4
-rw-r--r--  nixos/tests/vscodium.nix | 6
-rw-r--r--  nixos/tests/warzone2100.nix | 26
-rw-r--r--  nixos/tests/web-apps/healthchecks.nix | 42
-rw-r--r--  nixos/tests/web-apps/mastodon.nix | 170
-rw-r--r--  nixos/tests/web-apps/mastodon/default.nix | 9
-rw-r--r--  nixos/tests/web-apps/mastodon/remote-postgresql.nix | 160
-rw-r--r--  nixos/tests/web-apps/mastodon/script.nix | 54
-rw-r--r--  nixos/tests/web-apps/mastodon/standard.nix | 92
-rw-r--r--  nixos/tests/web-apps/peering-manager.nix | 40
-rw-r--r--  nixos/tests/web-apps/phylactery.nix | 20
-rw-r--r--  nixos/tests/web-apps/writefreely.nix | 44
-rw-r--r--  nixos/tests/web-servers/agate.nix | 46
-rw-r--r--  nixos/tests/web-servers/unit-php.nix | 17
-rw-r--r--  nixos/tests/wine.nix | 5
-rw-r--r--  nixos/tests/wireguard/wg-quick.nix | 5
-rw-r--r--  nixos/tests/wrappers.nix | 79
-rw-r--r--  nixos/tests/xmpp/prosody.nix | 3
-rw-r--r--  nixos/tests/xmpp/xmpp-sendmessage.nix | 10
-rw-r--r--  nixos/tests/xpadneo.nix | 18
-rw-r--r--  nixos/tests/xrdp.nix | 2
-rw-r--r--  nixos/tests/yggdrasil.nix | 4
-rw-r--r--  nixos/tests/zfs.nix | 4
-rw-r--r--  nixos/tests/zigbee2mqtt.nix | 2
-rw-r--r--  nixos/tests/zrepl.nix | 6
265 files changed, 7275 insertions, 1251 deletions
diff --git a/nixos/tests/3proxy.nix b/nixos/tests/3proxy.nix
index 8127438fabd..647d9d57c7f 100644
--- a/nixos/tests/3proxy.nix
+++ b/nixos/tests/3proxy.nix
@@ -1,6 +1,6 @@
-import ./make-test-python.nix ({ pkgs, ...} : {
+{ lib, pkgs, ... }: {
   name = "3proxy";
-  meta = with pkgs.lib.maintainers; {
+  meta = with lib.maintainers; {
     maintainers = [ misuzu ];
   };
 
@@ -92,7 +92,7 @@ import ./make-test-python.nix ({ pkgs, ...} : {
       networking.firewall.allowedTCPPorts = [ 3128 9999 ];
     };
 
-    peer3 = { lib, ... }: {
+    peer3 = { lib, pkgs, ... }: {
       networking.useDHCP = false;
       networking.interfaces.eth1 = {
         ipv4.addresses = [
@@ -186,4 +186,4 @@ import ./make-test-python.nix ({ pkgs, ...} : {
         "${pkgs.wget}/bin/wget -e use_proxy=yes -e http_proxy=http://192.168.0.4:3128 -S -O /dev/null http://127.0.0.1:9999"
     )
   '';
-})
+}
diff --git a/nixos/tests/acme.nix b/nixos/tests/acme.nix
index c07f99c5db3..64bc99f6d32 100644
--- a/nixos/tests/acme.nix
+++ b/nixos/tests/acme.nix
@@ -1,7 +1,7 @@
-import ./make-test-python.nix ({ pkgs, lib, ... }: let
+{ pkgs, lib, ... }: let
   commonConfig = ./common/acme/client;
 
-  dnsServerIP = nodes: nodes.dnsserver.config.networking.primaryIPAddress;
+  dnsServerIP = nodes: nodes.dnsserver.networking.primaryIPAddress;
 
   dnsScript = nodes: let
     dnsAddress = dnsServerIP nodes;
@@ -41,6 +41,16 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: let
     inherit documentRoot;
   };
 
+  simpleConfig = {
+    security.acme = {
+      certs."http.example.test" = {
+        listenHTTP = ":80";
+      };
+    };
+
+    networking.firewall.allowedTCPPorts = [ 80 ];
+  };
+
   # Base specialisation config for testing general ACME features
   webserverBasicConfig = {
     services.nginx.enable = true;
@@ -153,7 +163,7 @@ in {
         description = "Pebble ACME challenge test server";
         wantedBy = [ "network.target" ];
         serviceConfig = {
-          ExecStart = "${pkgs.pebble}/bin/pebble-challtestsrv -dns01 ':53' -defaultIPv6 '' -defaultIPv4 '${nodes.webserver.config.networking.primaryIPAddress}'";
+          ExecStart = "${pkgs.pebble}/bin/pebble-challtestsrv -dns01 ':53' -defaultIPv6 '' -defaultIPv4 '${nodes.webserver.networking.primaryIPAddress}'";
           # Required to bind on privileged ports.
           AmbientCapabilities = [ "CAP_NET_BIND_SERVICE" ];
         };
@@ -173,9 +183,29 @@ in {
       services.nginx.logError = "stderr info";
 
       specialisation = {
+        # Tests HTTP-01 verification using Lego's built-in web server
+        http01lego.configuration = simpleConfig;
+
+        renew.configuration = lib.mkMerge [
+          simpleConfig
+          {
+            # Pebble provides 5 year long certs,
+            # needs to be higher than that to test renewal
+            security.acme.certs."http.example.test".validMinDays = 9999;
+          }
+        ];
+
+        # Tests that account creds can be safely changed.
+        accountchange.configuration = lib.mkMerge [
+          simpleConfig
+          {
+            security.acme.certs."http.example.test".email = "admin@example.test";
+          }
+        ];
+
         # First derivation used to test general ACME features
         general.configuration = { ... }: let
-          caDomain = nodes.acme.config.test-support.acme.caDomain;
+          caDomain = nodes.acme.test-support.acme.caDomain;
           email = config.security.acme.defaults.email;
           # Exit 99 to make it easier to track if this is the reason a renew failed
           accountCreateTester = ''
@@ -247,7 +277,7 @@ in {
           };
         };
 
-      # Test compatiblity with Caddy
+      # Test compatibility with Caddy
       # It only supports useACMEHost, hence not using mkServerConfigs
       } // (let
         baseCaddyConfig = { nodes, config, ... }: {
@@ -316,7 +346,7 @@ in {
 
   testScript = { nodes, ... }:
     let
-      caDomain = nodes.acme.config.test-support.acme.caDomain;
+      caDomain = nodes.acme.test-support.acme.caDomain;
       newServerSystem = nodes.webserver.config.system.build.toplevel;
       switchToNewServer = "${newServerSystem}/bin/switch-to-configuration test";
     in
@@ -438,7 +468,7 @@ in {
       client.wait_for_unit("default.target")
 
       client.succeed(
-          'curl --data \'{"host": "${caDomain}", "addresses": ["${nodes.acme.config.networking.primaryIPAddress}"]}\' http://${dnsServerIP nodes}:8055/add-a'
+          'curl --data \'{"host": "${caDomain}", "addresses": ["${nodes.acme.networking.primaryIPAddress}"]}\' http://${dnsServerIP nodes}:8055/add-a'
       )
 
       acme.wait_for_unit("network-online.target")
@@ -446,7 +476,35 @@ in {
 
       download_ca_certs(client)
 
-      # Perform general tests first
+      # Perform http-01 w/ lego test first
+      with subtest("Can request certificate with Lego's built in web server"):
+          switch_to(webserver, "http01lego")
+          webserver.wait_for_unit("acme-finished-http.example.test.target")
+          check_fullchain(webserver, "http.example.test")
+          check_issuer(webserver, "http.example.test", "pebble")
+
+      # Perform renewal test
+      with subtest("Can renew certificates when they expire"):
+          hash = webserver.succeed("sha256sum /var/lib/acme/http.example.test/cert.pem")
+          switch_to(webserver, "renew")
+          webserver.wait_for_unit("acme-finished-http.example.test.target")
+          check_fullchain(webserver, "http.example.test")
+          check_issuer(webserver, "http.example.test", "pebble")
+          hash_after = webserver.succeed("sha256sum /var/lib/acme/http.example.test/cert.pem")
+          assert hash != hash_after
+
+      # Perform account change test
+      with subtest("Handles email change correctly"):
+          hash = webserver.succeed("sha256sum /var/lib/acme/http.example.test/cert.pem")
+          switch_to(webserver, "accountchange")
+          webserver.wait_for_unit("acme-finished-http.example.test.target")
+          check_fullchain(webserver, "http.example.test")
+          check_issuer(webserver, "http.example.test", "pebble")
+          hash_after = webserver.succeed("sha256sum /var/lib/acme/http.example.test/cert.pem")
+          # Has to do a full run to register account, which creates new certs.
+          assert hash != hash_after
+
+      # Perform general tests
       switch_to(webserver, "general")
 
       with subtest("Can request certificate with HTTP-01 challenge"):
@@ -594,4 +652,4 @@ in {
               wait_for_server()
               check_connection_key_bits(client, test_domain, "384")
     '';
-})
+}
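
Most of the edits to acme.nix above drop the ".config" step when one node dereferences another (e.g. nodes.dnsserver.networking.primaryIPAddress): with the module-based test interface, the nodes attribute handed to testScript and to per-node closures exposes each node's evaluated option values directly. A minimal sketch of the access pattern, assuming a hypothetical node named "webserver":

    testScript = { nodes, ... }: ''
      # The option value is read straight off the node attrset:
      print("${nodes.webserver.networking.primaryIPAddress}")
    '';
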
diff --git a/nixos/tests/adguardhome.nix b/nixos/tests/adguardhome.nix
index ddbe8ff9c11..ec1cc1e497b 100644
--- a/nixos/tests/adguardhome.nix
+++ b/nixos/tests/adguardhome.nix
@@ -1,9 +1,14 @@
-import ./make-test-python.nix {
+{
   name = "adguardhome";
 
   nodes = {
-    minimalConf = { ... }: {
-      services.adguardhome = { enable = true; };
+    nullConf = { ... }: { services.adguardhome = { enable = true; }; };
+
+    emptyConf = { lib, ... }: {
+      services.adguardhome = {
+        enable = true;
+        settings = {};
+      };
     };
 
     declarativeConf = { ... }: {
@@ -12,6 +17,7 @@ import ./make-test-python.nix {
 
         mutableSettings = false;
         settings = {
+          schema_version = 0;
           dns = {
             bind_host = "0.0.0.0";
             bootstrap_dns = "127.0.0.1";
@@ -26,6 +32,7 @@ import ./make-test-python.nix {
 
         mutableSettings = true;
         settings = {
+          schema_version = 0;
           dns = {
             bind_host = "0.0.0.0";
             bootstrap_dns = "127.0.0.1";
@@ -36,9 +43,12 @@ import ./make-test-python.nix {
   };
 
   testScript = ''
-    with subtest("Minimal config test"):
-        minimalConf.wait_for_unit("adguardhome.service")
-        minimalConf.wait_for_open_port(3000)
+    with subtest("Minimal (settings = null) config test"):
+        nullConf.wait_for_unit("adguardhome.service")
+
+    with subtest("Default config test"):
+        emptyConf.wait_for_unit("adguardhome.service")
+        emptyConf.wait_for_open_port(3000)
 
     with subtest("Declarative config test, DNS will be reachable"):
         declarativeConf.wait_for_unit("adguardhome.service")
diff --git a/nixos/tests/aesmd.nix b/nixos/tests/aesmd.nix
index 9f07426be8d..5da661afd54 100644
--- a/nixos/tests/aesmd.nix
+++ b/nixos/tests/aesmd.nix
@@ -1,4 +1,4 @@
-import ./make-test-python.nix ({ pkgs, lib, ... }: {
+{ pkgs, lib, ... }: {
   name = "aesmd";
   meta = {
     maintainers = with lib.maintainers; [ veehaitch ];
@@ -59,4 +59,4 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
 
       assert aesmd_config == "whitelist url = http://nixos.org\nproxy type = direct\ndefault quoting type = ecdsa_256\n", "aesmd.conf differs"
   '';
-})
+}
diff --git a/nixos/tests/airsonic.nix b/nixos/tests/airsonic.nix
index 2f60c56f643..69f979726bc 100644
--- a/nixos/tests/airsonic.nix
+++ b/nixos/tests/airsonic.nix
@@ -15,7 +15,8 @@ import ./make-test-python.nix ({ pkgs, ... }: {
 
   testScript = ''
     def airsonic_is_up(_) -> bool:
-        return machine.succeed("curl --fail http://localhost:4040/login")
+        status, _ = machine.execute("curl --fail http://localhost:4040/login")
+        return status == 0
 
 
     machine.start()
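
The airsonic.nix change above swaps succeed() for execute() inside a poll helper: in the NixOS test driver, succeed() raises an exception as soon as the command exits non-zero, while execute() returns a (status, output) pair that a retried predicate can inspect. A minimal sketch of the pattern, assuming the driver's global retry() helper and an illustrative service on port 4040:

    testScript = ''
      def service_is_up(_) -> bool:
          # execute() does not raise on failure; check the exit status instead
          status, _ = machine.execute("curl --fail http://localhost:4040/login")
          return status == 0

      machine.start()
      # retry() re-invokes the predicate until it returns True or times out
      retry(service_is_up)
    '';
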
diff --git a/nixos/tests/all-tests.nix b/nixos/tests/all-tests.nix
index 671b1a8876a..1956d3c9e8c 100644
--- a/nixos/tests/all-tests.nix
+++ b/nixos/tests/all-tests.nix
@@ -1,4 +1,11 @@
-{ system, pkgs, callTest }:
+{ system,
+  pkgs,
+
+  # Projects the test configuration into the desired value; usually
+  # the test runner: `config: config.test`.
+  callTest,
+
+}:
 # The return value of this function will be an attrset with arbitrary depth and
 # the `anything` returned by callTest at its test leafs.
 # The tests not supported by `system` will be replaced with `{}`, so that
@@ -11,9 +18,18 @@ with pkgs.lib;
 
 let
   discoverTests = val:
-    if !isAttrs val then val
-    else if hasAttr "test" val then callTest val
-    else mapAttrs (n: s: discoverTests s) val;
+    if isAttrs val
+    then
+      if hasAttr "test" val then callTest val
+      else mapAttrs (n: s: discoverTests s) val
+    else if isFunction val
+      then
+        # Tests based on make-test-python.nix will return the second lambda
+        # in that file, which are then forwarded to the test definition
+        # following the `import make-test-python.nix` expression
+        # (if it is a function).
+        discoverTests (val { inherit system pkgs; })
+      else val;
   handleTest = path: args:
     discoverTests (import path ({ inherit system pkgs; } // args));
   handleTestOn = systems: path: args:
@@ -27,20 +43,45 @@ let
   };
   evalMinimalConfig = module: nixosLib.evalModules { modules = [ module ]; };
 
+  inherit
+    (rec {
+      doRunTest = arg: ((import ../lib/testing-python.nix { inherit system pkgs; }).evalTest {
+        imports = [ arg ];
+      }).config.result;
+      findTests = tree:
+        if tree?recurseForDerivations && tree.recurseForDerivations
+        then
+          mapAttrs
+            (k: findTests)
+            (builtins.removeAttrs tree ["recurseForDerivations"])
+        else callTest tree;
+
+      runTest = arg: let r = doRunTest arg; in findTests r;
+      runTestOn = systems: arg:
+        if elem system systems then runTest arg
+        else {};
+    })
+    runTest
+    runTestOn
+    ;
+
 in {
-  _3proxy = handleTest ./3proxy.nix {};
-  acme = handleTest ./acme.nix {};
-  adguardhome = handleTest ./adguardhome.nix {};
-  aesmd = handleTest ./aesmd.nix {};
-  agate = handleTest ./web-servers/agate.nix {};
+  _3proxy = runTest ./3proxy.nix;
+  acme = runTest ./acme.nix;
+  adguardhome = runTest ./adguardhome.nix;
+  aesmd = runTest ./aesmd.nix;
+  agate = runTest ./web-servers/agate.nix;
   agda = handleTest ./agda.nix {};
   airsonic = handleTest ./airsonic.nix {};
   allTerminfo = handleTest ./all-terminfo.nix {};
+  alps = handleTest ./alps.nix {};
   amazon-init-shell = handleTest ./amazon-init-shell.nix {};
   apfs = handleTest ./apfs.nix {};
   apparmor = handleTest ./apparmor.nix {};
   atd = handleTest ./atd.nix {};
   atop = handleTest ./atop.nix {};
+  atuin = handleTest ./atuin.nix {};
+  auth-mysql = handleTest ./auth-mysql.nix {};
   avahi = handleTest ./avahi.nix {};
   avahi-with-resolved = handleTest ./avahi.nix { networkd = true; };
   babeld = handleTest ./babeld.nix {};
@@ -63,6 +104,7 @@ in {
   brscan5 = handleTest ./brscan5.nix {};
   btrbk = handleTest ./btrbk.nix {};
   btrbk-no-timer = handleTest ./btrbk-no-timer.nix {};
+  btrbk-section-order = handleTest ./btrbk-section-order.nix {};
   buildbot = handleTest ./buildbot.nix {};
   buildkite-agents = handleTest ./buildkite-agents.nix {};
   caddy = handleTest ./caddy.nix {};
@@ -70,21 +112,22 @@ in {
   cage = handleTest ./cage.nix {};
   cagebreak = handleTest ./cagebreak.nix {};
   calibre-web = handleTest ./calibre-web.nix {};
-  cassandra_2_1 = handleTest ./cassandra.nix { testPackage = pkgs.cassandra_2_1; };
-  cassandra_2_2 = handleTest ./cassandra.nix { testPackage = pkgs.cassandra_2_2; };
   cassandra_3_0 = handleTest ./cassandra.nix { testPackage = pkgs.cassandra_3_0; };
   cassandra_3_11 = handleTest ./cassandra.nix { testPackage = pkgs.cassandra_3_11; };
+  cassandra_4 = handleTest ./cassandra.nix { testPackage = pkgs.cassandra_4; };
   ceph-multi-node = handleTestOn ["x86_64-linux"] ./ceph-multi-node.nix {};
   ceph-single-node = handleTestOn ["x86_64-linux"] ./ceph-single-node.nix {};
   ceph-single-node-bluestore = handleTestOn ["x86_64-linux"] ./ceph-single-node-bluestore.nix {};
   certmgr = handleTest ./certmgr.nix {};
-  cfssl = handleTestOn ["x86_64-linux"] ./cfssl.nix {};
+  cfssl = handleTestOn ["aarch64-linux" "x86_64-linux"] ./cfssl.nix {};
   charliecloud = handleTest ./charliecloud.nix {};
-  chromium = (handleTestOn ["x86_64-linux"] ./chromium.nix {}).stable or {};
+  chromium = (handleTestOn ["aarch64-linux" "x86_64-linux"] ./chromium.nix {}).stable or {};
+  cinnamon = handleTest ./cinnamon.nix {};
   cjdns = handleTest ./cjdns.nix {};
   clickhouse = handleTest ./clickhouse.nix {};
   cloud-init = handleTest ./cloud-init.nix {};
-  cntr = handleTest ./cntr.nix {};
+  cloud-init-hostname = handleTest ./cloud-init-hostname.nix {};
+  cntr = handleTestOn ["aarch64-linux" "x86_64-linux"] ./cntr.nix {};
   cockroachdb = handleTestOn ["x86_64-linux"] ./cockroachdb.nix {};
   collectd = handleTest ./collectd.nix {};
   consul = handleTest ./consul.nix {};
@@ -103,14 +146,14 @@ in {
   containers-reloadable = handleTest ./containers-reloadable.nix {};
   containers-restart_networking = handleTest ./containers-restart_networking.nix {};
   containers-tmpfs = handleTest ./containers-tmpfs.nix {};
+  containers-unified-hierarchy = handleTest ./containers-unified-hierarchy.nix {};
   convos = handleTest ./convos.nix {};
   corerad = handleTest ./corerad.nix {};
   coturn = handleTest ./coturn.nix {};
   couchdb = handleTest ./couchdb.nix {};
-  cri-o = handleTestOn ["x86_64-linux"] ./cri-o.nix {};
+  cri-o = handleTestOn ["aarch64-linux" "x86_64-linux"] ./cri-o.nix {};
   custom-ca = handleTest ./custom-ca.nix {};
   croc = handleTest ./croc.nix {};
-  cryptpad = handleTest ./cryptpad.nix {};
   deluge = handleTest ./deluge.nix {};
   dendrite = handleTest ./matrix/dendrite.nix {};
   dex-oidc = handleTest ./dex-oidc.nix {};
@@ -121,15 +164,17 @@ in {
   dnscrypt-wrapper = handleTestOn ["x86_64-linux"] ./dnscrypt-wrapper {};
   dnsdist = handleTest ./dnsdist.nix {};
   doas = handleTest ./doas.nix {};
-  docker = handleTestOn ["x86_64-linux"] ./docker.nix {};
-  docker-rootless = handleTestOn ["x86_64-linux"] ./docker-rootless.nix {};
+  docker = handleTestOn ["aarch64-linux" "x86_64-linux"] ./docker.nix {};
+  docker-rootless = handleTestOn ["aarch64-linux" "x86_64-linux"] ./docker-rootless.nix {};
   docker-registry = handleTest ./docker-registry.nix {};
   docker-tools = handleTestOn ["x86_64-linux"] ./docker-tools.nix {};
   docker-tools-cross = handleTestOn ["x86_64-linux" "aarch64-linux"] ./docker-tools-cross.nix {};
   docker-tools-overlay = handleTestOn ["x86_64-linux"] ./docker-tools-overlay.nix {};
   documize = handleTest ./documize.nix {};
+  documentation = pkgs.callPackage ../modules/misc/documentation/test.nix { inherit nixosLib; };
   doh-proxy-rust = handleTest ./doh-proxy-rust.nix {};
   dokuwiki = handleTest ./dokuwiki.nix {};
+  dolibarr = handleTest ./dolibarr.nix {};
   domination = handleTest ./domination.nix {};
   dovecot = handleTest ./dovecot.nix {};
   drbd = handleTest ./drbd.nix {};
@@ -137,9 +182,12 @@ in {
   ec2-config = (handleTestOn ["x86_64-linux"] ./ec2.nix {}).boot-ec2-config or {};
   ec2-nixops = (handleTestOn ["x86_64-linux"] ./ec2.nix {}).boot-ec2-nixops or {};
   ecryptfs = handleTest ./ecryptfs.nix {};
+  fscrypt = handleTest ./fscrypt.nix {};
   ejabberd = handleTest ./xmpp/ejabberd.nix {};
   elk = handleTestOn ["x86_64-linux"] ./elk.nix {};
   emacs-daemon = handleTest ./emacs-daemon.nix {};
+  endlessh = handleTest ./endlessh.nix {};
+  endlessh-go = handleTest ./endlessh-go.nix {};
   engelsystem = handleTest ./engelsystem.nix {};
   enlightenment = handleTest ./enlightenment.nix {};
   env = handleTest ./env.nix {};
@@ -147,18 +195,19 @@ in {
   ergo = handleTest ./ergo.nix {};
   ergochat = handleTest ./ergochat.nix {};
   etc = pkgs.callPackage ../modules/system/etc/test.nix { inherit evalMinimalConfig; };
+  activation = pkgs.callPackage ../modules/system/activation/test.nix { };
   etcd = handleTestOn ["x86_64-linux"] ./etcd.nix {};
   etcd-cluster = handleTestOn ["x86_64-linux"] ./etcd-cluster.nix {};
   etebase-server = handleTest ./etebase-server.nix {};
   etesync-dav = handleTest ./etesync-dav.nix {};
   extra-python-packages = handleTest ./extra-python-packages.nix {};
+  evcc = handleTest ./evcc.nix {};
   fancontrol = handleTest ./fancontrol.nix {};
   fcitx = handleTest ./fcitx {};
   fenics = handleTest ./fenics.nix {};
   ferm = handleTest ./ferm.nix {};
   firefox = handleTest ./firefox.nix { firefoxPackage = pkgs.firefox; };
   firefox-esr    = handleTest ./firefox.nix { firefoxPackage = pkgs.firefox-esr; }; # used in `tested` job
-  firefox-esr-91 = handleTest ./firefox.nix { firefoxPackage = pkgs.firefox-esr-91; };
   firefox-esr-102 = handleTest ./firefox.nix { firefoxPackage = pkgs.firefox-esr-102; };
   firejail = handleTest ./firejail.nix {};
   firewall = handleTest ./firewall.nix {};
@@ -167,11 +216,14 @@ in {
   fluentd = handleTest ./fluentd.nix {};
   fluidd = handleTest ./fluidd.nix {};
   fontconfig-default-fonts = handleTest ./fontconfig-default-fonts.nix {};
+  freenet = handleTest ./freenet.nix {};
   freeswitch = handleTest ./freeswitch.nix {};
+  freshrss = handleTest ./freshrss.nix {};
   frr = handleTest ./frr.nix {};
   fsck = handleTest ./fsck.nix {};
   ft2-clone = handleTest ./ft2-clone.nix {};
   mimir = handleTest ./mimir.nix {};
+  garage = handleTest ./garage.nix {};
   gerrit = handleTest ./gerrit.nix {};
   geth = handleTest ./geth.nix {};
   ghostunnel = handleTest ./ghostunnel.nix {};
@@ -187,9 +239,10 @@ in {
   gobgpd = handleTest ./gobgpd.nix {};
   gocd-agent = handleTest ./gocd-agent.nix {};
   gocd-server = handleTest ./gocd-server.nix {};
+  gollum = handleTest ./gollum.nix {};
   google-oslogin = handleTest ./google-oslogin {};
   gotify-server = handleTest ./gotify-server.nix {};
-  grafana = handleTest ./grafana.nix {};
+  grafana = handleTest ./grafana {};
   grafana-agent = handleTest ./grafana-agent.nix {};
   graphite = handleTest ./graphite.nix {};
   graylog = handleTest ./graylog.nix {};
@@ -203,14 +256,15 @@ in {
   haste-server = handleTest ./haste-server.nix {};
   haproxy = handleTest ./haproxy.nix {};
   hardened = handleTest ./hardened.nix {};
-  hbase1 = handleTest ./hbase.nix { package=pkgs.hbase1; };
+  healthchecks = handleTest ./web-apps/healthchecks.nix {};
   hbase2 = handleTest ./hbase.nix { package=pkgs.hbase2; };
+  hbase_2_4 = handleTest ./hbase.nix { package=pkgs.hbase_2_4; };
   hbase3 = handleTest ./hbase.nix { package=pkgs.hbase3; };
   hedgedoc = handleTest ./hedgedoc.nix {};
   herbstluftwm = handleTest ./herbstluftwm.nix {};
   installed-tests = pkgs.recurseIntoAttrs (handleTest ./installed-tests {});
   invidious = handleTest ./invidious.nix {};
-  oci-containers = handleTestOn ["x86_64-linux"] ./oci-containers.nix {};
+  oci-containers = handleTestOn ["aarch64-linux" "x86_64-linux"] ./oci-containers.nix {};
   odoo = handleTest ./odoo.nix {};
   # 9pnet_virtio used to mount /nix partition doesn't support
   # hibernation. This test happens to work on x86_64-linux but
@@ -242,7 +296,6 @@ in {
   installer-systemd-stage-1 = handleTest ./installer-systemd-stage-1.nix {};
   invoiceplane = handleTest ./invoiceplane.nix {};
   iodine = handleTest ./iodine.nix {};
-  ipfs = handleTest ./ipfs.nix {};
   ipv6 = handleTest ./ipv6.nix {};
   iscsi-multipath-root = handleTest ./iscsi-multipath-root.nix {};
   iscsi-root = handleTest ./iscsi-root.nix {};
@@ -254,9 +307,10 @@ in {
   jibri = handleTest ./jibri.nix {};
   jirafeau = handleTest ./jirafeau.nix {};
   jitsi-meet = handleTest ./jitsi-meet.nix {};
-  k3s-single-node = handleTest ./k3s-single-node.nix {};
+  k3s = handleTest ./k3s {};
   kafka = handleTest ./kafka.nix {};
   kanidm = handleTest ./kanidm.nix {};
+  karma = handleTest ./karma.nix {};
   kbd-setfont-decompress = handleTest ./kbd-setfont-decompress.nix {};
   kbd-update-search-paths-patch = handleTest ./kbd-update-search-paths-patch.nix {};
   kea = handleTest ./kea.nix {};
@@ -265,22 +319,34 @@ in {
   kerberos = handleTest ./kerberos/default.nix {};
   kernel-generic = handleTest ./kernel-generic.nix {};
   kernel-latest-ath-user-regd = handleTest ./kernel-latest-ath-user-regd.nix {};
+  keter = handleTest ./keter.nix {};
   kexec = handleTest ./kexec.nix {};
   keycloak = discoverTests (import ./keycloak.nix);
   keymap = handleTest ./keymap.nix {};
   knot = handleTest ./knot.nix {};
+  komga = handleTest ./komga.nix {};
   krb5 = discoverTests (import ./krb5 {});
   ksm = handleTest ./ksm.nix {};
+  kthxbye = handleTest ./kthxbye.nix {};
   kubernetes = handleTestOn ["x86_64-linux"] ./kubernetes {};
+  kubo = handleTest ./kubo.nix {};
+  ladybird = handleTest ./ladybird.nix {};
+  languagetool = handleTest ./languagetool.nix {};
   latestKernel.login = handleTest ./login.nix { latestKernel = true; };
   leaps = handleTest ./leaps.nix {};
+  lemmy = handleTest ./lemmy.nix {};
   libinput = handleTest ./libinput.nix {};
   libreddit = handleTest ./libreddit.nix {};
   libresprite = handleTest ./libresprite.nix {};
   libreswan = handleTest ./libreswan.nix {};
+  librewolf = handleTest ./firefox.nix { firefoxPackage = pkgs.librewolf; };
+  libuiohook = handleTest ./libuiohook.nix {};
+  libvirtd = handleTest ./libvirtd.nix {};
   lidarr = handleTest ./lidarr.nix {};
   lightdm = handleTest ./lightdm.nix {};
+  lighttpd = handleTest ./lighttpd.nix {};
   limesurvey = handleTest ./limesurvey.nix {};
+  listmonk = handleTest ./listmonk.nix {};
   litestream = handleTest ./litestream.nix {};
   locate = handleTest ./locate.nix {};
   login = handleTest ./login.nix {};
@@ -300,7 +366,7 @@ in {
   mailhog = handleTest ./mailhog.nix {};
   man = handleTest ./man.nix {};
   mariadb-galera = handleTest ./mysql/mariadb-galera.nix {};
-  mastodon = handleTestOn ["x86_64-linux" "i686-linux" "aarch64-linux"] ./web-apps/mastodon.nix {};
+  mastodon = discoverTests (import ./web-apps/mastodon { inherit handleTestOn; });
   matomo = handleTest ./matomo.nix {};
   matrix-appservice-irc = handleTest ./matrix/appservice-irc.nix {};
   matrix-conduit = handleTest ./matrix/conduit.nix {};
@@ -310,6 +376,7 @@ in {
   mediawiki = handleTest ./mediawiki.nix {};
   meilisearch = handleTest ./meilisearch.nix {};
   memcached = handleTest ./memcached.nix {};
+  merecat = handleTest ./merecat.nix {};
   metabase = handleTest ./metabase.nix {};
   minecraft = handleTest ./minecraft.nix {};
   minecraft-server = handleTest ./minecraft-server.nix {};
@@ -350,6 +417,7 @@ in {
   ncdns = handleTest ./ncdns.nix {};
   ndppd = handleTest ./ndppd.nix {};
   nebula = handleTest ./nebula.nix {};
+  netbird = handleTest ./netbird.nix {};
   neo4j = handleTest ./neo4j.nix {};
   netdata = handleTest ./netdata.nix {};
   networking.networkd = handleTest ./networking.nix { networkd = true; };
@@ -367,8 +435,10 @@ in {
   nginx = handleTest ./nginx.nix {};
   nginx-auth = handleTest ./nginx-auth.nix {};
   nginx-etag = handleTest ./nginx-etag.nix {};
+  nginx-globalredirect = handleTest ./nginx-globalredirect.nix {};
   nginx-http3 = handleTest ./nginx-http3.nix {};
   nginx-modsecurity = handleTest ./nginx-modsecurity.nix {};
+  nginx-njs = handleTest ./nginx-njs.nix {};
   nginx-pubhtml = handleTest ./nginx-pubhtml.nix {};
   nginx-sandbox = handleTestOn ["x86_64-linux"] ./nginx-sandbox.nix {};
   nginx-sso = handleTest ./nginx-sso.nix {};
@@ -383,9 +453,12 @@ in {
   nixpkgs = pkgs.callPackage ../modules/misc/nixpkgs/test.nix { inherit evalMinimalConfig; };
   node-red = handleTest ./node-red.nix {};
   nomad = handleTest ./nomad.nix {};
+  non-default-filesystems = handleTest ./non-default-filesystems.nix {};
   noto-fonts = handleTest ./noto-fonts.nix {};
   novacomd = handleTestOn ["x86_64-linux"] ./novacomd.nix {};
+  nscd = handleTest ./nscd.nix {};
   nsd = handleTest ./nsd.nix {};
+  ntfy-sh = handleTest ./ntfy-sh.nix {};
   nzbget = handleTest ./nzbget.nix {};
   nzbhydra2 = handleTest ./nzbhydra2.nix {};
   oh-my-zsh = handleTest ./oh-my-zsh.nix {};
@@ -411,32 +484,41 @@ in {
   pam-oath-login = handleTest ./pam/pam-oath-login.nix {};
   pam-u2f = handleTest ./pam/pam-u2f.nix {};
   pam-ussh = handleTest ./pam/pam-ussh.nix {};
+  pass-secret-service = handleTest ./pass-secret-service.nix {};
+  patroni = handleTestOn ["x86_64-linux"] ./patroni.nix {};
   pantalaimon = handleTest ./matrix/pantalaimon.nix {};
   pantheon = handleTest ./pantheon.nix {};
   paperless = handleTest ./paperless.nix {};
   parsedmarc = handleTest ./parsedmarc {};
   pdns-recursor = handleTest ./pdns-recursor.nix {};
   peerflix = handleTest ./peerflix.nix {};
+  peering-manager = handleTest ./web-apps/peering-manager.nix {};
   peertube = handleTestOn ["x86_64-linux"] ./web-apps/peertube.nix {};
   pgadmin4 = handleTest ./pgadmin4.nix {};
   pgadmin4-standalone = handleTest ./pgadmin4-standalone.nix {};
   pgjwt = handleTest ./pgjwt.nix {};
   pgmanage = handleTest ./pgmanage.nix {};
+  phosh = handleTest ./phosh.nix {};
   php = handleTest ./php {};
   php80 = handleTest ./php { php = pkgs.php80; };
   php81 = handleTest ./php { php = pkgs.php81; };
+  php82 = handleTest ./php { php = pkgs.php82; };
+  phylactery = handleTest ./web-apps/phylactery.nix {};
   pict-rs = handleTest ./pict-rs.nix {};
   pinnwand = handleTest ./pinnwand.nix {};
+  plasma-bigscreen = handleTest ./plasma-bigscreen.nix {};
   plasma5 = handleTest ./plasma5.nix {};
   plasma5-systemd-start = handleTest ./plasma5-systemd-start.nix {};
   plausible = handleTest ./plausible.nix {};
+  please = handleTest ./please.nix {};
   pleroma = handleTestOn [ "x86_64-linux" "aarch64-linux" ] ./pleroma.nix {};
   plikd = handleTest ./plikd.nix {};
   plotinus = handleTest ./plotinus.nix {};
   podgrab = handleTest ./podgrab.nix {};
-  podman = handleTestOn ["x86_64-linux"] ./podman/default.nix {};
-  podman-dnsname = handleTestOn ["x86_64-linux"] ./podman/dnsname.nix {};
-  podman-tls-ghostunnel = handleTestOn ["x86_64-linux"] ./podman/tls-ghostunnel.nix {};
+  podman = handleTestOn ["aarch64-linux" "x86_64-linux"] ./podman/default.nix {};
+  podman-dnsname = handleTestOn ["aarch64-linux" "x86_64-linux"] ./podman/dnsname.nix {};
+  podman-tls-ghostunnel = handleTestOn ["aarch64-linux" "x86_64-linux"] ./podman/tls-ghostunnel.nix {};
+  polaris = handleTest ./polaris.nix {};
   pomerium = handleTestOn ["x86_64-linux"] ./pomerium.nix {};
   postfix = handleTest ./postfix.nix {};
   postfix-raise-smtpd-tls-security-level = handleTest ./postfix-raise-smtpd-tls-security-level.nix {};
@@ -464,13 +546,13 @@ in {
   pulseaudio = discoverTests (import ./pulseaudio.nix);
   qboot = handleTestOn ["x86_64-linux" "i686-linux"] ./qboot.nix {};
   quorum = handleTest ./quorum.nix {};
+  quake3 = handleTest ./quake3.nix {};
   rabbitmq = handleTest ./rabbitmq.nix {};
   radarr = handleTest ./radarr.nix {};
   radicale = handleTest ./radicale.nix {};
   rasdaemon = handleTest ./rasdaemon.nix {};
   redis = handleTest ./redis.nix {};
   redmine = handleTest ./redmine.nix {};
-  resolv = handleTest ./resolv.nix {};
   restartByActivationScript = handleTest ./restart-by-activation-script.nix {};
   restic = handleTest ./restic.nix {};
   retroarch = handleTest ./retroarch.nix {};
@@ -510,13 +592,17 @@ in {
   sourcehut = handleTest ./sourcehut.nix {};
   spacecookie = handleTest ./spacecookie.nix {};
   spark = handleTestOn [ "x86_64-linux" "aarch64-linux" ] ./spark {};
+  sqlite3-to-mysql = handleTest ./sqlite3-to-mysql.nix {};
   sslh = handleTest ./sslh.nix {};
   sssd = handleTestOn ["x86_64-linux"] ./sssd.nix {};
   sssd-ldap = handleTestOn ["x86_64-linux"] ./sssd-ldap.nix {};
   starship = handleTest ./starship.nix {};
   step-ca = handleTestOn ["x86_64-linux"] ./step-ca.nix {};
+  stratis = handleTest ./stratis {};
   strongswan-swanctl = handleTest ./strongswan-swanctl.nix {};
+  stunnel = handleTest ./stunnel.nix {};
   sudo = handleTest ./sudo.nix {};
+  swap-partition = handleTest ./swap-partition.nix {};
   sway = handleTest ./sway.nix {};
   switchTest = handleTest ./switch-test.nix {};
   sympa = handleTest ./sympa.nix {};
@@ -527,12 +613,17 @@ in {
   systemd-analyze = handleTest ./systemd-analyze.nix {};
   systemd-binfmt = handleTestOn ["x86_64-linux"] ./systemd-binfmt.nix {};
   systemd-boot = handleTest ./systemd-boot.nix {};
+  systemd-bpf = handleTest ./systemd-bpf.nix {};
   systemd-confinement = handleTest ./systemd-confinement.nix {};
+  systemd-coredump = handleTest ./systemd-coredump.nix {};
   systemd-cryptenroll = handleTest ./systemd-cryptenroll.nix {};
   systemd-escaping = handleTest ./systemd-escaping.nix {};
   systemd-initrd-btrfs-raid = handleTest ./systemd-initrd-btrfs-raid.nix {};
+  systemd-initrd-luks-fido2 = handleTest ./systemd-initrd-luks-fido2.nix {};
   systemd-initrd-luks-keyfile = handleTest ./systemd-initrd-luks-keyfile.nix {};
   systemd-initrd-luks-password = handleTest ./systemd-initrd-luks-password.nix {};
+  systemd-initrd-luks-tpm2 = handleTest ./systemd-initrd-luks-tpm2.nix {};
+  systemd-initrd-modprobe = handleTest ./systemd-initrd-modprobe.nix {};
   systemd-initrd-shutdown = handleTest ./systemd-shutdown.nix { systemdStage1 = true; };
   systemd-initrd-simple = handleTest ./systemd-initrd-simple.nix {};
   systemd-initrd-swraid = handleTest ./systemd-initrd-swraid.nix {};
@@ -543,11 +634,16 @@ in {
   systemd-networkd-dhcpserver-static-leases = handleTest ./systemd-networkd-dhcpserver-static-leases.nix {};
   systemd-networkd-ipv6-prefix-delegation = handleTest ./systemd-networkd-ipv6-prefix-delegation.nix {};
   systemd-networkd-vrf = handleTest ./systemd-networkd-vrf.nix {};
+  systemd-no-tainted = handleTest ./systemd-no-tainted.nix {};
   systemd-nspawn = handleTest ./systemd-nspawn.nix {};
+  systemd-oomd = handleTest ./systemd-oomd.nix {};
+  systemd-portabled = handleTest ./systemd-portabled.nix {};
   systemd-shutdown = handleTest ./systemd-shutdown.nix {};
   systemd-timesyncd = handleTest ./systemd-timesyncd.nix {};
   systemd-misc = handleTest ./systemd-misc.nix {};
+  tandoor-recipes = handleTest ./tandoor-recipes.nix {};
   taskserver = handleTest ./taskserver.nix {};
+  tayga = handleTest ./tayga.nix {};
   teeworlds = handleTest ./teeworlds.nix {};
   telegraf = handleTest ./telegraf.nix {};
   teleport = handleTest ./teleport.nix {};
@@ -559,12 +655,14 @@ in {
   tinc = handleTest ./tinc {};
   tinydns = handleTest ./tinydns.nix {};
   tinywl = handleTest ./tinywl.nix {};
+  tmate-ssh-server = handleTest ./tmate-ssh-server.nix { };
   tomcat = handleTest ./tomcat.nix {};
   tor = handleTest ./tor.nix {};
-  # traefik test relies on docker-containers
-  traefik = handleTestOn ["x86_64-linux"] ./traefik.nix {};
+  traefik = handleTestOn ["aarch64-linux" "x86_64-linux"] ./traefik.nix {};
   trafficserver = handleTest ./trafficserver.nix {};
   transmission = handleTest ./transmission.nix {};
+  # tracee requires bpf
+  tracee = handleTestOn ["x86_64-linux"] ./tracee.nix {};
   trezord = handleTest ./trezord.nix {};
   trickster = handleTest ./trickster.nix {};
   trilium-server = handleTestOn ["x86_64-linux"] ./trilium-server.nix {};
@@ -580,12 +678,16 @@ in {
   unit-php = handleTest ./web-servers/unit-php.nix {};
   upnp = handleTest ./upnp.nix {};
   uptermd = handleTest ./uptermd.nix {};
+  uptime-kuma = handleTest ./uptime-kuma.nix {};
   usbguard = handleTest ./usbguard.nix {};
   user-activation-scripts = handleTest ./user-activation-scripts.nix {};
   user-home-mode = handleTest ./user-home-mode.nix {};
   uwsgi = handleTest ./uwsgi.nix {};
   v2ray = handleTest ./v2ray.nix {};
+  varnish60 = handleTest ./varnish.nix { package = pkgs.varnish60; };
+  varnish72 = handleTest ./varnish.nix { package = pkgs.varnish72; };
   vault = handleTest ./vault.nix {};
+  vault-dev = handleTest ./vault-dev.nix {};
   vault-postgresql = handleTest ./vault-postgresql.nix {};
   vaultwarden = handleTest ./vaultwarden.nix {};
   vector = handleTest ./vector.nix {};
@@ -595,6 +697,7 @@ in {
   virtualbox = handleTestOn ["x86_64-linux"] ./virtualbox.nix {};
   vscodium = discoverTests (import ./vscodium.nix);
   vsftpd = handleTest ./vsftpd.nix {};
+  warzone2100 = handleTest ./warzone2100.nix {};
   wasabibackend = handleTest ./wasabibackend.nix {};
   wiki-js = handleTest ./wiki-js.nix {};
   wine = handleTest ./wine.nix {};
@@ -603,11 +706,14 @@ in {
   wmderland = handleTest ./wmderland.nix {};
   wpa_supplicant = handleTest ./wpa_supplicant.nix {};
   wordpress = handleTest ./wordpress.nix {};
+  wrappers = handleTest ./wrappers.nix {};
+  writefreely = handleTest ./web-apps/writefreely.nix {};
   xandikos = handleTest ./xandikos.nix {};
   xautolock = handleTest ./xautolock.nix {};
   xfce = handleTest ./xfce.nix {};
   xmonad = handleTest ./xmonad.nix {};
   xmonad-xdg-autostart = handleTest ./xmonad-xdg-autostart.nix {};
+  xpadneo = handleTest ./xpadneo.nix {};
   xrdp = handleTest ./xrdp.nix {};
   xss-lock = handleTest ./xss-lock.nix {};
   xterm = handleTest ./xterm.nix {};
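
The discoverTests comment in the all-tests.nix hunk above distinguishes two shapes of test expression, and the new runTest entries (_3proxy, acme, adguardhome, aesmd, agate) rely on the second one. A minimal sketch of both shapes for a hypothetical example.nix (the name and contents are illustrative only, not from this tree):

    # Shape consumed by handleTest/discoverTests: the file wraps itself in the
    # runner, so importing it yields a function still expecting { system, pkgs, ... }.
    import ./make-test-python.nix ({ pkgs, ... }: {
      name = "example";
      nodes.machine = { ... }: { };
      testScript = "machine.wait_for_unit('multi-user.target')";
    })

    # Shape consumed by runTest: a bare test module. all-tests.nix evaluates it
    # with testing-python.nix's evalTest and takes config.result as the test leaf.
    { lib, pkgs, ... }: {
      name = "example";
      nodes.machine = { ... }: { };
      testScript = "machine.wait_for_unit('multi-user.target')";
    }
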
diff --git a/nixos/tests/alps.nix b/nixos/tests/alps.nix
new file mode 100644
index 00000000000..9756f2d4da1
--- /dev/null
+++ b/nixos/tests/alps.nix
@@ -0,0 +1,108 @@
+let
+  certs = import ./common/acme/server/snakeoil-certs.nix;
+  domain = certs.domain;
+in
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "alps";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ hmenke ];
+  };
+
+  nodes = {
+    server = {
+      imports = [ ./common/user-account.nix ];
+      security.pki.certificateFiles = [
+        certs.ca.cert
+      ];
+      networking.extraHosts = ''
+        127.0.0.1 ${domain}
+      '';
+      networking.firewall.allowedTCPPorts = [ 25 465 993 ];
+      services.postfix = {
+        enable = true;
+        enableSubmission = true;
+        enableSubmissions = true;
+        tlsTrustedAuthorities = "${certs.ca.cert}";
+        sslCert = "${certs.${domain}.cert}";
+        sslKey = "${certs.${domain}.key}";
+      };
+      services.dovecot2 = {
+        enable = true;
+        enableImap = true;
+        sslCACert = "${certs.ca.cert}";
+        sslServerCert = "${certs.${domain}.cert}";
+        sslServerKey = "${certs.${domain}.key}";
+      };
+    };
+
+    client = { nodes, config, ... }: {
+      security.pki.certificateFiles = [
+        certs.ca.cert
+      ];
+      networking.extraHosts = ''
+        ${nodes.server.config.networking.primaryIPAddress} ${domain}
+      '';
+      services.alps = {
+        enable = true;
+        theme = "alps";
+        imaps = {
+          host = domain;
+          port = 993;
+        };
+        smtps = {
+          host = domain;
+          port = 465;
+        };
+      };
+      environment.systemPackages = [
+        (pkgs.writers.writePython3Bin "test-alps-login" { } ''
+          from urllib.request import build_opener, HTTPCookieProcessor, Request
+          from urllib.parse import urlencode, urljoin
+          from http.cookiejar import CookieJar
+
+          baseurl = "http://localhost:${toString config.services.alps.port}"
+          username = "alice"
+          password = "${nodes.server.config.users.users.alice.password}"
+          cookiejar = CookieJar()
+          cookieprocessor = HTTPCookieProcessor(cookiejar)
+          opener = build_opener(cookieprocessor)
+
+          data = urlencode({"username": username, "password": password}).encode()
+          req = Request(urljoin(baseurl, "login"), data=data, method="POST")
+          with opener.open(req) as ret:
+              # Check that the alps_session cookie is set
+              print(cookiejar)
+              assert any(cookie.name == "alps_session" for cookie in cookiejar)
+
+          req = Request(baseurl)
+          with opener.open(req) as ret:
+              # Check that the alps_session cookie is still there...
+              print(cookiejar)
+              assert any(cookie.name == "alps_session" for cookie in cookiejar)
+              # ...and that we have not been redirected back to the login page
+              print(ret.url)
+              assert ret.url == urljoin(baseurl, "mailbox/INBOX")
+
+          req = Request(urljoin(baseurl, "logout"))
+          with opener.open(req) as ret:
+              # Check that the alps_session cookie is now gone
+              print(cookiejar)
+              assert all(cookie.name != "alps_session" for cookie in cookiejar)
+        '')
+      ];
+    };
+  };
+
+  testScript = { nodes, ... }: ''
+    server.start()
+    server.wait_for_unit("postfix.service")
+    server.wait_for_unit("dovecot2.service")
+    server.wait_for_open_port(465)
+    server.wait_for_open_port(993)
+
+    client.start()
+    client.wait_for_unit("alps.service")
+    client.wait_for_open_port(${toString nodes.client.config.services.alps.port})
+    client.succeed("test-alps-login")
+  '';
+})
diff --git a/nixos/tests/atuin.nix b/nixos/tests/atuin.nix
new file mode 100644
index 00000000000..85213d1e53e
--- /dev/null
+++ b/nixos/tests/atuin.nix
@@ -0,0 +1,65 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }:
+
+let
+  testPort = 8888;
+  testUser = "testerman";
+  testPass = "password";
+  testEmail = "test.testerman@test.com";
+in
+with lib;
+{
+  name = "atuin";
+  meta.maintainers = with pkgs.lib.maintainers; [ devusb ];
+
+  nodes = {
+    server =
+      { ... }:
+      {
+        services.atuin = {
+          enable = true;
+          port = testPort;
+          host = "0.0.0.0";
+          openFirewall = true;
+          openRegistration = true;
+        };
+      };
+
+    client =
+      { ... }:
+      { };
+
+  };
+
+  testScript = with pkgs; ''
+    start_all()
+
+    # wait for atuin server startup
+    server.wait_for_unit("atuin.service")
+    server.wait_for_open_port(${toString testPort})
+
+    # configure atuin client on server node
+    server.execute("mkdir -p ~/.config/atuin")
+    server.execute("echo 'sync_address = \"http://localhost:${toString testPort}\"' > ~/.config/atuin/config.toml")
+
+    # register with atuin server on server node
+    server.succeed("${atuin}/bin/atuin register -u ${testUser} -p ${testPass} -e ${testEmail}")
+    _, key = server.execute("${atuin}/bin/atuin key")
+
+    # store test record in atuin server and sync
+    server.succeed("ATUIN_SESSION=$(${atuin}/bin/atuin uuid) ${atuin}/bin/atuin history start 'shazbot'")
+    server.succeed("${atuin}/bin/atuin sync")
+
+    # configure atuin client on client node
+    client.execute("mkdir -p ~/.config/atuin")
+    client.execute("echo 'sync_address = \"http://server:${toString testPort}\"' > ~/.config/atuin/config.toml")
+
+    # log in to atuin server on client node
+    client.succeed(f"${atuin}/bin/atuin login -u ${testUser} -p ${testPass} -k {key}")
+
+    # pull records from atuin server
+    client.succeed("${atuin}/bin/atuin sync -f")
+
+    # check for test record
+    client.succeed("ATUIN_SESSION=$(${atuin}/bin/atuin uuid) ${atuin}/bin/atuin history list | grep shazbot")
+  '';
+})
diff --git a/nixos/tests/auth-mysql.nix b/nixos/tests/auth-mysql.nix
new file mode 100644
index 00000000000..0ed4b050a69
--- /dev/null
+++ b/nixos/tests/auth-mysql.nix
@@ -0,0 +1,177 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }:
+
+let
+  dbUser = "nixos_auth";
+  dbPassword = "topsecret123";
+  dbName = "auth";
+
+  mysqlUsername = "mysqltest";
+  mysqlPassword = "topsecretmysqluserpassword123";
+  mysqlGroup = "mysqlusers";
+
+  localUsername = "localtest";
+  localPassword = "topsecretlocaluserpassword123";
+
+  mysqlInit = pkgs.writeText "mysqlInit" ''
+      CREATE USER '${dbUser}'@'localhost' IDENTIFIED BY '${dbPassword}';
+      CREATE DATABASE ${dbName};
+      GRANT ALL PRIVILEGES ON ${dbName}.* TO '${dbUser}'@'localhost';
+      FLUSH PRIVILEGES;
+
+      USE ${dbName};
+      CREATE TABLE `groups` (
+        rowid int(11) NOT NULL auto_increment,
+        gid int(11) NOT NULL,
+        name char(255) NOT NULL,
+        PRIMARY KEY (rowid)
+      );
+
+      CREATE TABLE `users` (
+        name varchar(255) NOT NULL,
+        uid int(11) NOT NULL auto_increment,
+        gid int(11) NOT NULL,
+        password varchar(255) NOT NULL,
+        PRIMARY KEY (uid),
+        UNIQUE (name)
+      ) AUTO_INCREMENT=5000;
+
+      INSERT INTO `users` (name, uid, gid, password) VALUES
+      ('${mysqlUsername}', 5000, 5000, SHA2('${mysqlPassword}', 256));
+      INSERT INTO `groups` (name, gid) VALUES ('${mysqlGroup}', 5000);
+    '';
+in
+{
+  name = "auth-mysql";
+  meta.maintainers = with lib.maintainers; [ netali ];
+
+  nodes.machine =
+    { ... }:
+    {
+      services.mysql = {
+        enable = true;
+        package = pkgs.mariadb;
+        settings.mysqld.bind-address = "127.0.0.1";
+        initialScript = mysqlInit;
+      };
+
+      users.users.${localUsername} = {
+        isNormalUser = true;
+        password = localPassword;
+      };
+
+      security.pam.services.login.makeHomeDir = true;
+
+      users.mysql = {
+        enable = true;
+        host = "127.0.0.1";
+        user = dbUser;
+        database = dbName;
+        passwordFile = "${builtins.toFile "dbPassword" dbPassword}";
+        pam = {
+          table = "users";
+          userColumn = "name";
+          passwordColumn = "password";
+          passwordCrypt = "sha256";
+          disconnectEveryOperation = true;
+        };
+        nss = {
+          getpwnam = ''
+            SELECT name, 'x', uid, gid, name, CONCAT('/home/', name), "/run/current-system/sw/bin/bash" \
+            FROM users \
+            WHERE name='%1$s' \
+            LIMIT 1
+          '';
+          getpwuid = ''
+            SELECT name, 'x', uid, gid, name, CONCAT('/home/', name), "/run/current-system/sw/bin/bash" \
+            FROM users \
+            WHERE id=%1$u \
+            LIMIT 1
+          '';
+          getspnam = ''
+            SELECT name, password, 1, 0, 99999, 7, 0, -1, 0 \
+            FROM users \
+            WHERE name='%1$s' \
+            LIMIT 1
+          '';
+          getpwent = ''
+            SELECT name, 'x', uid, gid, name, CONCAT('/home/', name), "/run/current-system/sw/bin/bash" \
+            FROM users
+          '';
+          getspent = ''
+            SELECT name, password, 1, 0, 99999, 7, 0, -1, 0 \
+            FROM users
+          '';
+          getgrnam = ''
+            SELECT name, 'x', gid FROM groups WHERE name='%1$s' LIMIT 1
+          '';
+          getgrgid = ''
+            SELECT name, 'x', gid FROM groups WHERE gid='%1$u' LIMIT 1
+          '';
+          getgrent = ''
+            SELECT name, 'x', gid FROM groups
+          '';
+          memsbygid = ''
+            SELECT name FROM users WHERE gid=%1$u
+          '';
+          gidsbymem = ''
+            SELECT gid FROM users WHERE name='%1$s'
+          '';
+        };
+      };
+    };
+
+  testScript = ''
+    def switch_to_tty(tty_number):
+        machine.fail(f"pgrep -f 'agetty.*tty{tty_number}'")
+        machine.send_key(f"alt-f{tty_number}")
+        machine.wait_until_succeeds(f"[ $(fgconsole) = {tty_number} ]")
+        machine.wait_for_unit(f"getty@tty{tty_number}.service")
+        machine.wait_until_succeeds(f"pgrep -f 'agetty.*tty{tty_number}'")
+
+
+    def try_login(tty_number, username, password):
+        machine.wait_until_tty_matches(tty_number, "login: ")
+        machine.send_chars(f"{username}\n")
+        machine.wait_until_tty_matches(tty_number, f"login: {username}")
+        machine.wait_until_succeeds("pgrep login")
+        machine.wait_until_tty_matches(tty_number, "Password: ")
+        machine.send_chars(f"{password}\n")
+
+
+    machine.wait_for_unit("multi-user.target")
+    machine.wait_for_unit("mysql.service")
+    machine.wait_until_succeeds("pgrep -f 'agetty.*tty1'")
+
+    with subtest("Local login"):
+        switch_to_tty("2")
+        try_login("2", "${localUsername}", "${localPassword}")
+
+        machine.wait_until_succeeds("pgrep -u ${localUsername} bash")
+        machine.send_chars("id > local_id.txt\n")
+        machine.wait_for_file("/home/${localUsername}/local_id.txt")
+        machine.succeed("cat /home/${localUsername}/local_id.txt | grep 'uid=1000(${localUsername}) gid=100(users) groups=100(users)'")
+
+    with subtest("Local incorrect login"):
+        switch_to_tty("3")
+        try_login("3", "${localUsername}", "wrongpassword")
+
+        machine.wait_until_tty_matches("3", "Login incorrect")
+        machine.wait_until_tty_matches("3", "login:")
+
+    with subtest("MySQL login"):
+        switch_to_tty("4")
+        try_login("4", "${mysqlUsername}", "${mysqlPassword}")
+
+        machine.wait_until_succeeds("pgrep -u ${mysqlUsername} bash")
+        machine.send_chars("id > mysql_id.txt\n")
+        machine.wait_for_file("/home/${mysqlUsername}/mysql_id.txt")
+        machine.succeed("cat /home/${mysqlUsername}/mysql_id.txt | grep 'uid=5000(${mysqlUsername}) gid=5000(${mysqlGroup}) groups=5000(${mysqlGroup})'")
+
+    with subtest("MySQL incorrect login"):
+        switch_to_tty("5")
+        try_login("5", "${mysqlUsername}", "wrongpassword")
+
+        machine.wait_until_tty_matches("5", "Login incorrect")
+        machine.wait_until_tty_matches("5", "login:")
+    '';
+})
diff --git a/nixos/tests/bazarr.nix b/nixos/tests/bazarr.nix
index efcd9de0108..e59833e5e94 100644
--- a/nixos/tests/bazarr.nix
+++ b/nixos/tests/bazarr.nix
@@ -20,7 +20,7 @@ in
 
   testScript = ''
     machine.wait_for_unit("bazarr.service")
-    machine.wait_for_open_port(port)
+    machine.wait_for_open_port(${toString port})
     machine.succeed("curl --fail http://localhost:${toString port}/")
   '';
 })
diff --git a/nixos/tests/bitcoind.nix b/nixos/tests/bitcoind.nix
index 04655b7f6a5..7726a23d853 100644
--- a/nixos/tests/bitcoind.nix
+++ b/nixos/tests/bitcoind.nix
@@ -13,9 +13,11 @@ import ./make-test-python.nix ({ pkgs, ... }: {
         users.rpc2.passwordHMAC = "1495e4a3ad108187576c68f7f9b5ddc5$accce0881c74aa01bb8960ff3bdbd39f607fd33178147679e055a4ac35f53225";
       };
     };
+
+    environment.etc."test.blank".text = "";
     services.bitcoind."testnet" = {
       enable = true;
-      configFile = "/test.blank";
+      configFile = "/etc/test.blank";
       testnet = true;
       rpc = {
         port = 18332;
diff --git a/nixos/tests/bootspec.nix b/nixos/tests/bootspec.nix
new file mode 100644
index 00000000000..13360bb1eaa
--- /dev/null
+++ b/nixos/tests/bootspec.nix
@@ -0,0 +1,144 @@
+{ system ? builtins.currentSystem,
+  config ? {},
+  pkgs ? import ../.. { inherit system config; }
+}:
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
+with pkgs.lib;
+
+let
+  baseline = {
+    virtualisation.useBootLoader = true;
+  };
+  grub = {
+    boot.loader.grub.enable = true;
+  };
+  systemd-boot = {
+    boot.loader.systemd-boot.enable = true;
+  };
+  uefi = {
+    virtualisation.useEFIBoot = true;
+    boot.loader.efi.canTouchEfiVariables = true;
+    boot.loader.grub.efiSupport = true;
+    environment.systemPackages = [ pkgs.efibootmgr ];
+  };
+  standard = {
+    boot.bootspec.enable = true;
+
+    imports = [
+      baseline
+      systemd-boot
+      uefi
+    ];
+  };
+in
+{
+  basic = makeTest {
+    name = "systemd-boot-with-bootspec";
+    meta.maintainers = with pkgs.lib.maintainers; [ raitobezarius ];
+
+    nodes.machine = standard;
+
+    testScript = ''
+      machine.start()
+      machine.wait_for_unit("multi-user.target")
+
+      machine.succeed("test -e /run/current-system/bootspec/boot.json")
+    '';
+  };
+
+  grub = makeTest {
+    name = "grub-with-bootspec";
+    meta.maintainers = with pkgs.lib.maintainers; [ raitobezarius ];
+
+    nodes.machine = {
+      boot.bootspec.enable = true;
+
+      imports = [
+        baseline
+        grub
+        uefi
+      ];
+    };
+
+    testScript = ''
+      machine.start()
+      machine.wait_for_unit("multi-user.target")
+
+      machine.succeed("test -e /run/current-system/bootspec/boot.json")
+    '';
+  };
+
+  legacy-boot = makeTest {
+    name = "legacy-boot-with-bootspec";
+    meta.maintainers = with pkgs.lib.maintainers; [ raitobezarius ];
+
+    nodes.machine = {
+      boot.bootspec.enable = true;
+
+      imports = [
+        baseline
+        grub
+      ];
+    };
+
+    testScript = ''
+      machine.start()
+      machine.wait_for_unit("multi-user.target")
+
+      machine.succeed("test -e /run/current-system/bootspec/boot.json")
+    '';
+  };
+
+  # Check that specialisations create corresponding entries in bootspec.
+  specialisation = makeTest {
+    name = "bootspec-with-specialisation";
+    meta.maintainers = with pkgs.lib.maintainers; [ raitobezarius ];
+
+    nodes.machine = {
+      imports = [ standard ];
+      environment.systemPackages = [ pkgs.jq ];
+      specialisation.something.configuration = {};
+    };
+
+    testScript = ''
+      import json
+
+      machine.start()
+      machine.wait_for_unit("multi-user.target")
+
+      machine.succeed("test -e /run/current-system/bootspec/boot.json")
+      machine.succeed("test -e /run/current-system/specialisation/something/bootspec/boot.json")
+
+      sp_in_parent = json.loads(machine.succeed("jq -r '.v1.specialisation.something' /run/current-system/bootspec/boot.json"))
+      sp_in_fs = json.loads(machine.succeed("cat /run/current-system/specialisation/something/bootspec/boot.json"))
+
+      assert sp_in_parent == sp_in_fs['v1'], "Bootspecs of the same specialisation are different!"
+    '';
+  };
+
+  # Check that extensions are propagated.
+  extensions = makeTest {
+    name = "bootspec-with-extensions";
+    meta.maintainers = with pkgs.lib.maintainers; [ raitobezarius ];
+
+    nodes.machine = { config, ... }: {
+      imports = [ standard ];
+      environment.systemPackages = [ pkgs.jq ];
+      boot.bootspec.extensions = {
+        osRelease = config.environment.etc."os-release".source;
+      };
+    };
+
+    testScript = ''
+      machine.start()
+      machine.wait_for_unit("multi-user.target")
+
+      current_os_release = machine.succeed("cat /etc/os-release")
+      bootspec_os_release = machine.succeed("cat $(jq -r '.v1.extensions.osRelease' /run/current-system/bootspec/boot.json)")
+
+      assert current_os_release == bootspec_os_release, "Filename referenced by extension has unexpected contents"
+    '';
+  };
+
+}
diff --git a/nixos/tests/borgbackup.nix b/nixos/tests/borgbackup.nix
index d3cd6c66bfe..9afe4d537da 100644
--- a/nixos/tests/borgbackup.nix
+++ b/nixos/tests/borgbackup.nix
@@ -99,6 +99,18 @@ in {
           environment.BORG_RSH = "ssh -oStrictHostKeyChecking=no -i /root/id_ed25519";
         };
 
+        sleepInhibited = {
+          inhibitsSleep = true;
+          # Blocks indefinitely while "backing up" so that we can try to suspend the local system while it's hung
+          dumpCommand = pkgs.writeScript "sleepInhibited" ''
+            cat /dev/zero
+          '';
+          repo = remoteRepo;
+          encryption.mode = "none";
+          startAt = [ ];
+          environment.BORG_RSH = "ssh -oStrictHostKeyChecking=no -i /root/id_ed25519";
+        };
+
       };
     };
 
@@ -204,5 +216,13 @@ in {
         client.wait_for_unit("network.target")
         client.systemctl("start --wait borgbackup-job-commandFail")
         client.succeed("systemctl is-failed borgbackup-job-commandFail")
+
+    with subtest("sleepInhibited"):
+        server.wait_for_unit("sshd.service")
+        client.wait_for_unit("network.target")
+        client.fail("systemd-inhibit --list | grep -q borgbackup")
+        client.systemctl("start borgbackup-job-sleepInhibited")
+        client.wait_until_succeeds("systemd-inhibit --list | grep -q borgbackup")
+        client.systemctl("stop borgbackup-job-sleepInhibited")
   '';
 })
diff --git a/nixos/tests/btrbk-section-order.nix b/nixos/tests/btrbk-section-order.nix
new file mode 100644
index 00000000000..20f1afcf80e
--- /dev/null
+++ b/nixos/tests/btrbk-section-order.nix
@@ -0,0 +1,51 @@
+# This test validates the order of generated sections that may contain
+# other sections.
+# When a `volume` section has both `subvolume` and `target` children,
+# `target` must go before `subvolume`. Otherwise, `target` will become
+# a child of the last `subvolume` instead of `volume`, due to the
+# order-sensitive config format.
+#
+# Issue: https://github.com/NixOS/nixpkgs/issues/195660
+import ./make-test-python.nix ({ lib, pkgs, ... }: {
+  name = "btrbk-section-order";
+  meta.maintainers = with lib.maintainers; [ oxalica ];
+
+  nodes.machine = { ... }: {
+    services.btrbk.instances.local = {
+      onCalendar = null;
+      settings = {
+        timestamp_format = "long";
+        target."ssh://global-target/".ssh_user = "root";
+        volume."/btrfs" = {
+          snapshot_dir = "/volume-snapshots";
+          target."ssh://volume-target/".ssh_user = "root";
+          subvolume."@subvolume" = {
+            snapshot_dir = "/subvolume-snapshots";
+            target."ssh://subvolume-target/".ssh_user = "root";
+          };
+        };
+      };
+    };
+  };
+
+  testScript = ''
+    machine.wait_for_unit("basic.target")
+    got = machine.succeed("cat /etc/btrbk/local.conf")
+    expect = """
+    backend btrfs-progs-sudo
+    timestamp_format long
+    target ssh://global-target/
+     ssh_user root
+    volume /btrfs
+     snapshot_dir /volume-snapshots
+     target ssh://volume-target/
+      ssh_user root
+     subvolume @subvolume
+      snapshot_dir /subvolume-snapshots
+      target ssh://subvolume-target/
+       ssh_user root
+    """.strip()
+    print(got)
+    assert got == expect
+  '';
+})
diff --git a/nixos/tests/cagebreak.nix b/nixos/tests/cagebreak.nix
index 1dcc910f974..1fef7cb57cf 100644
--- a/nixos/tests/cagebreak.nix
+++ b/nixos/tests/cagebreak.nix
@@ -33,6 +33,7 @@ in
 
     hardware.opengl.enable = true;
     programs.xwayland.enable = true;
+    security.polkit.enable = true;
     environment.systemPackages = [ pkgs.cagebreak pkgs.wayland-utils ];
 
     # Need to switch to a different GPU driver than the default one (-vga std) so that Cagebreak can launch:
diff --git a/nixos/tests/chromium.nix b/nixos/tests/chromium.nix
index 6b296fe8a61..cdfdcc9bcdd 100644
--- a/nixos/tests/chromium.nix
+++ b/nixos/tests/chromium.nix
@@ -39,7 +39,9 @@ mapAttrs (channel: chromiumPkg: makeTest {
   name = "chromium-${channel}";
   meta = {
     maintainers = with maintainers; [ aszlig primeos ];
+  } // optionalAttrs (chromiumPkg.meta ? timeout) {
     # https://github.com/NixOS/hydra/issues/591#issuecomment-435125621
+    # Note: optionalAttrs is used since meta.timeout is not set for Google Chrome
     inherit (chromiumPkg.meta) timeout;
   };
 
@@ -65,6 +67,9 @@ mapAttrs (channel: chromiumPkg: makeTest {
     from contextlib import contextmanager
 
 
+    major_version = "${versions.major (getVersion chromiumPkg.name)}"
+
+
     # Run as user alice
     def ru(cmd):
         return "su - ${user} -c " + shlex.quote(cmd)
@@ -84,7 +89,6 @@ mapAttrs (channel: chromiumPkg: makeTest {
             binary = pname
         # Add optional CLI options:
         options = []
-        major_version = "${versions.major (getVersion chromiumPkg.name)}"
         if major_version > "95" and not pname.startswith("google-chrome"):
             # Workaround to avoid a GPU crash:
             options.append("--use-gl=swiftshader")
@@ -162,6 +166,8 @@ mapAttrs (channel: chromiumPkg: makeTest {
         clipboard = machine.succeed(
             ru("${pkgs.xclip}/bin/xclip -o")
         )
+        if url == "chrome://gpu":
+            clipboard = ""  # TODO: We cannot copy the text via Ctrl+a
         print(f"{description} window content:\n{clipboard}")
         with machine.nested(description):
             yield clipboard
@@ -242,9 +248,10 @@ mapAttrs (channel: chromiumPkg: makeTest {
         machine.screenshot("after_copy_from_chromium")
 
 
-    with test_new_win("gpu_info", "chrome://gpu", "chrome://gpu"):
+    with test_new_win("gpu_info", "chrome://gpu", "GPU Internals"):
         # To check the text rendering (catches regressions like #131074):
         machine.wait_for_text("Graphics Feature Status")
+        # TODO: Fix copying all of the text to the clipboard
 
 
     with test_new_win("version_info", "chrome://version", "About Version") as clipboard:
diff --git a/nixos/tests/cinnamon.nix b/nixos/tests/cinnamon.nix
new file mode 100644
index 00000000000..f0add414292
--- /dev/null
+++ b/nixos/tests/cinnamon.nix
@@ -0,0 +1,68 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "cinnamon";
+
+  meta = with lib; {
+    maintainers = teams.cinnamon.members;
+  };
+
+  nodes.machine = { nodes, ... }: {
+    imports = [ ./common/user-account.nix ];
+    services.xserver.enable = true;
+    services.xserver.desktopManager.cinnamon.enable = true;
+  };
+
+  enableOCR = true;
+
+  testScript = { nodes, ... }:
+    let
+      user = nodes.machine.config.users.users.alice;
+      uid = toString user.uid;
+      bus = "DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/${uid}/bus";
+      display = "DISPLAY=:0.0";
+      env = "${bus} ${display}";
+      gdbus = "${env} gdbus";
+      su = command: "su - ${user.name} -c '${env} ${command}'";
+
+      # Call JavaScript in Cinnamon (the shell); returns a tuple (success, output),
+      # where `success` is true if the dbus call was successful and `output` is what
+      # the JavaScript evaluates to.
+      eval = "call --session -d org.Cinnamon -o /org/Cinnamon -m org.Cinnamon.Eval";
+
+      # Should be 2 (RunState.RUNNING) when startup is done.
+      # https://github.com/linuxmint/cinnamon/blob/5.4.0/js/ui/main.js#L183-L187
+      getRunState = su "${gdbus} ${eval} Main.runState";
+
+      # Start gnome-terminal.
+      gnomeTerminalCommand = su "gnome-terminal";
+
+      # Hopefully gnome-terminal's wm class.
+      wmClass = su "${gdbus} ${eval} global.display.focus_window.wm_class";
+    in
+    ''
+      machine.wait_for_unit("display-manager.service")
+
+      with subtest("Test if we can see username in slick-greeter"):
+          machine.wait_for_text("${user.description}")
+          machine.screenshot("slick_greeter_lightdm")
+
+      with subtest("Login with slick-greeter"):
+          machine.send_chars("${user.password}\n")
+          machine.wait_for_x()
+          machine.wait_for_file("${user.home}/.Xauthority")
+          machine.succeed("xauth merge ${user.home}/.Xauthority")
+
+      with subtest("Check that logging in has given the user ownership of devices"):
+          machine.succeed("getfacl -p /dev/snd/timer | grep -q ${user.name}")
+
+      with subtest("Wait for the Cinnamon shell"):
+          # Correct output should be (true, '2')
+          machine.wait_until_succeeds("${getRunState} | grep -q 'true,..2'")
+
+      with subtest("Open GNOME Terminal"):
+          machine.succeed("${gnomeTerminalCommand}")
+          # Correct output should be (true, '"Gnome-terminal"')
+          machine.wait_until_succeeds("${wmClass} | grep -q 'true,...Gnome-terminal'")
+          machine.sleep(20)
+          machine.screenshot("screen")
+    '';
+})
diff --git a/nixos/tests/cloud-init-hostname.nix b/nixos/tests/cloud-init-hostname.nix
new file mode 100644
index 00000000000..7c657cc9f6f
--- /dev/null
+++ b/nixos/tests/cloud-init-hostname.nix
@@ -0,0 +1,46 @@
+{ system ? builtins.currentSystem,
+  config ? {},
+  pkgs ? import ../.. { inherit system config; }
+}:
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
+with pkgs.lib;
+
+let
+  # Hostname can also be set through "hostname" in user-data.
+  # This is how Proxmox configures the hostname through cloud-init.
+  metadataDrive = pkgs.stdenv.mkDerivation {
+    name = "metadata";
+    buildCommand = ''
+      mkdir -p $out/iso
+
+      cat << EOF > $out/iso/user-data
+      #cloud-config
+      hostname: testhostname
+      EOF
+
+      cat << EOF > $out/iso/meta-data
+      instance-id: iid-local02
+      EOF
+
+      ${pkgs.cdrkit}/bin/genisoimage -volid cidata -joliet -rock -o $out/metadata.iso $out/iso
+    '';
+  };
+
+in makeTest {
+  name = "cloud-init-hostname";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ lewo illustris ];
+  };
+
+  nodes.machine2 = { ... }: {
+    virtualisation.qemu.options = [ "-cdrom" "${metadataDrive}/metadata.iso" ];
+    services.cloud-init.enable = true;
+    networking.hostName = "";
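+    # With no hostname configured, the machine's system.name falls back to "unnamed",
+    # hence the "unnamed" machine used in the testScript below.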
+  };
+
+  testScript = ''
+    unnamed.wait_for_unit("cloud-final.service")
+    assert "testhostname" in unnamed.succeed("hostname")
+  '';
+}
diff --git a/nixos/tests/cloud-init.nix b/nixos/tests/cloud-init.nix
index 9feb8d5c152..786e01add7d 100644
--- a/nixos/tests/cloud-init.nix
+++ b/nixos/tests/cloud-init.nix
@@ -49,18 +49,17 @@ let
               gateway: '12.34.56.9'
           - type: nameserver
             address:
-            - '8.8.8.8'
+            - '6.7.8.9'
             search:
             - 'example.com'
       EOF
       ${pkgs.cdrkit}/bin/genisoimage -volid cidata -joliet -rock -o $out/metadata.iso $out/iso
       '';
   };
+
 in makeTest {
   name = "cloud-init";
-  meta = with pkgs.lib.maintainers; {
-    maintainers = [ lewo ];
-  };
+  meta.maintainers = with pkgs.lib.maintainers; [ lewo illustris ];
   nodes.machine = { ... }:
   {
     virtualisation.qemu.options = [ "-cdrom" "${metadataDrive}/metadata.iso" ];
@@ -89,21 +88,27 @@ in makeTest {
 
     # we should be able to log in as the root user, as well as the created nixos user
     unnamed.succeed(
-        "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o IdentityFile=~/.ssh/id_snakeoil root@localhost 'true'"
+        "timeout 10 ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o IdentityFile=~/.ssh/id_snakeoil root@localhost 'true'"
     )
     unnamed.succeed(
-        "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o IdentityFile=~/.ssh/id_snakeoil nixos@localhost 'true'"
+        "timeout 10 ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o IdentityFile=~/.ssh/id_snakeoil nixos@localhost 'true'"
     )
 
     # test changing hostname via cloud-init worked
     assert (
         unnamed.succeed(
-            "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o IdentityFile=~/.ssh/id_snakeoil nixos@localhost 'hostname'"
+            "timeout 10 ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o IdentityFile=~/.ssh/id_snakeoil nixos@localhost 'hostname'"
         ).strip()
         == "test"
     )
 
+    # check IP and route configs
     assert "default via 12.34.56.9 dev eth0 proto static" in unnamed.succeed("ip route")
     assert "12.34.56.0/24 dev eth0 proto kernel scope link src 12.34.56.78" in unnamed.succeed("ip route")
+
+    # check nameserver and search configs
+    assert "6.7.8.9" in unnamed.succeed("resolvectl status")
+    assert "example.com" in unnamed.succeed("resolvectl status")
+
   '';
 }
diff --git a/nixos/tests/cockroachdb.nix b/nixos/tests/cockroachdb.nix
index d793842f0ab..5b1e1a7dee1 100644
--- a/nixos/tests/cockroachdb.nix
+++ b/nixos/tests/cockroachdb.nix
@@ -8,7 +8,7 @@
 # Cluster joins that are outside this window will fail, and nodes that skew
 # outside the window after joining will promptly get kicked out.
 #
-# To accomodate this, we use QEMU/virtio infrastructure and load the 'ptp_kvm'
+# To accommodate this, we use QEMU/virtio infrastructure and load the 'ptp_kvm'
 # driver inside a guest. This driver allows the host machine to pass its clock
 # through to the guest as a hardware clock that appears as a Precision Time
 # Protocol (PTP) Clock device, generally /dev/ptp0. PTP devices can be measured
diff --git a/nixos/tests/common/acme/client/default.nix b/nixos/tests/common/acme/client/default.nix
index 9dbe345e7a0..503e610d1ac 100644
--- a/nixos/tests/common/acme/client/default.nix
+++ b/nixos/tests/common/acme/client/default.nix
@@ -1,7 +1,7 @@
 { lib, nodes, pkgs, ... }:
 let
-  caCert = nodes.acme.config.test-support.acme.caCert;
-  caDomain = nodes.acme.config.test-support.acme.caDomain;
+  caCert = nodes.acme.test-support.acme.caCert;
+  caDomain = nodes.acme.test-support.acme.caDomain;
 
 in {
   security.acme = {
diff --git a/nixos/tests/common/acme/server/acme.test.cert.pem b/nixos/tests/common/acme/server/acme.test.cert.pem
index 76b0d916a81..48f488ab8f9 100644
--- a/nixos/tests/common/acme/server/acme.test.cert.pem
+++ b/nixos/tests/common/acme/server/acme.test.cert.pem
@@ -1,19 +1,19 @@
 -----BEGIN CERTIFICATE-----
-MIIDLDCCAhSgAwIBAgIIRDAN3FHH//IwDQYJKoZIhvcNAQELBQAwIDEeMBwGA1UE
-AxMVbWluaWNhIHJvb3QgY2EgNzg3NDZmMB4XDTIwMTAyMTEzMjgzNloXDTIyMTEy
-MDEzMjgzNlowFDESMBAGA1UEAxMJYWNtZS50ZXN0MIIBIjANBgkqhkiG9w0BAQEF
-AAOCAQ8AMIIBCgKCAQEAo8XjMVUaljcaqQ5MFhfPuQgSwdyXEUbpSHz+5yPkE0h9
-Z4Xu5BJF1Oq7h5ggCtadVsIspiY6Jm6aWDOjlh4myzW5UNBNUG3OPEk50vmmHFeH
-pImHO/d8yb33QoF9VRcTZs4tuJYg7l9bSs4jNG72vYvv2YiGAcmjJcsmAZIfniCN
-Xf/LjIm+Cxykn+Vo3UuzO1w5/iuofdgWO/aZxMezmXUivlL3ih4cNzCJei8WlB/l
-EnHrkcy3ogRmmynP5zcz7vmGIJX2ji6dhCa4Got5B7eZK76o2QglhQXqPatG0AOY
-H+RfQfzKemqPG5om9MgJtwFtTOU1LoaiBw//jXKESQIDAQABo3YwdDAOBgNVHQ8B
+MIIDLDCCAhSgAwIBAgIIajCXIUnozqQwDQYJKoZIhvcNAQELBQAwIDEeMBwGA1UE
+AxMVbWluaWNhIHJvb3QgY2EgMjMwYjU4MB4XDTIyMTEyMTE3MTcxMFoXDTQyMTEy
+MTE3MTcxMFowFDESMBAGA1UEAxMJYWNtZS50ZXN0MIIBIjANBgkqhkiG9w0BAQEF
+AAOCAQ8AMIIBCgKCAQEA5INxJwKDVYNfTnkXwvKM/SufBNjvxWZxlkaMFbkAN5wJ
+6HwuesRZE9IgfRO9N+rSq1U2lDBm9gFPERqsQJVZHHJ5kkaNUr89h25+wgX5emGy
+UV2KEpCFssDD4aSBF+b0sryguCa1ZRj9b+pdfRxiYaORjSh5UzlXZoRm9iwHdzHT
+oKLlmqozqzEt0o9qpZL8gv+rv8C5BGOY6hfXAHYmkWRt87FN5BkSjgEWiY++DOAU
+X0TdobdSTrs/xJP+IbadRchqTH2kiG0g2BoCSXUsl7Mdh4IOUeQGDz/F5tH8PAtz
+p3dyjdQEFex2J5tlScLfVHoCBKV3gpCg+Keuum2j8QIDAQABo3YwdDAOBgNVHQ8B
 Af8EBAMCBaAwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMAwGA1UdEwEB
-/wQCMAAwHwYDVR0jBBgwFoAU+8IZlLV/Qp5CXqpXMLvtxWlxcJwwFAYDVR0RBA0w
-C4IJYWNtZS50ZXN0MA0GCSqGSIb3DQEBCwUAA4IBAQB0pe8I5/VDkB5VMgQB2GJV
-GKzyigfWbVez9uLmqMj9PPP/zzYKSYeq+91aMuOZrnH7NqBxSTwanULkmqAmhbJJ
-YkXw+FlFekf9FyxcuArzwzzNZDSGcjcdXpN8S2K1qkBd00iSJF9kU7pdZYCIKR20
-QirdBrELEfsJ3GU62a6N3a2YsrisZUvq5TbjGJDcytAtt+WG3gmV7RInLdFfPwbw
-bEHPCnx0uiV0nxLjd/aVT+RceVrFQVt4hR99jLoMlBitSKluZ1ljsrpIyroBhQT0
-pp/pVi6HJdijG0fsPrC325NEGAwcpotLUhczoeM/rffKJd54wLhDkfYxOyRZXivs
+/wQCMAAwHwYDVR0jBBgwFoAUvTCE3Lj/P6OWkmOGtsjcTcIDzkAwFAYDVR0RBA0w
+C4IJYWNtZS50ZXN0MA0GCSqGSIb3DQEBCwUAA4IBAQAvZM4Ik1NOXQfbPRgbolyL
+b3afsSHbhHl9B2f0HGi5EAPdwyeWZsK3BF+SKFGAW5BlXr2SSlW/MQOMiUKTadnS
+8xTOFc1Ws8JWWc82zQqWcOWEXhU+AI8p70sTVFeXPWwLFy3nBRwDH4ZPU8UFHeje
+YXqbfxrsdEFXrbCfWSzPQP24xqVt7n9Am/5XFGtDkRsYlVgLwq/F6lN9hO0/gYIx
+8NsZ8Xy+QvBlGL+z9Zo7EylB8bP9OBtOtEv9fZcnxgughieiTDs36GwdQRE2aI+d
+cG3lQX8NGxgcpDoH8+rNx7Uw7odg0gVbI3agyyvax6DPht+/bzXmHm8ogklGTOoG
 -----END CERTIFICATE-----
diff --git a/nixos/tests/common/acme/server/acme.test.key.pem b/nixos/tests/common/acme/server/acme.test.key.pem
index 741df99a372..4837f19b302 100644
--- a/nixos/tests/common/acme/server/acme.test.key.pem
+++ b/nixos/tests/common/acme/server/acme.test.key.pem
@@ -1,27 +1,27 @@
 -----BEGIN RSA PRIVATE KEY-----
-MIIEowIBAAKCAQEAo8XjMVUaljcaqQ5MFhfPuQgSwdyXEUbpSHz+5yPkE0h9Z4Xu
-5BJF1Oq7h5ggCtadVsIspiY6Jm6aWDOjlh4myzW5UNBNUG3OPEk50vmmHFeHpImH
-O/d8yb33QoF9VRcTZs4tuJYg7l9bSs4jNG72vYvv2YiGAcmjJcsmAZIfniCNXf/L
-jIm+Cxykn+Vo3UuzO1w5/iuofdgWO/aZxMezmXUivlL3ih4cNzCJei8WlB/lEnHr
-kcy3ogRmmynP5zcz7vmGIJX2ji6dhCa4Got5B7eZK76o2QglhQXqPatG0AOYH+Rf
-QfzKemqPG5om9MgJtwFtTOU1LoaiBw//jXKESQIDAQABAoIBADox/2FwVFo8ioS4
-R+Ex5OZjMAcjU6sX/516jTmlT05q2+UFerYgqB/YqXqtW/V9/brulN8VhmRRuRbO
-grq9TBu5o3hMDK0f18EkZB/MBnLbx594H033y6gEkPBZAyhRYtuNOEH3VwxdZhtW
-1Lu1EoiYSUqLcNMBy6+KWJ8GRaXyacMYBlj2lMHmyzkA/t1+2mwTGC3lT6zN0F5Y
-E5umXOxsn6Tb6q3KM9O5IvtmMMKpgj4HIHZLZ6j40nNgHwGRaAv4Sha/vx0DeBw3
-6VlNiTTPdShEkhESlM5/ocqTfI92VHJpM5gkqTYOWBi2aKIPfAopXoqoJdWl4pQ/
-NCFIu2ECgYEAzntNKIcQtf0ewe0/POo07SIFirvz6jVtYNMTzeQfL6CoEjYArJeu
-Vzc4wEQfA4ZFVerBb1/O6M449gI3zex1PH4AX0h8q8DSjrppK1Jt2TnpVh97k7Gg
-Tnat/M/yW3lWYkcMVJJ3AYurXLFTT1dYP0HvBwZN04yInrEcPNXKfmcCgYEAywyJ
-51d4AE94PrANathKqSI/gk8sP+L1gzylZCcUEAiGk/1r45iYB4HN2gvWbS+CvSdp
-F7ShlDWrTaNh2Bm1dgTjc4pWb4J+CPy/KN2sgLwIuM4+ZWIZmEDcio6khrM/gNqK
-aR7xUsvWsqU26O84woY/xR8IHjSNF7cFWE1H2c8CgYEAt6SSi2kVQ8dMg84uYE8t
-o3qO00U3OycpkOQqyQQLeKC62veMwfRl6swCfX4Y11mkcTXJtPTRYd2Ia8StPUkB
-PDwUuKoPt/JXUvoYb59wc7M+BIsbrdBdc2u6cw+/zfutCNuH6/AYSBeg4WAVaIuW
-wSwzG1xP+8cR+5IqOzEqWCECgYATweeVTCyQEyuHJghYMi2poXx+iIesu7/aAkex
-pB/Oo5W8xrb90XZRnK7UHbzCqRHWqAQQ23Gxgztk9ZXqui2vCzC6qGZauV7cLwPG
-zTMg36sVmHP314DYEM+k59ZYiQ6P0jQPoIQo407D2VGrfsOOIhQIcUmP7tsfyJ5L
-hlGMfwKBgGq4VNnnuX8I5kl03NpaKfG+M8jEHmVwtI9RkPTCCX9bMjeG0cDxqPTF
-TRkf3r8UWQTZ5QfAfAXYAOlZvmGhHjSembRbXMrMdi3rGsYRSrQL6n5NHnORUaMy
-FCWo4gyAnniry7tx9dVNgmHmbjEHuQnf8AC1r3dibRCjvJWUiQ8H
+MIIEpQIBAAKCAQEA5INxJwKDVYNfTnkXwvKM/SufBNjvxWZxlkaMFbkAN5wJ6Hwu
+esRZE9IgfRO9N+rSq1U2lDBm9gFPERqsQJVZHHJ5kkaNUr89h25+wgX5emGyUV2K
+EpCFssDD4aSBF+b0sryguCa1ZRj9b+pdfRxiYaORjSh5UzlXZoRm9iwHdzHToKLl
+mqozqzEt0o9qpZL8gv+rv8C5BGOY6hfXAHYmkWRt87FN5BkSjgEWiY++DOAUX0Td
+obdSTrs/xJP+IbadRchqTH2kiG0g2BoCSXUsl7Mdh4IOUeQGDz/F5tH8PAtzp3dy
+jdQEFex2J5tlScLfVHoCBKV3gpCg+Keuum2j8QIDAQABAoIBAHfnUHQ7qVYxfMzc
+VU+BneEqBmKwwf8+ZdOIaPDtBeQoCDrpDip05Ji15T48IUk5+hjUubLAQwZKYYaE
+DGZG918p4giS5IzKtCpgHDsKj4FbyglPn6dmFgFZjG7VtrcoBLXUrDB0fzHxDuqu
+eyeuwSCihzkeR6sXp3iveKcrKy+rA31aqWvJZb24qyAu1y8KIcf2ZMUiYcJF2kpL
+XZz4uyx4x/B9NE+PmLqo7x/9iS+p5aT2kWVCVUGmhII0ChFnWSnjxqecBMhWFY1O
+3U0lKhloj6UKBya91hGospEJdaLHpHCWUgYPvA5mG+48kqYkPkecmTf8Xha3TxPf
+g1qv3sECgYEA+hMO1qTlnqhBajCMcAGIlpRHwr97hQMdSylHBXob1xCnuTEJKHOo
+7UmQw9hJgD4JgYxcivg/OFErXdefbSae9NqSNdOshxmrxz6DFTN3Ms3WR1I1be3c
+B2mpGllMPbxJ3CKFet2CQSvOM9jfbK68R7Jlhiap0bESvWrT9ztUCWUCgYEA6e2Y
+iMNNo1dWushSMVvCkWR9CLAsnWnjFG4FYIPz/iuxJjRXDiWyR6x4WYjUx3ZBhpf5
+wVFUK7VaPJBfOn7KCan59dqOvL3LSB/5SupwRMecCEhYPQvSaxn4MNrx0Vi83O4C
+togyD9/UJ4ji+TXwMj2eMzwRspmO/26hXkQGzZ0CgYEA0qlLTrYKWOUUdgf/xjsE
+fRTcfsofm6VMAAz9rzd2TG3TXMZaGKGWJI5cTR7ejBG2oFNFgiwt1ZtLFPqXarOm
+JE4b7QwrwoN1mZqngiygtUOAxwQRzlEZkYUI1xFykG8VKURLfX0sRQpJ4pNHY56v
+LRazP5dCZ0rrpnVfql1oJaECgYEAxtvT728XcOOuNtpUBOGcZTynjds2EhsRjyx4
+JbQGlutNjMyxtLUW+RcEuBg5ydYdne1Tw6L/iqiALTwNuAxQdCaq9vT0oj41sPp9
+UdI53j5Rxji5yitilOlesylsqCpnYuhyJflhlV0RXQpg6LmRlyQKeEN4R/uCNGI3
+i4sIvYECgYEA4DC2qObfB0UkN81uGluwwM5rR04qvIc5xX3QIvHuIJOs/uP54daD
+OiEDTxTpiqDNsFL0Pyl07aL7jubHNqU/eQpQIEZRlDy4Mr31QSbQ9R2/NNBwHu22
+BnnNKzZ97T0NVgxJXOqcOlRGjwb/5OUDpaIClJY+GqilEdOeu7Pl3aA=
 -----END RSA PRIVATE KEY-----
diff --git a/nixos/tests/common/acme/server/ca.cert.pem b/nixos/tests/common/acme/server/ca.cert.pem
index 5c33e879b67..b6f2b9e3a91 100644
--- a/nixos/tests/common/acme/server/ca.cert.pem
+++ b/nixos/tests/common/acme/server/ca.cert.pem
@@ -1,20 +1,20 @@
 -----BEGIN CERTIFICATE-----
-MIIDSzCCAjOgAwIBAgIIeHRvRrNvbGQwDQYJKoZIhvcNAQELBQAwIDEeMBwGA1UE
-AxMVbWluaWNhIHJvb3QgY2EgNzg3NDZmMCAXDTIwMTAyMTEzMjgzNloYDzIxMjAx
-MDIxMTMyODM2WjAgMR4wHAYDVQQDExVtaW5pY2Egcm9vdCBjYSA3ODc0NmYwggEi
-MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCrNTzVLDJOKtGYGLU98EEcLKps
-tXHCLC6G54LKbEcU80fn+ArX8qsPSHyhdXQkcYjq6Vh/EDJ1TctyRSnvAjwyG4Aa
-1Zy1QFc/JnjMjvzimCkUc9lQ+wkLwHSM/KGwR1cGjmtQ/EMClZTA0NwulJsXMKVz
-bd5asXbq/yJTQ5Ww25HtdNjwRQXTvB7r3IKcY+DsED9CvFvC9oG/ZhtZqZuyyRdC
-kFUrrv8WNUDkWSN+lMR6xMx8v0583IN6f11IhX0b+svK98G81B2eswBdkzvVyv9M
-unZBO0JuJG8sdM502KhWLmzBC1ZbvgUBF9BumDRpMFH4DCj7+qQ2taWeGyc7AgMB
+MIIDSzCCAjOgAwIBAgIIIwtYp+WlBbswDQYJKoZIhvcNAQELBQAwIDEeMBwGA1UE
+AxMVbWluaWNhIHJvb3QgY2EgMjMwYjU4MCAXDTIyMTEyMTE3MTcxMFoYDzIxMjIx
+MTIxMTcxNzEwWjAgMR4wHAYDVQQDExVtaW5pY2Egcm9vdCBjYSAyMzBiNTgwggEi
+MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvqAoAyV8igrmBnU6T1nQDfkkQ
+HjQp+ANCthNCi4kGPOoTxrYrUMWa6d/aSIv5hKO2A+r2GdTeM1RvSo6GUr3GmsJc
+WUMbIsJ0SJSLQEyvmFPpzfV3NdfIt6vZRiqJbLt7yuDiZil33GdQEKYywJxIsCb2
+CSd55V1cZSiLItWEIURAhHhSxHabMRmIF/xZWxKFEDeagzXOxUBPAvIwzzqQroBv
+3vZhfgcAjCyS0crJ/E2Wa6GLKfFvaXGEj/KlXftwpbvFtnNBtmtJcNy9a8LJoOcA
+E+ZjD21hidnCc+Yag7LaR3ZtAVkpeRJ9rRNBkVP4rv2mq2skIkgDfY/F8smPAgMB
 AAGjgYYwgYMwDgYDVR0PAQH/BAQDAgKEMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggr
-BgEFBQcDAjASBgNVHRMBAf8ECDAGAQH/AgEAMB0GA1UdDgQWBBT7whmUtX9CnkJe
-qlcwu+3FaXFwnDAfBgNVHSMEGDAWgBT7whmUtX9CnkJeqlcwu+3FaXFwnDANBgkq
-hkiG9w0BAQsFAAOCAQEARMe1wKmF33GjEoLLw0oDDS4EdAv26BzCwtrlljsEtwQN
-95oSzUNd6o4Js7WCG2o543OX6cxzM+yju8TES3+vJKDgsbNMU0bWCv//tdrb0/G8
-OkU3Kfi5q4fOauZ1pqGv/pXdfYhZ5ieB/zwis3ykANe5JfB0XqwCb1Vd0C3UCIS2
-NPKngRwNSzphIsbzfvxGDkdM1enuGl5CVyDhrwTMqGaJGDSOv6U5jKFxKRvigqTN
-Ls9lPmT5NXYETduWLBR3yUIdH6kZXrcozZ02B9vjOB2Cv4RMDc+9eM30CLIWpf1I
-097e7JkhzxFhfC/bMMt3P1FeQc+fwH91wdBmNi7tQw==
+BgEFBQcDAjASBgNVHRMBAf8ECDAGAQH/AgEAMB0GA1UdDgQWBBS9MITcuP8/o5aS
+Y4a2yNxNwgPOQDAfBgNVHSMEGDAWgBS9MITcuP8/o5aSY4a2yNxNwgPOQDANBgkq
+hkiG9w0BAQsFAAOCAQEADCcgaxrI/pqjkYb0c3QHwfKCNz4khSWs/9tBpBfdxdUX
+uvG7rZzVW7pkzML+m4tSo2wm9sHRAgG+dIpzbSoRTouMntWlvYEnrr1SCw4NyBo1
+cwmNUz4JL+E3dnpI4FSOpyFyO87qL9ep0dxQEADWSppyCA762wfFpY+FvT6b/he8
+eDEc/Umjfm+X0tqNWx3aVoeyIJT46AeElry2IRLAk7z/vEPGFFzgd2Jh6Qsdeagk
+YkU0tFl9q9BotPYGlCMtVjmzbJtxh4uM9YCgiz1THzFjrUvfaTM8VjuBxbpoCZkS
+85mNhFZvNq8/cgYc0kYZOg8+jRdy87xmTRp64LBd6w==
 -----END CERTIFICATE-----
diff --git a/nixos/tests/common/acme/server/ca.key.pem b/nixos/tests/common/acme/server/ca.key.pem
index ed46f5dccf4..5d46c025788 100644
--- a/nixos/tests/common/acme/server/ca.key.pem
+++ b/nixos/tests/common/acme/server/ca.key.pem
@@ -1,27 +1,27 @@
 -----BEGIN RSA PRIVATE KEY-----
-MIIEowIBAAKCAQEAqzU81SwyTirRmBi1PfBBHCyqbLVxwiwuhueCymxHFPNH5/gK
-1/KrD0h8oXV0JHGI6ulYfxAydU3LckUp7wI8MhuAGtWctUBXPyZ4zI784pgpFHPZ
-UPsJC8B0jPyhsEdXBo5rUPxDApWUwNDcLpSbFzClc23eWrF26v8iU0OVsNuR7XTY
-8EUF07we69yCnGPg7BA/QrxbwvaBv2YbWambsskXQpBVK67/FjVA5FkjfpTEesTM
-fL9OfNyDen9dSIV9G/rLyvfBvNQdnrMAXZM71cr/TLp2QTtCbiRvLHTOdNioVi5s
-wQtWW74FARfQbpg0aTBR+Awo+/qkNrWlnhsnOwIDAQABAoIBAA3ykVkgd5ysmlSU
-trcsCnHcJaojgff6l3PACoSpG4VWaGY6a8+54julgRm6MtMBONFCX0ZCsImj484U
-Wl0xRmwil2YYPuL5MeJgJPktMObY1IfpBCw3tz3w2M3fiuCMf0d2dMGtO1xLiUnH
-+hgFXTkfamsj6ThkOrbcQBSebeRxbKM5hqyCaQoieV+0IJnyxUVq/apib8N50VsH
-SHd4oqLUuEZgg6N70+l5DpzedJUb4nrwS/KhUHUBgnoPItYBCiGPmrwLk7fUhPs6
-kTDqJDtc/xW/JbjmzhWEpVvtumcC/OEKULss7HLdeQqwVBrRQkznb0M9AnSra3d0
-X11/Y4ECgYEA3FC8SquLPFb2lHK4+YbJ4Ac6QVWeYFEHiZ0Rj+CmONmjcAvOGLPE
-SblRLm3Nbrkxbm8FF6/AfXa/rviAKEVPs5xqGfSDw/3n1uInPcmShiBCLwM/jHH5
-NeVG+R5mTg5zyQ/pQMLWRcs+Ail+ZAnZuoGpW3Cdc8OtCUYFQ7XB6nsCgYEAxvBJ
-zFxcTtsDzWbMWXejugQiUqJcEbKWwEfkRbf3J2rAVO2+EFr7LxdRfN2VwPiTQcWc
-LnN2QN+ouOjqBMTh3qm5oQY+TLLHy86k9g1k0gXWkMRQgP2ZdfWH1HyrwjLUgLe1
-VezFN7N1azgy6xFkInAAvuA4loxElZNvkGBgekECgYA/Xw26ILvNIGqO6qzgQXAh
-+5I7JsiGheg4IjDiBMlrQtbrLMoceuD0H9UFGNplhel9DXwWgxxIOncKejpK2x0A
-2fX+/0FDh+4+9hA5ipiV8gN3iGSoHkSDxy5yC9d7jlapt+TtFt4Rd1OfxZWwatDw
-/8jaH3t6yAcmyrhK8KYVrwKBgAE5KwsBqmOlvyE9N5Z5QN189wUREIXfVkP6bTHs
-jq2EX4hmKdwJ4y+H8i1VY31bSfSGlY5HkXuWpH/2lrHO0CDBZG3UDwADvWzIaYVF
-0c/kz0v2mRQh+xaZmus4lQnNrDbaalgL666LAPbW0qFVaws3KxoBYPe0BxvwWyhF
-H3LBAoGBAKRRNsq2pWQ8Gqxc0rVoH0FlexU9U2ci3lsLmgEB0A/o/kQkSyAxaRM+
-VdKp3sWfO8o8lX5CVQslCNBSjDTNcat3Co4NEBLg6Xv1yKN/WN1GhusnchP9szsP
-oU47gC89QhUyWSd6vvr2z2NG9C3cACxe4dhDSHQcE4nHSldzCKv2
+MIIEowIBAAKCAQEAr6gKAMlfIoK5gZ1Ok9Z0A35JEB40KfgDQrYTQouJBjzqE8a2
+K1DFmunf2kiL+YSjtgPq9hnU3jNUb0qOhlK9xprCXFlDGyLCdEiUi0BMr5hT6c31
+dzXXyLer2UYqiWy7e8rg4mYpd9xnUBCmMsCcSLAm9gkneeVdXGUoiyLVhCFEQIR4
+UsR2mzEZiBf8WVsShRA3moM1zsVATwLyMM86kK6Ab972YX4HAIwsktHKyfxNlmuh
+iynxb2lxhI/ypV37cKW7xbZzQbZrSXDcvWvCyaDnABPmYw9tYYnZwnPmGoOy2kd2
+bQFZKXkSfa0TQZFT+K79pqtrJCJIA32PxfLJjwIDAQABAoIBAErEFJXnIIY47Cq+
+QS7t7e16uDCTGpLujLy9cQ83AzjTfrKyNuHS/HkGqRBpJqMrEN+tZTohHpkBciP4
+sRd9amd5gdb663RGZExIhGmNEdb/2F/BGYUHNvSpMQ1HL13VGSwE25mh8G6jMppC
+q+sYTq0lxT+d/96DgSyNpicqyYT2S2CTCRkWGAsc6KQwRpBYqoEqUeakyGfe2k85
+pj32H53Si/49fkWkQ9RciPdg7qcu7u/iegwAkkjKoATeEjNf0NqBlkWag1qU0UHR
+r2xDin+3ffEU2GQEwSvnGwlo7uyAN0UsryEWa9suuhX5T4eSWAMgTL4iVkh8Aa24
++YEFOGkCgYEA0DUb++31+nuxU8N+GPaPQXiob8C0RmSzSzSHJ3daJpzq8k576jqs
+3TgkhLDzQepcTYVU2ucn6+9ziXEsz4H06W3FNGktnyK4BRqYitt5TjZvPc+WTPhR
+0U+iUqBZilCAhUkIsNUiGvnMhz9VfcS/gn+NqhL7kvYi11/jAc4bbB0CgYEA1/oh
++t1ZKVLkbANrma/M8AX27Vl3k4jgOWGzFwAVD10zN31gGyVjv1knmG22pmL2+N+Z
+8CnVmdHQQQIWV1pYbgwRkvpnZWyH7AvHd9l1XLYyOU3VEpz+e2bpMtzesaza3UWW
+k8NELNE5sBopY939XkQ9G3aMXtbkx01zX+0BZJsCgYB+MdJ2TfKrEVGXfYPuSXLm
+seUVZu1dRSfOy1WnvBVuFenpV1yPyWSA6MhpjH7EUvIDIm8eBsERpZ6XjXslgpUY
+7ql6bM10CK0UmtwePYw2tZOTGUD2AgRFI0k1X28mAEkFgBC+bVAwnXsz9lUw15Fj
+3T/V9493savIcpu6uluwmQKBgQCE/I4jzFv0aAgiwlBlB6znNqT/LRHGFIgMjS4b
+QX+2QCsjRd4BmRo8XodVAmlvNozgXb6J9RiDaIAVJ1XeX9EHogLIP8ue1h8zp2Uh
+VRNBDScLxfMnTOgd0BZTrVCqkscJbKn1Pk0iU4pz9wf5aF10yAvgdzSjySqB1hzu
+uh8bdQKBgEpFIyhqfXf/NzchI5y23Cok14LFIPJ1yERD/B8taS7muVQwpgffy+Ld
+BH7dhafWSDVqIk1e6yl+82b4amleTEmDfopgc6FR7uPid1JoFxrwhnEfC3FjZamp
+1OzXAOE/mX3jHf1spqpB2J/rDVPKi934ocQVoWnBeRopGTXxzbed
 -----END RSA PRIVATE KEY-----
diff --git a/nixos/tests/common/acme/server/default.nix b/nixos/tests/common/acme/server/default.nix
index 450d49e6039..b81f860125c 100644
--- a/nixos/tests/common/acme/server/default.nix
+++ b/nixos/tests/common/acme/server/default.nix
@@ -18,10 +18,10 @@
 #
 #   example = { nodes, ... }: {
 #     networking.nameservers = [
-#       nodes.acme.config.networking.primaryIPAddress
+#       nodes.acme.networking.primaryIPAddress
 #     ];
 #     security.pki.certificateFiles = [
-#       nodes.acme.config.test-support.acme.caCert
+#       nodes.acme.test-support.acme.caCert
 #     ];
 #   };
 # }
@@ -36,7 +36,7 @@
 #   acme = { nodes, lib, ... }: {
 #     imports = [ ./common/acme/server ];
 #     networking.nameservers = lib.mkForce [
-#       nodes.myresolver.config.networking.primaryIPAddress
+#       nodes.myresolver.networking.primaryIPAddress
 #     ];
 #   };
 #
@@ -81,8 +81,8 @@ in {
       type = types.str;
       readOnly = true;
       default = domain;
-      description = ''
-        A domain name to use with the <literal>nodes</literal> attribute to
+      description = lib.mdDoc ''
+        A domain name to use with the `nodes` attribute to
         identify the CA server.
       '';
     };
@@ -90,10 +90,10 @@ in {
       type = types.path;
       readOnly = true;
       default = testCerts.ca.cert;
-      description = ''
-        A certificate file to use with the <literal>nodes</literal> attribute to
+      description = lib.mdDoc ''
+        A certificate file to use with the `nodes` attribute to
         inject the test CA certificate used in the ACME server into
-        <option>security.pki.certificateFiles</option>.
+        {option}`security.pki.certificateFiles`.
       '';
     };
   };
diff --git a/nixos/tests/common/acme/server/generate-certs.nix b/nixos/tests/common/acme/server/generate-certs.nix
index cd8fe0dffca..85c751c56ad 100644
--- a/nixos/tests/common/acme/server/generate-certs.nix
+++ b/nixos/tests/common/acme/server/generate-certs.nix
@@ -10,7 +10,11 @@ let
   domain = conf.domain;
 in mkDerivation {
   name = "test-certs";
-  buildInputs = [ minica ];
+  buildInputs = [ (minica.overrideAttrs (old: {
+    prePatch = ''
+      sed -i 's_NotAfter: time.Now().AddDate(2, 0, 30),_NotAfter: time.Now().AddDate(20, 0, 0),_' main.go
+    '';
+  })) ];
   phases = [ "buildPhase" "installPhase" ];
 
   buildPhase = ''
diff --git a/nixos/tests/common/auto.nix b/nixos/tests/common/auto.nix
index da6b14e9f16..f2ab82f88ff 100644
--- a/nixos/tests/common/auto.nix
+++ b/nixos/tests/common/auto.nix
@@ -19,17 +19,17 @@ in
 
       enable = mkOption {
         default = false;
-        description = ''
+        description = lib.mdDoc ''
           Whether to enable the fake "auto" display manager, which
           automatically logs in the user specified in the
-          <option>user</option> option.  This is mostly useful for
+          {option}`user` option.  This is mostly useful for
           automated tests.
         '';
       };
 
       user = mkOption {
         default = "root";
-        description = "The user account to login automatically.";
+        description = lib.mdDoc "The user account to login automatically.";
       };
 
     };
diff --git a/nixos/tests/common/ec2.nix b/nixos/tests/common/ec2.nix
index 64b0a91ac1f..6ed420e0aae 100644
--- a/nixos/tests/common/ec2.nix
+++ b/nixos/tests/common/ec2.nix
@@ -46,7 +46,7 @@ with pkgs.lib;
         # Note: we use net=169.0.0.0/8 rather than
         # net=169.254.0.0/16 to prevent dhcpcd from getting horribly
         # confused. (It would get a DHCP lease in the 169.254.*
-        # range, which it would then configure and prompty delete
+        # range, which it would then configure and promptly delete
         # again when it deletes link-local addresses.) Ideally we'd
         # turn off the DHCP server, but qemu does not have an option
         # to do that.
diff --git a/nixos/tests/common/resolver.nix b/nixos/tests/common/resolver.nix
index 09a74de20fa..3ddf730668c 100644
--- a/nixos/tests/common/resolver.nix
+++ b/nixos/tests/common/resolver.nix
@@ -10,15 +10,15 @@
     type = lib.types.bool;
     default = true;
     internal = true;
-    description = ''
+    description = lib.mdDoc ''
       Whether to enable the resolver that automatically discovers zone in the
       test network.
 
-      This option is <literal>true</literal> by default, because the module
+      This option is `true` by default, because the module
       defining this option needs to be explicitly imported.
 
       The reason this option exists is for the
-      <filename>nixos/tests/common/acme/server</filename> module, which
+      {file}`nixos/tests/common/acme/server` module, which
       needs that option to disable the resolver once the user has set its own
       resolver.
     '';
diff --git a/nixos/tests/containers-unified-hierarchy.nix b/nixos/tests/containers-unified-hierarchy.nix
new file mode 100644
index 00000000000..978d59e12c8
--- /dev/null
+++ b/nixos/tests/containers-unified-hierarchy.nix
@@ -0,0 +1,21 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "containers-unified-hierarchy";
+  meta = {
+    maintainers = with lib.maintainers; [ farnoy ];
+  };
+
+  nodes.machine = { ... }: {
+    containers = {
+      test-container = {
+        autoStart = true;
+        config = { };
+      };
+    };
+  };
+
+  testScript = ''
+    machine.wait_for_unit("default.target")
+
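+    # stat -fc %T prints the filesystem type of /sys/fs/cgroup; "cgroup2fs" means the
+    # container sees a unified cgroup v2 hierarchy.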
+    machine.succeed("echo 'stat -fc %T /sys/fs/cgroup/ | grep cgroup2fs' | nixos-container root-login test-container")
+  '';
+})
diff --git a/nixos/tests/convos.nix b/nixos/tests/convos.nix
index cc0c2e75893..a5dafed8f6f 100644
--- a/nixos/tests/convos.nix
+++ b/nixos/tests/convos.nix
@@ -24,7 +24,7 @@ in
   testScript = ''
     machine.wait_for_unit("convos")
     machine.wait_for_open_port(${toString port})
-    machine.succeed("journalctl -u convos | grep -q 'Listening at.*${toString port}'")
+    machine.succeed("journalctl -u convos | grep -q 'application available at.*${toString port}'")
     machine.succeed("curl -f http://localhost:${toString port}/")
   '';
 })
diff --git a/nixos/tests/corerad.nix b/nixos/tests/corerad.nix
index 638010f92f4..b6f5d7fc6f7 100644
--- a/nixos/tests/corerad.nix
+++ b/nixos/tests/corerad.nix
@@ -1,5 +1,6 @@
 import ./make-test-python.nix (
   {
+    name = "corerad";
     nodes = {
       router = {config, pkgs, ...}: {
         config = {
diff --git a/nixos/tests/couchdb.nix b/nixos/tests/couchdb.nix
index 453f5dcd66e..b57072d6be2 100644
--- a/nixos/tests/couchdb.nix
+++ b/nixos/tests/couchdb.nix
@@ -20,7 +20,7 @@ with lib;
 {
   name = "couchdb";
   meta = with pkgs.lib.maintainers; {
-    maintainers = [ fpletz ];
+    maintainers = [ ];
   };
 
   nodes = {
diff --git a/nixos/tests/cri-o.nix b/nixos/tests/cri-o.nix
index d3a8713d6a9..08e1e8f36b0 100644
--- a/nixos/tests/cri-o.nix
+++ b/nixos/tests/cri-o.nix
@@ -1,7 +1,7 @@
 # This test runs CRI-O and verifies via critest
 import ./make-test-python.nix ({ pkgs, ... }: {
   name = "cri-o";
-  meta.maintainers = with pkgs.lib.maintainers; teams.podman.members;
+  meta.maintainers = with pkgs.lib; teams.podman.members;
 
   nodes = {
     crio = {
diff --git a/nixos/tests/cryptpad.nix b/nixos/tests/cryptpad.nix
deleted file mode 100644
index db271f937ef..00000000000
--- a/nixos/tests/cryptpad.nix
+++ /dev/null
@@ -1,18 +0,0 @@
-import ./make-test-python.nix ({ lib, ... }:
-
-with lib;
-
-{
-  name = "cryptpad";
-  meta.maintainers = with maintainers; [ davhau ];
-
-  nodes.machine =
-    { pkgs, ... }:
-    { services.cryptpad.enable = true; };
-
-  testScript = ''
-    machine.wait_for_unit("cryptpad.service")
-    machine.wait_for_open_port(3000)
-    machine.succeed("curl -L --fail http://localhost:3000/sheet")
-  '';
-})
diff --git a/nixos/tests/custom-ca.nix b/nixos/tests/custom-ca.nix
index 73e47c3c9d0..25a7b6fdea4 100644
--- a/nixos/tests/custom-ca.nix
+++ b/nixos/tests/custom-ca.nix
@@ -191,5 +191,5 @@ in
   firefox = { error = "Security Risk"; };
   chromium = { error = "not private"; };
   qutebrowser = { args = "-T"; error = "Certificate error"; };
-  midori = { error = "Security"; };
+  midori = { args = "-p"; error = "Security"; };
 }
diff --git a/nixos/tests/deluge.nix b/nixos/tests/deluge.nix
index 0cd1d21870a..e8945fdea00 100644
--- a/nixos/tests/deluge.nix
+++ b/nixos/tests/deluge.nix
@@ -54,8 +54,10 @@ import ./make-test-python.nix ({ pkgs, ...} : {
     declarative.wait_for_unit("deluged")
     declarative.wait_for_unit("delugeweb")
     declarative.wait_until_succeeds("curl --fail http://declarative:3142")
+
+    # deluge-console always exits with 1. https://dev.deluge-torrent.org/ticket/3291
     declarative.succeed(
-        "deluge-console 'connect 127.0.0.1:58846 andrew password; help' | grep -q 'rm.*Remove a torrent'"
+        "(deluge-console 'connect 127.0.0.1:58846 andrew password; help' || true) | grep -q 'rm.*Remove a torrent'"
     )
   '';
 })
diff --git a/nixos/tests/dhparams.nix b/nixos/tests/dhparams.nix
index a0de2911777..021042fafdb 100644
--- a/nixos/tests/dhparams.nix
+++ b/nixos/tests/dhparams.nix
@@ -1,81 +1,69 @@
-let
-  common = { pkgs, ... }: {
-    security.dhparams.enable = true;
-    environment.systemPackages = [ pkgs.openssl ];
-  };
-
-in import ./make-test-python.nix {
+import ./make-test-python.nix {
   name = "dhparams";
 
-  nodes.generation1 = { pkgs, config, ... }: {
-    imports = [ common ];
-    security.dhparams.params = {
-      # Use low values here because we don't want the test to run for ages.
-      foo.bits = 16;
-      # Also use the old format to make sure the type is coerced in the right
-      # way.
-      bar = 17;
-    };
+  nodes.machine = { pkgs, ... }: {
+    security.dhparams.enable = true;
+    environment.systemPackages = [ pkgs.openssl ];
 
-    systemd.services.foo = {
-      description = "Check systemd Ordering";
-      wantedBy = [ "multi-user.target" ];
-      unitConfig = {
-        # This is to make sure that the dhparams generation of foo occurs
-        # before this service so we need this service to start as early as
-        # possible to provoke a race condition.
-        DefaultDependencies = false;
-
-        # We check later whether the service has been started or not.
-        ConditionPathExists = config.security.dhparams.params.foo.path;
+    specialisation = {
+      gen1.configuration = { config, ... }: {
+        security.dhparams.params = {
+          # Use low values here because we don't want the test to run for ages.
+          foo.bits = 1024;
+          # Also use the old format to make sure the type is coerced in the right
+          # way.
+          bar = 1025;
+        };
+
+        systemd.services.foo = {
+          description = "Check systemd Ordering";
+          wantedBy = [ "multi-user.target" ];
+          unitConfig = {
+            # This is to make sure that the dhparams generation of foo occurs
+            # before this service so we need this service to start as early as
+            # possible to provoke a race condition.
+            DefaultDependencies = false;
+
+            # We check later whether the service has been started or not.
+            ConditionPathExists = config.security.dhparams.params.foo.path;
+          };
+          serviceConfig.Type = "oneshot";
+          serviceConfig.RemainAfterExit = true;
+          # The reason we only provide an ExecStop here is to ensure that we don't
+          # accidentally trigger an error because a file system is not yet ready
+          # during very early startup (we might not even have the Nix store
+          # available, for example if future changes in NixOS use systemd mount
+          # units to do early file system initialisation).
+          serviceConfig.ExecStop = "${pkgs.coreutils}/bin/true";
+        };
+      };
+      gen2.configuration = {
+        security.dhparams.params.foo.bits = 1026;
+      };
+      gen3.configuration = {};
+      gen4.configuration = {
+        security.dhparams.stateful = false;
+        security.dhparams.params.foo2.bits = 1027;
+        security.dhparams.params.bar2.bits = 1028;
+      };
+      gen5.configuration = {
+        security.dhparams.defaultBitSize = 1029;
+        security.dhparams.params.foo3 = {};
+        security.dhparams.params.bar3 = {};
       };
-      serviceConfig.Type = "oneshot";
-      serviceConfig.RemainAfterExit = true;
-      # The reason we only provide an ExecStop here is to ensure that we don't
-      # accidentally trigger an error because a file system is not yet ready
-      # during very early startup (we might not even have the Nix store
-      # available, for example if future changes in NixOS use systemd mount
-      # units to do early file system initialisation).
-      serviceConfig.ExecStop = "${pkgs.coreutils}/bin/true";
     };
   };
 
-  nodes.generation2 = {
-    imports = [ common ];
-    security.dhparams.params.foo.bits = 18;
-  };
-
-  nodes.generation3 = common;
-
-  nodes.generation4 = {
-    imports = [ common ];
-    security.dhparams.stateful = false;
-    security.dhparams.params.foo2.bits = 18;
-    security.dhparams.params.bar2.bits = 19;
-  };
-
-  nodes.generation5 = {
-    imports = [ common ];
-    security.dhparams.defaultBitSize = 30;
-    security.dhparams.params.foo3 = {};
-    security.dhparams.params.bar3 = {};
-  };
-
   testScript = { nodes, ... }: let
     getParamPath = gen: name: let
-      node = "generation${toString gen}";
-    in nodes.${node}.config.security.dhparams.params.${name}.path;
+      node = "gen${toString gen}";
+    in nodes.machine.config.specialisation.${node}.configuration.security.dhparams.params.${name}.path;
 
     switchToGeneration = gen: let
-      node = "generation${toString gen}";
-      inherit (nodes.${node}.config.system.build) toplevel;
-      switchCmd = "${toplevel}/bin/switch-to-configuration test";
+      switchCmd = "${nodes.machine.config.system.build.toplevel}/specialisation/gen${toString gen}/bin/switch-to-configuration test";
     in ''
       with machine.nested("switch to generation ${toString gen}"):
-          machine.succeed(
-              "${switchCmd}"
-          )
-          machine = ${node}
+        machine.succeed("${switchCmd}")
     '';
 
   in ''
@@ -92,22 +80,20 @@ in import ./make-test-python.nix {
             if match[1] != str(bits):
                 raise Exception(f"bit size should be {bits} but it is {match[1]} instead.")
 
-
-    machine = generation1
-
     machine.wait_for_unit("multi-user.target")
+    ${switchToGeneration 1}
 
     with subtest("verify startup order"):
         machine.succeed("systemctl is-active foo.service")
 
     with subtest("check bit sizes of dhparam files"):
-        assert_param_bits("${getParamPath 1 "foo"}", 16)
-        assert_param_bits("${getParamPath 1 "bar"}", 17)
+        assert_param_bits("${getParamPath 1 "foo"}", 1024)
+        assert_param_bits("${getParamPath 1 "bar"}", 1025)
 
     ${switchToGeneration 2}
 
     with subtest("check whether bit size has changed"):
-        assert_param_bits("${getParamPath 2 "foo"}", 18)
+        assert_param_bits("${getParamPath 2 "foo"}", 1026)
 
     with subtest("ensure that dhparams file for 'bar' was deleted"):
         machine.fail("test -e ${getParamPath 1 "bar"}")
@@ -115,16 +101,16 @@ in import ./make-test-python.nix {
     ${switchToGeneration 3}
 
     with subtest("ensure that 'security.dhparams.path' has been deleted"):
-        machine.fail("test -e ${nodes.generation3.config.security.dhparams.path}")
+        machine.fail("test -e ${nodes.machine.config.specialisation.gen3.configuration.security.dhparams.path}")
 
     ${switchToGeneration 4}
 
     with subtest("check bit sizes dhparam files"):
         assert_param_bits(
-            "${getParamPath 4 "foo2"}", 18
+            "${getParamPath 4 "foo2"}", 1027
         )
         assert_param_bits(
-            "${getParamPath 4 "bar2"}", 19
+            "${getParamPath 4 "bar2"}", 1028
         )
 
     with subtest("check whether dhparam files are in the Nix store"):
@@ -136,7 +122,7 @@ in import ./make-test-python.nix {
     ${switchToGeneration 5}
 
     with subtest("check whether defaultBitSize works as intended"):
-        assert_param_bits("${getParamPath 5 "foo3"}", 30)
-        assert_param_bits("${getParamPath 5 "bar3"}", 30)
+        assert_param_bits("${getParamPath 5 "foo3"}", 1029)
+        assert_param_bits("${getParamPath 5 "bar3"}", 1029)
   '';
 }
diff --git a/nixos/tests/discourse.nix b/nixos/tests/discourse.nix
index cfac5f84a62..35ca083c6c4 100644
--- a/nixos/tests/discourse.nix
+++ b/nixos/tests/discourse.nix
@@ -30,6 +30,7 @@ import ./make-test-python.nix (
         virtualisation.memorySize = 2048;
         virtualisation.cores = 4;
         virtualisation.useNixStoreImage = true;
+        virtualisation.writableStore = false;
 
         imports = [ common/user-account.nix ];
 
diff --git a/nixos/tests/dnscrypt-proxy2.nix b/nixos/tests/dnscrypt-proxy2.nix
index 1ba5d983e9b..a75a745d355 100644
--- a/nixos/tests/dnscrypt-proxy2.nix
+++ b/nixos/tests/dnscrypt-proxy2.nix
@@ -1,4 +1,6 @@
-import ./make-test-python.nix ({ pkgs, ... }: {
+import ./make-test-python.nix ({ pkgs, ... }: let
+  localProxyPort = 43;
+in {
   name = "dnscrypt-proxy2";
   meta = with pkgs.lib.maintainers; {
     maintainers = [ joachifm ];
@@ -9,7 +11,6 @@ import ./make-test-python.nix ({ pkgs, ... }: {
     # for a caching DNS client.
     client =
     { ... }:
-    let localProxyPort = 43; in
     {
       security.apparmor.enable = true;
 
@@ -25,12 +26,13 @@ import ./make-test-python.nix ({ pkgs, ... }: {
       };
 
       services.dnsmasq.enable = true;
-      services.dnsmasq.servers = [ "127.0.0.1#${toString localProxyPort}" ];
+      services.dnsmasq.settings.server = [ "127.0.0.1#${toString localProxyPort}" ];
     };
   };
 
   testScript = ''
     client.wait_for_unit("dnsmasq")
     client.wait_for_unit("dnscrypt-proxy2")
+    client.wait_until_succeeds("ss --numeric --udp --listening | grep -q ${toString localProxyPort}")
   '';
 })
diff --git a/nixos/tests/docker-tools-cross.nix b/nixos/tests/docker-tools-cross.nix
index 8791ec25812..14cb14ceeae 100644
--- a/nixos/tests/docker-tools-cross.nix
+++ b/nixos/tests/docker-tools-cross.nix
@@ -24,7 +24,11 @@ let
   hello1 = remoteCrossPkgs.dockerTools.buildImage {
     name = "hello1";
     tag = "latest";
-    contents = remoteCrossPkgs.hello;
+    copyToRoot = remoteCrossPkgs.buildEnv {
+      name = "image-root";
+      pathsToLink = [ "/bin" ];
+      paths = [ remoteCrossPkgs.hello ];
+    };
   };
 
   hello2 = remoteCrossPkgs.dockerTools.buildLayeredImage {
diff --git a/nixos/tests/docker-tools.nix b/nixos/tests/docker-tools.nix
index 99a968f17af..98704ecb2fb 100644
--- a/nixos/tests/docker-tools.nix
+++ b/nixos/tests/docker-tools.nix
@@ -346,7 +346,7 @@ import ./make-test-python.nix ({ pkgs, ... }: {
             "docker load --input='${examples.layeredImageWithFakeRootCommands}'"
         )
         docker.succeed(
-            "docker run --rm ${examples.layeredImageWithFakeRootCommands.imageName} sh -c 'stat -c '%u' /home/jane | grep -E ^1000$'"
+            "docker run --rm ${examples.layeredImageWithFakeRootCommands.imageName} sh -c 'stat -c '%u' /home/alice | grep -E ^1000$'"
         )
 
     with subtest("Ensure docker load on merged images loads all of the constituent images"):
@@ -389,7 +389,7 @@ import ./make-test-python.nix ({ pkgs, ... }: {
             "docker load --input='${examples.mergedBashFakeRoot}'"
         )
         docker.succeed(
-            "docker run --rm ${examples.layeredImageWithFakeRootCommands.imageName} sh -c 'stat -c '%u' /home/jane | grep -E ^1000$'"
+            "docker run --rm ${examples.layeredImageWithFakeRootCommands.imageName} sh -c 'stat -c '%u' /home/alice | grep -E ^1000$'"
         )
 
     with subtest("The image contains store paths referenced by the fakeRootCommands output"):
@@ -419,10 +419,84 @@ import ./make-test-python.nix ({ pkgs, ... }: {
             "docker rmi layered-image-with-path",
         )
 
+    with subtest("Ensure correct architecture is present in manifests."):
+        docker.succeed("""
+            docker load --input='${examples.build-image-with-architecture}'
+            docker inspect build-image-with-architecture \
+              | ${pkgs.jq}/bin/jq -er '.[] | select(.Architecture=="arm64").Architecture'
+            docker rmi build-image-with-architecture
+        """)
+        docker.succeed("""
+            ${examples.layered-image-with-architecture} | docker load
+            docker inspect layered-image-with-architecture \
+              | ${pkgs.jq}/bin/jq -er '.[] | select(.Architecture=="arm64").Architecture'
+            docker rmi layered-image-with-architecture
+        """)
+
     with subtest("etc"):
         docker.succeed("${examples.etc} | docker load")
         docker.succeed("docker run --rm etc | grep localhost")
         docker.succeed("docker image rm etc:latest")
 
+    with subtest("image-with-certs"):
+        docker.succeed("<${examples.image-with-certs} docker load")
+        docker.succeed("docker run --rm image-with-certs:latest test -r /etc/ssl/certs/ca-bundle.crt")
+        docker.succeed("docker run --rm image-with-certs:latest test -r /etc/ssl/certs/ca-certificates.crt")
+        docker.succeed("docker run --rm image-with-certs:latest test -r /etc/pki/tls/certs/ca-bundle.crt")
+        docker.succeed("docker image rm image-with-certs:latest")
+
+    with subtest("buildNixShellImage: Can build a basic derivation"):
+        docker.succeed(
+            "${examples.nix-shell-basic} | docker load",
+            "docker run --rm nix-shell-basic bash -c 'buildDerivation && $out/bin/hello' | grep '^Hello, world!$'"
+        )
+
+    with subtest("buildNixShellImage: Runs the shell hook"):
+        docker.succeed(
+            "${examples.nix-shell-hook} | docker load",
+            "docker run --rm -it nix-shell-hook | grep 'This is the shell hook!'"
+        )
+
+    with subtest("buildNixShellImage: Sources stdenv, making build inputs available"):
+        docker.succeed(
+            "${examples.nix-shell-inputs} | docker load",
+            "docker run --rm -it nix-shell-inputs | grep 'Hello, world!'"
+        )
+
+    with subtest("buildNixShellImage: passAsFile works"):
+        docker.succeed(
+            "${examples.nix-shell-pass-as-file} | docker load",
+            "docker run --rm -it nix-shell-pass-as-file | grep 'this is a string'"
+        )
+
+    with subtest("buildNixShellImage: run argument works"):
+        docker.succeed(
+            "${examples.nix-shell-run} | docker load",
+            "docker run --rm -it nix-shell-run | grep 'This shell is not interactive'"
+        )
+
+    with subtest("buildNixShellImage: command argument works"):
+        docker.succeed(
+            "${examples.nix-shell-command} | docker load",
+            "docker run --rm -it nix-shell-command | grep 'This shell is interactive'"
+        )
+
+    with subtest("buildNixShellImage: home directory is writable by default"):
+        docker.succeed(
+            "${examples.nix-shell-writable-home} | docker load",
+            "docker run --rm -it nix-shell-writable-home"
+        )
+
+    with subtest("buildNixShellImage: home directory can be made non-existent"):
+        docker.succeed(
+            "${examples.nix-shell-nonexistent-home} | docker load",
+            "docker run --rm -it nix-shell-nonexistent-home"
+        )
+
+    with subtest("buildNixShellImage: can build derivations"):
+        docker.succeed(
+            "${examples.nix-shell-build-derivation} | docker load",
+            "docker run --rm -it nix-shell-build-derivation"
+        )
   '';
 })
diff --git a/nixos/tests/docker.nix b/nixos/tests/docker.nix
index dee7480eb4a..93baa198088 100644
--- a/nixos/tests/docker.nix
+++ b/nixos/tests/docker.nix
@@ -11,6 +11,7 @@ import ./make-test-python.nix ({ pkgs, ...} : {
       { pkgs, ... }:
         {
           virtualisation.docker.enable = true;
+          virtualisation.docker.autoPrune.enable = true;
           virtualisation.docker.package = pkgs.docker;
 
           users.users = {
diff --git a/nixos/tests/documize.nix b/nixos/tests/documize.nix
index 528bf5338ce..fda79b1a093 100644
--- a/nixos/tests/documize.nix
+++ b/nixos/tests/documize.nix
@@ -47,9 +47,9 @@ import ./make-test-python.nix ({ pkgs, lib, ...} : {
             " --data 'dbhash={}'"
             " --data 'title=NixOS'"
             " --data 'message=Docs'"
-            " --data 'firstname=John'"
-            " --data 'lastname=Doe'"
-            " --data 'email=john.doe@nixos.org'"
+            " --data 'firstname=Bob'"
+            " --data 'lastname=Foobar'"
+            " --data 'email=bob.foobar@nixos.org'"
             " --data 'password=verysafe'"
             " -f localhost:3000/api/setup"
         ).format(dbhash)
diff --git a/nixos/tests/dolibarr.nix b/nixos/tests/dolibarr.nix
new file mode 100644
index 00000000000..2f012a0c67d
--- /dev/null
+++ b/nixos/tests/dolibarr.nix
@@ -0,0 +1,59 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "dolibarr";
+  meta.maintainers = [ lib.maintainers.raitobezarius ];
+
+  nodes.machine =
+    { ... }:
+    {
+      services.dolibarr = {
+        enable = true;
+        domain = "localhost";
+        nginx = {
+          forceSSL = false;
+          enableACME = false;
+        };
+      };
+
+      networking.firewall.allowedTCPPorts = [ 80 ];
+    };
+
+  testScript = ''
+    from html.parser import HTMLParser
+    start_all()
+
+    csrf_token = None
+    class TokenParser(HTMLParser):
+      def handle_starttag(self, tag, attrs):
+        global csrf_token
+        attrs = dict(attrs) # attrs is an assoc list originally
+        if tag == 'input' and attrs.get('name') == 'token':
+            csrf_token = attrs.get('value')
+            print(f'[+] Caught CSRF token: {csrf_token}')
+      def handle_endtag(self, tag): pass
+      def handle_data(self, data): pass
+
+    machine.wait_for_unit("phpfpm-dolibarr.service")
+    machine.wait_for_unit("nginx.service")
+    machine.wait_for_open_port(80)
+    # Sanity checks on URLs.
+    # machine.succeed("curl -fL http://localhost/index.php")
+    # machine.succeed("curl -fL http://localhost/")
+    # Perform installation.
+    machine.succeed('curl -fL -X POST http://localhost/install/check.php -F selectlang=auto')
+    machine.succeed('curl -fL -X POST http://localhost/install/fileconf.php -F selectlang=auto')
+    # The first POST writes the configuration file.
+    machine.succeed('curl -fL -X POST http://localhost/install/step1.php -F "testpost=ok" -F "action=set" -F "selectlang=auto"')
+    # Now, we have a proper conf.php in $stateDir.
+    assert 'nixos' in machine.succeed("cat /var/lib/dolibarr/conf.php")
+    machine.succeed('curl -fL -X POST http://localhost/install/step2.php --data "testpost=ok&action=set&dolibarr_main_db_character_set=utf8&dolibarr_main_db_collation=utf8_unicode_ci&selectlang=auto"')
+    machine.succeed('curl -fL -X POST http://localhost/install/step4.php --data "testpost=ok&action=set&selectlang=auto"')
+    machine.succeed('curl -fL -X POST http://localhost/install/step5.php --data "testpost=ok&action=set&login=root&pass=hunter2&pass_verif=hunter2&selectlang=auto"')
+    # Now that the installation is complete, verify the configuration is still intact.
+    assert 'nixos' in machine.succeed("cat /var/lib/dolibarr/conf.php")
+    # The front page should no longer redirect now that the installation is complete.
+    machine.succeed('curl -f -X POST http://localhost')
+    # Test authentication to the webservice.
+    parser = TokenParser()
+    parser.feed(machine.succeed("curl -f -X GET 'http://localhost/index.php?mainmenu=login&username=root'"))
+    machine.succeed(f"curl -f -X POST 'http://localhost/index.php?mainmenu=login&token={csrf_token}&username=root&password=hunter2'")
+  '';
+})
diff --git a/nixos/tests/domination.nix b/nixos/tests/domination.nix
index 09027740ab8..409a7f3029c 100644
--- a/nixos/tests/domination.nix
+++ b/nixos/tests/domination.nix
@@ -20,7 +20,7 @@ import ./make-test-python.nix ({ pkgs, ... }: {
       machine.wait_for_x()
       machine.execute("domination >&2 &")
       machine.wait_for_window("Menu")
-      machine.wait_for_text("New Game")
+      machine.wait_for_text(r"(New Game|Start Server|Load Game|Help Manual|Join Game|About|Play Online)")
       machine.screenshot("screen")
     '';
 })
diff --git a/nixos/tests/ec2.nix b/nixos/tests/ec2.nix
index aa3c2b7051f..e649761d029 100644
--- a/nixos/tests/ec2.nix
+++ b/nixos/tests/ec2.nix
@@ -16,8 +16,6 @@ let
       ../modules/testing/test-instrumentation.nix
       ../modules/profiles/qemu-guest.nix
       {
-        ec2.hvm = true;
-
         # Hack to make the partition resizing work in QEMU.
         boot.initrd.postDeviceCommands = mkBefore ''
           ln -s vda /dev/xvda
diff --git a/nixos/tests/endlessh-go.nix b/nixos/tests/endlessh-go.nix
new file mode 100644
index 00000000000..b261dbf1c56
--- /dev/null
+++ b/nixos/tests/endlessh-go.nix
@@ -0,0 +1,58 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }:
+{
+  name = "endlessh-go";
+  meta.maintainers = with lib.maintainers; [ azahi ];
+
+  nodes = {
+    server = { ... }: {
+      services.endlessh-go = {
+        enable = true;
+        prometheus.enable = true;
+        openFirewall = true;
+      };
+
+      specialisation = {
+        unprivileged.configuration = {
+          services.endlessh-go = {
+            port = 2222;
+            prometheus.port = 9229;
+          };
+        };
+
+        privileged.configuration = {
+          services.endlessh-go = {
+            port = 22;
+            prometheus.port = 92;
+          };
+        };
+      };
+    };
+
+    client = { pkgs, ... }: {
+      environment.systemPackages = with pkgs; [ curl netcat ];
+    };
+  };
+
+  testScript = ''
+    def activate_specialisation(name: str):
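+        # Activate the named specialisation of the already-booted system without rebooting.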
+        server.succeed(f"/run/booted-system/specialisation/{name}/bin/switch-to-configuration test >&2")
+
+    start_all()
+
+    with subtest("Unprivileged"):
+        activate_specialisation("unprivileged")
+        server.wait_for_unit("endlessh-go.service")
+        server.wait_for_open_port(2222)
+        server.wait_for_open_port(9229)
+        client.succeed("nc -dvW5 server 2222")
+        client.succeed("curl -kv server:9229/metrics")
+
+    with subtest("Privileged"):
+        activate_specialisation("privileged")
+        server.wait_for_unit("endlessh-go.service")
+        server.wait_for_open_port(22)
+        server.wait_for_open_port(92)
+        client.succeed("nc -dvW5 server 22")
+        client.succeed("curl -kv server:92/metrics")
+  '';
+})
diff --git a/nixos/tests/endlessh.nix b/nixos/tests/endlessh.nix
new file mode 100644
index 00000000000..be742a749fd
--- /dev/null
+++ b/nixos/tests/endlessh.nix
@@ -0,0 +1,43 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }:
+{
+  name = "endlessh";
+  meta.maintainers = with lib.maintainers; [ azahi ];
+
+  nodes = {
+    server = { ... }: {
+      services.endlessh = {
+        enable = true;
+        openFirewall = true;
+      };
+
+      specialisation = {
+        unprivileged.configuration.services.endlessh.port = 2222;
+
+        privileged.configuration.services.endlessh.port = 22;
+      };
+    };
+
+    client = { pkgs, ... }: {
+      environment.systemPackages = with pkgs; [ curl netcat ];
+    };
+  };
+
+  testScript = ''
+    def activate_specialisation(name: str):
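+        # Activate the named specialisation of the already-booted system without rebooting.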
+        server.succeed(f"/run/booted-system/specialisation/{name}/bin/switch-to-configuration test >&2")
+
+    start_all()
+
+    with subtest("Unprivileged"):
+        activate_specialisation("unprivileged")
+        server.wait_for_unit("endlessh.service")
+        server.wait_for_open_port(2222)
+        client.succeed("nc -dvW5 server 2222")
+
+    with subtest("Privileged"):
+        activate_specialisation("privileged")
+        server.wait_for_unit("endlessh.service")
+        server.wait_for_open_port(22)
+        client.succeed("nc -dvW5 server 22")
+  '';
+})
diff --git a/nixos/tests/evcc.nix b/nixos/tests/evcc.nix
new file mode 100644
index 00000000000..c223977a9d8
--- /dev/null
+++ b/nixos/tests/evcc.nix
@@ -0,0 +1,97 @@
+import ./make-test-python.nix ({ pkgs, lib, ...} :
+
+{
+  name = "evcc";
+  meta.maintainers = with lib.maintainers; [ hexa ];
+
+  nodes = {
+    machine = { config, ... }: {
+      services.evcc = {
+        enable = true;
+        settings = {
+          network = {
+            schema = "http";
+            host = "localhost";
+            port = 7070;
+          };
+
+          log = "info";
+
+          site = {
+            title = "NixOS Test";
+            meters = {
+              grid = "grid";
+              pv = "pv";
+            };
+          };
+
+          meters = [ {
+            type = "custom";
+            name = "grid";
+            power = {
+              source = "script";
+              cmd = "/bin/sh -c 'echo -4500'";
+            };
+          } {
+            type = "custom";
+            name = "pv";
+            power = {
+              source = "script";
+              cmd = "/bin/sh -c 'echo 7500'";
+            };
+          } ];
+
+          chargers = [ {
+            name = "dummy-charger";
+            type = "custom";
+            status = {
+              source = "script";
+              cmd = "/bin/sh -c 'echo charger status F'";
+            };
+            enabled = {
+              source = "script";
+              cmd = "/bin/sh -c 'echo charger enabled state false'";
+            };
+            enable = {
+              source = "script";
+              cmd = "/bin/sh -c 'echo set charger enabled state true'";
+            };
+            maxcurrent = {
+              source = "script";
+              cmd = "/bin/sh -c 'echo set charger max current 7200'";
+            };
+          } ];
+
+          loadpoints = [ {
+            title = "Dummy";
+            charger = "dummy-charger";
+          } ];
+        };
+      };
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    machine.wait_for_unit("evcc.service")
+    machine.wait_for_open_port(7070)
+
+    with subtest("Check package version propagates into frontend"):
+        machine.fail(
+            "curl --fail http://localhost:7070 | grep '0.0.1-alpha'"
+        )
+        machine.succeed(
+            "curl --fail http://localhost:7070 | grep '${pkgs.evcc.version}'"
+        )
+
+    with subtest("Check journal for errors"):
+        _, output = machine.execute("journalctl -o cat -u evcc.service")
+        assert "FATAL" not in output
+        assert "ERROR" not in output
+
+    with subtest("Check systemd hardening"):
+        _, output = machine.execute("systemd-analyze security evcc.service | grep -v '✓'")
+        machine.log(output)
+  '';
+})
diff --git a/nixos/tests/firefox.nix b/nixos/tests/firefox.nix
index 63ccc6efb5b..3f9cea6662f 100644
--- a/nixos/tests/firefox.nix
+++ b/nixos/tests/firefox.nix
@@ -1,5 +1,14 @@
-import ./make-test-python.nix ({ pkgs, firefoxPackage, ... }: {
-  name = "firefox";
+import ./make-test-python.nix ({ pkgs, firefoxPackage, ... }:
+let firefoxPackage' = firefoxPackage.override (args: {
+      extraPrefsFiles = (args.extraPrefsFiles or []) ++ [
+        # make sure that autoplay is enabled by default for the audio test
+        (builtins.toString (builtins.toFile "autoplay-pref.js" ''defaultPref("media.autoplay.default",0);''))
+      ];
+  });
+
+in
+{
+  name = firefoxPackage'.unwrapped.pname;
   meta = with pkgs.lib.maintainers; {
     maintainers = [ eelco shlevy ];
   };
@@ -9,7 +18,7 @@ import ./make-test-python.nix ({ pkgs, firefoxPackage, ... }: {
 
     { imports = [ ./common/x11.nix ];
       environment.systemPackages = [
-        firefoxPackage
+        firefoxPackage'
         pkgs.xdotool
       ];
 
@@ -88,7 +97,7 @@ import ./make-test-python.nix ({ pkgs, firefoxPackage, ... }: {
 
       with subtest("Wait until Firefox has finished loading the Valgrind docs page"):
           machine.execute(
-              "xterm -e 'firefox file://${pkgs.valgrind.doc}/share/doc/valgrind/html/index.html' >&2 &"
+              "xterm -e '${firefoxPackage'.unwrapped.binaryName} file://${pkgs.valgrind.doc}/share/doc/valgrind/html/index.html' >&2 &"
           )
           machine.wait_for_window("Valgrind")
           machine.sleep(40)
@@ -96,7 +105,7 @@ import ./make-test-python.nix ({ pkgs, firefoxPackage, ... }: {
       with subtest("Check whether Firefox can play sound"):
           with record_audio(machine):
               machine.succeed(
-                  "firefox file://${pkgs.sound-theme-freedesktop}/share/sounds/freedesktop/stereo/phone-incoming-call.oga >&2 &"
+                  "${firefoxPackage'.unwrapped.binaryName} file://${pkgs.sound-theme-freedesktop}/share/sounds/freedesktop/stereo/phone-incoming-call.oga >&2 &"
               )
               wait_for_sound(machine)
           machine.copy_from_vm("/tmp/record.wav")
diff --git a/nixos/tests/freenet.nix b/nixos/tests/freenet.nix
new file mode 100644
index 00000000000..96dbb4caa12
--- /dev/null
+++ b/nixos/tests/freenet.nix
@@ -0,0 +1,19 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "freenet";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ nagy ];
+  };
+
+  nodes = {
+    machine = { ... }: {
+      services.freenet.enable = true;
+    };
+  };
+
+  testScript = ''
+    machine.wait_for_unit("freenet.service")
+    machine.wait_for_open_port(8888)
+    machine.wait_until_succeeds("curl -sfL http://localhost:8888/ | grep Freenet")
+    machine.succeed("systemctl stop freenet")
+  '';
+})
diff --git a/nixos/tests/freshrss.nix b/nixos/tests/freshrss.nix
new file mode 100644
index 00000000000..7bdbf29e923
--- /dev/null
+++ b/nixos/tests/freshrss.nix
@@ -0,0 +1,19 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }: {
+  name = "freshrss";
+  meta.maintainers = with lib.maintainers; [ etu stunkymonkey ];
+
+  nodes.machine = { pkgs, ... }: {
+    services.freshrss = {
+      enable = true;
+      baseUrl = "http://localhost";
+      passwordFile = pkgs.writeText "password" "secret";
+    };
+  };
+
+  testScript = ''
+    machine.wait_for_unit("multi-user.target")
+    machine.wait_for_open_port(80)
+    response = machine.succeed("curl -vvv -s -H 'Host: freshrss' http://127.0.0.1:80/i/")
+    assert '<title>Login · FreshRSS</title>' in response, "Login page didn't load successfully"
+  '';
+})
diff --git a/nixos/tests/fscrypt.nix b/nixos/tests/fscrypt.nix
new file mode 100644
index 00000000000..03367979359
--- /dev/null
+++ b/nixos/tests/fscrypt.nix
@@ -0,0 +1,50 @@
+import ./make-test-python.nix ({ ... }:
+{
+  name = "fscrypt";
+
+  nodes.machine = { pkgs, ... }: {
+    imports = [ ./common/user-account.nix ];
+    security.pam.enableFscrypt = true;
+  };
+
+  testScript = ''
+    def login_as_alice():
+        machine.wait_until_tty_matches("1", "login: ")
+        machine.send_chars("alice\n")
+        machine.wait_until_tty_matches("1", "Password: ")
+        machine.send_chars("foobar\n")
+        machine.wait_until_tty_matches("1", "alice\@machine")
+
+
+    def logout():
+        machine.send_chars("logout\n")
+        machine.wait_until_tty_matches("1", "login: ")
+
+
+    machine.wait_for_unit("default.target")
+
+    with subtest("Enable fscrypt on filesystem"):
+        machine.succeed("tune2fs -O encrypt /dev/vda")
+        machine.succeed("fscrypt setup --quiet --force --time=1ms")
+
+    with subtest("Set up alice with an fscrypt-enabled home directory"):
+        machine.succeed("(echo foobar; echo foobar) | passwd alice")
+        machine.succeed("chown -R alice.users ~alice")
+        machine.succeed("echo foobar | fscrypt encrypt --skip-unlock --source=pam_passphrase --user=alice /home/alice")
+
+    with subtest("Create file as alice"):
+      login_as_alice()
+      machine.succeed("echo hello > /home/alice/world")
+      logout()
+      # Wait for logout to be processed
+      machine.sleep(1)
+
+    with subtest("File should not be readable without being logged in as alice"):
+      machine.fail("cat /home/alice/world")
+
+    with subtest("File should be readable again as alice"):
+      login_as_alice()
+      machine.succeed("cat /home/alice/world")
+      logout()
+  '';
+})
diff --git a/nixos/tests/garage.nix b/nixos/tests/garage.nix
new file mode 100644
index 00000000000..dc1f83e7f8f
--- /dev/null
+++ b/nixos/tests/garage.nix
@@ -0,0 +1,169 @@
+import ./make-test-python.nix ({ pkgs, ...} :
+let
+    mkNode = { replicationMode, publicV6Address ? "::1" }: { pkgs, ... }: {
+      networking.interfaces.eth1.ipv6.addresses = [{
+        address = publicV6Address;
+        prefixLength = 64;
+      }];
+
+      networking.firewall.allowedTCPPorts = [ 3901 3902 ];
+
+      services.garage = {
+        enable = true;
+        settings = {
+          replication_mode = replicationMode;
+
+          rpc_bind_addr = "[::]:3901";
+          rpc_public_addr = "[${publicV6Address}]:3901";
+          rpc_secret = "5c1915fa04d0b6739675c61bf5907eb0fe3d9c69850c83820f51b4d25d13868c";
+
+          s3_api = {
+            s3_region = "garage";
+            api_bind_addr = "[::]:3900";
+            root_domain = ".s3.garage";
+          };
+
+          s3_web = {
+            bind_addr = "[::]:3902";
+            root_domain = ".web.garage";
+            index = "index.html";
+          };
+        };
+      };
+      environment.systemPackages = [ pkgs.minio-client ];
+
+      # Garage requires at least 1GiB of free disk space to run.
+      virtualisation.diskSize = 2 * 1024;
+    };
+
+
+in {
+  name = "garage";
+  meta = {
+    maintainers = with pkgs.lib.maintainers; [ raitobezarius ];
+  };
+
+  nodes = {
+    single_node = mkNode { replicationMode = "none"; };
+    node1 = mkNode { replicationMode = 3; publicV6Address = "fc00:1::1"; };
+    node2 = mkNode { replicationMode = 3; publicV6Address = "fc00:1::2"; };
+    node3 = mkNode { replicationMode = 3; publicV6Address = "fc00:1::3"; };
+    node4 = mkNode { replicationMode = 3; publicV6Address = "fc00:1::4"; };
+  };
+
+  testScript = ''
+    from typing import List
+    from dataclasses import dataclass
+    import re
+    start_all()
+
+    cur_version_regex = re.compile(r'Current cluster layout version: (?P<ver>\d*)')
+    key_creation_regex = re.compile(r'Key name: (?P<key_name>.*)\nKey ID: (?P<key_id>.*)\nSecret key: (?P<secret_key>.*)')
+
+    @dataclass
+    class S3Key:
+       key_name: str
+       key_id: str
+       secret_key: str
+
+    @dataclass
+    class GarageNode:
+       node_id: str
+       host: str
+
+    def get_node_fqn(machine: Machine) -> GarageNode:
+      node_id, host = machine.succeed("garage node id").split('@')
+      return GarageNode(node_id=node_id, host=host)
+
+    def get_node_id(machine: Machine) -> str:
+      return get_node_fqn(machine).node_id
+
+    def get_layout_version(machine: Machine) -> int:
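+      # "garage layout show" reports the current layout version; "layout apply" below expects the next one, hence the +1.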
+      version_data = machine.succeed("garage layout show")
+      m = cur_version_regex.search(version_data)
+      if m and m.group('ver') is not None:
+        return int(m.group('ver')) + 1
+      else:
+        raise ValueError('Cannot find current layout version')
+
+    def apply_garage_layout(machine: Machine, layouts: List[str]):
+       for layout in layouts:
+          machine.succeed(f"garage layout assign {layout}")
+       version = get_layout_version(machine)
+       machine.succeed(f"garage layout apply --version {version}")
+
+    def create_api_key(machine: Machine, key_name: str) -> S3Key:
+       output = machine.succeed(f"garage key new --name {key_name}")
+       m = key_creation_regex.match(output)
+       if not m or not m.group('key_id') or not m.group('secret_key'):
+          raise ValueError('Cannot parse API key data')
+       return S3Key(key_name=key_name, key_id=m.group('key_id'), secret_key=m.group('secret_key'))
+
+    def get_api_key(machine: Machine, key_pattern: str) -> S3Key:
+       output = machine.succeed(f"garage key info {key_pattern}")
+       m = key_creation_regex.match(output)
+       if not m or not m.group('key_name') or not m.group('key_id') or not m.group('secret_key'):
+           raise ValueError('Cannot parse API key data')
+       return S3Key(key_name=m.group('key_name'), key_id=m.group('key_id'), secret_key=m.group('secret_key'))
+
+    def test_bucket_writes(node):
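+      # Create a bucket and an API key, grant read/write access, then round-trip an object through the S3 API with mc.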
+      node.succeed("garage bucket create test-bucket")
+      s3_key = create_api_key(node, "test-api-key")
+      node.succeed("garage bucket allow --read --write test-bucket --key test-api-key")
+      other_s3_key = get_api_key(node, 'test-api-key')
+      assert s3_key.secret_key == other_s3_key.secret_key
+      node.succeed(
+        f"mc alias set test-garage http://[::1]:3900 {s3_key.key_id} {s3_key.secret_key} --api S3v4"
+      )
+      node.succeed("echo test | mc pipe test-garage/test-bucket/test.txt")
+      assert node.succeed("mc cat test-garage/test-bucket/test.txt").strip() == "test"
+
+    def test_bucket_over_http(node, bucket='test-bucket', url=None):
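+      # Enable website access for the bucket and fetch its index.html through Garage's web endpoint.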
+      if url is None:
+         url = f"{bucket}.web.garage"
+
+      node.succeed(f'garage bucket website --allow {bucket}')
+      node.succeed(f'echo hello world | mc pipe test-garage/{bucket}/index.html')
+      assert (node.succeed(f"curl -H 'Host: {url}' http://localhost:3902")).strip() == 'hello world'
+
+    with subtest("Garage works as a single-node S3 storage"):
+      single_node.wait_for_unit("garage.service")
+      single_node.wait_for_open_port(3900)
+      # Now Garage is initialized.
+      single_node_id = get_node_id(single_node)
+      apply_garage_layout(single_node, [f'-z qemutest -c 1 "{single_node_id}"'])
+      # Now Garage is operational.
+      test_bucket_writes(single_node)
+      test_bucket_over_http(single_node)
+
+    with subtest("Garage works as a multi-node S3 storage"):
+      nodes = ('node1', 'node2', 'node3', 'node4')
+      rev_machines = {m.name: m for m in machines}
+      def get_machine(key): return rev_machines[key]
+      for key in nodes:
+        node = get_machine(key)
+        node.wait_for_unit("garage.service")
+        node.wait_for_open_port(3900)
+
+      # Garage is initialized on all nodes.
+      node_ids = {key: get_node_fqn(get_machine(key)) for key in nodes}
+
+      for key in nodes:
+        for other_key in nodes:
+          if other_key != key:
+            other_id = node_ids[other_key]
+            get_machine(key).succeed(f"garage node connect {other_id.node_id}@{other_id.host}")
+
+      # Provide multiple zones for the nodes.
+      zones = ["nixcon", "nixcon", "paris_meetup", "fosdem"]
+      apply_garage_layout(node1,
+      [
+        f'{ndata.node_id} -z {zones[index]} -c 1'
+        for index, ndata in enumerate(node_ids.values())
+      ])
+      # Now Garage is operational.
+      test_bucket_writes(node1)
+      for node in nodes:
+         test_bucket_over_http(get_machine(node))
+  '';
+})
diff --git a/nixos/tests/ghostunnel.nix b/nixos/tests/ghostunnel.nix
index 8bea6485402..91a7b7085f6 100644
--- a/nixos/tests/ghostunnel.nix
+++ b/nixos/tests/ghostunnel.nix
@@ -1,4 +1,5 @@
 import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "ghostunnel";
   nodes = {
     backend = { pkgs, ... }: {
       services.nginx.enable = true;
diff --git a/nixos/tests/gitea.nix b/nixos/tests/gitea.nix
index 037fc7b31bf..68a2566c119 100644
--- a/nixos/tests/gitea.nix
+++ b/nixos/tests/gitea.nix
@@ -18,7 +18,7 @@ let
         services.gitea = {
           enable = true;
           database = { inherit type; };
-          disableRegistration = true;
+          settings.service.DISABLE_REGISTRATION = true;
         };
         environment.systemPackages = [ pkgs.gitea pkgs.jq ];
         services.openssh.enable = true;
diff --git a/nixos/tests/gitlab.nix b/nixos/tests/gitlab.nix
index 4f7d3f07f06..d9d75d1cbd8 100644
--- a/nixos/tests/gitlab.nix
+++ b/nixos/tests/gitlab.nix
@@ -38,6 +38,8 @@ in {
       virtualisation.memorySize = if pkgs.stdenv.is64bit then 4096 else 2047;
       virtualisation.cores = 4;
       virtualisation.useNixStoreImage = true;
+      virtualisation.writableStore = false;
+
       systemd.services.gitlab.serviceConfig.Restart = mkForce "no";
       systemd.services.gitlab-workhorse.serviceConfig.Restart = mkForce "no";
       systemd.services.gitaly.serviceConfig.Restart = mkForce "no";
diff --git a/nixos/tests/gollum.nix b/nixos/tests/gollum.nix
new file mode 100644
index 00000000000..833db87f2f3
--- /dev/null
+++ b/nixos/tests/gollum.nix
@@ -0,0 +1,14 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "gollum";
+
+  nodes = {
+    webserver = { pkgs, lib, ... }: {
+      services.gollum.enable = true;
+    };
+  };
+
+  testScript = { nodes, ... }: ''
+    webserver.wait_for_unit("gollum")
+    webserver.wait_for_open_port(${toString nodes.webserver.config.services.gollum.port})
+  '';
+})
diff --git a/nixos/tests/grafana.nix b/nixos/tests/grafana/basic.nix
index 174d664d877..8bf4caad7fb 100644
--- a/nixos/tests/grafana.nix
+++ b/nixos/tests/grafana/basic.nix
@@ -1,4 +1,4 @@
-import ./make-test-python.nix ({ lib, pkgs, ... }:
+import ../make-test-python.nix ({ lib, pkgs, ... }:
 
 let
   inherit (lib) mkMerge nameValuePair maintainers;
@@ -6,23 +6,47 @@ let
   baseGrafanaConf = {
     services.grafana = {
       enable = true;
-      addr = "localhost";
-      analytics.reporting.enable = false;
-      domain = "localhost";
-      security = {
-        adminUser = "testadmin";
-        adminPassword = "snakeoilpwd";
+      settings = {
+        analytics.reporting_enabled = false;
+
+        server = {
+          http_addr = "localhost";
+          domain = "localhost";
+        };
+
+        security = {
+          admin_user = "testadmin";
+          admin_password = "snakeoilpwd";
+        };
       };
     };
   };
 
   extraNodeConfs = {
+    sqlite = {};
+
+    socket = { config, ... }: {
+      services.grafana.settings.server = {
+        protocol = "socket";
+        socket = "/run/grafana/sock";
+        socket_gid = config.users.groups.nginx.gid;
+      };
+
+      users.users.grafana.extraGroups = [ "nginx" ];
+
+      services.nginx = {
+        enable = true;
+        recommendedProxySettings = true;
+        virtualHosts."_".locations."/".proxyPass = "http://unix:/run/grafana/sock";
+      };
+    };
+
     declarativePlugins = {
       services.grafana.declarativePlugins = [ pkgs.grafanaPlugins.grafana-clock-panel ];
     };
 
     postgresql = {
-      services.grafana.database = {
+      services.grafana.settings.database = {
         host = "127.0.0.1:5432";
         user = "grafana";
       };
@@ -38,7 +62,7 @@ let
     };
 
     mysql = {
-      services.grafana.database.user = "grafana";
+      services.grafana.settings.database.user = "grafana";
       services.mysql = {
         enable = true;
         ensureDatabases = [ "grafana" ];
@@ -52,14 +76,9 @@ let
     };
   };
 
-  nodes = builtins.listToAttrs (map (dbName:
-    nameValuePair dbName (mkMerge [
-    baseGrafanaConf
-    (extraNodeConfs.${dbName} or {})
-  ])) [ "sqlite" "declarativePlugins" "postgresql" "mysql" ]);
-
+  nodes = builtins.mapAttrs (_: val: mkMerge [ val baseGrafanaConf ]) extraNodeConfs;
 in {
-  name = "grafana";
+  name = "grafana-basic";
 
   meta = with maintainers; {
     maintainers = [ willibutz ];
@@ -81,18 +100,32 @@ in {
     with subtest("Successful API query as admin user with sqlite db"):
         sqlite.wait_for_unit("grafana.service")
         sqlite.wait_for_open_port(3000)
+        print(sqlite.succeed(
+            "curl -sSfN -u testadmin:snakeoilpwd http://127.0.0.1:3000/api/org/users -i"
+        ))
         sqlite.succeed(
-            "curl -sSfN -u testadmin:snakeoilpwd http://127.0.0.1:3000/api/org/users | grep testadmin\@localhost"
+            "curl -sSfN -u testadmin:snakeoilpwd http://127.0.0.1:3000/api/org/users | grep admin\@localhost"
         )
         sqlite.shutdown()
 
+    with subtest("Successful API query as admin user with sqlite db listening on socket"):
+        socket.wait_for_unit("grafana.service")
+        socket.wait_for_open_port(80)
+        print(socket.succeed(
+            "curl -sSfN -u testadmin:snakeoilpwd http://127.0.0.1/api/org/users -i"
+        ))
+        socket.succeed(
+            "curl -sSfN -u testadmin:snakeoilpwd http://127.0.0.1/api/org/users | grep admin\@localhost"
+        )
+        socket.shutdown()
+
     with subtest("Successful API query as admin user with postgresql db"):
         postgresql.wait_for_unit("grafana.service")
         postgresql.wait_for_unit("postgresql.service")
         postgresql.wait_for_open_port(3000)
         postgresql.wait_for_open_port(5432)
         postgresql.succeed(
-            "curl -sSfN -u testadmin:snakeoilpwd http://127.0.0.1:3000/api/org/users | grep testadmin\@localhost"
+            "curl -sSfN -u testadmin:snakeoilpwd http://127.0.0.1:3000/api/org/users | grep admin\@localhost"
         )
         postgresql.shutdown()
 
@@ -102,7 +135,7 @@ in {
         mysql.wait_for_open_port(3000)
         mysql.wait_for_open_port(3306)
         mysql.succeed(
-            "curl -sSfN -u testadmin:snakeoilpwd http://127.0.0.1:3000/api/org/users | grep testadmin\@localhost"
+            "curl -sSfN -u testadmin:snakeoilpwd http://127.0.0.1:3000/api/org/users | grep admin\@localhost"
         )
         mysql.shutdown()
   '';
diff --git a/nixos/tests/grafana/default.nix b/nixos/tests/grafana/default.nix
new file mode 100644
index 00000000000..9c262257180
--- /dev/null
+++ b/nixos/tests/grafana/default.nix
@@ -0,0 +1,9 @@
+{ system ? builtins.currentSystem
+, config ? { }
+, pkgs ? import ../../.. { inherit system config; }
+}:
+
+{
+  basic = import ./basic.nix { inherit system pkgs; };
+  provision = import ./provision { inherit system pkgs; };
+}
diff --git a/nixos/tests/grafana/provision/contact-points.yaml b/nixos/tests/grafana/provision/contact-points.yaml
new file mode 100644
index 00000000000..2a5f14e75e2
--- /dev/null
+++ b/nixos/tests/grafana/provision/contact-points.yaml
@@ -0,0 +1,9 @@
+apiVersion: 1
+
+contactPoints:
+  - name: "Test Contact Point"
+    receivers:
+      - uid: "test_contact_point"
+        type: prometheus-alertmanager
+        settings:
+          url: http://localhost:9000
diff --git a/nixos/tests/grafana/provision/dashboards.yaml b/nixos/tests/grafana/provision/dashboards.yaml
new file mode 100644
index 00000000000..dc83fe6b892
--- /dev/null
+++ b/nixos/tests/grafana/provision/dashboards.yaml
@@ -0,0 +1,6 @@
+apiVersion: 1
+
+providers:
+  - name: 'default'
+    options:
+      path: /var/lib/grafana/dashboards
diff --git a/nixos/tests/grafana/provision/datasources.yaml b/nixos/tests/grafana/provision/datasources.yaml
new file mode 100644
index 00000000000..ccf9481db7f
--- /dev/null
+++ b/nixos/tests/grafana/provision/datasources.yaml
@@ -0,0 +1,7 @@
+apiVersion: 1
+
+datasources:
+  - name: 'Test Datasource'
+    type: 'testdata'
+    access: 'proxy'
+    uid: 'test_datasource'
diff --git a/nixos/tests/grafana/provision/default.nix b/nixos/tests/grafana/provision/default.nix
new file mode 100644
index 00000000000..1eb927632eb
--- /dev/null
+++ b/nixos/tests/grafana/provision/default.nix
@@ -0,0 +1,251 @@
+import ../../make-test-python.nix ({ lib, pkgs, ... }:
+
+let
+  inherit (lib) mkMerge nameValuePair maintainers;
+
+  baseGrafanaConf = {
+    services.grafana = {
+      enable = true;
+      provision.enable = true;
+      settings = {
+        analytics.reporting_enabled = false;
+
+        server = {
+          http_addr = "localhost";
+          domain = "localhost";
+        };
+
+        security = {
+          admin_user = "testadmin";
+          admin_password = "$__file{${pkgs.writeText "pwd" "snakeoilpwd"}}";
+        };
+      };
+    };
+
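+    # Symlink the test dashboard into the directory that the provisioned dashboard provider loads from.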
+    systemd.tmpfiles.rules = [
+      "L /var/lib/grafana/dashboards/test.json 0700 grafana grafana - ${pkgs.writeText "test.json" (builtins.readFile ./test_dashboard.json)}"
+    ];
+  };
+
+  extraNodeConfs = {
+    provisionLegacyNotifiers = {
+      services.grafana.provision = {
+        datasources.settings = {
+          apiVersion = 1;
+          datasources = [{
+            name = "Test Datasource";
+            type = "testdata";
+            access = "proxy";
+            uid = "test_datasource";
+          }];
+        };
+        dashboards.settings = {
+          apiVersion = 1;
+          providers = [{
+            name = "default";
+            options.path = "/var/lib/grafana/dashboards";
+          }];
+        };
+        notifiers = [{
+          uid = "test_notifiers";
+          name = "Test Notifiers";
+          type = "email";
+          settings = {
+            singleEmail = true;
+            addresses = "test@test.com";
+          };
+        }];
+      };
+    };
+    provisionNix = {
+      services.grafana.provision = {
+        datasources.settings = {
+          apiVersion = 1;
+          datasources = [{
+            name = "Test Datasource";
+            type = "testdata";
+            access = "proxy";
+            uid = "test_datasource";
+          }];
+        };
+
+        dashboards.settings = {
+          apiVersion = 1;
+          providers = [{
+            name = "default";
+            options.path = "/var/lib/grafana/dashboards";
+          }];
+        };
+
+        alerting = {
+          rules.settings = {
+            groups = [{
+              name = "test_rule_group";
+              folder = "test_folder";
+              interval = "60s";
+              rules = [{
+                uid = "test_rule";
+                title = "Test Rule";
+                condition = "A";
+                data = [{
+                  refId = "A";
+                  datasourceUid = "-100";
+                  model = {
+                    conditions = [{
+                      evaluator = {
+                        params = [ 3 ];
+                        type = "gt";
+                      };
+                      operator.type = "and";
+                      query.params = [ "A" ];
+                      reducer.type = "last";
+                      type = "query";
+                    }];
+                    datasource = {
+                      type = "__expr__";
+                      uid = "-100";
+                    };
+                    expression = "1==0";
+                    intervalMs = 1000;
+                    maxDataPoints = 43200;
+                    refId = "A";
+                    type = "math";
+                  };
+                }];
+                for = "60s";
+              }];
+            }];
+          };
+
+          contactPoints.settings = {
+            contactPoints = [{
+              name = "Test Contact Point";
+              receivers = [{
+                uid = "test_contact_point";
+                type = "prometheus-alertmanager";
+                settings.url = "http://localhost:9000";
+              }];
+            }];
+          };
+
+          policies.settings = {
+            policies = [{
+              receiver = "Test Contact Point";
+            }];
+          };
+
+          templates.settings = {
+            templates = [{
+              name = "Test Template";
+              template = "Test message";
+            }];
+          };
+
+          muteTimings.settings = {
+            muteTimes = [{
+              name = "Test Mute Timing";
+            }];
+          };
+        };
+      };
+    };
+
+    provisionYaml = {
+      services.grafana.provision = {
+        datasources.path = ./datasources.yaml;
+        dashboards.path = ./dashboards.yaml;
+        alerting = {
+          rules.path = ./rules.yaml;
+          contactPoints.path = ./contact-points.yaml;
+          policies.path = ./policies.yaml;
+          templates.path = ./templates.yaml;
+          muteTimings.path = ./mute-timings.yaml;
+        };
+      };
+    };
+
+    provisionYamlDirs = let
+      mkdir = p: pkgs.writeTextDir (baseNameOf p) (builtins.readFile p);
+    in {
+      services.grafana.provision = {
+        datasources.path = mkdir ./datasources.yaml;
+        dashboards.path = mkdir ./dashboards.yaml;
+        alerting = {
+          rules.path = mkdir ./rules.yaml;
+          contactPoints.path = mkdir ./contact-points.yaml;
+          policies.path = mkdir ./policies.yaml;
+          templates.path = mkdir ./templates.yaml;
+          muteTimings.path = mkdir ./mute-timings.yaml;
+        };
+      };
+    };
+  };
+
+  nodes = builtins.mapAttrs (_: val: mkMerge [ val baseGrafanaConf ]) extraNodeConfs;
+in {
+  name = "grafana-provision";
+
+  meta = with maintainers; {
+    maintainers = [ kfears willibutz ];
+  };
+
+  inherit nodes;
+
+  testScript = ''
+    start_all()
+
+    nodeNix = ("Nix (new format)", provisionNix)
+    nodeYaml = ("Nix (YAML)", provisionYaml)
+    nodeYamlDir = ("Nix (YAML in dirs)", provisionYamlDirs)
+
+    for description, machine in [nodeNix, nodeYaml, nodeYamlDir]:
+        with subtest(f"Should start provision node: {description}"):
+            machine.wait_for_unit("grafana.service")
+            machine.wait_for_open_port(3000)
+
+        with subtest(f"Successful datasource provision with {description}"):
+            machine.succeed(
+                "curl -sSfN -u testadmin:snakeoilpwd http://127.0.0.1:3000/api/datasources/uid/test_datasource | grep Test\ Datasource"
+            )
+
+        with subtest(f"Successful dashboard provision with {description}"):
+            machine.succeed(
+                "curl -sSfN -u testadmin:snakeoilpwd http://127.0.0.1:3000/api/dashboards/uid/test_dashboard | grep Test\ Dashboard"
+            )
+
+        with subtest(f"Successful rule provision with {description}"):
+            machine.succeed(
+                "curl -sSfN -u testadmin:snakeoilpwd http://127.0.0.1:3000/api/v1/provisioning/alert-rules/test_rule | grep Test\ Rule"
+            )
+
+        with subtest(f"Successful contact point provision with {description}"):
+            machine.succeed(
+                "curl -sSfN -u testadmin:snakeoilpwd http://127.0.0.1:3000/api/v1/provisioning/contact-points | grep Test\ Contact\ Point"
+            )
+
+        with subtest(f"Successful policy provision with {description}"):
+            machine.succeed(
+                "curl -sSfN -u testadmin:snakeoilpwd http://127.0.0.1:3000/api/v1/provisioning/policies | grep Test\ Contact\ Point"
+            )
+
+        with subtest(f"Successful template provision with {description}"):
+            machine.succeed(
+                "curl -sSfN -u testadmin:snakeoilpwd http://127.0.0.1:3000/api/v1/provisioning/templates | grep Test\ Template"
+            )
+
+        with subtest("Successful mute timings provision with {description}"):
+            machine.succeed(
+                "curl -sSfN -u testadmin:snakeoilpwd http://127.0.0.1:3000/api/v1/provisioning/mute-timings | grep Test\ Mute\ Timing"
+            )
+
+    with subtest("Successful notifiers provision"):
+        provisionLegacyNotifiers.wait_for_unit("grafana.service")
+        provisionLegacyNotifiers.wait_for_open_port(3000)
+        print(provisionLegacyNotifiers.succeed(
+            "curl -sSfN -u testadmin:snakeoilpwd http://127.0.0.1:3000/api/alert-notifications/uid/test_notifiers"
+        ))
+        provisionLegacyNotifiers.succeed(
+            "curl -sSfN -u testadmin:snakeoilpwd http://127.0.0.1:3000/api/alert-notifications/uid/test_notifiers | grep Test\ Notifiers"
+        )
+  '';
+})
diff --git a/nixos/tests/grafana/provision/mute-timings.yaml b/nixos/tests/grafana/provision/mute-timings.yaml
new file mode 100644
index 00000000000..1f47f7c18f0
--- /dev/null
+++ b/nixos/tests/grafana/provision/mute-timings.yaml
@@ -0,0 +1,4 @@
+apiVersion: 1
+
+muteTimes:
+  - name: "Test Mute Timing"
diff --git a/nixos/tests/grafana/provision/policies.yaml b/nixos/tests/grafana/provision/policies.yaml
new file mode 100644
index 00000000000..eb31126c4ba
--- /dev/null
+++ b/nixos/tests/grafana/provision/policies.yaml
@@ -0,0 +1,4 @@
+apiVersion: 1
+
+policies:
+  - receiver: "Test Contact Point"
diff --git a/nixos/tests/grafana/provision/rules.yaml b/nixos/tests/grafana/provision/rules.yaml
new file mode 100644
index 00000000000..946539c8cb6
--- /dev/null
+++ b/nixos/tests/grafana/provision/rules.yaml
@@ -0,0 +1,36 @@
+apiVersion: 1
+
+groups:
+  - name: "test_rule_group"
+    folder: "test_group"
+    interval: 60s
+    rules:
+      - uid: "test_rule"
+        title: "Test Rule"
+        condition: A
+        data:
+          - refId: A
+            datasourceUid: '-100'
+            model:
+              conditions:
+                - evaluator:
+                    params:
+                      - 3
+                    type: gt
+                  operator:
+                    type: and
+                  query:
+                    params:
+                      - A
+                  reducer:
+                    type: last
+                  type: query
+              datasource:
+                type: __expr__
+                uid: '-100'
+              expression: 1==0
+              intervalMs: 1000
+              maxDataPoints: 43200
+              refId: A
+              type: math
+        for: 60s
diff --git a/nixos/tests/grafana/provision/templates.yaml b/nixos/tests/grafana/provision/templates.yaml
new file mode 100644
index 00000000000..09df247b345
--- /dev/null
+++ b/nixos/tests/grafana/provision/templates.yaml
@@ -0,0 +1,5 @@
+apiVersion: 1
+
+templates:
+  - name: "Test Template"
+    template: "Test message"
diff --git a/nixos/tests/grafana/provision/test_dashboard.json b/nixos/tests/grafana/provision/test_dashboard.json
new file mode 100644
index 00000000000..6e7a5b37f22
--- /dev/null
+++ b/nixos/tests/grafana/provision/test_dashboard.json
@@ -0,0 +1,47 @@
+{
+  "annotations": {
+    "list": [
+      {
+        "builtIn": 1,
+        "datasource": {
+          "type": "grafana",
+          "uid": "-- Grafana --"
+        },
+        "enable": true,
+        "hide": true,
+        "iconColor": "rgba(0, 211, 255, 1)",
+        "name": "Annotations & Alerts",
+        "target": {
+          "limit": 100,
+          "matchAny": false,
+          "tags": [],
+          "type": "dashboard"
+        },
+        "type": "dashboard"
+      }
+    ]
+  },
+  "editable": true,
+  "fiscalYearStartMonth": 0,
+  "graphTooltip": 0,
+  "id": 28,
+  "links": [],
+  "liveNow": false,
+  "panels": [],
+  "schemaVersion": 37,
+  "style": "dark",
+  "tags": [],
+  "templating": {
+    "list": []
+  },
+  "time": {
+    "from": "now-6h",
+    "to": "now"
+  },
+  "timepicker": {},
+  "timezone": "",
+  "title": "Test Dashboard",
+  "uid": "test_dashboard",
+  "version": 1,
+  "weekStart": ""
+}
diff --git a/nixos/tests/graphite.nix b/nixos/tests/graphite.nix
index 496f16846ea..de6cd8a50e1 100644
--- a/nixos/tests/graphite.nix
+++ b/nixos/tests/graphite.nix
@@ -12,14 +12,8 @@ import ./make-test-python.nix ({ pkgs, ... } :
               SECRET_KEY = "abcd";
             '';
           };
-          api = {
-            enable = true;
-            port = 8082;
-            finders = [ ];
-          };
           carbon.enableCache = true;
-          seyren.enable = false;  # Implicitely requires openssl-1.0.2u which is marked insecure
-          beacon.enable = true;
+          seyren.enable = false;  # Implicitly requires openssl-1.0.2u which is marked insecure
         };
       };
   };
@@ -28,21 +22,15 @@ import ./make-test-python.nix ({ pkgs, ... } :
     start_all()
     one.wait_for_unit("default.target")
     one.wait_for_unit("graphiteWeb.service")
-    one.wait_for_unit("graphiteApi.service")
-    one.wait_for_unit("graphite-beacon.service")
     one.wait_for_unit("carbonCache.service")
     # The services above are of type "simple". systemd considers them active immediately
     # even if they're still in preStart (which takes quite long for graphiteWeb).
     # Wait for ports to open so we're sure the services are up and listening.
     one.wait_for_open_port(8080)
-    one.wait_for_open_port(8082)
     one.wait_for_open_port(2003)
     one.succeed('echo "foo 1 `date +%s`" | nc -N localhost 2003')
     one.wait_until_succeeds(
         "curl 'http://localhost:8080/metrics/find/?query=foo&format=treejson' --silent | grep foo >&2"
     )
-    one.wait_until_succeeds(
-        "curl 'http://localhost:8082/metrics/find/?query=foo&format=treejson' --silent | grep foo >&2"
-    )
   '';
 })
diff --git a/nixos/tests/grocy.nix b/nixos/tests/grocy.nix
index fe0ddd34148..48bbc9f7d3f 100644
--- a/nixos/tests/grocy.nix
+++ b/nixos/tests/grocy.nix
@@ -14,6 +14,9 @@ import ./make-test-python.nix ({ pkgs, ... }: {
   };
 
   testScript = ''
+    from base64 import b64encode
+    from urllib.parse import quote
+
     machine.start()
     machine.wait_for_open_port(80)
     machine.wait_for_unit("multi-user.target")
@@ -42,6 +45,29 @@ import ./make-test-python.nix ({ pkgs, ... }: {
 
     machine.succeed("curl -sSI http://localhost/api/tasks 2>&1 | grep '401 Unauthorized'")
 
+    file_name = "test.txt"
+    file_name_base64 = b64encode(file_name.encode('ascii')).decode('ascii')
+    file_name_base64_urlencode = quote(file_name_base64)
+
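+    # Create a sample file, upload it through the files API (file name base64-encoded and URL-quoted), then read it back and compare.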
+    machine.succeed(
+        f"echo Sample equipment manual > /tmp/{file_name}"
+    )
+
+    machine.succeed(
+        f"curl -sSf -X 'PUT' -b 'grocy_session={cookie}' "
+        + f" 'http://localhost/api/files/equipmentmanuals/{file_name_base64_urlencode}' "
+        + "  --header 'Accept: */*' "
+        + "  --header 'Content-Type: application/octet-stream' "
+        + f" --data-binary '@/tmp/{file_name}' "
+    )
+
+    machine.succeed(
+        f"curl -sSf -X 'GET' -b 'grocy_session={cookie}' "
+        + f" 'http://localhost/api/files/equipmentmanuals/{file_name_base64_urlencode}' "
+        + "  --header 'Accept: application/octet-stream' "
+        + f" | cmp /tmp/{file_name}"
+    )
+
     machine.shutdown()
   '';
 })
diff --git a/nixos/tests/hadoop/default.nix b/nixos/tests/hadoop/default.nix
index d2a97cbeffb..479690adc06 100644
--- a/nixos/tests/hadoop/default.nix
+++ b/nixos/tests/hadoop/default.nix
@@ -4,4 +4,5 @@
   all = handleTestOn [ "x86_64-linux" "aarch64-linux" ] ./hadoop.nix { inherit package; };
   hdfs = handleTestOn [ "x86_64-linux" "aarch64-linux" ] ./hdfs.nix { inherit package; };
   yarn = handleTestOn [ "x86_64-linux" "aarch64-linux" ] ./yarn.nix { inherit package; };
+  hbase = handleTestOn [ "x86_64-linux" "aarch64-linux" ] ./hbase.nix { inherit package; };
 }
diff --git a/nixos/tests/hadoop/hbase.nix b/nixos/tests/hadoop/hbase.nix
new file mode 100644
index 00000000000..d9d2dac0f65
--- /dev/null
+++ b/nixos/tests/hadoop/hbase.nix
@@ -0,0 +1,84 @@
+# Test a minimal hbase cluster
+{ pkgs, ... }:
+import ../make-test-python.nix ({ hadoop ? pkgs.hadoop, hbase ? pkgs.hbase, ... }:
+with pkgs.lib;
+{
+  name = "hadoop-hbase";
+
+  nodes = let
+    coreSite = {
+      "fs.defaultFS" = "hdfs://namenode:8020";
+    };
+    defOpts = {
+      enable = true;
+      openFirewall = true;
+    };
+    zookeeperQuorum = "zookeeper";
+  in {
+    zookeeper = { ... }: {
+      services.zookeeper.enable = true;
+      networking.firewall.allowedTCPPorts = [ 2181 ];
+    };
+    namenode = { ... }: {
+      services.hadoop = {
+        hdfs = {
+          namenode = defOpts // { formatOnInit = true; };
+        };
+        inherit coreSite;
+      };
+    };
+    datanode = { ... }: {
+      virtualisation.diskSize = 8192;
+      services.hadoop = {
+        hdfs.datanode = defOpts;
+        inherit coreSite;
+      };
+    };
+
+    master = { ... }:{
+      services.hadoop = {
+        inherit coreSite;
+        hbase = {
+          inherit zookeeperQuorum;
+          master = defOpts // { initHDFS = true; };
+        };
+      };
+    };
+    regionserver = { ... }:{
+      services.hadoop = {
+        inherit coreSite;
+        hbase = {
+          inherit zookeeperQuorum;
+          regionServer = defOpts;
+        };
+      };
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    # wait for HDFS cluster
+    namenode.wait_for_unit("hdfs-namenode")
+    namenode.wait_for_unit("network.target")
+    namenode.wait_for_open_port(8020)
+    namenode.wait_for_open_port(9870)
+    datanode.wait_for_unit("hdfs-datanode")
+    datanode.wait_for_unit("network.target")
+    datanode.wait_for_open_port(9864)
+    datanode.wait_for_open_port(9866)
+    datanode.wait_for_open_port(9867)
+
+    # wait for ZK
+    zookeeper.wait_for_unit("zookeeper")
+    zookeeper.wait_for_open_port(2181)
+
+    # wait for HBase to start up
+    master.wait_for_unit("hbase-master")
+    regionserver.wait_for_unit("hbase-regionserver")
+
+    assert "1 active master, 0 backup masters, 1 servers" in master.succeed("echo status | HADOOP_USER_NAME=hbase hbase shell -n")
+    regionserver.wait_until_succeeds("echo \"create 't1','f1'\" | HADOOP_USER_NAME=hbase hbase shell -n")
+    assert "NAME => 'f1'" in regionserver.succeed("echo \"describe 't1'\" | HADOOP_USER_NAME=hbase hbase shell -n")
+  '';
+})
diff --git a/nixos/tests/hadoop/yarn.nix b/nixos/tests/hadoop/yarn.nix
index 1bf8e3831f6..08c8ff857d8 100644
--- a/nixos/tests/hadoop/yarn.nix
+++ b/nixos/tests/hadoop/yarn.nix
@@ -19,7 +19,7 @@ import ../make-test-python.nix ({ package, ... }: {
           enable = true;
           openFirewall = true;
         };
-        yarnSite = options.services.hadoop.yarnSite.default // {
+        yarnSite = {
           "yarn.resourcemanager.hostname" = "resourcemanager";
           "yarn.nodemanager.log-dirs" = "/tmp/userlogs";
         };
diff --git a/nixos/tests/hardened.nix b/nixos/tests/hardened.nix
index 3afa8ebf2b5..ccb85816854 100644
--- a/nixos/tests/hardened.nix
+++ b/nixos/tests/hardened.nix
@@ -12,6 +12,11 @@ import ./make-test-python.nix ({ pkgs, ... } : {
       imports = [ ../modules/profiles/hardened.nix ];
       environment.memoryAllocator.provider = "graphene-hardened";
       nix.settings.sandbox = false;
+      nixpkgs.overlays = [
+        (final: super: {
+          dhcpcd = super.dhcpcd.override { enablePrivSep = false; };
+        })
+      ];
       virtualisation.emptyDiskImages = [ 4096 ];
       boot.initrd.postDeviceCommands = ''
         ${pkgs.dosfstools}/bin/mkfs.vfat -n EFISYS /dev/vdb
@@ -85,8 +90,8 @@ import ./make-test-python.nix ({ pkgs, ... } : {
 
       # Test Nix dæmon usage
       with subtest("nix-daemon cannot be used by all users"):
-          machine.fail("su -l nobody -s /bin/sh -c 'nix ping-store'")
-          machine.succeed("su -l alice -c 'nix ping-store'")
+          machine.fail("su -l nobody -s /bin/sh -c 'nix --extra-experimental-features nix-command ping-store'")
+          machine.succeed("su -l alice -c 'nix --extra-experimental-features nix-command ping-store'")
 
 
       # Test kernel image protection
diff --git a/nixos/tests/hbase.nix b/nixos/tests/hbase.nix
index a449d24dd6f..7d8e32f8160 100644
--- a/nixos/tests/hbase.nix
+++ b/nixos/tests/hbase.nix
@@ -1,6 +1,6 @@
 import ./make-test-python.nix ({ pkgs, lib, package ? pkgs.hbase, ... }:
 {
-  name = "hbase";
+  name = "hbase-standalone";
 
   meta = with lib.maintainers; {
     maintainers = [ illustris ];
@@ -8,7 +8,7 @@ import ./make-test-python.nix ({ pkgs, lib, package ? pkgs.hbase, ... }:
 
   nodes = {
     hbase = { pkgs, ... }: {
-      services.hbase = {
+      services.hbase-standalone = {
         enable = true;
         inherit package;
         # Needed for standalone mode in hbase 2+
diff --git a/nixos/tests/hedgedoc.nix b/nixos/tests/hedgedoc.nix
index 657d49c555e..410350d8362 100644
--- a/nixos/tests/hedgedoc.nix
+++ b/nixos/tests/hedgedoc.nix
@@ -11,7 +11,7 @@ import ./make-test-python.nix ({ pkgs, lib, ... }:
       services = {
         hedgedoc = {
           enable = true;
-          configuration.dbURL = "sqlite:///var/lib/hedgedoc/hedgedoc.db";
+          settings.dbURL = "sqlite:///var/lib/hedgedoc/hedgedoc.db";
         };
       };
     };
@@ -21,7 +21,7 @@ import ./make-test-python.nix ({ pkgs, lib, ... }:
       services = {
         hedgedoc = {
           enable = true;
-          configuration.dbURL = "postgres://hedgedoc:\${DB_PASSWORD}@localhost:5432/hedgedocdb";
+          settings.dbURL = "postgres://hedgedoc:\${DB_PASSWORD}@localhost:5432/hedgedocdb";
 
           /*
            * Do not use pkgs.writeText for secrets as
diff --git a/nixos/tests/hibernate.nix b/nixos/tests/hibernate.nix
index 7a4b331169a..cb75322ca5f 100644
--- a/nixos/tests/hibernate.nix
+++ b/nixos/tests/hibernate.nix
@@ -26,8 +26,9 @@ let
 
     powerManagement.resumeCommands = "systemctl --no-block restart backdoor.service";
 
-    fileSystems = {
-      "/".device = "/dev/vda2";
+    fileSystems."/" = {
+      device = "/dev/vda2";
+      fsType = "ext3";
     };
     swapDevices = mkOverride 0 [ { device = "/dev/vda1"; } ];
     boot.resumeDevice = mkIf systemdStage1 "/dev/vda1";
diff --git a/nixos/tests/home-assistant.nix b/nixos/tests/home-assistant.nix
index 59b82593abc..8d58de75eab 100644
--- a/nixos/tests/home-assistant.nix
+++ b/nixos/tests/home-assistant.nix
@@ -7,8 +7,6 @@ in {
   meta.maintainers = lib.teams.home-assistant.members;
 
   nodes.hass = { pkgs, ... }: {
-    environment.systemPackages = with pkgs; [ mosquitto ];
-
     services.postgresql = {
       enable = true;
       ensureDatabases = [ "hass" ];
@@ -108,9 +106,7 @@ in {
     # Cause a configuration change that requires a service restart as we added a new runtime dependency
     specialisation.newFeature = {
       inheritParentConfig = true;
-      configuration.services.home-assistant.config.device_tracker = [
-        { platform = "bluetooth_tracker"; }
-      ];
+      configuration.services.home-assistant.config.esphome = {};
     };
   };
 
@@ -119,11 +115,12 @@ in {
   in
   ''
     import re
+    import json
 
     start_all()
 
     # Parse the package path out of the systemd unit, as we cannot
-    # access the final package, that is overriden inside the module,
+    # access the final package, that is overridden inside the module,
     # by any other means.
     pattern = re.compile(r"path=(?P<path>[\/a-z0-9-.]+)\/bin\/hass")
     response = hass.execute("systemctl show -p ExecStart home-assistant.service")[1]
@@ -131,7 +128,19 @@ in {
     assert match
     package = match.group('path')
 
+
+    def get_journal_cursor(host) -> str:
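+        # Record the current journal position so later log checks only consider messages emitted after this point.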
+        exit, out = host.execute("journalctl -u home-assistant.service -n1 -o json-pretty --output-fields=__CURSOR")
+        assert exit == 0
+        return json.loads(out)["__CURSOR"]
+
+
+    def wait_for_homeassistant(host, cursor):
+        host.wait_until_succeeds(f"journalctl --after-cursor='{cursor}' -u home-assistant.service | grep -q 'Home Assistant initialized in'")
+
+
     hass.wait_for_unit("home-assistant.service")
+    cursor = get_journal_cursor(hass)
 
     with subtest("Check that YAML configuration file is in place"):
         hass.succeed("test -L ${configDir}/configuration.yaml")
@@ -148,7 +157,7 @@ in {
         hass.succeed(f"grep -q 'wake_on_lan' {package}/extra_components")
 
     with subtest("Check that Home Assistant's web interface and API can be reached"):
-        hass.wait_until_succeeds("journalctl -u home-assistant.service | grep -q 'Home Assistant initialized in'")
+        wait_for_homeassistant(hass, cursor)
         hass.wait_for_open_port(8123)
         hass.succeed("curl --fail http://localhost:8123/lovelace")
 
@@ -162,15 +171,19 @@ in {
     with subtest("Check service reloads when configuration changes"):
       # store the old pid of the process
       pid = hass.succeed("systemctl show --property=MainPID home-assistant.service")
+      cursor = get_journal_cursor(hass)
       hass.succeed("${system}/specialisation/differentName/bin/switch-to-configuration test")
       new_pid = hass.succeed("systemctl show --property=MainPID home-assistant.service")
       assert pid == new_pid, "The PID of the process should not change between process reloads"
+      wait_for_homeassistant(hass, cursor)
 
     with subtest("check service restarts when package changes"):
       pid = new_pid
+      cursor = get_journal_cursor(hass)
       hass.succeed("${system}/specialisation/newFeature/bin/switch-to-configuration test")
       new_pid = hass.succeed("systemctl show --property=MainPID home-assistant.service")
       assert pid != new_pid, "The PID of the process should change when the HA binary changes"
+      wait_for_homeassistant(hass, cursor)
 
     with subtest("Check that no errors were logged"):
         output_log = hass.succeed("cat ${configDir}/home-assistant.log")
diff --git a/nixos/tests/hydra/common.nix b/nixos/tests/hydra/common.nix
index fdf2b2c6f6d..2bce03418e1 100644
--- a/nixos/tests/hydra/common.nix
+++ b/nixos/tests/hydra/common.nix
@@ -16,7 +16,7 @@
     createTrivialProject = pkgs.stdenv.mkDerivation {
       name = "create-trivial-project";
       dontUnpack = true;
-      buildInputs = [ pkgs.makeWrapper ];
+      nativeBuildInputs = [ pkgs.makeWrapper ];
       installPhase = "install -m755 -D ${./create-trivial-project.sh} $out/bin/create-trivial-project.sh";
       postFixup = ''
         wrapProgram "$out/bin/create-trivial-project.sh" --prefix PATH ":" ${pkgs.lib.makeBinPath [ pkgs.curl ]} --set EXPR_PATH ${trivialJob}
diff --git a/nixos/tests/installed-tests/default.nix b/nixos/tests/installed-tests/default.nix
index c6fb37cfe58..78a6325a245 100644
--- a/nixos/tests/installed-tests/default.nix
+++ b/nixos/tests/installed-tests/default.nix
@@ -28,7 +28,7 @@ let
     , withX11 ? false
 
       # Extra flags to pass to gnome-desktop-testing-runner.
-    , testRunnerFlags ? ""
+    , testRunnerFlags ? []
 
       # Extra attributes to pass to makeTest.
       # They will be recursively merged into the attrset created by this function.
@@ -40,7 +40,7 @@ let
           name = tested.name;
 
           meta = {
-            maintainers = tested.meta.maintainers;
+            maintainers = tested.meta.maintainers or [];
           };
 
           nodes.machine = { ... }: {
@@ -67,7 +67,7 @@ let
             '' +
             ''
               machine.succeed(
-                  "gnome-desktop-testing-runner ${testRunnerFlags} -d '${tested.installedTests}/share'"
+                  "gnome-desktop-testing-runner ${escapeShellArgs testRunnerFlags} -d '${tested.installedTests}/share'"
               )
             '';
         }
@@ -92,14 +92,15 @@ in
   fwupd = callInstalledTest ./fwupd.nix {};
   gcab = callInstalledTest ./gcab.nix {};
   gdk-pixbuf = callInstalledTest ./gdk-pixbuf.nix {};
+  geocode-glib = callInstalledTest ./geocode-glib.nix {};
   gjs = callInstalledTest ./gjs.nix {};
   glib-networking = callInstalledTest ./glib-networking.nix {};
   gnome-photos = callInstalledTest ./gnome-photos.nix {};
   graphene = callInstalledTest ./graphene.nix {};
   gsconnect = callInstalledTest ./gsconnect.nix {};
+  json-glib = callInstalledTest ./json-glib.nix {};
   ibus = callInstalledTest ./ibus.nix {};
   libgdata = callInstalledTest ./libgdata.nix {};
-  librsvg = callInstalledTest ./librsvg.nix {};
   glib-testing = callInstalledTest ./glib-testing.nix {};
   libjcat = callInstalledTest ./libjcat.nix {};
   libxmlb = callInstalledTest ./libxmlb.nix {};
diff --git a/nixos/tests/installed-tests/flatpak-builder.nix b/nixos/tests/installed-tests/flatpak-builder.nix
index 31b9f2b258f..d5e04fcf975 100644
--- a/nixos/tests/installed-tests/flatpak-builder.nix
+++ b/nixos/tests/installed-tests/flatpak-builder.nix
@@ -6,9 +6,10 @@ makeInstalledTest {
   testConfig = {
     services.flatpak.enable = true;
     xdg.portal.enable = true;
+    xdg.portal.extraPortals = with pkgs; [ xdg-desktop-portal-gtk ];
     environment.systemPackages = with pkgs; [ flatpak-builder ] ++ flatpak-builder.installedTestsDependencies;
     virtualisation.diskSize = 2048;
   };
 
-  testRunnerFlags = "--timeout 3600";
+  testRunnerFlags = [ "--timeout" "3600" ];
 }
diff --git a/nixos/tests/installed-tests/flatpak.nix b/nixos/tests/installed-tests/flatpak.nix
index c7fe9cf4588..9524d890c40 100644
--- a/nixos/tests/installed-tests/flatpak.nix
+++ b/nixos/tests/installed-tests/flatpak.nix
@@ -13,5 +13,5 @@ makeInstalledTest {
     virtualisation.diskSize = 3072;
   };
 
-  testRunnerFlags = "--timeout 3600";
+  testRunnerFlags = [ "--timeout" "3600" ];
 }
diff --git a/nixos/tests/installed-tests/gdk-pixbuf.nix b/nixos/tests/installed-tests/gdk-pixbuf.nix
index 3d0011a427a..110efdbf710 100644
--- a/nixos/tests/installed-tests/gdk-pixbuf.nix
+++ b/nixos/tests/installed-tests/gdk-pixbuf.nix
@@ -9,5 +9,5 @@ makeInstalledTest {
     virtualisation.memorySize = if pkgs.stdenv.isi686 then 2047 else 4096;
   };
 
-  testRunnerFlags = "--timeout 1800";
+  testRunnerFlags = [ "--timeout" "1800" ];
 }
diff --git a/nixos/tests/installed-tests/geocode-glib.nix b/nixos/tests/installed-tests/geocode-glib.nix
new file mode 100644
index 00000000000..fcb38c96ab0
--- /dev/null
+++ b/nixos/tests/installed-tests/geocode-glib.nix
@@ -0,0 +1,13 @@
+{ pkgs, makeInstalledTest, ... }:
+
+makeInstalledTest {
+  testConfig = {
+    i18n.supportedLocales = [
+      "en_US.UTF-8/UTF-8"
+      # The tests require this locale to be available.
+      "en_GB.UTF-8/UTF-8"
+    ];
+  };
+
+  tested = pkgs.geocode-glib;
+}
diff --git a/nixos/tests/installed-tests/ibus.nix b/nixos/tests/installed-tests/ibus.nix
index a4bc2a7d7de..028c20c29f2 100644
--- a/nixos/tests/installed-tests/ibus.nix
+++ b/nixos/tests/installed-tests/ibus.nix
@@ -4,6 +4,7 @@ makeInstalledTest {
   tested = pkgs.ibus;
 
   testConfig = {
+    i18n.supportedLocales = [ "all" ];
     i18n.inputMethod.enabled = "ibus";
     systemd.user.services.ibus-daemon = {
       serviceConfig.ExecStart = "${pkgs.ibus}/bin/ibus-daemon --xim --verbose";
diff --git a/nixos/tests/installed-tests/json-glib.nix b/nixos/tests/installed-tests/json-glib.nix
new file mode 100644
index 00000000000..3dfd3dd0b09
--- /dev/null
+++ b/nixos/tests/installed-tests/json-glib.nix
@@ -0,0 +1,5 @@
+{ pkgs, makeInstalledTest, ... }:
+
+makeInstalledTest {
+  tested = pkgs.json-glib;
+}
diff --git a/nixos/tests/installed-tests/librsvg.nix b/nixos/tests/installed-tests/librsvg.nix
deleted file mode 100644
index 378e7cce3ff..00000000000
--- a/nixos/tests/installed-tests/librsvg.nix
+++ /dev/null
@@ -1,9 +0,0 @@
-{ pkgs, makeInstalledTest, ... }:
-
-makeInstalledTest {
-  tested = pkgs.librsvg;
-
-  testConfig = {
-    virtualisation.memorySize = 2047;
-  };
-}
diff --git a/nixos/tests/installer-systemd-stage-1.nix b/nixos/tests/installer-systemd-stage-1.nix
index d02387ee80e..03f0ec8d746 100644
--- a/nixos/tests/installer-systemd-stage-1.nix
+++ b/nixos/tests/installer-systemd-stage-1.nix
@@ -8,9 +8,10 @@
   # them when fixed.
   inherit (import ./installer.nix { inherit system config pkgs; systemdStage1 = true; })
     # bcache
-    # btrfsSimple
-    # btrfsSubvolDefault
-    # btrfsSubvols
+    btrfsSimple
+    btrfsSubvolDefault
+    btrfsSubvolEscape
+    btrfsSubvols
     # encryptedFSWithKeyfile
     # grub1
     # luksroot
diff --git a/nixos/tests/installer.nix b/nixos/tests/installer.nix
index 8bef4fad3dd..398ad8de19c 100644
--- a/nixos/tests/installer.nix
+++ b/nixos/tests/installer.nix
@@ -57,7 +57,7 @@ let
 
         hardware.enableAllFirmware = lib.mkForce false;
 
-        ${replaceChars ["\n"] ["\n  "] extraConfig}
+        ${replaceStrings ["\n"] ["\n  "] extraConfig}
       }
     '';
 
@@ -324,6 +324,9 @@ let
             desktop-file-utils
             docbook5
             docbook_xsl_ns
+            (docbook-xsl-ns.override {
+              withManOptDedupPatch = true;
+            })
             kmod.dev
             libarchive.dev
             libxml2.bin
@@ -333,6 +336,13 @@ let
             perlPackages.ListCompare
             perlPackages.XMLLibXML
             python3Minimal
+            # make-options-doc/default.nix
+            (let
+                self = (pkgs.python3Minimal.override {
+                  inherit self;
+                  includeSiteCustomize = true;
+                });
+              in self.withPackages (p: [ p.mistune ]))
             shared-mime-info
             sudo
             texinfo
@@ -901,4 +911,25 @@ in {
       )
     '';
   };
+
+  # Test to see if we can deal with subvols that need to be escaped in fstab
+  btrfsSubvolEscape = makeInstallerTest "btrfsSubvolEscape" {
+    createPartitions = ''
+      machine.succeed(
+          "sgdisk -Z /dev/vda",
+          "sgdisk -n 1:0:+1M -n 2:0:+1G -N 3 -t 1:ef02 -t 2:8200 -t 3:8300 -c 3:root /dev/vda",
+          "mkswap /dev/vda2 -L swap",
+          "swapon -L swap",
+          "mkfs.btrfs -L root /dev/vda3",
+          "btrfs device scan",
+          "mount LABEL=root /mnt",
+          "btrfs subvol create '/mnt/nixos in space'",
+          "btrfs subvol create /mnt/boot",
+          "umount /mnt",
+          "mount -o 'defaults,subvol=nixos in space' LABEL=root /mnt",
+          "mkdir /mnt/boot",
+          "mount -o defaults,subvol=boot LABEL=root /mnt/boot",
+      )
+    '';
+  };
 }
diff --git a/nixos/tests/invoiceplane.nix b/nixos/tests/invoiceplane.nix
index 4e63f8ac21c..70ed96ee39f 100644
--- a/nixos/tests/invoiceplane.nix
+++ b/nixos/tests/invoiceplane.nix
@@ -13,12 +13,12 @@ import ./make-test-python.nix ({ pkgs, ... }:
       services.invoiceplane.webserver = "caddy";
       services.invoiceplane.sites = {
         "site1.local" = {
-          #database.name = "invoiceplane1";
+          database.name = "invoiceplane1";
           database.createLocally = true;
           enable = true;
         };
         "site2.local" = {
-          #database.name = "invoiceplane2";
+          database.name = "invoiceplane2";
           database.createLocally = true;
           enable = true;
         };
@@ -46,37 +46,37 @@ import ./make-test-python.nix ({ pkgs, ... }:
 
         with subtest("Finish InvoicePlane setup"):
           machine.succeed(
-            f"curl -sSfL --cookie-jar cjar {site_name}/index.php/setup/language"
+            f"curl -sSfL --cookie-jar cjar {site_name}/setup/language"
           )
           csrf_token = machine.succeed(
             "grep ip_csrf_cookie cjar | cut -f 7 | tr -d '\n'"
           )
           machine.succeed(
-            f"curl -sSfL --cookie cjar --cookie-jar cjar -d '_ip_csrf={csrf_token}&ip_lang=english&btn_continue=Continue' {site_name}/index.php/setup/language"
+            f"curl -sSfL --cookie cjar --cookie-jar cjar -d '_ip_csrf={csrf_token}&ip_lang=english&btn_continue=Continue' {site_name}/setup/language"
           )
           csrf_token = machine.succeed(
             "grep ip_csrf_cookie cjar | cut -f 7 | tr -d '\n'"
           )
           machine.succeed(
-            f"curl -sSfL --cookie cjar --cookie-jar cjar -d '_ip_csrf={csrf_token}&btn_continue=Continue' {site_name}/index.php/setup/prerequisites"
+            f"curl -sSfL --cookie cjar --cookie-jar cjar -d '_ip_csrf={csrf_token}&btn_continue=Continue' {site_name}/setup/prerequisites"
           )
           csrf_token = machine.succeed(
             "grep ip_csrf_cookie cjar | cut -f 7 | tr -d '\n'"
           )
           machine.succeed(
-            f"curl -sSfL --cookie cjar --cookie-jar cjar -d '_ip_csrf={csrf_token}&btn_continue=Continue' {site_name}/index.php/setup/configure_database"
+            f"curl -sSfL --cookie cjar --cookie-jar cjar -d '_ip_csrf={csrf_token}&btn_continue=Continue' {site_name}/setup/configure_database"
           )
           csrf_token = machine.succeed(
             "grep ip_csrf_cookie cjar | cut -f 7 | tr -d '\n'"
           )
           machine.succeed(
-            f"curl -sSfl --cookie cjar --cookie-jar cjar -d '_ip_csrf={csrf_token}&btn_continue=Continue' {site_name}/index.php/setup/install_tables"
+            f"curl -sSfl --cookie cjar --cookie-jar cjar -d '_ip_csrf={csrf_token}&btn_continue=Continue' {site_name}/setup/install_tables"
           )
           csrf_token = machine.succeed(
             "grep ip_csrf_cookie cjar | cut -f 7 | tr -d '\n'"
           )
           machine.succeed(
-            f"curl -sSfl --cookie cjar --cookie-jar cjar -d '_ip_csrf={csrf_token}&btn_continue=Continue' {site_name}/index.php/setup/upgrade_tables"
+            f"curl -sSfl --cookie cjar --cookie-jar cjar -d '_ip_csrf={csrf_token}&btn_continue=Continue' {site_name}/setup/upgrade_tables"
           )
   '';
 })
diff --git a/nixos/tests/isso.nix b/nixos/tests/isso.nix
index f4560ba3e63..575e1c52ecc 100644
--- a/nixos/tests/isso.nix
+++ b/nixos/tests/isso.nix
@@ -22,7 +22,7 @@ import ./make-test-python.nix ({ pkgs, ... }: {
   ''
     machine.wait_for_unit("isso.service")
 
-    machine.wait_for_open_port(port)
+    machine.wait_for_open_port(${toString port})
 
     machine.succeed("curl --fail http://localhost:${toString port}/?uri")
     machine.succeed("curl --fail http://localhost:${toString port}/js/embed.min.js")
diff --git a/nixos/tests/jenkins.nix b/nixos/tests/jenkins.nix
index 265d1a330cd..a1ede6dc917 100644
--- a/nixos/tests/jenkins.nix
+++ b/nixos/tests/jenkins.nix
@@ -79,7 +79,7 @@ import ./make-test-python.nix ({ pkgs, ...} : {
     in ''
     start_all()
 
-    master.wait_for_unit("jenkins")
+    master.wait_for_unit("default.target")
 
     assert "Authentication required" in master.succeed("curl http://localhost:8080")
 
@@ -94,18 +94,12 @@ import ./make-test-python.nix ({ pkgs, ...} : {
 
     with subtest("jobs are declarative"):
         # Check that jobs are created on disk.
-        master.wait_for_unit("jenkins-job-builder")
-        master.wait_until_fails("systemctl is-active jenkins-job-builder")
         master.wait_until_succeeds("test -f /var/lib/jenkins/jobs/job-1/config.xml")
         master.wait_until_succeeds("test -f /var/lib/jenkins/jobs/folder-1/config.xml")
         master.wait_until_succeeds("test -f /var/lib/jenkins/jobs/folder-1/jobs/job-2/config.xml")
 
-        # Wait until jenkins is ready, reload configuration and verify it also
-        # sees the jobs.
-        master.succeed("curl --fail ${jenkinsUrl}/cli")
-        master.succeed("curl ${jenkinsUrl}/jnlpJars/jenkins-cli.jar -O")
-        master.succeed("${pkgs.jre}/bin/java -jar jenkins-cli.jar -s ${jenkinsUrl} -auth admin:$(cat /var/lib/jenkins/secrets/initialAdminPassword) reload-configuration")
-        out = master.succeed("${pkgs.jre}/bin/java -jar jenkins-cli.jar -s ${jenkinsUrl} -auth admin:$(cat /var/lib/jenkins/secrets/initialAdminPassword) list-jobs")
+        # Verify that jenkins also sees the jobs.
+        out = master.succeed("${pkgs.jenkins}/bin/jenkins-cli -s ${jenkinsUrl} -auth admin:$(cat /var/lib/jenkins/secrets/initialAdminPassword) list-jobs")
         jobs = [x.strip() for x in out.splitlines()]
         # Seeing jobs inside folders requires the Folders plugin
         # (https://plugins.jenkins.io/cloudbees-folder/), which we don't have
@@ -117,15 +111,12 @@ import ./make-test-python.nix ({ pkgs, ...} : {
         )
 
         # Check that jobs are removed from disk.
-        master.wait_for_unit("jenkins-job-builder")
-        master.wait_until_fails("systemctl is-active jenkins-job-builder")
         master.wait_until_fails("test -f /var/lib/jenkins/jobs/job-1/config.xml")
         master.wait_until_fails("test -f /var/lib/jenkins/jobs/folder-1/config.xml")
         master.wait_until_fails("test -f /var/lib/jenkins/jobs/folder-1/jobs/job-2/config.xml")
 
-        # Reload jenkins' configuration and verify it also sees the jobs as removed.
-        master.succeed("${pkgs.jre}/bin/java -jar jenkins-cli.jar -s ${jenkinsUrl} -auth admin:$(cat /var/lib/jenkins/secrets/initialAdminPassword) reload-configuration")
-        out = master.succeed("${pkgs.jre}/bin/java -jar jenkins-cli.jar -s ${jenkinsUrl} -auth admin:$(cat /var/lib/jenkins/secrets/initialAdminPassword) list-jobs")
+        # Verify that jenkins also sees the jobs as removed.
+        out = master.succeed("${pkgs.jenkins}/bin/jenkins-cli -s ${jenkinsUrl} -auth admin:$(cat /var/lib/jenkins/secrets/initialAdminPassword) list-jobs")
         jobs = [x.strip() for x in out.splitlines()]
         assert jobs == [], f"jobs != []: {jobs}"
   '';
diff --git a/nixos/tests/jibri.nix b/nixos/tests/jibri.nix
index 223120cdb22..45e30af9a9a 100644
--- a/nixos/tests/jibri.nix
+++ b/nixos/tests/jibri.nix
@@ -35,9 +35,6 @@ import ./make-test-python.nix ({ pkgs, ... }: {
     machine.wait_for_unit("jibri.service")
 
     machine.wait_until_succeeds(
-        "journalctl -b -u jitsi-videobridge2 -o cat | grep -q 'Performed a successful health check'", timeout=30
-    )
-    machine.wait_until_succeeds(
         "journalctl -b -u prosody -o cat | grep -q 'Authenticated as focus@auth.machine'", timeout=31
     )
     machine.wait_until_succeeds(
diff --git a/nixos/tests/jitsi-meet.nix b/nixos/tests/jitsi-meet.nix
index 41d53bc7380..c39cd19e1f0 100644
--- a/nixos/tests/jitsi-meet.nix
+++ b/nixos/tests/jitsi-meet.nix
@@ -34,9 +34,6 @@ import ./make-test-python.nix ({ pkgs, ... }: {
     server.wait_for_unit("prosody.service")
 
     server.wait_until_succeeds(
-        "journalctl -b -u jitsi-videobridge2 -o cat | grep -q 'Performed a successful health check'"
-    )
-    server.wait_until_succeeds(
         "journalctl -b -u prosody -o cat | grep -q 'Authenticated as focus@auth.server'"
     )
     server.wait_until_succeeds(
diff --git a/nixos/tests/k3s/default.nix b/nixos/tests/k3s/default.nix
new file mode 100644
index 00000000000..07d93c41c7a
--- /dev/null
+++ b/nixos/tests/k3s/default.nix
@@ -0,0 +1,9 @@
+{ system ? builtins.currentSystem
+, pkgs ? import ../../.. { inherit system; }
+}:
+{
+  # Run a single node k3s cluster and verify a pod can run
+  single-node = import ./single-node.nix { inherit system pkgs; };
+  # Run a multi-node k3s cluster and verify pod networking works across nodes
+  multi-node = import ./multi-node.nix { inherit system pkgs; };
+}
diff --git a/nixos/tests/k3s/multi-node.nix b/nixos/tests/k3s/multi-node.nix
new file mode 100644
index 00000000000..9a6c7fd4657
--- /dev/null
+++ b/nixos/tests/k3s/multi-node.nix
@@ -0,0 +1,183 @@
+import ../make-test-python.nix ({ pkgs, lib, ... }:
+  let
+    imageEnv = pkgs.buildEnv {
+      name = "k3s-pause-image-env";
+      paths = with pkgs; [ tini bashInteractive coreutils socat ];
+    };
+    pauseImage = pkgs.dockerTools.streamLayeredImage {
+      name = "test.local/pause";
+      tag = "local";
+      contents = imageEnv;
+      config.Entrypoint = [ "/bin/tini" "--" "/bin/sleep" "inf" ];
+    };
+    # A daemonset that responds with 'server' on port 8000
+    networkTestDaemonset = pkgs.writeText "test.yml" ''
+      apiVersion: apps/v1
+      kind: DaemonSet
+      metadata:
+        name: test
+        labels:
+          name: test
+      spec:
+        selector:
+          matchLabels:
+            name: test
+        template:
+          metadata:
+            labels:
+              name: test
+          spec:
+            containers:
+            - name: test
+              image: test.local/pause:local
+              imagePullPolicy: Never
+              resources:
+                limits:
+                  memory: 20Mi
+              command: ["socat", "TCP4-LISTEN:8000,fork", "EXEC:echo server"]
+    '';
+    tokenFile = pkgs.writeText "token" "p@s$w0rd";
+  in
+  {
+    name = "k3s-multi-node";
+
+    nodes = {
+      server = { pkgs, ... }: {
+        environment.systemPackages = with pkgs; [ gzip jq ];
+        # k3s uses enough resources that the default VM fails.
+        virtualisation.memorySize = 1536;
+        virtualisation.diskSize = 4096;
+
+        services.k3s = {
+          inherit tokenFile;
+          enable = true;
+          role = "server";
+          package = pkgs.k3s;
+          clusterInit = true;
+          extraFlags = builtins.toString [
+            "--disable" "coredns"
+            "--disable" "local-storage"
+            "--disable" "metrics-server"
+            "--disable" "servicelb"
+            "--disable" "traefik"
+            "--node-ip" "192.168.1.1"
+            "--pause-image" "test.local/pause:local"
+          ];
+        };
+        networking.firewall.allowedTCPPorts = [ 2379 2380 6443 ];
+        networking.firewall.allowedUDPPorts = [ 8472 ];
+        networking.firewall.trustedInterfaces = [ "flannel.1" ];
+        networking.useDHCP = false;
+        networking.defaultGateway = "192.168.1.1";
+        networking.interfaces.eth1.ipv4.addresses = pkgs.lib.mkForce [
+          { address = "192.168.1.1"; prefixLength = 24; }
+        ];
+      };
+
+      server2 = { pkgs, ... }: {
+        environment.systemPackages = with pkgs; [ gzip jq ];
+        virtualisation.memorySize = 1536;
+        virtualisation.diskSize = 4096;
+
+        services.k3s = {
+          inherit tokenFile;
+          enable = true;
+          serverAddr = "https://192.168.1.1:6443";
+          clusterInit = false;
+          extraFlags = builtins.toString [
+            "--disable" "coredns"
+            "--disable" "local-storage"
+            "--disable" "metrics-server"
+            "--disable" "servicelb"
+            "--disable" "traefik"
+            "--node-ip" "192.168.1.3"
+            "--pause-image" "test.local/pause:local"
+          ];
+        };
+        networking.firewall.allowedTCPPorts = [ 2379 2380 6443 ];
+        networking.firewall.allowedUDPPorts = [ 8472 ];
+        networking.firewall.trustedInterfaces = [ "flannel.1" ];
+        networking.useDHCP = false;
+        networking.defaultGateway = "192.168.1.3";
+        networking.interfaces.eth1.ipv4.addresses = pkgs.lib.mkForce [
+          { address = "192.168.1.3"; prefixLength = 24; }
+        ];
+      };
+
+      agent = { pkgs, ... }: {
+        virtualisation.memorySize = 1024;
+        virtualisation.diskSize = 2048;
+        services.k3s = {
+          inherit tokenFile;
+          enable = true;
+          role = "agent";
+          serverAddr = "https://192.168.1.3:6443";
+          extraFlags = lib.concatStringsSep " " [
+            "--pause-image" "test.local/pause:local"
+            "--node-ip" "192.168.1.2"
+          ];
+        };
+        networking.firewall.allowedTCPPorts = [ 6443 ];
+        networking.firewall.allowedUDPPorts = [ 8472 ];
+        networking.firewall.trustedInterfaces = [ "flannel.1" ];
+        networking.useDHCP = false;
+        networking.defaultGateway = "192.168.1.2";
+        networking.interfaces.eth1.ipv4.addresses = pkgs.lib.mkForce [
+          { address = "192.168.1.2"; prefixLength = 24; }
+        ];
+      };
+    };
+
+    meta = with pkgs.lib.maintainers; {
+      maintainers = [ euank ];
+    };
+
+    testScript = ''
+      machines = [server, server2, agent]
+      for m in machines:
+          m.start()
+          m.wait_for_unit("k3s")
+
+      is_aarch64 = "${toString pkgs.stdenv.isAarch64}" == "1"
+
+      # wait for the agent to show up
+      server.wait_until_succeeds("k3s kubectl get node agent")
+
+      for m in machines:
+          # Fix-Me: Tests fail for 'aarch64-linux' as: "CONFIG_CGROUP_FREEZER: missing (fail)"
+          if not is_aarch64:
+              m.succeed("k3s check-config")
+          m.succeed(
+              "${pauseImage} | k3s ctr image import -"
+          )
+
+      server.succeed("k3s kubectl cluster-info")
+      # Also wait for our service account to show up; it takes a sec
+      server.wait_until_succeeds("k3s kubectl get serviceaccount default")
+
+      # Now create a pod on each node via a daemonset and verify they can talk to each other.
+      server.succeed("k3s kubectl apply -f ${networkTestDaemonset}")
+      server.wait_until_succeeds(f'[ "$(k3s kubectl get ds test -o json | jq .status.numberReady)" -eq {len(machines)} ]')
+
+      # Get pod IPs
+      pods = server.succeed("k3s kubectl get po -o json | jq '.items[].metadata.name' -r").splitlines()
+      pod_ips = [server.succeed(f"k3s kubectl get po {name} -o json | jq '.status.podIP' -cr").strip() for name in pods]
+
+      # Verify each server can ping each pod ip
+      for pod_ip in pod_ips:
+          server.succeed(f"ping -c 1 {pod_ip}")
+          agent.succeed(f"ping -c 1 {pod_ip}")
+
+      # Verify the pods can talk to each other
+      resp = server.wait_until_succeeds(f"k3s kubectl exec {pods[0]} -- socat TCP:{pod_ips[1]}:8000 -")
+      assert resp.strip() == "server"
+      resp = server.wait_until_succeeds(f"k3s kubectl exec {pods[1]} -- socat TCP:{pod_ips[0]}:8000 -")
+      assert resp.strip() == "server"
+
+      # Cleanup
+      server.succeed("k3s kubectl delete -f ${networkTestDaemonset}")
+
+      for m in machines:
+          m.shutdown()
+    '';
+  })
diff --git a/nixos/tests/k3s-single-node.nix b/nixos/tests/k3s/single-node.nix
index fb6510ee087..a95fa4a031e 100644
--- a/nixos/tests/k3s-single-node.nix
+++ b/nixos/tests/k3s/single-node.nix
@@ -1,5 +1,4 @@
-import ./make-test-python.nix ({ pkgs, ... }:
-
+import ../make-test-python.nix ({ pkgs, lib, ... }:
   let
     imageEnv = pkgs.buildEnv {
       name = "k3s-pause-image-env";
@@ -11,20 +10,12 @@ import ./make-test-python.nix ({ pkgs, ... }:
       contents = imageEnv;
       config.Entrypoint = [ "/bin/tini" "--" "/bin/sleep" "inf" ];
     };
-    # Don't use the default service account because there's a race where it may
-    # not be created yet; make our own instead.
     testPodYaml = pkgs.writeText "test.yml" ''
       apiVersion: v1
-      kind: ServiceAccount
-      metadata:
-        name: test
-      ---
-      apiVersion: v1
       kind: Pod
       metadata:
         name: test
       spec:
-        serviceAccountName: test
         containers:
         - name: test
           image: test.local/pause:local
@@ -49,7 +40,14 @@ import ./make-test-python.nix ({ pkgs, ... }:
       services.k3s.role = "server";
       services.k3s.package = pkgs.k3s;
       # Slightly reduce resource usage
-      services.k3s.extraFlags = "--no-deploy coredns,servicelb,traefik,local-storage,metrics-server --pause-image test.local/pause:local";
+      services.k3s.extraFlags = builtins.toString [
+        "--disable" "coredns"
+        "--disable" "local-storage"
+        "--disable" "metrics-server"
+        "--disable" "servicelb"
+        "--disable" "traefik"
+        "--pause-image" "test.local/pause:local"
+      ];
 
       users.users = {
         noprivs = {
@@ -66,13 +64,15 @@ import ./make-test-python.nix ({ pkgs, ... }:
       machine.wait_for_unit("k3s")
       machine.succeed("k3s kubectl cluster-info")
       machine.fail("sudo -u noprivs k3s kubectl cluster-info")
-      # FIXME: this fails with the current nixos kernel config; once it passes, we should uncomment it
-      # machine.succeed("k3s check-config")
+      '' # Fix-Me: Tests fail for 'aarch64-linux' as: "CONFIG_CGROUP_FREEZER: missing (fail)"
+      + lib.optionalString (!pkgs.stdenv.isAarch64) ''machine.succeed("k3s check-config")'' + ''
 
       machine.succeed(
           "${pauseImage} | k3s ctr image import -"
       )
 
+      # Also wait for our service account to show up; it takes a sec
+      machine.wait_until_succeeds("k3s kubectl get serviceaccount default")
       machine.succeed("k3s kubectl apply -f ${testPodYaml}")
       machine.succeed("k3s kubectl wait --for 'condition=Ready' pod/test")
       machine.succeed("k3s kubectl delete -f ${testPodYaml}")
diff --git a/nixos/tests/kafka.nix b/nixos/tests/kafka.nix
index 5def759ca24..79af02710c3 100644
--- a/nixos/tests/kafka.nix
+++ b/nixos/tests/kafka.nix
@@ -50,7 +50,7 @@ let
 
       kafka.wait_until_succeeds(
           "${kafkaPackage}/bin/kafka-topics.sh --create "
-          + "--zookeeper zookeeper1:2181 --partitions 1 "
+          + "--bootstrap-server localhost:9092 --partitions 1 "
           + "--replication-factor 1 --topic testtopic"
       )
       kafka.succeed(
@@ -58,22 +58,19 @@ let
           + "${kafkaPackage}/bin/kafka-console-producer.sh "
           + "--broker-list localhost:9092 --topic testtopic"
       )
-    '' + (if name == "kafka_0_9" then ''
-      assert "test 1" in kafka.succeed(
-          "${kafkaPackage}/bin/kafka-console-consumer.sh "
-          + "--zookeeper zookeeper1:2181 --topic testtopic "
-          + "--from-beginning --max-messages 1"
-      )
-    '' else ''
       assert "test 1" in kafka.succeed(
           "${kafkaPackage}/bin/kafka-console-consumer.sh "
           + "--bootstrap-server localhost:9092 --topic testtopic "
           + "--from-beginning --max-messages 1"
       )
-    '');
+    '';
   }) { inherit system; });
 
 in with pkgs; {
-  kafka_2_7  = makeKafkaTest "kafka_2_7"  apacheKafka_2_7;
   kafka_2_8  = makeKafkaTest "kafka_2_8"  apacheKafka_2_8;
+  kafka_3_0  = makeKafkaTest "kafka_3_0"  apacheKafka_3_0;
+  kafka_3_1  = makeKafkaTest "kafka_3_1"  apacheKafka_3_1;
+  kafka_3_2  = makeKafkaTest "kafka_3_2"  apacheKafka_3_2;
+  kafka_3_3  = makeKafkaTest "kafka_3_3"  apacheKafka_3_3;
+  kafka  = makeKafkaTest "kafka"  apacheKafka;
 }
diff --git a/nixos/tests/kanidm.nix b/nixos/tests/kanidm.nix
index d34f680f522..33c65026b9b 100644
--- a/nixos/tests/kanidm.nix
+++ b/nixos/tests/kanidm.nix
@@ -13,26 +13,17 @@ import ./make-test-python.nix ({ pkgs, ... }:
         serverSettings = {
           origin = "https://${serverDomain}";
           domain = serverDomain;
-          bindaddress = "[::1]:8443";
+          bindaddress = "[::]:443";
           ldapbindaddress = "[::1]:636";
-        };
-      };
-
-      services.nginx = {
-        enable = true;
-        recommendedProxySettings = true;
-        virtualHosts."${serverDomain}" = {
-          forceSSL = true;
-          sslCertificate = certs."${serverDomain}".cert;
-          sslCertificateKey = certs."${serverDomain}".key;
-          locations."/".proxyPass = "http://[::1]:8443";
+          tls_chain = certs."${serverDomain}".cert;
+          tls_key = certs."${serverDomain}".key;
         };
       };
 
       security.pki.certificateFiles = [ certs.ca.cert ];
 
       networking.hosts."::1" = [ serverDomain ];
-      networking.firewall.allowedTCPPorts = [ 80 443 ];
+      networking.firewall.allowedTCPPorts = [ 443 ];
 
       users.users.kanidm.shell = pkgs.bashInteractive;
 
@@ -44,6 +35,12 @@ import ./make-test-python.nix ({ pkgs, ... }:
         enableClient = true;
         clientSettings = {
           uri = "https://${serverDomain}";
+          verify_ca = true;
+          verify_hostnames = true;
+        };
+        enablePam = true;
+        unixSettings = {
+          pam_allowed_login_groups = [ "shell" ];
         };
       };
 
@@ -67,9 +64,11 @@ import ./make-test-python.nix ({ pkgs, ... }:
         start_all()
         server.wait_for_unit("kanidm.service")
         server.wait_until_succeeds("curl -sf https://${serverDomain} | grep Kanidm")
-        server.wait_until_succeeds("ldapsearch -H ldap://[::1]:636 -b '${ldapBaseDN}' -x '(name=test)'")
-        client.wait_until_succeeds("kanidm login -D anonymous && kanidm self whoami | grep anonymous@${serverDomain}")
-        (rv, result) = server.execute("kanidmd recover_account -d quiet -c ${serverConfigFile} -n admin 2>&1 | rg -o '[A-Za-z0-9]{48}'")
+        server.succeed("ldapsearch -H ldaps://${serverDomain}:636 -b '${ldapBaseDN}' -x '(name=test)'")
+        client.succeed("kanidm login -D anonymous && kanidm self whoami | grep anonymous@${serverDomain}")
+        rv, result = server.execute("kanidmd recover_account -c ${serverConfigFile} idm_admin 2>&1 | rg -o '[A-Za-z0-9]{48}'")
         assert rv == 0
+        client.wait_for_unit("kanidm-unixd.service")
+        client.succeed("kanidm_unixd_status | grep working!")
       '';
   })
diff --git a/nixos/tests/karma.nix b/nixos/tests/karma.nix
new file mode 100644
index 00000000000..5ac2983b8aa
--- /dev/null
+++ b/nixos/tests/karma.nix
@@ -0,0 +1,84 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }: {
+  name = "karma";
+  nodes = {
+    server = { ... }: {
+      services.prometheus.alertmanager = {
+        enable = true;
+        logLevel = "debug";
+        port = 9093;
+        openFirewall = true;
+        configuration = {
+          global = {
+            resolve_timeout = "1m";
+          };
+          route = {
+            # Root route node
+            receiver = "test";
+            group_by = ["..."];
+            continue = false;
+            group_wait = "1s";
+            group_interval="15s";
+            repeat_interval = "24h";
+          };
+          receivers = [
+            {
+              name = "test";
+              webhook_configs = [
+                {
+                  url = "http://localhost:1234";
+                  send_resolved = true;
+                  max_alerts = 0;
+                }
+              ];
+            }
+          ];
+        };
+      };
+      services.karma = {
+        enable = true;
+        openFirewall = true;
+        settings = {
+          listen = {
+            address = "0.0.0.0";
+            port = 8081;
+          };
+          alertmanager = {
+            servers = [
+              {
+                name = "alertmanager";
+                uri = "https://127.0.0.1:9093";
+              }
+            ];
+          };
+          karma.name = "test-dashboard";
+          log.config = true;
+          log.requests = true;
+          log.timestamp = true;
+        };
+      };
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    with subtest("Wait for server to come up"):
+
+      server.wait_for_unit("alertmanager.service")
+      server.wait_for_unit("karma.service")
+
+      server.sleep(5) # wait for both services to settle
+
+      server.wait_for_open_port(9093)
+      server.wait_for_open_port(8081)
+
+    with subtest("Test alertmanager readiness"):
+      server.succeed("curl -s http://127.0.0.1:9093/-/ready")
+
+      # Karma only starts serving the dashboard once it has established connectivity to all alertmanagers in its config
+      # Therefore, this will fail if karma isn't able to reach alertmanager
+      server.succeed("curl -s http://127.0.0.1:8081")
+
+    server.shutdown()
+  '';
+})
diff --git a/nixos/tests/kea.nix b/nixos/tests/kea.nix
index 6b345893108..b1d5894cc7c 100644
--- a/nixos/tests/kea.nix
+++ b/nixos/tests/kea.nix
@@ -1,6 +1,8 @@
 import ./make-test-python.nix ({ pkgs, lib, ...}: {
   meta.maintainers = with lib.maintainers; [ hexa ];
 
+  name = "kea";
+
   nodes = {
     router = { config, pkgs, ... }: {
       virtualisation.vlans = [ 1 ];
diff --git a/nixos/tests/kerberos/mit.nix b/nixos/tests/kerberos/mit.nix
index b475b7e4c92..7e427ffef0b 100644
--- a/nixos/tests/kerberos/mit.nix
+++ b/nixos/tests/kerberos/mit.nix
@@ -9,7 +9,7 @@ import ../make-test-python.nix ({pkgs, ...}: {
     };
     krb5 = {
       enable = true;
-      kerberos = pkgs.krb5Full;
+      kerberos = pkgs.krb5;
       libdefaults = {
         default_realm = "FOO.BAR";
       };
diff --git a/nixos/tests/kernel-generic.nix b/nixos/tests/kernel-generic.nix
index 04d52cf82e4..7ee734a1eff 100644
--- a/nixos/tests/kernel-generic.nix
+++ b/nixos/tests/kernel-generic.nix
@@ -30,7 +30,7 @@ let
       linux_5_4_hardened
       linux_5_10_hardened
       linux_5_15_hardened
-      linux_5_18_hardened
+      linux_6_0_hardened
 
       linux_testing;
   };
diff --git a/nixos/tests/keter.nix b/nixos/tests/keter.nix
new file mode 100644
index 00000000000..0bfb96e1c32
--- /dev/null
+++ b/nixos/tests/keter.nix
@@ -0,0 +1,42 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+let
+  port = 81;
+in
+{
+  name = "keter";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ jappie ];
+  };
+
+
+  nodes.machine = { config, pkgs, ... }: {
+    services.keter = {
+      enable = true;
+
+      globalKeterConfig = {
+        listeners = [{
+          host = "*4";
+          inherit port;
+        }];
+      };
+      bundle = {
+        appName = "test-bundle";
+        domain = "localhost";
+        executable = pkgs.writeShellScript "run" ''
+          ${pkgs.python3}/bin/python -m http.server $PORT
+        '';
+      };
+    };
+  };
+
+  testScript =
+    ''
+      machine.wait_for_unit("keter.service")
+
+      machine.wait_for_open_port(${toString port})
+      machine.wait_for_console_text("Activating app test-bundle with hosts: localhost")
+
+
+      machine.succeed("curl --fail http://localhost:${toString port}/")
+    '';
+})
diff --git a/nixos/tests/keycloak.nix b/nixos/tests/keycloak.nix
index 6ce136330d4..228e57d1cdd 100644
--- a/nixos/tests/keycloak.nix
+++ b/nixos/tests/keycloak.nix
@@ -5,10 +5,13 @@
 let
   certs = import ./common/acme/server/snakeoil-certs.nix;
   frontendUrl = "https://${certs.domain}";
-  initialAdminPassword = "h4IhoJFnt2iQIR9";
 
   keycloakTest = import ./make-test-python.nix (
     { pkgs, databaseType, ... }:
+    let
+      initialAdminPassword = "h4Iho\"JFn't2>iQIR9";
+      adminPasswordFile = pkgs.writeText "admin-password" "${initialAdminPassword}";
+    in
     {
       name = "keycloak";
       meta = with pkgs.lib.maintainers; {
@@ -37,7 +40,7 @@ let
               type = databaseType;
               username = "bogus";
               name = "also bogus";
-              passwordFile = "${pkgs.writeText "dbPassword" "wzf6vOCbPp6cqTH"}";
+              passwordFile = "${pkgs.writeText "dbPassword" ''wzf6\"vO"Cb\nP>p#6;c&o?eu=q'THE'''H''''E''}";
             };
             plugins = with config.services.keycloak.package.plugins; [
               keycloak-discord
@@ -111,7 +114,7 @@ let
           keycloak.succeed("""
               curl -sSf -d 'client_id=admin-cli' \
                    -d 'username=admin' \
-                   -d 'password=${initialAdminPassword}' \
+                   -d "password=$(<${adminPasswordFile})" \
                    -d 'grant_type=password' \
                    '${frontendUrl}/realms/master/protocol/openid-connect/token' \
                    | jq -r '"Authorization: bearer " + .access_token' >admin_auth_header
@@ -119,10 +122,10 @@ let
 
           # Register the metrics SPI
           keycloak.succeed(
-              "${pkgs.jre}/bin/keytool -import -alias snakeoil -file ${certs.ca.cert} -storepass aaaaaa -keystore cacert.jks -noprompt",
-              "KC_OPTS='-Djavax.net.ssl.trustStore=cacert.jks -Djavax.net.ssl.trustStorePassword=aaaaaa' kcadm.sh config credentials --server '${frontendUrl}' --realm master --user admin --password '${initialAdminPassword}'",
-              "KC_OPTS='-Djavax.net.ssl.trustStore=cacert.jks -Djavax.net.ssl.trustStorePassword=aaaaaa' kcadm.sh update events/config -s 'eventsEnabled=true' -s 'adminEventsEnabled=true' -s 'eventsListeners+=metrics-listener'",
-              "curl -sSf '${frontendUrl}/realms/master/metrics' | grep '^keycloak_admin_event_UPDATE'"
+              """${pkgs.jre}/bin/keytool -import -alias snakeoil -file ${certs.ca.cert} -storepass aaaaaa -keystore cacert.jks -noprompt""",
+              """KC_OPTS='-Djavax.net.ssl.trustStore=cacert.jks -Djavax.net.ssl.trustStorePassword=aaaaaa' kcadm.sh config credentials --server '${frontendUrl}' --realm master --user admin --password "$(<${adminPasswordFile})" """,
+              """KC_OPTS='-Djavax.net.ssl.trustStore=cacert.jks -Djavax.net.ssl.trustStorePassword=aaaaaa' kcadm.sh update events/config -s 'eventsEnabled=true' -s 'adminEventsEnabled=true' -s 'eventsListeners+=metrics-listener'""",
+              """curl -sSf '${frontendUrl}/realms/master/metrics' | grep '^keycloak_admin_event_UPDATE'"""
           )
 
           # Publish the realm, including a test OIDC client and user
diff --git a/nixos/tests/komga.nix b/nixos/tests/komga.nix
new file mode 100644
index 00000000000..02db50ef25f
--- /dev/null
+++ b/nixos/tests/komga.nix
@@ -0,0 +1,22 @@
+import ./make-test-python.nix ({ lib, ... }:
+
+with lib;
+
+{
+  name = "komga";
+  meta.maintainers = with maintainers; [ govanify ];
+
+  nodes.machine =
+    { pkgs, ... }:
+    { services.komga = {
+        enable = true;
+        port = 1234;
+      };
+    };
+
+  testScript = ''
+    machine.wait_for_unit("komga.service")
+    machine.wait_for_open_port(1234)
+    machine.succeed("curl --fail http://localhost:1234/")
+  '';
+})
diff --git a/nixos/tests/krb5/example-config.nix b/nixos/tests/krb5/example-config.nix
index 1125b02f01c..9a5c3b2af24 100644
--- a/nixos/tests/krb5/example-config.nix
+++ b/nixos/tests/krb5/example-config.nix
@@ -11,7 +11,7 @@ import ../make-test-python.nix ({ pkgs, ...} : {
     { pkgs, ... }: {
       krb5 = {
         enable = true;
-        kerberos = pkgs.krb5Full;
+        kerberos = pkgs.krb5;
         libdefaults = {
           default_realm = "ATHENA.MIT.EDU";
         };
diff --git a/nixos/tests/kthxbye.nix b/nixos/tests/kthxbye.nix
new file mode 100644
index 00000000000..5ca0917ec8e
--- /dev/null
+++ b/nixos/tests/kthxbye.nix
@@ -0,0 +1,110 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }:
+{
+  name = "kthxbye";
+
+  meta = with lib.maintainers; {
+    maintainers = [ nukaduka ];
+  };
+
+  nodes.server = { ... }: {
+    environment.systemPackages = with pkgs; [ prometheus-alertmanager ];
+    services.prometheus = {
+      enable = true;
+
+      globalConfig = {
+        scrape_interval = "5s";
+        scrape_timeout = "5s";
+        evaluation_interval = "5s";
+      };
+
+      scrapeConfigs = [
+        {
+          job_name = "prometheus";
+          scrape_interval = "5s";
+          static_configs = [
+            {
+              targets = [ "localhost:9090" ];
+            }
+          ];
+        }
+      ];
+
+      rules = [
+        ''
+          groups:
+            - name: test
+              rules:
+                - alert: node_up
+                  expr: up != 0
+                  for: 5s
+                  labels:
+                    severity: bottom of the barrel
+                  annotations:
+                    summary: node is fine
+        ''
+      ];
+
+      alertmanagers = [
+        {
+          static_configs = [
+            {
+              targets = [
+                "localhost:9093"
+              ];
+            }
+          ];
+        }
+      ];
+
+      alertmanager = {
+        enable = true;
+        openFirewall = true;
+        configuration.route = {
+          receiver = "test";
+          group_wait = "5s";
+          group_interval = "5s";
+          group_by = [ "..." ];
+        };
+        configuration.receivers = [
+          {
+            name = "test";
+            webhook_configs = [
+              {
+                url = "http://localhost:1234";
+              }
+            ];
+          }
+        ];
+      };
+    };
+
+    services.kthxbye = {
+      enable = true;
+      openFirewall = true;
+      extendIfExpiringIn = "30s";
+      logJSON = true;
+      maxDuration = "15m";
+      interval = "5s";
+    };
+  };
+
+  testScript = ''
+    with subtest("start the server"):
+      start_all()
+      server.wait_for_unit("prometheus.service")
+      server.wait_for_unit("alertmanager.service")
+      server.wait_for_unit("kthxbye.service")
+
+      server.sleep(2) # wait for units to settle
+      server.systemctl("restart kthxbye.service") # make sure kthxbye comes up after alertmanager
+      server.sleep(2)
+
+    with subtest("set up test silence which expires in 20s"):
+      server.succeed('amtool --alertmanager.url "http://localhost:9093" silence add alertname="node_up" -a "nixosTest" -d "20s" -c "ACK! this server is fine!!"')
+
+    with subtest("wait for 21 seconds and check if the silence is still active"):
+      server.sleep(21)
+      server.systemctl("status kthxbye.service")
+      server.succeed("amtool --alertmanager.url 'http://localhost:9093' silence | grep 'ACK'")
+  '';
+})
diff --git a/nixos/tests/kubernetes/base.nix b/nixos/tests/kubernetes/base.nix
index d4410beb937..ba7b2d9b1d2 100644
--- a/nixos/tests/kubernetes/base.nix
+++ b/nixos/tests/kubernetes/base.nix
@@ -18,7 +18,7 @@ let
         ${master.ip}  api.${domain}
         ${concatMapStringsSep "\n" (machineName: "${machines.${machineName}.ip}  ${machineName}.${domain}") (attrNames machines)}
       '';
-      wrapKubectl = with pkgs; runCommand "wrap-kubectl" { buildInputs = [ makeWrapper ]; } ''
+      wrapKubectl = with pkgs; runCommand "wrap-kubectl" { nativeBuildInputs = [ makeWrapper ]; } ''
         mkdir -p $out/bin
         makeWrapper ${pkgs.kubernetes}/bin/kubectl $out/bin/kubectl --set KUBECONFIG "/etc/kubernetes/cluster-admin.kubeconfig"
       '';
@@ -43,7 +43,7 @@ let
                   trustedInterfaces = ["mynet"];
 
                   extraCommands = concatMapStrings  (node: ''
-                    iptables -A INPUT -s ${node.config.networking.primaryIPAddress} -j ACCEPT
+                    iptables -A INPUT -s ${node.networking.primaryIPAddress} -j ACCEPT
                   '') (attrValues nodes);
                 };
               };
diff --git a/nixos/tests/kubernetes/default.nix b/nixos/tests/kubernetes/default.nix
index 60ba482758f..a3de9ed115d 100644
--- a/nixos/tests/kubernetes/default.nix
+++ b/nixos/tests/kubernetes/default.nix
@@ -4,8 +4,6 @@
 let
   dns = import ./dns.nix { inherit system pkgs; };
   rbac = import ./rbac.nix { inherit system pkgs; };
-  # TODO kubernetes.e2e should eventually replace kubernetes.rbac when it works
-  # e2e = import ./e2e.nix { inherit system pkgs; };
 in
 {
   dns-single-node = dns.singlenode.test;
diff --git a/nixos/tests/kubernetes/dns.nix b/nixos/tests/kubernetes/dns.nix
index 3fd1dd31f74..1b7145eb5d5 100644
--- a/nixos/tests/kubernetes/dns.nix
+++ b/nixos/tests/kubernetes/dns.nix
@@ -33,7 +33,11 @@ let
   redisImage = pkgs.dockerTools.buildImage {
     name = "redis";
     tag = "latest";
-    contents = [ pkgs.redis pkgs.bind.host ];
+    copyToRoot = pkgs.buildEnv {
+      name = "image-root";
+      pathsToLink = [ "/bin" ];
+      paths = [ pkgs.redis pkgs.bind.host ];
+    };
     config.Entrypoint = ["/bin/redis-server"];
   };
 
@@ -54,14 +58,18 @@ let
   probeImage = pkgs.dockerTools.buildImage {
     name = "probe";
     tag = "latest";
-    contents = [ pkgs.bind.host pkgs.busybox ];
+    copyToRoot = pkgs.buildEnv {
+      name = "image-root";
+      pathsToLink = [ "/bin" ];
+      paths = [ pkgs.bind.host pkgs.busybox ];
+    };
     config.Entrypoint = ["/bin/tail"];
   };
 
   extraConfiguration = { config, pkgs, lib, ... }: {
     environment.systemPackages = [ pkgs.bind.host ];
     services.dnsmasq.enable = true;
-    services.dnsmasq.servers = [
+    services.dnsmasq.settings.server = [
       "/cluster.local/${config.services.kubernetes.addons.dns.clusterIp}#53"
     ];
   };
diff --git a/nixos/tests/kubernetes/e2e.nix b/nixos/tests/kubernetes/e2e.nix
deleted file mode 100644
index fb29d9cc695..00000000000
--- a/nixos/tests/kubernetes/e2e.nix
+++ /dev/null
@@ -1,40 +0,0 @@
-{ system ? builtins.currentSystem, pkgs ? import ../../.. { inherit system; } }:
-with import ./base.nix { inherit system; };
-let
-  domain = "my.zyx";
-  certs = import ./certs.nix { externalDomain = domain; kubelets = ["machine1" "machine2"]; };
-  kubeconfig = pkgs.writeText "kubeconfig.json" (builtins.toJSON {
-    apiVersion = "v1";
-    kind = "Config";
-    clusters = [{
-      name = "local";
-      cluster.certificate-authority = "${certs.master}/ca.pem";
-      cluster.server = "https://api.${domain}";
-    }];
-    users = [{
-      name = "kubelet";
-      user = {
-        client-certificate = "${certs.admin}/admin.pem";
-        client-key = "${certs.admin}/admin-key.pem";
-      };
-    }];
-    contexts = [{
-      context = {
-        cluster = "local";
-        user = "kubelet";
-      };
-      current-context = "kubelet-context";
-    }];
-  });
-
-  base = {
-    name = "e2e";
-    inherit domain certs;
-    test = ''
-      $machine1->succeed("e2e.test -kubeconfig ${kubeconfig} -provider local -ginkgo.focus '\\[Conformance\\]' -ginkgo.skip '\\[Flaky\\]|\\[Serial\\]'");
-    '';
-  };
-in {
-  singlenode = mkKubernetesSingleNodeTest base;
-  multinode = mkKubernetesMultiNodeTest base;
-}
diff --git a/nixos/tests/kubernetes/rbac.nix b/nixos/tests/kubernetes/rbac.nix
index 9e73fbbd32a..779eafbb1d2 100644
--- a/nixos/tests/kubernetes/rbac.nix
+++ b/nixos/tests/kubernetes/rbac.nix
@@ -84,7 +84,11 @@ let
   kubectlImage = pkgs.dockerTools.buildImage {
     name = "kubectl";
     tag = "latest";
-    contents = [ copyKubectl pkgs.busybox kubectlPod2 ];
+    copyToRoot = pkgs.buildEnv {
+      name = "image-root";
+      pathsToLink = [ "/bin" ];
+      paths = [ copyKubectl pkgs.busybox kubectlPod2 ];
+    };
     config.Entrypoint = ["/bin/sh"];
   };
 
diff --git a/nixos/tests/ipfs.nix b/nixos/tests/kubo.nix
index 5e7c967028e..94aa24a9204 100644
--- a/nixos/tests/ipfs.nix
+++ b/nixos/tests/kubo.nix
@@ -1,19 +1,27 @@
 import ./make-test-python.nix ({ pkgs, ...} : {
-  name = "ipfs";
+  name = "kubo";
   meta = with pkgs.lib.maintainers; {
     maintainers = [ mguentner ];
   };
 
   nodes.machine = { ... }: {
-    services.ipfs = {
+    services.kubo = {
       enable = true;
       # Also will add a unix domain socket API address, see module.
       startWhenNeeded = true;
-      apiAddress = "/ip4/127.0.0.1/tcp/2324";
+      settings.Addresses.API = "/ip4/127.0.0.1/tcp/2324";
       dataDir = "/mnt/ipfs";
     };
   };
 
+  nodes.fuse = { ... }: {
+    services.kubo = {
+      enable = true;
+      settings.Addresses.API = "/ip4/127.0.0.1/tcp/2324";
+      autoMount = true;
+    };
+  };
+
   testScript = ''
     start_all()
 
@@ -40,5 +48,14 @@ import ./make-test-python.nix ({ pkgs, ...} : {
     # Test if setting dataDir works properly with the hardened systemd unit
     machine.succeed("test -e /mnt/ipfs/config")
     machine.succeed("test ! -e /var/lib/ipfs/")
+
+    # Test FUSE mountpoint
+    ipfs_hash = fuse.succeed(
+        "echo fnord3 | ipfs --api /ip4/127.0.0.1/tcp/2324 add --quieter"
+    )
+
+    # The FUSE mount functionality is broken as of v0.13.0.
+    # See https://github.com/ipfs/kubo/issues/9044.
+    # fuse.succeed(f"cat /ipfs/{ipfs_hash.strip()} | grep fnord3")
   '';
 })
diff --git a/nixos/tests/ladybird.nix b/nixos/tests/ladybird.nix
new file mode 100644
index 00000000000..4e9ab9a36d1
--- /dev/null
+++ b/nixos/tests/ladybird.nix
@@ -0,0 +1,30 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "ladybird";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ fgaz ];
+  };
+
+  nodes.machine = { config, pkgs, ... }: {
+    imports = [
+      ./common/x11.nix
+    ];
+
+    services.xserver.enable = true;
+    environment.systemPackages = [
+      pkgs.ladybird
+    ];
+  };
+
+  enableOCR = true;
+
+  testScript =
+    ''
+      machine.wait_for_x()
+      machine.succeed("echo '<!DOCTYPE html><html><body><h1>Hello world</h1></body></html>' > page.html")
+      machine.execute("ladybird file://$(pwd)/page.html >&2 &")
+      machine.wait_for_window("Ladybird")
+      machine.sleep(5)
+      machine.wait_for_text("Hello world")
+      machine.screenshot("screen")
+    '';
+})
diff --git a/nixos/tests/languagetool.nix b/nixos/tests/languagetool.nix
new file mode 100644
index 00000000000..e4ab2a47064
--- /dev/null
+++ b/nixos/tests/languagetool.nix
@@ -0,0 +1,19 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }:
+let port = 8082;
+in {
+  name = "languagetool";
+  meta = with lib.maintainers; { maintainers = [ fbeffa ]; };
+
+  nodes.machine = { ... }:
+    {
+      services.languagetool.enable = true;
+      services.languagetool.port = port;
+    };
+
+  testScript = ''
+    machine.start()
+    machine.wait_for_unit("languagetool.service")
+    machine.wait_for_open_port(${toString port})
+    machine.wait_until_succeeds('curl -d "language=en-US" -d "text=a simple test" http://localhost:${toString port}/v2/check')
+  '';
+})
diff --git a/nixos/tests/lemmy.nix b/nixos/tests/lemmy.nix
new file mode 100644
index 00000000000..fb64daa80e6
--- /dev/null
+++ b/nixos/tests/lemmy.nix
@@ -0,0 +1,89 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }:
+let
+  uiPort = 1234;
+  backendPort = 5678;
+  lemmyNodeName = "server";
+in
+{
+  name = "lemmy";
+  meta = with lib.maintainers; { maintainers = [ mightyiam ]; };
+
+  nodes = {
+    client = { };
+
+    "${lemmyNodeName}" = {
+      services.lemmy = {
+        enable = true;
+        ui.port = uiPort;
+        database.createLocally = true;
+        settings = {
+          hostname = "http://${lemmyNodeName}";
+          port = backendPort;
+          # Without setup, the /feeds/* and /nodeinfo/* API endpoints won't return 200
+          setup = {
+            admin_username = "mightyiam";
+            admin_password = "ThisIsWhatIUseEverywhereTryIt";
+            site_name = "Lemmy FTW";
+            admin_email = "mightyiam@example.com";
+          };
+        };
+        caddy.enable = true;
+      };
+
+      networking.firewall.allowedTCPPorts = [ 80 ];
+
+      # pict-rs seems to need more than 1025114112 bytes
+      virtualisation.memorySize = 2000;
+    };
+  };
+
+  testScript = ''
+    server = ${lemmyNodeName}
+
+    with subtest("the backend starts and responds"):
+        server.wait_for_unit("lemmy.service")
+        server.wait_for_open_port(${toString backendPort})
+        server.succeed("curl --fail localhost:${toString backendPort}/api/v3/site")
+
+    with subtest("the UI starts and responds"):
+        server.wait_for_unit("lemmy-ui.service")
+        server.wait_for_open_port(${toString uiPort})
+        server.succeed("curl --fail localhost:${toString uiPort}")
+
+    with subtest("Lemmy-UI responds through the caddy reverse proxy"):
+        server.wait_for_unit("network-online.target")
+        server.wait_for_unit("caddy.service")
+        server.wait_for_open_port(80)
+        body = server.execute("curl --fail --location ${lemmyNodeName}")[1]
+        assert "Lemmy" in body, f"String Lemmy not found in response for ${lemmyNodeName}: \n{body}"
+
+    with subtest("the server is exposed externally"):
+        client.wait_for_unit("network-online.target")
+        client.succeed("curl -v --fail ${lemmyNodeName}")
+
+    with subtest("caddy correctly routes backend requests"):
+        # Make sure we are not hitting the frontend
+        server.execute("systemctl stop lemmy-ui.service")
+
+        def assert_http_code(url, expected_http_code, extra_curl_args=""):
+            _, http_code = server.execute(f'curl --silent -o /dev/null {extra_curl_args} --fail --write-out "%{{http_code}}" {url}')
+            assert http_code == str(expected_http_code), f"expected http code {expected_http_code}, got {http_code}"
+
+        # Caddy responds with HTTP code 502 if it cannot handle the requested path
+        assert_http_code("${lemmyNodeName}/obviously-wrong-path/", 502)
+
+        assert_http_code("${lemmyNodeName}/static/js/client.js", 200)
+        assert_http_code("${lemmyNodeName}/api/v3/site", 200)
+
+        # A 404 confirms that the request goes to the backend
+        # No path can return 200 until after we upload an image to pict-rs
+        assert_http_code("${lemmyNodeName}/pictrs/", 404)
+
+        assert_http_code("${lemmyNodeName}/feeds/all.xml", 200)
+        assert_http_code("${lemmyNodeName}/nodeinfo/2.0.json", 200)
+
+        assert_http_code("${lemmyNodeName}/some-other-made-up-path/", 404, "-X POST")
+        assert_http_code("${lemmyNodeName}/some-other-path", 404, "-H 'Accept: application/activity+json'")
+        assert_http_code("${lemmyNodeName}/some-other-path", 404, "-H 'Accept: application/ld+json; profile=\"https://www.w3.org/ns/activitystreams\"'")
+  '';
+})
diff --git a/nixos/tests/libuiohook.nix b/nixos/tests/libuiohook.nix
new file mode 100644
index 00000000000..66c5033d968
--- /dev/null
+++ b/nixos/tests/libuiohook.nix
@@ -0,0 +1,21 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "libuiohook";
+  meta = with lib.maintainers; { maintainers = [ anoa ]; };
+
+  nodes.client = { nodes, ... }:
+      let user = nodes.client.config.users.users.alice;
+      in {
+        imports = [ ./common/user-account.nix ./common/x11.nix ];
+
+        environment.systemPackages = [ pkgs.libuiohook.test ];
+
+        test-support.displayManager.auto.user = user.name;
+      };
+
+  testScript = { nodes, ... }:
+    let user = nodes.client.config.users.users.alice;
+    in ''
+      client.wait_for_x()
+      client.succeed("su - alice -c ${pkgs.libuiohook.test}/share/uiohook_tests >&2 &")
+    '';
+})
diff --git a/nixos/tests/libvirtd.nix b/nixos/tests/libvirtd.nix
new file mode 100644
index 00000000000..49258fcb93e
--- /dev/null
+++ b/nixos/tests/libvirtd.nix
@@ -0,0 +1,61 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "libvirtd";
+  meta.maintainers = with pkgs.lib.maintainers; [ fpletz ];
+
+  nodes = {
+    virthost =
+      { pkgs, ... }:
+      {
+        virtualisation = {
+          cores = 2;
+          memorySize = 2048;
+
+          libvirtd.enable = true;
+        };
+        boot.supportedFilesystems = [ "zfs" ];
+        networking.hostId = "deadbeef"; # needed for zfs
+        networking.nameservers = [ "192.168.122.1" ];
+        security.polkit.enable = true;
+        environment.systemPackages = with pkgs; [ virt-manager ];
+      };
+  };
+
+  testScript = let
+    nixosInstallISO = (import ../release.nix {}).iso_minimal.${pkgs.hostPlatform.system};
+    virshShutdownCmd = if pkgs.stdenv.isx86_64 then "shutdown" else "destroy";
+  in ''
+    start_all()
+
+    virthost.wait_for_unit("multi-user.target")
+
+    with subtest("enable default network"):
+      virthost.succeed("virsh net-start default")
+      virthost.succeed("virsh net-autostart default")
+      virthost.succeed("virsh net-info default")
+
+    with subtest("check if partition disk pools works with parted"):
+      virthost.succeed("fallocate -l100m /tmp/foo; losetup /dev/loop0 /tmp/foo; echo 'label: dos' | sfdisk /dev/loop0")
+      virthost.succeed("virsh pool-create-as foo disk --source-dev /dev/loop0 --target /dev")
+      virthost.succeed("virsh vol-create-as foo loop0p1 25MB")
+      virthost.succeed("virsh vol-create-as foo loop0p2 50MB")
+
+    with subtest("check if virsh zfs pools work"):
+      virthost.succeed("fallocate -l100m /tmp/zfs; losetup /dev/loop1 /tmp/zfs;")
+      virthost.succeed("zpool create zfs_loop /dev/loop1")
+      virthost.succeed("virsh pool-define-as --name zfs_storagepool --source-name zfs_loop --type zfs")
+      virthost.succeed("virsh pool-start zfs_storagepool")
+      virthost.succeed("virsh vol-create-as zfs_storagepool disk1 25MB")
+
+    with subtest("check if nixos install iso boots, network and autostart works"):
+      virthost.succeed(
+        "virt-install -n nixos --osinfo nixos-unstable --memory 1024 --graphics none --disk `find ${nixosInstallISO}/iso -type f | head -n1`,readonly=on --import --noautoconsole --autostart"
+      )
+      virthost.succeed("virsh domstate nixos | grep running")
+      virthost.wait_until_succeeds("ping -c 1 nixos")
+      virthost.succeed("virsh ${virshShutdownCmd} nixos")
+      virthost.wait_until_succeeds("virsh domstate nixos | grep 'shut off'")
+      virthost.shutdown()
+      virthost.wait_for_unit("multi-user.target")
+      virthost.wait_until_succeeds("ping -c 1 nixos")
+  '';
+})
diff --git a/nixos/tests/lighttpd.nix b/nixos/tests/lighttpd.nix
new file mode 100644
index 00000000000..36e2745c55c
--- /dev/null
+++ b/nixos/tests/lighttpd.nix
@@ -0,0 +1,21 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }: {
+  name = "lighttpd";
+  meta.maintainers = with lib.maintainers; [ bjornfor ];
+
+  nodes = {
+    server = {
+      services.lighttpd.enable = true;
+      services.lighttpd.document-root = pkgs.runCommand "document-root" {} ''
+        mkdir -p "$out"
+        echo "hello nixos test" > "$out/file.txt"
+      '';
+    };
+  };
+
+  testScript = ''
+    start_all()
+    server.wait_for_unit("lighttpd.service")
+    res = server.succeed("curl --fail http://localhost/file.txt")
+    assert "hello nixos test" in res, f"bad server response: '{res}'"
+  '';
+})
diff --git a/nixos/tests/listmonk.nix b/nixos/tests/listmonk.nix
new file mode 100644
index 00000000000..91003653c09
--- /dev/null
+++ b/nixos/tests/listmonk.nix
@@ -0,0 +1,69 @@
+import ./make-test-python.nix ({ lib, ... }: {
+  name = "listmonk";
+  meta.maintainers = with lib.maintainers; [ raitobezarius ];
+
+  nodes.machine = { pkgs, ... }: {
+    services.mailhog.enable = true;
+    services.listmonk = {
+      enable = true;
+      settings = {
+        admin_username = "listmonk";
+        admin_password = "hunter2";
+      };
+      database = {
+        createLocally = true;
+        # https://github.com/knadh/listmonk/blob/174a48f252a146d7e69dab42724e3329dbe25ebe/internal/messenger/email/email.go#L18-L27
+        settings.smtp = [ {
+          enabled = true;
+          host = "localhost";
+          port = 1025;
+          tls_type = "none";
+        }];
+      };
+    };
+  };
+
+  testScript = ''
+    import json
+
+    start_all()
+
+    basic_auth = "listmonk:hunter2"
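+    # Build a curl command line that calls the listmonk API with basic auth and a JSON payload.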
+    def generate_listmonk_request(type, url, data=None):
+       if data is None: data = {}
+       json_data = json.dumps(data)
+       return f'curl -u "{basic_auth}" -X {type} "http://localhost:9000/api/{url}" -H "Content-Type: application/json; charset=utf-8" --data-raw \'{json_data}\'''
+
+    machine.wait_for_unit("mailhog.service")
+    machine.wait_for_unit("postgresql.service")
+    machine.wait_for_unit("listmonk.service")
+    machine.wait_for_open_port(1025)
+    machine.wait_for_open_port(8025)
+    machine.wait_for_open_port(9000)
+    machine.succeed("[[ -f /var/lib/listmonk/.db_settings_initialized ]]")
+
+    # Test transactional endpoint
+    # subscriber_id=1 is guaranteed to exist at install-time
+    # template_id=2 is guaranteed to exist at install-time and is a transactional template (1 is a campaign template).
+    machine.succeed(
+      generate_listmonk_request('POST', 'tx', data={'subscriber_id': 1, 'template_id': 2})
+    )
+    assert 'Welcome John Doe' in machine.succeed(
+        "curl --fail http://localhost:8025/api/v2/messages"
+    )
+
+    # Test campaign endpoint
+    # Based on https://github.com/knadh/listmonk/blob/174a48f252a146d7e69dab42724e3329dbe25ebe/cmd/campaigns.go#L549 as docs do not exist.
+    campaign_data = json.loads(machine.succeed(
+      generate_listmonk_request('POST', 'campaigns/1/test', data={'template_id': 1, 'subscribers': ['john@example.com'], 'name': 'Test', 'subject': 'NixOS is great', 'lists': [1], 'messenger': 'email'})
+    ))
+
+    assert campaign_data['data']  # This is a boolean asserting if the test was successful or not: https://github.com/knadh/listmonk/blob/174a48f252a146d7e69dab42724e3329dbe25ebe/cmd/campaigns.go#L626
+
+    messages = json.loads(machine.succeed(
+        "curl --fail http://localhost:8025/api/v2/messages"
+    ))
+
+    assert messages['total'] == 2
+  '';
+})
diff --git a/nixos/tests/logrotate.nix b/nixos/tests/logrotate.nix
index b0685f3af9f..94f6ad5103f 100644
--- a/nixos/tests/logrotate.nix
+++ b/nixos/tests/logrotate.nix
@@ -64,29 +64,6 @@ import ./make-test-python.nix ({ pkgs, ... }: rec {
           notifempty = true;
         };
       };
-      # extraConfig compatibility - should be added to top level, early.
-      services.logrotate.extraConfig = ''
-        nomail
-      '';
-      # paths compatibility
-      services.logrotate.paths = {
-        compat_path = {
-          path = "compat_test_path";
-        };
-        # user/group should be grouped as 'su user group'
-        compat_user = {
-          user = config.users.users.root.name;
-          group = "root";
-        };
-        # extraConfig in path should be added to block
-        compat_extraConfig = {
-          extraConfig = "dateext";
-        };
-        # keep -> rotate
-        compat_keep = {
-          keep = 1;
-        };
-      };
     };
   };
 
@@ -127,12 +104,6 @@ import ./make-test-python.nix ({ pkgs, ... }: rec {
               "sed -ne '/\"postrotate\" {/,/}/p' /tmp/logrotate.conf | grep endscript",
               "grep '\"file1\"\n\"file2\" {' /tmp/logrotate.conf",
               "sed -ne '/\"import\" {/,/}/p' /tmp/logrotate.conf | grep noolddir",
-              "sed -ne '1,/^\"/p' /tmp/logrotate.conf | grep nomail",
-              "grep '\"compat_test_path\" {' /tmp/logrotate.conf",
-              "sed -ne '/\"compat_user\" {/,/}/p' /tmp/logrotate.conf | grep 'su root root'",
-              "sed -ne '/\"compat_extraConfig\" {/,/}/p' /tmp/logrotate.conf | grep dateext",
-              "[[ $(sed -ne '/\"compat_keep\" {/,/}/p' /tmp/logrotate.conf | grep -w rotate) = \"  rotate 1\" ]]",
-              "! sed -ne '/\"compat_keep\" {/,/}/p' /tmp/logrotate.conf | grep -w keep",
           )
           # also check configFile option
           failingMachine.succeed(
diff --git a/nixos/tests/lorri/default.nix b/nixos/tests/lorri/default.nix
index 209b87f9f26..a4bdc92490c 100644
--- a/nixos/tests/lorri/default.nix
+++ b/nixos/tests/lorri/default.nix
@@ -1,4 +1,6 @@
 import ../make-test-python.nix {
+  name = "lorri";
+
   nodes.machine = { pkgs, ... }: {
     imports = [ ../../modules/profiles/minimal.nix ];
     environment.systemPackages = [ pkgs.lorri ];
diff --git a/nixos/tests/lxd-image-server.nix b/nixos/tests/lxd-image-server.nix
index 072f4570c2c..e5a292b61bd 100644
--- a/nixos/tests/lxd-image-server.nix
+++ b/nixos/tests/lxd-image-server.nix
@@ -8,8 +8,8 @@ let
     };
   };
 
-  lxd-image-metadata = lxd-image.lxdMeta.${pkgs.system};
-  lxd-image-rootfs = lxd-image.lxdImage.${pkgs.system};
+  lxd-image-metadata = lxd-image.lxdMeta.${pkgs.stdenv.hostPlatform.system};
+  lxd-image-rootfs = lxd-image.lxdImage.${pkgs.stdenv.hostPlatform.system};
 
 in {
   name = "lxd-image-server";
diff --git a/nixos/tests/lxd.nix b/nixos/tests/lxd.nix
index 15d16564d64..2c2c19e0eec 100644
--- a/nixos/tests/lxd.nix
+++ b/nixos/tests/lxd.nix
@@ -11,8 +11,8 @@ let
     };
   };
 
-  lxd-image-metadata = lxd-image.lxdMeta.${pkgs.system};
-  lxd-image-rootfs = lxd-image.lxdImage.${pkgs.system};
+  lxd-image-metadata = lxd-image.lxdMeta.${pkgs.stdenv.hostPlatform.system};
+  lxd-image-rootfs = lxd-image.lxdImage.${pkgs.stdenv.hostPlatform.system};
 
 in {
   name = "lxd";
@@ -23,7 +23,7 @@ in {
 
   nodes.machine = { lib, ... }: {
     virtualisation = {
-      diskSize = 2048;
+      diskSize = 4096;
 
       # Since we're testing `limits.cpu`, we've gotta have a known number of
       # cores to lean on
diff --git a/nixos/tests/maddy.nix b/nixos/tests/maddy.nix
index 581748c1fa5..b9d0416482d 100644
--- a/nixos/tests/maddy.nix
+++ b/nixos/tests/maddy.nix
@@ -49,7 +49,7 @@ import ./make-test-python.nix ({ pkgs, ... }: {
     server.wait_for_open_port(143)
     server.wait_for_open_port(587)
 
-    server.succeed("echo test | maddyctl creds create postmaster@server")
+    server.succeed("maddyctl creds create --password test postmaster@server")
     server.succeed("maddyctl imap-acct create postmaster@server")
 
     client.succeed("send-testmail")
diff --git a/nixos/tests/make-test-python.nix b/nixos/tests/make-test-python.nix
index 7a96f538d8d..28569f1d295 100644
--- a/nixos/tests/make-test-python.nix
+++ b/nixos/tests/make-test-python.nix
@@ -1,6 +1,6 @@
 f: {
   system ? builtins.currentSystem,
-  pkgs ? import ../.. { inherit system; },
+  pkgs ? import ../.. { inherit system; config = {}; overlays = []; },
   ...
 } @ args:
 
diff --git a/nixos/tests/matomo.nix b/nixos/tests/matomo.nix
index 526a24fc4db..0e09ad295f9 100644
--- a/nixos/tests/matomo.nix
+++ b/nixos/tests/matomo.nix
@@ -7,6 +7,8 @@ with pkgs.lib;
 let
   matomoTest = package:
   makeTest {
+    name = "matomo";
+
     nodes.machine = { config, pkgs, ... }: {
       services.matomo = {
         package = package;
diff --git a/nixos/tests/matrix/conduit.nix b/nixos/tests/matrix/conduit.nix
index 780837f962f..2b81c23598e 100644
--- a/nixos/tests/matrix/conduit.nix
+++ b/nixos/tests/matrix/conduit.nix
@@ -3,6 +3,8 @@ import ../make-test-python.nix ({ pkgs, ... }:
     name = "conduit";
   in
   {
+    name = "matrix-conduit";
+
     nodes = {
       conduit = args: {
         services.matrix-conduit = {
diff --git a/nixos/tests/matrix/mjolnir.nix b/nixos/tests/matrix/mjolnir.nix
index 3864f0ff2bb..b1ac55d951c 100644
--- a/nixos/tests/matrix/mjolnir.nix
+++ b/nixos/tests/matrix/mjolnir.nix
@@ -32,6 +32,7 @@ import ../make-test-python.nix (
     name = "mjolnir";
     meta = with pkgs.lib; {
       maintainers = teams.matrix.members;
+      broken = true; # times out after spending many hours
     };
 
     nodes = {
@@ -45,7 +46,6 @@ import ../make-test-python.nix (
             enable_registration = true;
             enable_registration_without_verification = true;
             registration_shared_secret = "supersecret-registration";
-            enable_registration_without_verification = true;
 
             listeners = [ {
               # The default but tls=false
diff --git a/nixos/tests/matrix/synapse.nix b/nixos/tests/matrix/synapse.nix
index 756a8d5de49..698d67c793e 100644
--- a/nixos/tests/matrix/synapse.nix
+++ b/nixos/tests/matrix/synapse.nix
@@ -209,7 +209,7 @@ in {
         "curl --fail -L --cacert ${ca_pem} https://localhost:8448/"
     )
     serverpostgres.require_unit_state("postgresql.service")
-    serverpostgres.succeed("register_new_matrix_user -u ${testUser} -p ${testPassword} -a -k ${registrationSharedSecret} ")
+    serverpostgres.succeed("register_new_matrix_user -u ${testUser} -p ${testPassword} -a -k ${registrationSharedSecret} https://localhost:8448/")
     serverpostgres.succeed("obtain-token-and-register-email")
     serversqlite.wait_for_unit("matrix-synapse.service")
     serversqlite.wait_until_succeeds(
diff --git a/nixos/tests/mediatomb.nix b/nixos/tests/mediatomb.nix
index b7a126a01ad..9c84aa3e92a 100644
--- a/nixos/tests/mediatomb.nix
+++ b/nixos/tests/mediatomb.nix
@@ -1,81 +1,44 @@
-import ./make-test-python.nix ({ pkgs, ... }:
-
-{
+import ./make-test-python.nix {
   name = "mediatomb";
 
   nodes = {
-    serverGerbera =
-      { ... }:
-      let port = 49152;
-      in {
-        imports = [ ../modules/profiles/minimal.nix ];
-        services.mediatomb = {
-          enable = true;
-          serverName = "Gerbera";
-          package = pkgs.gerbera;
-          interface = "eth1";  # accessible from test
-          openFirewall = true;
-          mediaDirectories = [
-            { path = "/var/lib/gerbera/pictures"; recursive = false; hidden-files = false; }
-            { path = "/var/lib/gerbera/audio"; recursive = true; hidden-files = false; }
-          ];
-        };
-      };
-
-    serverMediatomb =
-      { ... }:
-      let port = 49151;
-      in {
-        imports = [ ../modules/profiles/minimal.nix ];
-        services.mediatomb = {
-          enable = true;
-          serverName = "Mediatomb";
-          package = pkgs.mediatomb;
-          interface = "eth1";
-          inherit port;
-          mediaDirectories = [
-            { path = "/var/lib/mediatomb/pictures"; recursive = false; hidden-files = false; }
-            { path = "/var/lib/mediatomb/audio"; recursive = true; hidden-files = false; }
-          ];
-        };
-        networking.firewall.interfaces.eth1 = {
-          allowedUDPPorts = [ 1900 port ];
-          allowedTCPPorts = [ port ];
-        };
+    server = {
+      services.mediatomb = {
+        enable = true;
+        serverName = "Gerbera";
+        interface = "eth1";
+        openFirewall = true;
+        mediaDirectories = [
+          {
+            path = "/var/lib/gerbera/pictures";
+            recursive = false;
+            hidden-files = false;
+          }
+          {
+            path = "/var/lib/gerbera/audio";
+            recursive = true;
+            hidden-files = false;
+          }
+        ];
       };
+      systemd.tmpfiles.rules = [
+        "d /var/lib/gerbera/pictures 0770 mediatomb mediatomb"
+        "d /var/lib/gerbera/audio 0770 mediatomb mediatomb"
+      ];
+    };
 
-      client = { ... }: { };
+    client = {};
   };
 
-  testScript =
-  ''
+  testScript = ''
     start_all()
 
-    port = 49151
-    serverMediatomb.succeed("mkdir -p /var/lib/mediatomb/{pictures,audio}")
-    serverMediatomb.succeed("chown -R mediatomb:mediatomb /var/lib/mediatomb")
-    serverMediatomb.wait_for_unit("mediatomb")
-    serverMediatomb.wait_for_open_port(port)
-    serverMediatomb.succeed(f"curl --fail http://serverMediatomb:{port}/")
-    page = client.succeed(f"curl --fail http://serverMediatomb:{port}/")
-    assert "MediaTomb" in page and "Gerbera" not in page
-    serverMediatomb.shutdown()
+    server.wait_for_unit("mediatomb")
+    server.wait_until_succeeds("nc -z 192.168.1.2 49152")
+    server.succeed("curl -v --fail http://server:49152/")
 
-    port = 49152
-    serverGerbera.succeed("mkdir -p /var/lib/mediatomb/{pictures,audio}")
-    serverGerbera.succeed("chown -R mediatomb:mediatomb /var/lib/mediatomb")
-    # service running gerbera fails the first time claiming something is already bound
-    # gerbera[715]: 2020-07-18 23:52:14   info: Please check if another instance of Gerbera or
-    # gerbera[715]: 2020-07-18 23:52:14   info: another application is running on port TCP 49152 or UDP 1900.
-    # I did not find anything so here I work around this
-    serverGerbera.succeed("sleep 2")
-    serverGerbera.wait_until_succeeds("systemctl restart mediatomb")
-    serverGerbera.wait_for_unit("mediatomb")
-    serverGerbera.succeed(f"curl --fail http://serverGerbera:{port}/")
-    page = client.succeed(f"curl --fail http://serverGerbera:{port}/")
+    client.wait_for_unit("multi-user.target")
+    page = client.succeed("curl -v --fail http://server:49152/")
     assert "Gerbera" in page and "MediaTomb" not in page
-
-    serverGerbera.shutdown()
-    client.shutdown()
   '';
-})
+}
diff --git a/nixos/tests/meilisearch.nix b/nixos/tests/meilisearch.nix
index 6d7514a1cc0..c31dcb0559d 100644
--- a/nixos/tests/meilisearch.nix
+++ b/nixos/tests/meilisearch.nix
@@ -35,20 +35,20 @@ import ./make-test-python.nix ({ pkgs, lib, ... }:
 
       with subtest("create index"):
           machine.succeed(
-              "curl -XPOST --header 'Content-Type: application/json' ${apiUrl}/indexes --data @${indexJSON}"
+              "curl -X POST -H 'Content-Type: application/json' ${apiUrl}/indexes --data @${indexJSON}"
           )
           indexes = json.loads(machine.succeed("curl ${apiUrl}/indexes"))
-          assert len(indexes) == 1, "index wasn't created"
+          assert indexes["total"] == 1, "index wasn't created"
 
       with subtest("add documents"):
           response = json.loads(
               machine.succeed(
-                  "curl -XPOST --header 'Content-Type: application/json' ${apiUrl}/indexes/${uid}/documents --data @${moviesJSON}"
+                  "curl -X POST -H 'Content-Type: application/json' ${apiUrl}/indexes/${uid}/documents --data-binary @${moviesJSON}"
               )
           )
-          update_id = response["updateId"]
+          task_uid = response["taskUid"]
           machine.wait_until_succeeds(
-              f"curl ${apiUrl}/indexes/${uid}/updates/{update_id} | jq -e '.status == \"processed\"'"
+              f"curl ${apiUrl}/tasks/{task_uid} | jq -e '.status == \"succeeded\"'"
           )
 
       with subtest("search"):
diff --git a/nixos/tests/merecat.nix b/nixos/tests/merecat.nix
new file mode 100644
index 00000000000..9d8f66165ee
--- /dev/null
+++ b/nixos/tests/merecat.nix
@@ -0,0 +1,28 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "merecat";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ fgaz ];
+  };
+
+  nodes.machine = { config, pkgs, ... }: {
+    services.merecat = {
+      enable = true;
+      settings = {
+        hostname = "localhost";
+        virtual-host = true;
+        directory = toString (pkgs.runCommand "merecat-webdir" {} ''
+          mkdir -p $out/foo.localhost $out/bar.localhost
+          echo '<h1>Hello foo</h1>' > $out/foo.localhost/index.html
+          echo '<h1>Hello bar</h1>' > $out/bar.localhost/index.html
+        '');
+      };
+    };
+  };
+
+  testScript = ''
+    machine.wait_for_unit("merecat")
+    machine.wait_for_open_port(80)
+    machine.succeed("curl --fail foo.localhost | grep 'Hello foo'")
+    machine.succeed("curl --fail bar.localhost | grep 'Hello bar'")
+  '';
+})
diff --git a/nixos/tests/minecraft-server.nix b/nixos/tests/minecraft-server.nix
index dbe2cd6d56f..6e733bb96c1 100644
--- a/nixos/tests/minecraft-server.nix
+++ b/nixos/tests/minecraft-server.nix
@@ -18,6 +18,8 @@ in import ./make-test-python.nix ({ pkgs, ... }: {
       serverProperties = {
         enable-rcon = true;
         level-seed = seed;
+        level-type = "flat";
+        generate-structures = false;
         online-mode = false;
         "rcon.password" = rcon-pass;
         "rcon.port" = rcon-port;
@@ -33,5 +35,6 @@ in import ./make-test-python.nix ({ pkgs, ... }: {
     assert "${seed}" in server.succeed(
         "mcrcon -H localhost -P ${toString rcon-port} -p '${rcon-pass}' -c 'seed'"
     )
+    server.succeed("systemctl stop minecraft-server")
   '';
 })
diff --git a/nixos/tests/minidlna.nix b/nixos/tests/minidlna.nix
index 76039b0bb42..32721819634 100644
--- a/nixos/tests/minidlna.nix
+++ b/nixos/tests/minidlna.nix
@@ -6,25 +6,24 @@ import ./make-test-python.nix ({ pkgs, ... }: {
       { ... }:
       {
         imports = [ ../modules/profiles/minimal.nix ];
-        networking.firewall.allowedTCPPorts = [ 8200 ];
-        services.minidlna = {
-          enable = true;
-          loglevel = "error";
-          mediaDirs = [
-           "PV,/tmp/stuff"
+        services.minidlna.enable = true;
+        services.minidlna.openFirewall = true;
+        services.minidlna.settings = {
+          log_level = "error";
+          media_dir = [
+            "PV,/tmp/stuff"
+          ];
+          friendly_name = "rpi3";
+          root_container = "B";
+          notify_interval = 60;
+          album_art_names = [
+            "Cover.jpg/cover.jpg/AlbumArtSmall.jpg/albumartsmall.jpg"
+            "AlbumArt.jpg/albumart.jpg/Album.jpg/album.jpg"
+            "Folder.jpg/folder.jpg/Thumb.jpg/thumb.jpg"
           ];
-          friendlyName = "rpi3";
-          rootContainer = "B";
-          extraConfig =
-          ''
-            album_art_names=Cover.jpg/cover.jpg/AlbumArtSmall.jpg/albumartsmall.jpg
-            album_art_names=AlbumArt.jpg/albumart.jpg/Album.jpg/album.jpg
-            album_art_names=Folder.jpg/folder.jpg/Thumb.jpg/thumb.jpg
-            notify_interval=60
-          '';
         };
       };
-      client = { ... }: { };
+    client = { ... }: { };
   };
 
   testScript =
diff --git a/nixos/tests/moodle.nix b/nixos/tests/moodle.nix
index 4570e896388..8fd011e0cb2 100644
--- a/nixos/tests/moodle.nix
+++ b/nixos/tests/moodle.nix
@@ -16,7 +16,7 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
 
   testScript = ''
     start_all()
-    machine.wait_for_unit("phpfpm-moodle.service")
+    machine.wait_for_unit("phpfpm-moodle.service", timeout=1800)
     machine.wait_until_succeeds("curl http://localhost/ | grep 'You are not logged in'")
   '';
 })
diff --git a/nixos/tests/mosquitto.nix b/nixos/tests/mosquitto.nix
index d516d3373d9..70eecc89278 100644
--- a/nixos/tests/mosquitto.nix
+++ b/nixos/tests/mosquitto.nix
@@ -4,7 +4,7 @@ let
   port = 1888;
   tlsPort = 1889;
   anonPort = 1890;
-  bindTestPort = 1891;
+  bindTestPort = 18910;
   password = "VERY_secret";
   hashedPassword = "$7$101$/WJc4Mp+I+uYE9sR$o7z9rD1EYXHPwEP5GqQj6A7k4W1yVbePlb8TqNcuOLV9WNCiDgwHOB0JHC1WCtdkssqTBduBNUnUGd6kmZvDSw==";
   topic = "test/foo";
@@ -165,6 +165,10 @@ in {
         for t in threads: t.start()
         for t in threads: t.join()
 
+    def wait_uuid(uuid):
+        server.wait_for_console_text(uuid)
+        return None
+
 
     start_all()
     server.wait_for_unit("mosquitto.service")
@@ -203,14 +207,14 @@ in {
         parallel(
             lambda: client1.succeed(subscribe("-i 3688cdd7-aa07-42a4-be22-cb9352917e40", "reader")),
             lambda: [
-                server.wait_for_console_text("3688cdd7-aa07-42a4-be22-cb9352917e40"),
+                wait_uuid("3688cdd7-aa07-42a4-be22-cb9352917e40"),
                 client2.succeed(publish("-m test", "writer"))
             ])
 
         parallel(
             lambda: client1.fail(subscribe("-i 24ff16a2-ae33-4a51-9098-1b417153c712", "reader")),
             lambda: [
-                server.wait_for_console_text("24ff16a2-ae33-4a51-9098-1b417153c712"),
+                wait_uuid("24ff16a2-ae33-4a51-9098-1b417153c712"),
                 client2.succeed(publish("-m test", "reader"))
             ])
 
@@ -229,7 +233,7 @@ in {
             lambda: client1.succeed(subscribe("-i fd56032c-d9cb-4813-a3b4-6be0e04c8fc3",
                 "anonReader", port=${toString anonPort})),
             lambda: [
-                server.wait_for_console_text("fd56032c-d9cb-4813-a3b4-6be0e04c8fc3"),
+                wait_uuid("fd56032c-d9cb-4813-a3b4-6be0e04c8fc3"),
                 client2.succeed(publish("-m test", "anonWriter", port=${toString anonPort}))
             ])
   '';
diff --git a/nixos/tests/musescore.nix b/nixos/tests/musescore.nix
index 18de0a55023..ac2f4ba74c0 100644
--- a/nixos/tests/musescore.nix
+++ b/nixos/tests/musescore.nix
@@ -69,6 +69,10 @@ in
     # Wait until the export dialogue appears.
     machine.wait_for_window("Export")
     machine.screenshot("MuseScore1")
+    machine.send_key("shift-tab")
+    machine.sleep(1)
+    machine.send_key("shift-tab")
+    machine.sleep(1)
     machine.send_key("ret")
     machine.sleep(1)
     machine.send_key("ret")
diff --git a/nixos/tests/mysql/common.nix b/nixos/tests/mysql/common.nix
index 040d360b6d9..7fdf0f33d3f 100644
--- a/nixos/tests/mysql/common.nix
+++ b/nixos/tests/mysql/common.nix
@@ -1,10 +1,7 @@
 { lib, pkgs }: {
-  mariadbPackages = lib.filterAttrs (n: _: lib.hasPrefix "mariadb" n) (pkgs.callPackage ../../../pkgs/servers/sql/mariadb {
-    inherit (pkgs.darwin) cctools;
-    inherit (pkgs.darwin.apple_sdk.frameworks) CoreServices;
-  });
+  mariadbPackages = lib.filterAttrs (n: _: lib.hasPrefix "mariadb" n) (import ../../../pkgs/servers/sql/mariadb pkgs);
   mysqlPackages = {
-    inherit (pkgs) mysql57 mysql80;
+    inherit (pkgs) mysql80;
   };
   mkTestName = pkg: "mariadb_${builtins.replaceStrings ["."] [""] (lib.versions.majorMinor pkg.version)}";
 }
diff --git a/nixos/tests/neo4j.nix b/nixos/tests/neo4j.nix
index 8329e5630d7..0b57f5b2e03 100644
--- a/nixos/tests/neo4j.nix
+++ b/nixos/tests/neo4j.nix
@@ -2,19 +2,25 @@ import ./make-test-python.nix {
   name = "neo4j";
 
   nodes = {
-    master =
+    server =
       { ... }:
 
       {
+        virtualisation.memorySize = 4096;
+        virtualisation.diskSize = 1024;
+
         services.neo4j.enable = true;
+        # https and bolt are disabled here because they require TLS certs to be available
+        services.neo4j.https.enable = false;
+        services.neo4j.bolt.enable = false;
       };
   };
 
   testScript = ''
     start_all()
 
-    master.wait_for_unit("neo4j")
-    master.wait_for_open_port(7474)
-    master.succeed("curl -f http://localhost:7474/")
+    server.wait_for_unit("neo4j.service")
+    server.wait_for_open_port(7474)
+    server.succeed("curl -f http://localhost:7474/")
   '';
 }
diff --git a/nixos/tests/netbird.nix b/nixos/tests/netbird.nix
new file mode 100644
index 00000000000..ef793cfe988
--- /dev/null
+++ b/nixos/tests/netbird.nix
@@ -0,0 +1,21 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }:
+{
+  name = "netbird";
+
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ misuzu ];
+  };
+
+  nodes = {
+    node = { ... }: {
+      services.netbird.enable = true;
+    };
+  };
+
+  testScript = ''
+    start_all()
+    node.wait_for_unit("netbird.service")
+    node.wait_for_file("/var/run/netbird/sock")
+    node.succeed("netbird status | grep -q 'Daemon status: NeedsLogin'")
+  '';
+})
diff --git a/nixos/tests/networking-proxy.nix b/nixos/tests/networking-proxy.nix
index fcb2558cf3b..330bac2588a 100644
--- a/nixos/tests/networking-proxy.nix
+++ b/nixos/tests/networking-proxy.nix
@@ -37,7 +37,7 @@ in import ./make-test-python.nix ({ pkgs, ...} : {
       default-config //
       {
         networking.proxy = {
-          # useless because overriden by the next options
+          # useless because overridden by the next options
           default = "http://user:pass@host:port";
           # advanced proxy setup
           httpProxy = "123-http://user:pass@http-host:port";
diff --git a/nixos/tests/networking.nix b/nixos/tests/networking.nix
index a00ff165d42..71b82b87127 100644
--- a/nixos/tests/networking.nix
+++ b/nixos/tests/networking.nix
@@ -682,6 +682,46 @@ let
               client2.succeed("ip addr show dev vlan >&2")
         '';
     };
+    vlan-ping = let
+        baseIP = number: "10.10.10.${number}";
+        vlanIP = number: "10.1.1.${number}";
+        baseInterface = "eth1";
+        vlanInterface = "vlan42";
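+        # Each node gets a static address on eth1 plus a tagged VLAN 42 subinterface (vlan42).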
+        node = number: {pkgs, ... }: with pkgs.lib; {
+          virtualisation.vlans = [ 1 ];
+          networking = {
+            #useNetworkd = networkd;
+            useDHCP = false;
+            vlans.${vlanInterface} = { id = 42; interface = baseInterface; };
+            interfaces.${baseInterface}.ipv4.addresses = mkOverride 0 [{ address = baseIP number; prefixLength = 24; }];
+            interfaces.${vlanInterface}.ipv4.addresses = mkOverride 0 [{ address = vlanIP number; prefixLength = 24; }];
+          };
+        };
+
+        serverNodeNum = "1";
+        clientNodeNum = "2";
+
+    in {
+      name = "vlan-ping";
+      nodes.server = node serverNodeNum;
+      nodes.client = node clientNodeNum;
+      testScript = { ... }:
+        ''
+          start_all()
+
+          with subtest("Wait for networking to be configured"):
+              server.wait_for_unit("network.target")
+              client.wait_for_unit("network.target")
+
+          with subtest("Test ping on base interface in setup"):
+              client.succeed("ping -I ${baseInterface} -c 1 ${baseIP serverNodeNum}")
+              server.succeed("ping -I ${baseInterface} -c 1 ${baseIP clientNodeNum}")
+
+          with subtest("Test ping on vlan subinterface in setup"):
+              client.succeed("ping -I ${vlanInterface} -c 1 ${vlanIP serverNodeNum}")
+              server.succeed("ping -I ${vlanInterface} -c 1 ${vlanIP clientNodeNum}")
+        '';
+    };
     virtual = {
       name = "Virtual";
       nodes.machine = {
diff --git a/nixos/tests/nextcloud/basic.nix b/nixos/tests/nextcloud/basic.nix
index eb37470a4c7..a475049e7b2 100644
--- a/nixos/tests/nextcloud/basic.nix
+++ b/nixos/tests/nextcloud/basic.nix
@@ -37,6 +37,8 @@ in {
         "d /var/lib/nextcloud-data 0750 nextcloud nginx - -"
       ];
 
+      system.stateVersion = "22.11"; # stateVersion >=22.11 to make sure that we use OpenSSL 3
+
       services.nextcloud = {
         enable = true;
         datadir = "/var/lib/nextcloud-data";
@@ -99,6 +101,10 @@ in {
     # This is just to ensure the nextcloud-occ program is working
     nextcloud.succeed("nextcloud-occ status")
     nextcloud.succeed("curl -sSf http://nextcloud/login")
+    # Ensure that no OpenSSL 1.1 is used.
+    nextcloud.succeed(
+        "${nodes.nextcloud.services.phpfpm.pools.nextcloud.phpPackage}/bin/php -i | grep 'OpenSSL Library Version' | awk -F'=>' '{ print $2 }' | awk '{ print $2 }' | grep -v 1.1"
+    )
     nextcloud.succeed(
         "${withRcloneEnv} ${copySharedFile}"
     )
@@ -108,5 +114,6 @@ in {
         "${withRcloneEnv} ${diffSharedFile}"
     )
     assert "hi" in client.succeed("cat /mnt/dav/test-shared-file")
+    nextcloud.succeed("grep -vE '^HBEGIN:oc_encryption_module' /var/lib/nextcloud-data/data/root/files/test-shared-file")
   '';
 })) args
diff --git a/nixos/tests/nextcloud/default.nix b/nixos/tests/nextcloud/default.nix
index 45165b04bf8..b8d3ba75b51 100644
--- a/nixos/tests/nextcloud/default.nix
+++ b/nixos/tests/nextcloud/default.nix
@@ -8,6 +8,10 @@ with pkgs.lib;
 foldl
   (matrix: ver: matrix // {
     "basic${toString ver}" = import ./basic.nix { inherit system pkgs; nextcloudVersion = ver; };
+    "openssl-sse${toString ver}" = import ./openssl-sse.nix {
+      inherit system pkgs;
+      nextcloudVersion = ver;
+    };
     "with-postgresql-and-redis${toString ver}" = import ./with-postgresql-and-redis.nix {
       inherit system pkgs;
       nextcloudVersion = ver;
@@ -16,6 +20,10 @@ foldl
       inherit system pkgs;
       nextcloudVersion = ver;
     };
+    "with-declarative-redis-and-secrets${toString ver}" = import ./with-declarative-redis-and-secrets.nix {
+      inherit system pkgs;
+      nextcloudVersion = ver;
+    };
   })
 { }
-  [ 23 24 ]
+  [ 24 25 ]
diff --git a/nixos/tests/nextcloud/openssl-sse.nix b/nixos/tests/nextcloud/openssl-sse.nix
new file mode 100644
index 00000000000..7595ee2c67e
--- /dev/null
+++ b/nixos/tests/nextcloud/openssl-sse.nix
@@ -0,0 +1,105 @@
+args@{ pkgs, nextcloudVersion ? 25, ... }:
+
+(import ../make-test-python.nix ({ pkgs, ...}: let
+  adminuser = "root";
+  adminpass = "notproduction";
+  nextcloudBase = {
+    networking.firewall.allowedTCPPorts = [ 80 ];
+    system.stateVersion = "22.05"; # stateVersions <22.11 use openssl 1.1 by default
+    services.nextcloud = {
+      enable = true;
+      config.adminpassFile = "${pkgs.writeText "adminpass" adminpass}";
+      package = pkgs.${"nextcloud" + (toString nextcloudVersion)};
+    };
+  };
+in {
+  name = "nextcloud-openssl";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ ma27 ];
+  };
+  nodes.nextcloudwithopenssl1 = {
+    imports = [ nextcloudBase ];
+    services.nextcloud.hostName = "nextcloudwithopenssl1";
+  };
+  nodes.nextcloudwithopenssl3 = {
+    imports = [ nextcloudBase ];
+    services.nextcloud = {
+      hostName = "nextcloudwithopenssl3";
+      enableBrokenCiphersForSSE = false;
+    };
+  };
+  testScript = { nodes, ... }: let
+    withRcloneEnv = host: pkgs.writeScript "with-rclone-env" ''
+      #!${pkgs.runtimeShell}
+      export RCLONE_CONFIG_NEXTCLOUD_TYPE=webdav
+      export RCLONE_CONFIG_NEXTCLOUD_URL="http://${host}/remote.php/webdav/"
+      export RCLONE_CONFIG_NEXTCLOUD_VENDOR="nextcloud"
+      export RCLONE_CONFIG_NEXTCLOUD_USER="${adminuser}"
+      export RCLONE_CONFIG_NEXTCLOUD_PASS="$(${pkgs.rclone}/bin/rclone obscure ${adminpass})"
+      "''${@}"
+    '';
+    withRcloneEnv1 = withRcloneEnv "nextcloudwithopenssl1";
+    withRcloneEnv3 = withRcloneEnv "nextcloudwithopenssl3";
+    copySharedFile1 = pkgs.writeScript "copy-shared-file" ''
+      #!${pkgs.runtimeShell}
+      echo 'hi' | ${withRcloneEnv1} ${pkgs.rclone}/bin/rclone rcat nextcloud:test-shared-file
+    '';
+    copySharedFile3 = pkgs.writeScript "copy-shared-file" ''
+      #!${pkgs.runtimeShell}
+      echo 'bye' | ${withRcloneEnv3} ${pkgs.rclone}/bin/rclone rcat nextcloud:test-shared-file2
+    '';
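+    # System closures used to switch the running VM between the OpenSSL 1.1 and OpenSSL 3 configurations.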
+    openssl1-node = nodes.nextcloudwithopenssl1.config.system.build.toplevel;
+    openssl3-node = nodes.nextcloudwithopenssl3.config.system.build.toplevel;
+  in ''
+    nextcloudwithopenssl1.start()
+    nextcloudwithopenssl1.wait_for_unit("multi-user.target")
+    nextcloudwithopenssl1.succeed("nextcloud-occ status")
+    nextcloudwithopenssl1.succeed("curl -sSf http://nextcloudwithopenssl1/login")
+
+    with subtest("With OpenSSL 1 SSE can be enabled and used"):
+        nextcloudwithopenssl1.succeed("nextcloud-occ app:enable encryption")
+        nextcloudwithopenssl1.succeed("nextcloud-occ encryption:enable")
+
+    with subtest("Upload file and ensure it's encrypted"):
+        nextcloudwithopenssl1.succeed("${copySharedFile1}")
+        nextcloudwithopenssl1.succeed("grep -E '^HBEGIN:oc_encryption_module' /var/lib/nextcloud/data/root/files/test-shared-file")
+        nextcloudwithopenssl1.succeed("${withRcloneEnv1} ${pkgs.rclone}/bin/rclone cat nextcloud:test-shared-file | grep hi")
+
+    with subtest("Switch to OpenSSL 3"):
+        nextcloudwithopenssl1.succeed("${openssl3-node}/bin/switch-to-configuration test")
+        nextcloudwithopenssl1.wait_for_open_port(80)
+        nextcloudwithopenssl1.succeed("nextcloud-occ status")
+
+    with subtest("Existing encrypted files cannot be read, but new files can be added"):
+        nextcloudwithopenssl1.fail("${withRcloneEnv3} ${pkgs.rclone}/bin/rclone cat nextcloud:test-shared-file >&2")
+        nextcloudwithopenssl1.succeed("nextcloud-occ encryption:disable")
+        nextcloudwithopenssl1.succeed("${copySharedFile3}")
+        nextcloudwithopenssl1.succeed("grep bye /var/lib/nextcloud/data/root/files/test-shared-file2")
+        nextcloudwithopenssl1.succeed("${withRcloneEnv3} ${pkgs.rclone}/bin/rclone cat nextcloud:test-shared-file2 | grep bye")
+
+    with subtest("Switch back to OpenSSL 1.1 and ensure that encrypted files are readable again"):
+        nextcloudwithopenssl1.succeed("${openssl1-node}/bin/switch-to-configuration test")
+        nextcloudwithopenssl1.wait_for_open_port(80)
+        nextcloudwithopenssl1.succeed("nextcloud-occ status")
+        nextcloudwithopenssl1.succeed("nextcloud-occ encryption:enable")
+        nextcloudwithopenssl1.succeed("${withRcloneEnv1} ${pkgs.rclone}/bin/rclone cat nextcloud:test-shared-file2 | grep bye")
+        nextcloudwithopenssl1.succeed("${withRcloneEnv1} ${pkgs.rclone}/bin/rclone cat nextcloud:test-shared-file | grep hi")
+        nextcloudwithopenssl1.succeed("grep -E '^HBEGIN:oc_encryption_module' /var/lib/nextcloud/data/root/files/test-shared-file")
+        nextcloudwithopenssl1.succeed("grep bye /var/lib/nextcloud/data/root/files/test-shared-file2")
+
+    with subtest("Ensure that everything can be decrypted"):
+        nextcloudwithopenssl1.succeed("echo y | nextcloud-occ encryption:decrypt-all >&2")
+        nextcloudwithopenssl1.succeed("${withRcloneEnv1} ${pkgs.rclone}/bin/rclone cat nextcloud:test-shared-file2 | grep bye")
+        nextcloudwithopenssl1.succeed("${withRcloneEnv1} ${pkgs.rclone}/bin/rclone cat nextcloud:test-shared-file | grep hi")
+        nextcloudwithopenssl1.succeed("grep -vE '^HBEGIN:oc_encryption_module' /var/lib/nextcloud/data/root/files/test-shared-file")
+
+    with subtest("Switch to OpenSSL 3 and ensure that all files are usable now"):
+        nextcloudwithopenssl1.succeed("${openssl3-node}/bin/switch-to-configuration test")
+        nextcloudwithopenssl1.wait_for_open_port(80)
+        nextcloudwithopenssl1.succeed("nextcloud-occ status")
+        nextcloudwithopenssl1.succeed("${withRcloneEnv3} ${pkgs.rclone}/bin/rclone cat nextcloud:test-shared-file2 | grep bye")
+        nextcloudwithopenssl1.succeed("${withRcloneEnv3} ${pkgs.rclone}/bin/rclone cat nextcloud:test-shared-file | grep hi")
+
+    nextcloudwithopenssl1.shutdown()
+  '';
+})) args
diff --git a/nixos/tests/nextcloud/with-declarative-redis-and-secrets.nix b/nixos/tests/nextcloud/with-declarative-redis-and-secrets.nix
new file mode 100644
index 00000000000..93e655c3056
--- /dev/null
+++ b/nixos/tests/nextcloud/with-declarative-redis-and-secrets.nix
@@ -0,0 +1,117 @@
+import ../make-test-python.nix ({ pkgs, ...}: let
+  adminpass = "hunter2";
+  adminuser = "custom-admin-username";
+in {
+  name = "nextcloud-with-declarative-redis";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ eqyiel ];
+  };
+
+  nodes = {
+    # The only thing the client needs to do is download a file.
+    client = { ... }: {};
+
+    nextcloud = { config, pkgs, ... }: {
+      networking.firewall.allowedTCPPorts = [ 80 ];
+
+      services.nextcloud = {
+        enable = true;
+        hostName = "nextcloud";
+        caching = {
+          apcu = false;
+          redis = true;
+          memcached = false;
+        };
+        config = {
+          dbtype = "pgsql";
+          dbname = "nextcloud";
+          dbuser = "nextcloud";
+          dbhost = "/run/postgresql";
+          inherit adminuser;
+          adminpassFile = toString (pkgs.writeText "admin-pass-file" ''
+            ${adminpass}
+          '');
+        };
+        secretFile = "/etc/nextcloud-secrets.json";
+
+        extraOptions.redis = {
+          host = "/run/redis/redis.sock";
+          port = 0;
+          dbindex = 0;
+          timeout = 1.5;
+          # password handled via secretfile below
+        };
+        extraOptions.memcache = {
+          local = "\\OC\\Memcache\\Redis";
+          locking = "\\OC\\Memcache\\Redis";
+        };
+      };
+
+      services.redis.servers."nextcloud".enable = true;
+      services.redis.servers."nextcloud".port = 6379;
+
+      systemd.services.nextcloud-setup = {
+        requires = ["postgresql.service"];
+        after = [
+          "postgresql.service"
+        ];
+      };
+
+      services.postgresql = {
+        enable = true;
+        ensureDatabases = [ "nextcloud" ];
+        ensureUsers = [
+          { name = "nextcloud";
+            ensurePermissions."DATABASE nextcloud" = "ALL PRIVILEGES";
+          }
+        ];
+      };
+
+      # This file is meant to contain secret options which should
+      # not go into the nix store. Here it is just used to set the
+      # redis password.
+      environment.etc."nextcloud-secrets.json".text = ''
+        {
+          "redis": {
+            "password": "secret"
+          }
+        }
+      '';
+    };
+  };
+
+  testScript = let
+    withRcloneEnv = pkgs.writeScript "with-rclone-env" ''
+      #!${pkgs.runtimeShell}
+      export RCLONE_CONFIG_NEXTCLOUD_TYPE=webdav
+      export RCLONE_CONFIG_NEXTCLOUD_URL="http://nextcloud/remote.php/webdav/"
+      export RCLONE_CONFIG_NEXTCLOUD_VENDOR="nextcloud"
+      export RCLONE_CONFIG_NEXTCLOUD_USER="${adminuser}"
+      export RCLONE_CONFIG_NEXTCLOUD_PASS="$(${pkgs.rclone}/bin/rclone obscure ${adminpass})"
+      "''${@}"
+    '';
+    copySharedFile = pkgs.writeScript "copy-shared-file" ''
+      #!${pkgs.runtimeShell}
+      echo 'hi' | ${pkgs.rclone}/bin/rclone rcat nextcloud:test-shared-file
+    '';
+
+    diffSharedFile = pkgs.writeScript "diff-shared-file" ''
+      #!${pkgs.runtimeShell}
+      diff <(echo 'hi') <(${pkgs.rclone}/bin/rclone cat nextcloud:test-shared-file)
+    '';
+  in ''
+    start_all()
+    nextcloud.wait_for_unit("multi-user.target")
+    nextcloud.succeed("curl -sSf http://nextcloud/login")
+    nextcloud.succeed(
+        "${withRcloneEnv} ${copySharedFile}"
+    )
+    client.wait_for_unit("multi-user.target")
+    client.succeed(
+        "${withRcloneEnv} ${diffSharedFile}"
+    )
+
+    # redis cache should not be empty
+    nextcloud.fail("redis-cli KEYS * | grep -q 'empty array'")
+  '';
+})
diff --git a/nixos/tests/nextcloud/with-postgresql-and-redis.nix b/nixos/tests/nextcloud/with-postgresql-and-redis.nix
index 36a69fda505..1ef848cfb12 100644
--- a/nixos/tests/nextcloud/with-postgresql-and-redis.nix
+++ b/nixos/tests/nextcloud/with-postgresql-and-redis.nix
@@ -37,9 +37,8 @@ in {
         };
       };
 
-      services.redis = {
-        enable = true;
-      };
+      services.redis.servers."nextcloud".enable = true;
+      services.redis.servers."nextcloud".port = 6379;
 
       systemd.services.nextcloud-setup= {
         requires = ["postgresql.service"];
diff --git a/nixos/tests/nginx-auth.nix b/nixos/tests/nginx-auth.nix
index c0d24a20ddb..a85426dda87 100644
--- a/nixos/tests/nginx-auth.nix
+++ b/nixos/tests/nginx-auth.nix
@@ -13,14 +13,14 @@ import ./make-test-python.nix ({ pkgs, ... }: {
 
         virtualHosts.lockedroot = {
           inherit root;
-          basicAuth.alice = "jane";
+          basicAuth.alice = "pwofa";
         };
 
         virtualHosts.lockedsubdir = {
           inherit root;
           locations."/sublocation/" = {
             alias = "${root}/";
-            basicAuth.bob = "john";
+            basicAuth.bob = "pwofb";
           };
         };
       };
@@ -33,7 +33,7 @@ import ./make-test-python.nix ({ pkgs, ... }: {
 
     webserver.fail("curl --fail --resolve lockedroot:80:127.0.0.1 http://lockedroot")
     webserver.succeed(
-        "curl --fail --resolve lockedroot:80:127.0.0.1 http://alice:jane@lockedroot"
+        "curl --fail --resolve lockedroot:80:127.0.0.1 http://alice:pwofa@lockedroot"
     )
 
     webserver.succeed("curl --fail --resolve lockedsubdir:80:127.0.0.1 http://lockedsubdir")
@@ -41,7 +41,7 @@ import ./make-test-python.nix ({ pkgs, ... }: {
         "curl --fail --resolve lockedsubdir:80:127.0.0.1 http://lockedsubdir/sublocation/index.html"
     )
     webserver.succeed(
-        "curl --fail --resolve lockedsubdir:80:127.0.0.1 http://bob:john@lockedsubdir/sublocation/index.html"
+        "curl --fail --resolve lockedsubdir:80:127.0.0.1 http://bob:pwofb@lockedsubdir/sublocation/index.html"
     )
   '';
 })
diff --git a/nixos/tests/nginx-etag.nix b/nixos/tests/nginx-etag.nix
index b69511d081d..6f45eacf8b4 100644
--- a/nixos/tests/nginx-etag.nix
+++ b/nixos/tests/nginx-etag.nix
@@ -53,14 +53,14 @@ import ./make-test-python.nix {
 
           driver.implicitly_wait(20)
           driver.get('http://server/')
-          driver.find_element_by_xpath('//div[@foo="bar"]')
+          driver.find_element('xpath', '//div[@foo="bar"]')
           open('/tmp/passed_stage1', 'w')
 
           while not os.path.exists('/tmp/proceed'):
               time.sleep(0.5)
 
           driver.get('http://server/')
-          driver.find_element_by_xpath('//div[@foo="yay"]')
+          driver.find_element('xpath', '//div[@foo="yay"]')
           open('/tmp/passed', 'w')
         '';
       in [ pkgs.firefox-unwrapped pkgs.geckodriver testRunner ];
diff --git a/nixos/tests/nginx-globalredirect.nix b/nixos/tests/nginx-globalredirect.nix
new file mode 100644
index 00000000000..5f5f4f344d8
--- /dev/null
+++ b/nixos/tests/nginx-globalredirect.nix
@@ -0,0 +1,24 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "nginx-globalredirect";
+
+  nodes = {
+    webserver = { pkgs, lib, ... }: {
+      services.nginx = {
+        enable = true;
+        virtualHosts.localhost = {
+          globalRedirect = "other.example.com";
+          # Add an exception
+          locations."/noredirect".return = "200 'foo'";
+        };
+      };
+    };
+  };
+
+  testScript = ''
+    webserver.wait_for_unit("nginx")
+    webserver.wait_for_open_port(80)
+
+    webserver.succeed("curl --fail -si http://localhost/alf | grep '^Location:.*/alf'")
+    webserver.fail("curl --fail -si http://localhost/noredirect | grep '^Location:'")
+  '';
+})
diff --git a/nixos/tests/nginx-http3.nix b/nixos/tests/nginx-http3.nix
index edd0759464c..319f6aac184 100644
--- a/nixos/tests/nginx-http3.nix
+++ b/nixos/tests/nginx-http3.nix
@@ -70,6 +70,9 @@ in
   testScript = ''
     start_all()
 
+    server.wait_for_unit("nginx")
+    server.wait_for_open_port(443)
+
     # Check http connections
     client.succeed("curl --verbose --http3 https://acme.test | grep 'Hello World!'")
 
diff --git a/nixos/tests/nginx-modsecurity.nix b/nixos/tests/nginx-modsecurity.nix
index 5ceee378729..3c41da3e8d9 100644
--- a/nixos/tests/nginx-modsecurity.nix
+++ b/nixos/tests/nginx-modsecurity.nix
@@ -4,7 +4,7 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
   nodes.machine = { config, lib, pkgs, ... }: {
     services.nginx = {
       enable = true;
-      additionalModules = [ pkgs.nginxModules.modsecurity-nginx ];
+      additionalModules = [ pkgs.nginxModules.modsecurity ];
       virtualHosts.localhost =
         let modsecurity_conf = pkgs.writeText "modsecurity.conf" ''
           SecRuleEngine On
diff --git a/nixos/tests/nginx-njs.nix b/nixos/tests/nginx-njs.nix
new file mode 100644
index 00000000000..72be16384f1
--- /dev/null
+++ b/nixos/tests/nginx-njs.nix
@@ -0,0 +1,27 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "nginx-njs";
+
+  nodes.machine = { config, lib, pkgs, ... }: {
+    services.nginx = {
+      enable = true;
+      additionalModules = [ pkgs.nginxModules.njs ];
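+      # A minimal njs handler returning "Hello world!", wired to the "/" location below.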
+      commonHttpConfig = ''
+        js_import http from ${builtins.toFile "http.js" ''
+          function hello(r) {
+              r.return(200, "Hello world!");
+          }
+          export default {hello};
+        ''};
+      '';
+      virtualHosts."localhost".locations."/".extraConfig = ''
+        js_content http.hello;
+      '';
+    };
+  };
+  testScript = ''
+    machine.wait_for_unit("nginx")
+
+    response = machine.wait_until_succeeds("curl -fvvv -s http://127.0.0.1/")
+    assert "Hello world!" == response, f"Expected 'Hello world!', got '{response}'"
+  '';
+})
diff --git a/nixos/tests/nginx.nix b/nixos/tests/nginx.nix
index d9d073822a1..73f1133bd6c 100644
--- a/nixos/tests/nginx.nix
+++ b/nixos/tests/nginx.nix
@@ -61,7 +61,7 @@ import ./make-test-python.nix ({ pkgs, ... }: {
 
       specialisation.reloadWithErrorsSystem.configuration = {
         services.nginx.package = pkgs.nginxMainline;
-        services.nginx.virtualHosts."!@$$(#*%".locations."~@#*$*!)".proxyPass = ";;;";
+        services.nginx.virtualHosts."hello".extraConfig = "access_log /does/not/exist.log;";
       };
     };
   };
diff --git a/nixos/tests/nix-ld.nix b/nixos/tests/nix-ld.nix
index ae5297ab87e..8733f5b0c39 100644
--- a/nixos/tests/nix-ld.nix
+++ b/nixos/tests/nix-ld.nix
@@ -12,9 +12,6 @@ import ./make-test-python.nix ({ lib, pkgs, ...} :
   };
   testScript = ''
     start_all()
-    path = "${pkgs.stdenv.cc}/nix-support/dynamic-linker"
-    with open(path) as f:
-        real_ld = f.read().strip()
-    machine.succeed(f"NIX_LD={real_ld} hello")
+    machine.succeed("hello")
  '';
 })
diff --git a/nixos/tests/nixops/default.nix b/nixos/tests/nixops/default.nix
index 227b3881507..b77ac247639 100644
--- a/nixos/tests/nixops/default.nix
+++ b/nixos/tests/nixops/default.nix
@@ -19,6 +19,7 @@ let
   });
 
   testLegacyNetwork = { nixopsPkg }: pkgs.nixosTest ({
+    name = "nixops-legacy-network";
     nodes = {
       deployer = { config, lib, nodes, pkgs, ... }: {
         imports = [ ../../modules/installer/cd-dvd/channel.nix ];
diff --git a/nixos/tests/non-default-filesystems.nix b/nixos/tests/non-default-filesystems.nix
new file mode 100644
index 00000000000..7fa75aaad72
--- /dev/null
+++ b/nixos/tests/non-default-filesystems.nix
@@ -0,0 +1,54 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }:
+{
+  name = "non-default-filesystems";
+
+  nodes.machine =
+    { config, pkgs, lib, ... }:
+    let
+      disk = config.virtualisation.bootDevice;
+    in
+    {
+      virtualisation.useDefaultFilesystems = false;
+
+      boot.initrd.availableKernelModules = [ "btrfs" ];
+      boot.supportedFilesystems = [ "btrfs" ];
+
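+      # On first boot the disk has no filesystem yet: format it as btrfs and create root and home subvolumes.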
+      boot.initrd.postDeviceCommands = ''
+        FSTYPE=$(blkid -o value -s TYPE ${disk} || true)
+        if test -z "$FSTYPE"; then
+          modprobe btrfs
+          ${pkgs.btrfs-progs}/bin/mkfs.btrfs ${disk}
+
+          mkdir /nixos
+          mount -t btrfs ${disk} /nixos
+
+          ${pkgs.btrfs-progs}/bin/btrfs subvolume create /nixos/root
+          ${pkgs.btrfs-progs}/bin/btrfs subvolume create /nixos/home
+
+          umount /nixos
+        fi
+      '';
+
+      virtualisation.fileSystems = {
+        "/" = {
+          device = disk;
+          fsType = "btrfs";
+          options = [ "subvol=/root" ];
+        };
+
+        "/home" = {
+          device = disk;
+          fsType = "btrfs";
+          options = [ "subvol=/home" ];
+        };
+      };
+    };
+
+  testScript = ''
+    machine.wait_for_unit("multi-user.target")
+
+    with subtest("BTRFS filesystems are mounted correctly"):
+      machine.succeed("grep -E '/dev/vda / btrfs rw,relatime,space_cache=v2,subvolid=[0-9]+,subvol=/root 0 0' /proc/mounts")
+      machine.succeed("grep -E '/dev/vda /home btrfs rw,relatime,space_cache=v2,subvolid=[0-9]+,subvol=/home 0 0' /proc/mounts")
+  '';
+})
diff --git a/nixos/tests/nscd.nix b/nixos/tests/nscd.nix
new file mode 100644
index 00000000000..1922812ef8c
--- /dev/null
+++ b/nixos/tests/nscd.nix
@@ -0,0 +1,141 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+let
+  # build a getent that itself doesn't see anything in /etc/hosts and
+  # /etc/nsswitch.conf, by using libredirect to steer its own requests to
+  # /dev/null.
+  # This means it /has/ to go via nscd to actually resolve any of the
+  # additionally configured hosts.
+  getent' = pkgs.writeScript "getent-without-etc-hosts" ''
+    export NIX_REDIRECTS=/etc/hosts=/dev/null:/etc/nsswitch.conf=/dev/null
+    export LD_PRELOAD=${pkgs.libredirect}/lib/libredirect.so
+    exec getent "$@"
+  '';
+in
+{
+  name = "nscd";
+
+  nodes.machine = { pkgs, ... }: {
+    imports = [ common/user-account.nix ];
+    networking.extraHosts = ''
+      2001:db8::1 somehost.test
+      192.0.2.1 somehost.test
+    '';
+
+    systemd.services.sockdump = {
+      wantedBy = [ "multi-user.target" ];
+      path = [
+        # necessary for bcc to unpack kernel headers and invoke modprobe
+        pkgs.gnutar
+        pkgs.xz.bin
+        pkgs.kmod
+      ];
+      environment.PYTHONUNBUFFERED = "1";
+
+      serviceConfig = {
+        ExecStart = "${pkgs.sockdump}/bin/sockdump /var/run/nscd/socket";
+        Restart = "on-failure";
+        RestartSec = "1";
+        Type = "simple";
+      };
+    };
+
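+    # The test script re-runs the same lookups against unscd and nsncd via these specialisations.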
+    specialisation = {
+      withUnscd.configuration = { ... }: {
+        services.nscd.package = pkgs.unscd;
+      };
+      withNsncd.configuration = { ... }: {
+        services.nscd.enableNsncd = true;
+      };
+    };
+  };
+
+  testScript = { nodes, ... }:
+    let
+      specialisations = "${nodes.machine.system.build.toplevel}/specialisation";
+    in
+    ''
+      # Regression test for https://github.com/NixOS/nixpkgs/issues/50273
+      def test_dynamic_user():
+          with subtest("DynamicUser actually allocates a user"):
+              assert "iamatest" in machine.succeed(
+                  "systemd-run --pty --property=Type=oneshot --property=DynamicUser=yes --property=User=iamatest whoami"
+              )
+
+      # Test resolution of somehost.test with getent', to make sure we go via
+      # nscd protocol
+      def test_host_lookups():
+          with subtest("host lookups via nscd protocol"):
+              # ahosts
+              output = machine.succeed("${getent'} ahosts somehost.test")
+              assert "192.0.2.1" in output
+              assert "2001:db8::1" in output
+
+              # ahostsv4
+              output = machine.succeed("${getent'} ahostsv4 somehost.test")
+              assert "192.0.2.1" in output
+              assert "2001:db8::1" not in output
+
+              # ahostsv6
+              output = machine.succeed("${getent'} ahostsv6 somehost.test")
+              assert "192.0.2.1" not in output
+              assert "2001:db8::1" in output
+
+              # reverse lookups (hosts)
+              assert "somehost.test" in machine.succeed("${getent'} hosts 2001:db8::1")
+              assert "somehost.test" in machine.succeed("${getent'} hosts 192.0.2.1")
+
+
+      # Test host resolution via nss modules works
+      # We rely on nss-myhostname in this case, which resolves *.localhost and
+      # _gateway.
+      # We don't need to use getent' here, as non-glibc nss modules can only be
+      # discovered via nscd.
+      def test_nss_myhostname():
+          with subtest("nss-myhostname provides hostnames (ahosts)"):
+              # ahosts
+              output = machine.succeed("getent ahosts foobar.localhost")
+              assert "::1" in output
+              assert "127.0.0.1" in output
+
+              # ahostsv4
+              output = machine.succeed("getent ahostsv4 foobar.localhost")
+              assert "::1" not in output
+              assert "127.0.0.1" in output
+
+              # ahostsv6
+              output = machine.succeed("getent ahostsv6 foobar.localhost")
+              assert "::1" in output
+              assert "127.0.0.1" not in output
+
+      start_all()
+      machine.wait_for_unit("default.target")
+
+      # give sockdump some time to finish attaching.
+      machine.sleep(5)
+
+      # Test all tests with glibc-nscd.
+      test_dynamic_user()
+      test_host_lookups()
+      test_nss_myhostname()
+
+      with subtest("unscd"):
+          machine.succeed('${specialisations}/withUnscd/bin/switch-to-configuration test')
+          machine.wait_for_unit("default.target")
+
+          # known to fail, unscd doesn't load external NSS modules
+          # test_dynamic_user()
+
+          test_host_lookups()
+
+          # known to fail, unscd doesn't load external NSS modules
+          # test_nss_myhostname()
+
+      with subtest("nsncd"):
+          machine.succeed('${specialisations}/withNsncd/bin/switch-to-configuration test')
+          machine.wait_for_unit("default.target")
+
+          test_dynamic_user()
+          test_host_lookups()
+          test_nss_myhostname()
+    '';
+})
diff --git a/nixos/tests/ntfy-sh.nix b/nixos/tests/ntfy-sh.nix
new file mode 100644
index 00000000000..9a36fcdf392
--- /dev/null
+++ b/nixos/tests/ntfy-sh.nix
@@ -0,0 +1,21 @@
+import ./make-test-python.nix {
+  name = "ntfy-sh";
+
+  nodes.machine = { ... }: {
+    services.ntfy-sh.enable = true;
+  };
+
+  testScript = ''
+    import json
+
+    msg = "Test notification"
+
+    machine.wait_for_unit("multi-user.target")
+
+    machine.succeed(f"curl -d '{msg}' localhost:80/test")
+
+    notif = json.loads(machine.succeed("curl -s localhost:80/test/json?poll=1"))
+
+    assert msg == notif["message"], "Wrong message"
+  '';
+}
diff --git a/nixos/tests/oci-containers.nix b/nixos/tests/oci-containers.nix
index 68077e3540a..1bcfb276dbe 100644
--- a/nixos/tests/oci-containers.nix
+++ b/nixos/tests/oci-containers.nix
@@ -12,7 +12,7 @@ let
     name = "oci-containers-${backend}";
 
     meta = {
-      maintainers = with lib.maintainers; [ adisbladis benley ] ++ lib.teams.serokell.members;
+      maintainers = with lib.maintainers; [ adisbladis benley mkaito ] ++ lib.teams.serokell.members;
     };
 
     nodes = {
diff --git a/nixos/tests/openldap.nix b/nixos/tests/openldap.nix
index 3c388119d5d..075bb5d1f64 100644
--- a/nixos/tests/openldap.nix
+++ b/nixos/tests/openldap.nix
@@ -1,9 +1,4 @@
-{ pkgs ? (import ../.. { inherit system; config = { }; })
-, system ? builtins.currentSystem
-, ...
-}:
-
-let
+import ./make-test-python.nix ({ pkgs, ... }: let
   dbContents = ''
     dn: dc=example
     objectClass: domain
@@ -13,118 +8,149 @@ let
     objectClass: organizationalUnit
     ou: users
   '';
-  testScript = ''
-    machine.wait_for_unit("openldap.service")
-    machine.succeed(
-        'ldapsearch -LLL -D "cn=root,dc=example" -w notapassword -b "dc=example"',
-    )
+
+  ldifConfig = ''
+    dn: cn=config
+    cn: config
+    objectClass: olcGlobal
+    olcLogLevel: stats
+
+    dn: cn=schema,cn=config
+    cn: schema
+    objectClass: olcSchemaConfig
+
+    include: file://${pkgs.openldap}/etc/schema/core.ldif
+    include: file://${pkgs.openldap}/etc/schema/cosine.ldif
+    include: file://${pkgs.openldap}/etc/schema/inetorgperson.ldif
+
+    dn: olcDatabase={0}config,cn=config
+    olcDatabase: {0}config
+    objectClass: olcDatabaseConfig
+    olcRootDN: cn=root,cn=config
+    olcRootPW: configpassword
+
+    dn: olcDatabase={1}mdb,cn=config
+    objectClass: olcDatabaseConfig
+    objectClass: olcMdbConfig
+    olcDatabase: {1}mdb
+    olcDbDirectory: /var/db/openldap
+    olcDbIndex: objectClass eq
+    olcSuffix: dc=example
+    olcRootDN: cn=root,dc=example
+    olcRootPW: notapassword
   '';
+
+  ldapClientConfig = {
+    enable = true;
+    loginPam = false;
+    nsswitch = false;
+    server = "ldap://";
+    base = "dc=example";
+  };
+
 in {
-  # New-style configuration
-  current = import ./make-test-python.nix ({ pkgs, ... }: {
-    inherit testScript;
-    name = "openldap";
+  name = "openldap";
+
+  nodes.machine = { pkgs, ... }: {
+    environment.etc."openldap/root_password".text = "notapassword";
 
-    nodes.machine = { pkgs, ... }: {
-      environment.etc."openldap/root_password".text = "notapassword";
-      services.openldap = {
-        enable = true;
-        settings = {
-          children = {
-            "cn=schema".includes = [
-              "${pkgs.openldap}/etc/schema/core.ldif"
-              "${pkgs.openldap}/etc/schema/cosine.ldif"
-              "${pkgs.openldap}/etc/schema/inetorgperson.ldif"
-              "${pkgs.openldap}/etc/schema/nis.ldif"
-            ];
-            "olcDatabase={1}mdb" = {
-              # This tests string, base64 and path values, as well as lists of string values
-              attrs = {
-                objectClass = [ "olcDatabaseConfig" "olcMdbConfig" ];
-                olcDatabase = "{1}mdb";
-                olcDbDirectory = "/var/db/openldap";
-                olcSuffix = "dc=example";
-                olcRootDN = {
-                  # cn=root,dc=example
-                  base64 = "Y249cm9vdCxkYz1leGFtcGxl";
-                };
-                olcRootPW = {
-                  path = "/etc/openldap/root_password";
-                };
+    users.ldap = ldapClientConfig;
+
+    services.openldap = {
+      enable = true;
+      urlList = [ "ldapi:///" "ldap://" ];
+      settings = {
+        children = {
+          "cn=schema".includes = [
+            "${pkgs.openldap}/etc/schema/core.ldif"
+            "${pkgs.openldap}/etc/schema/cosine.ldif"
+            "${pkgs.openldap}/etc/schema/inetorgperson.ldif"
+            "${pkgs.openldap}/etc/schema/nis.ldif"
+          ];
+          "olcDatabase={0}config" = {
+            attrs = {
+              objectClass = [ "olcDatabaseConfig" ];
+              olcDatabase = "{0}config";
+              olcRootDN = "cn=root,cn=config";
+              olcRootPW = "configpassword";
+            };
+          };
+          "olcDatabase={1}mdb" = {
+            # This tests string, base64 and path values, as well as lists of string values
+            attrs = {
+              objectClass = [ "olcDatabaseConfig" "olcMdbConfig" ];
+              olcDatabase = "{1}mdb";
+              olcDbDirectory = "/var/lib/openldap/db";
+              olcSuffix = "dc=example";
+              olcRootDN = {
+                # cn=root,dc=example
+                base64 = "Y249cm9vdCxkYz1leGFtcGxl";
+              };
+              olcRootPW = {
+                path = "/etc/openldap/root_password";
               };
             };
           };
         };
-        declarativeContents."dc=example" = dbContents;
       };
     };
-  }) { inherit pkgs system; };
-
-  # Old-style configuration
-  oldOptions = import ./make-test-python.nix ({ pkgs, ... }: {
-    inherit testScript;
-    name = "openldap";
 
-    nodes.machine = { pkgs, ... }: {
-      services.openldap = {
-        enable = true;
-        logLevel = "stats acl";
-        defaultSchemas = true;
-        database = "mdb";
-        suffix = "dc=example";
-        rootdn = "cn=root,dc=example";
-        rootpw = "notapassword";
-        declarativeContents."dc=example" = dbContents;
+    specialisation = {
+      declarativeContents.configuration = { ... }: {
+        services.openldap.declarativeContents."dc=example" = dbContents;
       };
-    };
-  }) { inherit system pkgs; };
-
-  # Manually managed configDir, for example if dynamic config is essential
-  manualConfigDir = import ./make-test-python.nix ({ pkgs, ... }: {
-    name = "openldap";
-
-    nodes.machine = { pkgs, ... }: {
-      services.openldap = {
-        enable = true;
-        configDir = "/var/db/slapd.d";
+      mutableConfig.configuration = { ... }: {
+        services.openldap = {
+          declarativeContents."dc=example" = dbContents;
+          mutableConfig = true;
+        };
+      };
+      manualConfigDir = {
+        inheritParentConfig = false;
+        configuration = { ... }: {
+          users.ldap = ldapClientConfig;
+          services.openldap = {
+            enable = true;
+            configDir = "/var/db/slapd.d";
+          };
+        };
       };
     };
+  };
+  testScript = { nodes, ... }: let
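+    # Path to the built system's specialisation directory; the script below switches between specialisations with switch-to-configuration.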
+    specializations = "${nodes.machine.config.system.build.toplevel}/specialisation";
+    changeRootPw = ''
+      dn: olcDatabase={1}mdb,cn=config
+      changetype: modify
+      replace: olcRootPW
+      olcRootPW: foobar
+    '';
+  in ''
+    # Test startup with empty DB
+    machine.wait_for_unit("openldap.service")
 
-    testScript = let
-      contents = pkgs.writeText "data.ldif" dbContents;
-      config = pkgs.writeText "config.ldif" ''
-        dn: cn=config
-        cn: config
-        objectClass: olcGlobal
-        olcLogLevel: stats
-        olcPidFile: /run/slapd/slapd.pid
-
-        dn: cn=schema,cn=config
-        cn: schema
-        objectClass: olcSchemaConfig
+    with subtest("declarative contents"):
+      machine.succeed('${specializations}/declarativeContents/bin/switch-to-configuration test')
+      machine.wait_for_unit("openldap.service")
+      machine.succeed('ldapsearch -LLL -D "cn=root,dc=example" -w notapassword')
+      machine.fail('ldapmodify -D cn=root,cn=config -w configpassword -f ${pkgs.writeText "rootpw.ldif" changeRootPw}')
 
-        include: file://${pkgs.openldap}/etc/schema/core.ldif
-        include: file://${pkgs.openldap}/etc/schema/cosine.ldif
-        include: file://${pkgs.openldap}/etc/schema/inetorgperson.ldif
+    with subtest("mutable config"):
+      machine.succeed('${specializations}/mutableConfig/bin/switch-to-configuration test')
+      machine.succeed('ldapsearch -LLL -D "cn=root,dc=example" -w notapassword')
+      machine.succeed('ldapmodify -D cn=root,cn=config -w configpassword -f ${pkgs.writeText "rootpw.ldif" changeRootPw}')
+      machine.succeed('ldapsearch -LLL -D "cn=root,dc=example" -w foobar')
 
-        dn: olcDatabase={1}mdb,cn=config
-        objectClass: olcDatabaseConfig
-        objectClass: olcMdbConfig
-        olcDatabase: {1}mdb
-        olcDbDirectory: /var/db/openldap
-        olcDbIndex: objectClass eq
-        olcSuffix: dc=example
-        olcRootDN: cn=root,dc=example
-        olcRootPW: notapassword
-      '';
-    in ''
+    with subtest("manual config dir"):
       machine.succeed(
-          "mkdir -p /var/db/slapd.d /var/db/openldap",
-          "slapadd -F /var/db/slapd.d -n0 -l ${config}",
-          "slapadd -F /var/db/slapd.d -n1 -l ${contents}",
-          "chown -R openldap:openldap /var/db/slapd.d /var/db/openldap",
-          "systemctl restart openldap",
+        'mkdir /var/db/slapd.d /var/db/openldap',
+        'slapadd -F /var/db/slapd.d -n0 -l ${pkgs.writeText "config.ldif" ldifConfig}',
+        'slapadd -F /var/db/slapd.d -n1 -l ${pkgs.writeText "contents.ldif" dbContents}',
+        'chown -R openldap:openldap /var/db/slapd.d /var/db/openldap',
+        '${specializations}/manualConfigDir/bin/switch-to-configuration test',
       )
-    '' + testScript;
-  }) { inherit system pkgs; };
-}
+      machine.succeed('ldapsearch -LLL -D "cn=root,dc=example" -w notapassword')
+      machine.succeed('ldapmodify -D cn=root,cn=config -w configpassword -f ${pkgs.writeText "rootpw.ldif" changeRootPw}')
+      machine.succeed('ldapsearch -LLL -D "cn=root,dc=example" -w foobar')
+  '';
+})
diff --git a/nixos/tests/pam/pam-file-contents.nix b/nixos/tests/pam/pam-file-contents.nix
index 86c61003aeb..2bafd90618e 100644
--- a/nixos/tests/pam/pam-file-contents.nix
+++ b/nixos/tests/pam/pam-file-contents.nix
@@ -2,6 +2,7 @@ let
   name = "pam";
 in
 import ../make-test-python.nix ({ pkgs, ... }: {
+  name = "pam-file-contents";
 
   nodes.machine = { ... }: {
     imports = [ ../../modules/profiles/minimal.nix ];
diff --git a/nixos/tests/pam/pam-u2f.nix b/nixos/tests/pam/pam-u2f.nix
index d7c540982cf..07408dea797 100644
--- a/nixos/tests/pam/pam-u2f.nix
+++ b/nixos/tests/pam/pam-u2f.nix
@@ -12,6 +12,7 @@ import ../make-test-python.nix ({ ... }:
         debug = true;
         enable = true;
         interactive = true;
+        origin = "nixos-test";
       };
     };
 
@@ -19,7 +20,7 @@ import ../make-test-python.nix ({ ... }:
     ''
       machine.wait_for_unit("multi-user.target")
       machine.succeed(
-          'egrep "auth required .*/lib/security/pam_u2f.so.*debug.*interactive.*cue" /etc/pam.d/ -R'
+          'egrep "auth required .*/lib/security/pam_u2f.so.*debug.*interactive.*cue.*origin=nixos-test" /etc/pam.d/ -R'
       )
     '';
 })
diff --git a/nixos/tests/paperless.nix b/nixos/tests/paperless.nix
index 12883cd62c6..b97834835c2 100644
--- a/nixos/tests/paperless.nix
+++ b/nixos/tests/paperless.nix
@@ -40,5 +40,13 @@ import ./make-test-python.nix ({ lib, ... }: {
         docs = json.loads(machine.succeed("curl -u admin:admin -fs localhost:28981/api/documents/"))['results']
         assert "2005-10-16" in docs[0]['created']
         assert "2005-10-16" in docs[1]['created']
+
+    # Detects gunicorn issues; see PR #190888
+    with subtest("Document metadata can be accessed"):
+        metadata = json.loads(machine.succeed("curl -u admin:admin -fs localhost:28981/api/documents/1/metadata/"))
+        assert "original_checksum" in metadata
+
+        metadata = json.loads(machine.succeed("curl -u admin:admin -fs localhost:28981/api/documents/2/metadata/"))
+        assert "original_checksum" in metadata
   '';
 })
diff --git a/nixos/tests/pass-secret-service.nix b/nixos/tests/pass-secret-service.nix
new file mode 100644
index 00000000000..a85a508bfe1
--- /dev/null
+++ b/nixos/tests/pass-secret-service.nix
@@ -0,0 +1,69 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "pass-secret-service";
+  meta.maintainers = with lib; [ aidalgol ];
+
+  nodes.machine = { nodes, pkgs, ... }:
+    {
+      imports = [ ./common/user-account.nix ];
+
+      services.passSecretService.enable = true;
+
+      environment.systemPackages = [
+        # Create a script that tries to make a request to the D-Bus secrets API.
+        (pkgs.writers.writePython3Bin "secrets-dbus-init"
+          {
+            libraries = [ pkgs.python3Packages.secretstorage ];
+          } ''
+          import secretstorage
+          print("Initializing dbus connection...")
+          connection = secretstorage.dbus_init()
+          print("Requesting default collection...")
+          collection = secretstorage.get_default_collection(connection)
+          print("Done!  dbus-org.freedesktop.secrets should now be active.")
+        '')
+        pkgs.pass
+      ];
+
+      programs.gnupg = {
+        agent.enable = true;
+        agent.pinentryFlavor = "tty";
+        dirmngr.enable = true;
+      };
+    };
+
+  # Some of the commands are run via a virtual console because they need to be
+  # run under a real login session, with D-Bus running in the environment.
+  testScript = { nodes, ... }:
+    let
+      user = nodes.machine.config.users.users.alice;
+      gpg-uid = "alice@example.net";
+      gpg-pw = "foobar9000";
+      ready-file = "/tmp/secrets-dbus-init.done";
+    in
+    ''
+      # Initialise the pass(1) storage.
+      machine.succeed("""
+        sudo -u alice gpg --pinentry-mode loopback --batch --passphrase ${gpg-pw} \
+        --quick-gen-key ${gpg-uid} \
+      """)
+      machine.succeed("sudo -u alice pass init ${gpg-uid}")
+
+      with subtest("Service is not running on login"):
+          machine.wait_until_tty_matches("1", "login: ")
+          machine.send_chars("alice\n")
+          machine.wait_until_tty_matches("1", "login: alice")
+          machine.wait_until_succeeds("pgrep login")
+          machine.wait_until_tty_matches("1", "Password: ")
+          machine.send_chars("${user.password}\n")
+          machine.wait_until_succeeds("pgrep -u alice bash")
+
+          _, output = machine.systemctl("status dbus-org.freedesktop.secrets --no-pager", "alice")
+          assert "Active: inactive (dead)" in output
+
+      with subtest("Service starts after a client tries to talk to the D-Bus API"):
+          machine.send_chars("secrets-dbus-init; touch ${ready-file}\n")
+          machine.wait_for_file("${ready-file}")
+          _, output = machine.systemctl("status dbus-org.freedesktop.secrets --no-pager", "alice")
+          assert "Active: active (running)" in output
+    '';
+})
diff --git a/nixos/tests/patroni.nix b/nixos/tests/patroni.nix
new file mode 100644
index 00000000000..1f15cd59677
--- /dev/null
+++ b/nixos/tests/patroni.nix
@@ -0,0 +1,206 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }:
+
+  let
+    nodesIps = [
+      "192.168.1.1"
+      "192.168.1.2"
+      "192.168.1.3"
+    ];
+
+    createNode = index: { pkgs, ... }:
+      let
+        ip = builtins.elemAt nodesIps index; # since we already use IPs to identify servers
+      in
+      {
+        networking.interfaces.eth1.ipv4.addresses = pkgs.lib.mkOverride 0 [
+          { address = ip; prefixLength = 16; }
+        ];
+
+        networking.firewall.allowedTCPPorts = [ 5432 8008 5010 ];
+
+        environment.systemPackages = [ pkgs.jq ];
+
+        services.patroni = {
+
+          enable = true;
+
+          postgresqlPackage = pkgs.postgresql_14.withPackages (p: [ p.pg_safeupdate ]);
+
+          scope = "cluster1";
+          name = "node${toString(index + 1)}";
+          nodeIp = ip;
+          otherNodesIps = builtins.filter (h: h != ip) nodesIps;
+          softwareWatchdog = true;
+
+          settings = {
+            bootstrap = {
+              dcs = {
+                ttl = 30;
+                loop_wait = 10;
+                retry_timeout = 10;
+                maximum_lag_on_failover = 1048576;
+              };
+              initdb = [
+                { encoding = "UTF8"; }
+                "data-checksums"
+              ];
+            };
+
+            postgresql = {
+              use_pg_rewind = true;
+              use_slots = true;
+              authentication = {
+                replication = {
+                  username = "replicator";
+                };
+                superuser = {
+                  username = "postgres";
+                };
+                rewind = {
+                  username = "rewind";
+                };
+              };
+              parameters = {
+                listen_addresses = "${ip}";
+                wal_level = "replica";
+                hot_standby_feedback = "on";
+                unix_socket_directories = "/tmp";
+              };
+              pg_hba = [
+                "host replication replicator 192.168.1.0/24 md5"
+                # Unsafe, do not use for anything other than tests
+                "host all all 0.0.0.0/0 trust"
+              ];
+            };
+
+            etcd3 = {
+              host = "192.168.1.4:2379";
+            };
+          };
+
+          environmentFiles = {
+            PATRONI_REPLICATION_PASSWORD = pkgs.writeText "replication-password" "postgres";
+            PATRONI_SUPERUSER_PASSWORD = pkgs.writeText "superuser-password" "postgres";
+            PATRONI_REWIND_PASSWORD = pkgs.writeText "rewind-password" "postgres";
+          };
+        };
+
+        # We always want to restart so the tests never hang
+        systemd.services.patroni.serviceConfig.StartLimitIntervalSec = 0;
+      };
+  in
+  {
+    name = "patroni";
+
+    nodes = {
+      node1 = createNode 0;
+      node2 = createNode 1;
+      node3 = createNode 2;
+
+      etcd = { pkgs, ... }: {
+
+        networking.interfaces.eth1.ipv4.addresses = pkgs.lib.mkOverride 0 [
+          { address = "192.168.1.4"; prefixLength = 16; }
+        ];
+
+        services.etcd = {
+          enable = true;
+          listenClientUrls = [ "http://192.168.1.4:2379" ];
+        };
+
+        networking.firewall.allowedTCPPorts = [ 2379 ];
+      };
+
+      client = { pkgs, ... }: {
+        environment.systemPackages = [ pkgs.postgresql_14 ];
+
+        networking.interfaces.eth1.ipv4.addresses = pkgs.lib.mkOverride 0 [
+          { address = "192.168.2.1"; prefixLength = 16; }
+        ];
+
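+        # HAProxy health-checks each node's Patroni REST API on port 8008; only the current
+        # primary answers 200, so connections to 127.0.0.1:5432 always reach the leader.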
+        services.haproxy = {
+          enable = true;
+          config = ''
+            global
+                maxconn 100
+
+            defaults
+                log global
+                mode tcp
+                retries 2
+                timeout client 30m
+                timeout connect 4s
+                timeout server 30m
+                timeout check 5s
+
+            listen cluster1
+                bind 127.0.0.1:5432
+                option httpchk
+                http-check expect status 200
+                default-server inter 3s fall 3 rise 2 on-marked-down shutdown-sessions
+                ${builtins.concatStringsSep "\n" (map (ip: "server postgresql_${ip}_5432 ${ip}:5432 maxconn 100 check port 8008") nodesIps)}
+          '';
+        };
+      };
+    };
+
+
+
+    testScript = ''
+      nodes = [node1, node2, node3]
+
+      def wait_for_all_nodes_ready(expected_replicas=2):
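+          # Poll patronictl on every booted node until the cluster reports one running Leader
+          # and the expected number of running Replicas, then check client connectivity.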
+          booted_nodes = filter(lambda node: node.booted, nodes)
+          for node in booted_nodes:
+              print(node.succeed("patronictl list cluster1"))
+              node.wait_until_succeeds(f"[ $(patronictl list -f json cluster1 | jq 'length') == {expected_replicas + 1} ]")
+              node.wait_until_succeeds("[ $(patronictl list -f json cluster1 | jq 'map(select(.Role | test(\"^Leader$\"))) | map(select(.State | test(\"^running$\"))) | length') == 1 ]")
+              node.wait_until_succeeds(f"[ $(patronictl list -f json cluster1 | jq 'map(select(.Role | test(\"^Replica$\"))) | map(select(.State | test(\"^running$\"))) | length') == {expected_replicas} ]")
+              print(node.succeed("patronictl list cluster1"))
+          client.wait_until_succeeds("psql -h 127.0.0.1 -U postgres --command='select 1;'")
+
+      def run_dummy_queries():
+          client.succeed("psql -h 127.0.0.1 -U postgres --pset='pager=off' --tuples-only --command='insert into dummy(val) values (101);'")
+          client.succeed("test $(psql -h 127.0.0.1 -U postgres --pset='pager=off' --tuples-only --command='select val from dummy where val = 101;') -eq 101")
+          client.succeed("psql -h 127.0.0.1 -U postgres --pset='pager=off' --tuples-only --command='delete from dummy where val = 101;'")
+
+      start_all()
+
+      etcd.wait_for_unit("etcd.service")
+
+      with subtest("should bootstrap a new patroni cluster"):
+          wait_for_all_nodes_ready()
+
+      with subtest("should be able to insert and select"):
+          client.succeed("psql -h 127.0.0.1 -U postgres --command='create table dummy as select * from generate_series(1, 100) as val;'")
+          client.succeed("test $(psql -h 127.0.0.1 -U postgres --pset='pager=off' --tuples-only --command='select count(distinct val) from dummy;') -eq 100")
+
+      with subtest("should restart after all nodes are crashed"):
+          for node in nodes:
+              node.crash()
+          for node in nodes:
+              node.start()
+          wait_for_all_nodes_ready()
+
+      with subtest("should be able to run queries while any one node is crashed"):
+          masterNodeName = node1.succeed("patronictl list -f json cluster1 | jq '.[] | select(.Role | test(\"^Leader$\")) | .Member' -r").strip()
+          masterNodeIndex = int(masterNodeName[len(masterNodeName)-1]) - 1
+
+          # Move the master node to the end of the list to avoid multiple failovers (makes the test faster and more consistent)
+          nodes.append(nodes.pop(masterNodeIndex))
+
+          for node in nodes:
+              node.crash()
+              wait_for_all_nodes_ready(1)
+
+              # Execute some queries while a node is down.
+              run_dummy_queries()
+
+              # Restart crashed node.
+              node.start()
+              wait_for_all_nodes_ready()
+
+              # Execute some queries with the node back up.
+              run_dummy_queries()
+    '';
+  })
diff --git a/nixos/tests/pgadmin4-standalone.nix b/nixos/tests/pgadmin4-standalone.nix
index 442570c5306..5aa17fcb5bb 100644
--- a/nixos/tests/pgadmin4-standalone.nix
+++ b/nixos/tests/pgadmin4-standalone.nix
@@ -1,5 +1,5 @@
 import ./make-test-python.nix ({ pkgs, lib, ... }:
-  # This is seperate from pgadmin4 since we don't want both running at once
+  # This is separate from pgadmin4 since we don't want both running at once
 
   {
     name = "pgadmin4-standalone";
diff --git a/nixos/tests/pgadmin4.nix b/nixos/tests/pgadmin4.nix
index b30299d307e..2a2b5aaa284 100644
--- a/nixos/tests/pgadmin4.nix
+++ b/nixos/tests/pgadmin4.nix
@@ -106,12 +106,13 @@ import ./make-test-python.nix ({ pkgs, lib, buildDeps ? [ ], pythonEnv ? [ ], ..
            && sed -i 's|driver_local.maximize_window()||' web/regression/runtests.py"
       )
 
-      # don't bother to test LDAP authentification
+      # Don't bother to test LDAP or kerberos authentication
       with subtest("run browser test"):
           machine.succeed(
                'cd ${pgadmin4SrcDir}/pgadmin4-${pkgs.pgadmin4.version}/web \
-               && python regression/runtests.py --pkg browser --exclude \
-               browser.tests.test_ldap_login.LDAPLoginTestCase,browser.tests.test_ldap_login'
+               && python regression/runtests.py \
+               --pkg browser \
+               --exclude browser.tests.test_ldap_login.LDAPLoginTestCase,browser.tests.test_ldap_login,browser.tests.test_kerberos_with_mocking'
           )
 
       # fontconfig is necessary for chromium to run
@@ -124,9 +125,9 @@ import ./make-test-python.nix ({ pkgs, lib, buildDeps ? [ ], pythonEnv ? [ ], ..
           )
 
       with subtest("run resql test"):
-          machine.succeed(
-               'cd ${pgadmin4SrcDir}/pgadmin4-${pkgs.pgadmin4.version}/web \
-               && python regression/runtests.py --pkg resql'
-          )
+         machine.succeed(
+              'cd ${pgadmin4SrcDir}/pgadmin4-${pkgs.pgadmin4.version}/web \
+              && python regression/runtests.py --pkg resql'
+         )
     '';
   })
diff --git a/nixos/tests/phosh.nix b/nixos/tests/phosh.nix
new file mode 100644
index 00000000000..25bf4848542
--- /dev/null
+++ b/nixos/tests/phosh.nix
@@ -0,0 +1,70 @@
+import ./make-test-python.nix ({ pkgs, ...}: let
+  pin = "1234";
+in {
+  name = "phosh";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ zhaofengli ];
+  };
+
+  nodes = {
+    phone = { config, pkgs, ... }: {
+      users.users.nixos = {
+        isNormalUser = true;
+        password = pin;
+      };
+
+      services.xserver.desktopManager.phosh = {
+        enable = true;
+        user = "nixos";
+        group = "users";
+
+        phocConfig = {
+          outputs.Virtual-1 = {
+            scale = 2;
+          };
+        };
+      };
+
+      systemd.services.phosh = {
+        environment = {
+          # Accelerated graphics fail on phoc 0.20 (wlroots 0.15)
+          "WLR_RENDERER" = "pixman";
+        };
+      };
+
+      virtualisation.resolution = { x = 720; y = 1440; };
+      virtualisation.qemu.options = [ "-vga none -device virtio-gpu-pci,xres=720,yres=1440" ];
+    };
+  };
+
+  enableOCR = true;
+
+  testScript = ''
+    import time
+
+    start_all()
+    phone.wait_for_unit("phosh.service")
+
+    with subtest("Check that we can see the lock screen info page"):
+        # Saturday, January 1
+        phone.succeed("timedatectl set-time '2022-01-01 07:00'")
+
+        phone.wait_for_text("Saturday")
+        phone.screenshot("01lockinfo")
+
+    with subtest("Check that we can unlock the screen"):
+        phone.send_chars("${pin}", delay=0.2)
+        time.sleep(1)
+        phone.screenshot("02unlock")
+
+        phone.send_chars("\n")
+
+        phone.wait_for_text("All Apps")
+        phone.screenshot("03launcher")
+
+    with subtest("Check the on-screen keyboard shows"):
+        phone.send_chars("setting", delay=0.2)
+        phone.wait_for_text("123") # A button on the OSK
+        phone.screenshot("04osk")
+  '';
+})
diff --git a/nixos/tests/php/pcre.nix b/nixos/tests/php/pcre.nix
index 57407477f4b..8e37d5dcf97 100644
--- a/nixos/tests/php/pcre.nix
+++ b/nixos/tests/php/pcre.nix
@@ -1,7 +1,7 @@
 let
   testString = "can-use-subgroups";
 in
-import ../make-test-python.nix ({ lib, php, ... }: {
+import ../make-test-python.nix ({ pkgs, lib, php, ... }: {
   name = "php-${php.version}-httpd-pcre-jit-test";
   meta.maintainers = lib.teams.php.members;
 
@@ -31,12 +31,22 @@ import ../make-test-python.nix ({ lib, php, ... }: {
         '';
     };
   };
-  testScript = { ... }:
-    ''
+  testScript = let
+    # PCRE JIT SEAlloc feature does not play well with fork()
+    # The feature needs to either be disabled or PHP configured correctly
+    # More information in https://bugs.php.net/bug.php?id=78927 and https://bugs.php.net/bug.php?id=78630
+    pcreJitSeallocForkIssue = pkgs.writeText "pcre-jit-sealloc-issue.php" ''
+      <?php
+      preg_match('/nixos/', 'nixos');
+      $pid = pcntl_fork();
+      pcntl_wait($pid);
+    '';
+  in ''
       machine.wait_for_unit("httpd.service")
       # Ensure php evaluation by matching on the var_dump syntax
       response = machine.succeed("curl -fvvv -s http://127.0.0.1:80/index.php")
       expected = 'string(${toString (builtins.stringLength testString)}) "${testString}"'
       assert expected in response, "Does not appear to be able to use subgroups."
+      machine.succeed("${php}/bin/php -f ${pcreJitSeallocForkIssue}")
     '';
 })
diff --git a/nixos/tests/pinnwand.nix b/nixos/tests/pinnwand.nix
index 0391c413311..42b26e08c18 100644
--- a/nixos/tests/pinnwand.nix
+++ b/nixos/tests/pinnwand.nix
@@ -1,27 +1,7 @@
 import ./make-test-python.nix ({ pkgs, ...}:
 let
-  pythonEnv = pkgs.python3.withPackages (py: with py; [ appdirs toml ]);
-
   port = 8000;
   baseUrl = "http://server:${toString port}";
-
-  configureSteck = pkgs.writeScript "configure.py" ''
-    #!${pythonEnv.interpreter}
-    import appdirs
-    import toml
-    import os
-
-    CONFIG = {
-      "base": "${baseUrl}/",
-      "confirm": False,
-      "magic": True,
-      "ignore": True
-    }
-
-    os.makedirs(appdirs.user_config_dir('steck'))
-    with open(os.path.join(appdirs.user_config_dir('steck'), 'steck.toml'), "w") as fd:
-        toml.dump(CONFIG, fd)
-    '';
 in
 {
   name = "pinnwand";
@@ -44,7 +24,32 @@ in
 
     client = { pkgs, ... }:
     {
-      environment.systemPackages = [ pkgs.steck ];
+      environment.systemPackages = [
+        pkgs.steck
+
+        (pkgs.writers.writePython3Bin "setup-steck.py" {
+          libraries = with pkgs.python3.pkgs; [ appdirs toml ];
+          flakeIgnore = [
+            "E501"
+          ];
+        }
+        ''
+          import appdirs
+          import toml
+          import os
+
+          CONFIG = {
+              "base": "${baseUrl}/",
+              "confirm": False,
+              "magic": True,
+              "ignore": True
+          }
+
+          os.makedirs(appdirs.user_config_dir('steck'))
+          with open(os.path.join(appdirs.user_config_dir('steck'), 'steck.toml'), "w") as fd:
+              toml.dump(CONFIG, fd)
+        '')
+      ];
     };
   };
 
@@ -55,7 +60,7 @@ in
     client.wait_for_unit("network.target")
 
     # create steck.toml config file
-    client.succeed("${configureSteck}")
+    client.succeed("setup-steck.py")
 
     # wait until the server running pinnwand is reachable
     client.wait_until_succeeds("ping -c1 server")
@@ -75,12 +80,6 @@ in
         if line.startswith("Removal link:"):
             removal_link = line.split(":", 1)[1]
 
-
-    # start the reaper, it shouldn't do anything meaningful here
-    server.systemctl("start pinnwand-reaper.service")
-    server.wait_until_fails("systemctl is-active -q pinnwand-reaper.service")
-    server.log(server.execute("journalctl -u pinnwand-reaper -e --no-pager")[1])
-
     # check whether paste matches what we sent
     client.succeed(f"curl {raw_url} > /tmp/machine-id")
     client.succeed("diff /tmp/machine-id /etc/machine-id")
@@ -89,6 +88,6 @@ in
     client.succeed(f"curl {removal_link}")
     client.fail(f"curl --fail {raw_url}")
 
-    server.log(server.succeed("systemd-analyze security pinnwand"))
+    server.log(server.execute("systemd-analyze security pinnwand | grep '✗'")[1])
   '';
 })
diff --git a/nixos/tests/plasma-bigscreen.nix b/nixos/tests/plasma-bigscreen.nix
new file mode 100644
index 00000000000..1c61cafcbff
--- /dev/null
+++ b/nixos/tests/plasma-bigscreen.nix
@@ -0,0 +1,38 @@
+import ./make-test-python.nix ({ pkgs, ...} :
+
+{
+  name = "plasma-bigscreen";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ ttuegel k900 ];
+  };
+
+  nodes.machine = { ... }:
+
+  {
+    imports = [ ./common/user-account.nix ];
+    services.xserver.enable = true;
+    services.xserver.displayManager.sddm.enable = true;
+    services.xserver.displayManager.defaultSession = "plasma-bigscreen-x11";
+    services.xserver.desktopManager.plasma5.bigscreen.enable = true;
+    services.xserver.displayManager.autoLogin = {
+      enable = true;
+      user = "alice";
+    };
+
+    users.users.alice.extraGroups = ["uinput"];
+  };
+
+  testScript = { nodes, ... }: let
+    user = nodes.machine.users.users.alice;
+    xdo = "${pkgs.xdotool}/bin/xdotool";
+  in ''
+    with subtest("Wait for login"):
+        start_all()
+        machine.wait_for_file("${user.home}/.Xauthority")
+        machine.succeed("xauth merge ${user.home}/.Xauthority")
+
+    with subtest("Check plasmashell started"):
+        machine.wait_until_succeeds("pgrep plasmashell")
+        machine.wait_for_window("Plasma Big Screen")
+  '';
+})
diff --git a/nixos/tests/plasma5.nix b/nixos/tests/plasma5.nix
index 3358a72570e..b3836cf641d 100644
--- a/nixos/tests/plasma5.nix
+++ b/nixos/tests/plasma5.nix
@@ -13,7 +13,10 @@ import ./make-test-python.nix ({ pkgs, ...} :
     services.xserver.enable = true;
     services.xserver.displayManager.sddm.enable = true;
     services.xserver.displayManager.defaultSession = "plasma";
-    services.xserver.desktopManager.plasma5.enable = true;
+    services.xserver.desktopManager.plasma5 = {
+      enable = true;
+      excludePackages = [ pkgs.plasma5Packages.elisa ];
+    };
     services.xserver.displayManager.autoLogin = {
       enable = true;
       user = "alice";
@@ -40,6 +43,9 @@ import ./make-test-python.nix ({ pkgs, ...} :
     with subtest("Check that logging in has given the user ownership of devices"):
         machine.succeed("getfacl -p /dev/snd/timer | grep -q ${user.name}")
 
+    with subtest("Ensure Elisa is not installed"):
+        machine.fail("which elisa")
+
     with subtest("Run Dolphin"):
         machine.execute("su - ${user.name} -c 'DISPLAY=:0.0 dolphin >&2 &'")
         machine.wait_for_window(" Dolphin")
diff --git a/nixos/tests/please.nix b/nixos/tests/please.nix
new file mode 100644
index 00000000000..2437cfe1613
--- /dev/null
+++ b/nixos/tests/please.nix
@@ -0,0 +1,66 @@
+import ./make-test-python.nix ({ lib, ... }:
+{
+  name = "please";
+  meta.maintainers = with lib.maintainers; [ azahi ];
+
+  nodes.machine =
+    { ... }:
+    {
+      users.users = with lib; mkMerge [
+        (listToAttrs (map
+          (n: nameValuePair n { isNormalUser = true; })
+          (genList (x: "user${toString x}") 6)))
+        {
+          user0.extraGroups = [ "wheel" ];
+        }
+      ];
+
+      security.please = {
+        enable = true;
+        wheelNeedsPassword = false;
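+        # Declarative rules: user2 may run `true` as root, user4 may edit /etc/hosts as root; both without a password.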
+        settings = {
+          user2_run_true_as_root = {
+            name = "user2";
+            target = "root";
+            rule = "/run/current-system/sw/bin/true";
+            require_pass = false;
+          };
+          user4_edit_etc_hosts_as_root = {
+            name = "user4";
+            type = "edit";
+            target = "root";
+            rule = "/etc/hosts";
+            editmode = 644;
+            require_pass = false;
+          };
+        };
+      };
+    };
+
+  testScript = ''
+    with subtest("root: can run anything by default"):
+        machine.succeed('please true')
+    with subtest("root: can edit anything by default"):
+        machine.succeed('EDITOR=cat pleaseedit /etc/hosts')
+
+    with subtest("user0: can run as root because it's in the wheel group"):
+        machine.succeed('su - user0 -c "please -u root true"')
+    with subtest("user1: cannot run as root because it's not in the wheel group"):
+        machine.fail('su - user1 -c "please -u root true"')
+
+    with subtest("user0: can edit as root"):
+        machine.succeed('su - user0 -c "EDITOR=cat pleaseedit /etc/hosts"')
+    with subtest("user1: cannot edit as root"):
+        machine.fail('su - user1 -c "EDITOR=cat pleaseedit /etc/hosts"')
+
+    with subtest("user2: can run 'true' as root"):
+        machine.succeed('su - user2 -c "please -u root true"')
+    with subtest("user3: cannot run 'true' as root"):
+        machine.fail('su - user3 -c "please -u root true"')
+
+    with subtest("user4: can edit /etc/hosts"):
+        machine.succeed('su - user4 -c "EDITOR=cat pleaseedit /etc/hosts"')
+    with subtest("user5: cannot edit /etc/hosts"):
+        machine.fail('su - user5 -c "EDITOR=cat pleaseedit /etc/hosts"')
+  '';
+})
diff --git a/nixos/tests/podgrab.nix b/nixos/tests/podgrab.nix
index e5a340dc2ac..dc9dfebaf49 100644
--- a/nixos/tests/podgrab.nix
+++ b/nixos/tests/podgrab.nix
@@ -22,11 +22,11 @@ import ./make-test-python.nix ({ pkgs, ... }: {
     start_all()
 
     default.wait_for_unit("podgrab")
-    default.wait_for_open_port(defaultPort)
+    default.wait_for_open_port(${toString defaultPort})
     default.succeed("curl --fail http://localhost:${toString defaultPort}")
 
     customized.wait_for_unit("podgrab")
-    customized.wait_for_open_port(customPort)
+    customized.wait_for_open_port(${toString customPort})
     customized.succeed("curl --fail http://localhost:${toString customPort}")
   '';
 
diff --git a/nixos/tests/podman/default.nix b/nixos/tests/podman/default.nix
index 67c7823c5a3..106ba2057d0 100644
--- a/nixos/tests/podman/default.nix
+++ b/nixos/tests/podman/default.nix
@@ -1,5 +1,3 @@
-# This test runs podman and checks if simple container starts
-
 import ../make-test-python.nix (
   { pkgs, lib, ... }: {
     name = "podman";
@@ -8,31 +6,31 @@ import ../make-test-python.nix (
     };
 
     nodes = {
-      podman =
-        { pkgs, ... }:
-        {
-          virtualisation.podman.enable = true;
-
-          # To test docker socket support
-          virtualisation.podman.dockerSocket.enable = true;
-          environment.systemPackages = [
-            pkgs.docker-client
-          ];
-
-          users.users.alice = {
-            isNormalUser = true;
-            home = "/home/alice";
-            description = "Alice Foobar";
-            extraGroups = [ "podman" ];
-          };
-
-          users.users.mallory = {
-            isNormalUser = true;
-            home = "/home/mallory";
-            description = "Mallory Foobar";
-          };
+      podman = { pkgs, ... }: {
+        virtualisation.podman.enable = true;
+
+        users.users.alice = {
+          isNormalUser = true;
+        };
+      };
+      docker = { pkgs, ... }: {
+        virtualisation.podman.enable = true;
+
+        virtualisation.podman.dockerSocket.enable = true;
+
+        environment.systemPackages = [
+          pkgs.docker-client
+        ];
 
+        users.users.alice = {
+          isNormalUser = true;
+          extraGroups = [ "podman" ];
         };
+
+        users.users.mallory = {
+          isNormalUser = true;
+        };
+      };
     };
 
     testScript = ''
@@ -45,6 +43,7 @@ import ../make-test-python.nix (
 
 
       podman.wait_for_unit("sockets.target")
+      docker.wait_for_unit("sockets.target")
       start_all()
 
       with subtest("Run container as root with runc"):
@@ -74,8 +73,10 @@ import ../make-test-python.nix (
           podman.succeed("podman stop sleeping")
           podman.succeed("podman rm sleeping")
 
-      # create systemd session for rootless
+      # start systemd session for rootless
       podman.succeed("loginctl enable-linger alice")
+      podman.succeed(su_cmd("whoami"))
+      podman.sleep(1)
 
       with subtest("Run container rootless with runc"):
           podman.succeed(su_cmd("tar cv --files-from /dev/null | podman import - scratchimg"))
@@ -120,22 +121,22 @@ import ../make-test-python.nix (
           assert pid == "2"
 
       with subtest("A podman member can use the docker cli"):
-          podman.succeed(su_cmd("docker version"))
+          docker.succeed(su_cmd("docker version"))
 
       with subtest("Run container via docker cli"):
-          podman.succeed("docker network create default")
-          podman.succeed("tar cv --files-from /dev/null | podman import - scratchimg")
-          podman.succeed(
+          docker.succeed("docker network create default")
+          docker.succeed("tar cv --files-from /dev/null | podman import - scratchimg")
+          docker.succeed(
             "docker run -d --name=sleeping -v /nix/store:/nix/store -v /run/current-system/sw/bin:/bin localhost/scratchimg /bin/sleep 10"
           )
-          podman.succeed("docker ps | grep sleeping")
-          podman.succeed("podman ps | grep sleeping")
-          podman.succeed("docker stop sleeping")
-          podman.succeed("docker rm sleeping")
-          podman.succeed("docker network rm default")
+          docker.succeed("docker ps | grep sleeping")
+          docker.succeed("podman ps | grep sleeping")
+          docker.succeed("docker stop sleeping")
+          docker.succeed("docker rm sleeping")
+          docker.succeed("docker network rm default")
 
       with subtest("A podman non-member can not use the docker cli"):
-          podman.fail(su_cmd("docker version", user="mallory"))
+          docker.fail(su_cmd("docker version", user="mallory"))
 
       # TODO: add docker-compose test
 
diff --git a/nixos/tests/polaris.nix b/nixos/tests/polaris.nix
new file mode 100644
index 00000000000..fb2e67f075a
--- /dev/null
+++ b/nixos/tests/polaris.nix
@@ -0,0 +1,31 @@
+import ./make-test-python.nix ({ lib, ... }:
+
+with lib;
+
+{
+  name = "polaris";
+  meta.maintainers = with maintainers; [ pbsds ];
+
+  nodes.machine =
+    { pkgs, ... }: {
+      environment.systemPackages = [ pkgs.jq ];
+      services.polaris = {
+        enable = true;
+        port = 5050;
+        settings.users = [
+          {
+            name = "test_user";
+            password = "very_secret_password";
+            admin = true;
+          }
+        ];
+      };
+    };
+
+  testScript = ''
+    machine.wait_for_unit("polaris.service")
+    machine.wait_for_open_port(5050)
+    machine.succeed("curl http://localhost:5050/api/version")
+    machine.succeed("curl -X GET http://localhost:5050/api/initial_setup -H  'accept: application/json' | jq -e '.has_any_users == true'")
+  '';
+})
diff --git a/nixos/tests/postgresql.nix b/nixos/tests/postgresql.nix
index 7864f5d6ff3..7e0a82c3882 100644
--- a/nixos/tests/postgresql.nix
+++ b/nixos/tests/postgresql.nix
@@ -130,8 +130,97 @@ let
     '';
 
   };
+
+  mk-ensure-clauses-test = postgresql-name: postgresql-package: makeTest {
+    name = postgresql-name;
+    meta = with pkgs.lib.maintainers; {
+      maintainers = [ zagy ];
+    };
+
+    machine = {...}:
+      {
+        services.postgresql = {
+          enable = true;
+          package = postgresql-package;
+          ensureUsers = [
+            {
+              name = "all-clauses";
+              ensureClauses = {
+                superuser = true;
+                createdb = true;
+                createrole = true;
+                "inherit" = true;
+                login = true;
+                replication = true;
+                bypassrls = true;
+              };
+            }
+            {
+              name = "default-clauses";
+            }
+          ];
+        };
+      };
+
+    testScript = let
+      getClausesQuery = user: pkgs.lib.concatStringsSep " "
+        [
+          "SELECT row_to_json(row)"
+          "FROM ("
+          "SELECT"
+            "rolsuper,"
+            "rolinherit,"
+            "rolcreaterole,"
+            "rolcreatedb,"
+            "rolcanlogin,"
+            "rolreplication,"
+            "rolbypassrls"
+          "FROM pg_roles"
+          "WHERE rolname = '${user}'"
+          ") row;"
+        ];
+    in ''
+      import json
+      machine.start()
+      machine.wait_for_unit("postgresql")
+
+      with subtest("All user permissions are set according to the ensureClauses attr"):
+          clauses = json.loads(
+            machine.succeed(
+                "sudo -u postgres psql -tc \"${getClausesQuery "all-clauses"}\""
+            )
+          )
+          print(clauses)
+          assert clauses['rolsuper'], 'expected user with clauses to have superuser clause'
+          assert clauses['rolinherit'], 'expected user with clauses to have inherit clause'
+          assert clauses['rolcreaterole'], 'expected user with clauses to have create role clause'
+          assert clauses['rolcreatedb'], 'expected user with clauses to have create db clause'
+          assert clauses['rolcanlogin'], 'expected user with clauses to have login clause'
+          assert clauses['rolreplication'], 'expected user with clauses to have replication clause'
+          assert clauses['rolbypassrls'], 'expected user with clauses to have bypassrls clause'
+
+      with subtest("All user permissions default when ensureClauses is not provided"):
+          clauses = json.loads(
+            machine.succeed(
+                "sudo -u postgres psql -tc \"${getClausesQuery "default-clauses"}\""
+            )
+          )
+          assert not clauses['rolsuper'], 'expected user with no clauses set to have default superuser clause'
+          assert clauses['rolinherit'], 'expected user with no clauses set to have default inherit clause'
+          assert not clauses['rolcreaterole'], 'expected user with no clauses set to have default create role clause'
+          assert not clauses['rolcreatedb'], 'expected user with no clauses set to have default create db clause'
+          assert clauses['rolcanlogin'], 'expected user with no clauses set to have default login clause'
+          assert not clauses['rolreplication'], 'expected user with no clauses set to have default replication clause'
+          assert not clauses['rolbypassrls'], 'expected user with no clauses set to have default bypassrls clause'
+
+      machine.shutdown()
+    '';
+  };
 in
-  (mapAttrs' (name: package: { inherit name; value=make-postgresql-test name package false;}) postgresql-versions) // {
+  concatMapAttrs (name: package: {
+    ${name} = make-postgresql-test name package false;
+    ${name + "-clauses"} = mk-ensure-clauses-test name package;
+  }) postgresql-versions
+  // {
     postgresql_11-backup-all = make-postgresql-test "postgresql_11-backup-all" postgresql-versions.postgresql_11 true;
   }
-
diff --git a/nixos/tests/powerdns.nix b/nixos/tests/powerdns.nix
index 70060bad87b..d3708d25f0f 100644
--- a/nixos/tests/powerdns.nix
+++ b/nixos/tests/powerdns.nix
@@ -47,7 +47,9 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
     with subtest("Adding an example zone works"):
         # Extract configuration file needed by pdnsutil
         unit = server.succeed("systemctl cat pdns")
-        conf = re.search("(--config-dir=[^ ]+)", unit).group(1)
+        match = re.search("(--config-dir=[^ ]+)", unit)
+        assert(match is not None)
+        conf = match.group(1)
         pdnsutil = "sudo -u pdns pdnsutil " + conf
         server.succeed(f"{pdnsutil} create-zone example.com ns1.example.com")
         server.succeed(f"{pdnsutil} add-record  example.com ns1 A 192.168.1.2")
diff --git a/nixos/tests/pppd.nix b/nixos/tests/pppd.nix
index bda0aa75bb5..e714a6c21a6 100644
--- a/nixos/tests/pppd.nix
+++ b/nixos/tests/pppd.nix
@@ -5,6 +5,8 @@ import ./make-test-python.nix (
       mode = "0640";
     };
   in {
+    name = "pppd";
+
     nodes = {
       server = {config, pkgs, ...}: {
         config = {
diff --git a/nixos/tests/printing.nix b/nixos/tests/printing.nix
index 6338fd8d8ac..cfebe232d92 100644
--- a/nixos/tests/printing.nix
+++ b/nixos/tests/printing.nix
@@ -4,6 +4,7 @@ import ./make-test-python.nix ({pkgs, ... }:
 let
   printingServer = startWhenNeeded: {
     services.printing.enable = true;
+    services.printing.stateless = true;
     services.printing.startWhenNeeded = startWhenNeeded;
     services.printing.listenAddresses = [ "*:631" ];
     services.printing.defaultShared = true;
diff --git a/nixos/tests/privacyidea.nix b/nixos/tests/privacyidea.nix
index fb072514dd9..401ad72c37b 100644
--- a/nixos/tests/privacyidea.nix
+++ b/nixos/tests/privacyidea.nix
@@ -3,7 +3,7 @@
 import ./make-test-python.nix ({ pkgs, ...} : rec {
   name = "privacyidea";
   meta = with pkgs.lib.maintainers; {
-    maintainers = [ fpletz ];
+    maintainers = [ ];
   };
 
   nodes.machine = { ... }: {
diff --git a/nixos/tests/prometheus-exporters.nix b/nixos/tests/prometheus-exporters.nix
index 04a532f7c2e..5f50a3f87d5 100644
--- a/nixos/tests/prometheus-exporters.nix
+++ b/nixos/tests/prometheus-exporters.nix
@@ -6,7 +6,7 @@
 let
   inherit (import ../lib/testing-python.nix { inherit system pkgs; }) makeTest;
   inherit (pkgs.lib) concatStringsSep maintainers mapAttrs mkMerge
-    removeSuffix replaceChars singleton splitString;
+    removeSuffix replaceStrings singleton splitString;
 
   /*
     * The attrset `exporterTests` contains one attribute
@@ -35,7 +35,7 @@ let
     *      };
     *      exporterTest = ''
     *        wait_for_unit("prometheus-<exporterName>-exporter.service")
-    *        wait_for_open_port("1234")
+    *        wait_for_open_port(1234)
     *        succeed("curl -sSf 'localhost:1234/metrics'")
     *      '';
     *    };
@@ -182,7 +182,7 @@ let
         enable = true;
         extraFlags = [ "--web.collectd-push-path /collectd" ];
       };
-      exporterTest = let postData = replaceChars [ "\n" ] [ "" ] ''
+      exporterTest = let postData = replaceStrings [ "\n" ] [ "" ] ''
         [{
           "values":[23],
           "dstypes":["gauge"],
@@ -307,6 +307,19 @@ let
       '';
     };
 
+    ipmi = {
+      exporterConfig = {
+        enable = true;
+      };
+      exporterTest = ''
+        wait_for_unit("prometheus-ipmi-exporter.service")
+        wait_for_open_port(9290)
+        succeed(
+          "curl -sSf http://localhost:9290/metrics | grep 'ipmi_scrape_duration_seconds'"
+        )
+      '';
+    };
+
     jitsi = {
       exporterConfig = {
         enable = true;
@@ -361,25 +374,34 @@ let
     };
 
     kea = let
-      controlSocketPath = "/run/kea/dhcp6.sock";
+      controlSocketPathV4 = "/run/kea/dhcp4.sock";
+      controlSocketPathV6 = "/run/kea/dhcp6.sock";
     in
     {
       exporterConfig = {
         enable = true;
         controlSocketPaths = [
-          controlSocketPath
+          controlSocketPathV4
+          controlSocketPathV6
         ];
       };
       metricProvider = {
-        systemd.services.prometheus-kea-exporter.after = [ "kea-dhcp6-server.service" ];
-
         services.kea = {
+          dhcp4 = {
+            enable = true;
+            settings = {
+              control-socket = {
+                socket-type = "unix";
+                socket-name = controlSocketPathV4;
+              };
+            };
+          };
           dhcp6 = {
             enable = true;
             settings = {
               control-socket = {
                 socket-type = "unix";
-                socket-name = controlSocketPath;
+                socket-name = controlSocketPathV6;
               };
             };
           };
@@ -387,8 +409,10 @@ let
       };
 
       exporterTest = ''
+        wait_for_unit("kea-dhcp4-server.service")
         wait_for_unit("kea-dhcp6-server.service")
-        wait_for_file("${controlSocketPath}")
+        wait_for_file("${controlSocketPathV4}")
+        wait_for_file("${controlSocketPathV6}")
         wait_for_unit("prometheus-kea-exporter.service")
         wait_for_open_port(9547)
         succeed(
@@ -557,10 +581,12 @@ let
         systemd.services.prometheus-mail-exporter = {
           after = [ "postfix.service" ];
           requires = [ "postfix.service" ];
-          preStart = ''
-            mkdir -p -m 0700 mail-exporter/new
-          '';
           serviceConfig = {
+            ExecStartPre = [
+              "${pkgs.writeShellScript "create-maildir" ''
+                mkdir -p -m 0700 mail-exporter/new
+              ''}"
+            ];
             ProtectHome = true;
             ReadOnlyPaths = "/";
             ReadWritePaths = "/var/spool/mail";
@@ -1060,13 +1086,8 @@ let
         ];
       };
       exporterTest = ''
-        wait_for_unit("prometheus-smartctl-exporter.service")
-        wait_for_open_port("9633")
-        wait_until_succeeds(
-          "curl -sSf 'localhost:9633/metrics'"
-        )
         wait_until_succeeds(
-            'journalctl -eu prometheus-smartctl-exporter.service -o cat | grep "/dev/vda: Unable to detect device type"'
+            'journalctl -eu prometheus-smartctl-exporter.service -o cat | grep "Device unavailable"'
         )
       '';
     };
@@ -1198,21 +1219,21 @@ let
         enable = true;
 
         extraFlags = [
-          "--collector.enable-restart-count"
+          "--systemd.collector.enable-restart-count"
         ];
       };
       metricProvider = { };
       exporterTest = ''
         wait_for_unit("prometheus-systemd-exporter.service")
         wait_for_open_port(9558)
-        succeed(
+        wait_until_succeeds(
             "curl -sSf localhost:9558/metrics | grep '{}'".format(
                 'systemd_unit_state{name="basic.target",state="active",type="target"} 1'
             )
         )
         succeed(
             "curl -sSf localhost:9558/metrics | grep '{}'".format(
-                'systemd_service_restart_total{state="prometheus-systemd-exporter.service"} 0'
+                'systemd_service_restart_total{name="prometheus-systemd-exporter.service"} 0'
             )
         )
       '';
@@ -1237,15 +1258,13 @@ let
       '';
     };
 
-    unifi-poller = {
-      nodeName = "unifi_poller";
+    unpoller = {
+      nodeName = "unpoller";
       exporterConfig.enable = true;
       exporterConfig.controllers = [{ }];
       exporterTest = ''
-        wait_for_unit("prometheus-unifi-poller-exporter.service")
-        wait_for_open_port(9130)
-        succeed(
-            "curl -sSf localhost:9130/metrics | grep 'unifipoller_build_info{.\\+} 1'"
+        wait_until_succeeds(
+            'journalctl -eu prometheus-unpoller-exporter.service -o cat | grep "Connection Error"'
         )
       '';
     };
@@ -1273,6 +1292,67 @@ let
       '';
     };
 
+    v2ray = {
+      exporterConfig = {
+        enable = true;
+      };
+
+      metricProvider = {
+        systemd.services.prometheus-nginx-exporter.after = [ "v2ray.service" ];
+        services.v2ray = {
+          enable = true;
+          config = {
+            stats = {};
+            api = {
+              tag = "api";
+              services = [ "StatsService" ];
+            };
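+            # The dokodemo-door inbound below exposes the StatsService on 127.0.0.1:54321 (tag "api"), routed to the "api" outbound.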
+            inbounds = [
+              {
+                port = 1080;
+                listen = "127.0.0.1";
+                protocol = "http";
+              }
+              {
+                listen = "127.0.0.1";
+                port = 54321;
+                protocol = "dokodemo-door";
+                settings = { address = "127.0.0.1"; };
+                tag = "api";
+              }
+            ];
+            outbounds = [
+              {
+                protocol = "freedom";
+              }
+              {
+                protocol = "freedom";
+                settings = {};
+                tag = "api";
+              }
+            ];
+            routing = {
+              strategy = "rules";
+              settings = {
+                rules = [
+                  {
+                    inboundTag = [ "api" ];
+                    outboundTag = "api";
+                    type = "field";
+                  }
+                ];
+              };
+            };
+          };
+        };
+      };
+      exporterTest = ''
+        wait_for_unit("prometheus-v2ray-exporter.service")
+        wait_for_open_port(9299)
+        succeed("curl -sSf localhost:9299/scrape | grep 'v2ray_up 1'")
+      '';
+    };
+
     varnish = {
       exporterConfig = {
         enable = true;
@@ -1328,6 +1408,22 @@ let
           )
         '';
       };
+
+    zfs = {
+      exporterConfig = {
+        enable = true;
+      };
+      metricProvider = {
+        boot.supportedFilesystems = [ "zfs" ];
+        networking.hostId = "7327ded7";
+      };
+      exporterTest = ''
+        wait_for_unit("prometheus-zfs-exporter.service")
+        wait_for_unit("zfs.target")
+        wait_for_open_port(9134)
+        wait_until_succeeds("curl -f localhost:9134/metrics | grep 'zfs_scrape_collector_success{.*} 1'")
+      '';
+    };
   };
 in
 mapAttrs
@@ -1354,7 +1450,7 @@ mapAttrs
       '';
 
       meta = with maintainers; {
-        maintainers = [ willibutz elseym ];
+        maintainers = [ willibutz ];
       };
     }
   )))
diff --git a/nixos/tests/pulseaudio.nix b/nixos/tests/pulseaudio.nix
index cfdc61bc6c2..dc8e33ccd55 100644
--- a/nixos/tests/pulseaudio.nix
+++ b/nixos/tests/pulseaudio.nix
@@ -1,10 +1,10 @@
 let
-  mkTest = { systemWide ? false }:
+  mkTest = { systemWide ? false, fullVersion ? false }:
     import ./make-test-python.nix ({ pkgs, lib, ... }:
       let
         testFile = pkgs.fetchurl {
           url =
-            "https://file-examples-com.github.io/uploads/2017/11/file_example_MP3_700KB.mp3";
+            "https://file-examples.com/storage/fe5947fd2362fc197a3c2df/2017/11/file_example_MP3_700KB.mp3";
           hash = "sha256-+iggJW8s0/LfA/okfXsB550/55Q0Sq3OoIzuBrzOPJQ=";
         };
 
@@ -22,7 +22,7 @@ let
           testPlay32 = { inherit (pkgs.pkgsi686Linux) sox alsa-utils; };
         };
       in {
-        name = "pulseaudio${lib.optionalString systemWide "-systemWide"}";
+        name = "pulseaudio${lib.optionalString fullVersion "Full"}${lib.optionalString systemWide "-systemWide"}";
         meta = with pkgs.lib.maintainers; {
           maintainers = [ synthetica ] ++ pkgs.pulseaudio.meta.maintainers;
         };
@@ -35,12 +35,14 @@ let
               enable = true;
               support32Bit = true;
               inherit systemWide;
+            } // lib.optionalAttrs fullVersion {
+              package = pkgs.pulseaudioFull;
             };
 
             environment.systemPackages = [ testers.testPlay pkgs.pavucontrol ]
               ++ lib.optional pkgs.stdenv.isx86_64 testers.testPlay32;
           } // lib.optionalAttrs systemWide {
-            users.users.alice.extraGroups = [ "audio" ];
+            users.users.alice.extraGroups = [ "pulse-access" ];
             systemd.services.pulseaudio.wantedBy = [ "multi-user.target" ];
           };
 
@@ -58,14 +60,21 @@ let
           ''}
           machine.screenshot("testPlay")
 
+          ${lib.optionalString (!systemWide) ''
+            machine.send_chars("pacmd info && touch /tmp/pacmd_success\n")
+            machine.wait_for_file("/tmp/pacmd_success")
+          ''}
+
           # Pavucontrol only loads when Pulseaudio is running. If it isn't, the
-          # text "Playback" (one of the tabs) will never show.
+          # text "Dummy Output" (sound device name) will never show.
           machine.send_chars("pavucontrol\n")
-          machine.wait_for_text("Playback")
+          machine.wait_for_text("Dummy Output")
           machine.screenshot("Pavucontrol")
         '';
       });
 in builtins.mapAttrs (key: val: mkTest val) {
-  user = { systemWide = false; };
-  system = { systemWide = true; };
+  user = { systemWide = false; fullVersion = false; };
+  system = { systemWide = true; fullVersion = false; };
+  userFull = { systemWide = false; fullVersion = true; };
+  systemFull = { systemWide = true; fullVersion = true; };
 }
diff --git a/nixos/tests/quake3.nix b/nixos/tests/quake3.nix
new file mode 100644
index 00000000000..82af1af463d
--- /dev/null
+++ b/nixos/tests/quake3.nix
@@ -0,0 +1,95 @@
+import ./make-test-python.nix ({ pkgs, ...} :
+
+let
+
+  # Build Quake with coverage instrumentation.
+  overrides = pkgs:
+    {
+      quake3game = pkgs.quake3game.override (args: {
+        stdenv = pkgs.stdenvAdapters.addCoverageInstrumentation args.stdenv;
+      });
+    };
+
+  # Only allow the demo data to be used (only if it's unfreeRedistributable).
+  unfreePredicate = pkg: with pkgs.lib; let
+    allowPackageNames = [ "quake3-demodata" "quake3-pointrelease" ];
+    allowLicenses = [ pkgs.lib.licenses.unfreeRedistributable ];
+  in elem pkg.pname allowPackageNames &&
+     elem (pkg.meta.license or null) allowLicenses;
+
+  client =
+    { pkgs, ... }:
+
+    { imports = [ ./common/x11.nix ];
+      hardware.opengl.driSupport = true;
+      environment.systemPackages = [ pkgs.quake3demo ];
+      nixpkgs.config.packageOverrides = overrides;
+      nixpkgs.config.allowUnfreePredicate = unfreePredicate;
+    };
+
+in
+
+rec {
+  name = "quake3";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ domenkozar eelco ];
+  };
+
+  # TODO: lcov doesn't work atm
+  #makeCoverageReport = true;
+
+  nodes =
+    { server =
+        { pkgs, ... }:
+
+        { systemd.services.quake3-server =
+            { wantedBy = [ "multi-user.target" ];
+              script =
+                "${pkgs.quake3demo}/bin/quake3-server +set g_gametype 0 " +
+                "+map q3dm7 +addbot grunt +addbot daemia 2> /tmp/log";
+            };
+          nixpkgs.config.packageOverrides = overrides;
+          nixpkgs.config.allowUnfreePredicate = unfreePredicate;
+          networking.firewall.allowedUDPPorts = [ 27960 ];
+        };
+
+      client1 = client;
+      client2 = client;
+    };
+
+  testScript =
+    ''
+      start_all()
+
+      server.wait_for_unit("quake3-server")
+      client1.wait_for_x()
+      client2.wait_for_x()
+
+      client1.execute("quake3 +set r_fullscreen 0 +set name Foo +connect server &")
+      client2.execute("quake3 +set r_fullscreen 0 +set name Bar +connect server &")
+
+      server.wait_until_succeeds("grep -q 'Foo.*entered the game' /tmp/log")
+      server.wait_until_succeeds("grep -q 'Bar.*entered the game' /tmp/log")
+
+      server.sleep(10)  # wait for a while to get a nice screenshot
+
+      client1.block()
+
+      server.sleep(20)
+
+      client1.screenshot("screen1")
+      client2.screenshot("screen2")
+
+      client1.unblock()
+
+      server.sleep(10)
+
+      client1.screenshot("screen3")
+      client2.screenshot("screen4")
+
+      client1.shutdown()
+      client2.shutdown()
+      server.stop_job("quake3-server")
+    '';
+
+})
diff --git a/nixos/tests/rabbitmq.nix b/nixos/tests/rabbitmq.nix
index f8e8e61c47d..040679e68d9 100644
--- a/nixos/tests/rabbitmq.nix
+++ b/nixos/tests/rabbitmq.nix
@@ -1,6 +1,12 @@
 # This test runs rabbitmq and checks if rabbitmq is up and running.
 
-import ./make-test-python.nix ({ pkgs, ... }: {
+import ./make-test-python.nix ({ pkgs, ... }:
+let
+  # In real life, you would keep this key out of your repo and deploy it to a
+  # secure location by secure means.
+  configKeyPath = pkgs.writeText "fake-config-key" "hOjWzSEn2Z7cHzKOcf6i183O2NdjurSuoMDIIv01";
+in
+{
   name = "rabbitmq";
   meta = with pkgs.lib.maintainers; {
     maintainers = [ eelco offline ];
@@ -10,6 +16,29 @@ import ./make-test-python.nix ({ pkgs, ... }: {
     services.rabbitmq = {
       enable = true;
       managementPlugin.enable = true;
+
+      # To encrypt:
+      # rabbitmqctl --quiet encode --cipher blowfish_cfb64 --hash sha256 \
+      #   --iterations 10000 '<<"dJT8isYu6t0Xb6u56rPglSj1vK51SlNVlXfwsRxw">>' \
+      #   "hOjWzSEn2Z7cHzKOcf6i183O2NdjurSuoMDIIv01" ;
+      config = ''
+        [ { rabbit
+          , [ {default_user, <<"alice">>}
+            , { default_pass
+              , {encrypted,<<"oKKxyTze9PYmsEfl6FG1MxIUhxY7WPQL7HBoMPRC/1ZOdOZbtr9+DxjWW3e1D5SL48n3D9QOsGD0cOgYG7Qdvb7Txrepw8w=">>}
+              }
+            , {config_entry_decoder
+              , [ {passphrase, {file, <<"${configKeyPath}">>}}
+                , {cipher, blowfish_cfb64}
+                , {hash, sha256}
+                , {iterations, 10000}
+                ]
+              }
+            % , {rabbitmq_management, [{path_prefix, "/_queues"}]}
+            ]
+          }
+        ].
+      '';
     };
     # Ensure there is sufficient extra disk space for rabbitmq to be happy
     virtualisation.diskSize = 1024;
@@ -23,5 +52,10 @@ import ./make-test-python.nix ({ pkgs, ... }: {
         'su -s ${pkgs.runtimeShell} rabbitmq -c "rabbitmqctl status"'
     )
     machine.wait_for_open_port(15672)
+
+    # The password is the plaintext that was encrypted with rabbitmqctl encode above.
+    machine.wait_until_succeeds(
+        '${pkgs.rabbitmq-java-client}/bin/PerfTest --time 10 --uri amqp://alice:dJT8isYu6t0Xb6u56rPglSj1vK51SlNVlXfwsRxw@localhost'
+    )
   '';
 })
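Note on the encrypted credential above: the {encrypted,<<"...">>} value is the plaintext password "dJT8isYu6t0Xb6u56rPglSj1vK51SlNVlXfwsRxw" encrypted with the passphrase stored in configKeyPath. A minimal sketch of producing and checking such a value with rabbitmqctl, using the same options as the comment in the config (the decode invocation is assumed to mirror encode):

    # encrypt the plaintext with the passphrase from configKeyPath
    rabbitmqctl encode --cipher blowfish_cfb64 --hash sha256 --iterations 10000 \
      '<<"dJT8isYu6t0Xb6u56rPglSj1vK51SlNVlXfwsRxw">>' \
      'hOjWzSEn2Z7cHzKOcf6i183O2NdjurSuoMDIIv01'

    # verify by decrypting the value stored in the test configuration
    rabbitmqctl decode --cipher blowfish_cfb64 --hash sha256 --iterations 10000 \
      '{encrypted,<<"oKKxyTze9PYmsEfl6FG1MxIUhxY7WPQL7HBoMPRC/1ZOdOZbtr9+DxjWW3e1D5SL48n3D9QOsGD0cOgYG7Qdvb7Txrepw8w=">>}' \
      'hOjWzSEn2Z7cHzKOcf6i183O2NdjurSuoMDIIv01'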
diff --git a/nixos/tests/resolv.nix b/nixos/tests/resolv.nix
deleted file mode 100644
index f0aa7e42aaf..00000000000
--- a/nixos/tests/resolv.nix
+++ /dev/null
@@ -1,46 +0,0 @@
-# Test whether DNS resolving returns multiple records and all address families.
-import ./make-test-python.nix ({ pkgs, ... } : {
-  name = "resolv";
-  meta = with pkgs.lib.maintainers; {
-    maintainers = [ ckauhaus ];
-  };
-
-  nodes.resolv = { ... }: {
-    networking.extraHosts = ''
-      # IPv4 only
-      192.0.2.1 host-ipv4.example.net
-      192.0.2.2 host-ipv4.example.net
-      # IP6 only
-      2001:db8::2:1 host-ipv6.example.net
-      2001:db8::2:2 host-ipv6.example.net
-      # dual stack
-      192.0.2.1 host-dual.example.net
-      192.0.2.2 host-dual.example.net
-      2001:db8::2:1 host-dual.example.net
-      2001:db8::2:2 host-dual.example.net
-    '';
-  };
-
-  testScript = ''
-    def addrs_in(hostname, addrs):
-        res = resolv.succeed("getent ahosts {}".format(hostname))
-        for addr in addrs:
-            assert addr in res, "Expected output '{}' not found in\n{}".format(addr, res)
-
-
-    start_all()
-    resolv.wait_for_unit("nscd")
-
-    ipv4 = ["192.0.2.1", "192.0.2.2"]
-    ipv6 = ["2001:db8::2:1", "2001:db8::2:2"]
-
-    with subtest("IPv4 resolves"):
-        addrs_in("host-ipv4.example.net", ipv4)
-
-    with subtest("IPv6 resolves"):
-        addrs_in("host-ipv6.example.net", ipv6)
-
-    with subtest("Dual stack resolves"):
-        addrs_in("host-dual.example.net", ipv4 + ipv6)
-  '';
-})
diff --git a/nixos/tests/restic.nix b/nixos/tests/restic.nix
index 7523d5e5ed5..3681c4cf190 100644
--- a/nixos/tests/restic.nix
+++ b/nixos/tests/restic.nix
@@ -2,9 +2,8 @@ import ./make-test-python.nix (
   { pkgs, ... }:
 
   let
-    password = "some_password";
-    repository = "/tmp/restic-backup";
-    repositoryFile = "${pkgs.writeText "repositoryFile" "/tmp/restic-backup-from-file"}";
+    remoteRepository = "/tmp/restic-backup";
+    remoteFromFileRepository = "/tmp/restic-backup-from-file";
     rcloneRepository = "rclone:local:/tmp/restic-rclone-backup";
 
     backupPrepareCommand = ''
@@ -18,7 +17,6 @@ import ./make-test-python.nix (
     '';
 
     passwordFile = "${pkgs.writeText "password" "correcthorsebatterystaple"}";
-    initialize = true;
     paths = [ "/opt" ];
     pruneOpts = [
       "--keep-daily 2"
@@ -40,12 +38,18 @@ import ./make-test-python.nix (
         {
           services.restic.backups = {
             remotebackup = {
-              inherit repository passwordFile initialize paths pruneOpts backupPrepareCommand backupCleanupCommand;
+              inherit passwordFile paths pruneOpts backupPrepareCommand backupCleanupCommand;
+              repository = remoteRepository;
+              initialize = true;
             };
-            remotebackup-from-file = {
-              inherit repositoryFile passwordFile initialize paths pruneOpts;
+            remote-from-file-backup = {
+              inherit passwordFile paths pruneOpts;
+              initialize = true;
+              repositoryFile = pkgs.writeText "repositoryFile" remoteFromFileRepository;
             };
             rclonebackup = {
+              inherit passwordFile paths pruneOpts;
+              initialize = true;
               repository = rcloneRepository;
               rcloneConfig = {
                 type = "local";
@@ -57,12 +61,22 @@ import ./make-test-python.nix (
                 [local]
                 type=ftp
               '';
-              inherit passwordFile initialize paths pruneOpts;
             };
             remoteprune = {
-              inherit repository passwordFile;
+              inherit passwordFile;
+              repository = remoteRepository;
               pruneOpts = [ "--keep-last 1" ];
             };
+            custompackage = {
+              inherit passwordFile paths;
+              repository = "some-fake-repository";
+              package = pkgs.writeShellScriptBin "restic" ''
+                echo "$@" >> /tmp/fake-restic.log;
+              '';
+
+              pruneOpts = [ "--keep-last 1" ];
+              checkOpts = [ "--some-check-option" ];
+            };
           };
 
           environment.sessionVariables.RCLONE_CONFIG_LOCAL_TYPE = "local";
@@ -73,46 +87,69 @@ import ./make-test-python.nix (
       server.start()
       server.wait_for_unit("dbus.socket")
       server.fail(
-          "${pkgs.restic}/bin/restic -r ${repository} -p ${passwordFile} snapshots",
-          '${pkgs.restic}/bin/restic --repository-file ${repositoryFile} -p ${passwordFile} snapshots"',
+          "${pkgs.restic}/bin/restic -r ${remoteRepository} -p ${passwordFile} snapshots",
+          '${pkgs.restic}/bin/restic -r ${remoteFromFileRepository} -p ${passwordFile} snapshots',
           "${pkgs.restic}/bin/restic -r ${rcloneRepository} -p ${passwordFile} snapshots",
+          "grep 'backup .* /opt' /tmp/fake-restic.log",
       )
       server.succeed(
+          # set up
           "mkdir -p /opt",
           "touch /opt/some_file",
           "mkdir -p /tmp/restic-rclone-backup",
+
+          # test that remotebackup runs custom commands and produces a snapshot
           "timedatectl set-time '2016-12-13 13:45'",
           "systemctl start restic-backups-remotebackup.service",
           "rm /opt/backupCleanupCommand",
-          "systemctl start restic-backups-remotebackup-from-file.service",
+          '${pkgs.restic}/bin/restic -r ${remoteRepository} -p ${passwordFile} snapshots --json | ${pkgs.jq}/bin/jq -e "length | . == 1"',
+
+          # test that remote-from-file-backup produces a snapshot
+          "systemctl start restic-backups-remote-from-file-backup.service",
+          '${pkgs.restic}/bin/restic -r ${remoteFromFileRepository} -p ${passwordFile} snapshots --json | ${pkgs.jq}/bin/jq -e "length | . == 1"',
+
+          # test that rclonebackup produces a snapshot
           "systemctl start restic-backups-rclonebackup.service",
-          '${pkgs.restic}/bin/restic -r ${repository} -p ${passwordFile} snapshots -c | grep -e "^1 snapshot"',
-          '${pkgs.restic}/bin/restic --repository-file ${repositoryFile} -p ${passwordFile} snapshots -c | grep -e "^1 snapshot"',
-          '${pkgs.restic}/bin/restic -r ${rcloneRepository} -p ${passwordFile} snapshots -c | grep -e "^1 snapshot"',
+          '${pkgs.restic}/bin/restic -r ${rcloneRepository} -p ${passwordFile} snapshots --json | ${pkgs.jq}/bin/jq -e "length | . == 1"',
+
+          # test that custompackage runs both `restic backup` and `restic check` with reasonable command lines
+          "systemctl start restic-backups-custompackage.service",
+          "grep 'backup .* /opt' /tmp/fake-restic.log",
+          "grep 'check .* --some-check-option' /tmp/fake-restic.log",
+
+          # test that we can create four snapshots in remotebackup and rclonebackup
           "timedatectl set-time '2017-12-13 13:45'",
           "systemctl start restic-backups-remotebackup.service",
           "rm /opt/backupCleanupCommand",
           "systemctl start restic-backups-rclonebackup.service",
+
           "timedatectl set-time '2018-12-13 13:45'",
           "systemctl start restic-backups-remotebackup.service",
           "rm /opt/backupCleanupCommand",
           "systemctl start restic-backups-rclonebackup.service",
+
           "timedatectl set-time '2018-12-14 13:45'",
           "systemctl start restic-backups-remotebackup.service",
           "rm /opt/backupCleanupCommand",
           "systemctl start restic-backups-rclonebackup.service",
+
           "timedatectl set-time '2018-12-15 13:45'",
           "systemctl start restic-backups-remotebackup.service",
           "rm /opt/backupCleanupCommand",
           "systemctl start restic-backups-rclonebackup.service",
+
           "timedatectl set-time '2018-12-16 13:45'",
           "systemctl start restic-backups-remotebackup.service",
           "rm /opt/backupCleanupCommand",
           "systemctl start restic-backups-rclonebackup.service",
-          '${pkgs.restic}/bin/restic -r ${repository} -p ${passwordFile} snapshots -c | grep -e "^4 snapshot"',
-          '${pkgs.restic}/bin/restic -r ${rcloneRepository} -p ${passwordFile} snapshots -c | grep -e "^4 snapshot"',
+
+          '${pkgs.restic}/bin/restic -r ${remoteRepository} -p ${passwordFile} snapshots --json | ${pkgs.jq}/bin/jq -e "length | . == 4"',
+          '${pkgs.restic}/bin/restic -r ${rcloneRepository} -p ${passwordFile} snapshots --json | ${pkgs.jq}/bin/jq -e "length | . == 4"',
+
+          # test that remoteprune brings us back to 1 snapshot in remotebackup
           "systemctl start restic-backups-remoteprune.service",
-          '${pkgs.restic}/bin/restic -r ${repository} -p ${passwordFile} snapshots -c | grep -e "^1 snapshot"',
+          '${pkgs.restic}/bin/restic -r ${remoteRepository} -p ${passwordFile} snapshots --json | ${pkgs.jq}/bin/jq -e "length | . == 1"',
+
       )
     '';
   }
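The snapshot-count checks above hinge on jq's exit status: restic snapshots --json prints a JSON array, and with -e jq exits non-zero when the filter evaluates to false or null, which is what turns the pipeline into a real assertion for machine.succeed (without -e, jq exits 0 even after printing false). A standalone sketch of the same check, with illustrative repository and password-file paths:

    # assert that the repository holds exactly 4 snapshots
    restic -r /tmp/restic-backup -p /path/to/password-file snapshots --json \
      | jq -e 'length | . == 4'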
diff --git a/nixos/tests/retroarch.nix b/nixos/tests/retroarch.nix
index c506ed02da8..f4bf232ea72 100644
--- a/nixos/tests/retroarch.nix
+++ b/nixos/tests/retroarch.nix
@@ -2,7 +2,7 @@ import ./make-test-python.nix ({ pkgs, ... }:
 
   {
     name = "retroarch";
-    meta = with pkgs.lib.maintainers; { maintainers = [ j0hax ]; };
+    meta = with pkgs.lib; { maintainers = teams.libretro.members ++ [ maintainers.j0hax ]; };
 
     nodes.machine = { ... }:
 
@@ -11,7 +11,7 @@ import ./make-test-python.nix ({ pkgs, ... }:
         services.xserver.enable = true;
         services.xserver.desktopManager.retroarch = {
           enable = true;
-          package = pkgs.retroarchFull;
+          package = pkgs.retroarchBare;
         };
         services.xserver.displayManager = {
           sddm.enable = true;
diff --git a/nixos/tests/sanoid.nix b/nixos/tests/sanoid.nix
index 3bdbe0a8d8d..411ebcead9f 100644
--- a/nixos/tests/sanoid.nix
+++ b/nixos/tests/sanoid.nix
@@ -34,6 +34,7 @@ in {
           autosnap = true;
         };
         datasets."pool/sanoid".use_template = [ "test" ];
+        datasets."pool/compat".useTemplate = [ "test" ];
         extraArgs = [ "--verbose" ];
       };
 
@@ -48,6 +49,15 @@ in {
           };
           # Take snapshot and sync
           "pool/syncoid".target = "root@target:pool/syncoid";
+
+          # Test pool without parent (regression test for https://github.com/NixOS/nixpkgs/pull/180111)
+          "pool".target = "root@target:pool/full-pool";
+
+          # Test backward compatible options (regression test for https://github.com/NixOS/nixpkgs/issues/181561)
+          "pool/compat" = {
+            target = "root@target:pool/compat";
+            extraArgs = [ "--no-sync-snap" ];
+          };
         };
       };
     };
@@ -67,6 +77,7 @@ in {
         "udevadm settle",
         "zpool create pool -R /mnt /dev/vdb1",
         "zfs create pool/sanoid",
+        "zfs create pool/compat",
         "zfs create pool/syncoid",
         "udevadm settle",
     )
@@ -91,6 +102,7 @@ in {
 
     # Take snapshot with sanoid
     source.succeed("touch /mnt/pool/sanoid/test.txt")
+    source.succeed("touch /mnt/pool/compat/test.txt")
     source.systemctl("start --wait sanoid.service")
 
     assert len(source.succeed("zfs allow pool")) == 0, "Pool shouldn't have delegated permissions set after snapshotting"
@@ -105,6 +117,12 @@ in {
     source.systemctl("start --wait syncoid-pool-syncoid.service")
     target.succeed("cat /mnt/pool/syncoid/test.txt")
 
+    source.systemctl("start --wait syncoid-pool.service")
+    target.succeed("[[ -d /mnt/pool/full-pool/syncoid ]]")
+
+    source.systemctl("start --wait syncoid-pool-compat.service")
+    target.succeed("cat /mnt/pool/compat/test.txt")
+
     assert len(source.succeed("zfs allow pool")) == 0, "Pool shouldn't have delegated permissions set after syncing snapshots"
     assert len(source.succeed("zfs allow pool/sanoid")) == 0, "Sanoid dataset shouldn't have delegated permissions set after syncing snapshots"
     assert len(source.succeed("zfs allow pool/syncoid")) == 0, "Syncoid dataset shouldn't have delegated permissions set after syncing snapshots"
diff --git a/nixos/tests/schleuder.nix b/nixos/tests/schleuder.nix
index a9e4cc325bc..e57ef66bb8f 100644
--- a/nixos/tests/schleuder.nix
+++ b/nixos/tests/schleuder.nix
@@ -82,9 +82,7 @@ import ./make-test-python.nix {
     # Since we don't have internet here, use dnsmasq to provide MX records from /etc/hosts
     services.dnsmasq = {
       enable = true;
-      extraConfig = ''
-        selfmx
-      '';
+      settings.selfmx = true;
     };
 
     networking.extraHosts = ''
diff --git a/nixos/tests/seafile.nix b/nixos/tests/seafile.nix
index 6eec8b1fbe5..78e735f4fed 100644
--- a/nixos/tests/seafile.nix
+++ b/nixos/tests/seafile.nix
@@ -79,18 +79,14 @@ import ./make-test-python.nix ({ pkgs, ... }:
               f"seaf-cli download -l {libid} -s http://server -u admin\@example.com -p seafile_password -d . >&2"
           )
 
-          client1.sleep(3)
-
-          client1.succeed("seaf-cli status |grep synchronized >&2")
+          client1.wait_until_succeeds("seaf-cli status |grep synchronized >&2")
 
           client1.succeed("ls -la >&2")
           client1.succeed("ls -la test01 >&2")
 
           client1.execute("echo bla > test01/first_file")
 
-          client1.sleep(2)
-
-          client1.succeed("seaf-cli status |grep synchronized >&2")
+          client1.wait_until_succeeds("seaf-cli status |grep synchronized >&2")
 
       with subtest("client2 sync"):
           client2.wait_for_unit("default.target")
@@ -110,9 +106,7 @@ import ./make-test-python.nix ({ pkgs, ... }:
               f"seaf-cli download -l {libid} -s http://server -u admin\@example.com -p seafile_password -d . >&2"
           )
 
-          client2.sleep(3)
-
-          client2.succeed("seaf-cli status |grep synchronized >&2")
+          client2.wait_until_succeeds("seaf-cli status |grep synchronized >&2")
 
           client2.succeed("ls -la test01 >&2")
 
diff --git a/nixos/tests/shadow.nix b/nixos/tests/shadow.nix
index 50a9f712464..baa2e5945c0 100644
--- a/nixos/tests/shadow.nix
+++ b/nixos/tests/shadow.nix
@@ -3,6 +3,8 @@ let
   password2 = "helloworld";
   password3 = "bazqux";
   password4 = "asdf123";
+  hashed_bcrypt = "$2b$05$8xIEflrk2RxQtcVXbGIxs.Vl0x7dF1/JSv3cyX6JJt0npzkTCWvxK"; # fnord
+  hashed_yeshash = "$y$j9T$d8Z4EAf8P1SvM/aDFbxMS0$VnTXMp/Hnc7QdCBEaLTq5ZFOAFo2/PM0/xEAFuOE88."; # fnord
 in import ./make-test-python.nix ({ pkgs, ... }: {
   name = "shadow";
   meta = with pkgs.lib.maintainers; { maintainers = [ nequissimus ]; };
@@ -27,6 +29,16 @@ in import ./make-test-python.nix ({ pkgs, ... }: {
         password = password4;
         shell = pkgs.bash;
       };
+      users.berta = {
+        isNormalUser = true;
+        hashedPassword = hashed_bcrypt;
+        shell = pkgs.bash;
+      };
+      users.yesim = {
+        isNormalUser = true;
+        hashedPassword = hashed_yeshash;
+        shell = pkgs.bash;
+      };
     };
   };
 
@@ -115,5 +127,23 @@ in import ./make-test-python.nix ({ pkgs, ... }: {
         shadow.wait_until_succeeds("pgrep login")
         shadow.send_chars("${password2}\n")
         shadow.wait_until_tty_matches("5", "login:")
+
+    with subtest("check alternate password hashes"):
+        shadow.send_key("alt-f6")
+        shadow.wait_until_succeeds("[ $(fgconsole) = 6 ]")
+        for u in ["berta", "yesim"]:
+            shadow.wait_for_unit("getty@tty6.service")
+            shadow.wait_until_succeeds("pgrep -f 'agetty.*tty6'")
+            shadow.wait_until_tty_matches("6", "login: ")
+            shadow.send_chars(f"{u}\n")
+            shadow.wait_until_tty_matches("6", f"login: {u}")
+            shadow.wait_until_succeeds("pgrep login")
+            shadow.sleep(2)
+            shadow.send_chars("fnord\n")
+            shadow.send_chars(f"whoami > /tmp/{u}\n")
+            shadow.wait_for_file(f"/tmp/{u}")
+            print(shadow.succeed(f"cat /tmp/{u}"))
+            assert u in shadow.succeed(f"cat /tmp/{u}")
+            shadow.send_chars("logout\n")
   '';
 })
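The two hashes added above are ordinary crypt(3) strings for the password "fnord": a bcrypt hash (prefix $2b$, cost 05) and a yescrypt hash (prefix $y$). A sketch of generating comparable hashes, assuming a mkpasswd built against a libxcrypt that offers these methods:

    # bcrypt hash (prefix $2b$)
    mkpasswd -m bcrypt fnord

    # yescrypt hash (prefix $y$)
    mkpasswd -m yescrypt fnord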
diff --git a/nixos/tests/signal-desktop.nix b/nixos/tests/signal-desktop.nix
index fbe9cdf84d0..5e2b648c7cf 100644
--- a/nixos/tests/signal-desktop.nix
+++ b/nixos/tests/signal-desktop.nix
@@ -60,7 +60,7 @@ in {
     )
     # Only SQLCipher should be able to read the encrypted DB:
     machine.fail(
-        "su - alice -c 'sqlite3 ~/.config/Signal/sql/db.sqlite .databases'"
+        "su - alice -c 'sqlite3 ~/.config/Signal/sql/db.sqlite .tables'"
     )
     print(machine.succeed(
         "su - alice -c 'sqlcipher ~/.config/Signal/sql/db.sqlite'"
diff --git a/nixos/tests/smokeping.nix b/nixos/tests/smokeping.nix
index ccacf60cfe4..04f81396429 100644
--- a/nixos/tests/smokeping.nix
+++ b/nixos/tests/smokeping.nix
@@ -28,6 +28,8 @@ import ./make-test-python.nix ({ pkgs, ...} : {
     sm.wait_for_unit("thttpd")
     sm.wait_for_file("/var/lib/smokeping/data/Local/LocalMachine.rrd")
     sm.succeed("curl -s -f localhost:8081/smokeping.fcgi?target=Local")
+    # Check that there's a helpful page without an explicit path as well.
+    sm.succeed("curl -s -f localhost:8081")
     sm.succeed("ls /var/lib/smokeping/cache/Local/LocalMachine_mini.png")
     sm.succeed("ls /var/lib/smokeping/cache/index.html")
   '';
diff --git a/nixos/tests/sourcehut.nix b/nixos/tests/sourcehut.nix
index d52fbddd20f..9caa1bcd98f 100644
--- a/nixos/tests/sourcehut.nix
+++ b/nixos/tests/sourcehut.nix
@@ -35,7 +35,7 @@ let
           };
 
           security.sudo.wheelNeedsPassword = false;
-          nix.trustedUsers = [ "root" "build" ];
+          nix.settings.trusted-users = [ "root" "build" ];
           documentation.nixos.enable = false;
 
           # builds.sr.ht-image-specific network settings
diff --git a/nixos/tests/spark/default.nix b/nixos/tests/spark/default.nix
index 025c5a5222e..462f0d23a40 100644
--- a/nixos/tests/spark/default.nix
+++ b/nixos/tests/spark/default.nix
@@ -7,6 +7,7 @@ import ../make-test-python.nix ({...}: {
         enable = true;
         master = "master:7077";
       };
+      virtualisation.memorySize = 2048;
     };
     master = { config, pkgs, ... }: {
       services.spark.master = {
diff --git a/nixos/tests/sqlite3-to-mysql.nix b/nixos/tests/sqlite3-to-mysql.nix
new file mode 100644
index 00000000000..029058187df
--- /dev/null
+++ b/nixos/tests/sqlite3-to-mysql.nix
@@ -0,0 +1,65 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }:
+
+/*
+  This test suite replaces the usual pytestCheckHook of sqlite3-to-mysql
+  because the tests need a running MySQL instance.
+*/
+
+{
+  name = "sqlite3-to-mysql";
+  meta.maintainers = with lib.maintainers; [ gador ];
+
+  nodes.machine = { pkgs, ... }: {
+    environment.systemPackages = with pkgs; [
+      sqlite3-to-mysql
+      # create one coherent python environment
+      (python3.withPackages
+        (ps: sqlite3-to-mysql.propagatedBuildInputs ++
+          [
+            python3Packages.pytest
+            python3Packages.pytest-mock
+            python3Packages.pytest-timeout
+            python3Packages.factory_boy
+            python3Packages.docker # only needed so import does not fail
+            sqlite3-to-mysql
+          ])
+      )
+    ];
+    services.mysql = {
+      package = pkgs.mariadb;
+      enable = true;
+      # from https://github.com/techouse/sqlite3-to-mysql/blob/master/tests/conftest.py
+      # and https://github.com/techouse/sqlite3-to-mysql/blob/master/.github/workflows/test.yml
+      initialScript = pkgs.writeText "mysql-init.sql" ''
+        create database test_db DEFAULT CHARACTER SET utf8mb4;
+        create user tester identified by 'testpass';
+        grant all on test_db.* to tester;
+        create user tester@localhost identified by 'testpass';
+        grant all on test_db.* to tester@localhost;
+      '';
+      settings = {
+        mysqld = {
+          character-set-server = "utf8mb4";
+          collation-server = "utf8mb4_unicode_ci";
+          log_warnings = 1;
+        };
+      };
+    };
+  };
+
+  testScript = ''
+    machine.wait_for_unit("mysql")
+
+    machine.succeed(
+         "sqlite3mysql --version | grep ${pkgs.sqlite3-to-mysql.version}"
+    )
+
+    # invalid_database_name: assert '1045 (28000): Access denied' in "1044 (42000): Access denied [...]
+    # invalid_database_user: does not return non-zero exit for some reason
+    # test_version: has problems importing sqlite3_to_mysql and determining the version
+    machine.succeed(
+         "cd ${pkgs.sqlite3-to-mysql.src} \
+          && pytest -v --no-docker -k \"not test_invalid_database_name and not test_invalid_database_user and not test_version\""
+    )
+  '';
+})
diff --git a/nixos/tests/sssd-ldap.nix b/nixos/tests/sssd-ldap.nix
index f816c0652cc..ff83e96068a 100644
--- a/nixos/tests/sssd-ldap.nix
+++ b/nixos/tests/sssd-ldap.nix
@@ -28,7 +28,7 @@ in import ./make-test-python.nix ({pkgs, ...}: {
             attrs = {
               objectClass = [ "olcDatabaseConfig" "olcMdbConfig" ];
               olcDatabase = "{1}mdb";
-              olcDbDirectory = "/var/db/openldap";
+              olcDbDirectory = "/var/lib/openldap/db";
               olcSuffix = dbSuffix;
               olcRootDN = "cn=${ldapRootUser},${dbSuffix}";
               olcRootPW = ldapRootPassword;
@@ -67,6 +67,8 @@ in import ./make-test-python.nix ({pkgs, ...}: {
 
     services.sssd = {
       enable = true;
+      # just for testing purposes, don't put this into the Nix store in production!
+      environmentFile = "${pkgs.writeText "ldap-root" "LDAP_BIND_PW=${ldapRootPassword}"}";
       config = ''
         [sssd]
         config_file_version = 2
@@ -80,7 +82,7 @@ in import ./make-test-python.nix ({pkgs, ...}: {
         ldap_search_base = ${dbSuffix}
         ldap_default_bind_dn = cn=${ldapRootUser},${dbSuffix}
         ldap_default_authtok_type = password
-        ldap_default_authtok = ${ldapRootPassword}
+        ldap_default_authtok = $LDAP_BIND_PW
       '';
     };
   };
@@ -89,6 +91,11 @@ in import ./make-test-python.nix ({pkgs, ...}: {
     machine.start()
     machine.wait_for_unit("openldap.service")
     machine.wait_for_unit("sssd.service")
-    machine.succeed("getent passwd ${testUser}")
+    result = machine.execute("getent passwd ${testUser}")
+    if result[0] == 0:
+      assert "${testUser}" in result[1]
+    else:
+      machine.wait_for_console_text("Backend is online")
+      machine.succeed("getent passwd ${testUser}")
   '';
 })
diff --git a/nixos/tests/stratis/default.nix b/nixos/tests/stratis/default.nix
new file mode 100644
index 00000000000..42daadd5fca
--- /dev/null
+++ b/nixos/tests/stratis/default.nix
@@ -0,0 +1,8 @@
+{ system ? builtins.currentSystem
+, pkgs ? import ../../.. { inherit system; }
+}:
+
+{
+  simple = import ./simple.nix { inherit system pkgs; };
+  encryption = import ./encryption.nix { inherit system pkgs; };
+}
diff --git a/nixos/tests/stratis/encryption.nix b/nixos/tests/stratis/encryption.nix
new file mode 100644
index 00000000000..a555ff8a8e8
--- /dev/null
+++ b/nixos/tests/stratis/encryption.nix
@@ -0,0 +1,32 @@
+import ../make-test-python.nix ({ pkgs, ... }:
+  {
+    name = "stratis";
+
+    meta = with pkgs.lib.maintainers; {
+      maintainers = [ nickcao ];
+    };
+
+    nodes.machine = { pkgs, ... }: {
+      services.stratis.enable = true;
+      virtualisation.emptyDiskImages = [ 2048 ];
+    };
+
+    testScript =
+      let
+        testkey1 = pkgs.writeText "testkey1" "supersecret1";
+        testkey2 = pkgs.writeText "testkey2" "supersecret2";
+      in
+      ''
+        machine.wait_for_unit("stratisd")
+        # test creation of encrypted pool and filesystem
+        machine.succeed("stratis key  set    testkey1  --keyfile-path ${testkey1}")
+        machine.succeed("stratis key  set    testkey2  --keyfile-path ${testkey2}")
+        machine.succeed("stratis pool create testpool /dev/vdb --key-desc testkey1")
+        machine.succeed("stratis fs   create testpool testfs")
+        # test rebinding encrypted pool
+        machine.succeed("stratis pool rebind keyring  testpool testkey2")
+        # test restarting encrypted pool
+        machine.succeed("stratis pool stop   testpool")
+        machine.succeed("stratis pool start  --name testpool --unlock-method keyring")
+      '';
+  })
diff --git a/nixos/tests/stratis/simple.nix b/nixos/tests/stratis/simple.nix
new file mode 100644
index 00000000000..7357d71fc52
--- /dev/null
+++ b/nixos/tests/stratis/simple.nix
@@ -0,0 +1,39 @@
+import ../make-test-python.nix ({ pkgs, ... }:
+  {
+    name = "stratis";
+
+    meta = with pkgs.lib.maintainers; {
+      maintainers = [ nickcao ];
+    };
+
+    nodes.machine = { pkgs, ... }: {
+      services.stratis.enable = true;
+      virtualisation.emptyDiskImages = [ 1024 1024 1024 1024 ];
+    };
+
+    testScript = ''
+      machine.wait_for_unit("stratisd")
+      # test pool creation
+      machine.succeed("stratis pool create     testpool /dev/vdb")
+      machine.succeed("stratis pool add-data   testpool /dev/vdc")
+      machine.succeed("stratis pool init-cache testpool /dev/vdd")
+      machine.succeed("stratis pool add-cache  testpool /dev/vde")
+      # test filesystem creation and rename
+      machine.succeed("stratis filesystem create testpool testfs0")
+      machine.succeed("stratis filesystem rename testpool testfs0 testfs1")
+      # test snapshot
+      machine.succeed("mkdir -p /mnt/testfs1 /mnt/testfs2")
+      machine.wait_for_file("/dev/stratis/testpool/testfs1")
+      machine.succeed("mount /dev/stratis/testpool/testfs1 /mnt/testfs1")
+      machine.succeed("echo test0 > /mnt/testfs1/test0")
+      machine.succeed("echo test1 > /mnt/testfs1/test1")
+      machine.succeed("stratis filesystem snapshot testpool testfs1 testfs2")
+      machine.succeed("echo test2 > /mnt/testfs1/test1")
+      machine.wait_for_file("/dev/stratis/testpool/testfs2")
+      machine.succeed("mount /dev/stratis/testpool/testfs2 /mnt/testfs2")
+      assert "test0" in machine.succeed("cat /mnt/testfs1/test0")
+      assert "test0" in machine.succeed("cat /mnt/testfs2/test0")
+      assert "test2" in machine.succeed("cat /mnt/testfs1/test1")
+      assert "test1" in machine.succeed("cat /mnt/testfs2/test1")
+    '';
+  })
diff --git a/nixos/tests/stunnel.nix b/nixos/tests/stunnel.nix
new file mode 100644
index 00000000000..22c087290fc
--- /dev/null
+++ b/nixos/tests/stunnel.nix
@@ -0,0 +1,174 @@
+{ system ? builtins.currentSystem, config ? { }
+, pkgs ? import ../.. { inherit system config; } }:
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
+with pkgs.lib;
+
+let
+  stunnelCommon = {
+    services.stunnel = {
+      enable = true;
+      user = "stunnel";
+    };
+    users.groups.stunnel = { };
+    users.users.stunnel = {
+      isSystemUser = true;
+      group = "stunnel";
+    };
+  };
+  makeCert = { config, pkgs, ... }: {
+    system.activationScripts.create-test-cert = stringAfter [ "users" ] ''
+      ${pkgs.openssl}/bin/openssl req -batch -x509 -newkey rsa -nodes -out /test-cert.pem -keyout /test-key.pem -subj /CN=${config.networking.hostName}
+      ( umask 077; cat /test-key.pem /test-cert.pem > /test-key-and-cert.pem )
+      chown stunnel /test-key.pem /test-key-and-cert.pem
+    '';
+  };
+  serverCommon = { pkgs, ... }: {
+    networking.firewall.allowedTCPPorts = [ 443 ];
+    services.stunnel.servers.https = {
+      accept = "443";
+      connect = 80;
+      cert = "/test-key-and-cert.pem";
+    };
+    systemd.services.simple-webserver = {
+      wantedBy = [ "multi-user.target" ];
+      script = ''
+        cd /etc/webroot
+        ${pkgs.python3}/bin/python -m http.server 80
+      '';
+    };
+  };
+  copyCert = src: dest: filename: ''
+    from shlex import quote
+    ${src}.wait_for_file("/test-key-and-cert.pem")
+    server_cert = ${src}.succeed("cat /test-cert.pem")
+    ${dest}.succeed("echo %s > ${filename}" % quote(server_cert))
+  '';
+
+in {
+  basicServer = makeTest {
+    name = "basicServer";
+
+    nodes = {
+      client = { };
+      server = {
+        imports = [ makeCert serverCommon stunnelCommon ];
+        environment.etc."webroot/index.html".text = "well met";
+      };
+    };
+
+    testScript = ''
+      start_all()
+
+      ${copyCert "server" "client" "/authorized-server-cert.crt"}
+
+      server.wait_for_unit("simple-webserver")
+      server.wait_for_unit("stunnel")
+
+      client.succeed("curl --fail --cacert /authorized-server-cert.crt https://server/ > out")
+      client.succeed('[[ "$(< out)" == "well met" ]]')
+    '';
+  };
+
+  serverAndClient = makeTest {
+    name = "serverAndClient";
+
+    nodes = {
+      client = {
+        imports = [ stunnelCommon ];
+        services.stunnel.clients = {
+          httpsClient = {
+            accept = "80";
+            connect = "server:443";
+            CAFile = "/authorized-server-cert.crt";
+          };
+          httpsClientWithHostVerify = {
+            accept = "81";
+            connect = "server:443";
+            CAFile = "/authorized-server-cert.crt";
+            verifyHostname = "server";
+          };
+          httpsClientWithHostVerifyFail = {
+            accept = "82";
+            connect = "server:443";
+            CAFile = "/authorized-server-cert.crt";
+            verifyHostname = "wronghostname";
+          };
+        };
+      };
+      server = {
+        imports = [ makeCert serverCommon stunnelCommon ];
+        environment.etc."webroot/index.html".text = "hello there";
+      };
+    };
+
+    testScript = ''
+      start_all()
+
+      ${copyCert "server" "client" "/authorized-server-cert.crt"}
+
+      server.wait_for_unit("simple-webserver")
+      server.wait_for_unit("stunnel")
+
+      # In case stunnel came up before we got the server's cert copied over
+      client.succeed("systemctl reload-or-restart stunnel")
+
+      client.succeed("curl --fail http://localhost/ > out")
+      client.succeed('[[ "$(< out)" == "hello there" ]]')
+
+      client.succeed("curl --fail http://localhost:81/ > out")
+      client.succeed('[[ "$(< out)" == "hello there" ]]')
+
+      client.fail("curl --fail http://localhost:82/ > out")
+      client.succeed('[[ "$(< out)" == "" ]]')
+    '';
+  };
+
+  mutualAuth = makeTest {
+    name = "mutualAuth";
+
+    nodes = rec {
+      client = {
+        imports = [ makeCert stunnelCommon ];
+        services.stunnel.clients.authenticated-https = {
+          accept = "80";
+          connect = "server:443";
+          verifyPeer = true;
+          CAFile = "/authorized-server-cert.crt";
+          cert = "/test-cert.pem";
+          key = "/test-key.pem";
+        };
+      };
+      wrongclient = client;
+      server = {
+        imports = [ makeCert serverCommon stunnelCommon ];
+        services.stunnel.servers.https = {
+          CAFile = "/authorized-client-certs.crt";
+          verifyPeer = true;
+        };
+        environment.etc."webroot/index.html".text = "secret handshake";
+      };
+    };
+
+    testScript = ''
+      start_all()
+
+      ${copyCert "server" "client" "/authorized-server-cert.crt"}
+      ${copyCert "client" "server" "/authorized-client-certs.crt"}
+      ${copyCert "server" "wrongclient" "/authorized-server-cert.crt"}
+
+      # In case stunnel came up before we got the cross-certs in place
+      client.succeed("systemctl reload-or-restart stunnel")
+      server.succeed("systemctl reload-or-restart stunnel")
+      wrongclient.succeed("systemctl reload-or-restart stunnel")
+
+      server.wait_for_unit("simple-webserver")
+      client.fail("curl --fail --insecure https://server/ > out")
+      client.succeed('[[ "$(< out)" == "" ]]')
+      client.succeed("curl --fail http://localhost/ > out")
+      client.succeed('[[ "$(< out)" == "secret handshake" ]]')
+      wrongclient.fail("curl --fail http://localhost/ > out")
+      wrongclient.succeed('[[ "$(< out)" == "" ]]')
+    '';
+  };
+}
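All three stunnel scenarios reduce to TLS peer verification against the ad-hoc certificates distributed by copyCert. The same check can be reproduced by hand from the client; a sketch using the hostnames and paths from the test above (the openssl invocation is an assumed rough equivalent of the curl call):

    # trust the copied server certificate explicitly, as the test does
    curl --fail --cacert /authorized-server-cert.crt https://server/

    # low-level view of the same handshake and verification
    openssl s_client -connect server:443 -CAfile /authorized-server-cert.crt \
      -verify_return_error </dev/null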
diff --git a/nixos/tests/swap-partition.nix b/nixos/tests/swap-partition.nix
new file mode 100644
index 00000000000..2279630b57b
--- /dev/null
+++ b/nixos/tests/swap-partition.nix
@@ -0,0 +1,48 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }:
+{
+  name = "swap-partition";
+
+  nodes.machine =
+    { config, pkgs, lib, ... }:
+    {
+      virtualisation.useDefaultFilesystems = false;
+
+      virtualisation.bootDevice = "/dev/vda1";
+
+      boot.initrd.postDeviceCommands = ''
+        if ! test -b /dev/vda1; then
+          ${pkgs.parted}/bin/parted --script /dev/vda -- mklabel msdos
+          ${pkgs.parted}/bin/parted --script /dev/vda -- mkpart primary 1MiB -250MiB
+          ${pkgs.parted}/bin/parted --script /dev/vda -- mkpart primary -250MiB 100%
+          sync
+        fi
+
+        FSTYPE=$(blkid -o value -s TYPE /dev/vda1 || true)
+        if test -z "$FSTYPE"; then
+          ${pkgs.e2fsprogs}/bin/mke2fs -t ext4 -L root /dev/vda1
+          ${pkgs.util-linux}/bin/mkswap --label swap /dev/vda2
+        fi
+      '';
+
+      virtualisation.fileSystems = {
+        "/" = {
+          device = "/dev/disk/by-label/root";
+          fsType = "ext4";
+        };
+      };
+
+      swapDevices = [
+        {
+          device = "/dev/disk/by-label/swap";
+        }
+      ];
+    };
+
+  testScript = ''
+    machine.wait_for_unit("multi-user.target")
+
+    with subtest("Swap is active"):
+      # Doesn't matter if the numbers reported by `free` are slightly off due to unit conversions.
+      machine.succeed("free -h | grep -E 'Swap:\s+2[45][0-9]Mi'")
+  '';
+})
diff --git a/nixos/tests/switch-test.nix b/nixos/tests/switch-test.nix
index 0198866b6ff..f891a2cb2f4 100644
--- a/nixos/tests/switch-test.nix
+++ b/nixos/tests/switch-test.nix
@@ -214,6 +214,25 @@ in {
           systemd.services."escaped\\x2ddash".serviceConfig.X-Test = "test";
         };
 
+        unitStartingWithDash.configuration = {
+          systemd.services."-" = {
+            wantedBy = [ "multi-user.target" ];
+            serviceConfig = {
+              Type = "oneshot";
+              RemainAfterExit = true;
+              ExecStart = "${pkgs.coreutils}/bin/true";
+            };
+          };
+        };
+
+        unitStartingWithDashModified.configuration = {
+          imports = [ unitStartingWithDash.configuration ];
+          systemd.services."-" = {
+            reloadIfChanged = true;
+            serviceConfig.ExecReload = "${pkgs.coreutils}/bin/true";
+          };
+        };
+
         unitWithRequirement.configuration = {
           systemd.services.required-service = {
             wantedBy = [ "multi-user.target" ];
@@ -637,9 +656,27 @@ in {
         assert_contains(out, "\nstarting the following units: escaped\\x2ddash.service\n")
         assert_lacks(out, "the following new units were started:")
 
+        # Ensure units can start with a dash
+        out = switch_to_specialisation("${machine}", "unitStartingWithDash")
+        assert_contains(out, "stopping the following units: escaped\\x2ddash.service\n")
+        assert_lacks(out, "NOT restarting the following changed units:")
+        assert_lacks(out, "reloading the following units:")
+        assert_lacks(out, "\nrestarting the following units:")
+        assert_lacks(out, "\nstarting the following units:")
+        assert_contains(out, "the following new units were started: -.service\n")
+
+        # The regression only occurs when reloading units
+        out = switch_to_specialisation("${machine}", "unitStartingWithDashModified")
+        assert_lacks(out, "stopping the following units:")
+        assert_lacks(out, "NOT restarting the following changed units:")
+        assert_contains(out, "reloading the following units: -.service")
+        assert_lacks(out, "\nrestarting the following units:")
+        assert_lacks(out, "\nstarting the following units:")
+        assert_lacks(out, "the following new units were started:")
+
         # Ensure units that require changed units are properly reloaded
         out = switch_to_specialisation("${machine}", "unitWithRequirement")
-        assert_contains(out, "stopping the following units: escaped\\x2ddash.service\n")
+        assert_contains(out, "stopping the following units: -.service\n")
         assert_lacks(out, "NOT restarting the following changed units:")
         assert_lacks(out, "reloading the following units:")
         assert_lacks(out, "\nrestarting the following units:")
diff --git a/nixos/tests/sympa.nix b/nixos/tests/sympa.nix
index 76ca17d0a18..80daa4134f7 100644
--- a/nixos/tests/sympa.nix
+++ b/nixos/tests/sympa.nix
@@ -13,7 +13,7 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
             webHost = "localhost";
           };
         };
-        listMasters = [ "joe@example.org" ];
+        listMasters = [ "bob@example.org" ];
         web.enable = true;
         web.https = false;
         database = {
diff --git a/nixos/tests/systemd-bpf.nix b/nixos/tests/systemd-bpf.nix
new file mode 100644
index 00000000000..e11347a2a81
--- /dev/null
+++ b/nixos/tests/systemd-bpf.nix
@@ -0,0 +1,42 @@
+import ./make-test-python.nix ({ lib, ... }: {
+  name = "systemd-bpf";
+  meta = with lib.maintainers; {
+    maintainers = [ veehaitch ];
+  };
+  nodes = {
+    node1 = {
+      virtualisation.vlans = [ 1 ];
+      networking = {
+        useNetworkd = true;
+        useDHCP = false;
+        firewall.enable = false;
+        interfaces.eth1.ipv4.addresses = [
+          { address = "192.168.1.1"; prefixLength = 24; }
+        ];
+      };
+    };
+
+    node2 = {
+      virtualisation.vlans = [ 1 ];
+      networking = {
+        useNetworkd = true;
+        useDHCP = false;
+        firewall.enable = false;
+        interfaces.eth1.ipv4.addresses = [
+          { address = "192.168.1.2"; prefixLength = 24; }
+        ];
+      };
+    };
+  };
+
+  testScript = ''
+    start_all()
+    node1.wait_for_unit("systemd-networkd-wait-online.service")
+    node2.wait_for_unit("systemd-networkd-wait-online.service")
+
+    with subtest("test RestrictNetworkInterfaces= works"):
+      node1.succeed("ping -c 5 192.168.1.2")
+      node1.succeed("systemd-run -t -p RestrictNetworkInterfaces='eth1' ping -c 5 192.168.1.2")
+      node1.fail("systemd-run -t -p RestrictNetworkInterfaces='lo' ping -c 5 192.168.1.2")
+  '';
+})
diff --git a/nixos/tests/systemd-confinement.nix b/nixos/tests/systemd-confinement.nix
index bde5b770ea5..428888d41a2 100644
--- a/nixos/tests/systemd-confinement.nix
+++ b/nixos/tests/systemd-confinement.nix
@@ -153,7 +153,7 @@ import ./make-test-python.nix {
 
     options.__testSteps = lib.mkOption {
       type = lib.types.lines;
-      description = "All of the test steps combined as a single script.";
+      description = lib.mdDoc "All of the test steps combined as a single script.";
     };
 
     config.environment.systemPackages = lib.singleton testClient;
diff --git a/nixos/tests/systemd-coredump.nix b/nixos/tests/systemd-coredump.nix
new file mode 100644
index 00000000000..62137820878
--- /dev/null
+++ b/nixos/tests/systemd-coredump.nix
@@ -0,0 +1,44 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+
+let
+
+  crasher = pkgs.writeCBin "crasher" "int main;";
+
+  commonConfig = {
+    systemd.services.crasher.serviceConfig = {
+      ExecStart = "${crasher}/bin/crasher";
+      StateDirectory = "crasher";
+      WorkingDirectory = "%S/crasher";
+      Restart = "no";
+    };
+  };
+
+in
+
+{
+  name = "systemd-coredump";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ squalus ];
+  };
+
+  nodes.machine1 = { pkgs, lib, ... }: commonConfig;
+  nodes.machine2 = { pkgs, lib, ... }: lib.recursiveUpdate commonConfig {
+    systemd.coredump.enable = false;
+    systemd.package = pkgs.systemd.override {
+      withCoredump = false;
+    };
+  };
+
+  testScript = ''
+    with subtest("systemd-coredump enabled"):
+      machine1.wait_for_unit("multi-user.target")
+      machine1.wait_for_unit("systemd-coredump.socket")
+      machine1.systemctl("start crasher");
+      machine1.wait_until_succeeds("coredumpctl list | grep crasher", timeout=10)
+      machine1.fail("stat /var/lib/crasher/core")
+
+    with subtest("systemd-coredump disabled"):
+      machine2.systemctl("start crasher");
+      machine2.wait_until_succeeds("stat /var/lib/crasher/core", timeout=10)
+  '';
+})
diff --git a/nixos/tests/systemd-cryptenroll.nix b/nixos/tests/systemd-cryptenroll.nix
index 055ae7d1681..9ee2d280fbb 100644
--- a/nixos/tests/systemd-cryptenroll.nix
+++ b/nixos/tests/systemd-cryptenroll.nix
@@ -2,6 +2,7 @@ import ./make-test-python.nix ({ pkgs, ... }: {
   name = "systemd-cryptenroll";
   meta = with pkgs.lib.maintainers; {
     maintainers = [ ymatsiuk ];
+    broken = true; # times out after two hours, details -> https://github.com/NixOS/nixpkgs/issues/167994
   };
 
   nodes.machine = { pkgs, lib, ... }: {
diff --git a/nixos/tests/systemd-initrd-luks-fido2.nix b/nixos/tests/systemd-initrd-luks-fido2.nix
new file mode 100644
index 00000000000..133e552a3dc
--- /dev/null
+++ b/nixos/tests/systemd-initrd-luks-fido2.nix
@@ -0,0 +1,45 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }: {
+  name = "systemd-initrd-luks-fido2";
+
+  nodes.machine = { pkgs, config, ... }: {
+    # Use systemd-boot
+    virtualisation = {
+      emptyDiskImages = [ 512 ];
+      useBootLoader = true;
+      useEFIBoot = true;
+      qemu.package = lib.mkForce (pkgs.qemu_test.override { canokeySupport = true; });
+      qemu.options = [ "-device canokey,file=/tmp/canokey-file" ];
+    };
+    boot.loader.systemd-boot.enable = true;
+
+    boot.initrd.systemd.enable = true;
+
+    environment.systemPackages = with pkgs; [ cryptsetup ];
+
+    specialisation.boot-luks.configuration = {
+      boot.initrd.luks.devices = lib.mkVMOverride {
+        cryptroot = {
+          device = "/dev/vdc";
+          crypttabExtraOpts = [ "fido2-device=auto" ];
+        };
+      };
+      virtualisation.bootDevice = "/dev/mapper/cryptroot";
+    };
+  };
+
+  testScript = ''
+    # Create encrypted volume
+    machine.wait_for_unit("multi-user.target")
+    machine.succeed("echo -n supersecret | cryptsetup luksFormat -q --iter-time=1 /dev/vdc -")
+    machine.succeed("PASSWORD=supersecret SYSTEMD_LOG_LEVEL=debug systemd-cryptenroll --fido2-device=auto /dev/vdc |& systemd-cat")
+
+    # Boot from the encrypted disk
+    machine.succeed("bootctl set-default nixos-generation-1-specialisation-boot-luks.conf")
+    machine.succeed("sync")
+    machine.crash()
+
+    # Boot and decrypt the disk
+    machine.wait_for_unit("multi-user.target")
+    assert "/dev/mapper/cryptroot on / type ext4" in machine.succeed("mount")
+  '';
+})
diff --git a/nixos/tests/systemd-initrd-luks-password.nix b/nixos/tests/systemd-initrd-luks-password.nix
index e8e651f7b35..55d0b4324b4 100644
--- a/nixos/tests/systemd-initrd-luks-password.nix
+++ b/nixos/tests/systemd-initrd-luks-password.nix
@@ -23,6 +23,8 @@ import ./make-test-python.nix ({ lib, pkgs, ... }: {
         cryptroot2.device = "/dev/vdd";
       };
       virtualisation.bootDevice = "/dev/mapper/cryptroot";
+      # test mounting device unlocked in initrd after switching root
+      virtualisation.fileSystems."/cryptroot2".device = "/dev/mapper/cryptroot2";
     };
   };
 
@@ -31,6 +33,8 @@ import ./make-test-python.nix ({ lib, pkgs, ... }: {
     machine.wait_for_unit("multi-user.target")
     machine.succeed("echo -n supersecret | cryptsetup luksFormat -q --iter-time=1 /dev/vdc -")
     machine.succeed("echo -n supersecret | cryptsetup luksFormat -q --iter-time=1 /dev/vdd -")
+    machine.succeed("echo -n supersecret | cryptsetup luksOpen   -q               /dev/vdd cryptroot2")
+    machine.succeed("mkfs.ext4 /dev/mapper/cryptroot2")
 
     # Boot from the encrypted disk
     machine.succeed("bootctl set-default nixos-generation-1-specialisation-boot-luks.conf")
@@ -44,5 +48,6 @@ import ./make-test-python.nix ({ lib, pkgs, ... }: {
     machine.wait_for_unit("multi-user.target")
 
     assert "/dev/mapper/cryptroot on / type ext4" in machine.succeed("mount")
+    assert "/dev/mapper/cryptroot2 on /cryptroot2 type ext4" in machine.succeed("mount")
   '';
 })
diff --git a/nixos/tests/systemd-initrd-luks-tpm2.nix b/nixos/tests/systemd-initrd-luks-tpm2.nix
new file mode 100644
index 00000000000..085088d2ee2
--- /dev/null
+++ b/nixos/tests/systemd-initrd-luks-tpm2.nix
@@ -0,0 +1,72 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }: {
+  name = "systemd-initrd-luks-tpm2";
+
+  nodes.machine = { pkgs, ... }: {
+    # Use systemd-boot
+    virtualisation = {
+      emptyDiskImages = [ 512 ];
+      useBootLoader = true;
+      useEFIBoot = true;
+      qemu.options = ["-chardev socket,id=chrtpm,path=/tmp/mytpm1/swtpm-sock -tpmdev emulator,id=tpm0,chardev=chrtpm -device tpm-tis,tpmdev=tpm0"];
+    };
+    boot.loader.systemd-boot.enable = true;
+
+    boot.initrd.availableKernelModules = [ "tpm_tis" ];
+
+    environment.systemPackages = with pkgs; [ cryptsetup ];
+    boot.initrd.systemd = {
+      enable = true;
+    };
+
+    specialisation.boot-luks.configuration = {
+      boot.initrd.luks.devices = lib.mkVMOverride {
+        cryptroot = {
+          device = "/dev/vdc";
+          crypttabExtraOpts = [ "tpm2-device=auto" ];
+        };
+      };
+      virtualisation.bootDevice = "/dev/mapper/cryptroot";
+    };
+  };
+
+  testScript = ''
+    import subprocess
+    import os
+    import time
+
+
+    class Tpm:
+        def __init__(self):
+            os.mkdir("/tmp/mytpm1")
+            self.start()
+
+        def start(self):
+            self.proc = subprocess.Popen(["${pkgs.swtpm}/bin/swtpm", "socket", "--tpmstate", "dir=/tmp/mytpm1", "--ctrl", "type=unixio,path=/tmp/mytpm1/swtpm-sock", "--log", "level=20", "--tpm2"])
+
+        def wait_for_death_then_restart(self):
+            while self.proc.poll() is None:
+                print("waiting for tpm to die")
+                time.sleep(1)
+            assert self.proc.returncode == 0
+            self.start()
+
+    tpm = Tpm()
+
+
+    # Create encrypted volume
+    machine.wait_for_unit("multi-user.target")
+    machine.succeed("echo -n supersecret | cryptsetup luksFormat -q --iter-time=1 /dev/vdc -")
+    machine.succeed("PASSWORD=supersecret SYSTEMD_LOG_LEVEL=debug systemd-cryptenroll --tpm2-pcrs= --tpm2-device=auto /dev/vdc |& systemd-cat")
+
+    # Boot from the encrypted disk
+    machine.succeed("bootctl set-default nixos-generation-1-specialisation-boot-luks.conf")
+    machine.succeed("sync")
+    machine.crash()
+
+    tpm.wait_for_death_then_restart()
+
+    # Boot and decrypt the disk
+    machine.wait_for_unit("multi-user.target")
+    assert "/dev/mapper/cryptroot on / type ext4" in machine.succeed("mount")
+  '';
+})
diff --git a/nixos/tests/systemd-initrd-modprobe.nix b/nixos/tests/systemd-initrd-modprobe.nix
new file mode 100644
index 00000000000..bf635a10d0e
--- /dev/null
+++ b/nixos/tests/systemd-initrd-modprobe.nix
@@ -0,0 +1,17 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }: {
+  name = "systemd-initrd-modprobe";
+
+  nodes.machine = { pkgs, ... }: {
+    boot.initrd.systemd.enable = true;
+    boot.initrd.kernelModules = [ "loop" ]; # Load module in initrd.
+    boot.extraModprobeConfig = ''
+      options loop max_loop=42
+    '';
+  };
+
+  testScript = ''
+    machine.wait_for_unit("multi-user.target")
+    max_loop = machine.succeed("cat /sys/module/loop/parameters/max_loop")
+    assert int(max_loop) == 42, "Parameter should be respected for initrd kernel modules"
+  '';
+})
diff --git a/nixos/tests/systemd-machinectl.nix b/nixos/tests/systemd-machinectl.nix
index ce0c56a360e..b8ed0c33e8e 100644
--- a/nixos/tests/systemd-machinectl.nix
+++ b/nixos/tests/systemd-machinectl.nix
@@ -1,4 +1,4 @@
-import ./make-test-python.nix (
+import ./make-test-python.nix ({ pkgs, ... }:
   let
 
     container = {
@@ -18,6 +18,7 @@ import ./make-test-python.nix (
     };
 
     containerSystem = (import ../lib/eval-config.nix {
+      inherit (pkgs) system;
       modules = [ container ];
     }).config.system.build.toplevel;
 
@@ -32,15 +33,25 @@ import ./make-test-python.nix (
       # use networkd to obtain systemd network setup
       networking.useNetworkd = true;
       networking.useDHCP = false;
-      services.resolved.enable = false;
-
-      # open DHCP server on interface to container
-      networking.firewall.trustedInterfaces = [ "ve-+" ];
 
       # do not try to access cache.nixos.org
       nix.settings.substituters = lib.mkForce [ ];
 
+      # auto-start container
+      systemd.targets.machines.wants = [ "systemd-nspawn@${containerName}.service" ];
+
       virtualisation.additionalPaths = [ containerSystem ];
+
+      # not needed, but we want to test the nspawn file generation
+      systemd.nspawn.${containerName} = { };
+
+      systemd.services."systemd-nspawn@${containerName}" = {
+        serviceConfig.Environment = [
+          # Disable tmpfs for /tmp
+          "SYSTEMD_NSPAWN_TMPFS_TMP=0"
+        ];
+        overrideStrategy = "asDropin";
+      };
     };
 
     testScript = ''
@@ -60,11 +71,17 @@ import ./make-test-python.nix (
       machine.succeed("machinectl start ${containerName}");
       machine.wait_until_succeeds("systemctl -M ${containerName} is-active default.target");
 
+      # Test nss_mymachines without nscd
+      machine.succeed('LD_LIBRARY_PATH="/run/current-system/sw/lib" getent -s hosts:mymachines hosts ${containerName}');
+
+      # Test nss_mymachines via nscd
+      machine.succeed("getent hosts ${containerName}");
+
       # Test systemd-nspawn network configuration
       machine.succeed("ping -n -c 1 ${containerName}");
 
       # Test systemd-nspawn uses a user namespace
-      machine.succeed("test `stat ${containerRoot}/var/empty -c %u%g` != 00");
+      machine.succeed("test $(machinectl status ${containerName} | grep 'UID Shift: ' | wc -l) = 1")
 
       # Test systemd-nspawn reboot
       machine.succeed("machinectl shell ${containerName} /run/current-system/sw/bin/reboot");
@@ -74,8 +91,20 @@ import ./make-test-python.nix (
       machine.succeed("machinectl reboot ${containerName}");
       machine.wait_until_succeeds("systemctl -M ${containerName} is-active default.target");
 
+      # Restart machine
+      machine.shutdown()
+      machine.start()
+      machine.wait_for_unit("default.target");
+
+      # Test auto-start
+      machine.succeed("machinectl show ${containerName}")
+
       # Test machinectl stop
       machine.succeed("machinectl stop ${containerName}");
+      machine.wait_until_succeeds("test $(systemctl is-active systemd-nspawn@${containerName}) = inactive");
+
+      # Test tmpfs for /tmp
+      machine.fail("mountpoint /tmp");
 
       # Show how to delete the container
       machine.succeed("chattr -i ${containerRoot}/var/empty");
diff --git a/nixos/tests/systemd-networkd-ipv6-prefix-delegation.nix b/nixos/tests/systemd-networkd-ipv6-prefix-delegation.nix
index 37a89fc21e4..279b9aac8ed 100644
--- a/nixos/tests/systemd-networkd-ipv6-prefix-delegation.nix
+++ b/nixos/tests/systemd-networkd-ipv6-prefix-delegation.nix
@@ -7,10 +7,10 @@
 # - VLAN 1 is the connection between the ISP and the router
 # - VLAN 2 is the connection between the router and the client
 
-import ./make-test-python.nix ({pkgs, ...}: {
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
   name = "systemd-networkd-ipv6-prefix-delegation";
-  meta = with pkgs.lib.maintainers; {
-    maintainers = [ andir ];
+  meta = with lib.maintainers; {
+    maintainers = [ andir hexa ];
   };
   nodes = {
 
@@ -22,26 +22,38 @@ import ./make-test-python.nix ({pkgs, ...}: {
     #
     # Note: On the ISPs device we don't really care if we are using networkd in
     # this example. That being said we can't use it (yet) as networkd doesn't
-    # implement the serving side of DHCPv6. We will use ISC's well aged dhcpd6
-    # for that task.
+    # implement the serving side of DHCPv6. We will use ISC Kea for that task.
     isp = { lib, pkgs, ... }: {
       virtualisation.vlans = [ 1 ];
       networking = {
         useDHCP = false;
         firewall.enable = false;
-        interfaces.eth1.ipv4.addresses = lib.mkForce []; # no need for legacy IP
-        interfaces.eth1.ipv6.addresses = lib.mkForce [
-          { address = "2001:DB8::1"; prefixLength = 64; }
-        ];
+        interfaces.eth1 = lib.mkForce {}; # Don't use scripted networking
+      };
+
+      systemd.network = {
+        enable = true;
+
+        networks = {
+          "eth1" = {
+            matchConfig.Name = "eth1";
+            address = [
+              "2001:DB8::1/64"
+            ];
+            networkConfig.IPForward = true;
+          };
+        };
       };
 
       # Since we want to program the routes that we delegate to the "customer"
-      # into our routing table we must give dhcpd the required privs.
-      systemd.services.dhcpd6.serviceConfig.AmbientCapabilities =
-        [ "CAP_NET_ADMIN" ];
+      # into our routing table we must provide kea with the required capability.
+      systemd.services.kea-dhcp6-server.serviceConfig = {
+        AmbientCapabilities = [ "CAP_NET_ADMIN" ];
+        CapabilityBoundingSet = [ "CAP_NET_ADMIN" ];
+      };
 
       services = {
-        # Configure the DHCPv6 server
+        # Configure the DHCPv6 server to hand out both IA_NA and IA_PD.
         #
         # We will hand out /48 prefixes from the subnet 2001:DB8:F000::/36.
         # That gives us ~8k prefixes. That should be enough for this test.
@@ -49,31 +61,70 @@ import ./make-test-python.nix ({pkgs, ...}: {
         # Since (usually) you will not receive a prefix with the router
         # advertisements we also hand out /128 leases from the range
         # 2001:DB8:0000:0000:FFFF::/112.
-        dhcpd6 = {
+        kea.dhcp6 = {
           enable = true;
-          interfaces = [ "eth1" ];
-          extraConfig = ''
-            subnet6 2001:DB8::/36 {
-              range6 2001:DB8:0000:0000:FFFF:: 2001:DB8:0000:0000:FFFF::FFFF;
-              prefix6 2001:DB8:F000:: 2001:DB8:FFFF:: /48;
-            }
-
-            # This is the secret sauce. We have to extract the prefix and the
-            # next hop when commiting the lease to the database.  dhcpd6
-            # (rightfully) has not concept of adding routes to the systems
-            # routing table. It really depends on the setup.
+          settings = {
+            interfaces-config.interfaces = [ "eth1" ];
+            subnet6 = [ {
+              interface = "eth1";
+              subnet = "2001:DB8:F::/36";
+              pd-pools = [ {
+                prefix = "2001:DB8:F::";
+                prefix-len = 36;
+                delegated-len = 48;
+              } ];
+              pools = [ {
+                pool = "2001:DB8:0000:0000:FFFF::-2001:DB8:0000:0000:FFFF::FFFF";
+              } ];
+            } ];
+
+            # This is the glue between Kea and the Kernel FIB. DHCPv6
+            # rightfully has no concept of setting up a route in your
+            # FIB. This step really depends on your setup.
             #
-            # In a production environment your DHCPv6 server is likely not the
-            # router. You might want to consider BGP, custom NetConf calls, …
-            # in those cases.
-            on commit {
-              set IP = pick-first-value(binary-to-ascii(16, 16, ":", substring(option dhcp6.ia-na, 16, 16)), "n/a");
-              set Prefix = pick-first-value(binary-to-ascii(16, 16, ":", suffix(option dhcp6.ia-pd, 16)), "n/a");
-              set PrefixLength = pick-first-value(binary-to-ascii(10, 8, ":", substring(suffix(option dhcp6.ia-pd, 17), 0, 1)), "n/a");
-              log(concat(IP, " ", Prefix, " ", PrefixLength));
-              execute("${pkgs.iproute2}/bin/ip", "-6", "route", "replace", concat(Prefix,"/",PrefixLength), "via", IP);
-            }
-          '';
+            # In a production environment your DHCPv6 server is likely
+            # not the router. You might want to consider BGP, NETCONF
+            # calls, … in those cases.
+            #
+            # In this example we use the run script hook, which lets us
+            # execute anything and passes information via the environment.
+            # https://kea.readthedocs.io/en/kea-2.2.0/arm/hooks.html#run-script-run-script-support-for-external-hook-scripts
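+            # The script below handles the "leases6_committed" callout: Kea
+            # passes the callout name as the first argument and per-lease data
+            # via variables such as LEASES6_SIZE, LEASES6_AT0_ADDRESS and
+            # LEASES6_AT0_PREFIX_LEN, from which a route via the querying
+            # client is installed.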
+            hooks-libraries = [ {
+              library = "${pkgs.kea}/lib/kea/hooks/libdhcp_run_script.so";
+              parameters = {
+                name = pkgs.writeShellScript "kea-run-hooks" ''
+                  export PATH="${lib.makeBinPath (with pkgs; [ coreutils iproute2 ])}"
+
+                  set -euxo pipefail
+
+                  leases6_committed() {
+                    for i in $(seq $LEASES6_SIZE); do
+                      idx=$((i-1))
+                      prefix_var="LEASES6_AT''${idx}_ADDRESS"
+                      plen_var="LEASES6_AT''${idx}_PREFIX_LEN"
+
+                      ip -6 route replace ''${!prefix_var}/''${!plen_var} via $QUERY6_REMOTE_ADDR dev $QUERY6_IFACE_NAME
+                    done
+                  }
+
+                  unknown_handler() {
+                    echo "Unhandled function call ''${*}"
+                    exit 123
+                  }
+
+                  case "$1" in
+                      "leases6_committed")
+                          leases6_committed
+                          ;;
+                      *)
+                          unknown_handler "''${@}"
+                          ;;
+                  esac
+                '';
+                sync = false;
+              };
+            } ];
+          };
         };
 
         # Finally we have to set up the router advertisements. While we could be
@@ -176,7 +227,7 @@ import ./make-test-python.nix ({pkgs, ...}: {
               IPv6AcceptRA = false;
 
               # Delegate prefixes from the DHCPv6 PD pool.
-              DHCPv6PrefixDelegation = true;
+              DHCPPrefixDelegation = true;
               IPv6SendRA = true;
             };
 
diff --git a/nixos/tests/systemd-no-tainted.nix b/nixos/tests/systemd-no-tainted.nix
new file mode 100644
index 00000000000..f0504065f2a
--- /dev/null
+++ b/nixos/tests/systemd-no-tainted.nix
@@ -0,0 +1,14 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "systemd-no-tainted";
+
+  nodes.machine = { };
+
+  testScript = ''
+    machine.wait_for_unit("multi-user.target")
+    with subtest("systemctl should not report tainted with unmerged-usr"):
+        output = machine.succeed("systemctl status")
+        print(output)
+        assert "Tainted" not in output
+        assert "unmerged-usr" not in output
+  '';
+})
diff --git a/nixos/tests/systemd-nspawn.nix b/nixos/tests/systemd-nspawn.nix
index c2cb92d1130..bc77ee2a4d1 100644
--- a/nixos/tests/systemd-nspawn.nix
+++ b/nixos/tests/systemd-nspawn.nix
@@ -10,8 +10,8 @@ let
       Key-Length: 1024
       Subkey-Type: ELG-E
       Subkey-Length: 1024
-      Name-Real: Joe Tester
-      Name-Email: joe@foo.bar
+      Name-Real: Bob Foobar
+      Name-Email: bob@foo.bar
       Expire-Date: 0
       # Do a commit here, so that we can later print "done"
       %commit
@@ -19,7 +19,7 @@ let
     EOF
     gpg --batch --generate-key foo
     rm $out/S.gpg-agent $out/S.gpg-agent.*
-    gpg --export joe@foo.bar -a > $out/pubkey.gpg
+    gpg --export bob@foo.bar -a > $out/pubkey.gpg
   '');
 
   nspawnImages = (pkgs.runCommand "localhost" { buildInputs = [ pkgs.coreutils pkgs.gnupg ]; } ''
diff --git a/nixos/tests/systemd-oomd.nix b/nixos/tests/systemd-oomd.nix
new file mode 100644
index 00000000000..55c4c135000
--- /dev/null
+++ b/nixos/tests/systemd-oomd.nix
@@ -0,0 +1,54 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+
+{
+  name = "systemd-oomd";
+
+  # This test is a simplified version of systemd's testsuite-55.
+  # https://github.com/systemd/systemd/blob/v251/test/units/testsuite-55.sh
+  nodes.machine = { pkgs, ... }: {
+    # Limit VM resource usage.
+    virtualisation.memorySize = 1024;
+    systemd.oomd.extraConfig.DefaultMemoryPressureDurationSec = "1s";
+
+    systemd.slices.workload = {
+      description = "Test slice for memory pressure kills";
+      sliceConfig = {
+        MemoryAccounting = true;
+        ManagedOOMMemoryPressure = "kill";
+        ManagedOOMMemoryPressureLimit = "10%";
+      };
+    };
+
+    systemd.services.testbloat = {
+      description = "Create a lot of memory pressure";
+      serviceConfig = {
+        Slice = "workload.slice";
+        MemoryHigh = "5M";
+        ExecStart = "${pkgs.coreutils}/bin/tail /dev/zero";
+      };
+    };
+
+    systemd.services.testchill = {
+      description = "No memory pressure";
+      serviceConfig = {
+        Slice = "workload.slice";
+        MemoryHigh = "3M";
+        ExecStart = "${pkgs.coreutils}/bin/sleep infinity";
+      };
+    };
+  };
+
+  testScript = ''
+    # Start the system.
+    machine.wait_for_unit("multi-user.target")
+    machine.succeed("oomctl")
+
+    machine.succeed("systemctl start testchill.service")
+    with subtest("OOMd should kill the bad service"):
+        machine.fail("systemctl start --wait testbloat.service")
+        assert machine.get_unit_info("testbloat.service")["Result"] == "oom-kill"
+
+    with subtest("Service without memory pressure should be untouched"):
+        machine.require_unit_state("testchill.service", "active")
+  '';
+})
diff --git a/nixos/tests/systemd-portabled.nix b/nixos/tests/systemd-portabled.nix
new file mode 100644
index 00000000000..ef38258b0d8
--- /dev/null
+++ b/nixos/tests/systemd-portabled.nix
@@ -0,0 +1,51 @@
+import ./make-test-python.nix ({pkgs, lib, ...}: let
+  demo-program = pkgs.writeShellScriptBin "demo" ''
+      while ${pkgs.coreutils}/bin/sleep 3; do
+          echo Hello World > /dev/null
+      done
+  '';
+  demo-service = pkgs.writeText "demo.service" ''
+    [Unit]
+    Description=demo service
+    Requires=demo.socket
+    After=demo.socket
+
+    [Service]
+    Type=simple
+    ExecStart=${demo-program}/bin/demo
+    Restart=always
+
+    [Install]
+    WantedBy=multi-user.target
+    Also=demo.socket
+  '';
+  demo-socket = pkgs.writeText "demo.socket" ''
+    [Unit]
+    Description=demo socket
+
+    [Socket]
+    ListenStream=/run/demo.sock
+    SocketMode=0666
+
+    [Install]
+    WantedBy=sockets.target
+  '';
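+  # Bundle the program and the two units above into a portable service image.
+  # The resulting store path contains demo_1.0.raw, which the test attaches
+  # via portablectl below.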
+  demo-portable = pkgs.portableService {
+    pname = "demo";
+    version = "1.0";
+    description = ''A demo "Portable Service" for a shell program built with nix'';
+    units = [ demo-service demo-socket ];
+  };
+in {
+
+  name = "systemd-portabled";
+  nodes.machine = {};
+  testScript = ''
+    machine.succeed("portablectl")
+    machine.wait_for_unit("systemd-portabled.service")
+    machine.succeed("portablectl attach --now --runtime ${demo-portable}/demo_1.0.raw")
+    machine.wait_for_unit("demo.service")
+    machine.succeed("portablectl detach --now --runtime demo_1.0")
+    machine.fail("systemctl status demo.service")
+  '';
+})
diff --git a/nixos/tests/systemd.nix b/nixos/tests/systemd.nix
index 3317823e03f..3c36291b733 100644
--- a/nixos/tests/systemd.nix
+++ b/nixos/tests/systemd.nix
@@ -87,12 +87,6 @@ import ./make-test-python.nix ({ pkgs, ... }: {
         machine.succeed("test -e /home/alice/user_conf_read")
         machine.succeed("test -z $(ls -1 /var/log/journal)")
 
-    # Regression test for https://github.com/NixOS/nixpkgs/issues/50273
-    with subtest("DynamicUser actually allocates a user"):
-        assert "iamatest" in machine.succeed(
-            "systemd-run --pty --property=Type=oneshot --property=DynamicUser=yes --property=User=iamatest whoami"
-        )
-
     with subtest("regression test for https://bugs.freedesktop.org/show_bug.cgi?id=77507"):
         retcode, output = machine.execute("systemctl status testservice1.service")
         assert retcode in [0, 3]  # https://bugs.freedesktop.org/show_bug.cgi?id=77507
diff --git a/nixos/tests/tandoor-recipes.nix b/nixos/tests/tandoor-recipes.nix
new file mode 100644
index 00000000000..54456238fe6
--- /dev/null
+++ b/nixos/tests/tandoor-recipes.nix
@@ -0,0 +1,43 @@
+import ./make-test-python.nix ({ lib, ... }: {
+  name = "tandoor-recipes";
+  meta.maintainers = with lib.maintainers; [ ambroisie ];
+
+  nodes.machine = { pkgs, ... }: {
+    # Setup using Postgres
+    services.tandoor-recipes = {
+      enable = true;
+
+      extraConfig = {
+        DB_ENGINE = "django.db.backends.postgresql";
+        POSTGRES_HOST = "/run/postgresql";
+        POSTGRES_USER = "tandoor_recipes";
+        POSTGRES_DB = "tandoor_recipes";
+      };
+    };
+
+    services.postgresql = {
+      enable = true;
+      ensureDatabases = [ "tandoor_recipes" ];
+      ensureUsers = [
+        {
+          name = "tandoor_recipes";
+          ensurePermissions."DATABASE tandoor_recipes" = "ALL PRIVILEGES";
+        }
+      ];
+    };
+
+    systemd.services = {
+      tandoor-recipes = {
+        after = [ "postgresql.service" ];
+      };
+    };
+  };
+
+  testScript = ''
+    machine.wait_for_unit("tandoor-recipes.service")
+
+    with subtest("Web interface gets ready"):
+        # Wait until server accepts connections
+        machine.wait_until_succeeds("curl -fs localhost:8080")
+  '';
+})
diff --git a/nixos/tests/tayga.nix b/nixos/tests/tayga.nix
new file mode 100644
index 00000000000..44974f6efea
--- /dev/null
+++ b/nixos/tests/tayga.nix
@@ -0,0 +1,235 @@
+# This test verifies that we can ping an IPv4-only server from an IPv6-only
+# client via a NAT64 router. The hosts and networks are configured as follows:
+#
+#        +------
+# Client | eth1    Address: 2001:db8::2/64
+#        |  |      Route:   64:ff9b::/96 via 2001:db8::1
+#        +--|---
+#           | VLAN 3
+#        +--|---
+#        | eth2    Address: 2001:db8::1/64
+# Router |
+#        | nat64   Address: 64:ff9b::1/128
+#        |         Route:   64:ff9b::/96
+#        |         Address: 192.0.2.0/32
+#        |         Route:   192.0.2.0/24
+#        |
+#        | eth1    Address: 100.64.0.1/24
+#        +--|---
+#           | VLAN 2
+#        +--|---
+# Server | eth1    Address: 100.64.0.2/24
+#        |         Route:   192.0.2.0/24 via 100.64.0.1
+#        +------
+
+import ./make-test-python.nix ({ pkgs, lib, ... }:
+
+{
+  name = "tayga";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ hax404 ];
+  };
+
+  nodes = {
+    # The server is configured with static IPv4 addresses. RFC 6052 Section 3.1
+    # disallows the mapping of non-global IPv4 addresses like RFC 1918 into the
+    # Well-Known Prefix 64:ff9b::/96. TAYGA also does not allow the mapping of
+    # documentation space (RFC 5737). To circumvent this, 100.64.0.2/24 from
+    # RFC 6598 (Carrier-Grade NAT shared address space) is used here.
+    # To reach the IPv4 address pool of the NAT64 gateway, a static route is
+    # configured. In normal cases, where the router would also source-NAT the
+    # pool addresses to a single IPv4 address, this would not be needed.
+    server = {
+      virtualisation.vlans = [
+        2 # towards router
+      ];
+      networking = {
+        useDHCP = false;
+        interfaces.eth1 = lib.mkForce {};
+      };
+      systemd.network = {
+        enable = true;
+        networks."vlan1" = {
+          matchConfig.Name = "eth1";
+          address = [
+            "100.64.0.2/24"
+          ];
+          routes = [
+            { routeConfig = { Destination = "192.0.2.0/24"; Gateway = "100.64.0.1"; }; }
+          ];
+        };
+      };
+    };
+
+    # The router is configured with static IPv4 addresses towards the server
+    # and IPv6 addresses towards the client. For NAT64, the Well-Known prefix
+    # 64:ff9b::/96 is used. NAT64 is done with TAYGA which provides the
+    # tun-interface nat64 and does the translation over it. The IPv6 packets
+    # are sent to this interface and received as IPv4 packets and vice versa.
+    # As TAYGA only translates IPv6 addresses to dedicated IPv4 addresses, it
+    # needs a pool of IPv4 addresses which must be at least as big as the
+    # expected number of clients. In this test, traffic for the pool addresses
+    # is routed directly back towards the client. In normal cases, there would
+    # be a second source NAT44 to map all clients behind one IPv4 address.
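+    #
+    # For illustration: with the Well-Known Prefix, the server's IPv4 address
+    # 100.64.0.2 is reachable from the IPv6-only client as 64:ff9b::100.64.0.2
+    # (i.e. 64:ff9b::6440:2), which is the address the test script pings below.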
+    router_systemd = {
+      boot.kernel.sysctl = {
+        "net.ipv4.ip_forward" = 1;
+        "net.ipv6.conf.all.forwarding" = 1;
+      };
+
+      virtualisation.vlans = [
+        2 # towards server
+        3 # towards client
+      ];
+
+      networking = {
+        useDHCP = false;
+        useNetworkd = true;
+        firewall.enable = false;
+        interfaces.eth1 = lib.mkForce {
+          ipv4 = {
+            addresses = [ { address = "100.64.0.1"; prefixLength = 24; } ];
+          };
+        };
+        interfaces.eth2 = lib.mkForce {
+          ipv6 = {
+            addresses = [ { address = "2001:db8::1"; prefixLength = 64; } ];
+          };
+        };
+      };
+
+      services.tayga = {
+        enable = true;
+        ipv4 = {
+          address = "192.0.2.0";
+          router = {
+            address = "192.0.2.1";
+          };
+          pool = {
+            address = "192.0.2.0";
+            prefixLength = 24;
+          };
+        };
+        ipv6 = {
+          address = "2001:db8::1";
+          router = {
+            address = "64:ff9b::1";
+          };
+          pool = {
+            address = "64:ff9b::";
+            prefixLength = 96;
+          };
+        };
+      };
+    };
+
+    router_nixos = {
+      boot.kernel.sysctl = {
+        "net.ipv4.ip_forward" = 1;
+        "net.ipv6.conf.all.forwarding" = 1;
+      };
+
+      virtualisation.vlans = [
+        2 # towards server
+        3 # towards client
+      ];
+
+      networking = {
+        useDHCP = false;
+        firewall.enable = false;
+        interfaces.eth1 = lib.mkForce {
+          ipv4 = {
+            addresses = [ { address = "100.64.0.1"; prefixLength = 24; } ];
+          };
+        };
+        interfaces.eth2 = lib.mkForce {
+          ipv6 = {
+            addresses = [ { address = "2001:db8::1"; prefixLength = 64; } ];
+          };
+        };
+      };
+
+      services.tayga = {
+        enable = true;
+        ipv4 = {
+          address = "192.0.2.0";
+          router = {
+            address = "192.0.2.1";
+          };
+          pool = {
+            address = "192.0.2.0";
+            prefixLength = 24;
+          };
+        };
+        ipv6 = {
+          address = "2001:db8::1";
+          router = {
+            address = "64:ff9b::1";
+          };
+          pool = {
+            address = "64:ff9b::";
+            prefixLength = 96;
+          };
+        };
+      };
+    };
+
+    # The client is configured with static IPv6 addresses. It also has a static
+    # route for the NAT64 IP space into which the IPv4 addresses are mapped. In
+    # normal cases, there would be only a default route.
+    client = {
+      virtualisation.vlans = [
+        3 # towards router
+      ];
+
+      networking = {
+        useDHCP = false;
+        interfaces.eth1 = lib.mkForce {};
+      };
+
+      systemd.network = {
+        enable = true;
+        networks."vlan1" = {
+          matchConfig.Name = "eth1";
+          address = [
+            "2001:db8::2/64"
+          ];
+          routes = [
+            { routeConfig = { Destination = "64:ff9b::/96"; Gateway = "2001:db8::1"; }; }
+          ];
+        };
+      };
+      environment.systemPackages = [ pkgs.mtr ];
+    };
+  };
+
+  testScript = ''
+    # start client and server
+    for machine in client, server:
+      machine.wait_for_unit("network-online.target")
+      machine.log(machine.execute("ip addr")[1])
+      machine.log(machine.execute("ip route")[1])
+      machine.log(machine.execute("ip -6 route")[1])
+
+    # test systemd-networkd and nixos-scripts based router
+    for router in router_systemd, router_nixos:
+      router.start()
+      router.wait_for_unit("network-online.target")
+      router.wait_for_unit("tayga.service")
+      router.log(router.execute("ip addr")[1])
+      router.log(router.execute("ip route")[1])
+      router.log(router.execute("ip -6 route")[1])
+
+      with subtest("Wait for tayga"):
+        router.wait_for_unit("tayga.service")
+
+      with subtest("Test ICMP"):
+        client.wait_until_succeeds("ping -c 3 64:ff9b::100.64.0.2 >&2")
+
+      with subtest("Test ICMP and show a traceroute"):
+        client.wait_until_succeeds("mtr --show-ips --report-wide 64:ff9b::100.64.0.2 >&2")
+
+      router.log(router.execute("systemd-analyze security tayga.service")[1])
+      router.shutdown()
+  '';
+})
diff --git a/nixos/tests/terminal-emulators.nix b/nixos/tests/terminal-emulators.nix
index c724608b915..4269d05056d 100644
--- a/nixos/tests/terminal-emulators.nix
+++ b/nixos/tests/terminal-emulators.nix
@@ -23,8 +23,9 @@ with pkgs.lib;
 let tests = {
       alacritty.pkg = p: p.alacritty;
 
-      contour.pkg = p: p.contour;
-      contour.cmd = "contour $command";
+      # times out after running for many hours
+      #contour.pkg = p: p.contour;
+      #contour.cmd = "contour $command";
 
       cool-retro-term.pkg = p: p.cool-retro-term;
       cool-retro-term.colourTest = false; # broken by gloss effect
@@ -103,7 +104,8 @@ let tests = {
       wayst.pkg = p: p.wayst;
       wayst.pinkValue = "#FF0066";
 
-      wezterm.pkg = p: p.wezterm;
+      # times out after running for many hours
+      #wezterm.pkg = p: p.wezterm;
 
       xfce4-terminal.pkg = p: p.xfce.xfce4-terminal;
 
diff --git a/nixos/tests/thelounge.nix b/nixos/tests/thelounge.nix
index e9b85685bf2..8d5a37d46c4 100644
--- a/nixos/tests/thelounge.nix
+++ b/nixos/tests/thelounge.nix
@@ -1,4 +1,6 @@
 import ./make-test-python.nix {
+  name = "thelounge";
+
   nodes = {
     private = { config, pkgs, ... }: {
       services.thelounge = {
diff --git a/nixos/tests/tmate-ssh-server.nix b/nixos/tests/tmate-ssh-server.nix
new file mode 100644
index 00000000000..e7f94db9bfc
--- /dev/null
+++ b/nixos/tests/tmate-ssh-server.nix
@@ -0,0 +1,73 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }:
+let
+  inherit (import ./ssh-keys.nix pkgs)
+    snakeOilPrivateKey snakeOilPublicKey;
+
+  setUpPrivateKey = name: ''
+    ${name}.succeed(
+        "mkdir -p /root/.ssh",
+        "chown 700 /root/.ssh",
+        "cat '${snakeOilPrivateKey}' > /root/.ssh/id_snakeoil",
+        "chown 600 /root/.ssh/id_snakeoil",
+    )
+    ${name}.wait_for_file("/root/.ssh/id_snakeoil")
+  '';
+
+  sshOpts = "-oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null -oIdentityFile=/root/.ssh/id_snakeoil";
+
+in
+{
+  name = "tmate-ssh-server";
+  nodes =
+    {
+      server = { ... }: {
+        services.tmate-ssh-server = {
+          enable = true;
+          port = 2223;
+        };
+      };
+      client = { ... }: {
+        environment.systemPackages = [ pkgs.tmate ];
+        services.openssh.enable = true;
+        users.users.root.openssh.authorizedKeys.keys = [ snakeOilPublicKey ];
+      };
+      client2 = { ... }: {
+        environment.systemPackages = [ pkgs.openssh ];
+      };
+    };
+  testScript = ''
+    start_all()
+
+    server.wait_for_unit("tmate-ssh-server.service")
+    server.wait_for_open_port(2223)
+    server.wait_for_file("/etc/tmate-ssh-server-keys/ssh_host_ed25519_key.pub")
+    server.wait_for_file("/etc/tmate-ssh-server-keys/ssh_host_rsa_key.pub")
+    server.succeed("tmate-client-config > /tmp/tmate.conf")
+    server.wait_for_file("/tmp/tmate.conf")
+
+    ${setUpPrivateKey "server"}
+    client.wait_for_unit("sshd.service")
+    client.wait_for_open_port(22)
+    server.succeed("scp ${sshOpts} /tmp/tmate.conf client:/tmp/tmate.conf")
+
+    client.wait_for_file("/tmp/tmate.conf")
+    client.send_chars("root\n")
+    client.sleep(2)
+    client.send_chars("tmate -f /tmp/tmate.conf\n")
+    client.sleep(2)
+    client.send_chars("q")
+    client.sleep(2)
+    client.send_chars("tmate display -p '#{tmate_ssh}' > /tmp/ssh_command\n")
+    client.wait_for_file("/tmp/ssh_command")
+    ssh_cmd = client.succeed("cat /tmp/ssh_command")
+
+    client2.succeed("mkdir -p ~/.ssh; ssh-keyscan -p 2223 server > ~/.ssh/known_hosts")
+    client2.send_chars("root\n")
+    client2.sleep(2)
+    client2.send_chars(ssh_cmd.strip() + "\n")
+    client2.sleep(2)
+    client2.send_chars("touch /tmp/client_2\n")
+
+    client.wait_for_file("/tmp/client_2")
+  '';
+})
diff --git a/nixos/tests/tracee.nix b/nixos/tests/tracee.nix
new file mode 100644
index 00000000000..72e82ec0b7e
--- /dev/null
+++ b/nixos/tests/tracee.nix
@@ -0,0 +1,49 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "tracee-integration";
+  nodes = {
+    machine = { config, pkgs, ... }: {
+      # EventFilters/trace_only_events_from_new_containers requires docker
+      # podman with docker compat will suffice
+      virtualisation.podman.enable = true;
+      virtualisation.podman.dockerCompat = true;
+
+      environment.systemPackages = [
+        # build the go integration tests as a binary
+        (pkgs.tracee.overrideAttrs (oa: {
+          pname = oa.pname + "-integration";
+          patches = oa.patches or [] ++ [
+            # change the prefix from /usr/bin to /run to find nix processes
+            ../../pkgs/tools/security/tracee/test-EventFilters-prefix-nix-friendly.patch
+          ];
+          buildPhase = ''
+            runHook preBuild
+            # just build the static lib we need for the go test binary
+            make $makeFlags ''${enableParallelBuilding:+-j$NIX_BUILD_CORES -l$NIX_BUILD_CORES} bpf-core ./dist/btfhub
+
+            # remove the /usr/bin prefix to work with the patch above
+            substituteInPlace tests/integration/integration_test.go \
+              --replace "/usr/bin/ls" "ls"
+
+            # then compile the tests to be ran later
+            CGO_LDFLAGS="$(pkg-config --libs libbpf)" go test -tags core,ebpf,integration -p 1 -c -o $GOPATH/tracee-integration ./tests/integration/...
+            runHook postBuild
+          '';
+          doCheck = false;
+          installPhase = ''
+            mkdir -p $out/bin
+            cp $GOPATH/tracee-integration $out/bin
+          '';
+          doInstallCheck = false;
+        }))
+      ];
+    };
+  };
+
+  testScript = ''
+    with subtest("run integration tests"):
+      # EventFilters/trace_only_events_from_new_containers also requires a container called "alpine"
+      machine.succeed('tar cv -C ${pkgs.pkgsStatic.busybox} . | podman import - alpine --change ENTRYPOINT=sleep')
+
+      print(machine.succeed('TRC_BIN="${pkgs.tracee}" tracee-integration -test.v'))
+  '';
+})
diff --git a/nixos/tests/upnp.nix b/nixos/tests/upnp.nix
index 451c8607d0e..af7cc1fe241 100644
--- a/nixos/tests/upnp.nix
+++ b/nixos/tests/upnp.nix
@@ -47,7 +47,7 @@ in
 
       client1 =
         { pkgs, nodes, ... }:
-        { environment.systemPackages = [ pkgs.miniupnpc_2 pkgs.netcat ];
+        { environment.systemPackages = [ pkgs.miniupnpc pkgs.netcat ];
           virtualisation.vlans = [ 2 ];
           networking.defaultGateway = internalRouterAddress;
           networking.interfaces.eth1.ipv4.addresses = [
@@ -65,7 +65,7 @@ in
 
       client2 =
         { pkgs, ... }:
-        { environment.systemPackages = [ pkgs.miniupnpc_2 ];
+        { environment.systemPackages = [ pkgs.miniupnpc ];
           virtualisation.vlans = [ 1 ];
           networking.interfaces.eth1.ipv4.addresses = [
             { address = externalClient2Address; prefixLength = 24; }
diff --git a/nixos/tests/uptime-kuma.nix b/nixos/tests/uptime-kuma.nix
new file mode 100644
index 00000000000..3d588d73cdb
--- /dev/null
+++ b/nixos/tests/uptime-kuma.nix
@@ -0,0 +1,19 @@
+import ./make-test-python.nix ({ lib, ... }:
+
+with lib;
+
+{
+  name = "uptime-kuma";
+  meta.maintainers = with maintainers; [ julienmalka ];
+
+  nodes.machine =
+    { pkgs, ... }:
+    { services.uptime-kuma.enable = true; };
+
+  testScript = ''
+    machine.start()
+    machine.wait_for_unit("uptime-kuma.service")
+    machine.wait_for_open_port(3001)
+    machine.succeed("curl --fail http://localhost:3001/")
+  '';
+})
diff --git a/nixos/tests/v2ray.nix b/nixos/tests/v2ray.nix
index fb36ea8557d..9eee962c64e 100644
--- a/nixos/tests/v2ray.nix
+++ b/nixos/tests/v2ray.nix
@@ -20,7 +20,7 @@ import ./make-test-python.nix ({ lib, pkgs, ... }: let
         port = 1081;
         listen = "127.0.0.1";
         protocol = "vmess";
-        settings.clients = [v2rayUser];
+        settings.clients = [ v2rayUser ];
       }
     ];
     outbounds = [
@@ -30,7 +30,7 @@ import ./make-test-python.nix ({ lib, pkgs, ... }: let
         settings.vnext = [{
           address = "127.0.0.1";
           port = 1081;
-          users = [v2rayUser];
+          users = [ v2rayUser ];
         }];
       }
       {
@@ -49,6 +49,14 @@ import ./make-test-python.nix ({ lib, pkgs, ... }: let
         inboundTag = "vmess_in";
         outboundTag = "direct";
       }
+
+      # Assert assets "geoip" and "geosite" are accessible.
+      {
+        type = "field";
+        ip = [ "geoip:private" ];
+        domain = [ "geosite:category-ads" ];
+        outboundTag = "direct";
+      }
     ];
   };
 
diff --git a/nixos/tests/varnish.nix b/nixos/tests/varnish.nix
new file mode 100644
index 00000000000..9dcdeec9d8c
--- /dev/null
+++ b/nixos/tests/varnish.nix
@@ -0,0 +1,55 @@
+{
+  system ? builtins.currentSystem
+, pkgs ? import ../.. { inherit system; }
+, package
+}:
+import ./make-test-python.nix ({ pkgs, ... }: let
+  testPath = pkgs.hello;
+in {
+  name = "varnish";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ ajs124 ];
+  };
+
+  nodes = {
+    varnish = { config, pkgs, ... }: {
+        services.nix-serve = {
+          enable = true;
+        };
+
+        services.varnish = {
+          inherit package;
+          enable = true;
+          http_address = "0.0.0.0:80";
+          config = ''
+            vcl 4.0;
+
+            backend nix-serve {
+              .host = "127.0.0.1";
+              .port = "${toString config.services.nix-serve.port}";
+            }
+          '';
+        };
+
+        networking.firewall.allowedTCPPorts = [ 80 ];
+        system.extraDependencies = [ testPath ];
+      };
+
+    client = { lib, ... }: {
+      nix.settings = {
+        require-sigs = false;
+        substituters = lib.mkForce [ "http://varnish" ];
+      };
+    };
+  };
+
+  testScript = ''
+    start_all()
+    varnish.wait_for_open_port(80)
+
+    client.wait_until_succeeds("curl -f http://varnish/nix-cache-info");
+
+    client.wait_until_succeeds("nix-store -r ${testPath}");
+    client.succeed("${testPath}/bin/hello");
+  '';
+})
diff --git a/nixos/tests/vault-dev.nix b/nixos/tests/vault-dev.nix
new file mode 100644
index 00000000000..ba9a1015cc1
--- /dev/null
+++ b/nixos/tests/vault-dev.nix
@@ -0,0 +1,35 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+{
+  name = "vault-dev";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ lnl7 mic92 ];
+  };
+  nodes.machine = { pkgs, config, ... }: {
+    environment.systemPackages = [ pkgs.vault ];
+    environment.variables.VAULT_ADDR = "http://127.0.0.1:8200";
+    environment.variables.VAULT_TOKEN = "phony-secret";
+
+    services.vault = {
+      enable = true;
+      dev = true;
+      devRootTokenID = config.environment.variables.VAULT_TOKEN;
+    };
+  };
+
+  testScript = ''
+    import json
+    start_all()
+    machine.wait_for_unit("multi-user.target")
+    machine.wait_for_unit("vault.service")
+    machine.wait_for_open_port(8200)
+    out = machine.succeed("vault status -format=json")
+    print(out)
+    status = json.loads(out)
+    assert status.get("initialized") == True
+    machine.succeed("vault kv put secret/foo bar=baz")
+    out = machine.succeed("vault kv get -format=json secret/foo")
+    print(out)
+    status = json.loads(out)
+    assert status.get("data", {}).get("data", {}).get("bar") == "baz"
+  '';
+})
diff --git a/nixos/tests/vaultwarden.nix b/nixos/tests/vaultwarden.nix
index 814d8d7c0ab..c0e1d0585b9 100644
--- a/nixos/tests/vaultwarden.nix
+++ b/nixos/tests/vaultwarden.nix
@@ -74,7 +74,10 @@ let
             services.vaultwarden = {
               enable = true;
               dbBackend = backend;
-              config.rocketPort = 80;
+              config = {
+                rocketAddress = "0.0.0.0";
+                rocketPort = 80;
+              };
             };
 
             networking.firewall.allowedTCPPorts = [ 80 ];
@@ -84,7 +87,12 @@ let
                 testRunner = pkgs.writers.writePython3Bin "test-runner"
                   {
                     libraries = [ pkgs.python3Packages.selenium ];
+                    flakeIgnore = [
+                      "E501"
+                    ];
                   } ''
+
+                  from selenium.webdriver.common.by import By
                   from selenium.webdriver import Firefox
                   from selenium.webdriver.firefox.options import Options
                   from selenium.webdriver.support.ui import WebDriverWait
@@ -101,40 +109,40 @@ let
 
                   wait.until(EC.title_contains("Create Account"))
 
-                  driver.find_element_by_css_selector('input#email').send_keys(
-                    '${userEmail}'
+                  driver.find_element(By.CSS_SELECTOR, 'input#register-form_input_email').send_keys(
+                      '${userEmail}'
                   )
-                  driver.find_element_by_css_selector('input#name').send_keys(
-                    'A Cat'
+                  driver.find_element(By.CSS_SELECTOR, 'input#register-form_input_name').send_keys(
+                      'A Cat'
                   )
-                  driver.find_element_by_css_selector('input#masterPassword').send_keys(
-                    '${userPassword}'
+                  driver.find_element(By.CSS_SELECTOR, 'input#register-form_input_master-password').send_keys(
+                      '${userPassword}'
                   )
-                  driver.find_element_by_css_selector('input#masterPasswordRetype').send_keys(
-                    '${userPassword}'
+                  driver.find_element(By.CSS_SELECTOR, 'input#register-form_input_confirm-master-password').send_keys(
+                      '${userPassword}'
                   )
 
-                  driver.find_element_by_xpath("//button[contains(., 'Submit')]").click()
+                  driver.find_element(By.XPATH, "//button[contains(., 'Create Account')]").click()
 
                   wait.until_not(EC.title_contains("Create Account"))
 
-                  driver.find_element_by_css_selector('input#masterPassword').send_keys(
-                    '${userPassword}'
+                  driver.find_element(By.CSS_SELECTOR, 'input#login_input_master-password').send_keys(
+                      '${userPassword}'
                   )
-                  driver.find_element_by_xpath("//button[contains(., 'Log In')]").click()
+                  driver.find_element(By.XPATH, "//button[contains(., 'Log In')]").click()
 
-                  wait.until(EC.title_contains("My Vault"))
+                  wait.until(EC.title_contains("Bitwarden Web Vault"))
 
-                  driver.find_element_by_xpath("//button[contains(., 'Add Item')]").click()
+                  driver.find_element(By.XPATH, "//button[contains(., 'Add Item')]").click()
 
-                  driver.find_element_by_css_selector('input#name').send_keys(
-                    'secrets'
+                  driver.find_element(By.CSS_SELECTOR, 'input#name').send_keys(
+                      'secrets'
                   )
-                  driver.find_element_by_css_selector('input#loginPassword').send_keys(
-                    '${storedPassword}'
+                  driver.find_element(By.CSS_SELECTOR, 'input#loginPassword').send_keys(
+                      '${storedPassword}'
                   )
 
-                  driver.find_element_by_xpath("//button[contains(., 'Save')]").click()
+                  driver.find_element(By.XPATH, "//button[contains(., 'Save')]").click()
                 '';
               in
               [ pkgs.firefox-unwrapped pkgs.geckodriver testRunner ];
@@ -156,7 +164,7 @@ let
       with subtest("configure the cli"):
           client.succeed("bw --nointeraction config server http://server")
 
-      with subtest("can't login to nonexistant account"):
+      with subtest("can't login to nonexistent account"):
           client.fail(
               "bw --nointeraction --raw login ${userEmail} ${userPassword}"
           )
diff --git a/nixos/tests/vector.nix b/nixos/tests/vector.nix
index ecf94e33ff1..9309f6a14dd 100644
--- a/nixos/tests/vector.nix
+++ b/nixos/tests/vector.nix
@@ -21,7 +21,7 @@ with pkgs.lib;
               type = "file";
               inputs = [ "journald" ];
               path = "/var/lib/vector/logs.log";
-              encoding = { codec = "ndjson"; };
+              encoding = { codec = "json"; };
             };
           };
         };
diff --git a/nixos/tests/vengi-tools.nix b/nixos/tests/vengi-tools.nix
index 5bc8d72c772..fd756799148 100644
--- a/nixos/tests/vengi-tools.nix
+++ b/nixos/tests/vengi-tools.nix
@@ -20,10 +20,8 @@ import ./make-test-python.nix ({ pkgs, ... }: {
       machine.wait_for_x()
       machine.execute("vengi-voxedit >&2 &")
       machine.wait_for_window("voxedit")
-      # OCR on voxedit's window is very expensive, so we avoid wasting a try
-      # by letting the window load fully first
+      # Let the window load fully
       machine.sleep(15)
-      machine.wait_for_text("Solid")
       machine.screenshot("screen")
     '';
 })
diff --git a/nixos/tests/virtualbox.nix b/nixos/tests/virtualbox.nix
index 1c1b0dac7f3..0be22bdcaea 100644
--- a/nixos/tests/virtualbox.nix
+++ b/nixos/tests/virtualbox.nix
@@ -28,8 +28,8 @@ let
       messagebus:x:1:
       EOF
 
-      "${pkgs.dbus.daemon}/bin/dbus-daemon" --fork \
-        --config-file="${pkgs.dbus.daemon}/share/dbus-1/system.conf"
+      "${pkgs.dbus}/bin/dbus-daemon" --fork \
+        --config-file="${pkgs.dbus}/share/dbus-1/system.conf"
 
       ${guestAdditions}/bin/VBoxService
       ${(attrs.vmScript or (const "")) pkgs}
diff --git a/nixos/tests/vscodium.nix b/nixos/tests/vscodium.nix
index 3bdb99947a4..ee884cc4295 100644
--- a/nixos/tests/vscodium.nix
+++ b/nixos/tests/vscodium.nix
@@ -70,15 +70,15 @@ let
 
             # Save the file
             machine.send_key('ctrl-s')
-            machine.wait_for_text('Save')
+            machine.wait_for_text('(Save|Desktop|alice|Size)')
             machine.screenshot('save_window')
             machine.send_key('ret')
 
             # (the default filename is the first line of the file)
             machine.wait_for_file(f'/home/alice/{test_string}')
 
-        machine.send_key('ctrl-q')
-        machine.wait_until_fails('pgrep -x codium')
+        # machine.send_key('ctrl-q')
+        # machine.wait_until_fails('pgrep -x codium')
       '';
     });
 
diff --git a/nixos/tests/warzone2100.nix b/nixos/tests/warzone2100.nix
new file mode 100644
index 00000000000..568e04a4699
--- /dev/null
+++ b/nixos/tests/warzone2100.nix
@@ -0,0 +1,26 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "warzone2100";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ fgaz ];
+  };
+
+  nodes.machine = { config, pkgs, ... }: {
+    imports = [
+      ./common/x11.nix
+    ];
+
+    services.xserver.enable = true;
+    environment.systemPackages = [ pkgs.warzone2100 ];
+  };
+
+  enableOCR = true;
+
+  testScript =
+    ''
+      machine.wait_for_x()
+      machine.execute("warzone2100 >&2 &")
+      machine.wait_for_window("Warzone 2100")
+      machine.wait_for_text(r"(Single Player|Multi Player|Tutorial|Options|Quit Game)")
+      machine.screenshot("screen")
+    '';
+})
diff --git a/nixos/tests/web-apps/healthchecks.nix b/nixos/tests/web-apps/healthchecks.nix
new file mode 100644
index 00000000000..41c40cd5dd8
--- /dev/null
+++ b/nixos/tests/web-apps/healthchecks.nix
@@ -0,0 +1,42 @@
+import ../make-test-python.nix ({ lib, pkgs, ... }: {
+  name = "healthchecks";
+
+  meta = with lib.maintainers; {
+    maintainers = [ phaer ];
+  };
+
+  nodes.machine = { ... }: {
+    services.healthchecks = {
+      enable = true;
+      settings = {
+        SITE_NAME = "MyUniqueInstance";
+        COMPRESS_ENABLED = "True";
+        SECRET_KEY_FILE = pkgs.writeText "secret"
+          "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789";
+      };
+    };
+  };
+
+  testScript = ''
+    machine.start()
+    machine.wait_for_unit("healthchecks.target")
+    machine.wait_until_succeeds("journalctl --since -1m --unit healthchecks --grep Listening")
+
+    with subtest("Home screen loads"):
+        machine.succeed(
+            "curl -sSfL http://localhost:8000 | grep '<title>Sign In'"
+        )
+
+    with subtest("Setting SITE_NAME via freeform option works"):
+        machine.succeed(
+            "curl -sSfL http://localhost:8000 | grep 'MyUniqueInstance</title>'"
+        )
+
+    with subtest("Manage script works"):
+        # "shell" sucommand should succeed, needs python in PATH.
+        assert "foo\n" == machine.succeed("echo 'print(\"foo\")' | sudo -u healthchecks healthchecks-manage shell")
+
+        # Shouldn't fail if not called by healthchecks user
+        assert "foo\n" == machine.succeed("echo 'print(\"foo\")' | healthchecks-manage shell")
+  '';
+})
diff --git a/nixos/tests/web-apps/mastodon.nix b/nixos/tests/web-apps/mastodon.nix
deleted file mode 100644
index 279a1c59169..00000000000
--- a/nixos/tests/web-apps/mastodon.nix
+++ /dev/null
@@ -1,170 +0,0 @@
-import ../make-test-python.nix ({pkgs, ...}:
-let
-  test-certificates = pkgs.runCommandLocal "test-certificates" { } ''
-    mkdir -p $out
-    echo insecure-root-password > $out/root-password-file
-    echo insecure-intermediate-password > $out/intermediate-password-file
-    ${pkgs.step-cli}/bin/step certificate create "Example Root CA" $out/root_ca.crt $out/root_ca.key --password-file=$out/root-password-file --profile root-ca
-    ${pkgs.step-cli}/bin/step certificate create "Example Intermediate CA 1" $out/intermediate_ca.crt $out/intermediate_ca.key --password-file=$out/intermediate-password-file --ca-password-file=$out/root-password-file --profile intermediate-ca --ca $out/root_ca.crt --ca-key $out/root_ca.key
-  '';
-
-  hosts = ''
-    192.168.2.10 ca.local
-    192.168.2.11 mastodon.local
-  '';
-
-in
-{
-  name = "mastodon";
-  meta.maintainers = with pkgs.lib.maintainers; [ erictapen izorkin ];
-
-  nodes = {
-    ca = { pkgs, ... }: {
-      networking = {
-        interfaces.eth1 = {
-          ipv4.addresses = [
-            { address = "192.168.2.10"; prefixLength = 24; }
-          ];
-        };
-        extraHosts = hosts;
-      };
-      services.step-ca = {
-        enable = true;
-        address = "0.0.0.0";
-        port = 8443;
-        openFirewall = true;
-        intermediatePasswordFile = "${test-certificates}/intermediate-password-file";
-        settings = {
-          dnsNames = [ "ca.local" ];
-          root = "${test-certificates}/root_ca.crt";
-          crt = "${test-certificates}/intermediate_ca.crt";
-          key = "${test-certificates}/intermediate_ca.key";
-          db = {
-            type = "badger";
-            dataSource = "/var/lib/step-ca/db";
-          };
-          authority = {
-            provisioners = [
-              {
-                type = "ACME";
-                name = "acme";
-              }
-            ];
-          };
-        };
-      };
-    };
-
-    server = { pkgs, ... }: {
-      networking = {
-        interfaces.eth1 = {
-          ipv4.addresses = [
-            { address = "192.168.2.11"; prefixLength = 24; }
-          ];
-        };
-        extraHosts = hosts;
-        firewall.allowedTCPPorts = [ 80 443 ];
-      };
-
-      security = {
-        acme = {
-          acceptTerms = true;
-          defaults.server = "https://ca.local:8443/acme/acme/directory";
-          defaults.email = "mastodon@mastodon.local";
-        };
-        pki.certificateFiles = [ "${test-certificates}/root_ca.crt" ];
-      };
-
-      services.redis.servers.mastodon = {
-        enable = true;
-        bind = "127.0.0.1";
-        port = 31637;
-      };
-
-      services.mastodon = {
-        enable = true;
-        configureNginx = true;
-        localDomain = "mastodon.local";
-        enableUnixSocket = false;
-        redis = {
-          createLocally = true;
-          host = "127.0.0.1";
-          port = 31637;
-        };
-        database = {
-          createLocally = true;
-          host = "/run/postgresql";
-          port = 5432;
-        };
-        smtp = {
-          createLocally = false;
-          fromAddress = "mastodon@mastodon.local";
-        };
-        extraConfig = {
-          EMAIL_DOMAIN_ALLOWLIST = "example.com";
-        };
-      };
-    };
-
-    client = { pkgs, ... }: {
-      environment.systemPackages = [ pkgs.jq ];
-      networking = {
-        interfaces.eth1 = {
-          ipv4.addresses = [
-            { address = "192.168.2.12"; prefixLength = 24; }
-          ];
-        };
-        extraHosts = hosts;
-      };
-
-      security = {
-        pki.certificateFiles = [ "${test-certificates}/root_ca.crt" ];
-      };
-    };
-  };
-
-  testScript = ''
-    start_all()
-
-    ca.wait_for_unit("step-ca.service")
-    ca.wait_for_open_port(8443)
-
-    server.wait_for_unit("nginx.service")
-    server.wait_for_unit("redis-mastodon.service")
-    server.wait_for_unit("postgresql.service")
-    server.wait_for_unit("mastodon-sidekiq.service")
-    server.wait_for_unit("mastodon-streaming.service")
-    server.wait_for_unit("mastodon-web.service")
-    server.wait_for_open_port(55000)
-    server.wait_for_open_port(55001)
-
-    # Check Mastodon version from remote client
-    client.succeed("curl --fail https://mastodon.local/api/v1/instance | jq -r '.version' | grep '${pkgs.mastodon.version}'")
-
-    # Check using admin CLI
-    # Check Mastodon version
-    server.succeed("su - mastodon -s /bin/sh -c 'mastodon-env tootctl version' | grep '${pkgs.mastodon.version}'")
-
-    # Manage accounts
-    server.succeed("su - mastodon -s /bin/sh -c 'mastodon-env tootctl email_domain_blocks add example.com'")
-    server.succeed("su - mastodon -s /bin/sh -c 'mastodon-env tootctl email_domain_blocks list' | grep 'example.com'")
-    server.fail("su - mastodon -s /bin/sh -c 'mastodon-env tootctl email_domain_blocks list' | grep 'mastodon.local'")
-    server.fail("su - mastodon -s /bin/sh -c 'mastodon-env tootctl accounts create alice --email=alice@example.com'")
-    server.succeed("su - mastodon -s /bin/sh -c 'mastodon-env tootctl email_domain_blocks remove example.com'")
-    server.succeed("su - mastodon -s /bin/sh -c 'mastodon-env tootctl accounts create bob --email=bob@example.com'")
-    server.succeed("su - mastodon -s /bin/sh -c 'mastodon-env tootctl accounts approve bob'")
-    server.succeed("su - mastodon -s /bin/sh -c 'mastodon-env tootctl accounts delete bob'")
-
-    # Manage IP access
-    server.succeed("su - mastodon -s /bin/sh -c 'mastodon-env tootctl ip_blocks add 192.168.0.0/16 --severity=no_access'")
-    server.succeed("su - mastodon -s /bin/sh -c 'mastodon-env tootctl ip_blocks export' | grep '192.168.0.0/16'")
-    server.fail("su - mastodon -s /bin/sh -c 'mastodon-env tootctl p_blocks export' | grep '172.16.0.0/16'")
-    client.fail("curl --fail https://mastodon.local/about")
-    server.succeed("su - mastodon -s /bin/sh -c 'mastodon-env tootctl ip_blocks remove 192.168.0.0/16'")
-    client.succeed("curl --fail https://mastodon.local/about")
-
-    ca.shutdown()
-    server.shutdown()
-    client.shutdown()
-  '';
-})
diff --git a/nixos/tests/web-apps/mastodon/default.nix b/nixos/tests/web-apps/mastodon/default.nix
new file mode 100644
index 00000000000..411ebfcd731
--- /dev/null
+++ b/nixos/tests/web-apps/mastodon/default.nix
@@ -0,0 +1,9 @@
+{ system ? builtins.currentSystem, handleTestOn }:
+let
+  supportedSystems = [ "x86_64-linux" "i686-linux" "aarch64-linux" ];
+
+in
+{
+  standard = handleTestOn supportedSystems ./standard.nix { inherit system; };
+  remote-postgresql = handleTestOn supportedSystems ./remote-postgresql.nix { inherit system; };
+}
diff --git a/nixos/tests/web-apps/mastodon/remote-postgresql.nix b/nixos/tests/web-apps/mastodon/remote-postgresql.nix
new file mode 100644
index 00000000000..2fd3983e13e
--- /dev/null
+++ b/nixos/tests/web-apps/mastodon/remote-postgresql.nix
@@ -0,0 +1,160 @@
+import ../../make-test-python.nix ({pkgs, ...}:
+let
+  cert = pkgs: pkgs.runCommand "selfSignedCerts" { buildInputs = [ pkgs.openssl ]; } ''
+    openssl req -x509 -newkey rsa:4096 -keyout key.pem -out cert.pem -nodes -subj '/CN=mastodon.local' -days 36500
+    mkdir -p $out
+    cp key.pem cert.pem $out
+  '';
+
+  hosts = ''
+    192.168.2.103 mastodon.local
+  '';
+
+in
+{
+  name = "mastodon-remote-postgresql";
+  meta.maintainers = with pkgs.lib.maintainers; [ erictapen izorkin turion ];
+
+  nodes = {
+    database = {
+      networking = {
+        interfaces.eth1 = {
+          ipv4.addresses = [
+            { address = "192.168.2.102"; prefixLength = 24; }
+          ];
+        };
+        extraHosts = hosts;
+        firewall.allowedTCPPorts = [ 5432 ];
+      };
+
+      services.postgresql = {
+        enable = true;
+        enableTCPIP = true;
+        authentication = ''
+          hostnossl mastodon_local mastodon_test 192.168.2.201/32 md5
+        '';
+        initialScript = pkgs.writeText "postgresql_init.sql" ''
+          CREATE ROLE mastodon_test LOGIN PASSWORD 'SoDTZcISc3f1M1LJsRLT';
+          CREATE DATABASE mastodon_local TEMPLATE template0 ENCODING UTF8;
+          GRANT ALL PRIVILEGES ON DATABASE mastodon_local TO mastodon_test;
+        '';
+      };
+    };
+
+    nginx = {
+      networking = {
+        interfaces.eth1 = {
+          ipv4.addresses = [
+            { address = "192.168.2.103"; prefixLength = 24; }
+          ];
+        };
+        extraHosts = hosts;
+        firewall.allowedTCPPorts = [ 80 443 ];
+      };
+
+      security = {
+        pki.certificateFiles = [ "${cert pkgs}/cert.pem" ];
+      };
+
+      services.nginx = {
+        enable = true;
+        recommendedProxySettings = true;
+        virtualHosts."mastodon.local" = {
+          root = "/var/empty";
+          forceSSL = true;
+          enableACME = pkgs.lib.mkForce false;
+          sslCertificate = "${cert pkgs}/cert.pem";
+          sslCertificateKey = "${cert pkgs}/key.pem";
+          locations."/" = {
+            tryFiles = "$uri @proxy";
+          };
+          locations."@proxy" = {
+            proxyPass = "http://192.168.2.201:55001";
+            proxyWebsockets = true;
+          };
+          locations."/api/v1/streaming/" = {
+            proxyPass = "http://192.168.2.201:55002";
+            proxyWebsockets = true;
+          };
+        };
+      };
+    };
+
+    server = { pkgs, ... }: {
+      virtualisation.memorySize = 2048;
+
+      environment = {
+        etc = {
+          "mastodon/password-posgressql-db".text = ''
+            SoDTZcISc3f1M1LJsRLT
+          '';
+        };
+      };
+
+      networking = {
+        interfaces.eth1 = {
+          ipv4.addresses = [
+            { address = "192.168.2.201"; prefixLength = 24; }
+          ];
+        };
+        extraHosts = hosts;
+        firewall.allowedTCPPorts = [ 55001 55002 ];
+      };
+
+      services.mastodon = {
+        enable = true;
+        configureNginx = false;
+        localDomain = "mastodon.local";
+        enableUnixSocket = false;
+        database = {
+          createLocally = false;
+          host = "192.168.2.102";
+          port = 5432;
+          name = "mastodon_local";
+          user = "mastodon_test";
+          passwordFile = "/etc/mastodon/password-posgressql-db";
+        };
+        smtp = {
+          createLocally = false;
+          fromAddress = "mastodon@mastodon.local";
+        };
+        extraConfig = {
+          BIND = "0.0.0.0";
+          EMAIL_DOMAIN_ALLOWLIST = "example.com";
+          RAILS_SERVE_STATIC_FILES = "true";
+          TRUSTED_PROXY_IP = "192.168.2.103";
+        };
+      };
+    };
+
+    client = { pkgs, ... }: {
+      environment.systemPackages = [ pkgs.jq ];
+      networking = {
+        interfaces.eth1 = {
+          ipv4.addresses = [
+            { address = "192.168.2.202"; prefixLength = 24; }
+          ];
+        };
+        extraHosts = hosts;
+      };
+
+      security = {
+        pki.certificateFiles = [ "${cert pkgs}/cert.pem" ];
+      };
+    };
+  };
+
+  testScript = import ./script.nix {
+    inherit pkgs;
+    extraInit = ''
+      nginx.wait_for_unit("nginx.service")
+      nginx.wait_for_open_port(443)
+      database.wait_for_unit("postgresql.service")
+      database.wait_for_open_port(5432)
+    '';
+    extraShutdown = ''
+      nginx.shutdown()
+      database.shutdown()
+    '';
+  };
+})
diff --git a/nixos/tests/web-apps/mastodon/script.nix b/nixos/tests/web-apps/mastodon/script.nix
new file mode 100644
index 00000000000..cdb1d4379b6
--- /dev/null
+++ b/nixos/tests/web-apps/mastodon/script.nix
@@ -0,0 +1,54 @@
+{ pkgs
+, extraInit ? ""
+, extraShutdown ? ""
+}:
+
+''
+  start_all()
+
+  ${extraInit}
+
+  server.wait_for_unit("redis-mastodon.service")
+  server.wait_for_unit("mastodon-sidekiq.service")
+  server.wait_for_unit("mastodon-streaming.service")
+  server.wait_for_unit("mastodon-web.service")
+  server.wait_for_open_port(55000)
+  server.wait_for_open_port(55001)
+
+  # Check that mastodon-media-auto-remove is scheduled
+  server.succeed("systemctl status mastodon-media-auto-remove.timer")
+
+  # Check Mastodon version from remote client
+  client.succeed("curl --fail https://mastodon.local/api/v1/instance | jq -r '.version' | grep '${pkgs.mastodon.version}'")
+
+  # Check access from remote client
+  client.succeed("curl --fail https://mastodon.local/about | grep 'Mastodon hosted on mastodon.local'")
+  client.succeed("curl --fail $(curl https://mastodon.local/api/v1/instance 2> /dev/null | jq -r .thumbnail) --output /dev/null")
+
+  # Simple checks of tootctl commands
+  # Check Mastodon version
+  server.succeed("mastodon-tootctl version | grep '${pkgs.mastodon.version}'")
+
+  # Manage accounts
+  server.succeed("mastodon-tootctl email_domain_blocks add example.com")
+  server.succeed("mastodon-tootctl email_domain_blocks list | grep example.com")
+  server.fail("mastodon-tootctl email_domain_blocks list | grep mastodon.local")
+  server.fail("mastodon-tootctl accounts create alice --email=alice@example.com")
+  server.succeed("mastodon-tootctl email_domain_blocks remove example.com")
+  server.succeed("mastodon-tootctl accounts create bob --email=bob@example.com")
+  server.succeed("mastodon-tootctl accounts approve bob")
+  server.succeed("mastodon-tootctl accounts delete bob")
+
+  # Manage IP access
+  server.succeed("mastodon-tootctl ip_blocks add 192.168.0.0/16 --severity=no_access")
+  server.succeed("mastodon-tootctl ip_blocks export | grep 192.168.0.0/16")
+  server.fail("mastodon-tootctl ip_blocks export | grep 172.16.0.0/16")
+  client.fail("curl --fail https://mastodon.local/about")
+  server.succeed("mastodon-tootctl ip_blocks remove 192.168.0.0/16")
+  client.succeed("curl --fail https://mastodon.local/about")
+
+  server.shutdown()
+  client.shutdown()
+
+  ${extraShutdown}
+''
diff --git a/nixos/tests/web-apps/mastodon/standard.nix b/nixos/tests/web-apps/mastodon/standard.nix
new file mode 100644
index 00000000000..14311afea3f
--- /dev/null
+++ b/nixos/tests/web-apps/mastodon/standard.nix
@@ -0,0 +1,92 @@
+import ../../make-test-python.nix ({ pkgs, ... }:
+let
+  cert = pkgs: pkgs.runCommand "selfSignedCerts" { buildInputs = [ pkgs.openssl ]; } ''
+    openssl req -x509 -newkey rsa:4096 -keyout key.pem -out cert.pem -nodes -subj '/CN=mastodon.local' -days 36500
+    mkdir -p $out
+    cp key.pem cert.pem $out
+  '';
+
+  hosts = ''
+    192.168.2.101 mastodon.local
+  '';
+
+in
+{
+  name = "mastodon-standard";
+  meta.maintainers = with pkgs.lib.maintainers; [ erictapen izorkin turion ];
+
+  nodes = {
+    server = { pkgs, ... }: {
+
+      virtualisation.memorySize = 2048;
+
+      networking = {
+        interfaces.eth1 = {
+          ipv4.addresses = [
+            { address = "192.168.2.101"; prefixLength = 24; }
+          ];
+        };
+        extraHosts = hosts;
+        firewall.allowedTCPPorts = [ 80 443 ];
+      };
+
+      security = {
+        pki.certificateFiles = [ "${cert pkgs}/cert.pem" ];
+      };
+
+      services.redis.servers.mastodon = {
+        enable = true;
+        bind = "127.0.0.1";
+        port = 31637;
+      };
+
+      services.mastodon = {
+        enable = true;
+        configureNginx = true;
+        localDomain = "mastodon.local";
+        enableUnixSocket = false;
+        smtp = {
+          createLocally = false;
+          fromAddress = "mastodon@mastodon.local";
+        };
+        extraConfig = {
+          EMAIL_DOMAIN_ALLOWLIST = "example.com";
+        };
+      };
+
+      services.nginx = {
+        virtualHosts."mastodon.local" = {
+          enableACME = pkgs.lib.mkForce false;
+          sslCertificate = "${cert pkgs}/cert.pem";
+          sslCertificateKey = "${cert pkgs}/key.pem";
+        };
+      };
+    };
+
+    client = { pkgs, ... }: {
+      environment.systemPackages = [ pkgs.jq ];
+      networking = {
+        interfaces.eth1 = {
+          ipv4.addresses = [
+            { address = "192.168.2.102"; prefixLength = 24; }
+          ];
+        };
+        extraHosts = hosts;
+      };
+
+      security = {
+        pki.certificateFiles = [ "${cert pkgs}/cert.pem" ];
+      };
+    };
+  };
+
+  testScript = import ./script.nix {
+    inherit pkgs;
+    extraInit = ''
+      server.wait_for_unit("nginx.service")
+      server.wait_for_open_port(443)
+      server.wait_for_unit("postgresql.service")
+      server.wait_for_open_port(5432)
+    '';
+  };
+})
diff --git a/nixos/tests/web-apps/peering-manager.nix b/nixos/tests/web-apps/peering-manager.nix
new file mode 100644
index 00000000000..56b7eebfadf
--- /dev/null
+++ b/nixos/tests/web-apps/peering-manager.nix
@@ -0,0 +1,40 @@
+import ../make-test-python.nix ({ lib, pkgs, ... }: {
+  name = "peering-manager";
+
+  meta = with lib.maintainers; {
+    maintainers = [ yuka ];
+  };
+
+  nodes.machine = { ... }: {
+    services.peering-manager = {
+      enable = true;
+      secretKeyFile = pkgs.writeText "secret" ''
+        abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789
+      '';
+    };
+  };
+
+  testScript = { nodes }: ''
+    machine.start()
+    machine.wait_for_unit("peering-manager.target")
+    machine.wait_until_succeeds("journalctl --since -1m --unit peering-manager --grep Listening")
+
+    print(machine.succeed(
+        "curl -sSfL http://[::1]:8001"
+    ))
+    with subtest("Home screen loads"):
+        machine.succeed(
+            "curl -sSfL http://[::1]:8001 | grep '<title>Home - Peering Manager</title>'"
+        )
+    with subtest("checks succeed"):
+        machine.succeed(
+            "systemctl stop peering-manager peering-manager-rq"
+        )
+        machine.succeed(
+            "sudo -u postgres psql -c 'ALTER USER \"peering-manager\" WITH SUPERUSER;'"
+        )
+        machine.succeed(
+            "cd ${nodes.machine.config.system.build.peeringManagerPkg}/opt/peering-manager ; peering-manager-manage test --no-input"
+        )
+  '';
+})
diff --git a/nixos/tests/web-apps/phylactery.nix b/nixos/tests/web-apps/phylactery.nix
new file mode 100644
index 00000000000..cf2689d2300
--- /dev/null
+++ b/nixos/tests/web-apps/phylactery.nix
@@ -0,0 +1,20 @@
+import ../make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "phylactery";
+
+  nodes.machine = { ... }: {
+    services.phylactery = {
+      enable = true;
+      port = 8080;
+      library = "/tmp";
+    };
+  };
+
+  testScript = ''
+    start_all()
+    machine.wait_for_unit('phylactery')
+    machine.wait_for_open_port(8080)
+    machine.wait_until_succeeds('curl localhost:8080')
+  '';
+
+  meta.maintainers = with lib.maintainers; [ McSinyx ];
+})
diff --git a/nixos/tests/web-apps/writefreely.nix b/nixos/tests/web-apps/writefreely.nix
new file mode 100644
index 00000000000..ce614909706
--- /dev/null
+++ b/nixos/tests/web-apps/writefreely.nix
@@ -0,0 +1,44 @@
+{ system ? builtins.currentSystem, config ? { }
+, pkgs ? import ../../.. { inherit system config; } }:
+
+with import ../../lib/testing-python.nix { inherit system pkgs; };
+with pkgs.lib;
+
+let
+  writefreelyTest = { name, type }:
+    makeTest {
+      name = "writefreely-${name}";
+
+      nodes.machine = { config, pkgs, ... }: {
+        services.writefreely = {
+          enable = true;
+          host = "localhost:3000";
+          admin.name = "nixos";
+
+          database = {
+            inherit type;
+            createLocally = type == "mysql";
+            passwordFile = pkgs.writeText "db-pass" "pass";
+          };
+
+          settings.server.port = 3000;
+        };
+      };
+
+      testScript = ''
+        start_all()
+        machine.wait_for_unit("writefreely.service")
+        machine.wait_for_open_port(3000)
+        machine.succeed("curl --fail http://localhost:3000")
+      '';
+    };
+in {
+  sqlite = writefreelyTest {
+    name = "sqlite";
+    type = "sqlite3";
+  };
+  mysql = writefreelyTest {
+    name = "mysql";
+    type = "mysql";
+  };
+}
diff --git a/nixos/tests/web-servers/agate.nix b/nixos/tests/web-servers/agate.nix
index e364e134cfd..e8d789a9ca4 100644
--- a/nixos/tests/web-servers/agate.nix
+++ b/nixos/tests/web-servers/agate.nix
@@ -1,29 +1,27 @@
-import ../make-test-python.nix (
-  { pkgs, lib, ... }:
-  {
-    name = "agate";
-    meta = with lib.maintainers; { maintainers = [ jk ]; };
+{ pkgs, lib, ... }:
+{
+  name = "agate";
+  meta = with lib.maintainers; { maintainers = [ jk ]; };
 
-    nodes = {
-      geminiserver = { pkgs, ... }: {
-        services.agate = {
-          enable = true;
-          hostnames = [ "localhost" ];
-          contentDir = pkgs.writeTextDir "index.gmi" ''
-            # Hello NixOS!
-          '';
-        };
+  nodes = {
+    geminiserver = { pkgs, ... }: {
+      services.agate = {
+        enable = true;
+        hostnames = [ "localhost" ];
+        contentDir = pkgs.writeTextDir "index.gmi" ''
+          # Hello NixOS!
+        '';
       };
     };
+  };
 
-    testScript = { nodes, ... }: ''
-      geminiserver.wait_for_unit("agate")
-      geminiserver.wait_for_open_port(1965)
+  testScript = { nodes, ... }: ''
+    geminiserver.wait_for_unit("agate")
+    geminiserver.wait_for_open_port(1965)
 
-      with subtest("check is serving over gemini"):
-        response = geminiserver.succeed("${pkgs.gmni}/bin/gmni -j once -i -N gemini://localhost:1965")
-        print(response)
-        assert "Hello NixOS!" in response
-    '';
-  }
-)
+    with subtest("check is serving over gemini"):
+      response = geminiserver.succeed("${pkgs.gmni}/bin/gmni -j once -i -N gemini://localhost:1965")
+      print(response)
+      assert "Hello NixOS!" in response
+  '';
+}
diff --git a/nixos/tests/web-servers/unit-php.nix b/nixos/tests/web-servers/unit-php.nix
index 5bef7fab3ef..f0df371945e 100644
--- a/nixos/tests/web-servers/unit-php.nix
+++ b/nixos/tests/web-servers/unit-php.nix
@@ -10,15 +10,15 @@ in {
     services.unit = {
       enable = true;
       config = pkgs.lib.strings.toJSON {
-        listeners."*:9080".application = "php_80";
-        applications.php_80 = {
-          type = "php 8.0";
+        listeners."*:9081".application = "php_81";
+        applications.php_81 = {
+          type = "php 8.1";
           processes = 1;
           user = "testuser";
           group = "testgroup";
           root = "${testdir}/www";
           index = "info.php";
-          options.file = "${pkgs.unit.usedPhp80}/lib/php.ini";
+          options.file = "${pkgs.unit.usedPhp81}/lib/php.ini";
         };
       };
     };
@@ -34,14 +34,19 @@ in {
     };
   };
   testScript = ''
+    machine.start()
+
     machine.wait_for_unit("unit.service")
+    machine.wait_for_open_port(9081)
 
     # Check that we get evaluated PHP back
-    response = machine.succeed("curl -f -vvv -s http://127.0.0.1:9080/")
-    assert "PHP Version ${pkgs.unit.usedPhp80.version}" in response, "PHP version not detected"
+    response = machine.succeed("curl -f -vvv -s http://127.0.0.1:9081/")
+    assert "PHP Version ${pkgs.unit.usedPhp81.version}" in response, "PHP version not detected"
 
     # Check that database and some other extensions are loaded
     for ext in ["json", "opcache", "pdo_mysql", "pdo_pgsql", "pdo_sqlite"]:
         assert ext in response, f"Missing {ext} extension"
+
+    machine.shutdown()
   '';
 })
diff --git a/nixos/tests/wine.nix b/nixos/tests/wine.nix
index 8a64c3179c5..7cbe7ac94f1 100644
--- a/nixos/tests/wine.nix
+++ b/nixos/tests/wine.nix
@@ -44,5 +44,8 @@ in
 listToAttrs (
   map (makeWineTest "winePackages" [ hello32 ]) variants
   ++ optionals pkgs.stdenv.is64bit
-    (map (makeWineTest "wineWowPackages" [ hello32 hello64 ]) variants)
+    (map (makeWineTest "wineWowPackages" [ hello32 hello64 ])
+         # This wayland combination times out after running for many hours.
+         # https://hydra.nixos.org/job/nixos/trunk-combined/nixos.tests.wine.wineWowPackages-wayland.x86_64-linux
+         (pkgs.lib.remove "wayland" variants))
 )
diff --git a/nixos/tests/wireguard/wg-quick.nix b/nixos/tests/wireguard/wg-quick.nix
index 961c2e15c30..bc2cba91188 100644
--- a/nixos/tests/wireguard/wg-quick.nix
+++ b/nixos/tests/wireguard/wg-quick.nix
@@ -29,6 +29,8 @@ import ../make-test-python.nix ({ pkgs, lib, ... }:
 
               inherit (wg-snakeoil-keys.peer1) publicKey;
             };
+
+            dns = [ "10.23.42.2" "fc00::2" "wg0" ];
           };
         };
       };
@@ -38,6 +40,7 @@ import ../make-test-python.nix ({ pkgs, lib, ... }:
         ip6 = "fd00::2";
         extraConfig = {
           boot = lib.mkIf (kernelPackages != null) { inherit kernelPackages; };
+          networking.useNetworkd = true;
           networking.wg-quick.interfaces.wg0 = {
             address = [ "10.23.42.2/32" "fc00::2/128" ];
             inherit (wg-snakeoil-keys.peer1) privateKey;
@@ -49,6 +52,8 @@ import ../make-test-python.nix ({ pkgs, lib, ... }:
 
               inherit (wg-snakeoil-keys.peer0) publicKey;
             };
+
+            dns = [ "10.23.42.1" "fc00::1" "wg0" ];
           };
         };
       };
diff --git a/nixos/tests/wrappers.nix b/nixos/tests/wrappers.nix
new file mode 100644
index 00000000000..08c1ad0b6b9
--- /dev/null
+++ b/nixos/tests/wrappers.nix
@@ -0,0 +1,79 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+let
+  userUid = 1000;
+  usersGid = 100;
+  busybox = pkgs: pkgs.busybox.override {
+    # Without this, the busybox binary drops euid to ruid for most applets, including id.
+    # See https://bugs.busybox.net/show_bug.cgi?id=15101
+    extraConfig = "CONFIG_FEATURE_SUID n";
+  };
+in
+{
+  name = "wrappers";
+
+  nodes.machine = { config, pkgs, ... }: {
+    ids.gids.users = usersGid;
+
+    users.users = {
+      regular = {
+        uid = userUid;
+        isNormalUser = true;
+      };
+    };
+
+    security.wrappers = {
+      suidRoot = {
+        owner = "root";
+        group = "root";
+        setuid = true;
+        source = "${busybox pkgs}/bin/busybox";
+        program = "suid_root_busybox";
+      };
+      sgidRoot = {
+        owner = "root";
+        group = "root";
+        setgid = true;
+        source = "${busybox pkgs}/bin/busybox";
+        program = "sgid_root_busybox";
+      };
+      withChown = {
+        owner = "root";
+        group = "root";
+        source = "${pkgs.libcap}/bin/capsh";
+        program = "capsh_with_chown";
+        capabilities = "cap_chown+ep";
+      };
+    };
+  };
+
+  testScript =
+    ''
+      def cmd_as_regular(cmd):
+        return "su -l regular -c '{0}'".format(cmd)
+
+      def test_as_regular(cmd, expected):
+        out = machine.succeed(cmd_as_regular(cmd)).strip()
+        assert out == expected, "Expected {0} to output {1}, but got {2}".format(cmd, expected, out)
+
+      test_as_regular('${busybox pkgs}/bin/busybox id -u', '${toString userUid}')
+      test_as_regular('${busybox pkgs}/bin/busybox id -ru', '${toString userUid}')
+      test_as_regular('${busybox pkgs}/bin/busybox id -g', '${toString usersGid}')
+      test_as_regular('${busybox pkgs}/bin/busybox id -rg', '${toString usersGid}')
+
+      test_as_regular('/run/wrappers/bin/suid_root_busybox id -u', '0')
+      test_as_regular('/run/wrappers/bin/suid_root_busybox id -ru', '${toString userUid}')
+      test_as_regular('/run/wrappers/bin/suid_root_busybox id -g', '${toString usersGid}')
+      test_as_regular('/run/wrappers/bin/suid_root_busybox id -rg', '${toString usersGid}')
+
+      test_as_regular('/run/wrappers/bin/sgid_root_busybox id -u', '${toString userUid}')
+      test_as_regular('/run/wrappers/bin/sgid_root_busybox id -ru', '${toString userUid}')
+      test_as_regular('/run/wrappers/bin/sgid_root_busybox id -g', '0')
+      test_as_regular('/run/wrappers/bin/sgid_root_busybox id -rg', '${toString usersGid}')
+
+      # We are only testing the permitted set, because it's easiest to look at with capsh.
+      machine.fail(cmd_as_regular('${pkgs.libcap}/bin/capsh --has-p=CAP_CHOWN'))
+      machine.fail(cmd_as_regular('${pkgs.libcap}/bin/capsh --has-p=CAP_SYS_ADMIN'))
+      machine.succeed(cmd_as_regular('/run/wrappers/bin/capsh_with_chown --has-p=CAP_CHOWN'))
+      machine.fail(cmd_as_regular('/run/wrappers/bin/capsh_with_chown --has-p=CAP_SYS_ADMIN'))
+    '';
+})
diff --git a/nixos/tests/xmpp/prosody.nix b/nixos/tests/xmpp/prosody.nix
index 14eab56fb82..045ae6430fd 100644
--- a/nixos/tests/xmpp/prosody.nix
+++ b/nixos/tests/xmpp/prosody.nix
@@ -42,7 +42,7 @@ in import ../make-test-python.nix {
         ${nodes.server.config.networking.primaryIPAddress} uploads.example.com
       '';
       environment.systemPackages = [
-        (pkgs.callPackage ./xmpp-sendmessage.nix { connectTo = nodes.server.config.networking.primaryIPAddress; })
+        (pkgs.callPackage ./xmpp-sendmessage.nix { connectTo = "example.com"; })
       ];
     };
     server = { config, pkgs, ... }: {
@@ -82,6 +82,7 @@ in import ../make-test-python.nix {
 
   testScript = { nodes, ... }: ''
     # Check with sqlite storage
+    start_all()
     server.wait_for_unit("prosody.service")
     server.succeed('prosodyctl status | grep "Prosody is running"')
 
diff --git a/nixos/tests/xmpp/xmpp-sendmessage.nix b/nixos/tests/xmpp/xmpp-sendmessage.nix
index 80dfcff2d0e..8ccac061249 100644
--- a/nixos/tests/xmpp/xmpp-sendmessage.nix
+++ b/nixos/tests/xmpp/xmpp-sendmessage.nix
@@ -6,12 +6,13 @@ let
     Please find this *really* important attachment.
 
     Yours truly,
-    John
+    Bob
   '';
 in writeScriptBin "send-message" ''
 #!${(python3.withPackages (ps: [ ps.slixmpp ])).interpreter}
 import logging
 import sys
+import signal
 from types import MethodType
 
 from slixmpp import ClientXMPP
@@ -64,8 +65,13 @@ class CthonTest(ClientXMPP):
         log.info('MUC join success!')
         log.info('XMPP SCRIPT TEST SUCCESS')
 
+def timeout_handler(signalnum, stackframe):
+    print('ERROR: xmpp-sendmessage timed out')
+    sys.exit(1)
 
 if __name__ == '__main__':
+    signal.signal(signal.SIGALRM, timeout_handler)
+    signal.alarm(120)
     logging.basicConfig(level=logging.DEBUG,
                         format='%(levelname)-8s %(message)s')
 
@@ -76,7 +82,7 @@ if __name__ == '__main__':
     ct.register_plugin('xep_0363')
     # MUC
     ct.register_plugin('xep_0045')
-    ct.connect(("server", 5222))
+    ct.connect(("${connectTo}", 5222))
     ct.process(forever=False)
 
     if not ct.test_succeeded:
diff --git a/nixos/tests/xpadneo.nix b/nixos/tests/xpadneo.nix
new file mode 100644
index 00000000000..c7b72831fce
--- /dev/null
+++ b/nixos/tests/xpadneo.nix
@@ -0,0 +1,18 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }: {
+  name = "xpadneo";
+  meta.maintainers = with lib.maintainers; [ kira-bruneau ];
+
+  nodes = {
+    machine = {
+      config.hardware.xpadneo.enable = true;
+    };
+  };
+
+  # This is just a sanity check to make sure the module was
+  # loaded. We'd have to find some way to mock an xbox controller if
+  # we wanted more in-depth testing.
+  testScript = ''
+    machine.start();
+    machine.succeed("modinfo hid_xpadneo | grep 'version:\s\+${pkgs.linuxPackages.xpadneo.version}'")
+  '';
+})
diff --git a/nixos/tests/xrdp.nix b/nixos/tests/xrdp.nix
index 0e1d521c5ac..f277d4b7952 100644
--- a/nixos/tests/xrdp.nix
+++ b/nixos/tests/xrdp.nix
@@ -1,7 +1,7 @@
 import ./make-test-python.nix ({ pkgs, ...} : {
   name = "xrdp";
   meta = with pkgs.lib.maintainers; {
-    maintainers = [ volth ];
+    maintainers = [ ];
   };
 
   nodes = {
diff --git a/nixos/tests/yggdrasil.nix b/nixos/tests/yggdrasil.nix
index b409d9ed785..b60a0e6b06c 100644
--- a/nixos/tests/yggdrasil.nix
+++ b/nixos/tests/yggdrasil.nix
@@ -42,7 +42,7 @@ in import ./make-test-python.nix ({ pkgs, ...} : {
 
         services.yggdrasil = {
           enable = true;
-          config = {
+          settings = {
             Listen = ["tcp://0.0.0.0:12345"];
             MulticastInterfaces = [ ];
           };
@@ -112,7 +112,7 @@ in import ./make-test-python.nix ({ pkgs, ...} : {
         services.yggdrasil = {
           enable = true;
           denyDhcpcdInterfaces = [ "ygg0" ];
-          config = {
+          settings = {
             IfTAPMode = true;
             IfName = "ygg0";
             MulticastInterfaces = [ "eth1" ];
diff --git a/nixos/tests/zfs.nix b/nixos/tests/zfs.nix
index 0b44961a3de..29df691cecb 100644
--- a/nixos/tests/zfs.nix
+++ b/nixos/tests/zfs.nix
@@ -8,7 +8,9 @@ with import ../lib/testing-python.nix { inherit system pkgs; };
 let
 
   makeZfsTest = name:
-    { kernelPackage ? if enableUnstable then pkgs.linuxPackages_latest else pkgs.linuxPackages
+    { kernelPackage ? if enableUnstable
+                      then pkgs.zfsUnstable.latestCompatibleLinuxPackages
+                      else pkgs.linuxPackages
     , enableUnstable ? false
     , extraTest ? ""
     }:
diff --git a/nixos/tests/zigbee2mqtt.nix b/nixos/tests/zigbee2mqtt.nix
index 1592202fb3a..1a40d175df8 100644
--- a/nixos/tests/zigbee2mqtt.nix
+++ b/nixos/tests/zigbee2mqtt.nix
@@ -1,6 +1,6 @@
 import ./make-test-python.nix ({ pkgs, lib, ... }:
-
   {
+    name = "zigbee2mqtt";
     nodes.machine = { pkgs, ... }:
       {
         services.zigbee2mqtt = {
diff --git a/nixos/tests/zrepl.nix b/nixos/tests/zrepl.nix
index 85dd834a6aa..b16c7eddc7a 100644
--- a/nixos/tests/zrepl.nix
+++ b/nixos/tests/zrepl.nix
@@ -1,5 +1,7 @@
 import ./make-test-python.nix (
   {
+    name = "zrepl";
+
     nodes.host = {config, pkgs, ...}: {
       config = {
         # Prerequisites for ZFS and tests.
@@ -56,8 +58,8 @@ import ./make-test-python.nix (
           out = host.succeed("curl -f localhost:9811/metrics")
 
           assert (
-              "zrepl_version_daemon" in out
-          ), "zrepl version metric was not found in Prometheus output"
+              "zrepl_start_time" in out
+          ), "zrepl start time metric was not found in Prometheus output"
 
           assert (
               "zrepl_zfs_snapshot_duration_count{filesystem=\"test\"}" in out