Diffstat (limited to 'nixos')
-rw-r--r-- nixos/doc/manual/development/option-declarations.section.md | 7
-rw-r--r-- nixos/doc/manual/from_md/development/option-declarations.section.xml | 8
-rw-r--r-- nixos/doc/manual/from_md/release-notes/rl-2205.section.xml | 141
-rw-r--r-- nixos/doc/manual/release-notes/rl-2205.section.md | 39
-rw-r--r-- nixos/lib/make-options-doc/mergeJSON.py | 9
-rw-r--r-- nixos/lib/systemd-lib.nix | 202
-rw-r--r-- nixos/modules/installer/tools/nixos-enter.sh | 22
-rw-r--r-- nixos/modules/misc/locate.nix | 6
-rw-r--r-- nixos/modules/services/cluster/hadoop/conf.nix | 22
-rw-r--r-- nixos/modules/services/cluster/hadoop/default.nix | 97
-rw-r--r-- nixos/modules/services/cluster/hadoop/hdfs.nix | 295
-rw-r--r-- nixos/modules/services/cluster/hadoop/yarn.nix | 98
-rw-r--r-- nixos/modules/services/matrix/matrix-synapse.xml | 6
-rw-r--r-- nixos/modules/services/misc/paperless-ng.nix | 2
-rw-r--r-- nixos/modules/services/networking/nsd.nix | 20
-rw-r--r-- nixos/modules/services/networking/pleroma.nix | 8
-rw-r--r-- nixos/modules/services/networking/unbound.nix | 1
-rw-r--r-- nixos/modules/services/networking/vsftpd.nix | 1
-rw-r--r-- nixos/modules/services/security/oauth2_proxy.nix | 10
-rw-r--r-- nixos/modules/services/security/tor.nix | 5
-rw-r--r-- nixos/modules/services/web-apps/nextcloud.nix | 11
-rw-r--r-- nixos/modules/services/x11/display-managers/default.nix | 1
-rw-r--r-- nixos/modules/system/boot/kernel.nix | 2
-rw-r--r-- nixos/modules/system/boot/modprobe.nix | 20
-rw-r--r-- nixos/modules/system/boot/stage-1.nix | 5
-rw-r--r-- nixos/modules/system/boot/systemd.nix | 217
-rw-r--r-- nixos/modules/tasks/auto-upgrade.nix | 83
-rw-r--r-- nixos/modules/tasks/network-interfaces.nix | 15
-rw-r--r-- nixos/tests/all-tests.nix | 8
-rw-r--r-- nixos/tests/hadoop/default.nix | 7
-rw-r--r-- nixos/tests/hadoop/hadoop.nix | 273
-rw-r--r-- nixos/tests/hadoop/hdfs.nix | 61
-rw-r--r-- nixos/tests/hadoop/yarn.nix | 34
-rw-r--r-- nixos/tests/networking.nix | 33
-rw-r--r-- nixos/tests/nextcloud/default.nix | 2
-rw-r--r-- nixos/tests/pleroma.nix | 20
-rw-r--r-- nixos/tests/terminal-emulators.nix | 207
-rw-r--r-- nixos/tests/web-apps/mastodon.nix | 170
-rw-r--r-- nixos/tests/web-apps/peertube.nix | 3
-rw-r--r-- nixos/tests/wine.nix | 13
40 files changed, 1506 insertions(+), 678 deletions(-)
diff --git a/nixos/doc/manual/development/option-declarations.section.md b/nixos/doc/manual/development/option-declarations.section.md
index 819fc6d891f..53ecb9b3a62 100644
--- a/nixos/doc/manual/development/option-declarations.section.md
+++ b/nixos/doc/manual/development/option-declarations.section.md
@@ -27,9 +27,10 @@ The function `mkOption` accepts the following arguments.
 
 `type`
 
-:   The type of the option (see [](#sec-option-types)). It may be
-    omitted, but that's not advisable since it may lead to errors that
-    are hard to diagnose.
+:   The type of the option (see [](#sec-option-types)). This
+    argument is mandatory for nixpkgs modules. Setting this is highly
+    recommended for the sake of documentation and type checking. In case it is
+    not set, a fallback type with unspecified behavior is used.
 
 `default`
 
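For illustration, a minimal declaration with an explicit type might look like the following sketch (the service and option names are hypothetical):

    options.services.myService.port = lib.mkOption {
      type = lib.types.port;  # explicit type: checked and rendered in the manual
      default = 8080;
      description = "TCP port for the hypothetical myService to listen on.";
    };
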
diff --git a/nixos/doc/manual/from_md/development/option-declarations.section.xml b/nixos/doc/manual/from_md/development/option-declarations.section.xml
index 554705e2e42..0ac5e0eeca2 100644
--- a/nixos/doc/manual/from_md/development/option-declarations.section.xml
+++ b/nixos/doc/manual/from_md/development/option-declarations.section.xml
@@ -38,9 +38,11 @@ options = {
       <listitem>
         <para>
           The type of the option (see
-          <xref linkend="sec-option-types" />). It may be omitted, but
-          that’s not advisable since it may lead to errors that are hard
-          to diagnose.
+          <xref linkend="sec-option-types" />). This argument is
+          mandatory for nixpkgs modules. Setting this is highly
+          recommended for the sake of documentation and type checking.
+          In case it is not set, a fallback type with unspecified
+          behavior is used.
         </para>
       </listitem>
     </varlistentry>
diff --git a/nixos/doc/manual/from_md/release-notes/rl-2205.section.xml b/nixos/doc/manual/from_md/release-notes/rl-2205.section.xml
index 66f2ac7ae26..348374026b4 100644
--- a/nixos/doc/manual/from_md/release-notes/rl-2205.section.xml
+++ b/nixos/doc/manual/from_md/release-notes/rl-2205.section.xml
@@ -468,6 +468,12 @@
           freeform type.
         </para>
         <para>
+          The <literal>listeners.*.bind_address</literal> option was
+          renamed to <literal>bind_addresses</literal> in order to match
+          the upstream <literal>homeserver.yaml</literal> option name.
+          It is now also a list of strings instead of a string.
+        </para>
+        <para>
           An example to make the required migration clearer:
         </para>
         <para>
@@ -528,7 +534,7 @@
 
       listeners = [ {
         port = 8448;
-        bind_address = [
+        bind_addresses = [
           &quot;::&quot;
           &quot;0.0.0.0&quot;
         ];
@@ -559,7 +565,14 @@
           Additionally a few option defaults have been synced up with
           upstream default values, for example the
           <literal>max_upload_size</literal> grew from
-          <literal>10M</literal> to <literal>50M</literal>.
+          <literal>10M</literal> to <literal>50M</literal>. For the same
+          reason, the default <literal>media_store_path</literal> was
+          changed from <literal>${dataDir}/media</literal> to
+          <literal>${dataDir}/media_store</literal> if
+          <literal>system.stateVersion</literal> is at least
+          <literal>22.05</literal>. Files will need to be manually moved
+          to the new location if the <literal>stateVersion</literal> is
+          updated.
         </para>
       </listitem>
       <listitem>
@@ -571,6 +584,25 @@
       </listitem>
       <listitem>
         <para>
+          Services in the <literal>hadoop</literal> module previously
+          set <literal>openFirewall</literal> to true by default. This
+          has now been changed to false. Node definitions for multi-node
+          clusters would need <literal>openFirewall = true;</literal> to
+          be added to hadoop services when upgrading from NixOS
+          21.11.
+        </para>
+      </listitem>
+      <listitem>
+        <para>
+          <literal>services.hadoop.yarn.nodemanager</literal> now uses
+          cgroup-based CPU limit enforcement by default. Additionally,
+          the option <literal>useCGroups</literal> was added to
+          nodemanagers as an easy way to switch back to the old
+          behavior.
+        </para>
+      </listitem>
+      <listitem>
+        <para>
           The <literal>wafHook</literal> hook now honors
           <literal>NIX_BUILD_CORES</literal> when
           <literal>enableParallelBuilding</literal> is not set
@@ -743,6 +775,12 @@
       </listitem>
       <listitem>
         <para>
+          <literal>pkgs._7zz</literal> is now correctly licensed as
+          LGPL3+ and BSD3 with optional unfree unRAR licensed code
+        </para>
+      </listitem>
+      <listitem>
+        <para>
           <literal>tilp2</literal> was removed together with its module
         </para>
       </listitem>
@@ -800,6 +838,16 @@
       </listitem>
       <listitem>
         <para>
+          The Tor SOCKS proxy is now actually disabled if
+          <literal>services.tor.client.enable</literal> is set to
+          <literal>false</literal> (the default). If you are using this
+          functionality but didn’t change the setting or set it to
+          <literal>false</literal>, you now need to set it to
+          <literal>true</literal>.
+        </para>
+      </listitem>
+      <listitem>
+        <para>
           The terraform 0.12 compatibility has been removed and the
           <literal>terraform.withPlugins</literal> and
           <literal>terraform-providers.mkProvider</literal>
@@ -1175,6 +1223,33 @@
       </listitem>
       <listitem>
         <para>
+          Some improvements have been made to the
+          <literal>hadoop</literal> module:
+        </para>
+        <itemizedlist spacing="compact">
+          <listitem>
+            <para>
+              A <literal>gatewayRole</literal> option has been added,
+              for deploying hadoop cluster configuration files to a node
+              that does not have any active services
+            </para>
+          </listitem>
+          <listitem>
+            <para>
+              Support for older versions of hadoop has been added to
+              the module
+            </para>
+          </listitem>
+          <listitem>
+            <para>
+              Overriding and extending site XML files has been made
+              easier
+            </para>
+          </listitem>
+        </itemizedlist>
+      </listitem>
+      <listitem>
+        <para>
           If you are using Wayland you can choose to use the Ozone
           Wayland support in Chrome and several Electron apps by setting
           the environment variable <literal>NIXOS_OZONE_WL=1</literal>
@@ -1198,6 +1273,14 @@
       </listitem>
       <listitem>
         <para>
+          The <literal>unifi</literal> package was switched from
+          <literal>unifi6</literal> to <literal>unifi7</literal>. Direct
+          downgrades from Unifi 7 to Unifi 6 are not possible and
+          require restoring from a backup made by Unifi 6.
+        </para>
+      </listitem>
+      <listitem>
+        <para>
           <literal>programs.zsh.autosuggestions.strategy</literal> now
           takes a list of strings instead of a string.
         </para>
@@ -1273,10 +1356,10 @@
       </listitem>
       <listitem>
         <para>
-          A new option
-          <literal>boot.initrd.extraModprobeConfig</literal> has been
-          added which can be used to configure kernel modules that are
-          loaded in the initrd.
+          The options <literal>boot.extraModprobeConfig</literal> and
+          <literal>boot.blacklistedKernelModules</literal> now also take
+          effect in the initrd by copying the file
+          <literal>/etc/modprobe.d/nixos.conf</literal> into the initrd.
         </para>
       </listitem>
       <listitem>
@@ -1288,6 +1371,52 @@
       </listitem>
       <listitem>
         <para>
+          ORY Kratos was updated to version 0.8.3-alpha.1.pre.0, which
+          introduces some breaking changes:
+        </para>
+        <itemizedlist spacing="compact">
+          <listitem>
+            <para>
+              If you are relying on the SQLite images, update your
+              Docker Pull commands as follows:
+            </para>
+            <itemizedlist spacing="compact">
+              <listitem>
+                <para>
+                  <literal>docker pull oryd/kratos:{version}</literal>
+                </para>
+              </listitem>
+            </itemizedlist>
+          </listitem>
+          <listitem>
+            <para>
+              Additionally, all passwords now have to be at least 8
+              characters long.
+            </para>
+          </listitem>
+          <listitem>
+            <para>
+              For more details, see:
+            </para>
+            <itemizedlist spacing="compact">
+              <listitem>
+                <para>
+                  <link xlink:href="https://github.com/ory/kratos/releases/tag/v0.8.1-alpha.1">Release
+                  Notes for v0.8.1-alpha.1</link>
+                </para>
+              </listitem>
+              <listitem>
+                <para>
+                  <link xlink:href="https://github.com/ory/kratos/releases/tag/v0.8.2-alpha.1">Release
+                  Notes for v0.8.2-alpha.1</link>
+                </para>
+              </listitem>
+            </itemizedlist>
+          </listitem>
+        </itemizedlist>
+      </listitem>
+      <listitem>
+        <para>
           <literal>fetchFromSourcehut</literal> now allows fetching
           repositories recursively using <literal>fetchgit</literal> or
           <literal>fetchhg</literal> if the argument
diff --git a/nixos/doc/manual/release-notes/rl-2205.section.md b/nixos/doc/manual/release-notes/rl-2205.section.md
index 01893993955..37ff778dd9b 100644
--- a/nixos/doc/manual/release-notes/rl-2205.section.md
+++ b/nixos/doc/manual/release-notes/rl-2205.section.md
@@ -158,6 +158,9 @@ In addition to numerous new and upgraded packages, this release has the followin
   module (`services.matrix-synapse`) now need to be moved into `services.matrix-synapse.settings`. And while not all options you
   may use are defined in there, they are still supported, because you can set arbitrary values in this freeform type.
 
+  The `listeners.*.bind_address` option was renamed to `bind_addresses` in order to match the upstream `homeserver.yaml` option
+  name. It is now also a list of strings instead of a string.
+
   An example to make the required migration clearer:
 
   Before:
@@ -215,7 +218,7 @@ In addition to numerous new and upgraded packages, this release has the followin
 
         listeners = [ {
           port = 8448;
-          bind_address = [
+          bind_addresses = [
             "::"
             "0.0.0.0"
           ];
@@ -240,10 +243,20 @@ In addition to numerous new and upgraded packages, this release has the followin
 
   The secrets in your original config should be migrated into a YAML file that is included via `extraConfigFiles`.
 
-  Additionally a few option defaults have been synced up with upstream default values, for example the `max_upload_size` grew from `10M` to `50M`.
+  Additionally a few option defaults have been synced up with upstream default values, for example the `max_upload_size` grew from `10M` to `50M`. For the same reason, the default
+  `media_store_path` was changed from `${dataDir}/media` to `${dataDir}/media_store` if `system.stateVersion` is at least `22.05`. Files will need to be manually moved to the new
+  location if the `stateVersion` is updated.
 
 - The MoinMoin wiki engine (`services.moinmoin`) has been removed, because Python 2 is being retired from nixpkgs.
 
+- Services in the `hadoop` module previously set `openFirewall` to true by default.
+  This has now been changed to false. Node definitions for multi-node clusters would need
+  `openFirewall = true;` to be added to hadoop services when upgrading from NixOS 21.11.
+
+- `services.hadoop.yarn.nodemanager` now uses cgroup-based CPU limit enforcement by default.
+  Additionally, the option `useCGroups` was added to nodemanagers as an easy way to switch
+  back to the old behavior.
+
 - The `wafHook` hook now honors `NIX_BUILD_CORES` when `enableParallelBuilding` is not set explicitly. Packages can restore the old behaviour by setting `enableParallelBuilding=false`.
 
 - `pkgs.claws-mail-gtk2`, representing Claws Mail's older release version three, was removed in order to get rid of Python 2.
@@ -296,6 +309,8 @@ In addition to numerous new and upgraded packages, this release has the followin
 
 - `pkgs.docbookrx` was removed since it's unmaintained
 
+- `pkgs._7zz` is now correctly licensed as LGPL3+ and BSD3 with optional unfree unRAR licensed code
+
 - `tilp2` was removed together with its module
 
 - The F-PROT antivirus (`fprot` package) and its service module were removed because it
@@ -309,6 +324,8 @@ In addition to numerous new and upgraded packages, this release has the followin
 
 - `systemd-nspawn@.service` settings have been reverted to the default systemd behaviour. User namespaces are now activated by default. If you want to keep running nspawn containers without user namespaces you need to set `systemd.nspawn.<name>.execConfig.PrivateUsers = false`
 
+- The Tor SOCKS proxy is now actually disabled if `services.tor.client.enable` is set to `false` (the default). If you are using this functionality but didn't change the setting or set it to `false`, you now need to set it to `true`.
+
 - The terraform 0.12 compatibility has been removed and the `terraform.withPlugins` and `terraform-providers.mkProvider` implementations simplified. Providers now need to be stored under
 `$out/libexec/terraform-providers/<registry>/<owner>/<name>/<version>/<os>_<arch>/terraform-provider-<name>_v<version>` (which mkProvider does).
 
@@ -436,6 +453,11 @@ In addition to numerous new and upgraded packages, this release has the followin
 
 - The `writers.writePyPy2`/`writers.writePyPy3` and corresponding `writers.writePyPy2Bin`/`writers.writePyPy3Bin` convenience functions to create executable Python 2/3 scripts using the PyPy interpreter were added.
 
+- Some improvements have been made to the `hadoop` module:
+  - A `gatewayRole` option has been added, for deploying hadoop cluster configuration files to a node that does not have any active services
+  - Support for older versions of hadoop has been added to the module
+  - Overriding and extending site XML files has been made easier
+
 - If you are using Wayland you can choose to use the Ozone Wayland support
   in Chrome and several Electron apps by setting the environment variable
   `NIXOS_OZONE_WL=1` (for example via
@@ -449,6 +471,9 @@ In addition to numerous new and upgraded packages, this release has the followin
   combined `influxdb2` package is still provided in this release for
  backwards compatibility, but will be removed at a later date.
 
+- The `unifi` package was switched from `unifi6` to `unifi7`.
+  Direct downgrades from Unifi 7 to Unifi 6 are not possible and require restoring from a backup made by Unifi 6.
+
 - `programs.zsh.autosuggestions.strategy` now takes a list of strings instead of a string.
 
 - The `services.unifi.openPorts` option default value of `true` is now deprecated and will be changed to `false` in 22.11.
@@ -478,10 +503,18 @@ In addition to numerous new and upgraded packages, this release has the followin
 
 - The option `services.duplicati.dataDir` has been added to allow changing the location of duplicati's files.
 
-- A new option `boot.initrd.extraModprobeConfig` has been added which can be used to configure kernel modules that are loaded in the initrd.
+- The options `boot.extraModprobeConfig` and `boot.blacklistedKernelModules` now also take effect in the initrd by copying the file `/etc/modprobe.d/nixos.conf` into the initrd.
 
 - `nixos-generate-config` now puts the dhcp configuration in `hardware-configuration.nix` instead of `configuration.nix`.
 
+- ORY Kratos was updated to version 0.8.3-alpha.1.pre.0, which introduces some breaking changes:
+  - If you are relying on the SQLite images, update your Docker Pull commands as follows:
+    - `docker pull oryd/kratos:{version}`
+  - Additionally, all passwords now have to be at least 8 characters long.
+  - For more details, see:
+    - [Release Notes for v0.8.1-alpha.1](https://github.com/ory/kratos/releases/tag/v0.8.1-alpha.1)
+    - [Release Notes for v0.8.2-alpha.1](https://github.com/ory/kratos/releases/tag/v0.8.2-alpha.1)
+
 - `fetchFromSourcehut` now allows fetching repositories recursively
   using `fetchgit` or `fetchhg` if the argument `fetchSubmodules`
   is set to `true`.
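
As a concrete migration sketch for the hadoop firewall change above (host layout hypothetical), a node of a 21.11 multi-node cluster upgrading to 22.05 would re-open its ports explicitly:

    # On a hypothetical worker node of a multi-node cluster:
    services.hadoop.hdfs.datanode = {
      enable = true;
      openFirewall = true;  # was the implicit default on NixOS 21.11
    };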
diff --git a/nixos/lib/make-options-doc/mergeJSON.py b/nixos/lib/make-options-doc/mergeJSON.py
index 029787a3158..8e2ea322dc8 100644
--- a/nixos/lib/make-options-doc/mergeJSON.py
+++ b/nixos/lib/make-options-doc/mergeJSON.py
@@ -66,14 +66,21 @@ for (k, v) in overrides.items():
         elif ov is not None or cur.get(ok, None) is None:
             cur[ok] = ov
 
+severity = "error" if warningsAreErrors else "warning"
+
 # check that every option has a description
 hasWarnings = False
 for (k, v) in options.items():
     if v.value.get('description', None) is None:
-        severity = "error" if warningsAreErrors else "warning"
         hasWarnings = True
         print(f"\x1b[1;31m{severity}: option {v.name} has no description\x1b[0m", file=sys.stderr)
         v.value['description'] = "This option has no description."
+    if v.value.get('type', "unspecified") == "unspecified":
+        hasWarnings = True
+        print(
+            f"\x1b[1;31m{severity}: option {v.name} has no type. Please specify a valid type, see " +
+            "https://nixos.org/manual/nixos/stable/index.html#sec-option-types\x1b[0m", file=sys.stderr)
+
 if hasWarnings and warningsAreErrors:
     print(
         "\x1b[1;31m" +
diff --git a/nixos/lib/systemd-lib.nix b/nixos/lib/systemd-lib.nix
index ab166d7327c..a472d97f5cc 100644
--- a/nixos/lib/systemd-lib.nix
+++ b/nixos/lib/systemd-lib.nix
@@ -5,6 +5,7 @@ with lib;
 let
   cfg = config.systemd;
   lndir = "${pkgs.buildPackages.xorg.lndir}/bin/lndir";
+  systemd = cfg.package;
 in rec {
 
   shellEscape = s: (replaceChars [ "\\" ] [ "\\\\" ] s);
@@ -235,4 +236,205 @@ in rec {
       ''}
     ''; # */
 
+  makeJobScript = name: text:
+    let
+      scriptName = replaceChars [ "\\" "@" ] [ "-" "_" ] (shellEscape name);
+      out = (pkgs.writeShellScriptBin scriptName ''
+        set -e
+        ${text}
+      '').overrideAttrs (_: {
+        # The derivation name is different from the script file name
+        # to keep the script file name short to avoid cluttering logs.
+        name = "unit-script-${scriptName}";
+      });
+    in "${out}/bin/${scriptName}";
+
+  unitConfig = { config, options, ... }: {
+    config = {
+      unitConfig =
+        optionalAttrs (config.requires != [])
+          { Requires = toString config.requires; }
+        // optionalAttrs (config.wants != [])
+          { Wants = toString config.wants; }
+        // optionalAttrs (config.after != [])
+          { After = toString config.after; }
+        // optionalAttrs (config.before != [])
+          { Before = toString config.before; }
+        // optionalAttrs (config.bindsTo != [])
+          { BindsTo = toString config.bindsTo; }
+        // optionalAttrs (config.partOf != [])
+          { PartOf = toString config.partOf; }
+        // optionalAttrs (config.conflicts != [])
+          { Conflicts = toString config.conflicts; }
+        // optionalAttrs (config.requisite != [])
+          { Requisite = toString config.requisite; }
+        // optionalAttrs (config.restartTriggers != [])
+          { X-Restart-Triggers = toString config.restartTriggers; }
+        // optionalAttrs (config.reloadTriggers != [])
+          { X-Reload-Triggers = toString config.reloadTriggers; }
+        // optionalAttrs (config.description != "") {
+          Description = config.description; }
+        // optionalAttrs (config.documentation != []) {
+          Documentation = toString config.documentation; }
+        // optionalAttrs (config.onFailure != []) {
+          OnFailure = toString config.onFailure; }
+        // optionalAttrs (options.startLimitIntervalSec.isDefined) {
+          StartLimitIntervalSec = toString config.startLimitIntervalSec;
+        } // optionalAttrs (options.startLimitBurst.isDefined) {
+          StartLimitBurst = toString config.startLimitBurst;
+        };
+    };
+  };
+
+  serviceConfig = { name, config, ... }: {
+    config = mkMerge
+      [ { # Default path for systemd services.  Should be quite minimal.
+          path = mkAfter
+            [ pkgs.coreutils
+              pkgs.findutils
+              pkgs.gnugrep
+              pkgs.gnused
+              systemd
+            ];
+          environment.PATH = "${makeBinPath config.path}:${makeSearchPathOutput "bin" "sbin" config.path}";
+        }
+        (mkIf (config.preStart != "")
+          { serviceConfig.ExecStartPre =
+              [ (makeJobScript "${name}-pre-start" config.preStart) ];
+          })
+        (mkIf (config.script != "")
+          { serviceConfig.ExecStart =
+              makeJobScript "${name}-start" config.script + " " + config.scriptArgs;
+          })
+        (mkIf (config.postStart != "")
+          { serviceConfig.ExecStartPost =
+              [ (makeJobScript "${name}-post-start" config.postStart) ];
+          })
+        (mkIf (config.reload != "")
+          { serviceConfig.ExecReload =
+              makeJobScript "${name}-reload" config.reload;
+          })
+        (mkIf (config.preStop != "")
+          { serviceConfig.ExecStop =
+              makeJobScript "${name}-pre-stop" config.preStop;
+          })
+        (mkIf (config.postStop != "")
+          { serviceConfig.ExecStopPost =
+              makeJobScript "${name}-post-stop" config.postStop;
+          })
+      ];
+  };
+
+  mountConfig = { config, ... }: {
+    config = {
+      mountConfig =
+        { What = config.what;
+          Where = config.where;
+        } // optionalAttrs (config.type != "") {
+          Type = config.type;
+        } // optionalAttrs (config.options != "") {
+          Options = config.options;
+        };
+    };
+  };
+
+  automountConfig = { config, ... }: {
+    config = {
+      automountConfig =
+        { Where = config.where;
+        };
+    };
+  };
+
+  commonUnitText = def: ''
+      [Unit]
+      ${attrsToSection def.unitConfig}
+    '';
+
+  targetToUnit = name: def:
+    { inherit (def) aliases wantedBy requiredBy enable;
+      text =
+        ''
+          [Unit]
+          ${attrsToSection def.unitConfig}
+        '';
+    };
+
+  serviceToUnit = name: def:
+    { inherit (def) aliases wantedBy requiredBy enable;
+      text = commonUnitText def +
+        ''
+          [Service]
+          ${let env = cfg.globalEnvironment // def.environment;
+            in concatMapStrings (n:
+              let s = optionalString (env.${n} != null)
+                "Environment=${builtins.toJSON "${n}=${env.${n}}"}\n";
+              # systemd max line length is now 1MiB
+              # https://github.com/systemd/systemd/commit/e6dde451a51dc5aaa7f4d98d39b8fe735f73d2af
+              in if stringLength s >= 1048576 then throw "The value of the environment variable ‘${n}’ in systemd service ‘${name}.service’ is too long." else s) (attrNames env)}
+          ${if def.reloadIfChanged then ''
+            X-ReloadIfChanged=true
+          '' else if !def.restartIfChanged then ''
+            X-RestartIfChanged=false
+          '' else ""}
+          ${optionalString (!def.stopIfChanged) "X-StopIfChanged=false"}
+          ${attrsToSection def.serviceConfig}
+        '';
+    };
+
+  socketToUnit = name: def:
+    { inherit (def) aliases wantedBy requiredBy enable;
+      text = commonUnitText def +
+        ''
+          [Socket]
+          ${attrsToSection def.socketConfig}
+          ${concatStringsSep "\n" (map (s: "ListenStream=${s}") def.listenStreams)}
+          ${concatStringsSep "\n" (map (s: "ListenDatagram=${s}") def.listenDatagrams)}
+        '';
+    };
+
+  timerToUnit = name: def:
+    { inherit (def) aliases wantedBy requiredBy enable;
+      text = commonUnitText def +
+        ''
+          [Timer]
+          ${attrsToSection def.timerConfig}
+        '';
+    };
+
+  pathToUnit = name: def:
+    { inherit (def) aliases wantedBy requiredBy enable;
+      text = commonUnitText def +
+        ''
+          [Path]
+          ${attrsToSection def.pathConfig}
+        '';
+    };
+
+  mountToUnit = name: def:
+    { inherit (def) aliases wantedBy requiredBy enable;
+      text = commonUnitText def +
+        ''
+          [Mount]
+          ${attrsToSection def.mountConfig}
+        '';
+    };
+
+  automountToUnit = name: def:
+    { inherit (def) aliases wantedBy requiredBy enable;
+      text = commonUnitText def +
+        ''
+          [Automount]
+          ${attrsToSection def.automountConfig}
+        '';
+    };
+
+  sliceToUnit = name: def:
+    { inherit (def) aliases wantedBy requiredBy enable;
+      text = commonUnitText def +
+        ''
+          [Slice]
+          ${attrsToSection def.sliceConfig}
+        '';
+    };
 }
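To see how these generators fit together, here is a sketch (unit name hypothetical): a service defined via `script` is turned by `makeJobScript` into a store-path shell script whose path `serviceToUnit` emits as `ExecStart`:

    systemd.services.hello = {
      description = "Hello service";
      wantedBy = [ "multi-user.target" ];
      script = "echo hello";
    };
    # serviceToUnit renders roughly:
    #   [Unit]
    #   Description=Hello service
    #   [Service]
    #   Environment=PATH=...
    #   ExecStart=/nix/store/<hash>-unit-script-hello-start/bin/hello-start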
diff --git a/nixos/modules/installer/tools/nixos-enter.sh b/nixos/modules/installer/tools/nixos-enter.sh
index 115b3d7a7c5..89beeee7cf9 100644
--- a/nixos/modules/installer/tools/nixos-enter.sh
+++ b/nixos/modules/installer/tools/nixos-enter.sh
@@ -63,32 +63,32 @@ mount --rbind /sys "$mountPoint/sys"
 
 # modified from https://github.com/archlinux/arch-install-scripts/blob/bb04ab435a5a89cd5e5ee821783477bc80db797f/arch-chroot.in#L26-L52
 chroot_add_resolv_conf() {
-    local chrootdir=$1 resolv_conf=$1/etc/resolv.conf
+    local chrootDir="$1" resolvConf="$1/etc/resolv.conf"
 
     [[ -e /etc/resolv.conf ]] || return 0
 
     # Handle resolv.conf as a symlink to somewhere else.
-    if [[ -L $chrootdir/etc/resolv.conf ]]; then
+    if [[ -L "$resolvConf" ]]; then
       # readlink(1) should always give us *something* since we know at this point
       # it's a symlink. For simplicity, ignore the case of nested symlinks.
-      # We also ignore the possibility if `../`s escaping the root.
-      resolv_conf=$(readlink "$chrootdir/etc/resolv.conf")
-      if [[ $resolv_conf = /* ]]; then
-        resolv_conf=$chrootdir$resolv_conf
+      # We also ignore the possibility of `../`s escaping the root.
+      resolvConf="$(readlink "$resolvConf")"
+      if [[ "$resolvConf" = /* ]]; then
+        resolvConf="$chrootDir$resolvConf"
       else
-        resolv_conf=$chrootdir/etc/$resolv_conf
+        resolvConf="$chrootDir/etc/$resolvConf"
       fi
     fi
 
     # ensure file exists to bind mount over
-    if [[ ! -f $resolv_conf ]]; then
-      install -Dm644 /dev/null "$resolv_conf" || return 1
+    if [[ ! -f "$resolvConf" ]]; then
+      install -Dm644 /dev/null "$resolvConf" || return 1
     fi
 
-    mount --bind /etc/resolv.conf "$resolv_conf"
+    mount --bind /etc/resolv.conf "$resolvConf"
 }
 
-chroot_add_resolv_conf "$mountPoint" || print "ERROR: failed to set up resolv.conf"
+chroot_add_resolv_conf "$mountPoint" || echo "$0: failed to set up resolv.conf" >&2
 
 (
     # If silent, write both stdout and stderr of activation script to /dev/null
diff --git a/nixos/modules/misc/locate.nix b/nixos/modules/misc/locate.nix
index 66a49b0b888..204a8914300 100644
--- a/nixos/modules/misc/locate.nix
+++ b/nixos/modules/misc/locate.nix
@@ -183,7 +183,11 @@ in
 
     pruneNames = mkOption {
       type = listOf str;
-      default = [ ".bzr" ".cache" ".git" ".hg" ".svn" ];
+      default = lib.optionals (!isFindutils) [ ".bzr" ".cache" ".git" ".hg" ".svn" ];
+      defaultText = literalDocBook ''
+        <literal>[ ".bzr" ".cache" ".git" ".hg" ".svn" ]</literal>, if
+        supported by the locate implementation (i.e. mlocate or plocate).
+      '';
       description = ''
        Directory name components; any path containing one of them is excluded from indexing
       '';
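A sketch of pinning the prune list explicitly, for setups that should not depend on this implementation-sensitive default:

    services.locate.pruneNames = [ ".bzr" ".cache" ".git" ".hg" ".svn" ];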
diff --git a/nixos/modules/services/cluster/hadoop/conf.nix b/nixos/modules/services/cluster/hadoop/conf.nix
index 0caec5cfc20..e3c26a0d550 100644
--- a/nixos/modules/services/cluster/hadoop/conf.nix
+++ b/nixos/modules/services/cluster/hadoop/conf.nix
@@ -1,6 +1,6 @@
 { cfg, pkgs, lib }:
 let
-  propertyXml = name: value: ''
+  propertyXml = name: value: lib.optionalString (value != null) ''
     <property>
       <name>${name}</name>
       <value>${builtins.toString value}</value>
@@ -29,16 +29,16 @@ let
     export HADOOP_LOG_DIR=/tmp/hadoop/$USER
   '';
 in
-pkgs.runCommand "hadoop-conf" {} ''
+pkgs.runCommand "hadoop-conf" {} (with cfg; ''
   mkdir -p $out/
-  cp ${siteXml "core-site.xml" cfg.coreSite}/* $out/
-  cp ${siteXml "hdfs-site.xml" cfg.hdfsSite}/* $out/
-  cp ${siteXml "mapred-site.xml" cfg.mapredSite}/* $out/
-  cp ${siteXml "yarn-site.xml" cfg.yarnSite}/* $out/
-  cp ${siteXml "httpfs-site.xml" cfg.httpfsSite}/* $out/
-  cp ${cfgFile "container-executor.cfg" cfg.containerExecutorCfg}/* $out/
+  cp ${siteXml "core-site.xml" (coreSite // coreSiteInternal)}/* $out/
+  cp ${siteXml "hdfs-site.xml" (hdfsSiteDefault // hdfsSite // hdfsSiteInternal)}/* $out/
+  cp ${siteXml "mapred-site.xml" (mapredSiteDefault // mapredSite)}/* $out/
+  cp ${siteXml "yarn-site.xml" (yarnSiteDefault // yarnSite // yarnSiteInternal)}/* $out/
+  cp ${siteXml "httpfs-site.xml" httpfsSite}/* $out/
+  cp ${cfgFile "container-executor.cfg" containerExecutorCfg}/* $out/
   cp ${pkgs.writeTextDir "hadoop-user-functions.sh" userFunctions}/* $out/
   cp ${pkgs.writeTextDir "hadoop-env.sh" hadoopEnv}/* $out/
-  cp ${cfg.log4jProperties} $out/log4j.properties
-  ${lib.concatMapStringsSep "\n" (dir: "cp -r ${dir}/* $out/") cfg.extraConfDirs}
-''
+  cp ${log4jProperties} $out/log4j.properties
+  ${lib.concatMapStringsSep "\n" (dir: "cp -r ${dir}/* $out/") extraConfDirs}
+'')
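The `//` operator used above is right-biased, which is what gives the three layers their precedence; a minimal sketch:

    # Keys from the right operand win on clashes:
    { "dfs.replication" = 3; } // { "dfs.replication" = 1; }
    #   => { "dfs.replication" = 1; }
    # So in hdfsSiteDefault // hdfsSite // hdfsSiteInternal, user-set
    # hdfsSite values override the defaults, and module-computed
    # hdfsSiteInternal values override both.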
diff --git a/nixos/modules/services/cluster/hadoop/default.nix b/nixos/modules/services/cluster/hadoop/default.nix
index a1a95fe31ca..a4fdea81037 100644
--- a/nixos/modules/services/cluster/hadoop/default.nix
+++ b/nixos/modules/services/cluster/hadoop/default.nix
@@ -21,24 +21,50 @@ with lib;
         <link xlink:href="https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/core-default.xml"/>
       '';
     };
+    coreSiteInternal = mkOption {
+      default = {};
+      type = types.attrsOf types.anything;
+      internal = true;
+      description = ''
+        Internal option to add configs to core-site.xml based on module options
+      '';
+    };
 
-    hdfsSite = mkOption {
+    hdfsSiteDefault = mkOption {
       default = {
         "dfs.namenode.rpc-bind-host" = "0.0.0.0";
+        "dfs.namenode.http-address" = "0.0.0.0:9870";
+        "dfs.namenode.servicerpc-bind-host" = "0.0.0.0";
+        "dfs.namenode.http-bind-host" = "0.0.0.0";
       };
       type = types.attrsOf types.anything;
+      description = ''
+        Default options for hdfs-site.xml
+      '';
+    };
+    hdfsSite = mkOption {
+      default = {};
+      type = types.attrsOf types.anything;
       example = literalExpression ''
         {
           "dfs.nameservices" = "namenode1";
         }
       '';
       description = ''
-        Hadoop hdfs-site.xml definition
+        Additional options and overrides for hdfs-site.xml
         <link xlink:href="https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-hdfs/hdfs-default.xml"/>
       '';
     };
+    hdfsSiteInternal = mkOption {
+      default = {};
+      type = types.attrsOf types.anything;
+      internal = true;
+      description = ''
+        Internal option to add configs to hdfs-site.xml based on module options
+      '';
+    };
 
-    mapredSite = mkOption {
+    mapredSiteDefault = mkOption {
       default = {
         "mapreduce.framework.name" = "yarn";
         "yarn.app.mapreduce.am.env" = "HADOOP_MAPRED_HOME=${cfg.package}/lib/${cfg.package.untarDir}";
@@ -54,18 +80,25 @@ with lib;
         }
       '';
       type = types.attrsOf types.anything;
+      description = ''
+        Default options for mapred-site.xml
+      '';
+    };
+    mapredSite = mkOption {
+      default = {};
+      type = types.attrsOf types.anything;
       example = literalExpression ''
-        options.services.hadoop.mapredSite.default // {
+        {
           "mapreduce.map.java.opts" = "-Xmx900m -XX:+UseParallelGC";
         }
       '';
       description = ''
-        Hadoop mapred-site.xml definition
+        Additional options and overrides for mapred-site.xml
         <link xlink:href="https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/mapred-default.xml"/>
       '';
     };
 
-    yarnSite = mkOption {
+    yarnSiteDefault = mkOption {
       default = {
         "yarn.nodemanager.admin-env" = "PATH=$PATH";
         "yarn.nodemanager.aux-services" = "mapreduce_shuffle";
@@ -77,19 +110,34 @@ with lib;
         "yarn.nodemanager.linux-container-executor.path" = "/run/wrappers/yarn-nodemanager/bin/container-executor";
         "yarn.nodemanager.log-dirs" = "/var/log/hadoop/yarn/nodemanager";
         "yarn.resourcemanager.bind-host" = "0.0.0.0";
-        "yarn.resourcemanager.scheduler.class" = "org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler";
+        "yarn.resourcemanager.scheduler.class" = "org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler";
       };
       type = types.attrsOf types.anything;
+      description = ''
+        Default options for yarn-site.xml
+      '';
+    };
+    yarnSite = mkOption {
+      default = {};
+      type = types.attrsOf types.anything;
       example = literalExpression ''
-        options.services.hadoop.yarnSite.default // {
+        {
           "yarn.resourcemanager.hostname" = "''${config.networking.hostName}";
         }
       '';
       description = ''
-        Hadoop yarn-site.xml definition
+        Additional options and overrides for yarn-site.xml
         <link xlink:href="https://hadoop.apache.org/docs/current/hadoop-yarn/hadoop-yarn-common/yarn-default.xml"/>
       '';
     };
+    yarnSiteInternal = mkOption {
+      default = {};
+      type = types.attrsOf types.anything;
+      internal = true;
+      description = ''
+        Internal option to add configs to yarn-site.xml based on module options
+      '';
+    };
 
     httpfsSite = mkOption {
       default = { };
@@ -123,6 +171,7 @@ with lib;
         "yarn.nodemanager.linux-container-executor.group"="hadoop";
         "min.user.id"=1000;
         "feature.terminal.enabled"=1;
+        "feature.mount-cgroup.enabled" = 1;
       };
       type = types.attrsOf types.anything;
       example = literalExpression ''
@@ -148,6 +197,8 @@ with lib;
       description = "Directories containing additional config files to be added to HADOOP_CONF_DIR";
     };
 
+    gatewayRole.enable = mkEnableOption "gateway role for deploying hadoop configs";
+
     package = mkOption {
       type = types.package;
       default = pkgs.hadoop;
@@ -157,20 +208,16 @@ with lib;
   };
 
 
-  config = mkMerge [
-    (mkIf (builtins.hasAttr "yarn" config.users.users ||
-           builtins.hasAttr "hdfs" config.users.users ||
-           builtins.hasAttr "httpfs" config.users.users) {
-      users.groups.hadoop = {
-        gid = config.ids.gids.hadoop;
-      };
-      environment = {
-        systemPackages = [ cfg.package ];
-        etc."hadoop-conf".source = let
-          hadoopConf = "${import ./conf.nix { inherit cfg pkgs lib; }}/";
-        in "${hadoopConf}";
-      };
-    })
-
-  ];
+  config = mkIf cfg.gatewayRole.enable {
+    users.groups.hadoop = {
+      gid = config.ids.gids.hadoop;
+    };
+    environment = {
+      systemPackages = [ cfg.package ];
+      etc."hadoop-conf".source = let
+        hadoopConf = "${import ./conf.nix { inherit cfg pkgs lib; }}/";
+      in "${hadoopConf}";
+      variables.HADOOP_CONF_DIR = "/etc/hadoop-conf/";
+    };
+  };
 }
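A sketch of a user configuration layered on the split options (values hypothetical):

    services.hadoop = {
      gatewayRole.enable = true;  # deploy config files only, no services
      hdfsSite = {
        # overrides the 0.0.0.0:9870 entry from hdfsSiteDefault
        "dfs.namenode.http-address" = "127.0.0.1:9870";
      };
    };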
diff --git a/nixos/modules/services/cluster/hadoop/hdfs.nix b/nixos/modules/services/cluster/hadoop/hdfs.nix
index be667aa82d8..325a002ad32 100644
--- a/nixos/modules/services/cluster/hadoop/hdfs.nix
+++ b/nixos/modules/services/cluster/hadoop/hdfs.nix
@@ -1,191 +1,191 @@
-{ config, lib, pkgs, ...}:
+{ config, lib, pkgs, ... }:
 with lib;
 let
   cfg = config.services.hadoop;
+
+  # Config files for hadoop services
   hadoopConf = "${import ./conf.nix { inherit cfg pkgs lib; }}/";
-  restartIfChanged  = mkOption {
-    type = types.bool;
-    description = ''
-      Automatically restart the service on config change.
-      This can be set to false to defer restarts on clusters running critical applications.
-      Please consider the security implications of inadvertently running an older version,
-      and the possibility of unexpected behavior caused by inconsistent versions across a cluster when disabling this option.
-    '';
-    default = false;
-  };
+
+  # Generator for HDFS service options
+  hadoopServiceOption = { serviceName, firewallOption ? true, extraOpts ? null }: {
+    enable = mkEnableOption serviceName;
+    restartIfChanged = mkOption {
+      type = types.bool;
+      description = ''
+        Automatically restart the service on config change.
+        This can be set to false to defer restarts on clusters running critical applications.
+        Please consider the security implications of inadvertently running an older version,
+        and the possibility of unexpected behavior caused by inconsistent versions across a cluster when disabling this option.
+      '';
+      default = false;
+    };
+    extraFlags = mkOption{
+      type = with types; listOf str;
+      default = [];
+      description = "Extra command line flags to pass to ${serviceName}";
+      example = [
+        "-Dcom.sun.management.jmxremote"
+        "-Dcom.sun.management.jmxremote.port=8010"
+      ];
+    };
+    extraEnv = mkOption{
+      type = with types; attrsOf str;
+      default = {};
+      description = "Extra environment variables for ${serviceName}";
+    };
+  } // (optionalAttrs firewallOption {
+    openFirewall = mkOption {
+      type = types.bool;
+      default = false;
+      description = "Open firewall ports for ${serviceName}.";
+    };
+  }) // (optionalAttrs (extraOpts != null) extraOpts);
+
+  # Generator for HDFS service configs
+  hadoopServiceConfig =
+    { name
+    , serviceOptions ? cfg.hdfs."${toLower name}"
+    , description ? "Hadoop HDFS ${name}"
+    , User ? "hdfs"
+    , allowedTCPPorts ? [ ]
+    , preStart ? ""
+    , environment ? { }
+    , extraConfig ? { }
+    }: (
+
+      mkIf serviceOptions.enable ( mkMerge [{
+        systemd.services."hdfs-${toLower name}" = {
+          inherit description preStart;
+          environment = environment // serviceOptions.extraEnv;
+          wantedBy = [ "multi-user.target" ];
+          inherit (serviceOptions) restartIfChanged;
+          serviceConfig = {
+            inherit User;
+            SyslogIdentifier = "hdfs-${toLower name}";
+            ExecStart = "${cfg.package}/bin/hdfs --config ${hadoopConf} ${toLower name} ${escapeShellArgs serviceOptions.extraFlags}";
+            Restart = "always";
+          };
+        };
+
+        services.hadoop.gatewayRole.enable = true;
+
+        networking.firewall.allowedTCPPorts = mkIf
+          ((builtins.hasAttr "openFirewall" serviceOptions) && serviceOptions.openFirewall)
+          allowedTCPPorts;
+      } extraConfig])
+    );
+
 in
 {
   options.services.hadoop.hdfs = {
-    namenode = {
-      enable = mkEnableOption "Whether to run the HDFS NameNode";
+
+    namenode = hadoopServiceOption { serviceName = "HDFS NameNode"; } // {
       formatOnInit = mkOption {
         type = types.bool;
         default = false;
         description = ''
-          Format HDFS namenode on first start. This is useful for quickly spinning up ephemeral HDFS clusters with a single namenode.
-          For HA clusters, initialization involves multiple steps across multiple nodes. Follow [this guide](https://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-hdfs/HDFSHighAvailabilityWithQJM.html)
-          to initialize an HA cluster manually.
-        '';
-      };
-      inherit restartIfChanged;
-      openFirewall = mkOption {
-        type = types.bool;
-        default = true;
-        description = ''
-          Open firewall ports for namenode
-        '';
-      };
-    };
-    datanode = {
-      enable = mkEnableOption "Whether to run the HDFS DataNode";
-      inherit restartIfChanged;
-      openFirewall = mkOption {
-        type = types.bool;
-        default = true;
-        description = ''
-          Open firewall ports for datanode
+          Format HDFS namenode on first start. This is useful for quickly spinning up
+          ephemeral HDFS clusters with a single namenode.
+          For HA clusters, initialization involves multiple steps across multiple nodes.
+          Follow this guide to initialize an HA cluster manually:
+          <link xlink:href="https://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-hdfs/HDFSHighAvailabilityWithQJM.html"/>
         '';
       };
     };
-    journalnode = {
-      enable = mkEnableOption "Whether to run the HDFS JournalNode";
-      inherit restartIfChanged;
-      openFirewall = mkOption {
-        type = types.bool;
-        default = true;
-        description = ''
-          Open firewall ports for journalnode
-        '';
+
+    datanode = hadoopServiceOption { serviceName = "HDFS DataNode"; } // {
+      dataDirs = mkOption {
+        default = null;
+        description = "Tier and path definitions for datanode storage.";
+        type = with types; nullOr (listOf (submodule {
+          options = {
+            type = mkOption {
+              type = enum [ "SSD" "DISK" "ARCHIVE" "RAM_DISK" ];
+              description = ''
+                Storage types ([SSD]/[DISK]/[ARCHIVE]/[RAM_DISK]) for HDFS storage policies.
+              '';
+            };
+            path = mkOption {
+              type = path;
+              example = "/var/lib/hadoop/hdfs/dn";
+              description = "Determines where on the local filesystem a data node should store its blocks.";
+            };
+          };
+        }));
       };
     };
-    zkfc = {
-      enable = mkEnableOption "Whether to run the HDFS ZooKeeper failover controller";
-      inherit restartIfChanged;
+
+    journalnode = hadoopServiceOption { serviceName = "HDFS JournalNode"; };
+
+    zkfc = hadoopServiceOption {
+      serviceName = "HDFS ZooKeeper failover controller";
+      firewallOption = false;
     };
-    httpfs = {
-      enable = mkEnableOption "Whether to run the HDFS HTTPfs server";
+
+    httpfs = hadoopServiceOption { serviceName = "HDFS HTTPFS"; } // {
       tempPath = mkOption {
         type = types.path;
         default = "/tmp/hadoop/httpfs";
-        description = ''
-          HTTPFS_TEMP path used by HTTPFS
-        '';
-      };
-      inherit restartIfChanged;
-      openFirewall = mkOption {
-        type = types.bool;
-        default = true;
-        description = ''
-          Open firewall ports for HTTPFS
-        '';
+        description = "HTTPFS_TEMP path used by HTTPFS";
       };
     };
+
   };
 
   config = mkMerge [
-    (mkIf cfg.hdfs.namenode.enable {
-      systemd.services.hdfs-namenode = {
-        description = "Hadoop HDFS NameNode";
-        wantedBy = [ "multi-user.target" ];
-        inherit (cfg.hdfs.namenode) restartIfChanged;
-
-        preStart = (mkIf cfg.hdfs.namenode.formatOnInit ''
-          ${cfg.package}/bin/hdfs --config ${hadoopConf} namenode -format -nonInteractive || true
-        '');
-
-        serviceConfig = {
-          User = "hdfs";
-          SyslogIdentifier = "hdfs-namenode";
-          ExecStart = "${cfg.package}/bin/hdfs --config ${hadoopConf} namenode";
-          Restart = "always";
-        };
-      };
-
-      networking.firewall.allowedTCPPorts = (mkIf cfg.hdfs.namenode.openFirewall [
+    (hadoopServiceConfig {
+      name = "NameNode";
+      allowedTCPPorts = [
         9870 # namenode.http-address
         8020 # namenode.rpc-address
-        8022 # namenode. servicerpc-address
-      ]);
+        8022 # namenode.servicerpc-address
+        8019 # dfs.ha.zkfc.port
+      ];
+      preStart = (mkIf cfg.hdfs.namenode.formatOnInit
+        "${cfg.package}/bin/hdfs --config ${hadoopConf} namenode -format -nonInteractive || true"
+      );
     })
-    (mkIf cfg.hdfs.datanode.enable {
-      systemd.services.hdfs-datanode = {
-        description = "Hadoop HDFS DataNode";
-        wantedBy = [ "multi-user.target" ];
-        inherit (cfg.hdfs.datanode) restartIfChanged;
-
-        serviceConfig = {
-          User = "hdfs";
-          SyslogIdentifier = "hdfs-datanode";
-          ExecStart = "${cfg.package}/bin/hdfs --config ${hadoopConf} datanode";
-          Restart = "always";
-        };
-      };
 
-      networking.firewall.allowedTCPPorts = (mkIf cfg.hdfs.datanode.openFirewall [
+    (hadoopServiceConfig {
+      name = "DataNode";
+      # port numbers for datanode changed between hadoop 2 and 3
+      allowedTCPPorts = if versionAtLeast cfg.package.version "3" then [
         9864 # datanode.http.address
         9866 # datanode.address
         9867 # datanode.ipc.address
-      ]);
+      ] else [
+        50075 # datanode.http.address
+        50010 # datanode.address
+        50020 # datanode.ipc.address
+      ];
+      extraConfig.services.hadoop.hdfsSiteInternal."dfs.datanode.data.dir" = let d = cfg.hdfs.datanode.dataDirs; in
+        if (d != null) then (concatMapStringsSep "," (x: "["+x.type+"]file://"+x.path) d) else d;
     })
-    (mkIf cfg.hdfs.journalnode.enable {
-      systemd.services.hdfs-journalnode = {
-        description = "Hadoop HDFS JournalNode";
-        wantedBy = [ "multi-user.target" ];
-        inherit (cfg.hdfs.journalnode) restartIfChanged;
-
-        serviceConfig = {
-          User = "hdfs";
-          SyslogIdentifier = "hdfs-journalnode";
-          ExecStart = "${cfg.package}/bin/hdfs --config ${hadoopConf} journalnode";
-          Restart = "always";
-        };
-      };
 
-      networking.firewall.allowedTCPPorts = (mkIf cfg.hdfs.journalnode.openFirewall [
+    (hadoopServiceConfig {
+      name = "JournalNode";
+      allowedTCPPorts = [
         8480 # dfs.journalnode.http-address
         8485 # dfs.journalnode.rpc-address
-      ]);
+      ];
     })
-    (mkIf cfg.hdfs.zkfc.enable {
-      systemd.services.hdfs-zkfc = {
-        description = "Hadoop HDFS ZooKeeper failover controller";
-        wantedBy = [ "multi-user.target" ];
-        inherit (cfg.hdfs.zkfc) restartIfChanged;
-
-        serviceConfig = {
-          User = "hdfs";
-          SyslogIdentifier = "hdfs-zkfc";
-          ExecStart = "${cfg.package}/bin/hdfs --config ${hadoopConf} zkfc";
-          Restart = "always";
-        };
-      };
-    })
-    (mkIf cfg.hdfs.httpfs.enable {
-      systemd.services.hdfs-httpfs = {
-        description = "Hadoop httpfs";
-        wantedBy = [ "multi-user.target" ];
-        inherit (cfg.hdfs.httpfs) restartIfChanged;
-
-        environment.HTTPFS_TEMP = cfg.hdfs.httpfs.tempPath;
 
-        preStart = ''
-          mkdir -p $HTTPFS_TEMP
-        '';
+    (hadoopServiceConfig {
+      name = "zkfc";
+      description = "Hadoop HDFS ZooKeeper failover controller";
+    })
 
-        serviceConfig = {
-          User = "httpfs";
-          SyslogIdentifier = "hdfs-httpfs";
-          ExecStart = "${cfg.package}/bin/hdfs --config ${hadoopConf} httpfs";
-          Restart = "always";
-        };
-      };
-      networking.firewall.allowedTCPPorts = (mkIf cfg.hdfs.httpfs.openFirewall [
+    (hadoopServiceConfig {
+      name = "HTTPFS";
+      environment.HTTPFS_TEMP = cfg.hdfs.httpfs.tempPath;
+      preStart = "mkdir -p $HTTPFS_TEMP";
+      User = "httpfs";
+      allowedTCPPorts = [
         14000 # httpfs.http.port
-      ]);
+      ];
     })
-    (mkIf (
-        cfg.hdfs.namenode.enable || cfg.hdfs.datanode.enable || cfg.hdfs.journalnode.enable || cfg.hdfs.zkfc.enable
-    ) {
+
+    (mkIf cfg.gatewayRole.enable {
       users.users.hdfs = {
         description = "Hadoop HDFS user";
         group = "hadoop";
@@ -199,5 +199,6 @@ in
         isSystemUser = true;
       };
     })
+
   ];
 }
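A sketch using the new dataDirs option (paths hypothetical):

    services.hadoop.hdfs.datanode = {
      enable = true;
      dataDirs = [
        { type = "SSD";  path = "/data/ssd/dn"; }
        { type = "DISK"; path = "/data/hdd/dn"; }
      ];
    };
    # Rendered into hdfs-site.xml as:
    #   dfs.datanode.data.dir = [SSD]file:///data/ssd/dn,[DISK]file:///data/hdd/dn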
diff --git a/nixos/modules/services/cluster/hadoop/yarn.nix b/nixos/modules/services/cluster/hadoop/yarn.nix
index 37c26ea10f7..74e16bdec68 100644
--- a/nixos/modules/services/cluster/hadoop/yarn.nix
+++ b/nixos/modules/services/cluster/hadoop/yarn.nix
@@ -13,23 +13,77 @@ let
     '';
     default = false;
   };
+  extraFlags = mkOption{
+    type = with types; listOf str;
+    default = [];
+    description = "Extra command line flags to pass to the service";
+    example = [
+      "-Dcom.sun.management.jmxremote"
+      "-Dcom.sun.management.jmxremote.port=8010"
+    ];
+  };
+  extraEnv = mkOption{
+    type = with types; attrsOf str;
+    default = {};
+    description = "Extra environment variables";
+  };
 in
 {
   options.services.hadoop.yarn = {
     resourcemanager = {
-      enable = mkEnableOption "Whether to run the Hadoop YARN ResourceManager";
-      inherit restartIfChanged;
+      enable = mkEnableOption "Hadoop YARN ResourceManager";
+      inherit restartIfChanged extraFlags extraEnv;
+
       openFirewall = mkOption {
         type = types.bool;
-        default = true;
+        default = false;
         description = ''
           Open firewall ports for resourcemanager
         '';
       };
     };
     nodemanager = {
-      enable = mkEnableOption "Whether to run the Hadoop YARN NodeManager";
-      inherit restartIfChanged;
+      enable = mkEnableOption "Hadoop YARN NodeManager";
+      inherit restartIfChanged extraFlags extraEnv;
+
+      resource = {
+        cpuVCores = mkOption {
+          description = "Number of vcores that can be allocated for containers.";
+          type = with types; nullOr ints.positive;
+          default = null;
+        };
+        maximumAllocationVCores = mkOption {
+          description = "The maximum virtual CPU cores any container can be allocated.";
+          type = with types; nullOr ints.positive;
+          default = null;
+        };
+        memoryMB = mkOption {
+          description = "Amount of physical memory, in MB, that can be allocated for containers.";
+          type = with types; nullOr ints.positive;
+          default = null;
+        };
+        maximumAllocationMB = mkOption {
+          description = "The maximum physical memory any container can be allocated.";
+          type = with types; nullOr ints.positive;
+          default = null;
+        };
+      };
+
+      useCGroups = mkOption {
+        type = types.bool;
+        default = true;
+        description = ''
+          Use cgroups to enforce resource limits on containers
+        '';
+      };
+
+      localDir = mkOption {
+        description = "List of directories to store localized files in.";
+        type = with types; nullOr (listOf path);
+        example = [ "/var/lib/hadoop/yarn/nm" ];
+        default = null;
+      };
+
       addBinBash = mkOption {
         type = types.bool;
         default = true;
@@ -39,7 +93,7 @@ in
       };
       openFirewall = mkOption {
         type = types.bool;
-        default = true;
+        default = false;
         description = ''
           Open firewall ports for nodemanager.
           Because containers can listen on any ephemeral port, TCP ports 1024–65535 will be opened.
@@ -49,10 +103,7 @@ in
   };
 
   config = mkMerge [
-    (mkIf (
-        cfg.yarn.resourcemanager.enable || cfg.yarn.nodemanager.enable
-    ) {
-
+    (mkIf cfg.gatewayRole.enable {
       users.users.yarn = {
         description = "Hadoop YARN user";
         group = "hadoop";
@@ -65,15 +116,19 @@ in
         description = "Hadoop YARN ResourceManager";
         wantedBy = [ "multi-user.target" ];
         inherit (cfg.yarn.resourcemanager) restartIfChanged;
+        environment = cfg.yarn.resourcemanager.extraEnv;
 
         serviceConfig = {
           User = "yarn";
           SyslogIdentifier = "yarn-resourcemanager";
           ExecStart = "${cfg.package}/bin/yarn --config ${hadoopConf} " +
-                      " resourcemanager";
+                      " resourcemanager ${escapeShellArgs cfg.yarn.resourcemanager.extraFlags}";
           Restart = "always";
         };
       };
+
+      services.hadoop.gatewayRole.enable = true;
+
       networking.firewall.allowedTCPPorts = (mkIf cfg.yarn.resourcemanager.openFirewall [
         8088 # resourcemanager.webapp.address
         8030 # resourcemanager.scheduler.address
@@ -94,6 +149,7 @@ in
         description = "Hadoop YARN NodeManager";
         wantedBy = [ "multi-user.target" ];
         inherit (cfg.yarn.nodemanager) restartIfChanged;
+        environment = cfg.yarn.nodemanager.extraEnv;
 
         preStart = ''
           # create log dir
@@ -101,8 +157,9 @@ in
           chown yarn:hadoop /var/log/hadoop/yarn/nodemanager
 
           # set up setuid container executor binary
+          umount /run/wrappers/yarn-nodemanager/cgroup/cpu || true
           rm -rf /run/wrappers/yarn-nodemanager/ || true
-          mkdir -p /run/wrappers/yarn-nodemanager/{bin,etc/hadoop}
+          mkdir -p /run/wrappers/yarn-nodemanager/{bin,etc/hadoop,cgroup/cpu}
           cp ${cfg.package}/lib/${cfg.package.untarDir}/bin/container-executor /run/wrappers/yarn-nodemanager/bin/
           chgrp hadoop /run/wrappers/yarn-nodemanager/bin/container-executor
           chmod 6050 /run/wrappers/yarn-nodemanager/bin/container-executor
@@ -114,11 +171,26 @@ in
           SyslogIdentifier = "yarn-nodemanager";
           PermissionsStartOnly = true;
           ExecStart = "${cfg.package}/bin/yarn --config ${hadoopConf} " +
-                      " nodemanager";
+                      " nodemanager ${escapeShellArgs cfg.yarn.nodemanager.extraFlags}";
           Restart = "always";
         };
       };
 
+      services.hadoop.gatewayRole.enable = true;
+
+      services.hadoop.yarnSiteInternal = with cfg.yarn.nodemanager; {
+        "yarn.nodemanager.local-dirs" = localDir;
+        "yarn.scheduler.maximum-allocation-vcores" = resource.maximumAllocationVCores;
+        "yarn.scheduler.maximum-allocation-mb" = resource.maximumAllocationMB;
+        "yarn.nodemanager.resource.cpu-vcores" = resource.cpuVCores;
+        "yarn.nodemanager.resource.memory-mb" = resource.memoryMB;
+      } // optionalAttrs useCGroups {
+        "yarn.nodemanager.linux-container-executor.cgroups.hierarchy" = "/hadoop-yarn";
+        "yarn.nodemanager.linux-container-executor.resources-handler.class" = "org.apache.hadoop.yarn.server.nodemanager.util.CgroupsLCEResourcesHandler";
+        "yarn.nodemanager.linux-container-executor.cgroups.mount" = "true";
+        "yarn.nodemanager.linux-container-executor.cgroups.mount-path" = "/run/wrappers/yarn-nodemanager/cgroup";
+      };
+
       networking.firewall.allowedTCPPortRanges = [
         (mkIf (cfg.yarn.nodemanager.openFirewall) {from = 1024; to = 65535;})
       ];
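The options above funnel into yarnSiteInternal, which maps them onto the corresponding yarn-site.xml keys. A minimal sketch of a node manager using the new knobs (sizes and paths are illustrative, not defaults):

    services.hadoop.yarn.nodemanager = {
      enable = true;
      openFirewall = true;                # now defaults to false
      useCGroups = true;
      localDir = [ "/var/lib/hadoop/yarn/nm" ];
      resource = {
        memoryMB = 4096;                  # yarn.nodemanager.resource.memory-mb
        cpuVCores = 2;                    # yarn.nodemanager.resource.cpu-vcores
        maximumAllocationMB = 2048;       # yarn.scheduler.maximum-allocation-mb
        maximumAllocationVCores = 1;      # yarn.scheduler.maximum-allocation-vcores
      };
    };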
diff --git a/nixos/modules/services/matrix/matrix-synapse.xml b/nixos/modules/services/matrix/matrix-synapse.xml
index cdc4b4de1a7..cf33957d58e 100644
--- a/nixos/modules/services/matrix/matrix-synapse.xml
+++ b/nixos/modules/services/matrix/matrix-synapse.xml
@@ -119,7 +119,7 @@ in {
     <link linkend="opt-services.matrix-synapse.settings.listeners">listeners</link> = [
       {
         <link linkend="opt-services.matrix-synapse.settings.listeners._.port">port</link> = 8008;
-        <link linkend="opt-services.matrix-synapse.settings.listeners._.bind_addresses">bind_address</link> = [ "::1" ];
+        <link linkend="opt-services.matrix-synapse.settings.listeners._.bind_addresses">bind_addresses</link> = [ "::1" ];
         <link linkend="opt-services.matrix-synapse.settings.listeners._.type">type</link> = "http";
         <link linkend="opt-services.matrix-synapse.settings.listeners._.tls">tls</link> = false;
         <link linkend="opt-services.matrix-synapse.settings.listeners._.x_forwarded">x_forwarded</link> = true;
@@ -152,10 +152,10 @@ in {
 
   <para>
    If you want to run a server with public registration by anybody, you can
-   then enable <literal><link linkend="opt-services.matrix-synapse.settings.enable_registration">services.matrix-synapse.enable_registration</link> =
+   then enable <literal><link linkend="opt-services.matrix-synapse.settings.enable_registration">services.matrix-synapse.settings.enable_registration</link> =
   true;</literal>. Otherwise, you can generate a registration secret with
    <command>pwgen -s 64 1</command> and set it with
-   <option><link linkend="opt-services.matrix-synapse.settings.registration_shared_secret">services.matrix-synapse.registration_shared_secret</link></option>.
+   <option><link linkend="opt-services.matrix-synapse.settings.registration_shared_secret">services.matrix-synapse.settings.registration_shared_secret</link></option>.
    To create a new user or admin, run the following after you have set the secret
    and have rebuilt NixOS:
 <screen>
diff --git a/nixos/modules/services/misc/paperless-ng.nix b/nixos/modules/services/misc/paperless-ng.nix
index 44efc234a2b..11e44f5ece5 100644
--- a/nixos/modules/services/misc/paperless-ng.nix
+++ b/nixos/modules/services/misc/paperless-ng.nix
@@ -214,6 +214,8 @@ in
         User = cfg.user;
         ExecStart = "${cfg.package}/bin/paperless-ng qcluster";
         Restart = "on-failure";
+        # The `mbind` syscall is needed for running the classifier.
+        SystemCallFilter = defaultServiceConfig.SystemCallFilter ++ [ "mbind" ];
       };
       environment = env;
       wantedBy = [ "multi-user.target" ];
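SystemCallFilter is additive here: the unit keeps the module's default filter list and merely appends mbind. A hedged sketch of the same pattern on an arbitrary unit (the unit name and the @system-service group are illustrative, not taken from this module):

    systemd.services.my-daemon.serviceConfig = {
      # Allow a predefined syscall group plus one extra syscall on top of it.
      SystemCallFilter = [ "@system-service" "mbind" ];
    };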
diff --git a/nixos/modules/services/networking/nsd.nix b/nixos/modules/services/networking/nsd.nix
index cf6c9661dc1..a51fc534534 100644
--- a/nixos/modules/services/networking/nsd.nix
+++ b/nixos/modules/services/networking/nsd.nix
@@ -194,19 +194,8 @@ let
                        zone.children
       );
 
-  # fighting infinite recursion
-  zoneOptions = zoneOptionsRaw // childConfig zoneOptions1 true;
-  zoneOptions1 = zoneOptionsRaw // childConfig zoneOptions2 false;
-  zoneOptions2 = zoneOptionsRaw // childConfig zoneOptions3 false;
-  zoneOptions3 = zoneOptionsRaw // childConfig zoneOptions4 false;
-  zoneOptions4 = zoneOptionsRaw // childConfig zoneOptions5 false;
-  zoneOptions5 = zoneOptionsRaw // childConfig zoneOptions6 false;
-  zoneOptions6 = zoneOptionsRaw // childConfig null         false;
-
-  childConfig = x: v: { options.children = { type = types.attrsOf x; visible = v; }; };
-
   # options are ordered alphanumerically
-  zoneOptionsRaw = types.submodule {
+  zoneOptions = types.submodule {
     options = {
 
       allowAXFRFallback = mkOption {
@@ -246,6 +235,13 @@ let
       };
 
       children = mkOption {
+        # TODO: This relies on the fact that `types.anything` doesn't set any
+        # values of its own to any defaults, because in the above zoneConfigs',
+        # values from children override ones from parents, but only if the
+        # attributes are defined. Because of this, we can't replace the element
+        # type here with `zoneConfigs`, since that would set all the attributes
+        # to default values, breaking the parent inheriting function.
+        type = types.attrsOf types.anything;
         default = {};
         description = ''
           Children zones inherit all options of their parents. Attributes
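Because the element type is now the permissive types.anything, inheritance works as documented: a child only overrides the attributes it actually defines. A rough sketch, with hypothetical zone names, data, and transfer ACL:

    services.nsd.zones."example.com" = {
      provideXFR = [ "10.0.0.0/8 NOKEY" ];   # inherited by the child below
      children."sub.example.com".data = ''
        @ IN SOA ns1.example.com. admin.example.com. ( 1 3600 900 86400 300 )
      '';
    };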
diff --git a/nixos/modules/services/networking/pleroma.nix b/nixos/modules/services/networking/pleroma.nix
index 9b8382392c0..c6d4c14dcb7 100644
--- a/nixos/modules/services/networking/pleroma.nix
+++ b/nixos/modules/services/networking/pleroma.nix
@@ -1,6 +1,7 @@
 { config, options, lib, pkgs, stdenv, ... }:
 let
   cfg = config.services.pleroma;
+  cookieFile = "/var/lib/pleroma/.cookie";
 in {
   options = {
     services.pleroma = with lib; {
@@ -8,7 +9,7 @@ in {
 
       package = mkOption {
         type = types.package;
-        default = pkgs.pleroma;
+        default = pkgs.pleroma.override { inherit cookieFile; };
         defaultText = literalExpression "pkgs.pleroma";
         description = "Pleroma package to use.";
       };
@@ -100,7 +101,6 @@ in {
       after = [ "network-online.target" "postgresql.service" ];
       wantedBy = [ "multi-user.target" ];
       restartTriggers = [ config.environment.etc."/pleroma/config.exs".source ];
-      environment.RELEASE_COOKIE = "/var/lib/pleroma/.cookie";
       serviceConfig = {
         User = cfg.user;
         Group = cfg.group;
@@ -118,10 +118,10 @@ in {
         # Better be safe than sorry migration-wise.
         ExecStartPre =
           let preScript = pkgs.writers.writeBashBin "pleromaStartPre" ''
-            if [ ! -f /var/lib/pleroma/.cookie ]
+            if [ ! -f "${cookieFile}" ] || [ ! -s "${cookieFile}" ]
             then
               echo "Creating cookie file"
-              dd if=/dev/urandom bs=1 count=16 | hexdump -e '16/1 "%02x"' > /var/lib/pleroma/.cookie
+              dd if=/dev/urandom bs=1 count=16 | ${pkgs.hexdump}/bin/hexdump -e '16/1 "%02x"' > "${cookieFile}"
             fi
             ${cfg.package}/bin/pleroma_ctl migrate
           '';
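Because the module now threads cookieFile into the package, a user-supplied services.pleroma.package should be built with the same path; a sketch:

    services.pleroma.package = pkgs.pleroma.override {
      cookieFile = "/var/lib/pleroma/.cookie";
    };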
diff --git a/nixos/modules/services/networking/unbound.nix b/nixos/modules/services/networking/unbound.nix
index f6e96349092..87873c8c1e8 100644
--- a/nixos/modules/services/networking/unbound.nix
+++ b/nixos/modules/services/networking/unbound.nix
@@ -62,6 +62,7 @@ in {
       };
 
       stateDir = mkOption {
+        type = types.path;
         default = "/var/lib/unbound";
         description = "Directory holding all state for unbound to run.";
       };
diff --git a/nixos/modules/services/networking/vsftpd.nix b/nixos/modules/services/networking/vsftpd.nix
index 710c2d9ca17..d205302051e 100644
--- a/nixos/modules/services/networking/vsftpd.nix
+++ b/nixos/modules/services/networking/vsftpd.nix
@@ -153,6 +153,7 @@ in
 
       userlist = mkOption {
         default = [];
+        type = types.listOf types.str;
         description = "See <option>userlistFile</option>.";
       };
 
diff --git a/nixos/modules/services/security/oauth2_proxy.nix b/nixos/modules/services/security/oauth2_proxy.nix
index 4d356242417..ce295bd4ba3 100644
--- a/nixos/modules/services/security/oauth2_proxy.nix
+++ b/nixos/modules/services/security/oauth2_proxy.nix
@@ -102,17 +102,19 @@ in
     # Taken from: https://github.com/oauth2-proxy/oauth2-proxy/blob/master/providers/providers.go
     provider = mkOption {
       type = types.enum [
-        "google"
+        "adfs"
         "azure"
+        "bitbucket"
+        "digitalocean"
         "facebook"
         "github"
-        "keycloak"
         "gitlab"
+        "google"
+        "keycloak"
+        "keycloak-oidc"
         "linkedin"
         "login.gov"
-        "bitbucket"
         "nextcloud"
-        "digitalocean"
         "oidc"
       ];
       default = "google";
diff --git a/nixos/modules/services/security/tor.nix b/nixos/modules/services/security/tor.nix
index ddd216ca7fd..a5822c02794 100644
--- a/nixos/modules/services/security/tor.nix
+++ b/nixos/modules/services/security/tor.nix
@@ -910,6 +910,11 @@ in
         ORPort = mkForce [];
         PublishServerDescriptor = mkForce false;
       })
+      (mkIf (!cfg.client.enable) {
+        # Make sure application connections via SOCKS are disabled
+        # when services.tor.client.enable is false
+        SOCKSPort = mkForce [ 0 ];
+      })
       (mkIf cfg.client.enable (
         { SOCKSPort = [ cfg.client.socksListenAddress ];
         } // optionalAttrs cfg.client.transparentProxy.enable {
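The mkForce gives the [ 0 ] definition maximal priority, so a stray SOCKSPort set elsewhere cannot silently re-open the port; the supported way to get a SOCKS listener back is the client branch above (sketch):

    services.tor = {
      enable = true;
      client.enable = true;   # restores SOCKSPort from socksListenAddress
    };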
diff --git a/nixos/modules/services/web-apps/nextcloud.nix b/nixos/modules/services/web-apps/nextcloud.nix
index 141ab98e29b..b32220a5e57 100644
--- a/nixos/modules/services/web-apps/nextcloud.nix
+++ b/nixos/modules/services/web-apps/nextcloud.nix
@@ -153,7 +153,7 @@ in {
     package = mkOption {
       type = types.package;
       description = "Which package to use for the Nextcloud instance.";
-      relatedPackages = [ "nextcloud21" "nextcloud22" "nextcloud23" ];
+      relatedPackages = [ "nextcloud22" "nextcloud23" ];
     };
     phpPackage = mkOption {
       type = types.package;
@@ -571,15 +571,6 @@ in {
               nextcloud defined in an overlay, please set `services.nextcloud.package` to
               `pkgs.nextcloud`.
             ''
-          # 21.03 will not be an official release - it was instead 21.05.
-          # This versionOlder statement remains set to 21.03 for backwards compatibility.
-          # See https://github.com/NixOS/nixpkgs/pull/108899 and
-          # https://github.com/NixOS/rfcs/blob/master/rfcs/0080-nixos-release-schedule.md.
-          # FIXME(@Ma27) remove this else-if as soon as 21.05 is EOL! This is only here
-          # to ensure that users who are on Nextcloud 19 with a stateVersion <21.05 with
-          # no explicit services.nextcloud.package don't upgrade to v21 by accident (
-          # nextcloud20 throws an eval-error because it's dropped).
-          else if versionOlder stateVersion "21.03" then nextcloud20
           else if versionOlder stateVersion "21.11" then nextcloud21
           else if versionOlder stateVersion "22.05" then nextcloud22
           else nextcloud23
diff --git a/nixos/modules/services/x11/display-managers/default.nix b/nixos/modules/services/x11/display-managers/default.nix
index 92b3af8527f..a5db3dd5dd4 100644
--- a/nixos/modules/services/x11/display-managers/default.nix
+++ b/nixos/modules/services/x11/display-managers/default.nix
@@ -219,6 +219,7 @@ in
 
       session = mkOption {
         default = [];
+        type = types.listOf types.attrs;
         example = literalExpression
           ''
             [ { manage = "desktop";
diff --git a/nixos/modules/system/boot/kernel.nix b/nixos/modules/system/boot/kernel.nix
index d147155d796..db00244ca0a 100644
--- a/nixos/modules/system/boot/kernel.nix
+++ b/nixos/modules/system/boot/kernel.nix
@@ -36,7 +36,7 @@ in
 
     boot.kernelPackages = mkOption {
       default = pkgs.linuxPackages;
-      type = types.unspecified // { merge = mergeEqualOption; };
+      type = types.raw;
       apply = kernelPackages: kernelPackages.extend (self: super: {
         kernel = super.kernel.override (originalArgs: {
           inherit randstructSeed;
diff --git a/nixos/modules/system/boot/modprobe.nix b/nixos/modules/system/boot/modprobe.nix
index 27f78835adb..e683d181729 100644
--- a/nixos/modules/system/boot/modprobe.nix
+++ b/nixos/modules/system/boot/modprobe.nix
@@ -34,23 +34,6 @@ with lib;
       type = types.lines;
     };
 
-    boot.initrd.extraModprobeConfig = mkOption {
-      default = "";
-      example =
-        ''
-          options zfs zfs_arc_max=1073741824
-        '';
-      description = ''
-        Does exactly the same thing as
-        <option>boot.extraModprobeConfig</option>, except
-        that the generated <filename>modprobe.conf</filename>
-        file is also included in the initrd.
-        This is useful for setting module options for kernel
-        modules that are loaded during early boot in the initrd.
-      '';
-      type = types.lines;
-    };
-
   };
 
 
@@ -67,9 +50,6 @@ with lib;
         '')}
         ${config.boot.extraModprobeConfig}
       '';
-    environment.etc."modprobe.d/nixos-initrd.conf".text = ''
-        ${config.boot.initrd.extraModprobeConfig}
-      '';
     environment.etc."modprobe.d/debian.conf".source = pkgs.kmod-debian-aliases;
 
     environment.etc."modprobe.d/systemd.conf".source = "${pkgs.systemd}/lib/modprobe.d/systemd.conf";
diff --git a/nixos/modules/system/boot/stage-1.nix b/nixos/modules/system/boot/stage-1.nix
index 1575c0257d1..8b011d91563 100644
--- a/nixos/modules/system/boot/stage-1.nix
+++ b/nixos/modules/system/boot/stage-1.nix
@@ -338,9 +338,6 @@ let
         { object = pkgs.writeText "mdadm.conf" config.boot.initrd.mdadmConf;
           symlink = "/etc/mdadm.conf";
         }
-        { object = config.environment.etc."modprobe.d/nixos-initrd.conf".source;
-          symlink = "/etc/modprobe.d/nixos-initrd.conf";
-        }
         { object = pkgs.runCommand "initrd-kmod-blacklist-ubuntu" {
               src = "${pkgs.kmod-blacklist-ubuntu}/modprobe.conf";
               preferLocalBuild = true;
@@ -581,7 +578,7 @@ in
         else "gzip"
       );
       defaultText = literalDocBook "<literal>zstd</literal> if the kernel supports it (5.9+), <literal>gzip</literal> if not";
-      type = types.unspecified; # We don't have a function type...
+      type = types.either types.str (types.functionTo types.str);
       description = ''
         The compressor to use on the initrd image. May be any of:
 
diff --git a/nixos/modules/system/boot/systemd.nix b/nixos/modules/system/boot/systemd.nix
index 4019af63ad3..ff002d87a12 100644
--- a/nixos/modules/system/boot/systemd.nix
+++ b/nixos/modules/system/boot/systemd.nix
@@ -11,6 +11,22 @@ let
 
   systemd = cfg.package;
 
+  inherit (systemdUtils.lib)
+    makeJobScript
+    unitConfig
+    serviceConfig
+    mountConfig
+    automountConfig
+    commonUnitText
+    targetToUnit
+    serviceToUnit
+    socketToUnit
+    timerToUnit
+    pathToUnit
+    mountToUnit
+    automountToUnit
+    sliceToUnit;
+
   upstreamSystemUnits =
     [ # Targets.
       "basic.target"
@@ -209,207 +225,6 @@ let
       "xdg-desktop-autostart.target"
     ];
 
-  makeJobScript = name: text:
-    let
-      scriptName = replaceChars [ "\\" "@" ] [ "-" "_" ] (shellEscape name);
-      out = (pkgs.writeShellScriptBin scriptName ''
-        set -e
-        ${text}
-      '').overrideAttrs (_: {
-        # The derivation name is different from the script file name
-        # to keep the script file name short to avoid cluttering logs.
-        name = "unit-script-${scriptName}";
-      });
-    in "${out}/bin/${scriptName}";
-
-  unitConfig = { config, options, ... }: {
-    config = {
-      unitConfig =
-        optionalAttrs (config.requires != [])
-          { Requires = toString config.requires; }
-        // optionalAttrs (config.wants != [])
-          { Wants = toString config.wants; }
-        // optionalAttrs (config.after != [])
-          { After = toString config.after; }
-        // optionalAttrs (config.before != [])
-          { Before = toString config.before; }
-        // optionalAttrs (config.bindsTo != [])
-          { BindsTo = toString config.bindsTo; }
-        // optionalAttrs (config.partOf != [])
-          { PartOf = toString config.partOf; }
-        // optionalAttrs (config.conflicts != [])
-          { Conflicts = toString config.conflicts; }
-        // optionalAttrs (config.requisite != [])
-          { Requisite = toString config.requisite; }
-        // optionalAttrs (config.restartTriggers != [])
-          { X-Restart-Triggers = toString config.restartTriggers; }
-        // optionalAttrs (config.reloadTriggers != [])
-          { X-Reload-Triggers = toString config.reloadTriggers; }
-        // optionalAttrs (config.description != "") {
-          Description = config.description; }
-        // optionalAttrs (config.documentation != []) {
-          Documentation = toString config.documentation; }
-        // optionalAttrs (config.onFailure != []) {
-          OnFailure = toString config.onFailure; }
-        // optionalAttrs (options.startLimitIntervalSec.isDefined) {
-          StartLimitIntervalSec = toString config.startLimitIntervalSec;
-        } // optionalAttrs (options.startLimitBurst.isDefined) {
-          StartLimitBurst = toString config.startLimitBurst;
-        };
-    };
-  };
-
-  serviceConfig = { name, config, ... }: {
-    config = mkMerge
-      [ { # Default path for systemd services.  Should be quite minimal.
-          path = mkAfter
-            [ pkgs.coreutils
-              pkgs.findutils
-              pkgs.gnugrep
-              pkgs.gnused
-              systemd
-            ];
-          environment.PATH = "${makeBinPath config.path}:${makeSearchPathOutput "bin" "sbin" config.path}";
-        }
-        (mkIf (config.preStart != "")
-          { serviceConfig.ExecStartPre =
-              [ (makeJobScript "${name}-pre-start" config.preStart) ];
-          })
-        (mkIf (config.script != "")
-          { serviceConfig.ExecStart =
-              makeJobScript "${name}-start" config.script + " " + config.scriptArgs;
-          })
-        (mkIf (config.postStart != "")
-          { serviceConfig.ExecStartPost =
-              [ (makeJobScript "${name}-post-start" config.postStart) ];
-          })
-        (mkIf (config.reload != "")
-          { serviceConfig.ExecReload =
-              makeJobScript "${name}-reload" config.reload;
-          })
-        (mkIf (config.preStop != "")
-          { serviceConfig.ExecStop =
-              makeJobScript "${name}-pre-stop" config.preStop;
-          })
-        (mkIf (config.postStop != "")
-          { serviceConfig.ExecStopPost =
-              makeJobScript "${name}-post-stop" config.postStop;
-          })
-      ];
-  };
-
-  mountConfig = { config, ... }: {
-    config = {
-      mountConfig =
-        { What = config.what;
-          Where = config.where;
-        } // optionalAttrs (config.type != "") {
-          Type = config.type;
-        } // optionalAttrs (config.options != "") {
-          Options = config.options;
-        };
-    };
-  };
-
-  automountConfig = { config, ... }: {
-    config = {
-      automountConfig =
-        { Where = config.where;
-        };
-    };
-  };
-
-  commonUnitText = def: ''
-      [Unit]
-      ${attrsToSection def.unitConfig}
-    '';
-
-  targetToUnit = name: def:
-    { inherit (def) aliases wantedBy requiredBy enable;
-      text =
-        ''
-          [Unit]
-          ${attrsToSection def.unitConfig}
-        '';
-    };
-
-  serviceToUnit = name: def:
-    { inherit (def) aliases wantedBy requiredBy enable;
-      text = commonUnitText def +
-        ''
-          [Service]
-          ${let env = cfg.globalEnvironment // def.environment;
-            in concatMapStrings (n:
-              let s = optionalString (env.${n} != null)
-                "Environment=${builtins.toJSON "${n}=${env.${n}}"}\n";
-              # systemd max line length is now 1MiB
-              # https://github.com/systemd/systemd/commit/e6dde451a51dc5aaa7f4d98d39b8fe735f73d2af
-              in if stringLength s >= 1048576 then throw "The value of the environment variable ‘${n}’ in systemd service ‘${name}.service’ is too long." else s) (attrNames env)}
-          ${if def.reloadIfChanged then ''
-            X-ReloadIfChanged=true
-          '' else if !def.restartIfChanged then ''
-            X-RestartIfChanged=false
-          '' else ""}
-          ${optionalString (!def.stopIfChanged) "X-StopIfChanged=false"}
-          ${attrsToSection def.serviceConfig}
-        '';
-    };
-
-  socketToUnit = name: def:
-    { inherit (def) aliases wantedBy requiredBy enable;
-      text = commonUnitText def +
-        ''
-          [Socket]
-          ${attrsToSection def.socketConfig}
-          ${concatStringsSep "\n" (map (s: "ListenStream=${s}") def.listenStreams)}
-          ${concatStringsSep "\n" (map (s: "ListenDatagram=${s}") def.listenDatagrams)}
-        '';
-    };
-
-  timerToUnit = name: def:
-    { inherit (def) aliases wantedBy requiredBy enable;
-      text = commonUnitText def +
-        ''
-          [Timer]
-          ${attrsToSection def.timerConfig}
-        '';
-    };
-
-  pathToUnit = name: def:
-    { inherit (def) aliases wantedBy requiredBy enable;
-      text = commonUnitText def +
-        ''
-          [Path]
-          ${attrsToSection def.pathConfig}
-        '';
-    };
-
-  mountToUnit = name: def:
-    { inherit (def) aliases wantedBy requiredBy enable;
-      text = commonUnitText def +
-        ''
-          [Mount]
-          ${attrsToSection def.mountConfig}
-        '';
-    };
-
-  automountToUnit = name: def:
-    { inherit (def) aliases wantedBy requiredBy enable;
-      text = commonUnitText def +
-        ''
-          [Automount]
-          ${attrsToSection def.automountConfig}
-        '';
-    };
-
-  sliceToUnit = name: def:
-    { inherit (def) aliases wantedBy requiredBy enable;
-      text = commonUnitText def +
-        ''
-          [Slice]
-          ${attrsToSection def.sliceConfig}
-        '';
-    };
 
   logindHandlerType = types.enum [
     "ignore" "poweroff" "reboot" "halt" "kexec" "suspend"
diff --git a/nixos/modules/tasks/auto-upgrade.nix b/nixos/modules/tasks/auto-upgrade.nix
index b931b27ad81..1404dcbaf7c 100644
--- a/nixos/modules/tasks/auto-upgrade.nix
+++ b/nixos/modules/tasks/auto-upgrade.nix
@@ -80,6 +80,7 @@ in {
           Reboot the system into the new generation instead of a switch
           if the new generation uses a different kernel, kernel modules
           or initrd than the booted system.
+          See <option>rebootWindow</option> for configuring the times at which a reboot is allowed.
         '';
       };
 
@@ -96,6 +97,32 @@ in {
         '';
       };
 
+      rebootWindow = mkOption {
+        description = ''
+          Define a lower and upper time value (in HH:MM format) which
+          constitute a time window during which reboots are allowed after an upgrade.
+          This option only has an effect when <option>allowReboot</option> is enabled.
+          The default value of <literal>null</literal> means that reboots are allowed at any time.
+        '';
+        default = null;
+        example = { lower = "01:00"; upper = "05:00"; };
+        type = with types; nullOr (submodule {
+          options = {
+            lower = mkOption {
+              description = "Lower limit of the reboot window";
+              type = types.strMatching "[[:digit:]]{2}:[[:digit:]]{2}";
+              example = "01:00";
+            };
+
+            upper = mkOption {
+              description = "Upper limit of the reboot window";
+              type = types.strMatching "[[:digit:]]{2}:[[:digit:]]{2}";
+              example = "05:00";
+            };
+          };
+        });
+      };
+
     };
 
   };
@@ -110,12 +137,10 @@ in {
     }];
 
     system.autoUpgrade.flags = (if cfg.flake == null then
-        [ "--no-build-output" ] ++ (if cfg.channel == null then
-          [ "--upgrade" ]
-        else [
+        [ "--no-build-output" ] ++ optionals (cfg.channel != null) [
           "-I"
           "nixpkgs=${cfg.channel}/nixexprs.tar.xz"
-        ])
+        ]
       else
         [ "--flake ${cfg.flake}" ]);
 
@@ -143,19 +168,52 @@ in {
       ];
 
       script = let
-        nixos-rebuild =
-          "${config.system.build.nixos-rebuild}/bin/nixos-rebuild";
+        nixos-rebuild = "${config.system.build.nixos-rebuild}/bin/nixos-rebuild";
+        date     = "${pkgs.coreutils}/bin/date";
+        readlink = "${pkgs.coreutils}/bin/readlink";
+        shutdown = "${pkgs.systemd}/bin/shutdown";
+        upgradeFlag = optional (cfg.channel == null) "--upgrade";
       in if cfg.allowReboot then ''
-        ${nixos-rebuild} boot ${toString cfg.flags}
-        booted="$(readlink /run/booted-system/{initrd,kernel,kernel-modules})"
-        built="$(readlink /nix/var/nix/profiles/system/{initrd,kernel,kernel-modules})"
-        if [ "$booted" = "$built" ]; then
+        ${nixos-rebuild} boot ${toString (cfg.flags ++ upgradeFlag)}
+        booted="$(${readlink} /run/booted-system/{initrd,kernel,kernel-modules})"
+        built="$(${readlink} /nix/var/nix/profiles/system/{initrd,kernel,kernel-modules})"
+
+        ${optionalString (cfg.rebootWindow != null) ''
+          current_time="$(${date} +%H:%M)"
+
+          lower="${cfg.rebootWindow.lower}"
+          upper="${cfg.rebootWindow.upper}"
+
+          if [[ "''${lower}" < "''${upper}" ]]; then
+            if [[ "''${current_time}" > "''${lower}" ]] && \
+               [[ "''${current_time}" < "''${upper}" ]]; then
+              do_reboot="true"
+            else
+              do_reboot="false"
+            fi
+          else
+            # lower > upper, so we are crossing midnight (e.g. lower=23h, upper=6h)
+            # we want to reboot if cur > 23h or cur < 6h
+            if [[ "''${current_time}" < "''${upper}" ]] || \
+               [[ "''${current_time}" > "''${lower}" ]]; then
+              do_reboot="true"
+            else
+              do_reboot="false"
+            fi
+          fi
+        ''}
+
+        if [ "''${booted}" = "''${built}" ]; then
           ${nixos-rebuild} switch ${toString cfg.flags}
+        ${optionalString (cfg.rebootWindow != null) ''
+          elif [ "''${do_reboot}" != true ]; then
+            echo "Outside of configured reboot window, skipping."
+        ''}
         else
-          /run/current-system/sw/bin/shutdown -r +1
+          ${shutdown} -r +1
         fi
       '' else ''
-        ${nixos-rebuild} switch ${toString cfg.flags}
+        ${nixos-rebuild} switch ${toString (cfg.flags ++ upgradeFlag)}
       '';
 
       startAt = cfg.dates;
@@ -167,3 +225,4 @@ in {
   };
 
 }
+
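Putting the new pieces together, an upgrade that may only reboot at night could be configured as (values taken from the option's own example):

    system.autoUpgrade = {
      enable = true;
      allowReboot = true;
      rebootWindow = {
        lower = "01:00";
        upper = "05:00";
      };
    };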
diff --git a/nixos/modules/tasks/network-interfaces.nix b/nixos/modules/tasks/network-interfaces.nix
index 06117ab451d..01980b80f1c 100644
--- a/nixos/modules/tasks/network-interfaces.nix
+++ b/nixos/modules/tasks/network-interfaces.nix
@@ -1021,6 +1021,12 @@ in
             dev = "enp4s0f0";
             type = "tap";
           };
+          gre6Tunnel = {
+            remote = "fd7a:5634::1";
+            local = "fd7a:5634::2";
+            dev = "enp4s0f0";
+            type = "tun6";
+          };
         }
       '';
       description = ''
@@ -1058,10 +1064,15 @@ in
           };
 
           type = mkOption {
-            type = with types; enum [ "tun" "tap" ];
+            type = with types; enum [ "tun" "tap" "tun6" "tap6" ];
             default = "tap";
             example = "tap";
-            apply = v: if v == "tun" then "gre" else "gretap";
+            apply = v: {
+              tun = "gre";
+              tap = "gretap";
+              tun6 = "ip6gre";
+              tap6 = "ip6gretap";
+            }.${v};
             description = ''
              Whether the tunnel routes layer 2 (tap/tap6) or layer 3 (tun/tun6) traffic.
             '';
diff --git a/nixos/tests/all-tests.nix b/nixos/tests/all-tests.nix
index 6f4c6213595..474bb423379 100644
--- a/nixos/tests/all-tests.nix
+++ b/nixos/tests/all-tests.nix
@@ -189,9 +189,9 @@ in
   grocy = handleTest ./grocy.nix {};
   grub = handleTest ./grub.nix {};
   gvisor = handleTest ./gvisor.nix {};
-  hadoop.all = handleTestOn [ "x86_64-linux" "aarch64-linux" ] ./hadoop/hadoop.nix {};
-  hadoop.hdfs = handleTestOn [ "x86_64-linux" "aarch64-linux" ] ./hadoop/hdfs.nix {};
-  hadoop.yarn = handleTestOn [ "x86_64-linux" "aarch64-linux" ] ./hadoop/yarn.nix {};
+  hadoop = import ./hadoop { inherit handleTestOn; package = pkgs.hadoop; };
+  hadoop_3_2 = import ./hadoop { inherit handleTestOn; package = pkgs.hadoop_3_2; };
+  hadoop2 = import ./hadoop { inherit handleTestOn; package = pkgs.hadoop2; };
   haka = handleTest ./haka.nix {};
   haproxy = handleTest ./haproxy.nix {};
   hardened = handleTest ./hardened.nix {};
@@ -286,6 +286,7 @@ in
   mailhog = handleTest ./mailhog.nix {};
   man = handleTest ./man.nix {};
   mariadb-galera = handleTest ./mysql/mariadb-galera.nix {};
+  mastodon = handleTestOn ["x86_64-linux" "i686-linux" "aarch64-linux"] ./web-apps/mastodon.nix {};
   matomo = handleTest ./matomo.nix {};
   matrix-appservice-irc = handleTest ./matrix-appservice-irc.nix {};
   matrix-conduit = handleTest ./matrix-conduit.nix {};
@@ -521,6 +522,7 @@ in
   telegraf = handleTest ./telegraf.nix {};
   teleport = handleTest ./teleport.nix {};
   thelounge = handleTest ./thelounge.nix {};
+  terminal-emulators = handleTest ./terminal-emulators.nix {};
   tiddlywiki = handleTest ./tiddlywiki.nix {};
   tigervnc = handleTest ./tigervnc.nix {};
   timezone = handleTest ./timezone.nix {};
diff --git a/nixos/tests/hadoop/default.nix b/nixos/tests/hadoop/default.nix
new file mode 100644
index 00000000000..d2a97cbeffb
--- /dev/null
+++ b/nixos/tests/hadoop/default.nix
@@ -0,0 +1,7 @@
+{ handleTestOn, package, ... }:
+
+{
+  all = handleTestOn [ "x86_64-linux" "aarch64-linux" ] ./hadoop.nix { inherit package; };
+  hdfs = handleTestOn [ "x86_64-linux" "aarch64-linux" ] ./hdfs.nix { inherit package; };
+  yarn = handleTestOn [ "x86_64-linux" "aarch64-linux" ] ./yarn.nix { inherit package; };
+}
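Each returned attrset lands under the matching name in all-tests.nix, so single variants can be built directly; assuming the conventional nixosTests exposure in the top-level package set, something like:

    nix-build -A nixosTests.hadoop.all          # combined HA test, default package
    nix-build -A nixosTests.hadoop_3_2.hdfs     # HDFS-only test against hadoop_3_2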
diff --git a/nixos/tests/hadoop/hadoop.nix b/nixos/tests/hadoop/hadoop.nix
index 48737debab5..b132f4fa58b 100644
--- a/nixos/tests/hadoop/hadoop.nix
+++ b/nixos/tests/hadoop/hadoop.nix
@@ -1,121 +1,148 @@
 # This test is very comprehensive. It tests whether all hadoop services work well with each other.
 # Run this when updating the Hadoop package or making significant changes to the hadoop module.
 # For a more basic test, see hdfs.nix and yarn.nix
-import ../make-test-python.nix ({pkgs, ...}: {
-
-  nodes = let
-    package = pkgs.hadoop;
-    coreSite = {
-      "fs.defaultFS" = "hdfs://ns1";
-    };
-    hdfsSite = {
-      "dfs.namenode.rpc-bind-host" = "0.0.0.0";
-      "dfs.namenode.http-bind-host" = "0.0.0.0";
-      "dfs.namenode.servicerpc-bind-host" = "0.0.0.0";
-
-      # HA Quorum Journal Manager configuration
-      "dfs.nameservices" = "ns1";
-      "dfs.ha.namenodes.ns1" = "nn1,nn2";
-      "dfs.namenode.shared.edits.dir.ns1.nn1" = "qjournal://jn1:8485;jn2:8485;jn3:8485/ns1";
-      "dfs.namenode.shared.edits.dir.ns1.nn2" = "qjournal://jn1:8485;jn2:8485;jn3:8485/ns1";
-      "dfs.namenode.rpc-address.ns1.nn1" = "nn1:8020";
-      "dfs.namenode.rpc-address.ns1.nn2" = "nn2:8020";
-      "dfs.namenode.servicerpc-address.ns1.nn1" = "nn1:8022";
-      "dfs.namenode.servicerpc-address.ns1.nn2" = "nn2:8022";
-      "dfs.namenode.http-address.ns1.nn1" = "nn1:9870";
-      "dfs.namenode.http-address.ns1.nn2" = "nn2:9870";
-
-      # Automatic failover configuration
-      "dfs.client.failover.proxy.provider.ns1" = "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider";
-      "dfs.ha.automatic-failover.enabled.ns1" = "true";
-      "dfs.ha.fencing.methods" = "shell(true)";
-      "ha.zookeeper.quorum" = "zk1:2181";
-    };
-    yarnSiteHA = {
-      "yarn.resourcemanager.zk-address" = "zk1:2181";
-      "yarn.resourcemanager.ha.enabled" = "true";
-      "yarn.resourcemanager.ha.rm-ids" = "rm1,rm2";
-      "yarn.resourcemanager.hostname.rm1" = "rm1";
-      "yarn.resourcemanager.hostname.rm2" = "rm2";
-      "yarn.resourcemanager.ha.automatic-failover.enabled" = "true";
-      "yarn.resourcemanager.cluster-id" = "cluster1";
-      # yarn.resourcemanager.webapp.address needs to be defined even though yarn.resourcemanager.hostname is set. This shouldn't be necessary, but there's a bug in
-      # hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmFilterInitializer.java:70
-      # that causes AM containers to fail otherwise.
-      "yarn.resourcemanager.webapp.address.rm1" = "rm1:8088";
-      "yarn.resourcemanager.webapp.address.rm2" = "rm2:8088";
-    };
-  in {
-    zk1 = { ... }: {
-      services.zookeeper.enable = true;
-      networking.firewall.allowedTCPPorts = [ 2181 ];
-    };
-
-    # HDFS cluster
-    nn1 = {pkgs, options, ...}: {
-      services.hadoop = {
-        inherit package coreSite hdfsSite;
-        hdfs.namenode.enable = true;
-        hdfs.zkfc.enable = true;
+import ../make-test-python.nix ({ package, ... }: {
+  name = "hadoop-combined";
+
+  nodes =
+    let
+      coreSite = {
+        "fs.defaultFS" = "hdfs://ns1";
+      };
+      hdfsSite = {
+        # HA Quorum Journal Manager configuration
+        "dfs.nameservices" = "ns1";
+        "dfs.ha.namenodes.ns1" = "nn1,nn2";
+        "dfs.namenode.shared.edits.dir.ns1" = "qjournal://jn1:8485;jn2:8485;jn3:8485/ns1";
+        "dfs.namenode.rpc-address.ns1.nn1" = "nn1:8020";
+        "dfs.namenode.rpc-address.ns1.nn2" = "nn2:8020";
+        "dfs.namenode.servicerpc-address.ns1.nn1" = "nn1:8022";
+        "dfs.namenode.servicerpc-address.ns1.nn2" = "nn2:8022";
+        "dfs.namenode.http-address.ns1.nn1" = "nn1:9870";
+        "dfs.namenode.http-address.ns1.nn2" = "nn2:9870";
+
+        # Automatic failover configuration
+        "dfs.client.failover.proxy.provider.ns1" = "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider";
+        "dfs.ha.automatic-failover.enabled.ns1" = "true";
+        "dfs.ha.fencing.methods" = "shell(true)";
+        "ha.zookeeper.quorum" = "zk1:2181";
+      };
+      yarnSite = {
+        "yarn.resourcemanager.zk-address" = "zk1:2181";
+        "yarn.resourcemanager.ha.enabled" = "true";
+        "yarn.resourcemanager.ha.rm-ids" = "rm1,rm2";
+        "yarn.resourcemanager.hostname.rm1" = "rm1";
+        "yarn.resourcemanager.hostname.rm2" = "rm2";
+        "yarn.resourcemanager.ha.automatic-failover.enabled" = "true";
+        "yarn.resourcemanager.cluster-id" = "cluster1";
+        # yarn.resourcemanager.webapp.address needs to be defined even though yarn.resourcemanager.hostname is set. This shouldn't be necessary, but there's a bug in
+        # hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmFilterInitializer.java:70
+        # that causes AM containers to fail otherwise.
+        "yarn.resourcemanager.webapp.address.rm1" = "rm1:8088";
+        "yarn.resourcemanager.webapp.address.rm2" = "rm2:8088";
+      };
+    in
+    {
+      zk1 = { ... }: {
+        services.zookeeper.enable = true;
+        networking.firewall.allowedTCPPorts = [ 2181 ];
       };
-    };
-    nn2 = {pkgs, options, ...}: {
-      services.hadoop = {
-        inherit package coreSite hdfsSite;
-        hdfs.namenode.enable = true;
-        hdfs.zkfc.enable = true;
+
+      # HDFS cluster
+      nn1 = { ... }: {
+        services.hadoop = {
+          inherit package coreSite hdfsSite;
+          hdfs.namenode = {
+            enable = true;
+            openFirewall = true;
+          };
+          hdfs.zkfc.enable = true;
+        };
+      };
+      nn2 = { ... }: {
+        services.hadoop = {
+          inherit package coreSite hdfsSite;
+          hdfs.namenode = {
+            enable = true;
+            openFirewall = true;
+          };
+          hdfs.zkfc.enable = true;
+        };
       };
-    };
 
-    jn1 = {pkgs, options, ...}: {
-      services.hadoop = {
-        inherit package coreSite hdfsSite;
-        hdfs.journalnode.enable = true;
+      jn1 = { ... }: {
+        services.hadoop = {
+          inherit package coreSite hdfsSite;
+          hdfs.journalnode = {
+            enable = true;
+            openFirewall = true;
+          };
+        };
       };
-    };
-    jn2 = {pkgs, options, ...}: {
-      services.hadoop = {
-        inherit package coreSite hdfsSite;
-        hdfs.journalnode.enable = true;
+      jn2 = { ... }: {
+        services.hadoop = {
+          inherit package coreSite hdfsSite;
+          hdfs.journalnode = {
+            enable = true;
+            openFirewall = true;
+          };
+        };
       };
-    };
-    jn3 = {pkgs, options, ...}: {
-      services.hadoop = {
-        inherit package coreSite hdfsSite;
-        hdfs.journalnode.enable = true;
+      jn3 = { ... }: {
+        services.hadoop = {
+          inherit package coreSite hdfsSite;
+          hdfs.journalnode = {
+            enable = true;
+            openFirewall = true;
+          };
+        };
+      };
+
+      dn1 = { ... }: {
+        services.hadoop = {
+          inherit package coreSite hdfsSite;
+          hdfs.datanode = {
+            enable = true;
+            openFirewall = true;
+          };
+        };
       };
-    };
 
-    dn1 = {pkgs, options, ...}: {
-      services.hadoop = {
-        inherit package coreSite hdfsSite;
-        hdfs.datanode.enable = true;
+      # YARN cluster
+      rm1 = { options, ... }: {
+        services.hadoop = {
+          inherit package coreSite hdfsSite yarnSite;
+          yarn.resourcemanager = {
+            enable = true;
+            openFirewall = true;
+          };
+        };
       };
-    };
-
-    # YARN cluster
-    rm1 = {pkgs, options, ...}: {
-      services.hadoop = {
-        inherit package coreSite hdfsSite;
-        yarnSite = options.services.hadoop.yarnSite.default // yarnSiteHA;
-        yarn.resourcemanager.enable = true;
+      rm2 = { options, ... }: {
+        services.hadoop = {
+          inherit package coreSite hdfsSite yarnSite;
+          yarn.resourcemanager = {
+            enable = true;
+            openFirewall = true;
+          };
+        };
       };
-    };
-    rm2 = {pkgs, options, ...}: {
-      services.hadoop = {
-        inherit package coreSite hdfsSite;
-        yarnSite = options.services.hadoop.yarnSite.default // yarnSiteHA;
-        yarn.resourcemanager.enable = true;
+      nm1 = { options, ... }: {
+        virtualisation.memorySize = 2048;
+        services.hadoop = {
+          inherit package coreSite hdfsSite yarnSite;
+          yarn.nodemanager = {
+            enable = true;
+            openFirewall = true;
+          };
+        };
       };
-    };
-    nm1 = {pkgs, options, ...}: {
-      virtualisation.memorySize = 2048;
-      services.hadoop = {
-        inherit package coreSite hdfsSite;
-        yarnSite = options.services.hadoop.yarnSite.default // yarnSiteHA;
-        yarn.nodemanager.enable = true;
+      client = { options, ... }: {
+        services.hadoop = {
+          gatewayRole.enable = true;
+          inherit package coreSite hdfsSite yarnSite;
+        };
       };
-    };
   };
 
   testScript = ''
@@ -173,26 +200,26 @@ import ../make-test-python.nix ({pkgs, ...}: {
     # DN should have started by now, but confirm anyway
     dn1.wait_for_unit("hdfs-datanode")
     # Print states of namenodes
-    dn1.succeed("sudo -u hdfs hdfs haadmin -getAllServiceState | systemd-cat")
+    client.succeed("sudo -u hdfs hdfs haadmin -getAllServiceState | systemd-cat")
     # Wait for cluster to exit safemode
-    dn1.succeed("sudo -u hdfs hdfs dfsadmin -safemode wait")
-    dn1.succeed("sudo -u hdfs hdfs haadmin -getAllServiceState | systemd-cat")
+    client.succeed("sudo -u hdfs hdfs dfsadmin -safemode wait")
+    client.succeed("sudo -u hdfs hdfs haadmin -getAllServiceState | systemd-cat")
     # test R/W
-    dn1.succeed("echo testfilecontents | sudo -u hdfs hdfs dfs -put - /testfile")
-    assert "testfilecontents" in dn1.succeed("sudo -u hdfs hdfs dfs -cat /testfile")
+    client.succeed("echo testfilecontents | sudo -u hdfs hdfs dfs -put - /testfile")
+    assert "testfilecontents" in client.succeed("sudo -u hdfs hdfs dfs -cat /testfile")
 
     # Test NN failover
     nn1.succeed("systemctl stop hdfs-namenode")
-    assert "active" in dn1.succeed("sudo -u hdfs hdfs haadmin -getAllServiceState")
-    dn1.succeed("sudo -u hdfs hdfs haadmin -getAllServiceState | systemd-cat")
-    assert "testfilecontents" in dn1.succeed("sudo -u hdfs hdfs dfs -cat /testfile")
+    assert "active" in client.succeed("sudo -u hdfs hdfs haadmin -getAllServiceState")
+    client.succeed("sudo -u hdfs hdfs haadmin -getAllServiceState | systemd-cat")
+    assert "testfilecontents" in client.succeed("sudo -u hdfs hdfs dfs -cat /testfile")
 
     nn1.succeed("systemctl start hdfs-namenode")
     nn1.wait_for_open_port(9870)
     nn1.wait_for_open_port(8022)
     nn1.wait_for_open_port(8020)
-    assert "standby" in dn1.succeed("sudo -u hdfs hdfs haadmin -getAllServiceState")
-    dn1.succeed("sudo -u hdfs hdfs haadmin -getAllServiceState | systemd-cat")
+    assert "standby" in client.succeed("sudo -u hdfs hdfs haadmin -getAllServiceState")
+    client.succeed("sudo -u hdfs hdfs haadmin -getAllServiceState | systemd-cat")
 
     #### YARN tests ####
 
@@ -208,21 +235,21 @@ import ../make-test-python.nix ({pkgs, ...}: {
     nm1.wait_for_unit("yarn-nodemanager")
     nm1.wait_for_open_port(8042)
     nm1.wait_for_open_port(8040)
-    nm1.wait_until_succeeds("yarn node -list | grep Nodes:1")
-    nm1.succeed("sudo -u yarn yarn rmadmin -getAllServiceState | systemd-cat")
-    nm1.succeed("sudo -u yarn yarn node -list | systemd-cat")
+    client.wait_until_succeeds("yarn node -list | grep Nodes:1")
+    client.succeed("sudo -u yarn yarn rmadmin -getAllServiceState | systemd-cat")
+    client.succeed("sudo -u yarn yarn node -list | systemd-cat")
 
     # Test RM failover
     rm1.succeed("systemctl stop yarn-resourcemanager")
-    assert "standby" not in nm1.succeed("sudo -u yarn yarn rmadmin -getAllServiceState")
-    nm1.succeed("sudo -u yarn yarn rmadmin -getAllServiceState | systemd-cat")
+    assert "standby" not in client.succeed("sudo -u yarn yarn rmadmin -getAllServiceState")
+    client.succeed("sudo -u yarn yarn rmadmin -getAllServiceState | systemd-cat")
     rm1.succeed("systemctl start yarn-resourcemanager")
     rm1.wait_for_unit("yarn-resourcemanager")
     rm1.wait_for_open_port(8088)
-    assert "standby" in nm1.succeed("sudo -u yarn yarn rmadmin -getAllServiceState")
-    nm1.succeed("sudo -u yarn yarn rmadmin -getAllServiceState | systemd-cat")
+    assert "standby" in client.succeed("sudo -u yarn yarn rmadmin -getAllServiceState")
+    client.succeed("sudo -u yarn yarn rmadmin -getAllServiceState | systemd-cat")
 
-    assert "Estimated value of Pi is" in nm1.succeed("HADOOP_USER_NAME=hdfs yarn jar $(readlink $(which yarn) | sed -r 's~bin/yarn~lib/hadoop-*/share/hadoop/mapreduce/hadoop-mapreduce-examples-*.jar~g') pi 2 10")
-    assert "SUCCEEDED" in nm1.succeed("yarn application -list -appStates FINISHED")
+    assert "Estimated value of Pi is" in client.succeed("HADOOP_USER_NAME=hdfs yarn jar $(readlink $(which yarn) | sed -r 's~bin/yarn~lib/hadoop-*/share/hadoop/mapreduce/hadoop-mapreduce-examples-*.jar~g') pi 2 10")
+    assert "SUCCEEDED" in client.succeed("yarn application -list -appStates FINISHED")
   '';
 })
diff --git a/nixos/tests/hadoop/hdfs.nix b/nixos/tests/hadoop/hdfs.nix
index b63cbf48032..9415500463d 100644
--- a/nixos/tests/hadoop/hdfs.nix
+++ b/nixos/tests/hadoop/hdfs.nix
@@ -1,32 +1,46 @@
 # Test a minimal HDFS cluster with no HA
-import ../make-test-python.nix ({...}: {
-  nodes = {
-    namenode = {pkgs, ...}: {
+import ../make-test-python.nix ({ package, lib, ... }:
+with lib;
+{
+  name = "hadoop-hdfs";
+
+  nodes = let
+    coreSite = {
+      "fs.defaultFS" = "hdfs://namenode:8020";
+      "hadoop.proxyuser.httpfs.groups" = "*";
+      "hadoop.proxyuser.httpfs.hosts" = "*";
+    };
+    in {
+    namenode = { pkgs, ... }: {
       services.hadoop = {
-        package = pkgs.hadoop;
+        inherit package;
         hdfs = {
           namenode = {
             enable = true;
+            openFirewall = true;
             formatOnInit = true;
           };
-          httpfs.enable = true;
-        };
-        coreSite = {
-          "fs.defaultFS" = "hdfs://namenode:8020";
-          "hadoop.proxyuser.httpfs.groups" = "*";
-          "hadoop.proxyuser.httpfs.hosts" = "*";
+          httpfs = {
+            # The NixOS hadoop module only supports webHDFS on 3.3 and newer
+            enable = mkIf (versionAtLeast package.version "3.3") true;
+            openFirewall = true;
+          };
         };
+        inherit coreSite;
       };
     };
-    datanode = {pkgs, ...}: {
+    datanode = { pkgs, ... }: {
       services.hadoop = {
-        package = pkgs.hadoop;
-        hdfs.datanode.enable = true;
-        coreSite = {
-          "fs.defaultFS" = "hdfs://namenode:8020";
-          "hadoop.proxyuser.httpfs.groups" = "*";
-          "hadoop.proxyuser.httpfs.hosts" = "*";
+        inherit package;
+        hdfs.datanode = {
+          enable = true;
+          openFirewall = true;
+          dataDirs = [{
+            type = "DISK";
+            path = "/tmp/dn1";
+          }];
         };
+        inherit coreSite;
       };
     };
   };
@@ -37,21 +51,32 @@ import ../make-test-python.nix ({...}: {
     namenode.wait_for_unit("hdfs-namenode")
     namenode.wait_for_unit("network.target")
     namenode.wait_for_open_port(8020)
+    namenode.succeed("ss -tulpne | systemd-cat")
+    namenode.succeed("cat /etc/hadoop*/hdfs-site.xml | systemd-cat")
     namenode.wait_for_open_port(9870)
 
     datanode.wait_for_unit("hdfs-datanode")
     datanode.wait_for_unit("network.target")
+  '' + ( if versionAtLeast package.version "3" then ''
     datanode.wait_for_open_port(9864)
     datanode.wait_for_open_port(9866)
     datanode.wait_for_open_port(9867)
 
-    namenode.succeed("curl -f http://namenode:9870")
     datanode.succeed("curl -f http://datanode:9864")
+  '' else ''
+    datanode.wait_for_open_port(50075)
+    datanode.wait_for_open_port(50010)
+    datanode.wait_for_open_port(50020)
+
+    datanode.succeed("curl -f http://datanode:50075")
+  '' ) + ''
+    namenode.succeed("curl -f http://namenode:9870")
 
     datanode.succeed("sudo -u hdfs hdfs dfsadmin -safemode wait")
     datanode.succeed("echo testfilecontents | sudo -u hdfs hdfs dfs -put - /testfile")
     assert "testfilecontents" in datanode.succeed("sudo -u hdfs hdfs dfs -cat /testfile")
 
+  '' + optionalString ( versionAtLeast package.version "3.3" ) ''
     namenode.wait_for_unit("hdfs-httpfs")
     namenode.wait_for_open_port(14000)
     assert "testfilecontents" in datanode.succeed("curl -f \"http://namenode:14000/webhdfs/v1/testfile?user.name=hdfs&op=OPEN\" 2>&1")
diff --git a/nixos/tests/hadoop/yarn.nix b/nixos/tests/hadoop/yarn.nix
index 09bdb35791c..1bf8e3831f6 100644
--- a/nixos/tests/hadoop/yarn.nix
+++ b/nixos/tests/hadoop/yarn.nix
@@ -1,22 +1,30 @@
 # This only tests if YARN is able to start its services
-import ../make-test-python.nix ({...}: {
+import ../make-test-python.nix ({ package, ... }: {
+  name = "hadoop-yarn";
+
   nodes = {
-    resourcemanager = {pkgs, ...}: {
-      services.hadoop.package = pkgs.hadoop;
-      services.hadoop.yarn.resourcemanager.enable = true;
-      services.hadoop.yarnSite = {
-        "yarn.resourcemanager.scheduler.class" = "org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler";
+    resourcemanager = { ... }: {
+      services.hadoop = {
+        inherit package;
+        yarn.resourcemanager = {
+          enable = true;
+          openFirewall = true;
+        };
       };
     };
-    nodemanager = {pkgs, ...}: {
-      services.hadoop.package = pkgs.hadoop;
-      services.hadoop.yarn.nodemanager.enable = true;
-      services.hadoop.yarnSite = {
-        "yarn.resourcemanager.hostname" = "resourcemanager";
-        "yarn.nodemanager.log-dirs" = "/tmp/userlogs";
+    nodemanager = { options, lib, ... }: {
+      services.hadoop = {
+        inherit package;
+        yarn.nodemanager = {
+          enable = true;
+          openFirewall = true;
+        };
+        yarnSite = options.services.hadoop.yarnSite.default // {
+          "yarn.resourcemanager.hostname" = "resourcemanager";
+          "yarn.nodemanager.log-dirs" = "/tmp/userlogs";
+        };
       };
     };
-
   };
 
   testScript = ''
diff --git a/nixos/tests/networking.nix b/nixos/tests/networking.nix
index 8c9df19f2d5..b763cbd4665 100644
--- a/nixos/tests/networking.nix
+++ b/nixos/tests/networking.nix
@@ -498,6 +498,7 @@ let
         networking = {
           useNetworkd = networkd;
           useDHCP = false;
+          firewall.extraCommands = "ip6tables -A nixos-fw -p gre -j nixos-fw-accept";
         };
       };
     in {
@@ -506,7 +507,7 @@ let
         mkMerge [
           (node args)
           {
-            virtualisation.vlans = [ 1 2 ];
+            virtualisation.vlans = [ 1 2 4 ];
             networking = {
               greTunnels = {
                 greTunnel = {
@@ -515,12 +516,24 @@ let
                   dev = "eth2";
                   type = "tap";
                 };
+                gre6Tunnel = {
+                  local = "fd00:1234:5678:4::1";
+                  remote = "fd00:1234:5678:4::2";
+                  dev = "eth3";
+                  type = "tun6";
+                };
               };
               bridges.bridge.interfaces = [ "greTunnel" "eth1" ];
               interfaces.eth1.ipv4.addresses = mkOverride 0 [];
               interfaces.bridge.ipv4.addresses = mkOverride 0 [
                 { address = "192.168.1.1"; prefixLength = 24; }
               ];
+              interfaces.eth3.ipv6.addresses = [
+                { address = "fd00:1234:5678:4::1"; prefixLength = 64; }
+              ];
+              interfaces.gre6Tunnel.ipv6.addresses = mkOverride 0 [
+                { address = "fc00::1"; prefixLength = 64; }
+              ];
             };
           }
         ];
@@ -528,7 +541,7 @@ let
         mkMerge [
           (node args)
           {
-            virtualisation.vlans = [ 2 3 ];
+            virtualisation.vlans = [ 2 3 4 ];
             networking = {
               greTunnels = {
                 greTunnel = {
@@ -537,12 +550,24 @@ let
                   dev = "eth1";
                   type = "tap";
                 };
+                gre6Tunnel = {
+                  local = "fd00:1234:5678:4::2";
+                  remote = "fd00:1234:5678:4::1";
+                  dev = "eth3";
+                  type = "tun6";
+                };
               };
               bridges.bridge.interfaces = [ "greTunnel" "eth2" ];
               interfaces.eth2.ipv4.addresses = mkOverride 0 [];
               interfaces.bridge.ipv4.addresses = mkOverride 0 [
                 { address = "192.168.1.2"; prefixLength = 24; }
               ];
+              interfaces.eth3.ipv6.addresses = [
+                { address = "fd00:1234:5678:4::2"; prefixLength = 64; }
+              ];
+              interfaces.gre6Tunnel.ipv6.addresses = mkOverride 0 [
+                { address = "fc00::2"; prefixLength = 64; }
+              ];
             };
           }
         ];
@@ -562,6 +587,10 @@ let
               client1.wait_until_succeeds("ping -c 1 192.168.1.2")
 
               client2.wait_until_succeeds("ping -c 1 192.168.1.1")
+
+              client1.wait_until_succeeds("ping -c 1 fc00::2")
+
+              client2.wait_until_succeeds("ping -c 1 fc00::1")
         '';
     };
     vlan = let
diff --git a/nixos/tests/nextcloud/default.nix b/nixos/tests/nextcloud/default.nix
index 34d3c345354..b7b1c5c6600 100644
--- a/nixos/tests/nextcloud/default.nix
+++ b/nixos/tests/nextcloud/default.nix
@@ -18,4 +18,4 @@ foldl
     };
   })
 { }
-  [ 21 22 23 ]
+  [ 22 23 ]
diff --git a/nixos/tests/pleroma.nix b/nixos/tests/pleroma.nix
index bf3623fce38..90a9a251104 100644
--- a/nixos/tests/pleroma.nix
+++ b/nixos/tests/pleroma.nix
@@ -32,8 +32,7 @@ import ./make-test-python.nix ({ pkgs, ... }:
     # system one. Overriding this pretty bad default behaviour.
     export REQUESTS_CA_BUNDLE=/etc/ssl/certs/ca-certificates.crt
 
-    export TOOT_LOGIN_CLI_PASSWORD="jamy-password"
-    toot login_cli -i "pleroma.nixos.test" -e "jamy@nixos.test"
+    echo "jamy-password" | toot login_cli -i "pleroma.nixos.test" -e "jamy@nixos.test"
     echo "Login OK"
 
     # Send a toot then verify it's part of the public timeline
@@ -168,21 +167,6 @@ import ./make-test-python.nix ({ pkgs, ... }:
     cp key.pem cert.pem $out
   '';
 
-  /* Toot is preventing users from feeding login_cli a password non
-     interactively. While it makes sense most of the times, it's
-     preventing us to login in this non-interactive test. This patch
-     introduce a TOOT_LOGIN_CLI_PASSWORD env variable allowing us to
-     provide a password to toot login_cli
-
-     If https://github.com/ihabunek/toot/pull/180 gets merged at some
-     point, feel free to remove this patch. */
-  custom-toot = pkgs.toot.overrideAttrs(old:{
-    patches = [ (pkgs.fetchpatch {
-      url = "https://github.com/NinjaTrappeur/toot/commit/b4a4c30f41c0cb7e336714c2c4af9bc9bfa0c9f2.patch";
-      sha256 = "sha256-0xxNwjR/fStLjjUUhwzCCfrghRVts+fc+fvVJqVcaFg=";
-    }) ];
-  });
-
   hosts = nodes: ''
     ${nodes.pleroma.config.networking.primaryIPAddress} pleroma.nixos.test
     ${nodes.client.config.networking.primaryIPAddress} client.nixos.test
@@ -194,7 +178,7 @@ import ./make-test-python.nix ({ pkgs, ... }:
       security.pki.certificateFiles = [ "${tls-cert}/cert.pem" ];
       networking.extraHosts = hosts nodes;
       environment.systemPackages = with pkgs; [
-        custom-toot
+        toot
         send-toot
       ];
     };
diff --git a/nixos/tests/terminal-emulators.nix b/nixos/tests/terminal-emulators.nix
new file mode 100644
index 00000000000..60161b80b96
--- /dev/null
+++ b/nixos/tests/terminal-emulators.nix
@@ -0,0 +1,207 @@
+# Terminal emulators all present a pretty similar interface.
+# That gives us an opportunity to easily test their basic functionality with a single codebase.
+#
+# There are two tests run on each terminal emulator
+# - can it successfully execute a command passed on the cmdline?
+# - can it successfully display a colour?
+# the latter is used as a proxy for "can it display text?", without going through all the intricacies of OCR.
+#
+# 256-colour terminal mode is used to display the test colour, since it has a universally-applicable palette (unlike 8- and 16- colour, where the colours are implementation-defined), and it is widely supported (unlike 24-bit colour).
+#
+# Future work:
+# - Wayland support (both for testing the existing terminals, and for testing wayland-only terminals like foot and havoc)
+# - Test keyboard input? (skipped for now, to eliminate the possibility of race conditions and focus issues)
+
+{ system ? builtins.currentSystem,
+  config ? {},
+  pkgs ? import ../.. { inherit system config; }
+}:
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
+with pkgs.lib;
+
+let tests = {
+      alacritty.pkg = p: p.alacritty;
+
+      contour.pkg = p: p.contour;
+      contour.cmd = "contour $command";
+
+      cool-retro-term.pkg = p: p.cool-retro-term;
+      cool-retro-term.colourTest = false; # broken by gloss effect
+
+      ctx.pkg = p: p.ctx;
+      ctx.pinkValue = "#FE0065";
+
+      darktile.pkg = p: p.darktile;
+
+      eterm.pkg = p: p.eterm;
+      eterm.executable = "Eterm";
+      eterm.pinkValue = "#D40055";
+
+      germinal.pkg = p: p.germinal;
+
+      gnome-terminal.pkg = p: p.gnome.gnome-terminal;
+
+      guake.pkg = p: p.guake;
+      guake.cmd = "SHELL=$command guake --show";
+      guake.kill = true;
+
+      hyper.pkg = p: p.hyper;
+
+      kermit.pkg = p: p.kermit-terminal;
+
+      kgx.pkg = p: p.kgx;
+      kgx.cmd = "kgx -e $command";
+      kgx.kill = true;
+
+      kitty.pkg = p: p.kitty;
+      kitty.cmd = "kitty $command";
+
+      konsole.pkg = p: p.plasma5Packages.konsole;
+
+      lxterminal.pkg = p: p.lxterminal;
+
+      mate-terminal.pkg = p: p.mate.mate-terminal;
+      mate-terminal.cmd = "SHELL=$command mate-terminal --disable-factory"; # factory mode uses dbus, and we don't have a proper dbus session set up
+
+      mlterm.pkg = p: p.mlterm;
+
+      mrxvt.pkg = p: p.mrxvt;
+
+      qterminal.pkg = p: p.lxqt.qterminal;
+      qterminal.kill = true;
+
+      roxterm.pkg = p: p.roxterm;
+      roxterm.cmd = "roxterm -e $command";
+
+      sakura.pkg = p: p.sakura;
+
+      st.pkg = p: p.st;
+      st.kill = true;
+
+      stupidterm.pkg = p: p.stupidterm;
+      stupidterm.cmd = "stupidterm -- $command";
+
+      terminator.pkg = p: p.terminator;
+      terminator.cmd = "terminator -e $command";
+
+      terminology.pkg = p: p.enlightenment.terminology;
+      terminology.cmd = "SHELL=$command terminology --no-wizard=true";
+      terminology.colourTest = false; # broken by gloss effect
+
+      termite.pkg = p: p.termite;
+
+      termonad.pkg = p: p.termonad;
+
+      tilda.pkg = p: p.tilda;
+
+      tilix.pkg = p: p.tilix;
+      tilix.cmd = "tilix -e $command";
+
+      urxvt.pkg = p: p.rxvt-unicode;
+
+      wayst.pkg = p: p.wayst;
+      wayst.pinkValue = "#FF0066";
+
+      wezterm.pkg = p: p.wezterm;
+
+      xfce4-terminal.pkg = p: p.xfce.xfce4-terminal;
+
+      xterm.pkg = p: p.xterm;
+    };
+in mapAttrs (name: { pkg, executable ? name, cmd ? "SHELL=$command ${executable}", colourTest ? true, pinkValue ? "#FF0087", kill ? false }: makeTest
+{
+  name = "terminal-emulator-${name}";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ jjjollyjim ];
+  };
+
+  machine = { ... }:
+
+  {
+    imports = [ ./common/x11.nix ./common/user-account.nix ];
+
+    # Hyper (and any other electron-based terminals) won't run as root
+    test-support.displayManager.auto.user = "alice";
+
+    environment.systemPackages = [
+      (pkg pkgs)
+      (pkgs.writeShellScriptBin "report-success" ''
+        echo 1 > /tmp/term-ran-successfully
+        ${optionalString kill "pkill ${executable}"}
+      '')
+      (pkgs.writeShellScriptBin "display-colour" ''
+        # A 256-colour background colour code for pink, then spaces.
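+        # ("\e[48;5;198m": 48 = set background, 5 = 256-colour mode, 198 = palette index.
+        # In the standard xterm colour cube, index 198 is 16 + 36*5 + 6*0 + 2, i.e.
+        # rgb(255, 0, 135) = "#FF0087", the default pinkValue searched for below.)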
+        #
+        # Background is used rather than foreground to minimize the effect of anti-aliasing.
+        #
+        # Keep printing more in case the window is partially offscreen to the left, or the
+        # terminal needs a later update to redraw correctly after initialising the window (as with ctx).
+
+        while :
+        do
+            echo -ne "\e[48;5;198m                   "
+            sleep 0.5
+        done
+      '')
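+      # The test script runs commands as root, but some terminals (e.g. the
+      # electron-based ones) refuse to start as root, so drop to the test user;
+      # the wrapped script resolves the target script to an absolute path and
+      # splices it into the terminal-specific launch command.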
+      (pkgs.writeShellScriptBin "run-in-this-term" "sudo -u alice run-in-this-term-wrapped $1")
+
+      (pkgs.writeShellScriptBin "run-in-this-term-wrapped" "command=\"$(which \"$1\")\"; ${cmd}")
+    ];
+
+    # Helpful reminder to add this test to passthru.tests
+    warnings =
+      if !((pkg pkgs) ? passthru && (pkg pkgs).passthru ? tests)
+      then [ "The package for ${name} doesn't have a passthru.tests" ]
+      else [ ];
+  };
+
+  # We need imagemagick, though not tesseract
+  enableOCR = true;
+
+  testScript = { nodes, ... }: ''
+    with subtest("wait for x"):
+        start_all()
+        machine.wait_for_x()
+
+    with subtest("have the terminal run a command"):
+        # Run synchronously where possible, so a failing exit code fails the test; terminals we have to kill can't exit cleanly, hence "execute" for those.
+        machine.${if kill then "execute" else "succeed"}("run-in-this-term report-success")
+        machine.wait_for_file("/tmp/term-ran-successfully")
+    ${optionalString colourTest ''
+
+    import tempfile
+    import subprocess
+
+
+    def check_for_pink(final=False) -> bool:
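+        # Screendump the VM and ask ImageMagick for a histogram of the image's
+        # unique colours; the terminal has displayed our colour iff pinkValue
+        # appears in that list. ("final" is unused here, but the test driver's
+        # retry() passes it positionally on the last attempt, so the parameter
+        # must be accepted.)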
+        with tempfile.NamedTemporaryFile() as tmpin:
+            machine.send_monitor_command("screendump {}".format(tmpin.name))
+
+            cmd = 'convert {} -define histogram:unique-colors=true -format "%c" histogram:info:'.format(
+                tmpin.name
+            )
+            ret = subprocess.run(cmd, shell=True, capture_output=True)
+            if ret.returncode != 0:
+                raise Exception(
+                    "image analysis failed with exit code {}".format(ret.returncode)
+                )
+
+            text = ret.stdout.decode("utf-8")
+            return "${pinkValue}" in text
+
+
+    with subtest("ensuring no pink is present without the terminal"):
+        assert (
+            not check_for_pink()
+        ), "Pink was present on the screen before we even launched a terminal!"
+
+    with subtest("have the terminal display a colour"):
+        # We run this command in the background
+        machine.shell.send(b"(run-in-this-term display-colour |& systemd-cat -t terminal) &\n")
+
+        with machine.nested("Waiting for the screen to have pink on it:"):
+            retry(check_for_pink)
+  ''}'';
+}
+
+  ) tests
diff --git a/nixos/tests/web-apps/mastodon.nix b/nixos/tests/web-apps/mastodon.nix
new file mode 100644
index 00000000000..279a1c59169
--- /dev/null
+++ b/nixos/tests/web-apps/mastodon.nix
@@ -0,0 +1,170 @@
+import ../make-test-python.nix ({pkgs, ...}:
+let
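+  # A throwaway CA hierarchy for the test: a root certificate plus an intermediate
+  # that step-ca uses to answer the ACME requests from the mastodon node's nginx.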
+  test-certificates = pkgs.runCommandLocal "test-certificates" { } ''
+    mkdir -p $out
+    echo insecure-root-password > $out/root-password-file
+    echo insecure-intermediate-password > $out/intermediate-password-file
+    ${pkgs.step-cli}/bin/step certificate create "Example Root CA" $out/root_ca.crt $out/root_ca.key --password-file=$out/root-password-file --profile root-ca
+    ${pkgs.step-cli}/bin/step certificate create "Example Intermediate CA 1" $out/intermediate_ca.crt $out/intermediate_ca.key --password-file=$out/intermediate-password-file --ca-password-file=$out/root-password-file --profile intermediate-ca --ca $out/root_ca.crt --ca-key $out/root_ca.key
+  '';
+
+  hosts = ''
+    192.168.2.10 ca.local
+    192.168.2.11 mastodon.local
+  '';
+
+in
+{
+  name = "mastodon";
+  meta.maintainers = with pkgs.lib.maintainers; [ erictapen izorkin ];
+
+  nodes = {
+    ca = { pkgs, ... }: {
+      networking = {
+        interfaces.eth1 = {
+          ipv4.addresses = [
+            { address = "192.168.2.10"; prefixLength = 24; }
+          ];
+        };
+        extraHosts = hosts;
+      };
+      services.step-ca = {
+        enable = true;
+        address = "0.0.0.0";
+        port = 8443;
+        openFirewall = true;
+        intermediatePasswordFile = "${test-certificates}/intermediate-password-file";
+        settings = {
+          dnsNames = [ "ca.local" ];
+          root = "${test-certificates}/root_ca.crt";
+          crt = "${test-certificates}/intermediate_ca.crt";
+          key = "${test-certificates}/intermediate_ca.key";
+          db = {
+            type = "badger";
+            dataSource = "/var/lib/step-ca/db";
+          };
+          authority = {
+            provisioners = [
+              {
+                type = "ACME";
+                name = "acme";
+              }
+            ];
+          };
+        };
+      };
+    };
+
+    server = { pkgs, ... }: {
+      networking = {
+        interfaces.eth1 = {
+          ipv4.addresses = [
+            { address = "192.168.2.11"; prefixLength = 24; }
+          ];
+        };
+        extraHosts = hosts;
+        firewall.allowedTCPPorts = [ 80 443 ];
+      };
+
+      security = {
+        acme = {
+          acceptTerms = true;
+          defaults.server = "https://ca.local:8443/acme/acme/directory";
+          defaults.email = "mastodon@mastodon.local";
+        };
+        pki.certificateFiles = [ "${test-certificates}/root_ca.crt" ];
+      };
+
+      services.redis.servers.mastodon = {
+        enable = true;
+        bind = "127.0.0.1";
+        port = 31637;
+      };
+
+      services.mastodon = {
+        enable = true;
+        configureNginx = true;
+        localDomain = "mastodon.local";
+        enableUnixSocket = false;
+        redis = {
+          createLocally = true;
+          host = "127.0.0.1";
+          port = 31637;
+        };
+        database = {
+          createLocally = true;
+          host = "/run/postgresql";
+          port = 5432;
+        };
+        smtp = {
+          createLocally = false;
+          fromAddress = "mastodon@mastodon.local";
+        };
+        extraConfig = {
+          EMAIL_DOMAIN_ALLOWLIST = "example.com";
+        };
+      };
+    };
+
+    client = { pkgs, ... }: {
+      environment.systemPackages = [ pkgs.jq ];
+      networking = {
+        interfaces.eth1 = {
+          ipv4.addresses = [
+            { address = "192.168.2.12"; prefixLength = 24; }
+          ];
+        };
+        extraHosts = hosts;
+      };
+
+      security = {
+        pki.certificateFiles = [ "${test-certificates}/root_ca.crt" ];
+      };
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    ca.wait_for_unit("step-ca.service")
+    ca.wait_for_open_port(8443)
+
+    server.wait_for_unit("nginx.service")
+    server.wait_for_unit("redis-mastodon.service")
+    server.wait_for_unit("postgresql.service")
+    server.wait_for_unit("mastodon-sidekiq.service")
+    server.wait_for_unit("mastodon-streaming.service")
+    server.wait_for_unit("mastodon-web.service")
+    server.wait_for_open_port(55000)
+    server.wait_for_open_port(55001)
+
+    # Check Mastodon version from remote client
+    client.succeed("curl --fail https://mastodon.local/api/v1/instance | jq -r '.version' | grep '${pkgs.mastodon.version}'")
+
+    # Check using admin CLI
+    # Check Mastodon version
+    server.succeed("su - mastodon -s /bin/sh -c 'mastodon-env tootctl version' | grep '${pkgs.mastodon.version}'")
+
+    # Manage accounts
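+    # example.com is on EMAIL_DOMAIN_ALLOWLIST, so account creation should only
+    # succeed while the domain is not also on the e-mail domain blocklist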
+    server.succeed("su - mastodon -s /bin/sh -c 'mastodon-env tootctl email_domain_blocks add example.com'")
+    server.succeed("su - mastodon -s /bin/sh -c 'mastodon-env tootctl email_domain_blocks list' | grep 'example.com'")
+    server.fail("su - mastodon -s /bin/sh -c 'mastodon-env tootctl email_domain_blocks list' | grep 'mastodon.local'")
+    server.fail("su - mastodon -s /bin/sh -c 'mastodon-env tootctl accounts create alice --email=alice@example.com'")
+    server.succeed("su - mastodon -s /bin/sh -c 'mastodon-env tootctl email_domain_blocks remove example.com'")
+    server.succeed("su - mastodon -s /bin/sh -c 'mastodon-env tootctl accounts create bob --email=bob@example.com'")
+    server.succeed("su - mastodon -s /bin/sh -c 'mastodon-env tootctl accounts approve bob'")
+    server.succeed("su - mastodon -s /bin/sh -c 'mastodon-env tootctl accounts delete bob'")
+
+    # Manage IP access
+    server.succeed("su - mastodon -s /bin/sh -c 'mastodon-env tootctl ip_blocks add 192.168.0.0/16 --severity=no_access'")
+    server.succeed("su - mastodon -s /bin/sh -c 'mastodon-env tootctl ip_blocks export' | grep '192.168.0.0/16'")
+    server.fail("su - mastodon -s /bin/sh -c 'mastodon-env tootctl p_blocks export' | grep '172.16.0.0/16'")
+    client.fail("curl --fail https://mastodon.local/about")
+    server.succeed("su - mastodon -s /bin/sh -c 'mastodon-env tootctl ip_blocks remove 192.168.0.0/16'")
+    client.succeed("curl --fail https://mastodon.local/about")
+
+    ca.shutdown()
+    server.shutdown()
+    client.shutdown()
+  '';
+})
diff --git a/nixos/tests/web-apps/peertube.nix b/nixos/tests/web-apps/peertube.nix
index 38b31f6c332..706c598338e 100644
--- a/nixos/tests/web-apps/peertube.nix
+++ b/nixos/tests/web-apps/peertube.nix
@@ -120,6 +120,9 @@ import ../make-test-python.nix ({pkgs, ...}:
     # Check if PeerTube is running
     client.succeed("curl --fail http://peertube.local:9000/api/v1/config/about | jq -r '.instance.name' | grep 'PeerTube\ Test\ Server'")
 
+    # Check PeerTube CLI version
+    assert "${pkgs.peertube.version}" in server.succeed('su - peertube -s /bin/sh -c "peertube --version"')
+
     client.shutdown()
     server.shutdown()
     database.shutdown()
diff --git a/nixos/tests/wine.nix b/nixos/tests/wine.nix
index cc449864c76..8135cb90a59 100644
--- a/nixos/tests/wine.nix
+++ b/nixos/tests/wine.nix
@@ -3,7 +3,7 @@
 }:
 
 let
-  inherit (pkgs.lib) concatMapStrings listToAttrs;
+  inherit (pkgs.lib) concatMapStrings listToAttrs optionals optionalString;
   inherit (import ../lib/testing-python.nix { inherit system pkgs; }) makeTest;
 
   hello32 = "${pkgs.pkgsCross.mingw32.hello}/bin/hello.exe";
@@ -27,6 +27,9 @@ let
               "bash -c 'wine ${exe} 2> >(tee wine-stderr >&2)'"
           )
           assert 'Hello, world!' in greeting
+        ''
+        # only the full version contains Gecko, but the error is not printed reliably in other variants
+        + optionalString (variant == "full") ''
           machine.fail(
               "fgrep 'Could not find Wine Gecko. HTML rendering will be disabled.' wine-stderr"
           )
@@ -37,5 +40,9 @@ let
 
   variants = [ "base" "full" "minimal" "staging" "unstable" "wayland" ];
 
-in listToAttrs (map (makeWineTest "winePackages" [ hello32 ]) variants
-  ++ map (makeWineTest "wineWowPackages" [ hello32 hello64 ]) variants)
+in
+listToAttrs (
+  map (makeWineTest "winePackages" [ hello32 ]) variants
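+  # wineWowPackages (WoW64) builds both 32- and 64-bit Wine, so only test it on 64-bit hosts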
+  ++ optionals pkgs.stdenv.is64bit
+    (map (makeWineTest "wineWowPackages" [ hello32 hello64 ]) variants)
+)