author     Alyssa Ross <hi@alyssa.is>  2020-02-20 04:01:13 +0000
committer  Alyssa Ross <hi@alyssa.is>  2020-02-20 04:02:05 +0000
commit     515d3cfa24df96c9e134ac5b87c2396f81354551 (patch)
tree       5e67ec19079d3b44ada0c0ac88ccd411ac22070f /nixos
parent     372bd003cd2d95a181ccd069322d2229f72acd8a (diff)
parent     7bdc103ac8424e08f02f6e9344979d0a821caa62 (diff)
Merge remote-tracking branch 'nixpkgs/master' into master
Diffstat (limited to 'nixos')
-rw-r--r--  nixos/doc/manual/man-nixos-rebuild.xml | 59
-rw-r--r--  nixos/doc/manual/man-nixos-version.xml | 29
-rw-r--r--  nixos/doc/manual/release-notes/release-notes.xml | 1
-rw-r--r--  nixos/doc/manual/release-notes/rl-2003.xml | 40
-rw-r--r--  nixos/doc/manual/release-notes/rl-2009.xml | 80
-rw-r--r--  nixos/lib/test-driver/test-driver.py | 2
-rw-r--r--  nixos/lib/testing-python.nix | 15
-rw-r--r--  nixos/lib/testing.nix | 5
-rw-r--r--  nixos/modules/config/swap.nix | 2
-rw-r--r--  nixos/modules/config/xdg/portal.nix | 6
-rw-r--r--  nixos/modules/hardware/brightnessctl.nix | 31
-rw-r--r--  nixos/modules/installer/cd-dvd/channel.nix | 4
-rw-r--r--  nixos/modules/installer/cd-dvd/installation-cd-graphical-base.nix | 3
-rw-r--r--  nixos/modules/installer/tools/nix-fallback-paths.nix | 8
-rw-r--r--  nixos/modules/installer/tools/nixos-build-vms/build-vms.nix | 2
-rw-r--r--  nixos/modules/installer/tools/nixos-enter.sh | 18
-rw-r--r--  nixos/modules/installer/tools/nixos-rebuild.sh | 98
-rw-r--r--  nixos/modules/installer/tools/nixos-version.sh | 9
-rw-r--r--  nixos/modules/installer/tools/tools.nix | 9
-rw-r--r--  nixos/modules/misc/version.nix | 16
-rw-r--r--  nixos/modules/module-list.nix | 4
-rw-r--r--  nixos/modules/programs/sway.nix | 4
-rw-r--r--  nixos/modules/programs/zsh/zsh.nix | 3
-rw-r--r--  nixos/modules/rename.nix | 6
-rw-r--r--  nixos/modules/security/acme.nix | 173
-rw-r--r--  nixos/modules/security/acme.xml | 2
-rw-r--r--  nixos/modules/security/rngd.nix | 5
-rw-r--r--  nixos/modules/services/audio/alsa.nix | 6
-rw-r--r--  nixos/modules/services/audio/mopidy.nix | 4
-rw-r--r--  nixos/modules/services/continuous-integration/buildkite-agents.nix (renamed from nixos/modules/services/continuous-integration/buildkite-agent.nix) | 79
-rw-r--r--  nixos/modules/services/databases/postgresql.nix | 35
-rw-r--r--  nixos/modules/services/databases/redis.nix | 47
-rw-r--r--  nixos/modules/services/monitoring/heapster.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/statsd.nix | 2
-rw-r--r--  nixos/modules/services/networking/i2pd.nix | 22
-rw-r--r--  nixos/modules/services/networking/iwd.nix | 7
-rw-r--r--  nixos/modules/services/networking/knot.nix | 40
-rw-r--r--  nixos/modules/services/networking/pppd.nix | 4
-rw-r--r--  nixos/modules/services/networking/shorewall.nix | 8
-rw-r--r--  nixos/modules/services/networking/shorewall6.nix | 8
-rw-r--r--  nixos/modules/services/networking/supybot.nix | 2
-rw-r--r--  nixos/modules/services/web-apps/jirafeau.nix | 169
-rw-r--r--  nixos/modules/services/web-apps/mattermost.nix | 16
-rw-r--r--  nixos/modules/services/web-servers/caddy.nix | 18
-rw-r--r--  nixos/modules/services/x11/desktop-managers/mate.nix | 1
-rw-r--r--  nixos/modules/services/x11/desktop-managers/plasma5.nix | 165
-rw-r--r--  nixos/modules/services/x11/display-managers/default.nix | 1
-rw-r--r--  nixos/modules/services/x11/unclutter.nix | 2
-rw-r--r--  nixos/modules/services/x11/urxvtd.nix | 6
-rw-r--r--  nixos/modules/system/boot/kernel.nix | 7
-rw-r--r--  nixos/modules/system/boot/loader/grub/grub.nix | 6
-rw-r--r--  nixos/modules/tasks/encrypted-devices.nix | 2
-rw-r--r--  nixos/modules/tasks/filesystems.nix | 3
-rw-r--r--  nixos/modules/virtualisation/docker-containers.nix | 29
-rw-r--r--  nixos/release-combined.nix | 179
-rw-r--r--  nixos/release-small.nix | 38
-rw-r--r--  nixos/release.nix | 14
-rw-r--r--  nixos/tests/acme.nix | 119
-rw-r--r--  nixos/tests/all-tests.nix | 5
-rw-r--r--  nixos/tests/buildkite-agents.nix (renamed from nixos/tests/buildkite-agent.nix) | 19
-rw-r--r--  nixos/tests/common/letsencrypt/common.nix | 3
-rw-r--r--  nixos/tests/docker-containers.nix | 27
-rw-r--r--  nixos/tests/docker-tools.nix | 6
-rw-r--r--  nixos/tests/firefox.nix | 6
-rw-r--r--  nixos/tests/glusterfs.nix | 19
-rw-r--r--  nixos/tests/installed-tests/default.nix | 2
-rw-r--r--  nixos/tests/installed-tests/glib-testing.nix | 5
-rw-r--r--  nixos/tests/installed-tests/malcontent.nix | 5
-rw-r--r--  nixos/tests/installed-tests/xdg-desktop-portal.nix | 4
-rw-r--r--  nixos/tests/jirafeau.nix | 22
-rw-r--r--  nixos/tests/knot.nix | 15
-rw-r--r--  nixos/tests/krb5/deprecated-config.nix | 6
-rw-r--r--  nixos/tests/krb5/example-config.nix | 6
-rw-r--r--  nixos/tests/nfs/simple.nix | 14
-rw-r--r--  nixos/tests/nsd.nix | 52
-rw-r--r--  nixos/tests/openarena.nix | 84
-rw-r--r--  nixos/tests/orangefs.nix | 10
-rw-r--r--  nixos/tests/plotinus.nix | 27
-rw-r--r--  nixos/tests/postgresql-wal-receiver.nix | 19
-rw-r--r--  nixos/tests/run-in-machine.nix | 2
-rw-r--r--  nixos/tests/solr.nix | 50
-rw-r--r--  nixos/tests/tinydns.nix | 2
82 files changed, 1483 insertions(+), 602 deletions(-)
diff --git a/nixos/doc/manual/man-nixos-rebuild.xml b/nixos/doc/manual/man-nixos-rebuild.xml
index 495dbc8859b..f4f663b84f0 100644
--- a/nixos/doc/manual/man-nixos-rebuild.xml
+++ b/nixos/doc/manual/man-nixos-rebuild.xml
@@ -77,7 +77,14 @@
     <option>--builders</option> <replaceable>builder-spec</replaceable>
    </arg>
 
+   <sbr/>
+
+   <arg>
+    <option>--flake</option> <replaceable>flake-uri</replaceable>
+   </arg>
+
    <sbr />
+
    <arg>
     <group choice='req'>
     <arg choice='plain'>
@@ -129,14 +136,17 @@
   <title>Description</title>
 
   <para>
-   This command updates the system so that it corresponds to the configuration
-   specified in <filename>/etc/nixos/configuration.nix</filename>. Thus, every
-   time you modify <filename>/etc/nixos/configuration.nix</filename> or any
-   NixOS module, you must run <command>nixos-rebuild</command> to make the
-   changes take effect. It builds the new system in
-   <filename>/nix/store</filename>, runs its activation script, and stop and
-   (re)starts any system services if needed. Please note that user services need
-   to be started manually as they aren't detected by the activation script at the moment.
+   This command updates the system so that it corresponds to the
+   configuration specified in
+   <filename>/etc/nixos/configuration.nix</filename> or
+   <filename>/etc/nixos/flake.nix</filename>. Thus, every time you
+   modify the configuration or any other NixOS module, you must run
+   <command>nixos-rebuild</command> to make the changes take
+   effect. It builds the new system in
+   <filename>/nix/store</filename>, runs its activation script, and
+   stops and (re)starts any system services if needed. Please note that
+   user services need to be started manually as they aren't detected
+   by the activation script at the moment.
   </para>
 
   <para>
@@ -508,6 +518,24 @@
      </para>
     </listitem>
    </varlistentry>
+
+   <varlistentry>
+    <term>
+     <option>--flake</option> <replaceable>flake-uri</replaceable>[<replaceable>name</replaceable>]
+    </term>
+    <listitem>
+     <para>
+      Build the NixOS system from the specified flake. It defaults to
+      the directory containing the target of the symlink
+      <filename>/etc/nixos/flake.nix</filename>, if it exists. The
+      flake must contain an output named
+      <literal>nixosConfigurations.<replaceable>name</replaceable></literal>. If
+      <replaceable>name</replaceable> is omitted, it defaults to the
+      current host name.
+     </para>
+    </listitem>
+   </varlistentry>
+
   </variablelist>
 
   <para>
@@ -556,6 +584,21 @@
 
    <varlistentry>
     <term>
+     <filename>/etc/nixos/flake.nix</filename>
+    </term>
+    <listitem>
+     <para>
+      If this file exists, then <command>nixos-rebuild</command> will
+      use it as if the <option>--flake</option> option was given. This
+      file may be a symlink to a <filename>flake.nix</filename> in an
+      actual flake; thus <filename>/etc/nixos</filename> need not be a
+      flake.
+     </para>
+    </listitem>
+   </varlistentry>
+
+   <varlistentry>
+    <term>
      <filename>/run/current-system</filename>
     </term>
     <listitem>
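The --flake option documented above expects the flake to expose a nixosConfigurations output. A minimal sketch of an /etc/nixos/flake.nix, assuming a nixpkgs flake that provides lib.nixosSystem and using the illustrative host name "myhost":

    {
      inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";

      outputs = { self, nixpkgs }: {
        # `nixos-rebuild switch --flake /etc/nixos#myhost` builds this
        # configuration; if the attribute is omitted, the current host
        # name is used instead.
        nixosConfigurations.myhost = nixpkgs.lib.nixosSystem {
          system = "x86_64-linux";
          modules = [
            ./configuration.nix
            {
              # Optionally record the flake's Git revision; nixos-version
              # --json reports it as configurationRevision.
              system.configurationRevision = nixpkgs.lib.mkIf (self ? rev) self.rev;
            }
          ];
        };
      };
    }

Since /etc/nixos/flake.nix may itself be a symlink into a flake checkout elsewhere, a file like this could equally live in a user's own repository.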
diff --git a/nixos/doc/manual/man-nixos-version.xml b/nixos/doc/manual/man-nixos-version.xml
index e9ad8bddcac..aada08c5b4a 100644
--- a/nixos/doc/manual/man-nixos-version.xml
+++ b/nixos/doc/manual/man-nixos-version.xml
@@ -12,16 +12,22 @@
  </refnamediv>
  <refsynopsisdiv>
   <cmdsynopsis>
-   <command>nixos-version</command> 
+   <command>nixos-version</command>
    <arg>
     <option>--hash</option>
    </arg>
-    
+
    <arg>
     <option>--revision</option>
    </arg>
+
+   <arg>
+    <option>--json</option>
+   </arg>
+
   </cmdsynopsis>
  </refsynopsisdiv>
+
  <refsection>
   <title>Description</title>
   <para>
@@ -84,12 +90,16 @@
    </variablelist>
   </para>
  </refsection>
+
  <refsection>
   <title>Options</title>
+
   <para>
    This command accepts the following options:
   </para>
+
   <variablelist>
+
    <varlistentry>
     <term>
      <option>--hash</option>
@@ -107,6 +117,21 @@
      </para>
     </listitem>
    </varlistentry>
+
+   <varlistentry>
+    <term>
+     <option>--json</option>
+    </term>
+    <listitem>
+     <para>
+      Print a JSON representation of the versions of NixOS and the
+      top-level configuration flake.
+     </para>
+    </listitem>
+   </varlistentry>
+
   </variablelist>
+
  </refsection>
+
 </refentry>
diff --git a/nixos/doc/manual/release-notes/release-notes.xml b/nixos/doc/manual/release-notes/release-notes.xml
index 444862c5739..e2913b8a535 100644
--- a/nixos/doc/manual/release-notes/release-notes.xml
+++ b/nixos/doc/manual/release-notes/release-notes.xml
@@ -8,6 +8,7 @@
   This section lists the release notes for each stable version of NixOS and
   current unstable revision.
  </para>
+ <xi:include href="rl-2009.xml" />
  <xi:include href="rl-2003.xml" />
  <xi:include href="rl-1909.xml" />
  <xi:include href="rl-1903.xml" />
diff --git a/nixos/doc/manual/release-notes/rl-2003.xml b/nixos/doc/manual/release-notes/rl-2003.xml
index d7614cd3488..31f08d9da34 100644
--- a/nixos/doc/manual/release-notes/rl-2003.xml
+++ b/nixos/doc/manual/release-notes/rl-2003.xml
@@ -440,15 +440,19 @@ users.users.me =
    </listitem>
    <listitem>
     <para>
-      The <link linkend="opt-services.buildkite-agent.enable">Buildkite Agent</link>
-      module and corresponding packages have been updated to 3.x.
-      While doing so, the following options have been changed:
+      The <link linkend="opt-services.buildkite-agents">Buildkite
+      Agent</link> module and corresponding packages have been updated to
+      3.x, and to support multiple instances of the agent running at the
+      same time. This means you will have to rename
+      <literal>services.buildkite-agent</literal> to
+      <literal>services.buildkite-agents.&lt;name&gt;</literal>. Furthermore,
+      the following options have been changed:
     </para>
     <itemizedlist>
       <listitem>
        <para>
          <literal>services.buildkite-agent.meta-data</literal> has been renamed to
-         <link linkend="opt-services.buildkite-agent.tags">services.buildkite-agent.tags</link>,
+         <link linkend="opt-services.buildkite-agents">services.buildkite-agents.&lt;name&gt;.tags</link>,
          to match upstreams naming for 3.x.
          Its type has also changed - it now accepts an attrset of strings.
        </para>
@@ -464,13 +468,13 @@ users.users.me =
        <para>
          <literal>services.buildkite-agent.openssh.privateKeyPath</literal>
          has been renamed to
-         <link linkend="opt-services.buildkite-agent.privateSshKeyPath">buildkite-agent.privateSshKeyPath</link>,
+         <link linkend="opt-services.buildkite-agents">buildkite-agents.&lt;name&gt;.privateSshKeyPath</link>,
          as the whole <literal>openssh</literal> now only contained that single option.
        </para>
       </listitem>
       <listitem>
        <para>
-         <link linkend="opt-services.buildkite-agent.shell">services.buildkite-agent.shell</link>
+         <link linkend="opt-services.buildkite-agents">services.buildkite-agents.&lt;name&gt;.shell</link>
          has been introduced, allowing to specify a custom shell to be used.
        </para>
       </listitem>
@@ -621,6 +625,12 @@ auth required pam_succeed_if.so uid >= 1000 quiet
      to a fairly old snapshot  from the <package>gcc7</package>-branch.
     </para>
    </listitem>
+   <listitem>
+    <para>
+     The <citerefentry><refentrytitle>nixos-build-vms</refentrytitle><manvolnum>8</manvolnum>
+     </citerefentry>-script now uses the python test-driver.
+    </para>
+   </listitem>
   </itemizedlist>
  </section>
 
@@ -661,7 +671,23 @@ auth required pam_succeed_if.so uid >= 1000 quiet
        now uses the short rather than full version string.
      </para>
    </listitem>
-    <listitem>
+   <listitem>
+    <para>
+     The ACME module has switched from simp-le to <link xlink:href="https://github.com/go-acme/lego">lego</link>
+     which allows us to support DNS-01 challenges and wildcard certificates. The following options have been added:
+     <link linkend="opt-security.acme.acceptTerms">security.acme.acceptTerms</link>,
+     <link linkend="opt-security.acme.certs">security.acme.certs.&lt;name&gt;.dnsProvider</link>,
+     <link linkend="opt-security.acme.certs">security.acme.certs.&lt;name&gt;.credentialsFile</link>,
+     <link linkend="opt-security.acme.certs">security.acme.certs.&lt;name&gt;.dnsPropagationCheck</link>.
+     As well as this, the options <literal>security.acme.acceptTerms</literal> and either
+     <literal>security.acme.email</literal> or <literal>security.acme.certs.&lt;name&gt;.email</literal>
+     must be set in order to use the ACME module.
+     Certificates will be regenerated anew on the next renewal date. The credentials for simp-le are
+     preserved and thus it is possible to roll back to previous versions without breaking certificate
+     generation.
+    </para>
+   </listitem>
+   <listitem>
     <para>
     It is now possible to unlock LUKS-Encrypted file systems using a FIDO2 token
     via <option>boot.initrd.luks.fido2Support</option>.
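The buildkite-agent rename described above moves every agent definition under an attribute of services.buildkite-agents. A rough before/after sketch, with an agent named "builder" and a hypothetical SSH key path:

    {
      # 19.09-style (agent 2.x):
      # services.buildkite-agent = {
      #   enable = true;
      #   meta-data = "queue=default";
      #   openssh.privateKeyPath = "/run/keys/buildkite-ssh-key";
      # };

      # 20.03-style (agent 3.x); several agents may be defined side by side.
      services.buildkite-agents.builder = {
        tags = { queue = "default"; };
        privateSshKeyPath = "/run/keys/buildkite-ssh-key";
      };
    }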
diff --git a/nixos/doc/manual/release-notes/rl-2009.xml b/nixos/doc/manual/release-notes/rl-2009.xml
new file mode 100644
index 00000000000..d07b7cf49c3
--- /dev/null
+++ b/nixos/doc/manual/release-notes/rl-2009.xml
@@ -0,0 +1,80 @@
+<section xmlns="http://docbook.org/ns/docbook"
+         xmlns:xlink="http://www.w3.org/1999/xlink"
+         xmlns:xi="http://www.w3.org/2001/XInclude"
+         version="5.0"
+         xml:id="sec-release-20.09">
+ <title>Release 20.09 (“Nightingale”, 2020.09/??)</title>
+
+ <section xmlns="http://docbook.org/ns/docbook"
+         xmlns:xlink="http://www.w3.org/1999/xlink"
+         xmlns:xi="http://www.w3.org/2001/XInclude"
+         version="5.0"
+         xml:id="sec-release-20.09-highlights">
+  <title>Highlights</title>
+
+  <para>
+   In addition to numerous new and upgraded packages, this release has the
+   following highlights:
+  </para>
+
+  <itemizedlist>
+   <listitem>
+    <para>
+     Support is planned until the end of October 2020, handing over to 20.09.
+    </para>
+   </listitem>
+  </itemizedlist>
+ </section>
+
+ <section xmlns="http://docbook.org/ns/docbook"
+         xmlns:xlink="http://www.w3.org/1999/xlink"
+         xmlns:xi="http://www.w3.org/2001/XInclude"
+         version="5.0"
+         xml:id="sec-release-20.09-new-services">
+  <title>New Services</title>
+
+  <para>
+   The following new services were added since the last release:
+  </para>
+
+  <itemizedlist>
+   <listitem>
+    <para />
+   </listitem>
+  </itemizedlist>
+
+ </section>
+
+ <section xmlns="http://docbook.org/ns/docbook"
+         xmlns:xlink="http://www.w3.org/1999/xlink"
+         xmlns:xi="http://www.w3.org/2001/XInclude"
+         version="5.0"
+         xml:id="sec-release-20.09-incompatibilities">
+  <title>Backward Incompatibilities</title>
+
+  <para>
+   When upgrading from a previous release, please be aware of the following
+   incompatible changes:
+  </para>
+
+  <itemizedlist>
+   <listitem>
+    <para />
+   </listitem>
+  </itemizedlist>
+ </section>
+
+ <section xmlns="http://docbook.org/ns/docbook"
+         xmlns:xlink="http://www.w3.org/1999/xlink"
+         xmlns:xi="http://www.w3.org/2001/XInclude"
+         version="5.0"
+         xml:id="sec-release-20.09-notable-changes">
+  <title>Other Notable Changes</title>
+
+  <itemizedlist>
+   <listitem>
+    <para />
+   </listitem>
+  </itemizedlist>
+ </section>
+</section>
diff --git a/nixos/lib/test-driver/test-driver.py b/nixos/lib/test-driver/test-driver.py
index 2b8dffec7d5..c27947bc610 100644
--- a/nixos/lib/test-driver/test-driver.py
+++ b/nixos/lib/test-driver/test-driver.py
@@ -911,7 +911,7 @@ def subtest(name: str) -> Iterator[None]:
 if __name__ == "__main__":
     log = Logger()
 
-    vlan_nrs = list(dict.fromkeys(os.environ["VLANS"].split()))
+    vlan_nrs = list(dict.fromkeys(os.environ.get("VLANS", "").split()))
     vde_sockets = [create_vlan(v) for v in vlan_nrs]
     for nr, vde_socket, _, _ in vde_sockets:
         os.environ["QEMU_VDE_SOCKET_{}".format(nr)] = vde_socket
diff --git a/nixos/lib/testing-python.nix b/nixos/lib/testing-python.nix
index a7f6d792651..6663864f1e5 100644
--- a/nixos/lib/testing-python.nix
+++ b/nixos/lib/testing-python.nix
@@ -218,12 +218,12 @@ in rec {
       '';
 
       testScript = ''
-        startAll;
-        $client->waitForUnit("multi-user.target");
+        start_all()
+        client.wait_for_unit("multi-user.target")
         ${preBuild}
-        $client->succeed("env -i ${bash}/bin/bash ${buildrunner} /tmp/xchg/saved-env >&2");
+        client.succeed("env -i ${bash}/bin/bash ${buildrunner} /tmp/xchg/saved-env >&2")
         ${postBuild}
-        $client->succeed("sync"); # flush all data before pulling the plug
+        client.succeed("sync") # flush all data before pulling the plug
       '';
 
       vmRunCommand = writeText "vm-run" ''
@@ -263,9 +263,12 @@ in rec {
         { ... }:
         {
           inherit require;
+          imports = [
+            ../tests/common/auto.nix
+          ];
           virtualisation.memorySize = 1024;
           services.xserver.enable = true;
-          services.xserver.displayManager.auto.enable = true;
+          test-support.displayManager.auto.enable = true;
           services.xserver.displayManager.defaultSession = "none+icewm";
           services.xserver.windowManager.icewm.enable = true;
         };
@@ -274,7 +277,7 @@ in rec {
         machine = client;
         preBuild =
           ''
-            $client->waitForX;
+            client.wait_for_x()
           '';
       } // args);
 
diff --git a/nixos/lib/testing.nix b/nixos/lib/testing.nix
index c82abd1f990..7d6a5c0a290 100644
--- a/nixos/lib/testing.nix
+++ b/nixos/lib/testing.nix
@@ -250,9 +250,12 @@ in rec {
         { ... }:
         {
           inherit require;
+          imports = [
+            ../tests/common/auto.nix
+          ];
           virtualisation.memorySize = 1024;
           services.xserver.enable = true;
-          services.xserver.displayManager.auto.enable = true;
+          test-support.displayManager.auto.enable = true;
           services.xserver.displayManager.defaultSession = "none+icewm";
           services.xserver.windowManager.icewm.enable = true;
         };
diff --git a/nixos/modules/config/swap.nix b/nixos/modules/config/swap.nix
index d0fc0d4a3ea..adb4e229421 100644
--- a/nixos/modules/config/swap.nix
+++ b/nixos/modules/config/swap.nix
@@ -185,6 +185,8 @@ in
           { description = "Initialisation of swap device ${sw.device}";
             wantedBy = [ "${realDevice'}.swap" ];
             before = [ "${realDevice'}.swap" ];
+            # If swap is encrypted, depending on rngd resolves a possible entropy starvation during boot
+            after = mkIf (config.security.rngd.enable && sw.randomEncryption.enable) [ "rngd.service" ];
             path = [ pkgs.utillinux ] ++ optional sw.randomEncryption.enable pkgs.cryptsetup;
 
             script =
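The ordering added above only takes effect when both rngd and random swap encryption are enabled. A minimal sketch of such a configuration, with an illustrative device path:

    {
      security.rngd.enable = true;
      swapDevices = [
        {
          device = "/dev/disk/by-partlabel/swap";  # illustrative
          randomEncryption.enable = true;
        }
      ];
    }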
diff --git a/nixos/modules/config/xdg/portal.nix b/nixos/modules/config/xdg/portal.nix
index 95fa8e05fa3..1330a08070c 100644
--- a/nixos/modules/config/xdg/portal.nix
+++ b/nixos/modules/config/xdg/portal.nix
@@ -42,6 +42,10 @@ with lib;
     let
       cfg = config.xdg.portal;
       packages = [ pkgs.xdg-desktop-portal ] ++ cfg.extraPortals;
+      joinedPortals = pkgs.symlinkJoin {
+        name = "xdg-portals";
+        paths = cfg.extraPortals;
+      };
 
     in mkIf cfg.enable {
 
@@ -56,7 +60,7 @@ with lib;
 
       environment.variables = {
         GTK_USE_PORTAL = mkIf cfg.gtkUsePortal "1";
-        XDG_DESKTOP_PORTAL_PATH = map (p: "${p}/share/xdg-desktop-portal/portals") cfg.extraPortals;
+        XDG_DESKTOP_PORTAL_DIR = "${joinedPortals}/share/xdg-desktop-portal/portals";
       };
     };
 }
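The portal change above is internal: the extraPortals are now merged with symlinkJoin and exposed through the single XDG_DESKTOP_PORTAL_DIR variable instead of the list-valued XDG_DESKTOP_PORTAL_PATH. User configuration stays the same; a typical sketch, assuming the xdg-desktop-portal-gtk package:

    { pkgs, ... }: {
      xdg.portal.enable = true;
      # Every portal listed here ends up in the joined directory above.
      xdg.portal.extraPortals = [ pkgs.xdg-desktop-portal-gtk ];
    }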
diff --git a/nixos/modules/hardware/brightnessctl.nix b/nixos/modules/hardware/brightnessctl.nix
deleted file mode 100644
index 2d54398d10d..00000000000
--- a/nixos/modules/hardware/brightnessctl.nix
+++ /dev/null
@@ -1,31 +0,0 @@
-{ config, lib, pkgs, ... }:
-
-with lib;
-let
-  cfg = config.hardware.brightnessctl;
-in
-{
-
-  options = {
-
-    hardware.brightnessctl = {
-
-      enable = mkOption {
-        default = false;
-        type = types.bool;
-        description = ''
-          Enable brightnessctl in userspace.
-          This will allow brightness control from users in the video group.
-        '';
-
-      };
-    };
-  };
-
-
-  config = mkIf cfg.enable {
-    services.udev.packages = with pkgs; [ brightnessctl ];
-    environment.systemPackages = with pkgs; [ brightnessctl ];
-  };
-
-}
diff --git a/nixos/modules/installer/cd-dvd/channel.nix b/nixos/modules/installer/cd-dvd/channel.nix
index ab5e7c0645f..92164d65e53 100644
--- a/nixos/modules/installer/cd-dvd/channel.nix
+++ b/nixos/modules/installer/cd-dvd/channel.nix
@@ -21,7 +21,9 @@ let
       if [ ! -e $out/nixos/nixpkgs ]; then
         ln -s . $out/nixos/nixpkgs
       fi
-      echo -n ${config.system.nixos.revision} > $out/nixos/.git-revision
+      ${optionalString (config.system.nixos.revision != null) ''
+        echo -n ${config.system.nixos.revision} > $out/nixos/.git-revision
+      ''}
       echo -n ${config.system.nixos.versionSuffix} > $out/nixos/.version-suffix
       echo ${config.system.nixos.versionSuffix} | sed -e s/pre// > $out/nixos/svn-revision
     '';
diff --git a/nixos/modules/installer/cd-dvd/installation-cd-graphical-base.nix b/nixos/modules/installer/cd-dvd/installation-cd-graphical-base.nix
index e0b558dcb0d..fa19daf1328 100644
--- a/nixos/modules/installer/cd-dvd/installation-cd-graphical-base.nix
+++ b/nixos/modules/installer/cd-dvd/installation-cd-graphical-base.nix
@@ -44,6 +44,9 @@ with lib;
     pkgs.bvi # binary editor
     pkgs.joe
 
+    # Include some version control tools.
+    pkgs.git
+
     # Firefox for reading the manual.
     pkgs.firefox
 
diff --git a/nixos/modules/installer/tools/nix-fallback-paths.nix b/nixos/modules/installer/tools/nix-fallback-paths.nix
index c2f2578733b..72b5850a4d9 100644
--- a/nixos/modules/installer/tools/nix-fallback-paths.nix
+++ b/nixos/modules/installer/tools/nix-fallback-paths.nix
@@ -1,6 +1,6 @@
 {
-  x86_64-linux = "/nix/store/0q5qnh10m2sfrriszc1ysmggw659q6qm-nix-2.3.2";
-  i686-linux = "/nix/store/i7ad7r5d8a5b3l22hg4a1im2qq05y6vd-nix-2.3.2";
-  aarch64-linux = "/nix/store/bv06pavfw0dbqzr8w3l7s71nx27gnxa0-nix-2.3.2";
-  x86_64-darwin = "/nix/store/x6mnl1nij7y4v5ihlplr4k937ayr403r-nix-2.3.2";
+  x86_64-linux = "/nix/store/68mycwwczrciryylq2a66jwfhxp09zsg-nix-2.3.3-debug";
+  i686-linux = "/nix/store/5axys7hsggb4282dsbps5k5p0v59yv13-nix-2.3.3";
+  aarch64-linux = "/nix/store/k80nwvi19hxwbz3c9cxgp24f1jjxwmcc-nix-2.3.3";
+  x86_64-darwin = "/nix/store/lrnvapsqmf0ja6zfyx4cpxr7ahdr7f9b-nix-2.3.3";
 }
diff --git a/nixos/modules/installer/tools/nixos-build-vms/build-vms.nix b/nixos/modules/installer/tools/nixos-build-vms/build-vms.nix
index c1028a0ad7e..90f0702f717 100644
--- a/nixos/modules/installer/tools/nixos-build-vms/build-vms.nix
+++ b/nixos/modules/installer/tools/nixos-build-vms/build-vms.nix
@@ -5,7 +5,7 @@
 
 let nodes = import networkExpr; in
 
-with import ../../../../lib/testing.nix {
+with import ../../../../lib/testing-python.nix {
   inherit system;
   pkgs = import ../../../../.. { inherit system config; };
 };
diff --git a/nixos/modules/installer/tools/nixos-enter.sh b/nixos/modules/installer/tools/nixos-enter.sh
index 4680cd8ae95..1fdd4627a90 100644
--- a/nixos/modules/installer/tools/nixos-enter.sh
+++ b/nixos/modules/installer/tools/nixos-enter.sh
@@ -60,15 +60,15 @@ chmod 0755 "$mountPoint/dev" "$mountPoint/sys"
 mount --rbind /dev "$mountPoint/dev"
 mount --rbind /sys "$mountPoint/sys"
 
-# If silent, write both stdout and stderr of activation script to /dev/null
-# otherwise, write both streams to stderr of this process
-if [ "$silent" -eq 0 ]; then
-    PIPE_TARGET="/dev/stderr"
-else
-    PIPE_TARGET="/dev/null"
-fi
+(
+    # If silent, write both stdout and stderr of activation script to /dev/null
+    # otherwise, write both streams to stderr of this process
+    if [ "$silent" -eq 1 ]; then
+        exec 2>/dev/null
+    fi
 
-# Run the activation script. Set $LOCALE_ARCHIVE to supress some Perl locale warnings.
-LOCALE_ARCHIVE="$system/sw/lib/locale/locale-archive" chroot "$mountPoint" "$system/activate" >>$PIPE_TARGET 2>&1 || true
+    # Run the activation script. Set $LOCALE_ARCHIVE to suppress some Perl locale warnings.
+    LOCALE_ARCHIVE="$system/sw/lib/locale/locale-archive" chroot "$mountPoint" "$system/activate" 1>&2 || true
+)
 
 exec chroot "$mountPoint" "${command[@]}"
diff --git a/nixos/modules/installer/tools/nixos-rebuild.sh b/nixos/modules/installer/tools/nixos-rebuild.sh
index 7db323d38e6..354274478a3 100644
--- a/nixos/modules/installer/tools/nixos-rebuild.sh
+++ b/nixos/modules/installer/tools/nixos-rebuild.sh
@@ -3,6 +3,9 @@
 if [ -x "@shell@" ]; then export SHELL="@shell@"; fi;
 
 set -e
+set -o pipefail
+
+export PATH=@path@:$PATH
 
 showSyntax() {
     exec man nixos-rebuild
@@ -13,6 +16,7 @@ showSyntax() {
 # Parse the command line.
 origArgs=("$@")
 extraBuildFlags=()
+lockFlags=()
 action=
 buildNix=1
 fast=
@@ -58,7 +62,7 @@ while [ "$#" -gt 0 ]; do
         j="$1"; shift 1
         extraBuildFlags+=("$i" "$j")
         ;;
-      --show-trace|--keep-failed|-K|--keep-going|-k|--verbose|-v|-vv|-vvv|-vvvv|-vvvvv|--fallback|--repair|--no-build-output|-Q|-j*)
+      --show-trace|--keep-failed|-K|--keep-going|-k|--verbose|-v|-vv|-vvv|-vvvv|-vvvvv|--fallback|--repair|--no-build-output|-Q|-j*|-L|--refresh|--no-net)
         extraBuildFlags+=("$i")
         ;;
       --option)
@@ -93,6 +97,22 @@ while [ "$#" -gt 0 ]; do
       --use-remote-sudo)
         maybeSudo=(sudo --)
         ;;
+      --flake)
+        flake="$1"
+        shift 1
+        ;;
+      --recreate-lock-file|--no-update-lock-file|--no-write-lock-file|--no-registries|--commit-lock-file)
+        lockFlags+=("$i")
+        ;;
+      --update-input)
+        j="$1"; shift 1
+        lockFlags+=("$i" "$j")
+        ;;
+      --override-input)
+        j="$1"; shift 1
+        k="$1"; shift 1
+        lockFlags+=("$i" "$j" "$k")
+        ;;
       *)
         echo "$0: unknown option \`$i'"
         exit 1
@@ -202,7 +222,7 @@ fi
 
 
 # If ‘--upgrade’ is given, run ‘nix-channel --update nixos’.
-if [ -n "$upgrade" -a -z "$_NIXOS_REBUILD_REEXEC" ]; then
+if [[ -n $upgrade && -z $_NIXOS_REBUILD_REEXEC && -z $flake ]]; then
     nix-channel --update nixos
 
     # If there are other channels that contain a file called
@@ -225,8 +245,15 @@ if [ -z "$_NIXOS_REBUILD_REEXEC" ]; then
     export PATH=@nix@/bin:$PATH
 fi
 
+# Use /etc/nixos/flake.nix if it exists. It can be a symlink to the
+# actual flake.
+if [[ -z $flake && -e /etc/nixos/flake.nix ]]; then
+    flake="$(dirname "$(readlink -f /etc/nixos/flake.nix)")"
+fi
+
 # Re-execute nixos-rebuild from the Nixpkgs tree.
-if [ -z "$_NIXOS_REBUILD_REEXEC" -a -n "$canRun" -a -z "$fast" ]; then
+# FIXME: get nixos-rebuild from $flake.
+if [[ -z $_NIXOS_REBUILD_REEXEC && -n $canRun && -z $fast && -z $flake ]]; then
     if p=$(nix-build --no-out-link --expr 'with import <nixpkgs/nixos> {}; config.system.build.nixos-rebuild' "${extraBuildFlags[@]}"); then
         export _NIXOS_REBUILD_REEXEC=1
         exec $p/bin/nixos-rebuild "${origArgs[@]}"
@@ -234,10 +261,37 @@ if [ -z "$_NIXOS_REBUILD_REEXEC" -a -n "$canRun" -a -z "$fast" ]; then
     fi
 fi
 
+# For convenience, use the hostname as the default configuration to
+# build from the flake.
+if [[ -n $flake ]]; then
+    if [[ $flake =~ ^(.*)\#([^\#\"]*)$ ]]; then
+       flake="${BASH_REMATCH[1]}"
+       flakeAttr="${BASH_REMATCH[2]}"
+    fi
+    if [[ -z $flakeAttr ]]; then
+        read -r hostname < /proc/sys/kernel/hostname
+        if [[ -z $hostname ]]; then
+            hostname=default
+        fi
+        flakeAttr="nixosConfigurations.\"$hostname\""
+    else
+        flakeAttr="nixosConfigurations.\"$flakeAttr\""
+    fi
+fi
+
+# Resolve the flake.
+if [[ -n $flake ]]; then
+    flake=$(nix flake info --json "${extraBuildFlags[@]}" "${lockFlags[@]}" -- "$flake" | jq -r .url)
+fi
+
 # Find configuration.nix and open editor instead of building.
 if [ "$action" = edit ]; then
-    NIXOS_CONFIG=${NIXOS_CONFIG:-$(nix-instantiate --find-file nixos-config)}
-    exec "${EDITOR:-nano}" "$NIXOS_CONFIG"
+    if [[ -z $flake ]]; then
+        NIXOS_CONFIG=${NIXOS_CONFIG:-$(nix-instantiate --find-file nixos-config)}
+        exec "${EDITOR:-nano}" "$NIXOS_CONFIG"
+    else
+        exec nix edit "${lockFlags[@]}" -- "$flake#$flakeAttr"
+    fi
     exit 1
 fi
 
@@ -296,7 +350,7 @@ prebuiltNix() {
 
 remotePATH=
 
-if [ -n "$buildNix" ]; then
+if [[ -n $buildNix && -z $flake ]]; then
     echo "building Nix..." >&2
     nixDrv=
     if ! nixDrv="$(nix-instantiate '<nixpkgs/nixos>' --add-root $tmpDir/nix.drv --indirect -A config.nix.package.out "${extraBuildFlags[@]}")"; then
@@ -337,7 +391,7 @@ fi
 
 # Update the version suffix if we're building from Git (so that
 # nixos-version shows something useful).
-if [ -n "$canRun" ]; then
+if [[ -n $canRun && -z $flake ]]; then
     if nixpkgs=$(nix-instantiate --find-file nixpkgs "${extraBuildFlags[@]}"); then
         suffix=$($SHELL $nixpkgs/nixos/modules/installer/tools/get-version-suffix "${extraBuildFlags[@]}" || true)
         if [ -n "$suffix" ]; then
@@ -358,15 +412,37 @@ fi
 if [ -z "$rollback" ]; then
     echo "building the system configuration..." >&2
     if [ "$action" = switch -o "$action" = boot ]; then
-        pathToConfig="$(nixBuild '<nixpkgs/nixos>' --no-out-link -A system "${extraBuildFlags[@]}")"
+        if [[ -z $flake ]]; then
+            pathToConfig="$(nixBuild '<nixpkgs/nixos>' --no-out-link -A system "${extraBuildFlags[@]}")"
+        else
+            outLink=$tmpDir/result
+            nix build "$flake#$flakeAttr.config.system.build.toplevel" \
+              "${extraBuildFlags[@]}" "${lockFlags[@]}" --out-link $outLink
+            pathToConfig="$(readlink -f $outLink)"
+        fi
         copyToTarget "$pathToConfig"
         targetHostCmd nix-env -p "$profile" --set "$pathToConfig"
     elif [ "$action" = test -o "$action" = build -o "$action" = dry-build -o "$action" = dry-activate ]; then
-        pathToConfig="$(nixBuild '<nixpkgs/nixos>' -A system -k "${extraBuildFlags[@]}")"
+        if [[ -z $flake ]]; then
+            pathToConfig="$(nixBuild '<nixpkgs/nixos>' -A system -k "${extraBuildFlags[@]}")"
+        else
+            nix build "$flake#$flakeAttr.config.system.build.toplevel" "${extraBuildFlags[@]}" "${lockFlags[@]}"
+            pathToConfig="$(readlink -f ./result)"
+        fi
     elif [ "$action" = build-vm ]; then
-        pathToConfig="$(nixBuild '<nixpkgs/nixos>' -A vm -k "${extraBuildFlags[@]}")"
+        if [[ -z $flake ]]; then
+            pathToConfig="$(nixBuild '<nixpkgs/nixos>' -A vm -k "${extraBuildFlags[@]}")"
+        else
+            echo "$0: 'build-vm' is not supported with '--flake'" >&2
+            exit 1
+        fi
     elif [ "$action" = build-vm-with-bootloader ]; then
-        pathToConfig="$(nixBuild '<nixpkgs/nixos>' -A vmWithBootLoader -k "${extraBuildFlags[@]}")"
+        if [[ -z $flake ]]; then
+            pathToConfig="$(nixBuild '<nixpkgs/nixos>' -A vmWithBootLoader -k "${extraBuildFlags[@]}")"
+        else
+            echo "$0: 'build-vm-with-bootloader' is not supported with '--flake'" >&2
+            exit 1
+        fi
     else
         showSyntax
     fi
diff --git a/nixos/modules/installer/tools/nixos-version.sh b/nixos/modules/installer/tools/nixos-version.sh
index 190c49a33ec..fb0fe26116a 100644
--- a/nixos/modules/installer/tools/nixos-version.sh
+++ b/nixos/modules/installer/tools/nixos-version.sh
@@ -6,8 +6,17 @@ case "$1" in
     exit 1
     ;;
   --hash|--revision)
+    if ! [[ @revision@ =~ ^[0-9a-f]+$ ]]; then
+      echo "$0: Nixpkgs commit hash is unknown"
+      exit 1
+    fi
     echo "@revision@"
     ;;
+  --json)
+    cat <<EOF
+@json@
+EOF
+    ;;
   *)
     echo "@version@ (@codeName@)"
     ;;
diff --git a/nixos/modules/installer/tools/tools.nix b/nixos/modules/installer/tools/tools.nix
index 5df9c23e6b6..833865e99bb 100644
--- a/nixos/modules/installer/tools/tools.nix
+++ b/nixos/modules/installer/tools/tools.nix
@@ -31,6 +31,7 @@ let
       nix = config.nix.package.out;
       nix_x86_64_linux = fallback.x86_64-linux;
       nix_i686_linux = fallback.i686-linux;
+      path = makeBinPath [ pkgs.jq ];
     };
 
   nixos-generate-config = makeProg {
@@ -47,6 +48,14 @@ let
     name = "nixos-version";
     src = ./nixos-version.sh;
     inherit (config.system.nixos) version codeName revision;
+    inherit (config.system) configurationRevision;
+    json = builtins.toJSON ({
+      nixosVersion = config.system.nixos.version;
+    } // optionalAttrs (config.system.nixos.revision != null) {
+      nixpkgsRevision = config.system.nixos.revision;
+    } // optionalAttrs (config.system.configurationRevision != null) {
+      configurationRevision = config.system.configurationRevision;
+    });
   };
 
   nixos-enter = makeProg {
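The json variable substituted above is what nixos-version --json prints verbatim. As a sketch of the generated value (version string and revision invented for illustration), evaluating

    builtins.toJSON {
      nixosVersion = "20.09pre-git";
      nixpkgsRevision = "515d3cfa24df96c9e134ac5b87c2396f81354551";
    }

yields the single-line JSON string {"nixosVersion":"20.09pre-git","nixpkgsRevision":"515d3cfa24df96c9e134ac5b87c2396f81354551"}; configurationRevision is added the same way when it is set.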
diff --git a/nixos/modules/misc/version.nix b/nixos/modules/misc/version.nix
index 8a85035ceb7..9557def622d 100644
--- a/nixos/modules/misc/version.nix
+++ b/nixos/modules/misc/version.nix
@@ -4,10 +4,6 @@ with lib;
 
 let
   cfg = config.system.nixos;
-
-  gitRepo      = "${toString pkgs.path}/.git";
-  gitRepoValid = lib.pathIsGitRepo gitRepo;
-  gitCommitId  = lib.substring 0 7 (commitIdFromGitRepo gitRepo);
 in
 
 {
@@ -42,8 +38,8 @@ in
 
     nixos.revision = mkOption {
       internal = true;
-      type = types.str;
-      default = trivial.revisionWithDefault "master";
+      type = types.nullOr types.str;
+      default = trivial.revisionWithDefault null;
       description = "The Git revision from which this NixOS configuration was built.";
     };
 
@@ -84,6 +80,12 @@ in
       description = "Default NixOS channel to which the root user is subscribed.";
     };
 
+    configurationRevision = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      description = "The Git revision of the top-level flake from which this configuration was built.";
+    };
+
   };
 
   config = {
@@ -92,8 +94,6 @@ in
       # These defaults are set here rather than up there so that
       # changing them would not rebuild the manual
       version = mkDefault (cfg.release + cfg.versionSuffix);
-      revision      = mkIf gitRepoValid (mkDefault            gitCommitId);
-      versionSuffix = mkIf gitRepoValid (mkDefault (".git." + gitCommitId));
     };
 
     # Generate /etc/os-release.  See
diff --git a/nixos/modules/module-list.nix b/nixos/modules/module-list.nix
index 541a17af6e9..6734929b9d4 100644
--- a/nixos/modules/module-list.nix
+++ b/nixos/modules/module-list.nix
@@ -41,7 +41,6 @@
   ./hardware/acpilight.nix
   ./hardware/all-firmware.nix
   ./hardware/bladeRF.nix
-  ./hardware/brightnessctl.nix
   ./hardware/brillo.nix
   ./hardware/ckb-next.nix
   ./hardware/cpu/amd-microcode.nix
@@ -253,7 +252,7 @@
   ./services/computing/slurm/slurm.nix
   ./services/continuous-integration/buildbot/master.nix
   ./services/continuous-integration/buildbot/worker.nix
-  ./services/continuous-integration/buildkite-agent.nix
+  ./services/continuous-integration/buildkite-agents.nix
   ./services/continuous-integration/hail.nix
   ./services/continuous-integration/hydra/default.nix
   ./services/continuous-integration/gitlab-runner.nix
@@ -821,6 +820,7 @@
   ./services/web-apps/icingaweb2/icingaweb2.nix
   ./services/web-apps/icingaweb2/module-monitoring.nix
   ./services/web-apps/ihatemoney
+  ./services/web-apps/jirafeau.nix
   ./services/web-apps/limesurvey.nix
   ./services/web-apps/mattermost.nix
   ./services/web-apps/mediawiki.nix
diff --git a/nixos/modules/programs/sway.nix b/nixos/modules/programs/sway.nix
index 7e646f8737d..364debddb0f 100644
--- a/nixos/modules/programs/sway.nix
+++ b/nixos/modules/programs/sway.nix
@@ -88,10 +88,10 @@ in {
       default = with pkgs; [
         swaylock swayidle
         xwayland alacritty dmenu
-        rxvt_unicode # For backward compatibility (old default terminal)
+        rxvt-unicode # For backward compatibility (old default terminal)
       ];
       defaultText = literalExample ''
-        with pkgs; [ swaylock swayidle xwayland rxvt_unicode dmenu ];
+        with pkgs; [ swaylock swayidle xwayland rxvt-unicode dmenu ];
       '';
       example = literalExample ''
         with pkgs; [
diff --git a/nixos/modules/programs/zsh/zsh.nix b/nixos/modules/programs/zsh/zsh.nix
index 4fbdba47b1d..930cc1987a3 100644
--- a/nixos/modules/programs/zsh/zsh.nix
+++ b/nixos/modules/programs/zsh/zsh.nix
@@ -162,9 +162,8 @@ in
         # This file is read for all shells.
 
         # Only execute this file once per shell.
-        # But don't clobber the environment of interactive non-login children!
         if [ -n "$__ETC_ZSHENV_SOURCED" ]; then return; fi
-        export __ETC_ZSHENV_SOURCED=1
+        __ETC_ZSHENV_SOURCED=1
 
         if [ -z "$__NIXOS_SET_ENVIRONMENT_DONE" ]; then
             . ${config.system.build.setEnvironment}
diff --git a/nixos/modules/rename.nix b/nixos/modules/rename.nix
index 3b1b1b8bb55..2cc6c46e358 100644
--- a/nixos/modules/rename.nix
+++ b/nixos/modules/rename.nix
@@ -42,6 +42,12 @@ with lib;
       instead, or any other display manager in NixOS as they all support auto-login.
     '')
     (mkRemovedOptionModule [ "services" "dnscrypt-proxy" ] "Use services.dnscrypt-proxy2 instead")
+    (mkRemovedOptionModule ["hardware" "brightnessctl" ] ''
+      The brightnessctl module was removed because newer versions of
+      brightnessctl don't require the udev rules anymore (they can use the
+      systemd-logind API). Instead of using the module you can now
+      simply add the brightnessctl package to environment.systemPackages.
+    '')
 
     # Do NOT add any option renames here, see top of the file
   ];
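As the removal message above says, the module's replacement is a one-line package addition:

    { pkgs, ... }: {
      environment.systemPackages = [ pkgs.brightnessctl ];
    }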
diff --git a/nixos/modules/security/acme.nix b/nixos/modules/security/acme.nix
index 890c421b0ea..7da6666f79c 100644
--- a/nixos/modules/security/acme.nix
+++ b/nixos/modules/security/acme.nix
@@ -1,7 +1,5 @@
 { config, lib, pkgs, ... }:
-
 with lib;
-
 let
 
   cfg = config.security.acme;
@@ -9,7 +7,8 @@ let
   certOpts = { name, ... }: {
     options = {
       webroot = mkOption {
-        type = types.str;
+        type = types.nullOr types.str;
+        default = null;
         example = "/var/lib/acme/acme-challenges";
         description = ''
           Where the webroot of the HTTP vhost is located.
@@ -38,7 +37,7 @@ let
 
       email = mkOption {
         type = types.nullOr types.str;
-        default = null;
+        default = cfg.email;
         description = "Contact email address for the CA to be able to reach you.";
       };
 
@@ -76,20 +75,6 @@ let
         '';
       };
 
-      plugins = mkOption {
-        type = types.listOf (types.enum [
-          "cert.der" "cert.pem" "chain.pem" "external.sh"
-          "fullchain.pem" "full.pem" "key.der" "key.pem" "account_key.json" "account_reg.json"
-        ]);
-        default = [ "fullchain.pem" "full.pem" "key.pem" "account_key.json" "account_reg.json" ];
-        description = ''
-          Plugins to enable. With default settings simp_le will
-          store public certificate bundle in <filename>fullchain.pem</filename>,
-          private key in <filename>key.pem</filename> and those two previous
-          files combined in <filename>full.pem</filename> in its state directory.
-        '';
-      };
-
       directory = mkOption {
         type = types.str;
         readOnly = true;
@@ -111,6 +96,46 @@ let
           own server roots if needed.
         '';
       };
+
+      keyType = mkOption {
+        type = types.str;
+        default = "ec384";
+        description = ''
+          Key type to use for private keys.
+          For an up to date list of supported values check the --key-type option
+          at https://go-acme.github.io/lego/usage/cli/#usage.
+        '';
+      };
+
+      dnsProvider = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        example = "route53";
+        description = ''
+          DNS Challenge provider. For a list of supported providers, see the "code"
+          field of the DNS providers listed at https://go-acme.github.io/lego/dns/.
+        '';
+      };
+
+      credentialsFile = mkOption {
+        type = types.path;
+        description = ''
+          Path to an EnvironmentFile for the cert's service containing any required and
+          optional environment variables for your selected dnsProvider.
+          To find out what values you need to set, consult the documentation at
+          https://go-acme.github.io/lego/dns/ for the corresponding dnsProvider.
+        '';
+        example = "/var/src/secrets/example.org-route53-api-token";
+      };
+
+      dnsPropagationCheck = mkOption {
+        type = types.bool;
+        default = true;
+        description = ''
+          Toggles lego DNS propagation check, which is used alongside DNS-01
+          challenge to ensure the DNS entries required are available.
+        '';
+      };
     };
   };
 
@@ -130,14 +155,21 @@ in
     (mkRemovedOptionModule [ "security" "acme" "directory"] "ACME Directory is now hardcoded to /var/lib/acme and its permisisons are managed by systemd. See https://github.com/NixOS/nixpkgs/issues/53852 for more info.")
     (mkRemovedOptionModule [ "security" "acme" "preDelay"] "This option has been removed. If you want to make sure that something executes before certificates are provisioned, add a RequiredBy=acme-\${cert}.service to the service you want to execute before the cert renewal")
     (mkRemovedOptionModule [ "security" "acme" "activationDelay"] "This option has been removed. If you want to make sure that something executes before certificates are provisioned, add a RequiredBy=acme-\${cert}.service to the service you want to execute before the cert renewal")
+    (mkChangedOptionModule [ "security" "acme" "validMin"] [ "security" "acme" "validMinDays"] (config: config.security.acme.validMin / (24 * 3600)))
   ];
   options = {
     security.acme = {
 
-      validMin = mkOption {
+      validMinDays = mkOption {
         type = types.int;
-        default = 30 * 24 * 3600;
-        description = "Minimum remaining validity before renewal in seconds.";
+        default = 30;
+        description = "Minimum remaining validity before renewal in days.";
+      };
+
+      email = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = "Contact email address for the CA to be able to reach you.";
       };
 
       renewInterval = mkOption {
@@ -173,6 +205,15 @@ in
         '';
       };
 
+      acceptTerms = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Accept the CA's terms of service. The default provider is Let's Encrypt;
+          you can find their ToS at https://letsencrypt.org/repository/
+        '';
+      };
+
       certs = mkOption {
         default = { };
         type = with types; attrsOf (submodule certOpts);
@@ -204,27 +245,55 @@ in
   config = mkMerge [
     (mkIf (cfg.certs != { }) {
 
+      assertions = let
+        certs = (mapAttrsToList (k: v: v) cfg.certs);
+      in [
+        {
+          assertion = all (certOpts: certOpts.dnsProvider == null || certOpts.webroot == null) certs;
+          message = ''
+            Options `security.acme.certs.<name>.dnsProvider` and
+            `security.acme.certs.<name>.webroot` are mutually exclusive.
+          '';
+        }
+        {
+          assertion = cfg.email != null || all (certOpts: certOpts.email != null) certs;
+          message = ''
+            You must define `security.acme.certs.<name>.email` or
+            `security.acme.email` to register with the CA.
+          '';
+        }
+        {
+          assertion = cfg.acceptTerms;
+          message = ''
+            You must accept the CA's terms of service before using
+            the ACME module by setting `security.acme.acceptTerms`
+            to `true`. For Let's Encrypt's ToS see https://letsencrypt.org/repository/
+          '';
+        }
+      ];
+
       systemd.services = let
           services = concatLists servicesLists;
           servicesLists = mapAttrsToList certToServices cfg.certs;
           certToServices = cert: data:
               let
+                # StateDirectory must be relative, and will be created under /var/lib by systemd
                 lpath = "acme/${cert}";
+                apath = "/var/lib/${lpath}";
+                spath = "/var/lib/acme/.lego";
                 rights = if data.allowKeysForGroup then "750" else "700";
-                cmdline = [ "-v" "-d" data.domain "--default_root" data.webroot "--valid_min" cfg.validMin ]
-                          ++ optionals (data.email != null) [ "--email" data.email ]
-                          ++ concatMap (p: [ "-f" p ]) data.plugins
-                          ++ concatLists (mapAttrsToList (name: root: [ "-d" (if root == null then name else "${name}:${root}")]) data.extraDomains)
+                globalOpts = [ "-d" data.domain "--email" data.email "--path" "." "--key-type" data.keyType ]
+                          ++ optionals (cfg.acceptTerms) [ "--accept-tos" ]
+                          ++ optionals (data.dnsProvider != null && !data.dnsPropagationCheck) [ "--dns.disable-cp" ]
+                          ++ concatLists (mapAttrsToList (name: root: [ "-d" name ]) data.extraDomains)
+                          ++ (if data.dnsProvider != null then [ "--dns" data.dnsProvider ] else [ "--http" "--http.webroot" data.webroot ])
                           ++ optionals (cfg.server != null || data.server != null) ["--server" (if data.server == null then cfg.server else data.server)];
+                runOpts = escapeShellArgs (globalOpts ++ [ "run" ]);
+                renewOpts = escapeShellArgs (globalOpts ++ [ "renew" "--days" (toString cfg.validMinDays) ]);
                 acmeService = {
                   description = "Renew ACME Certificate for ${cert}";
                   after = [ "network.target" "network-online.target" ];
                   wants = [ "network-online.target" ];
-                  # simp_le uses requests, which uses certifi under the hood,
-                  # which doesn't respect the system trust store.
-                  # At least in the acme test, we provision a fake CA, impersonating the LE endpoint.
-                  # REQUESTS_CA_BUNDLE is a way to teach python requests to use something else
-                  environment.REQUESTS_CA_BUNDLE = "/etc/ssl/certs/ca-certificates.crt";
                   serviceConfig = {
                     Type = "oneshot";
                     # With RemainAfterExit the service is considered active even
@@ -233,18 +302,37 @@ in
                     # the permissions of the StateDirectory get adjusted
                     # according to the specified group
                     RemainAfterExit = true;
-                    SuccessExitStatus = [ "0" "1" ];
                     User = data.user;
                     Group = data.group;
                     PrivateTmp = true;
-                    StateDirectory = lpath;
+                    StateDirectory = "acme/.lego ${lpath}";
                     StateDirectoryMode = rights;
-                    WorkingDirectory = "/var/lib/${lpath}";
-                    ExecStart = "${pkgs.simp_le}/bin/simp_le ${escapeShellArgs cmdline}";
+                    WorkingDirectory = spath;
+                    # Only try loading the credentialsFile if the dns challenge is enabled
+                    EnvironmentFile = if data.dnsProvider != null then data.credentialsFile else null;
+                    ExecStart = pkgs.writeScript "acme-start" ''
+                      #!${pkgs.runtimeShell} -e
+                      ${pkgs.lego}/bin/lego ${renewOpts} || ${pkgs.lego}/bin/lego ${runOpts}
+                    '';
                     ExecStartPost =
                       let
+                        keyName = builtins.replaceStrings ["*"] ["_"] data.domain;
                         script = pkgs.writeScript "acme-post-start" ''
                           #!${pkgs.runtimeShell} -e
+                          cd ${apath}
+
+                          # Test that existing cert is older than new cert
+                          KEY=${spath}/certificates/${keyName}.key
+                          if [ -e $KEY -a $KEY -nt key.pem ]; then
+                            cp -p ${spath}/certificates/${keyName}.key key.pem
+                            cp -p ${spath}/certificates/${keyName}.crt cert.pem
+                            cp -p ${spath}/certificates/${keyName}.issuer.crt chain.pem
+                            cat cert.pem chain.pem > fullchain.pem
+                            cat key.pem cert.pem chain.pem > full.pem
+                            chmod ${rights} *.pem
+                            chown '${data.user}:${data.group}' *.pem
+                          fi
+
                           ${data.postRun}
                         '';
                       in
@@ -276,17 +364,17 @@ in
                         -out $workdir/server.crt
 
                       # Copy key to destination
-                      cp $workdir/server.key /var/lib/${lpath}/key.pem
+                      cp $workdir/server.key ${apath}/key.pem
 
                       # Create fullchain.pem (same format as "simp_le ... -f fullchain.pem" creates)
-                      cat $workdir/{server.crt,ca.crt} > "/var/lib/${lpath}/fullchain.pem"
+                      cat $workdir/{server.crt,ca.crt} > "${apath}/fullchain.pem"
 
                       # Create full.pem for e.g. lighttpd
-                      cat $workdir/{server.key,server.crt,ca.crt} > "/var/lib/${lpath}/full.pem"
+                      cat $workdir/{server.key,server.crt,ca.crt} > "${apath}/full.pem"
 
                       # Give key acme permissions
-                      chown '${data.user}:${data.group}' "/var/lib/${lpath}/"{key,fullchain,full}.pem
-                      chmod ${rights} "/var/lib/${lpath}/"{key,fullchain,full}.pem
+                      chown '${data.user}:${data.group}' "${apath}/"{key,fullchain,full}.pem
+                      chmod ${rights} "${apath}/"{key,fullchain,full}.pem
                     '';
                   serviceConfig = {
                     Type = "oneshot";
@@ -297,7 +385,7 @@ in
                   };
                   unitConfig = {
                     # Do not create self-signed key when key already exists
-                    ConditionPathExists = "!/var/lib/${lpath}/key.pem";
+                    ConditionPathExists = "!${apath}/key.pem";
                   };
                 };
               in (
@@ -309,8 +397,7 @@ in
           servicesAttr;
 
       systemd.tmpfiles.rules =
-        flip mapAttrsToList cfg.certs
-        (cert: data: "d ${data.webroot}/.well-known/acme-challenge - ${data.user} ${data.group}");
+        map (data: "d ${data.webroot}/.well-known/acme-challenge - ${data.user} ${data.group}") (filter (data: data.webroot != null) (attrValues cfg.certs));
 
       systemd.timers = flip mapAttrs' cfg.certs (cert: data: nameValuePair
         ("acme-${cert}")
@@ -334,7 +421,7 @@ in
   ];
 
   meta = {
-    maintainers = with lib.maintainers; [ abbradar fpletz globin ];
+    maintainers = with lib.maintainers; [ abbradar fpletz globin m1cr0man ];
     doc = ./acme.xml;
   };
 }
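Putting the new lego-based options together, a DNS-01 certificate might be declared roughly as follows (domain, provider and credentials path are placeholders; for HTTP-01 validation you would set webroot instead of dnsProvider, as the assertions above make the two mutually exclusive):

    {
      security.acme.acceptTerms = true;
      security.acme.email = "admin@example.org";
      security.acme.certs."example.org" = {
        domain = "*.example.org";   # wildcard certificates require a DNS-01 challenge
        dnsProvider = "route53";    # see https://go-acme.github.io/lego/dns/ for provider codes
        credentialsFile = "/var/src/secrets/example.org-route53-api-token";
        # dnsPropagationCheck defaults to true; disable it only if propagation checks misbehave.
      };
    }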
diff --git a/nixos/modules/security/acme.xml b/nixos/modules/security/acme.xml
index 9d0a1995e0f..2b29c117484 100644
--- a/nixos/modules/security/acme.xml
+++ b/nixos/modules/security/acme.xml
@@ -7,7 +7,7 @@
  <para>
   NixOS supports automatic domain validation &amp; certificate retrieval and
   renewal using the ACME protocol. This is currently only implemented by and
-  for Let's Encrypt. The alternative ACME client <literal>simp_le</literal> is
+  for Let's Encrypt. The alternative ACME client <literal>lego</literal> is
   used under the hood.
  </para>
  <section xml:id="module-security-acme-prerequisites">
diff --git a/nixos/modules/security/rngd.nix b/nixos/modules/security/rngd.nix
index d9d6d9c9f25..5566c53897d 100644
--- a/nixos/modules/security/rngd.nix
+++ b/nixos/modules/security/rngd.nix
@@ -39,12 +39,15 @@ in
 
       description = "Hardware RNG Entropy Gatherer Daemon";
 
+      # rngd may have to start early to avoid entropy starvation during boot with encrypted swap
+      unitConfig.DefaultDependencies = false;
       serviceConfig = {
         ExecStart = "${pkgs.rng-tools}/sbin/rngd -f"
           + optionalString cfg.debug " -d";
+        # PrivateTmp would introduce a circular dependency if /tmp is on tmpfs and swap is encrypted,
+        # thus depending on rngd before swap, while swap depends on rngd to avoid entropy starvation.
         NoNewPrivileges = true;
         PrivateNetwork = true;
-        PrivateTmp = true;
         ProtectSystem = "full";
         ProtectHome = true;
       };
diff --git a/nixos/modules/services/audio/alsa.nix b/nixos/modules/services/audio/alsa.nix
index 990398e6546..3fe76a16540 100644
--- a/nixos/modules/services/audio/alsa.nix
+++ b/nixos/modules/services/audio/alsa.nix
@@ -91,11 +91,7 @@ in
     environment.systemPackages = [ alsaUtils ];
 
     environment.etc = mkIf (!pulseaudioEnabled && config.sound.extraConfig != "")
-      [
-        { source = pkgs.writeText "asound.conf" config.sound.extraConfig;
-          target = "asound.conf";
-        }
-      ];
+      { "asound.conf".text = config.sound.extraConfig; };
 
     # ALSA provides a udev rule for restoring volume settings.
     services.udev.packages = [ alsaUtils ];
diff --git a/nixos/modules/services/audio/mopidy.nix b/nixos/modules/services/audio/mopidy.nix
index a534b692f17..d30c227db42 100644
--- a/nixos/modules/services/audio/mopidy.nix
+++ b/nixos/modules/services/audio/mopidy.nix
@@ -13,11 +13,11 @@ let
   mopidyEnv = buildEnv {
     name = "mopidy-with-extensions-${mopidy.version}";
     paths = closePropagation cfg.extensionPackages;
-    pathsToLink = [ "/${python.sitePackages}" ];
+    pathsToLink = [ "/${python3.sitePackages}" ];
     buildInputs = [ makeWrapper ];
     postBuild = ''
       makeWrapper ${mopidy}/bin/mopidy $out/bin/mopidy \
-        --prefix PYTHONPATH : $out/${python.sitePackages}
+        --prefix PYTHONPATH : $out/${python3.sitePackages}
     '';
   };
 in {
diff --git a/nixos/modules/services/continuous-integration/buildkite-agent.nix b/nixos/modules/services/continuous-integration/buildkite-agents.nix
index 58bce654941..fbda2731bbf 100644
--- a/nixos/modules/services/continuous-integration/buildkite-agent.nix
+++ b/nixos/modules/services/continuous-integration/buildkite-agents.nix
@@ -3,7 +3,7 @@
 with lib;
 
 let
-  cfg = config.services.buildkite-agent;
+  cfg = config.services.buildkite-agents;
 
   mkHookOption = { name, description, example ? null }: {
     inherit name;
@@ -15,7 +15,7 @@ let
   };
   mkHookOptions = hooks: listToAttrs (map mkHookOption hooks);
 
-  hooksDir = let
+  hooksDir = cfg: let
     mkHookEntry = name: value: ''
       cat > $out/${name} <<'EOF'
       #! ${pkgs.runtimeShell}
@@ -29,12 +29,13 @@ let
     ${concatStringsSep "\n" (mapAttrsToList mkHookEntry (filterAttrs (n: v: v != null) cfg.hooks))}
   '';
 
-in
-
-{
-  options = {
-    services.buildkite-agent = {
-      enable = mkEnableOption "buildkite-agent";
+  buildkiteOptions = { name ? "", config, ... }: {
+    options = {
+      enable = mkOption {
+        default = true;
+        type = types.bool;
+        description = "Whether to enable this buildkite agent";
+      };
 
       package = mkOption {
         default = pkgs.buildkite-agent;
@@ -44,7 +45,7 @@ in
       };
 
       dataDir = mkOption {
-        default = "/var/lib/buildkite-agent";
+        default = "/var/lib/buildkite-agent-${name}";
         description = "The workdir for the agent";
         type = types.str;
       };
@@ -68,9 +69,9 @@ in
 
       name = mkOption {
         type = types.str;
-        default = "%hostname-%n";
+        default = "%hostname-${name}-%n";
         description = ''
-          The name of the agent.
+          The name of the agent as seen in the buildkite dashboard.
         '';
       };
 
@@ -166,11 +167,11 @@ in
 
       hooksPath = mkOption {
         type = types.path;
-        default = hooksDir;
-        defaultText = "generated from services.buildkite-agent.hooks";
+        default = hooksDir config;
+        defaultText = "generated from services.buildkite-agents.<name>.hooks";
         description = ''
           Path to the directory storing the hooks.
-          Consider using <option>services.buildkite-agent.hooks.&lt;name&gt;</option>
+          Consider using <option>services.buildkite-agents.&lt;name&gt;.hooks.&lt;name&gt;</option>
           instead.
         '';
       };
@@ -184,24 +185,38 @@ in
       };
     };
   };
+  enabledAgents = lib.filterAttrs (n: v: v.enable) cfg;
+  mapAgents = function: lib.mkMerge (lib.mapAttrsToList function enabledAgents);
+in
+{
+  options.services.buildkite-agents = mkOption {
+    type = types.attrsOf (types.submodule buildkiteOptions);
+    default = {};
+    description = ''
+      Attribute set of buildkite agents.
+      The attribute key is combined with the hostname and a unique integer to
+      create the final agent name. This can be overridden by setting the `name`
+      attribute.
+    '';
+  };
 
-  config = mkIf config.services.buildkite-agent.enable {
-    users.users.buildkite-agent = {
-      name = "buildkite-agent";
+  config.users.users = mapAgents (name: cfg: {
+    "buildkite-agent-${name}" = {
+      name = "buildkite-agent-${name}";
       home = cfg.dataDir;
       createHome = true;
       description = "Buildkite agent user";
       extraGroups = [ "keys" ];
       isSystemUser = true;
     };
+  });
 
-    environment.systemPackages = [ cfg.package ];
-
-    systemd.services.buildkite-agent =
+  config.systemd.services = mapAgents (name: cfg: {
+    "buildkite-agent-${name}" =
       { description = "Buildkite Agent";
         wantedBy = [ "multi-user.target" ];
         after = [ "network.target" ];
-        path = cfg.runtimePackages ++ [ pkgs.coreutils ];
+        path = cfg.runtimePackages ++ [ cfg.package pkgs.coreutils ];
         environment = config.networking.proxy.envVars // {
           HOME = cfg.dataDir;
           NIX_REMOTE = "daemon";
@@ -230,8 +245,8 @@ in
           '';
 
         serviceConfig =
-          { ExecStart = "${cfg.package}/bin/buildkite-agent start --config /var/lib/buildkite-agent/buildkite-agent.cfg";
-            User = "buildkite-agent";
+          { ExecStart = "${cfg.package}/bin/buildkite-agent start --config ${cfg.dataDir}/buildkite-agent.cfg";
+            User = "buildkite-agent-${name}";
             RestartSec = 5;
             Restart = "on-failure";
             TimeoutSec = 10;
@@ -240,22 +255,18 @@ in
             KillMode = "mixed";
           };
       };
+  });
 
-    assertions = [
+  config.assertions = mapAgents (name: cfg: [
      { assertion = cfg.hooksPath == hooksDir cfg || all (v: v == null) (attrValues cfg.hooks);
         message = ''
-          Options `services.buildkite-agent.hooksPath' and
-          `services.buildkite-agent.hooks.<name>' are mutually exclusive.
+          Options `services.buildkite-agents.${name}.hooksPath' and
+          `services.buildkite-agents.${name}.hooks.<name>' are mutually exclusive.
         '';
       }
-    ];
-  };
+  ]);
+
   imports = [
-    (mkRenamedOptionModule [ "services" "buildkite-agent" "token" ]                    [ "services" "buildkite-agent" "tokenPath" ])
-    (mkRenamedOptionModule [ "services" "buildkite-agent" "openssh" "privateKey" ]     [ "services" "buildkite-agent" "privateSshKeyPath" ])
-    (mkRenamedOptionModule [ "services" "buildkite-agent" "openssh" "privateKeyPath" ] [ "services" "buildkite-agent" "privateSshKeyPath" ])
-    (mkRemovedOptionModule [ "services" "buildkite-agent" "openssh" "publicKey" ]      "SSH public keys aren't necessary to clone private repos.")
-    (mkRemovedOptionModule [ "services" "buildkite-agent" "openssh" "publicKeyPath" ]  "SSH public keys aren't necessary to clone private repos.")
-    (mkRenamedOptionModule [ "services" "buildkite-agent" "meta-data"]                 [ "services" "buildkite-agent" "tags" ])
+    (mkRemovedOptionModule [ "services" "buildkite-agent"] "services.buildkite-agent has been upgraded from version 2 to version 3 and moved to an attribute set at services.buildkite-agents. Please consult the 20.03 release notes for more information.")
   ];
 }
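
Since the module now takes an attribute set of agents, several agents can be defined on one host, each with its own user, state directory and systemd unit (buildkite-agent-<name>). A hedged sketch of the new interface; the agent name, token path and tags below are illustrative:

    { pkgs, ... }:
    {
      services.buildkite-agents.builder = {
        # Creates user buildkite-agent-builder and unit buildkite-agent-builder.service.
        tokenPath = "/run/keys/buildkite-agent-token";
        runtimePackages = [ pkgs.git pkgs.nix ];
        tags = { queue = "default"; };
      };
    }
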
diff --git a/nixos/modules/services/databases/postgresql.nix b/nixos/modules/services/databases/postgresql.nix
index c8fdd89d0d8..0b79a996dc7 100644
--- a/nixos/modules/services/databases/postgresql.nix
+++ b/nixos/modules/services/databases/postgresql.nix
@@ -20,7 +20,9 @@ let
       listen_addresses = '${if cfg.enableTCPIP then "*" else "localhost"}'
       port = ${toString cfg.port}
       ${cfg.extraConfig}
-    '';
+    ''; 
+
+  groupAccessAvailable = versionAtLeast postgresql.version "11.0";
 
 in
 
@@ -88,6 +90,16 @@ in
         '';
       };
 
+      initdbArgs = mkOption {
+        type = with types; listOf str;
+        default = [];
+        example = [ "--data-checksums" "--allow-group-access" ];
+        description = ''
+          Additional arguments passed to <literal>initdb</literal> during data dir
+          initialisation.
+        '';
+      };
+
       initialScript = mkOption {
         type = types.nullOr types.path;
         default = null;
@@ -220,7 +232,7 @@ in
 
   ###### implementation
 
-  config = mkIf config.services.postgresql.enable {
+  config = mkIf cfg.enable {
 
     services.postgresql.package =
       # Note: when changing the default, make it conditional on
@@ -232,13 +244,14 @@ in
             else throw "postgresql_9_4 was removed, please upgrade your postgresql version.");
 
     services.postgresql.dataDir =
-      mkDefault (if versionAtLeast config.system.stateVersion "17.09" then "/var/lib/postgresql/${config.services.postgresql.package.psqlSchema}"
-                 else "/var/db/postgresql");
+      mkDefault (if versionAtLeast config.system.stateVersion "17.09"
+                  then "/var/lib/postgresql/${cfg.package.psqlSchema}"
+                  else "/var/db/postgresql");
 
     services.postgresql.authentication = mkAfter
       ''
         # Generated file; do not edit!
-        local all all              ident
+        local all all              peer
         host  all all 127.0.0.1/32 md5
         host  all all ::1/128      md5
       '';
@@ -284,7 +297,7 @@ in
           ''
             # Initialise the database.
             if ! test -e ${cfg.dataDir}/PG_VERSION; then
-              initdb -U ${cfg.superUser}
+              initdb -U ${cfg.superUser} ${concatStringsSep " " cfg.initdbArgs}
               # See postStart!
               touch "${cfg.dataDir}/.first_startup"
             fi
@@ -293,8 +306,12 @@ in
               ln -sfn "${pkgs.writeText "recovery.conf" cfg.recoveryConfig}" \
                 "${cfg.dataDir}/recovery.conf"
             ''}
+            ${optionalString (!groupAccessAvailable) ''
+              # PostgreSQL before 11.0 refuses to start if the data directory is group-accessible
+              chmod 0700 "${cfg.dataDir}"
+            ''}
 
-             exec postgres
+            exec postgres
           '';
 
         serviceConfig =
@@ -303,7 +320,7 @@ in
             Group = "postgres";
             PermissionsStartOnly = true;
             RuntimeDirectory = "postgresql";
-            Type = if lib.versionAtLeast cfg.package.version "9.6"
+            Type = if versionAtLeast cfg.package.version "9.6"
                    then "notify"
                    else "simple";
 
@@ -352,5 +369,5 @@ in
   };
 
   meta.doc = ./postgresql.xml;
-  meta.maintainers = with lib.maintainers; [ thoughtpolice ];
+  meta.maintainers = with lib.maintainers; [ thoughtpolice danbst ];
 }
diff --git a/nixos/modules/services/databases/redis.nix b/nixos/modules/services/databases/redis.nix
index 70895fa53e4..5c817422aae 100644
--- a/nixos/modules/services/databases/redis.nix
+++ b/nixos/modules/services/databases/redis.nix
@@ -150,10 +150,20 @@ in
       requirePass = mkOption {
         type = with types; nullOr str;
         default = null;
-        description = "Password for database (STORED PLAIN TEXT, WORLD-READABLE IN NIX STORE)";
+        description = ''
+          Password for database (STORED PLAIN TEXT, WORLD-READABLE IN NIX STORE).
+          Use requirePassFile to store it outside of the nix store in a dedicated file.
+        '';
         example = "letmein!";
       };
 
+      requirePassFile = mkOption {
+        type = with types; nullOr path;
+        default = null;
+        description = "File with password for the database.";
+        example = "/run/keys/redis-password";
+      };
+
       appendOnly = mkOption {
         type = types.bool;
         default = false;
@@ -192,6 +202,10 @@ in
   ###### implementation
 
   config = mkIf config.services.redis.enable {
+    assertions = [{
+      assertion = cfg.requirePass != null -> cfg.requirePassFile == null;
+      message = "Only one of services.redis.requirePass and services.redis.requirePassFile may be set.";
+    }];
     boot.kernel.sysctl = (mkMerge [
       { "vm.nr_hugepages" = "0"; }
       ( mkIf cfg.vmOverCommit { "vm.overcommit_memory" = "1"; } )
@@ -208,21 +222,26 @@ in
 
     environment.systemPackages = [ cfg.package ];
 
-    systemd.services.redis =
-      { description = "Redis Server";
+    systemd.services.redis = {
+      description = "Redis Server";
 
-        wantedBy = [ "multi-user.target" ];
-        after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
 
-        serviceConfig = {
-          ExecStart = "${cfg.package}/bin/redis-server ${redisConfig}";
-          RuntimeDirectory = "redis";
-          StateDirectory = "redis";
-          Type = "notify";
-          User = "redis";
-        };
-      };
+      preStart = ''
+        install -m 600 ${redisConfig} /run/redis/redis.conf
+      '' + optionalString (cfg.requirePassFile != null) ''
+        password=$(cat ${escapeShellArg cfg.requirePassFile})
+        echo "requirePass $password" >> /run/redis/redis.conf
+      '';
 
+      serviceConfig = {
+        ExecStart = "${cfg.package}/bin/redis-server /run/redis/redis.conf";
+        RuntimeDirectory = "redis";
+        StateDirectory = "redis";
+        Type = "notify";
+        User = "redis";
+      };
+    };
   };
-
 }
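
requirePassFile keeps the Redis password out of the world-readable Nix store: the file is read in preStart and appended to the runtime copy of the configuration under /run/redis. A sketch with an illustrative key path; requirePass and requirePassFile are mutually exclusive, as the new assertion enforces:

    {
      services.redis = {
        enable = true;
        # File contents become the requirePass directive when the service starts.
        requirePassFile = "/run/keys/redis-password";
      };
    }
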
diff --git a/nixos/modules/services/monitoring/heapster.nix b/nixos/modules/services/monitoring/heapster.nix
index 585632943fd..0a9dfa12eaa 100644
--- a/nixos/modules/services/monitoring/heapster.nix
+++ b/nixos/modules/services/monitoring/heapster.nix
@@ -49,7 +49,7 @@ in {
       };
     };
 
-    users.users.heapsterrs = {
+    users.users.heapster = {
       uid = config.ids.uids.heapster;
       description = "Heapster user";
     };
diff --git a/nixos/modules/services/monitoring/statsd.nix b/nixos/modules/services/monitoring/statsd.nix
index 17836e95a6f..30b2916a992 100644
--- a/nixos/modules/services/monitoring/statsd.nix
+++ b/nixos/modules/services/monitoring/statsd.nix
@@ -125,7 +125,7 @@ in
       message = "Only builtin backends (graphite, console, repeater) or backends enumerated in `pkgs.nodePackages` are allowed!";
     }) cfg.backends;
 
-    users.use.statsdrs = {
+    users.users.statsd = {
       uid = config.ids.uids.statsd;
       description = "Statsd daemon user";
     };
diff --git a/nixos/modules/services/networking/i2pd.nix b/nixos/modules/services/networking/i2pd.nix
index 326d34f6ca9..93a21fd4c97 100644
--- a/nixos/modules/services/networking/i2pd.nix
+++ b/nixos/modules/services/networking/i2pd.nix
@@ -158,10 +158,10 @@ let
       (sec "addressbook")
       (strOpt "defaulturl" cfg.addressbook.defaulturl)
     ] ++ (optionalEmptyList "subscriptions" cfg.addressbook.subscriptions)
-      ++ (flip mapAttrs
-      (collect (name: proto: proto ? port && proto ? address && proto ? name) cfg.proto)
+      ++ (flip map
+      (collect (proto: proto ? port && proto ? address) cfg.proto)
       (proto: let protoOpts = [
-        (sec name)
+        (sec proto.name)
         (boolOpt "enabled" proto.enable)
         (strOpt "address" proto.address)
         (intOpt "port" proto.port)
@@ -181,10 +181,10 @@ let
 
   tunnelConf = let opts = [
     notice
-    (flip mapAttrs
-      (collect (name: tun: tun ? port && tun ? destination) cfg.outTunnels)
+    (flip map
+      (collect (tun: tun ? port && tun ? destination) cfg.outTunnels)
       (tun: let outTunOpts = [
-        (sec name)
+        (sec tun.name)
         "type = client"
         (intOpt "port" tun.port)
         (strOpt "destination" tun.destination)
@@ -204,10 +204,10 @@ let
         ++ (if tun ? crypto.tagsToSend then
             optionalNullInt "crypto.tagstosend" tun.crypto.tagsToSend else []);
         in concatStringsSep "\n" outTunOpts))
-    (flip mapAttrs
-      (collect (name: tun: tun ? port && tun ? address) cfg.inTunnels)
+    (flip map
+      (collect (tun: tun ? port && tun ? address) cfg.inTunnels)
       (tun: let inTunOpts = [
-        (sec name)
+        (sec tun.name)
         "type = server"
         (intOpt "port" tun.port)
         (strOpt "host" tun.address)
@@ -606,7 +606,7 @@ in
 
       outTunnels = mkOption {
         default = {};
-        type = with types; loaOf (submodule (
+        type = with types; attrsOf (submodule (
           { name, ... }: {
             options = {
               destinationPort = mkOption {
@@ -627,7 +627,7 @@ in
 
       inTunnels = mkOption {
         default = {};
-        type = with types; loaOf (submodule (
+        type = with types; attrsOf (submodule (
           { name, ... }: {
             options = {
               inPort = mkOption {
diff --git a/nixos/modules/services/networking/iwd.nix b/nixos/modules/services/networking/iwd.nix
index 839fa48d9a4..6be67a8b96f 100644
--- a/nixos/modules/services/networking/iwd.nix
+++ b/nixos/modules/services/networking/iwd.nix
@@ -23,12 +23,7 @@ in {
     systemd.packages = [ pkgs.iwd ];
 
     systemd.services.iwd.wantedBy = [ "multi-user.target" ];
-
-    systemd.tmpfiles.rules = [
-      "d /var/lib/iwd 0700 root root -"
-      "d /var/lib/ead 0700 root root -"
-    ];
   };
 
-  meta.maintainers = with lib.maintainers; [ mic92 ];
+  meta.maintainers = with lib.maintainers; [ mic92 dtzWill ];
 }
diff --git a/nixos/modules/services/networking/knot.nix b/nixos/modules/services/networking/knot.nix
index 47364ecb846..12ff89fe849 100644
--- a/nixos/modules/services/networking/knot.nix
+++ b/nixos/modules/services/networking/knot.nix
@@ -5,14 +5,16 @@ with lib;
 let
   cfg = config.services.knot;
 
-  configFile = pkgs.writeText "knot.conf" cfg.extraConfig;
-  socketFile = "/run/knot/knot.sock";
+  configFile = pkgs.writeTextFile {
+    name = "knot.conf";
+    text = (concatMapStringsSep "\n" (file: "include: ${file}") cfg.keyFiles) + "\n" +
+           cfg.extraConfig;
+    checkPhase = lib.optionalString (cfg.keyFiles == []) ''
+      ${cfg.package}/bin/knotc --config=$out conf-check
+    '';
+  };
 
-  knotConfCheck = file: pkgs.runCommand "knot-config-checked"
-    { buildInputs = [ cfg.package ]; } ''
-    ln -s ${configFile} $out
-    knotc --config=${configFile} conf-check
-  '';
+  socketFile = "/run/knot/knot.sock";
 
   knot-cli-wrappers = pkgs.stdenv.mkDerivation {
     name = "knot-cli-wrappers";
@@ -45,6 +47,19 @@ in {
         '';
       };
 
+      keyFiles = mkOption {
+        type = types.listOf types.path;
+        default = [];
+        description = ''
+          A list of files containing additional configuration
+          to be included using the include directive. This makes it
+          possible to include configuration such as TSIG keys without
+          exposing them in the world-readable Nix store.
+          Note that using this option also disables configuration
+          checks at build time.
+        '';
+      };
+
       extraConfig = mkOption {
         type = types.lines;
         default = "";
@@ -65,6 +80,13 @@ in {
   };
 
   config = mkIf config.services.knot.enable {
+    users.users.knot = {
+      isSystemUser = true;
+      group = "knot";
+      description = "Knot daemon user";
+    };
+
+    users.groups.knot.gid = null;
     systemd.services.knot = {
       unitConfig.Documentation = "man:knotd(8) man:knot.conf(5) man:knotc(8) https://www.knot-dns.cz/docs/${cfg.package.version}/html/";
       description = cfg.package.meta.description;
@@ -74,12 +96,12 @@ in {
 
       serviceConfig = {
         Type = "notify";
-        ExecStart = "${cfg.package}/bin/knotd --config=${knotConfCheck configFile} --socket=${socketFile} ${concatStringsSep " " cfg.extraArgs}";
+        ExecStart = "${cfg.package}/bin/knotd --config=${configFile} --socket=${socketFile} ${concatStringsSep " " cfg.extraArgs}";
         ExecReload = "${knot-cli-wrappers}/bin/knotc reload";
         CapabilityBoundingSet = "CAP_NET_BIND_SERVICE CAP_SETPCAP";
         AmbientCapabilities = "CAP_NET_BIND_SERVICE CAP_SETPCAP";
         NoNewPrivileges = true;
-        DynamicUser = "yes";
+        User = "knot";
         RuntimeDirectory = "knot";
         StateDirectory = "knot";
         StateDirectoryMode = "0700";
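
keyFiles lets secrets such as TSIG keys be included from files outside the Nix store; as noted in the option description, doing so skips the build-time knotc conf-check. A sketch with a hypothetical key file path:

    {
      services.knot = {
        enable = true;
        # /var/lib/knot/tsig.conf is a hypothetical out-of-store file containing a key: section.
        keyFiles = [ "/var/lib/knot/tsig.conf" ];
        extraConfig = ''
          server:
            listen: 0.0.0.0@53
        '';
      };
    }
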
diff --git a/nixos/modules/services/networking/pppd.nix b/nixos/modules/services/networking/pppd.nix
index b31bfa64235..c1cbdb46176 100644
--- a/nixos/modules/services/networking/pppd.nix
+++ b/nixos/modules/services/networking/pppd.nix
@@ -130,7 +130,7 @@ in
     systemdConfigs = listToAttrs (map mkSystemd enabledConfigs);
 
   in mkIf cfg.enable {
-    environment.etc = mkMerge etcFiles;
-    systemd.services = mkMerge systemdConfigs;
+    environment.etc = etcFiles;
+    systemd.services = systemdConfigs;
   };
 }
diff --git a/nixos/modules/services/networking/shorewall.nix b/nixos/modules/services/networking/shorewall.nix
index 0f94d414fcf..c59a5366915 100644
--- a/nixos/modules/services/networking/shorewall.nix
+++ b/nixos/modules/services/networking/shorewall.nix
@@ -33,7 +33,6 @@ in {
           The attribute name defines the name of the config,
           and the attribute value defines the content of the config.
         '';
-        apply = lib.mapAttrs (name: text: pkgs.writeText "${name}" text);
       };
     };
   };
@@ -63,12 +62,7 @@ in {
       '';
     };
     environment = {
-      etc = lib.mapAttrsToList
-              (name: file:
-                { source = file;
-                  target = "shorewall/${name}";
-                })
-              cfg.configs;
+      etc = lib.mapAttrs' (name: conf: lib.nameValuePair "shorewall/${name}" { text = conf; }) cfg.configs;
       systemPackages = [ cfg.package ];
     };
   };
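
With the apply function removed, services.shorewall.configs now holds the file contents directly and the module writes each attribute to /etc/shorewall/<name>. A sketch, assuming the module's usual enable switch and with an illustrative rules file:

    {
      services.shorewall = {
        enable = true;
        # Becomes /etc/shorewall/rules.
        configs."rules" = ''
          ACCEPT net $FW tcp 22
        '';
      };
    }
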
diff --git a/nixos/modules/services/networking/shorewall6.nix b/nixos/modules/services/networking/shorewall6.nix
index 9c22a037c0b..374e407cc7a 100644
--- a/nixos/modules/services/networking/shorewall6.nix
+++ b/nixos/modules/services/networking/shorewall6.nix
@@ -33,7 +33,6 @@ in {
           The attribute name defines the name of the config,
           and the attribute value defines the content of the config.
         '';
-        apply = lib.mapAttrs (name: text: pkgs.writeText "${name}" text);
       };
     };
   };
@@ -63,12 +62,7 @@ in {
       '';
     };
     environment = {
-      etc = lib.mapAttrsToList
-              (name: file:
-                { source = file;
-                  target = "shorewall6/${name}";
-                })
-              cfg.configs;
+      etc = lib.mapAttrs' (name: conf: lib.nameValuePair "shorewall6/${name}" { text = conf; }) cfg.configs;
       systemPackages = [ cfg.package ];
     };
   };
diff --git a/nixos/modules/services/networking/supybot.nix b/nixos/modules/services/networking/supybot.nix
index 92c84bd0e1e..d5b9a97a1c1 100644
--- a/nixos/modules/services/networking/supybot.nix
+++ b/nixos/modules/services/networking/supybot.nix
@@ -45,7 +45,7 @@ in
 
     environment.systemPackages = [ pkgs.pythonPackages.limnoria ];
 
-    users.users.supybotrs = {
+    users.users.supybot = {
       uid = config.ids.uids.supybot;
       group = "supybot";
       description = "Supybot IRC bot user";
diff --git a/nixos/modules/services/web-apps/jirafeau.nix b/nixos/modules/services/web-apps/jirafeau.nix
new file mode 100644
index 00000000000..4f181257ef7
--- /dev/null
+++ b/nixos/modules/services/web-apps/jirafeau.nix
@@ -0,0 +1,169 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.services.jirafeau;
+
+  group = config.services.nginx.group;
+  user = config.services.nginx.user;
+
+  withTrailingSlash = str: if hasSuffix "/" str then str else "${str}/";
+
+  localConfig = pkgs.writeText "config.local.php" ''
+    <?php
+      $cfg['admin_password'] = '${cfg.adminPasswordSha256}';
+      $cfg['web_root'] = 'http://${withTrailingSlash cfg.hostName}';
+      $cfg['var_root'] = '${withTrailingSlash cfg.dataDir}';
+      $cfg['maximal_upload_size'] = ${builtins.toString cfg.maxUploadSizeMegabytes};
+      $cfg['installation_done'] = true;
+
+      ${cfg.extraConfig}
+  '';
+in
+{
+  options.services.jirafeau = {
+    adminPasswordSha256 = mkOption {
+      type = types.str;
+      default = "";
+      description = ''
+        SHA-256 of the desired administration password. Leave blank/unset for no password.
+      '';
+    };
+
+    dataDir = mkOption {
+      type = types.path;
+      default = "/var/lib/jirafeau/data/";
+      description = "Location of Jirafeau storage directory.";
+    };
+
+    enable = mkEnableOption "Jirafeau file upload application";
+
+    extraConfig = mkOption {
+      type = types.lines;
+      default = "";
+      example = ''
+        $cfg['style'] = 'courgette';
+        $cfg['organisation'] = 'ACME';
+      '';
+      description = let
+        documentationLink =
+          "https://gitlab.com/mojo42/Jirafeau/-/blob/${cfg.package.version}/lib/config.original.php";
+      in
+        ''
+          Jirafeau configuration. Refer to <link xlink:href="${documentationLink}"/> for supported
+          values.
+        '';
+    };
+
+    hostName = mkOption {
+      type = types.str;
+      default = "localhost";
+      description = "URL of instance. Must have trailing slash.";
+    };
+
+    maxUploadSizeMegabytes = mkOption {
+      type = types.int;
+      default = 0;
+      description = "Maximum upload size of accepted files.";
+    };
+
+    maxUploadTimeout = mkOption {
+      type = types.str;
+      default = "30m";
+      description = let
+        nginxCoreDocumentation = "http://nginx.org/en/docs/http/ngx_http_core_module.html";
+      in
+        ''
+          Timeout for reading client request bodies and headers. Refer to
+          <link xlink:href="${nginxCoreDocumentation}#client_body_timeout"/> and
+          <link xlink:href="${nginxCoreDocumentation}#client_header_timeout"/> for accepted values.
+        '';
+    };
+
+    nginxConfig = mkOption {
+      type = types.submodule
+        (import ../web-servers/nginx/vhost-options.nix { inherit config lib; });
+      default = {};
+      example = {
+        serverAliases = [ "wiki.\${config.networking.domain}" ];
+      };
+      description = "Extra configuration for the nginx virtual host of Jirafeau.";
+    };
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.jirafeau;
+      defaultText = "pkgs.jirafeau";
+      description = "Jirafeau package to use";
+      example = "pkgs.jirafeau";
+    };
+
+    poolConfig = mkOption {
+      type = with types; attrsOf (oneOf [ str int bool ]);
+      default = {
+        "pm" = "dynamic";
+        "pm.max_children" = 32;
+        "pm.start_servers" = 2;
+        "pm.min_spare_servers" = 2;
+        "pm.max_spare_servers" = 4;
+        "pm.max_requests" = 500;
+      };
+      description = ''
+        Options for Jirafeau PHP pool. See documentation on <literal>php-fpm.conf</literal> for
+        details on configuration directives.
+      '';
+    };
+  };
+
+
+  config = mkIf cfg.enable {
+    services = {
+      nginx = {
+        enable = true;
+        virtualHosts."${cfg.hostName}" = mkMerge [
+          cfg.nginxConfig
+          {
+            extraConfig = let
+              clientMaxBodySize =
+                if cfg.maxUploadSizeMegabytes == 0 then "0" else "${toString cfg.maxUploadSizeMegabytes}m";
+            in
+              ''
+                index index.php;
+                client_max_body_size ${clientMaxBodySize};
+                client_body_timeout ${cfg.maxUploadTimeout};
+                client_header_timeout ${cfg.maxUploadTimeout};
+              '';
+            locations = {
+              "~ \\.php$".extraConfig = ''
+                include ${pkgs.nginx}/conf/fastcgi_params;
+                fastcgi_split_path_info ^(.+\.php)(/.+)$;
+                fastcgi_index index.php;
+                fastcgi_pass unix:${config.services.phpfpm.pools.jirafeau.socket};
+                fastcgi_param PATH_INFO $fastcgi_path_info;
+                fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
+              '';
+            };
+            root = mkForce "${cfg.package}";
+          }
+        ];
+      };
+
+      phpfpm.pools.jirafeau = {
+        inherit group user;
+        phpEnv."JIRAFEAU_CONFIG" = "${localConfig}";
+        settings = {
+          "listen.mode" = "0660";
+          "listen.owner" = user;
+          "listen.group" = group;
+        } // cfg.poolConfig;
+      };
+    };
+
+    systemd.tmpfiles.rules = [
+      "d ${cfg.dataDir} 0750 ${user} ${group} - -"
+      "d ${cfg.dataDir}/files/ 0750 ${user} ${group} - -"
+      "d ${cfg.dataDir}/links/ 0750 ${user} ${group} - -"
+      "d ${cfg.dataDir}/async/ 0750 ${user} ${group} - -"
+    ];
+  };
+}
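
Pulling the new module's options together, a hypothetical deployment might look like the sketch below; the host name, size limit and extra configuration are illustrative only:

    {
      services.jirafeau = {
        enable = true;
        hostName = "share.example.org";
        # SHA-256 of the admin password, e.g. produced with: echo -n 'secret' | sha256sum
        adminPasswordSha256 = "<sha256 of the admin password>";
        maxUploadSizeMegabytes = 1024;
        extraConfig = ''
          $cfg['organisation'] = 'ACME';
        '';
      };
    }
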
diff --git a/nixos/modules/services/web-apps/mattermost.nix b/nixos/modules/services/web-apps/mattermost.nix
index 41c52b9653b..853347bf86e 100644
--- a/nixos/modules/services/web-apps/mattermost.nix
+++ b/nixos/modules/services/web-apps/mattermost.nix
@@ -6,14 +6,18 @@ let
 
   cfg = config.services.mattermost;
 
-  defaultConfig = builtins.fromJSON (readFile "${pkgs.mattermost}/config/config.json");
+  defaultConfig = builtins.fromJSON (builtins.replaceStrings [ "\\u0026" ] [ "&" ]
+    (readFile "${pkgs.mattermost}/config/config.json")
+  );
+
+  database = "postgres://${cfg.localDatabaseUser}:${cfg.localDatabasePassword}@localhost:5432/${cfg.localDatabaseName}?sslmode=disable&connect_timeout=10";
 
   mattermostConf = foldl recursiveUpdate defaultConfig
     [ { ServiceSettings.SiteURL = cfg.siteUrl;
         ServiceSettings.ListenAddress = cfg.listenAddress;
         TeamSettings.SiteName = cfg.siteName;
         SqlSettings.DriverName = "postgres";
-        SqlSettings.DataSource = "postgres://${cfg.localDatabaseUser}:${cfg.localDatabasePassword}@localhost:5432/${cfg.localDatabaseName}?sslmode=disable&connect_timeout=10";
+        SqlSettings.DataSource = database;
       }
       cfg.extraConfig
     ];
@@ -175,7 +179,9 @@ in
           mkdir -p ${cfg.statePath}/{data,config,logs}
           ln -sf ${pkgs.mattermost}/{bin,fonts,i18n,templates,client} ${cfg.statePath}
         '' + lib.optionalString (!cfg.mutableConfig) ''
-          ln -sf ${mattermostConfJSON} ${cfg.statePath}/config/config.json
+          rm -f ${cfg.statePath}/config/config.json
+          cp ${mattermostConfJSON} ${cfg.statePath}/config/config.json
+          ${pkgs.mattermost}/bin/mattermost config migrate ${cfg.statePath}/config/config.json ${database}
         '' + lib.optionalString cfg.mutableConfig ''
           if ! test -e "${cfg.statePath}/config/.initial-created"; then
             rm -f ${cfg.statePath}/config/config.json
@@ -201,7 +207,8 @@ in
           PermissionsStartOnly = true;
           User = cfg.user;
           Group = cfg.group;
-          ExecStart = "${pkgs.mattermost}/bin/mattermost";
+          ExecStart = "${pkgs.mattermost}/bin/mattermost" +
+            (lib.optionalString (!cfg.mutableConfig) " -c ${database}");
           WorkingDirectory = "${cfg.statePath}";
           Restart = "always";
           RestartSec = "10";
@@ -227,4 +234,3 @@ in
     })
   ];
 }
-
diff --git a/nixos/modules/services/web-servers/caddy.nix b/nixos/modules/services/web-servers/caddy.nix
index 132c50735d9..0e6e10a5f47 100644
--- a/nixos/modules/services/web-servers/caddy.nix
+++ b/nixos/modules/services/web-servers/caddy.nix
@@ -64,32 +64,38 @@ in {
   config = mkIf cfg.enable {
     systemd.services.caddy = {
       description = "Caddy web server";
+      # upstream unit: https://github.com/caddyserver/caddy/blob/master/dist/init/linux-systemd/caddy.service
       after = [ "network-online.target" ];
+      wants = [ "network-online.target" ]; # systemd-networkd-wait-online.service
       wantedBy = [ "multi-user.target" ];
       environment = mkIf (versionAtLeast config.system.stateVersion "17.09")
         { CADDYPATH = cfg.dataDir; };
       serviceConfig = {
         ExecStart = ''
-          ${cfg.package}/bin/caddy -root=/var/tmp -conf=${configFile} \
+          ${cfg.package}/bin/caddy -log stdout -log-timestamps=false \
+            -root=/var/tmp -conf=${configFile} \
             -ca=${cfg.ca} -email=${cfg.email} ${optionalString cfg.agree "-agree"}
         '';
-        ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+        ExecReload = "${pkgs.coreutils}/bin/kill -USR1 $MAINPID";
         Type = "simple";
         User = "caddy";
         Group = "caddy";
-        Restart = "on-failure";
-        StartLimitInterval = 86400;
-        StartLimitBurst = 5;
+        Restart = "on-abnormal";
+        StartLimitIntervalSec = 14400;
+        StartLimitBurst = 10;
         AmbientCapabilities = "cap_net_bind_service";
         CapabilityBoundingSet = "cap_net_bind_service";
         NoNewPrivileges = true;
-        LimitNPROC = 64;
+        LimitNPROC = 512;
         LimitNOFILE = 1048576;
         PrivateTmp = true;
         PrivateDevices = true;
         ProtectHome = true;
         ProtectSystem = "full";
         ReadWriteDirectories = cfg.dataDir;
+        KillMode = "mixed";
+        KillSignal = "SIGQUIT";
+        TimeoutStopSec = "5s";
       };
     };
 
diff --git a/nixos/modules/services/x11/desktop-managers/mate.nix b/nixos/modules/services/x11/desktop-managers/mate.nix
index 4a6f2ca727d..910a246d776 100644
--- a/nixos/modules/services/x11/desktop-managers/mate.nix
+++ b/nixos/modules/services/x11/desktop-managers/mate.nix
@@ -86,6 +86,7 @@ in
         pkgs.shared-mime-info
         pkgs.xdg-user-dirs # Update user dirs as described in https://freedesktop.org/wiki/Software/xdg-user-dirs/
         pkgs.mate.mate-settings-daemon
+        pkgs.yelp # for 'Contents' in 'Help' menus
       ];
 
     programs.dconf.enable = true;
diff --git a/nixos/modules/services/x11/desktop-managers/plasma5.nix b/nixos/modules/services/x11/desktop-managers/plasma5.nix
index bd0a2f3481f..f3bf9268b29 100644
--- a/nixos/modules/services/x11/desktop-managers/plasma5.nix
+++ b/nixos/modules/services/x11/desktop-managers/plasma5.nix
@@ -8,6 +8,125 @@ let
   cfg = xcfg.desktopManager.plasma5;
 
   inherit (pkgs) kdeApplications plasma5 libsForQt5 qt5;
+  inherit (pkgs) writeText;
+
+  pulseaudio = config.hardware.pulseaudio;
+  pactl = "${getBin pulseaudio.package}/bin/pactl";
+  startplasma-x11 = "${getBin plasma5.plasma-workspace}/bin/startplasma-x11";
+  sed = "${getBin pkgs.gnused}/bin/sed";
+
+  gtkrc2 = writeText "gtkrc-2.0" ''
+    # Default GTK+ 2 config for NixOS Plasma 5
+    include "/run/current-system/sw/share/themes/Breeze/gtk-2.0/gtkrc"
+    style "user-font"
+    {
+      font_name="Sans Serif Regular"
+    }
+    widget_class "*" style "user-font"
+    gtk-font-name="Sans Serif Regular 10"
+    gtk-theme-name="Breeze"
+    gtk-icon-theme-name="breeze"
+    gtk-fallback-icon-theme="hicolor"
+    gtk-cursor-theme-name="breeze_cursors"
+    gtk-toolbar-style=GTK_TOOLBAR_ICONS
+    gtk-menu-images=1
+    gtk-button-images=1
+  '';
+
+  gtk3_settings = writeText "settings.ini" ''
+    [Settings]
+    gtk-font-name=Sans Serif Regular 10
+    gtk-theme-name=Breeze
+    gtk-icon-theme-name=breeze
+    gtk-fallback-icon-theme=hicolor
+    gtk-cursor-theme-name=breeze_cursors
+    gtk-toolbar-style=GTK_TOOLBAR_ICONS
+    gtk-menu-images=1
+    gtk-button-images=1
+  '';
+
+  kcminputrc = writeText "kcminputrc" ''
+    [Mouse]
+    cursorTheme=breeze_cursors
+    cursorSize=0
+  '';
+
+  activationScript = ''
+    # The KDE icon cache is supposed to update itself automatically, but it uses
+    # the timestamp on the icon theme directory as a trigger. This doesn't work
+    # on NixOS because the timestamp never changes. As a workaround, delete the
+    # icon cache at login and session activation.
+    # See also: http://lists-archives.org/kde-devel/26175-what-when-will-icon-cache-refresh.html
+    rm -fv $HOME/.cache/icon-cache.kcache
+
+    # xdg-desktop-settings generates this empty file but
+    # it makes kbuildsycoca5 fail silently. To fix this
+    # remove that menu if it exists.
+    rm -fv ''${XDG_CONFIG_HOME:?}/menus/applications-merged/xdg-desktop-menu-dummy.menu
+
+    # Qt writes a weird ‘libraryPath’ line to
+    # ~/.config/Trolltech.conf that causes the KDE plugin
+    # paths of previous KDE invocations to be searched.
+    # Obviously using mismatching KDE libraries is potentially
+    # disastrous, so here we nuke references to the Nix store
+    # in Trolltech.conf.  A better solution would be to stop
+    # Qt from doing this wackiness in the first place.
+    trolltech_conf="''${XDG_CONFIG_HOME:?}/Trolltech.conf"
+    if [ -e "$trolltech_conf" ]; then
+        ${sed} -i "$trolltech_conf" -e '/nix\\store\|nix\/store/ d'
+    fi
+
+    # Remove the kbuildsycoca5 cache. It will be regenerated
+    # immediately afterwards. This is necessary for kbuildsycoca5 to
+    # recognize software that has been removed.
+    rm -fv $HOME/.cache/ksycoca*
+
+    ${pkgs.libsForQt5.kservice}/bin/kbuildsycoca5
+  '';
+
+  startplasma =
+    ''
+      export XDG_CONFIG_HOME="''${XDG_CONFIG_HOME:-$HOME/.config}"
+      mkdir -p "''${XDG_CONFIG_HOME:?}"
+
+    ''
+    + optionalString pulseaudio.enable ''
+      # Load PulseAudio module for routing support.
+      # See also: http://colin.guthr.ie/2009/10/so-how-does-the-kde-pulseaudio-support-work-anyway/
+        ${pactl} load-module module-device-manager "do_routing=1"
+
+    ''
+    + ''
+      ${activationScript}
+
+      # Create default configurations if Plasma has never been started.
+      kdeglobals="''${XDG_CONFIG_HOME:?}/kdeglobals"
+      if ! [ -f "$kdeglobals" ]
+      then
+          kcminputrc="''${XDG_CONFIG_HOME:?}/kcminputrc"
+          if ! [ -f "$kcminputrc" ]
+          then
+              cat ${kcminputrc} >"$kcminputrc"
+          fi
+
+          gtkrc2="$HOME/.gtkrc-2.0"
+          if ! [ -f "$gtkrc2" ]
+          then
+              cat ${gtkrc2} >"$gtkrc2"
+          fi
+
+          gtk3_settings="''${XDG_CONFIG_HOME:?}/gtk-3.0/settings.ini"
+          if ! [ -f "$gtk3_settings" ]
+          then
+              mkdir -p "$(dirname "$gtk3_settings")"
+              cat ${gtk3_settings} >"$gtk3_settings"
+          fi
+      fi
+
+    ''
+    + ''
+      exec "${startplasma-x11}"
+    '';
 
 in
 
@@ -41,27 +160,7 @@ in
       services.xserver.desktopManager.session = singleton {
         name = "plasma5";
         bgSupport = true;
-        start = ''
-          # Load PulseAudio module for routing support.
-          # See http://colin.guthr.ie/2009/10/so-how-does-the-kde-pulseaudio-support-work-anyway/
-          ${optionalString config.hardware.pulseaudio.enable ''
-            ${getBin config.hardware.pulseaudio.package}/bin/pactl load-module module-device-manager "do_routing=1"
-          ''}
-
-          if [ -f "$HOME/.config/kdeglobals" ]
-          then
-              # Remove extraneous font style names.
-              # See also: https://phabricator.kde.org/D9070
-              ${getBin pkgs.gnused}/bin/sed -i "$HOME/.config/kdeglobals" \
-                  -e '/^fixed=/ s/,Regular$//' \
-                  -e '/^font=/ s/,Regular$//' \
-                  -e '/^menuFont=/ s/,Regular$//' \
-                  -e '/^smallestReadableFont=/ s/,Regular$//' \
-                  -e '/^toolBarFont=/ s/,Regular$//'
-          fi
-
-          exec "${getBin plasma5.plasma-workspace}/bin/startplasma-x11"
-        '';
+        start = startplasma;
       };
 
       security.wrappers = {
@@ -227,29 +326,7 @@ in
       xdg.portal.extraPortals = [ pkgs.xdg-desktop-portal-kde ];
 
       # Update the start menu for each user that is currently logged in
-      system.userActivationScripts.plasmaSetup = ''
-        # The KDE icon cache is supposed to update itself
-        # automatically, but it uses the timestamp on the icon
-        # theme directory as a trigger.  Since in Nix the
-        # timestamp is always the same, this doesn't work.  So as
-        # a workaround, nuke the icon cache on login.  This isn't
-        # perfect, since it may require logging out after
-        # installing new applications to update the cache.
-        # See http://lists-archives.org/kde-devel/26175-what-when-will-icon-cache-refresh.html
-        rm -fv $HOME/.cache/icon-cache.kcache
-
-        # xdg-desktop-settings generates this empty file but
-        # it makes kbuildsyscoca5 fail silently. To fix this
-        # remove that menu if it exists.
-        rm -fv $HOME/.config/menus/applications-merged/xdg-desktop-menu-dummy.menu
-
-        # Remove the kbuildsyscoca5 cache. It will be regenerated
-        # immediately after. This is necessary for kbuildsyscoca5 to
-        # recognize that software that has been removed.
-        rm -fv $HOME/.cache/ksycoca*
-
-        ${pkgs.libsForQt5.kservice}/bin/kbuildsycoca5
-      '';
+      system.userActivationScripts.plasmaSetup = activationScript;
     })
   ];
 
diff --git a/nixos/modules/services/x11/display-managers/default.nix b/nixos/modules/services/x11/display-managers/default.nix
index 821886e5fda..5d49ca94387 100644
--- a/nixos/modules/services/x11/display-managers/default.nix
+++ b/nixos/modules/services/x11/display-managers/default.nix
@@ -427,6 +427,7 @@ in
                     TryExec=${script}
                     Exec=${script}
                     Name=${sessionName}
+                    DesktopNames=${sessionName}
                   '';
                 } // {
                   providedSessions = [ sessionName ];
diff --git a/nixos/modules/services/x11/unclutter.nix b/nixos/modules/services/x11/unclutter.nix
index c0868604a68..56e30c79d1f 100644
--- a/nixos/modules/services/x11/unclutter.nix
+++ b/nixos/modules/services/x11/unclutter.nix
@@ -61,7 +61,7 @@ in {
       serviceConfig.ExecStart = ''
         ${cfg.package}/bin/unclutter \
           -idle ${toString cfg.timeout} \
-          -jitter ${toString (cfg.threeshold - 1)} \
+          -jitter ${toString (cfg.threshold - 1)} \
           ${optionalString cfg.keystroke "-keystroke"} \
           ${concatMapStrings (x: " -"+x) cfg.extraOptions} \
           -not ${concatStringsSep " " cfg.excluded} \
diff --git a/nixos/modules/services/x11/urxvtd.nix b/nixos/modules/services/x11/urxvtd.nix
index 9bfcfa9b065..867ac38a944 100644
--- a/nixos/modules/services/x11/urxvtd.nix
+++ b/nixos/modules/services/x11/urxvtd.nix
@@ -18,10 +18,10 @@ in {
     };
 
     package = mkOption {
-      default = pkgs.rxvt_unicode-with-plugins;
-      defaultText = "pkgs.rxvt_unicode-with-plugins";
+      default = pkgs.rxvt-unicode;
+      defaultText = "pkgs.rxvt-unicode";
       description = ''
-        Package to install. Usually pkgs.rxvt_unicode-with-plugins or pkgs.rxvt_unicode
+        Package to install. Usually pkgs.rxvt-unicode.
       '';
       type = types.package;
     };
diff --git a/nixos/modules/system/boot/kernel.nix b/nixos/modules/system/boot/kernel.nix
index 6edb9082e75..c247f334c23 100644
--- a/nixos/modules/system/boot/kernel.nix
+++ b/nixos/modules/system/boot/kernel.nix
@@ -101,7 +101,12 @@ in
       type = types.bool;
       default = false;
       description = ''
-        Whether to activate VESA video mode on boot.
+        (Deprecated) This option, if set, activates the VESA 800x600 video
+        mode on boot and disables kernel modesetting. It is equivalent to
+        specifying <literal>[ "vga=0x317" "nomodeset" ]</literal> in the
+        <option>boot.kernelParams</option> option. This option is
+        deprecated as of 2020: Xorg now works better with modesetting, and
+        you might want a different VESA vga setting, anyway.
       '';
     };
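
The deprecation text above already names the replacement; spelled out as a configuration sketch:

    {
      # Equivalent to the deprecated boot.vesa = true;
      boot.kernelParams = [ "vga=0x317" "nomodeset" ];
    }
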
 
diff --git a/nixos/modules/system/boot/loader/grub/grub.nix b/nixos/modules/system/boot/loader/grub/grub.nix
index 26c1197bf97..b97ef88a7ca 100644
--- a/nixos/modules/system/boot/loader/grub/grub.nix
+++ b/nixos/modules/system/boot/loader/grub/grub.nix
@@ -224,7 +224,11 @@ in
 
       extraConfig = mkOption {
         default = "";
-        example = "serial; terminal_output.serial";
+        example = ''
+          serial --unit=0 --speed=115200 --word=8 --parity=no --stop=1
+          terminal_input --append serial
+          terminal_output --append serial
+        '';
         type = types.lines;
         description = ''
           Additional GRUB commands inserted in the configuration file
diff --git a/nixos/modules/tasks/encrypted-devices.nix b/nixos/modules/tasks/encrypted-devices.nix
index 2c9231f5523..bc0933f16fe 100644
--- a/nixos/modules/tasks/encrypted-devices.nix
+++ b/nixos/modules/tasks/encrypted-devices.nix
@@ -65,7 +65,7 @@ in
     boot.initrd = {
       luks = {
         devices =
-          map (dev: { name = dev.encrypted.label; device = dev.encrypted.blkDev; } ) keylessEncDevs;
+          builtins.listToAttrs (map (dev: { name = dev.encrypted.label; value = { device = dev.encrypted.blkDev; }; }) keylessEncDevs);
         forceLuksSupportInInitrd = true;
       };
       postMountCommands =
diff --git a/nixos/modules/tasks/filesystems.nix b/nixos/modules/tasks/filesystems.nix
index 965a1c9eb1a..0ade74b957a 100644
--- a/nixos/modules/tasks/filesystems.nix
+++ b/nixos/modules/tasks/filesystems.nix
@@ -305,7 +305,8 @@ in
       in listToAttrs (map formatDevice (filter (fs: fs.autoFormat) fileSystems));
 
     systemd.tmpfiles.rules = [
-      "Z /run/keys 0750 root ${toString config.ids.gids.keys}"
+      "d /run/keys 0750 root ${toString config.ids.gids.keys}"
+      "z /run/keys 0750 root ${toString config.ids.gids.keys}"
     ];
 
     # Sync mount options with systemd's src/core/mount-setup.c: mount_table.
diff --git a/nixos/modules/virtualisation/docker-containers.nix b/nixos/modules/virtualisation/docker-containers.nix
index 216ba2c733f..5ab990a3d7c 100644
--- a/nixos/modules/virtualisation/docker-containers.nix
+++ b/nixos/modules/virtualisation/docker-containers.nix
@@ -192,16 +192,34 @@ let
             ["--network=host"]
           '';
         };
+
+        autoStart = mkOption {
+          type = types.bool;
+          default = true;
+          description = ''
+            When enabled, the container is automatically started on boot.
+            If this option is set to false, the container has to be started on-demand via its service.
+          '';
+        };
       };
     };
 
   mkService = name: container: let
     mkAfter = map (x: "docker-${x}.service") container.dependsOn;
   in rec {
-    wantedBy = [ "multi-user.target" ];
+    wantedBy = [] ++ optional (container.autoStart) "multi-user.target";
     after = [ "docker.service" "docker.socket" ] ++ mkAfter;
     requires = after;
-
+    path = [ pkgs.docker ];
+
+    preStart = ''
+      docker rm -f ${name} || true
+      ${optionalString (container.imageFile != null) ''
+        docker load -i ${container.imageFile}
+        ''}
+      '';
+    postStop = "docker rm -f ${name} || true";
+
     serviceConfig = {
       ExecStart = concatStringsSep " \\\n  " ([
         "${pkgs.docker}/bin/docker run"
@@ -220,12 +238,7 @@ let
         ++ map escapeShellArg container.cmd
       );
 
-      ExecStartPre =
-        ["-${pkgs.docker}/bin/docker rm -f ${name}"] ++
-        (optional (container.imageFile != null) "${pkgs.docker}/bin/docker load -i ${container.imageFile}");
-
-      ExecStop = ''${pkgs.bash}/bin/sh -c "[ $SERVICE_RESULT = success ] || ${pkgs.docker}/bin/docker stop ${name}"'';
-      ExecStopPost = "-${pkgs.docker}/bin/docker rm -f ${name}";
+      ExecStop = ''${pkgs.bash}/bin/sh -c "[ $SERVICE_RESULT = success ] || docker stop ${name}"'';
 
       ### There is no generalized way of supporting `reload` for docker
       ### containers. Some containers may respond well to SIGHUP sent to their
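
The new autoStart option decides whether the generated docker-<name>.service is wanted by multi-user.target; with autoStart = false the container is only started on demand. A sketch with an illustrative container name and image:

    {
      docker-containers.webapp = {
        image = "nginx:stable";   # illustrative image
        autoStart = false;        # start manually with: systemctl start docker-webapp.service
      };
    }
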
diff --git a/nixos/release-combined.nix b/nixos/release-combined.nix
index b46731863ca..e47e64edb29 100644
--- a/nixos/release-combined.nix
+++ b/nixos/release-combined.nix
@@ -38,109 +38,90 @@ in rec {
     nixpkgs = nixpkgsSrc;
   })) [ "unstable" ];
 
-  tested = pkgs.lib.hydraJob (pkgs.releaseTools.aggregate {
+  tested = pkgs.releaseTools.aggregate {
     name = "nixos-${nixos.channel.version}";
     meta = {
       description = "Release-critical builds for the NixOS channel";
       maintainers = with pkgs.lib.maintainers; [ eelco fpletz ];
     };
-    constituents =
-      let
-        # Except for the given systems, return the system-specific constituent
-        except = systems: x: map (system: x.${system}) (pkgs.lib.subtractLists systems supportedSystems);
-        all = x: except [] x;
-      in [
-        nixos.channel
-        (all nixos.dummy)
-        (all nixos.manual)
-
-        nixos.iso_plasma5.x86_64-linux or []
-        nixos.iso_minimal.aarch64-linux or []
-        nixos.iso_minimal.i686-linux or []
-        nixos.iso_minimal.x86_64-linux or []
-        nixos.ova.x86_64-linux or []
-        nixos.sd_image.aarch64-linux or []
-
-        #(all nixos.tests.containers)
-        (all nixos.tests.containers-imperative)
-        (all nixos.tests.containers-ip)
-        nixos.tests.chromium.x86_64-linux or []
-        (all nixos.tests.firefox)
-        (all nixos.tests.firewall)
-        (all nixos.tests.fontconfig-default-fonts)
-        (all nixos.tests.gnome3-xorg)
-        (all nixos.tests.gnome3)
-        (all nixos.tests.pantheon)
-        nixos.tests.installer.zfsroot.x86_64-linux or [] # ZFS is 64bit only
-        (except ["aarch64-linux"] nixos.tests.installer.lvm)
-        (except ["aarch64-linux"] nixos.tests.installer.luksroot)
-        (except ["aarch64-linux"] nixos.tests.installer.separateBoot)
-        (except ["aarch64-linux"] nixos.tests.installer.separateBootFat)
-        (except ["aarch64-linux"] nixos.tests.installer.simple)
-        (except ["aarch64-linux"] nixos.tests.installer.simpleLabels)
-        (except ["aarch64-linux"] nixos.tests.installer.simpleProvided)
-        (except ["aarch64-linux"] nixos.tests.installer.simpleUefiSystemdBoot)
-        (except ["aarch64-linux"] nixos.tests.installer.swraid)
-        (except ["aarch64-linux"] nixos.tests.installer.btrfsSimple)
-        (except ["aarch64-linux"] nixos.tests.installer.btrfsSubvols)
-        (except ["aarch64-linux"] nixos.tests.installer.btrfsSubvolDefault)
-        (except ["aarch64-linux"] nixos.tests.boot.biosCdrom)
-        #(except ["aarch64-linux"] nixos.tests.boot.biosUsb) # disabled due to issue #15690
-        (except ["aarch64-linux"] nixos.tests.boot.uefiCdrom)
-        (except ["aarch64-linux"] nixos.tests.boot.uefiUsb)
-        (all nixos.tests.boot-stage1)
-        (all nixos.tests.hibernate)
-        nixos.tests.docker.x86_64-linux or []
-        (all nixos.tests.ecryptfs)
-        (all nixos.tests.env)
-        (all nixos.tests.ipv6)
-        (all nixos.tests.i3wm)
-        # 2018-06-06: keymap tests temporarily removed from tested job
-        # since non-deterministic failure are blocking the channel (#41538)
-        #(all nixos.tests.keymap.azerty)
-        #(all nixos.tests.keymap.colemak)
-        #(all nixos.tests.keymap.dvorak)
-        #(all nixos.tests.keymap.dvp)
-        #(all nixos.tests.keymap.neo)
-        #(all nixos.tests.keymap.qwertz)
-        (all nixos.tests.plasma5)
-        (all nixos.tests.lightdm)
-        (all nixos.tests.login)
-        (all nixos.tests.misc)
-        (all nixos.tests.mutableUsers)
-        (all nixos.tests.nat.firewall)
-        (all nixos.tests.nat.firewall-conntrack)
-        (all nixos.tests.nat.standalone)
-        (all nixos.tests.networking.scripted.loopback)
-        (all nixos.tests.networking.scripted.static)
-        (all nixos.tests.networking.scripted.dhcpSimple)
-        (all nixos.tests.networking.scripted.dhcpOneIf)
-        (all nixos.tests.networking.scripted.bond)
-        (all nixos.tests.networking.scripted.bridge)
-        (all nixos.tests.networking.scripted.macvlan)
-        (all nixos.tests.networking.scripted.sit)
-        (all nixos.tests.networking.scripted.vlan)
-        (all nixos.tests.nfs3.simple)
-        (all nixos.tests.nfs4.simple)
-        (all nixos.tests.openssh)
-        (all nixos.tests.php-pcre)
-        (all nixos.tests.predictable-interface-names.predictable)
-        (all nixos.tests.predictable-interface-names.unpredictable)
-        (all nixos.tests.predictable-interface-names.predictableNetworkd)
-        (all nixos.tests.predictable-interface-names.unpredictableNetworkd)
-        (all nixos.tests.printing)
-        (all nixos.tests.proxy)
-        (all nixos.tests.sddm.default)
-        (all nixos.tests.simple)
-        (all nixos.tests.switchTest)
-        (all nixos.tests.udisks2)
-        (all nixos.tests.xfce)
-
-        nixpkgs.tarball
-        (all allSupportedNixpkgs.emacs)
-        # The currently available aarch64 JDK is unfree
-        (except ["aarch64-linux"] allSupportedNixpkgs.jdk)
-      ];
-  });
+    constituents = [
+      "nixos.channel"
+      "nixos.dummy.x86_64-linux"
+      "nixos.iso_minimal.aarch64-linux"
+      "nixos.iso_minimal.i686-linux"
+      "nixos.iso_minimal.x86_64-linux"
+      "nixos.iso_plasma5.x86_64-linux"
+      "nixos.manual.x86_64-linux"
+      "nixos.ova.x86_64-linux"
+      "nixos.sd_image.aarch64-linux"
+      "nixos.tests.boot.biosCdrom.x86_64-linux"
+      "nixos.tests.boot-stage1.x86_64-linux"
+      "nixos.tests.boot.uefiCdrom.x86_64-linux"
+      "nixos.tests.boot.uefiUsb.x86_64-linux"
+      "nixos.tests.chromium.x86_64-linux"
+      "nixos.tests.containers-imperative.x86_64-linux"
+      "nixos.tests.containers-ip.x86_64-linux"
+      "nixos.tests.docker.x86_64-linux"
+      "nixos.tests.ecryptfs.x86_64-linux"
+      "nixos.tests.env.x86_64-linux"
+      "nixos.tests.firefox.x86_64-linux"
+      "nixos.tests.firewall.x86_64-linux"
+      "nixos.tests.fontconfig-default-fonts.x86_64-linux"
+      "nixos.tests.gnome3.x86_64-linux"
+      "nixos.tests.gnome3-xorg.x86_64-linux"
+      "nixos.tests.hibernate.x86_64-linux"
+      "nixos.tests.i3wm.x86_64-linux"
+      "nixos.tests.installer.btrfsSimple.x86_64-linux"
+      "nixos.tests.installer.btrfsSubvolDefault.x86_64-linux"
+      "nixos.tests.installer.btrfsSubvols.x86_64-linux"
+      "nixos.tests.installer.luksroot.x86_64-linux"
+      "nixos.tests.installer.lvm.x86_64-linux"
+      "nixos.tests.installer.separateBootFat.x86_64-linux"
+      "nixos.tests.installer.separateBoot.x86_64-linux"
+      "nixos.tests.installer.simpleLabels.x86_64-linux"
+      "nixos.tests.installer.simpleProvided.x86_64-linux"
+      "nixos.tests.installer.simpleUefiSystemdBoot.x86_64-linux"
+      "nixos.tests.installer.simple.x86_64-linux"
+      "nixos.tests.installer.swraid.x86_64-linux"
+      "nixos.tests.ipv6.x86_64-linux"
+      "nixos.tests.lightdm.x86_64-linux"
+      "nixos.tests.login.x86_64-linux"
+      "nixos.tests.misc.x86_64-linux"
+      "nixos.tests.mutableUsers.x86_64-linux"
+      "nixos.tests.nat.firewall-conntrack.x86_64-linux"
+      "nixos.tests.nat.firewall.x86_64-linux"
+      "nixos.tests.nat.standalone.x86_64-linux"
+      "nixos.tests.networking.scripted.bond.x86_64-linux"
+      "nixos.tests.networking.scripted.bridge.x86_64-linux"
+      "nixos.tests.networking.scripted.dhcpOneIf.x86_64-linux"
+      "nixos.tests.networking.scripted.dhcpSimple.x86_64-linux"
+      "nixos.tests.networking.scripted.loopback.x86_64-linux"
+      "nixos.tests.networking.scripted.macvlan.x86_64-linux"
+      "nixos.tests.networking.scripted.sit.x86_64-linux"
+      "nixos.tests.networking.scripted.static.x86_64-linux"
+      "nixos.tests.networking.scripted.vlan.x86_64-linux"
+      "nixos.tests.nfs3.simple.x86_64-linux"
+      "nixos.tests.nfs4.simple.x86_64-linux"
+      "nixos.tests.openssh.x86_64-linux"
+      "nixos.tests.pantheon.x86_64-linux"
+      "nixos.tests.php-pcre.x86_64-linux"
+      "nixos.tests.plasma5.x86_64-linux"
+      "nixos.tests.predictable-interface-names.predictableNetworkd.x86_64-linux"
+      "nixos.tests.predictable-interface-names.predictable.x86_64-linux"
+      "nixos.tests.predictable-interface-names.unpredictableNetworkd.x86_64-linux"
+      "nixos.tests.predictable-interface-names.unpredictable.x86_64-linux"
+      "nixos.tests.printing.x86_64-linux"
+      "nixos.tests.proxy.x86_64-linux"
+      "nixos.tests.sddm.default.x86_64-linux"
+      "nixos.tests.simple.x86_64-linux"
+      "nixos.tests.switchTest.x86_64-linux"
+      "nixos.tests.udisks2.x86_64-linux"
+      "nixos.tests.xfce.x86_64-linux"
+      "nixos.tests.zfs.installer.i686-linux"
+      "nixpkgs.emacs.x86_64-linux"
+      "nixpkgs.jdk.x86_64-linux"
+      "nixpkgs.tarball"
+    ];
+  };
 
 }
diff --git a/nixos/release-small.nix b/nixos/release-small.nix
index 74c16e990f3..7b86a91357e 100644
--- a/nixos/release-small.nix
+++ b/nixos/release-small.nix
@@ -82,18 +82,42 @@ in rec {
       vim;
   };
 
-  tested = lib.hydraJob (pkgs.releaseTools.aggregate {
+  tested = pkgs.releaseTools.aggregate {
     name = "nixos-${nixos.channel.version}";
     meta = {
       description = "Release-critical builds for the NixOS channel";
       maintainers = [ lib.maintainers.eelco ];
     };
     constituents =
-      let all = x: map (system: x.${system}) supportedSystems; in
-      [ nixpkgs.tarball
-        (all nixpkgs.jdk)
-      ]
-      ++ lib.collect lib.isDerivation nixos;
-  });
+      [ "nixos.channel"
+        "nixos.dummy.x86_64-linux"
+        "nixos.iso_minimal.x86_64-linux"
+        "nixos.manual.x86_64-linux"
+        "nixos.tests.boot.biosCdrom.x86_64-linux"
+        "nixos.tests.containers-imperative.x86_64-linux"
+        "nixos.tests.containers-ip.x86_64-linux"
+        "nixos.tests.firewall.x86_64-linux"
+        "nixos.tests.installer.lvm.x86_64-linux"
+        "nixos.tests.installer.separateBoot.x86_64-linux"
+        "nixos.tests.installer.simple.x86_64-linux"
+        "nixos.tests.ipv6.x86_64-linux"
+        "nixos.tests.login.x86_64-linux"
+        "nixos.tests.misc.x86_64-linux"
+        "nixos.tests.nat.firewall-conntrack.x86_64-linux"
+        "nixos.tests.nat.firewall.x86_64-linux"
+        "nixos.tests.nat.standalone.x86_64-linux"
+        "nixos.tests.nfs3.simple.x86_64-linux"
+        "nixos.tests.openssh.x86_64-linux"
+        "nixos.tests.php-pcre.x86_64-linux"
+        "nixos.tests.predictable-interface-names.predictable.x86_64-linux"
+        "nixos.tests.predictable-interface-names.predictableNetworkd.x86_64-linux"
+        "nixos.tests.predictable-interface-names.unpredictable.x86_64-linux"
+        "nixos.tests.predictable-interface-names.unpredictableNetworkd.x86_64-linux"
+        "nixos.tests.proxy.x86_64-linux"
+        "nixos.tests.simple.x86_64-linux"
+        "nixpkgs.jdk.x86_64-linux"
+        "nixpkgs.tarball"
+      ];
+  };
 
 }
diff --git a/nixos/release.nix b/nixos/release.nix
index 512ba714397..6107f352971 100644
--- a/nixos/release.nix
+++ b/nixos/release.nix
@@ -20,7 +20,7 @@ let
   allTestsForSystem = system:
     import ./tests/all-tests.nix {
       inherit system;
-      pkgs = import nixpkgs { inherit system; };
+      pkgs = import ./.. { inherit system; };
       callTest = t: {
         ${system} = hydraJob t.test;
       };
@@ -28,7 +28,7 @@ let
   allTests =
     foldAttrs recursiveUpdate {} (map allTestsForSystem supportedSystems);
 
-  pkgs = import nixpkgs { system = "x86_64-linux"; };
+  pkgs = import ./.. { system = "x86_64-linux"; };
 
 
   versionModule =
@@ -41,7 +41,7 @@ let
   makeIso =
     { module, type, system, ... }:
 
-    with import nixpkgs { inherit system; };
+    with import ./.. { inherit system; };
 
     hydraJob ((import lib/eval-config.nix {
       inherit system;
@@ -54,7 +54,7 @@ let
   makeSdImage =
     { module, system, ... }:
 
-    with import nixpkgs { inherit system; };
+    with import ./.. { inherit system; };
 
     hydraJob ((import lib/eval-config.nix {
       inherit system;
@@ -65,7 +65,7 @@ let
   makeSystemTarball =
     { module, maintainers ? ["viric"], system }:
 
-    with import nixpkgs { inherit system; };
+    with import ./.. { inherit system; };
 
     let
 
@@ -188,7 +188,7 @@ in rec {
   # A bootable VirtualBox virtual appliance as an OVA file (i.e. packaged OVF).
   ova = forMatchingSystems [ "x86_64-linux" ] (system:
 
-    with import nixpkgs { inherit system; };
+    with import ./.. { inherit system; };
 
     hydraJob ((import lib/eval-config.nix {
       inherit system;
@@ -204,7 +204,7 @@ in rec {
   # A disk image that can be imported to Amazon EC2 and registered as an AMI
   amazonImage = forMatchingSystems [ "x86_64-linux" "aarch64-linux" ] (system:
 
-    with import nixpkgs { inherit system; };
+    with import ./.. { inherit system; };
 
     hydraJob ((import lib/eval-config.nix {
       inherit system;
diff --git a/nixos/tests/acme.nix b/nixos/tests/acme.nix
index 6bd315ff1ea..e045f3415fa 100644
--- a/nixos/tests/acme.nix
+++ b/nixos/tests/acme.nix
@@ -1,17 +1,50 @@
 let
   commonConfig = ./common/letsencrypt/common.nix;
+
+  dnsScript = {writeScript, dnsAddress, bash, curl}: writeScript "dns-hook.sh" ''
+    #!${bash}/bin/bash
+    set -euo pipefail
+    echo '[INFO]' "[$2]" 'dns-hook.sh' $*
+    if [ "$1" = "present" ]; then
+      ${curl}/bin/curl --data '{"host": "'"$2"'", "value": "'"$3"'"}' http://${dnsAddress}:8055/set-txt
+    else
+      ${curl}/bin/curl --data '{"host": "'"$2"'"}' http://${dnsAddress}:8055/clear-txt
+    fi
+  '';
+
 in import ./make-test-python.nix {
   name = "acme";
 
   nodes = rec {
-    letsencrypt = ./common/letsencrypt;
+    letsencrypt = { nodes, lib, ... }: {
+      imports = [ ./common/letsencrypt ];
+      networking.nameservers = lib.mkForce [
+        nodes.dnsserver.config.networking.primaryIPAddress
+      ];
+    };
+
+    dnsserver = { nodes, pkgs, ... }: {
+      networking.firewall.allowedTCPPorts = [ 8055 53 ];
+      networking.firewall.allowedUDPPorts = [ 53 ];
+      systemd.services.pebble-challtestsrv = {
+        enable = true;
+        description = "Pebble ACME challenge test server";
+        wantedBy = [ "network.target" ];
+        serviceConfig = {
+          ExecStart = "${pkgs.pebble}/bin/pebble-challtestsrv -dns01 ':53' -defaultIPv6 '' -defaultIPv4 '${nodes.webserver.config.networking.primaryIPAddress}'";
+          # Required to bind to privileged ports.
+          User = "root";
+          Group = "root";
+        };
+      };
+    };
 
-    acmeStandalone = { config, pkgs, ... }: {
+    acmeStandalone = { nodes, lib, config, pkgs, ... }: {
       imports = [ commonConfig ];
+      networking.nameservers = lib.mkForce [
+        nodes.dnsserver.config.networking.primaryIPAddress
+      ];
       networking.firewall.allowedTCPPorts = [ 80 ];
-      networking.extraHosts = ''
-        ${config.networking.primaryIPAddress} standalone.com
-      '';
       security.acme = {
         server = "https://acme-v02.api.letsencrypt.org/dir";
         certs."standalone.com" = {
@@ -29,14 +62,12 @@ in import ./make-test-python.nix {
       };
     };
 
-    webserver = { config, pkgs, ... }: {
+    webserver = { nodes, config, pkgs, lib, ... }: {
       imports = [ commonConfig ];
       networking.firewall.allowedTCPPorts = [ 80 443 ];
-
-      networking.extraHosts = ''
-        ${config.networking.primaryIPAddress} a.example.com
-        ${config.networking.primaryIPAddress} b.example.com
-      '';
+      networking.nameservers = lib.mkForce [
+        nodes.dnsserver.config.networking.primaryIPAddress
+      ];
 
       # A target remains active. Use this to probe the fact that
       # a service fired even though it does not set RemainAfterExit
@@ -44,6 +75,7 @@ in import ./make-test-python.nix {
       systemd.services."acme-a.example.com" = {
         wants = [ "acme-finished-a.example.com.target" ];
         before = [ "acme-finished-a.example.com.target" ];
+        after = [ "nginx.service" ];
       };
 
       services.nginx.enable = true;
@@ -61,14 +93,11 @@ in import ./make-test-python.nix {
 
       nesting.clone = [
         ({pkgs, ...}: {
-
-          networking.extraHosts = ''
-            ${config.networking.primaryIPAddress} b.example.com
-          '';
           systemd.targets."acme-finished-b.example.com" = {};
           systemd.services."acme-b.example.com" = {
             wants = [ "acme-finished-b.example.com.target" ];
             before = [ "acme-finished-b.example.com.target" ];
+            after = [ "nginx.service" ];
           };
           services.nginx.virtualHosts."b.example.com" = {
             enableACME = true;
@@ -79,15 +108,48 @@ in import ./make-test-python.nix {
             '';
           };
         })
+        ({pkgs, config, nodes, lib, ...}: {
+          security.acme.certs."example.com" = {
+            domain = "*.example.com";
+            dnsProvider = "exec";
+            dnsPropagationCheck = false;
+            credentialsFile = with pkgs; writeText "wildcard.env" ''
+              EXEC_PATH=${dnsScript { inherit writeScript bash curl; dnsAddress = nodes.dnsserver.config.networking.primaryIPAddress; }}
+            '';
+            user = config.services.nginx.user;
+            group = config.services.nginx.group;
+          };
+          systemd.targets."acme-finished-example.com" = {};
+          systemd.services."acme-example.com" = {
+            wants = [ "acme-finished-example.com.target" ];
+            before = [ "acme-finished-example.com.target" "nginx.service" ];
+            wantedBy = [ "nginx.service" ];
+          };
+          services.nginx.virtualHosts."c.example.com" = {
+            forceSSL = true;
+            sslCertificate = config.security.acme.certs."example.com".directory + "/cert.pem";
+            sslTrustedCertificate = config.security.acme.certs."example.com".directory + "/full.pem";
+            sslCertificateKey = config.security.acme.certs."example.com".directory + "/key.pem";
+            locations."/".root = pkgs.runCommand "docroot" {} ''
+              mkdir -p "$out"
+              echo hello world > "$out/index.html"
+            '';
+          };
+        })
       ];
     };
 
-    client = commonConfig;
+    client = {nodes, lib, ...}: {
+      imports = [ commonConfig ];
+      networking.nameservers = lib.mkForce [
+        nodes.dnsserver.config.networking.primaryIPAddress
+      ];
+    };
   };
 
   testScript = {nodes, ...}:
     let
-      newServerSystem = nodes.webserver2.config.system.build.toplevel;
+      newServerSystem = nodes.webserver.config.system.build.toplevel;
       switchToNewServer = "${newServerSystem}/bin/switch-to-configuration test";
     in
     # Note, wait_for_unit does not work for oneshot services that do not have RemainAfterExit=true,
@@ -97,6 +159,17 @@ in import ./make-test-python.nix {
     # can use them to probe that a oneshot fired. It is a bit ugly, but it is the best we can do
     ''
       client.start()
+      dnsserver.start()
+
+      letsencrypt.wait_for_unit("default.target")
+      dnsserver.wait_for_unit("pebble-challtestsrv.service")
+      client.succeed(
+          'curl --data \'{"host": "acme-v02.api.letsencrypt.org", "addresses": ["${nodes.letsencrypt.config.networking.primaryIPAddress}"]}\' http://${nodes.dnsserver.config.networking.primaryIPAddress}:8055/add-a'
+      )
+      client.succeed(
+          'curl --data \'{"host": "standalone.com", "addresses": ["${nodes.acmeStandalone.config.networking.primaryIPAddress}"]}\' http://${nodes.dnsserver.config.networking.primaryIPAddress}:8055/add-a'
+      )
+
       letsencrypt.start()
       acmeStandalone.start()
 
@@ -129,5 +202,17 @@ in import ./make-test-python.nix {
           client.succeed(
               "curl --cacert /tmp/ca.crt https://b.example.com/ | grep -qF 'hello world'"
           )
+
+      with subtest("Can request wildcard certificates using DNS-01 challenge"):
+          webserver.succeed(
+              "${switchToNewServer}"
+          )
+          webserver.succeed(
+              "/run/current-system/fine-tune/child-2/bin/switch-to-configuration test"
+          )
+          webserver.wait_for_unit("acme-finished-example.com.target")
+          client.succeed(
+              "curl --cacert /tmp/ca.crt https://c.example.com/ | grep -qF 'hello world'"
+          )
     '';
 }
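A minimal sketch of the target-probing pattern the test comments above rely on; the unit names here are illustrative and not part of the patch:

  # Oneshot units without RemainAfterExit=true go inactive -> activating -> inactive,
  # so wait_for_unit cannot observe them. Pair the service with a target that stays
  # active once reached, and have the test wait for the target instead.
  systemd.targets."acme-finished-example.org" = {};
  systemd.services."acme-example.org" = {
    wants = [ "acme-finished-example.org.target" ];
    before = [ "acme-finished-example.org.target" ];
  };
  # In the Python test script:
  #   webserver.wait_for_unit("acme-finished-example.org.target")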
diff --git a/nixos/tests/all-tests.nix b/nixos/tests/all-tests.nix
index b773cf3364f..2e547780439 100644
--- a/nixos/tests/all-tests.nix
+++ b/nixos/tests/all-tests.nix
@@ -32,7 +32,7 @@ in
   bees = handleTest ./bees.nix {};
   bind = handleTest ./bind.nix {};
   bittorrent = handleTest ./bittorrent.nix {};
-  buildkite-agent = handleTest ./buildkite-agent.nix {};
+  buildkite-agents = handleTest ./buildkite-agents.nix {};
   boot = handleTestOn ["x86_64-linux"] ./boot.nix {}; # syslinux is unsupported on aarch64
   boot-stage1 = handleTest ./boot-stage1.nix {};
   borgbackup = handleTest ./borgbackup.nix {};
@@ -88,6 +88,7 @@ in
   fancontrol = handleTest ./fancontrol.nix {};
   ferm = handleTest ./ferm.nix {};
   firefox = handleTest ./firefox.nix {};
+  firefox-esr = handleTest ./firefox.nix { esr = true; };
   firewall = handleTest ./firewall.nix {};
   fish = handleTest ./fish.nix {};
   flannel = handleTestOn ["x86_64-linux"] ./flannel.nix {};
@@ -136,6 +137,7 @@ in
   jackett = handleTest ./jackett.nix {};
   jellyfin = handleTest ./jellyfin.nix {};
   jenkins = handleTest ./jenkins.nix {};
+  jirafeau = handleTest ./jirafeau.nix {};
   kafka = handleTest ./kafka.nix {};
   keepalived = handleTest ./keepalived.nix {};
   kerberos = handleTest ./kerberos/default.nix {};
@@ -144,6 +146,7 @@ in
   kernel-testing = handleTest ./kernel-testing.nix {};
   keymap = handleTest ./keymap.nix {};
   knot = handleTest ./knot.nix {};
+  krb5 = discoverTests (import ./krb5 {});
   kubernetes.dns = handleTestOn ["x86_64-linux"] ./kubernetes/dns.nix {};
   # kubernetes.e2e should eventually replace kubernetes.rbac when it works
   #kubernetes.e2e = handleTestOn ["x86_64-linux"] ./kubernetes/e2e.nix {};
diff --git a/nixos/tests/buildkite-agent.nix b/nixos/tests/buildkite-agents.nix
index 3c824c9aedf..a6f33e0143c 100644
--- a/nixos/tests/buildkite-agent.nix
+++ b/nixos/tests/buildkite-agents.nix
@@ -6,18 +6,13 @@ import ./make-test-python.nix ({ pkgs, ... }:
     maintainers = [ flokli ];
   };
 
-  nodes = {
-    node1 = { pkgs, ... }: {
-      services.buildkite-agent = {
-        enable = true;
+  machine = { pkgs, ... }: {
+    services.buildkite-agents = {
+      one = {
         privateSshKeyPath = (import ./ssh-keys.nix pkgs).snakeOilPrivateKey;
         tokenPath = (pkgs.writeText "my-token" "5678");
       };
-    };
-    # don't configure ssh key, run as a separate user
-    node2 = { pkgs, ...}: {
-      services.buildkite-agent = {
-        enable = true;
+      two = {
         tokenPath = (pkgs.writeText "my-token" "1234");
       };
     };
@@ -28,9 +23,9 @@ import ./make-test-python.nix ({ pkgs, ... }:
     # we can't wait on the unit to start up, as we obviously can't connect to buildkite,
     # but we can check whether the files are set up correctly
 
-    node1.wait_for_file("/var/lib/buildkite-agent/buildkite-agent.cfg")
-    node1.wait_for_file("/var/lib/buildkite-agent/.ssh/id_rsa")
+    machine.wait_for_file("/var/lib/buildkite-agent-one/buildkite-agent.cfg")
+    machine.wait_for_file("/var/lib/buildkite-agent-one/.ssh/id_rsa")
 
-    node2.wait_for_file("/var/lib/buildkite-agent/buildkite-agent.cfg")
+    machine.wait_for_file("/var/lib/buildkite-agent-two/buildkite-agent.cfg")
   '';
 })
diff --git a/nixos/tests/common/letsencrypt/common.nix b/nixos/tests/common/letsencrypt/common.nix
index c530de817bf..bd559c8dacc 100644
--- a/nixos/tests/common/letsencrypt/common.nix
+++ b/nixos/tests/common/letsencrypt/common.nix
@@ -5,5 +5,8 @@ in {
     nodes.letsencrypt.config.networking.primaryIPAddress
   ];
 
+  security.acme.acceptTerms = true;
+  security.acme.email = "webmaster@example.com";
+
   security.pki.certificateFiles = [ letsencrypt-ca ];
 }
diff --git a/nixos/tests/docker-containers.nix b/nixos/tests/docker-containers.nix
index 9be9bfa80ce..0e318a52d9f 100644
--- a/nixos/tests/docker-containers.nix
+++ b/nixos/tests/docker-containers.nix
@@ -1,30 +1,27 @@
 # Test Docker containers as systemd units
 
-import ./make-test.nix ({ pkgs, lib, ... }:
-
-{
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
   name = "docker-containers";
   meta = {
     maintainers = with lib.maintainers; [ benley mkaito ];
   };
 
   nodes = {
-    docker = { pkgs, ... }:
-      {
-        virtualisation.docker.enable = true;
+    docker = { pkgs, ... }: {
+      virtualisation.docker.enable = true;
 
-        docker-containers.nginx = {
-          image = "nginx-container";
-          imageFile = pkgs.dockerTools.examples.nginx;
-          ports = ["8181:80"];
-        };
+      docker-containers.nginx = {
+        image = "nginx-container";
+        imageFile = pkgs.dockerTools.examples.nginx;
+        ports = ["8181:80"];
       };
+    };
   };
 
   testScript = ''
-    startAll;
-    $docker->waitForUnit("docker-nginx.service");
-    $docker->waitForOpenPort(8181);
-    $docker->waitUntilSucceeds("curl http://localhost:8181|grep Hello");
+    start_all()
+    docker.wait_for_unit("docker-nginx.service")
+    docker.wait_for_open_port(8181)
+    docker.wait_until_succeeds("curl http://localhost:8181 | grep Hello")
   '';
 })
diff --git a/nixos/tests/docker-tools.nix b/nixos/tests/docker-tools.nix
index 07fac533680..ca750e8ba3c 100644
--- a/nixos/tests/docker-tools.nix
+++ b/nixos/tests/docker-tools.nix
@@ -83,5 +83,11 @@ import ./make-test.nix ({ pkgs, ... }: {
 
       # Ensure image with only 2 layers can be loaded
       $docker->succeed("docker load --input='${pkgs.dockerTools.examples.two-layered-image}'");
+
+      # Ensure the bulk layer didn't miss store paths
+      # Regression test for https://github.com/NixOS/nixpkgs/issues/78744
+      $docker->succeed("docker load --input='${pkgs.dockerTools.examples.bulk-layer}'");
+      # This ensures that both output paths (ls and hello) are in the layer
+      $docker->succeed("docker run bulk-layer ls /bin/hello");
     '';
 })
diff --git a/nixos/tests/firefox.nix b/nixos/tests/firefox.nix
index 56ddabbae77..7071baceba7 100644
--- a/nixos/tests/firefox.nix
+++ b/nixos/tests/firefox.nix
@@ -1,4 +1,4 @@
-import ./make-test-python.nix ({ pkgs, ... }: {
+import ./make-test-python.nix ({ pkgs, esr ? false, ... }: {
   name = "firefox";
   meta = with pkgs.stdenv.lib.maintainers; {
     maintainers = [ eelco shlevy ];
@@ -8,7 +8,9 @@ import ./make-test-python.nix ({ pkgs, ... }: {
     { pkgs, ... }:
 
     { imports = [ ./common/x11.nix ];
-      environment.systemPackages = [ pkgs.firefox pkgs.xdotool ];
+      environment.systemPackages =
+        (if esr then [ pkgs.firefox-esr ] else [ pkgs.firefox ])
+        ++ [ pkgs.xdotool ];
     };
 
   testScript = ''
diff --git a/nixos/tests/glusterfs.nix b/nixos/tests/glusterfs.nix
index 8f9cb8973d5..cb07bc09511 100644
--- a/nixos/tests/glusterfs.nix
+++ b/nixos/tests/glusterfs.nix
@@ -4,10 +4,11 @@ let
   client = { pkgs, ... } : {
     environment.systemPackages = [ pkgs.glusterfs ];
     fileSystems = pkgs.lib.mkVMOverride
-    [ { mountPoint = "/gluster";
-        fsType = "glusterfs";
-        device = "server1:/gv0";
-    } ];
+      { "/gluster" =
+          { device = "server1:/gv0";
+            fsType = "glusterfs";
+          };
+      };
   };
 
   server = { pkgs, ... } : {
@@ -22,11 +23,11 @@ let
     virtualisation.emptyDiskImages = [ 1024 ];
 
     fileSystems = pkgs.lib.mkVMOverride
-      [ { mountPoint = "/data";
-          device = "/dev/disk/by-label/data";
-          fsType = "ext4";
-        }
-      ];
+      { "/data" =
+          { device = "/dev/disk/by-label/data";
+            fsType = "ext4";
+          };
+      };
   };
 in {
   name = "glusterfs";
diff --git a/nixos/tests/installed-tests/default.nix b/nixos/tests/installed-tests/default.nix
index 8e997ee4aeb..a189ef63f22 100644
--- a/nixos/tests/installed-tests/default.nix
+++ b/nixos/tests/installed-tests/default.nix
@@ -90,7 +90,9 @@ in
   graphene = callInstalledTest ./graphene.nix {};
   ibus = callInstalledTest ./ibus.nix {};
   libgdata = callInstalledTest ./libgdata.nix {};
+  glib-testing = callInstalledTest ./glib-testing.nix {};
   libxmlb = callInstalledTest ./libxmlb.nix {};
+  malcontent = callInstalledTest ./malcontent.nix {};
   ostree = callInstalledTest ./ostree.nix {};
   xdg-desktop-portal = callInstalledTest ./xdg-desktop-portal.nix {};
 }
diff --git a/nixos/tests/installed-tests/glib-testing.nix b/nixos/tests/installed-tests/glib-testing.nix
new file mode 100644
index 00000000000..7a06cf792bd
--- /dev/null
+++ b/nixos/tests/installed-tests/glib-testing.nix
@@ -0,0 +1,5 @@
+{ pkgs, makeInstalledTest, ... }:
+
+makeInstalledTest {
+  tested = pkgs.glib-testing;
+}
diff --git a/nixos/tests/installed-tests/malcontent.nix b/nixos/tests/installed-tests/malcontent.nix
new file mode 100644
index 00000000000..d4e214c4198
--- /dev/null
+++ b/nixos/tests/installed-tests/malcontent.nix
@@ -0,0 +1,5 @@
+{ pkgs, makeInstalledTest, ... }:
+
+makeInstalledTest {
+  tested = pkgs.malcontent;
+}
diff --git a/nixos/tests/installed-tests/xdg-desktop-portal.nix b/nixos/tests/installed-tests/xdg-desktop-portal.nix
index b16008ff4ad..90529d37ee0 100644
--- a/nixos/tests/installed-tests/xdg-desktop-portal.nix
+++ b/nixos/tests/installed-tests/xdg-desktop-portal.nix
@@ -2,4 +2,8 @@
 
 makeInstalledTest {
   tested = pkgs.xdg-desktop-portal;
+
+  # Ton of breakage.
+  # https://github.com/flatpak/xdg-desktop-portal/pull/428
+  meta.broken = true;
 }
diff --git a/nixos/tests/jirafeau.nix b/nixos/tests/jirafeau.nix
new file mode 100644
index 00000000000..0f5af7f718a
--- /dev/null
+++ b/nixos/tests/jirafeau.nix
@@ -0,0 +1,22 @@
+import ./make-test-python.nix ({ lib, ... }:
+
+with lib;
+
+{
+  name = "jirafeau";
+  meta.maintainers = with maintainers; [ davidtwco ];
+
+  nodes.machine = { pkgs, ... }: {
+    services.jirafeau = {
+      enable = true;
+    };
+  };
+
+  testScript = ''
+    machine.start()
+    machine.wait_for_unit("phpfpm-jirafeau.service")
+    machine.wait_for_unit("nginx.service")
+    machine.wait_for_open_port(80)
+    machine.succeed("curl -sSfL http://localhost/ | grep 'Jirafeau'")
+  '';
+})
diff --git a/nixos/tests/knot.nix b/nixos/tests/knot.nix
index 0588cf86ac0..8bab917a351 100644
--- a/nixos/tests/knot.nix
+++ b/nixos/tests/knot.nix
@@ -28,6 +28,13 @@ let
     name = "knot-zones";
     paths = [ exampleZone delegatedZone ];
   };
+  # DO NOT USE pkgs.writeText IN PRODUCTION. This puts secrets in the Nix store!
+  tsigFile = pkgs.writeText "tsig.conf" ''
+    key:
+      - id: slave_key
+        algorithm: hmac-sha256
+        secret: zOYgOgnzx3TGe5J5I/0kxd7gTcxXhLYMEq3Ek3fY37s=
+  '';
 in {
   name = "knot";
   meta = with pkgs.stdenv.lib.maintainers; {
@@ -48,6 +55,7 @@ in {
       };
       services.knot.enable = true;
       services.knot.extraArgs = [ "-v" ];
+      services.knot.keyFiles = [ tsigFile ];
       services.knot.extraConfig = ''
         server:
             listen: 0.0.0.0@53
@@ -56,6 +64,7 @@ in {
         acl:
           - id: slave_acl
             address: 192.168.0.2
+            key: slave_key
             action: transfer
 
         remote:
@@ -103,6 +112,7 @@ in {
         ];
       };
       services.knot.enable = true;
+      services.knot.keyFiles = [ tsigFile ];
       services.knot.extraArgs = [ "-v" ];
       services.knot.extraConfig = ''
         server:
@@ -117,6 +127,7 @@ in {
         remote:
           - id: master
             address: 192.168.0.1@53
+            key: slave_key
 
         template:
           - id: default
@@ -155,10 +166,10 @@ in {
         ];
       };
       environment.systemPackages = [ pkgs.knot-dns ];
-    };    
+    };
   };
 
-  testScript = { nodes, ... }: let 
+  testScript = { nodes, ... }: let
     master4 = (lib.head nodes.master.config.networking.interfaces.eth1.ipv4.addresses).address;
     master6 = (lib.head nodes.master.config.networking.interfaces.eth1.ipv6.addresses).address;
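For the tsigFile warning above: in a real deployment the same services.knot.keyFiles option can reference a file provisioned at runtime rather than a store path, so the TSIG secret never enters the Nix store. A sketch with an assumed path:

  # Assumed path; the file is deployed out-of-band (e.g. by a secrets manager),
  # keeping the shared TSIG secret out of the world-readable /nix/store.
  services.knot.keyFiles = [ "/run/keys/knot-tsig.conf" ];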
 
diff --git a/nixos/tests/krb5/deprecated-config.nix b/nixos/tests/krb5/deprecated-config.nix
index 7d7926309c9..be6ebce9e05 100644
--- a/nixos/tests/krb5/deprecated-config.nix
+++ b/nixos/tests/krb5/deprecated-config.nix
@@ -1,7 +1,7 @@
 # Verifies that the configuration suggested in deprecated example values
 # will result in the expected output.
 
-import ../make-test.nix ({ pkgs, ...} : {
+import ../make-test-python.nix ({ pkgs, ...} : {
   name = "krb5-with-deprecated-config";
   meta = with pkgs.stdenv.lib.maintainers; {
     maintainers = [ eqyiel ];
@@ -43,6 +43,8 @@ import ../make-test.nix ({ pkgs, ...} : {
 
     '';
   in ''
-    $machine->succeed("diff /etc/krb5.conf ${snapshot}");
+    machine.succeed(
+        "diff /etc/krb5.conf ${snapshot}"
+    )
   '';
 })
diff --git a/nixos/tests/krb5/example-config.nix b/nixos/tests/krb5/example-config.nix
index f01cf6988ee..be195b51393 100644
--- a/nixos/tests/krb5/example-config.nix
+++ b/nixos/tests/krb5/example-config.nix
@@ -1,7 +1,7 @@
 # Verifies that the configuration suggested in (non-deprecated) example values
 # will result in the expected output.
 
-import ../make-test.nix ({ pkgs, ...} : {
+import ../make-test-python.nix ({ pkgs, ...} : {
   name = "krb5-with-example-config";
   meta = with pkgs.stdenv.lib.maintainers; {
     maintainers = [ eqyiel ];
@@ -101,6 +101,8 @@ import ../make-test.nix ({ pkgs, ...} : {
         default      = SYSLOG:NOTICE
     '';
   in ''
-    $machine->succeed("diff /etc/krb5.conf ${snapshot}");
+    machine.succeed(
+        "diff /etc/krb5.conf ${snapshot}"
+    )
   '';
 })
diff --git a/nixos/tests/nfs/simple.nix b/nixos/tests/nfs/simple.nix
index a1a09ee0f45..c49ebddc2fd 100644
--- a/nixos/tests/nfs/simple.nix
+++ b/nixos/tests/nfs/simple.nix
@@ -5,13 +5,13 @@ let
   client =
     { pkgs, ... }:
     { fileSystems = pkgs.lib.mkVMOverride
-        [ { mountPoint = "/data";
-            # nfs4 exports the export with fsid=0 as a virtual root directory
-            device = if (version == 4) then "server:/" else "server:/data";
-            fsType = "nfs";
-            options = [ "vers=${toString version}" ];
-          }
-        ];
+        { "/data" =
+           { # nfs4 exports the export with fsid=0 as a virtual root directory
+             device = if (version == 4) then "server:/" else "server:/data";
+             fsType = "nfs";
+             options = [ "vers=${toString version}" ];
+           };
+        };
       networking.firewall.enable = false; # FIXME: only open statd
     };
 
diff --git a/nixos/tests/nsd.nix b/nixos/tests/nsd.nix
index c3c91e71b5c..bcc14e817a8 100644
--- a/nixos/tests/nsd.nix
+++ b/nixos/tests/nsd.nix
@@ -5,7 +5,7 @@ let
     # for a host utility with IPv6 support
     environment.systemPackages = [ pkgs.bind ];
   };
-in import ./make-test.nix ({ pkgs, ...} : {
+in import ./make-test-python.nix ({ pkgs, ...} : {
   name = "nsd";
   meta = with pkgs.stdenv.lib.maintainers; {
     maintainers = [ aszlig ];
@@ -65,37 +65,35 @@ in import ./make-test.nix ({ pkgs, ...} : {
   };
 
   testScript = ''
-    startAll;
+    start_all()
 
-    $clientv4->waitForUnit("network.target");
-    $clientv6->waitForUnit("network.target");
-    $server->waitForUnit("nsd.service");
+    clientv4.wait_for_unit("network.target")
+    clientv6.wait_for_unit("network.target")
+    server.wait_for_unit("nsd.service")
 
-    sub assertHost {
-      my ($type, $rr, $query, $expected) = @_;
-      my $self = $type eq 4 ? $clientv4 : $clientv6;
-      my $out = $self->succeed("host -$type -t $rr $query");
-      $self->log("output: $out");
-      chomp $out;
-      die "DNS IPv$type query on $query gave '$out' instead of '$expected'"
-        if ($out !~ $expected);
-    }
 
-    foreach (4, 6) {
-      subtest "ipv$_", sub {
-        assertHost($_, "a", "example.com", qr/has no [^ ]+ record/);
-        assertHost($_, "aaaa", "example.com", qr/has no [^ ]+ record/);
+    def assert_host(type, rr, query, expected):
+        self = clientv4 if type == 4 else clientv6
+        out = self.succeed(f"host -{type} -t {rr} {query}").rstrip()
+        self.log(f"output: {out}")
+        assert re.search(
+            expected, out
+        ), f"DNS IPv{type} query on {query} gave '{out}' instead of '{expected}'"
 
-        assertHost($_, "soa", "example.com", qr/SOA.*?noc\.example\.com/);
-        assertHost($_, "a", "ipv4.example.com", qr/address 1.2.3.4$/);
-        assertHost($_, "aaaa", "ipv6.example.com", qr/address abcd::eeff$/);
 
-        assertHost($_, "a", "deleg.example.com", qr/address 9.8.7.6$/);
-        assertHost($_, "aaaa", "deleg.example.com", qr/address fedc::bbaa$/);
+    for ipv in 4, 6:
+        with subtest(f"IPv{ipv}"):
+            assert_host(ipv, "a", "example.com", "has no [^ ]+ record")
+            assert_host(ipv, "aaaa", "example.com", "has no [^ ]+ record")
 
-        assertHost($_, "a", "root", qr/address 1.8.7.4$/);
-        assertHost($_, "aaaa", "root", qr/address acbd::4$/);
-      };
-    }
+            assert_host(ipv, "soa", "example.com", "SOA.*?noc\.example\.com")
+            assert_host(ipv, "a", "ipv4.example.com", "address 1.2.3.4$")
+            assert_host(ipv, "aaaa", "ipv6.example.com", "address abcd::eeff$")
+
+            assert_host(ipv, "a", "deleg.example.com", "address 9.8.7.6$")
+            assert_host(ipv, "aaaa", "deleg.example.com", "address fedc::bbaa$")
+
+            assert_host(ipv, "a", "root", "address 1.8.7.4$")
+            assert_host(ipv, "aaaa", "root", "address acbd::4$")
   '';
 })
diff --git a/nixos/tests/openarena.nix b/nixos/tests/openarena.nix
index b315426532b..395ed9153ea 100644
--- a/nixos/tests/openarena.nix
+++ b/nixos/tests/openarena.nix
@@ -1,41 +1,71 @@
-import ./make-test-python.nix ({ pkgs, ...} : {
+import ./make-test-python.nix ({ pkgs, ...} :
+
+let
+  client =
+    { pkgs, ... }:
+
+    { imports = [ ./common/x11.nix ];
+      hardware.opengl.driSupport = true;
+      environment.systemPackages = [ pkgs.openarena ];
+    };
+
+in {
   name = "openarena";
   meta = with pkgs.stdenv.lib.maintainers; {
-    maintainers = [ tomfitzhenry ];
+    maintainers = [ fpletz ];
   };
 
-  machine =
-    { pkgs, ... }:
+  nodes =
+    { server =
+        { services.openarena = {
+            enable = true;
+            extraFlags = [ "+set g_gametype 0" "+map oa_dm7" "+addbot Angelyss" "+addbot Arachna" ];
+            openPorts = true;
+          };
+        };
 
-    { imports = [];
-      environment.systemPackages = with pkgs; [
-        socat
-      ];
-      services.openarena = {
-        enable = true;
-        extraFlags = [
-          "+set dedicated 2"
-          "+set sv_hostname 'My NixOS server'"
-          "+map oa_dm1"
-        ];
-      };
+      client1 = client;
+      client2 = client;
     };
 
   testScript =
     ''
-      machine.wait_for_unit("openarena.service")
-      machine.wait_until_succeeds("ss --numeric --udp --listening | grep -q 27960")
+      start_all()
 
-      # The log line containing 'resolve address' is last and only message that occurs after
-      # the server starts accepting clients.
-      machine.wait_until_succeeds(
-          "journalctl -u openarena.service | grep 'resolve address: dpmaster.deathmask.net'"
-      )
+      server.wait_for_unit("openarena")
+      server.wait_until_succeeds("ss --numeric --udp --listening | grep -q 27960")
+
+      client1.wait_for_x()
+      client2.wait_for_x()
 
-      # Check it's possible to join the server.
-      # Can't use substring match instead of grep because the output is not utf-8
-      machine.succeed(
-          "echo -n -e '\\xff\\xff\\xff\\xffgetchallenge' | socat - UDP4-DATAGRAM:127.0.0.1:27960 | grep -q challengeResponse"
+      client1.execute("openarena +set r_fullscreen 0 +set name Foo +connect server &")
+      client2.execute("openarena +set r_fullscreen 0 +set name Bar +connect server &")
+
+      server.wait_until_succeeds(
+          "journalctl -u openarena -e | grep -q 'Foo.*entered the game'"
+      )
+      server.wait_until_succeeds(
+          "journalctl -u openarena -e | grep -q 'Bar.*entered the game'"
       )
+
+      server.sleep(10)  # wait for a while to get a nice screenshot
+
+      client1.screenshot("screen_client1_1")
+      client2.screenshot("screen_client2_1")
+
+      client1.block()
+
+      server.sleep(10)
+
+      client1.screenshot("screen_client1_2")
+      client2.screenshot("screen_client2_2")
+
+      client1.unblock()
+
+      server.sleep(10)
+
+      client1.screenshot("screen_client1_3")
+      client2.screenshot("screen_client2_3")
     '';
+
 })
diff --git a/nixos/tests/orangefs.nix b/nixos/tests/orangefs.nix
index 46d7a6a72f8..24b7737058c 100644
--- a/nixos/tests/orangefs.nix
+++ b/nixos/tests/orangefs.nix
@@ -10,11 +10,11 @@ let
     virtualisation.emptyDiskImages = [ 4096 ];
 
     fileSystems = pkgs.lib.mkVMOverride
-      [ { mountPoint = "/data";
-          device = "/dev/disk/by-label/data";
-          fsType = "ext4";
-        }
-      ];
+      { "/data" =
+          { device = "/dev/disk/by-label/data";
+            fsType = "ext4";
+          };
+      };
 
     services.orangefs.server = {
       enable = true;
diff --git a/nixos/tests/plotinus.nix b/nixos/tests/plotinus.nix
index 609afe7b214..39a4234dbf7 100644
--- a/nixos/tests/plotinus.nix
+++ b/nixos/tests/plotinus.nix
@@ -1,4 +1,4 @@
-import ./make-test.nix ({ pkgs, ... }: {
+import ./make-test-python.nix ({ pkgs, ... }: {
   name = "plotinus";
   meta = {
     maintainers = pkgs.plotinus.meta.maintainers;
@@ -12,16 +12,17 @@ import ./make-test.nix ({ pkgs, ... }: {
       environment.systemPackages = [ pkgs.gnome3.gnome-calculator pkgs.xdotool ];
     };
 
-  testScript =
-    ''
-      $machine->waitForX;
-      $machine->succeed("gnome-calculator &");
-      $machine->waitForWindow(qr/gnome-calculator/);
-      $machine->succeed("xdotool search --sync --onlyvisible --class gnome-calculator windowfocus --sync key ctrl+shift+p");
-      $machine->sleep(5); # wait for the popup
-      $machine->succeed("xdotool key --delay 100 p r e f e r e n c e s Return");
-      $machine->waitForWindow(qr/Preferences/);
-      $machine->screenshot("screen");
-    '';
-
+  testScript = ''
+    machine.wait_for_x()
+    machine.succeed("gnome-calculator &")
+    machine.wait_for_window("gnome-calculator")
+    machine.succeed(
+        "xdotool search --sync --onlyvisible --class gnome-calculator "
+        + "windowfocus --sync key --clearmodifiers --delay 1 'ctrl+shift+p'"
+    )
+    machine.sleep(5)  # wait for the popup
+    machine.succeed("xdotool key --delay 100 p r e f e r e n c e s Return")
+    machine.wait_for_window("Preferences")
+    machine.screenshot("screen")
+  '';
 })
diff --git a/nixos/tests/postgresql-wal-receiver.nix b/nixos/tests/postgresql-wal-receiver.nix
index 791b041ba95..372dd9d8c1c 100644
--- a/nixos/tests/postgresql-wal-receiver.nix
+++ b/nixos/tests/postgresql-wal-receiver.nix
@@ -6,17 +6,24 @@ with import ../lib/testing.nix { inherit system pkgs; };
 with pkgs.lib;
 
 let
+  makePostgresqlWalReceiverTest = subTestName: postgresqlPackage: let
+
   postgresqlDataDir = "/var/db/postgresql/test";
   replicationUser = "wal_receiver_user";
   replicationSlot = "wal_receiver_slot";
   replicationConn = "postgresql://${replicationUser}@localhost";
   baseBackupDir = "/tmp/pg_basebackup";
   walBackupDir = "/tmp/pg_wal";
-  recoveryConf = pkgs.writeText "recovery.conf" ''
+  atLeast12 = versionAtLeast postgresqlPackage.version "12.0";
+  restoreCommand = ''
     restore_command = 'cp ${walBackupDir}/%f %p'
   '';
 
-  makePostgresqlWalReceiverTest = subTestName: postgresqlPackage: makeTest {
+  recoveryFile = if atLeast12
+      then pkgs.writeTextDir "recovery.signal" ""
+      else pkgs.writeTextDir "recovery.conf" "${restoreCommand}";
+
+  in makeTest {
     name = "postgresql-wal-receiver-${subTestName}";
     meta.maintainers = with maintainers; [ pacien ];
 
@@ -29,6 +36,9 @@ let
           wal_level = archive # alias for replica on pg >= 9.6
           max_wal_senders = 10
           max_replication_slots = 10
+        '' + optionalString atLeast12 ''
+          ${restoreCommand}
+          recovery_end_command = 'touch recovery.done'
         '';
         authentication = ''
           host replication ${replicationUser} all trust
@@ -45,6 +55,9 @@ let
         slot = replicationSlot;
         directory = walBackupDir;
       };
+      # This only speeds up the test; it is not a race condition. The service is set to always restart,
+      # and the default 60s is fine for a real system but too long for a test.
+      systemd.services.postgresql-wal-receiver-main.serviceConfig.RestartSec = mkForce 5;
     };
 
     testScript = ''
@@ -70,7 +83,7 @@ let
       # prepare WAL and recovery
       $machine->succeed('chmod a+rX -R ${walBackupDir}');
       $machine->execute('for part in ${walBackupDir}/*.partial; do mv $part ''${part%%.*}; done'); # make use of partial segments too
-      $machine->succeed('cp ${recoveryConf} ${postgresqlDataDir}/recovery.conf && chmod 666 ${postgresqlDataDir}/recovery.conf');
+      $machine->succeed('cp ${recoveryFile}/* ${postgresqlDataDir}/ && chmod 666 ${postgresqlDataDir}/recovery*');
 
       # replay WAL
       $machine->systemctl('start postgresql');
diff --git a/nixos/tests/run-in-machine.nix b/nixos/tests/run-in-machine.nix
index 339a4b9a740..67840f3e9fe 100644
--- a/nixos/tests/run-in-machine.nix
+++ b/nixos/tests/run-in-machine.nix
@@ -3,7 +3,7 @@
   pkgs ? import ../.. { inherit system config; }
 }:
 
-with import ../lib/testing.nix { inherit system pkgs; };
+with import ../lib/testing-python.nix { inherit system pkgs; };
 
 let
   output = runInMachine {
diff --git a/nixos/tests/solr.nix b/nixos/tests/solr.nix
index 23e1a960fb3..dc5770e16bc 100644
--- a/nixos/tests/solr.nix
+++ b/nixos/tests/solr.nix
@@ -1,4 +1,4 @@
-import ./make-test.nix ({ pkgs, ... }:
+import ./make-test-python.nix ({ pkgs, ... }:
 
 {
   name = "solr";
@@ -21,28 +21,36 @@ import ./make-test.nix ({ pkgs, ... }:
     };
 
   testScript = ''
-    startAll;
+    start_all()
 
-    $machine->waitForUnit('solr.service');
-    $machine->waitForOpenPort('8983');
-    $machine->succeed('curl --fail http://localhost:8983/solr/');
+    machine.wait_for_unit("solr.service")
+    machine.wait_for_open_port(8983)
+    machine.succeed("curl --fail http://localhost:8983/solr/")
 
     # adapted from pkgs.solr/examples/films/README.txt
-    $machine->succeed('sudo -u solr solr create -c films');
-    $machine->succeed(q(curl http://localhost:8983/solr/films/schema -X POST -H 'Content-type:application/json' --data-binary '{
-      "add-field" : {
-        "name":"name",
-        "type":"text_general",
-        "multiValued":false,
-        "stored":true
-      },
-      "add-field" : {
-        "name":"initial_release_date",
-        "type":"pdate",
-        "stored":true
-      }
-    }')) =~ /"status":0/ or die;
-    $machine->succeed('sudo -u solr post -c films ${pkgs.solr}/example/films/films.json');
-    $machine->succeed('curl http://localhost:8983/solr/films/query?q=name:batman') =~ /"name":"Batman Begins"/ or die;
+    machine.succeed("sudo -u solr solr create -c films")
+    assert '"status":0' in machine.succeed(
+        """
+      curl http://localhost:8983/solr/films/schema -X POST -H 'Content-type:application/json' --data-binary '{
+        "add-field" : {
+          "name":"name",
+          "type":"text_general",
+          "multiValued":false,
+          "stored":true
+        },
+        "add-field" : {
+          "name":"initial_release_date",
+          "type":"pdate",
+          "stored":true
+        }
+      }'
+    """
+    )
+    machine.succeed(
+        "sudo -u solr post -c films ${pkgs.solr}/example/films/films.json"
+    )
+    assert '"name":"Batman Begins"' in machine.succeed(
+        "curl http://localhost:8983/solr/films/query?q=name:batman"
+    )
   '';
 })
diff --git a/nixos/tests/tinydns.nix b/nixos/tests/tinydns.nix
index c7740d5ade3..b80e3451700 100644
--- a/nixos/tests/tinydns.nix
+++ b/nixos/tests/tinydns.nix
@@ -21,6 +21,6 @@ import ./make-test-python.nix ({ lib, ...} : {
   testScript = ''
     nameserver.start()
     nameserver.wait_for_unit("tinydns.service")
-    nameserver.succeed("host bla.foo.bar | grep '1\.2\.3\.4'")
+    nameserver.succeed("host bla.foo.bar 192.168.1.1 | grep '1\.2\.3\.4'")
   '';
 })