Diffstat (limited to 'nixos')
 nixos/doc/manual/release-notes/rl-2009.xml                     |  13
 nixos/doc/manual/release-notes/rl-2103.xml                     |  68
 nixos/modules/module-list.nix                                  |   1
 nixos/modules/services/databases/redis.nix                     |   9
 nixos/modules/services/mail/roundcube.nix                      |   5
 nixos/modules/services/misc/octoprint.nix                      |   2
 nixos/modules/services/monitoring/prometheus/exporters.nix     |   9
 nixos/modules/services/monitoring/prometheus/exporters/sql.nix | 104
 nixos/modules/services/networking/nar-serve.nix                |  55
 nixos/modules/services/networking/unbound.nix                  | 141
 nixos/modules/services/torrent/transmission.nix                |   2
 nixos/modules/system/boot/luksroot.nix                         |   2
 nixos/modules/system/boot/pbkdf2-sha512.c                      |   2
 nixos/modules/virtualisation/ec2-amis.nix                      |  36
 nixos/release-combined.nix                                     |   2
 nixos/tests/all-tests.nix                                      |   2
 nixos/tests/nar-serve.nix                                      |  48
 nixos/tests/prometheus-exporters.nix                           |  44
 nixos/tests/unbound.nix                                        | 278
 19 files changed, 761 insertions(+), 62 deletions(-)
diff --git a/nixos/doc/manual/release-notes/rl-2009.xml b/nixos/doc/manual/release-notes/rl-2009.xml
index 01f113198eb..75c8adbf45e 100644
--- a/nixos/doc/manual/release-notes/rl-2009.xml
+++ b/nixos/doc/manual/release-notes/rl-2009.xml
@@ -879,12 +879,23 @@ php.override {
    <listitem>
      <para>
        The nginx web server now starts with additional sandboxing/hardening options. By default, write access
-       to <literal>services.nginx.stateDir</literal> is allowed. To allow writing to other folders,
+       to <literal>/var/log/nginx</literal> and <literal>/var/cache/nginx</literal> is allowed. To allow writing to other folders,
        use <literal>systemd.services.nginx.serviceConfig.ReadWritePaths</literal>
        <programlisting>
 systemd.services.nginx.serviceConfig.ReadWritePaths = [ "/var/www" ];
        </programlisting>
      </para>
+     <para>
+       Nginx is also started with the systemd option <literal>ProtectHome = mkDefault true;</literal>,
+       which forbids it from reading anything in <literal>/home</literal>, <literal>/root</literal>
+       and <literal>/run/user</literal> (see
+       <link xlink:href="https://www.freedesktop.org/software/systemd/man/systemd.exec.html#ProtectHome=">ProtectHome docs</link>
+       for details).
+       If you need to serve files from home directories, you can, for example, set:
+<programlisting>
+systemd.services.nginx.serviceConfig.ProtectHome = "read-only";
+</programlisting>
+     </para>
    </listitem>
    <listitem>
     <para>
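
Taken together, the two hardening notes above amount to overrides along the following lines for hosts that need both extra writable paths and read access to home directories. A minimal sketch; "/var/www" stands in for whatever paths you actually serve:

    systemd.services.nginx.serviceConfig = {
      # permit writes outside /var/log/nginx and /var/cache/nginx
      ReadWritePaths = [ "/var/www" ];
      # relax ProtectHome so /home, /root and /run/user are readable
      ProtectHome = "read-only";
    };
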
diff --git a/nixos/doc/manual/release-notes/rl-2103.xml b/nixos/doc/manual/release-notes/rl-2103.xml
index a90b49ba798..709b2ba5228 100644
--- a/nixos/doc/manual/release-notes/rl-2103.xml
+++ b/nixos/doc/manual/release-notes/rl-2103.xml
@@ -139,6 +139,13 @@
     <package>stanchion</package> package removed along with <varname>services.stanchion</varname> module.
     </para>
    </listitem>
+   <listitem>
+    <para>
+     <package>mutt</package> has been updated to a new major version (2.x), which comes with
+     some backward-incompatible changes that are described in the
+     <link xlink:href="http://www.mutt.org/relnotes/2.0/">release notes for Mutt 2.0</link>.
+    </para>
+   </listitem>
   </itemizedlist>
  </section>
 
@@ -164,12 +171,73 @@
    </listitem>
    <listitem>
     <para>
+     The setting <xref linkend="opt-services.redis.bind" /> now defaults to <literal>127.0.0.1</literal>, so Redis listens only on the loopback interface rather than on all network interfaces.
+    </para>
+   </listitem>
+   <listitem>
+    <para>
      NixOS now emits a deprecation warning if systemd's <literal>StartLimitInterval</literal> setting is used in a <literal>serviceConfig</literal> section instead of in a <literal>unitConfig</literal>; that setting is deprecated and now undocumented for the service section by systemd upstream, but still effective and somewhat buggy there, which can be confusing. See <link xlink:href="https://github.com/NixOS/nixpkgs/issues/45785">#45785</link> for details.
     </para>
     <para>
      All services should use <xref linkend="opt-systemd.services._name_.startLimitIntervalSec" /> or <literal>StartLimitIntervalSec</literal> in <xref linkend="opt-systemd.services._name_.unitConfig" /> instead.
     </para>
    </listitem>
+   <listitem>
+    <para>
+     The Unbound DNS resolver service (<literal>services.unbound</literal>) has been refactored to support reloading and control sockets, and to fix startup-ordering issues.
+    </para>
+
+    <para>
+     It is now possible to enable a local UNIX control socket for unbound by setting the <xref linkend="opt-services.unbound.localControlSocketPath" />
+     option.
+    </para>
+
+    <para>
+     Previously we applied only a very minimal set of restrictions and
+     trusted unbound to properly drop root privileges and capabilities.
+    </para>
+
+    <para>
+     With this change we are (for the most part) using the upstream
+     example unit file for unbound. The main difference is that we start
+     unbound as the <literal>unbound</literal> user with the required capabilities instead of
+     letting unbound do the chroot &amp; uid/gid changes itself.
+    </para>
+
+    <para>
+     The upstream unit configuration this is based on is a lot stricter with
+     all kinds of permissions than our previous variant. It also defaults to
+     <literal>Type</literal> being set to <literal>notify</literal>, which is why we now
+     use the <literal>unbound-with-systemd</literal> package here. Unbound will start up,
+     read the configuration files and start listening on the configured ports
+     before systemd declares the unit <literal>active (running)</literal>.
+     This should help with startup ordering and the occasional race condition during system
+     activation where the DNS service is started but not yet ready to answer
+     queries. Services depending on <literal>nss-lookup.target</literal> or <literal>unbound.service</literal>
+     are now able to use unbound once those targets have been reached.
+    </para>
+
+    <para>
+     In addition to the much stricter runtime environment, the
+     <literal>/dev/urandom</literal> mount lines we previously had in the code (which would
+     randomly fail during the stop phase) have been removed, as systemd takes care of those for us.
+    </para>
+
+    <para>
+     The <literal>preStart</literal> script is now only required if the trust
+     anchor updates are enabled (which they still are by default).
+    </para>
+
+    <para>
+     Another benefit of the refactoring is that we can now issue reloads via
+     either <literal>pkill -HUP unbound</literal> or <literal>systemctl reload unbound</literal> to reload the
+     running configuration without taking the daemon offline. A prerequisite
+     for this was that the unbound configuration be available at a well-known path
+     on the file system. We are using <literal>/etc/unbound/unbound.conf</literal>, as that is the
+     default in the CLI tooling, which in turn enables us to use
+     <literal>unbound-control</literal> without passing a custom configuration location.
+    </para>
+   </listitem>
   </itemizedlist>
  </section>
 </section>
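
To illustrate the startup-ordering improvement described above: with Type=notify, units ordered after unbound only start once unbound is actually able to answer queries. A minimal sketch, where my-service is a hypothetical unit that needs working DNS at startup:

    systemd.services.my-service = {
      # unbound signals readiness only after it has read its configuration
      # and is listening on the configured ports
      after = [ "unbound.service" "nss-lookup.target" ];
      wants = [ "unbound.service" ];
    };
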
diff --git a/nixos/modules/module-list.nix b/nixos/modules/module-list.nix
index 3fd7ebd1ca7..6ac12e4e138 100644
--- a/nixos/modules/module-list.nix
+++ b/nixos/modules/module-list.nix
@@ -680,6 +680,7 @@
   ./services/networking/murmur.nix
   ./services/networking/mxisd.nix
   ./services/networking/namecoind.nix
+  ./services/networking/nar-serve.nix
   ./services/networking/nat.nix
   ./services/networking/ndppd.nix
   ./services/networking/networkmanager.nix
diff --git a/nixos/modules/services/databases/redis.nix b/nixos/modules/services/databases/redis.nix
index f1777854e14..6b8853ae390 100644
--- a/nixos/modules/services/databases/redis.nix
+++ b/nixos/modules/services/databases/redis.nix
@@ -87,9 +87,12 @@ in
 
       bind = mkOption {
         type = with types; nullOr str;
-        default = null; # All interfaces
-        description = "The IP interface to bind to.";
-        example = "127.0.0.1";
+        default = "127.0.0.1";
+        description = ''
+          The IP interface to bind to.
+          <literal>null</literal> means "all interfaces".
+        '';
+        example = "192.0.2.1";
       };
 
       unixSocket = mkOption {
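
With the new default above, Redis is reachable via loopback only. A minimal sketch of restoring the previous behaviour for setups that relied on it; note that null exposes Redis on all interfaces, so make sure your firewall rules match:

    services.redis = {
      enable = true;
      bind = null; # null means "all interfaces", the pre-21.03 default
    };
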
diff --git a/nixos/modules/services/mail/roundcube.nix b/nixos/modules/services/mail/roundcube.nix
index a0bbab64985..ee7aa7e22fb 100644
--- a/nixos/modules/services/mail/roundcube.nix
+++ b/nixos/modules/services/mail/roundcube.nix
@@ -204,6 +204,11 @@ in
     };
     systemd.services.phpfpm-roundcube.after = [ "roundcube-setup.service" ];
 
+    # Restart on config changes.
+    systemd.services.phpfpm-roundcube.restartTriggers = [
+      config.environment.etc."roundcube/config.inc.php".source
+    ];
+
     systemd.services.roundcube-setup = mkMerge [
       (mkIf (cfg.database.host == "localhost") {
         requires = [ "postgresql.service" ];
diff --git a/nixos/modules/services/misc/octoprint.nix b/nixos/modules/services/misc/octoprint.nix
index e2fbd3b401c..a69e6507305 100644
--- a/nixos/modules/services/misc/octoprint.nix
+++ b/nixos/modules/services/misc/octoprint.nix
@@ -68,7 +68,7 @@ in
       plugins = mkOption {
         default = plugins: [];
         defaultText = "plugins: []";
-        example = literalExample "plugins: with plugins; [ m33-fio stlviewer ]";
+        example = literalExample "plugins: with plugins; [ themeify stlviewer ]";
         description = "Additional plugins to be used. Available plugins are passed through the plugins input.";
       };
 
diff --git a/nixos/modules/services/monitoring/prometheus/exporters.nix b/nixos/modules/services/monitoring/prometheus/exporters.nix
index a4aa470f5bc..995afca96ff 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters.nix
@@ -45,6 +45,7 @@ let
     "rspamd"
     "rtl_433"
     "snmp"
+    "sql"
     "surfboard"
     "tor"
     "unifi"
@@ -218,6 +219,14 @@ in
         Please specify either 'services.prometheus.exporters.mail.configuration'
           or 'services.prometheus.exporters.mail.configFile'.
       '';
+    } {
+      assertion = cfg.sql.enable -> (
+        (cfg.sql.configFile == null) != (cfg.sql.configuration == null)
+      );
+      message = ''
+        Please specify either 'services.prometheus.exporters.sql.configuration' or
+          'services.prometheus.exporters.sql.configFile'
+      '';
     } ];
   }] ++ [(mkIf config.services.minio.enable {
     services.prometheus.exporters.minio.minioAddress  = mkDefault "http://localhost:9000";
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/sql.nix b/nixos/modules/services/monitoring/prometheus/exporters/sql.nix
new file mode 100644
index 00000000000..d9be724ebc0
--- /dev/null
+++ b/nixos/modules/services/monitoring/prometheus/exporters/sql.nix
@@ -0,0 +1,104 @@
+{ config, lib, pkgs, options }:
+with lib;
+let
+  cfg = config.services.prometheus.exporters.sql;
+  cfgOptions = {
+    options = with types; {
+      jobs = mkOption {
+        type = attrsOf (submodule jobOptions);
+        default = { };
+        description = "An attrset of metrics scraping jobs to run.";
+      };
+    };
+  };
+  jobOptions = {
+    options = with types; {
+      interval = mkOption {
+        type = str;
+        description = ''
+          How often to run this job, specified in
+          <link xlink:href="https://golang.org/pkg/time/#ParseDuration">Go duration</link> format.
+        '';
+      };
+      connections = mkOption {
+        type = listOf str;
+        description = "A list of connection strings of the SQL servers to scrape metrics from";
+      };
+      startupSql = mkOption {
+        type = listOf str;
+        default = [];
+        description = "A list of SQL statements to execute once after making a connection.";
+      };
+      queries = mkOption {
+        type = attrsOf (submodule queryOptions);
+        description = "SQL queries to run.";
+      };
+    };
+  };
+  queryOptions = {
+    options = with types; {
+      help = mkOption {
+        type = nullOr str;
+        default = null;
+        description = "A human-readable description of this metric.";
+      };
+      labels = mkOption {
+        type = listOf str;
+        default = [ ];
+        description = "A set of columns that will be used as Prometheus labels.";
+      };
+      query = mkOption {
+        type = str;
+        description = "The SQL query to run.";
+      };
+      values = mkOption {
+        type = listOf str;
+        description = "A set of columns that will be used as values of this metric.";
+      };
+    };
+  };
+
+  configFile =
+    if cfg.configFile != null
+    then cfg.configFile
+    else
+      let
+        nameInline = mapAttrsToList (k: v: v // { name = k; });
+        renameStartupSql = j: removeAttrs (j // { startup_sql = j.startupSql; }) [ "startupSql" ];
+        configuration = {
+          jobs = map renameStartupSql
+            (nameInline (mapAttrs (k: v: (v // { queries = nameInline v.queries; })) cfg.configuration.jobs));
+        };
+      in
+      builtins.toFile "config.yaml" (builtins.toJSON configuration);
+in
+{
+  extraOpts = {
+    configFile = mkOption {
+      type = with types; nullOr path;
+      default = null;
+      description = ''
+        Path to configuration file.
+      '';
+    };
+    configuration = mkOption {
+      type = with types; nullOr (submodule cfgOptions);
+      default = null;
+      description = ''
+        Exporter configuration as a Nix attribute set. Mutually exclusive with the 'configFile' option.
+      '';
+    };
+  };
+
+  port = 9237;
+  serviceOpts = {
+    serviceConfig = {
+      ExecStart = ''
+        ${pkgs.prometheus-sql-exporter}/bin/sql_exporter \
+          -web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
+          -config.file ${configFile} \
+          ${concatStringsSep " \\\n  " cfg.extraFlags}
+      '';
+    };
+  };
+}
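
For reference, here is how the options defined above fit together; this sketch mirrors the exporter test further down in this diff (the database, table, and user names are taken from that test):

    services.prometheus.exporters.sql = {
      enable = true;
      configuration.jobs.points = {
        interval = "1m";
        connections = [
          "postgres://prometheus-sql-exporter@/data?host=/run/postgresql&sslmode=disable"
        ];
        queries.points = {
          labels = [ "name" ];
          help = "Amount of points accumulated per person";
          values = [ "amount" ];
          query = "SELECT SUM(amount) AS amount, name FROM points GROUP BY name";
        };
      };
    };
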
diff --git a/nixos/modules/services/networking/nar-serve.nix b/nixos/modules/services/networking/nar-serve.nix
new file mode 100644
index 00000000000..ddd42fa0107
--- /dev/null
+++ b/nixos/modules/services/networking/nar-serve.nix
@@ -0,0 +1,55 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+let
+  cfg = config.services.nar-serve;
+in
+{
+  meta = {
+    maintainers = [ maintainers.rizary ];
+  };
+  options = {
+    services.nar-serve = {
+      enable = mkEnableOption "Serve NAR file contents via HTTP";
+
+      port = mkOption {
+        type = types.int;
+        default = 8383;
+        description = ''
+          The port for nar-serve to listen on.
+        '';
+      };
+
+      cacheURL = mkOption {
+        type = types.str;
+        default = "https://cache.nixos.org/";
+        description = ''
+          Binary cache URL to connect to.
+
+          The URL format is compatible with the nix remote url style, such as:
+          - http://, https:// for binary caches via HTTP or HTTPS
+          - s3:// for binary caches stored in Amazon S3
+          - gs:// for binary caches stored in Google Cloud Storage
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.nar-serve = {
+      description = "NAR server";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      environment.PORT = toString cfg.port;
+      environment.NAR_CACHE_URL = cfg.cacheURL;
+
+      serviceConfig = {
+        Restart = "always";
+        RestartSec = "5s";
+        ExecStart = "${pkgs.nar-serve}/bin/nar-serve";
+        DynamicUser = true;
+      };
+    };
+  };
+}
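
A minimal sketch of enabling the module defined above; both options shown are optional, and the values given are just the defaults made explicit:

    services.nar-serve = {
      enable = true;
      port = 8383; # default
      cacheURL = "https://cache.nixos.org/"; # default
    };
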
diff --git a/nixos/modules/services/networking/unbound.nix b/nixos/modules/services/networking/unbound.nix
index baed83591e1..9a46fa3075f 100644
--- a/nixos/modules/services/networking/unbound.nix
+++ b/nixos/modules/services/networking/unbound.nix
@@ -1,9 +1,7 @@
 { config, lib, pkgs, ... }:
 
 with lib;
-
 let
-
   cfg = config.services.unbound;
 
   stateDir = "/var/lib/unbound";
@@ -17,12 +15,12 @@ let
   forward =
     optionalString (any isLocalAddress cfg.forwardAddresses) ''
       do-not-query-localhost: no
-    '' +
-    optionalString (cfg.forwardAddresses != []) ''
+    ''
+    + optionalString (cfg.forwardAddresses != []) ''
       forward-zone:
         name: .
-    '' +
-    concatMapStringsSep "\n" (x: "    forward-addr: ${x}") cfg.forwardAddresses;
+    ''
+    + concatMapStringsSep "\n" (x: "    forward-addr: ${x}") cfg.forwardAddresses;
 
   rootTrustAnchorFile = "${stateDir}/root.key";
 
@@ -31,19 +29,25 @@ let
 
   confFile = pkgs.writeText "unbound.conf" ''
     server:
+      ip-freebind: yes
       directory: "${stateDir}"
       username: unbound
-      chroot: "${stateDir}"
+      chroot: ""
       pidfile: ""
+      # when running under systemd there is no need to daemonize
+      do-daemonize: no
       ${interfaces}
       ${access}
       ${trustAnchor}
+    ${lib.optionalString (cfg.localControlSocketPath != null) ''
+      remote-control:
+        control-enable: yes
+        control-interface: ${cfg.localControlSocketPath}
+    ''}
     ${cfg.extraConfig}
     ${forward}
   '';
-
 in
-
 {
 
   ###### interface
@@ -55,8 +59,8 @@ in
 
       package = mkOption {
         type = types.package;
-        default = pkgs.unbound;
-        defaultText = "pkgs.unbound";
+        default = pkgs.unbound-with-systemd;
+        defaultText = "pkgs.unbound-with-systemd";
         description = "The unbound package to use";
       };
 
@@ -69,11 +73,14 @@ in
       interfaces = mkOption {
         default = [ "127.0.0.1" ] ++ optional config.networking.enableIPv6 "::1";
         type = types.listOf types.str;
-        description = "What addresses the server should listen on.";
+        description =  ''
+          What addresses the server should listen on. This supports the interface syntax documented in
+          <citerefentry><refentrytitle>unbound.conf</refentrytitle><manvolnum>8</manvolnum></citerefentry>.
+        '';
       };
 
       forwardAddresses = mkOption {
-        default = [ ];
+        default = [];
         type = types.listOf types.str;
         description = "What servers to forward queries to.";
       };
@@ -84,6 +91,28 @@ in
         description = "Use and update root trust anchor for DNSSEC validation.";
       };
 
+      localControlSocketPath = mkOption {
+        default = null;
+        # FIXME: What is the proper type here so users can specify strings,
+        # paths and null?
+        # My guess would be `types.nullOr (types.either types.str types.path)`
+        # but I haven't verified yet.
+        type = types.nullOr types.str;
+        example = "/run/unbound/unbound.ctl";
+        description = ''
+          When not set to <literal>null</literal> this option defines the path
+          at which the unbound remote control socket should be created. The
+          socket will be owned by the unbound user (<literal>unbound</literal>)
+          and the group will be <literal>nogroup</literal>.
+
+          Users that should be permitted to access the socket must be in the
+          <literal>unbound</literal> group.
+
+          If this option is <literal>null</literal>, remote control will not be
+          configured at all and Unbound's default values apply.
+        '';
+      };
+
       extraConfig = mkOption {
         default = "";
         type = types.lines;
@@ -106,43 +135,85 @@ in
     users.users.unbound = {
       description = "unbound daemon user";
       isSystemUser = true;
+      group = lib.mkIf (cfg.localControlSocketPath != null) (lib.mkDefault "unbound");
+    };
+
+    # We need a group so that we can give users access to the configured
+    # control socket. Unbound allows access to the socket only to the unbound
+    # user and the primary group.
+    users.groups = lib.mkIf (cfg.localControlSocketPath != null) {
+      unbound = {};
     };
 
     networking.resolvconf.useLocalResolver = mkDefault true;
 
+
+    environment.etc."unbound/unbound.conf".source = confFile;
+
     systemd.services.unbound = {
       description = "Unbound recursive Domain Name Server";
       after = [ "network.target" ];
       before = [ "nss-lookup.target" ];
-      wants = [ "nss-lookup.target" ];
-      wantedBy = [ "multi-user.target" ];
-
-      preStart = ''
-        mkdir -m 0755 -p ${stateDir}/dev/
-        cp ${confFile} ${stateDir}/unbound.conf
-        ${optionalString cfg.enableRootTrustAnchor ''
-          ${cfg.package}/bin/unbound-anchor -a ${rootTrustAnchorFile} || echo "Root anchor updated!"
-          chown unbound ${stateDir} ${rootTrustAnchorFile}
-        ''}
-        touch ${stateDir}/dev/random
-        ${pkgs.utillinux}/bin/mount --bind -n /dev/urandom ${stateDir}/dev/random
+      wantedBy = [ "multi-user.target" "nss-lookup.target" ];
+
+      preStart = lib.mkIf cfg.enableRootTrustAnchor ''
+        ${cfg.package}/bin/unbound-anchor -a ${rootTrustAnchorFile} || echo "Root anchor updated!"
       '';
 
-      serviceConfig = {
-        ExecStart = "${cfg.package}/bin/unbound -d -c ${stateDir}/unbound.conf";
-        ExecStopPost="${pkgs.utillinux}/bin/umount ${stateDir}/dev/random";
+      restartTriggers = [
+        confFile
+      ];
 
-        ProtectSystem = true;
-        ProtectHome = true;
+      serviceConfig = {
+        ExecStart = "${cfg.package}/bin/unbound -p -d -c /etc/unbound/unbound.conf";
+        ExecReload = "+/run/current-system/sw/bin/kill -HUP $MAINPID";
+
+        NotifyAccess = "main";
+        Type = "notify";
+
+        # FIXME: Which of these do we actually need? Can we drop the chroot flag?
+        AmbientCapabilities = [
+          "CAP_NET_BIND_SERVICE"
+          "CAP_NET_RAW"
+          "CAP_SETGID"
+          "CAP_SETUID"
+          "CAP_SYS_CHROOT"
+          "CAP_SYS_RESOURCE"
+        ];
+
+        User = "unbound";
+        Group = lib.mkIf (cfg.localControlSocketPath != null) (lib.mkDefault "unbound");
+
+        MemoryDenyWriteExecute = true;
+        NoNewPrivileges = true;
         PrivateDevices = true;
-        Restart = "always";
-        RestartSec = "5s";
+        PrivateTmp = true;
+        ProtectHome = true;
+        ProtectControlGroups = true;
+        ProtectKernelModules = true;
+        ProtectSystem = "strict";
+        RuntimeDirectory = "unbound";
+        ConfigurationDirectory = "unbound";
+        StateDirectory = "unbound";
+        RestrictAddressFamilies = [ "AF_INET" "AF_INET6" "AF_UNIX" ];
+        RestrictRealtime = true;
+        SystemCallArchitectures = "native";
+        SystemCallFilter = [
+          "~@clock"
+          "@cpu-emulation"
+          "@debug"
+          "@keyring"
+          "@module"
+          "mount"
+          "@obsolete"
+          "@resources"
+        ];
+        RestrictNamespaces = true;
+        LockPersonality = true;
+        RestrictSUIDSGID = true;
       };
     };
-
     # If networkmanager is enabled, ask it to interface with unbound.
     networking.networkmanager.dns = "unbound";
-
   };
-
 }
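
Putting localControlSocketPath to use: the sketch below enables the control socket and grants one user access through the unbound group, mirroring what the unbound test at the end of this diff does; alice is a hypothetical user:

    services.unbound = {
      enable = true;
      localControlSocketPath = "/run/unbound/unbound.ctl";
    };

    # members of the "unbound" group may use unbound-control on the socket
    users.users.alice.extraGroups = [ "unbound" ];
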
diff --git a/nixos/modules/services/torrent/transmission.nix b/nixos/modules/services/torrent/transmission.nix
index aeb58a7194f..717c18d367f 100644
--- a/nixos/modules/services/torrent/transmission.nix
+++ b/nixos/modules/services/torrent/transmission.nix
@@ -236,6 +236,7 @@ in
           # an AppArmor profile is provided to get a confinement based upon paths and rights.
           builtins.storeDir
           "/etc"
+          "/run"
           ] ++
           optional (cfg.settings.script-torrent-done-enabled &&
                     cfg.settings.script-torrent-done-filename != "")
@@ -408,6 +409,7 @@ in
           #r @{PROC}/@{pid}/environ,
           r @{PROC}/@{pid}/mounts,
           rwk /tmp/tr_session_id_*,
+          r /run/systemd/resolve/stub-resolv.conf,
 
           r ${pkgs.openssl.out}/etc/**,
           r ${config.systemd.services.transmission.environment.CURL_CA_BUNDLE},
diff --git a/nixos/modules/system/boot/luksroot.nix b/nixos/modules/system/boot/luksroot.nix
index 88190e8200b..8dd2ea20519 100644
--- a/nixos/modules/system/boot/luksroot.nix
+++ b/nixos/modules/system/boot/luksroot.nix
@@ -404,7 +404,7 @@ let
           echo "Please move your mouse to create needed randomness."
         ''}
           echo "Waiting for your FIDO2 device..."
-          fido2luks -i open ${device} ${name} ${fido2.credential} --await-dev ${toString fido2.gracePeriod} --salt string:$passphrase
+          fido2luks open ${device} ${name} ${fido2.credential} --await-dev ${toString fido2.gracePeriod} --salt string:$passphrase
         if [ $? -ne 0 ]; then
           echo "No FIDO2 key found, falling back to normal open procedure"
           open_normally
diff --git a/nixos/modules/system/boot/pbkdf2-sha512.c b/nixos/modules/system/boot/pbkdf2-sha512.c
index b40c383ac02..67e989957ba 100644
--- a/nixos/modules/system/boot/pbkdf2-sha512.c
+++ b/nixos/modules/system/boot/pbkdf2-sha512.c
@@ -35,4 +35,4 @@ int main(int argc, char** argv)
 	fwrite(key, 1, key_length, stdout);
 
 	return 0;
-}
\ No newline at end of file
+}
diff --git a/nixos/modules/virtualisation/ec2-amis.nix b/nixos/modules/virtualisation/ec2-amis.nix
index 4d9c391e046..3da63078a21 100644
--- a/nixos/modules/virtualisation/ec2-amis.nix
+++ b/nixos/modules/virtualisation/ec2-amis.nix
@@ -329,24 +329,24 @@ let self = {
   "20.03".ap-east-1.hvm-ebs = "ami-0d18fdd309cdefa86";
   "20.03".sa-east-1.hvm-ebs = "ami-09859378158ae971d";
 
-  # 20.09.1465.9a0b14b097d
-  "20.09".eu-west-1.hvm-ebs = "ami-0d90f16418e3c364c";
-  "20.09".eu-west-2.hvm-ebs = "ami-0635ec0780ea57cfe";
-  "20.09".eu-west-3.hvm-ebs = "ami-0714e94352f2eabb9";
-  "20.09".eu-central-1.hvm-ebs = "ami-0979d39762a4d2a02";
-  "20.09".eu-north-1.hvm-ebs = "ami-0b14e273185c66e9b";
-  "20.09".us-east-1.hvm-ebs = "ami-0f8b063ac3f2d9645";
-  "20.09".us-east-2.hvm-ebs = "ami-0959202a0393fdd0c";
-  "20.09".us-west-1.hvm-ebs = "ami-096d50833b785478b";
-  "20.09".us-west-2.hvm-ebs = "ami-0fc31031df0df6104";
-  "20.09".ca-central-1.hvm-ebs = "ami-0787786a38cde3905";
-  "20.09".ap-southeast-1.hvm-ebs = "ami-0b3f693d3a2a0b9ae";
-  "20.09".ap-southeast-2.hvm-ebs = "ami-02471872bc876b610";
-  "20.09".ap-northeast-1.hvm-ebs = "ami-06505fd2bf44a59a7";
-  "20.09".ap-northeast-2.hvm-ebs = "ami-0754b4c014eea1e8a";
-  "20.09".ap-south-1.hvm-ebs = "ami-05100e32242ae65a6";
-  "20.09".ap-east-1.hvm-ebs = "ami-045288859a39de009";
-  "20.09".sa-east-1.hvm-ebs = "ami-0a937748db48fb00d";
+  # 20.09.1632.a6a3a368dda
+  "20.09".eu-west-1.hvm-ebs = "ami-01a79d5ce435f4db3";
+  "20.09".eu-west-2.hvm-ebs = "ami-0cbe14f32904e6331";
+  "20.09".eu-west-3.hvm-ebs = "ami-07f493412d6213de6";
+  "20.09".eu-central-1.hvm-ebs = "ami-01d4a0c2248cbfe38";
+  "20.09".eu-north-1.hvm-ebs = "ami-0003f54dd99d68e0f";
+  "20.09".us-east-1.hvm-ebs = "ami-068a62d478710462d";
+  "20.09".us-east-2.hvm-ebs = "ami-01ac677ff61399caa";
+  "20.09".us-west-1.hvm-ebs = "ami-04befdb203b4b17f6";
+  "20.09".us-west-2.hvm-ebs = "ami-0fb7bd4a43261c6b2";
+  "20.09".ca-central-1.hvm-ebs = "ami-06d5ee429f153f856";
+  "20.09".ap-southeast-1.hvm-ebs = "ami-0db0304e23c535b2a";
+  "20.09".ap-southeast-2.hvm-ebs = "ami-045983c4db7e36447";
+  "20.09".ap-northeast-1.hvm-ebs = "ami-0beb18d632cf64e5a";
+  "20.09".ap-northeast-2.hvm-ebs = "ami-0dd0316af578862db";
+  "20.09".ap-south-1.hvm-ebs = "ami-008d15ced81c88aed";
+  "20.09".ap-east-1.hvm-ebs = "ami-071f49713f86ea965";
+  "20.09".sa-east-1.hvm-ebs = "ami-05ded1ae35209b5a8";
 
   latest = self."20.09";
 }; in self
diff --git a/nixos/release-combined.nix b/nixos/release-combined.nix
index 945ba90e345..384ae5765b8 100644
--- a/nixos/release-combined.nix
+++ b/nixos/release-combined.nix
@@ -71,7 +71,6 @@ in rec {
         (onFullSupported "nixos.tests.fontconfig-default-fonts")
         (onFullSupported "nixos.tests.gnome3")
         (onFullSupported "nixos.tests.gnome3-xorg")
-        (onFullSupported "nixos.tests.hardened")
         (onSystems ["x86_64-linux"] "nixos.tests.hibernate")
         (onFullSupported "nixos.tests.i3wm")
         (onSystems ["x86_64-linux"] "nixos.tests.installer.btrfsSimple")
@@ -93,7 +92,6 @@ in rec {
         (onFullSupported "nixos.tests.keymap.dvp")
         (onFullSupported "nixos.tests.keymap.neo")
         (onFullSupported "nixos.tests.keymap.qwertz")
-        (onFullSupported "nixos.tests.latestKernel.hardened")
         (onFullSupported "nixos.tests.latestKernel.login")
         (onFullSupported "nixos.tests.lightdm")
         (onFullSupported "nixos.tests.login")
diff --git a/nixos/tests/all-tests.nix b/nixos/tests/all-tests.nix
index f36b70bae7f..d4aff486225 100644
--- a/nixos/tests/all-tests.nix
+++ b/nixos/tests/all-tests.nix
@@ -225,6 +225,7 @@ in
   mysql-backup = handleTest ./mysql/mysql-backup.nix {};
   mysql-replication = handleTest ./mysql/mysql-replication.nix {};
   nagios = handleTest ./nagios.nix {};
+  nar-serve = handleTest ./nar-serve.nix {};
   nat.firewall = handleTest ./nat.nix { withFirewall = true; };
   nat.firewall-conntrack = handleTest ./nat.nix { withFirewall = true; withConntrackHelpers = true; };
   nat.standalone = handleTest ./nat.nix { withFirewall = false; };
@@ -368,6 +369,7 @@ in
   trezord = handleTest ./trezord.nix {};
   trickster = handleTest ./trickster.nix {};
   tuptime = handleTest ./tuptime.nix {};
+  unbound = handleTest ./unbound.nix {};
   udisks2 = handleTest ./udisks2.nix {};
   unit-php = handleTest ./web-servers/unit-php.nix {};
   upnp = handleTest ./upnp.nix {};
diff --git a/nixos/tests/nar-serve.nix b/nixos/tests/nar-serve.nix
new file mode 100644
index 00000000000..9ee738ffb17
--- /dev/null
+++ b/nixos/tests/nar-serve.nix
@@ -0,0 +1,48 @@
+import ./make-test-python.nix (
+  { pkgs, lib, ... }:
+  {
+    name = "nar-serve";
+    meta.maintainers = [ lib.maintainers.rizary ];
+    nodes =
+      {
+        server = { pkgs, ... }: {
+          services.nginx = {
+            enable = true;
+            virtualHosts.default.root = "/var/www";
+          };
+          services.nar-serve = {
+            enable = true;
+            # Connect to the localhost nginx instead of the default
+            # https://cache.nixos.org
+            cacheURL = "http://localhost/";
+          };
+          environment.systemPackages = [
+            pkgs.hello
+            pkgs.curl
+          ];
+
+          networking.firewall.allowedTCPPorts = [ 8383 ];
+
+          # virtualisation.diskSize = 2 * 1024;
+        };
+      };
+    testScript = ''
+      start_all()
+
+      # Create a fake cache with Nginx serving the static files
+      server.succeed(
+          "nix copy --to file:///var/www ${pkgs.hello}"
+      )
+      server.wait_for_unit("nginx.service")
+      server.wait_for_open_port(80)
+
+      # Check that nar-serve can serve the contents of the store path
+      drvName = os.path.basename("${pkgs.hello}")
+      drvHash = drvName.split("-")[0]
+      server.wait_for_unit("nar-serve.service")
+      server.succeed(
+          "curl -o hello -f http://localhost:8383/nix/store/{}/bin/hello".format(drvHash)
+      )
+    '';
+  }
+)
diff --git a/nixos/tests/prometheus-exporters.nix b/nixos/tests/prometheus-exporters.nix
index 2553f5dcf74..0b9957404f3 100644
--- a/nixos/tests/prometheus-exporters.nix
+++ b/nixos/tests/prometheus-exporters.nix
@@ -609,6 +609,50 @@ let
       '';
     };
 
+    sql = {
+      exporterConfig = {
+        configuration.jobs.points = {
+          interval = "1m";
+          connections = [
+            "postgres://prometheus-sql-exporter@/data?host=/run/postgresql&sslmode=disable"
+          ];
+          queries = {
+            points = {
+              labels = [ "name" ];
+              help = "Amount of points accumulated per person";
+              values = [ "amount" ];
+              query = "SELECT SUM(amount) as amount, name FROM points GROUP BY name";
+            };
+          };
+        };
+        enable = true;
+        user = "prometheus-sql-exporter";
+      };
+      metricProvider = {
+        services.postgresql = {
+          enable = true;
+          initialScript = builtins.toFile "init.sql" ''
+            CREATE DATABASE data;
+            \c data;
+            CREATE TABLE points (amount INT, name TEXT);
+            INSERT INTO points(amount, name) VALUES (1, 'jack');
+            INSERT INTO points(amount, name) VALUES (2, 'jill');
+            INSERT INTO points(amount, name) VALUES (3, 'jack');
+
+            CREATE USER "prometheus-sql-exporter";
+            GRANT ALL PRIVILEGES ON DATABASE data TO "prometheus-sql-exporter";
+            GRANT SELECT ON points TO "prometheus-sql-exporter";
+          '';
+        };
+        systemd.services.prometheus-sql-exporter.after = [ "postgresql.service" ];
+      };
+      exporterTest = ''
+        wait_for_unit("prometheus-sql-exporter.service")
+        wait_for_open_port(9237)
+        succeed("curl http://localhost:9237/metrics | grep -c 'sql_points{' | grep -q 2")
+      '';
+    };
+
     surfboard = {
       exporterConfig = {
         enable = true;
diff --git a/nixos/tests/unbound.nix b/nixos/tests/unbound.nix
new file mode 100644
index 00000000000..dc8e5a9d3ed
--- /dev/null
+++ b/nixos/tests/unbound.nix
@@ -0,0 +1,278 @@
+/*
+ Test that our unbound module indeed works as most users would expect.
+ There are a few settings that we must consider when modifying the test. The
+ usual use-cases for unbound are
+   * running a recursive DNS resolver on the local machine
+   * running a recursive DNS resolver on the local machine, forwarding to a local DNS server via UDP/53 & TCP/53
+   * running a recursive DNS resolver on the local machine, forwarding to a local DNS server via TCP/853 (DoT)
+   * running a recursive DNS resolver on a machine in the network awaiting input from clients over TCP/53 & UDP/53
+   * running a recursive DNS resolver on a machine in the network awaiting input from clients over TCP/853 (DoT)
+
+ In the below test setup we are trying to implement all of those use cases.
+
+ Another aspect that we cover is access to the local control UNIX socket. It
+ can optionally be enabled and users can optionally be in a group to gain
+ access. Users that are not in the group (except for root) should not have
+ access to that socket. Also, when there is no socket configured, users
+ shouldn't be able to access the control socket at all. Not even root.
+*/
+import ./make-test-python.nix ({ pkgs, lib, ... }:
+  let
+    # common client configuration that we can just use for the multitude of
+    # clients we are constructing
+    common = { lib, pkgs, ... }: {
+      config = {
+        environment.systemPackages = [ pkgs.knot-dns ];
+
+        # disable the root anchor update as we do not have internet access during
+        # the test execution
+        services.unbound.enableRootTrustAnchor = false;
+      };
+    };
+
+    cert = pkgs.runCommandNoCC "selfSignedCerts" { buildInputs = [ pkgs.openssl ]; } ''
+      openssl req -x509 -newkey rsa:4096 -keyout key.pem -out cert.pem -nodes -subj '/CN=dns.example.local'
+      mkdir -p $out
+      cp key.pem cert.pem $out
+    '';
+  in
+  {
+    name = "unbound";
+    meta = with pkgs.stdenv.lib.maintainers; {
+      maintainers = [ andir ];
+    };
+
+    nodes = {
+
+      # The server that actually serves our zones; this tests unbound's authoritative mode
+      authoritative = { lib, pkgs, config, ... }: {
+        imports = [ common ];
+        networking.interfaces.eth1.ipv4.addresses = lib.mkForce [
+          { address = "192.168.0.1"; prefixLength = 24; }
+        ];
+        networking.interfaces.eth1.ipv6.addresses = lib.mkForce [
+          { address = "fd21::1"; prefixLength = 64; }
+        ];
+        networking.firewall.allowedTCPPorts = [ 53 ];
+        networking.firewall.allowedUDPPorts = [ 53 ];
+
+        services.unbound = {
+          enable = true;
+          interfaces = [ "192.168.0.1" "fd21::1" "::1" "127.0.0.1" ];
+          allowedAccess = [ "192.168.0.0/24" "fd21::/64" "::1" "127.0.0.0/8" ];
+          extraConfig = ''
+            server:
+              local-data: "example.local. IN A 1.2.3.4"
+              local-data: "example.local. IN AAAA abcd::eeff"
+          '';
+        };
+      };
+
+      # The resolver that forwards (only) to the authoritative server
+      # and listens on UDP/53, TCP/53 & TCP/853.
+      resolver = { lib, nodes, ... }: {
+        imports = [ common ];
+        networking.interfaces.eth1.ipv4.addresses = lib.mkForce [
+          { address = "192.168.0.2"; prefixLength = 24; }
+        ];
+        networking.interfaces.eth1.ipv6.addresses = lib.mkForce [
+          { address = "fd21::2"; prefixLength = 64; }
+        ];
+        networking.firewall.allowedTCPPorts = [
+          53 # regular DNS
+          853 # DNS over TLS
+        ];
+        networking.firewall.allowedUDPPorts = [ 53 ];
+
+        services.unbound = {
+          enable = true;
+          allowedAccess = [ "192.168.0.0/24" "fd21::/64" "::1" "127.0.0.0/8" ];
+          interfaces = [ "::1" "127.0.0.1" "192.168.0.2" "fd21::2" "192.168.0.2@853" "fd21::2@853" "::1@853" "127.0.0.1@853" ];
+          forwardAddresses = [
+            (lib.head nodes.authoritative.config.networking.interfaces.eth1.ipv6.addresses).address
+            (lib.head nodes.authoritative.config.networking.interfaces.eth1.ipv4.addresses).address
+          ];
+          extraConfig = ''
+            server:
+              tls-service-pem: ${cert}/cert.pem
+              tls-service-key: ${cert}/key.pem
+          '';
+        };
+      };
+
+      # machine that runs a local unbound that will be reconfigured during test execution
+      local_resolver = { lib, nodes, config, ... }: {
+        imports = [ common ];
+        networking.interfaces.eth1.ipv4.addresses = lib.mkForce [
+          { address = "192.168.0.3"; prefixLength = 24; }
+        ];
+        networking.interfaces.eth1.ipv6.addresses = lib.mkForce [
+          { address = "fd21::3"; prefixLength = 64; }
+        ];
+        networking.firewall.allowedTCPPorts = [
+          53 # regular DNS
+        ];
+        networking.firewall.allowedUDPPorts = [ 53 ];
+
+        services.unbound = {
+          enable = true;
+          allowedAccess = [ "::1" "127.0.0.0/8" ];
+          interfaces = [ "::1" "127.0.0.1" ];
+          localControlSocketPath = "/run/unbound/unbound.ctl";
+          extraConfig = ''
+            include: "/etc/unbound/extra*.conf"
+          '';
+        };
+
+        users.users = {
+          # user that is permitted to access the unix socket
+          someuser.extraGroups = [
+            config.users.users.unbound.group
+          ];
+
+          # user that is not permitted to access the unix socket
+          unauthorizeduser = {};
+        };
+
+        environment.etc = {
+          "unbound-extra1.conf".text = ''
+            forward-zone:
+              name: "example.local."
+              forward-addr: ${(lib.head nodes.resolver.config.networking.interfaces.eth1.ipv6.addresses).address}
+              forward-addr: ${(lib.head nodes.resolver.config.networking.interfaces.eth1.ipv4.addresses).address}
+          '';
+          "unbound-extra2.conf".text = ''
+            auth-zone:
+              name: something.local.
+              zonefile: ${pkgs.writeText "zone" ''
+                something.local. IN A 3.4.5.6
+              ''}
+          '';
+        };
+      };
+
+
+      # plain node that only has network access and doesn't run any part of the
+      # resolver software locally
+      client = { lib, nodes, ... }: {
+        imports = [ common ];
+        networking.nameservers = [
+          (lib.head nodes.resolver.config.networking.interfaces.eth1.ipv6.addresses).address
+          (lib.head nodes.resolver.config.networking.interfaces.eth1.ipv4.addresses).address
+        ];
+        networking.interfaces.eth1.ipv4.addresses = [
+          { address = "192.168.0.10"; prefixLength = 24; }
+        ];
+        networking.interfaces.eth1.ipv6.addresses = [
+          { address = "fd21::10"; prefixLength = 64; }
+        ];
+      };
+    };
+
+    testScript = { nodes, ... }: ''
+      import typing
+      import json
+
+      zone = "example.local."
+      records = [("AAAA", "abcd::eeff"), ("A", "1.2.3.4")]
+
+
+      def query(
+          machine,
+          host: str,
+          query_type: str,
+          query: str,
+          expected: typing.Optional[str] = None,
+          args: typing.Optional[typing.List[str]] = None,
+      ):
+          """
+          Execute a single query and compare the result with expectation
+          """
+          text_args = ""
+          if args:
+              text_args = " ".join(args)
+
+          out = machine.succeed(
+              f"kdig {text_args} {query} {query_type} @{host} +short"
+          ).strip()
+          machine.log(f"{host} replied with {out}")
+          if expected:
+              assert expected == out, f"Expected `{expected}` but got `{out}`"
+
+
+      def test(machine, remotes, /, doh=False, zone=zone, records=records, args=[]):
+          """
+          Run queries for the given remotes on the given machine.
+          """
+          for query_type, expected in records:
+              for remote in remotes:
+                  query(machine, remote, query_type, zone, expected, args)
+                  query(machine, remote, query_type, zone, expected, ["+tcp"] + args)
+                  if doh:
+                      query(
+                          machine,
+                          remote,
+                          query_type,
+                          zone,
+                          expected,
+                          ["+tcp", "+tls"] + args,
+                      )
+
+
+      client.start()
+      authoritative.wait_for_unit("unbound.service")
+
+      # verify that we can resolve locally
+      with subtest("test the authoritative servers local responses"):
+          test(authoritative, ["::1", "127.0.0.1"])
+
+      resolver.wait_for_unit("unbound.service")
+
+      with subtest("root is unable to use unbounc-control when the socket is not configured"):
+          resolver.succeed("which unbound-control")  # the binary must exist
+          resolver.fail("unbound-control list_forwards")  # the invocation must fail
+
+      # verify that the resolver is able to resolve on all the local protocols
+      with subtest("test that the resolver resolves on all protocols and transports"):
+          test(resolver, ["::1", "127.0.0.1"], doh=True)
+
+      resolver.wait_for_unit("multi-user.target")
+
+      with subtest("client should be able to query the resolver"):
+          test(client, ["${(lib.head nodes.resolver.config.networking.interfaces.eth1.ipv6.addresses).address}", "${(lib.head nodes.resolver.config.networking.interfaces.eth1.ipv4.addresses).address}"], doh=True)
+
+      # discard the client we do not need anymore
+      client.shutdown()
+
+      local_resolver.wait_for_unit("multi-user.target")
+
+      # link a new config file to /etc/unbound/extra1.conf
+      local_resolver.succeed("ln -s /etc/unbound-extra1.conf /etc/unbound/extra1.conf")
+
+      # reload the server & ensure the forwarding works
+      with subtest("test that the local resolver resolves on all protocols and transports"):
+          local_resolver.succeed("systemctl reload unbound")
+          print(local_resolver.succeed("journalctl -u unbound -n 1000"))
+          test(local_resolver, ["::1", "127.0.0.1"], args=["+timeout=60"])
+
+      with subtest("test that we can use the unbound control socket"):
+          out = local_resolver.succeed(
+              "sudo -u someuser -- unbound-control list_forwards"
+          ).strip()
+
+          # Thank you black! Can't really break this line into a readable version.
+          expected = "example.local. IN forward ${(lib.head nodes.resolver.config.networking.interfaces.eth1.ipv6.addresses).address} ${(lib.head nodes.resolver.config.networking.interfaces.eth1.ipv4.addresses).address}"
+          assert out == expected, f"Expected `{expected}` but got `{out}` instead."
+          local_resolver.fail("sudo -u unauthorizeduser -- unbound-control list_forwards")
+
+
+      # link a new config file to /etc/unbound/extra2.conf
+      local_resolver.succeed("ln -sf /etc/unbound-extra2.conf /etc/unbound/extra2.conf")
+
+      # reload the server & ensure the new local zone works
+      with subtest("test that we can query the new local zone"):
+          local_resolver.succeed("unbound-control reload")
+          r = [("A", "3.4.5.6")]
+          test(local_resolver, ["::1", "127.0.0.1"], zone="something.local.", records=r)
+    '';
+  })