Diffstat (limited to 'nixos')
-rw-r--r--  nixos/doc/manual/configuration/wireless.xml | 5
-rw-r--r--  nixos/doc/manual/default.nix | 1
-rw-r--r--  nixos/doc/manual/release-notes/rl-1903.xml | 41
-rw-r--r--  nixos/lib/testing.nix | 2
-rw-r--r--  nixos/maintainers/scripts/openstack/nova-image.nix | 26
-rw-r--r--  nixos/maintainers/scripts/openstack/openstack-image.nix | 26
-rw-r--r--  nixos/modules/installer/tools/nixos-generate-config.pl | 1
-rw-r--r--  nixos/modules/misc/version.nix | 4
-rw-r--r--  nixos/modules/module-list.nix | 1
-rw-r--r--  nixos/modules/programs/iotop.nix | 17
-rw-r--r--  nixos/modules/services/mail/roundcube.nix | 20
-rw-r--r--  nixos/modules/services/misc/airsonic.nix | 17
-rw-r--r--  nixos/modules/services/misc/docker-registry.nix | 2
-rw-r--r--  nixos/modules/services/misc/home-assistant.nix | 2
-rw-r--r--  nixos/modules/services/misc/matrix-synapse.nix | 11
-rw-r--r--  nixos/modules/services/misc/nzbget.nix | 40
-rw-r--r--  nixos/modules/services/misc/plex.nix | 1
-rw-r--r--  nixos/modules/services/misc/rippled.nix | 346
-rw-r--r--  nixos/modules/services/misc/weechat.xml | 4
-rw-r--r--  nixos/modules/services/misc/zoneminder.nix | 8
-rw-r--r--  nixos/modules/services/monitoring/grafana.nix | 184
-rw-r--r--  nixos/modules/services/networking/flannel.nix | 41
-rw-r--r--  nixos/modules/services/networking/syncthing.nix | 2
-rw-r--r--  nixos/modules/services/networking/teamspeak3.nix | 24
-rw-r--r--  nixos/modules/services/networking/unifi.nix | 3
-rw-r--r--  nixos/modules/services/networking/wpa_supplicant.nix | 21
-rw-r--r--  nixos/modules/services/printing/cupsd.nix | 4
-rw-r--r--  nixos/modules/services/web-apps/atlassian/confluence.nix | 9
-rw-r--r--  nixos/modules/services/web-apps/atlassian/crowd.nix | 9
-rw-r--r--  nixos/modules/services/web-apps/atlassian/jira.nix | 9
-rw-r--r--  nixos/modules/services/web-apps/matomo-doc.xml | 32
-rw-r--r--  nixos/modules/services/web-apps/matomo.nix | 99
-rw-r--r--  nixos/modules/services/web-apps/tt-rss.nix | 87
-rw-r--r--  nixos/modules/services/web-servers/phpfpm/default.nix | 9
-rw-r--r--  nixos/modules/services/web-servers/phpfpm/pool-options.nix | 9
-rw-r--r--  nixos/modules/services/x11/xautolock.nix | 2
-rw-r--r--  nixos/modules/system/boot/loader/raspberrypi/raspberrypi.nix | 2
-rw-r--r--  nixos/modules/system/boot/systemd.nix | 4
-rw-r--r--  nixos/modules/virtualisation/amazon-image.nix | 29
-rw-r--r--  nixos/modules/virtualisation/docker-preloader.nix | 5
-rw-r--r--  nixos/modules/virtualisation/ec2-metadata-fetcher.nix | 23
-rw-r--r--  nixos/modules/virtualisation/nova-config.nix | 60
-rw-r--r--  nixos/modules/virtualisation/openstack-config.nix | 57
-rw-r--r--  nixos/tests/all-tests.nix | 5
-rw-r--r--  nixos/tests/common/ec2.nix | 49
-rw-r--r--  nixos/tests/ec2.nix | 62
-rw-r--r--  nixos/tests/flannel.nix | 5
-rw-r--r--  nixos/tests/hydra/default.nix | 160
-rw-r--r--  nixos/tests/matrix-synapse.nix | 50
-rw-r--r--  nixos/tests/openstack-image.nix | 88
-rw-r--r--  nixos/tests/osrm-backend.nix | 53
-rw-r--r--  nixos/tests/printing.nix | 2
-rw-r--r--  nixos/tests/roundcube.nix | 4
-rw-r--r--  nixos/tests/switch-test.nix | 13
54 files changed, 1242 insertions(+), 548 deletions(-)
diff --git a/nixos/doc/manual/configuration/wireless.xml b/nixos/doc/manual/configuration/wireless.xml
index 999447234ad..f7e99ff0e35 100644
--- a/nixos/doc/manual/configuration/wireless.xml
+++ b/nixos/doc/manual/configuration/wireless.xml
@@ -29,7 +29,10 @@
   networks are set, it will default to using a configuration file at
   <literal>/etc/wpa_supplicant.conf</literal>. You should edit this file
   yourself to define wireless networks, WPA keys and so on (see
-  wpa_supplicant.conf(5)).
+  <citerefentry>
+    <refentrytitle>wpa_supplicant.conf</refentrytitle>
+    <manvolnum>5</manvolnum>
+  </citerefentry>).
  </para>
 
  <para>
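A minimal sketch of the declarative alternative to hand-editing /etc/wpa_supplicant.conf, using the networking.wireless options this manual section covers; the SSID and key below are placeholders:

  networking.wireless = {
    enable = true;
    # Hypothetical network name and pre-shared key; if no networks are set,
    # wpa_supplicant falls back to /etc/wpa_supplicant.conf as described above.
    networks."home-wifi".psk = "correct horse battery staple";
  };
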
diff --git a/nixos/doc/manual/default.nix b/nixos/doc/manual/default.nix
index faae4f20544..02b91773f5d 100644
--- a/nixos/doc/manual/default.nix
+++ b/nixos/doc/manual/default.nix
@@ -265,6 +265,7 @@ in rec {
       xsltproc \
         ${manualXsltprocOptions} \
         --stringparam target.database.document "${olinkDB}/olinkdb.xml" \
+        --stringparam id.warnings "1" \
         --nonet --output $dst/ \
         ${docbook_xsl_ns}/xml/xsl/docbook/xhtml/chunktoc.xsl \
         ${manual-combined}/manual-combined.xml
diff --git a/nixos/doc/manual/release-notes/rl-1903.xml b/nixos/doc/manual/release-notes/rl-1903.xml
index 943b9d2a608..0937a681d15 100644
--- a/nixos/doc/manual/release-notes/rl-1903.xml
+++ b/nixos/doc/manual/release-notes/rl-1903.xml
@@ -378,6 +378,23 @@
     (<link xlink:href="https://github.com/NixOS/nixpkgs/pull/54637">#54637</link>)
    </para>
   </listitem>
+  <listitem>
+   <para>
+    <literal>matrix-synapse</literal> has been updated to version 0.99. It will
+    <link xlink:href="https://github.com/matrix-org/synapse/pull/4509">no longer generate a self-signed certificate on first launch</link>
+    and will be <link xlink:href="https://matrix.org/blog/2019/02/05/synapse-0-99-0/">the last version to accept self-signed certificates</link>.
+    As such, it is now recommended to use a proper certificate verified by a
+    root CA (for example Let's Encrypt).
+   </para>
+  </listitem>
+   <listitem>
+    <para>
+     <literal>mailutils</literal> now works by default when
+     <literal>sendmail</literal> is not in a setuid wrapper. As a consequence,
+     the <literal>sendmailPath</literal> argument, having lost its main use, has
+     been removed.
+    </para>
+   </listitem>
   </itemizedlist>
  </section>
 
@@ -395,6 +412,23 @@
      <option>services.matomo.package</option> which determines the used
      Matomo version.
     </para>
+    <para>
+     The Matomo module now also comes with the systemd service <literal>matomo-archive-processing.service</literal>
+     and a timer that automatically triggers archive processing every hour.
+     This means that you can safely
+     <link xlink:href="https://matomo.org/docs/setup-auto-archiving/#disable-browser-triggers-for-matomo-archiving-and-limit-matomo-reports-to-updating-every-hour">
+      disable browser triggers for Matomo archiving
+     </link> at <literal>Administration > System > General Settings</literal>.
+    </para>
+    <para>
+     Additionally, you can enable to
+     <link xlink:href="https://matomo.org/docs/privacy/#step-2-delete-old-visitors-logs">
+      delete old visitor logs
+     </link> at <literal>Administration > System > Privacy</literal>,
+     but make sure that you run <literal>systemctl start matomo-archive-processing.service</literal>
+     at least once without errors if you have already collected data before,
+     so that the reports get archived before the source data gets deleted.
+    </para>
    </listitem>
    <listitem>
     <para>
@@ -491,6 +525,13 @@
      the Redmine 3.x series.
     </para>
    </listitem>
+   <listitem>
+    <para>
+     The <link linkend="opt-services.grafana.enable">Grafana module</link> now supports declarative
+     <link xlink:href="http://docs.grafana.org/administration/provisioning/">datasource and dashboard</link>
+     provisioning.
+    </para>
+   </listitem>
   </itemizedlist>
  </section>
 </section>
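Following the Synapse 0.99 note above, a minimal sketch of fronting matrix-synapse with an ACME-backed (Let's Encrypt) nginx virtual host instead of a self-signed certificate; the hostname and Synapse's usual client port 8008 are assumptions:

  services.matrix-synapse.enable = true;
  services.nginx.virtualHosts."matrix.example.org" = {
    enableACME = true;   # obtain a certificate from a root CA (Let's Encrypt)
    forceSSL = true;
    # Proxy the Matrix client/federation API to Synapse's plain-HTTP listener.
    locations."/_matrix".proxyPass = "http://127.0.0.1:8008";
  };
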
diff --git a/nixos/lib/testing.nix b/nixos/lib/testing.nix
index e68563ef48d..a13e76a6956 100644
--- a/nixos/lib/testing.nix
+++ b/nixos/lib/testing.nix
@@ -3,7 +3,7 @@
   # Use a minimal kernel?
 , minimal ? false
   # Ignored
-, config ? null
+, config ? {}
   # Modules to add to each VM
 , extraConfigurations ? [] }:
 
diff --git a/nixos/maintainers/scripts/openstack/nova-image.nix b/nixos/maintainers/scripts/openstack/nova-image.nix
deleted file mode 100644
index b6f3a5b1520..00000000000
--- a/nixos/maintainers/scripts/openstack/nova-image.nix
+++ /dev/null
@@ -1,26 +0,0 @@
-# nix-build '<nixpkgs/nixos>' -A config.system.build.novaImage --arg configuration "{ imports = [ ./nixos/maintainers/scripts/openstack/nova-image.nix ]; }"
-
-{ config, lib, pkgs, ... }:
-
-with lib;
-
-{
-  imports =
-    [ ../../../modules/installer/cd-dvd/channel.nix
-      ../../../modules/virtualisation/nova-config.nix
-    ];
-
-  system.build.novaImage = import ../../../lib/make-disk-image.nix {
-    inherit lib config;
-    pkgs = import ../../../.. { inherit (pkgs) system; }; # ensure we use the regular qemu-kvm package
-    diskSize = 8192;
-    format = "qcow2";
-    configFile = pkgs.writeText "configuration.nix"
-      ''
-        {
-          imports = [ <nixpkgs/nixos/modules/virtualisation/nova-config.nix> ];
-        }
-      '';
-  };
-
-}
diff --git a/nixos/maintainers/scripts/openstack/openstack-image.nix b/nixos/maintainers/scripts/openstack/openstack-image.nix
new file mode 100644
index 00000000000..4c464f43f61
--- /dev/null
+++ b/nixos/maintainers/scripts/openstack/openstack-image.nix
@@ -0,0 +1,26 @@
+# nix-build '<nixpkgs/nixos>' -A config.system.build.openstackImage --arg configuration "{ imports = [ ./nixos/maintainers/scripts/openstack/openstack-image.nix ]; }"
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+{
+  imports =
+    [ ../../../modules/installer/cd-dvd/channel.nix
+      ../../../modules/virtualisation/openstack-config.nix
+    ];
+
+  system.build.openstackImage = import ../../../lib/make-disk-image.nix {
+    inherit lib config;
+    pkgs = import ../../../.. { inherit (pkgs) system; }; # ensure we use the regular qemu-kvm package
+    diskSize = 8192;
+    format = "qcow2";
+    configFile = pkgs.writeText "configuration.nix"
+      ''
+        {
+          imports = [ <nixpkgs/nixos/modules/virtualisation/openstack-config.nix> ];
+        }
+      '';
+  };
+
+}
diff --git a/nixos/modules/installer/tools/nixos-generate-config.pl b/nixos/modules/installer/tools/nixos-generate-config.pl
index 3bcf90258d7..686204ee034 100644
--- a/nixos/modules/installer/tools/nixos-generate-config.pl
+++ b/nixos/modules/installer/tools/nixos-generate-config.pl
@@ -641,7 +641,6 @@ $bootLoaderConfig
   # Define a user account. Don't forget to set a password with ‘passwd’.
   # users.users.jane = {
   #   isNormalUser = true;
-  #   uid = 1000;
   #   extraGroups = [ "wheel" ]; # Enable ‘sudo’ for the user.
   # };
 
diff --git a/nixos/modules/misc/version.nix b/nixos/modules/misc/version.nix
index 001505320c0..c576cf4cb92 100644
--- a/nixos/modules/misc/version.nix
+++ b/nixos/modules/misc/version.nix
@@ -36,14 +36,14 @@ in
     nixos.revision = mkOption {
       internal = true;
       type = types.str;
-      default = lib.trivial.revisionWithDefault "master";
+      default = trivial.revisionWithDefault "master";
       description = "The Git revision from which this NixOS configuration was built.";
     };
 
     nixos.codeName = mkOption {
       readOnly = true;
       type = types.str;
-      default = lib.trivial.codeName;
+      default = trivial.codeName;
       description = "The NixOS release code name (e.g. <literal>Emu</literal>).";
     };
 
diff --git a/nixos/modules/module-list.nix b/nixos/modules/module-list.nix
index 3ee242ab222..04bcb41cd07 100644
--- a/nixos/modules/module-list.nix
+++ b/nixos/modules/module-list.nix
@@ -101,6 +101,7 @@
   ./programs/gnupg.nix
   ./programs/gphoto2.nix
   ./programs/iftop.nix
+  ./programs/iotop.nix
   ./programs/java.nix
   ./programs/kbdlight.nix
   ./programs/less.nix
diff --git a/nixos/modules/programs/iotop.nix b/nixos/modules/programs/iotop.nix
new file mode 100644
index 00000000000..5512dbc62f7
--- /dev/null
+++ b/nixos/modules/programs/iotop.nix
@@ -0,0 +1,17 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.programs.iotop;
+in {
+  options = {
+    programs.iotop.enable = mkEnableOption "iotop + setcap wrapper";
+  };
+  config = mkIf cfg.enable {
+    security.wrappers.iotop = {
+      source = "${pkgs.iotop}/bin/iotop";
+      capabilities = "cap_net_admin+p";
+    };
+  };
+}
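The new module is a single toggle; a sketch of enabling it, which installs the setcap wrapper defined above so unprivileged users can run iotop:

  programs.iotop.enable = true;
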
diff --git a/nixos/modules/services/mail/roundcube.nix b/nixos/modules/services/mail/roundcube.nix
index 6d81c7374f4..66b1c1e3e6f 100644
--- a/nixos/modules/services/mail/roundcube.nix
+++ b/nixos/modules/services/mail/roundcube.nix
@@ -25,6 +25,20 @@ in
       description = "Hostname to use for the nginx vhost";
     };
 
+    package = mkOption {
+      type = types.package;
+      default = pkgs.roundcube;
+
+      example = literalExample ''
+        roundcube.withPlugins (plugins: [ plugins.persistent_login ])
+      '';
+
+      description = ''
+        The package which contains roundcube's sources. Can be overridden to create
+        an environment which contains roundcube and third-party plugins.
+      '';
+    };
+
     database = {
       username = mkOption {
         type = types.str;
@@ -86,7 +100,7 @@ in
           forceSSL = mkDefault true;
           enableACME = mkDefault true;
           locations."/" = {
-            root = pkgs.roundcube;
+            root = cfg.package;
             index = "index.php";
             extraConfig = ''
               location ~* \.php$ {
@@ -140,12 +154,12 @@ in
             ${pkgs.sudo}/bin/sudo -u ${pgSuperUser} psql postgres -c "create database ${cfg.database.dbname} with owner ${cfg.database.username}";
           fi
           PGPASSWORD=${cfg.database.password} ${pkgs.postgresql}/bin/psql -U ${cfg.database.username} \
-            -f ${pkgs.roundcube}/SQL/postgres.initial.sql \
+            -f ${cfg.package}/SQL/postgres.initial.sql \
             -h ${cfg.database.host} ${cfg.database.dbname}
           touch /var/lib/roundcube/db-created
         fi
 
-        ${pkgs.php}/bin/php ${pkgs.roundcube}/bin/update.sh
+        ${pkgs.php}/bin/php ${cfg.package}/bin/update.sh
       '';
       serviceConfig.Type = "oneshot";
     };
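A sketch of the new package option in use, combining Roundcube with a third-party plugin as in the option's own example; the vhost name is a placeholder:

  services.roundcube = {
    enable = true;
    hostName = "webmail.example.org";   # placeholder nginx vhost name
    package = pkgs.roundcube.withPlugins (plugins: [ plugins.persistent_login ]);
  };
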
diff --git a/nixos/modules/services/misc/airsonic.nix b/nixos/modules/services/misc/airsonic.nix
index 01d7b3cf6b9..8b2ec82c770 100644
--- a/nixos/modules/services/misc/airsonic.nix
+++ b/nixos/modules/services/misc/airsonic.nix
@@ -25,6 +25,14 @@ in {
         '';
       };
 
+      virtualHost = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = ''
+          Name of the nginx virtual host to use and set up. If null, do not set up any virtual host.
+        '';
+      };
+
       listenAddress = mkOption {
         type = types.string;
         default = "127.0.0.1";
@@ -116,6 +124,8 @@ in {
           -Dserver.port=${toString cfg.port} \
           -Dairsonic.contextPath=${cfg.contextPath} \
           -Djava.awt.headless=true \
+          ${optionalString (cfg.virtualHost != null)
+            "-Dserver.use-forward-headers=true"} \
           ${toString cfg.jvmOptions} \
           -verbose:gc \
           -jar ${pkgs.airsonic}/webapps/airsonic.war
@@ -126,6 +136,13 @@ in {
       };
     };
 
+    services.nginx = mkIf (cfg.virtualHost != null) {
+      enable = true;
+      virtualHosts."${cfg.virtualHost}" = {
+        locations."${cfg.contextPath}".proxyPass = "http://${cfg.listenAddress}:${toString cfg.port}";
+      };
+    };
+
     users.users.airsonic = {
       description = "Airsonic service user";
       name = cfg.user;
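A sketch of the new virtualHost option; when it is non-null, the module enables nginx and proxies the context path to the configured listen address and port (the domain below is a placeholder):

  services.airsonic = {
    enable = true;
    virtualHost = "music.example.org";  # placeholder; null (the default) skips the nginx setup
  };
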
diff --git a/nixos/modules/services/misc/docker-registry.nix b/nixos/modules/services/misc/docker-registry.nix
index 9a3966ab30a..f3d90e532c8 100644
--- a/nixos/modules/services/misc/docker-registry.nix
+++ b/nixos/modules/services/misc/docker-registry.nix
@@ -18,7 +18,7 @@ let
       delete.enabled = cfg.enableDelete;
     };
     http = {
-      addr = ":${builtins.toString cfg.port}";
+      addr = "${cfg.listenAddress}:${builtins.toString cfg.port}";
       headers.X-Content-Type-Options = ["nosniff"];
     };
     health.storagedriver = {
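With the change above the registry binds to the configured listenAddress instead of every interface implicitly; a minimal sketch (port 5000 is assumed as the module default):

  services.dockerRegistry = {
    enable = true;
    listenAddress = "127.0.0.1";
    port = 5000;   # assumed default registry port
  };
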
diff --git a/nixos/modules/services/misc/home-assistant.nix b/nixos/modules/services/misc/home-assistant.nix
index 4eabda1d418..4ccfa22c89e 100644
--- a/nixos/modules/services/misc/home-assistant.nix
+++ b/nixos/modules/services/misc/home-assistant.nix
@@ -53,7 +53,7 @@ let
   # If you are changing this, please update the description in applyDefaultConfig
   defaultConfig = {
     homeassistant.time_zone = config.time.timeZone;
-    http.server_port = (toString cfg.port);
+    http.server_port = cfg.port;
   } // optionalAttrs (cfg.lovelaceConfig != null) {
     lovelace.mode = "yaml";
   };
diff --git a/nixos/modules/services/misc/matrix-synapse.nix b/nixos/modules/services/misc/matrix-synapse.nix
index 18e13f6ac03..a01e34d7362 100644
--- a/nixos/modules/services/misc/matrix-synapse.nix
+++ b/nixos/modules/services/misc/matrix-synapse.nix
@@ -651,12 +651,16 @@ in {
 
     services.postgresql.enable = mkIf usePostgresql (mkDefault true);
 
-    systemd.services.matrix-synapse = {
+    systemd.services.matrix-synapse =
+    let
+      python = (pkgs.python3.withPackages (ps: with ps; [ (ps.toPythonModule cfg.package) ]));
+    in
+    {
       description = "Synapse Matrix homeserver";
       after = [ "network.target" "postgresql.service" ];
       wantedBy = [ "multi-user.target" ];
       preStart = ''
-        ${cfg.package}/bin/homeserver \
+        ${python.interpreter} -m synapse.app.homeserver \
           --config-path ${configFile} \
           --keys-directory ${cfg.dataDir} \
           --generate-keys
@@ -687,10 +691,11 @@ in {
         WorkingDirectory = cfg.dataDir;
         PermissionsStartOnly = true;
         ExecStart = ''
-          ${cfg.package}/bin/homeserver \
+          ${python.interpreter} -m synapse.app.homeserver \
             ${ concatMapStringsSep "\n  " (x: "--config-path ${x} \\") ([ configFile ] ++ cfg.extraConfigFiles) }
             --keys-directory ${cfg.dataDir}
         '';
+        ExecReload = "${pkgs.utillinux}/bin/kill -HUP $MAINPID";
         Restart = "on-failure";
       };
     };
diff --git a/nixos/modules/services/misc/nzbget.nix b/nixos/modules/services/misc/nzbget.nix
index e24cecf2080..6ab98751c57 100644
--- a/nixos/modules/services/misc/nzbget.nix
+++ b/nixos/modules/services/misc/nzbget.nix
@@ -4,6 +4,7 @@ with lib;
 
 let
   cfg = config.services.nzbget;
+  dataDir = builtins.dirOf cfg.configFile;
 in {
   options = {
     services.nzbget = {
@@ -41,6 +42,12 @@ in {
         default = "nzbget";
         description = "Group under which NZBGet runs";
       };
+
+      configFile = mkOption {
+        type = types.str;
+        default = "/var/lib/nzbget/nzbget.conf";
+        description = "Path for NZBGet's config file. (If this doesn't exist, the default config template is copied here.)";
+      };
     };
   };
 
@@ -54,36 +61,25 @@ in {
         p7zip
       ];
       preStart = ''
-        datadir=${cfg.dataDir}
-        configfile=${cfg.dataDir}/nzbget.conf
         cfgtemplate=${cfg.package}/share/nzbget/nzbget.conf
-        test -d $datadir || {
-          echo "Creating nzbget data directory in $datadir"
-          mkdir -p $datadir
-        }
-        test -f $configfile || {
-          echo "nzbget.conf not found. Copying default config $cfgtemplate to $configfile"
-          cp $cfgtemplate $configfile
-          echo "Setting $configfile permissions to 0700 (needs to be written and contains plaintext credentials)"
-          chmod 0700 $configfile
+        if [ ! -f ${cfg.configFile} ]; then
+          echo "${cfg.configFile} not found. Copying default config $cfgtemplate to ${cfg.configFile}"
+          install -m 0700 $cfgtemplate ${cfg.configFile}
           echo "Setting temporary \$MAINDIR variable in default config required in order to allow nzbget to complete initial start"
           echo "Remember to change this to a proper value once NZBGet startup has been completed"
-          sed -i -e 's/MainDir=.*/MainDir=\/tmp/g' $configfile
-        }
-        echo "Ensuring proper ownership of $datadir (${cfg.user}:${cfg.group})."
-        chown -R ${cfg.user}:${cfg.group} $datadir
+          sed -i -e 's/MainDir=.*/MainDir=\/tmp/g' ${cfg.configFile}
+        fi
       '';
 
       script = ''
-        configfile=${cfg.dataDir}/nzbget.conf
-        args="--daemon --configfile $configfile"
-        # The script in preStart (above) copies nzbget's config template to datadir on first run, containing paths that point to the nzbget derivation installed at the time. 
-        # These paths break when nzbget is upgraded & the original derivation is garbage collected. If such broken paths are found in the config file, override them to point to 
+        args="--daemon --configfile ${cfg.configFile}"
+        # The script in preStart (above) copies nzbget's config template to datadir on first run, containing paths that point to the nzbget derivation installed at the time.
+        # These paths break when nzbget is upgraded & the original derivation is garbage collected. If such broken paths are found in the config file, override them to point to
         # the currently installed nzbget derivation.
         cfgfallback () {
-          local hit=`grep -Po "(?<=^$1=).*+" "$configfile" | sed 's/[ \t]*$//'` # Strip trailing whitespace
+          local hit=`grep -Po "(?<=^$1=).*+" "${cfg.configFile}" | sed 's/[ \t]*$//'` # Strip trailing whitespace
           ( test $hit && test -e $hit ) || {
-            echo "In $configfile, valid $1 not found; falling back to $1=$2"
+            echo "In ${cfg.configFile}, valid $1 not found; falling back to $1=$2"
             args+=" -o $1=$2"
           }
         }
@@ -93,6 +89,8 @@ in {
       '';
 
       serviceConfig = {
+        StateDirectory = dataDir;
+        StateDirectoryMode = "0700";
         Type = "forking";
         User = cfg.user;
         Group = cfg.group;
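A sketch of the new configFile option; the value shown is the module default, and the systemd state directory is now derived from its parent directory:

  services.nzbget = {
    enable = true;
    configFile = "/var/lib/nzbget/nzbget.conf";  # copied from the packaged template on first start
  };
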
diff --git a/nixos/modules/services/misc/plex.nix b/nixos/modules/services/misc/plex.nix
index e4810ce9f87..b06c1c4bbc6 100644
--- a/nixos/modules/services/misc/plex.nix
+++ b/nixos/modules/services/misc/plex.nix
@@ -145,6 +145,7 @@ in
         PLEX_MEDIA_SERVER_HOME="${cfg.package}/usr/lib/plexmediaserver";
         PLEX_MEDIA_SERVER_MAX_PLUGIN_PROCS="6";
         PLEX_MEDIA_SERVER_TMPDIR="/tmp";
+        PLEX_MEDIA_SERVER_USE_SYSLOG="true";
         LD_LIBRARY_PATH="/run/opengl-driver/lib:${cfg.package}/usr/lib/plexmediaserver";
         LC_ALL="en_US.UTF-8";
         LANG="en_US.UTF-8";
diff --git a/nixos/modules/services/misc/rippled.nix b/nixos/modules/services/misc/rippled.nix
index 9d9a0ba44da..cdf61730de3 100644
--- a/nixos/modules/services/misc/rippled.nix
+++ b/nixos/modules/services/misc/rippled.nix
@@ -85,70 +85,70 @@ let
   portOptions = { name, ...}: {
     options = {
       name = mkOption {
-	internal = true;
-	default = name;
+        internal = true;
+        default = name;
       };
 
       ip = mkOption {
-	default = "127.0.0.1";
-	description = "Ip where rippled listens.";
-	type = types.str;
+        default = "127.0.0.1";
+        description = "Ip where rippled listens.";
+        type = types.str;
       };
 
       port = mkOption {
-	description = "Port where rippled listens.";
-	type = types.int;
+        description = "Port where rippled listens.";
+        type = types.int;
       };
 
       protocol = mkOption {
-	description = "Protocols expose by rippled.";
-	type = types.listOf (types.enum ["http" "https" "ws" "wss" "peer"]);
+        description = "Protocols expose by rippled.";
+        type = types.listOf (types.enum ["http" "https" "ws" "wss" "peer"]);
       };
 
       user = mkOption {
-	description = "When set, these credentials will be required on HTTP/S requests.";
-	type = types.str;
-	default = "";
+        description = "When set, these credentials will be required on HTTP/S requests.";
+        type = types.str;
+        default = "";
       };
 
       password = mkOption {
-	description = "When set, these credentials will be required on HTTP/S requests.";
-	type = types.str;
-	default = "";
+        description = "When set, these credentials will be required on HTTP/S requests.";
+        type = types.str;
+        default = "";
       };
 
       admin = mkOption {
-	description = "A comma-separated list of admin IP addresses.";
-	type = types.listOf types.str;
-	default = ["127.0.0.1"];
+        description = "A comma-separated list of admin IP addresses.";
+        type = types.listOf types.str;
+        default = ["127.0.0.1"];
       };
 
       ssl = {
-	key = mkOption {
-	  description = ''
-	    Specifies the filename holding the SSL key in PEM format.
-	  '';
-	  default = null;
-	  type = types.nullOr types.path;
-	};
-
-	cert = mkOption {
-	  description = ''
-	    Specifies the path to the SSL certificate file in PEM format.
-	    This is not needed if the chain includes it.
-	  '';
-	  default = null;
-	  type = types.nullOr types.path;
-	};
-
-	chain = mkOption {
-	  description = ''
-	    If you need a certificate chain, specify the path to the
-	    certificate chain here. The chain may include the end certificate.
-	  '';
-	  default = null;
-	  type = types.nullOr types.path;
-	};
+        key = mkOption {
+          description = ''
+            Specifies the filename holding the SSL key in PEM format.
+          '';
+          default = null;
+          type = types.nullOr types.path;
+        };
+
+        cert = mkOption {
+          description = ''
+            Specifies the path to the SSL certificate file in PEM format.
+            This is not needed if the chain includes it.
+          '';
+          default = null;
+          type = types.nullOr types.path;
+        };
+
+        chain = mkOption {
+          description = ''
+            If you need a certificate chain, specify the path to the
+            certificate chain here. The chain may include the end certificate.
+          '';
+          default = null;
+          type = types.nullOr types.path;
+        };
       };
     };
   };
@@ -175,14 +175,14 @@ let
 
       onlineDelete = mkOption {
         description = "Enable automatic purging of older ledger information.";
-        type = types.addCheck (types.nullOr types.int) (v: v > 256);
+        type = types.nullOr (types.addCheck types.int (v: v > 256));
         default = cfg.ledgerHistory;
       };
 
       advisoryDelete = mkOption {
         description = ''
-	        If set, then require administrative RPC call "can_delete"
-	        to enable online deletion of ledger records.
+          If set, then require administrative RPC call "can_delete"
+          to enable online deletion of ledger records.
         '';
         type = types.nullOr types.bool;
         default = null;
@@ -207,168 +207,168 @@ in
       enable = mkEnableOption "rippled";
 
       package = mkOption {
-	description = "Which rippled package to use.";
-	type = types.package;
-	default = pkgs.rippled;
-	defaultText = "pkgs.rippled";
+        description = "Which rippled package to use.";
+        type = types.package;
+        default = pkgs.rippled;
+        defaultText = "pkgs.rippled";
       };
 
       ports = mkOption {
-	description = "Ports exposed by rippled";
-	type = with types; attrsOf (submodule portOptions);
-	default = {
-	  rpc = {
-	    port = 5005;
-	    admin = ["127.0.0.1"];
-	    protocol = ["http"];
-	  };
-
-	  peer = {
-	    port = 51235;
-	    ip = "0.0.0.0";
-	    protocol = ["peer"];
-	  };
-
-	  ws_public = {
-	    port = 5006;
-	    ip = "0.0.0.0";
-	    protocol = ["ws" "wss"];
-	  };
-	};
+        description = "Ports exposed by rippled";
+        type = with types; attrsOf (submodule portOptions);
+        default = {
+          rpc = {
+            port = 5005;
+            admin = ["127.0.0.1"];
+            protocol = ["http"];
+          };
+
+          peer = {
+            port = 51235;
+            ip = "0.0.0.0";
+            protocol = ["peer"];
+          };
+
+          ws_public = {
+            port = 5006;
+            ip = "0.0.0.0";
+            protocol = ["ws" "wss"];
+          };
+        };
       };
 
       nodeDb = mkOption {
-	description = "Rippled main database options.";
-	type = with types; nullOr (submodule dbOptions);
-	default = {
-	  type = "rocksdb";
-	  extraOpts = ''
-	    open_files=2000
-	    filter_bits=12
-	    cache_mb=256
-	    file_size_pb=8
-	    file_size_mult=2;
-	  '';
-	};
+        description = "Rippled main database options.";
+        type = with types; nullOr (submodule dbOptions);
+        default = {
+          type = "rocksdb";
+          extraOpts = ''
+            open_files=2000
+            filter_bits=12
+            cache_mb=256
+            file_size_pb=8
+            file_size_mult=2;
+          '';
+        };
       };
 
       tempDb = mkOption {
-	description = "Rippled temporary database options.";
-	type = with types; nullOr (submodule dbOptions);
-	default = null;
+        description = "Rippled temporary database options.";
+        type = with types; nullOr (submodule dbOptions);
+        default = null;
       };
 
       importDb = mkOption {
-	description = "Settings for performing a one-time import.";
-	type = with types; nullOr (submodule dbOptions);
-	default = null;
+        description = "Settings for performing a one-time import.";
+        type = with types; nullOr (submodule dbOptions);
+        default = null;
       };
 
       nodeSize = mkOption {
-	description = ''
-	  Rippled size of the node you are running.
-	  "tiny", "small", "medium", "large", and "huge"
-	'';
-	type = types.enum ["tiny" "small" "medium" "large" "huge"];
-	default = "small";
+        description = ''
+          Rippled size of the node you are running.
+          "tiny", "small", "medium", "large", and "huge"
+        '';
+        type = types.enum ["tiny" "small" "medium" "large" "huge"];
+        default = "small";
       };
 
       ips = mkOption {
-	description = ''
-	  List of hostnames or ips where the Ripple protocol is served.
-	  For a starter list, you can either copy entries from:
-	  https://ripple.com/ripple.txt or if you prefer you can let it
-	   default to r.ripple.com 51235
-
-	  A port may optionally be specified after adding a space to the
-	  address. By convention, if known, IPs are listed in from most
-	  to least trusted.
-	'';
-	type = types.listOf types.str;
-	default = ["r.ripple.com 51235"];
+        description = ''
+          List of hostnames or ips where the Ripple protocol is served.
+          For a starter list, you can either copy entries from:
+          https://ripple.com/ripple.txt or if you prefer you can let it
+           default to r.ripple.com 51235
+
+          A port may optionally be specified after adding a space to the
+          address. By convention, if known, IPs are listed in from most
+          to least trusted.
+        '';
+        type = types.listOf types.str;
+        default = ["r.ripple.com 51235"];
       };
 
       ipsFixed = mkOption {
-	description = ''
-	  List of IP addresses or hostnames to which rippled should always
-	  attempt to maintain peer connections with. This is useful for
-	  manually forming private networks, for example to configure a
-	  validation server that connects to the Ripple network through a
-	  public-facing server, or for building a set of cluster peers.
+        description = ''
+          List of IP addresses or hostnames to which rippled should always
+          attempt to maintain peer connections with. This is useful for
+          manually forming private networks, for example to configure a
+          validation server that connects to the Ripple network through a
+          public-facing server, or for building a set of cluster peers.
 
-	  A port may optionally be specified after adding a space to the address
-	'';
-	type = types.listOf types.str;
-	default = [];
+          A port may optionally be specified after adding a space to the address
+        '';
+        type = types.listOf types.str;
+        default = [];
       };
 
       validators = mkOption {
-	description = ''
-	  List of nodes to always accept as validators. Nodes are specified by domain
-	  or public key.
-	'';
-	type = types.listOf types.str;
-	default = [
-	  "n949f75evCHwgyP4fPVgaHqNHxUVN15PsJEZ3B3HnXPcPjcZAoy7  RL1"
-	  "n9MD5h24qrQqiyBC8aeqqCWvpiBiYQ3jxSr91uiDvmrkyHRdYLUj  RL2"
-	  "n9L81uNCaPgtUJfaHh89gmdvXKAmSt5Gdsw2g1iPWaPkAHW5Nm4C  RL3"
-	  "n9KiYM9CgngLvtRCQHZwgC2gjpdaZcCcbt3VboxiNFcKuwFVujzS  RL4"
-	  "n9LdgEtkmGB9E2h3K4Vp7iGUaKuq23Zr32ehxiU8FWY7xoxbWTSA  RL5"
-	];
+        description = ''
+          List of nodes to always accept as validators. Nodes are specified by domain
+          or public key.
+        '';
+        type = types.listOf types.str;
+        default = [
+          "n949f75evCHwgyP4fPVgaHqNHxUVN15PsJEZ3B3HnXPcPjcZAoy7  RL1"
+          "n9MD5h24qrQqiyBC8aeqqCWvpiBiYQ3jxSr91uiDvmrkyHRdYLUj  RL2"
+          "n9L81uNCaPgtUJfaHh89gmdvXKAmSt5Gdsw2g1iPWaPkAHW5Nm4C  RL3"
+          "n9KiYM9CgngLvtRCQHZwgC2gjpdaZcCcbt3VboxiNFcKuwFVujzS  RL4"
+          "n9LdgEtkmGB9E2h3K4Vp7iGUaKuq23Zr32ehxiU8FWY7xoxbWTSA  RL5"
+        ];
       };
 
       databasePath = mkOption {
-	description = ''
-	  Path to the ripple database.
-	'';
-	type = types.path;
-	default = "/var/lib/rippled";
+        description = ''
+          Path to the ripple database.
+        '';
+        type = types.path;
+        default = "/var/lib/rippled";
       };
 
       validationQuorum = mkOption {
-	description = ''
-	  The minimum number of trusted validations a ledger must have before
-	  the server considers it fully validated.
-	'';
-	type = types.int;
-	default = 3;
+        description = ''
+          The minimum number of trusted validations a ledger must have before
+          the server considers it fully validated.
+        '';
+        type = types.int;
+        default = 3;
       };
 
       ledgerHistory = mkOption {
-	description = ''
-	  The number of past ledgers to acquire on server startup and the minimum
-	  to maintain while running.
-	'';
-	type = types.either types.int (types.enum ["full"]);
-	default = 1296000; # 1 month
+        description = ''
+          The number of past ledgers to acquire on server startup and the minimum
+          to maintain while running.
+        '';
+        type = types.either types.int (types.enum ["full"]);
+        default = 1296000; # 1 month
       };
 
       fetchDepth = mkOption {
-	description = ''
-	  The number of past ledgers to serve to other peers that request historical
-	  ledger data (or "full" for no limit).
-	'';
-	type = types.either types.int (types.enum ["full"]);
-	default = "full";
+        description = ''
+          The number of past ledgers to serve to other peers that request historical
+          ledger data (or "full" for no limit).
+        '';
+        type = types.either types.int (types.enum ["full"]);
+        default = "full";
       };
 
       sntpServers = mkOption {
-	description = ''
-	  IP address or domain of NTP servers to use for time synchronization.;
-	'';
-	type = types.listOf types.str;
-	default = [
-	  "time.windows.com"
-	  "time.apple.com"
-	  "time.nist.gov"
-	  "pool.ntp.org"
-	];
+        description = ''
+          IP address or domain of NTP servers to use for time synchronization.;
+        '';
+        type = types.listOf types.str;
+        default = [
+          "time.windows.com"
+          "time.apple.com"
+          "time.nist.gov"
+          "pool.ntp.org"
+        ];
       };
 
       logLevel = mkOption {
         description = "Logging verbosity.";
-	type = types.enum ["debug" "error" "info"];
-	default = "error";
+        type = types.enum ["debug" "error" "info"];
+        default = "error";
       };
 
       statsd = {
@@ -389,14 +389,14 @@ in
 
       extraConfig = mkOption {
         default = "";
-	description = ''
-	  Extra lines to be added verbatim to the rippled.cfg configuration file.
-	'';
+        description = ''
+          Extra lines to be added verbatim to the rippled.cfg configuration file.
+        '';
       };
 
       config = mkOption {
-	internal = true;
-	default = pkgs.writeText "rippled.conf" rippledCfg;
+        internal = true;
+        default = pkgs.writeText "rippled.conf" rippledCfg;
       };
     };
   };
@@ -410,8 +410,8 @@ in
       { name = "rippled";
         description = "Ripple server user";
         uid = config.ids.uids.rippled;
-	home = cfg.databasePath;
-	createHome = true;
+        home = cfg.databasePath;
+        createHome = true;
       };
 
     systemd.services.rippled = {
@@ -421,8 +421,8 @@ in
       serviceConfig = {
         ExecStart = "${cfg.package}/bin/rippled --fg --conf ${cfg.config}";
         User = "rippled";
-	Restart = "on-failure";
-	LimitNOFILE=10000;
+        Restart = "on-failure";
+        LimitNOFILE=10000;
       };
     };
 
diff --git a/nixos/modules/services/misc/weechat.xml b/nixos/modules/services/misc/weechat.xml
index b7f755bbc5c..7255edfb9da 100644
--- a/nixos/modules/services/misc/weechat.xml
+++ b/nixos/modules/services/misc/weechat.xml
@@ -8,7 +8,7 @@
   <link xlink:href="https://weechat.org/">WeeChat</link> is a fast and
   extensible IRC client.
  </para>
- <section>
+ <section xml:id="module-services-weechat-basic-usage">
   <title>Basic Usage</title>
 
   <para>
@@ -35,7 +35,7 @@
    in the state directory <literal>/var/lib/weechat</literal>.
   </para>
  </section>
- <section>
+ <section xml:id="module-services-weechat-reattach">
   <title>Re-attaching to WeeChat</title>
 
   <para>
diff --git a/nixos/modules/services/misc/zoneminder.nix b/nixos/modules/services/misc/zoneminder.nix
index a40e9e84613..ae7de7850d9 100644
--- a/nixos/modules/services/misc/zoneminder.nix
+++ b/nixos/modules/services/misc/zoneminder.nix
@@ -205,15 +205,13 @@ in {
 
       mysql = lib.mkIf cfg.database.createLocally {
         ensureDatabases = [ cfg.database.name ];
-        ensureUsers = {
+        ensureUsers = [{
           name = cfg.database.username;
-          ensurePermissions = [
-            { "${cfg.database.name}.*" = "ALL PRIVILEGES"; }
-          ];
+          ensurePermissions = { "${cfg.database.name}.*" = "ALL PRIVILEGES"; };
           initialDatabases = [
             { inherit (cfg.database) name; schema = "${pkg}/share/zoneminder/db/zm_create.sql"; }
           ];
-        };
+        }];
       };
 
       nginx = lib.mkIf useNginx {
diff --git a/nixos/modules/services/monitoring/grafana.nix b/nixos/modules/services/monitoring/grafana.nix
index 5fb3e377122..85879cfe0b3 100644
--- a/nixos/modules/services/monitoring/grafana.nix
+++ b/nixos/modules/services/monitoring/grafana.nix
@@ -50,6 +50,158 @@ let
     SMTP_FROM_ADDRESS = cfg.smtp.fromAddress;
   } // cfg.extraOptions;
 
+  datasourceConfiguration = {
+    apiVersion = 1;
+    datasources = cfg.provision.datasources;
+  };
+
+  datasourceFile = pkgs.writeText "datasource.yaml" (builtins.toJSON datasourceConfiguration);
+
+  dashboardConfiguration = {
+    apiVersion = 1;
+    providers = cfg.provision.dashboards;
+  };
+
+  dashboardFile = pkgs.writeText "dashboard.yaml" (builtins.toJSON dashboardConfiguration);
+
+  provisionConfDir =  pkgs.runCommand "grafana-provisioning" { } ''
+    mkdir -p $out/{datasources,dashboards}
+    ln -sf ${datasourceFile} $out/datasources/datasource.yaml
+    ln -sf ${dashboardFile} $out/dashboards/dashboard.yaml
+  '';
+
+  # Get a submodule without any embedded metadata:
+  _filter = x: filterAttrs (k: v: k != "_module") x;
+
+  # http://docs.grafana.org/administration/provisioning/#datasources
+  grafanaTypes.datasourceConfig = types.submodule {
+    options = {
+      name = mkOption {
+        type = types.str;
+        description = "Name of the datasource. Required";
+      };
+      type = mkOption {
+        type = types.enum ["graphite" "prometheus" "cloudwatch" "elasticsearch" "influxdb" "opentsdb" "mysql" "mssql" "postgres" "loki"];
+        description = "Datasource type. Required";
+      };
+      access = mkOption {
+        type = types.enum ["proxy" "direct"];
+        default = "proxy";
+        description = "Access mode. proxy or direct (Server or Browser in the UI). Required";
+      };
+      orgId = mkOption {
+        type = types.int;
+        default = 1;
+        description = "Org id. will default to orgId 1 if not specified";
+      };
+      url = mkOption {
+        type = types.str;
+        description = "Url of the datasource";
+      };
+      password = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = "Database password, if used";
+      };
+      user = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = "Database user, if used";
+      };
+      database = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = "Database name, if used";
+      };
+      basicAuth = mkOption {
+        type = types.nullOr types.bool;
+        default = null;
+        description = "Enable/disable basic auth";
+      };
+      basicAuthUser = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = "Basic auth username";
+      };
+      basicAuthPassword = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = "Basic auth password";
+      };
+      withCredentials = mkOption {
+        type = types.bool;
+        default = false;
+        description = "Enable/disable with credentials headers";
+      };
+      isDefault = mkOption {
+        type = types.bool;
+        default = false;
+        description = "Mark as default datasource. Max one per org";
+      };
+      jsonData = mkOption {
+        type = types.nullOr types.attrs;
+        default = null;
+        description = "Datasource specific configuration";
+      };
+      secureJsonData = mkOption {
+        type = types.nullOr types.attrs;
+        default = null;
+        description = "Datasource specific secure configuration";
+      };
+      version = mkOption {
+        type = types.int;
+        default = 1;
+        description = "Version";
+      };
+      editable = mkOption {
+        type = types.bool;
+        default = false;
+        description = "Allow users to edit datasources from the UI.";
+      };
+    };
+  };
+
+  # http://docs.grafana.org/administration/provisioning/#dashboards
+  grafanaTypes.dashboardConfig = types.submodule {
+    options = {
+      name = mkOption {
+        type = types.str;
+        default = "default";
+        description = "Provider name";
+      };
+      orgId = mkOption {
+        type = types.int;
+        default = 1;
+        description = "Organization ID";
+      };
+      folder = mkOption {
+        type = types.str;
+        default = "";
+        description = "Add dashboards to the specified folder";
+      };
+      type = mkOption {
+        type = types.str;
+        default = "file";
+        description = "Dashboard provider type";
+      };
+      disableDeletion = mkOption {
+        type = types.bool;
+        default = false;
+        description = "Disable deletion when JSON file is removed";
+      };
+      updateIntervalSeconds = mkOption {
+        type = types.int;
+        default = 10;
+        description = "How often Grafana will scan for changed dashboards";
+      };
+      options = {
+        path = mkOption {
+          type = types.path;
+          description = "Path grafana will watch for dashboards";
+        };
+      };
+    };
+  };
 in {
   options.services.grafana = {
     enable = mkEnableOption "grafana";
@@ -175,6 +327,22 @@ in {
       };
     };
 
+    provision = {
+      enable = mkEnableOption "provision";
+      datasources = mkOption {
+        description = "Grafana datasources configuration";
+        default = [];
+        type = types.listOf grafanaTypes.datasourceConfig;
+        apply = x: map _filter x;
+      };
+      dashboards = mkOption {
+        description = "Grafana dashboard configuration";
+        default = [];
+        type = types.listOf grafanaTypes.dashboardConfig;
+        apply = x: map _filter x;
+      };
+    };
+
     security = {
       adminUser = mkOption {
         description = "Default admin username.";
@@ -313,10 +481,15 @@ in {
   };
 
   config = mkIf cfg.enable {
-    warnings = optional (
-      cfg.database.password != opt.database.password.default ||
-      cfg.security.adminPassword != opt.security.adminPassword.default
-    ) "Grafana passwords will be stored as plaintext in the Nix store!";
+    warnings = flatten [
+      (optional (
+        cfg.database.password != opt.database.password.default ||
+        cfg.security.adminPassword != opt.security.adminPassword.default
+      ) "Grafana passwords will be stored as plaintext in the Nix store!")
+      (optional (
+        any (x: x.password != null || x.basicAuthPassword != null || x.secureJsonData != null) cfg.provision.datasources
+      ) "Datasource passwords will be stored as plaintext in the Nix store!")
+    ];
 
     environment.systemPackages = [ cfg.package ];
 
@@ -359,6 +532,9 @@ in {
         ${optionalString (cfg.smtp.passwordFile != null) ''
           export GF_SMTP_PASSWORD="$(cat ${escapeShellArg cfg.smtp.passwordFile})"
         ''}
+        ${optionalString cfg.provision.enable ''
+          export GF_PATHS_PROVISIONING=${provisionConfDir};
+        ''}
         exec ${cfg.package.bin}/bin/grafana-server -homepath ${cfg.dataDir}
       '';
       serviceConfig = {
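A sketch of the new declarative provisioning options; field names follow the submodules above, while the datasource URL and dashboard path are placeholders:

  services.grafana = {
    enable = true;
    provision = {
      enable = true;
      datasources = [{
        name = "Prometheus";
        type = "prometheus";
        url = "http://127.0.0.1:9090";   # placeholder datasource URL
      }];
      dashboards = [{
        name = "default";
        options.path = "/var/lib/grafana/dashboards";  # placeholder directory watched for dashboard JSON
      }];
    };
  };
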
diff --git a/nixos/modules/services/networking/flannel.nix b/nixos/modules/services/networking/flannel.nix
index b93e28e34ef..6c43573851b 100644
--- a/nixos/modules/services/networking/flannel.nix
+++ b/nixos/modules/services/networking/flannel.nix
@@ -73,11 +73,35 @@ in {
       };
     };
 
+    kubeconfig = mkOption {
+      description = ''
+        Path to kubeconfig to use for storing flannel config using the
+        Kubernetes API
+      '';
+      type = types.nullOr types.path;
+      default = null;
+    };
+
     network = mkOption {
       description = " IPv4 network in CIDR format to use for the entire flannel network.";
       type = types.str;
     };
 
+    nodeName = mkOption {
+      description = ''
+        Needed when running with Kubernetes as backend, as this cannot be auto-detected.
+      '';
+      type = types.nullOr types.str;
+      default = with config.networking; (hostName + optionalString (!isNull domain) ".${domain}");
+      example = "node1.example.com";
+    };
+
+    storageBackend = mkOption {
+      description = "Determines where flannel stores its configuration at runtime";
+      type = types.enum ["etcd" "kubernetes"];
+      default = "etcd";
+    };
+
     subnetLen = mkOption {
       description = ''
         The size of the subnet allocated to each host. Defaults to 24 (i.e. /24)
@@ -122,17 +146,22 @@ in {
       after = [ "network.target" ];
       environment = {
         FLANNELD_PUBLIC_IP = cfg.publicIp;
+        FLANNELD_IFACE = cfg.iface;
+      } // optionalAttrs (cfg.storageBackend == "etcd") {
         FLANNELD_ETCD_ENDPOINTS = concatStringsSep "," cfg.etcd.endpoints;
         FLANNELD_ETCD_KEYFILE = cfg.etcd.keyFile;
         FLANNELD_ETCD_CERTFILE = cfg.etcd.certFile;
         FLANNELD_ETCD_CAFILE = cfg.etcd.caFile;
-        FLANNELD_IFACE = cfg.iface;
         ETCDCTL_CERT_FILE = cfg.etcd.certFile;
         ETCDCTL_KEY_FILE = cfg.etcd.keyFile;
         ETCDCTL_CA_FILE = cfg.etcd.caFile;
         ETCDCTL_PEERS = concatStringsSep "," cfg.etcd.endpoints;
+      } // optionalAttrs (cfg.storageBackend == "kubernetes") {
+        FLANNELD_KUBE_SUBNET_MGR = "true";
+        FLANNELD_KUBECONFIG_FILE = cfg.kubeconfig;
+        NODE_NAME = cfg.nodeName;
       };
-      preStart = ''
+      preStart = mkIf (cfg.storageBackend == "etcd") ''
         echo "setting network configuration"
         until ${pkgs.etcdctl.bin}/bin/etcdctl set /coreos.com/network/config '${builtins.toJSON networkConfig}'
         do
@@ -149,6 +178,12 @@ in {
       serviceConfig.ExecStart = "${cfg.package}/bin/flannel";
     };
 
-    services.etcd.enable = mkDefault (cfg.etcd.endpoints == ["http://127.0.0.1:2379"]);
+    services.etcd.enable = mkDefault (cfg.storageBackend == "etcd" && cfg.etcd.endpoints == ["http://127.0.0.1:2379"]);
+
+    # for some reason, flannel doesn't let you configure this path
+    # see: https://github.com/coreos/flannel/blob/master/Documentation/configuration.md#configuration
+    environment.etc."kube-flannel/net-conf.json" = mkIf (cfg.storageBackend == "kubernetes") {
+      source = pkgs.writeText "net-conf.json" (builtins.toJSON networkConfig);
+    };
   };
 }
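A sketch of the new kubernetes storage backend; the network CIDR, kubeconfig path and node name are placeholders:

  services.flannel = {
    enable = true;
    network = "10.244.0.0/16";                          # example pod network CIDR
    storageBackend = "kubernetes";
    kubeconfig = "/etc/kubernetes/flannel.kubeconfig";  # placeholder kubeconfig path
    nodeName = "node1.example.com";                     # defaults to the host's FQDN
  };
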
diff --git a/nixos/modules/services/networking/syncthing.nix b/nixos/modules/services/networking/syncthing.nix
index b2ef1885a95..702481ec517 100644
--- a/nixos/modules/services/networking/syncthing.nix
+++ b/nixos/modules/services/networking/syncthing.nix
@@ -122,7 +122,7 @@ in {
 
     systemd.packages = [ pkgs.syncthing ];
 
-    users = mkIf (cfg.user == defaultUser) {
+    users = mkIf (cfg.systemService && cfg.user == defaultUser) {
       users."${defaultUser}" =
         { group = cfg.group;
           home  = cfg.dataDir;
diff --git a/nixos/modules/services/networking/teamspeak3.nix b/nixos/modules/services/networking/teamspeak3.nix
index 410d650b1f6..9ea9c83e37c 100644
--- a/nixos/modules/services/networking/teamspeak3.nix
+++ b/nixos/modules/services/networking/teamspeak3.nix
@@ -41,8 +41,9 @@ in
       };
 
       voiceIP = mkOption {
-        type = types.str;
-        default = "0.0.0.0";
+        type = types.nullOr types.str;
+        default = null;
+        example = "0.0.0.0";
         description = ''
           IP on which the server instance will listen for incoming voice connections. Defaults to any IP.
         '';
@@ -57,8 +58,9 @@ in
       };
 
       fileTransferIP = mkOption {
-        type = types.str;
-        default = "0.0.0.0";
+        type = types.nullOr types.str;
+        default = null;
+        example = "0.0.0.0";
         description = ''
           IP on which the server instance will listen for incoming file transfer connections. Defaults to any IP.
         '';
@@ -73,8 +75,9 @@ in
       };
 
       queryIP = mkOption {
-        type = types.str;
-        default = "0.0.0.0";
+        type = types.nullOr types.str;
+        default = null;
+        example = "0.0.0.0";
         description = ''
           IP on which the server instance will listen for incoming ServerQuery connections. Defaults to any IP.
         '';
@@ -122,9 +125,12 @@ in
         ExecStart = ''
           ${ts3}/bin/ts3server \
             dbsqlpath=${ts3}/lib/teamspeak/sql/ logpath=${cfg.logPath} \
-            voice_ip=${cfg.voiceIP} default_voice_port=${toString cfg.defaultVoicePort} \
-            filetransfer_ip=${cfg.fileTransferIP} filetransfer_port=${toString cfg.fileTransferPort} \
-            query_ip=${cfg.queryIP} query_port=${toString cfg.queryPort} license_accepted=1
+            ${optionalString (cfg.voiceIP != null) "voice_ip=${cfg.voiceIP}"} \
+            default_voice_port=${toString cfg.defaultVoicePort} \
+            ${optionalString (cfg.fileTransferIP != null) "filetransfer_ip=${cfg.fileTransferIP}"} \
+            filetransfer_port=${toString cfg.fileTransferPort} \
+            ${optionalString (cfg.queryIP != null) "query_ip=${cfg.queryIP}"} \
+            query_port=${toString cfg.queryPort} license_accepted=1
         '';
         WorkingDirectory = cfg.dataDir;
         User = user;
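With the bind addresses now nullable, leaving them unset listens on any IP; binding explicitly still works, e.g. (the addresses are placeholders):

  services.teamspeak3 = {
    enable = true;
    voiceIP        = "192.0.2.10";  # placeholder addresses; omit to listen on all interfaces
    fileTransferIP = "192.0.2.10";
    queryIP        = "127.0.0.1";
  };
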
diff --git a/nixos/modules/services/networking/unifi.nix b/nixos/modules/services/networking/unifi.nix
index 89b9ac4eadf..c82e0af2803 100644
--- a/nixos/modules/services/networking/unifi.nix
+++ b/nixos/modules/services/networking/unifi.nix
@@ -121,11 +121,12 @@ in
     };
 
     networking.firewall = mkIf cfg.openPorts {
-      # https://help.ubnt.com/hc/en-us/articles/204910084-UniFi-Change-Default-Ports-for-Controller-and-UAPs
+      # https://help.ubnt.com/hc/en-us/articles/218506997
       allowedTCPPorts = [
         8080  # Port for UAP to inform controller.
         8880  # Port for HTTP portal redirect, if guest portal is enabled.
         8843  # Port for HTTPS portal redirect, ditto.
+        6789  # Port for UniFi mobile speed test.
       ];
       allowedUDPPorts = [
         3478  # UDP port used for STUN.
diff --git a/nixos/modules/services/networking/wpa_supplicant.nix b/nixos/modules/services/networking/wpa_supplicant.nix
index 8622212f085..cdfe98aa034 100644
--- a/nixos/modules/services/networking/wpa_supplicant.nix
+++ b/nixos/modules/services/networking/wpa_supplicant.nix
@@ -86,7 +86,12 @@ in {
               '';
               description = ''
                 Use this option to configure advanced authentication methods like EAP.
-                See wpa_supplicant.conf(5) for example configurations.
+                See
+                <citerefentry>
+                  <refentrytitle>wpa_supplicant.conf</refentrytitle>
+                  <manvolnum>5</manvolnum>
+                </citerefentry>
+                for example configurations.
 
                 Mutually exclusive with <varname>psk</varname> and <varname>pskRaw</varname>.
               '';
@@ -122,7 +127,12 @@ in {
               '';
               description = ''
                 Extra configuration lines appended to the network block.
-                See wpa_supplicant.conf(5) for available options.
+                See
+                <citerefentry>
+                  <refentrytitle>wpa_supplicant.conf</refentrytitle>
+                  <manvolnum>5</manvolnum>
+                </citerefentry>
+                for available options.
               '';
             };
 
@@ -174,7 +184,12 @@ in {
         '';
         description = ''
           Extra lines appended to the configuration file.
-          See wpa_supplicant.conf(5) for available options.
+          See
+          <citerefentry>
+            <refentrytitle>wpa_supplicant.conf</refentrytitle>
+            <manvolnum>5</manvolnum>
+          </citerefentry>
+          for available options.
         '';
       };
     };
diff --git a/nixos/modules/services/printing/cupsd.nix b/nixos/modules/services/printing/cupsd.nix
index 1031d6f3d7e..3a43ebbb889 100644
--- a/nixos/modules/services/printing/cupsd.nix
+++ b/nixos/modules/services/printing/cupsd.nix
@@ -316,6 +316,10 @@ in
             mkdir -m 0755 -p ${cfg.tempDir}
 
             mkdir -m 0755 -p /var/lib/cups
+            # While cups will automatically create self-signed certificates if accessed via TLS,
+            # this directory to store the certificates needs to be created manually.
+            mkdir -m 0700 -p /var/lib/cups/ssl
+
             # Backwards compatibility
             if [ ! -L /etc/cups ]; then
               mv /etc/cups/* /var/lib/cups
diff --git a/nixos/modules/services/web-apps/atlassian/confluence.nix b/nixos/modules/services/web-apps/atlassian/confluence.nix
index b71887fcc6e..15744d90cc7 100644
--- a/nixos/modules/services/web-apps/atlassian/confluence.nix
+++ b/nixos/modules/services/web-apps/atlassian/confluence.nix
@@ -6,7 +6,7 @@ let
 
   cfg = config.services.confluence;
 
-  pkg = pkgs.atlassian-confluence.override (optionalAttrs cfg.sso.enable {
+  pkg = cfg.package.override (optionalAttrs cfg.sso.enable {
     enableSSO = cfg.sso.enable;
     crowdProperties = ''
       application.name                        ${cfg.sso.applicationName}
@@ -125,7 +125,12 @@ in
         };
       };
 
-
+      package = mkOption {
+        type = types.package;
+        default = pkgs.atlassian-confluence;
+        defaultText = "pkgs.atlassian-confluence";
+        description = "Atlassian Confluence package to use.";
+      };
 
       jrePackage = mkOption {
         type = types.package;
diff --git a/nixos/modules/services/web-apps/atlassian/crowd.nix b/nixos/modules/services/web-apps/atlassian/crowd.nix
index 9f48d1e16a4..c144b21bdaf 100644
--- a/nixos/modules/services/web-apps/atlassian/crowd.nix
+++ b/nixos/modules/services/web-apps/atlassian/crowd.nix
@@ -6,7 +6,7 @@ let
 
   cfg = config.services.crowd;
 
-  pkg = pkgs.atlassian-crowd.override {
+  pkg = cfg.package.override {
     home = cfg.home;
     port = cfg.listenPort;
     openidPassword = cfg.openidPassword;
@@ -93,6 +93,13 @@ in
         };
       };
 
+      package = mkOption {
+        type = types.package;
+        default = pkgs.atlassian-crowd;
+        defaultText = "pkgs.atlassian-crowd";
+        description = "Atlassian Crowd package to use.";
+      };
+
       jrePackage = mkOption {
         type = types.package;
         default = pkgs.oraclejre8;
diff --git a/nixos/modules/services/web-apps/atlassian/jira.nix b/nixos/modules/services/web-apps/atlassian/jira.nix
index dba970c612b..0b3a5722d6c 100644
--- a/nixos/modules/services/web-apps/atlassian/jira.nix
+++ b/nixos/modules/services/web-apps/atlassian/jira.nix
@@ -6,7 +6,7 @@ let
 
   cfg = config.services.jira;
 
-  pkg = pkgs.atlassian-jira.override (optionalAttrs cfg.sso.enable {
+  pkg = cfg.package.override (optionalAttrs cfg.sso.enable {
     enableSSO = cfg.sso.enable;
     crowdProperties = ''
       application.name                        ${cfg.sso.applicationName}
@@ -131,6 +131,13 @@ in
         };
       };
 
+      package = mkOption {
+        type = types.package;
+        default = pkgs.atlassian-jira;
+        defaultText = "pkgs.atlassian-jira";
+        description = "Atlassian JIRA package to use.";
+      };
+
       jrePackage = mkOption {
         type = types.package;
         default = pkgs.oraclejre8;
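The same package option is introduced for Confluence, Crowd and JIRA alike. A hedged sketch of how it might be used, e.g. to pick up a newer release from another channel; the nixos-unstable channel name is an assumption, not part of this change:

    # sketch only; assumes a channel named nixos-unstable is available on NIX_PATH
    services.jira.package = (import <nixos-unstable> {}).atlassian-jira;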
diff --git a/nixos/modules/services/web-apps/matomo-doc.xml b/nixos/modules/services/web-apps/matomo-doc.xml
index 510a335edc3..20d2de9f418 100644
--- a/nixos/modules/services/web-apps/matomo-doc.xml
+++ b/nixos/modules/services/web-apps/matomo-doc.xml
@@ -12,15 +12,15 @@
  An automatic setup is not supported by Matomo, so you need to configure Matomo
   itself in the browser-based Matomo setup.
  </para>
+
  <section xml:id="module-services-matomo-database-setup">
   <title>Database Setup</title>
-
   <para>
   You also need to configure a MariaDB or MySQL database and database user for Matomo
    yourself, and enter those credentials in your browser. You can use
    passwordless database authentication via the UNIX_SOCKET authentication
    plugin with the following SQL commands:
-<programlisting>
+   <programlisting>
         # For MariaDB
         INSTALL PLUGIN unix_socket SONAME 'auth_socket';
         CREATE DATABASE matomo;
@@ -32,7 +32,7 @@
         CREATE DATABASE matomo;
         CREATE USER 'matomo'@'localhost' IDENTIFIED WITH auth_socket;
         GRANT ALL PRIVILEGES ON matomo.* TO 'matomo'@'localhost';
-      </programlisting>
+   </programlisting>
    Then fill in <literal>matomo</literal> as database user and database name,
    and leave the password field blank. This authentication works by allowing
    only the <literal>matomo</literal> unix user to authenticate as the
@@ -46,9 +46,30 @@
    database is not on the same host.
   </para>
  </section>
+
+ <section xml:id="module-services-matomo-archive-processing">
+  <title>Archive Processing</title>
+  <para>
+   This module comes with the systemd service <literal>matomo-archive-processing.service</literal>
+   and a timer that automatically triggers archive processing every hour.
+   This means that you can safely
+   <link xlink:href="https://matomo.org/docs/setup-auto-archiving/#disable-browser-triggers-for-matomo-archiving-and-limit-matomo-reports-to-updating-every-hour">
+    disable browser triggers for Matomo archiving
+   </link> at <literal>Administration > System > General Settings</literal>.
+  </para>
+  <para>
+   With automatic archive processing, you can now also enable the option to
+   <link xlink:href="https://matomo.org/docs/privacy/#step-2-delete-old-visitors-logs">
+    delete old visitor logs
+   </link> at <literal>Administration > System > Privacy</literal>,
+   but make sure that you run <literal>systemctl start matomo-archive-processing.service</literal>
+   at least once without errors if you have already collected data before,
+   so that the reports get archived before the source data gets deleted.
+  </para>
+ </section>
+
  <section xml:id="module-services-matomo-backups">
   <title>Backup</title>
-
   <para>
    You only need to take backups of your MySQL database and the
    <filename>/var/lib/matomo/config/config.ini.php</filename> file. Use a user
@@ -57,9 +78,9 @@
    <link xlink:href="https://matomo.org/faq/how-to-install/faq_138/" />.
   </para>
  </section>
+
  <section xml:id="module-services-matomo-issues">
   <title>Issues</title>
-
   <itemizedlist>
    <listitem>
     <para>
@@ -76,6 +97,7 @@
    </listitem>
   </itemizedlist>
  </section>
+
  <section xml:id="module-services-matomo-other-web-servers">
   <title>Using other Web Servers than nginx</title>
 
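A minimal configuration sketch matching the documentation above, using only options that appear in this change:

    services.matomo = {
      enable = true;
      # default; installs matomo-archive-processing.service plus an hourly timer
      periodicArchiveProcessing = true;
      # enable the nginx virtualHost with its sensible defaults
      nginx = {};
    };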
diff --git a/nixos/modules/services/web-apps/matomo.nix b/nixos/modules/services/web-apps/matomo.nix
index 9fddf832074..14aca45a342 100644
--- a/nixos/modules/services/web-apps/matomo.nix
+++ b/nixos/modules/services/web-apps/matomo.nix
@@ -23,20 +23,24 @@ in {
   options = {
     services.matomo = {
       # NixOS PR for database setup: https://github.com/NixOS/nixpkgs/pull/6963
-      # matomo issue for automatic matomo setup: https://github.com/matomo-org/matomo/issues/10257
-      # TODO: find a nice way to do this when more NixOS MySQL and / or matomo automatic setup stuff is implemented.
+      # Matomo issue for automatic Matomo setup: https://github.com/matomo-org/matomo/issues/10257
+      # TODO: find a nice way to do this when more NixOS MySQL and / or Matomo automatic setup stuff is implemented.
       enable = mkOption {
         type = types.bool;
         default = false;
         description = ''
-          Enable matomo web analytics with php-fpm backend.
+          Enable Matomo web analytics with php-fpm backend.
           Either the nginx option or the webServerUser option is mandatory.
         '';
       };
 
       package = mkOption {
         type = types.package;
-        description = "Matomo package to use";
+        description = ''
+          Matomo package for the service to use.
+          This can be used to point to newer releases from nixos-unstable,
+          as they don't get backported if they are not security-relevant.
+        '';
         default = pkgs.matomo;
         defaultText = "pkgs.matomo";
       };
@@ -45,12 +49,25 @@ in {
         type = types.nullOr types.str;
         default = null;
         example = "lighttpd";
-        # TODO: piwik.php might get renamed to matomo.php in future releases
         description = ''
-          Name of the web server user that forwards requests to the ${phpSocket} fastcgi socket for matomo if the nginx
+          Name of the web server user that forwards requests to the ${phpSocket} fastcgi socket for Matomo if the nginx
           option is not used. Either this option or the nginx option is mandatory.
           If you want to use another webserver than nginx, you need to set this to that server's user
-          and pass fastcgi requests to `index.php` and `piwik.php` to this socket.
+          and pass fastcgi requests for `index.php`, `matomo.php` and `piwik.php` (legacy name) to this socket.
+        '';
+      };
+
+      periodicArchiveProcessing = mkOption {
+        type = types.bool;
+        default = true;
+        description = ''
+          Enable periodic archive processing, which generates aggregated reports from the visits.
+
+          This means that you can safely disable browser triggers for Matomo archiving,
+          and safely enable the deletion of old visitor logs.
+          Before deleting visitor logs, make sure that you have run
+          <literal>systemctl start matomo-archive-processing.service</literal>
+          at least once without errors if you had already collected data before.
         '';
       };
 
@@ -69,7 +86,7 @@ in {
           catch_workers_output = yes
         '';
         description = ''
-          Settings for phpfpm's process manager. You might need to change this depending on the load for matomo.
+          Settings for phpfpm's process manager. You might need to change this depending on the load for Matomo.
         '';
       };
 
@@ -79,7 +96,7 @@ in {
             (import ../web-servers/nginx/vhost-options.nix { inherit config lib; })
             {
               # enable encryption by default,
-              # as sensitive login and matomo data should not be transmitted in clear text.
+              # as sensitive login and Matomo data should not be transmitted in clear text.
               options.forceSSL.default = true;
               options.enableACME.default = true;
             }
@@ -94,7 +111,7 @@ in {
           enableACME = false;
         };
         description = ''
-            With this option, you can customize an nginx virtualHost which already has sensible defaults for matomo.
+            With this option, you can customize an nginx virtualHost which already has sensible defaults for Matomo.
             Either this option or the webServerUser option is mandatory.
             Set this to {} to just enable the virtualHost if you don't need any customization.
             If enabled, then by default, the <option>serverName</option> is
@@ -124,29 +141,30 @@ in {
     };
     users.groups.${user} = {};
 
-    systemd.services.matomo_setup_update = {
-      # everything needs to set up and up to date before matomo php files are executed
+    systemd.services.matomo-setup-update = {
+      # everything needs to be set up and up to date before Matomo PHP files are executed
       requiredBy = [ "${phpExecutionUnit}.service" ];
       before = [ "${phpExecutionUnit}.service" ];
       # the update part of the script can only work if the database is already up and running
       requires = [ databaseService ];
       after = [ databaseService ];
       path = [ cfg.package ];
+      environment.PIWIK_USER_PATH = dataDir;
       serviceConfig = {
         Type = "oneshot";
         User = user;
         # hide especially config.ini.php from other
         UMask = "0007";
         # TODO: might get renamed to MATOMO_USER_PATH in future versions
-        Environment = "PIWIK_USER_PATH=${dataDir}";
         # chown + chmod in preStart needs root
         PermissionsStartOnly = true;
       };
+
       # correct ownership and permissions in case they're not correct anymore,
       # e.g. after restoring from backup or moving from another system.
       # Note that ${dataDir}/config/config.ini.php might contain the MySQL password.
       preStart = ''
-        # migrate data from piwik to matomo folder
+        # migrate data from piwik to Matomo folder
         if [ -d ${deprecatedDataDir} ]; then
           echo "Migrating from ${deprecatedDataDir} to ${dataDir}"
           mv -T ${deprecatedDataDir} ${dataDir}
@@ -155,7 +173,7 @@ in {
         chmod -R ug+rwX,o-rwx ${dataDir}
         '';
       script = ''
-            # Use User-Private Group scheme to protect matomo data, but allow administration / backup via matomo group
+            # Use User-Private Group scheme to protect Matomo data, but allow administration / backup via 'matomo' group
             # Copy config folder
             chmod g+s "${dataDir}"
             cp -r "${cfg.package}/config" "${dataDir}/"
@@ -169,8 +187,39 @@ in {
       '';
     };
 
+    # If this is run regularly via the timer,
+    # 'Browser trigger archiving' can be disabled in Matomo UI > Settings > General Settings.
+    systemd.services.matomo-archive-processing = {
+      description = "Archive Matomo reports";
+      # the archiving can only work if the database is already up and running
+      requires = [ databaseService ];
+      after = [ databaseService ];
+
+      # TODO: might get renamed to MATOMO_USER_PATH in future versions
+      environment.PIWIK_USER_PATH = dataDir;
+      serviceConfig = {
+        Type = "oneshot";
+        User = user;
+        UMask = "0007";
+        CPUSchedulingPolicy = "idle";
+        IOSchedulingClass = "idle";
+        ExecStart = "${cfg.package}/bin/matomo-console core:archive --url=https://${user}.${fqdn}";
+      };
+    };
+
+    systemd.timers.matomo-archive-processing = mkIf cfg.periodicArchiveProcessing {
+      description = "Automatically archive Matomo reports every hour";
+
+      wantedBy = [ "timers.target" ];
+      timerConfig = {
+        OnCalendar = "hourly";
+        Persistent = "yes";
+        AccuracySec = "10m";
+      };
+    };
+
     systemd.services.${phpExecutionUnit} = {
-      # stop phpfpm on package upgrade, do database upgrade via matomo_setup_update, and then restart
+      # stop phpfpm on package upgrade, do database upgrade via matomo-setup-update, and then restart
       restartTriggers = [ cfg.package ];
       # stop config.ini.php from getting written with read permission for others
       serviceConfig.UMask = "0007";
@@ -200,13 +249,13 @@ in {
       # https://fralef.me/piwik-hardening-with-nginx-and-php-fpm.html
       # https://github.com/perusio/piwik-nginx
       "${user}.${fqdn}" = mkMerge [ cfg.nginx {
-        # don't allow to override the root easily, as it will almost certainly break matomo.
+        # don't allow to override the root easily, as it will almost certainly break Matomo.
         # disadvantage: not shown as default in docs.
         root = mkForce "${cfg.package}/share";
 
         # define locations here instead of as the submodule option's default
         # so that they can easily be extended with additional locations if required
-        # without needing to redefine the matomo ones.
+        # without needing to redefine the Matomo ones.
         # disadvantage: not shown as default in docs.
         locations."/" = {
           index = "index.php";
@@ -215,8 +264,11 @@ in {
         locations."= /index.php".extraConfig = ''
           fastcgi_pass unix:${phpSocket};
         '';
-        # TODO: might get renamed to matomo.php in future versions
-        # allow piwik.php for tracking
+        # allow matomo.php for tracking
+        locations."= /matomo.php".extraConfig = ''
+          fastcgi_pass unix:${phpSocket};
+        '';
+        # allow piwik.php for tracking (deprecated name)
         locations."= /piwik.php".extraConfig = ''
           fastcgi_pass unix:${phpSocket};
         '';
@@ -237,8 +289,11 @@ in {
         locations."= /robots.txt".extraConfig = ''
           return 200 "User-agent: *\nDisallow: /\n";
         '';
-        # TODO: might get renamed to matomo.js in future versions
-        # let browsers cache piwik.js
+        # let browsers cache matomo.js
+        locations."= /matomo.js".extraConfig = ''
+          expires 1M;
+        '';
+        # let browsers cache piwik.js (deprecated name)
         locations."= /piwik.js".extraConfig = ''
           expires 1M;
         '';
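If the hourly schedule of the new timer is too frequent, it can be overridden like any other systemd timer; a sketch for a module with lib in scope, with "daily" as an arbitrary example value:

    systemd.timers.matomo-archive-processing.timerConfig.OnCalendar = lib.mkForce "daily";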
diff --git a/nixos/modules/services/web-apps/tt-rss.nix b/nixos/modules/services/web-apps/tt-rss.nix
index 90b35d19ea1..cf6f79c92f4 100644
--- a/nixos/modules/services/web-apps/tt-rss.nix
+++ b/nixos/modules/services/web-apps/tt-rss.nix
@@ -34,7 +34,14 @@ let
       define('DB_HOST', '${optionalString (cfg.database.host != null) cfg.database.host}');
       define('DB_USER', '${cfg.database.user}');
       define('DB_NAME', '${cfg.database.name}');
-      define('DB_PASS', '${optionalString (cfg.database.password != null) (escape ["'" "\\"] cfg.database.password)}');
+      define('DB_PASS', ${
+        if (cfg.database.password != null) then
+          "'${(escape ["'" "\\"] cfg.database.password)}'"
+        else if (cfg.database.passwordFile != null) then
+          "file_get_contents('${cfg.database.passwordFile}')"
+        else
+          "''" # neither password nor passwordFile set: fall back to an empty password, as before
+      });
       define('DB_PORT', '${toString dbPort}');
 
       define('AUTH_AUTO_CREATE', ${boolToString cfg.auth.autoCreate});
@@ -46,7 +53,17 @@ let
       define('SINGLE_USER_MODE', ${boolToString cfg.singleUserMode});
 
       define('SIMPLE_UPDATE_MODE', ${boolToString cfg.simpleUpdateMode});
-      define('CHECK_FOR_UPDATES', ${boolToString cfg.checkForUpdates});
+
+      // Never check for updates - the running version of the code should be
+      // controlled entirely by the version of TT-RSS active in the current Nix
+      // profile. If TT-RSS updates itself to a version requiring a database
+      // schema upgrade, and then the systemd tt-rss.service is restarted, the
+      // old code copied from the Nix store will overwrite the updated version,
+      // causing the code to detect the need for a schema "upgrade" (since the
+      // schema version in the database is different than in the code), but the
+      // update schema operation in TT-RSS will do nothing because the schema
+      // version in the database is newer than that in the code.
+      define('CHECK_FOR_UPDATES', false);
 
       define('FORCE_ARTICLE_PURGE', ${toString cfg.forceArticlePurge});
       define('SESSION_COOKIE_LIFETIME', ${toString cfg.sessionCookieLifetime});
@@ -168,6 +185,14 @@ let
           '';
         };
 
+        passwordFile = mkOption {
+          type = types.nullOr types.str;
+          default = null;
+          description = ''
+            A file containing the database user's password.
+          '';
+        };
+
         port = mkOption {
           type = types.nullOr types.int;
           default = null;
@@ -399,14 +424,6 @@ let
         '';
       };
 
-      checkForUpdates = mkOption {
-        type = types.bool;
-        default = true;
-        description = ''
-          Check for updates automatically if running Git version
-        '';
-      };
-
       enableGZipOutput = mkOption {
         type = types.bool;
         default = true;
@@ -474,26 +491,43 @@ let
     };
   };
 
+  imports = [
+    (mkRemovedOptionModule ["services" "tt-rss" "checkForUpdates"] ''
+      This option was removed because setting this to true will cause TT-RSS
+      to be unable to start if an automatic update of the code in
+      services.tt-rss.root leads to a database schema upgrade that is not
+      supported by the code active in the Nix store.
+    '')
+  ];
 
   ###### implementation
 
   config = mkIf cfg.enable {
 
-    services.phpfpm.poolConfigs = mkIf (cfg.pool == "${poolName}") {
-      "${poolName}" = ''
-        listen = "${phpfpmSocketName}";
-        listen.owner = nginx
-        listen.group = nginx
-        listen.mode = 0600
-        user = ${cfg.user}
-        pm = dynamic
-        pm.max_children = 75
-        pm.start_servers = 10
-        pm.min_spare_servers = 5
-        pm.max_spare_servers = 20
-        pm.max_requests = 500
-        catch_workers_output = 1
-      '';
+    assertions = [
+      {
+        assertion = cfg.database.password != null -> cfg.database.passwordFile == null;
+        message = "Cannot set both password and passwordFile";
+      }
+    ];
+
+    services.phpfpm.pools = mkIf (cfg.pool == "${poolName}") {
+      "${poolName}" = {
+        listen = "/var/run/phpfpm/${poolName}.sock";
+        extraConfig = ''
+          listen.owner = nginx
+          listen.group = nginx
+          listen.mode = 0600
+          user = ${cfg.user}
+          pm = dynamic
+          pm.max_children = 75
+          pm.start_servers = 10
+          pm.min_spare_servers = 5
+          pm.max_spare_servers = 20
+          pm.max_requests = 500
+          catch_workers_output = 1
+        '';
+      };
     };
 
     # NOTE: No configuration is done if not using virtual host
@@ -510,7 +544,7 @@ let
           locations."~ \.php$" = {
             extraConfig = ''
               fastcgi_split_path_info ^(.+\.php)(/.+)$;
-              fastcgi_pass unix:${phpfpmSocketName};
+              fastcgi_pass unix:${config.services.phpfpm.pools.${cfg.pool}.listen};
               fastcgi_index index.php;
             '';
           };
@@ -528,6 +562,7 @@ let
           callSql = e:
               if cfg.database.type == "pgsql" then ''
                   ${optionalString (cfg.database.password != null) "PGPASSWORD=${cfg.database.password}"} \
+                  ${optionalString (cfg.database.passwordFile != null) "PGPASSWORD=$(cat ${cfg.database.passwordFile})"} \
                   ${pkgs.sudo}/bin/sudo -u ${cfg.user} ${config.services.postgresql.package}/bin/psql \
                     -U ${cfg.database.user} \
                     ${optionalString (cfg.database.host != null) "-h ${cfg.database.host} --port ${toString dbPort}"} \
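A sketch of using the new passwordFile option instead of a password stored in the Nix store; the key path is hypothetical:

    services.tt-rss = {
      enable = true;
      database.passwordFile = "/run/keys/tt-rss-db-password"; # hypothetical path
    };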
diff --git a/nixos/modules/services/web-servers/phpfpm/default.nix b/nixos/modules/services/web-servers/phpfpm/default.nix
index 152c89a2cae..97c730061bd 100644
--- a/nixos/modules/services/web-servers/phpfpm/default.nix
+++ b/nixos/modules/services/web-servers/phpfpm/default.nix
@@ -14,11 +14,13 @@ let
 
   mapPoolConfig = n: p: {
     phpPackage = cfg.phpPackage;
+    phpOptions = cfg.phpOptions;
     config = p;
   };
 
   mapPool = n: p: {
     phpPackage = p.phpPackage;
+    phpOptions = p.phpOptions;
     config = ''
       listen = ${p.listen}
       ${p.extraConfig}
@@ -35,8 +37,8 @@ let
     ${conf}
   '';
 
-  phpIni = pkgs.runCommand "php.ini" {
-    inherit (cfg) phpPackage phpOptions;
+  phpIni = pool: pkgs.runCommand "php.ini" {
+    inherit (pool) phpPackage phpOptions;
     nixDefaults = ''
       sendmail_path = "/run/wrappers/bin/sendmail -t -i"
     '';
@@ -156,6 +158,7 @@ in {
         '';
         serviceConfig = let
           cfgFile = fpmCfgFile pool poolConfig.config;
+          iniFile = phpIni poolConfig;
         in {
           Slice = "phpfpm.slice";
           PrivateDevices = true;
@@ -164,7 +167,7 @@ in {
           # XXX: We need AF_NETLINK to make the sendmail SUID binary from postfix work
           RestrictAddressFamilies = "AF_UNIX AF_INET AF_INET6 AF_NETLINK";
           Type = "notify";
-          ExecStart = "${poolConfig.phpPackage}/bin/php-fpm -y ${cfgFile} -c ${phpIni}";
+          ExecStart = "${poolConfig.phpPackage}/bin/php-fpm -y ${cfgFile} -c ${iniFile}";
           ExecReload = "${pkgs.coreutils}/bin/kill -USR2 $MAINPID";
         };
       }
diff --git a/nixos/modules/services/web-servers/phpfpm/pool-options.nix b/nixos/modules/services/web-servers/phpfpm/pool-options.nix
index 40c83cddb95..d9ad7eff71f 100644
--- a/nixos/modules/services/web-servers/phpfpm/pool-options.nix
+++ b/nixos/modules/services/web-servers/phpfpm/pool-options.nix
@@ -25,6 +25,15 @@ with lib; {
       '';
     };
 
+    phpOptions = mkOption {
+      type = types.lines;
+      default = fpmCfg.phpOptions;
+      defaultText = "config.services.phpfpm.phpOptions";
+      description = ''
+        Options appended to the PHP configuration file <filename>php.ini</filename> used for this PHP-FPM pool.
+      '';
+    };
+
     extraConfig = mkOption {
       type = types.lines;
       example = ''
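With phpOptions now settable per pool, a single pool can override the global PHP settings; a sketch with an illustrative pool name and values:

    services.phpfpm.pools.example = {
      listen = "/run/phpfpm/example.sock";
      phpOptions = ''
        memory_limit = 256M
      '';
      extraConfig = ''
        user = nginx
        pm = dynamic
        pm.max_children = 10
        pm.start_servers = 2
        pm.min_spare_servers = 1
        pm.max_spare_servers = 3
      '';
    };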
diff --git a/nixos/modules/services/x11/xautolock.nix b/nixos/modules/services/x11/xautolock.nix
index a614559970e..cbe000058dc 100644
--- a/nixos/modules/services/x11/xautolock.nix
+++ b/nixos/modules/services/x11/xautolock.nix
@@ -21,7 +21,7 @@ in
           type = types.int;
 
           description = ''
-            Idle time to wait until xautolock locks the computer.
+            Idle time (in minutes) to wait until xautolock locks the computer.
           '';
         };
 
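Assuming the option shown here is services.xserver.xautolock.time, a minimal sketch of a 15-minute lock timeout:

    services.xserver.xautolock = {
      enable = true;
      time = 15; # minutes, per the clarified description above
    };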
diff --git a/nixos/modules/system/boot/loader/raspberrypi/raspberrypi.nix b/nixos/modules/system/boot/loader/raspberrypi/raspberrypi.nix
index 047651dc642..7db60daa60b 100644
--- a/nixos/modules/system/boot/loader/raspberrypi/raspberrypi.nix
+++ b/nixos/modules/system/boot/loader/raspberrypi/raspberrypi.nix
@@ -33,7 +33,7 @@ let
       avoid_warnings=1
     '' + optional isAarch64 ''
       # Boot in 64-bit mode.
-      arm_control=0x200
+      arm_64bit=1
     '' + (if cfg.uboot.enable then ''
       kernel=u-boot-rpi.bin
     '' else ''
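cfg here is the boot.loader.raspberryPi option set, so an aarch64 configuration that ends up with arm_64bit=1 in config.txt might look like this sketch (the version value is illustrative):

    boot.loader.raspberryPi = {
      enable = true;
      version = 3;         # illustrative
      uboot.enable = true; # corresponds to cfg.uboot.enable above
    };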
diff --git a/nixos/modules/system/boot/systemd.nix b/nixos/modules/system/boot/systemd.nix
index f783daba902..58812bf33d9 100644
--- a/nixos/modules/system/boot/systemd.nix
+++ b/nixos/modules/system/boot/systemd.nix
@@ -321,7 +321,9 @@ let
             in concatMapStrings (n:
               let s = optionalString (env."${n}" != null)
                 "Environment=${builtins.toJSON "${n}=${env.${n}}"}\n";
-              in if stringLength s >= 2048 then throw "The value of the environment variable ‘${n}’ in systemd service ‘${name}.service’ is too long." else s) (attrNames env)}
+              # systemd max line length is now 1MiB
+              # https://github.com/systemd/systemd/commit/e6dde451a51dc5aaa7f4d98d39b8fe735f73d2af
+              in if stringLength s >= 1048576 then throw "The value of the environment variable ‘${n}’ in systemd service ‘${name}.service’ is too long." else s) (attrNames env)}
           ${if def.reloadIfChanged then ''
             X-ReloadIfChanged=true
           '' else if !def.restartIfChanged then ''
diff --git a/nixos/modules/virtualisation/amazon-image.nix b/nixos/modules/virtualisation/amazon-image.nix
index 9015200beea..6f4f99caa6f 100644
--- a/nixos/modules/virtualisation/amazon-image.nix
+++ b/nixos/modules/virtualisation/amazon-image.nix
@@ -8,7 +8,13 @@
 
 with lib;
 
-let cfg = config.ec2; in
+let
+  cfg = config.ec2;
+  metadataFetcher = import ./ec2-metadata-fetcher.nix {
+    targetRoot = "$targetRoot/";
+    wgetExtraOptions = "-q";
+  };
+in
 
 {
   imports = [ ../profiles/headless.nix ./ec2-data.nix ./amazon-init.nix ];
@@ -61,26 +67,7 @@ let cfg = config.ec2; in
     # Nix operations.
     boot.initrd.postMountCommands =
       ''
-        metaDir=$targetRoot/etc/ec2-metadata
-        mkdir -m 0755 -p "$metaDir"
-
-        echo "getting EC2 instance metadata..."
-
-        if ! [ -e "$metaDir/ami-manifest-path" ]; then
-          wget -q -O "$metaDir/ami-manifest-path" http://169.254.169.254/1.0/meta-data/ami-manifest-path
-        fi
-
-        if ! [ -e "$metaDir/user-data" ]; then
-          wget -q -O "$metaDir/user-data" http://169.254.169.254/1.0/user-data && chmod 600 "$metaDir/user-data"
-        fi
-
-        if ! [ -e "$metaDir/hostname" ]; then
-          wget -q -O "$metaDir/hostname" http://169.254.169.254/1.0/meta-data/hostname
-        fi
-
-        if ! [ -e "$metaDir/public-keys-0-openssh-key" ]; then
-          wget -q -O "$metaDir/public-keys-0-openssh-key" http://169.254.169.254/1.0/meta-data/public-keys/0/openssh-key
-        fi
+        ${metadataFetcher}
 
         diskNr=0
         diskForUnionfs=
diff --git a/nixos/modules/virtualisation/docker-preloader.nix b/nixos/modules/virtualisation/docker-preloader.nix
index faa94f53d98..6ab83058dee 100644
--- a/nixos/modules/virtualisation/docker-preloader.nix
+++ b/nixos/modules/virtualisation/docker-preloader.nix
@@ -78,12 +78,11 @@ in
     };
   };
 
-  config = {
+  config = mkIf (cfg.dockerPreloader.images != []) {
     assertions = [{
       # If docker.storageDriver is null, Docker chooses the storage
       # driver. So, in this case, we cannot be sure overlay2 is used.
-      assertion = cfg.dockerPreloader.images == []
-        || cfg.docker.storageDriver == "overlay2"
+      assertion = cfg.docker.storageDriver == "overlay2"
         || cfg.docker.storageDriver == "overlay"
         || cfg.docker.storageDriver == null;
       message = "The Docker image Preloader only works with overlay2 storage driver!";
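With this change the preloader only takes effect when images are actually listed; a hedged usage sketch, with the option paths inferred from the cfg references above and the example image assumed to exist in dockerTools.examples:

    virtualisation.docker.enable = true;
    virtualisation.dockerPreloader.images = [ pkgs.dockerTools.examples.redis ];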
diff --git a/nixos/modules/virtualisation/ec2-metadata-fetcher.nix b/nixos/modules/virtualisation/ec2-metadata-fetcher.nix
new file mode 100644
index 00000000000..b531787c31a
--- /dev/null
+++ b/nixos/modules/virtualisation/ec2-metadata-fetcher.nix
@@ -0,0 +1,23 @@
+{ targetRoot, wgetExtraOptions }:
+''
+  metaDir=${targetRoot}etc/ec2-metadata
+  mkdir -m 0755 -p "$metaDir"
+
+  echo "getting EC2 instance metadata..."
+
+  if ! [ -e "$metaDir/ami-manifest-path" ]; then
+    wget ${wgetExtraOptions} -O "$metaDir/ami-manifest-path" http://169.254.169.254/1.0/meta-data/ami-manifest-path
+  fi
+
+  if ! [ -e "$metaDir/user-data" ]; then
+    wget ${wgetExtraOptions} -O "$metaDir/user-data" http://169.254.169.254/1.0/user-data && chmod 600 "$metaDir/user-data"
+  fi
+
+  if ! [ -e "$metaDir/hostname" ]; then
+    wget ${wgetExtraOptions} -O "$metaDir/hostname" http://169.254.169.254/1.0/meta-data/hostname
+  fi
+
+  if ! [ -e "$metaDir/public-keys-0-openssh-key" ]; then
+    wget ${wgetExtraOptions} -O "$metaDir/public-keys-0-openssh-key" http://169.254.169.254/1.0/meta-data/public-keys/0/openssh-key
+  fi
+''
diff --git a/nixos/modules/virtualisation/nova-config.nix b/nixos/modules/virtualisation/nova-config.nix
deleted file mode 100644
index cecf2a3f144..00000000000
--- a/nixos/modules/virtualisation/nova-config.nix
+++ /dev/null
@@ -1,60 +0,0 @@
-{ lib, ... }:
-
-with lib;
-
-{
-  imports = [
-    ../profiles/qemu-guest.nix
-    ../profiles/headless.nix
-  ];
-
-  config = {
-    fileSystems."/" = {
-      device = "/dev/disk/by-label/nixos";
-      autoResize = true;
-    };
-
-    boot.growPartition = true;
-    boot.kernelParams = [ "console=ttyS0" ];
-    boot.loader.grub.device = "/dev/vda";
-    boot.loader.timeout = 0;
-
-    # Allow root logins
-    services.openssh = {
-      enable = true;
-      permitRootLogin = "prohibit-password";
-      passwordAuthentication = mkDefault false;
-    };
-
-    services.cloud-init.enable = true;
-
-    # Put /tmp and /var on /ephemeral0, which has a lot more space.
-    # Unfortunately we can't do this with the `fileSystems' option
-    # because it has no support for creating the source of a bind
-    # mount.  Also, "move" /nix to /ephemeral0 by layering a unionfs-fuse
-    # mount on top of it so we have a lot more space for Nix operations.
-
-    /*
-    boot.initrd.postMountCommands =
-      ''
-        mkdir -m 1777 -p $targetRoot/ephemeral0/tmp
-        mkdir -m 1777 -p $targetRoot/tmp
-        mount --bind $targetRoot/ephemeral0/tmp $targetRoot/tmp
-
-        mkdir -m 755 -p $targetRoot/ephemeral0/var
-        mkdir -m 755 -p $targetRoot/var
-        mount --bind $targetRoot/ephemeral0/var $targetRoot/var
-
-        mkdir -p /unionfs-chroot/ro-nix
-        mount --rbind $targetRoot/nix /unionfs-chroot/ro-nix
-
-        mkdir -p /unionfs-chroot/rw-nix
-        mkdir -m 755 -p $targetRoot/ephemeral0/nix
-        mount --rbind $targetRoot/ephemeral0/nix /unionfs-chroot/rw-nix
-        unionfs -o allow_other,cow,nonempty,chroot=/unionfs-chroot,max_files=32768 /rw-nix=RW:/ro-nix=RO $targetRoot/nix
-      '';
-
-      boot.initrd.supportedFilesystems = [ "unionfs-fuse" ];
-    */
-  };
-}
diff --git a/nixos/modules/virtualisation/openstack-config.nix b/nixos/modules/virtualisation/openstack-config.nix
new file mode 100644
index 00000000000..d5e862da0ea
--- /dev/null
+++ b/nixos/modules/virtualisation/openstack-config.nix
@@ -0,0 +1,57 @@
+{ pkgs, lib, ... }:
+
+with lib;
+
+let
+  metadataFetcher = import ./ec2-metadata-fetcher.nix {
+    targetRoot = "/";
+    wgetExtraOptions = "--retry-connrefused";
+  };
+in
+{
+  imports = [
+    ../profiles/qemu-guest.nix
+    ../profiles/headless.nix
+    # The OpenStack metadata service also exposes its data via an EC2-compatible API.
+    ./ec2-data.nix
+    ./amazon-init.nix
+  ];
+
+  config = {
+    fileSystems."/" = {
+      device = "/dev/disk/by-label/nixos";
+      autoResize = true;
+    };
+
+    boot.growPartition = true;
+    boot.kernelParams = [ "console=ttyS0" ];
+    boot.loader.grub.device = "/dev/vda";
+    boot.loader.timeout = 0;
+
+    # Allow root logins
+    services.openssh = {
+      enable = true;
+      permitRootLogin = "prohibit-password";
+      passwordAuthentication = mkDefault false;
+    };
+
+    # Force getting the hostname from OpenStack metadata.
+    networking.hostName = mkDefault "";
+
+    systemd.services.openstack-init = {
+      path = [ pkgs.wget ];
+      description = "Fetch Metadata on startup";
+      wantedBy = [ "multi-user.target" ];
+      before = [ "apply-ec2-data.service" "amazon-init.service"];
+      wants = [ "network-online.target" ];
+      after = [ "network-online.target" ];
+      script = metadataFetcher;
+      restartIfChanged = false;
+      unitConfig.X-StopOnRemoval = false;
+      serviceConfig = {
+        Type = "oneshot";
+        RemainAfterExit = true;
+      };
+    };
+  };
+}
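The module is intended to be imported into an image or instance configuration; the userdata test added below uses exactly this pattern, so a minimal consumer sketch is:

    {
      imports = [ <nixpkgs/nixos/modules/virtualisation/openstack-config.nix> ];
    }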
diff --git a/nixos/tests/all-tests.nix b/nixos/tests/all-tests.nix
index 4450cafd280..229f2c3abf7 100644
--- a/nixos/tests/all-tests.nix
+++ b/nixos/tests/all-tests.nix
@@ -73,6 +73,7 @@ in
   ferm = handleTest ./ferm.nix {};
   firefox = handleTest ./firefox.nix {};
   firewall = handleTest ./firewall.nix {};
+  flannel = handleTestOn ["x86_64-linux"] ./flannel.nix {};
   flatpak = handleTest ./flatpak.nix {};
   fsck = handleTest ./fsck.nix {};
   fwupd = handleTestOn ["x86_64-linux"] ./fwupd.nix {}; # libsmbios is unsupported on aarch64
@@ -163,7 +164,11 @@ in
   openldap = handleTest ./openldap.nix {};
   opensmtpd = handleTest ./opensmtpd.nix {};
   openssh = handleTest ./openssh.nix {};
+  # openstack-image-userdata doesn't work in a sandbox, as the simulated OpenStack instance needs network access
+  #openstack-image-userdata = (handleTestOn ["x86_64-linux"] ./openstack-image.nix {}).userdata or {};
+  openstack-image-metadata = (handleTestOn ["x86_64-linux"] ./openstack-image.nix {}).metadata or {};
   osquery = handleTest ./osquery.nix {};
+  osrm-backend = handleTest ./osrm-backend.nix {};
   ostree = handleTest ./ostree.nix {};
   pam-oath-login = handleTest ./pam-oath-login.nix {};
   pam-u2f = handleTest ./pam-u2f.nix {};
diff --git a/nixos/tests/common/ec2.nix b/nixos/tests/common/ec2.nix
new file mode 100644
index 00000000000..1e69b63191a
--- /dev/null
+++ b/nixos/tests/common/ec2.nix
@@ -0,0 +1,49 @@
+{ pkgs, makeTest }:
+
+with pkgs.lib;
+
+{
+  makeEc2Test = { name, image, userData, script, hostname ? "ec2-instance", sshPublicKey ? null }:
+    let
+      metaData = pkgs.stdenv.mkDerivation {
+        name = "metadata";
+        buildCommand = ''
+          mkdir -p $out/1.0/meta-data
+          ln -s ${pkgs.writeText "userData" userData} $out/1.0/user-data
+          echo "${hostname}" > $out/1.0/meta-data/hostname
+          echo "(unknown)" > $out/1.0/meta-data/ami-manifest-path
+        '' + optionalString (sshPublicKey != null) ''
+          mkdir -p $out/1.0/meta-data/public-keys/0
+          ln -s ${pkgs.writeText "sshPublicKey" sshPublicKey} $out/1.0/meta-data/public-keys/0/openssh-key
+        '';
+      };
+    in makeTest {
+      name = "ec2-" + name;
+      nodes = {};
+      testScript =
+        ''
+          my $imageDir = ($ENV{'TMPDIR'} // "/tmp") . "/vm-state-machine";
+          mkdir $imageDir, 0700;
+          my $diskImage = "$imageDir/machine.qcow2";
+          system("qemu-img create -f qcow2 -o backing_file=${image}/nixos.qcow2 $diskImage") == 0 or die;
+          system("qemu-img resize $diskImage 10G") == 0 or die;
+
+          # Note: we use net=169.0.0.0/8 rather than
+          # net=169.254.0.0/16 to prevent dhcpcd from getting horribly
+          # confused. (It would get a DHCP lease in the 169.254.*
+          # range, which it would then configure and promptly delete
+          # again when it deletes link-local addresses.) Ideally we'd
+          # turn off the DHCP server, but qemu does not have an option
+          # to do that.
+          my $startCommand = "qemu-kvm -m 768";
+          $startCommand .= " -device virtio-net-pci,netdev=vlan0";
+          $startCommand .= " -netdev 'user,id=vlan0,net=169.0.0.0/8,guestfwd=tcp:169.254.169.254:80-cmd:${pkgs.micro-httpd}/bin/micro_httpd ${metaData}'";
+          $startCommand .= " -drive file=$diskImage,if=virtio,werror=report";
+          $startCommand .= " \$QEMU_OPTS";
+
+          my $machine = createMachine({ startCommand => $startCommand });
+
+          ${script}
+        '';
+    };
+}
diff --git a/nixos/tests/ec2.nix b/nixos/tests/ec2.nix
index ed6bf7da988..384fce67c22 100644
--- a/nixos/tests/ec2.nix
+++ b/nixos/tests/ec2.nix
@@ -6,6 +6,8 @@
 with import ../lib/testing.nix { inherit system pkgs; };
 with pkgs.lib;
 
+with import common/ec2.nix { inherit makeTest pkgs; };
+
 let
   image =
     (import ../lib/eval-config.nix {
@@ -39,65 +41,14 @@ let
       ];
     }).config.system.build.amazonImage;
 
-  makeEc2Test = { name, userData, script, hostname ? "ec2-instance", sshPublicKey ? null }:
-    let
-      metaData = pkgs.stdenv.mkDerivation {
-        name = "metadata";
-        buildCommand = ''
-          mkdir -p $out/1.0/meta-data
-          ln -s ${pkgs.writeText "userData" userData} $out/1.0/user-data
-          echo "${hostname}" > $out/1.0/meta-data/hostname
-          echo "(unknown)" > $out/1.0/meta-data/ami-manifest-path
-        '' + optionalString (sshPublicKey != null) ''
-          mkdir -p $out/1.0/meta-data/public-keys/0
-          ln -s ${pkgs.writeText "sshPublicKey" sshPublicKey} $out/1.0/meta-data/public-keys/0/openssh-key
-        '';
-      };
-    in makeTest {
-      name = "ec2-" + name;
-      nodes = {};
-      testScript =
-        ''
-          my $imageDir = ($ENV{'TMPDIR'} // "/tmp") . "/vm-state-machine";
-          mkdir $imageDir, 0700;
-          my $diskImage = "$imageDir/machine.qcow2";
-          system("qemu-img create -f qcow2 -o backing_file=${image}/nixos.qcow2 $diskImage") == 0 or die;
-          system("qemu-img resize $diskImage 10G") == 0 or die;
-
-          # Note: we use net=169.0.0.0/8 rather than
-          # net=169.254.0.0/16 to prevent dhcpcd from getting horribly
-          # confused. (It would get a DHCP lease in the 169.254.*
-          # range, which it would then configure and prompty delete
-          # again when it deletes link-local addresses.) Ideally we'd
-          # turn off the DHCP server, but qemu does not have an option
-          # to do that.
-          my $startCommand = "qemu-kvm -m 768";
-          $startCommand .= " -device virtio-net-pci,netdev=vlan0";
-          $startCommand .= " -netdev 'user,id=vlan0,net=169.0.0.0/8,guestfwd=tcp:169.254.169.254:80-cmd:${pkgs.micro-httpd}/bin/micro_httpd ${metaData}'";
-          $startCommand .= " -drive file=$diskImage,if=virtio,werror=report";
-          $startCommand .= " \$QEMU_OPTS";
-
-          my $machine = createMachine({ startCommand => $startCommand });
-
-          ${script}
-        '';
-    };
-
-  snakeOilPrivateKey = ''
-    -----BEGIN OPENSSH PRIVATE KEY-----
-    b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
-    QyNTUxOQAAACDEPmwZv5dDPrMUaq0dDP+6eBTTe+QNrz14KBEIdhHd1QAAAJDufJ4S7nye
-    EgAAAAtzc2gtZWQyNTUxOQAAACDEPmwZv5dDPrMUaq0dDP+6eBTTe+QNrz14KBEIdhHd1Q
-    AAAECgwbDlYATM5/jypuptb0GF/+zWZcJfoVIFBG3LQeRyGsQ+bBm/l0M+sxRqrR0M/7p4
-    FNN75A2vPXgoEQh2Ed3VAAAADEVDMiB0ZXN0IGtleQE=
-    -----END OPENSSH PRIVATE KEY-----
-  '';
-
-  snakeOilPublicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMQ+bBm/l0M+sxRqrR0M/7p4FNN75A2vPXgoEQh2Ed3V EC2 test key";
+  sshKeys = import ./ssh-keys.nix pkgs;
+  snakeOilPrivateKey = sshKeys.snakeOilPrivateKey.text;
+  snakeOilPublicKey = sshKeys.snakeOilPublicKey;
 
 in {
   boot-ec2-nixops = makeEc2Test {
     name         = "nixops-userdata";
+    inherit image;
     sshPublicKey = snakeOilPublicKey; # That's right folks! My user's key is also the host key!
 
     userData = ''
@@ -142,6 +93,7 @@ in {
 
   boot-ec2-config = makeEc2Test {
     name         = "config-userdata";
+    inherit image;
     sshPublicKey = snakeOilPublicKey;
 
     # ### http://nixos.org/channels/nixos-unstable nixos
diff --git a/nixos/tests/flannel.nix b/nixos/tests/flannel.nix
index fb66fe28209..0b261a68477 100644
--- a/nixos/tests/flannel.nix
+++ b/nixos/tests/flannel.nix
@@ -21,8 +21,9 @@ import ./make-test.nix ({ pkgs, ...} : rec {
       services = {
         etcd = {
           enable = true;
-          listenClientUrls = ["http://etcd:2379"];
-          listenPeerUrls = ["http://etcd:2380"];
+          listenClientUrls = ["http://0.0.0.0:2379"]; # requires an IP address for binding
+          listenPeerUrls = ["http://0.0.0.0:2380"]; # requires an IP address for binding
+          advertiseClientUrls = ["http://etcd:2379"];
           initialAdvertisePeerUrls = ["http://etcd:2379"];
           initialCluster = ["etcd=http://etcd:2379"];
         };
diff --git a/nixos/tests/hydra/default.nix b/nixos/tests/hydra/default.nix
index db4e97e0039..882bced86d3 100644
--- a/nixos/tests/hydra/default.nix
+++ b/nixos/tests/hydra/default.nix
@@ -1,77 +1,91 @@
-import ../make-test.nix ({ pkgs, ...} :
+{ system ? builtins.currentSystem
+, config ? { }
+, pkgs ? import ../../.. { inherit system config; }
+}:
 
 let
-   trivialJob = pkgs.writeTextDir "trivial.nix" ''
-     { trivial = builtins.derivation {
-         name = "trivial";
-         system = "x86_64-linux";
-         builder = "/bin/sh";
-         args = ["-c" "echo success > $out; exit 0"];
-       };
-     }
-   '';
-
-    createTrivialProject = pkgs.stdenv.mkDerivation {
-      name = "create-trivial-project";
-      unpackPhase = ":";
-      buildInputs = [ pkgs.makeWrapper ];
-      installPhase = "install -m755 -D ${./create-trivial-project.sh} $out/bin/create-trivial-project.sh";
-      postFixup = ''
-        wrapProgram "$out/bin/create-trivial-project.sh" --prefix PATH ":" ${pkgs.stdenv.lib.makeBinPath [ pkgs.curl ]} --set EXPR_PATH ${trivialJob}
-      '';
-    };
-
-in {
-  name = "hydra-init-localdb";
-  meta = with pkgs.stdenv.lib.maintainers; {
-    maintainers = [ pstn lewo ma27 ];
-  };
 
-  machine =
-    { pkgs, ... }:
-
-    {
-      virtualisation.memorySize = 1024;
-      time.timeZone = "UTC";
-
-      environment.systemPackages = [ createTrivialProject pkgs.jq ];
-      services.hydra = {
-        enable = true;
-
-        #Hydra needs those settings to start up, so we add something not harmfull.
-        hydraURL = "example.com";
-        notificationSender = "example@example.com";
-      };
-      nix = {
-        buildMachines = [{
-          hostName = "localhost";
-          systems = [ "x86_64-linux" ];
-        }];
-
-        binaryCaches = [];
-      };
-    };
-
-  testScript =
-    ''
-      # let the system boot up
-      $machine->waitForUnit("multi-user.target");
-      # test whether the database is running
-      $machine->succeed("systemctl status postgresql.service");
-      # test whether the actual hydra daemons are running
-      $machine->succeed("systemctl status hydra-queue-runner.service");
-      $machine->succeed("systemctl status hydra-init.service");
-      $machine->succeed("systemctl status hydra-evaluator.service");
-      $machine->succeed("systemctl status hydra-send-stats.service");
-
-      $machine->succeed("hydra-create-user admin --role admin --password admin");
-
-      # create a project with a trivial job
-      $machine->waitForOpenPort(3000);
-
-      # make sure the build as been successfully built
-      $machine->succeed("create-trivial-project.sh");
-
-      $machine->waitUntilSucceeds('curl -L -s http://localhost:3000/build/1 -H "Accept: application/json" |  jq .buildstatus | xargs test 0 -eq');
+  trivialJob = pkgs.writeTextDir "trivial.nix" ''
+   { trivial = builtins.derivation {
+       name = "trivial";
+       system = "x86_64-linux";
+       builder = "/bin/sh";
+       args = ["-c" "echo success > $out; exit 0"];
+     };
+   }
+  '';
+
+  createTrivialProject = pkgs.stdenv.mkDerivation {
+    name = "create-trivial-project";
+    unpackPhase = ":";
+    buildInputs = [ pkgs.makeWrapper ];
+    installPhase = "install -m755 -D ${./create-trivial-project.sh} $out/bin/create-trivial-project.sh";
+    postFixup = ''
+      wrapProgram "$out/bin/create-trivial-project.sh" --prefix PATH ":" ${pkgs.stdenv.lib.makeBinPath [ pkgs.curl ]} --set EXPR_PATH ${trivialJob}
     '';
-})
+  };
+
+  callTest = f: f { inherit system pkgs; };
+
+  hydraPkgs = {
+    inherit (pkgs) nixStable nixUnstable;
+  };
+
+  tests = pkgs.lib.flip pkgs.lib.mapAttrs hydraPkgs (name: nix:
+    callTest (import ../make-test.nix ({ pkgs, lib, ... }:
+      {
+        name = "hydra-with-${name}";
+        meta = with pkgs.stdenv.lib.maintainers; {
+          maintainers = [ pstn lewo ma27 ];
+        };
+
+        machine = { pkgs, ... }:
+          {
+            virtualisation.memorySize = 1024;
+            time.timeZone = "UTC";
+
+            environment.systemPackages = [ createTrivialProject pkgs.jq ];
+            services.hydra = {
+              enable = true;
+
+              # Hydra needs those settings to start up, so we add something harmless.
+              hydraURL = "example.com";
+              notificationSender = "example@example.com";
+
+              package = pkgs.hydra.override { inherit nix; };
+            };
+            nix = {
+              buildMachines = [{
+                hostName = "localhost";
+                systems = [ "x86_64-linux" ];
+              }];
+
+              binaryCaches = [];
+            };
+          };
+
+        testScript = ''
+          # let the system boot up
+          $machine->waitForUnit("multi-user.target");
+          # test whether the database is running
+          $machine->succeed("systemctl status postgresql.service");
+          # test whether the actual hydra daemons are running
+          $machine->succeed("systemctl status hydra-queue-runner.service");
+          $machine->succeed("systemctl status hydra-init.service");
+          $machine->succeed("systemctl status hydra-evaluator.service");
+          $machine->succeed("systemctl status hydra-send-stats.service");
+
+          $machine->succeed("hydra-create-user admin --role admin --password admin");
+
+          # create a project with a trivial job
+          $machine->waitForOpenPort(3000);
+
+          # make sure the build has been successfully built
+          $machine->succeed("create-trivial-project.sh");
+
+          $machine->waitUntilSucceeds('curl -L -s http://localhost:3000/build/1 -H "Accept: application/json" |  jq .buildstatus | xargs test 0 -eq');
+        '';
+      })));
+
+in
+  tests
diff --git a/nixos/tests/matrix-synapse.nix b/nixos/tests/matrix-synapse.nix
index 8504a7c0d05..882e4b75814 100644
--- a/nixos/tests/matrix-synapse.nix
+++ b/nixos/tests/matrix-synapse.nix
@@ -1,4 +1,32 @@
-import ./make-test.nix ({ pkgs, ... } : {
+import ./make-test.nix ({ pkgs, ... } : let
+
+
+  runWithOpenSSL = file: cmd: pkgs.runCommand file {
+    buildInputs = [ pkgs.openssl ];
+  } cmd;
+
+
+  ca_key = runWithOpenSSL "ca-key.pem" "openssl genrsa -out $out 2048";
+  ca_pem = runWithOpenSSL "ca.pem" ''
+    openssl req \
+      -x509 -new -nodes -key ${ca_key} \
+      -days 10000 -out $out -subj "/CN=snakeoil-ca"
+  '';
+  key = runWithOpenSSL "matrix_key.pem" "openssl genrsa -out $out 2048";
+  csr = runWithOpenSSL "matrix.csr" ''
+    openssl req \
+       -new -key ${key} \
+       -out $out -subj "/CN=localhost" \
+  '';
+  cert = runWithOpenSSL "matrix_cert.pem" ''
+    openssl x509 \
+      -req -in ${csr} \
+      -CA ${ca_pem} -CAkey ${ca_key} \
+      -CAcreateserial -out $out \
+      -days 365
+  '';
+
+in {
 
   name = "matrix-synapse";
   meta = with pkgs.stdenv.lib.maintainers; {
@@ -8,23 +36,31 @@ import ./make-test.nix ({ pkgs, ... } : {
   nodes = {
     # Since 0.33.0, matrix-synapse doesn't allow underscores in server names
     serverpostgres = args: {
-      services.matrix-synapse.enable = true;
-      services.matrix-synapse.database_type = "psycopg2";
+      services.matrix-synapse = {
+        enable = true;
+        database_type = "psycopg2";
+        tls_certificate_path = "${cert}";
+        tls_private_key_path = "${key}";
+      };
     };
 
     serversqlite = args: {
-      services.matrix-synapse.enable = true;
-      services.matrix-synapse.database_type = "sqlite3";
+      services.matrix-synapse = {
+        enable = true;
+        database_type = "sqlite3";
+        tls_certificate_path = "${cert}";
+        tls_private_key_path = "${key}";
+      };
     };
   };
 
   testScript = ''
     startAll;
     $serverpostgres->waitForUnit("matrix-synapse.service");
-    $serverpostgres->waitUntilSucceeds("curl -Lk https://localhost:8448/");
+    $serverpostgres->waitUntilSucceeds("curl -L --cacert ${ca_pem} https://localhost:8448/");
     $serverpostgres->requireActiveUnit("postgresql.service");
     $serversqlite->waitForUnit("matrix-synapse.service");
-    $serversqlite->waitUntilSucceeds("curl -Lk https://localhost:8448/");
+    $serversqlite->waitUntilSucceeds("curl -L --cacert ${ca_pem} https://localhost:8448/");
     $serversqlite->mustSucceed("[ -e /var/lib/matrix-synapse/homeserver.db ]");
   '';
 
diff --git a/nixos/tests/openstack-image.nix b/nixos/tests/openstack-image.nix
new file mode 100644
index 00000000000..d0225016ab7
--- /dev/null
+++ b/nixos/tests/openstack-image.nix
@@ -0,0 +1,88 @@
+{ system ? builtins.currentSystem,
+  config ? {},
+  pkgs ? import ../.. { inherit system config; }
+}:
+
+with import ../lib/testing.nix { inherit system pkgs; };
+with pkgs.lib;
+
+with import common/ec2.nix { inherit makeTest pkgs; };
+
+let
+  image =
+    (import ../lib/eval-config.nix {
+      inherit system;
+      modules = [
+        ../maintainers/scripts/openstack/openstack-image.nix
+        ../modules/testing/test-instrumentation.nix
+        ../modules/profiles/qemu-guest.nix
+      ];
+    }).config.system.build.openstackImage;
+
+  sshKeys = import ./ssh-keys.nix pkgs;
+  snakeOilPrivateKey = sshKeys.snakeOilPrivateKey.text;
+  snakeOilPublicKey = sshKeys.snakeOilPublicKey;
+
+in {
+  metadata = makeEc2Test {
+    name = "openstack-ec2-metadata";
+    inherit image;
+    sshPublicKey = snakeOilPublicKey;
+    userData = ''
+      SSH_HOST_ED25519_KEY_PUB:${snakeOilPublicKey}
+      SSH_HOST_ED25519_KEY:${replaceStrings ["\n"] ["|"] snakeOilPrivateKey}
+    '';
+    script = ''
+      $machine->start;
+      $machine->waitForFile("/etc/ec2-metadata/user-data");
+      $machine->waitForUnit("sshd.service");
+
+      $machine->succeed("grep unknown /etc/ec2-metadata/ami-manifest-path");
+
+      # We have no keys configured on the client side yet, so this should fail
+      $machine->fail("ssh -o BatchMode=yes localhost exit");
+
+      # Let's install our client private key
+      $machine->succeed("mkdir -p ~/.ssh");
+
+      $machine->succeed("echo '${snakeOilPrivateKey}' > ~/.ssh/id_ed25519");
+      $machine->succeed("chmod 600 ~/.ssh/id_ed25519");
+
+      # We haven't configured the host key yet, so this should still fail
+      $machine->fail("ssh -o BatchMode=yes localhost exit");
+
+      # Add the host key; ssh should finally succeed
+      $machine->succeed("echo localhost,127.0.0.1 ${snakeOilPublicKey} > ~/.ssh/known_hosts");
+      $machine->succeed("ssh -o BatchMode=yes localhost exit");
+
+      # Just to make sure resizing is idempotent.
+      $machine->shutdown;
+      $machine->start;
+      $machine->waitForFile("/etc/ec2-metadata/user-data");
+    '';
+  };
+
+  userdata = makeEc2Test {
+    name = "openstack-ec2-userdata";
+    inherit image;
+    sshPublicKey = snakeOilPublicKey;
+    userData = ''
+      { pkgs, ... }:
+      {
+        imports = [
+          <nixpkgs/nixos/modules/virtualisation/openstack-config.nix>
+          <nixpkgs/nixos/modules/testing/test-instrumentation.nix>
+          <nixpkgs/nixos/modules/profiles/qemu-guest.nix>
+        ];
+        environment.etc.testFile = {
+          text = "whoa";
+        };
+      }
+    '';
+    script = ''
+      $machine->start;
+      $machine->waitForFile("/etc/testFile");
+      $machine->succeed("cat /etc/testFile | grep -q 'whoa'");
+    '';
+  };
+}
diff --git a/nixos/tests/osrm-backend.nix b/nixos/tests/osrm-backend.nix
new file mode 100644
index 00000000000..6e2d098d4ad
--- /dev/null
+++ b/nixos/tests/osrm-backend.nix
@@ -0,0 +1,53 @@
+import ./make-test.nix ({ pkgs, lib, ... }:
+let
+  port = 5000;
+in {
+  name = "osrm-backend";
+  meta.maintainers = [ lib.maintainers.erictapen ];
+
+  machine = { config, pkgs, ... }:{
+
+    services.osrm = {
+      enable = true;
+      inherit port;
+      dataFile = let
+        filename = "monaco";
+        osrm-data = pkgs.stdenv.mkDerivation {
+          name = "osrm-data";
+
+          buildInputs = [ pkgs.osrm-backend ];
+
+          # This is a PBF file of Monaco, downloaded on 2019-01-04 from
+          # http://download.geofabrik.de/europe/monaco-latest.osm.pbf
+          # As apparently no provider of OSM files guarantees immutability,
+          # it is hosted as a gist on GitHub.
+          src = pkgs.fetchgit {
+            url = "https://gist.github.com/erictapen/01e39f73a6c856eac53ba809a94cdb83";
+            rev = "9b1ff0f24deb40e5cf7df51f843dbe860637b8ce";
+            sha256 = "1scqhmrfnpwsy5i2a9jpggqnvfgj4hv9p4qyvc79321pzkbv59nx";
+          };
+
+          buildCommand = ''
+            cp $src/${filename}.osm.pbf .
+            ${pkgs.osrm-backend}/bin/osrm-extract -p ${pkgs.osrm-backend}/share/osrm/profiles/car.lua ${filename}.osm.pbf
+            ${pkgs.osrm-backend}/bin/osrm-partition ${filename}.osrm
+            ${pkgs.osrm-backend}/bin/osrm-customize ${filename}.osrm
+            mkdir -p $out
+            cp ${filename}* $out/
+          '';
+        };
+      in "${osrm-data}/${filename}.osrm";
+    };
+
+    environment.systemPackages = [ pkgs.jq ];
+  };
+
+  testScript = let
+    query = "http://localhost:${toString port}/route/v1/driving/7.41720,43.73304;7.42463,43.73886?steps=true";
+  in ''
+    $machine->waitForUnit("osrm.service");
+    $machine->waitForOpenPort(${toString port});
+    $machine->succeed("curl --silent '${query}' | jq .waypoints[0].name | grep -F 'Boulevard Rainier III'");
+    $machine->succeed("curl --silent '${query}' | jq .waypoints[1].name | grep -F 'Avenue de la Costa'");
+  '';
+})
diff --git a/nixos/tests/printing.nix b/nixos/tests/printing.nix
index d85abf3c105..7026637ead1 100644
--- a/nixos/tests/printing.nix
+++ b/nixos/tests/printing.nix
@@ -39,6 +39,8 @@ import ./make-test.nix ({pkgs, ... }: {
       $client->waitForUnit("cups.service");
       $client->sleep(10); # wait until cups is fully initialized
       $client->succeed("lpstat -r") =~ /scheduler is running/ or die;
+      # check local encrypted connections work without error
+      $client->succeed("lpstat -E -r") =~ /scheduler is running/ or die;
       # Test that UNIX socket is used for connections.
       $client->succeed("lpstat -H") =~ "/var/run/cups/cups.sock" or die;
       # Test that HTTP server is available too.
diff --git a/nixos/tests/roundcube.nix b/nixos/tests/roundcube.nix
index 178134fd9b3..ed0ebd7dd19 100644
--- a/nixos/tests/roundcube.nix
+++ b/nixos/tests/roundcube.nix
@@ -10,6 +10,8 @@ import ./make-test.nix ({ pkgs, ...} : {
         enable = true;
         hostName = "roundcube";
         database.password = "notproduction";
+        package = pkgs.roundcube.withPlugins (plugins: [ plugins.persistent_login ]);
+        plugins = [ "persistent_login" ];
       };
       services.nginx.virtualHosts.roundcube = {
         forceSSL = false;
@@ -23,6 +25,6 @@ import ./make-test.nix ({ pkgs, ...} : {
     $roundcube->waitForUnit("postgresql.service");
     $roundcube->waitForUnit("phpfpm-roundcube.service");
     $roundcube->waitForUnit("nginx.service");
-    $roundcube->succeed("curl -sSfL http://roundcube/");
+    $roundcube->succeed("curl -sSfL http://roundcube/ | grep 'Keep me logged in'");
   '';
 })
diff --git a/nixos/tests/switch-test.nix b/nixos/tests/switch-test.nix
index 32010838e67..0dba3697980 100644
--- a/nixos/tests/switch-test.nix
+++ b/nixos/tests/switch-test.nix
@@ -18,8 +18,17 @@ import ./make-test.nix ({ pkgs, ...} : {
   testScript = {nodes, ...}: let
     originalSystem = nodes.machine.config.system.build.toplevel;
     otherSystem = nodes.other.config.system.build.toplevel;
+
+    # Ensure that failures pass through using pipefail; otherwise a failure of
+    # switch-to-configuration is hidden by the success of `tee`.
+    stderrRunner = pkgs.writeScript "stderr-runner" ''
+      #! ${pkgs.stdenv.shell}
+      set -e
+      set -o pipefail
+      exec env -i "$@" | tee /dev/stderr
+    '';
   in ''
-    $machine->succeed("env -i ${originalSystem}/bin/switch-to-configuration test | tee /dev/stderr");
-    $machine->succeed("env -i ${otherSystem}/bin/switch-to-configuration test | tee /dev/stderr");
+    $machine->succeed("${stderrRunner} ${originalSystem}/bin/switch-to-configuration test");
+    $machine->succeed("${stderrRunner} ${otherSystem}/bin/switch-to-configuration test");
   '';
 })