Diffstat (limited to 'nixos/modules/services')
-rw-r--r--  nixos/modules/services/cluster/kubernetes/kubelet.nix      |    2
-rw-r--r--  nixos/modules/services/mail/postfix.nix                    |    4
-rw-r--r--  nixos/modules/services/matrix/mjolnir.nix                  |  240
-rw-r--r--  nixos/modules/services/matrix/mjolnir.xml                  |  134
-rw-r--r--  nixos/modules/services/matrix/pantalaimon-options.nix      |   70
-rw-r--r--  nixos/modules/services/matrix/pantalaimon.nix              |   70
-rw-r--r--  nixos/modules/services/misc/ananicy.nix                    |  107
-rw-r--r--  nixos/modules/services/monitoring/collectd.nix             |    4
-rw-r--r--  nixos/modules/services/monitoring/prometheus/default.nix   | 1509
-rw-r--r--  nixos/modules/services/networking/antennas.nix             |   80
-rw-r--r--  nixos/modules/services/networking/tinc.nix                 |    4
-rw-r--r--  nixos/modules/services/security/tor.nix                    |    2
-rw-r--r--  nixos/modules/services/web-apps/hledger-web.nix            |    2
-rw-r--r--  nixos/modules/services/web-apps/mastodon.nix               |    2
-rw-r--r--  nixos/modules/services/web-apps/matomo.nix                 |   20
-rw-r--r--  nixos/modules/services/web-apps/openwebrx.nix              |   33
-rw-r--r--  nixos/modules/services/web-apps/plausible.nix              |   92
-rw-r--r--  nixos/modules/services/web-apps/zabbix.nix                 |    3
-rw-r--r--  nixos/modules/services/x11/clight.nix                      |    8
-rw-r--r--  nixos/modules/services/x11/display-managers/lightdm.nix    |    2
20 files changed, 1972 insertions(+), 416 deletions(-)
diff --git a/nixos/modules/services/cluster/kubernetes/kubelet.nix b/nixos/modules/services/cluster/kubernetes/kubelet.nix
index 3a2a0ed363d..eb0cb1f3dbc 100644
--- a/nixos/modules/services/cluster/kubernetes/kubelet.nix
+++ b/nixos/modules/services/cluster/kubernetes/kubelet.nix
@@ -258,6 +258,8 @@ in
         "net.bridge.bridge-nf-call-ip6tables" = 1;
       };
 
+      systemd.enableUnifiedCgroupHierarchy = false; # true breaks node memory metrics
+
       systemd.services.kubelet = {
         description = "Kubernetes Kubelet Service";
         wantedBy = [ "kubernetes.target" ];
diff --git a/nixos/modules/services/mail/postfix.nix b/nixos/modules/services/mail/postfix.nix
index 6fc09682e0c..23d3574ae27 100644
--- a/nixos/modules/services/mail/postfix.nix
+++ b/nixos/modules/services/mail/postfix.nix
@@ -294,7 +294,7 @@ in
       };
 
       submissionOptions = mkOption {
-        type = types.attrs;
+        type = with types; attrsOf str;
         default = {
           smtpd_tls_security_level = "encrypt";
           smtpd_sasl_auth_enable = "yes";
@@ -312,7 +312,7 @@ in
       };
 
       submissionsOptions = mkOption {
-        type = types.attrs;
+        type = with types; attrsOf str;
         default = {
           smtpd_sasl_auth_enable = "yes";
           smtpd_client_restrictions = "permit_sasl_authenticated,reject";
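
With the stricter attrsOf str type, every submission option value must be a string. A hedged sketch of a user-side override under the new type (values mirror the module defaults shown above):

{
  services.postfix.submissionOptions = {
    smtpd_tls_security_level = "encrypt";
    smtpd_sasl_auth_enable = "yes";
    # A non-string value such as `smtpd_sasl_auth_enable = true;` would now be
    # rejected at evaluation time instead of being silently serialized.
  };
}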
diff --git a/nixos/modules/services/matrix/mjolnir.nix b/nixos/modules/services/matrix/mjolnir.nix
new file mode 100644
index 00000000000..8a54f93d98d
--- /dev/null
+++ b/nixos/modules/services/matrix/mjolnir.nix
@@ -0,0 +1,240 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.services.mjolnir;
+
+  yamlConfig = {
+    inherit (cfg) dataPath managementRoom protectedRooms;
+
+    accessToken = "@ACCESS_TOKEN@"; # will be replaced in "generateConfig"
+    homeserverUrl =
+      if cfg.pantalaimon.enable then
+        "http://${cfg.pantalaimon.options.listenAddress}:${toString cfg.pantalaimon.options.listenPort}"
+      else
+        cfg.homeserverUrl;
+
+    pantalaimon = {
+      inherit (cfg.pantalaimon) username;
+
+      use = cfg.pantalaimon.enable;
+      password = "@PANTALAIMON_PASSWORD@"; # will be replaced in "generateConfig"
+    };
+  };
+
+  moduleConfigFile = pkgs.writeText "module-config.yaml" (
+    generators.toYAML { } (filterAttrs (_: v: v != null)
+      (fold recursiveUpdate { } [ yamlConfig cfg.settings ])));
+
+  # these config files will be merged one after the other to build the final config
+  configFiles = [
+    "${pkgs.mjolnir}/share/mjolnir/config/default.yaml"
+    moduleConfigFile
+  ];
+
+  # this will generate the default.yaml file with all configFiles as inputs and
+  # replace all secret strings using replace-secret
+  generateConfig = pkgs.writeShellScript "mjolnir-generate-config" (
+    let
+      yqEvalStr = concatImapStringsSep " * " (pos: _: "select(fileIndex == ${toString (pos - 1)})") configFiles;
+      yqEvalArgs = concatStringsSep " " configFiles;
+    in
+    ''
+      set -euo pipefail
+
+      umask 077
+
+      # mjolnir will try to load a config from "./config/default.yaml" in the working directory
+      # -> let's place the generated config there
+      mkdir -p ${cfg.dataPath}/config
+
+      # merge all config files into one, overriding settings of the previous one with the next config
+      # e.g. "eval-all 'select(fileIndex == 0) * select(fileIndex == 1)' filea.yaml fileb.yaml" will merge filea.yaml with fileb.yaml
+      ${pkgs.yq-go}/bin/yq eval-all -P '${yqEvalStr}' ${yqEvalArgs} > ${cfg.dataPath}/config/default.yaml
+
+      ${optionalString (cfg.accessTokenFile != null) ''
+        ${pkgs.replace-secret}/bin/replace-secret '@ACCESS_TOKEN@' '${cfg.accessTokenFile}' ${cfg.dataPath}/config/default.yaml
+      ''}
+      ${optionalString (cfg.pantalaimon.passwordFile != null) ''
+        ${pkgs.replace-secret}/bin/replace-secret '@PANTALAIMON_PASSWORD@' '${cfg.pantalaimon.passwordFile}' ${cfg.dataPath}/config/default.yaml
+      ''}
+    ''
+  );
+in
+{
+  options.services.mjolnir = {
+    enable = mkEnableOption "Mjolnir, a moderation tool for Matrix";
+
+    homeserverUrl = mkOption {
+      type = types.str;
+      default = "https://matrix.org";
+      description = ''
+        Where the homeserver is located (client-server URL).
+
+        If <literal>pantalaimon.enable</literal> is <literal>true</literal>, this option will become the homeserver to which <literal>pantalaimon</literal> connects.
+        The listen address of <literal>pantalaimon</literal> will then become the <literal>homeserverUrl</literal> of <literal>mjolnir</literal>.
+      '';
+    };
+
+    accessTokenFile = mkOption {
+      type = with types; nullOr path;
+      default = null;
+      description = ''
+        File containing the matrix access token for the <literal>mjolnir</literal> user.
+      '';
+    };
+
+    pantalaimon = mkOption {
+      description = ''
+        <literal>pantalaimon</literal> options (enables E2E Encryption support).
+
+        This will create a <literal>pantalaimon</literal> instance with the name "mjolnir".
+      '';
+      default = { };
+      type = types.submodule {
+        options = {
+          enable = mkEnableOption ''
+            pantalaimon; if enabled, accessTokenFile is ignored and the
+            username/passwordFile below will be used instead. The access token of
+            the bot will be stored in the dataPath
+          '';
+
+          username = mkOption {
+            type = types.str;
+            description = "The username to login with.";
+          };
+
+          passwordFile = mkOption {
+            type = with types; nullOr path;
+            default = null;
+            description = ''
+              File containing the matrix password for the <literal>mjolnir</literal> user.
+            '';
+          };
+
+          options = mkOption {
+            type = types.submodule (import ./pantalaimon-options.nix);
+            default = { };
+            description = ''
+              Pass through additional options to the <literal>pantalaimon</literal> service.
+            '';
+          };
+        };
+      };
+    };
+
+    dataPath = mkOption {
+      type = types.path;
+      default = "/var/lib/mjolnir";
+      description = ''
+        The directory the bot should store various bits of information in.
+      '';
+    };
+
+    managementRoom = mkOption {
+      type = types.str;
+      default = "#moderators:example.org";
+      description = ''
+        The room ID where people can use the bot. The bot has no access controls, so
+        anyone in this room can use the bot - secure your room!
+        This should be a room alias or room ID - not a matrix.to URL.
+        Note: <literal>mjolnir</literal> is fairly verbose - expect a lot of messages from it.
+      '';
+    };
+
+    protectedRooms = mkOption {
+      type = types.listOf types.str;
+      default = [ ];
+      example = literalExpression ''
+        [
+          "https://matrix.to/#/#yourroom:example.org"
+          "https://matrix.to/#/#anotherroom:example.org"
+        ]
+      '';
+      description = ''
+        A list of rooms to protect (matrix.to URLs).
+      '';
+    };
+
+    settings = mkOption {
+      default = { };
+      type = (pkgs.formats.yaml { }).type;
+      example = literalExpression ''
+        {
+          autojoinOnlyIfManager = true;
+          automaticallyRedactForReasons = [ "spam" "advertising" ];
+        }
+      '';
+      description = ''
+        Additional settings (see <link xlink:href="https://github.com/matrix-org/mjolnir/blob/main/config/default.yaml">mjolnir default config</link> for available settings). These settings will override settings made by the module config.
+      '';
+    };
+  };
+
+  config = mkIf config.services.mjolnir.enable {
+    assertions = [
+      {
+        assertion = !(cfg.pantalaimon.enable && cfg.pantalaimon.passwordFile == null);
+        message = "Specify pantalaimon.passwordFile";
+      }
+      {
+        assertion = !(cfg.pantalaimon.enable && cfg.accessTokenFile != null);
+        message = "Do not specify accessTokenFile when using pantalaimon";
+      }
+      {
+        assertion = !(!cfg.pantalaimon.enable && cfg.accessTokenFile == null);
+        message = "Specify accessTokenFile when not using pantalaimon";
+      }
+    ];
+
+    services.pantalaimon-headless.instances."mjolnir" = mkIf cfg.pantalaimon.enable
+      ({
+        homeserver = cfg.homeserverUrl;
+      } // cfg.pantalaimon.options);
+
+    systemd.services.mjolnir = {
+      description = "mjolnir - a moderation tool for Matrix";
+      wants = [ "network-online.target" ] ++ optionals (cfg.pantalaimon.enable) [ "pantalaimon-mjolnir.service" ];
+      after = [ "network-online.target" ] ++ optionals (cfg.pantalaimon.enable) [ "pantalaimon-mjolnir.service" ];
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = {
+        ExecStart = ''${pkgs.mjolnir}/bin/mjolnir'';
+        ExecStartPre = [ generateConfig ];
+        WorkingDirectory = cfg.dataPath;
+        StateDirectory = "mjolnir";
+        StateDirectoryMode = "0700";
+        ProtectSystem = "strict";
+        ProtectHome = true;
+        PrivateTmp = true;
+        NoNewPrivileges = true;
+        PrivateDevices = true;
+        User = "mjolnir";
+        Restart = "on-failure";
+
+        /* TODO: wait for #102397 to be resolved. Then load secrets from $CREDENTIALS_DIRECTORY+"/NAME"
+        DynamicUser = true;
+        LoadCredential = [] ++
+          optionals (cfg.accessTokenFile != null) [
+            "access_token:${cfg.accessTokenFile}"
+          ] ++
+          optionals (cfg.pantalaimon.passwordFile != null) [
+            "pantalaimon_password:${cfg.pantalaimon.passwordFile}"
+          ];
+        */
+      };
+    };
+
+    users = {
+      users.mjolnir = {
+        group = "mjolnir";
+        isSystemUser = true;
+      };
+      groups.mjolnir = { };
+    };
+  };
+
+  meta = {
+    doc = ./mjolnir.xml;
+    maintainers = with maintainers; [ jojosch ];
+  };
+}
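
For reference, a minimal sketch of using the new module with a plain access token (hostnames, room identifiers and secret paths are placeholders; the module's assertions require either accessTokenFile or the pantalaimon settings, not both):

{
  services.mjolnir = {
    enable = true;
    homeserverUrl = "https://matrix.example.org";
    # File readable by the mjolnir service containing the bot's access token.
    accessTokenFile = "/run/secrets/mjolnir-access-token";
    managementRoom = "#moderators:example.org";
    protectedRooms = [ "https://matrix.to/#/#community:example.org" ];
  };
}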
diff --git a/nixos/modules/services/matrix/mjolnir.xml b/nixos/modules/services/matrix/mjolnir.xml
new file mode 100644
index 00000000000..d462ddf7b01
--- /dev/null
+++ b/nixos/modules/services/matrix/mjolnir.xml
@@ -0,0 +1,134 @@
+<chapter xmlns="http://docbook.org/ns/docbook"
+         xmlns:xlink="http://www.w3.org/1999/xlink"
+         xmlns:xi="http://www.w3.org/2001/XInclude"
+         version="5.0"
+         xml:id="module-services-mjolnir">
+ <title>Mjolnir (Matrix Moderation Tool)</title>
+ <para>
+  This chapter will show you how to set up your own, self-hosted
+  <link xlink:href="https://github.com/matrix-org/mjolnir">Mjolnir</link>
+  instance.
+ </para>
+ <para>
+  As an all-in-one moderation tool, it can protect your server from
+  malicious invites, spam messages, and whatever else you don't want.
+  In addition to server-level protection, Mjolnir is great for communities
+  wanting to protect their rooms without having to use their personal
+  accounts for moderation.
+ </para>
+ <para>
+  The bot by default includes support for bans, redactions, anti-spam,
+  server ACLs, room directory changes, room alias transfers, account
+  deactivation, room shutdown, and more.
+ </para>
+ <para>
+  See the <link xlink:href="https://github.com/matrix-org/mjolnir#readme">README</link>
+  page and the <link xlink:href="https://github.com/matrix-org/mjolnir/blob/main/docs/moderators.md">Moderator's guide</link>
+  for additional instructions on how to set up and use Mjolnir.
+ </para>
+ <para>
+  For <link linkend="opt-services.mjolnir.settings">additional settings</link>
+  see <link xlink:href="https://github.com/matrix-org/mjolnir/blob/main/config/default.yaml">the default configuration</link>.
+ </para>
+ <section xml:id="module-services-mjolnir-setup">
+  <title>Mjolnir Setup</title>
+  <para>
+   First create a new room which will be used as a management room for Mjolnir. In
+   this room, Mjolnir will log possible errors and debugging information. You'll
+   need to set this room ID in <link linkend="opt-services.mjolnir.managementRoom">services.mjolnir.managementRoom</link>.
+  </para>
+  <para>
+   Next, create a new user for Mjolnir on your homeserver, if not present already.
+  </para>
+  <para>
+   The Mjolnir Matrix user needs to be exempt from rate limiting.
+   See <link xlink:href="https://github.com/matrix-org/synapse/issues/6286">Synapse #6286</link>
+   for an example on how to achieve this.
+  </para>
+  <para>
+   If you want Mjolnir to be able to deactivate users, move room aliases, shut down rooms, and so on,
+   you'll need to make the Mjolnir user a Matrix server admin.
+  </para>
+  <para>
+   Now invite the Mjolnir user to the management room.
+  </para>
+  <para>
+   It is recommended to use <link xlink:href="https://github.com/matrix-org/pantalaimon">Pantalaimon</link>,
+   so your management room can be encrypted. This also applies if you are looking to moderate an encrypted room.
+  </para>
+  <para>
+   To enable the Pantalaimon E2E proxy for Mjolnir, enable
+   <link linkend="opt-services.mjolnir.pantalaimon.enable">services.mjolnir.pantalaimon.enable</link>. This will
+   autoconfigure a new Pantalaimon instance, which will connect to the homeserver
+   set in <link linkend="opt-services.mjolnir.homeserverUrl">services.mjolnir.homeserverUrl</link> and Mjolnir itself
+   will be configured to connect to the new Pantalaimon instance.
+  </para>
+<programlisting>
+{
+  services.mjolnir = {
+    enable = true;
+    <link linkend="opt-services.mjolnir.homeserverUrl">homeserverUrl</link> = "https://matrix.domain.tld";
+    <link linkend="opt-services.mjolnir.pantalaimon">pantalaimon</link> = {
+       <link linkend="opt-services.mjolnir.pantalaimon.enable">enable</link> = true;
+       <link linkend="opt-services.mjolnir.pantalaimon.username">username</link> = "mjolnir";
+       <link linkend="opt-services.mjolnir.pantalaimon.passwordFile">passwordFile</link> = "/run/secrets/mjolnir-password";
+    };
+    <link linkend="opt-services.mjolnir.protectedRooms">protectedRooms</link> = [
+      "https://matrix.to/#/!xxx:domain.tld"
+    ];
+    <link linkend="opt-services.mjolnir.managementRoom">managementRoom</link> = "!yyy:domain.tld";
+  };
+}
+</programlisting>
+ <section xml:id="module-services-mjolnir-setup-ems">
+  <title>Element Matrix Services (EMS)</title>
+  <para>
+   If you are using a managed <link xlink:href="https://ems.element.io/">"Element Matrix Services (EMS)"</link>
+   server, you will need to consent to the terms and conditions. Upon startup, an error
+   log entry with a URL to the consent page will be generated.
+  </para>
+ </section>
+ </section>
+
+ <section xml:id="module-services-mjolnir-matrix-synapse-antispam">
+  <title>Synapse Antispam Module</title>
+  <para>
+   A Synapse module is also available to apply the same rulesets the bot
+   uses across an entire homeserver.
+  </para>
+  <para>
+   To use the Antispam Module, add <package>matrix-synapse-plugins.matrix-synapse-mjolnir-antispam</package>
+   to the Synapse plugin list and enable the <literal>mjolnir.AntiSpam</literal> module.
+  </para>
+<programlisting>
+{
+  services.matrix-synapse = {
+    plugins = with pkgs; [
+      matrix-synapse-plugins.matrix-synapse-mjolnir-antispam
+    ];
+    extraConfig = ''
+      modules:
+        - module: mjolnir.AntiSpam
+          config:
+            # Prevent servers/users in the ban lists from inviting users on this
+            # server to rooms. Default true.
+            block_invites: true
+            # Flag messages sent by servers/users in the ban lists as spam. Currently
+            # this means that spammy messages will appear as empty to users. Default
+            # false.
+            block_messages: false
+            # Remove users from the user directory search by filtering matrix IDs and
+            # display names by the entries in the user ban list. Default false.
+            block_usernames: false
+            # The room IDs of the ban lists to honour. Unlike other parts of Mjolnir,
+            # this list cannot be room aliases or permalinks. This server is expected
+            # to already be joined to the room - Mjolnir will not automatically join
+            # these rooms.
+            ban_lists:
+              - "!roomid:example.org"
+    '';
+  };
+}
+</programlisting>
+ </section>
+</chapter>
diff --git a/nixos/modules/services/matrix/pantalaimon-options.nix b/nixos/modules/services/matrix/pantalaimon-options.nix
new file mode 100644
index 00000000000..035c57540d0
--- /dev/null
+++ b/nixos/modules/services/matrix/pantalaimon-options.nix
@@ -0,0 +1,70 @@
+{ config, lib, name, ... }:
+
+with lib;
+{
+  options = {
+    dataPath = mkOption {
+      type = types.path;
+      default = "/var/lib/pantalaimon-${name}";
+      description = ''
+        The directory where <literal>pantalaimon</literal> should store its state such as the database file.
+      '';
+    };
+
+    logLevel = mkOption {
+      type = types.enum [ "info" "warning" "error" "debug" ];
+      default = "warning";
+      description = ''
+        Set the log level of the daemon.
+      '';
+    };
+
+    homeserver = mkOption {
+      type = types.str;
+      example = "https://matrix.org";
+      description = ''
+        The URI of the homeserver that the <literal>pantalaimon</literal> proxy should
+        forward requests to, without the matrix API path but including
+        the http(s) scheme.
+      '';
+    };
+
+    ssl = mkOption {
+      type = types.bool;
+      default = true;
+      description = ''
+        Whether or not SSL verification should be enabled for outgoing
+        connections to the homeserver.
+      '';
+    };
+
+    listenAddress = mkOption {
+      type = types.str;
+      default = "localhost";
+      description = ''
+        The address where the daemon will listen to client connections
+        for this homeserver.
+      '';
+    };
+
+    listenPort = mkOption {
+      type = types.port;
+      default = 8009;
+      description = ''
+        The port where the daemon will listen to client connections for
+        this homeserver. Note that the listen address/port combination
+        needs to be unique between different homeservers.
+      '';
+    };
+
+    extraSettings = mkOption {
+      type = types.attrs;
+      default = { };
+      description = ''
+        Extra configuration options. See
+        <link xlink:href="https://github.com/matrix-org/pantalaimon/blob/master/docs/man/pantalaimon.5.md">pantalaimon(5)</link>
+        for available options.
+      '';
+    };
+  };
+}
diff --git a/nixos/modules/services/matrix/pantalaimon.nix b/nixos/modules/services/matrix/pantalaimon.nix
new file mode 100644
index 00000000000..63b40099ca5
--- /dev/null
+++ b/nixos/modules/services/matrix/pantalaimon.nix
@@ -0,0 +1,70 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.services.pantalaimon-headless;
+
+  iniFmt = pkgs.formats.ini { };
+
+  mkConfigFile = name: instanceConfig: iniFmt.generate "pantalaimon.conf" {
+    Default = {
+      LogLevel = instanceConfig.logLevel;
+      Notifications = false;
+    };
+
+    ${name} = (recursiveUpdate
+      {
+        Homeserver = instanceConfig.homeserver;
+        ListenAddress = instanceConfig.listenAddress;
+        ListenPort = instanceConfig.listenPort;
+        SSL = instanceConfig.ssl;
+
+        # Set some settings to prevent user interaction for headless operation
+        IgnoreVerification = true;
+        UseKeyring = false;
+      }
+      instanceConfig.extraSettings
+    );
+  };
+
+  mkPantalaimonService = name: instanceConfig:
+    nameValuePair "pantalaimon-${name}" {
+      description = "pantalaimon instance ${name} - E2EE aware proxy daemon for matrix clients";
+      wants = [ "network-online.target" ];
+      after = [ "network-online.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = {
+        ExecStart = ''${pkgs.pantalaimon-headless}/bin/pantalaimon --config ${mkConfigFile name instanceConfig} --data-path ${instanceConfig.dataPath}'';
+        Restart = "on-failure";
+        DynamicUser = true;
+        NoNewPrivileges = true;
+        PrivateDevices = true;
+        PrivateTmp = true;
+        ProtectHome = true;
+        ProtectSystem = "strict";
+        StateDirectory = "pantalaimon-${name}";
+      };
+    };
+in
+{
+  options.services.pantalaimon-headless.instances = mkOption {
+    default = { };
+    type = types.attrsOf (types.submodule (import ./pantalaimon-options.nix));
+    description = ''
+      Declarative instance config.
+
+      Note: to use pantalaimon interactively, e.g. for a Matrix client which does not
+      support end-to-end encryption (like <literal>fractal</literal>), refer to the home-manager module.
+    '';
+  };
+
+  config = mkIf (config.services.pantalaimon-headless.instances != { })
+    {
+      systemd.services = mapAttrs' mkPantalaimonService config.services.pantalaimon-headless.instances;
+    };
+
+  meta = {
+    maintainers = with maintainers; [ jojosch ];
+  };
+}
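
A hedged sketch of declaring a headless pantalaimon instance directly, independent of services.mjolnir (instance name and homeserver are placeholders):

{
  services.pantalaimon-headless.instances.moderation = {
    homeserver = "https://matrix.example.org";
    listenAddress = "127.0.0.1";
    listenPort = 8010;
    logLevel = "info";
  };
}

This yields a `pantalaimon-moderation` systemd service with its own state directory, as built by mkPantalaimonService above.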
diff --git a/nixos/modules/services/misc/ananicy.nix b/nixos/modules/services/misc/ananicy.nix
new file mode 100644
index 00000000000..f76f534fb45
--- /dev/null
+++ b/nixos/modules/services/misc/ananicy.nix
@@ -0,0 +1,107 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.ananicy;
+  configFile = pkgs.writeText "ananicy.conf" (generators.toKeyValue { } cfg.settings);
+  extraRules = pkgs.writeText "extraRules" cfg.extraRules;
+  servicename = if ((lib.getName cfg.package) == (lib.getName pkgs.ananicy-cpp)) then "ananicy-cpp" else "ananicy";
+in
+{
+  options = {
+    services.ananicy = {
+      enable = mkEnableOption "Ananicy, an auto nice daemon";
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.ananicy;
+        defaultText = literalExpression "pkgs.ananicy";
+        example = literalExpression "pkgs.ananicy-cpp";
+        description = ''
+          Which ananicy package to use.
+        '';
+      };
+
+      settings = mkOption {
+        type = with types; attrsOf (oneOf [ int bool str ]);
+        default = { };
+        example = {
+          apply_nice = false;
+        };
+        description = ''
+          See <link xlink:href="https://github.com/Nefelim4ag/Ananicy/blob/master/ananicy.d/ananicy.conf"/>
+        '';
+      };
+
+      extraRules = mkOption {
+        type = types.str;
+        default = "";
+        description = ''
+          Extra rules in json format on separate lines. See:
+          <link xlink:href="https://github.com/Nefelim4ag/Ananicy#configuration"/>
+          <link xlink:href="https://gitlab.com/ananicy-cpp/ananicy-cpp/#global-configuration"/>
+        '';
+        example = literalExpression ''
+          '''
+            { "name": "eog", "type": "Image-View" }
+            { "name": "fdupes", "type": "BG_CPUIO" }
+          '''
+        '';
+
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment = {
+      systemPackages = [ cfg.package ];
+      etc."ananicy.d".source = pkgs.runCommandLocal "ananicyfiles" { } ''
+        mkdir -p $out
+        # ananicy-cpp does not include rules or settings on purpose
+        cp -r ${pkgs.ananicy}/etc/ananicy.d/* $out
+        rm $out/ananicy.conf
+        cp ${configFile} $out/ananicy.conf
+        ${optionalString (cfg.extraRules != "") "cp ${extraRules} $out/nixRules.rules"}
+      '';
+    };
+
+    # ananicy and ananicy-cpp have different default settings
+    services.ananicy.settings =
+      let
+        mkOD = mkOptionDefault;
+      in
+      {
+        cgroup_load = mkOD true;
+        type_load = mkOD true;
+        rule_load = mkOD true;
+        apply_nice = mkOD true;
+        apply_ioclass = mkOD true;
+        apply_ionice = mkOD true;
+        apply_sched = mkOD true;
+        apply_oom_score_adj = mkOD true;
+        apply_cgroup = mkOD true;
+      } // (if ((lib.getName cfg.package) == (lib.getName pkgs.ananicy-cpp)) then {
+        # https://gitlab.com/ananicy-cpp/ananicy-cpp/-/blob/master/src/config.cpp#L12
+        loglevel = mkOD "warn"; # default is info but its spammy
+        cgroup_realtime_workaround = mkOD true;
+      } else {
+        # https://github.com/Nefelim4ag/Ananicy/blob/master/ananicy.d/ananicy.conf
+        check_disks_schedulers = mkOD true;
+        check_freq = mkOD 5;
+      });
+
+    systemd = {
+      # https://gitlab.com/ananicy-cpp/ananicy-cpp/#cgroups applies to both ananicy and -cpp
+      enableUnifiedCgroupHierarchy = mkDefault false;
+      packages = [ cfg.package ];
+      services."${servicename}" = {
+        wantedBy = [ "default.target" ];
+      };
+    };
+  };
+
+  meta = {
+    maintainers = with maintainers; [ artturin ];
+  };
+}
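
A minimal sketch of enabling the module with the C++ implementation (the extra rule is purely illustrative):

{ pkgs, ... }:
{
  services.ananicy = {
    enable = true;
    package = pkgs.ananicy-cpp;
    # Merged with the defaults the module sets via mkOptionDefault.
    settings.loglevel = "warn";
    # One JSON object per line; copied into /etc/ananicy.d/nixRules.rules.
    extraRules = ''
      { "name": "nix-build", "type": "BG_CPUIO" }
    '';
  };
}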
diff --git a/nixos/modules/services/monitoring/collectd.nix b/nixos/modules/services/monitoring/collectd.nix
index ad0cf4735ad..6af04d22f0f 100644
--- a/nixos/modules/services/monitoring/collectd.nix
+++ b/nixos/modules/services/monitoring/collectd.nix
@@ -57,7 +57,7 @@ in {
       description = ''
         Build a minimal collectd package with only the configured `services.collectd.plugins`
       '';
-      type = types.bool;
+      type = bool;
     };
 
     user = mkOption {
@@ -98,7 +98,7 @@ in {
       description = ''
         Attribute set of plugin names to plugin config segments
       '';
-      type = types.attrsOf types.str;
+      type = attrsOf lines;
     };
 
     extraConfig = mkOption {
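
Under the new attrsOf lines type, each attribute names a plugin and its value holds the raw collectd configuration lines for that plugin's block; together with the minimal-package option above, only the listed plugins need to be built. A hedged sketch (plugin configuration is illustrative):

{
  services.collectd = {
    enable = true;
    plugins = {
      # An empty string loads the plugin with no extra configuration.
      cpu = "";
      df = ''
        MountPoint "/"
        IgnoreSelected false
      '';
    };
  };
}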
diff --git a/nixos/modules/services/monitoring/prometheus/default.nix b/nixos/modules/services/monitoring/prometheus/default.nix
index 5f7bda1acbc..f20b8dde1ab 100644
--- a/nixos/modules/services/monitoring/prometheus/default.nix
+++ b/nixos/modules/services/monitoring/prometheus/default.nix
@@ -9,13 +9,6 @@ let
 
   prometheusYmlOut = "${workingDir}/prometheus-substituted.yaml";
 
-  writeConfig = pkgs.writeShellScriptBin "write-prometheus-config" ''
-    PATH="${makeBinPath (with pkgs; [ coreutils envsubst ])}"
-    touch '${prometheusYmlOut}'
-    chmod 600 '${prometheusYmlOut}'
-    envsubst -o '${prometheusYmlOut}' -i '${prometheusYml}'
-  '';
-
   triggerReload = pkgs.writeShellScriptBin "trigger-reload-prometheus" ''
     PATH="${makeBinPath (with pkgs; [ systemd ])}"
     if systemctl -q is-active prometheus.service; then
@@ -37,13 +30,13 @@ let
       pkgs.runCommandLocal
         "${name}-${replaceStrings [" "] [""] what}-checked"
         { buildInputs = [ cfg.package ]; } ''
-      ln -s ${file} $out
-      promtool ${what} $out
-    '' else file;
+        ln -s ${file} $out
+        promtool ${what} $out
+      '' else file;
 
   # Pretty-print JSON to a file
   writePrettyJSON = name: x:
-    pkgs.runCommandLocal name {} ''
+    pkgs.runCommandLocal name { } ''
       echo '${builtins.toJSON x}' | ${pkgs.jq}/bin/jq . > $out
     '';
 
@@ -63,52 +56,111 @@ let
     };
   };
 
-  prometheusYml = let
-    yml = if cfg.configText != null then
-      pkgs.writeText "prometheus.yml" cfg.configText
-      else generatedPrometheusYml;
-    in promtoolCheck "check config" "prometheus.yml" yml;
+  prometheusYml =
+    let
+      yml =
+        if cfg.configText != null then
+          pkgs.writeText "prometheus.yml" cfg.configText
+        else generatedPrometheusYml;
+    in
+    promtoolCheck "check config" "prometheus.yml" yml;
 
   cmdlineArgs = cfg.extraFlags ++ [
     "--storage.tsdb.path=${workingDir}/data/"
     "--config.file=${
       if cfg.enableReload
-      then prometheusYmlOut
-      else "/run/prometheus/prometheus-substituted.yaml"
+      then "/etc/prometheus/prometheus.yaml"
+      else prometheusYml
     }"
     "--web.listen-address=${cfg.listenAddress}:${builtins.toString cfg.port}"
     "--alertmanager.notification-queue-capacity=${toString cfg.alertmanagerNotificationQueueCapacity}"
     "--alertmanager.timeout=${toString cfg.alertmanagerTimeout}s"
   ] ++ optional (cfg.webExternalUrl != null) "--web.external-url=${cfg.webExternalUrl}"
-    ++ optional (cfg.retentionTime != null)  "--storage.tsdb.retention.time=${cfg.retentionTime}";
+    ++ optional (cfg.retentionTime != null) "--storage.tsdb.retention.time=${cfg.retentionTime}";
 
   filterValidPrometheus = filterAttrsListRecursive (n: v: !(n == "_module" || v == null));
   filterAttrsListRecursive = pred: x:
     if isAttrs x then
-      listToAttrs (
-        concatMap (name:
-          let v = x.${name}; in
-          if pred name v then [
-            (nameValuePair name (filterAttrsListRecursive pred v))
-          ] else []
-        ) (attrNames x)
-      )
+      listToAttrs
+        (
+          concatMap
+            (name:
+              let v = x.${name}; in
+              if pred name v then [
+                (nameValuePair name (filterAttrsListRecursive pred v))
+              ] else [ ]
+            )
+            (attrNames x)
+        )
     else if isList x then
       map (filterAttrsListRecursive pred) x
     else x;
 
-  mkDefOpt = type : defaultStr : description : mkOpt type (description + ''
+  #
+  # Config types: helper functions
+  #
+
+  mkDefOpt = type: defaultStr: description: mkOpt type (description + ''
 
     Defaults to <literal>${defaultStr}</literal> in prometheus
     when set to <literal>null</literal>.
   '');
 
-  mkOpt = type : description : mkOption {
+  mkOpt = type: description: mkOption {
     type = types.nullOr type;
     default = null;
     inherit description;
   };
 
+  mkSdConfigModule = extraOptions: types.submodule {
+    options = {
+      basic_auth = mkOpt promTypes.basic_auth ''
+        Optional HTTP basic authentication information.
+      '';
+
+      authorization = mkOpt
+        (types.submodule {
+          options = {
+            type = mkDefOpt types.str "Bearer" ''
+              Sets the authentication type.
+            '';
+
+            credentials = mkOpt types.str ''
+              Sets the credentials. It is mutually exclusive with `credentials_file`.
+            '';
+
+            credentials_file = mkOpt types.str ''
+              Sets the credentials to the credentials read from the configured file.
+              It is mutually exclusive with `credentials`.
+            '';
+          };
+        }) ''
+        Optional `Authorization` header configuration.
+      '';
+
+      oauth2 = mkOpt promTypes.oauth2 ''
+        Optional OAuth 2.0 configuration.
+        Cannot be used at the same time as basic_auth or authorization.
+      '';
+
+      proxy_url = mkOpt types.str ''
+        Optional proxy URL.
+      '';
+
+      follow_redirects = mkDefOpt types.bool "true" ''
+        Configure whether HTTP requests follow HTTP 3xx redirects.
+      '';
+
+      tls_config = mkOpt promTypes.tls_config ''
+        TLS configuration.
+      '';
+    } // extraOptions;
+  };
+
+  #
+  # Config types: general
+  #
+
   promTypes.globalConfig = types.submodule {
     options = {
       scrape_interval = mkDefOpt types.str "1m" ''
@@ -131,153 +183,68 @@ let
     };
   };
 
-  promTypes.remote_read = types.submodule {
+  promTypes.basic_auth = types.submodule {
     options = {
-      url = mkOption {
+      username = mkOption {
         type = types.str;
         description = ''
-          ServerName extension to indicate the name of the server.
-          http://tools.ietf.org/html/rfc4366#section-3.1
+          HTTP username
         '';
       };
-      name = mkOpt types.str ''
-        Name of the remote read config, which if specified must be unique among remote read configs.
-        The name will be used in metrics and logging in place of a generated value to help users distinguish between
-        remote read configs.
-      '';
-      required_matchers = mkOpt (types.attrsOf types.str) ''
-        An optional list of equality matchers which have to be
-        present in a selector to query the remote read endpoint.
-      '';
-      remote_timeout = mkOpt types.str ''
-        Timeout for requests to the remote read endpoint.
-      '';
-      read_recent = mkOpt types.bool ''
-        Whether reads should be made for queries for time ranges that
-        the local storage should have complete data for.
+      password = mkOpt types.str "HTTP password";
+      password_file = mkOpt types.str "HTTP password file";
+    };
+  };
+
+  promTypes.tls_config = types.submodule {
+    options = {
+      ca_file = mkOpt types.str ''
+        CA certificate to validate API server certificate with.
       '';
-      basic_auth = mkOpt (types.submodule {
-        options = {
-          username = mkOption {
-            type = types.str;
-            description = ''
-              HTTP username
-            '';
-          };
-          password = mkOpt types.str "HTTP password";
-          password_file = mkOpt types.str "HTTP password file";
-        };
-      }) ''
-        Sets the `Authorization` header on every remote read request with the
-        configured username and password.
-        password and password_file are mutually exclusive.
+
+      cert_file = mkOpt types.str ''
+        Certificate file for client cert authentication to the server.
       '';
-      bearer_token = mkOpt types.str ''
-        Sets the `Authorization` header on every remote read request with
-        the configured bearer token. It is mutually exclusive with `bearer_token_file`.
+
+      key_file = mkOpt types.str ''
+        Key file for client cert authentication to the server.
       '';
-      bearer_token_file = mkOpt types.str ''
-        Sets the `Authorization` header on every remote read request with the bearer token
-        read from the configured file. It is mutually exclusive with `bearer_token`.
+
+      server_name = mkOpt types.str ''
+        ServerName extension to indicate the name of the server.
+        http://tools.ietf.org/html/rfc4366#section-3.1
       '';
-      tls_config = mkOpt promTypes.tls_config ''
-        Configures the remote read request's TLS settings.
+
+      insecure_skip_verify = mkOpt types.bool ''
+        Disable validation of the server certificate.
       '';
-      proxy_url = mkOpt types.str "Optional Proxy URL.";
     };
   };
 
-  promTypes.remote_write = types.submodule {
+  promTypes.oauth2 = types.submodule {
     options = {
-      url = mkOption {
-        type = types.str;
-        description = ''
-          ServerName extension to indicate the name of the server.
-          http://tools.ietf.org/html/rfc4366#section-3.1
-        '';
-      };
-      remote_timeout = mkOpt types.str ''
-        Timeout for requests to the remote write endpoint.
+      client_id = mkOpt types.str ''
+        OAuth client ID.
       '';
-      write_relabel_configs = mkOpt (types.listOf promTypes.relabel_config) ''
-        List of remote write relabel configurations.
-      '';
-      name = mkOpt types.str ''
-        Name of the remote write config, which if specified must be unique among remote write configs.
-        The name will be used in metrics and logging in place of a generated value to help users distinguish between
-        remote write configs.
-      '';
-      basic_auth = mkOpt (types.submodule {
-        options = {
-          username = mkOption {
-            type = types.str;
-            description = ''
-              HTTP username
-            '';
-          };
-          password = mkOpt types.str "HTTP password";
-          password_file = mkOpt types.str "HTTP password file";
-        };
-      }) ''
-        Sets the `Authorization` header on every remote write request with the
-        configured username and password.
-        password and password_file are mutually exclusive.
-      '';
-      bearer_token = mkOpt types.str ''
-        Sets the `Authorization` header on every remote write request with
-        the configured bearer token. It is mutually exclusive with `bearer_token_file`.
+
+      client_secret = mkOpt types.str ''
+        OAuth client secret.
       '';
-      bearer_token_file = mkOpt types.str ''
-        Sets the `Authorization` header on every remote write request with the bearer token
-        read from the configured file. It is mutually exclusive with `bearer_token`.
+
+      client_secret_file = mkOpt types.str ''
+        Read the client secret from a file. It is mutually exclusive with `client_secret`.
       '';
-      tls_config = mkOpt promTypes.tls_config ''
-        Configures the remote write request's TLS settings.
+
+      scopes = mkOpt (types.listOf types.str) ''
+        Scopes for the token request.
       '';
-      proxy_url = mkOpt types.str "Optional Proxy URL.";
-      queue_config = mkOpt (types.submodule {
-        options = {
-          capacity = mkOpt types.int ''
-            Number of samples to buffer per shard before we block reading of more
-            samples from the WAL. It is recommended to have enough capacity in each
-            shard to buffer several requests to keep throughput up while processing
-            occasional slow remote requests.
-          '';
-          max_shards = mkOpt types.int ''
-            Maximum number of shards, i.e. amount of concurrency.
-          '';
-          min_shards = mkOpt types.int ''
-            Minimum number of shards, i.e. amount of concurrency.
-          '';
-          max_samples_per_send = mkOpt types.int ''
-            Maximum number of samples per send.
-          '';
-          batch_send_deadline = mkOpt types.str ''
-            Maximum time a sample will wait in buffer.
-          '';
-          min_backoff = mkOpt types.str ''
-            Initial retry delay. Gets doubled for every retry.
-          '';
-          max_backoff = mkOpt types.str ''
-            Maximum retry delay.
-          '';
-        };
-      }) ''
-        Configures the queue used to write to remote storage.
+
+      token_url = mkOpt types.str ''
+        The URL to fetch the token from.
       '';
-      metadata_config = mkOpt (types.submodule {
-        options = {
-          send = mkOpt types.bool ''
-            Whether metric metadata is sent to remote storage or not.
-          '';
-          send_interval = mkOpt types.str ''
-            How frequently metric metadata is sent to remote storage.
-          '';
-        };
-      }) ''
-        Configures the sending of series metadata to remote storage.
-        Metadata configuration is subject to change at any point
-        or be removed in future releases.
+
+      endpoint_params = mkOpt (types.attrsOf types.str) ''
+        Optional parameters to append to the token URL.
       '';
     };
   };
@@ -335,7 +302,7 @@ let
         by the target will be ignored.
       '';
 
-      scheme = mkDefOpt (types.enum ["http" "https"]) "http" ''
+      scheme = mkDefOpt (types.enum [ "http" "https" ]) "http" ''
         The URL scheme with which to fetch metrics from targets.
       '';
 
@@ -343,18 +310,7 @@ let
         Optional HTTP URL parameters.
       '';
 
-      basic_auth = mkOpt (types.submodule {
-        options = {
-          username = mkOption {
-            type = types.str;
-            description = ''
-              HTTP username
-            '';
-          };
-          password = mkOpt types.str "HTTP password";
-          password_file = mkOpt types.str "HTTP password file";
-        };
-      }) ''
+      basic_auth = mkOpt promTypes.basic_auth ''
         Sets the `Authorization` header on every scrape request with the
         configured username and password.
         password and password_file are mutually exclusive.
@@ -380,16 +336,36 @@ let
         Optional proxy URL.
       '';
 
-      ec2_sd_configs = mkOpt (types.listOf promTypes.ec2_sd_config) ''
-        List of EC2 service discovery configurations.
+      azure_sd_configs = mkOpt (types.listOf promTypes.azure_sd_config) ''
+        List of Azure service discovery configurations.
+      '';
+
+      consul_sd_configs = mkOpt (types.listOf promTypes.consul_sd_config) ''
+        List of Consul service discovery configurations.
+      '';
+
+      digitalocean_sd_configs = mkOpt (types.listOf promTypes.digitalocean_sd_config) ''
+        List of DigitalOcean service discovery configurations.
+      '';
+
+      docker_sd_configs = mkOpt (types.listOf promTypes.docker_sd_config) ''
+        List of Docker service discovery configurations.
+      '';
+
+      dockerswarm_sd_configs = mkOpt (types.listOf promTypes.dockerswarm_sd_config) ''
+        List of Docker Swarm service discovery configurations.
       '';
 
       dns_sd_configs = mkOpt (types.listOf promTypes.dns_sd_config) ''
         List of DNS service discovery configurations.
       '';
 
-      consul_sd_configs = mkOpt (types.listOf promTypes.consul_sd_config) ''
-        List of Consul service discovery configurations.
+      ec2_sd_configs = mkOpt (types.listOf promTypes.ec2_sd_config) ''
+        List of EC2 service discovery configurations.
+      '';
+
+      eureka_sd_configs = mkOpt (types.listOf promTypes.eureka_sd_config) ''
+        List of Eureka service discovery configurations.
       '';
 
       file_sd_configs = mkOpt (types.listOf promTypes.file_sd_config) ''
@@ -404,6 +380,62 @@ let
         relevant Prometheus configuration docs</link> for more detail.
       '';
 
+      hetzner_sd_configs = mkOpt (types.listOf promTypes.hetzner_sd_config) ''
+        List of Hetzner service discovery configurations.
+      '';
+
+      http_sd_configs = mkOpt (types.listOf promTypes.http_sd_config) ''
+        List of HTTP service discovery configurations.
+      '';
+
+      kubernetes_sd_configs = mkOpt (types.listOf promTypes.kubernetes_sd_config) ''
+        List of Kubernetes service discovery configurations.
+      '';
+
+      kuma_sd_configs = mkOpt (types.listOf promTypes.kuma_sd_config) ''
+        List of Kuma service discovery configurations.
+      '';
+
+      lightsail_sd_configs = mkOpt (types.listOf promTypes.lightsail_sd_config) ''
+        List of Lightsail service discovery configurations.
+      '';
+
+      linode_sd_configs = mkOpt (types.listOf promTypes.linode_sd_config) ''
+        List of Linode service discovery configurations.
+      '';
+
+      marathon_sd_configs = mkOpt (types.listOf promTypes.marathon_sd_config) ''
+        List of Marathon service discovery configurations.
+      '';
+
+      nerve_sd_configs = mkOpt (types.listOf promTypes.nerve_sd_config) ''
+        List of Airbnb's Nerve service discovery configurations.
+      '';
+
+      openstack_sd_configs = mkOpt (types.listOf promTypes.openstack_sd_config) ''
+        List of OpenStack service discovery configurations.
+      '';
+
+      puppetdb_sd_configs = mkOpt (types.listOf promTypes.puppetdb_sd_config) ''
+        List of PuppetDB service discovery configurations.
+      '';
+
+      scaleway_sd_configs = mkOpt (types.listOf promTypes.scaleway_sd_config) ''
+        List of Scaleway service discovery configurations.
+      '';
+
+      serverset_sd_configs = mkOpt (types.listOf promTypes.serverset_sd_config) ''
+        List of Zookeeper Serverset service discovery configurations.
+      '';
+
+      triton_sd_configs = mkOpt (types.listOf promTypes.triton_sd_config) ''
+        List of Triton service discovery configurations.
+      '';
+
+      uyuni_sd_configs = mkOpt (types.listOf promTypes.uyuni_sd_config) ''
+        List of Uyuni service discovery configurations.
+      '';
+
       static_configs = mkOpt (types.listOf promTypes.static_config) ''
         List of labeled target groups for this job.
       '';
@@ -416,29 +448,245 @@ let
         List of metric relabel configurations.
       '';
 
+      body_size_limit = mkDefOpt types.str "0" ''
+        An uncompressed response body larger than this many bytes will cause the
+        scrape to fail. 0 means no limit. Example: 100MB.
+        This is an experimental feature; this behaviour could
+        change or be removed in the future.
+      '';
+
       sample_limit = mkDefOpt types.int "0" ''
         Per-scrape limit on number of scraped samples that will be accepted.
         If more than this number of samples are present after metric relabelling
         the entire scrape will be treated as failed. 0 means no limit.
       '';
+
+      label_limit = mkDefOpt types.int "0" ''
+        Per-scrape limit on number of labels that will be accepted for a sample. If
+        more than this number of labels are present post metric-relabeling, the
+        entire scrape will be treated as failed. 0 means no limit.
+      '';
+
+      label_name_length_limit = mkDefOpt types.int "0" ''
+        Per-scrape limit on length of labels name that will be accepted for a sample.
+        If a label name is longer than this number post metric-relabeling, the entire
+        scrape will be treated as failed. 0 means no limit.
+      '';
+
+      label_value_length_limit = mkDefOpt types.int "0" ''
+        Per-scrape limit on length of labels value that will be accepted for a sample.
+        If a label value is longer than this number post metric-relabeling, the
+        entire scrape will be treated as failed. 0 means no limit.
+      '';
+
+      target_limit = mkDefOpt types.int "0" ''
+        Per-scrape config limit on number of unique targets that will be
+        accepted. If more than this number of targets are present after target
+        relabeling, Prometheus will mark the targets as failed without scraping them.
+        0 means no limit. This is an experimental feature; this behaviour could
+        change in the future.
+      '';
     };
   };
 
-  promTypes.static_config = types.submodule {
+  #
+  # Config types: service discovery
+  #
+
+  # For this one, the docs actually define all the types needed to use mkSdConfigModule, but a
+  # bunch of them are marked as 'currently not supported by Azure', so we don't bother adding
+  # them here.
+  promTypes.azure_sd_config = types.submodule {
     options = {
-      targets = mkOption {
-        type = types.listOf types.str;
+      environment = mkDefOpt types.str "AzurePublicCloud" ''
+        The Azure environment.
+      '';
+
+      authentication_method = mkDefOpt (types.enum [ "OAuth" "ManagedIdentity" ]) "OAuth" ''
+        The authentication method, either OAuth or ManagedIdentity.
+        See https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/overview
+      '';
+
+      subscription_id = mkOption {
+        type = types.str;
         description = ''
-          The targets specified by the target group.
+          The subscription ID.
         '';
       };
-      labels = mkOption {
-        type = types.attrsOf types.str;
-        default = {};
+
+      tenant_id = mkOpt types.str ''
+        Optional tenant ID. Only required with authentication_method OAuth.
+      '';
+
+      client_id = mkOpt types.str ''
+        Optional client ID. Only required with authentication_method OAuth.
+      '';
+
+      client_secret = mkOpt types.str ''
+        Optional client secret. Only required with authentication_method OAuth.
+      '';
+
+      refresh_interval = mkDefOpt types.str "300s" ''
+        Refresh interval to re-read the instance list.
+      '';
+
+      port = mkDefOpt types.int "80" ''
+        The port to scrape metrics from. If using the public IP
+        address, this must instead be specified in the relabeling
+        rule.
+      '';
+
+      proxy_url = mkOpt types.str ''
+        Optional proxy URL.
+      '';
+
+      follow_redirects = mkDefOpt types.bool "true" ''
+        Configure whether HTTP requests follow HTTP 3xx redirects.
+      '';
+
+      tls_config = mkOpt promTypes.tls_config ''
+        TLS configuration.
+      '';
+    };
+  };
+
+  promTypes.consul_sd_config = mkSdConfigModule {
+    server = mkDefOpt types.str "localhost:8500" ''
+      Consul server to query.
+    '';
+
+    token = mkOpt types.str "Consul token";
+
+    datacenter = mkOpt types.str "Consul datacenter";
+
+    scheme = mkDefOpt types.str "http" "Consul scheme";
+
+    username = mkOpt types.str "Consul username";
+
+    password = mkOpt types.str "Consul password";
+
+    tls_config = mkOpt promTypes.tls_config ''
+      Configures the Consul request's TLS settings.
+    '';
+
+    services = mkOpt (types.listOf types.str) ''
+      A list of services for which targets are retrieved.
+    '';
+
+    tags = mkOpt (types.listOf types.str) ''
+      An optional list of tags used to filter nodes for a given
+      service. Services must contain all tags in the list.
+    '';
+
+    node_meta = mkOpt (types.attrsOf types.str) ''
+      Node metadata used to filter nodes for a given service.
+    '';
+
+    tag_separator = mkDefOpt types.str "," ''
+      The string by which Consul tags are joined into the tag label.
+    '';
+
+    allow_stale = mkOpt types.bool ''
+      Allow stale Consul results
+      (see <link xlink:href="https://www.consul.io/api/index.html#consistency-modes"/>).
+
+      Will reduce load on Consul.
+    '';
+
+    refresh_interval = mkDefOpt types.str "30s" ''
+      The time after which the provided names are refreshed.
+
+      On a large setup it might be a good idea to increase this value
+      because the catalog will change all the time.
+    '';
+  };
+
+  promTypes.digitalocean_sd_config = mkSdConfigModule {
+    port = mkDefOpt types.int "80" ''
+      The port to scrape metrics from.
+    '';
+
+    refresh_interval = mkDefOpt types.str "60s" ''
+      The time after which the droplets are refreshed.
+    '';
+  };
+
+  mkDockerSdConfigModule = extraOptions: mkSdConfigModule ({
+    host = mkOption {
+      type = types.str;
+      description = ''
+        Address of the Docker daemon.
+      '';
+    };
+
+    port = mkDefOpt types.int "80" ''
+      The port to scrape metrics from, when `role` is nodes, and for discovered
+      tasks and services that don't have published ports.
+    '';
+
+    filters = mkOpt
+      (types.listOf (types.submodule {
+        options = {
+          name = mkOption {
+            type = types.str;
+            description = ''
+              Name of the filter. The available filters are listed in the upstream documentation:
+              Services: <link xlink:href="https://docs.docker.com/engine/api/v1.40/#operation/ServiceList"/>
+              Tasks: <link xlink:href="https://docs.docker.com/engine/api/v1.40/#operation/TaskList"/>
+              Nodes: <link xlink:href="https://docs.docker.com/engine/api/v1.40/#operation/NodeList"/>
+            '';
+          };
+          values = mkOption {
+            type = types.str;
+            description = ''
+              Value for the filter.
+            '';
+          };
+        };
+      })) ''
+      Optional filters to limit the discovery process to a subset of available resources.
+    '';
+
+    refresh_interval = mkDefOpt types.str "60s" ''
+      The time after which the containers are refreshed.
+    '';
+  } // extraOptions);
+
+  promTypes.docker_sd_config = mkDockerSdConfigModule {
+    host_networking_host = mkDefOpt types.str "localhost" ''
+      The host to use if the container is in host networking mode.
+    '';
+  };
+
+  promTypes.dockerswarm_sd_config = mkDockerSdConfigModule {
+    role = mkOption {
+      type = types.enum [ "services" "tasks" "nodes" ];
+      description = ''
+        Role of the targets to retrieve. Must be `services`, `tasks`, or `nodes`.
+      '';
+    };
+  };
+
+  promTypes.dns_sd_config = types.submodule {
+    options = {
+      names = mkOption {
+        type = types.listOf types.str;
         description = ''
-          Labels assigned to all metrics scraped from the targets.
+          A list of DNS SRV record names to be queried.
         '';
       };
+
+      type = mkDefOpt (types.enum [ "SRV" "A" "AAAA" ]) "SRV" ''
+        The type of DNS query to perform. One of SRV, A, or AAAA.
+      '';
+
+      port = mkOpt types.int ''
+        The port number used if the query type is not SRV.
+      '';
+
+      refresh_interval = mkDefOpt types.str "30s" ''
+        The time after which the provided names are refreshed.
+      '';
     };
   };
 
@@ -447,7 +695,7 @@ let
       region = mkOption {
         type = types.str;
         description = ''
-          The AWS Region.
+          The AWS Region. If blank, the region from the instance metadata is used.
         '';
       };
       endpoint = mkOpt types.str ''
@@ -464,7 +712,7 @@ let
          <literal>AWS_SECRET_ACCESS_KEY</literal> is used.
       '';
 
-      profile = mkOpt  types.str ''
+      profile = mkOpt types.str ''
         Named AWS profile used to connect to the API.
       '';
 
@@ -482,163 +730,653 @@ let
         rule.
       '';
 
-      filters = mkOpt (types.listOf promTypes.filter) ''
+      filters = mkOpt
+        (types.listOf (types.submodule {
+          options = {
+            name = mkOption {
+              type = types.str;
+              description = ''
+                See <link xlink:href="https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInstances.html">this list</link>
+                for the available filters.
+              '';
+            };
+
+            values = mkOption {
+              type = types.listOf types.str;
+              default = [ ];
+              description = ''
+                Value of the filter.
+              '';
+            };
+          };
+        })) ''
         Filters can be used optionally to filter the instance list by other criteria.
       '';
     };
   };
 
-  promTypes.filter = types.submodule {
+  promTypes.eureka_sd_config = mkSdConfigModule {
+    server = mkOption {
+      type = types.str;
+      description = ''
+        The URL to connect to the Eureka server.
+      '';
+    };
+  };
+
+  promTypes.file_sd_config = types.submodule {
+    options = {
+      files = mkOption {
+        type = types.listOf types.str;
+        description = ''
+          Patterns for files from which target groups are extracted. Refer
+          to the Prometheus documentation for permitted filename patterns
+          and formats.
+        '';
+      };
+
+      refresh_interval = mkDefOpt types.str "5m" ''
+        Refresh interval to re-read the files.
+      '';
+    };
+  };
+
+  promTypes.gce_sd_config = types.submodule {
     options = {
-      name = mkOption {
+      # Use `mkOption` instead of `mkOpt` for project and zone because they are
+      # required configuration values for `gce_sd_config`.
+      project = mkOption {
         type = types.str;
         description = ''
-          See <link xlink:href="https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInstances.html">this list</link>
-          for the available filters.
+          The GCP Project.
         '';
       };
 
-      values = mkOption {
-        type = types.listOf types.str;
-        default = [];
+      zone = mkOption {
+        type = types.str;
         description = ''
-          Value of the filter.
+          The zone of the scrape targets. If you need multiple zones use multiple
+          gce_sd_configs.
         '';
       };
+
+      filter = mkOpt types.str ''
+        Filter can optionally be used to filter the instance list by other
+        criteria. The syntax of this filter string is described in the filter
+        query parameter section: <link
+        xlink:href="https://cloud.google.com/compute/docs/reference/latest/instances/list"
+        />.
+      '';
+
+      refresh_interval = mkDefOpt types.str "60s" ''
+        Refresh interval to re-read the cloud instance list.
+      '';
+
+      port = mkDefOpt types.port "80" ''
+        The port to scrape metrics from. If using the public IP address, this
+        must instead be specified in the relabeling rule.
+      '';
+
+      tag_separator = mkDefOpt types.str "," ''
+        The tag separator used to separate concatenated GCE instance network tags.
+
+        See the GCP documentation on network tags for more information:
+        <link xlink:href="https://cloud.google.com/vpc/docs/add-remove-network-tags" />
+      '';
     };
   };
 
-  promTypes.dns_sd_config = types.submodule {
+  promTypes.hetzner_sd_config = mkSdConfigModule {
+    role = mkOption {
+      type = types.enum [ "robot" "hcloud" ];
+      description = ''
+        The Hetzner role of entities that should be discovered.
+        One of <literal>robot</literal> or <literal>hcloud</literal>.
+      '';
+    };
+
+    port = mkDefOpt types.int "80" ''
+      The port to scrape metrics from.
+    '';
+
+    refresh_interval = mkDefOpt types.str "60s" ''
+      The time after which the servers are refreshed.
+    '';
+  };
+
+  promTypes.http_sd_config = types.submodule {
     options = {
-      names = mkOption {
-        type = types.listOf types.str;
+      url = mkOption {
+        type = types.str;
         description = ''
-          A list of DNS SRV record names to be queried.
+          URL from which the targets are fetched.
         '';
       };
 
-      refresh_interval = mkDefOpt types.str "30s" ''
-        The time after which the provided names are refreshed.
+      refresh_interval = mkDefOpt types.str "60s" ''
+        Refresh interval to re-query the endpoint.
+      '';
+
+      basic_auth = mkOpt promTypes.basic_auth ''
+        Authentication information used to authenticate to the API server.
+        password and password_file are mutually exclusive.
+      '';
+
+      proxy_url = mkOpt types.str ''
+        Optional proxy URL.
+      '';
+
+      follow_redirects = mkDefOpt types.bool "true" ''
+        Configure whether HTTP requests follow HTTP 3xx redirects.
+      '';
+
+      tls_config = mkOpt promTypes.tls_config ''
+        Configures the scrape request's TLS settings.
       '';
     };
   };
 
-  promTypes.consul_sd_config = types.submodule {
-    options = {
-      server = mkDefOpt types.str "localhost:8500" ''
-        Consul server to query.
+  promTypes.kubernetes_sd_config = mkSdConfigModule {
+    api_server = mkOpt types.str ''
+      The API server addresses. If left empty, Prometheus is assumed to run inside
+      of the cluster and will discover API servers automatically and use the pod's
+      CA certificate and bearer token file at /var/run/secrets/kubernetes.io/serviceaccount/.
+    '';
+
+    role = mkOption {
+      type = types.enum [ "endpoints" "service" "pod" "node" "ingress" ];
+      description = ''
+        The Kubernetes role of entities that should be discovered.
+        One of endpoints, service, pod, node, or ingress.
       '';
+    };
 
-      token = mkOpt types.str "Consul token";
+    kubeconfig_file = mkOpt types.str ''
+      Optional path to a kubeconfig file.
+      Note that api_server and kubeconfig_file are mutually exclusive.
+    '';
+
+    namespaces = mkOpt
+      (
+        types.submodule {
+          options = {
+            names = mkOpt (types.listOf types.str) ''
+              Namespace name.
+            '';
+          };
+        }
+      ) ''
+      Optional namespace discovery. If omitted, all namespaces are used.
+    '';
 
-      datacenter = mkOpt types.str "Consul datacenter";
+    selectors = mkOpt
+      (
+        types.listOf (
+          types.submodule {
+            options = {
+              role = mkOption {
+                type = types.str;
+                description = ''
+                  Selector role
+                '';
+              };
+
+              label = mkOpt types.str ''
+                Selector label
+              '';
+
+              field = mkOpt types.str ''
+                Selector field
+              '';
+            };
+          }
+        )
+      ) ''
+      Optional label and field selectors to limit the discovery process to a subset of available resources.
+      See https://kubernetes.io/docs/concepts/overview/working-with-objects/field-selectors/
+      and https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ to learn more about the possible
+      filters that can be used. Endpoints role supports pod, service and endpoints selectors, other roles
+      only support selectors matching the role itself (e.g. node role can only contain node selectors).
+
+      Note: When deciding whether to use field/label selectors, make sure that this
+      is the best approach: selectors prevent Prometheus from reusing a single list/watch
+      for all scrape configs, which might result in a bigger load on the Kubernetes API,
+      because each selector combination requires an additional LIST/WATCH. On the other hand,
+      if you only want to monitor a small subset of pods in a large cluster, using selectors
+      is recommended. Whether selectors should be used depends on the particular situation.
+    '';
+  };
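To make the selector trade-off above concrete, a minimal sketch of a scrape configuration that narrows pod discovery (illustrative only; it assumes the scrape_config submodule exposes a kubernetes_sd_configs list, and the job name, namespace and label are placeholders):

    services.prometheus.scrapeConfigs = [
      {
        job_name = "kubernetes-pods";                 # placeholder job name
        kubernetes_sd_configs = [
          {
            role = "pod";
            namespaces.names = [ "monitoring" ];      # restrict discovery to one namespace
            selectors = [
              # each selector combination costs an extra LIST/WATCH against the API server
              { role = "pod"; label = "app=example"; }
            ];
          }
        ];
      }
    ];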
 
-      scheme = mkDefOpt types.str "http" "Consul scheme";
+  promTypes.kuma_sd_config = mkSdConfigModule {
+    server = mkOption {
+      type = types.str;
+      description = ''
+        Address of the Kuma Control Plane's MADS xDS server.
+      '';
+    };
 
-      username = mkOpt types.str "Consul username";
+    refresh_interval = mkDefOpt types.str "30s" ''
+      The time to wait between polling update requests.
+    '';
 
-      password = mkOpt types.str "Consul password";
+    fetch_timeout = mkDefOpt types.str "2m" ''
+      The time after which the monitoring assignments are refreshed.
+    '';
+  };
 
-      tls_config = mkOpt promTypes.tls_config ''
-        Configures the Consul request's TLS settings.
+  promTypes.lightsail_sd_config = types.submodule {
+    options = {
+      region = mkOpt types.str ''
+        The AWS region. If blank, the region from the instance metadata is used.
       '';
 
-      services = mkOpt (types.listOf types.str) ''
-        A list of services for which targets are retrieved.
+      endpoint = mkOpt types.str ''
+        Custom endpoint to be used.
       '';
 
-      tags = mkOpt (types.listOf types.str) ''
-        An optional list of tags used to filter nodes for a given
-        service. Services must contain all tags in the list.
+      access_key = mkOpt types.str ''
+        The AWS API keys. If blank, the environment variable <literal>AWS_ACCESS_KEY_ID</literal> is used.
       '';
 
-      node_meta = mkOpt (types.attrsOf types.str) ''
-        Node metadata used to filter nodes for a given service.
+      secret_key = mkOpt types.str ''
+        The AWS API keys. If blank, the environment variable <literal>AWS_SECRET_ACCESS_KEY</literal> is used.
       '';
 
-      tag_separator = mkDefOpt types.str "," ''
-        The string by which Consul tags are joined into the tag label.
+      profile = mkOpt types.str ''
+        Named AWS profile used to connect to the API.
       '';
 
-      allow_stale = mkOpt types.bool ''
-        Allow stale Consul results
-        (see <link xlink:href="https://www.consul.io/api/index.html#consistency-modes"/>).
+      role_arn = mkOpt types.str ''
+        AWS Role ARN, an alternative to using AWS API keys.
+      '';
 
-        Will reduce load on Consul.
+      refresh_interval = mkDefOpt types.str "60s" ''
+        Refresh interval to re-read the instance list.
       '';
 
-      refresh_interval = mkDefOpt types.str "30s" ''
-        The time after which the provided names are refreshed.
+      port = mkDefOpt types.int "80" ''
+        The port to scrape metrics from. If using the public IP address, this must
+        instead be specified in the relabeling rule.
+      '';
+    };
+  };
+
+  promTypes.linode_sd_config = mkSdConfigModule {
+    port = mkDefOpt types.int "80" ''
+      The port to scrape metrics from.
+    '';
+
+    tag_separator = mkDefOpt types.str "," ''
+      The string by which Linode Instance tags are joined into the tag label.
+    '';
+
+    refresh_interval = mkDefOpt types.str "60s" ''
+      The time after which the linode instances are refreshed.
+    '';
+  };
 
-        On large setup it might be a good idea to increase this value
-        because the catalog will change all the time.
+  promTypes.marathon_sd_config = mkSdConfigModule {
+    servers = mkOption {
+      type = types.listOf types.str;
+      description = ''
+        List of URLs to be used to contact Marathon servers. You need to provide at least one server URL.
       '';
     };
+
+    refresh_interval = mkDefOpt types.str "30s" ''
+      Polling interval.
+    '';
+
+    auth_token = mkOpt types.str ''
+      Optional authentication information for token-based authentication:
+      <link xlink:href="https://docs.mesosphere.com/1.11/security/ent/iam-api/#passing-an-authentication-token" />
+      It is mutually exclusive with <literal>auth_token_file</literal> and other authentication mechanisms.
+    '';
+
+    auth_token_file = mkOpt types.str ''
+      Optional authentication information for token-based authentication:
+      <link xlink:href="https://docs.mesosphere.com/1.11/security/ent/iam-api/#passing-an-authentication-token" />
+      It is mutually exclusive with <literal>auth_token</literal> and other authentication mechanisms.
+    '';
   };
 
-  promTypes.file_sd_config = types.submodule {
+  promTypes.nerve_sd_config = types.submodule {
     options = {
-      files = mkOption {
+      servers = mkOption {
         type = types.listOf types.str;
         description = ''
-          Patterns for files from which target groups are extracted. Refer
-          to the Prometheus documentation for permitted filename patterns
-          and formats.
+          The Zookeeper servers.
         '';
       };
 
-      refresh_interval = mkDefOpt types.str "5m" ''
-        Refresh interval to re-read the files.
+      paths = mkOption {
+        type = types.listOf types.str;
+        description = ''
+          Paths can point to a single service, or the root of a tree of services.
+        '';
+      };
+
+      timeout = mkDefOpt types.str "10s" ''
+        Timeout value.
       '';
     };
   };
 
-  promTypes.gce_sd_config = types.submodule {
+  promTypes.openstack_sd_config = types.submodule {
+    options =
+      let
+        userDescription = ''
+          username is required if using Identity V2 API. Consult with your provider's
+          control panel to discover your account's username. In Identity V3, either
+          userid or a combination of username and domain_id or domain_name are needed.
+        '';
+
+        domainDescription = ''
+          At most one of domain_id and domain_name must be provided if using username
+          with Identity V3. Otherwise, either is optional.
+        '';
+
+        projectDescription = ''
+          The project_id and project_name fields are optional for the Identity V2 API.
+          Some providers allow you to specify a project_name instead of the project_id.
+          Some require both. Your provider's authentication policies will determine
+          how these fields influence authentication.
+        '';
+
+        applicationDescription = ''
+          The application_credential_id or application_credential_name fields are
+          required if using an application credential to authenticate. Some providers
+          allow you to create an application credential to authenticate rather than a
+          password.
+        '';
+      in
+      {
+        role = mkOption {
+          type = types.str;
+          description = ''
+            The OpenStack role of entities that should be discovered.
+          '';
+        };
+
+        region = mkOption {
+          type = types.str;
+          description = ''
+            The OpenStack Region.
+          '';
+        };
+
+        identity_endpoint = mkOpt types.str ''
+          identity_endpoint specifies the HTTP endpoint that is required to work with
+          the Identity API of the appropriate version. While it's ultimately needed by
+          all of the identity services, it will often be populated by a provider-level
+          function.
+        '';
+
+        username = mkOpt types.str userDescription;
+        userid = mkOpt types.str userDescription;
+
+        password = mkOpt types.str ''
+          password for the Identity V2 and V3 APIs. Consult with your provider's
+          control panel to discover your account's preferred method of authentication.
+        '';
+
+        domain_name = mkOpt types.str domainDescription;
+        domain_id = mkOpt types.str domainDescription;
+
+        project_name = mkOpt types.str projectDescription;
+        project_id = mkOpt types.str projectDescription;
+
+        application_credential_name = mkOpt types.str applicationDescription;
+        application_credential_id = mkOpt types.str applicationDescription;
+
+        application_credential_secret = mkOpt types.str ''
+          The application_credential_secret field is required if using an application
+          credential to authenticate.
+        '';
+
+        all_tenants = mkDefOpt types.bool "false" ''
+          Whether the service discovery should list all instances for all projects.
+          It is only relevant for the 'instance' role and usually requires admin permissions.
+        '';
+
+        refresh_interval = mkDefOpt types.str "60s" ''
+          Refresh interval to re-read the instance list.
+        '';
+
+        port = mkDefOpt types.int "80" ''
+          The port to scrape metrics from. If using the public IP address, this must
+          instead be specified in the relabeling rule.
+        '';
+
+        availability = mkDefOpt (types.enum [ "public" "admin" "internal" ]) "public" ''
+          The availability of the endpoint to connect to. Must be one of public, admin or internal.
+        '';
+
+        tls_config = mkOpt promTypes.tls_config ''
+          TLS configuration.
+        '';
+      };
+  };
+
+  promTypes.puppetdb_sd_config = mkSdConfigModule {
+    url = mkOption {
+      type = types.str;
+      description = ''
+        The URL of the PuppetDB root query endpoint.
+      '';
+    };
+
+    query = mkOption {
+      type = types.str;
+      description = ''
+        Puppet Query Language (PQL) query. Only resources are supported.
+        https://puppet.com/docs/puppetdb/latest/api/query/v4/pql.html
+      '';
+    };
+
+    include_parameters = mkDefOpt types.bool "false" ''
+      Whether to include the parameters as meta labels.
+      Due to the differences between parameter types and Prometheus labels,
+      some parameters might not be rendered. The format of the parameters might
+      also change in future releases.
+
+      Note: Enabling this exposes parameters in the Prometheus UI and API. Make sure
+      that you don't have secrets exposed as parameters if you enable this.
+    '';
+
+    refresh_interval = mkDefOpt types.str "60s" ''
+      Refresh interval to re-read the resources list.
+    '';
+
+    port = mkDefOpt types.int "80" ''
+      The port to scrape metrics from.
+    '';
+  };
+
+  promTypes.scaleway_sd_config = types.submodule {
     options = {
-      # Use `mkOption` instead of `mkOpt` for project and zone because they are
-      # required configuration values for `gce_sd_config`.
-      project = mkOption {
+      access_key = mkOption {
         type = types.str;
         description = ''
-          The GCP Project.
+          Access key to use. https://console.scaleway.com/project/credentials
         '';
       };
 
-      zone = mkOption {
+      secret_key = mkOpt types.str ''
+        Secret key to use when listing targets. https://console.scaleway.com/project/credentials
+        It is mutually exclusive with `secret_key_file`.
+      '';
+
+      secret_key_file = mkOpt types.str ''
+        Sets the secret key with the credentials read from the configured file.
+        It is mutually exclusive with `secret_key`.
+      '';
+
+      project_id = mkOption {
         type = types.str;
         description = ''
-          The zone of the scrape targets. If you need multiple zones use multiple
-          gce_sd_configs.
+          Project ID of the targets.
         '';
       };
 
-      filter = mkOpt types.str ''
-        Filter can be used optionally to filter the instance list by other
-        criteria Syntax of this filter string is described here in the filter
-        query parameter section: <link
-        xlink:href="https://cloud.google.com/compute/docs/reference/latest/instances/list"
-        />.
+      role = mkOption {
+        type = types.enum [ "instance" "baremetal" ];
+        description = ''
+          Role of the targets to retrieve. Must be `instance` or `baremetal`.
+        '';
+      };
+
+      port = mkDefOpt types.int "80" ''
+        The port to scrape metrics from.
+      '';
+
+      api_url = mkDefOpt types.str "https://api.scaleway.com" ''
+        API URL to use when doing the server listing requests.
+      '';
+
+      zone = mkDefOpt types.str "fr-par-1" ''
+        Zone is the availability zone of your targets (e.g. fr-par-1).
+      '';
+
+      name_filter = mkOpt types.str ''
+        Specify a name filter (works as a LIKE) to apply on the server listing request.
+      '';
+
+      tags_filter = mkOpt (types.listOf types.str) ''
+        Specify a tag filter (a server needs to have all defined tags to be listed) to apply on the server listing request.
       '';
 
       refresh_interval = mkDefOpt types.str "60s" ''
-        Refresh interval to re-read the cloud instance list.
+        Refresh interval to re-read the managed targets list.
       '';
 
-      port = mkDefOpt types.port "80" ''
-        The port to scrape metrics from. If using the public IP address, this
-        must instead be specified in the relabeling rule.
+      proxy_url = mkOpt types.str ''
+        Optional proxy URL.
       '';
 
-      tag_separator = mkDefOpt types.str "," ''
-        The tag separator used to separate concatenated GCE instance network tags.
+      follow_redirects = mkDefOpt types.bool "true" ''
+        Configure whether HTTP requests follow HTTP 3xx redirects.
+      '';
+
+      tls_config = mkOpt promTypes.tls_config ''
+        TLS configuration.
+      '';
+    };
+  };
+
+  # These are exactly the same.
+  promTypes.serverset_sd_config = promTypes.nerve_sd_config;
+
+  promTypes.triton_sd_config = types.submodule {
+    options = {
+      account = mkOption {
+        type = types.str;
+        description = ''
+          The account to use for discovering new targets.
+        '';
+      };
 
-        See the GCP documentation on network tags for more information: <link
-        xlink:href="https://cloud.google.com/vpc/docs/add-remove-network-tags"
-        />
+      role = mkDefOpt (types.enum [ "container" "cn" ]) "container" ''
+        The type of targets to discover, can be set to:
+        - "container" to discover virtual machines (SmartOS zones, lx/KVM/bhyve branded zones) running on Triton
+        - "cn" to discover compute nodes (servers/global zones) making up the Triton infrastructure
       '';
+
+      dns_suffix = mkOption {
+        type = types.str;
+        description = ''
+          The DNS suffix which should be applied to the target.
+        '';
+      };
+
+      endpoint = mkOption {
+        type = types.str;
+        description = ''
+          The Triton discovery endpoint (e.g. <literal>cmon.us-east-3b.triton.zone</literal>). This is
+          often the same value as dns_suffix.
+        '';
+      };
+
+      groups = mkOpt (types.listOf types.str) ''
+        A list of groups for which targets are retrieved, only supported when targeting the <literal>container</literal> role.
+        If omitted, all containers owned by the requesting account are scraped.
+      '';
+
+      port = mkDefOpt types.int "9163" ''
+        The port to use for discovery and metric scraping.
+      '';
+
+      refresh_interval = mkDefOpt types.str "60s" ''
+        The interval which should be used for refreshing targets.
+      '';
+
+      version = mkDefOpt types.int "1" ''
+        The Triton discovery API version.
+      '';
+
+      tls_config = mkOpt promTypes.tls_config ''
+        TLS configuration.
+      '';
+    };
+  };
+
+  promTypes.uyuni_sd_config = mkSdConfigModule {
+    server = mkOption {
+      type = types.str;
+      description = ''
+        The URL to connect to the Uyuni server.
+      '';
+    };
+
+    username = mkOption {
+      type = types.str;
+      description = ''
+        Credentials are used to authenticate the requests to the Uyuni API.
+      '';
+    };
+
+    password = mkOption {
+      type = types.str;
+      description = ''
+        Credentials are used to authenticate the requests to the Uyuni API.
+      '';
+    };
+
+    entitlement = mkDefOpt types.str "monitoring_entitled" ''
+      The entitlement string to filter eligible systems.
+    '';
+
+    separator = mkDefOpt types.str "," ''
+      The string by which Uyuni group names are joined into the groups label.
+    '';
+
+    refresh_interval = mkDefOpt types.str "60s" ''
+      Refresh interval to re-read the managed targets list.
+    '';
+  };
+
+  promTypes.static_config = types.submodule {
+    options = {
+      targets = mkOption {
+        type = types.listOf types.str;
+        description = ''
+          The targets specified by the target group.
+        '';
+      };
+      labels = mkOption {
+        type = types.attrsOf types.str;
+        default = { };
+        description = ''
+          Labels assigned to all metrics scraped from the targets.
+        '';
+      };
     };
   };
 
+  #
+  # Config types: relabeling
+  #
+
   promTypes.relabel_config = types.submodule {
     options = {
       source_labels = mkOpt (types.listOf types.str) ''
@@ -670,41 +1408,154 @@ let
       '';
 
       action =
-        mkDefOpt (types.enum ["replace" "keep" "drop" "hashmod" "labelmap" "labeldrop" "labelkeep"]) "replace" ''
-        Action to perform based on regex matching.
-      '';
+        mkDefOpt (types.enum [ "replace" "keep" "drop" "hashmod" "labelmap" "labeldrop" "labelkeep" ]) "replace" ''
+          Action to perform based on regex matching.
+        '';
     };
   };
 
-  promTypes.tls_config = types.submodule {
+  #
+  # Config types: remote read / write
+  #
+
+  promTypes.remote_write = types.submodule {
     options = {
-      ca_file = mkOpt types.str ''
-        CA certificate to validate API server certificate with.
+      url = mkOption {
+        type = types.str;
+        description = ''
+          The URL of the endpoint to send samples to.
+        '';
+      };
+      remote_timeout = mkOpt types.str ''
+        Timeout for requests to the remote write endpoint.
       '';
-
-      cert_file = mkOpt types.str ''
-        Certificate file for client cert authentication to the server.
+      write_relabel_configs = mkOpt (types.listOf promTypes.relabel_config) ''
+        List of remote write relabel configurations.
       '';
-
-      key_file = mkOpt types.str ''
-        Key file for client cert authentication to the server.
+      name = mkOpt types.str ''
+        Name of the remote write config, which if specified must be unique among remote write configs.
+        The name will be used in metrics and logging in place of a generated value to help users distinguish between
+        remote write configs.
       '';
-
-      server_name = mkOpt types.str ''
-        ServerName extension to indicate the name of the server.
-        http://tools.ietf.org/html/rfc4366#section-3.1
+      basic_auth = mkOpt promTypes.basic_auth ''
+        Sets the `Authorization` header on every remote write request with the
+        configured username and password.
+        password and password_file are mutually exclusive.
+      '';
+      bearer_token = mkOpt types.str ''
+        Sets the `Authorization` header on every remote write request with
+        the configured bearer token. It is mutually exclusive with `bearer_token_file`.
+      '';
+      bearer_token_file = mkOpt types.str ''
+        Sets the `Authorization` header on every remote write request with the bearer token
+        read from the configured file. It is mutually exclusive with `bearer_token`.
+      '';
+      tls_config = mkOpt promTypes.tls_config ''
+        Configures the remote write request's TLS settings.
+      '';
+      proxy_url = mkOpt types.str "Optional Proxy URL.";
+      queue_config = mkOpt
+        (types.submodule {
+          options = {
+            capacity = mkOpt types.int ''
+              Number of samples to buffer per shard before we block reading of more
+              samples from the WAL. It is recommended to have enough capacity in each
+              shard to buffer several requests to keep throughput up while processing
+              occasional slow remote requests.
+            '';
+            max_shards = mkOpt types.int ''
+              Maximum number of shards, i.e. amount of concurrency.
+            '';
+            min_shards = mkOpt types.int ''
+              Minimum number of shards, i.e. amount of concurrency.
+            '';
+            max_samples_per_send = mkOpt types.int ''
+              Maximum number of samples per send.
+            '';
+            batch_send_deadline = mkOpt types.str ''
+              Maximum time a sample will wait in buffer.
+            '';
+            min_backoff = mkOpt types.str ''
+              Initial retry delay. Gets doubled for every retry.
+            '';
+            max_backoff = mkOpt types.str ''
+              Maximum retry delay.
+            '';
+          };
+        }) ''
+        Configures the queue used to write to remote storage.
+      '';
+      metadata_config = mkOpt
+        (types.submodule {
+          options = {
+            send = mkOpt types.bool ''
+              Whether metric metadata is sent to remote storage or not.
+            '';
+            send_interval = mkOpt types.str ''
+              How frequently metric metadata is sent to remote storage.
+            '';
+          };
+        }) ''
+        Configures the sending of series metadata to remote storage.
+        Metadata configuration is subject to change at any point
+        or be removed in future releases.
       '';
+    };
+  };
 
-      insecure_skip_verify = mkOpt types.bool ''
-        Disable validation of the server certificate.
+  promTypes.remote_read = types.submodule {
+    options = {
+      url = mkOption {
+        type = types.str;
+        description = ''
+          The URL of the endpoint to query from.
+        '';
+      };
+      name = mkOpt types.str ''
+        Name of the remote read config, which if specified must be unique among remote read configs.
+        The name will be used in metrics and logging in place of a generated value to help users distinguish between
+        remote read configs.
+      '';
+      required_matchers = mkOpt (types.attrsOf types.str) ''
+        An optional list of equality matchers which have to be
+        present in a selector to query the remote read endpoint.
+      '';
+      remote_timeout = mkOpt types.str ''
+        Timeout for requests to the remote read endpoint.
+      '';
+      read_recent = mkOpt types.bool ''
+        Whether reads should be made for queries for time ranges that
+        the local storage should have complete data for.
+      '';
+      basic_auth = mkOpt promTypes.basic_auth ''
+        Sets the `Authorization` header on every remote read request with the
+        configured username and password.
+        password and password_file are mutually exclusive.
+      '';
+      bearer_token = mkOpt types.str ''
+        Sets the `Authorization` header on every remote read request with
+        the configured bearer token. It is mutually exclusive with `bearer_token_file`.
+      '';
+      bearer_token_file = mkOpt types.str ''
+        Sets the `Authorization` header on every remote read request with the bearer token
+        read from the configured file. It is mutually exclusive with `bearer_token`.
+      '';
+      tls_config = mkOpt promTypes.tls_config ''
+        Configures the remote read request's TLS settings.
       '';
+      proxy_url = mkOpt types.str "Optional Proxy URL.";
     };
   };
 
-in {
+in
+{
 
   imports = [
     (mkRenamedOptionModule [ "services" "prometheus2" ] [ "services" "prometheus" ])
+    (mkRemovedOptionModule [ "services" "prometheus" "environmentFile" ]
+      "It has been removed since it was causing issues (https://github.com/NixOS/nixpkgs/issues/126083) and Prometheus now has native support for secret files, i.e. `basic_auth.password_file` and `authorization.credentials_file`.")
   ];
 
   options.services.prometheus = {
@@ -753,7 +1604,7 @@ in {
 
     extraFlags = mkOption {
       type = types.listOf types.str;
-      default = [];
+      default = [ ];
       description = ''
         Extra commandline options when launching Prometheus.
       '';
@@ -769,51 +1620,6 @@ in {
         (<literal>switch-to-configuration</literal>) that changes the prometheus
         configuration only finishes successfully when prometheus has finished
         loading the new configuration.
-
-        Note that prometheus will also get reloaded when the location of the
-        <option>environmentFile</option> changes but not when its contents
-        changes. So when you change it contents make sure to reload prometheus
-        manually or include the hash of <option>environmentFile</option> in its
-        name.
-      '';
-    };
-
-    environmentFile = mkOption {
-      type = types.nullOr types.path;
-      default = null;
-      example = "/root/prometheus.env";
-      description = ''
-        Environment file as defined in <citerefentry>
-        <refentrytitle>systemd.exec</refentrytitle><manvolnum>5</manvolnum>
-        </citerefentry>.
-
-        Secrets may be passed to the service without adding them to the
-        world-readable Nix store, by specifying placeholder variables as
-        the option value in Nix and setting these variables accordingly in the
-        environment file.
-
-        Environment variables from this file will be interpolated into the
-        config file using envsubst with this syntax:
-        <literal>$ENVIRONMENT ''${VARIABLE}</literal>
-
-        <programlisting>
-          # Example scrape config entry handling an OAuth bearer token
-          {
-            job_name = "home_assistant";
-            metrics_path = "/api/prometheus";
-            scheme = "https";
-            bearer_token = "\''${HOME_ASSISTANT_BEARER_TOKEN}";
-            [...]
-          }
-        </programlisting>
-
-        <programlisting>
-          # Content of the environment file
-          HOME_ASSISTANT_BEARER_TOKEN=someoauthbearertoken
-        </programlisting>
-
-        Note that this file needs to be available on the host on which
-        <literal>Prometheus</literal> is running.
       '';
     };
 
@@ -829,7 +1635,7 @@ in {
 
     globalConfig = mkOption {
       type = promTypes.globalConfig;
-      default = {};
+      default = { };
       description = ''
         Parameters that are valid in all configuration contexts. They
         also serve as defaults for other configuration sections
@@ -838,7 +1644,7 @@ in {
 
     remoteRead = mkOption {
       type = types.listOf promTypes.remote_read;
-      default = [];
+      default = [ ];
       description = ''
         Parameters of the endpoints to query from.
         See <link xlink:href="https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_read">the official documentation</link> for more information.
@@ -847,7 +1653,7 @@ in {
 
     remoteWrite = mkOption {
       type = types.listOf promTypes.remote_write;
-      default = [];
+      default = [ ];
       description = ''
         Parameters of the endpoints to send samples to.
         See <link xlink:href="https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_write">the official documentation</link> for more information.
@@ -856,7 +1662,7 @@ in {
 
     rules = mkOption {
       type = types.listOf types.str;
-      default = [];
+      default = [ ];
       description = ''
         Alerting and/or Recording rules to evaluate at runtime.
       '';
@@ -864,7 +1670,7 @@ in {
 
     ruleFiles = mkOption {
       type = types.listOf types.path;
-      default = [];
+      default = [ ];
       description = ''
         Any additional rules files to include in this configuration.
       '';
@@ -872,7 +1678,7 @@ in {
 
     scrapeConfigs = mkOption {
       type = types.listOf promTypes.scrape_config;
-      default = [];
+      default = [ ];
       description = ''
         A list of scrape configurations.
       '';
@@ -891,7 +1697,7 @@ in {
           } ];
         } ]
       '';
-      default = [];
+      default = [ ];
       description = ''
         A list of alertmanagers to send alerts to.
         See <link xlink:href="https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alertmanager_config">the official documentation</link> for more information.
@@ -950,11 +1756,13 @@ in {
 
   config = mkIf cfg.enable {
     assertions = [
-      ( let
+      (
+        let
           # Match something with dots (an IPv4 address) or something ending in
           # a square bracket (an IPv6 addresses) followed by a port number.
           legacy = builtins.match "(.*\\..*|.*]):([[:digit:]]+)" cfg.listenAddress;
-        in {
+        in
+        {
           assertion = legacy == null;
           message = ''
             Do not specify the port for Prometheus to listen on in the
@@ -972,21 +1780,19 @@ in {
       uid = config.ids.uids.prometheus;
       group = "prometheus";
     };
+    environment.etc."prometheus/prometheus.yaml" = mkIf cfg.enableReload {
+      source = prometheusYml;
+    };
     systemd.services.prometheus = {
       wantedBy = [ "multi-user.target" ];
-      after    = [ "network.target" ];
-      preStart = mkIf (!cfg.enableReload) ''
-         ${lib.getBin pkgs.envsubst}/bin/envsubst -o "/run/prometheus/prometheus-substituted.yaml" \
-                                                  -i "${prometheusYml}"
-      '';
+      after = [ "network.target" ];
       serviceConfig = {
         ExecStart = "${cfg.package}/bin/prometheus" +
           optionalString (length cmdlineArgs != 0) (" \\\n  " +
             concatStringsSep " \\\n  " cmdlineArgs);
         ExecReload = mkIf cfg.enableReload "+${reload}/bin/reload-prometheus";
         User = "prometheus";
-        Restart  = "always";
-        EnvironmentFile = mkIf (cfg.environmentFile != null && !cfg.enableReload) [ cfg.environmentFile ];
+        Restart = "always";
         RuntimeDirectory = "prometheus";
         RuntimeDirectoryMode = "0700";
         WorkingDirectory = workingDir;
@@ -994,18 +1800,6 @@ in {
         StateDirectoryMode = "0700";
       };
     };
-    systemd.services.prometheus-config-write = mkIf cfg.enableReload {
-      wantedBy = [ "prometheus.service" ];
-      before = [ "prometheus.service" ];
-      serviceConfig = {
-        Type = "oneshot";
-        User = "prometheus";
-        StateDirectory = cfg.stateDir;
-        StateDirectoryMode = "0700";
-        EnvironmentFile = mkIf (cfg.environmentFile != null) [ cfg.environmentFile ];
-        ExecStart = "${writeConfig}/bin/write-prometheus-config";
-      };
-    };
     # prometheus-config-reload will activate after prometheus. However, what we
     # don't want is that on startup it immediately reloads prometheus because
     # prometheus itself might have just started.
@@ -1015,26 +1809,19 @@ in {
     # harmless message and then stay active (RemainAfterExit).
     #
     # Then, when the config file has changed, switch-to-configuration notices
-    # that this service has changed and needs to be reloaded
-    # (reloadIfChanged). The reload command then actually writes the new config
-    # and reloads prometheus.
+    # that this service has changed (restartTriggers) and needs to be reloaded
+    # (reloadIfChanged). The reload command then reloads prometheus.
     systemd.services.prometheus-config-reload = mkIf cfg.enableReload {
       wantedBy = [ "prometheus.service" ];
       after = [ "prometheus.service" ];
       reloadIfChanged = true;
+      restartTriggers = [ prometheusYml ];
       serviceConfig = {
         Type = "oneshot";
-        User = "prometheus";
-        StateDirectory = cfg.stateDir;
-        StateDirectoryMode = "0700";
-        EnvironmentFile = mkIf (cfg.environmentFile != null) [ cfg.environmentFile ];
         RemainAfterExit = true;
         TimeoutSec = 60;
         ExecStart = "${pkgs.logger}/bin/logger 'prometheus-config-reload will only reload prometheus when reloaded itself.'";
-        ExecReload = [
-          "${writeConfig}/bin/write-prometheus-config"
-          "+${triggerReload}/bin/trigger-reload-prometheus"
-        ];
+        ExecReload = [ "${triggerReload}/bin/trigger-reload-prometheus" ];
       };
     };
   };
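For orientation, a minimal sketch of how the reworked options fit together (host names, job names and secret paths are placeholders; it assumes the scrape_config submodule exposes static_configs, and basic_auth.password_file is the native secret-file mechanism mentioned in the removal notice above):

    services.prometheus = {
      enable = true;
      enableReload = true;   # reload the running instance instead of restarting it
      scrapeConfigs = [
        {
          job_name = "node";
          static_configs = [
            { targets = [ "localhost:9100" ]; labels = { role = "example"; }; }
          ];
        }
      ];
      remoteWrite = [
        {
          url = "https://remote-storage.example.org/api/v1/write";
          basic_auth = {
            username = "prometheus";
            # secret stays out of the world-readable Nix store
            password_file = "/run/secrets/remote-write-password";
          };
        }
      ];
    };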
diff --git a/nixos/modules/services/networking/antennas.nix b/nixos/modules/services/networking/antennas.nix
new file mode 100644
index 00000000000..ef98af22f20
--- /dev/null
+++ b/nixos/modules/services/networking/antennas.nix
@@ -0,0 +1,80 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let cfg = config.services.antennas;
+in
+
+{
+  options = {
+    services.antennas = {
+      enable = mkEnableOption "Antennas";
+
+      tvheadendUrl = mkOption {
+        type        = types.str;
+        default     = "http://localhost:9981";
+        description = "URL of Tvheadend.";
+      };
+
+      antennasUrl = mkOption {
+        type        = types.str;
+        default     = "http://127.0.0.1:5004";
+        description = "URL of Antennas.";
+      };
+
+      tunerCount = mkOption {
+        type        = types.int;
+        default     = 6;
+        description = "Number of tuners in Tvheadend.";
+      };
+
+      deviceUUID = mkOption {
+        type        = types.str;
+        default     = "2f70c0d7-90a3-4429-8275-cbeeee9cd605";
+        description = "Device tuner UUID. Change this if you are running multiple instances.";
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.antennas = {
+      description = "Antennas HDHomeRun emulator for Tvheadend.";
+      wantedBy    = [ "multi-user.target" ];
+
+      # Config
+      environment = {
+        TVHEADEND_URL = cfg.tvheadendUrl;
+        ANTENNAS_URL = cfg.antennasUrl;
+        TUNER_COUNT = toString cfg.tunerCount;
+        DEVICE_UUID = cfg.deviceUUID;
+      };
+
+      serviceConfig = {
+        ExecStart = "${pkgs.antennas}/bin/antennas";
+
+        # Antennas expects all resources like HTML and config files to be relative to its working directory
+        WorkingDirectory = "${pkgs.antennas}/libexec/antennas/deps/antennas/";
+
+        # Hardening
+        CapabilityBoundingSet = [ "" ];
+        DynamicUser = true;
+        LockPersonality = true;
+        ProcSubset = "pid";
+        PrivateDevices = true;
+        PrivateUsers = true;
+        PrivateTmp = true;
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectProc = "invisible";
+        ProtectSystem = "strict";
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+      };
+    };
+  };
+}
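A usage sketch for the new module (addresses and tuner count are placeholders):

    services.antennas = {
      enable = true;
      tvheadendUrl = "http://localhost:9981";     # where Tvheadend is reachable
      antennasUrl  = "http://192.168.1.10:5004";  # address clients use to reach Antennas
      tunerCount   = 4;
    };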
diff --git a/nixos/modules/services/networking/tinc.nix b/nixos/modules/services/networking/tinc.nix
index 1d77503d68b..9db433fa073 100644
--- a/nixos/modules/services/networking/tinc.nix
+++ b/nixos/modules/services/networking/tinc.nix
@@ -289,13 +289,13 @@ in
             };
 
             chroot = mkOption {
-              default = true;
+              default = false;
               type = types.bool;
               description = ''
                 Change process root directory to the directory where the config file is located (/etc/tinc/netname/), for added security.
                 The chroot is performed after all the initialization is done, after writing pid files and opening network sockets.
 
-                Note that tinc can't run scripts anymore (such as tinc-down or host-up), unless it is setup to be runnable inside chroot environment.
+                Note that this currently breaks DNS resolution and that tinc can no longer run scripts (such as tinc-down or host-up) unless it is set up to be runnable inside the chroot environment.
               '';
             };
 
diff --git a/nixos/modules/services/security/tor.nix b/nixos/modules/services/security/tor.nix
index c94b248d5f1..c3e3248ee8a 100644
--- a/nixos/modules/services/security/tor.nix
+++ b/nixos/modules/services/security/tor.nix
@@ -1012,6 +1012,7 @@ in
         # Tor cannot currently bind privileged port when PrivateUsers=true,
         # see https://gitlab.torproject.org/legacy/trac/-/issues/20930
         PrivateUsers = !bindsPrivilegedPort;
+        ProcSubset = "pid";
         ProtectClock = true;
         ProtectControlGroups = true;
         ProtectHome = true;
@@ -1019,6 +1020,7 @@ in
         ProtectKernelLogs = true;
         ProtectKernelModules = true;
         ProtectKernelTunables = true;
+        ProtectProc = "invisible";
         ProtectSystem = "strict";
         RemoveIPC = true;
         RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" "AF_INET6" "AF_NETLINK" ];
diff --git a/nixos/modules/services/web-apps/hledger-web.nix b/nixos/modules/services/web-apps/hledger-web.nix
index 9c66589dffd..4f6a34e6d2f 100644
--- a/nixos/modules/services/web-apps/hledger-web.nix
+++ b/nixos/modules/services/web-apps/hledger-web.nix
@@ -118,7 +118,7 @@ in {
         ++ extraOptions);
     in {
       description = "hledger-web - web-app for the hledger accounting tool.";
-      documentation = [ https://hledger.org/hledger-web.html ];
+      documentation = [ "https://hledger.org/hledger-web.html" ];
       wantedBy = [ "multi-user.target" ];
       after = [ "networking.target" ];
       serviceConfig = mkMerge [
diff --git a/nixos/modules/services/web-apps/mastodon.nix b/nixos/modules/services/web-apps/mastodon.nix
index 7910f398048..1e3c7e53c17 100644
--- a/nixos/modules/services/web-apps/mastodon.nix
+++ b/nixos/modules/services/web-apps/mastodon.nix
@@ -545,7 +545,7 @@ in {
         RuntimeDirectory = "mastodon-web";
         RuntimeDirectoryMode = "0750";
         # System Call Filtering
-        SystemCallFilter = [ ("~" + lib.concatStringsSep " " (systemCallsList ++ [ "@resources" ])) "@chown" "pipe" "pipe2" ];
+        SystemCallFilter = [ ("~" + lib.concatStringsSep " " systemCallsList) "@chown" "pipe" "pipe2" ];
       } // cfgService;
       path = with pkgs; [ file imagemagick ffmpeg ];
     };
diff --git a/nixos/modules/services/web-apps/matomo.nix b/nixos/modules/services/web-apps/matomo.nix
index b0d281cfb6e..eba55e7e9be 100644
--- a/nixos/modules/services/web-apps/matomo.nix
+++ b/nixos/modules/services/web-apps/matomo.nix
@@ -24,6 +24,7 @@ in {
     (mkRemovedOptionModule [ "services" "piwik" "phpfpmProcessManagerConfig" ] "Use services.phpfpm.pools.<name>.settings")
     (mkRemovedOptionModule [ "services" "matomo" "phpfpmProcessManagerConfig" ] "Use services.phpfpm.pools.<name>.settings")
     (mkRenamedOptionModule [ "services" "piwik" "nginx" ] [ "services" "matomo" "nginx" ])
+    (mkRenamedOptionModule [ "services" "matomo" "periodicArchiveProcessingUrl" ] [ "services" "matomo" "hostname" ])
   ];
 
   options = {
@@ -77,7 +78,7 @@ in {
         '';
       };
 
-      periodicArchiveProcessingUrl = mkOption {
+      hostname = mkOption {
         type = types.str;
         default = "${user}.${fqdn}";
         example = "matomo.yourdomain.org";
@@ -170,6 +171,19 @@ in {
         fi
         chown -R ${user}:${user} ${dataDir}
         chmod -R ug+rwX,o-rwx ${dataDir}
+
+        if [ -e ${dataDir}/current-package ]; then
+          CURRENT_PACKAGE=$(readlink ${dataDir}/current-package)
+          NEW_PACKAGE=${cfg.package}
+          if [ "$CURRENT_PACKAGE" != "$NEW_PACKAGE" ]; then
+            # keeping tmp around between upgrades seems to break things, so delete it
+            rm -rf ${dataDir}/tmp
+          fi
+        elif [ -e ${dataDir}/tmp ]; then
+          # upgrade from 4.4.1
+          rm -rf ${dataDir}/tmp
+        fi
+        ln -sfT ${cfg.package} ${dataDir}/current-package
         '';
       script = ''
             # Use User-Private Group scheme to protect Matomo data, but allow administration / backup via 'matomo' group
@@ -202,7 +216,7 @@ in {
         UMask = "0007";
         CPUSchedulingPolicy = "idle";
         IOSchedulingClass = "idle";
-        ExecStart = "${cfg.package}/bin/matomo-console core:archive --url=https://${cfg.periodicArchiveProcessingUrl}";
+        ExecStart = "${cfg.package}/bin/matomo-console core:archive --url=https://${cfg.hostname}";
       };
     };
 
@@ -258,7 +272,7 @@ in {
       # References:
       # https://fralef.me/piwik-hardening-with-nginx-and-php-fpm.html
       # https://github.com/perusio/piwik-nginx
-      "${user}.${fqdn}" = mkMerge [ cfg.nginx {
+      "${cfg.hostname}" = mkMerge [ cfg.nginx {
         # don't allow to override the root easily, as it will almost certainly break Matomo.
         # disadvantage: not shown as default in docs.
         root = mkForce "${cfg.package}/share";
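With the rename above, an existing configuration migrates roughly as follows (the domain is a placeholder); the same value now names both the nginx virtual host and the archive job URL:

    services.matomo = {
      enable = true;
      # formerly services.matomo.periodicArchiveProcessingUrl
      hostname = "matomo.example.org";
    };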
diff --git a/nixos/modules/services/web-apps/openwebrx.nix b/nixos/modules/services/web-apps/openwebrx.nix
new file mode 100644
index 00000000000..51005cd1e49
--- /dev/null
+++ b/nixos/modules/services/web-apps/openwebrx.nix
@@ -0,0 +1,33 @@
+{ config, lib, pkgs, ... }:
+let
+  cfg = config.services.openwebrx;
+in
+{
+  options.services.openwebrx = with lib; {
+    enable = mkEnableOption "OpenWebRX Web interface for Software-Defined Radios on http://localhost:8073";
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.openwebrx;
+      description = "OpenWebRX package to use for the service.";
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    systemd.services.openwebrx = {
+      wantedBy = [ "multi-user.target" ];
+      path = with pkgs; [
+        csdr
+        alsaUtils
+        netcat
+      ];
+      serviceConfig = {
+        ExecStart = "${cfg.package}/bin/openwebrx";
+        Restart = "always";
+        DynamicUser = true;
+        # openwebrx uses /var/lib/openwebrx by default
+        StateDirectory = [ "openwebrx" ];
+      };
+    };
+  };
+}
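A minimal usage sketch (the package override is optional and shown only for illustration):

    services.openwebrx.enable = true;
    # defaults to pkgs.openwebrx; set explicitly only if you need another build
    services.openwebrx.package = pkgs.openwebrx;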
diff --git a/nixos/modules/services/web-apps/plausible.nix b/nixos/modules/services/web-apps/plausible.nix
index b56848b79d2..b6c48186a1d 100644
--- a/nixos/modules/services/web-apps/plausible.nix
+++ b/nixos/modules/services/web-apps/plausible.nix
@@ -5,23 +5,18 @@ with lib;
 let
   cfg = config.services.plausible;
 
-  # FIXME consider using LoadCredential as soon as it actually works.
-  envSecrets = ''
-    ADMIN_USER_PWD="$(<${cfg.adminUser.passwordFile})"
-    export ADMIN_USER_PWD # separate export to make `set -e` work
-
-    SECRET_KEY_BASE="$(<${cfg.server.secretKeybaseFile})"
-    export SECRET_KEY_BASE # separate export to make `set -e` work
-
-    ${optionalString (cfg.mail.smtp.passwordFile != null) ''
-      SMTP_USER_PWD="$(<${cfg.mail.smtp.passwordFile})"
-      export SMTP_USER_PWD # separate export to make `set -e` work
-    ''}
-  '';
 in {
   options.services.plausible = {
     enable = mkEnableOption "plausible";
 
+    releaseCookiePath = mkOption {
+      default = null;
+      type = with types; nullOr (either str path);
+      description = ''
+        The path to the file containing the Erlang release cookie (used for remote connections to the running node).
+      '';
+    };
+
     adminUser = {
       name = mkOption {
         default = "admin";
@@ -184,13 +179,17 @@ in {
       enable = true;
     };
 
+    services.epmd.enable = true;
+
+    environment.systemPackages = [ pkgs.plausible ];
+
     systemd.services = mkMerge [
       {
         plausible = {
           inherit (pkgs.plausible.meta) description;
           documentation = [ "https://plausible.io/docs/self-hosting" ];
           wantedBy = [ "multi-user.target" ];
-          after = optional cfg.database.postgres.setup "plausible-postgres.service";
+          after = optionals cfg.database.postgres.setup [ "postgresql.service" "plausible-postgres.service" ];
           requires = optional cfg.database.clickhouse.setup "clickhouse.service"
             ++ optionals cfg.database.postgres.setup [
               "postgresql.service"
@@ -200,7 +199,7 @@ in {
           environment = {
             # NixOS specific option to avoid that it's trying to write into its store-path.
             # See also https://github.com/lau/tzdata#data-directory-and-releases
-            TZDATA_DIR = "/var/lib/plausible/elixir_tzdata";
+            STORAGE_DIR = "/var/lib/plausible/elixir_tzdata";
 
             # Configuration options from
             # https://plausible.io/docs/self-hosting-configuration
@@ -208,6 +207,8 @@ in {
             DISABLE_REGISTRATION = boolToString cfg.server.disableRegistration;
 
             RELEASE_TMP = "/var/lib/plausible/tmp";
+            # Home is needed to connect to the node with iex
+            HOME = "/var/lib/plausible";
 
             ADMIN_USER_NAME = cfg.adminUser.name;
             ADMIN_USER_EMAIL = cfg.adminUser.email;
@@ -231,28 +232,33 @@ in {
 
           path = [ pkgs.plausible ]
             ++ optional cfg.database.postgres.setup config.services.postgresql.package;
+          script = ''
+            export CONFIG_DIR=$CREDENTIALS_DIRECTORY
+
+            # setup
+            ${pkgs.plausible}/createdb.sh
+            ${pkgs.plausible}/migrate.sh
+            ${optionalString cfg.adminUser.activate ''
+              if ! ${pkgs.plausible}/init-admin.sh | grep 'already exists'; then
+                psql -d plausible <<< "UPDATE users SET email_verified=true;"
+              fi
+            ''}
+            ${optionalString (cfg.releaseCookiePath != null) ''
+              export RELEASE_COOKIE="$(< $CREDENTIALS_DIRECTORY/RELEASE_COOKIE )"
+            ''}
+            plausible start
+          '';
 
           serviceConfig = {
             DynamicUser = true;
             PrivateTmp = true;
             WorkingDirectory = "/var/lib/plausible";
             StateDirectory = "plausible";
-            ExecStartPre = "@${pkgs.writeShellScript "plausible-setup" ''
-              set -eu -o pipefail
-              ${envSecrets}
-              ${pkgs.plausible}/createdb.sh
-              ${pkgs.plausible}/migrate.sh
-              ${optionalString cfg.adminUser.activate ''
-                if ! ${pkgs.plausible}/init-admin.sh | grep 'already exists'; then
-                  psql -d plausible <<< "UPDATE users SET email_verified=true;"
-                fi
-              ''}
-            ''} plausible-setup";
-            ExecStart = "@${pkgs.writeShellScript "plausible" ''
-              set -eu -o pipefail
-              ${envSecrets}
-              plausible start
-            ''} plausible";
+            LoadCredential = [
+              "ADMIN_USER_PWD:${cfg.adminUser.passwordFile}"
+              "SECRET_KEY_BASE:${cfg.server.secretKeybaseFile}"
+            ] ++ lib.optionals (cfg.mail.smtp.passwordFile != null) [ "SMTP_USER_PWD:${cfg.mail.smtp.passwordFile}" ]
+            ++ lib.optionals (cfg.releaseCookiePath != null) [ "RELEASE_COOKIE:${cfg.releaseCookiePath}" ];
           };
         };
       }
@@ -260,20 +266,22 @@ in {
         # `plausible' requires the `citext'-extension.
         plausible-postgres = {
           after = [ "postgresql.service" ];
-          bindsTo = [ "postgresql.service" ];
-          requiredBy = [ "plausible.service" ];
           partOf = [ "plausible.service" ];
-          serviceConfig.Type = "oneshot";
-          unitConfig.ConditionPathExists = "!/var/lib/plausible/.db-setup";
-          script = ''
-            mkdir -p /var/lib/plausible/
+          serviceConfig = {
+            Type = "oneshot";
+            User = config.services.postgresql.superUser;
+            RemainAfterExit = true;
+          };
+          script = with cfg.database.postgres; ''
             PSQL() {
-              /run/wrappers/bin/sudo -Hu postgres ${config.services.postgresql.package}/bin/psql --port=5432 "$@"
+              ${config.services.postgresql.package}/bin/psql --port=5432 "$@"
             }
-            PSQL -tAc "CREATE ROLE plausible WITH LOGIN;"
-            PSQL -tAc "CREATE DATABASE plausible WITH OWNER plausible;"
-            PSQL -d plausible -tAc "CREATE EXTENSION IF NOT EXISTS citext;"
-            touch /var/lib/plausible/.db-setup
+            # check if the database already exists
+            if ! PSQL -lqt | ${pkgs.coreutils}/bin/cut -d \| -f 1 | ${pkgs.gnugrep}/bin/grep -qw ${dbname} ; then
+              PSQL -tAc "CREATE ROLE plausible WITH LOGIN;"
+              PSQL -tAc "CREATE DATABASE ${dbname} WITH OWNER plausible;"
+              PSQL -d ${dbname} -tAc "CREATE EXTENSION IF NOT EXISTS citext;"
+            fi
           '';
         };
       })
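A configuration sketch matching the new LoadCredential handling (all file paths are placeholders):

    services.plausible = {
      enable = true;
      adminUser = {
        name = "admin";
        email = "admin@example.org";
        passwordFile = "/run/secrets/plausible-admin-password";
        activate = true;                   # mark the admin account's email as verified
      };
      server.secretKeybaseFile = "/run/secrets/plausible-secret-keybase";
      # optional: lets you attach to the running node remotely
      releaseCookiePath = "/run/secrets/plausible-release-cookie";
    };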
diff --git a/nixos/modules/services/web-apps/zabbix.nix b/nixos/modules/services/web-apps/zabbix.nix
index 21567896a89..ff50b95254f 100644
--- a/nixos/modules/services/web-apps/zabbix.nix
+++ b/nixos/modules/services/web-apps/zabbix.nix
@@ -21,7 +21,8 @@ let
     $DB['PORT'] = '${toString cfg.database.port}';
     $DB['DATABASE'] = '${cfg.database.name}';
     $DB['USER'] = '${cfg.database.user}';
-    $DB['PASSWORD'] = ${if cfg.database.passwordFile != null then "file_get_contents('${cfg.database.passwordFile}')" else "''"};
+    // NOTE: file_get_contents() adds a newline at the end of the returned string
+    $DB['PASSWORD'] = ${if cfg.database.passwordFile != null then "trim(file_get_contents('${cfg.database.passwordFile}'), \"\\r\\n\")" else "''"};
     // Schema name. Used for IBM DB2 and PostgreSQL.
     $DB['SCHEMA'] = ''';
     $ZBX_SERVER = '${cfg.server.address}';
diff --git a/nixos/modules/services/x11/clight.nix b/nixos/modules/services/x11/clight.nix
index 873f425fb8b..d994a658cba 100644
--- a/nixos/modules/services/x11/clight.nix
+++ b/nixos/modules/services/x11/clight.nix
@@ -71,6 +71,14 @@ in {
   };
 
   config = mkIf cfg.enable {
+    assertions = let
+      inRange = v: l: r: v >= l && v <= r;
+    in [
+      { assertion = config.location.provider == "manual" ->
+          inRange config.location.latitude (-90) 90 && inRange config.location.longitude (-180) 180;
+        message = "You must specify a valid latitude and longitude if manually providing location"; }
+    ];
+
     boot.kernelModules = [ "i2c_dev" ];
     environment.systemPackages = with pkgs; [ clight clightd ];
     services.dbus.packages = with pkgs; [ clight clightd ];
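The assertion only applies when the location provider is manual; a valid manual setup would look roughly like this (coordinates are placeholders):

    services.clight.enable = true;
    location = {
      provider  = "manual";
      latitude  = 52.5;    # must lie within [-90, 90]
      longitude = 13.4;    # must lie within [-180, 180]
    };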
diff --git a/nixos/modules/services/x11/display-managers/lightdm.nix b/nixos/modules/services/x11/display-managers/lightdm.nix
index 1c9a5f978c5..9a7532b4764 100644
--- a/nixos/modules/services/x11/display-managers/lightdm.nix
+++ b/nixos/modules/services/x11/display-managers/lightdm.nix
@@ -146,7 +146,7 @@ in
       };
 
       background = mkOption {
-        type = types.path;
+        type = types.either types.path (types.strMatching "^#[0-9]\{6\}$");
         # Manual cannot depend on packages, we are actually setting the default in config below.
         defaultText = literalExpression "pkgs.nixos-artwork.wallpapers.simple-dark-gray-bottom.gnomeFilePath";
         description = ''