Diffstat (limited to 'nixos')
-rw-r--r--  nixos/README.md | 34
-rw-r--r--  nixos/doc/manual/release-notes/rl-2305.section.md | 2
-rw-r--r--  nixos/doc/manual/release-notes/rl-2311.section.md | 37
-rw-r--r--  nixos/modules/config/stevenblack.nix | 2
-rw-r--r--  nixos/modules/installer/tools/nixos-generate-config.pl | 14
-rw-r--r--  nixos/modules/module-list.nix | 2
-rw-r--r--  nixos/modules/programs/ausweisapp.nix | 6
-rw-r--r--  nixos/modules/security/acme/default.nix | 4
-rw-r--r--  nixos/modules/security/sudo.nix | 10
-rw-r--r--  nixos/modules/services/audio/navidrome.nix | 7
-rw-r--r--  nixos/modules/services/audio/slimserver.nix | 2
-rw-r--r--  nixos/modules/services/databases/postgresql.md | 119
-rw-r--r--  nixos/modules/services/databases/postgresql.nix | 58
-rw-r--r--  nixos/modules/services/desktops/pipewire/pipewire.nix | 17
-rw-r--r--  nixos/modules/services/development/zammad.nix | 4
-rw-r--r--  nixos/modules/services/finance/odoo.nix | 2
-rw-r--r--  nixos/modules/services/hardware/fwupd.nix | 9
-rw-r--r--  nixos/modules/services/mail/listmonk.nix | 2
-rw-r--r--  nixos/modules/services/mail/mailman.nix | 5
-rw-r--r--  nixos/modules/services/mail/roundcube.nix | 14
-rw-r--r--  nixos/modules/services/mail/sympa.nix | 10
-rw-r--r--  nixos/modules/services/matrix/matrix-sliding-sync.nix | 4
-rw-r--r--  nixos/modules/services/matrix/mautrix-facebook.nix | 4
-rw-r--r--  nixos/modules/services/misc/atuin.nix | 4
-rw-r--r--  nixos/modules/services/misc/autofs.nix | 2
-rw-r--r--  nixos/modules/services/misc/forgejo.md | 79
-rw-r--r--  nixos/modules/services/misc/forgejo.nix | 11
-rw-r--r--  nixos/modules/services/misc/gitea.nix | 10
-rw-r--r--  nixos/modules/services/misc/redmine.nix | 4
-rw-r--r--  nixos/modules/services/misc/soft-serve.nix | 4
-rw-r--r--  nixos/modules/services/misc/sourcehut/service.nix | 10
-rw-r--r--  nixos/modules/services/monitoring/grafana.nix | 1
-rw-r--r--  nixos/modules/services/monitoring/parsedmarc.nix | 1
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters.nix | 46
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/exportarr.nix | 55
-rw-r--r--  nixos/modules/services/monitoring/zabbix-proxy.nix | 4
-rw-r--r--  nixos/modules/services/monitoring/zabbix-server.nix | 4
-rw-r--r--  nixos/modules/services/networking/harmonia.nix | 2
-rw-r--r--  nixos/modules/services/networking/ircd-hybrid/builder.sh | 2
-rw-r--r--  nixos/modules/services/networking/nix-serve.nix | 2
-rw-r--r--  nixos/modules/services/networking/ntp/chrony.nix | 39
-rw-r--r--  nixos/modules/services/networking/syncthing.nix | 4
-rw-r--r--  nixos/modules/services/networking/unbound.nix | 28
-rw-r--r--  nixos/modules/services/networking/unifi.nix | 82
-rw-r--r--  nixos/modules/services/printing/cupsd.nix | 7
-rw-r--r--  nixos/modules/services/search/kibana.nix | 213
-rw-r--r--  nixos/modules/services/search/opensearch.nix | 19
-rw-r--r--  nixos/modules/services/security/hockeypuck.nix | 2
-rw-r--r--  nixos/modules/services/torrent/flexget.nix | 1
-rw-r--r--  nixos/modules/services/web-apps/code-server.nix | 259
-rw-r--r--  nixos/modules/services/web-apps/coder.nix | 10
-rw-r--r--  nixos/modules/services/web-apps/gotosocial.nix | 4
-rw-r--r--  nixos/modules/services/web-apps/invidious.nix | 15
-rw-r--r--  nixos/modules/services/web-apps/jitsi-meet.nix | 178
-rw-r--r--  nixos/modules/services/web-apps/lemmy.nix | 2
-rw-r--r--  nixos/modules/services/web-apps/mastodon.nix | 138
-rw-r--r--  nixos/modules/services/web-apps/mediawiki.nix | 4
-rw-r--r--  nixos/modules/services/web-apps/miniflux.nix | 17
-rw-r--r--  nixos/modules/services/web-apps/mobilizon.nix | 13
-rw-r--r--  nixos/modules/services/web-apps/moodle.nix | 4
-rw-r--r--  nixos/modules/services/web-apps/netbox.nix | 4
-rw-r--r--  nixos/modules/services/web-apps/nextcloud.nix | 2
-rw-r--r--  nixos/modules/services/web-apps/onlyoffice.nix | 2
-rw-r--r--  nixos/modules/services/web-apps/outline.nix | 2
-rw-r--r--  nixos/modules/services/web-apps/peering-manager.nix | 4
-rw-r--r--  nixos/modules/services/web-apps/pixelfed.nix | 1
-rw-r--r--  nixos/modules/services/web-apps/plantuml-server.nix | 118
-rw-r--r--  nixos/modules/services/web-apps/plausible.nix | 52
-rw-r--r--  nixos/modules/services/web-apps/tt-rss.nix | 13
-rw-r--r--  nixos/modules/services/web-servers/hydron.nix | 2
-rw-r--r--  nixos/modules/services/web-servers/jboss/builder.sh | 2
-rw-r--r--  nixos/modules/system/boot/kernel.nix | 9
-rw-r--r--  nixos/modules/system/boot/loader/systemd-boot/systemd-boot-builder.py | 110
-rw-r--r--  nixos/modules/system/boot/loader/systemd-boot/systemd-boot.nix | 2
-rw-r--r--  nixos/modules/system/boot/systemd/initrd.nix | 2
-rw-r--r--  nixos/modules/tasks/filesystems/bcachefs.nix | 51
-rw-r--r--  nixos/modules/tasks/network-interfaces.nix | 2
-rw-r--r--  nixos/modules/testing/test-instrumentation.nix | 27
-rw-r--r--  nixos/modules/virtualisation/azure-agent.nix | 9
-rw-r--r--  nixos/modules/virtualisation/azure-image.nix | 37
-rw-r--r--  nixos/tests/all-tests.nix | 4
-rw-r--r--  nixos/tests/archi.nix | 31
-rw-r--r--  nixos/tests/code-server.nix | 22
-rw-r--r--  nixos/tests/dex-oidc.nix | 2
-rw-r--r--  nixos/tests/dnscrypt-wrapper/default.nix | 144
-rw-r--r--  nixos/tests/elk.nix | 14
-rw-r--r--  nixos/tests/ferretdb.nix | 2
-rw-r--r--  nixos/tests/freshrss-pgsql.nix | 4
-rw-r--r--  nixos/tests/grafana/basic.nix | 2
-rw-r--r--  nixos/tests/hockeypuck.nix | 2
-rw-r--r--  nixos/tests/home-assistant.nix | 12
-rw-r--r--  nixos/tests/invidious.nix | 3
-rw-r--r--  nixos/tests/paperless.nix | 2
-rw-r--r--  nixos/tests/pgadmin4.nix | 8
-rw-r--r--  nixos/tests/pgbouncer.nix | 10
-rw-r--r--  nixos/tests/plantuml-server.nix | 20
-rw-r--r--  nixos/tests/plausible.nix | 7
-rw-r--r--  nixos/tests/pleroma.nix | 9
-rw-r--r--  nixos/tests/powerdns-admin.nix | 4
-rw-r--r--  nixos/tests/prometheus-exporters.nix | 19
-rw-r--r--  nixos/tests/sftpgo.nix | 2
-rw-r--r--  nixos/tests/slimserver.nix | 47
-rw-r--r--  nixos/tests/systemd-boot.nix | 45
-rw-r--r--  nixos/tests/tandoor-recipes.nix | 23
-rw-r--r--  nixos/tests/vikunja.nix | 2
-rw-r--r--  nixos/tests/web-apps/mastodon/remote-postgresql.nix | 22
-rw-r--r--  nixos/tests/web-apps/mastodon/script.nix | 3
-rw-r--r--  nixos/tests/web-apps/mastodon/standard.nix | 4
-rw-r--r--  nixos/tests/wiki-js.nix | 5
-rw-r--r--  nixos/tests/wordpress.nix | 2
-rw-r--r--  nixos/tests/xmpp/ejabberd.nix | 2
111 files changed, 1537 insertions, 1040 deletions
diff --git a/nixos/README.md b/nixos/README.md
index b3cd9d234fa..07e82bf0ad9 100644
--- a/nixos/README.md
+++ b/nixos/README.md
@@ -8,6 +8,27 @@ https://nixos.org/nixos and in the manual in doc/manual.
 
 You can add new module to your NixOS configuration file (usually it’s `/etc/nixos/configuration.nix`). And do `sudo nixos-rebuild test -I nixpkgs=<path to your local nixpkgs folder> --fast`.
 
+## Commit conventions
+
+- Make sure you read about the [commit conventions](../CONTRIBUTING.md#commit-conventions) common to Nixpkgs as a whole.
+
+- Format the commit messages in the following way:
+
+  ```
+  nixos/(module): (init module | add setting | refactor | etc)
+
+  (Motivation for change. Link to release notes. Additional information.)
+  ```
+
+  Examples:
+
+  * nixos/hydra: add bazBaz option
+
+    Dual baz behavior is needed to do foo.
+  * nixos/nginx: refactor config generation
+
+    The old config generation system used impure shell scripts and could break in specific circumstances (see #1234).
+
 ## Reviewing contributions
 
 When changing the bootloader installation process, extra care must be taken. Grub installations cannot be rolled back, hence changes may break people’s installations forever. For any non-trivial change to the bootloader please file a PR asking for review, especially from \@edolstra.
@@ -21,12 +42,14 @@ Reviewing process:
 - Ensure that the module maintainers are notified.
   - [CODEOWNERS](https://help.github.com/articles/about-codeowners/) will make GitHub notify users based on the submitted changes, but it can happen that it misses some of the package maintainers.
 - Ensure that the module tests, if any, are succeeding.
+  - You may invoke OfBorg with `@ofborg test <module>` to build `nixosTests.<module>`
 - Ensure that the introduced options are correct.
   - Type should be appropriate (string related types differs in their merging capabilities, `loaOf` and `string` types are deprecated).
   - Description, default and example should be provided.
 - Ensure that option changes are backward compatible.
-  - `mkRenamedOptionModuleWith` provides a way to make option changes backward compatible.
-- Ensure that removed options are declared with `mkRemovedOptionModule`
+  - `mkRenamedOptionModuleWith` provides a way to make renamed options backward compatible (see the example below).
+  - Use `lib.versionAtLeast config.system.stateVersion "23.11"` on backward incompatible changes which may corrupt, change or update the state stored on existing setups.
+- Ensure that removed options are declared with `mkRemovedOptionModule`.
 - Ensure that changes that are not backward compatible are mentioned in release notes.
 - Ensure that documentations affected by the change is updated.
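+
+For example, a rename can typically be kept backward compatible with a snippet along these lines (the option paths here are hypothetical):
+
+```nix
+{
+  imports = [
+    (lib.mkRenamedOptionModuleWith {
+      sinceRelease = 2311;
+      from = [ "services" "foo" "bar" ];
+      to = [ "services" "foo" "baz" ];
+    })
+  ];
+}
+```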
 
@@ -55,6 +78,7 @@ New modules submissions introduce a new module to NixOS.
 
 Reviewing process:
 
+- Ensure that all file paths [fit the guidelines](../CONTRIBUTING.md#file-naming-and-organisation).
 - Ensure that the module tests, if any, are succeeding.
 - Ensure that the introduced options are correct.
   - Type should be appropriate (string related types differs in their merging capabilities, `loaOf` and `string` types are deprecated).
@@ -76,9 +100,9 @@ Sample template for a new module review is provided below.
 - [ ] options have default
 - [ ] options have example
 - [ ] options have descriptions
-- [ ] No unneeded package is added to environment.systemPackages
-- [ ] meta.maintainers is set
-- [ ] module documentation is declared in meta.doc
+- [ ] No unneeded package is added to `environment.systemPackages`
+- [ ] `meta.maintainers` is set
+- [ ] module documentation is declared in `meta.doc`
 
 ##### Possible improvements
 
diff --git a/nixos/doc/manual/release-notes/rl-2305.section.md b/nixos/doc/manual/release-notes/rl-2305.section.md
index 0b54b8b32a3..21c798b3b4a 100644
--- a/nixos/doc/manual/release-notes/rl-2305.section.md
+++ b/nixos/doc/manual/release-notes/rl-2305.section.md
@@ -660,5 +660,5 @@ If reloading the module is not an option, proceed to [Nuclear option](#sec-relea
 
 #### Nuclear option {#sec-release-23.05-migration-pipewire-nuclear}
 If all else fails, you can still manually copy the contents of the default configuration file
-from `${pkgs.pipewire.lib}/share/pipewire` to `/etc/pipewire` and edit it to fully override the default.
+from `${pkgs.pipewire}/share/pipewire` to `/etc/pipewire` and edit it to fully override the default.
 However, this should be done only as a last resort. Please talk to the Pipewire maintainers if you ever need to do this.
diff --git a/nixos/doc/manual/release-notes/rl-2311.section.md b/nixos/doc/manual/release-notes/rl-2311.section.md
index d838d33e35d..d12695e20de 100644
--- a/nixos/doc/manual/release-notes/rl-2311.section.md
+++ b/nixos/doc/manual/release-notes/rl-2311.section.md
@@ -104,6 +104,8 @@
 
 - [eris-server](https://codeberg.org/eris/eris-go). [ERIS](https://eris.codeberg.page/) is an encoding for immutable storage and this server provides block exchange as well as content decoding over HTTP and through a FUSE file-system. Available as [services.eris-server](#opt-services.eris-server.enable).
 
+- [forgejo](https://forgejo.org/), a git forge. Previously deployed as a drop-in replacement package in the [gitea module](#opt-services.gitea.package). Available as [services.forgejo](#opt-services.forgejo.enable). See migration instructions in the [NixOS manual](#module-forgejo) on how to migrate your forgejo instance using [`services.gitea.package = pkgs.forgejo`](#opt-services.gitea.package) to [`services.forgejo`](#opt-services.forgejo.enable).
+
 - hardware/infiniband.nix adds infiniband subnet manager support using an [opensm](https://github.com/linux-rdma/opensm) systemd-template service, instantiated on card guids. The module also adds kernel modules and cli tooling to help administrators debug and measure performance. Available as [hardware.infiniband.enable](#opt-hardware.infiniband.enable).
 
 - [zwave-js](https://github.com/zwave-js/zwave-js-server), a small server wrapper around Z-Wave JS to access it via a WebSocket. Available as [services.zwave-js](#opt-services.zwave-js.enable).
@@ -127,6 +129,8 @@
 
 - [ZITADEL](https://zitadel.com), a turnkey identity and access management platform. Available as [services.zitadel](#opt-services.zitadel.enable).
 
+- [exportarr](https://github.com/onedr0p/exportarr), Prometheus Exporters for Bazarr, Lidarr, Prowlarr, Radarr, Readarr, and Sonarr. Available as [services.prometheus.exporters.exportarr-bazarr](#opt-services.prometheus.exporters.exportarr-bazarr.enable)/[services.prometheus.exporters.exportarr-lidarr](#opt-services.prometheus.exporters.exportarr-lidarr.enable)/[services.prometheus.exporters.exportarr-prowlarr](#opt-services.prometheus.exporters.exportarr-prowlarr.enable)/[services.prometheus.exporters.exportarr-radarr](#opt-services.prometheus.exporters.exportarr-radarr.enable)/[services.prometheus.exporters.exportarr-readarr](#opt-services.prometheus.exporters.exportarr-readarr.enable)/[services.prometheus.exporters.exportarr-sonarr](#opt-services.prometheus.exporters.exportarr-sonarr.enable).
+
 - [netclient](https://github.com/gravitl/netclient), an automated WireGuard® Management Client. Available as [services.netclient](#opt-services.netclient.enable).
 
 - [trunk-ng](https://github.com/ctron/trunk), A fork of `trunk`: Build, bundle & ship your Rust WASM application to the web
@@ -141,6 +145,9 @@
 
 ## Backward Incompatibilities {#sec-release-23.11-incompatibilities}
 
+- `services.postgresql.ensurePermissions` has been deprecated in favor of `services.postgresql.ensureUsers.*.ensureDBOwnership`, which simplifies the setup of databases owned by a certain system user
+  in local database contexts (which make use of peer authentication via UNIX sockets). Migration guidelines are provided in the NixOS manual; please refer to them if you are affected by the PostgreSQL 15 change to how `GRANT ALL PRIVILEGES` works. `services.postgresql.ensurePermissions` will be removed in 24.05. All NixOS modules were migrated using one of the strategies, e.g. `ensureDBOwnership` or `postStart`. More about this situation can be found in https://github.com/NixOS/nixpkgs/pull/266270. A rough sketch of the migration is shown below.
+
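+  As a rough sketch (assuming a hypothetical service whose system user and database are both called `myservice`), a configuration that previously used
+
+  ```nix
+  services.postgresql.ensureUsers = [
+    {
+      name = "myservice";
+      ensurePermissions = { "DATABASE myservice" = "ALL PRIVILEGES"; };
+    }
+  ];
+  ```
+
+  can usually be migrated to
+
+  ```nix
+  services.postgresql.ensureDatabases = [ "myservice" ];
+  services.postgresql.ensureUsers = [
+    {
+      name = "myservice";
+      ensureDBOwnership = true;
+    }
+  ];
+  ```
+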
 - `network-online.target` has been fixed to no longer time out for systems with `networking.useDHCP = true` and `networking.useNetworkd = true`.
   Workarounds for this can be removed.
 
@@ -152,6 +159,8 @@
 
 - The latest version of `clonehero` now stores custom content in `~/.clonehero`. See the [migration instructions](https://clonehero.net/2022/11/29/v23-to-v1-migration-instructions.html). Typically, these content files would exist along side the binary, but the previous build used a wrapper script that would store them in `~/.config/unity3d/srylain Inc_/Clone Hero`.
 
+- `services.mastodon` doesn't support providing a TCP port to its `streaming` component anymore, as upstream implemented parallelization by running multiple instances instead of running multiple processes in one instance. Please create a PR if you are interested in this feature.
+
 - The `services.hostapd` module was rewritten to support `passwordFile` like options, WPA3-SAE, and management of multiple interfaces. This breaks compatibility with older configurations.
   - `hostapd` is now started with additional systemd sandbox/hardening options for better security.
   - `services.hostapd.interface` was replaced with a per-radio and per-bss configuration scheme using [services.hostapd.radios](#opt-services.hostapd.radios).
@@ -187,6 +196,8 @@
 
 - JACK tools (`jack_*` except `jack_control`) have moved from the `jack2` package to `jack-example-tools`
 
+- The `waagent` service does provisioning now
+
 - The `matrix-synapse` package & module have undergone some significant internal changes, for most setups no intervention is needed, though:
   - The option [`services.matrix-synapse.package`](#opt-services.matrix-synapse.package) is now read-only. For modifying the package, use an overlay which modifies `matrix-synapse-unwrapped` instead. More on that below.
   - The `enableSystemd` & `enableRedis` arguments have been removed and `matrix-synapse` has been renamed to `matrix-synapse-unwrapped`. Also, several optional dependencies (such as `psycopg2` or `authlib`) have been removed.
@@ -264,6 +275,18 @@
 
 - `fileSystems.<name>.autoResize` now uses `systemd-growfs` to resize the file system online in stage 2. This means that `f2fs` and `ext2` can no longer be auto resized, while `xfs` and `btrfs` now can be.
 
+- `fuse3` has been updated from 3.11.0 to 3.16.2; see [ChangeLog.rst](https://github.com/libfuse/libfuse/blob/fuse-3.16.2/ChangeLog.rst#libfuse-3162-2023-10-10) for an overview of the changes.
+
+  Unsupported mount options are no longer silently accepted [(since 3.15.0)](https://github.com/libfuse/libfuse/blob/fuse-3.16.2/ChangeLog.rst#libfuse-3150-2023-06-09). The [affected mount options](https://github.com/libfuse/libfuse/commit/dba6b3983af34f30de01cf532dff0b66f0ed6045) are: `atime`, `diratime`, `lazytime`, `nolazytime`, `relatime`, `norelatime`, `strictatime`.
+
+  For example,
+
+  ```bash
+  $ sshfs 127.0.0.1:/home/test/testdir /home/test/sshfs_mnt -o atime
+  ```
+
+  would previously terminate successfully with the mount point established; now it outputs the error message ``fuse: unknown option(s): `-o atime'`` and terminates with exit status 1.
+
 - `nixos-rebuild {switch,boot,test,dry-activate}` now runs the system activation inside `systemd-run`, creating an ephemeral systemd service and protecting the system switch against issues like network disconnections during remote (e.g. SSH) sessions. This has the side effect of running the switch in an isolated environment, that could possible break post-switch scripts that depends on things like environment variables being set. If you want to opt-out from this behavior for now, you may set the `NIXOS_SWITCH_USE_DIRTY_ENV` environment variable before running `nixos-rebuild`. However, keep in mind that this option will be removed in the future.
 
 - The `services.vaultwarden.config` option default value was changed to make Vaultwarden only listen on localhost, following the [secure defaults for most NixOS services](https://github.com/NixOS/nixpkgs/issues/100192).
@@ -300,7 +323,7 @@
 
 - The default `kops` version is now 1.28.0 and support for 1.25 and older has been dropped.
 
-- `pharo` has been updated to latest stable (PharoVM 10.0.5), which is compatible with the latest stable and oldstable images (Pharo 10 and 11). The VM in question is the 64bit Spur. The 32bit version has been dropped due to lack of maintenance. The Cog VM has been deleted because it is severily outdated. Finally, the `pharo-launcher` package has been deleted because it was not compatible with the newer VM, and due to lack of maintenance.
+- `pharo` has been updated to latest stable (PharoVM 10.0.8), which is compatible with the latest stable and oldstable images (Pharo 10 and 11). The VM in question is the 64bit Spur. The 32bit version has been dropped due to lack of maintenance. The Cog VM has been deleted because it is severely outdated. Finally, the `pharo-launcher` package has been deleted because it was not compatible with the newer VM, and due to lack of maintenance.
 
 - Emacs mainline version 29 was introduced. This new version includes many major additions, most notably `tree-sitter` support (enabled by default) and the pgtk variant (useful for Wayland users), which is available under the attribute `emacs29-pgtk`.
 
@@ -321,6 +344,8 @@
 
 - Package `pash` was removed due to being archived upstream. Use `powershell` as an alternative.
 
+- The option `services.plausible.releaseCookiePath` has been removed: Plausible does not use any distributed Erlang features, and does not plan to (see [discussion](https://github.com/NixOS/nixpkgs/pull/130297#issuecomment-1805851333)), so NixOS now disables them, and the Erlang cookie becomes unnecessary. You may delete the file that `releaseCookiePath` was set to.
+
 - `security.sudo.extraRules` now includes `root`'s default rule, with ordering
   priority 400. This is functionally identical for users not specifying rule
   order, or relying on `mkBefore` and `mkAfter`, but may impact users calling
@@ -365,6 +390,8 @@
 
 - The `prayer` package as well as `services.prayer` have been removed because it's been unmaintained for several years and the author's website has vanished.
 
+- The `chrony` NixOS module now tracks the Real-Time Clock drift from the System Clock with `rtcfile` and automatically adjusts it with `rtcautotrim` when it exceeds the maximum error specified in `services.chrony.autotrimThreshold` (default 30 seconds). If you enabled `rtcsync` in `extraConfig`, you should remove RTC related options from `extraConfig`. If you do not want chrony configured to keep the RTC in check, you can set `services.chrony.enableRTCTrimming = false;`
+
 ## Other Notable Changes {#sec-release-23.11-notable-changes}
 
 - A new option `system.switch.enable` was added. By default, this is option is
@@ -519,8 +546,14 @@ The module update takes care of the new config syntax and the data itself (user
 
 - The Home Assistant module now offers support for installing custom components and lovelace modules. Available at [`services.home-assistant.customComponents`](#opt-services.home-assistant.customComponents) and [`services.home-assistant.customLovelaceModules`](#opt-services.home-assistant.customLovelaceModules).
 
+- The argument `vendorSha256` of `buildGoModule` is deprecated. Use `vendorHash` instead. ([\#259999](https://github.com/NixOS/nixpkgs/pull/259999))
+
 ## Nixpkgs internals {#sec-release-23.11-nixpkgs-internals}
 
+- Node.js v14 and v16 have been removed, as they were end-of-life. Any dependent packages that contributors were not able to reasonably upgrade were **removed** after a month of notice to their maintainers.
+  - This includes VSCode Server.
+  - This includes Kibana 7 as the ELK stack is unmaintained in nixpkgs and is marked for slow removal.
+
 - The use of `sourceRoot = "source";`, `sourceRoot = "source/subdir";`, and similar lines in package derivations using the default `unpackPhase` is deprecated as it requires `unpackPhase` to always produce a directory named "source". Use `sourceRoot = src.name`, `sourceRoot = "${src.name}/subdir";`, or `setSourceRoot = "sourceRoot=$(echo */subdir)";` or similar instead.
 
 - The `django` alias in the python package set was upgraded to Django 4.x.
@@ -563,4 +596,6 @@ The module update takes care of the new config syntax and the data itself (user
 
 - The Linux kernel module `msr` (see [`msr(4)`](https://man7.org/linux/man-pages/man4/msr.4.html)), which provides an interface to read and write the model-specific registers (MSRs) of an x86 CPU, can now be configured via `hardware.cpu.x86.msr`.
 
+- Docker now defaults to 24, as 20.10 will stop receiving security updates and bug fixes after [December 10, 2023](https://github.com/moby/moby/discussions/45104).
+
 - There is a new NixOS option when writing NixOS tests `testing.initrdBackdoor`, that enables `backdoor.service` in initrd. Requires `boot.initrd.systemd.enable` to be enabled. Boot will pause in stage 1 at `initrd.target`, and will listen for commands from the `Machine` python interface, just like stage 2 normally does. This enables commands to be sent to test and debug stage 1. Use `machine.switch_root()` to leave stage 1 and proceed to stage 2.
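+
+  A rough sketch of how this might look in a test (the node name, commands and assertions here are illustrative only, not taken from an existing test):
+
+  ```nix
+  {
+    nodes.machine = {
+      testing.initrdBackdoor = true;
+      boot.initrd.systemd.enable = true;
+    };
+
+    testScript = ''
+      machine.start()
+      # Stage 1 is now paused at initrd.target; commands sent here run inside the initrd.
+      machine.succeed("systemctl list-units --no-pager")
+      # Leave stage 1 and continue booting into stage 2.
+      machine.switch_root()
+      machine.wait_for_unit("multi-user.target")
+    '';
+  }
+  ```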
diff --git a/nixos/modules/config/stevenblack.nix b/nixos/modules/config/stevenblack.nix
index 30ef7ff259f..7e623516984 100644
--- a/nixos/modules/config/stevenblack.nix
+++ b/nixos/modules/config/stevenblack.nix
@@ -30,5 +30,5 @@ in
       ++ optionals (activatedHosts == [ ]) [ "${pkgs.stevenblack-blocklist}/hosts" ];
   };
 
-  meta.maintainers = [ maintainers.fortuneteller2k maintainers.artturin ];
+  meta.maintainers = [ maintainers.moni maintainers.artturin ];
 }
diff --git a/nixos/modules/installer/tools/nixos-generate-config.pl b/nixos/modules/installer/tools/nixos-generate-config.pl
index 71737cd8ebc..2f9edba4f0c 100644
--- a/nixos/modules/installer/tools/nixos-generate-config.pl
+++ b/nixos/modules/installer/tools/nixos-generate-config.pl
@@ -257,6 +257,7 @@ foreach my $path (glob "/sys/class/{block,mmc_host}/*") {
 
 # Add bcache module, if needed.
 my @bcacheDevices = glob("/dev/bcache*");
+@bcacheDevices = grep { $_ !~ m#dev/bcachefs.*# } @bcacheDevices;
 if (scalar @bcacheDevices > 0) {
     push @initrdAvailableKernelModules, "bcache";
 }
@@ -467,6 +468,19 @@ EOF
     # boot.tmp.useTmpfs option in configuration.nix (managed declaratively).
     next if ($mountPoint eq "/tmp" && $fsType eq "tmpfs");
 
+    # This should work for single and multi-device systems.
+    # still needs subvolume support
+    if ($fsType eq "bcachefs") {
+        my ($status, @info) = runCommand("bcachefs fs usage $rootDir$mountPoint");
+        my $UUID = $info[0];
+
+        if ($status == 0 && $UUID =~ /^Filesystem:[ \t\n]*([0-9a-z-]+)/) {
+            $stableDevPath = "UUID=$1";
+        } else {
+            print STDERR "warning: can't find bcachefs mount UUID falling back to device-path";
+        }
+    }
+
     # Emit the filesystem.
     $fileSystems .= <<EOF;
   fileSystems.\"$mountPoint\" =
diff --git a/nixos/modules/module-list.nix b/nixos/modules/module-list.nix
index 00da6399295..d02c5b593b3 100644
--- a/nixos/modules/module-list.nix
+++ b/nixos/modules/module-list.nix
@@ -1147,7 +1147,6 @@
   ./services/search/elasticsearch-curator.nix
   ./services/search/elasticsearch.nix
   ./services/search/hound.nix
-  ./services/search/kibana.nix
   ./services/search/meilisearch.nix
   ./services/search/opensearch.nix
   ./services/search/qdrant.nix
@@ -1241,7 +1240,6 @@
   ./services/web-apps/changedetection-io.nix
   ./services/web-apps/chatgpt-retrieval-plugin.nix
   ./services/web-apps/cloudlog.nix
-  ./services/web-apps/code-server.nix
   ./services/web-apps/convos.nix
   ./services/web-apps/dex.nix
   ./services/web-apps/discourse.nix
diff --git a/nixos/modules/programs/ausweisapp.nix b/nixos/modules/programs/ausweisapp.nix
index ef1f059568c..91870df2024 100644
--- a/nixos/modules/programs/ausweisapp.nix
+++ b/nixos/modules/programs/ausweisapp.nix
@@ -7,11 +7,11 @@ let
 in
 {
   options.programs.ausweisapp = {
-    enable = mkEnableOption (lib.mdDoc "AusweisApp2");
+    enable = mkEnableOption (lib.mdDoc "AusweisApp");
 
     openFirewall = mkOption {
       description = lib.mdDoc ''
-        Whether to open the required firewall ports for the Smartphone as Card Reader (SaC) functionality of AusweisApp2.
+        Whether to open the required firewall ports for the Smartphone as Card Reader (SaC) functionality of AusweisApp.
       '';
       default = false;
       type = lib.types.bool;
@@ -19,7 +19,7 @@ in
   };
 
   config = mkIf cfg.enable {
-    environment.systemPackages = with pkgs; [ AusweisApp2 ];
+    environment.systemPackages = with pkgs; [ ausweisapp ];
     networking.firewall.allowedUDPPorts = lib.optionals cfg.openFirewall [ 24727 ];
   };
 }
diff --git a/nixos/modules/security/acme/default.nix b/nixos/modules/security/acme/default.nix
index 932bf3e7911..7cc302969fb 100644
--- a/nixos/modules/security/acme/default.nix
+++ b/nixos/modules/security/acme/default.nix
@@ -345,6 +345,10 @@ let
       serviceConfig = commonServiceConfig // {
         Group = data.group;
 
+        # Let's Encrypt's Failed Validation limit allows 5 failures per account, per hostname, per hour.
+        # This avoids eating them all up if something is misconfigured upon the first try.
+        RestartSec = 15 * 60;
+
         # Keep in mind that these directories will be deleted if the user runs
         # systemctl clean --what=state
         # acme/.lego/${cert} is listed for this reason.
diff --git a/nixos/modules/security/sudo.nix b/nixos/modules/security/sudo.nix
index ff912dec507..3dd5d2e525d 100644
--- a/nixos/modules/security/sudo.nix
+++ b/nixos/modules/security/sudo.nix
@@ -192,10 +192,12 @@ in
   ###### implementation
 
   config = mkIf cfg.enable {
-    assertions = [
-      { assertion = cfg.package.pname != "sudo-rs";
-        message = "The NixOS `sudo` module does not work with `sudo-rs` yet."; }
-    ];
+    assertions = [ {
+      assertion = cfg.package.pname != "sudo-rs";
+      message = ''
+        NixOS' `sudo` module does not support `sudo-rs`; see `security.sudo-rs` instead.
+      '';
+    } ];
 
     security.sudo.extraRules =
       let
diff --git a/nixos/modules/services/audio/navidrome.nix b/nixos/modules/services/audio/navidrome.nix
index e18e61eb6d4..77a0e74af9c 100644
--- a/nixos/modules/services/audio/navidrome.nix
+++ b/nixos/modules/services/audio/navidrome.nix
@@ -28,10 +28,17 @@ in {
         '';
       };
 
+      openFirewall = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether to open the TCP port in the firewall";
+      };
     };
   };
 
   config = mkIf cfg.enable {
+    networking.firewall.allowedTCPPorts = mkIf cfg.openFirewall [cfg.settings.Port];
+
     systemd.services.navidrome = {
       description = "Navidrome Media Server";
       after = [ "network.target" ];
diff --git a/nixos/modules/services/audio/slimserver.nix b/nixos/modules/services/audio/slimserver.nix
index 9fbc68b7136..cdd9d551c50 100644
--- a/nixos/modules/services/audio/slimserver.nix
+++ b/nixos/modules/services/audio/slimserver.nix
@@ -54,7 +54,7 @@ in {
       serviceConfig = {
         User = "slimserver";
         # Issue 40589: Disable broken image/video support (audio still works!)
-        ExecStart = "${cfg.package}/slimserver.pl --logdir ${cfg.dataDir}/logs --prefsdir ${cfg.dataDir}/prefs --cachedir ${cfg.dataDir}/cache --noimage --novideo";
+        ExecStart = "${lib.getExe cfg.package} --logdir ${cfg.dataDir}/logs --prefsdir ${cfg.dataDir}/prefs --cachedir ${cfg.dataDir}/cache --noimage --novideo";
       };
     };
 
diff --git a/nixos/modules/services/databases/postgresql.md b/nixos/modules/services/databases/postgresql.md
index d65d9616e2f..e5e0b7efec2 100644
--- a/nixos/modules/services/databases/postgresql.md
+++ b/nixos/modules/services/databases/postgresql.md
@@ -39,6 +39,125 @@ By default, PostgreSQL stores its databases in {file}`/var/lib/postgresql/$psqlS
 services.postgresql.dataDir = "/data/postgresql";
 ```
 
+## Initializing {#module-services-postgres-initializing}
+
+As of NixOS 23.11,
+`services.postgresql.ensureUsers.*.ensurePermissions` has been
+deprecated, after a change to default permissions in PostgreSQL 15
+invalidated most of its previous use cases:
+
+- In psql < 15, `ALL PRIVILEGES` used to include `CREATE TABLE`, whereas
+  in psql >= 15 that is a separate permission
+- psql >= 15 instead gives only the database owner create permissions
+- Even on psql < 15 (or databases migrated to >= 15), it is
+  recommended to manually assign permissions along these lines
+  - https://www.postgresql.org/docs/release/15.0/
+  - https://www.postgresql.org/docs/15/ddl-schemas.html#DDL-SCHEMAS-PRIV
+
+### Assigning ownership {#module-services-postgres-initializing-ownership}
+
+Usually, the database owner should be a database user of the same
+name. This can be done with
+`services.postgresql.ensureUsers.*.ensureDBOwnership = true;`.
+
+If the database user name equals the connecting system user name,
+postgres by default will accept a passwordless connection via unix
+domain socket. This makes it possible to run many postgres-backed
+services without creating any database secrets at all.
+
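+A minimal sketch of this pattern (assuming a hypothetical service whose system
+user and database are both called `service1`) could look like:
+
+```nix
+services.postgresql = {
+  enable = true;
+  ensureDatabases = [ "service1" ];
+  ensureUsers = [
+    {
+      name = "service1";
+      ensureDBOwnership = true;
+    }
+  ];
+};
+```
+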
+### Assigning extra permissions {#module-services-postgres-initializing-extra-permissions}
+
+For many cases, it will be enough to have the database user be the
+owner. Until `services.postgresql.ensureUsers.*.ensurePermissions` has
+been re-thought, if more users need access to the database, please use
+one of the following approaches:
+
+**WARNING:** `services.postgresql.initialScript` is not recommended
+for `ensurePermissions` replacement, as that is *only run on first
+start of PostgreSQL*.
+
+**NOTE:** All of these methods may be obsoleted when `ensure*` is
+reworked, but it is expected that they will stay viable for running
+database migrations.
+
+**NOTE:** please make sure that any added migrations are idempotent (re-runnable).
+
+#### as superuser {#module-services-postgres-initializing-extra-permissions-superuser}
+
+**Advantage:** compatible with postgres < 15, because it's run
+as the database superuser `postgres`.
+
+##### in database `postStart` {#module-services-postgres-initializing-extra-permissions-superuser-post-start}
+
+**Disadvantage:** need to take care of ordering yourself. In this
+example, `mkAfter` ensures that permissions are assigned after any
+databases from `ensureDatabases` and `extraUser1` from `ensureUsers`
+are already created.
+
+```nix
+    systemd.services.postgresql.postStart = lib.mkAfter ''
+      $PSQL service1 -c 'GRANT SELECT ON ALL TABLES IN SCHEMA public TO "extraUser1"'
+      $PSQL service1 -c 'GRANT SELECT ON ALL SEQUENCES IN SCHEMA public TO "extraUser1"'
+      # ....
+    '';
+```
+
+##### in intermediate oneshot service {#module-services-postgres-initializing-extra-permissions-superuser-oneshot}
+
+```nix
+    systemd.services."migrate-service1-db1" = {
+      serviceConfig.Type = "oneshot";
+      requiredBy = [ "service1.service" ];
+      before = [ "service1.service" ];
+      after = [ "postgresql.service" ];
+      serviceConfig.User = "postgres";
+      environment.PSQL = "psql --port=${toString services.postgresql.port}";
+      path = [ postgresql ];
+      script = ''
+        $PSQL service1 -c 'GRANT SELECT ON ALL TABLES IN SCHEMA public TO "extraUser1"'
+        $PSQL service1 -c 'GRANT SELECT ON ALL SEQUENCES IN SCHEMA public TO "extraUser1"'
+        # ....
+      '';
+    };
+```
+
+#### as service user {#module-services-postgres-initializing-extra-permissions-service-user}
+
+**Advantage:** re-uses systemd's dependency ordering;
+
+**Disadvantage:** relies on service user having grant permission. To be combined with `ensureDBOwnership`.
+
+##### in service `preStart` {#module-services-postgres-initializing-extra-permissions-service-user-pre-start}
+
+```nix
+    systemd.services."service1" = {
+      environment.PSQL = "psql --port=${toString services.postgresql.port}";
+      path = [ postgresql ];
+      preStart = ''
+        $PSQL -c 'GRANT SELECT ON ALL TABLES IN SCHEMA public TO "extraUser1"'
+        $PSQL -c 'GRANT SELECT ON ALL SEQUENCES IN SCHEMA public TO "extraUser1"'
+        # ....
+      '';
+    };
+```
+
+##### in intermediate oneshot service {#module-services-postgres-initializing-extra-permissions-service-user-oneshot}
+
+```nix
+    systemd.services."migrate-service1-db1" = {
+      serviceConfig.Type = "oneshot";
+      requiredBy = [ "service1.service" ];
+      before = [ "service1.service" ];
+      after = [ "postgresql.service" ];
+      serviceConfig.User = "service1";
+      environment.PSQL = "psql --port=${toString services.postgresql.port}";
+      path = [ postgresql ];
+      script = ''
+        $PSQL -c 'GRANT SELECT ON ALL TABLES IN SCHEMA public TO "extraUser1"'
+        $PSQL -c 'GRANT SELECT ON ALL SEQUENCES IN SCHEMA public TO "extraUser1"'
+        # ....
+      '';
+    };
+```
+
 ## Upgrading {#module-services-postgres-upgrading}
 
 ::: {.note}
diff --git a/nixos/modules/services/databases/postgresql.nix b/nixos/modules/services/databases/postgresql.nix
index af4db5c9611..a9067d5974a 100644
--- a/nixos/modules/services/databases/postgresql.nix
+++ b/nixos/modules/services/databases/postgresql.nix
@@ -168,7 +168,12 @@ in
             ensurePermissions = mkOption {
               type = types.attrsOf types.str;
               default = {};
+              visible = false; # This option has been deprecated.
               description = lib.mdDoc ''
+                This option is DEPRECATED and should not be used in nixpkgs anymore,
+                use `ensureDBOwnership` instead. It can also break with newer
+                versions of PostgreSQL (≥ 15).
+
                 Permissions to ensure for the user, specified as an attribute set.
                 The attribute names specify the database and tables to grant the permissions for.
                 The attribute values specify the permissions to grant. You may specify one or
@@ -187,6 +192,16 @@ in
               '';
             };
 
+            ensureDBOwnership = mkOption {
+              type = types.bool;
+              default = false;
+              description = mdDoc ''
+                Grants the user ownership to a database with the same name.
+                This database must be defined manually in
+                [](#opt-services.postgresql.ensureDatabases).
+              '';
+            };
+
             ensureClauses = mkOption {
               description = lib.mdDoc ''
                 An attrset of clauses to grant to the user. Under the hood this uses the
@@ -338,26 +353,21 @@ in
         });
         default = [];
         description = lib.mdDoc ''
-          Ensures that the specified users exist and have at least the ensured permissions.
+          Ensures that the specified users exist.
           The PostgreSQL users will be identified using peer authentication. This authenticates the Unix user with the
           same name only, and that without the need for a password.
-          This option will never delete existing users or remove permissions, especially not when the value of this
-          option is changed. This means that users created and permissions assigned once through this option or
-          otherwise have to be removed manually.
+          This option will never delete existing users or remove DB ownership of databases
+          once granted with `ensureDBOwnership = true;`. This means that this must be
+          cleaned up manually after changing the config here.
         '';
         example = literalExpression ''
           [
             {
               name = "nextcloud";
-              ensurePermissions = {
-                "DATABASE nextcloud" = "ALL PRIVILEGES";
-              };
             }
             {
               name = "superuser";
-              ensurePermissions = {
-                "ALL TABLES IN SCHEMA public" = "ALL PRIVILEGES";
-              };
+              ensureDBOwnership = true;
             }
           ]
         '';
@@ -445,6 +455,27 @@ in
 
   config = mkIf cfg.enable {
 
+    assertions = map ({ name, ensureDBOwnership, ... }: {
+      assertion = ensureDBOwnership -> builtins.elem name cfg.ensureDatabases;
+      message = ''
+        For each database user defined with `services.postgresql.ensureUsers` and
+        `ensureDBOwnership = true;`, a database with the same name must be defined
+        in `services.postgresql.ensureDatabases`.
+
+        Offender: ${name} has not been found among databases.
+      '';
+    }) cfg.ensureUsers;
+    # `ensurePermissions` is now deprecated, let's avoid it.
+    warnings = lib.optional (any ({ ensurePermissions, ... }: ensurePermissions != {}) cfg.ensureUsers) "
+      `services.postgresql.*.ensurePermissions` is used in your expressions,
+      this option is known to be broken with newer PostgreSQL versions,
+      consider migrating to `services.postgresql.*.ensureDBOwnership` or
+      consult the release notes or manual for more migration guidelines.
+
+      This option will be removed in NixOS 24.05 unless it sees significant
+      maintenance improvements.
+    ";
+
     services.postgresql.settings =
       {
         hba_file = "${pkgs.writeText "pg_hba.conf" cfg.authentication}";
@@ -556,12 +587,15 @@ in
             ${
               concatMapStrings
               (user:
-                let
+              let
                   userPermissions = concatStringsSep "\n"
                     (mapAttrsToList
                       (database: permission: ''$PSQL -tAc 'GRANT ${permission} ON ${database} TO "${user.name}"' '')
                       user.ensurePermissions
                     );
+                  dbOwnershipStmt = optionalString
+                    user.ensureDBOwnership
+                    ''$PSQL -tAc 'ALTER DATABASE "${user.name}" OWNER TO "${user.name}";' '';
 
                   filteredClauses = filterAttrs (name: value: value != null) user.ensureClauses;
 
@@ -572,6 +606,8 @@ in
                   $PSQL -tAc "SELECT 1 FROM pg_roles WHERE rolname='${user.name}'" | grep -q 1 || $PSQL -tAc 'CREATE USER "${user.name}"'
                   ${userPermissions}
                   ${userClauses}
+
+                  ${dbOwnershipStmt}
                 ''
               )
               cfg.ensureUsers
diff --git a/nixos/modules/services/desktops/pipewire/pipewire.nix b/nixos/modules/services/desktops/pipewire/pipewire.nix
index ae695baf42c..07ca2727cf4 100644
--- a/nixos/modules/services/desktops/pipewire/pipewire.nix
+++ b/nixos/modules/services/desktops/pipewire/pipewire.nix
@@ -115,8 +115,7 @@ in {
     environment.systemPackages = [ cfg.package ]
                                  ++ lib.optional cfg.jack.enable jack-libs;
 
-    systemd.packages = [ cfg.package ]
-                       ++ lib.optional cfg.pulse.enable cfg.package.pulse;
+    systemd.packages = [ cfg.package ];
 
     # PipeWire depends on DBUS but doesn't list it. Without this booting
     # into a terminal results in the service crashing with an error.
@@ -130,9 +129,13 @@ in {
     systemd.user.sockets.pipewire.enable = !cfg.systemWide;
     systemd.user.services.pipewire.enable = !cfg.systemWide;
 
+    # Mask pw-pulse if it's not wanted
+    systemd.user.services.pipewire-pulse.enable = cfg.pulse.enable;
+    systemd.user.sockets.pipewire-pulse.enable = cfg.pulse.enable;
+
     systemd.sockets.pipewire.wantedBy = lib.mkIf cfg.socketActivation [ "sockets.target" ];
     systemd.user.sockets.pipewire.wantedBy = lib.mkIf cfg.socketActivation [ "sockets.target" ];
-    systemd.user.sockets.pipewire-pulse.wantedBy = lib.mkIf (cfg.socketActivation && cfg.pulse.enable) ["sockets.target"];
+    systemd.user.sockets.pipewire-pulse.wantedBy = lib.mkIf cfg.socketActivation [ "sockets.target" ];
 
     services.udev.packages = [ cfg.package ];
 
@@ -140,14 +143,14 @@ in {
     environment.etc."alsa/conf.d/49-pipewire-modules.conf" = mkIf cfg.alsa.enable {
       text = ''
         pcm_type.pipewire {
-          libs.native = ${cfg.package.lib}/lib/alsa-lib/libasound_module_pcm_pipewire.so ;
+          libs.native = ${cfg.package}/lib/alsa-lib/libasound_module_pcm_pipewire.so ;
           ${optionalString enable32BitAlsaPlugins
-            "libs.32Bit = ${pkgs.pkgsi686Linux.pipewire.lib}/lib/alsa-lib/libasound_module_pcm_pipewire.so ;"}
+            "libs.32Bit = ${pkgs.pkgsi686Linux.pipewire}/lib/alsa-lib/libasound_module_pcm_pipewire.so ;"}
         }
         ctl_type.pipewire {
-          libs.native = ${cfg.package.lib}/lib/alsa-lib/libasound_module_ctl_pipewire.so ;
+          libs.native = ${cfg.package}/lib/alsa-lib/libasound_module_ctl_pipewire.so ;
           ${optionalString enable32BitAlsaPlugins
-            "libs.32Bit = ${pkgs.pkgsi686Linux.pipewire.lib}/lib/alsa-lib/libasound_module_ctl_pipewire.so ;"}
+            "libs.32Bit = ${pkgs.pkgsi686Linux.pipewire}/lib/alsa-lib/libasound_module_ctl_pipewire.so ;"}
         }
       '';
     };
diff --git a/nixos/modules/services/development/zammad.nix b/nixos/modules/services/development/zammad.nix
index 7dd143eebf1..d24ed24ef39 100644
--- a/nixos/modules/services/development/zammad.nix
+++ b/nixos/modules/services/development/zammad.nix
@@ -204,7 +204,7 @@ in
 
     assertions = [
       {
-        assertion = cfg.database.createLocally -> cfg.database.user == "zammad";
+        assertion = cfg.database.createLocally -> cfg.database.user == "zammad" && cfg.database.name == "zammad";
         message = "services.zammad.database.user must be set to \"zammad\" if services.zammad.database.createLocally is set to true";
       }
       {
@@ -231,7 +231,7 @@ in
       ensureUsers = [
         {
           name = cfg.database.user;
-          ensurePermissions = { "DATABASE ${cfg.database.name}" = "ALL PRIVILEGES"; };
+          ensureDBOwnership = true;
         }
       ];
     };
diff --git a/nixos/modules/services/finance/odoo.nix b/nixos/modules/services/finance/odoo.nix
index eec7c4e30cc..b8574ed09af 100644
--- a/nixos/modules/services/finance/odoo.nix
+++ b/nixos/modules/services/finance/odoo.nix
@@ -121,7 +121,7 @@ in
       ensureDatabases = [ "odoo" ];
       ensureUsers = [{
         name = "odoo";
-        ensurePermissions = { "DATABASE odoo" = "ALL PRIVILEGES"; };
+        ensureDBOwnership = true;
       }];
     };
   });
diff --git a/nixos/modules/services/hardware/fwupd.nix b/nixos/modules/services/hardware/fwupd.nix
index 7a938459d0c..7b6c336bd22 100644
--- a/nixos/modules/services/hardware/fwupd.nix
+++ b/nixos/modules/services/hardware/fwupd.nix
@@ -187,13 +187,20 @@ in {
       # fwupd-refresh expects a user that we do not create, so just run with DynamicUser
       # instead and ensure we take ownership of /var/lib/fwupd
       services.fwupd-refresh.serviceConfig = {
-        DynamicUser = true;
         StateDirectory = "fwupd";
+        # Better for debugging, upstream sets stderr to null for some reason..
+        StandardError = "inherit";
       };
 
       timers.fwupd-refresh.wantedBy = [ "timers.target" ];
     };
 
+    users.users.fwupd-refresh = {
+      isSystemUser = true;
+      group = "fwupd-refresh";
+    };
+    users.groups.fwupd-refresh = {};
+
     security.polkit.enable = true;
   };
 
diff --git a/nixos/modules/services/mail/listmonk.nix b/nixos/modules/services/mail/listmonk.nix
index 11b2a518622..cea1bc95608 100644
--- a/nixos/modules/services/mail/listmonk.nix
+++ b/nixos/modules/services/mail/listmonk.nix
@@ -168,7 +168,7 @@ in {
 
       ensureUsers = [{
         name = "listmonk";
-        ensurePermissions = { "DATABASE listmonk" = "ALL PRIVILEGES"; };
+        ensureDBOwnership = true;
       }];
 
       ensureDatabases = [ "listmonk" ];
diff --git a/nixos/modules/services/mail/mailman.nix b/nixos/modules/services/mail/mailman.nix
index a7e8aee1f2a..76035625fbe 100644
--- a/nixos/modules/services/mail/mailman.nix
+++ b/nixos/modules/services/mail/mailman.nix
@@ -493,6 +493,9 @@ in {
           RuntimeDirectory = "mailman";
           LogsDirectory = "mailman";
           PIDFile = "/run/mailman/master.pid";
+          Restart = "on-failure";
+          TimeoutStartSec = 180;
+          TimeoutStopSec = 180;
         };
       };
 
@@ -596,6 +599,7 @@ in {
           User = cfg.webUser;
           Group = "mailman";
           RuntimeDirectory = "mailman-uwsgi";
+          Restart = "on-failure";
         };
       });
 
@@ -620,6 +624,7 @@ in {
           User = cfg.webUser;
           Group = "mailman";
           WorkingDirectory = "/var/lib/mailman-web";
+          Restart = "on-failure";
         };
       };
     } // flip lib.mapAttrs' {
diff --git a/nixos/modules/services/mail/roundcube.nix b/nixos/modules/services/mail/roundcube.nix
index 22a4e3c451a..4e29f567ed9 100644
--- a/nixos/modules/services/mail/roundcube.nix
+++ b/nixos/modules/services/mail/roundcube.nix
@@ -179,14 +179,22 @@ in
       };
     };
 
+    assertions = [
+      {
+        assertion = localDB -> cfg.database.username == cfg.database.dbname;
+        message = ''
+          When setting up a DB and its owner user, the owner and the DB name must be
+          equal!
+        '';
+      }
+    ];
+
     services.postgresql = mkIf localDB {
       enable = true;
       ensureDatabases = [ cfg.database.dbname ];
       ensureUsers = [ {
         name = cfg.database.username;
-        ensurePermissions = {
-          "DATABASE ${cfg.database.username}" = "ALL PRIVILEGES";
-        };
+        ensureDBOwnership = true;
       } ];
     };
 
diff --git a/nixos/modules/services/mail/sympa.nix b/nixos/modules/services/mail/sympa.nix
index 7a5047b2bea..04ae46f66ee 100644
--- a/nixos/modules/services/mail/sympa.nix
+++ b/nixos/modules/services/mail/sympa.nix
@@ -218,7 +218,7 @@ in
         default = null;
         example = "/run/keys/sympa-dbpassword";
         description = lib.mdDoc ''
-          A file containing the password for {option}`services.sympa.database.user`.
+          A file containing the password for {option}`services.sympa.database.name`.
         '';
       };
 
@@ -342,6 +342,7 @@ in
 
       db_type = cfg.database.type;
       db_name = cfg.database.name;
+      db_user = cfg.database.name;
     }
     // (optionalAttrs (cfg.database.host != null) {
       db_host = cfg.database.host;
@@ -355,9 +356,6 @@ in
     // (optionalAttrs (cfg.database.port != null) {
       db_port = cfg.database.port;
     })
-    // (optionalAttrs (cfg.database.user != null) {
-      db_user = cfg.database.user;
-    })
     // (optionalAttrs (cfg.mta.type == "postfix") {
       sendmail_aliases = "${dataDir}/sympa_transport";
       aliases_program  = "${pkgs.postfix}/bin/postmap";
@@ -393,7 +391,7 @@ in
     users.groups.${group} = {};
 
     assertions = [
-      { assertion = cfg.database.createLocally -> cfg.database.user == user;
+      { assertion = cfg.database.createLocally -> cfg.database.user == user && cfg.database.name == cfg.database.user;
         message = "services.sympa.database.user must be set to ${user} if services.sympa.database.createLocally is set to true";
       }
       { assertion = cfg.database.createLocally -> cfg.database.passwordFile == null;
@@ -579,7 +577,7 @@ in
       ensureDatabases = [ cfg.database.name ];
       ensureUsers = [
         { name = cfg.database.user;
-          ensurePermissions = { "DATABASE ${cfg.database.name}" = "ALL PRIVILEGES"; };
+          ensureDBOwnership = true;
         }
       ];
     };
diff --git a/nixos/modules/services/matrix/matrix-sliding-sync.nix b/nixos/modules/services/matrix/matrix-sliding-sync.nix
index 9807cde4091..84bb38f35ae 100644
--- a/nixos/modules/services/matrix/matrix-sliding-sync.nix
+++ b/nixos/modules/services/matrix/matrix-sliding-sync.nix
@@ -74,9 +74,9 @@ in
     services.postgresql = lib.optionalAttrs cfg.createDatabase {
       enable = true;
       ensureDatabases = [ "matrix-sliding-sync" ];
-      ensureUsers = [ rec {
+      ensureUsers = [ {
         name = "matrix-sliding-sync";
-        ensurePermissions."DATABASE \"${name}\"" = "ALL PRIVILEGES";
+        ensureDBOwnership = true;
       } ];
     };
 
diff --git a/nixos/modules/services/matrix/mautrix-facebook.nix b/nixos/modules/services/matrix/mautrix-facebook.nix
index 671040500df..d7cf024bb80 100644
--- a/nixos/modules/services/matrix/mautrix-facebook.nix
+++ b/nixos/modules/services/matrix/mautrix-facebook.nix
@@ -135,9 +135,7 @@ in {
       ensureDatabases = ["mautrix-facebook"];
       ensureUsers = [{
         name = "mautrix-facebook";
-        ensurePermissions = {
-          "DATABASE \"mautrix-facebook\"" = "ALL PRIVILEGES";
-        };
+        ensureDBOwnership = true;
       }];
     };
 
diff --git a/nixos/modules/services/misc/atuin.nix b/nixos/modules/services/misc/atuin.nix
index 8d2c1b5242f..2d6ffc510ce 100644
--- a/nixos/modules/services/misc/atuin.nix
+++ b/nixos/modules/services/misc/atuin.nix
@@ -73,9 +73,7 @@ in
       enable = true;
       ensureUsers = [{
         name = "atuin";
-        ensurePermissions = {
-          "DATABASE atuin" = "ALL PRIVILEGES";
-        };
+        ensureDBOwnership = true;
       }];
       ensureDatabases = [ "atuin" ];
     };
diff --git a/nixos/modules/services/misc/autofs.nix b/nixos/modules/services/misc/autofs.nix
index 55ab15ff003..723b67e8bb6 100644
--- a/nixos/modules/services/misc/autofs.nix
+++ b/nixos/modules/services/misc/autofs.nix
@@ -74,7 +74,7 @@ in
 
   config = mkIf cfg.enable {
 
-    boot.kernelModules = [ "autofs4" ];
+    boot.kernelModules = [ "autofs" ];
 
     systemd.services.autofs =
       { description = "Automounts filesystems on demand";
diff --git a/nixos/modules/services/misc/forgejo.md b/nixos/modules/services/misc/forgejo.md
new file mode 100644
index 00000000000..6a340738208
--- /dev/null
+++ b/nixos/modules/services/misc/forgejo.md
@@ -0,0 +1,79 @@
+# Forgejo {#module-forgejo}
+
+Forgejo is a soft fork of Gitea, with a strong focus on community,
+self-hosting and federation. [Codeberg](https://codeberg.org) is
+deployed from it.
+
+See [upstream docs](https://forgejo.org/docs/latest/).
+
+The method of choice for running forgejo is using [`services.forgejo`](#opt-services.forgejo.enable).
+
+::: {.warning}
+Running forgejo using `services.gitea.package = pkgs.forgejo` is no longer
+recommended.
+If you experience issues with your instance using `services.gitea`,
+**DO NOT** report them to the `services.gitea` module maintainers.
+**DO** report them to the `services.forgejo` module maintainers instead.
+:::
+
+## Migration from Gitea {#module-forgejo-migration-gitea}
+
+::: {.note}
+Migrating is, while not strictly necessary at this point, highly recommended.
+Both modules and projects are likely to diverge further with each release,
+which might lead to an even more involved migration.
+:::
+
+### Full-Migration {#module-forgejo-migration-gitea-default}
+
+This will migrate the state directory (data), rename and chown the database and
+delete the gitea user.
+
+::: {.note}
+This will also change the git remote ssh-url user from `gitea@` to `forgejo@`,
+when using the host's openssh server (default) instead of the integrated one.
+:::
+
+Instructions for PostgreSQL (default). Adapt accordingly for other databases:
+
+```sh
+systemctl stop gitea
+mv /var/lib/gitea /var/lib/forgejo
+runuser -u postgres -- psql -c '
+  ALTER USER gitea RENAME TO forgejo;
+  ALTER DATABASE gitea RENAME TO forgejo;
+'
+nixos-rebuild switch
+systemctl stop forgejo
+chown -R forgejo:forgejo /var/lib/forgejo
+systemctl restart forgejo
+```
+
+### Alternatively, keeping the gitea user {#module-forgejo-migration-gitea-impersonate}
+
+Alternatively, instead of renaming the database, copying the state folder and
+changing the user, the forgejo module can be set up to re-use the old storage
+locations and database, instead of having to copy or rename them.
+Make sure to disable `services.gitea` when doing this.
+
+```nix
+services.gitea.enable = false;
+
+services.forgejo = {
+  enable = true;
+  user = "gitea";
+  group = "gitea";
+  stateDir = "/var/lib/gitea";
+  database.name = "gitea";
+  database.user = "gitea";
+};
+
+users.users.gitea = {
+  home = "/var/lib/gitea";
+  useDefaultShell = true;
+  group = "gitea";
+  isSystemUser = true;
+};
+
+users.groups.gitea = {};
+```
diff --git a/nixos/modules/services/misc/forgejo.nix b/nixos/modules/services/misc/forgejo.nix
index 90b5f16f418..15966adfe38 100644
--- a/nixos/modules/services/misc/forgejo.nix
+++ b/nixos/modules/services/misc/forgejo.nix
@@ -357,6 +357,14 @@ in
         assertion = cfg.database.createDatabase -> useSqlite || cfg.database.user == cfg.user;
         message = "services.forgejo.database.user must match services.forgejo.user if the database is to be automatically provisioned";
       }
+      { assertion = cfg.database.createDatabase && usePostgresql -> cfg.database.user == cfg.database.name;
+        message = ''
+          When creating a database via NixOS, the db user and db name must be equal!
+          If you already have an existing DB+user and this assertion is new, you can safely set
+          `services.forgejo.createDatabase` to `false` because removal of `ensureUsers`
+          and `ensureDatabases` doesn't have any effect.
+        '';
+      }
     ];
 
     services.forgejo.settings = {
@@ -423,7 +431,7 @@ in
       ensureUsers = [
         {
           name = cfg.database.user;
-          ensurePermissions = { "DATABASE ${cfg.database.name}" = "ALL PRIVILEGES"; };
+          ensureDBOwnership = true;
         }
       ];
     };
@@ -677,5 +685,6 @@ in
     };
   };
 
+  meta.doc = ./forgejo.md;
   meta.maintainers = with lib.maintainers; [ bendlas emilylange ];
 }
diff --git a/nixos/modules/services/misc/gitea.nix b/nixos/modules/services/misc/gitea.nix
index 3f690f85d62..be528a29899 100644
--- a/nixos/modules/services/misc/gitea.nix
+++ b/nixos/modules/services/misc/gitea.nix
@@ -394,6 +394,14 @@ in
       { assertion = cfg.database.createDatabase -> useSqlite || cfg.database.user == cfg.user;
         message = "services.gitea.database.user must match services.gitea.user if the database is to be automatically provisioned";
       }
+      { assertion = cfg.database.createDatabase && usePostgresql -> cfg.database.user == cfg.database.name;
+        message = ''
+          When creating a database via NixOS, the db user and db name must be equal!
+          If you already have an existing DB+user and this assertion is new, you can safely set
+          `services.gitea.database.createDatabase` to `false`, because removing `ensureUsers`
+          and `ensureDatabases` has no effect on the existing database and user.
+        '';
+      }
     ];
 
     services.gitea.settings = {
@@ -461,7 +469,7 @@ in
       ensureDatabases = [ cfg.database.name ];
       ensureUsers = [
         { name = cfg.database.user;
-          ensurePermissions = { "DATABASE ${cfg.database.name}" = "ALL PRIVILEGES"; };
+          ensureDBOwnership = true;
         }
       ];
     };
diff --git a/nixos/modules/services/misc/redmine.nix b/nixos/modules/services/misc/redmine.nix
index a296fd3816b..20fa71507b6 100644
--- a/nixos/modules/services/misc/redmine.nix
+++ b/nixos/modules/services/misc/redmine.nix
@@ -267,7 +267,7 @@ in
       { assertion = cfg.database.passwordFile != null || cfg.database.socket != null;
         message = "one of services.redmine.database.socket or services.redmine.database.passwordFile must be set";
       }
-      { assertion = cfg.database.createLocally -> cfg.database.user == cfg.user;
+      { assertion = cfg.database.createLocally -> cfg.database.user == cfg.user && cfg.database.user == cfg.database.name;
         message = "services.redmine.database.user must be set to ${cfg.user} if services.redmine.database.createLocally is set true";
       }
       { assertion = cfg.database.createLocally -> cfg.database.socket != null;
@@ -315,7 +315,7 @@ in
       ensureDatabases = [ cfg.database.name ];
       ensureUsers = [
         { name = cfg.database.user;
-          ensurePermissions = { "DATABASE ${cfg.database.name}" = "ALL PRIVILEGES"; };
+          ensureDBOwnership = true;
         }
       ];
     };
diff --git a/nixos/modules/services/misc/soft-serve.nix b/nixos/modules/services/misc/soft-serve.nix
index 0f246493880..2b63b6bcd86 100644
--- a/nixos/modules/services/misc/soft-serve.nix
+++ b/nixos/modules/services/misc/soft-serve.nix
@@ -12,7 +12,7 @@ in
 {
   options = {
     services.soft-serve = {
-      enable = mkEnableOption "Enable soft-serve service";
+      enable = mkEnableOption "soft-serve";
 
       package = mkPackageOption pkgs "soft-serve" { };
 
@@ -20,7 +20,7 @@ in
         type = format.type;
         default = { };
         description = mdDoc ''
-          The contents of the configuration file.
+          The contents of the configuration file for soft-serve.
 
           See <${docUrl}>.
         '';
diff --git a/nixos/modules/services/misc/sourcehut/service.nix b/nixos/modules/services/misc/sourcehut/service.nix
index 18c2f5effc5..f08d5eb4687 100644
--- a/nixos/modules/services/misc/sourcehut/service.nix
+++ b/nixos/modules/services/misc/sourcehut/service.nix
@@ -249,10 +249,13 @@ in
       ensureDatabases = [ srvCfg.postgresql.database ];
       ensureUsers = map (name: {
           inherit name;
-          ensurePermissions = { "DATABASE \"${srvCfg.postgresql.database}\"" = "ALL PRIVILEGES"; };
+          # We don't use ensureDBOwnership because the default database name contains dots.
+          # TODO(for maintainers of sourcehut): migrate away from the custom preStart script.
+          ensureDBOwnership = false;
         }) [srvCfg.user];
     };
 
+
     services.sourcehut.settings = mkMerge [
       {
         "${srv}.sr.ht".origin = mkDefault "https://${srv}.${cfg.settings."sr.ht".global-domain}";
@@ -378,10 +381,11 @@ in
         extraService
       ])) extraServices)
 
-      # Work around 'pq: permission denied for schema public' with postgres v15, until a
-      # solution for `services.postgresql.ensureUsers` is found.
+      # Work around 'pq: permission denied for schema public' with postgres v15.
       # See https://github.com/NixOS/nixpkgs/issues/216989
       # Workaround taken from nixos/forgejo: https://github.com/NixOS/nixpkgs/pull/262741
+      # TODO(for maintainers of sourcehut): please drop this workaround by moving
+      # away from database name defaults that contain dots.
       (lib.mkIf (
           cfg.postgresql.enable
           && lib.strings.versionAtLeast config.services.postgresql.package.version "15.0"
diff --git a/nixos/modules/services/monitoring/grafana.nix b/nixos/modules/services/monitoring/grafana.nix
index 5e21407042b..f84d677f14d 100644
--- a/nixos/modules/services/monitoring/grafana.nix
+++ b/nixos/modules/services/monitoring/grafana.nix
@@ -1841,6 +1841,7 @@ in
       serviceConfig = {
         WorkingDirectory = cfg.dataDir;
         User = "grafana";
+        Restart = "on-failure";
         RuntimeDirectory = "grafana";
         RuntimeDirectoryMode = "0755";
         # Hardening
diff --git a/nixos/modules/services/monitoring/parsedmarc.nix b/nixos/modules/services/monitoring/parsedmarc.nix
index 44fc359b6a7..a146e7ab954 100644
--- a/nixos/modules/services/monitoring/parsedmarc.nix
+++ b/nixos/modules/services/monitoring/parsedmarc.nix
@@ -301,6 +301,7 @@ in
               description = lib.mdDoc ''
                 The addresses to send outgoing mail to.
               '';
+              apply = x: if x == [] then null else lib.concatStringsSep "," x;
             };
           };
 
diff --git a/nixos/modules/services/monitoring/prometheus/exporters.nix b/nixos/modules/services/monitoring/prometheus/exporters.nix
index 305f235054b..f89522c0986 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters.nix
@@ -2,8 +2,8 @@
 
 let
   inherit (lib) concatStrings foldl foldl' genAttrs literalExpression maintainers
-                mapAttrsToList mkDefault mkEnableOption mkIf mkMerge mkOption
-                optional types mkOptionDefault flip attrNames;
+    mapAttrs mapAttrsToList mkDefault mkEnableOption mkIf mkMerge mkOption
+    optional types mkOptionDefault flip attrNames;
 
   cfg = config.services.prometheus.exporters;
 
@@ -20,7 +20,7 @@ let
   #  systemd service must be provided by specifying either
   #  `serviceOpts.script` or `serviceOpts.serviceConfig.ExecStart`
 
-  exporterOpts = genAttrs [
+  exporterOpts = (genAttrs [
     "apcupsd"
     "artifactory"
     "bind"
@@ -34,14 +34,15 @@ let
     "domain"
     "dovecot"
     "fastly"
+    "flow"
     "fritzbox"
     "graphite"
     "idrac"
     "imap-mailstat"
     "influxdb"
     "ipmi"
-    "json"
     "jitsi"
+    "json"
     "junos-czerwonk"
     "kea"
     "keylight"
@@ -74,9 +75,9 @@ let
     "scaphandre"
     "script"
     "shelly"
-    "snmp"
     "smartctl"
     "smokeping"
+    "snmp"
     "sql"
     "statsd"
     "surfboard"
@@ -88,10 +89,39 @@ let
     "v2ray"
     "varnish"
     "wireguard"
-    "flow"
     "zfs"
-  ] (name:
-    import (./. + "/exporters/${name}.nix") { inherit config lib pkgs options; }
+  ]
+    (name:
+      import (./. + "/exporters/${name}.nix") { inherit config lib pkgs options; }
+    )) // (mapAttrs
+    (name: params:
+      import (./. + "/exporters/${params.name}.nix") { inherit config lib pkgs options; type = params.type ; })
+    {
+      exportarr-bazarr = {
+        name = "exportarr";
+        type = "bazarr";
+      };
+      exportarr-lidarr = {
+        name = "exportarr";
+        type = "lidarr";
+      };
+      exportarr-prowlarr = {
+        name = "exportarr";
+        type = "prowlarr";
+      };
+      exportarr-radarr = {
+        name = "exportarr";
+        type = "radarr";
+      };
+      exportarr-readarr = {
+        name = "exportarr";
+        type = "readarr";
+      };
+      exportarr-sonarr = {
+        name = "exportarr";
+        type = "sonarr";
+      };
+    }
   );
 
   mkExporterOpts = ({ name, port }: {
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/exportarr.nix b/nixos/modules/services/monitoring/prometheus/exporters/exportarr.nix
new file mode 100644
index 00000000000..13220933541
--- /dev/null
+++ b/nixos/modules/services/monitoring/prometheus/exporters/exportarr.nix
@@ -0,0 +1,55 @@
+{ config, lib, pkgs, options, type }:
+
+let
+  cfg = config.services.prometheus.exporters."exportarr-${type}";
+  exportarrEnvironment = (
+    lib.mapAttrs (_: toString) cfg.environment
+  ) // {
+    PORT = toString cfg.port;
+    URL = cfg.url;
+    API_KEY_FILE = lib.mkIf (cfg.apiKeyFile != null) "%d/api-key";
+  };
+in
+{
+  port = 9708;
+  extraOpts = {
+    url = lib.mkOption {
+      type = lib.types.str;
+      default = "http://127.0.0.1";
+      description = lib.mdDoc ''
+        The full URL to the service to monitor, e.g. Sonarr, Radarr or Lidarr.
+      '';
+    };
+
+    apiKeyFile = lib.mkOption {
+      type = lib.types.nullOr lib.types.path;
+      default = null;
+      description = lib.mdDoc ''
+        File containing the api-key.
+      '';
+    };
+
+    package = lib.mkPackageOptionMD pkgs "exportarr" { };
+
+    environment = lib.mkOption {
+      type = lib.types.attrsOf lib.types.str;
+      default = { };
+      description = lib.mdDoc ''
+        See [the configuration guide](https://github.com/onedr0p/exportarr#configuration) for available options.
+      '';
+      example = {
+        PROWLARR__BACKFILL = true;
+      };
+    };
+  };
+  serviceOpts = {
+    serviceConfig = {
+      LoadCredential = lib.optionalString (cfg.apiKeyFile != null) "api-key:${cfg.apiKeyFile}";
+      ExecStart = ''${cfg.package}/bin/exportarr ${type} "$@"'';
+      ProcSubset = "pid";
+      ProtectProc = "invisible";
+      SystemCallFilter = ["@system-service" "~@privileged"];
+    };
+    environment = exportarrEnvironment;
+  };
+}
diff --git a/nixos/modules/services/monitoring/zabbix-proxy.nix b/nixos/modules/services/monitoring/zabbix-proxy.nix
index 85da416ba6c..503e81b48a5 100644
--- a/nixos/modules/services/monitoring/zabbix-proxy.nix
+++ b/nixos/modules/services/monitoring/zabbix-proxy.nix
@@ -203,7 +203,7 @@ in
       { assertion = !config.services.zabbixServer.enable;
         message = "Please choose one of services.zabbixServer or services.zabbixProxy.";
       }
-      { assertion = cfg.database.createLocally -> cfg.database.user == user;
+      { assertion = cfg.database.createLocally -> cfg.database.user == user && cfg.database.name == cfg.database.user;
         message = "services.zabbixProxy.database.user must be set to ${user} if services.zabbixProxy.database.createLocally is set true";
       }
       { assertion = cfg.database.createLocally -> cfg.database.passwordFile == null;
@@ -252,7 +252,7 @@ in
       ensureDatabases = [ cfg.database.name ];
       ensureUsers = [
         { name = cfg.database.user;
-          ensurePermissions = { "DATABASE ${cfg.database.name}" = "ALL PRIVILEGES"; };
+          ensureDBOwnership = true;
         }
       ];
     };
diff --git a/nixos/modules/services/monitoring/zabbix-server.nix b/nixos/modules/services/monitoring/zabbix-server.nix
index 2b50280e396..0607188d213 100644
--- a/nixos/modules/services/monitoring/zabbix-server.nix
+++ b/nixos/modules/services/monitoring/zabbix-server.nix
@@ -191,7 +191,7 @@ in
   config = mkIf cfg.enable {
 
     assertions = [
-      { assertion = cfg.database.createLocally -> cfg.database.user == user;
+      { assertion = cfg.database.createLocally -> cfg.database.user == user && cfg.database.user == cfg.database.name;
         message = "services.zabbixServer.database.user must be set to ${user} if services.zabbixServer.database.createLocally is set true";
       }
       { assertion = cfg.database.createLocally -> cfg.database.passwordFile == null;
@@ -240,7 +240,7 @@ in
       ensureDatabases = [ cfg.database.name ];
       ensureUsers = [
         { name = cfg.database.user;
-          ensurePermissions = { "DATABASE ${cfg.database.name}" = "ALL PRIVILEGES"; };
+          ensureDBOwnership = true;
         }
       ];
     };
diff --git a/nixos/modules/services/networking/harmonia.nix b/nixos/modules/services/networking/harmonia.nix
index 144fa6c708e..4733165cf7d 100644
--- a/nixos/modules/services/networking/harmonia.nix
+++ b/nixos/modules/services/networking/harmonia.nix
@@ -28,6 +28,8 @@ in
   };
 
   config = lib.mkIf cfg.enable {
+    nix.settings.extra-allowed-users = [ "harmonia" ];
+
     systemd.services.harmonia = {
       description = "harmonia binary cache service";
 
diff --git a/nixos/modules/services/networking/ircd-hybrid/builder.sh b/nixos/modules/services/networking/ircd-hybrid/builder.sh
index d9d2e4264df..07a3788abf7 100644
--- a/nixos/modules/services/networking/ircd-hybrid/builder.sh
+++ b/nixos/modules/services/networking/ircd-hybrid/builder.sh
@@ -1,4 +1,4 @@
-if [ -e .attrs.sh ]; then source .attrs.sh; fi
+if [ -e "$NIX_ATTRS_SH_FILE" ]; then . "$NIX_ATTRS_SH_FILE"; elif [ -f .attrs.sh ]; then . .attrs.sh; fi
 source $stdenv/setup
 
 doSub() {
diff --git a/nixos/modules/services/networking/nix-serve.nix b/nixos/modules/services/networking/nix-serve.nix
index f37be31270b..8c4352bc95e 100644
--- a/nixos/modules/services/networking/nix-serve.nix
+++ b/nixos/modules/services/networking/nix-serve.nix
@@ -67,6 +67,8 @@ in
   };
 
   config = mkIf cfg.enable {
+    nix.settings.extra-allowed-users = [ "nix-serve" ];
+
     systemd.services.nix-serve = {
       description = "nix-serve binary cache server";
       after = [ "network.target" ];
diff --git a/nixos/modules/services/networking/ntp/chrony.nix b/nixos/modules/services/networking/ntp/chrony.nix
index afd721e34da..d370e6946d7 100644
--- a/nixos/modules/services/networking/ntp/chrony.nix
+++ b/nixos/modules/services/networking/ntp/chrony.nix
@@ -9,6 +9,7 @@ let
   stateDir = cfg.directory;
   driftFile = "${stateDir}/chrony.drift";
   keyFile = "${stateDir}/chrony.keys";
+  rtcFile = "${stateDir}/chrony.rtc";
 
   configFile = pkgs.writeText "chrony.conf" ''
     ${concatMapStringsSep "\n" (server: "server " + server + " " + cfg.serverOption + optionalString (cfg.enableNTS) " nts") cfg.servers}
@@ -20,8 +21,10 @@ let
 
     driftfile ${driftFile}
     keyfile ${keyFile}
+    ${optionalString (cfg.enableRTCTrimming) "rtcfile ${rtcFile}"}
     ${optionalString (cfg.enableNTS) "ntsdumpdir ${stateDir}"}
 
+    ${optionalString (cfg.enableRTCTrimming) "rtcautotrim ${builtins.toString cfg.autotrimThreshold}"}
     ${optionalString (!config.time.hardwareClockInLocalTime) "rtconutc"}
 
     ${cfg.extraConfig}
@@ -85,6 +88,33 @@ in
         '';
       };
 
+      enableRTCTrimming = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Enable tracking of the RTC offset to the system clock and automatic trimming.
+          See also [](#opt-services.chrony.autotrimThreshold)
+
+          ::: {.note}
+          This is not compatible with the `rtcsync` directive, which naively syncs the RTC time every 11 minutes.
+
+          Tracking the RTC drift will allow more precise timekeeping,
+          especially on intermittently running devices, where the RTC is very relevant.
+          :::
+        '';
+      };
+
+      autotrimThreshold = mkOption {
+        type = types.ints.positive;
+        default = 30;
+        example = 10;
+        description = ''
+          Maximum estimated error threshold for the `rtcautotrim` command.
+          When reached, the RTC will be trimmed.
+          Only used when [](#opt-services.chrony.enableRTCTrimming) is enabled.
+        '';
+      };
+
       enableNTS = mkOption {
         type = types.bool;
         default = false;
@@ -141,7 +171,7 @@ in
   };
 
   config = mkIf cfg.enable {
-    meta.maintainers = with lib.maintainers; [ thoughtpolice ];
+    meta.maintainers = with lib.maintainers; [ thoughtpolice vifino ];
 
     environment.systemPackages = [ chronyPkg ];
 
@@ -156,12 +186,19 @@ in
 
     services.timesyncd.enable = mkForce false;
 
+    # If chrony controls and tracks the RTC, writing it externally causes clock error.
+    systemd.services.save-hwclock = lib.mkIf cfg.enableRTCTrimming {
+      enable = lib.mkForce false;
+    };
+
     systemd.services.systemd-timedated.environment = { SYSTEMD_TIMEDATED_NTP_SERVICES = "chronyd.service"; };
 
     systemd.tmpfiles.rules = [
       "d ${stateDir} 0750 chrony chrony - -"
       "f ${driftFile} 0640 chrony chrony - -"
       "f ${keyFile} 0640 chrony chrony - -"
+    ] ++ lib.optionals cfg.enableRTCTrimming [
+      "f ${rtcFile} 0640 chrony chrony - -"
     ];
 
     systemd.services.chronyd =
diff --git a/nixos/modules/services/networking/syncthing.nix b/nixos/modules/services/networking/syncthing.nix
index bdcdaf056d0..6d9af6141f1 100644
--- a/nixos/modules/services/networking/syncthing.nix
+++ b/nixos/modules/services/networking/syncthing.nix
@@ -666,7 +666,9 @@ in {
             ${cfg.package}/bin/syncthing \
               -no-browser \
               -gui-address=${if isUnixGui then "unix://" else ""}${cfg.guiAddress} \
-              -home=${cfg.configDir} ${escapeShellArgs cfg.extraFlags}
+              -config=${cfg.configDir} \
+              -data=${cfg.dataDir} \
+              ${escapeShellArgs cfg.extraFlags}
           '';
           MemoryDenyWriteExecute = true;
           NoNewPrivileges = true;
diff --git a/nixos/modules/services/networking/unbound.nix b/nixos/modules/services/networking/unbound.nix
index 0426dbb0c83..b6579af10a7 100644
--- a/nixos/modules/services/networking/unbound.nix
+++ b/nixos/modules/services/networking/unbound.nix
@@ -166,7 +166,7 @@ in {
     services.unbound.settings = {
       server = {
         directory = mkDefault cfg.stateDir;
-        username = cfg.user;
+        username = ''""'';
         chroot = ''""'';
         pidfile = ''""'';
         # when running under systemd there is no need to daemonize
@@ -245,14 +245,13 @@ in {
         NotifyAccess = "main";
         Type = "notify";
 
-        # FIXME: Which of these do we actually need, can we drop the chroot flag?
         AmbientCapabilities = [
           "CAP_NET_BIND_SERVICE"
+          "CAP_NET_RAW" # needed if ip-transparent is set to true
+        ];
+        CapabilityBoundingSet = [
+          "CAP_NET_BIND_SERVICE"
           "CAP_NET_RAW"
-          "CAP_SETGID"
-          "CAP_SETUID"
-          "CAP_SYS_CHROOT"
-          "CAP_SYS_RESOURCE"
         ];
 
         User = cfg.user;
@@ -266,22 +265,19 @@ in {
         ProtectControlGroups = true;
         ProtectKernelModules = true;
         ProtectSystem = "strict";
+        ProtectClock = true;
+        ProtectHostname = true;
+        ProtectProc = "invisible";
+        ProcSubset = "pid";
+        ProtectKernelLogs = true;
+        ProtectKernelTunables = true;
         RuntimeDirectory = "unbound";
         ConfigurationDirectory = "unbound";
         StateDirectory = "unbound";
         RestrictAddressFamilies = [ "AF_INET" "AF_INET6" "AF_NETLINK" "AF_UNIX" ];
         RestrictRealtime = true;
         SystemCallArchitectures = "native";
-        SystemCallFilter = [
-          "~@clock"
-          "@cpu-emulation"
-          "@debug"
-          "@keyring"
-          "@module"
-          "mount"
-          "@obsolete"
-          "@resources"
-        ];
+        SystemCallFilter = [ "@system-service" ];
         RestrictNamespaces = true;
         LockPersonality = true;
         RestrictSUIDSGID = true;
diff --git a/nixos/modules/services/networking/unifi.nix b/nixos/modules/services/networking/unifi.nix
index 6b683710980..537a4db95ca 100644
--- a/nixos/modules/services/networking/unifi.nix
+++ b/nixos/modules/services/networking/unifi.nix
@@ -1,60 +1,61 @@
 { config, options, lib, pkgs, utils, ... }:
-with lib;
 let
   cfg = config.services.unifi;
   stateDir = "/var/lib/unifi";
-  cmd = ''
-    @${cfg.jrePackage}/bin/java java \
-        ${optionalString (lib.versionAtLeast (lib.getVersion cfg.jrePackage) "16")
-        ("--add-opens java.base/java.lang=ALL-UNNAMED --add-opens=java.base/java.time=ALL-UNNAMED "
-        + "--add-opens java.base/sun.security.util=ALL-UNNAMED --add-opens java.base/java.io=ALL-UNNAMED "
-        + "--add-opens java.rmi/sun.rmi.transport=ALL-UNNAMED")} \
-        ${optionalString (cfg.initialJavaHeapSize != null) "-Xms${(toString cfg.initialJavaHeapSize)}m"} \
-        ${optionalString (cfg.maximumJavaHeapSize != null) "-Xmx${(toString cfg.maximumJavaHeapSize)}m"} \
-        -jar ${stateDir}/lib/ace.jar
-  '';
+  cmd = lib.escapeShellArgs ([ "@${cfg.jrePackage}/bin/java" "java" ]
+    ++ lib.optionals (lib.versionAtLeast (lib.getVersion cfg.jrePackage) "16") [
+      "--add-opens=java.base/java.lang=ALL-UNNAMED"
+      "--add-opens=java.base/java.time=ALL-UNNAMED"
+      "--add-opens=java.base/sun.security.util=ALL-UNNAMED"
+      "--add-opens=java.base/java.io=ALL-UNNAMED"
+      "--add-opens=java.rmi/sun.rmi.transport=ALL-UNNAMED"
+    ]
+    ++ (lib.optional (cfg.initialJavaHeapSize != null) "-Xms${(toString cfg.initialJavaHeapSize)}m")
+    ++ (lib.optional (cfg.maximumJavaHeapSize != null) "-Xmx${(toString cfg.maximumJavaHeapSize)}m")
+    ++ cfg.extraJvmOptions
+    ++ [ "-jar" "${stateDir}/lib/ace.jar" ]);
 in
 {
 
   options = {
 
-    services.unifi.enable = mkOption {
-      type = types.bool;
+    services.unifi.enable = lib.mkOption {
+      type = lib.types.bool;
       default = false;
       description = lib.mdDoc ''
         Whether or not to enable the unifi controller service.
       '';
     };
 
-    services.unifi.jrePackage = mkOption {
-      type = types.package;
+    services.unifi.jrePackage = lib.mkOption {
+      type = lib.types.package;
       default = if (lib.versionAtLeast (lib.getVersion cfg.unifiPackage) "7.5") then pkgs.jdk17_headless else if (lib.versionAtLeast (lib.getVersion cfg.unifiPackage) "7.3") then pkgs.jdk11 else pkgs.jre8;
-      defaultText = literalExpression ''if (lib.versionAtLeast (lib.getVersion cfg.unifiPackage) "7.5") then pkgs.jdk17_headless else if (lib.versionAtLeast (lib.getVersion cfg.unifiPackage) "7.3" then pkgs.jdk11 else pkgs.jre8'';
+      defaultText = lib.literalExpression ''if (lib.versionAtLeast (lib.getVersion cfg.unifiPackage) "7.5") then pkgs.jdk17_headless else if (lib.versionAtLeast (lib.getVersion cfg.unifiPackage) "7.3") then pkgs.jdk11 else pkgs.jre8'';
       description = lib.mdDoc ''
         The JRE package to use. Check the release notes to ensure it is supported.
       '';
     };
 
-    services.unifi.unifiPackage = mkOption {
-      type = types.package;
+    services.unifi.unifiPackage = lib.mkOption {
+      type = lib.types.package;
       default = pkgs.unifi5;
-      defaultText = literalExpression "pkgs.unifi5";
+      defaultText = lib.literalExpression "pkgs.unifi5";
       description = lib.mdDoc ''
         The unifi package to use.
       '';
     };
 
-    services.unifi.mongodbPackage = mkOption {
-      type = types.package;
+    services.unifi.mongodbPackage = lib.mkOption {
+      type = lib.types.package;
       default = pkgs.mongodb-4_4;
-      defaultText = literalExpression "pkgs.mongodb";
+      defaultText = lib.literalExpression "pkgs.mongodb";
       description = lib.mdDoc ''
         The mongodb package to use. Please note: unifi7 officially only supports mongodb up until 3.6 but works with 4.4.
       '';
     };
 
-    services.unifi.openFirewall = mkOption {
-      type = types.bool;
+    services.unifi.openFirewall = lib.mkOption {
+      type = lib.types.bool;
       default = false;
       description = lib.mdDoc ''
         Whether or not to open the minimum required ports on the firewall.
@@ -65,8 +66,8 @@ in
       '';
     };
 
-    services.unifi.initialJavaHeapSize = mkOption {
-      type = types.nullOr types.int;
+    services.unifi.initialJavaHeapSize = lib.mkOption {
+      type = with lib.types; nullOr int;
       default = null;
       example = 1024;
       description = lib.mdDoc ''
@@ -75,8 +76,8 @@ in
       '';
     };
 
-    services.unifi.maximumJavaHeapSize = mkOption {
-      type = types.nullOr types.int;
+    services.unifi.maximumJavaHeapSize = lib.mkOption {
+      type = with lib.types; nullOr int;
       default = null;
       example = 4096;
       description = lib.mdDoc ''
@@ -85,9 +86,18 @@ in
       '';
     };
 
+    services.unifi.extraJvmOptions = lib.mkOption {
+      type = with lib.types; listOf str;
+      default = [ ];
+      example = lib.literalExpression ''["-Xlog:gc"]'';
+      description = lib.mdDoc ''
+        Set extra options to pass to the JVM.
+      '';
+    };
+
   };
 
-  config = mkIf cfg.enable {
+  config = lib.mkIf cfg.enable {
 
     users.users.unifi = {
       isSystemUser = true;
@@ -97,7 +107,7 @@ in
     };
     users.groups.unifi = {};
 
-    networking.firewall = mkIf cfg.openFirewall {
+    networking.firewall = lib.mkIf cfg.openFirewall {
       # https://help.ubnt.com/hc/en-us/articles/218506997
       allowedTCPPorts = [
         8080  # Port for UAP to inform controller.
@@ -123,8 +133,8 @@ in
 
       serviceConfig = {
         Type = "simple";
-        ExecStart = "${(removeSuffix "\n" cmd)} start";
-        ExecStop = "${(removeSuffix "\n" cmd)} stop";
+        ExecStart = "${cmd} start";
+        ExecStop = "${cmd} stop";
         Restart = "on-failure";
         TimeoutSec = "5min";
         User = "unifi";
@@ -166,7 +176,7 @@ in
         StateDirectory = "unifi";
         RuntimeDirectory = "unifi";
         LogsDirectory = "unifi";
-        CacheDirectory= "unifi";
+        CacheDirectory = "unifi";
 
         TemporaryFileSystem = [
           # required as we want to create bind mounts below
@@ -176,7 +186,7 @@ in
         # We must create the binary directories as bind mounts instead of symlinks
         # This is because the controller resolves all symlinks to absolute paths
         # to be used as the working directory.
-        BindPaths =  [
+        BindPaths = [
           "/var/log/unifi:${stateDir}/logs"
           "/run/unifi:${stateDir}/run"
           "${cfg.unifiPackage}/dl:${stateDir}/dl"
@@ -194,7 +204,7 @@ in
 
   };
   imports = [
-    (mkRemovedOptionModule [ "services" "unifi" "dataDir" ] "You should move contents of dataDir to /var/lib/unifi/data" )
-    (mkRenamedOptionModule [ "services" "unifi" "openPorts" ] [ "services" "unifi" "openFirewall" ])
+    (lib.mkRemovedOptionModule [ "services" "unifi" "dataDir" ] "You should move contents of dataDir to /var/lib/unifi/data")
+    (lib.mkRenamedOptionModule [ "services" "unifi" "openPorts" ] [ "services" "unifi" "openFirewall" ])
   ];
 }
diff --git a/nixos/modules/services/printing/cupsd.nix b/nixos/modules/services/printing/cupsd.nix
index 25367f8e61d..3a274430347 100644
--- a/nixos/modules/services/printing/cupsd.nix
+++ b/nixos/modules/services/printing/cupsd.nix
@@ -109,11 +109,10 @@ let
   getGutenprint = pkgs: head (filterGutenprint pkgs);
 
   parsePorts = addresses: let
-    splitAddress = addr: lib.strings.splitString ":" addr;
-    extractPort = addr: builtins.elemAt (builtins.tail (splitAddress addr)) 0;
-    toInt = str: lib.strings.toInt str;
+    splitAddress = addr: strings.splitString ":" addr;
+    extractPort = addr: builtins.foldl' (a: b: b) "" (splitAddress addr);
   in
-    builtins.map (address: toInt (extractPort address)) addresses;
+    builtins.map (address: strings.toInt (extractPort address)) addresses;
 
 in
 
diff --git a/nixos/modules/services/search/kibana.nix b/nixos/modules/services/search/kibana.nix
deleted file mode 100644
index a5e132d5c38..00000000000
--- a/nixos/modules/services/search/kibana.nix
+++ /dev/null
@@ -1,213 +0,0 @@
-{ config, lib, options, pkgs, ... }:
-
-with lib;
-
-let
-  cfg = config.services.kibana;
-  opt = options.services.kibana;
-
-  ge7 = builtins.compareVersions cfg.package.version "7" >= 0;
-  lt6_6 = builtins.compareVersions cfg.package.version "6.6" < 0;
-
-  cfgFile = pkgs.writeText "kibana.json" (builtins.toJSON (
-    (filterAttrsRecursive (n: v: v != null && v != []) ({
-      server.host = cfg.listenAddress;
-      server.port = cfg.port;
-      server.ssl.certificate = cfg.cert;
-      server.ssl.key = cfg.key;
-
-      kibana.index = cfg.index;
-      kibana.defaultAppId = cfg.defaultAppId;
-
-      elasticsearch.url = cfg.elasticsearch.url;
-      elasticsearch.hosts = cfg.elasticsearch.hosts;
-      elasticsearch.username = cfg.elasticsearch.username;
-      elasticsearch.password = cfg.elasticsearch.password;
-
-      elasticsearch.ssl.certificate = cfg.elasticsearch.cert;
-      elasticsearch.ssl.key = cfg.elasticsearch.key;
-      elasticsearch.ssl.certificateAuthorities = cfg.elasticsearch.certificateAuthorities;
-    } // cfg.extraConf)
-  )));
-
-in {
-  options.services.kibana = {
-    enable = mkEnableOption (lib.mdDoc "kibana service");
-
-    listenAddress = mkOption {
-      description = lib.mdDoc "Kibana listening host";
-      default = "127.0.0.1";
-      type = types.str;
-    };
-
-    port = mkOption {
-      description = lib.mdDoc "Kibana listening port";
-      default = 5601;
-      type = types.port;
-    };
-
-    cert = mkOption {
-      description = lib.mdDoc "Kibana ssl certificate.";
-      default = null;
-      type = types.nullOr types.path;
-    };
-
-    key = mkOption {
-      description = lib.mdDoc "Kibana ssl key.";
-      default = null;
-      type = types.nullOr types.path;
-    };
-
-    index = mkOption {
-      description = lib.mdDoc "Elasticsearch index to use for saving kibana config.";
-      default = ".kibana";
-      type = types.str;
-    };
-
-    defaultAppId = mkOption {
-      description = lib.mdDoc "Elasticsearch default application id.";
-      default = "discover";
-      type = types.str;
-    };
-
-    elasticsearch = {
-      url = mkOption {
-        description = lib.mdDoc ''
-          Elasticsearch url.
-
-          Defaults to `"http://localhost:9200"`.
-
-          Don't set this when using Kibana >= 7.0.0 because it will result in a
-          configuration error. Use {option}`services.kibana.elasticsearch.hosts`
-          instead.
-        '';
-        default = null;
-        type = types.nullOr types.str;
-      };
-
-      hosts = mkOption {
-        description = lib.mdDoc ''
-          The URLs of the Elasticsearch instances to use for all your queries.
-          All nodes listed here must be on the same cluster.
-
-          Defaults to `[ "http://localhost:9200" ]`.
-
-          This option is only valid when using kibana >= 6.6.
-        '';
-        default = null;
-        type = types.nullOr (types.listOf types.str);
-      };
-
-      username = mkOption {
-        description = lib.mdDoc "Username for elasticsearch basic auth.";
-        default = null;
-        type = types.nullOr types.str;
-      };
-
-      password = mkOption {
-        description = lib.mdDoc "Password for elasticsearch basic auth.";
-        default = null;
-        type = types.nullOr types.str;
-      };
-
-      ca = mkOption {
-        description = lib.mdDoc ''
-          CA file to auth against elasticsearch.
-
-          It's recommended to use the {option}`certificateAuthorities` option
-          when using kibana-5.4 or newer.
-        '';
-        default = null;
-        type = types.nullOr types.path;
-      };
-
-      certificateAuthorities = mkOption {
-        description = lib.mdDoc ''
-          CA files to auth against elasticsearch.
-
-          Please use the {option}`ca` option when using kibana \< 5.4
-          because those old versions don't support setting multiple CA's.
-
-          This defaults to the singleton list [ca] when the {option}`ca` option is defined.
-        '';
-        default = lib.optional (cfg.elasticsearch.ca != null) ca;
-        defaultText = literalExpression ''
-          lib.optional (config.${opt.elasticsearch.ca} != null) ca
-        '';
-        type = types.listOf types.path;
-      };
-
-      cert = mkOption {
-        description = lib.mdDoc "Certificate file to auth against elasticsearch.";
-        default = null;
-        type = types.nullOr types.path;
-      };
-
-      key = mkOption {
-        description = lib.mdDoc "Key file to auth against elasticsearch.";
-        default = null;
-        type = types.nullOr types.path;
-      };
-    };
-
-    package = mkOption {
-      description = lib.mdDoc "Kibana package to use";
-      default = pkgs.kibana;
-      defaultText = literalExpression "pkgs.kibana";
-      type = types.package;
-    };
-
-    dataDir = mkOption {
-      description = lib.mdDoc "Kibana data directory";
-      default = "/var/lib/kibana";
-      type = types.path;
-    };
-
-    extraConf = mkOption {
-      description = lib.mdDoc "Kibana extra configuration";
-      default = {};
-      type = types.attrs;
-    };
-  };
-
-  config = mkIf (cfg.enable) {
-    assertions = [
-      {
-        assertion = ge7 -> cfg.elasticsearch.url == null;
-        message =
-          "The option services.kibana.elasticsearch.url has been removed when using kibana >= 7.0.0. " +
-          "Please use option services.kibana.elasticsearch.hosts instead.";
-      }
-      {
-        assertion = lt6_6 -> cfg.elasticsearch.hosts == null;
-        message =
-          "The option services.kibana.elasticsearch.hosts is only valid for kibana >= 6.6.";
-      }
-    ];
-    systemd.services.kibana = {
-      description = "Kibana Service";
-      wantedBy = [ "multi-user.target" ];
-      after = [ "network.target" "elasticsearch.service" ];
-      environment = { BABEL_CACHE_PATH = "${cfg.dataDir}/.babelcache.json"; };
-      serviceConfig = {
-        ExecStart =
-          "${cfg.package}/bin/kibana" +
-          " --config ${cfgFile}" +
-          " --path.data ${cfg.dataDir}";
-        User = "kibana";
-        WorkingDirectory = cfg.dataDir;
-      };
-    };
-
-    environment.systemPackages = [ cfg.package ];
-
-    users.users.kibana = {
-      isSystemUser = true;
-      description = "Kibana service user";
-      home = cfg.dataDir;
-      createHome = true;
-      group = "kibana";
-    };
-    users.groups.kibana = {};
-  };
-}
diff --git a/nixos/modules/services/search/opensearch.nix b/nixos/modules/services/search/opensearch.nix
index 9a50e796313..ae79d5545fd 100644
--- a/nixos/modules/services/search/opensearch.nix
+++ b/nixos/modules/services/search/opensearch.nix
@@ -72,6 +72,18 @@ in
             The port to listen on for transport traffic.
           '';
         };
+
+        options."plugins.security.disabled" = lib.mkOption {
+          type = lib.types.bool;
+          default = true;
+          description = lib.mdDoc ''
+            Whether to disable the security plugin.
+            Note that `plugins.security.ssl.transport.keystore_filepath`, or
+            `plugins.security.ssl.transport.server.pemcert_filepath` and
+            `plugins.security.ssl.transport.client.pemcert_filepath`,
+            must be set for this plugin to be enabled.
+          '';
+        };
       };
 
       default = {};
@@ -186,6 +198,13 @@ in
               shopt -s inherit_errexit
 
               # Install plugins
+
+              # remove plugins directory if it is empty.
+              if [ -z "$(ls -A ${cfg.dataDir}/plugins)" ]; then
+                rm -r "${cfg.dataDir}/plugins"
+              fi
+
+              ln -sfT "${cfg.package}/plugins" "${cfg.dataDir}/plugins"
               ln -sfT ${cfg.package}/lib ${cfg.dataDir}/lib
               ln -sfT ${cfg.package}/modules ${cfg.dataDir}/modules
 
diff --git a/nixos/modules/services/security/hockeypuck.nix b/nixos/modules/services/security/hockeypuck.nix
index 127134bc5db..56c13d79192 100644
--- a/nixos/modules/services/security/hockeypuck.nix
+++ b/nixos/modules/services/security/hockeypuck.nix
@@ -55,7 +55,7 @@ in {
             ensureDatabases = [ "hockeypuck" ];
             ensureUsers = [{
               name = "hockeypuck";
-              ensurePermissions."DATABASE hockeypuck" = "ALL PRIVILEGES";
+              ensureDBOwnership = true;
             }];
           };
         ```
diff --git a/nixos/modules/services/torrent/flexget.nix b/nixos/modules/services/torrent/flexget.nix
index 5cd7ae6ad7d..58a4b700149 100644
--- a/nixos/modules/services/torrent/flexget.nix
+++ b/nixos/modules/services/torrent/flexget.nix
@@ -64,7 +64,6 @@ in {
         path = [ pkg ];
         serviceConfig = {
           User = cfg.user;
-          Environment = "TZ=${config.time.timeZone}";
           ExecStartPre = "${pkgs.coreutils}/bin/install -m644 ${ymlFile} ${configFile}";
           ExecStart = "${pkg}/bin/flexget -c ${configFile} daemon start";
           ExecStop = "${pkg}/bin/flexget -c ${configFile} daemon stop";
diff --git a/nixos/modules/services/web-apps/code-server.nix b/nixos/modules/services/web-apps/code-server.nix
deleted file mode 100644
index 11601f6c304..00000000000
--- a/nixos/modules/services/web-apps/code-server.nix
+++ /dev/null
@@ -1,259 +0,0 @@
-{ config, lib, pkgs, ... }:
-
-let
-  cfg = config.services.code-server;
-  defaultUser = "code-server";
-  defaultGroup = defaultUser;
-in {
-  options = {
-    services.code-server = {
-      enable = lib.mkEnableOption (lib.mdDoc "code-server");
-
-      package = lib.mkPackageOptionMD pkgs "code-server" {
-        example = ''
-          pkgs.vscode-with-extensions.override {
-            vscode = pkgs.code-server;
-            vscodeExtensions = with pkgs.vscode-extensions; [
-              bbenoist.nix
-              dracula-theme.theme-dracula
-            ];
-          }
-        '';
-      };
-
-      extraPackages = lib.mkOption {
-        default = [ ];
-        description = lib.mdDoc ''
-          Additional packages to add to the code-server {env}`PATH`.
-        '';
-        example = lib.literalExpression "[ pkgs.go ]";
-        type = lib.types.listOf lib.types.package;
-      };
-
-      extraEnvironment = lib.mkOption {
-        type = lib.types.attrsOf lib.types.str;
-        description = lib.mdDoc ''
-          Additional environment variables to pass to code-server.
-        '';
-        default = { };
-        example = { PKG_CONFIG_PATH = "/run/current-system/sw/lib/pkgconfig"; };
-      };
-
-      extraArguments = lib.mkOption {
-        default = [ ];
-        description = lib.mdDoc ''
-          Additional arguments to pass to code-server.
-        '';
-        example = lib.literalExpression ''[ "--log=info" ]'';
-        type = lib.types.listOf lib.types.str;
-      };
-
-      host = lib.mkOption {
-        default = "localhost";
-        description = lib.mdDoc ''
-          The host name or IP address the server should listen to.
-        '';
-        type = lib.types.str;
-      };
-
-      port = lib.mkOption {
-        default = 4444;
-        description = lib.mdDoc ''
-          The port the server should listen to.
-        '';
-        type = lib.types.port;
-      };
-
-      auth = lib.mkOption {
-        default = "password";
-        description = lib.mdDoc ''
-          The type of authentication to use.
-        '';
-        type = lib.types.enum [ "none" "password" ];
-      };
-
-      hashedPassword = lib.mkOption {
-        default = "";
-        description = lib.mdDoc ''
-          Create the password with: `echo -n 'thisismypassword' | npx argon2-cli -e`.
-        '';
-        type = lib.types.str;
-      };
-
-      user = lib.mkOption {
-        default = defaultUser;
-        example = "yourUser";
-        description = lib.mdDoc ''
-          The user to run code-server as.
-          By default, a user named `${defaultUser}` will be created.
-        '';
-        type = lib.types.str;
-      };
-
-      group = lib.mkOption {
-        default = defaultGroup;
-        example = "yourGroup";
-        description = lib.mdDoc ''
-          The group to run code-server under.
-          By default, a group named `${defaultGroup}` will be created.
-        '';
-        type = lib.types.str;
-      };
-
-      extraGroups = lib.mkOption {
-        default = [ ];
-        description = lib.mdDoc ''
-          An array of additional groups for the `${defaultUser}` user.
-        '';
-        example = [ "docker" ];
-        type = lib.types.listOf lib.types.str;
-      };
-
-      socket = lib.mkOption {
-        default = null;
-        example = "/run/code-server/socket";
-        description = lib.mdDoc ''
-          Path to a socket (bind-addr will be ignored).
-        '';
-        type = lib.types.nullOr lib.types.str;
-      };
-
-      socketMode = lib.mkOption {
-        default = null;
-        description = lib.mdDoc ''
-           File mode of the socket.
-        '';
-        type = lib.types.nullOr lib.types.str;
-      };
-
-      userDataDir = lib.mkOption {
-        default = null;
-        description = lib.mdDoc ''
-          Path to the user data directory.
-        '';
-        type = lib.types.nullOr lib.types.str;
-      };
-
-      extensionsDir = lib.mkOption {
-        default = null;
-        description = lib.mdDoc ''
-          Path to the extensions directory.
-        '';
-        type = lib.types.nullOr lib.types.str;
-      };
-
-      proxyDomain = lib.mkOption {
-        default = null;
-        example = "code-server.lan";
-        description = lib.mdDoc ''
-          Domain used for proxying ports.
-        '';
-        type = lib.types.nullOr lib.types.str;
-      };
-
-      disableTelemetry = lib.mkOption {
-        default = false;
-        example = true;
-        description = lib.mdDoc ''
-          Disable telemetry.
-        '';
-        type = lib.types.bool;
-      };
-
-      disableUpdateCheck = lib.mkOption {
-        default = false;
-        example = true;
-        description = lib.mdDoc ''
-          Disable update check.
-          Without this flag, code-server checks every 6 hours against the latest github release and
-          then notifies you once every week that a new release is available.
-        '';
-        type = lib.types.bool;
-      };
-
-      disableFileDownloads = lib.mkOption {
-        default = false;
-        example = true;
-        description = lib.mdDoc ''
-          Disable file downloads from Code.
-        '';
-        type = lib.types.bool;
-      };
-
-      disableWorkspaceTrust = lib.mkOption {
-        default = false;
-        example = true;
-        description = lib.mdDoc ''
-          Disable Workspace Trust feature.
-        '';
-        type = lib.types.bool;
-      };
-
-      disableGettingStartedOverride = lib.mkOption {
-        default = false;
-        example = true;
-        description = lib.mdDoc ''
-          Disable the coder/coder override in the Help: Getting Started page.
-        '';
-        type = lib.types.bool;
-      };
-
-    };
-  };
-
-  config = lib.mkIf cfg.enable {
-    systemd.services.code-server = {
-      description = "Code server";
-      wantedBy = [ "multi-user.target" ];
-      after = [ "network-online.target" ];
-      path = cfg.extraPackages;
-      environment = {
-        HASHED_PASSWORD = cfg.hashedPassword;
-      } // cfg.extraEnvironment;
-      serviceConfig = {
-        ExecStart = ''
-          ${lib.getExe cfg.package} \
-            --auth=${cfg.auth} \
-            --bind-addr=${cfg.host}:${toString cfg.port} \
-          '' + lib.optionalString (cfg.socket != null) ''
-            --socket=${cfg.socket} \
-          '' + lib.optionalString (cfg.userDataDir != null) ''
-            --user-data-dir=${cfg.userDataDir} \
-          '' + lib.optionalString (cfg.extensionsDir != null) ''
-            --extensions-dir=${cfg.extensionsDir} \
-          '' + lib.optionalString (cfg.disableTelemetry == true) ''
-            --disable-telemetry \
-          '' + lib.optionalString (cfg.disableUpdateCheck == true) ''
-            --disable-update-check \
-          '' + lib.optionalString (cfg.disableFileDownloads == true) ''
-            --disable-file-downloads \
-          '' + lib.optionalString (cfg.disableWorkspaceTrust == true) ''
-            --disable-workspace-trust \
-          '' + lib.optionalString (cfg.disableGettingStartedOverride == true) ''
-            --disable-getting-started-override \
-          '' + lib.escapeShellArgs cfg.extraArguments;
-        ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
-        RuntimeDirectory = cfg.user;
-        User = cfg.user;
-        Group = cfg.group;
-        Restart = "on-failure";
-      };
-    };
-
-    users.users."${cfg.user}" = lib.mkMerge [
-      (lib.mkIf (cfg.user == defaultUser) {
-        isNormalUser = true;
-        description = "code-server user";
-        inherit (cfg) group;
-      })
-      {
-        packages = cfg.extraPackages;
-        inherit (cfg) extraGroups;
-      }
-    ];
-
-    users.groups."${defaultGroup}" = lib.mkIf (cfg.group == defaultGroup) { };
-  };
-
-  meta.maintainers = [ lib.maintainers.stackshadow ];
-}
diff --git a/nixos/modules/services/web-apps/coder.nix b/nixos/modules/services/web-apps/coder.nix
index 469a29bc3aa..f65211308c4 100644
--- a/nixos/modules/services/web-apps/coder.nix
+++ b/nixos/modules/services/web-apps/coder.nix
@@ -149,8 +149,8 @@ in {
 
   config = mkIf cfg.enable {
     assertions = [
-      { assertion = cfg.database.createLocally -> cfg.database.username == name;
-        message = "services.coder.database.username must be set to ${user} if services.coder.database.createLocally is set true";
+      { assertion = cfg.database.createLocally -> cfg.database.username == name && cfg.database.database == cfg.database.username;
+        message = "services.coder.database.username must be set to ${name} if services.coder.database.createLocally is set true";
       }
     ];
 
@@ -193,10 +193,8 @@ in {
         cfg.database.database
       ];
       ensureUsers = [{
-        name = cfg.database.username;
-        ensurePermissions = {
-          "DATABASE \"${cfg.database.database}\"" = "ALL PRIVILEGES";
-        };
+        name = cfg.user;
+        ensureDBOwnership = true;
         }
       ];
     };
diff --git a/nixos/modules/services/web-apps/gotosocial.nix b/nixos/modules/services/web-apps/gotosocial.nix
index f7ae018d5b7..9c21719a575 100644
--- a/nixos/modules/services/web-apps/gotosocial.nix
+++ b/nixos/modules/services/web-apps/gotosocial.nix
@@ -128,9 +128,7 @@ in
       ensureUsers = [
         {
           name = "gotosocial";
-          ensurePermissions = {
-            "DATABASE gotosocial" = "ALL PRIVILEGES";
-          };
+          ensureDBOwnership = true;
         }
       ];
     };
diff --git a/nixos/modules/services/web-apps/invidious.nix b/nixos/modules/services/web-apps/invidious.nix
index 5603ef7392e..e4fbc6fd936 100644
--- a/nixos/modules/services/web-apps/invidious.nix
+++ b/nixos/modules/services/web-apps/invidious.nix
@@ -109,15 +109,17 @@ let
     # Default to using the local database if we create it
     services.invidious.database.host = lib.mkDefault null;
 
+
+    # TODO(raitobezarius to maintainers of invidious): I strongly advise cleaning up the kemal-specific
+    # handling for 24.05 and using `ensureDBOwnership`.
+    # See https://github.com/NixOS/nixpkgs/issues/216989
+    systemd.services.postgresql.postStart = lib.mkAfter ''
+      $PSQL -tAc 'ALTER DATABASE "${cfg.settings.db.dbname}" OWNER TO "${cfg.settings.db.user}";'
+    '';
     services.postgresql = {
       enable = true;
+      ensureUsers = lib.singleton { name = cfg.settings.db.user; ensureDBOwnership = false; };
       ensureDatabases = lib.singleton cfg.settings.db.dbname;
-      ensureUsers = lib.singleton {
-        name = cfg.settings.db.user;
-        ensurePermissions = {
-          "DATABASE ${cfg.settings.db.dbname}" = "ALL PRIVILEGES";
-        };
-      };
       # This is only needed because the unix user invidious isn't the same as
       # the database user. This tells postgres to map one to the other.
       identMap = ''
@@ -136,6 +138,7 @@ let
       documentation = [ "https://docs.invidious.io/Database-Information-and-Maintenance.md" ];
       startAt = lib.mkDefault "weekly";
       path = [ config.services.postgresql.package ];
+      after = [ "postgresql.service" ];
       script = ''
         psql ${cfg.settings.db.dbname} ${cfg.settings.db.user} -c "DELETE FROM nonces * WHERE expire < current_timestamp"
         psql ${cfg.settings.db.dbname} ${cfg.settings.db.user} -c "TRUNCATE TABLE videos"
diff --git a/nixos/modules/services/web-apps/jitsi-meet.nix b/nixos/modules/services/web-apps/jitsi-meet.nix
index 21416be3587..c0f9d785eea 100644
--- a/nixos/modules/services/web-apps/jitsi-meet.nix
+++ b/nixos/modules/services/web-apps/jitsi-meet.nix
@@ -169,6 +169,15 @@ in
         off if you want to configure it manually.
       '';
     };
+
+    excalidraw.enable = mkEnableOption (lib.mdDoc "Excalidraw collaboration backend for Jitsi");
+    excalidraw.port = mkOption {
+      type = types.port;
+      default = 3002;
+      description = lib.mdDoc ''The port on which the Excalidraw backend for Jitsi should listen.'';
+    };
+
+    secureDomain.enable = mkEnableOption (lib.mdDoc "Authenticated room creation");
   };
 
   config = mkIf cfg.enable {
@@ -192,41 +201,118 @@ in
           roomLocking = false;
           roomDefaultPublicJids = true;
           extraConfig = ''
+            restrict_room_creation = true
+            storage = "memory"
+            admins = { "focus@auth.${cfg.hostName}" }
+          '';
+        }
+        {
+          domain = "breakout.${cfg.hostName}";
+          name = "Jitsi Meet Breakout MUC";
+          roomLocking = false;
+          roomDefaultPublicJids = true;
+          extraConfig = ''
+            restrict_room_creation = true
             storage = "memory"
+            admins = { "focus@auth.${cfg.hostName}" }
           '';
         }
         {
-          domain = "internal.${cfg.hostName}";
+          domain = "internal.auth.${cfg.hostName}";
           name = "Jitsi Meet Videobridge MUC";
+          roomLocking = false;
+          roomDefaultPublicJids = true;
           extraConfig = ''
             storage = "memory"
             admins = { "focus@auth.${cfg.hostName}", "jvb@auth.${cfg.hostName}" }
           '';
           #-- muc_room_cache_size = 1000
         }
+        {
+          domain = "lobby.${cfg.hostName}";
+          name = "Jitsi Meet Lobby MUC";
+          roomLocking = false;
+          roomDefaultPublicJids = true;
+          extraConfig = ''
+            restrict_room_creation = true
+            storage = "memory"
+          '';
+        }
+      ];
+      extraModules = [
+        "pubsub"
+        "smacks"
+        "speakerstats"
+        "external_services"
+        "conference_duration"
+        "end_conference"
+        "muc_lobby_rooms"
+        "muc_breakout_rooms"
+        "av_moderation"
+        "muc_hide_all"
+        "muc_meeting_id"
+        "muc_domain_mapper"
+        "muc_rate_limit"
+        "limits_exception"
+        "persistent_lobby"
+        "room_metadata"
       ];
-      extraModules = [ "pubsub" "smacks" ];
       extraPluginPaths = [ "${pkgs.jitsi-meet-prosody}/share/prosody-plugins" ];
-      extraConfig = lib.mkMerge [ (mkAfter ''
-        Component "focus.${cfg.hostName}" "client_proxy"
-          target_address = "focus@auth.${cfg.hostName}"
+      extraConfig = lib.mkMerge [
+        (mkAfter ''
+          Component "focus.${cfg.hostName}" "client_proxy"
+            target_address = "focus@auth.${cfg.hostName}"
+
+          Component "speakerstats.${cfg.hostName}" "speakerstats_component"
+            muc_component = "conference.${cfg.hostName}"
+
+          Component "conferenceduration.${cfg.hostName}" "conference_duration_component"
+            muc_component = "conference.${cfg.hostName}"
+
+          Component "endconference.${cfg.hostName}" "end_conference"
+            muc_component = "conference.${cfg.hostName}"
+
+          Component "avmoderation.${cfg.hostName}" "av_moderation_component"
+            muc_component = "conference.${cfg.hostName}"
+
+          Component "metadata.${cfg.hostName}" "room_metadata_component"
+            muc_component = "conference.${cfg.hostName}"
+            breakout_rooms_component = "breakout.${cfg.hostName}"
         '')
         (mkBefore ''
+          muc_mapper_domain_base = "${cfg.hostName}"
+
           cross_domain_websocket = true;
           consider_websocket_secure = true;
+
+          unlimited_jids = {
+            "focus@auth.${cfg.hostName}",
+            "jvb@auth.${cfg.hostName}"
+          }
         '')
       ];
       virtualHosts.${cfg.hostName} = {
         enabled = true;
         domain = cfg.hostName;
         extraConfig = ''
-          authentication = "anonymous"
+          authentication = ${if cfg.secureDomain.enable then "\"internal_hashed\"" else "\"jitsi-anonymous\""}
           c2s_require_encryption = false
           admins = { "focus@auth.${cfg.hostName}" }
           smacks_max_unacked_stanzas = 5
           smacks_hibernation_time = 60
           smacks_max_hibernated_sessions = 1
           smacks_max_old_sessions = 1
+
+          av_moderation_component = "avmoderation.${cfg.hostName}"
+          speakerstats_component = "speakerstats.${cfg.hostName}"
+          conference_duration_component = "conferenceduration.${cfg.hostName}"
+          end_conference_component = "endconference.${cfg.hostName}"
+
+          c2s_require_encryption = false
+          lobby_muc = "lobby.${cfg.hostName}"
+          breakout_rooms_muc = "breakout.${cfg.hostName}"
+          room_metadata_component = "metadata.${cfg.hostName}"
+          main_muc = "conference.${cfg.hostName}"
         '';
         ssl = {
           cert = "/var/lib/jitsi-meet/jitsi-meet.crt";
@@ -237,7 +323,7 @@ in
         enabled = true;
         domain = "auth.${cfg.hostName}";
         extraConfig = ''
-          authentication = "internal_plain"
+          authentication = "internal_hashed"
         '';
         ssl = {
           cert = "/var/lib/jitsi-meet/jitsi-meet.crt";
@@ -252,6 +338,14 @@ in
           c2s_require_encryption = false
         '';
       };
+      virtualHosts."guest.${cfg.hostName}" = {
+        enabled = true;
+        domain = "guest.${cfg.hostName}";
+        extraConfig = ''
+          authentication = "anonymous"
+          c2s_require_encryption = false
+        '';
+      };
     };
     systemd.services.prosody = mkIf cfg.prosody.enable {
       preStart = let
@@ -270,7 +364,7 @@ in
       reloadIfChanged = true;
     };
 
-    users.groups.jitsi-meet = {};
+    users.groups.jitsi-meet = { };
     systemd.tmpfiles.rules = [
       "d '/var/lib/jitsi-meet' 0750 root jitsi-meet - -"
     ];
@@ -317,6 +411,20 @@ in
       '';
     };
 
+    systemd.services.jitsi-excalidraw = mkIf cfg.excalidraw.enable {
+      description = "Excalidraw collaboration backend for Jitsi";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      environment.PORT = toString cfg.excalidraw.port;
+
+      serviceConfig = {
+        Type = "simple";
+        ExecStart = "${pkgs.jitsi-excalidraw}/bin/jitsi-excalidraw-backend";
+        Restart = "on-failure";
+        Group = "jitsi-meet";
+      };
+    };
+
     services.nginx = mkIf cfg.nginx.enable {
       enable = mkDefault true;
       virtualHosts.${cfg.hostName} = {
@@ -345,12 +453,23 @@ in
         locations."=/external_api.js" = mkDefault {
           alias = "${pkgs.jitsi-meet}/libs/external_api.min.js";
         };
+        locations."=/_api/room-info" = {
+          proxyPass = "http://localhost:5280/room-info";
+          extraConfig = ''
+            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+            proxy_set_header Host $host;
+          '';
+        };
         locations."=/config.js" = mkDefault {
           alias = overrideJs "${pkgs.jitsi-meet}/config.js" "config" (recursiveUpdate defaultCfg cfg.config) cfg.extraConfig;
         };
         locations."=/interface_config.js" = mkDefault {
           alias = overrideJs "${pkgs.jitsi-meet}/interface_config.js" "interfaceConfig" cfg.interfaceConfig "";
         };
+        locations."/socket.io/" = mkIf cfg.excalidraw.enable {
+          proxyPass = "http://127.0.0.1:${toString cfg.excalidraw.port}";
+          proxyWebsockets = true;
+        };
       };
     };
 
@@ -359,7 +478,7 @@ in
       virtualHosts.${cfg.hostName} = {
         extraConfig =
         let
-          templatedJitsiMeet = pkgs.runCommand "templated-jitsi-meet" {} ''
+          templatedJitsiMeet = pkgs.runCommand "templated-jitsi-meet" { } ''
             cp -R ${pkgs.jitsi-meet}/* .
             for file in *.html **/*.html ; do
               ${pkgs.sd}/bin/sd '<!--#include virtual="(.*)" -->' '{{ include "$1" }}' $file
@@ -390,13 +509,24 @@ in
       };
     };
 
+    services.jitsi-meet.config = recursiveUpdate
+      (mkIf cfg.excalidraw.enable {
+        whiteboard = {
+          enabled = true;
+          collabServerBaseUrl = "https://${cfg.hostName}";
+        };
+      })
+      (mkIf cfg.secureDomain.enable {
+        hosts.anonymousdomain = "guest.${cfg.hostName}";
+      });
+
     services.jitsi-videobridge = mkIf cfg.videobridge.enable {
       enable = true;
       xmppConfigs."localhost" = {
         userName = "jvb";
         domain = "auth.${cfg.hostName}";
         passwordFile = "/var/lib/jitsi-meet/videobridge-secret";
-        mucJids = "jvbbrewery@internal.${cfg.hostName}";
+        mucJids = "jvbbrewery@internal.auth.${cfg.hostName}";
         disableCertificateVerification = true;
       };
     };
@@ -409,17 +539,27 @@ in
       userName = "focus";
       userPasswordFile = "/var/lib/jitsi-meet/jicofo-user-secret";
       componentPasswordFile = "/var/lib/jitsi-meet/jicofo-component-secret";
-      bridgeMuc = "jvbbrewery@internal.${cfg.hostName}";
+      bridgeMuc = "jvbbrewery@internal.auth.${cfg.hostName}";
       config = mkMerge [{
         jicofo.xmpp.service.disable-certificate-verification = true;
         jicofo.xmpp.client.disable-certificate-verification = true;
-      #} (lib.mkIf cfg.jibri.enable {
-       } (lib.mkIf (config.services.jibri.enable || cfg.jibri.enable) {
-         jicofo.jibri = {
-           brewery-jid = "JibriBrewery@internal.${cfg.hostName}";
-           pending-timeout = "90";
-         };
-      })];
+      }
+        (lib.mkIf (config.services.jibri.enable || cfg.jibri.enable) {
+          jicofo.jibri = {
+            brewery-jid = "JibriBrewery@internal.auth.${cfg.hostName}";
+            pending-timeout = "90";
+          };
+        })
+        (lib.mkIf cfg.secureDomain.enable {
+          jicofo = {
+            authentication = {
+              enabled = "true";
+              type = "XMPP";
+              login-url = cfg.hostName;
+            };
+            xmpp.client.client-proxy = "focus.${cfg.hostName}";
+          };
+        })];
     };
 
     services.jibri = mkIf cfg.jibri.enable {
@@ -430,7 +570,7 @@ in
         xmppDomain = cfg.hostName;
 
         control.muc = {
-          domain = "internal.${cfg.hostName}";
+          domain = "internal.auth.${cfg.hostName}";
           roomName = "JibriBrewery";
           nickname = "jibri";
         };
diff --git a/nixos/modules/services/web-apps/lemmy.nix b/nixos/modules/services/web-apps/lemmy.nix
index 20d9dcb7c26..32389f7a59d 100644
--- a/nixos/modules/services/web-apps/lemmy.nix
+++ b/nixos/modules/services/web-apps/lemmy.nix
@@ -146,7 +146,7 @@ in
         ensureDatabases = [ cfg.settings.database.database ];
         ensureUsers = [{
           name = cfg.settings.database.user;
-          ensurePermissions."DATABASE ${cfg.settings.database.database}" = "ALL PRIVILEGES";
+          ensureDBOwnership = true;
         }];
       };
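
This is the first of many ensurePermissions -> ensureDBOwnership conversions in this
change set. A minimal sketch of what the new pattern expects, assuming the module's
default database and role name of "lemmy": the role name must equal the database name,
and the role becomes the owner of that database instead of receiving ALL PRIVILEGES.

  services.postgresql = {
    enable = true;
    ensureDatabases = [ "lemmy" ];
    ensureUsers = [{
      name = "lemmy";            # must match the database name for ensureDBOwnership
      ensureDBOwnership = true;
    }];
  };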
 
diff --git a/nixos/modules/services/web-apps/mastodon.nix b/nixos/modules/services/web-apps/mastodon.nix
index 2aab97438b7..8686506b1c2 100644
--- a/nixos/modules/services/web-apps/mastodon.nix
+++ b/nixos/modules/services/web-apps/mastodon.nix
@@ -17,9 +17,6 @@ let
     WEB_CONCURRENCY = toString cfg.webProcesses;
     MAX_THREADS = toString cfg.webThreads;
 
-    # mastodon-streaming concurrency.
-    STREAMING_CLUSTER_NUM = toString cfg.streamingProcesses;
-
     DB_USER = cfg.database.user;
 
     REDIS_HOST = cfg.redis.host;
@@ -33,13 +30,15 @@ let
     PAPERCLIP_ROOT_PATH = "/var/lib/mastodon/public-system";
     PAPERCLIP_ROOT_URL = "/system";
     ES_ENABLED = if (cfg.elasticsearch.host != null) then "true" else "false";
-    ES_HOST = cfg.elasticsearch.host;
-    ES_PORT = toString(cfg.elasticsearch.port);
 
     TRUSTED_PROXY_IP = cfg.trustedProxy;
   }
   // lib.optionalAttrs (cfg.database.host != "/run/postgresql" && cfg.database.port != null) { DB_PORT = toString cfg.database.port; }
   // lib.optionalAttrs cfg.smtp.authenticate { SMTP_LOGIN  = cfg.smtp.user; }
+  // lib.optionalAttrs (cfg.elasticsearch.host != null) { ES_HOST = cfg.elasticsearch.host; }
+  // lib.optionalAttrs (cfg.elasticsearch.host != null) { ES_PORT = toString(cfg.elasticsearch.port); }
+  // lib.optionalAttrs (cfg.elasticsearch.host != null) { ES_PRESET = cfg.elasticsearch.preset; }
+  // lib.optionalAttrs (cfg.elasticsearch.user != null) { ES_USER = cfg.elasticsearch.user; }
   // cfg.extraConfig;
 
   systemCallsList = [ "@cpu-emulation" "@debug" "@keyring" "@ipc" "@mount" "@obsolete" "@privileged" "@setuid" ];
@@ -141,8 +140,44 @@ let
     })
   ) cfg.sidekiqProcesses;
 
+  streamingUnits = builtins.listToAttrs
+      (map (i: {
+        name = "mastodon-streaming-${toString i}";
+        value = {
+          after = [ "network.target" "mastodon-init-dirs.service" ]
+            ++ lib.optional databaseActuallyCreateLocally "postgresql.service"
+            ++ lib.optional cfg.automaticMigrations "mastodon-init-db.service";
+          requires = [ "mastodon-init-dirs.service" ]
+            ++ lib.optional databaseActuallyCreateLocally "postgresql.service"
+            ++ lib.optional cfg.automaticMigrations "mastodon-init-db.service";
+          wantedBy = [ "mastodon.target" "mastodon-streaming.target" ];
+          description = "Mastodon streaming ${toString i}";
+          environment = env // { SOCKET = "/run/mastodon-streaming/streaming-${toString i}.socket"; };
+          serviceConfig = {
+            ExecStart = "${cfg.package}/run-streaming.sh";
+            Restart = "always";
+            RestartSec = 20;
+            EnvironmentFile = [ "/var/lib/mastodon/.secrets_env" ] ++ cfg.extraEnvFiles;
+            WorkingDirectory = cfg.package;
+            # Runtime directory and mode
+            RuntimeDirectory = "mastodon-streaming";
+            RuntimeDirectoryMode = "0750";
+            # System Call Filtering
+            SystemCallFilter = [ ("~" + lib.concatStringsSep " " (systemCallsList ++ [ "@memlock" "@resources" ])) "pipe" "pipe2" ];
+          } // cfgService;
+        };
+      })
+      (lib.range 1 cfg.streamingProcesses));
+
 in {
 
+  imports = [
+    (lib.mkRemovedOptionModule
+      [ "services" "mastodon" "streamingPort" ]
+      "Mastodon currently doesn't support streaming via TCP ports. Please open a PR if you need this."
+    )
+  ];
+
   options = {
     services.mastodon = {
       enable = lib.mkEnableOption (lib.mdDoc "Mastodon, a federated social network server");
@@ -191,18 +226,13 @@ in {
         default = "mastodon";
       };
 
-      streamingPort = lib.mkOption {
-        description = lib.mdDoc "TCP port used by the mastodon-streaming service.";
-        type = lib.types.port;
-        default = 55000;
-      };
       streamingProcesses = lib.mkOption {
         description = lib.mdDoc ''
-          Processes used by the mastodon-streaming service.
-          Defaults to the number of CPU cores minus one.
+          Number of processes used by the mastodon-streaming service.
+          The recommended value is the number of CPU cores minus one.
         '';
-        type = lib.types.nullOr lib.types.int;
-        default = null;
+        type = lib.types.ints.positive;
+        example = 3;
       };
 
       webPort = lib.mkOption {
@@ -485,6 +515,31 @@ in {
           type = lib.types.port;
           default = 9200;
         };
+
+        preset = lib.mkOption {
+          description = lib.mdDoc ''
+            Controls the Elasticsearch indices configuration (number of shards and replicas).
+          '';
+          type = lib.types.enum [ "single_node_cluster" "small_cluster" "large_cluster" ];
+          default = "single_node_cluster";
+          example = "large_cluster";
+        };
+
+        user = lib.mkOption {
+          description = lib.mdDoc "Used for optionally authenticating with Elasticsearch.";
+          type = lib.types.nullOr lib.types.str;
+          default = null;
+          example = "elasticsearch-mastodon";
+        };
+
+        passwordFile = lib.mkOption {
+          description = lib.mdDoc ''
+            Path to file containing password for optionally authenticating with Elasticsearch.
+          '';
+          type = lib.types.nullOr lib.types.path;
+          default = null;
+          example = "/var/lib/mastodon/secrets/elasticsearch-password";
+        };
       };
 
       package = lib.mkOption {
@@ -557,7 +612,7 @@ in {
   config = lib.mkIf cfg.enable (lib.mkMerge [{
     assertions = [
       {
-        assertion = databaseActuallyCreateLocally -> (cfg.user == cfg.database.user);
+        assertion = databaseActuallyCreateLocally -> (cfg.user == cfg.database.user && cfg.database.user == cfg.database.name);
         message = ''
           For local automatic database provisioning (services.mastodon.database.createLocally == true) with peer
             authentication (services.mastodon.database.host == "/run/postgresql") to work services.mastodon.user
@@ -603,6 +658,12 @@ in {
       after = [ "network.target" ];
     };
 
+    systemd.targets.mastodon-streaming = {
+      description = "Target for all Mastodon streaming services";
+      wantedBy = [ "multi-user.target" "mastodon.target" ];
+      after = [ "network.target" ];
+    };
+
     systemd.services.mastodon-init-dirs = {
       script = ''
         umask 077
@@ -631,6 +692,8 @@ in {
         DB_PASS="$(cat ${cfg.database.passwordFile})"
       '' + lib.optionalString cfg.smtp.authenticate ''
         SMTP_PASSWORD="$(cat ${cfg.smtp.passwordFile})"
+      '' + lib.optionalString (cfg.elasticsearch.passwordFile != null) ''
+        ES_PASS="$(cat ${cfg.elasticsearch.passwordFile})"
       '' + ''
         EOF
       '';
@@ -688,33 +751,6 @@ in {
         ++ lib.optional databaseActuallyCreateLocally "postgresql.service";
     };
 
-    systemd.services.mastodon-streaming = {
-      after = [ "network.target" "mastodon-init-dirs.service" ]
-        ++ lib.optional databaseActuallyCreateLocally "postgresql.service"
-        ++ lib.optional cfg.automaticMigrations "mastodon-init-db.service";
-      requires = [ "mastodon-init-dirs.service" ]
-        ++ lib.optional databaseActuallyCreateLocally "postgresql.service"
-        ++ lib.optional cfg.automaticMigrations "mastodon-init-db.service";
-      wantedBy = [ "mastodon.target" ];
-      description = "Mastodon streaming";
-      environment = env // (if cfg.enableUnixSocket
-        then { SOCKET = "/run/mastodon-streaming/streaming.socket"; }
-        else { PORT = toString(cfg.streamingPort); }
-      );
-      serviceConfig = {
-        ExecStart = "${cfg.package}/run-streaming.sh";
-        Restart = "always";
-        RestartSec = 20;
-        EnvironmentFile = [ "/var/lib/mastodon/.secrets_env" ] ++ cfg.extraEnvFiles;
-        WorkingDirectory = cfg.package;
-        # Runtime directory and mode
-        RuntimeDirectory = "mastodon-streaming";
-        RuntimeDirectoryMode = "0750";
-        # System Call Filtering
-        SystemCallFilter = [ ("~" + lib.concatStringsSep " " (systemCallsList ++ [ "@memlock" "@resources" ])) "pipe" "pipe2" ];
-      } // cfgService;
-    };
-
     systemd.services.mastodon-web = {
       after = [ "network.target" "mastodon-init-dirs.service" ]
         ++ lib.optional databaseActuallyCreateLocally "postgresql.service"
@@ -780,10 +816,20 @@ in {
         };
 
         locations."/api/v1/streaming/" = {
-          proxyPass = (if cfg.enableUnixSocket then "http://unix:/run/mastodon-streaming/streaming.socket" else "http://127.0.0.1:${toString(cfg.streamingPort)}/");
+          proxyPass = "http://mastodon-streaming";
           proxyWebsockets = true;
         };
       };
+      upstreams.mastodon-streaming = {
+        extraConfig = ''
+          least_conn;
+        '';
+        servers = builtins.listToAttrs
+          (map (i: {
+            name = "unix:/run/mastodon-streaming/streaming-${toString i}.socket";
+            value = { };
+          }) (lib.range 1 cfg.streamingProcesses));
+      };
     };
 
     services.postfix = lib.mkIf (cfg.smtp.createLocally && cfg.smtp.host == "127.0.0.1") {
@@ -799,8 +845,8 @@ in {
       enable = true;
       ensureUsers = [
         {
-          name = cfg.database.user;
-          ensurePermissions."DATABASE ${cfg.database.name}" = "ALL PRIVILEGES";
+          name = cfg.database.name;
+          ensureDBOwnership = true;
         }
       ];
       ensureDatabases = [ cfg.database.name ];
@@ -819,7 +865,7 @@ in {
 
     users.groups.${cfg.group}.members = lib.optional cfg.configureNginx config.services.nginx.user;
   }
-  { systemd.services = sidekiqUnits; }
+  { systemd.services = lib.mkMerge [ sidekiqUnits streamingUnits ]; }
   ]);
 
   meta.maintainers = with lib.maintainers; [ happy-river erictapen ];
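
The streaming rework above replaces the single mastodon-streaming unit (and its TCP
port option) with one unit per process, each bound to its own unix socket behind an
nginx least_conn upstream, and adds optional Elasticsearch authentication through the
new preset, user and passwordFile options. A minimal sketch with illustrative values;
streamingProcesses no longer has a default and must now be set explicitly.

  services.mastodon = {
    enable = true;
    streamingProcesses = 3;              # e.g. number of CPU cores minus one
    elasticsearch = {
      host = "127.0.0.1";
      preset = "single_node_cluster";    # default; also small_cluster or large_cluster
      user = "elasticsearch-mastodon";   # illustrative role name
      passwordFile = "/var/lib/mastodon/secrets/elasticsearch-password";
    };
  };
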
diff --git a/nixos/modules/services/web-apps/mediawiki.nix b/nixos/modules/services/web-apps/mediawiki.nix
index 8b494b7c120..ce7bcd94b3f 100644
--- a/nixos/modules/services/web-apps/mediawiki.nix
+++ b/nixos/modules/services/web-apps/mediawiki.nix
@@ -454,7 +454,7 @@ in
       { assertion = cfg.database.createLocally -> (cfg.database.type == "mysql" || cfg.database.type == "postgres");
         message = "services.mediawiki.createLocally is currently only supported for database type 'mysql' and 'postgres'";
       }
-      { assertion = cfg.database.createLocally -> cfg.database.user == user;
+      { assertion = cfg.database.createLocally -> cfg.database.user == user && cfg.database.name == cfg.database.user;
         message = "services.mediawiki.database.user must be set to ${user} if services.mediawiki.database.createLocally is set true";
       }
       { assertion = cfg.database.createLocally -> cfg.database.socket != null;
@@ -486,7 +486,7 @@ in
       ensureDatabases = [ cfg.database.name ];
       ensureUsers = [{
         name = cfg.database.user;
-        ensurePermissions = { "DATABASE \"${cfg.database.name}\"" = "ALL PRIVILEGES"; };
+        ensureDBOwnership = true;
       }];
     };
 
diff --git a/nixos/modules/services/web-apps/miniflux.nix b/nixos/modules/services/web-apps/miniflux.nix
index 3374c746ad3..5c8c93c13c4 100644
--- a/nixos/modules/services/web-apps/miniflux.nix
+++ b/nixos/modules/services/web-apps/miniflux.nix
@@ -6,13 +6,10 @@ let
 
   defaultAddress = "localhost:8080";
 
-  dbUser = "miniflux";
-  dbName = "miniflux";
-
   pgbin = "${config.services.postgresql.package}/bin";
   preStart = pkgs.writeScript "miniflux-pre-start" ''
     #!${pkgs.runtimeShell}
-    ${pgbin}/psql "${dbName}" -c "CREATE EXTENSION IF NOT EXISTS hstore"
+    ${pgbin}/psql "miniflux" -c "CREATE EXTENSION IF NOT EXISTS hstore"
   '';
 in
 
@@ -62,7 +59,7 @@ in
 
     services.miniflux.config =  {
       LISTEN_ADDR = mkDefault defaultAddress;
-      DATABASE_URL = "user=${dbUser} host=/run/postgresql dbname=${dbName}";
+      DATABASE_URL = "user=miniflux host=/run/postgresql dbname=miniflux";
       RUN_MIGRATIONS = "1";
       CREATE_ADMIN = "1";
     };
@@ -70,12 +67,10 @@ in
     services.postgresql = {
       enable = true;
       ensureUsers = [ {
-        name = dbUser;
-        ensurePermissions = {
-          "DATABASE ${dbName}" = "ALL PRIVILEGES";
-        };
+        name = "miniflux";
+        ensureDBOwnership = true;
       } ];
-      ensureDatabases = [ dbName ];
+      ensureDatabases = [ "miniflux" ];
     };
 
     systemd.services.miniflux-dbsetup = {
@@ -97,7 +92,7 @@ in
 
       serviceConfig = {
         ExecStart = "${cfg.package}/bin/miniflux";
-        User = dbUser;
+        User = "miniflux";
         DynamicUser = true;
         RuntimeDirectory = "miniflux";
         RuntimeDirectoryMode = "0700";
diff --git a/nixos/modules/services/web-apps/mobilizon.nix b/nixos/modules/services/web-apps/mobilizon.nix
index 343c5cead2b..bb4319b51a2 100644
--- a/nixos/modules/services/web-apps/mobilizon.nix
+++ b/nixos/modules/services/web-apps/mobilizon.nix
@@ -347,12 +347,18 @@ in
 
       # Taken from here:
       # https://framagit.org/framasoft/mobilizon/-/blob/1.1.0/priv/templates/setup_db.eex
+      # TODO(to maintainers of mobilizon): the owner database alteration is necessary
+      # as PostgreSQL 15 changed its behavior w.r.t. privileges.
+      # See https://github.com/NixOS/nixpkgs/issues/216989 to get rid
+      # of that workaround.
       script =
         ''
           psql "${repoSettings.database}" -c "\
             CREATE EXTENSION IF NOT EXISTS postgis; \
             CREATE EXTENSION IF NOT EXISTS pg_trgm; \
             CREATE EXTENSION IF NOT EXISTS unaccent;"
+          psql -tAc 'ALTER DATABASE "${repoSettings.database}" OWNER TO "${dbUser}";'
+
         '';
 
       serviceConfig = {
@@ -372,9 +378,10 @@ in
       ensureUsers = [
         {
           name = dbUser;
-          ensurePermissions = {
-            "DATABASE \"${repoSettings.database}\"" = "ALL PRIVILEGES";
-          };
+          # Given that `dbUser` is potentially arbitrarily custom, we will perform
+          # manual fixups in mobilizon-postgres.
+          # TODO(to maintainers of mobilizon): Feel free to simplify your setup by using `ensureDBOwnership`.
+          ensureDBOwnership = false;
         }
       ];
       extraPlugins = with postgresql.pkgs; [ postgis ];
diff --git a/nixos/modules/services/web-apps/moodle.nix b/nixos/modules/services/web-apps/moodle.nix
index b617e9a5937..04ae6bd7f17 100644
--- a/nixos/modules/services/web-apps/moodle.nix
+++ b/nixos/modules/services/web-apps/moodle.nix
@@ -194,7 +194,7 @@ in
   config = mkIf cfg.enable {
 
     assertions = [
-      { assertion = cfg.database.createLocally -> cfg.database.user == user;
+      { assertion = cfg.database.createLocally -> cfg.database.user == user && cfg.database.user == cfg.database.name;
         message = "services.moodle.database.user must be set to ${user} if services.moodle.database.createLocally is set true";
       }
       { assertion = cfg.database.createLocally -> cfg.database.passwordFile == null;
@@ -220,7 +220,7 @@ in
       ensureDatabases = [ cfg.database.name ];
       ensureUsers = [
         { name = cfg.database.user;
-          ensurePermissions = { "DATABASE ${cfg.database.name}" = "ALL PRIVILEGES"; };
+          ensureDBOwnership = true;
         }
       ];
     };
diff --git a/nixos/modules/services/web-apps/netbox.nix b/nixos/modules/services/web-apps/netbox.nix
index 8ba1852848e..3b9434e3d34 100644
--- a/nixos/modules/services/web-apps/netbox.nix
+++ b/nixos/modules/services/web-apps/netbox.nix
@@ -257,9 +257,7 @@ in {
       ensureUsers = [
         {
           name = "netbox";
-          ensurePermissions = {
-            "DATABASE netbox" = "ALL PRIVILEGES";
-          };
+          ensureDBOwnership = true;
         }
       ];
     };
diff --git a/nixos/modules/services/web-apps/nextcloud.nix b/nixos/modules/services/web-apps/nextcloud.nix
index f9713cac47e..f1ac3770d40 100644
--- a/nixos/modules/services/web-apps/nextcloud.nix
+++ b/nixos/modules/services/web-apps/nextcloud.nix
@@ -1042,7 +1042,7 @@ in {
         ensureDatabases = [ cfg.config.dbname ];
         ensureUsers = [{
           name = cfg.config.dbuser;
-          ensurePermissions = { "DATABASE ${cfg.config.dbname}" = "ALL PRIVILEGES"; };
+          ensureDBOwnership = true;
         }];
       };
 
diff --git a/nixos/modules/services/web-apps/onlyoffice.nix b/nixos/modules/services/web-apps/onlyoffice.nix
index 3494f2fa21f..f958566b91f 100644
--- a/nixos/modules/services/web-apps/onlyoffice.nix
+++ b/nixos/modules/services/web-apps/onlyoffice.nix
@@ -198,7 +198,7 @@ in
         ensureDatabases = [ "onlyoffice" ];
         ensureUsers = [{
           name = "onlyoffice";
-          ensurePermissions = { "DATABASE \"onlyoffice\"" = "ALL PRIVILEGES"; };
+          ensureDBOwnership = true;
         }];
       };
     };
diff --git a/nixos/modules/services/web-apps/outline.nix b/nixos/modules/services/web-apps/outline.nix
index 0e3bd07c1fc..d97b45d6241 100644
--- a/nixos/modules/services/web-apps/outline.nix
+++ b/nixos/modules/services/web-apps/outline.nix
@@ -581,7 +581,7 @@ in
       enable = true;
       ensureUsers = [{
         name = "outline";
-        ensurePermissions."DATABASE outline" = "ALL PRIVILEGES";
+        ensureDBOwnership = true;
       }];
       ensureDatabases = [ "outline" ];
     };
diff --git a/nixos/modules/services/web-apps/peering-manager.nix b/nixos/modules/services/web-apps/peering-manager.nix
index 7012df6dffb..d6f6077268d 100644
--- a/nixos/modules/services/web-apps/peering-manager.nix
+++ b/nixos/modules/services/web-apps/peering-manager.nix
@@ -186,9 +186,7 @@ in {
       ensureUsers = [
         {
           name = "peering-manager";
-          ensurePermissions = {
-            "DATABASE \"peering-manager\"" = "ALL PRIVILEGES";
-          };
+          ensureDBOwnership = true;
         }
       ];
     };
diff --git a/nixos/modules/services/web-apps/pixelfed.nix b/nixos/modules/services/web-apps/pixelfed.nix
index 159fb52476a..b0a25dcce9e 100644
--- a/nixos/modules/services/web-apps/pixelfed.nix
+++ b/nixos/modules/services/web-apps/pixelfed.nix
@@ -271,7 +271,6 @@ in {
         ensureDatabases = [ cfg.database.name ];
         ensureUsers = [{
           name = user;
-          ensurePermissions = { };
         }];
       };
 
diff --git a/nixos/modules/services/web-apps/plantuml-server.nix b/nixos/modules/services/web-apps/plantuml-server.nix
index 5ebee48c3e0..1fa69814c6c 100644
--- a/nixos/modules/services/web-apps/plantuml-server.nix
+++ b/nixos/modules/services/web-apps/plantuml-server.nix
@@ -1,123 +1,110 @@
 { config, lib, pkgs, ... }:
 
-with lib;
-
 let
+  inherit (lib)
+    literalExpression
+    mdDoc
+    mkEnableOption
+    mkIf
+    mkOption
+    mkPackageOptionMD
+    mkRemovedOptionModule
+    types
+    ;
 
   cfg = config.services.plantuml-server;
 
 in
 
 {
+  imports = [
+    (mkRemovedOptionModule [ "services" "plantuml-server" "allowPlantumlInclude" ] "This option has been removed from PlantUML.")
+  ];
+
   options = {
     services.plantuml-server = {
-      enable = mkEnableOption (lib.mdDoc "PlantUML server");
+      enable = mkEnableOption (mdDoc "PlantUML server");
 
-      package = mkOption {
-        type = types.package;
-        default = pkgs.plantuml-server;
-        defaultText = literalExpression "pkgs.plantuml-server";
-        description = lib.mdDoc "PlantUML server package to use";
-      };
+      package = mkPackageOptionMD pkgs "plantuml-server" { };
 
       packages = {
-        jdk = mkOption {
-          type = types.package;
-          default = pkgs.jdk;
-          defaultText = literalExpression "pkgs.jdk";
-          description = lib.mdDoc "JDK package to use for the server";
-        };
-        jetty = mkOption {
-          type = types.package;
-          default = pkgs.jetty;
-          defaultText = literalExpression "pkgs.jetty";
-          description = lib.mdDoc "Jetty package to use for the server";
+        jdk = mkPackageOptionMD pkgs "jdk" { };
+        jetty = mkPackageOptionMD pkgs "jetty" {
+          default = "jetty_11";
+          extraDescription = ''
+            At the time of writing (v1.2023.12), PlantUML Server does not support
+            Jetty versions higher than 11.x.
+
+            Jetty 12.x has introduced major breaking changes, see
+            <https://github.com/jetty/jetty.project/releases/tag/jetty-12.0.0> and
+            <https://eclipse.dev/jetty/documentation/jetty-12/programming-guide/index.html#pg-migration-11-to-12>
+          '';
         };
       };
 
       user = mkOption {
         type = types.str;
         default = "plantuml";
-        description = lib.mdDoc "User which runs PlantUML server.";
+        description = mdDoc "User which runs PlantUML server.";
       };
 
       group = mkOption {
         type = types.str;
         default = "plantuml";
-        description = lib.mdDoc "Group which runs PlantUML server.";
+        description = mdDoc "Group which runs PlantUML server.";
       };
 
       home = mkOption {
-        type = types.str;
+        type = types.path;
         default = "/var/lib/plantuml";
-        description = lib.mdDoc "Home directory of the PlantUML server instance.";
+        description = mdDoc "Home directory of the PlantUML server instance.";
       };
 
       listenHost = mkOption {
         type = types.str;
         default = "127.0.0.1";
-        description = lib.mdDoc "Host to listen on.";
+        description = mdDoc "Host to listen on.";
       };
 
       listenPort = mkOption {
         type = types.int;
         default = 8080;
-        description = lib.mdDoc "Port to listen on.";
+        description = mdDoc "Port to listen on.";
       };
 
       plantumlLimitSize = mkOption {
         type = types.int;
         default = 4096;
-        description = lib.mdDoc "Limits image width and height.";
+        description = mdDoc "Limits image width and height.";
       };
 
-      graphvizPackage = mkOption {
-        type = types.package;
-        default = pkgs.graphviz;
-        defaultText = literalExpression "pkgs.graphviz";
-        description = lib.mdDoc "Package containing the dot executable.";
-      };
+      graphvizPackage = mkPackageOptionMD pkgs "graphviz" { };
 
       plantumlStats = mkOption {
         type = types.bool;
         default = false;
-        description = lib.mdDoc "Set it to on to enable statistics report (https://plantuml.com/statistics-report).";
+        description = mdDoc "Set it to on to enable statistics report (https://plantuml.com/statistics-report).";
       };
 
       httpAuthorization = mkOption {
         type = types.nullOr types.str;
         default = null;
-        description = lib.mdDoc "When calling the proxy endpoint, the value of HTTP_AUTHORIZATION will be used to set the HTTP Authorization header.";
-      };
-
-      allowPlantumlInclude = mkOption {
-        type = types.bool;
-        default = false;
-        description = lib.mdDoc "Enables !include processing which can read files from the server into diagrams. Files are read relative to the current working directory.";
+        description = mdDoc "When calling the proxy endpoint, the value of HTTP_AUTHORIZATION will be used to set the HTTP Authorization header.";
       };
     };
   };
 
   config = mkIf cfg.enable {
-    users.users.${cfg.user} = {
-      isSystemUser = true;
-      group = cfg.group;
-      home = cfg.home;
-      createHome = true;
-    };
-
-    users.groups.${cfg.group} = {};
-
     systemd.services.plantuml-server = {
       description = "PlantUML server";
       wantedBy = [ "multi-user.target" ];
       path = [ cfg.home ];
+
       environment = {
         PLANTUML_LIMIT_SIZE = builtins.toString cfg.plantumlLimitSize;
         GRAPHVIZ_DOT = "${cfg.graphvizPackage}/bin/dot";
         PLANTUML_STATS = if cfg.plantumlStats then "on" else "off";
         HTTP_AUTHORIZATION = cfg.httpAuthorization;
-        ALLOW_PLANTUML_INCLUDE = if cfg.allowPlantumlInclude then "true" else "false";
       };
       script = ''
       ${cfg.packages.jdk}/bin/java \
@@ -128,13 +115,40 @@ in
           jetty.http.host=${cfg.listenHost} \
           jetty.http.port=${builtins.toString cfg.listenPort}
       '';
+
       serviceConfig = {
         User = cfg.user;
         Group = cfg.group;
+        StateDirectory = mkIf (cfg.home == "/var/lib/plantuml") "plantuml";
+        StateDirectoryMode = mkIf (cfg.home == "/var/lib/plantuml") "0750";
+
+        # Hardening
+        AmbientCapabilities = [ "" ];
+        CapabilityBoundingSet = [ "" ];
+        DynamicUser = true;
+        LockPersonality = true;
+        NoNewPrivileges = true;
+        PrivateDevices = true;
+        PrivateNetwork = false;
         PrivateTmp = true;
+        PrivateUsers = true;
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectSystem = "strict";
+        RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" "AF_INET6" ];
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        SystemCallArchitectures = "native";
+        SystemCallFilter = [ "@system-service" ];
       };
     };
   };
 
-  meta.maintainers = with lib.maintainers; [ truh ];
+  meta.maintainers = with lib.maintainers; [ truh anthonyroussel ];
 }
diff --git a/nixos/modules/services/web-apps/plausible.nix b/nixos/modules/services/web-apps/plausible.nix
index 576b54a7edf..300a0f892ef 100644
--- a/nixos/modules/services/web-apps/plausible.nix
+++ b/nixos/modules/services/web-apps/plausible.nix
@@ -11,13 +11,6 @@ in {
 
     package = mkPackageOptionMD pkgs "plausible" { };
 
-    releaseCookiePath = mkOption {
-      type = with types; either str path;
-      description = lib.mdDoc ''
-        The path to the file with release cookie. (used for remote connection to the running node).
-      '';
-    };
-
     adminUser = {
       name = mkOption {
         default = "admin";
@@ -92,6 +85,13 @@ in {
           framework docs](https://hexdocs.pm/phoenix/Mix.Tasks.Phx.Gen.Secret.html#content).
         '';
       };
+      listenAddress = mkOption {
+        default = "127.0.0.1";
+        type = types.str;
+        description = lib.mdDoc ''
+          The IP address on which the server is listening.
+        '';
+      };
       port = mkOption {
         default = 8000;
         type = types.port;
@@ -162,6 +162,10 @@ in {
     };
   };
 
+  imports = [
+    (mkRemovedOptionModule [ "services" "plausible" "releaseCookiePath" ] "Plausible uses no distributed Erlang features, so this option is no longer necessary and was removed")
+  ];
+
   config = mkIf cfg.enable {
     assertions = [
       { assertion = cfg.adminUser.activate -> cfg.database.postgres.setup;
@@ -180,8 +184,6 @@ in {
       enable = true;
     };
 
-    services.epmd.enable = true;
-
     environment.systemPackages = [ cfg.package ];
 
     systemd.services = mkMerge [
@@ -209,6 +211,32 @@ in {
             # Configuration options from
             # https://plausible.io/docs/self-hosting-configuration
             PORT = toString cfg.server.port;
+            LISTEN_IP = cfg.server.listenAddress;
+
+            # Note [plausible-needs-no-erlang-distributed-features]:
+            # Plausible does not use, and does not plan to use, any of
+            # Erlang's distributed features, see:
+            #     https://github.com/plausible/analytics/pull/1190#issuecomment-1018820934
+            # Thus, disable distribution for improved simplicity and security:
+            #
+            # When distribution is enabled,
+            # Elixir spawns the Erlang VM, which will listen by default on all
+            # interfaces for messages between Erlang nodes (capable of
+            # remote code execution); it can be protected by a cookie; see
+            # https://erlang.org/doc/reference_manual/distributed.html#security.
+            #
+            # It would be possible to restrict the interface to one of our choice
+            # (e.g. localhost or a VPN IP) similar to how we do it with `listenAddress`
+            # for the Plausible web server; if distribution is ever needed in the future,
+            # https://github.com/NixOS/nixpkgs/pull/130297 shows how to do it.
+            #
+            # But since Plausible does not use this feature in any way,
+            # we just disable it.
+            RELEASE_DISTRIBUTION = "none";
+            # Additional safeguard, in case `RELEASE_DISTRIBUTION=none` ever
+            # stops disabling the start of EPMD.
+            ERL_EPMD_ADDRESS = "127.0.0.1";
+
             DISABLE_REGISTRATION = if isBool cfg.server.disableRegistration then boolToString cfg.server.disableRegistration else cfg.server.disableRegistration;
 
             RELEASE_TMP = "/var/lib/plausible/tmp";
@@ -238,7 +266,10 @@ in {
           path = [ cfg.package ]
             ++ optional cfg.database.postgres.setup config.services.postgresql.package;
           script = ''
-            export RELEASE_COOKIE="$(< $CREDENTIALS_DIRECTORY/RELEASE_COOKIE )"
+            # Elixir does not start up if `RELEASE_COOKIE` is not set,
+            # even though we set `RELEASE_DISTRIBUTION=none` so the cookie should be unused.
+            # Thus, make a random one, which should then be ignored.
+            export RELEASE_COOKIE=$(tr -dc A-Za-z0-9 < /dev/urandom | head -c 20)
             export ADMIN_USER_PWD="$(< $CREDENTIALS_DIRECTORY/ADMIN_USER_PWD )"
             export SECRET_KEY_BASE="$(< $CREDENTIALS_DIRECTORY/SECRET_KEY_BASE )"
 
@@ -265,7 +296,6 @@ in {
             LoadCredential = [
               "ADMIN_USER_PWD:${cfg.adminUser.passwordFile}"
               "SECRET_KEY_BASE:${cfg.server.secretKeybaseFile}"
-              "RELEASE_COOKIE:${cfg.releaseCookiePath}"
             ] ++ lib.optionals (cfg.mail.smtp.passwordFile != null) [ "SMTP_USER_PWD:${cfg.mail.smtp.passwordFile}"];
           };
         };
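
The plausible changes above remove the release cookie option (Erlang distribution is
now disabled outright via RELEASE_DISTRIBUTION=none) and add a listenAddress option
for the web server. A minimal sketch with illustrative secret paths; other required
settings such as the admin account details are omitted for brevity.

  services.plausible = {
    enable = true;
    adminUser.passwordFile = "/run/secrets/plausible-admin-password";   # illustrative path
    server = {
      listenAddress = "127.0.0.1";   # new option; this is also its default
      port = 8000;
      secretKeybaseFile = "/run/secrets/plausible-secret-keybase";      # illustrative path
    };
  };
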
diff --git a/nixos/modules/services/web-apps/tt-rss.nix b/nixos/modules/services/web-apps/tt-rss.nix
index 7b2e3be4295..a8fb37d2c5e 100644
--- a/nixos/modules/services/web-apps/tt-rss.nix
+++ b/nixos/modules/services/web-apps/tt-rss.nix
@@ -529,6 +529,15 @@ let
         assertion = cfg.database.password != null -> cfg.database.passwordFile == null;
         message = "Cannot set both password and passwordFile";
       }
+      {
+        assertion = cfg.database.createLocally -> cfg.database.name == cfg.user && cfg.database.user == cfg.user;
+        message = ''
+          When creating a database via NixOS, the db user and db name must be equal!
+          If you already have an existing DB+user and this assertion is new, you can safely set
+          `services.tt-rss.database.createLocally` to `false`; this merely removes the `ensureUsers`
+          and `ensureDatabases` entries, which has no effect on an existing database.
+        '';
+      }
     ];
 
     services.phpfpm.pools = mkIf (cfg.pool == "${poolName}") {
@@ -632,8 +641,8 @@ let
       enable = mkDefault true;
       ensureDatabases = [ cfg.database.name ];
       ensureUsers = [
-        { name = cfg.user;
-          ensurePermissions = { "DATABASE ${cfg.database.name}" = "ALL PRIVILEGES"; };
+        { name = cfg.database.user;
+          ensureDBOwnership = true;
         }
       ];
     };
diff --git a/nixos/modules/services/web-servers/hydron.nix b/nixos/modules/services/web-servers/hydron.nix
index 4434965b217..9d30fdc0caa 100644
--- a/nixos/modules/services/web-servers/hydron.nix
+++ b/nixos/modules/services/web-servers/hydron.nix
@@ -93,7 +93,7 @@ in with lib; {
       ensureDatabases = [ "hydron" ];
       ensureUsers = [
         { name = "hydron";
-          ensurePermissions = { "DATABASE hydron" = "ALL PRIVILEGES"; };
+          ensureDBOwnership = true;
         }
       ];
     };
diff --git a/nixos/modules/services/web-servers/jboss/builder.sh b/nixos/modules/services/web-servers/jboss/builder.sh
index ac573089cd5..8c49b87db06 100644
--- a/nixos/modules/services/web-servers/jboss/builder.sh
+++ b/nixos/modules/services/web-servers/jboss/builder.sh
@@ -1,6 +1,6 @@
 set -e
 
-if [ -e .attrs.sh ]; then source .attrs.sh; fi
+if [ -e "$NIX_ATTRS_SH_FILE" ]; then . "$NIX_ATTRS_SH_FILE"; elif [ -f .attrs.sh ]; then . .attrs.sh; fi
 source $stdenv/setup
 
 mkdir -p $out/bin
diff --git a/nixos/modules/system/boot/kernel.nix b/nixos/modules/system/boot/kernel.nix
index 6b07686efcb..a46331ccd43 100644
--- a/nixos/modules/system/boot/kernel.nix
+++ b/nixos/modules/system/boot/kernel.nix
@@ -96,8 +96,8 @@ in
                                         # (required, but can be null if only config changes
                                         # are needed)
 
-          extraStructuredConfig = {     # attrset of extra configuration parameters
-            FOO = lib.kernel.yes;       # (without the CONFIG_ prefix, optional)
+          extraStructuredConfig = {     # attrset of extra configuration parameters without the CONFIG_ prefix
+            FOO = lib.kernel.yes;       # (optional)
           };                            # values should generally be lib.kernel.yes,
                                         # lib.kernel.no or lib.kernel.module
 
@@ -105,8 +105,9 @@ in
             foo = true;                 # (may be checked by other NixOS modules, optional)
           };
 
-          extraConfig = "CONFIG_FOO y"; # extra configuration options in string form
-                                        # (deprecated, use extraStructuredConfig instead, optional)
+          extraConfig = "FOO y";        # extra configuration options in string form without the CONFIG_ prefix
+                                        # (optional, multiple lines allowed to specify multiple options)
+                                        # (deprecated, use extraStructuredConfig instead)
         }
         ```
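
Since the documentation hunk above only shows the attrset shape, here is a concrete,
illustrative instance of a config-only kernel patch entry; FOO stands in for a real
kernel config symbol.

  boot.kernelPatches = [{
    name = "enable-foo";
    patch = null;                               # config-only change, no patch file
    extraStructuredConfig.FOO = lib.kernel.yes; # becomes CONFIG_FOO=y
  }];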
 
diff --git a/nixos/modules/system/boot/loader/systemd-boot/systemd-boot-builder.py b/nixos/modules/system/boot/loader/systemd-boot/systemd-boot-builder.py
index 310584e398b..7d06e0131d9 100644
--- a/nixos/modules/system/boot/loader/systemd-boot/systemd-boot-builder.py
+++ b/nixos/modules/system/boot/loader/systemd-boot/systemd-boot-builder.py
@@ -11,7 +11,23 @@ import shutil
 import subprocess
 import sys
 import warnings
-from typing import NamedTuple
+import json
+from typing import NamedTuple, Dict, List
+from dataclasses import dataclass
+
+
+@dataclass
+class BootSpec:
+    init: str
+    initrd: str
+    initrdSecrets: str
+    kernel: str
+    kernelParams: List[str]
+    label: str
+    system: str
+    toplevel: str
+    specialisations: Dict[str, "BootSpec"]
+
 
 
 libc = ctypes.CDLL("libc.so.6")
@@ -71,12 +87,31 @@ def write_loader_conf(profile: str | None, generation: int, specialisation: str
     os.rename("@efiSysMountPoint@/loader/loader.conf.tmp", "@efiSysMountPoint@/loader/loader.conf")
 
 
-def profile_path(profile: str | None, generation: int, specialisation: str | None, name: str) -> str:
-    return os.path.realpath("%s/%s" % (system_dir(profile, generation, specialisation), name))
+def get_bootspec(profile: str | None, generation: int) -> BootSpec:
+    system_directory = system_dir(profile, generation, None)
+    boot_json_path = os.path.realpath("%s/%s" % (system_directory, "boot.json"))
+    if os.path.isfile(boot_json_path):
+        boot_json_f = open(boot_json_path, 'r')
+        bootspec_json = json.load(boot_json_f)
+    else:
+        boot_json_str = subprocess.check_output([
+        "@bootspecTools@/bin/synthesize",
+        "--version",
+        "1",
+        system_directory,
+        "/dev/stdout"],
+        universal_newlines=True)
+        bootspec_json = json.loads(boot_json_str)
+    return bootspec_from_json(bootspec_json)
+
+def bootspec_from_json(bootspec_json: Dict) -> BootSpec:
+    specialisations = bootspec_json['org.nixos.specialisation.v1']
+    specialisations = {k: bootspec_from_json(v) for k, v in specialisations.items()}
+    return BootSpec(**bootspec_json['org.nixos.bootspec.v1'], specialisations=specialisations)
 
 
-def copy_from_profile(profile: str | None, generation: int, specialisation: str | None, name: str, dry_run: bool = False) -> str:
-    store_file_path = profile_path(profile, generation, specialisation, name)
+def copy_from_file(file: str, dry_run: bool = False) -> str:
+    store_file_path = os.path.realpath(file)
     suffix = os.path.basename(store_file_path)
     store_dir = os.path.basename(os.path.dirname(store_file_path))
     efi_file_path = "/efi/nixos/%s-%s.efi" % (store_dir, suffix)
@@ -84,40 +119,19 @@ def copy_from_profile(profile: str | None, generation: int, specialisation: str
         copy_if_not_exists(store_file_path, "@efiSysMountPoint@%s" % (efi_file_path))
     return efi_file_path
 
-
-def describe_generation(profile: str | None, generation: int, specialisation: str | None) -> str:
-    try:
-        with open(profile_path(profile, generation, specialisation, "nixos-version")) as f:
-            nixos_version = f.read()
-    except IOError:
-        nixos_version = "Unknown"
-
-    kernel_dir = os.path.dirname(profile_path(profile, generation, specialisation, "kernel"))
-    module_dir = glob.glob("%s/lib/modules/*" % kernel_dir)[0]
-    kernel_version = os.path.basename(module_dir)
-
-    build_time = int(os.path.getctime(system_dir(profile, generation, specialisation)))
-    build_date = datetime.datetime.fromtimestamp(build_time).strftime('%F')
-
-    description = "@distroName@ {}, Linux Kernel {}, Built on {}".format(
-        nixos_version, kernel_version, build_date
-    )
-
-    return description
-
-
 def write_entry(profile: str | None, generation: int, specialisation: str | None,
-                machine_id: str, current: bool) -> None:
-    kernel = copy_from_profile(profile, generation, specialisation, "kernel")
-    initrd = copy_from_profile(profile, generation, specialisation, "initrd")
+                machine_id: str, bootspec: BootSpec, current: bool) -> None:
+    if specialisation:
+        bootspec = bootspec.specialisations[specialisation]
+    kernel = copy_from_file(bootspec.kernel)
+    initrd = copy_from_file(bootspec.initrd)
 
     title = "@distroName@{profile}{specialisation}".format(
         profile=" [" + profile + "]" if profile else "",
         specialisation=" (%s)" % specialisation if specialisation else "")
 
     try:
-        append_initrd_secrets = profile_path(profile, generation, specialisation, "append-initrd-secrets")
-        subprocess.check_call([append_initrd_secrets, "@efiSysMountPoint@%s" % (initrd)])
+        subprocess.check_call([bootspec.initrdSecrets, "@efiSysMountPoint@%s" % (initrd)])
     except FileNotFoundError:
         pass
     except subprocess.CalledProcessError:
@@ -132,17 +146,19 @@ def write_entry(profile: str | None, generation: int, specialisation: str | None
     entry_file = "@efiSysMountPoint@/loader/entries/%s" % (
         generation_conf_filename(profile, generation, specialisation))
     tmp_path = "%s.tmp" % (entry_file)
-    kernel_params = "init=%s " % profile_path(profile, generation, specialisation, "init")
+    kernel_params = "init=%s " % bootspec.init
+
+    kernel_params = kernel_params + " ".join(bootspec.kernelParams)
+    build_time = int(os.path.getctime(system_dir(profile, generation, specialisation)))
+    build_date = datetime.datetime.fromtimestamp(build_time).strftime('%F')
 
-    with open(profile_path(profile, generation, specialisation, "kernel-params")) as params_file:
-        kernel_params = kernel_params + params_file.read()
     with open(tmp_path, 'w') as f:
         f.write(BOOT_ENTRY.format(title=title,
                     generation=generation,
                     kernel=kernel,
                     initrd=initrd,
                     kernel_params=kernel_params,
-                    description=describe_generation(profile, generation, specialisation)))
+                    description=f"{bootspec.label}, built on {build_date}"))
         if machine_id is not None:
             f.write("machine-id %s\n" % machine_id)
         f.flush()
@@ -173,21 +189,14 @@ def get_generations(profile: str | None = None) -> list[SystemIdentifier]:
     return configurations[-configurationLimit:]
 
 
-def get_specialisations(profile: str | None, generation: int, _: str | None) -> list[SystemIdentifier]:
-    specialisations_dir = os.path.join(
-            system_dir(profile, generation, None), "specialisation")
-    if not os.path.exists(specialisations_dir):
-        return []
-    return [SystemIdentifier(profile, generation, spec) for spec in os.listdir(specialisations_dir)]
-
-
 def remove_old_entries(gens: list[SystemIdentifier]) -> None:
     rex_profile = re.compile(r"^@efiSysMountPoint@/loader/entries/nixos-(.*)-generation-.*\.conf$")
     rex_generation = re.compile(r"^@efiSysMountPoint@/loader/entries/nixos.*-generation-([0-9]+)(-specialisation-.*)?\.conf$")
     known_paths = []
     for gen in gens:
-        known_paths.append(copy_from_profile(*gen, "kernel", True))
-        known_paths.append(copy_from_profile(*gen, "initrd", True))
+        bootspec = get_bootspec(gen.profile, gen.generation)
+        known_paths.append(copy_from_file(bootspec.kernel, True))
+        known_paths.append(copy_from_file(bootspec.initrd, True))
     for path in glob.iglob("@efiSysMountPoint@/loader/entries/nixos*-generation-[1-9]*.conf"):
         if rex_profile.match(path):
             prof = rex_profile.sub(r"\1", path)
@@ -279,10 +288,11 @@ def install_bootloader(args: argparse.Namespace) -> None:
     remove_old_entries(gens)
     for gen in gens:
         try:
-            is_default = os.path.dirname(profile_path(*gen, "init")) == args.default_config
-            write_entry(*gen, machine_id, current=is_default)
-            for specialisation in get_specialisations(*gen):
-                write_entry(*specialisation, machine_id, current=is_default)
+            bootspec = get_bootspec(gen.profile, gen.generation)
+            is_default = os.path.dirname(bootspec.init) == args.default_config
+            write_entry(*gen, machine_id, bootspec, current=is_default)
+            for specialisation in bootspec.specialisations.keys():
+                write_entry(gen.profile, gen.generation, specialisation, machine_id, bootspec, current=is_default)
             if is_default:
                 write_loader_conf(*gen)
         except OSError as e:
diff --git a/nixos/modules/system/boot/loader/systemd-boot/systemd-boot.nix b/nixos/modules/system/boot/loader/systemd-boot/systemd-boot.nix
index 1086ab80b14..9d55c21077d 100644
--- a/nixos/modules/system/boot/loader/systemd-boot/systemd-boot.nix
+++ b/nixos/modules/system/boot/loader/systemd-boot/systemd-boot.nix
@@ -16,6 +16,8 @@ let
 
     systemd = config.systemd.package;
 
+    bootspecTools = pkgs.bootspec;
+
     nix = config.nix.package.out;
 
     timeout = optionalString (config.boot.loader.timeout != null) config.boot.loader.timeout;
diff --git a/nixos/modules/system/boot/systemd/initrd.nix b/nixos/modules/system/boot/systemd/initrd.nix
index e223451652b..0e7d59b3207 100644
--- a/nixos/modules/system/boot/systemd/initrd.nix
+++ b/nixos/modules/system/boot/systemd/initrd.nix
@@ -370,7 +370,7 @@ in {
 
     boot.initrd.availableKernelModules = [
       # systemd needs this for some features
-      "autofs4"
+      "autofs"
       # systemd-cryptenroll
     ] ++ lib.optional cfg.enableTpm2 "tpm-tis"
     ++ lib.optional (cfg.enableTpm2 && !(pkgs.stdenv.hostPlatform.isRiscV64 || pkgs.stdenv.hostPlatform.isArmv7)) "tpm-crb";
diff --git a/nixos/modules/tasks/filesystems/bcachefs.nix b/nixos/modules/tasks/filesystems/bcachefs.nix
index 4eadec239e6..af7ba7aa6a0 100644
--- a/nixos/modules/tasks/filesystems/bcachefs.nix
+++ b/nixos/modules/tasks/filesystems/bcachefs.nix
@@ -6,23 +6,39 @@ let
 
   bootFs = filterAttrs (n: fs: (fs.fsType == "bcachefs") && (utils.fsNeededForBoot fs)) config.fileSystems;
 
-  mountCommand = pkgs.runCommand "mount.bcachefs" {} ''
-    mkdir -p $out/bin
-    cat > $out/bin/mount.bcachefs <<EOF
-    #!/bin/sh
-    exec "/bin/bcachefs" mount "\$@"
-    EOF
-    chmod +x $out/bin/mount.bcachefs
-  '';
-
   commonFunctions = ''
     prompt() {
         local name="$1"
         printf "enter passphrase for $name: "
     }
+
     tryUnlock() {
         local name="$1"
         local path="$2"
+        local success=false
+        local target
+        local uuid=$(echo -n $path | sed -e 's,UUID=\(.*\),\1,g')
+
+        printf "waiting for device to appear $path"
+        for try in $(seq 10); do
+          if [ -e $path ]; then
+              success=true
+              break
+          else
+              target=$(blkid --uuid $uuid)
+              if [ $? == 0 ]; then
+                 success=true
+                 break
+              fi
+          fi
+          echo -n "."
+          sleep 1
+        done
+        printf "\n"
+        if [ $success == true ]; then
+            path=$target
+        fi
+
         if bcachefs unlock -c $path > /dev/null 2> /dev/null; then    # test for encryption
             prompt $name
             until bcachefs unlock $path 2> /dev/null; do              # repeat until successfully unlocked
@@ -30,6 +46,8 @@ let
                 prompt $name
             done
             printf "unlocking successful.\n"
+        else
+            echo "Cannot unlock device $uuid with path $path" >&2
         fi
     }
   '';
@@ -77,13 +95,11 @@ in
 {
   config = mkIf (elem "bcachefs" config.boot.supportedFilesystems) (mkMerge [
     {
-      # We do not want to include bachefs in the fsPackages for systemd-initrd
-      # because we provide the unwrapped version of mount.bcachefs
-      # through the extraBin option, which will make it available for use.
-      system.fsPackages = lib.optional (!config.boot.initrd.systemd.enable) pkgs.bcachefs-tools;
-      environment.systemPackages = lib.optional (config.boot.initrd.systemd.enable) pkgs.bcachefs-tools;
+      # needed for systemd-remount-fs
+      system.fsPackages = [ pkgs.bcachefs-tools ];
 
       # use kernel package with bcachefs support until it's in mainline
+      # TODO replace with requireKernelConfig
       boot.kernelPackages = pkgs.linuxPackages_testing_bcachefs;
 
       systemd.services = lib.mapAttrs' (mkUnits "") (lib.filterAttrs (n: fs: (fs.fsType == "bcachefs") && (!utils.fsNeededForBoot fs)) config.fileSystems);
@@ -92,15 +108,14 @@ in
     (mkIf ((elem "bcachefs" config.boot.initrd.supportedFilesystems) || (bootFs != {})) {
       # chacha20 and poly1305 are required only for decryption attempts
       boot.initrd.availableKernelModules = [ "bcachefs" "sha256" "chacha20" "poly1305" ];
-
       boot.initrd.systemd.extraBin = {
+        # do we need this? boot/systemd.nix:566 & boot/systemd/initrd.nix:357
         "bcachefs" = "${pkgs.bcachefs-tools}/bin/bcachefs";
-        "mount.bcachefs" = "${mountCommand}/bin/mount.bcachefs";
+        "mount.bcachefs" = "${pkgs.bcachefs-tools}/bin/mount.bcachefs";
       };
-
       boot.initrd.extraUtilsCommands = lib.mkIf (!config.boot.initrd.systemd.enable) ''
         copy_bin_and_libs ${pkgs.bcachefs-tools}/bin/bcachefs
-        copy_bin_and_libs ${mountCommand}/bin/mount.bcachefs
+        copy_bin_and_libs ${pkgs.bcachefs-tools}/bin/mount.bcachefs
       '';
       boot.initrd.extraUtilsCommandsTest = lib.mkIf (!config.boot.initrd.systemd.enable) ''
         $out/bin/bcachefs version
diff --git a/nixos/modules/tasks/network-interfaces.nix b/nixos/modules/tasks/network-interfaces.nix
index d976f9951bb..298add13437 100644
--- a/nixos/modules/tasks/network-interfaces.nix
+++ b/nixos/modules/tasks/network-interfaces.nix
@@ -498,7 +498,7 @@ in
         option will result in an evaluation error if the hostname is empty or
         no domain is specified.
 
-        Modules that accept a mere `networing.hostName` but prefer a fully qualified
+        Modules that accept a mere `networking.hostName` but prefer a fully qualified
         domain name may use `networking.fqdnOrHostName` instead.
       '';
     };
diff --git a/nixos/modules/testing/test-instrumentation.nix b/nixos/modules/testing/test-instrumentation.nix
index abe68dd6eae..9ee77cd79a9 100644
--- a/nixos/modules/testing/test-instrumentation.nix
+++ b/nixos/modules/testing/test-instrumentation.nix
@@ -11,10 +11,6 @@ let
   qemu-common = import ../../lib/qemu-common.nix { inherit lib pkgs; };
 
   backdoorService = {
-    wantedBy = [ "sysinit.target" ];
-    unitConfig.DefaultDependencies = false;
-    conflicts = [ "shutdown.target" "initrd-switch-root.target" ];
-    before = [ "shutdown.target" "initrd-switch-root.target" ];
     requires = [ "dev-hvc0.device" "dev-${qemu-common.qemuSerialDevice}.device" ];
     after = [ "dev-hvc0.device" "dev-${qemu-common.qemuSerialDevice}.device" ];
     script =
@@ -80,7 +76,12 @@ in
       }
     ];
 
-    systemd.services.backdoor = backdoorService;
+    systemd.services.backdoor = lib.mkMerge [
+      backdoorService
+      {
+        wantedBy = [ "multi-user.target" ];
+      }
+    ];
 
     boot.initrd.systemd = lib.mkMerge [
       {
@@ -104,7 +105,21 @@ in
           "/bin/true"
         ];
 
-        services.backdoor = backdoorService;
+        services.backdoor = lib.mkMerge [
+          backdoorService
+          {
+            # TODO: Both stage 1 and stage 2 should use these same
+            # settings. But a lot of existing tests rely on
+            # backdoor.service having default orderings,
+            # e.g. systemd-boot.update relies on /boot being mounted
+            # as soon as backdoor starts. But it can be useful for
+            # backdoor to start even earlier.
+            wantedBy = [ "sysinit.target" ];
+            unitConfig.DefaultDependencies = false;
+            conflicts = [ "shutdown.target" "initrd-switch-root.target" ];
+            before = [ "shutdown.target" "initrd-switch-root.target" ];
+          }
+        ];
 
         contents."/usr/bin/env".source = "${pkgs.coreutils}/bin/env";
       })
diff --git a/nixos/modules/virtualisation/azure-agent.nix b/nixos/modules/virtualisation/azure-agent.nix
index a88b78bc982..e712fac17a4 100644
--- a/nixos/modules/virtualisation/azure-agent.nix
+++ b/nixos/modules/virtualisation/azure-agent.nix
@@ -61,7 +61,7 @@ in
 
         # Which provisioning agent to use. Supported values are "auto" (default), "waagent",
         # "cloud-init", or "disabled".
-        Provisioning.Agent=disabled
+        Provisioning.Agent=auto
 
         # Password authentication for root account will be unavailable.
         Provisioning.DeleteRootPassword=n
@@ -246,7 +246,7 @@ in
         pkgs.bash
 
         # waagent's Microsoft.OSTCExtensions.VMAccessForLinux needs Python 3
-        pkgs.python3
+        pkgs.python39
 
         # waagent's Microsoft.CPlat.Core.RunCommandLinux needs lsof
         pkgs.lsof
@@ -259,5 +259,10 @@ in
       };
     };
 
+    # waagent will generate files under /etc/sudoers.d during provisioning
+    security.sudo.extraConfig = ''
+      #includedir /etc/sudoers.d
+    '';
+
   };
 }
diff --git a/nixos/modules/virtualisation/azure-image.nix b/nixos/modules/virtualisation/azure-image.nix
index 39c6cab5980..d909680cca1 100644
--- a/nixos/modules/virtualisation/azure-image.nix
+++ b/nixos/modules/virtualisation/azure-image.nix
@@ -37,42 +37,5 @@ in
       inherit config lib pkgs;
     };
 
-    # Azure metadata is available as a CD-ROM drive.
-    fileSystems."/metadata".device = "/dev/sr0";
-
-    systemd.services.fetch-ssh-keys = {
-      description = "Fetch host keys and authorized_keys for root user";
-
-      wantedBy = [ "sshd.service" "waagent.service" ];
-      before = [ "sshd.service" "waagent.service" ];
-
-      path  = [ pkgs.coreutils ];
-      script =
-        ''
-          eval "$(cat /metadata/CustomData.bin)"
-          if ! [ -z "$ssh_host_ecdsa_key" ]; then
-            echo "downloaded ssh_host_ecdsa_key"
-            echo "$ssh_host_ecdsa_key" > /etc/ssh/ssh_host_ed25519_key
-            chmod 600 /etc/ssh/ssh_host_ed25519_key
-          fi
-
-          if ! [ -z "$ssh_host_ecdsa_key_pub" ]; then
-            echo "downloaded ssh_host_ecdsa_key_pub"
-            echo "$ssh_host_ecdsa_key_pub" > /etc/ssh/ssh_host_ed25519_key.pub
-            chmod 644 /etc/ssh/ssh_host_ed25519_key.pub
-          fi
-
-          if ! [ -z "$ssh_root_auth_key" ]; then
-            echo "downloaded ssh_root_auth_key"
-            mkdir -m 0700 -p /root/.ssh
-            echo "$ssh_root_auth_key" > /root/.ssh/authorized_keys
-            chmod 600 /root/.ssh/authorized_keys
-          fi
-        '';
-      serviceConfig.Type = "oneshot";
-      serviceConfig.RemainAfterExit = true;
-      serviceConfig.StandardError = "journal+console";
-      serviceConfig.StandardOutput = "journal+console";
-    };
   };
 }
diff --git a/nixos/tests/all-tests.nix b/nixos/tests/all-tests.nix
index f44fcfcf54a..325e99c9774 100644
--- a/nixos/tests/all-tests.nix
+++ b/nixos/tests/all-tests.nix
@@ -124,6 +124,7 @@ in {
   apfs = runTest ./apfs.nix;
   appliance-repart-image = runTest ./appliance-repart-image.nix;
   apparmor = handleTest ./apparmor.nix {};
+  archi = handleTest ./archi.nix {};
   atd = handleTest ./atd.nix {};
   atop = handleTest ./atop.nix {};
   atuin = handleTest ./atuin.nix {};
@@ -191,7 +192,6 @@ in {
   cntr = handleTestOn ["aarch64-linux" "x86_64-linux"] ./cntr.nix {};
   cockpit = handleTest ./cockpit.nix {};
   cockroachdb = handleTestOn ["x86_64-linux"] ./cockroachdb.nix {};
-  code-server = handleTest ./code-server.nix {};
   coder = handleTest ./coder.nix {};
   collectd = handleTest ./collectd.nix {};
   connman = handleTest ./connman.nix {};
@@ -656,6 +656,7 @@ in {
   phylactery = handleTest ./web-apps/phylactery.nix {};
   pict-rs = handleTest ./pict-rs.nix {};
   pinnwand = handleTest ./pinnwand.nix {};
+  plantuml-server = handleTest ./plantuml-server.nix {};
   plasma-bigscreen = handleTest ./plasma-bigscreen.nix {};
   plasma5 = handleTest ./plasma5.nix {};
   plasma5-systemd-start = handleTest ./plasma5-systemd-start.nix {};
@@ -748,6 +749,7 @@ in {
   signal-desktop = handleTest ./signal-desktop.nix {};
   simple = handleTest ./simple.nix {};
   sing-box = handleTest ./sing-box.nix {};
+  slimserver = handleTest ./slimserver.nix {};
   slurm = handleTest ./slurm.nix {};
   smokeping = handleTest ./smokeping.nix {};
   snapcast = handleTest ./snapcast.nix {};
diff --git a/nixos/tests/archi.nix b/nixos/tests/archi.nix
new file mode 100644
index 00000000000..59f2e940c00
--- /dev/null
+++ b/nixos/tests/archi.nix
@@ -0,0 +1,31 @@
+import ./make-test-python.nix ({ lib, ... }: {
+  name = "archi";
+  meta.maintainers = with lib.maintainers; [ paumr ];
+
+  nodes.machine = { pkgs, ... }: {
+    imports = [
+      ./common/x11.nix
+    ];
+
+    environment.systemPackages = with pkgs; [ archi ];
+  };
+
+  enableOCR = true;
+
+  testScript = ''
+    machine.wait_for_x()
+
+    with subtest("createEmptyModel via CLI"):
+         machine.succeed("Archi -application com.archimatetool.commandline.app -consoleLog -nosplash --createEmptyModel --saveModel smoke.archimate")
+         machine.copy_from_vm("smoke.archimate", "")
+
+    with subtest("UI smoketest"):
+         machine.succeed("DISPLAY=:0 Archi --createEmptyModel >&2 &")
+         machine.wait_for_window("Archi")
+
+         # wait till main UI is open
+         machine.wait_for_text("Welcome to Archi")
+
+         machine.screenshot("welcome-screen")
+  '';
+})
diff --git a/nixos/tests/code-server.nix b/nixos/tests/code-server.nix
deleted file mode 100644
index 7d523dfc617..00000000000
--- a/nixos/tests/code-server.nix
+++ /dev/null
@@ -1,22 +0,0 @@
-import ./make-test-python.nix ({pkgs, lib, ...}:
-{
-  name = "code-server";
-
-  nodes = {
-    machine = {pkgs, ...}: {
-      services.code-server = {
-        enable = true;
-        auth = "none";
-      };
-    };
-  };
-
-  testScript = ''
-    start_all()
-    machine.wait_for_unit("code-server.service")
-    machine.wait_for_open_port(4444)
-    machine.succeed("curl -k --fail http://localhost:4444", timeout=10)
-  '';
-
-  meta.maintainers = [ lib.maintainers.drupol ];
-})
diff --git a/nixos/tests/dex-oidc.nix b/nixos/tests/dex-oidc.nix
index 37275a97ef0..e54ae18ca93 100644
--- a/nixos/tests/dex-oidc.nix
+++ b/nixos/tests/dex-oidc.nix
@@ -49,7 +49,7 @@ import ./make-test-python.nix ({ lib, ... }: {
       ensureUsers = [
         {
           name = "dex";
-          ensurePermissions = { "DATABASE dex" = "ALL PRIVILEGES"; };
+          ensureDBOwnership = true;
         }
       ];
     };
diff --git a/nixos/tests/dnscrypt-wrapper/default.nix b/nixos/tests/dnscrypt-wrapper/default.nix
index 1c05376e097..1a794931dc5 100644
--- a/nixos/tests/dnscrypt-wrapper/default.nix
+++ b/nixos/tests/dnscrypt-wrapper/default.nix
@@ -1,5 +1,15 @@
+
 { lib, pkgs, ... }:
 
+let
+  snakeoil = import ../common/acme/server/snakeoil-certs.nix;
+
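+  # Map the snakeoil certificate's domain to the server on both nodes so the
+  # client can reach it over TLS; lib.mkForce overrides any other host entries.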
+  hosts = lib.mkForce
+   { "fd::a" = [ "server" snakeoil.domain ];
+     "fd::b" = [ "client" ];
+   };
+in
+
 {
   name = "dnscrypt-wrapper";
   meta = with pkgs.lib.maintainers; {
@@ -7,59 +17,122 @@
   };
 
   nodes = {
-    server = { lib, ... }:
-      { services.dnscrypt-wrapper = with builtins;
+    server = {
+      networking.hosts = hosts;
+      networking.interfaces.eth1.ipv6.addresses = lib.singleton
+        { address = "fd::a"; prefixLength = 64; };
+
+        services.dnscrypt-wrapper =
           { enable = true;
-            address = "192.168.1.1";
+            address = "[::]";
+            port = 5353;
             keys.expiration = 5; # days
             keys.checkInterval = 2;  # min
             # The keypair was generated by the command:
             # dnscrypt-wrapper --gen-provider-keypair \
             #  --provider-name=2.dnscrypt-cert.server \
-            #  --ext-address=192.168.1.1:5353
-            providerKey.public = toFile "public.key" (readFile ./public.key);
-            providerKey.secret = toFile "secret.key" (readFile ./secret.key);
+            providerKey.public = "${./public.key}";
+            providerKey.secret = "${./secret.key}";
+          };
+
+        # nameserver
+        services.bind.enable = true;
+        services.bind.zones = lib.singleton
+          { name = ".";
+            master = true;
+            file = pkgs.writeText "root.zone" ''
+              $TTL 3600
+              . IN SOA example.org. admin.example.org. ( 1 3h 1h 1w 1d )
+              . IN NS example.org.
+              example.org. IN AAAA 2001:db8::1
+            '';
+          };
+
+        # webserver
+        services.nginx.enable = true;
+        services.nginx.virtualHosts.${snakeoil.domain} =
+          { onlySSL = true;
+            listenAddresses = [ "localhost" ];
+            sslCertificate = snakeoil.${snakeoil.domain}.cert;
+            sslCertificateKey = snakeoil.${snakeoil.domain}.key;
+            locations."/ip".extraConfig = ''
+              default_type text/plain;
+              return 200 "Ciao $remote_addr!\n";
+            '';
           };
-        services.tinydns.enable = true;
-        services.tinydns.data = ''
-          ..:192.168.1.1:a
-          +it.works:1.2.3.4
-        '';
-        networking.firewall.allowedUDPPorts = [ 5353 ];
-        networking.firewall.allowedTCPPorts = [ 5353 ];
-        networking.interfaces.eth1.ipv4.addresses = lib.mkForce
-          [ { address = "192.168.1.1"; prefixLength = 24; } ];
+
+        # demultiplex HTTP and DNS from port 443
+        services.sslh =
+          { enable = true;
+            method = "ev";
+            settings.transparent = true;
+            settings.listen = lib.mkForce
+              [ { host = "server"; port = "443"; is_udp = false; }
+                { host = "server"; port = "443"; is_udp = true; }
+              ];
+            settings.protocols =
+              [ # Send TLS to webserver (TCP)
+                { name = "tls"; host= "localhost"; port= "443"; }
+                # Send DNSCrypt to dnscrypt-wrapper (TCP or UDP)
+                { name = "anyprot"; host = "localhost"; port = "5353"; }
+                { name = "anyprot"; host = "localhost"; port = "5353"; is_udp = true;}
+              ];
+          };
+
+        networking.firewall.allowedTCPPorts = [ 443 ];
+        networking.firewall.allowedUDPPorts = [ 443 ];
       };
 
-    client = { lib, ... }:
-      { services.dnscrypt-proxy2.enable = true;
-        services.dnscrypt-proxy2.upstreamDefaults = false;
-        services.dnscrypt-proxy2.settings = {
-          server_names = [ "server" ];
-          static.server.stamp = "sdns://AQAAAAAAAAAAEDE5Mi4xNjguMS4xOjUzNTMgFEHYOv0SCKSuqR5CDYa7-58cCBuXO2_5uTSVU9wNQF0WMi5kbnNjcnlwdC1jZXJ0LnNlcnZlcg";
+    client = {
+      networking.hosts = hosts;
+      networking.interfaces.eth1.ipv6.addresses = lib.singleton
+        { address = "fd::b"; prefixLength = 64; };
+
+      services.dnscrypt-proxy2.enable = true;
+      services.dnscrypt-proxy2.upstreamDefaults = false;
+      services.dnscrypt-proxy2.settings =
+        { server_names = [ "server" ];
+          listen_addresses = [ "[::1]:53" ];
+          cache = false;
+          # Computed using https://dnscrypt.info/stamps/
+          static.server.stamp =
+            "sdns://AQAAAAAAAAAADzE5Mi4xNjguMS4yOjQ0MyAUQdg6"
+            +"_RIIpK6pHkINhrv7nxwIG5c7b_m5NJVT3A1AXRYyLmRuc2NyeXB0LWNlcnQuc2VydmVy";
         };
-        networking.nameservers = [ "127.0.0.1" ];
-        networking.interfaces.eth1.ipv4.addresses = lib.mkForce
-          [ { address = "192.168.1.2"; prefixLength = 24; } ];
-      };
+      networking.nameservers = [ "::1" ];
+      security.pki.certificateFiles = [ snakeoil.ca.cert ];
+    };
 
   };
 
   testScript = ''
-    start_all()
-
     with subtest("The server can generate the ephemeral keypair"):
         server.wait_for_unit("dnscrypt-wrapper")
         server.wait_for_file("/var/lib/dnscrypt-wrapper/2.dnscrypt-cert.server.key")
         server.wait_for_file("/var/lib/dnscrypt-wrapper/2.dnscrypt-cert.server.crt")
         almost_expiration = server.succeed("date --date '4days 23 hours 56min'").strip()
 
-    with subtest("The client can connect to the server"):
-        server.wait_for_unit("tinydns")
-        client.wait_for_unit("dnscrypt-proxy2")
-        assert "1.2.3.4" in client.wait_until_succeeds(
-            "host it.works"
-        ), "The IP address of 'it.works' does not match 1.2.3.4"
+    with subtest("The DNSCrypt client can connect to the server"):
+        server.wait_for_unit("sslh")
+        client.wait_until_succeeds("journalctl -u dnscrypt-proxy2 --grep '\[server\] OK'")
+
+    with subtest("HTTP client can connect to the server"):
+        server.wait_for_unit("nginx")
+        client.succeed("curl -s --fail https://${snakeoil.domain}/ip | grep -q fd::b")
+
+    with subtest("DNS queries over UDP are working"):
+        server.wait_for_unit("bind")
+        client.wait_for_open_port(53)
+        assert "2001:db8::1" in client.wait_until_succeeds(
+            "host -U example.org"
+        ), "The IP address of 'example.org' does not match 2001:db8::1"
+
+    with subtest("DNS queries over TCP are working"):
+        server.wait_for_unit("bind")
+        client.wait_for_open_port(53)
+        assert "2001:db8::1" in client.wait_until_succeeds(
+            "host -T example.org"
+        ), "The IP address of 'example.org' does not match 2001:db8::1"
 
     with subtest("The server rotates the ephemeral keys"):
         # advance time by a little less than 5 days
@@ -68,7 +141,8 @@
         server.wait_for_file("/var/lib/dnscrypt-wrapper/oldkeys")
 
     with subtest("The client can still connect to the server"):
-        server.wait_for_unit("dnscrypt-wrapper")
-        client.succeed("host it.works")
+        client.systemctl("restart dnscrypt-proxy2")
+        client.wait_until_succeeds("host -T example.org")
+        client.wait_until_succeeds("host -U example.org")
   '';
 }
diff --git a/nixos/tests/elk.nix b/nixos/tests/elk.nix
index 0122bc44036..900ea632010 100644
--- a/nixos/tests/elk.nix
+++ b/nixos/tests/elk.nix
@@ -119,11 +119,6 @@ let
                 package = elk.elasticsearch;
               };
 
-              kibana = {
-                enable = true;
-                package = elk.kibana;
-              };
-
               elasticsearch-curator = {
                 enable = true;
                 actionYAML = ''
@@ -217,13 +212,6 @@ let
           one.wait_until_succeeds("cat /tmp/logstash.out | grep flowers")
           one.wait_until_succeeds("cat /tmp/logstash.out | grep -v dragons")
 
-      with subtest("Kibana is healthy"):
-          one.wait_for_unit("kibana.service")
-          one.wait_until_succeeds(
-              "curl --silent --show-error --fail-with-body 'http://localhost:5601/api/status'"
-              + " | jq -es 'if . == [] then null else .[] | .status.overall.state == \"green\" end'"
-          )
-
       with subtest("Metricbeat is running"):
           one.wait_for_unit("metricbeat.service")
 
@@ -274,7 +262,6 @@ in {
   #   name = "elk-7";
   #   elasticsearch = pkgs.elasticsearch7-oss;
   #   logstash      = pkgs.logstash7-oss;
-  #   kibana        = pkgs.kibana7-oss;
   #   filebeat      = pkgs.filebeat7;
   #   metricbeat    = pkgs.metricbeat7;
   # };
@@ -282,7 +269,6 @@ in {
     ELK-7 = mkElkTest "elk-7" {
       elasticsearch = pkgs.elasticsearch7;
       logstash      = pkgs.logstash7;
-      kibana        = pkgs.kibana7;
       filebeat      = pkgs.filebeat7;
       metricbeat    = pkgs.metricbeat7;
     };
diff --git a/nixos/tests/ferretdb.nix b/nixos/tests/ferretdb.nix
index 9ad7397ade8..7251198af77 100644
--- a/nixos/tests/ferretdb.nix
+++ b/nixos/tests/ferretdb.nix
@@ -39,7 +39,7 @@ with import ../lib/testing-python.nix { inherit system; };
             ensureDatabases = [ "ferretdb" ];
             ensureUsers = [{
               name = "ferretdb";
-              ensurePermissions."DATABASE ferretdb" = "ALL PRIVILEGES";
+              ensureDBOwnership = true;
             }];
           };
 
diff --git a/nixos/tests/freshrss-pgsql.nix b/nixos/tests/freshrss-pgsql.nix
index 055bd51ed43..c685f4a8159 100644
--- a/nixos/tests/freshrss-pgsql.nix
+++ b/nixos/tests/freshrss-pgsql.nix
@@ -22,9 +22,7 @@ import ./make-test-python.nix ({ lib, pkgs, ... }: {
       ensureUsers = [
         {
           name = "freshrss";
-          ensurePermissions = {
-            "DATABASE freshrss" = "ALL PRIVILEGES";
-          };
+          ensureDBOwnership = true;
         }
       ];
       initialScript = pkgs.writeText "postgresql-password" ''
diff --git a/nixos/tests/grafana/basic.nix b/nixos/tests/grafana/basic.nix
index 8bf4caad7fb..dd389bc8a3d 100644
--- a/nixos/tests/grafana/basic.nix
+++ b/nixos/tests/grafana/basic.nix
@@ -55,7 +55,7 @@ let
         ensureDatabases = [ "grafana" ];
         ensureUsers = [{
           name = "grafana";
-          ensurePermissions."DATABASE grafana" = "ALL PRIVILEGES";
+          ensureDBOwnership = true;
         }];
       };
       systemd.services.grafana.after = [ "postgresql.service" ];
diff --git a/nixos/tests/hockeypuck.nix b/nixos/tests/hockeypuck.nix
index 2b9dba8720a..675d6b226ad 100644
--- a/nixos/tests/hockeypuck.nix
+++ b/nixos/tests/hockeypuck.nix
@@ -35,7 +35,7 @@ in {
       ensureDatabases = [ "hockeypuck" ];
       ensureUsers = [{
         name = "hockeypuck";
-        ensurePermissions."DATABASE hockeypuck" = "ALL PRIVILEGES";
+        ensureDBOwnership = true;
       }];
     };
   };
diff --git a/nixos/tests/home-assistant.nix b/nixos/tests/home-assistant.nix
index e97e8a467b1..e1588088ba1 100644
--- a/nixos/tests/home-assistant.nix
+++ b/nixos/tests/home-assistant.nix
@@ -9,13 +9,11 @@ in {
   nodes.hass = { pkgs, ... }: {
     services.postgresql = {
       enable = true;
-
-      # FIXME: hack for https://github.com/NixOS/nixpkgs/issues/216989
-      # Should be replaced with ensureUsers again when a solution for that is found
-      initialScript = pkgs.writeText "hass-setup-db.sql" ''
-        CREATE ROLE hass WITH LOGIN;
-        CREATE DATABASE hass WITH OWNER hass;
-      '';
+      ensureDatabases = [ "hass" ];
+      ensureUsers = [{
+        name = "hass";
+        ensureDBOwnership = true;
+      }];
     };
 
     services.home-assistant = {
diff --git a/nixos/tests/invidious.nix b/nixos/tests/invidious.nix
index 582d1550fff..701e8e5e7a3 100644
--- a/nixos/tests/invidious.nix
+++ b/nixos/tests/invidious.nix
@@ -44,8 +44,7 @@ import ./make-test-python.nix ({ pkgs, ... }: {
             enable = true;
             initialScript = pkgs.writeText "init-postgres-with-password" ''
               CREATE USER kemal WITH PASSWORD 'correct horse battery staple';
-              CREATE DATABASE invidious;
-              GRANT ALL PRIVILEGES ON DATABASE invidious TO kemal;
+              CREATE DATABASE invidious OWNER kemal;
             '';
           };
       };
diff --git a/nixos/tests/paperless.nix b/nixos/tests/paperless.nix
index 22409e89923..6a51cc522bd 100644
--- a/nixos/tests/paperless.nix
+++ b/nixos/tests/paperless.nix
@@ -17,7 +17,7 @@ import ./make-test-python.nix ({ lib, ... }: {
         ensureDatabases = [ "paperless" ];
         ensureUsers = [
           { name = config.services.paperless.user;
-            ensurePermissions = { "DATABASE \"paperless\"" = "ALL PRIVILEGES"; };
+            ensureDBOwnership = true;
           }
         ];
       };
diff --git a/nixos/tests/pgadmin4.nix b/nixos/tests/pgadmin4.nix
index cb8de87c9ee..3ee7ed19fa1 100644
--- a/nixos/tests/pgadmin4.nix
+++ b/nixos/tests/pgadmin4.nix
@@ -19,14 +19,6 @@ import ./make-test-python.nix ({ pkgs, lib, ... }:
       authentication = ''
         host    all             all             localhost               trust
       '';
-      ensureUsers = [
-        {
-          name = "postgres";
-          ensurePermissions = {
-            "DATABASE \"postgres\"" = "ALL PRIVILEGES";
-          };
-        }
-      ];
     };
 
     services.pgadmin = {
diff --git a/nixos/tests/pgbouncer.nix b/nixos/tests/pgbouncer.nix
index 1e72327d420..bb5afd35ee2 100644
--- a/nixos/tests/pgbouncer.nix
+++ b/nixos/tests/pgbouncer.nix
@@ -17,7 +17,8 @@ in
 
       systemd.services.postgresql = {
         postStart = ''
-            ${pkgs.postgresql}/bin/psql -U postgres -c "ALTER ROLE testuser WITH LOGIN PASSWORD 'testpass'";
+          ${pkgs.postgresql}/bin/psql -U postgres -c "ALTER ROLE testuser WITH LOGIN PASSWORD 'testpass'";
+          ${pkgs.postgresql}/bin/psql -U postgres -c "ALTER DATABASE testdb OWNER TO testuser;";
         '';
       };
 
@@ -28,9 +29,6 @@ in
           ensureUsers = [
           {
             name = "testuser";
-            ensurePermissions = {
-              "DATABASE testdb" = "ALL PRIVILEGES";
-            };
           }];
           authentication = ''
             local testdb testuser scram-sha-256
@@ -40,7 +38,7 @@ in
         pgbouncer = {
           enable = true;
           listenAddress = "localhost";
-          databases = { testdb = "host=/run/postgresql/ port=5432 auth_user=testuser dbname=testdb"; };
+          databases = { test = "host=/run/postgresql/ port=5432 auth_user=testuser dbname=testdb"; };
           authType = "scram-sha-256";
           authFile = testAuthFile;
         };
@@ -55,7 +53,7 @@ in
 
     # Test if we can make a query through PgBouncer
     one.wait_until_succeeds(
-        "psql 'postgres://testuser:testpass@localhost:6432/testdb' -c 'SELECT 1;'"
+        "psql 'postgres://testuser:testpass@localhost:6432/test' -c 'SELECT 1;'"
     )
   '';
 })
diff --git a/nixos/tests/plantuml-server.nix b/nixos/tests/plantuml-server.nix
new file mode 100644
index 00000000000..460c30919ae
--- /dev/null
+++ b/nixos/tests/plantuml-server.nix
@@ -0,0 +1,20 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "plantuml-server";
+  meta.maintainers = with lib.maintainers; [ anthonyroussel ];
+
+  nodes.machine = { pkgs, ... }: {
+    environment.systemPackages = [ pkgs.curl ];
+    services.plantuml-server.enable = true;
+  };
+
+  testScript = ''
+    start_all()
+
+    machine.wait_for_unit("plantuml-server.service")
+    machine.wait_for_open_port(8080)
+
+    with subtest("Generate chart"):
+      chart_id = machine.succeed("curl -sSf http://localhost:8080/plantuml/coder -d 'Alice -> Bob'")
+      machine.succeed("curl -sSf http://localhost:8080/plantuml/txt/{}".format(chart_id))
+  '';
+})
diff --git a/nixos/tests/plausible.nix b/nixos/tests/plausible.nix
index 9afd3db75de..9c26c509a5a 100644
--- a/nixos/tests/plausible.nix
+++ b/nixos/tests/plausible.nix
@@ -8,9 +8,6 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
     virtualisation.memorySize = 4096;
     services.plausible = {
       enable = true;
-      releaseCookiePath = "${pkgs.runCommand "cookie" { } ''
-        ${pkgs.openssl}/bin/openssl rand -base64 64 >"$out"
-      ''}";
       adminUser = {
         email = "admin@example.org";
         passwordFile = "${pkgs.writeText "pwd" "foobar"}";
@@ -28,6 +25,10 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
     machine.wait_for_unit("plausible.service")
     machine.wait_for_open_port(8000)
 
+    # Ensure that the software does not make the machine
+    # listen on any public interfaces by default.
+    machine.fail("ss -tlpn 'src = 0.0.0.0 or src = [::]' | grep LISTEN")
+
     machine.succeed("curl -f localhost:8000 >&2")
 
     machine.succeed("curl -f localhost:8000/js/script.js >&2")
diff --git a/nixos/tests/pleroma.nix b/nixos/tests/pleroma.nix
index 4f1aef85414..08a01585f87 100644
--- a/nixos/tests/pleroma.nix
+++ b/nixos/tests/pleroma.nix
@@ -164,9 +164,12 @@ import ./make-test-python.nix ({ pkgs, ... }:
   '';
 
   tls-cert = pkgs.runCommand "selfSignedCerts" { buildInputs = [ pkgs.openssl ]; } ''
-    openssl req -x509 -newkey rsa:4096 -keyout key.pem -out cert.pem -nodes -subj '/CN=pleroma.nixos.test' -days 36500
     mkdir -p $out
-    cp key.pem cert.pem $out
+    openssl req -x509 \
+      -subj '/CN=pleroma.nixos.test/' -days 49710 \
+      -addext 'subjectAltName = DNS:pleroma.nixos.test' \
+      -keyout "$out/key.pem" -newkey ed25519 \
+      -out "$out/cert.pem" -noenc
   '';
 
   hosts = nodes: ''
@@ -180,7 +183,7 @@ import ./make-test-python.nix ({ pkgs, ... }:
       security.pki.certificateFiles = [ "${tls-cert}/cert.pem" ];
       networking.extraHosts = hosts nodes;
       environment.systemPackages = with pkgs; [
-        toot
+        pkgs.toot
         send-toot
       ];
     };
diff --git a/nixos/tests/powerdns-admin.nix b/nixos/tests/powerdns-admin.nix
index d7bacb24eec..d326d74a982 100644
--- a/nixos/tests/powerdns-admin.nix
+++ b/nixos/tests/powerdns-admin.nix
@@ -87,9 +87,7 @@ let
           ensureUsers = [
             {
               name = "powerdnsadmin";
-              ensurePermissions = {
-                "DATABASE powerdnsadmin" = "ALL PRIVILEGES";
-              };
+              ensureDBOwnership = true;
             }
           ];
         };
diff --git a/nixos/tests/prometheus-exporters.nix b/nixos/tests/prometheus-exporters.nix
index 4bad56991cc..7840130d4a3 100644
--- a/nixos/tests/prometheus-exporters.nix
+++ b/nixos/tests/prometheus-exporters.nix
@@ -257,6 +257,21 @@ let
       '';
     };
 
+    exportarr-sonarr = {
+      nodeName = "exportarr_sonarr";
+      exporterConfig = {
+        enable = true;
+        url = "http://127.0.0.1:8989";
+        # testing for real data is tricky, because the api key can not be preconfigured
+        apiKeyFile = pkgs.writeText "dummy-api-key" "eccff6a992bc2e4b88e46d064b26bb4e";
+      };
+      exporterTest = ''
+        wait_for_unit("prometheus-exportarr-sonarr-exporter.service")
+        wait_for_open_port(9707)
+        succeed("curl -sSf 'http://localhost:9707/metrics")
+      '';
+    };
+
     fastly = {
       exporterConfig = {
         enable = true;
@@ -1318,12 +1333,12 @@ let
         wait_for_open_port(9374)
         wait_until_succeeds(
             "curl -sSf localhost:9374/metrics | grep '{}' | grep -v ' 0$'".format(
-                'smokeping_requests_total{host="127.0.0.1",ip="127.0.0.1"} '
+                'smokeping_requests_total{host="127.0.0.1",ip="127.0.0.1",source=""} '
             )
         )
         wait_until_succeeds(
             "curl -sSf localhost:9374/metrics | grep '{}'".format(
-                'smokeping_response_ttl{host="127.0.0.1",ip="127.0.0.1"}'
+                'smokeping_response_ttl{host="127.0.0.1",ip="127.0.0.1",source=""}'
             )
         )
       '';
diff --git a/nixos/tests/sftpgo.nix b/nixos/tests/sftpgo.nix
index db0098d2ac4..a5bb1981d2c 100644
--- a/nixos/tests/sftpgo.nix
+++ b/nixos/tests/sftpgo.nix
@@ -156,7 +156,7 @@ in
         ensureDatabases = [ "sftpgo" ];
         ensureUsers = [{
           name = "sftpgo";
-          ensurePermissions."DATABASE sftpgo" = "ALL PRIVILEGES";
+          ensureDBOwnership = true;
         }];
       };
 
diff --git a/nixos/tests/slimserver.nix b/nixos/tests/slimserver.nix
new file mode 100644
index 00000000000..c3f7b6fde4d
--- /dev/null
+++ b/nixos/tests/slimserver.nix
@@ -0,0 +1,47 @@
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "slimserver";
+  meta.maintainers = with pkgs.lib.maintainers; [ adamcstephens ];
+
+  nodes.machine = { ... }: {
+    services.slimserver.enable = true;
+    services.squeezelite = {
+      enable = true;
+      extraArguments = "-s 127.0.0.1 -d slimproto=info";
+    };
+    sound.enable = true;
+    boot.initrd.kernelModules = ["snd-dummy"];
+  };
+
+  testScript =
+    ''
+      import json
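+      # JSON-RPC request asking player 0 for its id, which is reported as the
+      # player's MAC address.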
+      rpc_get_player = {
+          "id": 1,
+          "method": "slim.request",
+          "params":[0,["player", "id", "0", "?"]]
+      }
+
+      with subtest("slimserver is started"):
+          machine.wait_for_unit("slimserver.service")
+          # give slimserver a moment to report errors
+          machine.sleep(2)
+
+      with subtest("slimserver module errors are not reported"):
+          machine.fail("journalctl -u slimserver.service | grep 'throw_exception'")
+          machine.fail("journalctl -u slimserver.service | grep 'not installed'")
+          machine.fail("journalctl -u slimserver.service | grep 'not found'")
+          machine.fail("journalctl -u slimserver.service | grep 'The following CPAN modules were found but cannot work with Logitech Media Server'")
+          machine.fail("journalctl -u slimserver.service | grep 'please use the buildme.sh'")
+
+      with subtest("slimserver is ready"):
+          machine.wait_for_open_port(9000)
+          machine.wait_until_succeeds("journalctl -u slimserver.service | grep 'Completed dbOptimize Scan'")
+
+      with subtest("squeezelite player successfully connects to slimserver"):
+          machine.wait_for_unit("squeezelite.service")
+          machine.wait_until_succeeds("journalctl -u squeezelite.service | grep 'slimproto:937 connected'")
+          player_mac = machine.wait_until_succeeds("journalctl -eu squeezelite.service | grep 'sendHELO:148 mac:'").strip().split(" ")[-1]
+          player_id = machine.succeed(f"curl http://localhost:9000/jsonrpc.js -g -X POST -d '{json.dumps(rpc_get_player)}'")
+          assert player_mac == json.loads(player_id)["result"]["_id"], "squeezelite player not found"
+    '';
+})
diff --git a/nixos/tests/systemd-boot.nix b/nixos/tests/systemd-boot.nix
index 13007d0d80d..256a18532b0 100644
--- a/nixos/tests/systemd-boot.nix
+++ b/nixos/tests/systemd-boot.nix
@@ -252,6 +252,35 @@ in
     '';
   };
 
+  garbage-collect-entry = makeTest {
+    name = "systemd-boot-switch-test";
+    meta.maintainers = with pkgs.lib.maintainers; [ julienmalka ];
+
+    nodes = {
+      inherit common;
+      machine = { pkgs, nodes, ... }: {
+        imports = [ common ];
+
+        # These are configs for different nodes, but we'll use them here in `machine`
+        system.extraDependencies = [
+          nodes.common.system.build.toplevel
+        ];
+      };
+    };
+
+    testScript = { nodes, ... }:
+      let
+        baseSystem = nodes.common.system.build.toplevel;
+      in
+      ''
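+        # Make the base system a new generation, delete generation 1, and check
+        # that switching garbage-collects its boot entry while keeping the new one.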
+        machine.succeed("nix-env -p /nix/var/nix/profiles/system --set ${baseSystem}")
+        machine.succeed("nix-env -p /nix/var/nix/profiles/system --delete-generations 1")
+        machine.succeed("${baseSystem}/bin/switch-to-configuration boot")
+        machine.fail("test -e /boot/loader/entries/nixos-generation-1.conf")
+        machine.succeed("test -e /boot/loader/entries/nixos-generation-2.conf")
+      '';
+  };
+
   # Some UEFI firmwares fail on large reads. Now that systemd-boot loads initrd
   # itself, systems with such firmware won't boot without this fix
   uefiLargeFileWorkaround = makeTest {
@@ -277,4 +306,20 @@ in
       machine.wait_for_unit("multi-user.target")
     '';
   };
+
+  no-bootspec = makeTest
+    {
+      name = "systemd-boot-no-bootspec";
+      meta.maintainers = with pkgs.lib.maintainers; [ julienmalka ];
+
+      nodes.machine = {
+        imports = [ common ];
+        boot.bootspec.enable = false;
+      };
+
+      testScript = ''
+        machine.start()
+        machine.wait_for_unit("multi-user.target")
+      '';
+    };
 }
diff --git a/nixos/tests/tandoor-recipes.nix b/nixos/tests/tandoor-recipes.nix
index f3369da99a0..18beaac6f06 100644
--- a/nixos/tests/tandoor-recipes.nix
+++ b/nixos/tests/tandoor-recipes.nix
@@ -5,6 +5,29 @@ import ./make-test-python.nix ({ lib, ... }: {
   nodes.machine = { pkgs, ... }: {
     services.tandoor-recipes = {
       enable = true;
+      extraConfig = {
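+        # Point Django at the local PostgreSQL instance via its unix socket.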
+        DB_ENGINE = "django.db.backends.postgresql";
+        POSTGRES_HOST = "/run/postgresql";
+        POSTGRES_USER = "tandoor_recipes";
+        POSTGRES_DB = "tandoor_recipes";
+      };
+    };
+
+    services.postgresql = {
+      enable = true;
+      ensureDatabases = [ "tandoor_recipes" ];
+      ensureUsers = [
+        {
+          name = "tandoor_recipes";
+          ensureDBOwnership = true;
+        }
+      ];
+    };
+
+    systemd.services = {
+      tandoor-recipes = {
+        after = [ "postgresql.service" ];
+      };
     };
   };
 
diff --git a/nixos/tests/vikunja.nix b/nixos/tests/vikunja.nix
index 2660aa9767c..60fd5ce1385 100644
--- a/nixos/tests/vikunja.nix
+++ b/nixos/tests/vikunja.nix
@@ -33,7 +33,7 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
         ensureDatabases = [ "vikunja-api" ];
         ensureUsers = [
           { name = "vikunja-api";
-            ensurePermissions = { "DATABASE \"vikunja-api\"" = "ALL PRIVILEGES"; };
+            ensureDBOwnership = true;
           }
         ];
       };
diff --git a/nixos/tests/web-apps/mastodon/remote-postgresql.nix b/nixos/tests/web-apps/mastodon/remote-postgresql.nix
index 715477191bf..6548883db45 100644
--- a/nixos/tests/web-apps/mastodon/remote-postgresql.nix
+++ b/nixos/tests/web-apps/mastodon/remote-postgresql.nix
@@ -16,7 +16,7 @@ in
   meta.maintainers = with pkgs.lib.maintainers; [ erictapen izorkin ];
 
   nodes = {
-    database = {
+    database = { config, ... }: {
       networking = {
         interfaces.eth1 = {
           ipv4.addresses = [
@@ -24,11 +24,13 @@ in
           ];
         };
         extraHosts = hosts;
-        firewall.allowedTCPPorts = [ 5432 ];
+        firewall.allowedTCPPorts = [ config.services.postgresql.port ];
       };
 
       services.postgresql = {
         enable = true;
+        # TODO remove once https://github.com/NixOS/nixpkgs/pull/266270 is resolved.
+        package = pkgs.postgresql_14;
         enableTCPIP = true;
         authentication = ''
           hostnossl mastodon_local mastodon_test 192.168.2.201/32 md5
@@ -41,7 +43,7 @@ in
       };
     };
 
-    nginx = {
+    nginx = { nodes, ... }: {
       networking = {
         interfaces.eth1 = {
           ipv4.addresses = [
@@ -69,18 +71,14 @@ in
             tryFiles = "$uri @proxy";
           };
           locations."@proxy" = {
-            proxyPass = "http://192.168.2.201:55001";
-            proxyWebsockets = true;
-          };
-          locations."/api/v1/streaming/" = {
-            proxyPass = "http://192.168.2.201:55002";
+            proxyPass = "http://192.168.2.201:${toString nodes.server.services.mastodon.webPort}";
             proxyWebsockets = true;
           };
         };
       };
     };
 
-    server = { pkgs, ... }: {
+    server = { config, pkgs, ... }: {
       virtualisation.memorySize = 2048;
 
       environment = {
@@ -98,7 +96,10 @@ in
           ];
         };
         extraHosts = hosts;
-        firewall.allowedTCPPorts = [ 55001 55002 ];
+        firewall.allowedTCPPorts = [
+          config.services.mastodon.webPort
+          config.services.mastodon.sidekiqPort
+        ];
       };
 
       services.mastodon = {
@@ -106,6 +107,7 @@ in
         configureNginx = false;
         localDomain = "mastodon.local";
         enableUnixSocket = false;
+        streamingProcesses = 2;
         database = {
           createLocally = false;
           host = "192.168.2.102";
diff --git a/nixos/tests/web-apps/mastodon/script.nix b/nixos/tests/web-apps/mastodon/script.nix
index a89b4b7480e..afb7c0e0a0e 100644
--- a/nixos/tests/web-apps/mastodon/script.nix
+++ b/nixos/tests/web-apps/mastodon/script.nix
@@ -10,9 +10,8 @@
 
   server.wait_for_unit("redis-mastodon.service")
   server.wait_for_unit("mastodon-sidekiq-all.service")
-  server.wait_for_unit("mastodon-streaming.service")
+  server.wait_for_unit("mastodon-streaming.target")
   server.wait_for_unit("mastodon-web.service")
-  server.wait_for_open_port(55000)
   server.wait_for_open_port(55001)
 
   # Check that mastodon-media-auto-remove is scheduled
diff --git a/nixos/tests/web-apps/mastodon/standard.nix b/nixos/tests/web-apps/mastodon/standard.nix
index 14311afea3f..e5eb30fef59 100644
--- a/nixos/tests/web-apps/mastodon/standard.nix
+++ b/nixos/tests/web-apps/mastodon/standard.nix
@@ -40,11 +40,15 @@ in
         port = 31637;
       };
 
+      # TODO remove once https://github.com/NixOS/nixpkgs/pull/266270 is resolved.
+      services.postgresql.package = pkgs.postgresql_14;
+
       services.mastodon = {
         enable = true;
         configureNginx = true;
         localDomain = "mastodon.local";
         enableUnixSocket = false;
+        streamingProcesses = 2;
         smtp = {
           createLocally = false;
           fromAddress = "mastodon@mastodon.local";
diff --git a/nixos/tests/wiki-js.nix b/nixos/tests/wiki-js.nix
index fd054a9c590..8b3c51935a6 100644
--- a/nixos/tests/wiki-js.nix
+++ b/nixos/tests/wiki-js.nix
@@ -10,14 +10,15 @@ import ./make-test-python.nix ({ pkgs, lib, ...} : {
       enable = true;
       settings.db.host = "/run/postgresql";
       settings.db.user = "wiki-js";
+      settings.db.db = "wiki-js";
       settings.logLevel = "debug";
     };
     services.postgresql = {
       enable = true;
-      ensureDatabases = [ "wiki" ];
+      ensureDatabases = [ "wiki-js" ];
       ensureUsers = [
         { name = "wiki-js";
-          ensurePermissions."DATABASE wiki" = "ALL PRIVILEGES";
+          ensureDBOwnership = true;
         }
       ];
     };
diff --git a/nixos/tests/wordpress.nix b/nixos/tests/wordpress.nix
index 937b505af2a..592af9a094f 100644
--- a/nixos/tests/wordpress.nix
+++ b/nixos/tests/wordpress.nix
@@ -67,7 +67,7 @@ rec {
       networking.hosts."127.0.0.1" = [ "site1.local" "site2.local" ];
     };
   }) {} [
-    "6_3"
+    "6_3" "6_4"
   ];
 
   testScript = ''
diff --git a/nixos/tests/xmpp/ejabberd.nix b/nixos/tests/xmpp/ejabberd.nix
index 7926fe80de2..1a807b27b6f 100644
--- a/nixos/tests/xmpp/ejabberd.nix
+++ b/nixos/tests/xmpp/ejabberd.nix
@@ -1,7 +1,7 @@
 import ../make-test-python.nix ({ pkgs, ... }: {
   name = "ejabberd";
   meta = with pkgs.lib.maintainers; {
-    maintainers = [ ajs124 ];
+    maintainers = [ ];
   };
   nodes = {
     client = { nodes, pkgs, ... }: {