author    Alyssa Ross <hi@alyssa.is>  2020-05-13 00:32:00 +0000
committer Alyssa Ross <hi@alyssa.is>  2020-05-13 00:41:26 +0000
commit    439d80fbdcdf6245444e99e3764f233122c86358 (patch)
tree      bd769aabc0c8e46d3fdf8f0cc80297463e7d0dba /nixos
parent    cc2d9c385f776f38fa37656b8440b5c4a460e9a7 (diff)
parent    9f5e9ef4b71a2a1ea8efef56f5876cdc846d6387 (diff)
Merge remote-tracking branch 'nixpkgs/master' into master
Diffstat (limited to 'nixos')
-rw-r--r--  nixos/README | 2
-rw-r--r--  nixos/doc/manual/configuration/adding-custom-packages.xml | 2
-rw-r--r--  nixos/doc/manual/configuration/config-syntax.xml | 2
-rw-r--r--  nixos/doc/manual/configuration/summary.xml | 2
-rw-r--r--  nixos/doc/manual/configuration/x-windows.xml | 1
-rw-r--r--  nixos/doc/manual/configuration/xfce.xml | 10
-rwxr-xr-x  nixos/doc/manual/development/releases.xml | 2
-rw-r--r--  nixos/doc/manual/development/replace-modules.xml | 2
-rw-r--r--  nixos/doc/manual/installation/installing-behind-a-proxy.xml | 2
-rw-r--r--  nixos/doc/manual/installation/installing.xml | 5
-rw-r--r--  nixos/doc/manual/installation/obtaining.xml | 4
-rw-r--r--  nixos/doc/manual/installation/upgrading.xml | 16
-rw-r--r--  nixos/doc/manual/man-nixos-install.xml | 16
-rw-r--r--  nixos/doc/manual/release-notes/rl-1404.xml | 2
-rw-r--r--  nixos/doc/manual/release-notes/rl-1509.xml | 4
-rw-r--r--  nixos/doc/manual/release-notes/rl-1603.xml | 2
-rw-r--r--  nixos/doc/manual/release-notes/rl-2003.xml | 320
-rw-r--r--  nixos/doc/manual/release-notes/rl-2009.xml | 255
-rw-r--r--  nixos/lib/test-driver/log2html.xsl | 135
-rw-r--r--  nixos/lib/test-driver/logfile.css | 129
-rw-r--r--  nixos/lib/test-driver/test-driver.py | 34
-rw-r--r--  nixos/lib/test-driver/treebits.js | 30
-rw-r--r--  nixos/lib/testing-python.nix | 18
-rw-r--r--  nixos/lib/testing.nix | 16
-rw-r--r--  nixos/maintainers/scripts/azure-new/examples/basic/image.nix | 2
-rw-r--r--  nixos/modules/config/fonts/fontconfig.nix | 5
-rw-r--r--  nixos/modules/config/fonts/fontdir.nix | 1
-rw-r--r--  nixos/modules/config/fonts/ghostscript.nix | 1
-rw-r--r--  nixos/modules/config/ldap.nix | 2
-rw-r--r--  nixos/modules/config/networking.nix | 2
-rw-r--r--  nixos/modules/config/nsswitch.nix | 153
-rw-r--r--  nixos/modules/config/qt5.nix | 6
-rw-r--r--  nixos/modules/hardware/all-firmware.nix | 1
-rw-r--r--  nixos/modules/hardware/device-tree.nix | 4
-rw-r--r--  nixos/modules/hardware/opengl.nix | 26
-rw-r--r--  nixos/modules/hardware/video/nvidia.nix | 63
-rw-r--r--  nixos/modules/installer/cd-dvd/installation-cd-base.nix | 5
-rw-r--r--  nixos/modules/installer/cd-dvd/installation-cd-graphical-gnome.nix | 3
-rw-r--r--  nixos/modules/installer/tools/nix-fallback-paths.nix | 4
-rw-r--r--  nixos/modules/installer/tools/nixos-build-vms/build-vms.nix | 7
-rw-r--r--  nixos/modules/installer/tools/nixos-generate-config.pl | 8
-rw-r--r--  nixos/modules/installer/tools/nixos-install.sh | 13
-rw-r--r--  nixos/modules/installer/tools/tools.nix | 8
-rw-r--r--  nixos/modules/misc/version.nix | 4
-rw-r--r--  nixos/modules/module-list.nix | 8
-rw-r--r--  nixos/modules/profiles/docker-container.nix | 9
-rw-r--r--  nixos/modules/profiles/hardened.nix | 39
-rw-r--r--  nixos/modules/programs/captive-browser.nix | 2
-rw-r--r--  nixos/modules/programs/cdemu.nix | 1
-rw-r--r--  nixos/modules/programs/chromium.nix | 10
-rw-r--r--  nixos/modules/programs/criu.nix | 1
-rw-r--r--  nixos/modules/programs/fish.nix | 4
-rw-r--r--  nixos/modules/programs/npm.nix | 11
-rw-r--r--  nixos/modules/programs/singularity.nix | 4
-rw-r--r--  nixos/modules/programs/systemtap.nix | 1
-rw-r--r--  nixos/modules/programs/venus.nix | 2
-rw-r--r--  nixos/modules/programs/xonsh.nix | 27
-rw-r--r--  nixos/modules/programs/zsh/oh-my-zsh.nix | 1
-rw-r--r--  nixos/modules/rename.nix | 4
-rw-r--r--  nixos/modules/security/acme.nix | 28
-rw-r--r--  nixos/modules/security/acme.xml | 247
-rw-r--r--  nixos/modules/security/apparmor-suid.nix | 1
-rw-r--r--  nixos/modules/security/doas.nix | 274
-rw-r--r--  nixos/modules/security/google_oslogin.nix | 1
-rw-r--r--  nixos/modules/security/pam.nix | 29
-rw-r--r--  nixos/modules/security/prey.nix | 51
-rw-r--r--  nixos/modules/security/systemd-confinement.nix | 5
-rw-r--r--  nixos/modules/services/amqp/rabbitmq.nix | 1
-rw-r--r--  nixos/modules/services/audio/mopidy.nix | 4
-rw-r--r--  nixos/modules/services/audio/mpd.nix | 2
-rw-r--r--  nixos/modules/services/backup/mysql-backup.nix | 7
-rw-r--r--  nixos/modules/services/backup/postgresql-backup.nix | 7
-rw-r--r--  nixos/modules/services/backup/znapzend.nix | 46
-rw-r--r--  nixos/modules/services/cluster/k3s/default.nix | 101
-rw-r--r--  nixos/modules/services/computing/boinc/client.nix | 6
-rw-r--r--  nixos/modules/services/continuous-integration/gitlab-runner.nix | 540
-rw-r--r--  nixos/modules/services/continuous-integration/hydra/default.nix | 52
-rw-r--r--  nixos/modules/services/databases/clickhouse.nix | 5
-rw-r--r--  nixos/modules/services/databases/cockroachdb.nix | 2
-rw-r--r--  nixos/modules/services/databases/firebird.nix | 7
-rw-r--r--  nixos/modules/services/databases/memcached.nix | 7
-rw-r--r--  nixos/modules/services/databases/mongodb.nix | 7
-rw-r--r--  nixos/modules/services/databases/openldap.nix | 4
-rw-r--r--  nixos/modules/services/databases/postgresql.nix | 8
-rw-r--r--  nixos/modules/services/databases/virtuoso.nix | 5
-rw-r--r--  nixos/modules/services/editors/emacs.xml | 2
-rw-r--r--  nixos/modules/services/hardware/ratbagd.nix | 7
-rw-r--r--  nixos/modules/services/hardware/thermald.nix | 7
-rw-r--r--  nixos/modules/services/logging/awstats.nix | 4
-rw-r--r--  nixos/modules/services/mail/dovecot.nix | 2
-rw-r--r--  nixos/modules/services/mail/postfix.nix | 1
-rw-r--r--  nixos/modules/services/mail/roundcube.nix | 19
-rw-r--r--  nixos/modules/services/mail/rss2email.nix | 2
-rw-r--r--  nixos/modules/services/mail/spamassassin.nix | 6
-rw-r--r--  nixos/modules/services/misc/airsonic.nix | 1
-rw-r--r--  nixos/modules/services/misc/autofs.nix | 2
-rw-r--r--  nixos/modules/services/misc/cgminer.nix | 8
-rwxr-xr-x  nixos/modules/services/misc/confd.nix | 2
-rw-r--r--  nixos/modules/services/misc/devmon.nix | 7
-rw-r--r--  nixos/modules/services/misc/disnix.nix | 10
-rw-r--r--  nixos/modules/services/misc/dysnomia.nix | 2
-rw-r--r--  nixos/modules/services/misc/etcd.nix | 2
-rw-r--r--  nixos/modules/services/misc/felix.nix | 5
-rw-r--r--  nixos/modules/services/misc/gitea.nix | 162
-rw-r--r--  nixos/modules/services/misc/gitlab.nix | 2
-rw-r--r--  nixos/modules/services/misc/gogs.nix | 6
-rw-r--r--  nixos/modules/services/misc/ihaskell.nix | 1
-rw-r--r--  nixos/modules/services/misc/leaps.nix | 2
-rw-r--r--  nixos/modules/services/misc/matrix-synapse.xml | 3
-rw-r--r--  nixos/modules/services/misc/nix-daemon.nix | 5
-rw-r--r--  nixos/modules/services/misc/octoprint.nix | 9
-rw-r--r--  nixos/modules/services/misc/pykms.nix | 1
-rw-r--r--  nixos/modules/services/misc/redmine.nix | 4
-rw-r--r--  nixos/modules/services/misc/safeeyes.nix | 5
-rw-r--r--  nixos/modules/services/misc/ssm-agent.nix | 3
-rw-r--r--  nixos/modules/services/misc/sssd.nix | 5
-rw-r--r--  nixos/modules/services/misc/svnserve.nix | 1
-rw-r--r--  nixos/modules/services/misc/synergy.nix | 16
-rw-r--r--  nixos/modules/services/monitoring/bosun.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/datadog-agent.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/grafana-reporter.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/grafana.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/prometheus/alertmanager.nix | 11
-rw-r--r--  nixos/modules/services/monitoring/prometheus/default.nix | 4
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/snmp.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/scollector.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/tuptime.nix | 84
-rw-r--r--  nixos/modules/services/network-filesystems/ipfs.nix | 4
-rw-r--r--  nixos/modules/services/network-filesystems/netatalk.nix | 6
-rw-r--r--  nixos/modules/services/network-filesystems/rsyncd.nix | 5
-rw-r--r--  nixos/modules/services/network-filesystems/xtreemfs.nix | 6
-rw-r--r--  nixos/modules/services/network-filesystems/yandex-disk.nix | 1
-rw-r--r--  nixos/modules/services/networking/amuled.nix | 1
-rw-r--r--  nixos/modules/services/networking/babeld.nix | 7
-rw-r--r--  nixos/modules/services/networking/bind.nix | 7
-rw-r--r--  nixos/modules/services/networking/bitlbee.nix | 1
-rw-r--r--  nixos/modules/services/networking/cntlm.nix | 7
-rw-r--r--  nixos/modules/services/networking/consul.nix | 8
-rw-r--r--  nixos/modules/services/networking/flannel.nix | 6
-rw-r--r--  nixos/modules/services/networking/flashpolicyd.nix | 1
-rw-r--r--  nixos/modules/services/networking/gogoclient.nix | 1
-rw-r--r--  nixos/modules/services/networking/gvpe.nix | 8
-rw-r--r--  nixos/modules/services/networking/hostapd.nix | 40
-rw-r--r--  nixos/modules/services/networking/ircd-hybrid/default.nix | 7
-rw-r--r--  nixos/modules/services/networking/mailpile.nix | 8
-rw-r--r--  nixos/modules/services/networking/monero.nix | 2
-rw-r--r--  nixos/modules/services/networking/nftables.nix | 2
-rw-r--r--  nixos/modules/services/networking/ntp/chrony.nix | 15
-rw-r--r--  nixos/modules/services/networking/ntp/ntpd.nix | 1
-rw-r--r--  nixos/modules/services/networking/openfire.nix | 8
-rw-r--r--  nixos/modules/services/networking/pixiecore.nix | 2
-rw-r--r--  nixos/modules/services/networking/prayer.nix | 7
-rw-r--r--  nixos/modules/services/networking/prosody.nix | 392
-rw-r--r--  nixos/modules/services/networking/prosody.xml | 88
-rw-r--r--  nixos/modules/services/networking/quassel.nix | 7
-rw-r--r--  nixos/modules/services/networking/radvd.nix | 1
-rw-r--r--  nixos/modules/services/networking/rdnssd.nix | 1
-rw-r--r--  nixos/modules/services/networking/sabnzbd.nix | 6
-rw-r--r--  nixos/modules/services/networking/shairport-sync.nix | 1
-rw-r--r--  nixos/modules/services/networking/skydns.nix | 2
-rw-r--r--  nixos/modules/services/networking/ssh/lshd.nix | 7
-rw-r--r--  nixos/modules/services/networking/tailscale.nix | 5
-rw-r--r--  nixos/modules/services/networking/tcpcrypt.nix | 1
-rw-r--r--  nixos/modules/services/networking/thelounge.nix | 1
-rw-r--r--  nixos/modules/services/networking/wicd.nix | 1
-rw-r--r--  nixos/modules/services/networking/xinetd.nix | 7
-rw-r--r--  nixos/modules/services/networking/yggdrasil.nix | 11
-rw-r--r--  nixos/modules/services/printing/cupsd.nix | 16
-rw-r--r--  nixos/modules/services/security/bitwarden_rs/default.nix | 2
-rw-r--r--  nixos/modules/services/security/fprot.nix | 7
-rw-r--r--  nixos/modules/services/security/hologram-agent.nix | 4
-rw-r--r--  nixos/modules/services/security/hologram-server.nix | 2
-rw-r--r--  nixos/modules/services/security/oauth2_proxy.nix | 31
-rw-r--r--  nixos/modules/services/system/kerberos/default.nix | 7
-rw-r--r--  nixos/modules/services/system/localtime.nix | 1
-rw-r--r--  nixos/modules/services/system/nscd.nix | 18
-rw-r--r--  nixos/modules/services/system/uptimed.nix | 1
-rw-r--r--  nixos/modules/services/torrent/deluge.nix | 35
-rw-r--r--  nixos/modules/services/web-apps/dokuwiki.nix | 508
-rw-r--r--  nixos/modules/services/web-apps/mattermost.nix | 2
-rw-r--r--  nixos/modules/services/web-apps/mediawiki.nix | 23
-rw-r--r--  nixos/modules/services/web-apps/nextcloud.nix | 21
-rw-r--r--  nixos/modules/services/web-apps/wordpress.nix | 4
-rw-r--r--  nixos/modules/services/web-servers/apache-httpd/default.nix | 46
-rw-r--r--  nixos/modules/services/web-servers/apache-httpd/vhost-options.nix | 2
-rw-r--r--  nixos/modules/services/web-servers/jboss/default.nix | 2
-rw-r--r--  nixos/modules/services/web-servers/nginx/default.nix | 28
-rw-r--r--  nixos/modules/services/web-servers/phpfpm/default.nix | 4
-rw-r--r--  nixos/modules/services/web-servers/traefik.nix | 125
-rw-r--r--  nixos/modules/services/web-servers/unit/default.nix | 30
-rw-r--r--  nixos/modules/services/x11/desktop-managers/enlightenment.nix | 1
-rw-r--r--  nixos/modules/services/x11/desktop-managers/gnome3.nix | 1
-rw-r--r--  nixos/modules/services/x11/desktop-managers/kodi.nix | 1
-rw-r--r--  nixos/modules/services/x11/desktop-managers/pantheon.nix | 2
-rw-r--r--  nixos/modules/services/x11/desktop-managers/pantheon.xml | 2
-rw-r--r--  nixos/modules/services/x11/display-managers/gdm.nix | 3
-rw-r--r--  nixos/modules/services/x11/display-managers/lightdm-greeters/mini.nix | 6
-rw-r--r--  nixos/modules/services/x11/display-managers/startx.nix | 1
-rw-r--r--  nixos/modules/services/x11/gdk-pixbuf.nix | 2
-rw-r--r--  nixos/modules/services/x11/hardware/digimend.nix | 7
-rw-r--r--  nixos/modules/services/x11/hardware/libinput.nix | 6
-rw-r--r--  nixos/modules/services/x11/hardware/wacom.nix | 1
-rw-r--r--  nixos/modules/services/x11/picom.nix | 187
-rw-r--r--  nixos/modules/services/x11/window-managers/default.nix | 1
-rw-r--r--  nixos/modules/services/x11/window-managers/tinywm.nix | 25
-rw-r--r--  nixos/modules/system/activation/no-clone.nix | 3
-rw-r--r--  nixos/modules/system/activation/top-level.nix | 65
-rw-r--r--  nixos/modules/system/boot/initrd-ssh.nix | 10
-rw-r--r--  nixos/modules/system/boot/loader/grub/install-grub.pl | 5
-rw-r--r--  nixos/modules/system/boot/loader/init-script/init-script-builder.sh | 2
-rw-r--r--  nixos/modules/system/boot/loader/raspberrypi/raspberrypi-builder.nix | 4
-rw-r--r--  nixos/modules/system/boot/loader/raspberrypi/uboot-builder.nix | 2
-rw-r--r--  nixos/modules/system/boot/networkd.nix | 141
-rw-r--r--  nixos/modules/system/boot/resolved.nix | 4
-rw-r--r--  nixos/modules/system/boot/stage-1.nix | 21
-rw-r--r--  nixos/modules/system/boot/systemd-lib.nix | 13
-rw-r--r--  nixos/modules/system/boot/systemd-nspawn.nix | 2
-rw-r--r--  nixos/modules/system/boot/systemd.nix | 111
-rw-r--r--  nixos/modules/tasks/network-interfaces-scripted.nix | 33
-rw-r--r--  nixos/modules/tasks/network-interfaces-systemd.nix | 7
-rw-r--r--  nixos/modules/tasks/network-interfaces.nix | 38
-rw-r--r--  nixos/modules/virtualisation/containers.nix | 879
-rw-r--r--  nixos/modules/virtualisation/cri-o.nix | 59
-rw-r--r--  nixos/modules/virtualisation/ec2-amis.nix | 54
-rw-r--r--  nixos/modules/virtualisation/ecs-agent.nix | 3
-rw-r--r--  nixos/modules/virtualisation/hyperv-image.nix | 69
-rw-r--r--  nixos/modules/virtualisation/lxd.nix | 2
-rw-r--r--  nixos/modules/virtualisation/nixos-containers.nix | 844
-rw-r--r--  nixos/modules/virtualisation/oci-containers.nix (renamed from nixos/modules/virtualisation/docker-containers.nix) | 124
-rw-r--r--  nixos/modules/virtualisation/podman.nix | 123
-rw-r--r--  nixos/modules/virtualisation/qemu-vm.nix | 15
-rw-r--r--  nixos/modules/virtualisation/xen-dom0.nix | 11
-rw-r--r--  nixos/release-combined.nix | 11
-rw-r--r--  nixos/release.nix | 8
-rw-r--r--  nixos/tests/acme.nix | 173
-rw-r--r--  nixos/tests/all-tests.nix | 33
-rw-r--r--  nixos/tests/caddy.nix | 44
-rw-r--r--  nixos/tests/chromium.nix | 2
-rw-r--r--  nixos/tests/cockroachdb.nix | 28
-rw-r--r--  nixos/tests/common/acme/client/default.nix | 15
-rw-r--r--  nixos/tests/common/acme/server/default.nix (renamed from nixos/tests/common/letsencrypt/default.nix) | 34
-rw-r--r--  nixos/tests/common/acme/server/mkcerts.nix (renamed from nixos/tests/common/letsencrypt/mkcerts.nix) | 5
-rwxr-xr-x  nixos/tests/common/acme/server/mkcerts.sh (renamed from nixos/tests/common/letsencrypt/mkcerts.sh) | 0
-rw-r--r--  nixos/tests/common/acme/server/snakeoil-certs.nix | 171
-rw-r--r--  nixos/tests/common/letsencrypt/common.nix | 12
-rw-r--r--  nixos/tests/common/letsencrypt/snakeoil-certs.nix | 254
-rw-r--r--  nixos/tests/common/resolver.nix | 2
-rw-r--r--  nixos/tests/containers-custom-pkgs.nix | 42
-rw-r--r--  nixos/tests/deluge.nix | 109
-rw-r--r--  nixos/tests/doas.nix | 83
-rw-r--r--  nixos/tests/docker-containers.nix | 27
-rw-r--r--  nixos/tests/dokuwiki.nix | 83
-rw-r--r--  nixos/tests/ec2.nix | 2
-rw-r--r--  nixos/tests/elk.nix | 1
-rw-r--r--  nixos/tests/enlightenment.nix | 101
-rw-r--r--  nixos/tests/flannel.nix | 37
-rw-r--r--  nixos/tests/gitdaemon.nix | 3
-rw-r--r--  nixos/tests/gnome3-xorg.nix | 6
-rw-r--r--  nixos/tests/gnome3.nix | 6
-rw-r--r--  nixos/tests/google-oslogin/default.nix | 18
-rw-r--r--  nixos/tests/google-oslogin/server.py | 83
-rw-r--r--  nixos/tests/hardened.nix | 11
-rw-r--r--  nixos/tests/hydra/db-migration.nix | 12
-rw-r--r--  nixos/tests/installed-tests/default.nix | 1
-rw-r--r--  nixos/tests/installed-tests/libjcat.nix | 5
-rw-r--r--  nixos/tests/installer.nix | 50
-rw-r--r--  nixos/tests/iodine.nix | 5
-rw-r--r--  nixos/tests/ipfs.nix | 58
-rw-r--r--  nixos/tests/k3s.nix | 78
-rw-r--r--  nixos/tests/ldap.nix | 405
-rw-r--r--  nixos/tests/mediawiki.nix | 7
-rw-r--r--  nixos/tests/minio.nix | 2
-rw-r--r--  nixos/tests/mysql/mariadb-galera-mariabackup.nix | 223
-rw-r--r--  nixos/tests/mysql/mariadb-galera-rsync.nix | 216
-rw-r--r--  nixos/tests/mysql/mysql-autobackup.nix (renamed from nixos/tests/automysqlbackup.nix) | 2
-rw-r--r--  nixos/tests/mysql/mysql-backup.nix (renamed from nixos/tests/mysql-backup.nix) | 2
-rw-r--r--  nixos/tests/mysql/mysql-replication.nix (renamed from nixos/tests/mysql-replication.nix) | 2
-rw-r--r--  nixos/tests/mysql/mysql.nix (renamed from nixos/tests/mysql.nix) | 2
-rw-r--r--  nixos/tests/mysql/testdb.sql (renamed from nixos/tests/testdb.sql) | 0
-rw-r--r--  nixos/tests/nesting.nix | 44
-rw-r--r--  nixos/tests/networking.nix | 50
-rw-r--r--  nixos/tests/nginx-etag.nix | 4
-rw-r--r--  nixos/tests/nginx.nix | 53
-rw-r--r--  nixos/tests/oci-containers.nix | 43
-rw-r--r--  nixos/tests/partition.nix | 247
-rw-r--r--  nixos/tests/php/default.nix | 1
-rw-r--r--  nixos/tests/php/fpm.nix | 14
-rw-r--r--  nixos/tests/php/httpd.nix | 31
-rw-r--r--  nixos/tests/php/pcre.nix | 10
-rw-r--r--  nixos/tests/podman.nix | 60
-rw-r--r--  nixos/tests/prometheus.nix | 2
-rw-r--r--  nixos/tests/redmine.nix | 70
-rw-r--r--  nixos/tests/roundcube.nix | 1
-rw-r--r--  nixos/tests/service-runner.nix | 2
-rw-r--r--  nixos/tests/specialisation.nix | 43
-rw-r--r--  nixos/tests/systemd-boot.nix | 31
-rw-r--r--  nixos/tests/systemd-confinement.nix | 135
-rw-r--r--  nixos/tests/systemd-networkd-dhcpserver.nix | 58
-rw-r--r--  nixos/tests/systemd-networkd-ipv6-prefix-delegation.nix | 295
-rw-r--r--  nixos/tests/traefik.nix | 87
-rw-r--r--  nixos/tests/tuptime.nix | 29
-rw-r--r--  nixos/tests/udisks2.nix | 2
-rw-r--r--  nixos/tests/web-servers/unit-php.nix | 47
-rw-r--r--  nixos/tests/xmpp/prosody-mysql.nix | 15
-rw-r--r--  nixos/tests/xmpp/prosody.nix | 85
-rw-r--r--  nixos/tests/xmpp/xmpp-sendmessage.nix | 105
306 files changed, 7659 insertions, 4205 deletions
diff --git a/nixos/README b/nixos/README
index 4ecf648a930..ce4dd1988d2 100644
--- a/nixos/README
+++ b/nixos/README
@@ -2,4 +2,4 @@
 
 NixOS is a Linux distribution based on the purely functional package
 management system Nix.  More information can be found at
-http://nixos.org/nixos and in the manual in doc/manual.
+https://nixos.org/nixos and in the manual in doc/manual.
diff --git a/nixos/doc/manual/configuration/adding-custom-packages.xml b/nixos/doc/manual/configuration/adding-custom-packages.xml
index 182641055e4..02cb78f47e8 100644
--- a/nixos/doc/manual/configuration/adding-custom-packages.xml
+++ b/nixos/doc/manual/configuration/adding-custom-packages.xml
@@ -11,7 +11,7 @@
   the package to your clone, and (optionally) submit a patch or pull request to
   have it accepted into the main Nixpkgs repository. This is described in
   detail in the <link
-xlink:href="http://nixos.org/nixpkgs/manual">Nixpkgs
+xlink:href="https://nixos.org/nixpkgs/manual">Nixpkgs
   manual</link>. In short, you clone Nixpkgs:
 <screen>
 <prompt>$ </prompt>git clone https://github.com/NixOS/nixpkgs
diff --git a/nixos/doc/manual/configuration/config-syntax.xml b/nixos/doc/manual/configuration/config-syntax.xml
index 5ef498cf9ae..5526dea247c 100644
--- a/nixos/doc/manual/configuration/config-syntax.xml
+++ b/nixos/doc/manual/configuration/config-syntax.xml
@@ -14,7 +14,7 @@
   when managing complex systems. The syntax and semantics of the Nix language
   are fully described in the
   <link
-xlink:href="http://nixos.org/nix/manual/#chap-writing-nix-expressions">Nix
+xlink:href="https://nixos.org/nix/manual/#chap-writing-nix-expressions">Nix
   manual</link>, but here we give a short overview of the most important
   constructs useful in NixOS configuration files.
  </para>
diff --git a/nixos/doc/manual/configuration/summary.xml b/nixos/doc/manual/configuration/summary.xml
index ea980254a8f..289face16de 100644
--- a/nixos/doc/manual/configuration/summary.xml
+++ b/nixos/doc/manual/configuration/summary.xml
@@ -10,7 +10,7 @@
   expression language. It’s not complete. In particular, there are many other
   built-in functions. See the
   <link
-xlink:href="http://nixos.org/nix/manual/#chap-writing-nix-expressions">Nix
+xlink:href="https://nixos.org/nix/manual/#chap-writing-nix-expressions">Nix
   manual</link> for the rest.
  </para>
 
diff --git a/nixos/doc/manual/configuration/x-windows.xml b/nixos/doc/manual/configuration/x-windows.xml
index 06dd7c8bfb9..110712baf5f 100644
--- a/nixos/doc/manual/configuration/x-windows.xml
+++ b/nixos/doc/manual/configuration/x-windows.xml
@@ -31,6 +31,7 @@
 <xref linkend="opt-services.xserver.windowManager.twm.enable"/> = true;
 <xref linkend="opt-services.xserver.windowManager.icewm.enable"/> = true;
 <xref linkend="opt-services.xserver.windowManager.i3.enable"/> = true;
+<xref linkend="opt-services.xserver.windowManager.herbstluftwm.enable"/> = true;
 </programlisting>
  </para>
  <para>
diff --git a/nixos/doc/manual/configuration/xfce.xml b/nixos/doc/manual/configuration/xfce.xml
index ebf1f493c5c..abcf5f648a4 100644
--- a/nixos/doc/manual/configuration/xfce.xml
+++ b/nixos/doc/manual/configuration/xfce.xml
@@ -16,11 +16,11 @@
   effects, some example settings:
 <programlisting>
 <link linkend="opt-services.picom.enable">services.picom</link> = {
-  <link linkend="opt-services.picom.enable">enable</link>          = true;
-  <link linkend="opt-services.picom.fade">fade</link>            = true;
-  <link linkend="opt-services.picom.inactiveOpacity">inactiveOpacity</link> = "0.9";
-  <link linkend="opt-services.picom.shadow">shadow</link>          = true;
-  <link linkend="opt-services.picom.fadeDelta">fadeDelta</link>       = 4;
+  <link linkend="opt-services.picom.enable">enable</link> = true;
+  <link linkend="opt-services.picom.fade">fade</link> = true;
+  <link linkend="opt-services.picom.inactiveOpacity">inactiveOpacity</link> = 0.9;
+  <link linkend="opt-services.picom.shadow">shadow</link> = true;
+  <link linkend="opt-services.picom.fadeDelta">fadeDelta</link> = 4;
 };
 </programlisting>
  </para>
diff --git a/nixos/doc/manual/development/releases.xml b/nixos/doc/manual/development/releases.xml
index cc0ec78cc74..8abc66dfec1 100755
--- a/nixos/doc/manual/development/releases.xml
+++ b/nixos/doc/manual/development/releases.xml
@@ -57,7 +57,7 @@
     <listitem>
      <para>
       <link xlink:href="https://github.com/NixOS/nixos-org-configurations/pull/18">
-      Make sure a channel is created at http://nixos.org/channels/. </link>
+      Make sure a channel is created at https://nixos.org/channels/. </link>
      </para>
     </listitem>
     <listitem>
diff --git a/nixos/doc/manual/development/replace-modules.xml b/nixos/doc/manual/development/replace-modules.xml
index b4a466e2294..9fc5678ca1b 100644
--- a/nixos/doc/manual/development/replace-modules.xml
+++ b/nixos/doc/manual/development/replace-modules.xml
@@ -37,7 +37,7 @@
 
   imports =
     [ # Use postgresql service from nixos-unstable channel.
-      # sudo nix-channel --add http://nixos.org/channels/nixos-unstable nixos-unstable
+      # sudo nix-channel --add https://nixos.org/channels/nixos-unstable nixos-unstable
       &lt;nixos-unstable/nixos/modules/services/databases/postgresql.nix&gt;
     ];
 
diff --git a/nixos/doc/manual/installation/installing-behind-a-proxy.xml b/nixos/doc/manual/installation/installing-behind-a-proxy.xml
index 8f9baff44b5..c1ef638e876 100644
--- a/nixos/doc/manual/installation/installing-behind-a-proxy.xml
+++ b/nixos/doc/manual/installation/installing-behind-a-proxy.xml
@@ -40,7 +40,7 @@ networking.proxy.noProxy = &quot;127.0.0.1,localhost,internal.domain&quot;;
  <note>
   <para>
    If you are switching networks with different proxy configurations, use the
-   <literal>nesting.clone</literal> option in
+   <literal>specialisation</literal> option in
    <literal>configuration.nix</literal> to switch proxies at runtime. Refer to
    <xref linkend="ch-options" /> for more information.
   </para>
diff --git a/nixos/doc/manual/installation/installing.xml b/nixos/doc/manual/installation/installing.xml
index 0dbfb39c32b..673df8f2e4c 100644
--- a/nixos/doc/manual/installation/installing.xml
+++ b/nixos/doc/manual/installation/installing.xml
@@ -41,6 +41,11 @@
    neo</command>!)
   </para>
 
+  <para>
+   If the text is too small to be legible, try <command>setfont ter-132n</command>
+   to increase the font size.
+  </para>
+
   <section xml:id="sec-installation-booting-networking">
    <title>Networking in the installer</title>
 
diff --git a/nixos/doc/manual/installation/obtaining.xml b/nixos/doc/manual/installation/obtaining.xml
index 56af5c0e25a..3b8671782de 100644
--- a/nixos/doc/manual/installation/obtaining.xml
+++ b/nixos/doc/manual/installation/obtaining.xml
@@ -7,7 +7,7 @@
  <para>
   NixOS ISO images can be downloaded from the
   <link
-xlink:href="http://nixos.org/nixos/download.html">NixOS download
+xlink:href="https://nixos.org/nixos/download.html">NixOS download
   page</link>. There are a number of installation options. If you happen to
   have an optical drive and a spare CD, burning the image to CD and booting
   from that is probably the easiest option. Most people will need to prepare a
@@ -26,7 +26,7 @@ xlink:href="https://nixos.wiki/wiki/NixOS_Installation_Guide#Making_the_installa
     <para>
      Using virtual appliances in Open Virtualization Format (OVF) that can be
      imported into VirtualBox. These are available from the
-     <link xlink:href="http://nixos.org/nixos/download.html">NixOS download
+     <link xlink:href="https://nixos.org/nixos/download.html">NixOS download
      page</link>.
     </para>
    </listitem>
diff --git a/nixos/doc/manual/installation/upgrading.xml b/nixos/doc/manual/installation/upgrading.xml
index 92864cf2557..e5e02aa0752 100644
--- a/nixos/doc/manual/installation/upgrading.xml
+++ b/nixos/doc/manual/installation/upgrading.xml
@@ -14,7 +14,7 @@
     <para>
      <emphasis>Stable channels</emphasis>, such as
      <literal
-    xlink:href="https://nixos.org/channels/nixos-19.09">nixos-19.09</literal>.
+    xlink:href="https://nixos.org/channels/nixos-20.03">nixos-20.03</literal>.
      These only get conservative bug fixes and package upgrades. For instance,
      a channel update may cause the Linux kernel on your system to be upgraded
      from 4.19.34 to 4.19.38 (a minor bug fix), but not from
@@ -38,7 +38,7 @@
     <para>
      <emphasis>Small channels</emphasis>, such as
      <literal
-    xlink:href="https://nixos.org/channels/nixos-19.09-small">nixos-19.09-small</literal>
+    xlink:href="https://nixos.org/channels/nixos-20.03-small">nixos-20.03-small</literal>
      or
      <literal
     xlink:href="https://nixos.org/channels/nixos-unstable-small">nixos-unstable-small</literal>.
@@ -63,8 +63,8 @@
  <para>
   When you first install NixOS, you’re automatically subscribed to the NixOS
   channel that corresponds to your installation source. For instance, if you
-  installed from a 19.09 ISO, you will be subscribed to the
-  <literal>nixos-19.09</literal> channel. To see which NixOS channel you’re
+  installed from a 20.03 ISO, you will be subscribed to the
+  <literal>nixos-20.03</literal> channel. To see which NixOS channel you’re
   subscribed to, run the following as root:
 <screen>
 # nix-channel --list | grep nixos
@@ -75,13 +75,13 @@ nixos https://nixos.org/channels/nixos-unstable
 # nix-channel --add https://nixos.org/channels/<replaceable>channel-name</replaceable> nixos
 </screen>
   (Be sure to include the <literal>nixos</literal> parameter at the end.) For
-  instance, to use the NixOS 19.09 stable channel:
+  instance, to use the NixOS 20.03 stable channel:
 <screen>
-# nix-channel --add https://nixos.org/channels/nixos-19.09 nixos
+# nix-channel --add https://nixos.org/channels/nixos-20.03 nixos
 </screen>
   If you have a server, you may want to use the “small” channel instead:
 <screen>
-# nix-channel --add https://nixos.org/channels/nixos-19.09-small nixos
+# nix-channel --add https://nixos.org/channels/nixos-20.03-small nixos
 </screen>
   And if you want to live on the bleeding edge:
 <screen>
@@ -132,7 +132,7 @@ nixos https://nixos.org/channels/nixos-unstable
    kernel, initrd or kernel modules.
    You can also specify a channel explicitly, e.g.
 <programlisting>
-<xref linkend="opt-system.autoUpgrade.channel"/> = https://nixos.org/channels/nixos-19.09;
+<xref linkend="opt-system.autoUpgrade.channel"/> = https://nixos.org/channels/nixos-20.03;
 </programlisting>
   </para>
  </section>
diff --git a/nixos/doc/manual/man-nixos-install.xml b/nixos/doc/manual/man-nixos-install.xml
index 9255ce763ef..84849282e9a 100644
--- a/nixos/doc/manual/man-nixos-install.xml
+++ b/nixos/doc/manual/man-nixos-install.xml
@@ -25,16 +25,6 @@
     </group>
    </arg>
    <arg>
-    <group choice='req'>
-     <arg choice='plain'>
-      <option>--print-build-logs</option>
-     </arg>
-     <arg choice='plain'>
-      <option>-L</option>
-     </arg>
-    </group>
-   </arg>
-   <arg>
     <arg choice='plain'>
      <option>-I</option>
     </arg>
@@ -179,12 +169,6 @@
     </listitem>
    </varlistentry>
    <varlistentry>
-    <term><option>--print-build-logs</option> / <option>-L</option></term>
-    <listitem>
-     <para>Print the full build logs of <command>nix build</command> to stderr.</para>
-    </listitem>
-   </varlistentry>
-   <varlistentry>
     <term>
      <option>--root</option>
     </term>
diff --git a/nixos/doc/manual/release-notes/rl-1404.xml b/nixos/doc/manual/release-notes/rl-1404.xml
index 8d8cea4303a..56dbb74a71d 100644
--- a/nixos/doc/manual/release-notes/rl-1404.xml
+++ b/nixos/doc/manual/release-notes/rl-1404.xml
@@ -49,7 +49,7 @@
     <para>
      Nix has been updated to 1.7
      (<link
-  xlink:href="http://nixos.org/nix/manual/#ssec-relnotes-1.7">details</link>).
+  xlink:href="https://nixos.org/nix/manual/#ssec-relnotes-1.7">details</link>).
     </para>
    </listitem>
    <listitem>
diff --git a/nixos/doc/manual/release-notes/rl-1509.xml b/nixos/doc/manual/release-notes/rl-1509.xml
index 5c4d9970178..098c8c5095b 100644
--- a/nixos/doc/manual/release-notes/rl-1509.xml
+++ b/nixos/doc/manual/release-notes/rl-1509.xml
@@ -22,7 +22,7 @@
     in excess of 8,000 Haskell packages. Detailed instructions on how to use
     that infrastructure can be found in the
     <link
-    xlink:href="http://nixos.org/nixpkgs/manual/#users-guide-to-the-haskell-infrastructure">User's
+    xlink:href="https://nixos.org/nixpkgs/manual/#users-guide-to-the-haskell-infrastructure">User's
     Guide to the Haskell Infrastructure</link>. Users migrating from an earlier
     release may find helpful information below, in the list of
     backwards-incompatible changes. Furthermore, we distribute 51(!) additional
@@ -555,7 +555,7 @@ nix-env -f &quot;&lt;nixpkgs&gt;&quot; -iA haskellPackages.pandoc
      the compiler now is the <literal>haskellPackages.ghcWithPackages</literal>
      function. The
      <link
-    xlink:href="http://nixos.org/nixpkgs/manual/#users-guide-to-the-haskell-infrastructure">User's
+    xlink:href="https://nixos.org/nixpkgs/manual/#users-guide-to-the-haskell-infrastructure">User's
      Guide to the Haskell Infrastructure</link> provides more information about
      this subject.
     </para>
diff --git a/nixos/doc/manual/release-notes/rl-1603.xml b/nixos/doc/manual/release-notes/rl-1603.xml
index 9b512c4b1e5..6d4b28825fa 100644
--- a/nixos/doc/manual/release-notes/rl-1603.xml
+++ b/nixos/doc/manual/release-notes/rl-1603.xml
@@ -54,7 +54,7 @@
     xlink:href="https://reproducible-builds.org/specs/source-date-epoch/">SOURCE_DATE_EPOCH</envar>
     to a deterministic value, and Nix has
     <link
-    xlink:href="http://nixos.org/nix/manual/#ssec-relnotes-1.11">gained
+    xlink:href="https://nixos.org/nix/manual/#ssec-relnotes-1.11">gained
     an option</link> to repeat a build a number of times to test determinism.
     An ongoing project, the goal of exact reproducibility is to allow binaries
     to be verified independently (e.g., a user might only trust binaries that
diff --git a/nixos/doc/manual/release-notes/rl-2003.xml b/nixos/doc/manual/release-notes/rl-2003.xml
index f09fb3255d8..393a9286ca4 100644
--- a/nixos/doc/manual/release-notes/rl-2003.xml
+++ b/nixos/doc/manual/release-notes/rl-2003.xml
@@ -3,7 +3,7 @@
          xmlns:xi="http://www.w3.org/2001/XInclude"
          version="5.0"
          xml:id="sec-release-20.03">
- <title>Release 20.03 (“Markhor”, 2020.03/??)</title>
+ <title>Release 20.03 (“Markhor”, 2020.04/20)</title>
 
  <section xmlns="http://docbook.org/ns/docbook"
          xmlns:xlink="http://www.w3.org/1999/xlink"
@@ -24,10 +24,23 @@
     </para>
    </listitem>
    <listitem>
+    <para>Core version changes:</para>
+    <para>gcc: 8.3.0 -&gt; 9.2.0</para>
+    <para>glibc: 2.27 -&gt; 2.30</para>
+    <para>linux: 4.19 -&gt; 5.4</para>
+    <para>mesa: 19.1.5 -&gt; 19.3.3</para>
+    <para>openssl: 1.0.2u -&gt; 1.1.1d</para>
+   </listitem>
+   <listitem>
+    <para>Desktop version changes:</para>
+    <para>plasma5: 5.16.5 -&gt; 5.17.5</para>
+    <para>kdeApplications: 19.08.2 -&gt; 19.12.3</para>
+    <para>gnome3: 3.32 -&gt; 3.34</para>
+    <para>pantheon: 5.0 -&gt; 5.1.3</para>
+   </listitem>
+   <listitem>
     <para>
      Linux kernel is updated to branch 5.4 by default (from 4.19).
-     Users of Intel GPUs may prefer to explicitly set branch to 4.19 to avoid some regressions.
-     <programlisting>boot.kernelPackages = pkgs.linuxPackages_4_19;</programlisting>
     </para>
    </listitem>
    <listitem>
@@ -44,6 +57,24 @@
     </para>
    </listitem>
    <listitem>
+    <para>
+     GNOME 3 has been upgraded to 3.34. Please take a look at their
+     <link xlink:href="https://help.gnome.org/misc/release-notes/3.34">Release Notes</link>
+     for details.
+    </para>
+   </listitem>
+   <listitem>
+    <para>
+     If you enable the Pantheon Desktop Manager via
+     <xref linkend="opt-services.xserver.desktopManager.pantheon.enable" />, we now default to also use
+     <link xlink:href="https://blog.elementary.io/say-hello-to-the-new-greeter/">
+      Pantheon's newly designed greeter
+     </link>.
+      Contrary to NixOS's usual update policy, Pantheon will receive updates during the cycle of
+      NixOS 20.03 when backwards compatible.
+    </para>
+   </listitem>
+   <listitem>
      <para>
        By default zfs pools will now be trimmed on a weekly basis.
        Trimming is only done on supported devices (i.e. NVME or SSDs)
@@ -128,6 +159,241 @@ See https://github.com/NixOS/nixpkgs/pull/71684 for details.
      It was created so Geary could function properly outside of GNOME.
     </para>
    </listitem>
+   <listitem>
+     <para>
+       <filename>./config/console.nix</filename>
+     </para>
+   </listitem>
+   <listitem>
+     <para>
+       <filename>./hardware/brillo.nix</filename>
+     </para>
+   </listitem>
+   <listitem>
+     <para>
+       <filename>./hardware/tuxedo-keyboard.nix</filename>
+     </para>
+   </listitem>
+   <listitem>
+     <para>
+       <filename>./programs/bandwhich.nix</filename>
+     </para>
+   </listitem>
+   <listitem>
+     <para>
+       <filename>./programs/bash-my-aws.nix</filename>
+     </para>
+   </listitem>
+   <listitem>
+     <para>
+       <filename>./programs/liboping.nix</filename>
+     </para>
+   </listitem>
+   <listitem>
+     <para>
+       <filename>./programs/traceroute.nix</filename>
+     </para>
+   </listitem>
+   <listitem>
+     <para>
+       <filename>./services/backup/sanoid.nix</filename>
+     </para>
+   </listitem>
+   <listitem>
+     <para>
+       <filename>./services/backup/syncoid.nix</filename>
+     </para>
+   </listitem>
+   <listitem>
+     <para>
+       <filename>./services/backup/zfs-replication.nix</filename>
+     </para>
+   </listitem>
+   <listitem>
+     <para>
+       <filename>./services/continuous-integration/buildkite-agents.nix</filename>
+     </para>
+   </listitem>
+   <listitem>
+     <para>
+       <filename>./services/databases/victoriametrics.nix</filename>
+     </para>
+   </listitem>
+   <listitem>
+     <para>
+       <filename>./services/desktops/gnome3/gnome-initial-setup.nix</filename>
+     </para>
+   </listitem>
+   <listitem>
+     <para>
+       <filename>./services/desktops/neard.nix</filename>
+     </para>
+   </listitem>
+   <listitem>
+     <para>
+       <filename>./services/games/openarena.nix</filename>
+     </para>
+   </listitem>
+   <listitem>
+     <para>
+       <filename>./services/hardware/fancontrol.nix</filename>
+     </para>
+   </listitem>
+   <listitem>
+     <para>
+       <filename>./services/mail/sympa.nix</filename>
+     </para>
+   </listitem>
+   <listitem>
+     <para>
+       <filename>./services/misc/freeswitch.nix</filename>
+     </para>
+   </listitem>
+   <listitem>
+     <para>
+       <filename>./services/misc/mame.nix</filename>
+     </para>
+   </listitem>
+   <listitem>
+     <para>
+       <filename>./services/monitoring/do-agent.nix</filename>
+     </para>
+   </listitem>
+   <listitem>
+     <para>
+       <filename>./services/monitoring/prometheus/xmpp-alerts.nix</filename>
+     </para>
+   </listitem>
+   <listitem>
+     <para>
+       <filename>./services/network-filesystems/orangefs/server.nix</filename>
+     </para>
+   </listitem>
+   <listitem>
+     <para>
+       <filename>./services/network-filesystems/orangefs/client.nix</filename>
+     </para>
+   </listitem>
+   <listitem>
+     <para>
+       <filename>./services/networking/3proxy.nix</filename>
+     </para>
+   </listitem>
+   <listitem>
+     <para>
+       <filename>./services/networking/corerad.nix</filename>
+     </para>
+   </listitem>
+   <listitem>
+     <para>
+       <filename>./services/networking/go-shadowsocks2.nix</filename>
+     </para>
+   </listitem>
+   <listitem>
+     <para>
+       <filename>./services/networking/ntp/openntpd.nix</filename>
+     </para>
+   </listitem>
+   <listitem>
+     <para>
+       <filename>./services/networking/shorewall.nix</filename>
+     </para>
+   </listitem>
+   <listitem>
+     <para>
+       <filename>./services/networking/shorewall6.nix</filename>
+     </para>
+   </listitem>
+   <listitem>
+     <para>
+       <filename>./services/networking/spacecookie.nix</filename>
+     </para>
+   </listitem>
+   <listitem>
+     <para>
+       <filename>./services/networking/trickster.nix</filename>
+     </para>
+   </listitem>
+   <listitem>
+     <para>
+       <filename>./services/networking/v2ray.nix</filename>
+     </para>
+   </listitem>
+   <listitem>
+     <para>
+       <filename>./services/networking/xandikos.nix</filename>
+     </para>
+   </listitem>
+   <listitem>
+     <para>
+       <filename>./services/networking/yggdrasil.nix</filename>
+     </para>
+   </listitem>
+   <listitem>
+     <para>
+       <filename>./services/web-apps/dokuwiki.nix</filename>
+     </para>
+   </listitem>
+   <listitem>
+     <para>
+       <filename>./services/web-apps/gotify-server.nix</filename>
+     </para>
+   </listitem>
+   <listitem>
+     <para>
+       <filename>./services/web-apps/grocy.nix</filename>
+     </para>
+   </listitem>
+   <listitem>
+     <para>
+       <filename>./services/web-apps/ihatemoney</filename>
+     </para>
+   </listitem>
+   <listitem>
+     <para>
+       <filename>./services/web-apps/moinmoin.nix</filename>
+     </para>
+   </listitem>
+   <listitem>
+     <para>
+       <filename>./services/web-apps/trac.nix</filename>
+     </para>
+   </listitem>
+   <listitem>
+     <para>
+       <filename>./services/web-apps/trilium.nix</filename>
+     </para>
+   </listitem>
+   <listitem>
+     <para>
+       <filename>./services/web-apps/shiori.nix</filename>
+     </para>
+   </listitem>
+   <listitem>
+     <para>
+       <filename>./services/web-servers/ttyd.nix</filename>
+     </para>
+   </listitem>
+   <listitem>
+     <para>
+       <filename>./services/x11/picom.nix</filename>
+     </para>
+   </listitem>
+   <listitem>
+     <para>
+       <filename>./services/x11/hardware/digimend.nix</filename>
+     </para>
+   </listitem>
+   <listitem>
+     <para>
+       <filename>./services/x11/imwheel.nix</filename>
+     </para>
+   </listitem>
+   <listitem>
+     <para>
+       <filename>./virtualisation/cri-o.nix</filename>
+     </para>
+   </listitem>
   </itemizedlist>
 
  </section>
@@ -632,6 +898,25 @@ auth required pam_succeed_if.so uid >= 1000 quiet
      The <option>services.dnscrypt-proxy</option> module has been removed
      as it used the deprecated version of dnscrypt-proxy. We've added
      <xref linkend="opt-services.dnscrypt-proxy2.enable"/> to use the supported version.
+     This module supports configuration via the Nix attribute set
+     <xref linkend="opt-services.dnscrypt-proxy2.settings" />, or by passing a TOML configuration file via
+     <xref linkend="opt-services.dnscrypt-proxy2.configFile" />.
+<programlisting>
+# Example configuration:
+services.dnscrypt-proxy2.enable = true;
+services.dnscrypt-proxy2.settings = {
+  listen_addresses = [ "127.0.0.1:43" ];
+  sources.public-resolvers = {
+    urls = [ "https://download.dnscrypt.info/resolvers-list/v2/public-resolvers.md" ];
+    cache_file = "public-resolvers.md";
+    minisign_key = "RWQf6LRCGA9i53mlYecO4IzT51TGPpvWucNSCh1CBM0QTaLn73Y7GFO3";
+    refresh_delay = 72;
+  };
+};
+
+services.dnsmasq.enable = true;
+services.dnsmasq.servers = [ "127.0.0.1#43" ];
+</programlisting>
     </para>
    </listitem>
    <listitem>
@@ -650,6 +935,15 @@ auth required pam_succeed_if.so uid >= 1000 quiet
    </listitem>
    <listitem>
     <para>
+      Haskell <varname>env</varname> and <varname>shellFor</varname> dev shell environments now organize dependencies the same way as regular builds.
+      In particular, rather than receiving all the different lists of dependencies mashed together as one big list, and then partitioning it into Haskell and non-Haskell dependencies, they work from the original, separate dependency parameters and don't need to algorithmically partition anything.
+    </para>
+    <para>
+      This means that if you incorrectly categorize a dependency (e.g. a non-Haskell library dependency as a <varname>buildDepends</varname>, or a run-time Haskell dependency as a <varname>setupDepends</varname>), builds that happened to work before may no longer work.
+    </para>
+   </listitem>
+   <listitem>
+    <para>
      The <package>gcc-snapshot</package>-package has been removed. It's marked as broken for &gt;2 years and used to point
      to a fairly old snapshot  from the <package>gcc7</package>-branch.
     </para>
@@ -720,7 +1014,7 @@ auth required pam_succeed_if.so uid >= 1000 quiet
     </para>
     <warning>
      <para>
-      Please note that if you're comming from <literal>19.03</literal> or older, you have
+      Please note that if you're coming from <literal>19.03</literal> or older, you have
       to manually upgrade to <literal>19.09</literal> first to upgrade your server
       to Nextcloud v16.
      </para>
@@ -809,7 +1103,8 @@ auth required pam_succeed_if.so uid >= 1000 quiet
    <listitem>
     <para>
      The nginx web server previously started its master process as root
-     privileged, then ran worker processes as a less privileged identity user.
+     privileged, then ran worker processes as a less privileged identity user
+     (the <literal>nginx</literal> user).
      This was changed to start all of nginx as a less privileged user (defined by
      <literal>services.nginx.user</literal> and
      <literal>services.nginx.group</literal>). As a consequence, all files that
@@ -817,6 +1112,13 @@ auth required pam_succeed_if.so uid >= 1000 quiet
      certificates and keys, etc.) must now be readable by this less privileged
      user/group.
     </para>
+    <para>
+     To continue to use the old approach, you can configure:
+      <programlisting>
+services.nginx.appendConfig = let cfg = config.services.nginx; in ''user ${cfg.user} ${cfg.group};'';
+systemd.services.nginx.serviceConfig.User = lib.mkForce "root";
+      </programlisting>
+    </para>
    </listitem>
    <listitem>
     <para>
@@ -843,9 +1145,11 @@ auth required pam_succeed_if.so uid >= 1000 quiet
      As well as this, the options <literal>security.acme.acceptTerms</literal> and either
      <literal>security.acme.email</literal> or <literal>security.acme.certs.&lt;name&gt;.email</literal>
      must be set in order to use the ACME module.
-     Certificates will be regenerated anew on the next renewal date. The credentials for simp-le are
-     preserved and thus it is possible to roll back to previous versions without breaking certificate
-     generation.
+     Certificates will be regenerated on activation, no account or certificate will be migrated from simp-le.
+     In particular private keys will not be preserved. However, the credentials for simp-le are preserved and
+     thus it is possible to roll back to previous versions without breaking certificate generation.
+     Note also that, contrary to simp-le, a new private key is created at each renewal by default, which can
+     have consequences if you embed (pin) your public key in applications.
     </para>
    </listitem>
    <listitem>
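As a reference for the ACME change described in the hunk above, the two options that must now be set could be configured roughly as in the following sketch; the e-mail address is a placeholder, not taken from this diff:

<programlisting>
{
  # Required since the switch away from simp-le:
  security.acme.acceptTerms = true;
  security.acme.email = "admin@example.org";  # placeholder contact address
}
</programlisting>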
diff --git a/nixos/doc/manual/release-notes/rl-2009.xml b/nixos/doc/manual/release-notes/rl-2009.xml
index cf7946ffb80..ede7bed609d 100644
--- a/nixos/doc/manual/release-notes/rl-2009.xml
+++ b/nixos/doc/manual/release-notes/rl-2009.xml
@@ -28,6 +28,11 @@
    </listitem>
    <listitem>
     <para>
+     We now distribute a GNOME ISO.
+    </para>
+   </listitem>
+   <listitem>
+    <para>
      PHP now defaults to PHP 7.4, updated from 7.3.
     </para>
    </listitem>
@@ -40,6 +45,22 @@
      make use of these new options instead.
     </para>
    </listitem>
+   <listitem>
+    <para>
+     There is a new module for Podman (<varname>virtualisation.podman</varname>), a drop-in replacement for the Docker command line.
+    </para>
+   </listitem>
+   <listitem>
+    <para>
+     The new <varname>virtualisation.containers</varname> module manages configuration shared by the CRI-O and Podman modules.
+    </para>
+   </listitem>
+   <listitem>
+    <para>
+      Declarative Docker containers are renamed from <varname>docker-containers</varname> to <varname>virtualisation.oci-containers.containers</varname>.
+      This is to make it possible to use <literal>podman</literal> instead of <literal>docker</literal>.
+    </para>
+   </listitem>
   </itemizedlist>
  </section>
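To illustrate the renamed option tree mentioned above, a declarative container might be written as in the sketch below; the image tag and the attribute names <literal>image</literal>, <literal>ports</literal> and <literal>environment</literal> are assumed to keep the shape of the old <varname>docker-containers</varname> options and are not taken from this diff:

<programlisting>
{
  # One declarative OCI container, run as a systemd service.
  virtualisation.oci-containers.containers.webserver = {
    image = "nginx:1.18";               # hypothetical image tag
    ports = [ "127.0.0.1:8080:80" ];    # host:container port mapping
    environment = { TZ = "UTC"; };
  };
}
</programlisting>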
 
@@ -56,7 +77,9 @@
 
   <itemizedlist>
    <listitem>
-    <para />
+    <para>
+      There is a new <xref linkend="opt-security.doas.enable"/> module that provides <command>doas</command>, a lighter alternative to <command>sudo</command> with many of the same features.
+    </para>
    </listitem>
   </itemizedlist>
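A minimal use of the new doas module mentioned above might look like the sketch below; <varname>security.doas.enable</varname> is named in the note itself, while <varname>security.doas.extraRules</varname> and its <literal>users</literal>, <literal>keepEnv</literal> and <literal>persist</literal> attributes are assumptions about the module added in this release, and the user name is purely illustrative:

<programlisting>
{
  # Enable doas as a lighter alternative to sudo.
  security.doas.enable = true;

  # Example rule: let the hypothetical user "alice" run commands as root,
  # keeping her environment and caching authentication for a short while.
  security.doas.extraRules = [
    { users = [ "alice" ]; keepEnv = true; persist = true; }
  ];
}
</programlisting>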
 
@@ -130,69 +153,69 @@
    </listitem>
    <listitem>
      <para>
-       Since this release there's an easy way to customize your PHP install to get a much smaller
-       base PHP with only wanted extensions enabled. See the following snippet installing a smaller PHP
-       with the extensions <literal>imagick</literal>, <literal>opcache</literal> and
+       Since this release there's an easy way to customize your PHP
+       install to get a much smaller base PHP with only wanted
+       extensions enabled. See the following snippet installing a
+       smaller PHP with the extensions <literal>imagick</literal>,
+       <literal>opcache</literal>, <literal>pdo</literal> and
        <literal>pdo_mysql</literal> loaded:
 
        <programlisting>
 environment.systemPackages = [
-(pkgs.php.buildEnv { extensions = pp: with pp; [
-    imagick
-    opcache
-    pdo_mysql
-  ]; })
+  (pkgs.php.withExtensions
+    ({ all, ... }: with all; [
+      imagick
+      opcache
+      pdo
+      pdo_mysql
+    ])
+  )
 ];</programlisting>
 
-       The default <literal>php</literal> attribute hasn't lost any extensions -
-       the <literal>opcache</literal> extension was added there.
+       The default <literal>php</literal> attribute hasn't lost any
+       extensions. The <literal>opcache</literal> extension has been
+       added.
 
        All upstream PHP extensions are available under <package><![CDATA[php.extensions.<name?>]]></package>.
      </para>
      <para>
-       The updated <literal>php</literal> attribute is now easily customizable to your liking
-       by using extensions instead of writing config files or changing configure flags.
-
-       Therefore we have removed the following configure flags:
+       All PHP <literal>config</literal> flags have been removed for
+       the following reasons:
 
        <itemizedlist>
-         <title>PHP <literal>config</literal> flags that we don't read anymore:</title>
-         <listitem><para><literal>config.php.argon2</literal></para></listitem>
-         <listitem><para><literal>config.php.bcmath</literal></para></listitem>
-         <listitem><para><literal>config.php.bz2</literal></para></listitem>
-         <listitem><para><literal>config.php.calendar</literal></para></listitem>
-         <listitem><para><literal>config.php.curl</literal></para></listitem>
-         <listitem><para><literal>config.php.exif</literal></para></listitem>
-         <listitem><para><literal>config.php.ftp</literal></para></listitem>
-         <listitem><para><literal>config.php.gd</literal></para></listitem>
-         <listitem><para><literal>config.php.gettext</literal></para></listitem>
-         <listitem><para><literal>config.php.gmp</literal></para></listitem>
-         <listitem><para><literal>config.php.imap</literal></para></listitem>
-         <listitem><para><literal>config.php.intl</literal></para></listitem>
-         <listitem><para><literal>config.php.ldap</literal></para></listitem>
-         <listitem><para><literal>config.php.libxml2</literal></para></listitem>
-         <listitem><para><literal>config.php.libzip</literal></para></listitem>
-         <listitem><para><literal>config.php.mbstring</literal></para></listitem>
-         <listitem><para><literal>config.php.mysqli</literal></para></listitem>
-         <listitem><para><literal>config.php.mysqlnd</literal></para></listitem>
-         <listitem><para><literal>config.php.openssl</literal></para></listitem>
-         <listitem><para><literal>config.php.pcntl</literal></para></listitem>
-         <listitem><para><literal>config.php.pdo_mysql</literal></para></listitem>
-         <listitem><para><literal>config.php.pdo_odbc</literal></para></listitem>
-         <listitem><para><literal>config.php.pdo_pgsql</literal></para></listitem>
-         <listitem><para><literal>config.php.phpdbg</literal></para></listitem>
-         <listitem><para><literal>config.php.postgresql</literal></para></listitem>
-         <listitem><para><literal>config.php.readline</literal></para></listitem>
-         <listitem><para><literal>config.php.soap</literal></para></listitem>
-         <listitem><para><literal>config.php.sockets</literal></para></listitem>
-         <listitem><para><literal>config.php.sodium</literal></para></listitem>
-         <listitem><para><literal>config.php.sqlite</literal></para></listitem>
-         <listitem><para><literal>config.php.tidy</literal></para></listitem>
-         <listitem><para><literal>config.php.xmlrpc</literal></para></listitem>
-         <listitem><para><literal>config.php.xsl</literal></para></listitem>
-         <listitem><para><literal>config.php.zip</literal></para></listitem>
-         <listitem><para><literal>config.php.zlib</literal></para></listitem>
+         <listitem>
+           <para>
+             The updated <literal>php</literal> attribute is now easily
+             customizable to your liking by using
+             <literal>php.withExtensions</literal> or
+             <literal>php.buildEnv</literal> instead of writing config files
+             or changing configure flags.             
+           </para>
+         </listitem>
+         <listitem>
+           <para>
+             The remaining configuration flags can now be set directly on
+             the <literal>php</literal> attribute. For example, instead of 
+
+             <programlisting>
+php.override {
+  config.php.embed = true;
+  config.php.apxs2 = false;
+}
+             </programlisting>
+
+             you should now write
+
+             <programlisting>
+php.override {
+  embedSupport = true;
+  apxs2Support = false;
+}
+             </programlisting>
+           </para>
+         </listitem>
        </itemizedlist>
+
      </para>
    </listitem>
    <listitem>
@@ -203,6 +226,124 @@ environment.systemPackages = [
       <link xlink:href="https://github.com/gollum/gollum/wiki/5.0-release-notes#migrating-your-wiki">here</link>.
      </para>
    </listitem>
+   <listitem>
+     <para>
+       Deluge 2.x was added and is used as default for new NixOS
+       installations where stateVersion is >= 20.09. If you are upgrading from a previous
+       NixOS version, you can set <literal>services.deluge.package = pkgs.deluge-2_x</literal>
+       to upgrade to Deluge 2.x and migrate the state to the new format.
+       Be aware that backwards state migrations are not supported by Deluge.
+     </para>
+   </listitem>
+
+   <listitem>
+    <para>
+      The NixOS options <literal>nesting.clone</literal> and
+      <literal>nesting.children</literal> have been deleted, and
+      replaced with named <xref linkend="opt-specialisation"/>
+      configurations.
+    </para>
+
+    <para>
+      Replace a <literal>nesting.clone</literal> entry with:
+
+<programlisting>{
+<link xlink:href="#opt-specialisation">specialisation.example-sub-configuration</link> = {
+  <link xlink:href="#opt-specialisation._name_.configuration">configuration</link> = {
+    ...
+  };
+};</programlisting>
+
+    </para>
+    <para>
+      Replace a <literal>nesting.children</literal> entry with:
+
+<programlisting>{
+<link xlink:href="#opt-specialisation">specialisation.example-sub-configuration</link> = {
+  <link xlink:href="#opt-specialisation._name_.inheritParentConfig">inheritParentConfig</link> = false;
+  <link xlink:href="#opt-specialisation._name_.configuration">configuration</link> = {
+    ...
+  };
+};</programlisting>
+    </para>
+
+    <para>
+     To switch to a specialised configuration at runtime you need to
+     run:
+<programlisting>
+# sudo /run/current-system/specialisation/example-sub-configuration/bin/switch-to-configuration test
+</programlisting>
+     Before you would have used:
+<programlisting>
+# sudo /run/current-system/fine-tune/child-1/bin/switch-to-configuration test
+</programlisting>
+    </para>
+   </listitem>
+   <listitem>
+    <para>
+      The Nginx log directory has been moved to <literal>/var/log/nginx</literal>, the cache directory
+      to <literal>/var/cache/nginx</literal>. The option <literal>services.nginx.stateDir</literal> has
+      been removed.
+    </para>
+   </listitem>
+   <listitem>
+    <para>
+     The httpd web server previously started its main process as root
+     privileged, then ran worker processes as a less privileged identity user.
+     This was changed to start all of httpd as a less privileged user (defined by
+     <xref linkend="opt-services.httpd.user"/> and
+     <xref linkend="opt-services.httpd.group"/>). As a consequence, all files that
+     are needed for httpd to run (included configuration fragments, SSL
+     certificates and keys, etc.) must now be readable by this less privileged
+     user/group.
+    </para>
+    <para>
+     The default value for <xref linkend="opt-services.httpd.mpm"/>
+     has been changed from <literal>prefork</literal> to <literal>event</literal>. Along with
+     this change the default value for
+     <link linkend="opt-services.httpd.virtualHosts">services.httpd.virtualHosts.&lt;name&gt;.http2</link>
+     has been set to <literal>true</literal>.
+    </para>
+   </listitem>
+   <listitem>
+    <para>
+      The <literal>systemd-networkd</literal> option
+      <literal>systemd.network.networks.&lt;name&gt;.dhcp.CriticalConnection</literal>
+      has been removed following upstream systemd's deprecation of the same. It is recommended to use
+      <literal>systemd.network.networks.&lt;name&gt;.networkConfig.KeepConfiguration</literal> instead.
+      See <citerefentry><refentrytitle>systemd.network</refentrytitle>
+      <manvolnum>5</manvolnum></citerefentry> for details.
+    </para>
+   </listitem>
+   <listitem>
+    <para>
+     The <literal>systemd-networkd</literal> option
+     <literal>systemd.network.networks._name_.dhcpConfig</literal>
+     has been renamed to
+     <xref linkend="opt-systemd.network.networks._name_.dhcpV4Config"/>
+     following upstream systemd's documentation change.
+     See <citerefentry><refentrytitle>systemd.network</refentrytitle>
+     <manvolnum>5</manvolnum></citerefentry> for details.
+    </para>
+   </listitem>
+   <listitem>
+    <para>
+      In the <literal>picom</literal> module, several options that accepted
+      floating point numbers encoded as strings (for example
+      <xref linkend="opt-services.picom.activeOpacity"/>) have been changed
+      to the (relatively) new native <literal>float</literal> type. To migrate
+      your configuration, simply remove the quotes around the numbers.
+    </para>
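+    <para>
+      For example (a minimal sketch):
+<programlisting>{
+  # before: services.picom.activeOpacity = "0.8";
+  services.picom.activeOpacity = 0.8;
+}</programlisting>
+    </para>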
+   </listitem>
+   <listitem>
+    <para>
+      When using <literal>buildBazelPackage</literal> from Nixpkgs,
+      <literal>flat</literal> hash mode is now used for dependencies
+      instead of <literal>recursive</literal>. This is to better allow
+      using hashed mirrors where needed. As a result, these hashes
+      will have changed.
+    </para>
+   </listitem>
   </itemizedlist>
  </section>
 
@@ -216,6 +357,13 @@ environment.systemPackages = [
   <itemizedlist>
    <listitem>
     <para>
+     <option>services.journald.rateLimitBurst</option> was updated from
+     <literal>1000</literal> to <literal>10000</literal> to follow the new
+     upstream systemd default.
+    </para>
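+    <para>
+     If you relied on the previous limit, a minimal sketch to restore it
+     explicitly is:
+<programlisting>{
+  services.journald.rateLimitBurst = 1000;
+}</programlisting>
+    </para>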
+   </listitem>
+   <listitem>
+    <para>
      The <package>notmuch</package> package moved its emacs-related binaries and
      emacs lisp files to a separate output. They're not part
      of the default <literal>out</literal> output anymore - if you relied on the
@@ -223,6 +371,11 @@ environment.systemPackages = [
      the <literal>notmuch.emacs</literal> output.
     </para>
    </listitem>
+   <listitem>
+   <para>
+     The default output of <literal>buildGoPackage</literal> is now <literal>$out</literal> instead of <literal>$bin</literal>.
+   </para>
+   </listitem>
   </itemizedlist>
  </section>
 </section>
diff --git a/nixos/lib/test-driver/log2html.xsl b/nixos/lib/test-driver/log2html.xsl
deleted file mode 100644
index 0485412b4c8..00000000000
--- a/nixos/lib/test-driver/log2html.xsl
+++ /dev/null
@@ -1,135 +0,0 @@
-<?xml version="1.0"?>
-
-<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
-
-  <xsl:output method='html' encoding="UTF-8"
-              doctype-public="-//W3C//DTD HTML 4.01//EN"
-              doctype-system="http://www.w3.org/TR/html4/strict.dtd" />
-
-  <xsl:template match="logfile">
-    <html>
-      <head>
-        <script type="text/javascript" src="jquery.min.js"></script>
-        <script type="text/javascript" src="jquery-ui.min.js"></script>
-        <script type="text/javascript" src="treebits.js" />
-        <link rel="stylesheet" href="logfile.css" type="text/css" />
-        <title>Log File</title>
-      </head>
-      <body>
-        <h1>VM build log</h1>
-        <p>
-          <a href="javascript:" class="logTreeExpandAll">Expand all</a> |
-          <a href="javascript:" class="logTreeCollapseAll">Collapse all</a>
-        </p>
-        <ul class='toplevel'>
-          <xsl:for-each select='line|nest'>
-            <li>
-              <xsl:apply-templates select='.'/>
-            </li>
-          </xsl:for-each>
-        </ul>
-
-        <xsl:if test=".//*[@image]">
-          <h1>Screenshots</h1>
-          <ul class="vmScreenshots">
-            <xsl:for-each select='.//*[@image]'>
-              <li><a href="{@image}"><xsl:value-of select="@image" /></a></li>
-            </xsl:for-each>
-          </ul>
-        </xsl:if>
-
-      </body>
-    </html>
-  </xsl:template>
-
-
-  <xsl:template match="nest">
-
-    <!-- The tree should be collapsed by default if all children are
-         unimportant or if the header is unimportant. -->
-    <xsl:variable name="collapsed" select="not(./head[@expanded]) and count(.//*[@error]) = 0"/>
-
-    <xsl:variable name="style"><xsl:if test="$collapsed">display: none;</xsl:if></xsl:variable>
-
-    <xsl:if test="line|nest">
-      <a href="javascript:" class="logTreeToggle">
-        <xsl:choose>
-          <xsl:when test="$collapsed"><xsl:text>+</xsl:text></xsl:when>
-          <xsl:otherwise><xsl:text>-</xsl:text></xsl:otherwise>
-        </xsl:choose>
-      </a>
-      <xsl:text> </xsl:text>
-    </xsl:if>
-
-    <xsl:apply-templates select='head'/>
-
-    <!-- Be careful to only generate <ul>s if there are <li>s, otherwise it’s malformed. -->
-    <xsl:if test="line|nest">
-
-      <ul class='nesting' style="{$style}">
-        <xsl:for-each select='line|nest'>
-
-          <!-- Is this the last line?  If so, mark it as such so that it
-               can be rendered differently. -->
-          <xsl:variable name="class"><xsl:choose><xsl:when test="position() != last()">line</xsl:when><xsl:otherwise>lastline</xsl:otherwise></xsl:choose></xsl:variable>
-
-          <li class='{$class}'>
-            <span class='lineconn' />
-            <span class='linebody'>
-              <xsl:apply-templates select='.'/>
-            </span>
-          </li>
-        </xsl:for-each>
-      </ul>
-    </xsl:if>
-
-  </xsl:template>
-
-
-  <xsl:template match="head|line">
-    <code>
-      <xsl:if test="@error">
-        <xsl:attribute name="class">errorLine</xsl:attribute>
-      </xsl:if>
-      <xsl:if test="@warning">
-        <xsl:attribute name="class">warningLine</xsl:attribute>
-      </xsl:if>
-      <xsl:if test="@priority = 3">
-        <xsl:attribute name="class">prio3</xsl:attribute>
-      </xsl:if>
-
-      <xsl:if test="@type = 'serial'">
-        <xsl:attribute name="class">serial</xsl:attribute>
-      </xsl:if>
-
-      <xsl:if test="@machine">
-        <xsl:choose>
-          <xsl:when test="@type = 'serial'">
-            <span class="machine"><xsl:value-of select="@machine"/># </span>
-          </xsl:when>
-          <xsl:otherwise>
-            <span class="machine"><xsl:value-of select="@machine"/>: </span>
-          </xsl:otherwise>
-        </xsl:choose>
-      </xsl:if>
-
-      <xsl:choose>
-        <xsl:when test="@image">
-          <a href="{@image}"><xsl:apply-templates/></a>
-        </xsl:when>
-        <xsl:otherwise>
-          <xsl:apply-templates/>
-        </xsl:otherwise>
-      </xsl:choose>
-    </code>
-  </xsl:template>
-
-
-  <xsl:template match="storeref">
-    <em class='storeref'>
-      <span class='popup'><xsl:apply-templates/></span>
-      <span class='elided'>/...</span><xsl:apply-templates select='name'/><xsl:apply-templates select='path'/>
-    </em>
-  </xsl:template>
-
-</xsl:stylesheet>
diff --git a/nixos/lib/test-driver/logfile.css b/nixos/lib/test-driver/logfile.css
deleted file mode 100644
index a54d8504a86..00000000000
--- a/nixos/lib/test-driver/logfile.css
+++ /dev/null
@@ -1,129 +0,0 @@
-body {
-    font-family: sans-serif;
-    background: white;
-}
-
-h1
-{
-    color: #005aa0;
-    font-size: 180%;
-}
-
-a {
-    text-decoration: none;
-}
-
-
-ul.nesting, ul.toplevel {
-    padding: 0;
-    margin: 0;
-}
-
-ul.toplevel {
-    list-style-type: none;
-}
-
-.line, .head {
-    padding-top: 0em;
-}
-
-ul.nesting li.line, ul.nesting li.lastline {
-    position: relative;
-    list-style-type: none;
-}
-
-ul.nesting li.line {
-    padding-left: 2.0em;
-}
-
-ul.nesting li.lastline {
-    padding-left: 2.1em; /* for the 0.1em border-left in .lastline > .lineconn */
-}
-
-li.line {
-    border-left: 0.1em solid #6185a0;
-}
-
-li.line > span.lineconn, li.lastline > span.lineconn {
-    position: absolute;
-    height: 0.65em;
-    left: 0em;
-    width: 1.5em;
-    border-bottom: 0.1em solid #6185a0;
-}
-
-li.lastline > span.lineconn {
-    border-left: 0.1em solid #6185a0;
-}
-
-
-em.storeref {
-    color: #500000;
-    position: relative; 
-    width: 100%;
-}
-
-em.storeref:hover {
-    background-color: #eeeeee;
-}
-
-*.popup {
-    display: none;
-/*    background: url('http://losser.st-lab.cs.uu.nl/~mbravenb/menuback.png') repeat; */
-    background: #ffffcd;
-    border: solid #555555 1px;
-    position: absolute;
-    top: 0em;
-    left: 0em;
-    margin: 0;
-    padding: 0;
-    z-index: 100;
-}
-
-em.storeref:hover span.popup {
-    display: inline;
-    width: 40em;
-}
-
-
-.logTreeToggle {
-    text-decoration: none;
-    font-family: monospace;
-    font-size: larger;
-}
-
-.errorLine {
-    color: #ff0000;
-    font-weight: bold;
-}
-
-.warningLine {
-    color: darkorange;
-    font-weight: bold;
-}
-
-.prio3 {
-    font-style: italic;
-}
-
-code {
-    white-space: pre-wrap;
-}
-
-.serial {
-    color: #56115c;
-}
-
-.machine {
-    color: #002399;
-    font-style: italic;
-}
-
-ul.vmScreenshots {
-    padding-left: 1em;
-}
-
-ul.vmScreenshots li {
-    font-family: monospace;
-    list-style: square;
-}
diff --git a/nixos/lib/test-driver/test-driver.py b/nixos/lib/test-driver/test-driver.py
index 07f27515990..bf46d0df97f 100644
--- a/nixos/lib/test-driver/test-driver.py
+++ b/nixos/lib/test-driver/test-driver.py
@@ -85,8 +85,6 @@ CHAR_TO_KEY = {
 }
 
 # Forward references
-nr_tests: int
-failed_tests: list
 log: "Logger"
 machines: "List[Machine]"
 
@@ -145,7 +143,7 @@ class Logger:
         self.logfile = os.environ.get("LOGFILE", "/dev/null")
         self.logfile_handle = codecs.open(self.logfile, "wb")
         self.xml = XMLGenerator(self.logfile_handle, encoding="utf-8")
-        self.queue: "Queue[Dict[str, str]]" = Queue(1000)
+        self.queue: "Queue[Dict[str, str]]" = Queue()
 
         self.xml.startDocument()
         self.xml.startElement("logfile", attrs={})
@@ -371,7 +369,7 @@ class Machine:
             q = q.replace("'", "\\'")
             return self.execute(
                 (
-                    "su -l {} -c "
+                    "su -l {} --shell /bin/sh -c "
                     "$'XDG_RUNTIME_DIR=/run/user/`id -u` "
                     "systemctl --user {}'"
                 ).format(user, q)
@@ -393,11 +391,11 @@ class Machine:
     def execute(self, command: str) -> Tuple[int, str]:
         self.connect()
 
-        out_command = "( {} ); echo '|!EOF' $?\n".format(command)
+        out_command = "( {} ); echo '|!=EOF' $?\n".format(command)
         self.shell.send(out_command.encode())
 
         output = ""
-        status_code_pattern = re.compile(r"(.*)\|\!EOF\s+(\d+)")
+        status_code_pattern = re.compile(r"(.*)\|\!=EOF\s+(\d+)")
 
         while True:
             chunk = self.shell.recv(4096).decode(errors="ignore")
@@ -882,33 +880,16 @@ def run_tests() -> None:
         if machine.is_up():
             machine.execute("sync")
 
-    if nr_tests != 0:
-        nr_succeeded = nr_tests - len(failed_tests)
-        eprint("{} out of {} tests succeeded".format(nr_succeeded, nr_tests))
-        if len(failed_tests) > 0:
-            eprint(
-                "The following tests have failed:\n - {}".format(
-                    "\n - ".join(failed_tests)
-                )
-            )
-            sys.exit(1)
-
 
 @contextmanager
 def subtest(name: str) -> Iterator[None]:
-    global nr_tests
-    global failed_tests
-
     with log.nested(name):
-        nr_tests += 1
         try:
             yield
             return True
         except Exception as e:
-            failed_tests.append(
-                'Test "{}" failed with error: "{}"'.format(name, str(e))
-            )
-            log.log("error: {}".format(str(e)))
+            log.log(f'Test "{name}" failed with error: "{e}"')
+            raise e
 
     return False
 
@@ -928,9 +909,6 @@ if __name__ == "__main__":
     ]
     exec("\n".join(machine_eval))
 
-    nr_tests = 0
-    failed_tests = []
-
     @atexit.register
     def clean_up() -> None:
         with log.nested("cleaning up"):
diff --git a/nixos/lib/test-driver/treebits.js b/nixos/lib/test-driver/treebits.js
deleted file mode 100644
index 9754093dfd0..00000000000
--- a/nixos/lib/test-driver/treebits.js
+++ /dev/null
@@ -1,30 +0,0 @@
-$(document).ready(function() {
-
-    /* When a toggle is clicked, show or hide the subtree. */
-    $(".logTreeToggle").click(function() {
-        if ($(this).siblings("ul:hidden").length != 0) {
-            $(this).siblings("ul").show();
-            $(this).text("-");
-        } else {
-            $(this).siblings("ul").hide();
-            $(this).text("+");
-        }
-    });
-
-    /* Implementation of the expand all link. */
-    $(".logTreeExpandAll").click(function() {
-        $(".logTreeToggle", $(this).parent().siblings(".toplevel")).map(function() {
-            $(this).siblings("ul").show();
-            $(this).text("-");
-        });
-    });
-
-    /* Implementation of the collapse all link. */
-    $(".logTreeCollapseAll").click(function() {
-        $(".logTreeToggle", $(this).parent().siblings(".toplevel")).map(function() {
-            $(this).siblings("ul").hide();
-            $(this).text("+");
-        });
-    });
-
-});
diff --git a/nixos/lib/testing-python.nix b/nixos/lib/testing-python.nix
index 3891adc1043..88801f20517 100644
--- a/nixos/lib/testing-python.nix
+++ b/nixos/lib/testing-python.nix
@@ -62,25 +62,11 @@ in rec {
 
       requiredSystemFeatures = [ "kvm" "nixos-test" ];
 
-      buildInputs = [ libxslt ];
-
       buildCommand =
         ''
-          mkdir -p $out/nix-support
-
-          LOGFILE=$out/log.xml tests='exec(os.environ["testScript"])' ${driver}/bin/nixos-test-driver
-
-          # Generate a pretty-printed log.
-          xsltproc --output $out/log.html ${./test-driver/log2html.xsl} $out/log.xml
-          ln -s ${./test-driver/logfile.css} $out/logfile.css
-          ln -s ${./test-driver/treebits.js} $out/treebits.js
-          ln -s ${jquery}/js/jquery.min.js $out/
-          ln -s ${jquery}/js/jquery.js $out/
-          ln -s ${jquery-ui}/js/jquery-ui.min.js $out/
-          ln -s ${jquery-ui}/js/jquery-ui.js $out/
+          mkdir -p $out
 
-          touch $out/nix-support/hydra-build-products
-          echo "report testlog $out log.html" >> $out/nix-support/hydra-build-products
+          LOGFILE=/dev/null tests='exec(os.environ["testScript"])' ${driver}/bin/nixos-test-driver
 
           for i in */xchg/coverage-data; do
             mkdir -p $out/coverage-data
diff --git a/nixos/lib/testing.nix b/nixos/lib/testing.nix
index 7d6a5c0a290..cbb7faf039e 100644
--- a/nixos/lib/testing.nix
+++ b/nixos/lib/testing.nix
@@ -58,23 +58,11 @@ in rec {
 
       requiredSystemFeatures = [ "kvm" "nixos-test" ];
 
-      buildInputs = [ libxslt ];
-
       buildCommand =
         ''
-          mkdir -p $out/nix-support
-
-          LOGFILE=$out/log.xml tests='eval $ENV{testScript}; die $@ if $@;' ${driver}/bin/nixos-test-driver
-
-          # Generate a pretty-printed log.
-          xsltproc --output $out/log.html ${./test-driver/log2html.xsl} $out/log.xml
-          ln -s ${./test-driver/logfile.css} $out/logfile.css
-          ln -s ${./test-driver/treebits.js} $out/treebits.js
-          ln -s ${jquery}/js/jquery.min.js $out/
-          ln -s ${jquery-ui}/js/jquery-ui.min.js $out/
+          mkdir -p $out
 
-          touch $out/nix-support/hydra-build-products
-          echo "report testlog $out log.html" >> $out/nix-support/hydra-build-products
+          LOGFILE=/dev/null tests='eval $ENV{testScript}; die $@ if $@;' ${driver}/bin/nixos-test-driver
 
           for i in */xchg/coverage-data; do
             mkdir -p $out/coverage-data
diff --git a/nixos/maintainers/scripts/azure-new/examples/basic/image.nix b/nixos/maintainers/scripts/azure-new/examples/basic/image.nix
index 74b12815158..ad62dcd14a0 100644
--- a/nixos/maintainers/scripts/azure-new/examples/basic/image.nix
+++ b/nixos/maintainers/scripts/azure-new/examples/basic/image.nix
@@ -1,5 +1,5 @@
 let
-  pkgs = (import <nixpkgs> {});
+  pkgs = (import ../../../../../../default.nix {});
   machine = import "${pkgs.path}/nixos/lib/eval-config.nix" {
     system = "x86_64-linux";
     modules = [
diff --git a/nixos/modules/config/fonts/fontconfig.nix b/nixos/modules/config/fonts/fontconfig.nix
index 3bfa1893a8b..6ac64b0ec9c 100644
--- a/nixos/modules/config/fonts/fontconfig.nix
+++ b/nixos/modules/config/fonts/fontconfig.nix
@@ -45,6 +45,9 @@ let
 
   # generate the font cache setting file for a fontconfig version
   # use latest when no version is passed
+  # When cross-compiling, we can’t generate the cache, so we skip the
+  # <cachedir> part. fontconfig still works but is a little slower in
+  # looking things up.
   makeCacheConf = { version ? null }:
     let
       fcPackage = if version == null
@@ -60,11 +63,13 @@ let
       <fontconfig>
         <!-- Font directories -->
         ${concatStringsSep "\n" (map (font: "<dir>${font}</dir>") config.fonts.fonts)}
+        ${optionalString (pkgs.stdenv.hostPlatform == pkgs.stdenv.buildPlatform) ''
         <!-- Pre-generated font caches -->
         <cachedir>${cache}</cachedir>
         ${optionalString (pkgs.stdenv.isx86_64 && cfg.cache32Bit) ''
           <cachedir>${cache32}</cachedir>
         ''}
+        ''}
       </fontconfig>
     '';
 
diff --git a/nixos/modules/config/fonts/fontdir.nix b/nixos/modules/config/fonts/fontdir.nix
index cc70fbf8744..a6aa84ae822 100644
--- a/nixos/modules/config/fonts/fontdir.nix
+++ b/nixos/modules/config/fonts/fontdir.nix
@@ -25,6 +25,7 @@ in
     fonts = {
 
       enableFontDir = mkOption {
+        type = types.bool;
         default = false;
         description = ''
           Whether to create a directory with links to all fonts in
diff --git a/nixos/modules/config/fonts/ghostscript.nix b/nixos/modules/config/fonts/ghostscript.nix
index 1c62a525de9..b1dd81bf2d2 100644
--- a/nixos/modules/config/fonts/ghostscript.nix
+++ b/nixos/modules/config/fonts/ghostscript.nix
@@ -9,6 +9,7 @@ with lib;
     fonts = {
 
       enableGhostscriptFonts = mkOption {
+        type = types.bool;
         default = false;
         description = ''
           Whether to add the fonts provided by Ghostscript (such as
diff --git a/nixos/modules/config/ldap.nix b/nixos/modules/config/ldap.nix
index b554f197dc4..4c8b527676b 100644
--- a/nixos/modules/config/ldap.nix
+++ b/nixos/modules/config/ldap.nix
@@ -88,6 +88,7 @@ in
       };
 
       useTLS = mkOption {
+        type = types.bool;
         default = false;
         description = ''
           If enabled, use TLS (encryption) over an LDAP (port 389)
@@ -109,6 +110,7 @@ in
 
       daemon = {
         enable = mkOption {
+          type = types.bool;
           default = false;
           description = ''
             Whether to let the nslcd daemon (nss-pam-ldapd) handle the
diff --git a/nixos/modules/config/networking.nix b/nixos/modules/config/networking.nix
index dd36696b94d..03944de8249 100644
--- a/nixos/modules/config/networking.nix
+++ b/nixos/modules/config/networking.nix
@@ -197,7 +197,7 @@ in
 
       } // optionalAttrs (pkgs.stdenv.hostPlatform.libc == "glibc") {
         # /etc/rpc: RPC program numbers.
-        rpc.source = pkgs.glibc.out + "/etc/rpc";
+        rpc.source = pkgs.stdenv.cc.libc.out + "/etc/rpc";
       };
 
       networking.proxy.envVars =
diff --git a/nixos/modules/config/nsswitch.nix b/nixos/modules/config/nsswitch.nix
index 13277fe56e4..22ddb3490c8 100644
--- a/nixos/modules/config/nsswitch.nix
+++ b/nixos/modules/config/nsswitch.nix
@@ -8,37 +8,28 @@ let
 
   # only with nscd up and running we can load NSS modules that are not integrated in NSS
   canLoadExternalModules = config.services.nscd.enable;
-  myhostname = canLoadExternalModules;
-  mymachines = canLoadExternalModules;
+  # XXX Move these to their respective modules
   nssmdns = canLoadExternalModules && config.services.avahi.nssmdns;
   nsswins = canLoadExternalModules && config.services.samba.nsswins;
   ldap = canLoadExternalModules && (config.users.ldap.enable && config.users.ldap.nsswitch);
-  sssd = canLoadExternalModules && config.services.sssd.enable;
-  resolved = canLoadExternalModules && config.services.resolved.enable;
-  googleOsLogin = canLoadExternalModules && config.security.googleOsLogin.enable;
-
-  hostArray = [ "files" ]
-    ++ optional mymachines "mymachines"
-    ++ optional nssmdns "mdns_minimal [NOTFOUND=return]"
-    ++ optional nsswins "wins"
-    ++ optional resolved "resolve [!UNAVAIL=return]"
-    ++ [ "dns" ]
-    ++ optional nssmdns "mdns"
-    ++ optional myhostname "myhostname";
-
-  passwdArray = [ "files" ]
-    ++ optional sssd "sss"
-    ++ optional ldap "ldap"
-    ++ optional mymachines "mymachines"
-    ++ optional googleOsLogin "cache_oslogin oslogin"
-    ++ [ "systemd" ];
-
-  shadowArray = [ "files" ]
-    ++ optional sssd "sss"
-    ++ optional ldap "ldap";
-
-  servicesArray = [ "files" ]
-    ++ optional sssd "sss";
+
+  hostArray = mkMerge [
+    (mkBefore [ "files" ])
+    (mkIf nssmdns [ "mdns_minimal [NOTFOUND=return]" ])
+    (mkIf nsswins [ "wins" ])
+    (mkAfter [ "dns" ])
+    (mkIf nssmdns (mkOrder 1501 [ "mdns" ])) # 1501 to ensure it's after dns
+  ];
+
+  passwdArray = mkMerge [
+    (mkBefore [ "files" ])
+    (mkIf ldap [ "ldap" ])
+  ];
+
+  shadowArray = mkMerge [
+    (mkBefore [ "files" ])
+    (mkIf ldap [ "ldap" ])
+  ];
 
 in {
   options = {
@@ -61,17 +52,73 @@ in {
         };
     };
 
-    system.nssHosts = mkOption {
-      type = types.listOf types.str;
-      default = [];
-      example = [ "mdns" ];
-      description = ''
-        List of host entries to configure in <filename>/etc/nsswitch.conf</filename>.
-      '';
-    };
+    system.nssDatabases = {
+      passwd = mkOption {
+        type = types.listOf types.str;
+        description = ''
+          List of passwd entries to configure in <filename>/etc/nsswitch.conf</filename>.
+
+          Note that "files" is always prepended while "systemd" is appended if nscd is enabled.
+
+          This option only takes effect if nscd is enabled.
+        '';
+        default = [];
+      };
+
+      group = mkOption {
+        type = types.listOf types.str;
+        description = ''
+          List of group entries to configure in <filename>/etc/nsswitch.conf</filename>.
+
+          Note that "files" is always prepended while "systemd" is appended if nscd is enabled.
+
+          This option only takes effect if nscd is enabled.
+        '';
+        default = [];
+      };
+
+      shadow = mkOption {
+        type = types.listOf types.str;
+        description = ''
+          List of shadow entries to configure in <filename>/etc/nsswitch.conf</filename>.
 
+          Note that "files" is always prepended.
+
+          This option only takes effect if nscd is enabled.
+        '';
+        default = [];
+      };
+
+      hosts = mkOption {
+        type = types.listOf types.str;
+        description = ''
+          List of hosts entries to configure in <filename>/etc/nsswitch.conf</filename>.
+
+          Note that "files" is always prepended, and "dns" and "myhostname" are always appended.
+
+          This option only takes effect if nscd is enabled.
+        '';
+        default = [];
+      };
+
+      services = mkOption {
+        type = types.listOf types.str;
+        description = ''
+          List of services entries to configure in <filename>/etc/nsswitch.conf</filename>.
+
+          Note that "files" is always prepended.
+
+          This option only takes effect if nscd is enabled.
+        '';
+        default = [];
+      };
+    };
   };
 
+  imports = [
+    (mkRenamedOptionModule [ "system" "nssHosts" ] [ "system" "nssDatabases" "hosts" ])
+  ];
+
   config = {
     assertions = [
       {
@@ -79,38 +126,30 @@ in {
         assertion = config.system.nssModules.path != "" -> canLoadExternalModules;
         message = "Loading NSS modules from path ${config.system.nssModules.path} requires nscd being enabled.";
       }
-      {
-        # resolved does not need to add to nssModules, therefore needs an extra assertion
-        assertion = resolved -> canLoadExternalModules;
-        message = "Loading systemd-resolved's nss-resolve NSS module requires nscd being enabled.";
-      }
     ];
 
     # Name Service Switch configuration file.  Required by the C
-    # library.  !!! Factor out the mdns stuff.  The avahi module
-    # should define an option used by this module.
+    # library.
     environment.etc."nsswitch.conf".text = ''
-      passwd:    ${concatStringsSep " " passwdArray}
-      group:     ${concatStringsSep " " passwdArray}
-      shadow:    ${concatStringsSep " " shadowArray}
+      passwd:    ${concatStringsSep " " config.system.nssDatabases.passwd}
+      group:     ${concatStringsSep " " config.system.nssDatabases.group}
+      shadow:    ${concatStringsSep " " config.system.nssDatabases.shadow}
 
-      hosts:     ${concatStringsSep " " config.system.nssHosts}
+      hosts:     ${concatStringsSep " " config.system.nssDatabases.hosts}
       networks:  files
 
       ethers:    files
-      services:  ${concatStringsSep " " servicesArray}
+      services:  ${concatStringsSep " " config.system.nssDatabases.services}
       protocols: files
       rpc:       files
     '';
 
-    system.nssHosts = hostArray;
-
-    # Systemd provides nss-myhostname to ensure that our hostname
-    # always resolves to a valid IP address.  It returns all locally
-    # configured IP addresses, or ::1 and 127.0.0.2 as
-    # fallbacks. Systemd also provides nss-mymachines to return IP
-    # addresses of local containers.
-    system.nssModules = (optionals canLoadExternalModules [ config.systemd.package.out ])
-      ++ optional googleOsLogin pkgs.google-compute-engine-oslogin.out;
+    system.nssDatabases = {
+      passwd = passwdArray;
+      group = passwdArray;
+      shadow = shadowArray;
+      hosts = hostArray;
+      services = mkBefore [ "files" ];
+    };
   };
 }
diff --git a/nixos/modules/config/qt5.nix b/nixos/modules/config/qt5.nix
index d9dec74f155..eabba9ad95f 100644
--- a/nixos/modules/config/qt5.nix
+++ b/nixos/modules/config/qt5.nix
@@ -6,8 +6,8 @@ let
 
   cfg = config.qt5;
 
-  isQGnome = cfg.platformTheme == "gnome" && cfg.style == "adwaita";
-  isQtStyle = cfg.platformTheme == "gtk2" && cfg.style != "adwaita";
+  isQGnome = cfg.platformTheme == "gnome" && builtins.elem cfg.style ["adwaita" "adwaita-dark"];
+  isQtStyle = cfg.platformTheme == "gtk2" && !(builtins.elem cfg.style ["adwaita" "adwaita-dark"]);
 
   packages = if isQGnome then [ pkgs.qgnomeplatform pkgs.adwaita-qt ]
     else if isQtStyle then [ pkgs.libsForQt5.qtstyleplugins ]
@@ -55,6 +55,7 @@ in
       style = mkOption {
         type = types.enum [
           "adwaita"
+          "adwaita-dark"
           "cleanlooks"
           "gtk2"
           "motif"
@@ -71,6 +72,7 @@ in
           <variablelist>
             <varlistentry>
               <term><literal>adwaita</literal></term>
+              <term><literal>adwaita-dark</literal></term>
               <listitem><para>Use Adwaita Qt style with
                 <link xlink:href="https://github.com/FedoraQt/adwaita-qt">adwaita</link>
               </para></listitem>
diff --git a/nixos/modules/hardware/all-firmware.nix b/nixos/modules/hardware/all-firmware.nix
index 16be8bcfdd7..b07edb0f6ac 100644
--- a/nixos/modules/hardware/all-firmware.nix
+++ b/nixos/modules/hardware/all-firmware.nix
@@ -51,6 +51,7 @@ in {
         rtlwifi_new-firmware
         zd1211fw
         alsa-firmware
+        sof-firmware
         openelec-dvb-firmware
       ] ++ optional (pkgs.stdenv.hostPlatform.isAarch32 || pkgs.stdenv.hostPlatform.isAarch64) raspberrypiWirelessFirmware
         ++ optionals (versionOlder config.boot.kernelPackages.kernel.version "4.13") [
diff --git a/nixos/modules/hardware/device-tree.nix b/nixos/modules/hardware/device-tree.nix
index f57502d4c83..cf553497c89 100644
--- a/nixos/modules/hardware/device-tree.nix
+++ b/nixos/modules/hardware/device-tree.nix
@@ -19,7 +19,7 @@ in {
         base = mkOption {
           default = "${config.boot.kernelPackages.kernel}/dtbs";
           defaultText = "\${config.boot.kernelPackages.kernel}/dtbs";
-          example = literalExample "pkgs.deviceTree_rpi";
+          example = literalExample "pkgs.device-tree_rpi";
           type = types.path;
           description = ''
             The package containing the base device-tree (.dtb) to boot. Contains
@@ -30,7 +30,7 @@ in {
         overlays = mkOption {
           default = [];
           example = literalExample
-            "[\"\${pkgs.deviceTree_rpi.overlays}/w1-gpio.dtbo\"]";
+            "[\"\${pkgs.device-tree_rpi.overlays}/w1-gpio.dtbo\"]";
           type = types.listOf types.path;
           description = ''
             A path containing device tree overlays (.dtbo) to be applied to all
diff --git a/nixos/modules/hardware/opengl.nix b/nixos/modules/hardware/opengl.nix
index 28cddea8b79..061528f4b1b 100644
--- a/nixos/modules/hardware/opengl.nix
+++ b/nixos/modules/hardware/opengl.nix
@@ -10,14 +10,6 @@ let
 
   videoDrivers = config.services.xserver.videoDrivers;
 
-  makePackage = p: pkgs.buildEnv {
-    name = "mesa-drivers+txc-${p.mesa.version}";
-    paths =
-      [ p.mesa.drivers
-        (if cfg.s3tcSupport then p.libtxc_dxtn else p.libtxc_dxtn_s2tc)
-      ];
-  };
-
   package = pkgs.buildEnv {
     name = "opengl-drivers";
     paths = [ cfg.package ] ++ cfg.extraPackages;
@@ -34,6 +26,9 @@ in
 
   imports = [
     (mkRenamedOptionModule [ "services" "xserver" "vaapiDrivers" ] [ "hardware" "opengl" "extraPackages" ])
+    (mkRemovedOptionModule [ "hardware" "opengl" "s3tcSupport" ] ''
+      S3TC support is now always enabled in Mesa.
+    '')
   ];
 
   options = {
@@ -74,17 +69,6 @@ in
         '';
       };
 
-      s3tcSupport = mkOption {
-        type = types.bool;
-        default = false;
-        description = ''
-          Make S3TC(S3 Texture Compression) via libtxc_dxtn available
-          to OpenGL drivers instead of the patent-free S2TC replacement.
-
-          Using this library may require a patent license depending on your location.
-        '';
-      };
-
       package = mkOption {
         type = types.package;
         internal = true;
@@ -166,8 +150,8 @@ in
     environment.sessionVariables.LD_LIBRARY_PATH = mkIf cfg.setLdLibraryPath
       ([ "/run/opengl-driver/lib" ] ++ optional cfg.driSupport32Bit "/run/opengl-driver-32/lib");
 
-    hardware.opengl.package = mkDefault (makePackage pkgs);
-    hardware.opengl.package32 = mkDefault (makePackage pkgs.pkgsi686Linux);
+    hardware.opengl.package = mkDefault pkgs.mesa.drivers;
+    hardware.opengl.package32 = mkDefault pkgs.pkgsi686Linux.mesa.drivers;
 
     boot.extraModulePackages = optional (elem "virtualbox" videoDrivers) kernelPackages.virtualboxGuestAdditions;
   };
diff --git a/nixos/modules/hardware/video/nvidia.nix b/nixos/modules/hardware/video/nvidia.nix
index 7461e231402..6328971492c 100644
--- a/nixos/modules/hardware/video/nvidia.nix
+++ b/nixos/modules/hardware/video/nvidia.nix
@@ -34,10 +34,12 @@ let
   enabled = nvidia_x11 != null;
 
   cfg = config.hardware.nvidia;
+
   pCfg = cfg.prime;
   syncCfg = pCfg.sync;
   offloadCfg = pCfg.offload;
   primeEnabled = syncCfg.enable || offloadCfg.enable;
+  nvidiaPersistencedEnabled =  cfg.nvidiaPersistenced;
 in
 
 {
@@ -50,6 +52,15 @@ in
     ];
 
   options = {
+    hardware.nvidia.powerManagement.enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = ''
+        Experimental power management through systemd. For more information, see
+        the NVIDIA docs, Chapter 21, Configuring Power Management Support.
+      '';
+    };
+
     hardware.nvidia.modesetting.enable = mkOption {
       type = types.bool;
       default = false;
@@ -129,6 +140,15 @@ in
         <option>hardware.nvidia.prime.intelBusId</option>).
       '';
     };
+
+    hardware.nvidia.nvidiaPersistenced = mkOption {
+      default = false;
+      type = types.bool;
+      description = ''
+        Run the NVIDIA persistence daemon (nvidia-persistenced) for headless mode.
+        It ensures all GPUs stay awake even during headless mode.
+      '';
+    };
   };
 
   config = mkIf enabled {
@@ -215,6 +235,46 @@ in
     environment.systemPackages = [ nvidia_x11.bin nvidia_x11.settings ]
       ++ filter (p: p != null) [ nvidia_x11.persistenced ];
 
+    systemd.packages = optional cfg.powerManagement.enable nvidia_x11.out;
+
+    systemd.services = let
+      baseNvidiaService = state: {
+        description = "NVIDIA system ${state} actions";
+
+        path = with pkgs; [ kbd ];
+        serviceConfig = {
+          Type = "oneshot";
+          ExecStart = "${nvidia_x11.out}/bin/nvidia-sleep.sh '${state}'";
+        };
+      };
+
+      nvidiaService = sleepState: (baseNvidiaService sleepState) // {
+        before = [ "systemd-${sleepState}.service" ];
+        requiredBy = [ "systemd-${sleepState}.service" ];
+      };
+
+      services = (builtins.listToAttrs (map (t: nameValuePair "nvidia-${t}" (nvidiaService t)) ["hibernate" "suspend"]))
+        // {
+          nvidia-resume = (baseNvidiaService "resume") // {
+            after = [ "systemd-suspend.service" "systemd-hibernate.service" ];
+            requiredBy = [ "systemd-suspend.service" "systemd-hibernate.service" ];
+          };
+        };
+    in optionalAttrs cfg.powerManagement.enable services
+      // optionalAttrs nvidiaPersistencedEnabled {
+        "nvidia-persistenced" = mkIf nvidiaPersistencedEnabled {
+          description = "NVIDIA Persistence Daemon";
+          wantedBy = [ "multi-user.target" ];
+          serviceConfig = {
+            Type = "forking";
+            Restart = "always";
+            PIDFile = "/var/run/nvidia-persistenced/nvidia-persistenced.pid";
+            ExecStart = "${nvidia_x11.persistenced}/bin/nvidia-persistenced --verbose";
+            ExecStopPost = "${pkgs.coreutils}/bin/rm -rf /var/run/nvidia-persistenced";
+          };
+        };
+      };
+
     systemd.tmpfiles.rules = optional config.virtualisation.docker.enableNvidia
         "L+ /run/nvidia-docker/bin - - - - ${nvidia_x11.bin}/origBin"
       ++ optional (nvidia_x11.persistenced != null && config.virtualisation.docker.enableNvidia)
@@ -227,7 +287,8 @@ in
       optionals config.services.xserver.enable [ "nvidia" "nvidia_modeset" "nvidia_drm" ];
 
     # If requested enable modesetting via kernel parameter.
-    boot.kernelParams = optional (offloadCfg.enable || cfg.modesetting.enable) "nvidia-drm.modeset=1";
+    boot.kernelParams = optional (offloadCfg.enable || cfg.modesetting.enable) "nvidia-drm.modeset=1"
+      ++ optional cfg.powerManagement.enable "nvidia.NVreg_PreserveVideoMemoryAllocations=1";
 
     # Create /dev/nvidia-uvm when the nvidia-uvm module is loaded.
     services.udev.extraRules =
diff --git a/nixos/modules/installer/cd-dvd/installation-cd-base.nix b/nixos/modules/installer/cd-dvd/installation-cd-base.nix
index 9303e8fde13..6c7ea293e8a 100644
--- a/nixos/modules/installer/cd-dvd/installation-cd-base.nix
+++ b/nixos/modules/installer/cd-dvd/installation-cd-base.nix
@@ -1,7 +1,7 @@
 # This module contains the basic configuration for building a NixOS
 # installation CD.
 
-{ config, lib, pkgs, ... }:
+{ config, lib, options, pkgs, ... }:
 
 with lib;
 
@@ -15,6 +15,9 @@ with lib;
       ../../profiles/installation-device.nix
     ];
 
+  # Adds terminus_font for people with HiDPI displays
+  console.packages = options.console.packages.default ++ [ pkgs.terminus_font ];
+
   # ISO naming.
   isoImage.isoName = "${config.isoImage.isoBaseName}-${config.system.nixos.label}-${pkgs.stdenv.hostPlatform.system}.iso";
 
diff --git a/nixos/modules/installer/cd-dvd/installation-cd-graphical-gnome.nix b/nixos/modules/installer/cd-dvd/installation-cd-graphical-gnome.nix
index 84394a1ecae..3707c4b7ec6 100644
--- a/nixos/modules/installer/cd-dvd/installation-cd-graphical-gnome.nix
+++ b/nixos/modules/installer/cd-dvd/installation-cd-graphical-gnome.nix
@@ -11,9 +11,6 @@ with lib;
 
   services.xserver.desktopManager.gnome3.enable = true;
 
-  # Wayland can be problematic for some hardware like Nvidia graphics cards.
-  services.xserver.displayManager.defaultSession = "gnome-xorg";
-
   services.xserver.displayManager.gdm = {
     enable = true;
     # autoSuspend makes the machine automatically suspend after inactivity.
diff --git a/nixos/modules/installer/tools/nix-fallback-paths.nix b/nixos/modules/installer/tools/nix-fallback-paths.nix
index 9038bfbe3bd..842976c3574 100644
--- a/nixos/modules/installer/tools/nix-fallback-paths.nix
+++ b/nixos/modules/installer/tools/nix-fallback-paths.nix
@@ -1,6 +1,6 @@
 {
-  x86_64-linux = "/nix/store/3swr40kk8ll7pi9dd7b4npr25hyn5qhv-nix-2.3.4-debug";
+  x86_64-linux = "/nix/store/8928ygfyf9iassfrnj76v55s6zid58ja-nix-2.3.4";
   i686-linux = "/nix/store/b5cx3nmba9ahx3wk5ybxa67k40pdpdxn-nix-2.3.4";
-  aarch64-linux = "/nix/store/lz9s7vi1kvgqzvrcjwwdszx8an61n266-nix-2.3.4-debug";
+  aarch64-linux = "/nix/store/p6j4mis6agdjlk4j0cyg7yh58wpm3kif-nix-2.3.4";
   x86_64-darwin = "/nix/store/aizhr07dljmlbf17wfrj40x3s0b5iv3d-nix-2.3.4";
 }
diff --git a/nixos/modules/installer/tools/nixos-build-vms/build-vms.nix b/nixos/modules/installer/tools/nixos-build-vms/build-vms.nix
index 90f0702f717..0c9f8522cc1 100644
--- a/nixos/modules/installer/tools/nixos-build-vms/build-vms.nix
+++ b/nixos/modules/installer/tools/nixos-build-vms/build-vms.nix
@@ -3,7 +3,12 @@
 , networkExpr
 }:
 
-let nodes = import networkExpr; in
+let
+  nodes = builtins.mapAttrs (vm: module: {
+    _file = "${networkExpr}@node-${vm}";
+    imports = [ module ];
+  }) (import networkExpr);
+in
 
 with import ../../../../lib/testing-python.nix {
   inherit system;
diff --git a/nixos/modules/installer/tools/nixos-generate-config.pl b/nixos/modules/installer/tools/nixos-generate-config.pl
index a32c19a4eba..422c405054d 100644
--- a/nixos/modules/installer/tools/nixos-generate-config.pl
+++ b/nixos/modules/installer/tools/nixos-generate-config.pl
@@ -180,7 +180,7 @@ sub pciCheck {
         ) )
     {
         # we need e.g. brcmfmac43602-pcie.bin
-        push @imports, "<nixpkgs/nixos/modules/hardware/network/broadcom-43xx.nix>";
+        push @imports, "(modulesPath + \"/hardware/network/broadcom-43xx.nix\")";
     }
 
     # Can't rely on $module here, since the module may not be loaded
@@ -279,7 +279,7 @@ if ($virt eq "oracle") {
 
 # Likewise for QEMU.
 if ($virt eq "qemu" || $virt eq "kvm" || $virt eq "bochs") {
-    push @imports, "<nixpkgs/nixos/modules/profiles/qemu-guest.nix>";
+    push @imports, "(modulesPath + \"/profiles/qemu-guest.nix\")";
 }
 
 # Also for Hyper-V.
@@ -296,7 +296,7 @@ if ($virt eq "systemd-nspawn") {
 
 # Provide firmware for devices that are not detected by this script,
 # unless we're in a VM/container.
-push @imports, "<nixpkgs/nixos/modules/installer/scan/not-detected.nix>"
+push @imports, "(modulesPath + \"/installer/scan/not-detected.nix\")"
     if $virt eq "none";
 
 
@@ -549,7 +549,7 @@ my $hwConfig = <<EOF;
 # Do not modify this file!  It was generated by ‘nixos-generate-config’
 # and may be overwritten by future invocations.  Please make changes
 # to /etc/nixos/configuration.nix instead.
-{ config, lib, pkgs, ... }:
+{ config, lib, pkgs, modulesPath, ... }:
 
 {
   imports =${\multiLineList("    ", @imports)};
diff --git a/nixos/modules/installer/tools/nixos-install.sh b/nixos/modules/installer/tools/nixos-install.sh
index a3ff3fe2c0c..1bccbbfaf24 100644
--- a/nixos/modules/installer/tools/nixos-install.sh
+++ b/nixos/modules/installer/tools/nixos-install.sh
@@ -15,7 +15,6 @@ mountPoint=/mnt
 channelPath=
 system=
 verbosity=()
-buildLogs=
 
 while [ "$#" -gt 0 ]; do
     i="$1"; shift 1
@@ -60,9 +59,6 @@ while [ "$#" -gt 0 ]; do
         -v*|--verbose)
             verbosity+=("$i")
             ;;
-        -L|--print-build-logs)
-            buildLogs="$i"
-            ;;
         *)
             echo "$0: unknown option \`$i'"
             exit 1
@@ -91,8 +87,11 @@ if [[ ! -e $NIXOS_CONFIG && -z $system ]]; then
 fi
 
 # A place to drop temporary stuff.
+tmpdir="$(mktemp -d -p $mountPoint)"
 trap "rm -rf $tmpdir" EXIT
-tmpdir="$(mktemp -d)"
+
+# store temporary files on target filesystem by default
+export TMPDIR=${TMPDIR:-$tmpdir}
 
 sub="auto?trusted=1"
 
@@ -100,9 +99,9 @@ sub="auto?trusted=1"
 if [[ -z $system ]]; then
     echo "building the configuration in $NIXOS_CONFIG..."
     outLink="$tmpdir/system"
-    nix build --out-link "$outLink" --store "$mountPoint" "${extraBuildFlags[@]}" \
+    nix-build --out-link "$outLink" --store "$mountPoint" "${extraBuildFlags[@]}" \
         --extra-substituters "$sub" \
-        -f '<nixpkgs/nixos>' system -I "nixos-config=$NIXOS_CONFIG" ${verbosity[@]} ${buildLogs}
+        '<nixpkgs/nixos>' -A system -I "nixos-config=$NIXOS_CONFIG" ${verbosity[@]}
     system=$(readlink -f $outLink)
 fi
 
diff --git a/nixos/modules/installer/tools/tools.nix b/nixos/modules/installer/tools/tools.nix
index 655d77db157..11128621424 100644
--- a/nixos/modules/installer/tools/tools.nix
+++ b/nixos/modules/installer/tools/tools.nix
@@ -111,10 +111,10 @@ in
         # networking.proxy.noProxy = "127.0.0.1,localhost,internal.domain";
 
         # Select internationalisation properties.
-        # i18n = {
-        #   consoleFont = "Lat2-Terminus16";
-        #   consoleKeyMap = "us";
-        #   defaultLocale = "en_US.UTF-8";
+        # i18n.defaultLocale = "en_US.UTF-8";
+        # console = {
+        #   font = "Lat2-Terminus16";
+        #   keyMap = "us";
         # };
 
         # Set your time zone.
diff --git a/nixos/modules/misc/version.nix b/nixos/modules/misc/version.nix
index ae98fba1580..a6fffb76f6e 100644
--- a/nixos/modules/misc/version.nix
+++ b/nixos/modules/misc/version.nix
@@ -109,8 +109,8 @@ in
         PRETTY_NAME="NixOS ${cfg.release} (${cfg.codeName})"
         LOGO="nix-snowflake"
         HOME_URL="https://nixos.org/"
-        DOCUMENTATION_URL="https://nixos.org/nixos/manual/index.html"
-        SUPPORT_URL="https://nixos.org/nixos/support.html"
+        DOCUMENTATION_URL="https://nixos.org/learn.html"
+        SUPPORT_URL="https://nixos.org/community.html"
         BUG_REPORT_URL="https://github.com/NixOS/nixpkgs/issues"
       '';
 
diff --git a/nixos/modules/module-list.nix b/nixos/modules/module-list.nix
index c3d2bb85809..40904ef0c17 100644
--- a/nixos/modules/module-list.nix
+++ b/nixos/modules/module-list.nix
@@ -196,11 +196,11 @@
   ./security/pam_usb.nix
   ./security/pam_mount.nix
   ./security/polkit.nix
-  ./security/prey.nix
   ./security/rngd.nix
   ./security/rtkit.nix
   ./security/wrappers/default.nix
   ./security/sudo.nix
+  ./security/doas.nix
   ./security/systemd-confinement.nix
   ./security/tpm2.nix
   ./services/admin/oxidized.nix
@@ -238,6 +238,7 @@
   ./services/backup/zfs-replication.nix
   ./services/backup/znapzend.nix
   ./services/cluster/hadoop/default.nix
+  ./services/cluster/k3s/default.nix
   ./services/cluster/kubernetes/addons/dns.nix
   ./services/cluster/kubernetes/addons/dashboard.nix
   ./services/cluster/kubernetes/addon-manager.nix
@@ -545,6 +546,7 @@
   ./services/monitoring/teamviewer.nix
   ./services/monitoring/telegraf.nix
   ./services/monitoring/thanos.nix
+  ./services/monitoring/tuptime.nix
   ./services/monitoring/ups.nix
   ./services/monitoring/uptime.nix
   ./services/monitoring/vnstat.nix
@@ -982,9 +984,10 @@
   ./virtualisation/anbox.nix
   ./virtualisation/container-config.nix
   ./virtualisation/containers.nix
+  ./virtualisation/nixos-containers.nix
+  ./virtualisation/oci-containers.nix
   ./virtualisation/cri-o.nix
   ./virtualisation/docker.nix
-  ./virtualisation/docker-containers.nix
   ./virtualisation/ecs-agent.nix
   ./virtualisation/libvirtd.nix
   ./virtualisation/lxc.nix
@@ -995,6 +998,7 @@
   ./virtualisation/kvmgt.nix
   ./virtualisation/openvswitch.nix
   ./virtualisation/parallels-guest.nix
+  ./virtualisation/podman.nix
   ./virtualisation/qemu-guest-agent.nix
   ./virtualisation/railcar.nix
   ./virtualisation/rkt.nix
diff --git a/nixos/modules/profiles/docker-container.nix b/nixos/modules/profiles/docker-container.nix
index 5d6b11498b5..183645de36f 100644
--- a/nixos/modules/profiles/docker-container.nix
+++ b/nixos/modules/profiles/docker-container.nix
@@ -2,6 +2,8 @@
 
 with lib;
 
+let inherit (pkgs) writeScript; in
+
 let
  pkgs2storeContents = l : map (x: { object = x; symlink = "none"; }) l;
 
@@ -30,7 +32,12 @@ in {
     ];
 
     # Some container managers like lxc need these
-    extraCommands = "mkdir -p proc sys dev";
+    extraCommands =
+      let script = writeScript "extra-commands.sh" ''
+            rm etc
+            mkdir -p proc sys dev etc
+          '';
+      in script;
   };
 
   boot.isContainer = true;
diff --git a/nixos/modules/profiles/hardened.nix b/nixos/modules/profiles/hardened.nix
index 35743d83134..ef8c0d74f06 100644
--- a/nixos/modules/profiles/hardened.nix
+++ b/nixos/modules/profiles/hardened.nix
@@ -7,7 +7,7 @@ with lib;
 
 {
   meta = {
-    maintainers = [ maintainers.joachifm ];
+    maintainers = [ maintainers.joachifm maintainers.emily ];
   };
 
   boot.kernelPackages = mkDefault pkgs.linuxPackages_hardened;
@@ -21,8 +21,6 @@ with lib;
 
   security.lockKernelModules = mkDefault true;
 
-  security.allowUserNamespaces = mkDefault false;
-
   security.protectKernelImage = mkDefault true;
 
   security.allowSimultaneousMultithreading = mkDefault false;
@@ -37,15 +35,9 @@ with lib;
     # Slab/slub sanity checks, redzoning, and poisoning
     "slub_debug=FZP"
 
-    # Disable slab merging to make certain heap overflow attacks harder
-    "slab_nomerge"
-
     # Overwrite free'd memory
     "page_poison=1"
 
-    # Disable legacy virtual syscalls
-    "vsyscall=none"
-
     # Enable page allocator randomization
     "page_alloc.shuffle=1"
   ];
@@ -82,38 +74,12 @@ with lib;
   # (e.g., parent/child)
   boot.kernel.sysctl."kernel.yama.ptrace_scope" = mkOverride 500 1;
 
-  # Restrict access to kernel ring buffer (information leaks)
-  boot.kernel.sysctl."kernel.dmesg_restrict" = mkDefault true;
-
   # Hide kptrs even for processes with CAP_SYSLOG
   boot.kernel.sysctl."kernel.kptr_restrict" = mkOverride 500 2;
 
-  # Unprivileged access to bpf() has been used for privilege escalation in
-  # the past
-  boot.kernel.sysctl."kernel.unprivileged_bpf_disabled" = mkDefault true;
-
   # Disable bpf() JIT (to eliminate spray attacks)
   boot.kernel.sysctl."net.core.bpf_jit_enable" = mkDefault false;
 
-  # ... or at least apply some hardening to it
-  boot.kernel.sysctl."net.core.bpf_jit_harden" = mkDefault true;
-
-  # Raise ASLR entropy for 64bit & 32bit, respectively.
-  #
-  # Note: mmap_rnd_compat_bits may not exist on 64bit.
-  boot.kernel.sysctl."vm.mmap_rnd_bits" = mkDefault 32;
-  boot.kernel.sysctl."vm.mmap_rnd_compat_bits" = mkDefault 16;
-
-  # Allowing users to mmap() memory starting at virtual address 0 can turn a
-  # NULL dereference bug in the kernel into code execution with elevated
-  # privilege.  Mitigate by enforcing a minimum base addr beyond the NULL memory
-  # space.  This breaks applications that require mapping the 0 page, such as
-  # dosemu or running 16bit applications under wine.  It also breaks older
-  # versions of qemu.
-  #
-  # The value is taken from the KSPP recommendations (Debian uses 4096).
-  boot.kernel.sysctl."vm.mmap_min_addr" = mkDefault 65536;
-
   # Disable ftrace debugging
   boot.kernel.sysctl."kernel.ftrace_enabled" = mkDefault false;
 
@@ -140,7 +106,4 @@ with lib;
   # Ignore outgoing ICMP redirects (this is ipv4 only)
   boot.kernel.sysctl."net.ipv4.conf.all.send_redirects" = mkDefault false;
   boot.kernel.sysctl."net.ipv4.conf.default.send_redirects" = mkDefault false;
-
-  # Restrict userfaultfd syscalls to processes with the SYS_PTRACE capability
-  boot.kernel.sysctl."vm.unprivileged_userfaultfd" = mkDefault false;
 }
diff --git a/nixos/modules/programs/captive-browser.nix b/nixos/modules/programs/captive-browser.nix
index 55d474e5c9d..26db1675072 100644
--- a/nixos/modules/programs/captive-browser.nix
+++ b/nixos/modules/programs/captive-browser.nix
@@ -28,7 +28,7 @@ in
       browser = mkOption {
         type = types.str;
         default = concatStringsSep " " [ ''${pkgs.chromium}/bin/chromium''
-                                         ''--user-data-dir=$HOME/.chromium-captive''
+                                         ''--user-data-dir=''${XDG_DATA_HOME:-$HOME/.local/share}/chromium-captive''
                                          ''--proxy-server="socks5://$PROXY"''
                                          ''--host-resolver-rules="MAP * ~NOTFOUND , EXCLUDE localhost"''
                                          ''--no-first-run''
diff --git a/nixos/modules/programs/cdemu.nix b/nixos/modules/programs/cdemu.nix
index 6a0185d362c..a59cd93cadf 100644
--- a/nixos/modules/programs/cdemu.nix
+++ b/nixos/modules/programs/cdemu.nix
@@ -8,6 +8,7 @@ in {
   options = {
     programs.cdemu = {
       enable = mkOption {
+        type = types.bool;
         default = false;
         description = ''
           <command>cdemu</command> for members of
diff --git a/nixos/modules/programs/chromium.nix b/nixos/modules/programs/chromium.nix
index 41c49db8c71..16c063ebc89 100644
--- a/nixos/modules/programs/chromium.nix
+++ b/nixos/modules/programs/chromium.nix
@@ -9,9 +9,7 @@ let
     HomepageLocation = cfg.homepageLocation;
     DefaultSearchProviderSearchURL = cfg.defaultSearchProviderSearchURL;
     DefaultSearchProviderSuggestURL = cfg.defaultSearchProviderSuggestURL;
-    ExtensionInstallForcelist = map (extension:
-      "${extension};https://clients2.google.com/service/update2/crx"
-    ) cfg.extensions;
+    ExtensionInstallForcelist = cfg.extensions;
   };
 in
 
@@ -28,7 +26,11 @@ in
           List of chromium extensions to install.
           For list of plugins ids see id in url of extensions on
           <link xlink:href="https://chrome.google.com/webstore/category/extensions">chrome web store</link>
-          page.
+          page. To install a chromium extension not included in the chrome web
+          store, append to the extension id a semicolon ";" followed by a URL
+          pointing to an Update Manifest XML file. See
+          <link xlink:href="https://www.chromium.org/administrators/policy-list-3#ExtensionInstallForcelist">ExtensionInstallForcelist</link>
+          for additional details.
         '';
         default = [];
         example = literalExample ''
diff --git a/nixos/modules/programs/criu.nix b/nixos/modules/programs/criu.nix
index 48cf5c88a9f..1714e1331a4 100644
--- a/nixos/modules/programs/criu.nix
+++ b/nixos/modules/programs/criu.nix
@@ -8,6 +8,7 @@ in {
   options = {
     programs.criu = {
       enable = mkOption {
+        type = types.bool;
         default = false;
         description = ''
           Install <command>criu</command> along with necessary kernel options.
diff --git a/nixos/modules/programs/fish.nix b/nixos/modules/programs/fish.nix
index 87f6816e4ac..48b324a0fe8 100644
--- a/nixos/modules/programs/fish.nix
+++ b/nixos/modules/programs/fish.nix
@@ -178,6 +178,10 @@ in
         set -l post (string join0 $fish_complete_path | string match --regex "[^\x00]*generated_completions.*" | string split0 | string match -er ".")
         set fish_complete_path $prev "/etc/fish/generated_completions" $post
       end
+      # prevent fish from generating completions on first run
+      if not test -d $__fish_user_data_dir/generated_completions
+        ${pkgs.coreutils}/bin/mkdir $__fish_user_data_dir/generated_completions
+      end
     '';
 
     environment.etc."fish/generated_completions".source =
diff --git a/nixos/modules/programs/npm.nix b/nixos/modules/programs/npm.nix
index b351d80c7ac..f101a44587a 100644
--- a/nixos/modules/programs/npm.nix
+++ b/nixos/modules/programs/npm.nix
@@ -13,7 +13,14 @@ in
     programs.npm = {
       enable = mkEnableOption "<command>npm</command> global config";
 
-      npmrc = lib.mkOption {
+      package = mkOption {
+        type = types.path;
+        description = "The npm package version / flavor to use";
+        default = pkgs.nodePackages.npm;
+        example = literalExample "pkgs.nodePackages_13_x.npm";
+      };
+
+      npmrc = mkOption {
         type = lib.types.lines;
         description = ''
           The system-wide npm configuration.
@@ -40,7 +47,7 @@ in
 
     environment.variables.NPM_CONFIG_GLOBALCONFIG = "/etc/npmrc";
 
-    environment.systemPackages = [ pkgs.nodePackages.npm ];
+    environment.systemPackages = [ cfg.package ];
   };
 
 }
diff --git a/nixos/modules/programs/singularity.nix b/nixos/modules/programs/singularity.nix
index b27e122bd1d..6ac64a81fc2 100644
--- a/nixos/modules/programs/singularity.nix
+++ b/nixos/modules/programs/singularity.nix
@@ -5,8 +5,8 @@ let
   cfg = config.programs.singularity;
   singularity = pkgs.singularity.overrideAttrs (attrs : {
     installPhase = attrs.installPhase + ''
-      mv $bin/libexec/singularity/bin/starter-suid $bin/libexec/singularity/bin/starter-suid.orig
-      ln -s /run/wrappers/bin/singularity-suid $bin/libexec/singularity/bin/starter-suid
+      mv $out/libexec/singularity/bin/starter-suid $out/libexec/singularity/bin/starter-suid.orig
+      ln -s /run/wrappers/bin/singularity-suid $out/libexec/singularity/bin/starter-suid
     '';
   });
 in {
diff --git a/nixos/modules/programs/systemtap.nix b/nixos/modules/programs/systemtap.nix
index ca81e018c9d..360e106678e 100644
--- a/nixos/modules/programs/systemtap.nix
+++ b/nixos/modules/programs/systemtap.nix
@@ -8,6 +8,7 @@ in {
   options = {
     programs.systemtap = {
       enable = mkOption {
+        type = types.bool;
         default = false;
         description = ''
           Install <command>systemtap</command> along with necessary kernel options.
diff --git a/nixos/modules/programs/venus.nix b/nixos/modules/programs/venus.nix
index 110570ac3f0..58faf38777d 100644
--- a/nixos/modules/programs/venus.nix
+++ b/nixos/modules/programs/venus.nix
@@ -75,7 +75,7 @@ in
       };
 
       link = mkOption {
-        default = "http://planet.nixos.org";
+        default = "https://planet.nixos.org";
         type = types.str;
         description = ''
           Link to the main page.
diff --git a/nixos/modules/programs/xonsh.nix b/nixos/modules/programs/xonsh.nix
index 1590020f7b6..c06fd1655c2 100644
--- a/nixos/modules/programs/xonsh.nix
+++ b/nixos/modules/programs/xonsh.nix
@@ -45,7 +45,32 @@ in
 
   config = mkIf cfg.enable {
 
-    environment.etc.xonshrc.text = cfg.config;
+    environment.etc.xonshrc.text = ''
+      # /etc/xonshrc: DO NOT EDIT -- this file has been generated automatically.
+
+
+      if not ''${...}.get('__NIXOS_SET_ENVIRONMENT_DONE'):
+          # The NixOS environment and thereby also $PATH
+          # haven't been fully set up at this point. But
+          # `source-bash` below requires `bash` to be on $PATH,
+          # so add an entry with bash's location:
+          $PATH.add('${pkgs.bash}/bin')
+
+          # Stash xonsh's ls alias, so that we don't get a collision
+          # with Bash's ls alias from environment.shellAliases:
+          _ls_alias = aliases.pop('ls', None)
+
+          # Source the NixOS environment config.
+          source-bash "${config.system.build.setEnvironment}"
+
+          # Restore xonsh's ls alias, overriding that from Bash (if any).
+          if _ls_alias is not None:
+              aliases['ls'] = _ls_alias
+          del _ls_alias
+
+
+      ${cfg.config}
+    '';
 
     environment.systemPackages = [ cfg.package ];
 
diff --git a/nixos/modules/programs/zsh/oh-my-zsh.nix b/nixos/modules/programs/zsh/oh-my-zsh.nix
index 932a780a356..f24842a4791 100644
--- a/nixos/modules/programs/zsh/oh-my-zsh.nix
+++ b/nixos/modules/programs/zsh/oh-my-zsh.nix
@@ -39,6 +39,7 @@ in
     options = {
       programs.zsh.ohMyZsh = {
         enable = mkOption {
+          type = types.bool;
           default = false;
           description = ''
             Enable oh-my-zsh.
diff --git a/nixos/modules/rename.nix b/nixos/modules/rename.nix
index 410db8fd84e..a946268494e 100644
--- a/nixos/modules/rename.nix
+++ b/nixos/modules/rename.nix
@@ -49,6 +49,10 @@ with lib;
       simply add the brightnessctl package to environment.systemPackages.
     '')
 
+    (mkRemovedOptionModule ["services" "prey" ] ''
+      prey-bash-client is deprecated upstream
+    '')
+
     # Do NOT add any option renames here, see top of the file
   ];
 }
diff --git a/nixos/modules/security/acme.nix b/nixos/modules/security/acme.nix
index df5f33c9461..776ef07d716 100644
--- a/nixos/modules/security/acme.nix
+++ b/nixos/modules/security/acme.nix
@@ -87,19 +87,19 @@ let
         default = {};
         example = literalExample ''
           {
-            "example.org" = "/srv/http/nginx";
+            "example.org" = null;
             "mydomain.org" = null;
           }
         '';
         description = ''
-          A list of extra domain names, which are included in the one certificate to be issued, with their
-          own server roots if needed.
+          A list of extra domain names, which are included in the one certificate to be issued.
+          Setting a distinct server root is deprecated and not functional in 20.03 and later.
         '';
       };
 
       keyType = mkOption {
         type = types.str;
-        default = "ec384";
+        default = "ec256";
         description = ''
           Key type to use for private keys.
           For an up to date list of supported values check the --key-type option
@@ -250,7 +250,7 @@ in
             "example.com" = {
               webroot = "/var/www/challenges/";
               email = "foo@example.com";
-              extraDomains = { "www.example.com" = null; "foo.example.com" = "/var/www/foo/"; };
+              extraDomains = { "www.example.com" = null; "foo.example.com" = null; };
             };
             "bar.example.com" = {
               webroot = "/var/www/challenges/";
@@ -321,22 +321,17 @@ in
                   wantedBy = mkIf (!config.boot.isContainer) [ "multi-user.target" ];
                   serviceConfig = {
                     Type = "oneshot";
-                    # With RemainAfterExit the service is considered active even
-                    # after the main process having exited, which means when it
-                    # gets changed, the activation phase restarts it, meaning
-                    # the permissions of the StateDirectory get adjusted
-                    # according to the specified group
-                    RemainAfterExit = true;
                     User = data.user;
                     Group = data.group;
                     PrivateTmp = true;
-                    StateDirectory = "acme/.lego/${cert} ${lpath}";
+                    StateDirectory = "acme/.lego/${cert} acme/.lego/accounts ${lpath}";
                     StateDirectoryMode = if data.allowKeysForGroup then "750" else "700";
                     WorkingDirectory = spath;
                     # Only try loading the credentialsFile if the dns challenge is enabled
                     EnvironmentFile = if data.dnsProvider != null then data.credentialsFile else null;
                     ExecStart = pkgs.writeScript "acme-start" ''
                       #!${pkgs.runtimeShell} -e
+                      test -L ${spath}/accounts -o -d ${spath}/accounts || ln -s ../accounts ${spath}/accounts
                       ${pkgs.lego}/bin/lego ${renewOpts} || ${pkgs.lego}/bin/lego ${runOpts}
                     '';
                     ExecStartPost =
@@ -348,7 +343,9 @@ in
 
                           # Test that existing cert is older than new cert
                           KEY=${spath}/certificates/${keyName}.key
+                          KEY_CHANGED=no
                           if [ -e $KEY -a $KEY -nt key.pem ]; then
+                            KEY_CHANGED=yes
                             cp -p ${spath}/certificates/${keyName}.key key.pem
                             cp -p ${spath}/certificates/${keyName}.crt fullchain.pem
                             cp -p ${spath}/certificates/${keyName}.issuer.crt chain.pem
@@ -359,7 +356,10 @@ in
                           chmod ${fileMode} *.pem
                           chown '${data.user}:${data.group}' *.pem
 
-                          ${data.postRun}
+                          if [ "$KEY_CHANGED" = "yes" ]; then
+                            : # noop in case postRun is empty
+                            ${data.postRun}
+                          fi
                         '';
                       in
                         "+${script}";
@@ -458,7 +458,7 @@ in
   ];
 
   meta = {
-    maintainers = with lib.maintainers; [ abbradar fpletz globin m1cr0man ];
+    maintainers = lib.teams.acme.members;
     doc = ./acme.xml;
   };
 }
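
The hunk above lowers the default keyType from ec384 to ec256 and only runs postRun when the key actually changed. A minimal sketch of a certificate entry exercising both options, assuming the per-certificate keyType and postRun options behave as this diff describes:

    security.acme.certs."foo.example.com" = {
      webroot = "/var/lib/acme/.challenges";
      email = "foo@example.com";
      # Keep the previous ec384 key type instead of the new ec256 default.
      keyType = "ec384";
      # With the KEY_CHANGED guard above, this only runs when the cert was actually renewed.
      postRun = "systemctl reload nginx.service";
    };
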
diff --git a/nixos/modules/security/acme.xml b/nixos/modules/security/acme.xml
index 2b29c117484..f802faee974 100644
--- a/nixos/modules/security/acme.xml
+++ b/nixos/modules/security/acme.xml
@@ -6,92 +6,249 @@
  <title>SSL/TLS Certificates with ACME</title>
  <para>
   NixOS supports automatic domain validation &amp; certificate retrieval and
-  renewal using the ACME protocol. This is currently only implemented by and
-  for Let's Encrypt. The alternative ACME client <literal>lego</literal> is
-  used under the hood.
+  renewal using the ACME protocol. Any provider can be used, but by default
+  NixOS uses Let's Encrypt. The alternative ACME client <literal>lego</literal>
+  is used under the hood.
+ </para>
+ <para>
+  Automatic cert validation and configuration for Apache and Nginx virtual
+  hosts is included in NixOS, however if you would like to generate a wildcard
+  cert or you are not using a web server you will have to configure DNS
+  based validation.
  </para>
  <section xml:id="module-security-acme-prerequisites">
   <title>Prerequisites</title>
 
   <para>
-   You need to have a running HTTP server for verification. The server must
-   have a webroot defined that can serve
+   To use the ACME module, you must accept the provider's terms of service
+   by setting <literal><xref linkend="opt-security.acme.acceptTerms" /></literal>
+   to <literal>true</literal>. The Let's Encrypt ToS can be found
+   <link xlink:href="https://letsencrypt.org/repository/">here</link>.
+  </para>
+
+  <para>
+   You must also set an email address to be used when creating accounts with
+   Let's Encrypt. You can set this for all certs with
+   <literal><xref linkend="opt-security.acme.email" /></literal>
+   and/or on a per-cert basis with
+   <literal><xref linkend="opt-security.acme.certs._name_.email" /></literal>.
+   This address is only used for registration and renewal reminders,
+   and cannot be used to administer the certificates in any way.
+  </para>
+
+  <para>
+   Alternatively, you can use a different ACME server by changing the
+   <literal><xref linkend="opt-security.acme.server" /></literal> option
+   to a provider of your choosing, or just change the server for one cert with
+   <literal><xref linkend="opt-security.acme.certs._name_.server" /></literal>.
+  </para>
+
+  <para>
+   You will need an HTTP server or DNS server for verification. For HTTP,
+   the server must have a webroot defined that can serve
    <filename>.well-known/acme-challenge</filename>. This directory must be
-   writeable by the user that will run the ACME client.
+   writeable by the user that will run the ACME client. For DNS, you must
+   set up credentials with your provider/server for use with lego.
   </para>
+ </section>
+ <section xml:id="module-security-acme-nginx">
+  <title>Using ACME certificates in Nginx</title>
 
   <para>
-   For instance, this generic snippet could be used for Nginx:
+   NixOS supports fetching ACME certificates for you by setting
+   <literal><link linkend="opt-services.nginx.virtualHosts._name_.enableACME">enableACME</link>
+   = true;</literal> in a virtualHost config. We first create self-signed
+   placeholder certificates in place of the real ACME certs. The placeholder
+   certs are overwritten when the ACME certs arrive. For
+   <literal>foo.example.com</literal> the config would look like this:
+  </para>
+
 <programlisting>
-http {
-  server {
-    server_name _;
-    listen 80;
-    listen [::]:80;
-
-    location /.well-known/acme-challenge {
-      root /var/www/challenges;
-    }
+<xref linkend="opt-security.acme.acceptTerms" /> = true;
+<xref linkend="opt-security.acme.email" /> = "admin+acme@example.com";
+services.nginx = {
+  <link linkend="opt-services.nginx.enable">enable</link> = true;
+  <link linkend="opt-services.nginx.virtualHosts">virtualHosts</link> = {
+    "foo.example.com" = {
+      <link linkend="opt-services.nginx.virtualHosts._name_.forceSSL">forceSSL</link> = true;
+      <link linkend="opt-services.nginx.virtualHosts._name_.enableACME">enableACME</link> = true;
+      # All serverAliases will be added as <link linkend="opt-security.acme.certs._name_.extraDomains">extra domains</link> on the certificate.
+      <link linkend="opt-services.nginx.virtualHosts._name_.serverAliases">serverAliases</link> = [ "bar.example.com" ];
+      locations."/" = {
+        <link linkend="opt-services.nginx.virtualHosts._name_.locations._name_.root">root</link> = "/var/www";
+      };
+    };
 
-    location / {
-      return 301 https://$host$request_uri;
-    }
-  }
+    # We can also add a different vhost and reuse the same certificate
+    # but we have to append extraDomains manually.
+    <link linkend="opt-security.acme.certs._name_.extraDomains">security.acme.certs."foo.example.com".extraDomains."baz.example.com"</link> = null;
+    "baz.example.com" = {
+      <link linkend="opt-services.nginx.virtualHosts._name_.forceSSL">forceSSL</link> = true;
+      <link linkend="opt-services.nginx.virtualHosts._name_.useACMEHost">useACMEHost</link> = "foo.example.com";
+      locations."/" = {
+        <link linkend="opt-services.nginx.virtualHosts._name_.locations._name_.root">root</link> = "/var/www";
+      };
+    };
+  };
 }
 </programlisting>
+ </section>
+ <section xml:id="module-security-acme-httpd">
+  <title>Using ACME certificates in Apache/httpd</title>
+
+  <para>
+   Using ACME certificates with Apache virtual hosts is identical
+   to using them with Nginx. The attribute names are all the same, just replace
+   "nginx" with "httpd" where appropriate.
   </para>
  </section>
  <section xml:id="module-security-acme-configuring">
-  <title>Configuring</title>
+  <title>Manual configuration of HTTP-01 validation</title>
 
   <para>
-   To enable ACME certificate retrieval &amp; renewal for a certificate for
-   <literal>foo.example.com</literal>, add the following in your
-   <filename>configuration.nix</filename>:
+   First off, you will need to set up a virtual host to serve the challenges.
+   This example uses a vhost called <literal>certs.example.com</literal>, with
+   the intent that you will generate certs for all your vhosts and redirect
+   everyone to HTTPS.
+  </para>
+
+<programlisting>
+<xref linkend="opt-security.acme.acceptTerms" /> = true;
+<xref linkend="opt-security.acme.email" /> = "admin+acme@example.com";
+services.nginx = {
+  <link linkend="opt-services.nginx.enable">enable</link> = true;
+  <link linkend="opt-services.nginx.virtualHosts">virtualHosts</link> = {
+    "acmechallenge.example.com" = {
+      # Catchall vhost, will redirect users to HTTPS for all vhosts
+      <link linkend="opt-services.nginx.virtualHosts._name_.serverAliases">serverAliases</link> = [ "*.example.com" ];
+      # /var/lib/acme/.challenges must be writable by the ACME user
+      # and readable by the Nginx user.
+      # By default, this is the case.
+      locations."/.well-known/acme-challenge" = {
+        <link linkend="opt-services.nginx.virtualHosts._name_.locations._name_.root">root</link> = "/var/lib/acme/.challenges";
+      };
+      locations."/" = {
+        <link linkend="opt-services.nginx.virtualHosts._name_.locations._name_.return">return</link> = "301 https://$host$request_uri";
+      };
+    };
+  };
+}
+# Alternative config for Apache
+services.httpd = {
+  <link linkend="opt-services.httpd.enable">enable = true;</link>
+  <link linkend="opt-services.httpd.virtualHosts">virtualHosts</link> = {
+    "acmechallenge.example.com" = {
+      # Catchall vhost, will redirect users to HTTPS for all vhosts
+      <link linkend="opt-services.httpd.virtualHosts._name_.serverAliases">serverAliases</link> = [ "*.example.com" ];
+      # /var/lib/acme/.challenges must be writable by the ACME user and readable by the Apache user.
+      # By default, this is the case.
+      <link linkend="opt-services.httpd.virtualHosts._name_.documentRoot">documentRoot</link> = "/var/lib/acme/.challenges";
+      <link linkend="opt-services.httpd.virtualHosts._name_.extraConfig">extraConfig</link> = ''
+        RewriteEngine On
+        RewriteCond %{HTTPS} off
+        RewriteCond %{REQUEST_URI} !^/\.well-known/acme-challenge [NC]
+        RewriteRule (.*) https://%{HTTP_HOST}%{REQUEST_URI} [R=301]
+      '';
+    };
+  };
+}
+</programlisting>
+
+  <para>
+   Now you need to configure ACME to generate a certificate.
+  </para>
+
 <programlisting>
 <xref linkend="opt-security.acme.certs"/>."foo.example.com" = {
-  <link linkend="opt-security.acme.certs._name_.webroot">webroot</link> = "/var/www/challenges";
+  <link linkend="opt-security.acme.certs._name_.webroot">webroot</link> = "/var/lib/acme/.challenges";
   <link linkend="opt-security.acme.certs._name_.email">email</link> = "foo@example.com";
+  # Since we have a wildcard vhost to handle port 80,
+  # we can generate certs for anything!
+  # Just make sure your DNS resolves them.
+  <link linkend="opt-security.acme.certs._name_.extraDomains">extraDomains</link> = [ "mail.example.com" ];
 };
 </programlisting>
-  </para>
 
   <para>
    The private key <filename>key.pem</filename> and certificate
    <filename>fullchain.pem</filename> will be put into
    <filename>/var/lib/acme/foo.example.com</filename>.
   </para>
+
   <para>
    Refer to <xref linkend="ch-options" /> for all available configuration
    options for the <link linkend="opt-security.acme.certs">security.acme</link>
    module.
   </para>
  </section>
- <section xml:id="module-security-acme-nginx">
-  <title>Using ACME certificates in Nginx</title>
+ <section xml:id="module-security-acme-config-dns">
+  <title>Configuring ACME for DNS validation</title>
 
   <para>
-   NixOS supports fetching ACME certificates for you by setting
-   <literal><link linkend="opt-services.nginx.virtualHosts._name_.enableACME">enableACME</link>
-   = true;</literal> in a virtualHost config. We first create self-signed
-   placeholder certificates in place of the real ACME certs. The placeholder
-   certs are overwritten when the ACME certs arrive. For
-   <literal>foo.example.com</literal> the config would look like.
+   This is useful if you want to generate a wildcard certificate, since
+   ACME servers will only hand out wildcard certs over DNS validation.
+   There are a number of supported DNS providers and servers you can utilise,
+   see the <link xlink:href="https://go-acme.github.io/lego/dns/">lego docs</link>
+   for provider/server specific configuration values. For the sake of these
+   docs, we will provide a fully self-hosted example using bind.
   </para>
 
 <programlisting>
-services.nginx = {
-  <link linkend="opt-services.nginx.enable">enable = true;</link>
-  <link linkend="opt-services.nginx.virtualHosts">virtualHosts</link> = {
-    "foo.example.com" = {
-      <link linkend="opt-services.nginx.virtualHosts._name_.forceSSL">forceSSL</link> = true;
-      <link linkend="opt-services.nginx.virtualHosts._name_.enableACME">enableACME</link> = true;
-      locations."/" = {
-        <link linkend="opt-services.nginx.virtualHosts._name_.locations._name_.root">root</link> = "/var/www";
-      };
-    };
-  };
+services.bind = {
+  <link linkend="opt-services.bind.enable">enable</link> = true;
+  <link linkend="opt-services.bind.extraConfig">extraConfig</link> = ''
+    include "/var/lib/secrets/dnskeys.conf";
+  '';
+  <link linkend="opt-services.bind.zones">zones</link> = [
+    rec {
+      name = "example.com";
+      file = "/var/db/bind/${name}";
+      master = true;
+      extraConfig = "allow-update { key rfc2136key.example.com.; };";
+    }
+  ];
 }
+
+# Now we can configure ACME
+<xref linkend="opt-security.acme.acceptTerms" /> = true;
+<xref linkend="opt-security.acme.email" /> = "admin+acme@example.com";
+<xref linkend="opt-security.acme.certs" />."example.com" = {
+  <link linkend="opt-security.acme.certs._name_.domain">domain</link> = "*.example.com";
+  <link linkend="opt-security.acme.certs._name_.dnsProvider">dnsProvider</link> = "rfc2136";
+  <link linkend="opt-security.acme.certs._name_.credentialsFile">credentialsFile</link> = "/var/lib/secrets/certs.secret";
+  # We don't need to wait for propagation since this is a local DNS server
+  <link linkend="opt-security.acme.certs._name_.dnsPropagationCheck">dnsPropagationCheck</link> = false;
+};
 </programlisting>
+
+  <para>
+   The <filename>dnskeys.conf</filename> and <filename>certs.secret</filename>
+   must be kept secure and thus you should not keep their contents in your
+   Nix config. Instead, generate them one time with these commands:
+  </para>
+
+<programlisting>
+mkdir -p /var/lib/secrets
+tsig-keygen rfc2136key.example.com &gt; /var/lib/secrets/dnskeys.conf
+chown named:root /var/lib/secrets/dnskeys.conf
+chmod 400 /var/lib/secrets/dnskeys.conf
+
+# Copy the secret value from the dnskeys.conf, and put it in
+# RFC2136_TSIG_SECRET below
+
+cat &gt; /var/lib/secrets/certs.secret &lt;&lt; EOF
+RFC2136_NAMESERVER='127.0.0.1:53'
+RFC2136_TSIG_ALGORITHM='hmac-sha256.'
+RFC2136_TSIG_KEY='rfc2136key.example.com'
+RFC2136_TSIG_SECRET='your secret key'
+EOF
+chmod 400 /var/lib/secrets/certs.secret
+</programlisting>
+
+  <para>
+   Now you're all set to generate certs! You should monitor the first invocation
+   by running <literal>systemctl start acme-example.com.service &amp;
+   journalctl -fu acme-example.com.service</literal> and watching its log output.
+  </para>
  </section>
 </chapter>
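
To actually serve traffic with the wildcard certificate from the DNS example, a virtual host can reference the existing certificate via useACMEHost, as the Nginx section earlier in this chapter describes. A minimal sketch, assuming the same option names used above:

    services.nginx.virtualHosts."www.example.com" = {
      forceSSL = true;
      # Reuse the wildcard certificate generated for "*.example.com" above
      # instead of requesting a separate one.
      useACMEHost = "example.com";
    };
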
diff --git a/nixos/modules/security/apparmor-suid.nix b/nixos/modules/security/apparmor-suid.nix
index 3c93f5440ab..6c479e070e2 100644
--- a/nixos/modules/security/apparmor-suid.nix
+++ b/nixos/modules/security/apparmor-suid.nix
@@ -9,6 +9,7 @@ with lib;
   ];
 
   options.security.apparmor.confineSUIDApplications = mkOption {
+    type = types.bool;
     default = true;
     description = ''
       Install AppArmor profiles for commonly-used SUID application
diff --git a/nixos/modules/security/doas.nix b/nixos/modules/security/doas.nix
new file mode 100644
index 00000000000..b81f2d0c2d5
--- /dev/null
+++ b/nixos/modules/security/doas.nix
@@ -0,0 +1,274 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.security.doas;
+
+  inherit (pkgs) doas;
+
+  mkUsrString = user: toString user;
+
+  mkGrpString = group: ":${toString group}";
+
+  mkOpts = rule: concatStringsSep " " [
+    (optionalString rule.noPass "nopass")
+    (optionalString rule.persist "persist")
+    (optionalString rule.keepEnv "keepenv")
+    "setenv { SSH_AUTH_SOCK ${concatStringsSep " " rule.setEnv} }"
+  ];
+
+  mkArgs = rule:
+    if (isNull rule.args) then ""
+    else if (length rule.args == 0) then "args"
+    else "args ${concatStringsSep " " rule.args}";
+
+  mkRule = rule:
+    let
+      opts = mkOpts rule;
+
+      as = optionalString (!isNull rule.runAs) "as ${rule.runAs}";
+
+      cmd = optionalString (!isNull rule.cmd) "cmd ${rule.cmd}";
+
+      args = mkArgs rule;
+    in
+    optionals (length cfg.extraRules > 0) [
+      (
+        optionalString (length rule.users > 0)
+          (map (usr: "permit ${opts} ${mkUsrString usr} ${as} ${cmd} ${args}") rule.users)
+      )
+      (
+        optionalString (length rule.groups > 0)
+          (map (grp: "permit ${opts} ${mkGrpString grp} ${as} ${cmd} ${args}") rule.groups)
+      )
+    ];
+in
+{
+
+  ###### interface
+
+  options.security.doas = {
+
+    enable = mkOption {
+      type = with types; bool;
+      default = false;
+      description = ''
+        Whether to enable the <command>doas</command> command, which allows
+        non-root users to execute commands as root.
+      '';
+    };
+
+    wheelNeedsPassword = mkOption {
+      type = with types; bool;
+      default = true;
+      description = ''
+        Whether users of the <code>wheel</code> group must provide a password to
+        run commands as super user via <command>doas</command>.
+      '';
+    };
+
+    extraRules = mkOption {
+      default = [];
+      description = ''
+        Define specific rules to be set in the
+        <filename>/etc/doas.conf</filename> file. More specific rules should
+        come after more general ones in order to yield the expected behavior.
+        You can use <code>mkBefore</code> and/or <code>mkAfter</code> to ensure
+        this is the case when configuration options are merged.
+      '';
+      example = literalExample ''
+        [
+          # Allow execution of any command by any user in group doas, requiring
+          # a password and keeping any previously-defined environment variables.
+          { groups = [ "doas" ]; noPass = false; keepEnv = true; }
+
+          # Allow execution of "/home/root/secret.sh" by user `backup` OR user
+          # `database` OR any member of the group with GID `1006`, without a
+          # password.
+          { users = [ "backup" "database" ]; groups = [ 1006 ];
+            cmd = "/home/root/secret.sh"; noPass = true; }
+
+          # Allow any member of group `bar` to run `/home/baz/cmd1.sh` as user
+          # `foo` with argument `hello-doas`.
+          { groups = [ "bar" ]; runAs = "foo";
+            cmd = "/home/baz/cmd1.sh"; args = [ "hello-doas" ]; }
+
+          # Allow any member of group `bar` to run `/home/baz/cmd2.sh` as user
+          # `foo` with no arguments.
+          { groups = [ "bar" ]; runAs = "foo";
+            cmd = "/home/baz/cmd2.sh"; args = [ ]; }
+
+          # Allow user `abusers` to execute "nano" and unset the value of
+          # SSH_AUTH_SOCK, override the value of ALPHA to 1, and inherit the
+          # value of BETA from the current environment.
+          { users = [ "abusers" ]; cmd = "nano";
+            setEnv = [ "-SSH_AUTH_SOCK" "ALPHA=1" "BETA" ]; }
+        ]
+      '';
+      type = with types; listOf (
+        submodule {
+          options = {
+
+            noPass = mkOption {
+              type = with types; bool;
+              default = false;
+              description = ''
+                If <code>true</code>, the user is not required to enter a
+                password.
+              '';
+            };
+
+            persist = mkOption {
+              type = with types; bool;
+              default = false;
+              description = ''
+                If <code>true</code>, do not ask for a password again for some
+                time after the user successfully authenticates.
+              '';
+            };
+
+            keepEnv = mkOption {
+              type = with types; bool;
+              default = false;
+              description = ''
+                If <code>true</code>, environment variables other than those
+                listed in
+                <citerefentry><refentrytitle>doas</refentrytitle><manvolnum>1</manvolnum></citerefentry>
+                are kept when creating the environment for the new process.
+              '';
+            };
+
+            setEnv = mkOption {
+              type = with types; listOf str;
+              default = [];
+              description = ''
+                Keep or set the specified variables. Variables may also be
+                removed with a leading '-' or set using
+                <code>variable=value</code>. If the first character of
+                <code>value</code> is a '$', the value to be set is taken from
+                the existing environment variable of the indicated name. This
+                option is processed after the default environment has been
+                created.
+
+                NOTE: All rules have <code>setenv { SSH_AUTH_SOCK }</code> by
+                default. To prevent <code>SSH_AUTH_SOCK</code> from being
+                inherited, add <code>"-SSH_AUTH_SOCK"</code> anywhere in this
+                list.
+              '';
+            };
+
+            users = mkOption {
+              type = with types; listOf (either str int);
+              default = [];
+              description = "The usernames / UIDs this rule should apply for.";
+            };
+
+            groups = mkOption {
+              type = with types; listOf (either str int);
+              default = [];
+              description = "The groups / GIDs this rule should apply for.";
+            };
+
+            runAs = mkOption {
+              type = with types; nullOr str;
+              default = null;
+              description = ''
+                Which user or group the specified command is allowed to run as.
+                When set to <code>null</code> (the default), all users are
+                allowed.
+
+                A user can be specified using just the username:
+                <code>"foo"</code>. It is also possible to only allow running as
+                a specific group with <code>":bar"</code>.
+              '';
+            };
+
+            cmd = mkOption {
+              type = with types; nullOr str;
+              default = null;
+              description = ''
+                The command the user is allowed to run. When set to
+                <code>null</code> (the default), all commands are allowed.
+
+                NOTE: It is best practice to specify absolute paths. If a
+                relative path is specified, only a restricted PATH will be
+                searched.
+              '';
+            };
+
+            args = mkOption {
+              type = with types; nullOr (listOf str);
+              default = null;
+              description = ''
+                Arguments that must be provided to the command. When set to
+                <code>[]</code>, the command must be run without any arguments.
+              '';
+            };
+          };
+        }
+      );
+    };
+
+    extraConfig = mkOption {
+      type = with types; lines;
+      default = "";
+      description = ''
+        Extra configuration text appended to <filename>doas.conf</filename>.
+      '';
+    };
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    security.doas.extraRules = mkOrder 600 [
+      {
+        groups = [ "wheel" ];
+        noPass = !cfg.wheelNeedsPassword;
+      }
+    ];
+
+    security.wrappers = {
+      doas.source = "${doas}/bin/doas";
+    };
+
+    environment.systemPackages = [
+      doas
+    ];
+
+    security.pam.services.doas = {
+      allowNullPassword = true;
+      sshAgentAuth = true;
+    };
+
+    environment.etc."doas.conf" = {
+      source = pkgs.runCommand "doas-conf"
+        {
+          src = pkgs.writeText "doas-conf-in" ''
+            # To modify this file, set the NixOS options
+            # `security.doas.extraRules` or `security.doas.extraConfig`. To
+            # completely replace the contents of this file, use
+            # `environment.etc."doas.conf"`.
+
+            # "root" is allowed to do anything.
+            permit nopass keepenv root
+
+            # extraRules
+            ${concatStringsSep "\n" (lists.flatten (map mkRule cfg.extraRules))}
+
+            # extraConfig
+            ${cfg.extraConfig}
+          '';
+          preferLocalBuild = true;
+        }
+        # Make sure that the doas.conf file is syntactically valid.
+        "${pkgs.buildPackages.doas}/bin/doas -C $src && cp $src $out";
+      mode = "0440";
+    };
+
+  };
+
+  meta.maintainers = with maintainers; [ cole-h ];
+}
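
A minimal sketch of enabling the new doas module, using only options defined in the file above (enable, wheelNeedsPassword, extraRules) and mirroring its literalExample:

    security.doas = {
      enable = true;
      # Members of "wheel" still have to enter a password (the default).
      wheelNeedsPassword = true;
      extraRules = [
        # Let user "backup" run a single script without a password,
        # as in the module's literalExample.
        { users = [ "backup" ]; cmd = "/home/root/secret.sh"; noPass = true; }
      ];
    };
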
diff --git a/nixos/modules/security/google_oslogin.nix b/nixos/modules/security/google_oslogin.nix
index 6f9962e1d62..78c2089baeb 100644
--- a/nixos/modules/security/google_oslogin.nix
+++ b/nixos/modules/security/google_oslogin.nix
@@ -49,6 +49,7 @@ in
 
     # enable the nss module, so user lookups etc. work
     system.nssModules = [ package ];
+    system.nssDatabases.passwd = [ "cache_oslogin" "oslogin" ];
 
     # Ugly: sshd refuses to start if a store path is given because /nix/store is group-writable.
     # So indirect by a symlink.
diff --git a/nixos/modules/security/pam.nix b/nixos/modules/security/pam.nix
index bfc2a881387..e1a94b0121a 100644
--- a/nixos/modules/security/pam.nix
+++ b/nixos/modules/security/pam.nix
@@ -54,7 +54,7 @@ let
         description = ''
           If set, users listed in
           <filename>~/.yubico/authorized_yubikeys</filename>
-          are able to log in with the asociated Yubikey tokens.
+          are able to log in with the associated Yubikey tokens.
         '';
       };
 
@@ -219,6 +219,14 @@ let
         '';
       };
 
+      nodelay = mkOption {
+        default = false;
+        type = types.bool;
+        description = ''
+          Whether the delay after typing a wrong password should be disabled.
+        '';
+      };
+
       requireWheel = mkOption {
         default = false;
         type = types.bool;
@@ -366,7 +374,7 @@ let
             || cfg.enableGnomeKeyring
             || cfg.googleAuthenticator.enable
             || cfg.duoSecurity.enable)) ''
-              auth required pam_unix.so ${optionalString cfg.allowNullPassword "nullok"} likeauth
+              auth required pam_unix.so ${optionalString cfg.allowNullPassword "nullok"} ${optionalString cfg.nodelay "nodelay"} likeauth
               ${optionalString config.security.pam.enableEcryptfs
                 "auth optional ${pkgs.ecryptfs}/lib/security/pam_ecryptfs.so unwrap"}
               ${optionalString cfg.pamMount
@@ -382,7 +390,7 @@ let
                 "auth required ${pkgs.duo-unix}/lib/security/pam_duo.so"}
             '') + ''
           ${optionalString cfg.unixAuth
-              "auth sufficient pam_unix.so ${optionalString cfg.allowNullPassword "nullok"} likeauth try_first_pass"}
+              "auth sufficient pam_unix.so ${optionalString cfg.allowNullPassword "nullok"} ${optionalString cfg.nodelay "nodelay"} likeauth try_first_pass"}
           ${optionalString cfg.otpwAuth
               "auth sufficient ${pkgs.otpw}/lib/security/pam_otpw.so"}
           ${optionalString use_ldap
@@ -545,6 +553,7 @@ in
     };
 
     security.pam.enableSSHAgentAuth = mkOption {
+      type = types.bool;
       default = false;
       description =
         ''
@@ -555,12 +564,7 @@ in
         '';
     };
 
-    security.pam.enableOTPW = mkOption {
-      default = false;
-      description = ''
-        Enable the OTPW (one-time password) PAM module.
-      '';
-    };
+    security.pam.enableOTPW = mkEnableOption "the OTPW (one-time password) PAM module";
 
     security.pam.u2f = {
       enable = mkOption {
@@ -719,12 +723,7 @@ in
       };
     };
 
-    security.pam.enableEcryptfs = mkOption {
-      default = false;
-      description = ''
-        Enable eCryptfs PAM module (mounting ecryptfs home directory on login).
-      '';
-    };
+    security.pam.enableEcryptfs = mkEnableOption "eCryptfs PAM module (mounting ecryptfs home directory on login)";
 
     users.motd = mkOption {
       default = null;
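
The new nodelay flag sits next to the other per-service PAM options, so it should be settable per service. A minimal sketch, assuming it is exposed as security.pam.services.<name>.nodelay; the service name "login" is only illustrative:

    # Disable the delay after a failed password attempt for local logins.
    security.pam.services.login.nodelay = true;
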
diff --git a/nixos/modules/security/prey.nix b/nixos/modules/security/prey.nix
deleted file mode 100644
index b899ccb6c3e..00000000000
--- a/nixos/modules/security/prey.nix
+++ /dev/null
@@ -1,51 +0,0 @@
-{ config, lib, pkgs, ... }:
-
-with lib;
-
-let
-  cfg = config.services.prey;
-  myPrey = pkgs.prey-bash-client.override {
-    apiKey = cfg.apiKey;
-    deviceKey = cfg.deviceKey;
-  };
-in {
-  options = {
-
-    services.prey = {
-      enable = mkOption {
-        default = false;
-        type = types.bool;
-        description = ''
-          Enables the <link xlink:href="http://preyproject.com/" />
-          shell client. Be sure to specify both API and device keys.
-          Once enabled, a <command>cron</command> job will run every 15
-          minutes to report status information.
-        '';
-      };
-
-      deviceKey = mkOption {
-        type = types.str;
-        description = ''
-          <literal>Device key</literal> obtained by visiting
-          <link xlink:href="https://panel.preyproject.com/devices" />
-          and clicking on your device.
-        '';
-      };
-
-      apiKey = mkOption {
-        type = types.str;
-        description = ''
-          <literal>API key</literal> obtained from
-          <link xlink:href="https://panel.preyproject.com/profile" />.
-        '';
-      };
-    };
-
-  };
-
-  config = mkIf cfg.enable {
-      environment.systemPackages = [ myPrey ];
-      services.cron.systemCronJobs = [ "*/15 * * * * root ${myPrey}/prey.sh" ];
-  };
-
-}
diff --git a/nixos/modules/security/systemd-confinement.nix b/nixos/modules/security/systemd-confinement.nix
index cd4eb81dbe1..0a400f1d535 100644
--- a/nixos/modules/security/systemd-confinement.nix
+++ b/nixos/modules/security/systemd-confinement.nix
@@ -160,6 +160,11 @@ in {
               + " the 'users.users' option instead as this combination is"
               + " currently not supported.";
     }
+    { assertion = !cfg.serviceConfig.ProtectSystem or false;
+      message = "${whatOpt "ProtectSystem"}. ProtectSystem is not compatible"
+              + " with service confinement as it fails to remount /usr within"
+              + " our chroot. Please disable the option.";
+    }
   ]) config.systemd.services);
 
   config.systemd.packages = lib.concatLists (lib.mapAttrsToList (name: cfg: let
diff --git a/nixos/modules/services/amqp/rabbitmq.nix b/nixos/modules/services/amqp/rabbitmq.nix
index f80d6b3f1ba..646708e01c4 100644
--- a/nixos/modules/services/amqp/rabbitmq.nix
+++ b/nixos/modules/services/amqp/rabbitmq.nix
@@ -17,6 +17,7 @@ in {
   options = {
     services.rabbitmq = {
       enable = mkOption {
+        type = types.bool;
         default = false;
         description = ''
           Whether to enable the RabbitMQ server, an Advanced Message
diff --git a/nixos/modules/services/audio/mopidy.nix b/nixos/modules/services/audio/mopidy.nix
index d30c227db42..6fd7eae5b89 100644
--- a/nixos/modules/services/audio/mopidy.nix
+++ b/nixos/modules/services/audio/mopidy.nix
@@ -13,11 +13,11 @@ let
   mopidyEnv = buildEnv {
     name = "mopidy-with-extensions-${mopidy.version}";
     paths = closePropagation cfg.extensionPackages;
-    pathsToLink = [ "/${python3.sitePackages}" ];
+    pathsToLink = [ "/${mopidyPackages.python.sitePackages}" ];
     buildInputs = [ makeWrapper ];
     postBuild = ''
       makeWrapper ${mopidy}/bin/mopidy $out/bin/mopidy \
-        --prefix PYTHONPATH : $out/${python3.sitePackages}
+        --prefix PYTHONPATH : $out/${mopidyPackages.python.sitePackages}
     '';
   };
 in {
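
The wrapper above puts each extension's site-packages on PYTHONPATH via mopidyPackages.python.sitePackages. A minimal sketch of a configuration feeding it, assuming cfg here is config.services.mopidy and that an extension package such as pkgs.mopidy-mpd is available (both assumptions, not shown in this hunk):

    services.mopidy = {
      enable = true;
      # Each extension's site-packages ends up on the wrapper's PYTHONPATH.
      extensionPackages = [ pkgs.mopidy-mpd ];  # illustrative extension package
    };
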
diff --git a/nixos/modules/services/audio/mpd.nix b/nixos/modules/services/audio/mpd.nix
index e20591b5beb..f4eb4a265a4 100644
--- a/nixos/modules/services/audio/mpd.nix
+++ b/nixos/modules/services/audio/mpd.nix
@@ -18,8 +18,6 @@ let
     ''}
     state_file          "${cfg.dataDir}/state"
     sticker_file        "${cfg.dataDir}/sticker.sql"
-    user                "${cfg.user}"
-    group               "${cfg.group}"
 
     ${optionalString (cfg.network.listenAddress != "any") ''bind_to_address "${cfg.network.listenAddress}"''}
     ${optionalString (cfg.network.port != 6600)  ''port "${toString cfg.network.port}"''}
diff --git a/nixos/modules/services/backup/mysql-backup.nix b/nixos/modules/services/backup/mysql-backup.nix
index f58af82773f..31d606b141a 100644
--- a/nixos/modules/services/backup/mysql-backup.nix
+++ b/nixos/modules/services/backup/mysql-backup.nix
@@ -37,12 +37,7 @@ in
 
     services.mysqlBackup = {
 
-      enable = mkOption {
-        default = false;
-        description = ''
-          Whether to enable MySQL backups.
-        '';
-      };
+      enable = mkEnableOption "MySQL backups";
 
       calendar = mkOption {
         type = types.str;
diff --git a/nixos/modules/services/backup/postgresql-backup.nix b/nixos/modules/services/backup/postgresql-backup.nix
index 580c7ce68f1..428861a7598 100644
--- a/nixos/modules/services/backup/postgresql-backup.nix
+++ b/nixos/modules/services/backup/postgresql-backup.nix
@@ -44,12 +44,7 @@ in {
 
   options = {
     services.postgresqlBackup = {
-      enable = mkOption {
-        default = false;
-        description = ''
-          Whether to enable PostgreSQL dumps.
-        '';
-      };
+      enable = mkEnableOption "PostgreSQL dumps";
 
       startAt = mkOption {
         default = "*-*-* 01:15:00";
diff --git a/nixos/modules/services/backup/znapzend.nix b/nixos/modules/services/backup/znapzend.nix
index 203631a577f..8098617d11f 100644
--- a/nixos/modules/services/backup/znapzend.nix
+++ b/nixos/modules/services/backup/znapzend.nix
@@ -268,7 +268,8 @@ let
 
   mkSrcAttrs = srcCfg: with srcCfg; {
     enabled = onOff enable;
-    mbuffer = with mbuffer; if enable then "${pkgs.mbuffer}/bin/mbuffer"
+    # mbuffer is not referenced by its full path to accommodate non-NixOS systems or differing mbuffer versions between source and target
+    mbuffer = with mbuffer; if enable then "mbuffer"
         + optionalString (port != null) ":${toString port}" else "off";
     mbuffer_size = mbuffer.size;
     post_znap_cmd = nullOff postsnap;
@@ -357,6 +358,12 @@ in
         default = false;
       };
 
+      features.oracleMode = mkEnableOption ''
+        Destroy snapshots one by one instead of using one long argument list.
+        If source and destination are out of sync for a long time, you may have
+        so many snapshots to destroy that the argument list gets too long and the
+        command fails.
+      '';
       features.recvu = mkEnableOption ''
         recvu feature which uses <literal>-u</literal> on the receiving end to keep the destination
         filesystem unmounted.
@@ -372,6 +379,41 @@ in
         and <citerefentry><refentrytitle>zfs</refentrytitle><manvolnum>8</manvolnum></citerefentry>
         for more info.
       '';
+      features.sendRaw = mkEnableOption ''
+        sendRaw feature which adds the options <literal>-w</literal> to the
+        <command>zfs send</command> command. For encrypted source datasets this
+        instructs zfs not to decrypt before sending which results in a remote
+        backup that can't be read without the encryption key/passphrase, useful
+        when the remote isn't fully trusted or not physically secure. This
+        option must be used consistently, raw incrementals cannot be based on
+        non-raw snapshots and vice versa.
+      '';
+      features.skipIntermediates = mkEnableOption ''
+        Enable the skipIntermediates feature to send a single increment
+        between latest common snapshot and the newly made one. It may skip
+        several source snaps if the destination was offline for some time, and
+        it should skip snapshots not managed by znapzend. Normally for online
+        destinations, the new snapshot is sent as soon as it is created on the
+        source, so there are no automatic increments to skip.
+      '';
+      features.lowmemRecurse = mkEnableOption ''
+        use lowmemRecurse on systems where you have too many datasets, so a
+        recursive listing of attributes to find backup plans exhausts the
+        memory available to <command>znapzend</command>: instead, go the slower
+        way to first list all impacted dataset names, and then query their
+        configs one by one.
+      '';
+      features.zfsGetType = mkEnableOption ''
+        use zfsGetType if your <command>zfs get</command> supports a
+        <literal>-t</literal> argument for filtering by dataset type at all AND
+        lists properties for snapshots by default when recursing, so that there
+        is too much data to process while searching for backup plans.
+        If these two conditions apply to your system, the time needed for a
+        <literal>--recursive</literal> search for backup plans can literally
+        differ by hundreds of times (depending on the amount of snapshots in
+        that dataset tree... and a decent backup plan will ensure you have a lot
+        of those), so you would benefit from requesting this feature.
+      '';
     };
   };
 
@@ -423,5 +465,5 @@ in
     };
   };
 
-  meta.maintainers = with maintainers; [ infinisil ];
+  meta.maintainers = with maintainers; [ infinisil SlothOfAnarchy ];
 }
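
A minimal sketch of turning on the new feature toggles, assuming they are exposed under services.znapzend.features as the option layout above suggests:

    services.znapzend = {
      enable = true;
      features = {
        # Destroy stale snapshots one at a time instead of with one huge argument list.
        oracleMode = true;
        # Pass -w to `zfs send` so encrypted datasets stay encrypted on the target.
        sendRaw = true;
      };
    };
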
diff --git a/nixos/modules/services/cluster/k3s/default.nix b/nixos/modules/services/cluster/k3s/default.nix
new file mode 100644
index 00000000000..2e8bf20a68f
--- /dev/null
+++ b/nixos/modules/services/cluster/k3s/default.nix
@@ -0,0 +1,101 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.services.k3s;
+in
+{
+  # interface
+  options.services.k3s = {
+    enable = mkEnableOption "k3s";
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.k3s;
+      defaultText = "pkgs.k3s";
+      example = literalExample "pkgs.k3s";
+      description = "Package that should be used for k3s";
+    };
+
+    role = mkOption {
+      description = ''
+        Whether k3s should run as a server or agent.
+        Note that the server, by default, also runs as an agent.
+      '';
+      default = "server";
+      type = types.enum [ "server" "agent" ];
+    };
+
+    serverAddr = mkOption {
+      type = types.str;
+      description = "The k3s server to connect to. This option only makes sense for an agent.";
+      example = "https://10.0.0.10:6443";
+      default = "";
+    };
+
+    token = mkOption {
+      type = types.str;
+      description = "The k3s token to use when connecting to the server. This option only makes sense for an agent.";
+      default = "";
+    };
+
+    docker = mkOption {
+      type = types.bool;
+      default = false;
+      description = "Use docker to run containers rather than the built-in containerd.";
+    };
+
+    extraFlags = mkOption {
+      description = "Extra flags to pass to the k3s command.";
+      default = "";
+      example = "--no-deploy traefik --cluster-cidr 10.24.0.0/16";
+    };
+
+    disableAgent = mkOption {
+      type = types.bool;
+      default = false;
+      description = "Only run the server. This option only makes sense for a server.";
+    };
+  };
+
+  # implementation
+
+  config = mkIf cfg.enable {
+    assertions = [
+      {
+        assertion = cfg.role == "agent" -> cfg.serverAddr != "";
+        message = "serverAddr should be set if role is 'agent'";
+      }
+      {
+        assertion = cfg.role == "agent" -> cfg.token != "";
+        message = "token should be set if role is 'agent'";
+      }
+    ];
+
+    virtualisation.docker = mkIf cfg.docker {
+      enable = mkDefault true;
+    };
+
+    systemd.services.k3s = {
+      description = "k3s service";
+      after = mkIf cfg.docker [ "docker.service" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        # See: https://github.com/rancher/k3s/blob/dddbd16305284ae4bd14c0aade892412310d7edc/install.sh#L197
+        Type = if cfg.role == "agent" then "exec" else "notify";
+        KillMode = "process";
+        Delegate = "yes";
+        Restart = "always";
+        RestartSec = "5s";
+        ExecStart = concatStringsSep " \\\n " (
+          [
+            "${cfg.package}/bin/k3s ${cfg.role}"
+          ] ++ (optional cfg.docker "--docker")
+          ++ (optional cfg.disableAgent "--disable-agent")
+          ++ (optional (cfg.role == "agent") "--server ${cfg.serverAddr} --token ${cfg.token}")
+          ++ [ cfg.extraFlags ]
+        );
+      };
+    };
+  };
+}
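
A minimal sketch of a two-node setup using only the options defined above; the address and token are placeholders:

    # On the server:
    services.k3s.enable = true;
    services.k3s.role = "server";   # also runs an agent unless disableAgent = true

    # On an agent node:
    services.k3s = {
      enable = true;
      role = "agent";
      serverAddr = "https://10.0.0.10:6443";       # placeholder, from the option's example
      token = "<token copied from the server>";    # placeholder
    };
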
diff --git a/nixos/modules/services/computing/boinc/client.nix b/nixos/modules/services/computing/boinc/client.nix
index a7edac02538..7becf624071 100644
--- a/nixos/modules/services/computing/boinc/client.nix
+++ b/nixos/modules/services/computing/boinc/client.nix
@@ -99,14 +99,16 @@ in
       environment.systemPackages = [cfg.package];
 
       users.users.boinc = {
+        group = "boinc";
         createHome = false;
         description = "BOINC Client";
         home = cfg.dataDir;
         isSystemUser = true;
       };
+      users.groups.boinc = {};
 
       systemd.tmpfiles.rules = [
-        "d '${cfg.dataDir}' - boinc - - -"
+        "d '${cfg.dataDir}' - boinc boinc - -"
       ];
 
       systemd.services.boinc = {
@@ -114,7 +116,7 @@ in
         after = ["network.target"];
         wantedBy = ["multi-user.target"];
         script = ''
-          ${fhsEnvExecutable} --dir ${cfg.dataDir} --redirectio ${allowRemoteGuiRpcFlag}
+          ${fhsEnvExecutable} --dir ${cfg.dataDir} ${allowRemoteGuiRpcFlag}
         '';
         serviceConfig = {
           User = "boinc";
diff --git a/nixos/modules/services/continuous-integration/gitlab-runner.nix b/nixos/modules/services/continuous-integration/gitlab-runner.nix
index bd4cf6a37ba..eacfed85ddf 100644
--- a/nixos/modules/services/continuous-integration/gitlab-runner.nix
+++ b/nixos/modules/services/continuous-integration/gitlab-runner.nix
@@ -1,160 +1,494 @@
 { config, lib, pkgs, ... }:
-
 with lib;
-
 let
   cfg = config.services.gitlab-runner;
-  configFile =
-    if (cfg.configFile == null) then
-      (pkgs.runCommand "config.toml" {
-        buildInputs = [ pkgs.remarshal ];
-        preferLocalBuild = true;
-      } ''
-        remarshal -if json -of toml \
-          < ${pkgs.writeText "config.json" (builtins.toJSON cfg.configOptions)} \
-          > $out
-      '')
-    else
-      cfg.configFile;
   hasDocker = config.virtualisation.docker.enable;
+  hashedServices = with builtins; (mapAttrs' (name: service: nameValuePair
+    "${name}_${config.networking.hostName}_${
+        substring 0 12
+        (hashString "md5" (unsafeDiscardStringContext (toJSON service)))}"
+      service)
+    cfg.services);
+  configPath = "$HOME/.gitlab-runner/config.toml";
+  configureScript = pkgs.writeShellScriptBin "gitlab-runner-configure" (
+    if (cfg.configFile != null) then ''
+      mkdir -p $(dirname ${configPath})
+      cp ${cfg.configFile} ${configPath}
+      # make config file readable by service
+      chown -R --reference=$HOME $(dirname ${configPath})
+    '' else ''
+      export CONFIG_FILE=${configPath}
+
+      mkdir -p $(dirname ${configPath})
+
+      # remove no longer existing services
+      gitlab-runner verify --delete
+
+      # current and desired state
+      NEEDED_SERVICES=$(echo ${concatStringsSep " " (attrNames hashedServices)} | tr " " "\n")
+      REGISTERED_SERVICES=$(gitlab-runner list 2>&1 | grep 'Executor' | awk '{ print $1 }')
+
+      # difference between current and desired state
+      NEW_SERVICES=$(grep -vxF -f <(echo "$REGISTERED_SERVICES") <(echo "$NEEDED_SERVICES") || true)
+      OLD_SERVICES=$(grep -vxF -f <(echo "$NEEDED_SERVICES") <(echo "$REGISTERED_SERVICES") || true)
+
+      # register new services
+      ${concatStringsSep "\n" (mapAttrsToList (name: service: ''
+        if echo "$NEW_SERVICES" | grep -xq ${name}; then
+          bash -c ${escapeShellArg (concatStringsSep " \\\n " ([
+            "set -a && source ${service.registrationConfigFile} &&"
+            "gitlab-runner register"
+            "--non-interactive"
+            "--name ${name}"
+            "--executor ${service.executor}"
+            "--limit ${toString service.limit}"
+            "--request-concurrency ${toString service.requestConcurrency}"
+            "--maximum-timeout ${toString service.maximumTimeout}"
+          ] ++ service.registrationFlags
+            ++ optional (service.buildsDir != null)
+            "--builds-dir ${service.buildsDir}"
+            ++ optional (service.preCloneScript != null)
+            "--pre-clone-script ${service.preCloneScript}"
+            ++ optional (service.preBuildScript != null)
+            "--pre-build-script ${service.preBuildScript}"
+            ++ optional (service.postBuildScript != null)
+            "--post-build-script ${service.postBuildScript}"
+            ++ optional (service.tagList != [ ])
+            "--tag-list ${concatStringsSep "," service.tagList}"
+            ++ optional service.runUntagged
+            "--run-untagged"
+            ++ optional service.protected
+            "--access-level ref_protected"
+            ++ optional service.debugTraceDisabled
+            "--debug-trace-disabled"
+            ++ map (e: "--env ${escapeShellArg e}") (mapAttrsToList (name: value: "${name}=${value}") service.environmentVariables)
+            ++ optionals (service.executor == "docker") (
+              assert (
+                assertMsg (service.dockerImage != null)
+                  "dockerImage option is required for docker executor (${name})");
+              [ "--docker-image ${service.dockerImage}" ]
+              ++ optional service.dockerDisableCache
+              "--docker-disable-cache"
+              ++ optional service.dockerPrivileged
+              "--docker-privileged"
+              ++ map (v: "--docker-volumes ${escapeShellArg v}") service.dockerVolumes
+              ++ map (v: "--docker-extra-hosts ${escapeShellArg v}") service.dockerExtraHosts
+              ++ map (v: "--docker-allowed-images ${escapeShellArg v}") service.dockerAllowedImages
+              ++ map (v: "--docker-allowed-services ${escapeShellArg v}") service.dockerAllowedServices
+            )
+          ))} && sleep 1
+        fi
+      '') hashedServices)}
+
+      # unregister old services
+      for NAME in $(echo "$OLD_SERVICES")
+      do
+        [ ! -z "$NAME" ] && gitlab-runner unregister \
+          --name "$NAME" && sleep 1
+      done
+
+      # update global options
+      remarshal --if toml --of json ${configPath} \
+        | jq -cM '.check_interval = ${toString cfg.checkInterval} |
+                  .concurrent = ${toString cfg.concurrent}' \
+        | remarshal --if json --of toml \
+        | sponge ${configPath}
+
+      # make config file readable by service
+      chown -R --reference=$HOME $(dirname ${configPath})
+    '');
+  startScript = pkgs.writeShellScriptBin "gitlab-runner-start" ''
+    export CONFIG_FILE=${configPath}
+    exec gitlab-runner run --working-directory $HOME
+  '';
 in
 {
   options.services.gitlab-runner = {
     enable = mkEnableOption "Gitlab Runner";
-
     configFile = mkOption {
+      type = types.nullOr types.path;
       default = null;
       description = ''
         Configuration file for gitlab-runner.
-        Use this option in favor of configOptions to avoid placing CI tokens in the nix store.
 
-        <option>configFile</option> takes precedence over <option>configOptions</option>.
+        <option>configFile</option> takes precedence over <option>services</option>.
+        <option>checkInterval</option> and <option>concurrent</option> will be ignored too.
 
-        Warning: Not using <option>configFile</option> will potentially result in secrets
-        leaking into the WORLD-READABLE nix store.
+        This option is deprecated; please use <option>services</option> instead.
+        You can use <option>registrationConfigFile</option> and
+        <option>registrationFlags</option>
+        for settings not covered by this module.
       '';
-      type = types.nullOr types.path;
     };
-
-    configOptions = mkOption {
+    checkInterval = mkOption {
+      type = types.int;
+      default = 0;
+      example = literalExample "with lib; (length (attrNames config.services.gitlab-runner.services)) * 3";
       description = ''
-        Configuration for gitlab-runner
-        <option>configFile</option> will take precedence over this option.
-
-        Warning: all Configuration, especially CI token, will be stored in a
-        WORLD-READABLE file in the Nix Store.
-
-        If you want to protect your CI token use <option>configFile</option> instead.
+        Defines the interval length, in seconds, between checks for new jobs.
+        The default value is 3;
+        if set to 0 or lower, the default value will be used.
+        See <link xlink:href="https://docs.gitlab.com/runner/configuration/advanced-configuration.html#how-check_interval-works">runner documentation</link> for more information.
+      '';
+    };
+    concurrent = mkOption {
+      type = types.int;
+      default = 1;
+      example = literalExample "config.nix.maxJobs";
+      description = ''
+        Limits how many jobs globally can be run concurrently.
+        This is the upper limit on the number of jobs across all defined runners.
+        0 does not mean unlimited.
       '';
-      type = types.attrs;
-      example = {
-        concurrent = 2;
-        runners = [{
-          name = "docker-nix-1.11";
-          url = "https://CI/";
-          token = "TOKEN";
-          executor = "docker";
-          builds_dir = "";
-          docker = {
-            host = "";
-            image = "nixos/nix:1.11";
-            privileged = true;
-            disable_cache = true;
-            cache_dir = "";
-          };
-        }];
-      };
     };
-
     gracefulTermination = mkOption {
-      default = false;
       type = types.bool;
+      default = false;
       description = ''
-        Finish all remaining jobs before stopping, restarting or reconfiguring.
-        If not set gitlab-runner will stop immediatly without waiting for jobs to finish,
-        which will lead to failed builds.
+        Finish all remaining jobs before stopping.
+        If not set, gitlab-runner will stop immediately without waiting
+        for jobs to finish, which will lead to failed builds.
       '';
     };
-
     gracefulTimeout = mkOption {
-      default = "infinity";
       type = types.str;
+      default = "infinity";
       example = "5min 20s";
-      description = ''Time to wait until a graceful shutdown is turned into a forceful one.'';
-    };
-
-    workDir = mkOption {
-      default = "/var/lib/gitlab-runner";
-      type = types.path;
-      description = "The working directory used";
+      description = ''
+        Time to wait until a graceful shutdown is turned into a forceful one.
+      '';
     };
-
     package = mkOption {
-      description = "Gitlab Runner package to use";
+      type = types.package;
       default = pkgs.gitlab-runner;
       defaultText = "pkgs.gitlab-runner";
-      type = types.package;
       example = literalExample "pkgs.gitlab-runner_1_11";
+      description = "Gitlab Runner package to use.";
     };
-
-    packages = mkOption {
-      default = [ pkgs.bash pkgs.docker-machine ];
-      defaultText = "[ pkgs.bash pkgs.docker-machine ]";
+    extraPackages = mkOption {
       type = types.listOf types.package;
+      default = [ ];
       description = ''
-        Packages to add to PATH for the gitlab-runner process.
+        Extra packages to add to PATH for the gitlab-runner process.
       '';
     };
+    services = mkOption {
+      description = "GitLab Runner services.";
+      default = { };
+      example = literalExample ''
+        {
+          # runner for building in docker via host's nix-daemon
+          # nix store will be readable in runner, might be insecure
+          nix = {
+            # File should contain at least these two variables:
+            # `CI_SERVER_URL`
+            # `REGISTRATION_TOKEN`
+            registrationConfigFile = "/run/secrets/gitlab-runner-registration";
+            dockerImage = "alpine";
+            dockerVolumes = [
+              "/nix/store:/nix/store:ro"
+              "/nix/var/nix/db:/nix/var/nix/db:ro"
+              "/nix/var/nix/daemon-socket:/nix/var/nix/daemon-socket:ro"
+            ];
+            dockerDisableCache = true;
+            preBuildScript = pkgs.writeScript "setup-container" '''
+              mkdir -p -m 0755 /nix/var/log/nix/drvs
+              mkdir -p -m 0755 /nix/var/nix/gcroots
+              mkdir -p -m 0755 /nix/var/nix/profiles
+              mkdir -p -m 0755 /nix/var/nix/temproots
+              mkdir -p -m 0755 /nix/var/nix/userpool
+              mkdir -p -m 1777 /nix/var/nix/gcroots/per-user
+              mkdir -p -m 1777 /nix/var/nix/profiles/per-user
+              mkdir -p -m 0755 /nix/var/nix/profiles/per-user/root
+              mkdir -p -m 0700 "$HOME/.nix-defexpr"
 
-  };
+              . ''${pkgs.nix}/etc/profile.d/nix.sh
 
+              ''${pkgs.nix}/bin/nix-env -i ''${concatStringsSep " " (with pkgs; [ nix cacert git openssh ])}
+
+              ''${pkgs.nix}/bin/nix-channel --add https://nixos.org/channels/nixpkgs-unstable
+              ''${pkgs.nix}/bin/nix-channel --update nixpkgs
+            ''';
+            environmentVariables = {
+              ENV = "/etc/profile";
+              USER = "root";
+              NIX_REMOTE = "daemon";
+              PATH = "/nix/var/nix/profiles/default/bin:/nix/var/nix/profiles/default/sbin:/bin:/sbin:/usr/bin:/usr/sbin";
+              NIX_SSL_CERT_FILE = "/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt";
+            };
+            tagList = [ "nix" ];
+          };
+          # runner for building docker images
+          docker-images = {
+            # File should contain at least these two variables:
+            # `CI_SERVER_URL`
+            # `REGISTRATION_TOKEN`
+            registrationConfigFile = "/run/secrets/gitlab-runner-registration";
+            dockerImage = "docker:stable";
+            dockerVolumes = [
+              "/var/run/docker.sock:/var/run/docker.sock"
+            ];
+            tagList = [ "docker-images" ];
+          };
+          # runner for executing stuff on host system (very insecure!)
+          # make sure to add required packages (including git!)
+          # to `environment.systemPackages`
+          shell = {
+            # File should contain at least these two variables:
+            # `CI_SERVER_URL`
+            # `REGISTRATION_TOKEN`
+            registrationConfigFile = "/run/secrets/gitlab-runner-registration";
+            executor = "shell";
+            tagList = [ "shell" ];
+          };
+          # runner for everything else
+          default = {
+            # File should contain at least these two variables:
+            # `CI_SERVER_URL`
+            # `REGISTRATION_TOKEN`
+            registrationConfigFile = "/run/secrets/gitlab-runner-registration";
+            dockerImage = "debian:stable";
+          };
+        }
+      '';
+      type = types.attrsOf (types.submodule {
+        options = {
+          registrationConfigFile = mkOption {
+            type = types.path;
+            description = ''
+              Absolute path to a file with environment variables
+              used for gitlab-runner registration.
+              A list of all supported environment variables can be found in
+              <literal>gitlab-runner register --help</literal>.
+
+              The ones you probably want to set are
+
+              <literal>CI_SERVER_URL=&lt;CI server URL&gt;</literal>
+
+              <literal>REGISTRATION_TOKEN=&lt;registration secret&gt;</literal>
+            '';
+          };
+          registrationFlags = mkOption {
+            type = types.listOf types.str;
+            default = [ ];
+            example = [ "--docker-helper-image my/gitlab-runner-helper" ];
+            description = ''
+              Extra command-line flags passed to
+              <literal>gitlab-runner register</literal>.
+              Execute <literal>gitlab-runner register --help</literal>
+              for a list of supported flags.
+            '';
+          };
+          environmentVariables = mkOption {
+            type = types.attrsOf types.str;
+            default = { };
+            example = { NAME = "value"; };
+            description = ''
+              Custom environment variables injected into the build environment.
+              For secrets you can use <option>registrationConfigFile</option>
+              with the <literal>RUNNER_ENV</literal> variable set.
+            '';
+          };
+          executor = mkOption {
+            type = types.str;
+            default = "docker";
+            description = ''
+              Select the executor, e.g. shell, docker, etc.
+              See <link xlink:href="https://docs.gitlab.com/runner/executors/README.html">runner documentation</link> for more information.
+            '';
+          };
+          buildsDir = mkOption {
+            type = types.nullOr types.path;
+            default = null;
+            example = "/var/lib/gitlab-runner/builds";
+            description = ''
+              Absolute path to a directory where builds will be stored
+              in the context of the selected executor (locally, Docker, SSH).
+            '';
+          };
+          dockerImage = mkOption {
+            type = types.nullOr types.str;
+            default = null;
+            description = ''
+              Docker image to be used.
+            '';
+          };
+          dockerVolumes = mkOption {
+            type = types.listOf types.str;
+            default = [ ];
+            example = [ "/var/run/docker.sock:/var/run/docker.sock" ];
+            description = ''
+              Bind-mount a volume and create it
+              if it doesn't exist prior to mounting.
+            '';
+          };
+          dockerDisableCache = mkOption {
+            type = types.bool;
+            default = false;
+            description = ''
+              Disable all container caching.
+            '';
+          };
+          dockerPrivileged = mkOption {
+            type = types.bool;
+            default = false;
+            description = ''
+              Give extended privileges to the container.
+            '';
+          };
+          dockerExtraHosts = mkOption {
+            type = types.listOf types.str;
+            default = [ ];
+            example = [ "other-host:127.0.0.1" ];
+            description = ''
+              Add a custom host-to-IP mapping.
+            '';
+          };
+          dockerAllowedImages = mkOption {
+            type = types.listOf types.str;
+            default = [ ];
+            example = [ "ruby:*" "python:*" "php:*" "my.registry.tld:5000/*:*" ];
+            description = ''
+              Whitelist allowed images.
+            '';
+          };
+          dockerAllowedServices = mkOption {
+            type = types.listOf types.str;
+            default = [ ];
+            example = [ "postgres:9" "redis:*" "mysql:*" ];
+            description = ''
+              Whitelist allowed services.
+            '';
+          };
+          preCloneScript = mkOption {
+            type = types.nullOr types.path;
+            default = null;
+            description = ''
+              Runner-specific command script executed before code is pulled.
+            '';
+          };
+          preBuildScript = mkOption {
+            type = types.nullOr types.path;
+            default = null;
+            description = ''
+              Runner-specific command script executed after code is pulled,
+              just before build executes.
+            '';
+          };
+          postBuildScript = mkOption {
+            type = types.nullOr types.path;
+            default = null;
+            description = ''
+              Runner-specific command script executed after code is pulled
+              and just after build executes.
+            '';
+          };
+          tagList = mkOption {
+            type = types.listOf types.str;
+            default = [ ];
+            description = ''
+              Tag list.
+            '';
+          };
+          runUntagged = mkOption {
+            type = types.bool;
+            default = false;
+            description = ''
+              Register to run untagged builds; defaults to
+              <literal>true</literal> when <option>tagList</option> is empty.
+            '';
+          };
+          limit = mkOption {
+            type = types.int;
+            default = 0;
+            description = ''
+              Limit how many jobs can be handled concurrently by this service.
+              0 (the default) means no limit.
+            '';
+          };
+          requestConcurrency = mkOption {
+            type = types.int;
+            default = 0;
+            description = ''
+              Limit the number of concurrent requests for new jobs from GitLab.
+            '';
+          };
+          maximumTimeout = mkOption {
+            type = types.int;
+            default = 0;
+            description = ''
+              Maximum timeout (in seconds) that will be set for a job when using
+              this runner. 0 (the default) means no limit.
+            '';
+          };
+          protected = mkOption {
+            type = types.bool;
+            default = false;
+            description = ''
+              When set to true, the runner will only run on pipelines
+              triggered on protected branches.
+            '';
+          };
+          debugTraceDisabled = mkOption {
+            type = types.bool;
+            default = false;
+            description = ''
+              When set to true, the runner will disable the possibility of
+              using the <literal>CI_DEBUG_TRACE</literal> feature.
+            '';
+          };
+        };
+      });
+    };
+  };
   config = mkIf cfg.enable {
+    warnings = optional (cfg.configFile != null) "services.gitlab-runner.`configFile` is deprecated, please use services.gitlab-runner.`services`.";
+    environment.systemPackages = [ cfg.package ];
     systemd.services.gitlab-runner = {
-      path = cfg.packages;
-      environment = config.networking.proxy.envVars // {
-        # Gitlab runner will not start if the HOME variable is not set
-        HOME = cfg.workDir;
-      };
       description = "Gitlab Runner";
+      documentation = [ "https://docs.gitlab.com/runner/" ];
       after = [ "network.target" ]
         ++ optional hasDocker "docker.service";
       requires = optional hasDocker "docker.service";
       wantedBy = [ "multi-user.target" ];
+      environment = config.networking.proxy.envVars // {
+        HOME = "/var/lib/gitlab-runner";
+      };
+      path = with pkgs; [
+        bash
+        gawk
+        jq
+        moreutils
+        remarshal
+        utillinux
+        cfg.package
+      ] ++ cfg.extraPackages;
       reloadIfChanged = true;
-      restartTriggers = [
-         config.environment.etc."gitlab-runner/config.toml".source
-      ];
       serviceConfig = {
+        # Set `DynamicUser` under `systemd.services.gitlab-runner.serviceConfig`
+        # to `lib.mkForce false` in your configuration to run this service as root.
+        # You can also set the `User` and `Group` options to run this service as the desired user.
+        # Make sure to restart the service, or the changes won't apply.
+        DynamicUser = true;
         StateDirectory = "gitlab-runner";
-        ExecReload= "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
-        ExecStart = ''${cfg.package.bin}/bin/gitlab-runner run \
-          --working-directory ${cfg.workDir} \
-          --config /etc/gitlab-runner/config.toml \
-          --service gitlab-runner \
-          --user gitlab-runner \
-        '';
-
-      } //  optionalAttrs (cfg.gracefulTermination) {
+        SupplementaryGroups = optional hasDocker "docker";
+        ExecStartPre = "!${configureScript}/bin/gitlab-runner-configure";
+        ExecStart = "${startScript}/bin/gitlab-runner-start";
+        ExecReload = "!${configureScript}/bin/gitlab-runner-configure";
+      } // optionalAttrs (cfg.gracefulTermination) {
         TimeoutStopSec = "${cfg.gracefulTimeout}";
         KillSignal = "SIGQUIT";
         KillMode = "process";
       };
     };
-
-    # Make the gitlab-runner command availabe so users can query the runner
-    environment.systemPackages = [ cfg.package ];
-
-    # Make sure the config can be reloaded on change
-    environment.etc."gitlab-runner/config.toml".source = configFile;
-
-    users.users.gitlab-runner = {
-      group = "gitlab-runner";
-      extraGroups = optional hasDocker "docker";
-      uid = config.ids.uids.gitlab-runner;
-      home = cfg.workDir;
-      createHome = true;
-    };
-
-    users.groups.gitlab-runner.gid = config.ids.gids.gitlab-runner;
+    # Enable docker if `docker` executor is used in any service
+    virtualisation.docker.enable = mkIf (
+      any (s: s.executor == "docker") (attrValues cfg.services)
+    ) (mkDefault true);
   };
+  imports = [
+    (mkRenamedOptionModule [ "services" "gitlab-runner" "packages" ] [ "services" "gitlab-runner" "extraPackages" ] )
+    (mkRemovedOptionModule [ "services" "gitlab-runner" "configOptions" ] "Use services.gitlab-runner.services option instead" )
+    (mkRemovedOptionModule [ "services" "gitlab-runner" "workDir" ] "You should move contents of workDir (if any) to /var/lib/gitlab-runner" )
+  ];
 }
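The reworked module above registers runners declaratively through services.gitlab-runner.services and runs the daemon under DynamicUser. A minimal usage sketch, assuming a registration file at a placeholder secrets path; the mkForce line is only needed if you want to opt back into running the daemon as root:

    { lib, ... }:
    {
      services.gitlab-runner = {
        enable = true;
        services.default = {
          # must define CI_SERVER_URL and REGISTRATION_TOKEN; the path is a placeholder
          registrationConfigFile = "/run/secrets/gitlab-runner-registration";
          dockerImage = "alpine";
        };
      };

      # optional: run the daemon as root instead of under a dynamic user
      systemd.services.gitlab-runner.serviceConfig.DynamicUser = lib.mkForce false;
    }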
diff --git a/nixos/modules/services/continuous-integration/hydra/default.nix b/nixos/modules/services/continuous-integration/hydra/default.nix
index 52e4a3aed33..502a5898a5d 100644
--- a/nixos/modules/services/continuous-integration/hydra/default.nix
+++ b/nixos/modules/services/continuous-integration/hydra/default.nix
@@ -39,6 +39,36 @@ let
 
   inherit (config.system) stateVersion;
 
+  hydra-package =
+  let
+    makeWrapperArgs = concatStringsSep " " (mapAttrsToList (key: value: "--set \"${key}\" \"${value}\"") hydraEnv);
+  in pkgs.buildEnv rec {
+    name = "hydra-env";
+    buildInputs = [ pkgs.makeWrapper ];
+    paths = [ cfg.package ];
+
+    postBuild = ''
+      if [ -L "$out/bin" ]; then
+          unlink "$out/bin"
+      fi
+      mkdir -p "$out/bin"
+
+      for path in ${concatStringsSep " " paths}; do
+        if [ -d "$path/bin" ]; then
+          cd "$path/bin"
+          for prg in *; do
+            if [ -f "$prg" ]; then
+              rm -f "$out/bin/$prg"
+              if [ -x "$prg" ]; then
+                makeWrapper "$path/bin/$prg" "$out/bin/$prg" ${makeWrapperArgs}
+              fi
+            fi
+          done
+        fi
+      done
+   '';
+  };
+
 in
 
 {
@@ -266,7 +296,7 @@ in
         use-substitutes = ${if cfg.useSubstitutes then "1" else "0"}
       '';
 
-    environment.systemPackages = [ cfg.package ];
+    environment.systemPackages = [ hydra-package ];
 
     environment.variables = hydraEnv;
 
@@ -327,7 +357,7 @@ in
           chown hydra.hydra ${cfg.gcRootsDir}
           chmod 2775 ${cfg.gcRootsDir}
         '';
-        serviceConfig.ExecStart = "${cfg.package}/bin/hydra-init";
+        serviceConfig.ExecStart = "${hydra-package}/bin/hydra-init";
         serviceConfig.PermissionsStartOnly = true;
         serviceConfig.User = "hydra";
         serviceConfig.Type = "oneshot";
@@ -342,7 +372,7 @@ in
         restartTriggers = [ hydraConf ];
         serviceConfig =
           { ExecStart =
-              "@${cfg.package}/bin/hydra-server hydra-server -f -h '${cfg.listenHost}' "
+              "@${hydra-package}/bin/hydra-server hydra-server -f -h '${cfg.listenHost}' "
               + "-p ${toString cfg.port} --max_spare_servers 5 --max_servers 25 "
               + "--max_requests 100 ${optionalString cfg.debugServer "-d"}";
             User = "hydra-www";
@@ -355,15 +385,15 @@ in
       { wantedBy = [ "multi-user.target" ];
         requires = [ "hydra-init.service" ];
         after = [ "hydra-init.service" "network.target" ];
-        path = [ cfg.package pkgs.nettools pkgs.openssh pkgs.bzip2 config.nix.package ];
+        path = [ hydra-package pkgs.nettools pkgs.openssh pkgs.bzip2 config.nix.package ];
         restartTriggers = [ hydraConf ];
         environment = env // {
           PGPASSFILE = "${baseDir}/pgpass-queue-runner"; # grrr
           IN_SYSTEMD = "1"; # to get log severity levels
         };
         serviceConfig =
-          { ExecStart = "@${cfg.package}/bin/hydra-queue-runner hydra-queue-runner -v";
-            ExecStopPost = "${cfg.package}/bin/hydra-queue-runner --unlock";
+          { ExecStart = "@${hydra-package}/bin/hydra-queue-runner hydra-queue-runner -v";
+            ExecStopPost = "${hydra-package}/bin/hydra-queue-runner --unlock";
             User = "hydra-queue-runner";
             Restart = "always";
 
@@ -377,11 +407,11 @@ in
       { wantedBy = [ "multi-user.target" ];
         requires = [ "hydra-init.service" ];
         after = [ "hydra-init.service" "network.target" ];
-        path = with pkgs; [ cfg.package nettools jq ];
+        path = with pkgs; [ hydra-package nettools jq ];
         restartTriggers = [ hydraConf ];
         environment = env;
         serviceConfig =
-          { ExecStart = "@${cfg.package}/bin/hydra-evaluator hydra-evaluator";
+          { ExecStart = "@${hydra-package}/bin/hydra-evaluator hydra-evaluator";
             User = "hydra";
             Restart = "always";
             WorkingDirectory = baseDir;
@@ -393,7 +423,7 @@ in
         after = [ "hydra-init.service" ];
         environment = env;
         serviceConfig =
-          { ExecStart = "@${cfg.package}/bin/hydra-update-gc-roots hydra-update-gc-roots";
+          { ExecStart = "@${hydra-package}/bin/hydra-update-gc-roots hydra-update-gc-roots";
             User = "hydra";
           };
         startAt = "2,14:15";
@@ -404,7 +434,7 @@ in
         after = [ "hydra-init.service" ];
         environment = env;
         serviceConfig =
-          { ExecStart = "@${cfg.package}/bin/hydra-send-stats hydra-send-stats";
+          { ExecStart = "@${hydra-package}/bin/hydra-send-stats hydra-send-stats";
             User = "hydra";
           };
       };
@@ -418,7 +448,7 @@ in
           PGPASSFILE = "${baseDir}/pgpass-queue-runner";
         };
         serviceConfig =
-          { ExecStart = "@${cfg.package}/bin/hydra-notify hydra-notify";
+          { ExecStart = "@${hydra-package}/bin/hydra-notify hydra-notify";
             # FIXME: run this under a less privileged user?
             User = "hydra-queue-runner";
             Restart = "always";
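The hydra-env wrapper introduced above bakes hydraEnv into every Hydra binary via makeWrapper. A rough sketch of how makeWrapperArgs is built, assuming lib is in scope and using placeholder hydraEnv values (the real set is defined earlier in the module):

    let
      hydraEnv = { HYDRA_CONFIG = "/var/lib/hydra/hydra.conf"; HYDRA_DATA = "/var/lib/hydra"; };
      makeWrapperArgs = lib.concatStringsSep " "
        (lib.mapAttrsToList (key: value: ''--set "${key}" "${value}"'') hydraEnv);
    in
      # evaluates to:
      #   --set "HYDRA_CONFIG" "/var/lib/hydra/hydra.conf" --set "HYDRA_DATA" "/var/lib/hydra"
      makeWrapperArgs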
diff --git a/nixos/modules/services/databases/clickhouse.nix b/nixos/modules/services/databases/clickhouse.nix
index dbabcae43ee..27440fec4e1 100644
--- a/nixos/modules/services/databases/clickhouse.nix
+++ b/nixos/modules/services/databases/clickhouse.nix
@@ -11,10 +11,7 @@ with lib;
 
     services.clickhouse = {
 
-      enable = mkOption {
-        default = false;
-        description = "Whether to enable ClickHouse database server.";
-      };
+      enable = mkEnableOption "ClickHouse database server";
 
     };
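Several modules in this patch (ClickHouse here, and Firebird, Memcached, MongoDB, and others below) collapse hand-rolled boolean options into mkEnableOption. For reference, mkEnableOption "ClickHouse database server" expands to roughly the following:

    mkOption {
      type = types.bool;
      default = false;
      example = true;
      description = "Whether to enable ClickHouse database server.";
    }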
 
diff --git a/nixos/modules/services/databases/cockroachdb.nix b/nixos/modules/services/databases/cockroachdb.nix
index b6f94a4881a..35fb46d69d8 100644
--- a/nixos/modules/services/databases/cockroachdb.nix
+++ b/nixos/modules/services/databases/cockroachdb.nix
@@ -153,7 +153,7 @@ in
         defaultText = "pkgs.cockroachdb";
         description = ''
           The CockroachDB derivation to use for running the service.
-          
+
           This would primarily be useful to enable Enterprise Edition features
           in your own custom CockroachDB build (Nixpkgs CockroachDB binaries
           only contain open source features and open source code).
diff --git a/nixos/modules/services/databases/firebird.nix b/nixos/modules/services/databases/firebird.nix
index 042c9841df5..95837aa1cea 100644
--- a/nixos/modules/services/databases/firebird.nix
+++ b/nixos/modules/services/databases/firebird.nix
@@ -40,12 +40,7 @@ in
 
     services.firebird = {
 
-      enable = mkOption {
-        default = false;
-        description = ''
-          Whether to enable the Firebird super server.
-        '';
-      };
+      enable = mkEnableOption "the Firebird super server";
 
       package = mkOption {
         default = pkgs.firebirdSuper;
diff --git a/nixos/modules/services/databases/memcached.nix b/nixos/modules/services/databases/memcached.nix
index 89ff957babf..f54bb6cc9b1 100644
--- a/nixos/modules/services/databases/memcached.nix
+++ b/nixos/modules/services/databases/memcached.nix
@@ -18,12 +18,7 @@ in
 
     services.memcached = {
 
-      enable = mkOption {
-        default = false;
-        description = "
-          Whether to enable Memcached.
-        ";
-      };
+      enable = mkEnableOption "Memcached";
 
       user = mkOption {
         default = "memcached";
diff --git a/nixos/modules/services/databases/mongodb.nix b/nixos/modules/services/databases/mongodb.nix
index 12879afed47..4453a182990 100644
--- a/nixos/modules/services/databases/mongodb.nix
+++ b/nixos/modules/services/databases/mongodb.nix
@@ -29,12 +29,7 @@ in
 
     services.mongodb = {
 
-      enable = mkOption {
-        default = false;
-        description = "
-          Whether to enable the MongoDB server.
-        ";
-      };
+      enable = mkEnableOption "the MongoDB server";
 
       package = mkOption {
         default = pkgs.mongodb;
diff --git a/nixos/modules/services/databases/openldap.nix b/nixos/modules/services/databases/openldap.nix
index 809f61cfa81..8c2851c37ac 100644
--- a/nixos/modules/services/databases/openldap.nix
+++ b/nixos/modules/services/databases/openldap.nix
@@ -231,6 +231,10 @@ in
 
   };
 
+  meta = {
+    maintainers = lib.maintainers.mic92;
+  };
+
 
   ###### implementation
 
diff --git a/nixos/modules/services/databases/postgresql.nix b/nixos/modules/services/databases/postgresql.nix
index 0b79a996dc7..93f5c1ca5f5 100644
--- a/nixos/modules/services/databases/postgresql.nix
+++ b/nixos/modules/services/databases/postgresql.nix
@@ -34,13 +34,7 @@ in
 
     services.postgresql = {
 
-      enable = mkOption {
-        type = types.bool;
-        default = false;
-        description = ''
-          Whether to run PostgreSQL.
-        '';
-      };
+      enable = mkEnableOption "PostgreSQL Server";
 
       package = mkOption {
         type = types.package;
diff --git a/nixos/modules/services/databases/virtuoso.nix b/nixos/modules/services/databases/virtuoso.nix
index 0cc027cb1d7..6eb09e0a58f 100644
--- a/nixos/modules/services/databases/virtuoso.nix
+++ b/nixos/modules/services/databases/virtuoso.nix
@@ -13,10 +13,7 @@ with lib;
 
     services.virtuoso = {
 
-      enable = mkOption {
-        default = false;
-        description = "Whether to enable Virtuoso Opensource database server.";
-      };
+      enable = mkEnableOption "Virtuoso Opensource database server";
 
       config = mkOption {
         default = "";
diff --git a/nixos/modules/services/editors/emacs.xml b/nixos/modules/services/editors/emacs.xml
index 03483f69fa2..74c60014dce 100644
--- a/nixos/modules/services/editors/emacs.xml
+++ b/nixos/modules/services/editors/emacs.xml
@@ -294,7 +294,7 @@ https://nixos.org/nixpkgs/manual/#sec-modify-via-packageOverrides
     If you are not on NixOS or want to install this particular Emacs only for
     yourself, you can do so by adding it to your
     <filename>~/.config/nixpkgs/config.nix</filename> (see
-    <link xlink:href="http://nixos.org/nixpkgs/manual/#sec-modify-via-packageOverrides">Nixpkgs
+    <link xlink:href="https://nixos.org/nixpkgs/manual/#sec-modify-via-packageOverrides">Nixpkgs
     manual</link>):
     <example xml:id="module-services-emacs-config-nix">
      <title>Custom Emacs in <filename>~/.config/nixpkgs/config.nix</filename></title>
diff --git a/nixos/modules/services/hardware/ratbagd.nix b/nixos/modules/services/hardware/ratbagd.nix
index 103e1d2315a..01a8276750f 100644
--- a/nixos/modules/services/hardware/ratbagd.nix
+++ b/nixos/modules/services/hardware/ratbagd.nix
@@ -10,12 +10,7 @@ in
 
   options = {
     services.ratbagd = {
-      enable = mkOption {
-        default = false;
-        description = ''
-          Whether to enable ratbagd for configuring gaming mice.
-        '';
-      };
+      enable = mkEnableOption "ratbagd for configuring gaming mice";
     };
   };
 
diff --git a/nixos/modules/services/hardware/thermald.nix b/nixos/modules/services/hardware/thermald.nix
index 69577bbe018..ecb529e9bf0 100644
--- a/nixos/modules/services/hardware/thermald.nix
+++ b/nixos/modules/services/hardware/thermald.nix
@@ -8,12 +8,7 @@ in {
   ###### interface
   options = {
     services.thermald = {
-      enable = mkOption {
-        default = false;
-        description = ''
-          Whether to enable thermald, the temperature management daemon.
-        '';
-      };
+      enable = mkEnableOption "thermald, the temperature management daemon";
 
       debug = mkOption {
         type = types.bool;
diff --git a/nixos/modules/services/logging/awstats.nix b/nixos/modules/services/logging/awstats.nix
index 5939d7808f7..896f52302ff 100644
--- a/nixos/modules/services/logging/awstats.nix
+++ b/nixos/modules/services/logging/awstats.nix
@@ -24,7 +24,7 @@ let
 
       logFile = mkOption {
         type = types.str;
-        example = "/var/spool/nginx/logs/access.log";
+        example = "/var/log/nginx/access.log";
         description = ''
           The log file to be scanned.
 
@@ -110,7 +110,7 @@ in
         {
           "mysite" = {
             domain = "example.com";
-            logFile = "/var/spool/nginx/logs/access.log";
+            logFile = "/var/log/nginx/access.log";
           };
         }
       '';
diff --git a/nixos/modules/services/mail/dovecot.nix b/nixos/modules/services/mail/dovecot.nix
index 230a2ae3f82..9fbf0c19752 100644
--- a/nixos/modules/services/mail/dovecot.nix
+++ b/nixos/modules/services/mail/dovecot.nix
@@ -407,7 +407,7 @@ in
 
       after = [ "network.target" ];
       wantedBy = [ "multi-user.target" ];
-      restartTriggers = [ cfg.configFile ];
+      restartTriggers = [ cfg.configFile modulesDir ];
 
       serviceConfig = {
         ExecStart = "${dovecotPkg}/sbin/dovecot -F";
diff --git a/nixos/modules/services/mail/postfix.nix b/nixos/modules/services/mail/postfix.nix
index 19e11b31d9c..608f64a68fb 100644
--- a/nixos/modules/services/mail/postfix.nix
+++ b/nixos/modules/services/mail/postfix.nix
@@ -269,6 +269,7 @@ in
       };
 
       enableSmtp = mkOption {
+        type = types.bool;
         default = true;
         description = "Whether to enable smtp in master.cf.";
       };
diff --git a/nixos/modules/services/mail/roundcube.nix b/nixos/modules/services/mail/roundcube.nix
index 0bb0eaedad5..ed1439745ac 100644
--- a/nixos/modules/services/mail/roundcube.nix
+++ b/nixos/modules/services/mail/roundcube.nix
@@ -7,6 +7,7 @@ let
   fpm = config.services.phpfpm.pools.roundcube;
   localDB = cfg.database.host == "localhost";
   user = cfg.database.username;
+  phpWithPspell = pkgs.php.withExtensions ({ enabled, all }: [ all.pspell ] ++ enabled);
 in
 {
   options.services.roundcube = {
@@ -85,6 +86,15 @@ in
       '';
     };
 
+    dicts = mkOption {
+      type = types.listOf types.package;
+      default = [];
+      example = literalExample "with pkgs.aspellDicts; [ en fr de ]";
+      description = ''
+        List of aspell dictionaries for spell checking. If empty, spell checking is disabled.
+      '';
+    };
+
     extraConfig = mkOption {
       type = types.lines;
       default = "";
@@ -109,6 +119,11 @@ in
       $config['plugins'] = [${concatMapStringsSep "," (p: "'${p}'") cfg.plugins}];
       $config['des_key'] = file_get_contents('/var/lib/roundcube/des_key');
       $config['mime_types'] = '${pkgs.nginx}/conf/mime.types';
+      $config['enable_spellcheck'] = ${if cfg.dicts == [] then "false" else "true"};
+      # by default, spellchecking uses a third-party cloud service
+      $config['spellcheck_engine'] = 'pspell';
+      $config['spellcheck_languages'] = array(${lib.concatMapStringsSep ", " (dict: let p = builtins.parseDrvName dict.shortName; in "'${p.name}' => '${dict.fullName}'") cfg.dicts});
+
       ${cfg.extraConfig}
     '';
 
@@ -172,6 +187,8 @@ in
         "pm.max_requests" = 500;
         "catch_workers_output" = true;
       };
+      phpPackage = phpWithPspell;
+      phpEnv.ASPELL_CONF = "dict-dir ${pkgs.aspellWithDicts (_: cfg.dicts)}/lib/aspell";
     };
     systemd.services.phpfpm-roundcube.after = [ "roundcube-setup.service" ];
 
@@ -199,7 +216,7 @@ in
             ${psql} <<< 'TRUNCATE TABLE session;'
           fi
 
-          ${pkgs.php}/bin/php ${cfg.package}/bin/update.sh
+          ${phpWithPspell}/bin/php ${cfg.package}/bin/update.sh
         '';
         serviceConfig = {
           Type = "oneshot";
diff --git a/nixos/modules/services/mail/rss2email.nix b/nixos/modules/services/mail/rss2email.nix
index c1e5964c453..7f8d2adac64 100644
--- a/nixos/modules/services/mail/rss2email.nix
+++ b/nixos/modules/services/mail/rss2email.nix
@@ -91,6 +91,8 @@ in {
       };
     };
 
+    environment.systemPackages = with pkgs; [ rss2email ];
+
     services.rss2email.config.to = cfg.to;
 
     systemd.tmpfiles.rules = [
diff --git a/nixos/modules/services/mail/spamassassin.nix b/nixos/modules/services/mail/spamassassin.nix
index 2d5fb40fad3..4e642542ec6 100644
--- a/nixos/modules/services/mail/spamassassin.nix
+++ b/nixos/modules/services/mail/spamassassin.nix
@@ -12,12 +12,10 @@ in
   options = {
 
     services.spamassassin = {
-      enable = mkOption {
-        default = false;
-        description = "Whether to run the SpamAssassin daemon";
-      };
+      enable = mkEnableOption "the SpamAssassin daemon";
 
       debug = mkOption {
+        type = types.bool;
         default = false;
         description = "Whether to run the SpamAssassin daemon in debug mode";
       };
diff --git a/nixos/modules/services/misc/airsonic.nix b/nixos/modules/services/misc/airsonic.nix
index c296e048cea..5cc2ff7f4bd 100644
--- a/nixos/modules/services/misc/airsonic.nix
+++ b/nixos/modules/services/misc/airsonic.nix
@@ -138,6 +138,7 @@ in {
 
     services.nginx = mkIf (cfg.virtualHost != null) {
       enable = true;
+      recommendedProxySettings = true;
       virtualHosts.${cfg.virtualHost} = {
         locations.${cfg.contextPath}.proxyPass = "http://${cfg.listenAddress}:${toString cfg.port}";
       };
diff --git a/nixos/modules/services/misc/autofs.nix b/nixos/modules/services/misc/autofs.nix
index f1742177326..5e7c1e66828 100644
--- a/nixos/modules/services/misc/autofs.nix
+++ b/nixos/modules/services/misc/autofs.nix
@@ -19,6 +19,7 @@ in
     services.autofs = {
 
       enable = mkOption {
+        type = types.bool;
         default = false;
         description = ''
           Mount filesystems on demand. Unmount them automatically.
@@ -56,6 +57,7 @@ in
       };
 
       debug = mkOption {
+        type = types.bool;
         default = false;
         description = ''
           Pass -d and -7 to automount and write log to the system journal.
diff --git a/nixos/modules/services/misc/cgminer.nix b/nixos/modules/services/misc/cgminer.nix
index 9fcae645269..7635c2a0f4e 100644
--- a/nixos/modules/services/misc/cgminer.nix
+++ b/nixos/modules/services/misc/cgminer.nix
@@ -31,13 +31,7 @@ in
 
     services.cgminer = {
 
-      enable = mkOption {
-        default = false;
-        description = ''
-          Whether to enable cgminer, an ASIC/FPGA/GPU miner for bitcoin and
-          litecoin.
-        '';
-      };
+      enable = mkEnableOption "cgminer, an ASIC/FPGA/GPU miner for bitcoin and litecoin";
 
       package = mkOption {
         default = pkgs.cgminer;
diff --git a/nixos/modules/services/misc/confd.nix b/nixos/modules/services/misc/confd.nix
index 8e9bec15dd4..c1ebdb3dde9 100755
--- a/nixos/modules/services/misc/confd.nix
+++ b/nixos/modules/services/misc/confd.nix
@@ -75,7 +75,7 @@ in {
       wantedBy = [ "multi-user.target" ];
       after = [ "network.target" ];
       serviceConfig = {
-        ExecStart = "${cfg.package.bin}/bin/confd";
+        ExecStart = "${cfg.package}/bin/confd";
       };
     };
 
diff --git a/nixos/modules/services/misc/devmon.nix b/nixos/modules/services/misc/devmon.nix
index 9dc8fee2964..e4a3348646b 100644
--- a/nixos/modules/services/misc/devmon.nix
+++ b/nixos/modules/services/misc/devmon.nix
@@ -8,12 +8,7 @@ let
 in {
   options = {
     services.devmon = {
-      enable = mkOption {
-        default = false;
-        description = ''
-          Whether to enable devmon, an automatic device mounting daemon.
-        '';
-      };
+      enable = mkEnableOption "devmon, an automatic device mounting daemon";
     };
   };
 
diff --git a/nixos/modules/services/misc/disnix.nix b/nixos/modules/services/misc/disnix.nix
index b7b6eb7cd66..69386cdbb38 100644
--- a/nixos/modules/services/misc/disnix.nix
+++ b/nixos/modules/services/misc/disnix.nix
@@ -17,10 +17,7 @@ in
 
     services.disnix = {
 
-      enable = mkOption {
-        default = false;
-        description = "Whether to enable Disnix";
-      };
+      enable = mkEnableOption "Disnix";
 
       enableMultiUser = mkOption {
         type = types.bool;
@@ -28,10 +25,7 @@ in
         description = "Whether to support multi-user mode by enabling the Disnix D-Bus service";
       };
 
-      useWebServiceInterface = mkOption {
-        default = false;
-        description = "Whether to enable the DisnixWebService interface running on Apache Tomcat";
-      };
+      useWebServiceInterface = mkEnableOption "the DisnixWebService interface running on Apache Tomcat";
 
       package = mkOption {
         type = types.path;
diff --git a/nixos/modules/services/misc/dysnomia.nix b/nixos/modules/services/misc/dysnomia.nix
index 33a6fb15264..4b52963500d 100644
--- a/nixos/modules/services/misc/dysnomia.nix
+++ b/nixos/modules/services/misc/dysnomia.nix
@@ -177,7 +177,7 @@ in
       wrapper = {};
     }
     // lib.optionalAttrs (config.services.httpd.enable) { apache-webapplication = {
-      documentRoot = config.services.httpd.documentRoot;
+      documentRoot = config.services.httpd.virtualHosts.localhost.documentRoot;
     }; }
     // lib.optionalAttrs (config.services.tomcat.axis2.enable) { axis2-webservice = {}; }
     // lib.optionalAttrs (config.services.ejabberd.enable) { ejabberd-dump = {
diff --git a/nixos/modules/services/misc/etcd.nix b/nixos/modules/services/misc/etcd.nix
index 7322e1c080b..32360d43768 100644
--- a/nixos/modules/services/misc/etcd.nix
+++ b/nixos/modules/services/misc/etcd.nix
@@ -178,7 +178,7 @@ in {
 
       serviceConfig = {
         Type = "notify";
-        ExecStart = "${pkgs.etcd.bin}/bin/etcd";
+        ExecStart = "${pkgs.etcd}/bin/etcd";
         User = "etcd";
         LimitNOFILE = 40000;
       };
diff --git a/nixos/modules/services/misc/felix.nix b/nixos/modules/services/misc/felix.nix
index 188e45abc58..21740c8c0b7 100644
--- a/nixos/modules/services/misc/felix.nix
+++ b/nixos/modules/services/misc/felix.nix
@@ -17,10 +17,7 @@ in
 
     services.felix = {
 
-      enable = mkOption {
-        default = false;
-        description = "Whether to enable the Apache Felix OSGi service";
-      };
+      enable = mkEnableOption "the Apache Felix OSGi service";
 
       bundles = mkOption {
         type = types.listOf types.package;
diff --git a/nixos/modules/services/misc/gitea.nix b/nixos/modules/services/misc/gitea.nix
index 38910a5a005..f8bcedc94fe 100644
--- a/nixos/modules/services/misc/gitea.nix
+++ b/nixos/modules/services/misc/gitea.nix
@@ -14,53 +14,9 @@ let
     RUN_USER = ${cfg.user}
     RUN_MODE = prod
 
-    [database]
-    DB_TYPE = ${cfg.database.type}
-    ${optionalString (usePostgresql || useMysql) ''
-      HOST = ${if cfg.database.socket != null then cfg.database.socket else cfg.database.host + ":" + toString cfg.database.port}
-      NAME = ${cfg.database.name}
-      USER = ${cfg.database.user}
-      PASSWD = #dbpass#
-    ''}
-    ${optionalString useSqlite ''
-      PATH = ${cfg.database.path}
-    ''}
-    ${optionalString usePostgresql ''
-      SSL_MODE = disable
-    ''}
-
-    [repository]
-    ROOT = ${cfg.repositoryRoot}
-
-    [server]
-    DOMAIN = ${cfg.domain}
-    HTTP_ADDR = ${cfg.httpAddress}
-    HTTP_PORT = ${toString cfg.httpPort}
-    ROOT_URL = ${cfg.rootUrl}
-    STATIC_ROOT_PATH = ${cfg.staticRootPath}
-    LFS_JWT_SECRET = #jwtsecret#
-
-    [session]
-    COOKIE_NAME = session
-    COOKIE_SECURE = ${boolToString cfg.cookieSecure}
-
-    [security]
-    SECRET_KEY = #secretkey#
-    INSTALL_LOCK = true
-
-    [log]
-    ROOT_PATH = ${cfg.log.rootPath}
-    LEVEL = ${cfg.log.level}
-
-    [service]
-    DISABLE_REGISTRATION = ${boolToString cfg.disableRegistration}
-
-    ${optionalString (cfg.mailerPasswordFile != null) ''
-      [mailer]
-      PASSWD = #mailerpass#
-    ''}
-
-    ${cfg.extraConfig}
+    ${generators.toINI {} cfg.settings}
+
+    ${optionalString (cfg.extraConfig != null) cfg.extraConfig}
   '';
 in
 
@@ -279,9 +235,36 @@ in
         '';
       };
 
+      settings = mkOption {
+        type = with types; attrsOf (attrsOf (oneOf [ bool int str ]));
+        default = {};
+        description = ''
+          Gitea configuration. Refer to <link xlink:href="https://docs.gitea.io/en-us/config-cheat-sheet/"/>
+          for details on supported values.
+        '';
+        example = literalExample ''
+          {
+            "cron.sync_external_users" = {
+              RUN_AT_START = true;
+              SCHEDULE = "@every 24h";
+              UPDATE_EXISTING = true;
+            };
+            mailer = {
+              ENABLED = true;
+              MAILER_TYPE = "sendmail";
+              FROM = "do-not-reply@example.org";
+              SENDMAIL_PATH = "${pkgs.system-sendmail}/bin/sendmail";
+            };
+            other = {
+              SHOW_FOOTER_VERSION = false;
+            };
+          }
+        '';
+      };
+
       extraConfig = mkOption {
-        type = types.str;
-        default = "";
+        type = with types; nullOr str;
+        default = null;
         description = "Configuration lines appended to the generated gitea configuration file.";
       };
     };
@@ -294,6 +277,62 @@ in
       }
     ];
 
+    services.gitea.settings = {
+      database = mkMerge [
+        {
+          DB_TYPE = cfg.database.type;
+        }
+        (mkIf (useMysql || usePostgresql) {
+          HOST = if cfg.database.socket != null then cfg.database.socket else cfg.database.host + ":" + toString cfg.database.port;
+          NAME = cfg.database.name;
+          USER = cfg.database.user;
+          PASSWD = "#dbpass#";
+        })
+        (mkIf useSqlite {
+          PATH = cfg.database.path;
+        })
+        (mkIf usePostgresql {
+          SSL_MODE = "disable";
+        })
+      ];
+
+      repository = {
+        ROOT = cfg.repositoryRoot;
+      };
+
+      server = {
+        DOMAIN = cfg.domain;
+        HTTP_ADDR = cfg.httpAddress;
+        HTTP_PORT = cfg.httpPort;
+        ROOT_URL = cfg.rootUrl;
+        STATIC_ROOT_PATH = cfg.staticRootPath;
+        LFS_JWT_SECRET = "#jwtsecret#";
+      };
+
+      session = {
+        COOKIE_NAME = "session";
+        COOKIE_SECURE = cfg.cookieSecure;
+      };
+
+      security = {
+        SECRET_KEY = "#secretkey#";
+        INSTALL_LOCK = true;
+      };
+
+      log = {
+        ROOT_PATH = cfg.log.rootPath;
+        LEVEL = cfg.log.level;
+      };
+
+      service = {
+        DISABLE_REGISTRATION = cfg.disableRegistration;
+      };
+
+      mailer = mkIf (cfg.mailerPasswordFile != null) {
+        PASSWD = "#mailerpass#";
+      };
+    };
+
     services.postgresql = optionalAttrs (usePostgresql && cfg.database.createDatabase) {
       enable = mkDefault true;
 
@@ -335,7 +374,7 @@ in
       description = "gitea";
       after = [ "network.target" ] ++ lib.optional usePostgresql "postgresql.service" ++ lib.optional useMysql "mysql.service";
       wantedBy = [ "multi-user.target" ];
-      path = [ gitea.bin pkgs.gitAndTools.git ];
+      path = [ gitea pkgs.gitAndTools.git ];
 
       preStart = let
         runConfig = "${cfg.stateDir}/custom/conf/app.ini";
@@ -347,11 +386,11 @@ in
           cp -f ${configFile} ${runConfig}
 
           if [ ! -e ${secretKey} ]; then
-              ${gitea.bin}/bin/gitea generate secret SECRET_KEY > ${secretKey}
+              ${gitea}/bin/gitea generate secret SECRET_KEY > ${secretKey}
           fi
 
           if [ ! -e ${jwtSecret} ]; then
-              ${gitea.bin}/bin/gitea generate secret LFS_JWT_SECRET > ${jwtSecret}
+              ${gitea}/bin/gitea generate secret LFS_JWT_SECRET > ${jwtSecret}
           fi
 
           KEY="$(head -n1 ${secretKey})"
@@ -374,7 +413,7 @@ in
         HOOKS=$(find ${cfg.repositoryRoot} -mindepth 4 -maxdepth 6 -type f -wholename "*git/hooks/*")
         if [ "$HOOKS" ]
         then
-          sed -ri 's,/nix/store/[a-z0-9.-]+/bin/gitea,${gitea.bin}/bin/gitea,g' $HOOKS
+          sed -ri 's,/nix/store/[a-z0-9.-]+/bin/gitea,${gitea}/bin/gitea,g' $HOOKS
           sed -ri 's,/nix/store/[a-z0-9.-]+/bin/env,${pkgs.coreutils}/bin/env,g' $HOOKS
           sed -ri 's,/nix/store/[a-z0-9.-]+/bin/bash,${pkgs.bash}/bin/bash,g' $HOOKS
           sed -ri 's,/nix/store/[a-z0-9.-]+/bin/perl,${pkgs.perl}/bin/perl,g' $HOOKS
@@ -383,7 +422,7 @@ in
         # update command option in authorized_keys
         if [ -r ${cfg.stateDir}/.ssh/authorized_keys ]
         then
-          sed -ri 's,/nix/store/[a-z0-9.-]+/bin/gitea,${gitea.bin}/bin/gitea,g' ${cfg.stateDir}/.ssh/authorized_keys
+          sed -ri 's,/nix/store/[a-z0-9.-]+/bin/gitea,${gitea}/bin/gitea,g' ${cfg.stateDir}/.ssh/authorized_keys
         fi
       '';
 
@@ -392,7 +431,7 @@ in
         User = cfg.user;
         Group = "gitea";
         WorkingDirectory = cfg.stateDir;
-        ExecStart = "${gitea.bin}/bin/gitea web";
+        ExecStart = "${gitea}/bin/gitea web";
         Restart = "always";
 
         # Filesystem
@@ -435,9 +474,12 @@ in
 
     users.groups.gitea = {};
 
-    warnings = optional (cfg.database.password != "")
-      ''config.services.gitea.database.password will be stored as plaintext
-        in the Nix store. Use database.passwordFile instead.'';
+    warnings =
+      optional (cfg.database.password != "") ''
+        config.services.gitea.database.password will be stored as plaintext in the Nix store. Use database.passwordFile instead.'' ++
+      optional (cfg.extraConfig != null) ''
+        services.gitea.`extraConfig` is deprecated, please use services.gitea.`settings`.
+      '';
 
     # Create database passwordFile default when password is configured.
     services.gitea.database.passwordFile =
@@ -450,7 +492,7 @@ in
        description = "gitea dump";
        after = [ "gitea.service" ];
        wantedBy = [ "default.target" ];
-       path = [ gitea.bin ];
+       path = [ gitea ];
 
        environment = {
          USER = cfg.user;
@@ -461,7 +503,7 @@ in
        serviceConfig = {
          Type = "oneshot";
          User = cfg.user;
-         ExecStart = "${gitea.bin}/bin/gitea dump";
+         ExecStart = "${gitea}/bin/gitea dump";
          WorkingDirectory = cfg.stateDir;
        };
     };
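Since app.ini is now rendered with generators.toINI, any attrset placed in services.gitea.settings becomes an INI section. A small sketch, with placeholder mailer values:

    services.gitea.settings = {
      mailer = {
        ENABLED = true;
        MAILER_TYPE = "sendmail";
      };
    };

    # which generators.toINI renders roughly as:
    #   [mailer]
    #   ENABLED=true
    #   MAILER_TYPE=sendmail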
diff --git a/nixos/modules/services/misc/gitlab.nix b/nixos/modules/services/misc/gitlab.nix
index aa958985379..730166b04d2 100644
--- a/nixos/modules/services/misc/gitlab.nix
+++ b/nixos/modules/services/misc/gitlab.nix
@@ -180,7 +180,7 @@ let
         ${optionalString (cfg.smtp.passwordFile != null) ''password: "@smtpPassword@",''}
         domain: "${cfg.smtp.domain}",
         ${optionalString (cfg.smtp.authentication != null) "authentication: :${cfg.smtp.authentication},"}
-        enable_starttls_auto: ${toString cfg.smtp.enableStartTLSAuto},
+        enable_starttls_auto: ${boolToString cfg.smtp.enableStartTLSAuto},
         ca_file: "/etc/ssl/certs/ca-certificates.crt",
         openssl_verify_mode: '${cfg.smtp.opensslVerifyMode}'
       }
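The enable_starttls_auto change above is needed because Nix's toString does not produce Ruby-style booleans, while lib.boolToString does. For illustration:

    # behaviour of the builtins, not part of the patch:
    #   toString true          == "1"
    #   toString false         == ""
    #   lib.boolToString true  == "true"
    #   lib.boolToString false == "false"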
diff --git a/nixos/modules/services/misc/gogs.nix b/nixos/modules/services/misc/gogs.nix
index ee99967c261..c5070aaa356 100644
--- a/nixos/modules/services/misc/gogs.nix
+++ b/nixos/modules/services/misc/gogs.nix
@@ -200,7 +200,7 @@ in
       description = "Gogs (Go Git Service)";
       after = [ "network.target" ];
       wantedBy = [ "multi-user.target" ];
-      path = [ pkgs.gogs.bin ];
+      path = [ pkgs.gogs ];
 
       preStart = let
         runConfig = "${cfg.stateDir}/custom/conf/app.ini";
@@ -230,7 +230,7 @@ in
         HOOKS=$(find ${cfg.repositoryRoot} -mindepth 4 -maxdepth 4 -type f -wholename "*git/hooks/*")
         if [ "$HOOKS" ]
         then
-          sed -ri 's,/nix/store/[a-z0-9.-]+/bin/gogs,${pkgs.gogs.bin}/bin/gogs,g' $HOOKS
+          sed -ri 's,/nix/store/[a-z0-9.-]+/bin/gogs,${pkgs.gogs}/bin/gogs,g' $HOOKS
           sed -ri 's,/nix/store/[a-z0-9.-]+/bin/env,${pkgs.coreutils}/bin/env,g' $HOOKS
           sed -ri 's,/nix/store/[a-z0-9.-]+/bin/bash,${pkgs.bash}/bin/bash,g' $HOOKS
           sed -ri 's,/nix/store/[a-z0-9.-]+/bin/perl,${pkgs.perl}/bin/perl,g' $HOOKS
@@ -242,7 +242,7 @@ in
         User = cfg.user;
         Group = cfg.group;
         WorkingDirectory = cfg.stateDir;
-        ExecStart = "${pkgs.gogs.bin}/bin/gogs web";
+        ExecStart = "${pkgs.gogs}/bin/gogs web";
         Restart = "always";
       };
 
diff --git a/nixos/modules/services/misc/ihaskell.nix b/nixos/modules/services/misc/ihaskell.nix
index 11597706d0d..684a242d738 100644
--- a/nixos/modules/services/misc/ihaskell.nix
+++ b/nixos/modules/services/misc/ihaskell.nix
@@ -15,6 +15,7 @@ in
   options = {
     services.ihaskell = {
       enable = mkOption {
+        type = types.bool;
         default = false;
         description = "Autostart an IHaskell notebook service.";
       };
diff --git a/nixos/modules/services/misc/leaps.nix b/nixos/modules/services/misc/leaps.nix
index d4e88ecbebd..ef89d3e64d0 100644
--- a/nixos/modules/services/misc/leaps.nix
+++ b/nixos/modules/services/misc/leaps.nix
@@ -55,7 +55,7 @@ in
         Restart = "on-failure";
         WorkingDirectory = stateDir;
         PrivateTmp = true;
-        ExecStart = "${pkgs.leaps.bin}/bin/leaps -path ${toString cfg.path} -address ${cfg.address}:${toString cfg.port}";
+        ExecStart = "${pkgs.leaps}/bin/leaps -path ${toString cfg.path} -address ${cfg.address}:${toString cfg.port}";
       };
     };
   };
diff --git a/nixos/modules/services/misc/matrix-synapse.xml b/nixos/modules/services/misc/matrix-synapse.xml
index 053a3b2a563..2f2ac27eeb9 100644
--- a/nixos/modules/services/misc/matrix-synapse.xml
+++ b/nixos/modules/services/misc/matrix-synapse.xml
@@ -33,6 +33,7 @@
    <link xlink:href="https://github.com/matrix-org/synapse#synapse-installation">
    installation instructions of Synapse </link>.
 <programlisting>
+{ pkgs, ... }:
 let
   fqdn =
     let
@@ -46,7 +47,7 @@ in {
   <link linkend="opt-networking.firewall.allowedTCPPorts">networking.firewall.allowedTCPPorts</link> = [ 80 443 ];
 
   <link linkend="opt-services.postgresql.enable">services.postgresql.enable</link> = true;
-  <link linkend="opt-services.postgresql.initialScript">services.postgresql.initialScript</link> = ''
+  <link linkend="opt-services.postgresql.initialScript">services.postgresql.initialScript</link> = pkgs.writeText "synapse-init.sql" ''
     CREATE ROLE "matrix-synapse" WITH LOGIN PASSWORD 'synapse';
     CREATE DATABASE "matrix-synapse" WITH OWNER "matrix-synapse"
       TEMPLATE template0
diff --git a/nixos/modules/services/misc/nix-daemon.nix b/nixos/modules/services/misc/nix-daemon.nix
index 0c2407e1dd2..2577cb78e96 100644
--- a/nixos/modules/services/misc/nix-daemon.nix
+++ b/nixos/modules/services/misc/nix-daemon.nix
@@ -283,7 +283,7 @@ in
       trustedBinaryCaches = mkOption {
         type = types.listOf types.str;
         default = [ ];
-        example = [ "http://hydra.nixos.org/" ];
+        example = [ "https://hydra.nixos.org/" ];
         description = ''
           List of binary cache URLs that non-root users can use (in
           addition to those specified using
@@ -510,8 +510,7 @@ in
 
     system.activationScripts.nix = stringAfter [ "etc" "users" ]
       ''
-        # Create directories in /nix.
-        ${nix}/bin/nix ping-store --no-net
+        install -m 0755 -d /nix/var/nix/{gcroots,profiles}/per-user
 
         # Subscribe the root user to the NixOS channel by default.
         if [ ! -e "/root/.nix-channels" ]; then
diff --git a/nixos/modules/services/misc/octoprint.nix b/nixos/modules/services/misc/octoprint.nix
index 651ed374388..7a71d2c8c6a 100644
--- a/nixos/modules/services/misc/octoprint.nix
+++ b/nixos/modules/services/misc/octoprint.nix
@@ -17,9 +17,9 @@ let
 
   cfgUpdate = pkgs.writeText "octoprint-config.yaml" (builtins.toJSON fullConfig);
 
-  pluginsEnv = pkgs.python.buildEnv.override {
-    extraLibs = cfg.plugins pkgs.octoprint-plugins;
-  };
+  pluginsEnv = package.python.withPackages (ps: [ps.octoprint] ++ (cfg.plugins ps));
+
+  package = pkgs.octoprint;
 
 in
 {
@@ -106,7 +106,6 @@ in
       wantedBy = [ "multi-user.target" ];
       after = [ "network.target" ];
       path = [ pluginsEnv ];
-      environment.PYTHONPATH = makeSearchPathOutput "lib" pkgs.python.sitePackages [ pluginsEnv ];
 
       preStart = ''
         if [ -e "${cfg.stateDir}/config.yaml" ]; then
@@ -119,7 +118,7 @@ in
       '';
 
       serviceConfig = {
-        ExecStart = "${pkgs.octoprint}/bin/octoprint serve -b ${cfg.stateDir}";
+        ExecStart = "${pluginsEnv}/bin/octoprint serve -b ${cfg.stateDir}";
         User = cfg.user;
         Group = cfg.group;
       };
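With the OctoPrint service now started from a withPackages environment, the octoprint package and its plugins share one Python environment. A usage sketch; the plugin attribute is a placeholder, real names come from pkgs.octoprint.python.pkgs:

    services.octoprint = {
      enable = true;
      # ps is the OctoPrint Python package set; replace the placeholder with a real plugin attribute
      plugins = ps: with ps; [ /* e.g. some-octoprint-plugin */ ];
    };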
diff --git a/nixos/modules/services/misc/pykms.nix b/nixos/modules/services/misc/pykms.nix
index 25aa27ae767..d6aeae48ccb 100644
--- a/nixos/modules/services/misc/pykms.nix
+++ b/nixos/modules/services/misc/pykms.nix
@@ -82,6 +82,7 @@ in {
         ]);
         ProtectHome = "tmpfs";
         WorkingDirectory = libDir;
+        SyslogIdentifier = "pykms";
         Restart = "on-failure";
         MemoryLimit = cfg.memoryLimit;
       };
diff --git a/nixos/modules/services/misc/redmine.nix b/nixos/modules/services/misc/redmine.nix
index 3b8c14d196f..1febdba0c8f 100644
--- a/nixos/modules/services/misc/redmine.nix
+++ b/nixos/modules/services/misc/redmine.nix
@@ -132,7 +132,7 @@ in
         example = literalExample ''
           {
             dkuk-redmine_alex_skin = builtins.fetchurl {
-              url = https://bitbucket.org/dkuk/redmine_alex_skin/get/1842ef675ef3.zip;
+              url = "https://bitbucket.org/dkuk/redmine_alex_skin/get/1842ef675ef3.zip";
               sha256 = "0hrin9lzyi50k4w2bd2b30vrf1i4fi1c0gyas5801wn8i7kpm9yl";
             };
           }
@@ -146,7 +146,7 @@ in
         example = literalExample ''
           {
             redmine_env_auth = builtins.fetchurl {
-              url = https://github.com/Intera/redmine_env_auth/archive/0.6.zip;
+              url = "https://github.com/Intera/redmine_env_auth/archive/0.6.zip";
               sha256 = "0yyr1yjd8gvvh832wdc8m3xfnhhxzk2pk3gm2psg5w9jdvd6skak";
             };
           }
diff --git a/nixos/modules/services/misc/safeeyes.nix b/nixos/modules/services/misc/safeeyes.nix
index 1a33971d922..6ecb0d13187 100644
--- a/nixos/modules/services/misc/safeeyes.nix
+++ b/nixos/modules/services/misc/safeeyes.nix
@@ -16,10 +16,7 @@ in
 
     services.safeeyes = {
 
-      enable = mkOption {
-        default = false;
-        description = "Whether to enable the safeeyes OSGi service";
-      };
+      enable = mkEnableOption "the safeeyes OSGi service";
 
     };
 
diff --git a/nixos/modules/services/misc/ssm-agent.nix b/nixos/modules/services/misc/ssm-agent.nix
index e951a4c7ffa..f7c05deeecb 100644
--- a/nixos/modules/services/misc/ssm-agent.nix
+++ b/nixos/modules/services/misc/ssm-agent.nix
@@ -35,7 +35,7 @@ in {
 
       path = [ fake-lsb-release ];
       serviceConfig = {
-        ExecStart = "${cfg.package.bin}/bin/agent";
+        ExecStart = "${cfg.package}/bin/agent";
         KillMode = "process";
         Restart = "on-failure";
         RestartSec = "15min";
@@ -43,4 +43,3 @@ in {
     };
   };
 }
-
diff --git a/nixos/modules/services/misc/sssd.nix b/nixos/modules/services/misc/sssd.nix
index 36008d25741..77f6ccfe64f 100644
--- a/nixos/modules/services/misc/sssd.nix
+++ b/nixos/modules/services/misc/sssd.nix
@@ -75,6 +75,11 @@ in {
       };
 
       system.nssModules = optional cfg.enable pkgs.sssd;
+      system.nssDatabases = {
+        passwd = [ "sss" ];
+        shadow = [ "sss" ];
+        services = [ "sss" ];
+      };
       services.dbus.packages = [ pkgs.sssd ];
     })
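The nssDatabases entries above are merged into the corresponding databases in /etc/nsswitch.conf, so enabling sssd no longer requires editing nsswitch by hand. Roughly, the effect is:

    # system.nssDatabases.passwd = [ "sss" ]; contributes "sss" to a line like
    #   passwd: files sss ...
    # (the exact module order depends on the other NSS modules that are enabled)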
 
diff --git a/nixos/modules/services/misc/svnserve.nix b/nixos/modules/services/misc/svnserve.nix
index 6292bc52b1e..3335ed09d40 100644
--- a/nixos/modules/services/misc/svnserve.nix
+++ b/nixos/modules/services/misc/svnserve.nix
@@ -18,6 +18,7 @@ in
     services.svnserve = {
 
       enable = mkOption {
+        type = types.bool;
         default = false;
         description = "Whether to enable svnserve to serve Subversion repositories through the SVN protocol.";
       };
diff --git a/nixos/modules/services/misc/synergy.nix b/nixos/modules/services/misc/synergy.nix
index bfab8c534d8..5b7cf3ac46c 100644
--- a/nixos/modules/services/misc/synergy.nix
+++ b/nixos/modules/services/misc/synergy.nix
@@ -19,12 +19,8 @@ in
       # !!! All these option descriptions needs to be cleaned up.
 
       client = {
-        enable = mkOption {
-          default = false;
-          description = "
-            Whether to enable the Synergy client (receive keyboard and mouse events from a Synergy server).
-          ";
-        };
+        enable = mkEnableOption "the Synergy client (receive keyboard and mouse events from a Synergy server)";
+
         screenName = mkOption {
           default = "";
           description = ''
@@ -47,12 +43,8 @@ in
       };
 
       server = {
-        enable = mkOption {
-          default = false;
-          description = ''
-            Whether to enable the Synergy server (send keyboard and mouse events).
-          '';
-        };
+        enable = mkEnableOption "the Synergy server (send keyboard and mouse events)";
+
         configFile = mkOption {
           default = "/etc/synergy-server.conf";
           description = "The Synergy server configuration file.";
diff --git a/nixos/modules/services/monitoring/bosun.nix b/nixos/modules/services/monitoring/bosun.nix
index b1c12cce1f8..04e9da1c81a 100644
--- a/nixos/modules/services/monitoring/bosun.nix
+++ b/nixos/modules/services/monitoring/bosun.nix
@@ -148,7 +148,7 @@ in {
         User = cfg.user;
         Group = cfg.group;
         ExecStart = ''
-          ${cfg.package.bin}/bin/bosun -c ${configFile}
+          ${cfg.package}/bin/bosun -c ${configFile}
         '';
       };
     };
diff --git a/nixos/modules/services/monitoring/datadog-agent.nix b/nixos/modules/services/monitoring/datadog-agent.nix
index 2c5fe47242e..f1cb890794e 100644
--- a/nixos/modules/services/monitoring/datadog-agent.nix
+++ b/nixos/modules/services/monitoring/datadog-agent.nix
@@ -225,7 +225,7 @@ in {
           Restart = "always";
           RestartSec = 2;
         };
-        restartTriggers = [ datadogPkg ] ++ attrNames etcfiles;
+        restartTriggers = [ datadogPkg ] ++  map (x: x.source) (attrValues etcfiles);
       } attrs;
     in {
       datadog-agent = makeService {
diff --git a/nixos/modules/services/monitoring/grafana-reporter.nix b/nixos/modules/services/monitoring/grafana-reporter.nix
index b5a78e4583e..893c15d568b 100644
--- a/nixos/modules/services/monitoring/grafana-reporter.nix
+++ b/nixos/modules/services/monitoring/grafana-reporter.nix
@@ -59,7 +59,7 @@ in {
           "-templates ${cfg.templateDir}"
         ];
       in {
-        ExecStart = "${pkgs.grafana_reporter.bin}/bin/grafana-reporter ${args}";
+        ExecStart = "${pkgs.grafana_reporter}/bin/grafana-reporter ${args}";
       };
     };
   };
diff --git a/nixos/modules/services/monitoring/grafana.nix b/nixos/modules/services/monitoring/grafana.nix
index 0f8bc2471e3..b0c81a46d4d 100644
--- a/nixos/modules/services/monitoring/grafana.nix
+++ b/nixos/modules/services/monitoring/grafana.nix
@@ -535,7 +535,7 @@ in {
         ${optionalString cfg.provision.enable ''
           export GF_PATHS_PROVISIONING=${provisionConfDir};
         ''}
-        exec ${cfg.package.bin}/bin/grafana-server -homepath ${cfg.dataDir}
+        exec ${cfg.package}/bin/grafana-server -homepath ${cfg.dataDir}
       '';
       serviceConfig = {
         WorkingDirectory = cfg.dataDir;
diff --git a/nixos/modules/services/monitoring/prometheus/alertmanager.nix b/nixos/modules/services/monitoring/prometheus/alertmanager.nix
index 69e3dcf1408..1b02ebf3704 100644
--- a/nixos/modules/services/monitoring/prometheus/alertmanager.nix
+++ b/nixos/modules/services/monitoring/prometheus/alertmanager.nix
@@ -21,6 +21,8 @@ let
     "--config.file /tmp/alert-manager-substituted.yaml"
     "--web.listen-address ${cfg.listenAddress}:${toString cfg.port}"
     "--log.level ${cfg.logLevel}"
+    "--storage.path /var/lib/alertmanager"
+    (toString (map (peer: "--cluster.peer ${peer}:9094") cfg.clusterPeers))
     ] ++ (optional (cfg.webExternalUrl != null)
       "--web.external-url ${cfg.webExternalUrl}"
     ) ++ (optional (cfg.logFormat != null)
@@ -120,6 +122,14 @@ in {
         '';
       };
 
+      clusterPeers = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        description = ''
+          Initial peers for HA cluster.
+        '';
+      };
+
       extraFlags = mkOption {
         type = types.listOf types.str;
         default = [];
@@ -162,6 +172,7 @@ in {
         '';
         serviceConfig = {
           Restart  = "always";
+          StateDirectory = "alertmanager";
           DynamicUser = true; # implies PrivateTmp
           EnvironmentFile = lib.mkIf (cfg.environmentFile != null) cfg.environmentFile;
           WorkingDirectory = "/tmp";
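The new clusterPeers option is expanded into repeated --cluster.peer flags (port 9094, as hard-coded above), and alert state now persists under /var/lib/alertmanager via StateDirectory. A sketch of a two-node HA setup with placeholder hostnames:

    services.prometheus.alertmanager = {
      enable = true;
      clusterPeers = [ "alertmanager-a.example.org" "alertmanager-b.example.org" ];
      # resulting flags, roughly:
      #   --cluster.peer alertmanager-a.example.org:9094 --cluster.peer alertmanager-b.example.org:9094
    };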
diff --git a/nixos/modules/services/monitoring/prometheus/default.nix b/nixos/modules/services/monitoring/prometheus/default.nix
index 6b1a4be44d1..84a72afac2f 100644
--- a/nixos/modules/services/monitoring/prometheus/default.nix
+++ b/nixos/modules/services/monitoring/prometheus/default.nix
@@ -10,7 +10,7 @@ let
   # a wrapper that verifies that the configuration is valid
   promtoolCheck = what: name: file:
     if cfg.checkConfig then
-      pkgs.runCommand
+      pkgs.runCommandNoCCLocal
         "${name}-${replaceStrings [" "] [""] what}-checked"
         { buildInputs = [ cfg.package ]; } ''
       ln -s ${file} $out
@@ -19,7 +19,7 @@ let
 
   # Pretty-print JSON to a file
   writePrettyJSON = name: x:
-    pkgs.runCommand name { preferLocalBuild = true; } ''
+    pkgs.runCommandNoCCLocal name {} ''
       echo '${builtins.toJSON x}' | ${pkgs.jq}/bin/jq . > $out
     '';
 
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/snmp.nix b/nixos/modules/services/monitoring/prometheus/exporters/snmp.nix
index 045e48a3d0f..01276366e97 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/snmp.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/snmp.nix
@@ -58,7 +58,7 @@ in
     in {
     serviceConfig = {
       ExecStart = ''
-        ${pkgs.prometheus-snmp-exporter.bin}/bin/snmp_exporter \
+        ${pkgs.prometheus-snmp-exporter}/bin/snmp_exporter \
           --config.file=${escapeShellArg configFile} \
           --log.format=${escapeShellArg cfg.logFormat} \
           --log.level=${cfg.logLevel} \
diff --git a/nixos/modules/services/monitoring/scollector.nix b/nixos/modules/services/monitoring/scollector.nix
index 38cd2213de7..6f13ce889cb 100644
--- a/nixos/modules/services/monitoring/scollector.nix
+++ b/nixos/modules/services/monitoring/scollector.nix
@@ -118,7 +118,7 @@ in {
       serviceConfig = {
         User = cfg.user;
         Group = cfg.group;
-        ExecStart = "${cfg.package.bin}/bin/scollector -conf=${conf} ${lib.concatStringsSep " " cfg.extraOpts}";
+        ExecStart = "${cfg.package}/bin/scollector -conf=${conf} ${lib.concatStringsSep " " cfg.extraOpts}";
       };
     };
 
diff --git a/nixos/modules/services/monitoring/tuptime.nix b/nixos/modules/services/monitoring/tuptime.nix
new file mode 100644
index 00000000000..731260a5c20
--- /dev/null
+++ b/nixos/modules/services/monitoring/tuptime.nix
@@ -0,0 +1,84 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.tuptime;
+
+in {
+
+  options.services.tuptime = {
+
+    enable = mkEnableOption "the total uptime service";
+
+    timer = {
+      enable = mkOption {
+        type = types.bool;
+        default = true;
+        description = "Whether to regularly log uptime to detect bad shutdowns.";
+      };
+
+      period = mkOption {
+        type = types.str;
+        default = "*:0/5";
+        description = "systemd calendar event";
+      };
+    };
+  };
+
+
+  config = mkIf cfg.enable {
+
+    environment.systemPackages = [ pkgs.tuptime ];
+
+    users.users.tuptime.description = "tuptime database owner";
+
+    systemd = {
+      services = {
+
+        tuptime = {
+          description = "the total uptime service";
+          documentation = [ "man:tuptime(1)" ];
+          after = [ "time-sync.target" ];
+          wantedBy = [ "multi-user.target" ];
+          serviceConfig = {
+            StateDirectory = "tuptime";
+            Type = "oneshot";
+            User = "tuptime";
+            RemainAfterExit = true;
+            ExecStart = "${pkgs.tuptime}/bin/tuptime -x";
+            ExecStop = "${pkgs.tuptime}/bin/tuptime -xg";
+          };
+        };
+
+        tuptime-oneshot = mkIf cfg.timer.enable {
+          description = "the tuptime scheduled execution unit";
+          serviceConfig = {
+            StateDirectory = "tuptime";
+            Type = "oneshot";
+            User = "tuptime";
+            ExecStart = "${pkgs.tuptime}/bin/tuptime -x";
+          };
+        };
+      };
+
+      timers.tuptime = mkIf cfg.timer.enable {
+        description = "the tuptime scheduled execution timer";
+        # this timer should be started if the service is started
+        # even if the timer was previously stopped
+        wantedBy = [ "tuptime.service" "timers.target" ];
+        # this timer should be stopped if the service is stopped
+        partOf = [ "tuptime.service" ];
+        timerConfig = {
+          OnBootSec = "1min";
+          OnCalendar = cfg.timer.period;
+          Unit = "tuptime-oneshot.service";
+        };
+      };
+    };
+  };
+
+  meta.maintainers = [ maintainers.evils ];
+
+}
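A minimal sketch of enabling the new tuptime module with a slightly longer logging interval (the period shown is illustrative):

  services.tuptime = {
    enable = true;
    # Record uptime every 10 minutes instead of the default 5 (illustrative value).
    timer.period = "*:0/10";
  };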
diff --git a/nixos/modules/services/network-filesystems/ipfs.nix b/nixos/modules/services/network-filesystems/ipfs.nix
index b6d881afd7b..880f70ae141 100644
--- a/nixos/modules/services/network-filesystems/ipfs.nix
+++ b/nixos/modules/services/network-filesystems/ipfs.nix
@@ -37,9 +37,7 @@ let
   baseService = recursiveUpdate commonEnv {
     wants = [ "ipfs-init.service" ];
     # NB: migration must be performed prior to pre-start, else we get the failure message!
-    preStart = ''
-      ipfs repo fsck # workaround for BUG #4212 (https://github.com/ipfs/go-ipfs/issues/4214)
-    '' + optionalString cfg.autoMount ''
+    preStart = optionalString cfg.autoMount ''
       ipfs --local config Mounts.FuseAllowOther --json true
       ipfs --local config Mounts.IPFS ${cfg.ipfsMountDir}
       ipfs --local config Mounts.IPNS ${cfg.ipnsMountDir}
diff --git a/nixos/modules/services/network-filesystems/netatalk.nix b/nixos/modules/services/network-filesystems/netatalk.nix
index 5422d4dd4e2..7674c8f7fa8 100644
--- a/nixos/modules/services/network-filesystems/netatalk.nix
+++ b/nixos/modules/services/network-filesystems/netatalk.nix
@@ -43,10 +43,7 @@ in
   options = {
     services.netatalk = {
 
-      enable = mkOption {
-          default = false;
-          description = "Whether to enable the Netatalk AFP fileserver.";
-        };
+      enable = mkEnableOption "the Netatalk AFP fileserver";
 
       port = mkOption {
         default = 548;
@@ -65,6 +62,7 @@ in
 
       homes = {
         enable = mkOption {
+          type = types.bool;
           default = false;
           description = "Enable sharing of the UNIX server user home directories.";
         };
diff --git a/nixos/modules/services/network-filesystems/rsyncd.nix b/nixos/modules/services/network-filesystems/rsyncd.nix
index ccad64cfdb2..fa29e18a939 100644
--- a/nixos/modules/services/network-filesystems/rsyncd.nix
+++ b/nixos/modules/services/network-filesystems/rsyncd.nix
@@ -29,10 +29,7 @@ in
   options = {
     services.rsyncd = {
 
-      enable = mkOption {
-        default = false;
-        description = "Whether to enable the rsync daemon.";
-      };
+      enable = mkEnableOption "the rsync daemon";
 
       motd = mkOption {
         type = types.str;
diff --git a/nixos/modules/services/network-filesystems/xtreemfs.nix b/nixos/modules/services/network-filesystems/xtreemfs.nix
index c93e201da56..b8f8c1d7117 100644
--- a/nixos/modules/services/network-filesystems/xtreemfs.nix
+++ b/nixos/modules/services/network-filesystems/xtreemfs.nix
@@ -100,11 +100,13 @@ in
 
       dir = {
         enable = mkOption {
+          type = types.bool;
           default = true;
           description = ''
             Whether to enable XtreemFS DIR service.
           '';
         };
+
         uuid = mkOption {
           example = "eacb6bab-f444-4ebf-a06a-3f72d7465e40";
           description = ''
@@ -218,11 +220,13 @@ in
 
       mrc = {
         enable = mkOption {
+          type = types.bool;
           default = true;
           description = ''
             Whether to enable XtreemFS MRC service.
           '';
         };
+
         uuid = mkOption {
           example = "eacb6bab-f444-4ebf-a06a-3f72d7465e41";
           description = ''
@@ -354,11 +358,13 @@ in
 
       osd = {
         enable = mkOption {
+          type = types.bool;
           default = true;
           description = ''
             Whether to enable XtreemFS OSD service.
           '';
         };
+
         uuid = mkOption {
           example = "eacb6bab-f444-4ebf-a06a-3f72d7465e42";
           description = ''
diff --git a/nixos/modules/services/network-filesystems/yandex-disk.nix b/nixos/modules/services/network-filesystems/yandex-disk.nix
index 0aa01ef9e6d..cc73f13bf77 100644
--- a/nixos/modules/services/network-filesystems/yandex-disk.nix
+++ b/nixos/modules/services/network-filesystems/yandex-disk.nix
@@ -21,6 +21,7 @@ in
     services.yandex-disk = {
 
       enable = mkOption {
+        type = types.bool;
         default = false;
         description = "
           Whether to enable Yandex-disk client. See https://disk.yandex.ru/
diff --git a/nixos/modules/services/networking/amuled.nix b/nixos/modules/services/networking/amuled.nix
index 57f02542eaf..1128ee2c3e6 100644
--- a/nixos/modules/services/networking/amuled.nix
+++ b/nixos/modules/services/networking/amuled.nix
@@ -16,6 +16,7 @@ in
     services.amule = {
 
       enable = mkOption {
+        type = types.bool;
         default = false;
         description = ''
           Whether to run the AMule daemon. You need to manually run "amuled --ec-config" to configure the service for the first time.
diff --git a/nixos/modules/services/networking/babeld.nix b/nixos/modules/services/networking/babeld.nix
index de863461eab..e62c74d0069 100644
--- a/nixos/modules/services/networking/babeld.nix
+++ b/nixos/modules/services/networking/babeld.nix
@@ -35,12 +35,7 @@ in
 
     services.babeld = {
 
-      enable = mkOption {
-        default = false;
-        description = ''
-          Whether to run the babeld network routing daemon.
-        '';
-      };
+      enable = mkEnableOption "the babeld network routing daemon";
 
       interfaceDefaults = mkOption {
         default = null;
diff --git a/nixos/modules/services/networking/bind.nix b/nixos/modules/services/networking/bind.nix
index e3b95afb3d8..faad8863575 100644
--- a/nixos/modules/services/networking/bind.nix
+++ b/nixos/modules/services/networking/bind.nix
@@ -68,12 +68,7 @@ in
 
     services.bind = {
 
-      enable = mkOption {
-        default = false;
-        description = "
-          Whether to enable BIND domain name server.
-        ";
-      };
+      enable = mkEnableOption "BIND domain name server";
 
       cacheNetworks = mkOption {
         default = ["127.0.0.0/24"];
diff --git a/nixos/modules/services/networking/bitlbee.nix b/nixos/modules/services/networking/bitlbee.nix
index 01a16698384..9ebf382fce4 100644
--- a/nixos/modules/services/networking/bitlbee.nix
+++ b/nixos/modules/services/networking/bitlbee.nix
@@ -48,6 +48,7 @@ in
     services.bitlbee = {
 
       enable = mkOption {
+        type = types.bool;
         default = false;
         description = ''
           Whether to run the BitlBee IRC to other chat network gateway.
diff --git a/nixos/modules/services/networking/cntlm.nix b/nixos/modules/services/networking/cntlm.nix
index 4e4e3104c3a..5b5068e43d7 100644
--- a/nixos/modules/services/networking/cntlm.nix
+++ b/nixos/modules/services/networking/cntlm.nix
@@ -33,12 +33,7 @@ in
 
   options.services.cntlm = {
 
-    enable = mkOption {
-      default = false;
-      description = ''
-        Whether to enable the cntlm, which start a local proxy.
-      '';
-    };
+    enable = mkEnableOption "cntlm, which starts a local proxy";
 
     username = mkOption {
       description = ''
diff --git a/nixos/modules/services/networking/consul.nix b/nixos/modules/services/networking/consul.nix
index 689cbc8a986..f7d2afead06 100644
--- a/nixos/modules/services/networking/consul.nix
+++ b/nixos/modules/services/networking/consul.nix
@@ -179,15 +179,15 @@ in
             (filterAttrs (n: _: hasPrefix "consul.d/" n) config.environment.etc);
 
         serviceConfig = {
-          ExecStart = "@${cfg.package.bin}/bin/consul consul agent -config-dir /etc/consul.d"
+          ExecStart = "@${cfg.package}/bin/consul consul agent -config-dir /etc/consul.d"
             + concatMapStrings (n: " -config-file ${n}") configFiles;
-          ExecReload = "${cfg.package.bin}/bin/consul reload";
+          ExecReload = "${cfg.package}/bin/consul reload";
           PermissionsStartOnly = true;
           User = if cfg.dropPrivileges then "consul" else null;
           Restart = "on-failure";
           TimeoutStartSec = "infinity";
         } // (optionalAttrs (cfg.leaveOnStop) {
-          ExecStop = "${cfg.package.bin}/bin/consul leave";
+          ExecStop = "${cfg.package}/bin/consul leave";
         });
 
         path = with pkgs; [ iproute gnugrep gawk consul ];
@@ -238,7 +238,7 @@ in
 
         serviceConfig = {
           ExecStart = ''
-            ${cfg.alerts.package.bin}/bin/consul-alerts start \
+            ${cfg.alerts.package}/bin/consul-alerts start \
               --alert-addr=${cfg.alerts.listenAddr} \
               --consul-addr=${cfg.alerts.consulAddr} \
               ${optionalString cfg.alerts.watchChecks "--watch-checks"} \
diff --git a/nixos/modules/services/networking/flannel.nix b/nixos/modules/services/networking/flannel.nix
index dd2f6454e95..4c040112d28 100644
--- a/nixos/modules/services/networking/flannel.nix
+++ b/nixos/modules/services/networking/flannel.nix
@@ -19,8 +19,8 @@ in {
     package = mkOption {
       description = "Package to use for flannel";
       type = types.package;
-      default = pkgs.flannel.bin;
-      defaultText = "pkgs.flannel.bin";
+      default = pkgs.flannel;
+      defaultText = "pkgs.flannel";
     };
 
     publicIp = mkOption {
@@ -167,7 +167,7 @@ in {
         touch /run/flannel/docker
       '' + optionalString (cfg.storageBackend == "etcd") ''
         echo "setting network configuration"
-        until ${pkgs.etcdctl.bin}/bin/etcdctl set /coreos.com/network/config '${builtins.toJSON networkConfig}'
+        until ${pkgs.etcdctl}/bin/etcdctl set /coreos.com/network/config '${builtins.toJSON networkConfig}'
         do
           echo "setting network configuration, retry"
           sleep 1
diff --git a/nixos/modules/services/networking/flashpolicyd.nix b/nixos/modules/services/networking/flashpolicyd.nix
index 9c51b88ef67..7f25083307c 100644
--- a/nixos/modules/services/networking/flashpolicyd.nix
+++ b/nixos/modules/services/networking/flashpolicyd.nix
@@ -39,6 +39,7 @@ in
     services.flashpolicyd = {
 
       enable = mkOption {
+        type = types.bool;
         default = false;
         description =
           ''
diff --git a/nixos/modules/services/networking/gogoclient.nix b/nixos/modules/services/networking/gogoclient.nix
index c9b03bca711..99455b18314 100644
--- a/nixos/modules/services/networking/gogoclient.nix
+++ b/nixos/modules/services/networking/gogoclient.nix
@@ -19,6 +19,7 @@ in
         '';
       };
       autorun = mkOption {
+        type = types.bool;
         default = true;
         description = ''
           Whether to automatically start the tunnel.
diff --git a/nixos/modules/services/networking/gvpe.nix b/nixos/modules/services/networking/gvpe.nix
index 3ef3548e0a0..92e87cd4640 100644
--- a/nixos/modules/services/networking/gvpe.nix
+++ b/nixos/modules/services/networking/gvpe.nix
@@ -42,12 +42,8 @@ in
 {
   options = {
     services.gvpe = {
-      enable = mkOption {
-        default = false;
-        description = ''
-          Whether to run gvpe
-        '';
-      };
+      enable = lib.mkEnableOption "gvpe";
+
       nodename = mkOption {
         default = null;
         description =''
diff --git a/nixos/modules/services/networking/hostapd.nix b/nixos/modules/services/networking/hostapd.nix
index 2915b54f05b..5d73038363a 100644
--- a/nixos/modules/services/networking/hostapd.nix
+++ b/nixos/modules/services/networking/hostapd.nix
@@ -20,12 +20,14 @@ let
     ssid=${cfg.ssid}
     hw_mode=${cfg.hwMode}
     channel=${toString cfg.channel}
+    ${optionalString (cfg.countryCode != null) ''country_code=${cfg.countryCode}''}
+    ${optionalString (cfg.countryCode != null) ''ieee80211d=1''}
 
     # logging (debug level)
     logger_syslog=-1
-    logger_syslog_level=2
+    logger_syslog_level=${toString cfg.logLevel}
     logger_stdout=-1
-    logger_stdout_level=2
+    logger_stdout_level=${toString cfg.logLevel}
 
     ctrl_interface=/run/hostapd
     ctrl_interface_group=${cfg.group}
@@ -49,6 +51,7 @@ in
     services.hostapd = {
 
       enable = mkOption {
+        type = types.bool;
         default = false;
         description = ''
           Enable putting a wireless interface into infrastructure mode,
@@ -71,6 +74,7 @@ in
       };
 
       noScan = mkOption {
+        type = types.bool;
         default = false;
         description = ''
           Do not scan for overlapping BSSs in HT40+/- mode.
@@ -126,6 +130,7 @@ in
       };
 
       wpa = mkOption {
+        type = types.bool;
         default = true;
         description = ''
           Enable WPA (IEEE 802.11i/D3.0) to authenticate with the access point.
@@ -144,6 +149,35 @@ in
         '';
       };
 
+      logLevel = mkOption {
+        default = 2;
+        type = types.int;
+        description = ''
+          Levels (minimum value for logged events):
+          0 = verbose debugging
+          1 = debugging
+          2 = informational messages
+          3 = notification
+          4 = warning
+        '';
+      };
+
+      countryCode = mkOption {
+        default = null;
+        example = "US";
+        type = with types; nullOr str;
+        description = ''
+          Country code (ISO/IEC 3166-1). Used to set regulatory domain.
+          Set as needed to indicate the country in which the device is operating.
+          This can limit available channels and transmit power.
+          These two octets are used as the first two octets of the Country String
+          (dot11CountryString).
+          If set, this enables IEEE 802.11d. This advertises the countryCode and
+          the set of allowed channels and transmit power levels based on the
+          regulatory limits.
+        '';
+      };
+
       extraConfig = mkOption {
         default = "";
         example = ''
@@ -164,6 +198,8 @@ in
 
     environment.systemPackages =  [ pkgs.hostapd ];
 
+    services.udev.packages = optional (cfg.countryCode != null) pkgs.crda;
+
     systemd.services.hostapd =
       { description = "hostapd wireless AP";
 
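A minimal sketch showing the new countryCode and logLevel options alongside the pre-existing interface and ssid options (the interface name and SSID are hypothetical):

  services.hostapd = {
    enable = true;
    interface = "wlan0";   # hypothetical wireless interface
    ssid = "example";
    countryCode = "US";    # sets the regulatory domain and enables IEEE 802.11d
    logLevel = 1;          # more verbose than the default of 2
  };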
diff --git a/nixos/modules/services/networking/ircd-hybrid/default.nix b/nixos/modules/services/networking/ircd-hybrid/default.nix
index b236552eb65..91d0bf437d6 100644
--- a/nixos/modules/services/networking/ircd-hybrid/default.nix
+++ b/nixos/modules/services/networking/ircd-hybrid/default.nix
@@ -36,12 +36,7 @@ in
 
     services.ircdHybrid = {
 
-      enable = mkOption {
-        default = false;
-        description = "
-          Enable IRCD.
-        ";
-      };
+      enable = mkEnableOption "IRCD";
 
       serverName = mkOption {
         default = "hades.arpa";
diff --git a/nixos/modules/services/networking/mailpile.nix b/nixos/modules/services/networking/mailpile.nix
index c42d3d5a44c..b79ee11d17d 100644
--- a/nixos/modules/services/networking/mailpile.nix
+++ b/nixos/modules/services/networking/mailpile.nix
@@ -18,12 +18,8 @@ in
   options = {
 
     services.mailpile = {
-      enable = mkOption {
-        default = false;
-        description = "
-          Whether to enable Mailpile the mail client.
-        ";
-      };
+      enable = mkEnableOption "Mailpile the mail client";
+
       hostname = mkOption {
         default = "localhost";
         description = "Listen to this hostname or ip.";
diff --git a/nixos/modules/services/networking/monero.nix b/nixos/modules/services/networking/monero.nix
index b9536430868..97af2997839 100644
--- a/nixos/modules/services/networking/monero.nix
+++ b/nixos/modules/services/networking/monero.nix
@@ -26,7 +26,7 @@ let
       rpc-login=${rpc.user}:${rpc.password}
     ''}
     ${optionalString rpc.restricted ''
-      restrict-rpc=1
+      restricted-rpc=1
     ''}
 
     limit-rate-up=${toString limits.upload}
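The corrected key only matters when restricted RPC is enabled; a minimal sketch, assuming the rpc credentials shown are placeholders:

  services.monero = {
    enable = true;
    rpc = {
      user = "monero";      # placeholder credentials
      password = "changeme";
      restricted = true;    # now rendered as restricted-rpc=1
    };
  };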
diff --git a/nixos/modules/services/networking/nftables.nix b/nixos/modules/services/networking/nftables.nix
index ad7c013a544..ec9d9753cfe 100644
--- a/nixos/modules/services/networking/nftables.nix
+++ b/nixos/modules/services/networking/nftables.nix
@@ -52,7 +52,7 @@ in
             ip protocol icmp icmp type { destination-unreachable, router-advertisement, time-exceeded, parameter-problem } accept
 
             # allow "ping"
-            ip6 nexthdr icmp icmpv6 type echo-request accept
+            ip6 nexthdr icmpv6 icmpv6 type echo-request accept
             ip protocol icmp icmp type echo-request accept
 
             # accept SSH connections (required for a server)
diff --git a/nixos/modules/services/networking/ntp/chrony.nix b/nixos/modules/services/networking/ntp/chrony.nix
index da9d960cc14..b7e4c89a155 100644
--- a/nixos/modules/services/networking/ntp/chrony.nix
+++ b/nixos/modules/services/networking/ntp/chrony.nix
@@ -30,6 +30,7 @@ in
   options = {
     services.chrony = {
       enable = mkOption {
+        type = types.bool;
         default = false;
         description = ''
           Whether to synchronise your machine's time using chrony.
@@ -92,6 +93,11 @@ in
 
     systemd.services.systemd-timedated.environment = { SYSTEMD_TIMEDATED_NTP_SERVICES = "chronyd.service"; };
 
+    systemd.tmpfiles.rules = [
+      "d ${stateDir} 0755 chrony chrony - -"
+      "f ${keyFile} 0640 chrony chrony -"
+    ];
+
     systemd.services.chronyd =
       { description = "chrony NTP daemon";
 
@@ -103,13 +109,6 @@ in
 
         path = [ pkgs.chrony ];
 
-        preStart = ''
-          mkdir -m 0755 -p ${stateDir}
-          touch ${keyFile}
-          chmod 0640 ${keyFile}
-          chown chrony:chrony ${stateDir} ${keyFile}
-        '';
-
         unitConfig.ConditionCapability = "CAP_SYS_TIME";
         serviceConfig =
           { Type = "simple";
@@ -118,7 +117,7 @@ in
             ProtectHome = "yes";
             ProtectSystem = "full";
             PrivateTmp = "yes";
-
+            StateDirectory = "chrony";
           };
 
       };
diff --git a/nixos/modules/services/networking/ntp/ntpd.nix b/nixos/modules/services/networking/ntp/ntpd.nix
index 54ff054d84c..51398851adc 100644
--- a/nixos/modules/services/networking/ntp/ntpd.nix
+++ b/nixos/modules/services/networking/ntp/ntpd.nix
@@ -40,6 +40,7 @@ in
     services.ntp = {
 
       enable = mkOption {
+        type = types.bool;
         default = false;
         description = ''
           Whether to synchronise your machine's time using ntpd, as a peer in
diff --git a/nixos/modules/services/networking/openfire.nix b/nixos/modules/services/networking/openfire.nix
index 4059eb3db83..fe0499d5232 100644
--- a/nixos/modules/services/networking/openfire.nix
+++ b/nixos/modules/services/networking/openfire.nix
@@ -9,14 +9,10 @@ with lib;
 
     services.openfire = {
 
-      enable = mkOption {
-        default = false;
-        description = "
-          Whether to enable OpenFire XMPP server.
-        ";
-      };
+      enable = mkEnableOption "OpenFire XMPP server";
 
       usePostgreSQL = mkOption {
+        type = types.bool;
         default = true;
         description = "
           Whether you use PostgreSQL service for your storage back-end.
diff --git a/nixos/modules/services/networking/pixiecore.nix b/nixos/modules/services/networking/pixiecore.nix
index 0e32f182e2a..85aa40784af 100644
--- a/nixos/modules/services/networking/pixiecore.nix
+++ b/nixos/modules/services/networking/pixiecore.nix
@@ -115,7 +115,7 @@ in
               if cfg.mode == "boot"
               then [ "boot" cfg.kernel ]
                    ++ optional (cfg.initrd != "") cfg.initrd
-                   ++ optional (cfg.cmdLine != "") "--cmdline=${lib.escapeShellArg cfg.cmdLine}"
+                   ++ optionals (cfg.cmdLine != "") [ "--cmdline" cfg.cmdLine ]
               else [ "api" cfg.apiServer ];
           in
             ''
diff --git a/nixos/modules/services/networking/prayer.nix b/nixos/modules/services/networking/prayer.nix
index 9c9eeba23da..f04dac01d9b 100644
--- a/nixos/modules/services/networking/prayer.nix
+++ b/nixos/modules/services/networking/prayer.nix
@@ -41,12 +41,7 @@ in
 
     services.prayer = {
 
-      enable = mkOption {
-        default = false;
-        description = ''
-          Whether to run the prayer webmail http server.
-        '';
-      };
+      enable = mkEnableOption "the prayer webmail http server";
 
       port = mkOption {
         default = "2080";
diff --git a/nixos/modules/services/networking/prosody.nix b/nixos/modules/services/networking/prosody.nix
index 7a503e71166..cdd341c9fb6 100644
--- a/nixos/modules/services/networking/prosody.nix
+++ b/nixos/modules/services/networking/prosody.nix
@@ -1,9 +1,7 @@
 { config, lib, pkgs, ... }:
 
 with lib;
-
 let
-
   cfg = config.services.prosody;
 
   sslOpts = { ... }: {
@@ -30,8 +28,21 @@ let
     };
   };
 
+  discoOpts = {
+    options = {
+      url = mkOption {
+        type = types.str;
+        description = "URL of the endpoint you want to make discoverable";
+      };
+      description = mkOption {
+        type = types.str;
+        description = "A short description of the endpoint you want to advertise";
+      };
+    };
+  };
+
   moduleOpts = {
-    # Generally required
+    # Required for compliance with https://compliance.conversations.im/about/
     roster = mkOption {
       type = types.bool;
       default = true;
@@ -69,6 +80,18 @@ let
       description = "Keep multiple clients in sync";
     };
 
+    csi = mkOption {
+      type = types.bool;
+      default = true;
+      description = "Implements the CSI protocol that allows clients to report their active/inactive state to the server";
+    };
+
+    cloud_notify = mkOption {
+      type = types.bool;
+      default = true;
+      description = "Push notifications to inform users of new messages or other pertinent information even when they have no XMPP clients online";
+    };
+
     pep = mkOption {
       type = types.bool;
       default = true;
@@ -89,10 +112,22 @@ let
 
     vcard = mkOption {
       type = types.bool;
-      default = true;
+      default = false;
       description = "Allow users to set vCards";
     };
 
+    vcard_legacy = mkOption {
+      type = types.bool;
+      default = true;
+      description = "Converts users profiles and Avatars between old and new formats";
+    };
+
+    bookmarks = mkOption {
+      type = types.bool;
+      default = true;
+      description = "Allows interop between older clients that use XEP-0048: Bookmarks in its 1.0 version and recent clients which use it in PEP";
+    };
+
     # Nice to have
     version = mkOption {
       type = types.bool;
@@ -126,10 +161,16 @@ let
 
     mam = mkOption {
       type = types.bool;
-      default = false;
+      default = true;
       description = "Store messages in an archive and allow users to access it";
     };
 
+    smacks = mkOption {
+      type = types.bool;
+      default = true;
+      description = "Allow a client to resume a disconnected session, and prevent message loss";
+    };
+
     # Admin interfaces
     admin_adhoc = mkOption {
       type = types.bool;
@@ -137,6 +178,18 @@ let
       description = "Allows administration via an XMPP client that supports ad-hoc commands";
     };
 
+    http_files = mkOption {
+      type = types.bool;
+      default = true;
+      description = "Serve static files from a directory over HTTP";
+    };
+
+    proxy65 = mkOption {
+      type = types.bool;
+      default = true;
+      description = "Enables a file transfer proxy service which clients behind NAT can use";
+    };
+
     admin_telnet = mkOption {
       type = types.bool;
       default = false;
@@ -156,12 +209,6 @@ let
       description = "Enable WebSocket support";
     };
 
-    http_files = mkOption {
-      type = types.bool;
-      default = false;
-      description = "Serve static files from a directory over HTTP";
-    };
-
     # Other specific functionality
     limits = mkOption {
       type = types.bool;
@@ -210,13 +257,6 @@ let
       default = false;
       description = "Legacy authentication. Only used by some old clients and bots";
     };
-
-    proxy65 = mkOption {
-      type = types.bool;
-      default = false;
-      description = "Enables a file transfer proxy service which clients behind NAT can use";
-    };
-
   };
 
   toLua = x:
@@ -235,6 +275,158 @@ let
     };
   '';
 
+  mucOpts = { ... }: {
+    options = {
+      domain = mkOption {
+        type = types.str;
+        description = "Domain name of the MUC";
+      };
+      name = mkOption {
+        type = types.str;
+        description = "The name to return in service discovery responses for the MUC service itself";
+        default = "Prosody Chatrooms";
+      };
+      restrictRoomCreation = mkOption {
+        type = types.enum [ true false "admin" "local" ];
+        default = false;
+        description = "Restrict room creation to server admins";
+      };
+      maxHistoryMessages = mkOption {
+        type = types.int;
+        default = 20;
+        description = "Specifies a limit on what each room can be configured to keep";
+      };
+      roomLocking = mkOption {
+        type = types.bool;
+        default = true;
+        description = ''
+          Enables room locking, which means that a room must be
+          configured before it can be used. Locked rooms are invisible
+          and cannot be entered by anyone but the creator
+        '';
+      };
+      roomLockTimeout = mkOption {
+        type = types.int;
+        default = 300;
+        description = ''
+          Timeout after which the room is destroyed or unlocked if not
+          configured, in seconds
+        '';
+      };
+      tombstones = mkOption {
+        type = types.bool;
+        default = true;
+        description = ''
+          When a room is destroyed, it leaves behind a tombstone which
+          prevents the room being entered or recreated. It also allows
+          anyone who was not in the room at the time it was destroyed
+          to learn about it, and to update their bookmarks. Tombstones
+          prevents the case where someone could recreate a previously
+          semi-anonymous room in order to learn the real JIDs of those
+          who often join there.
+        '';
+      };
+      tombstoneExpiry = mkOption {
+        type = types.int;
+        default = 2678400;
+        description = ''
+          This setting controls how long a tombstone is considered
+          valid. It defaults to 31 days. After this time, the room in
+          question can be created again.
+        '';
+      };
+
+      vcard_muc = mkOption {
+        type = types.bool;
+        default = true;
+      description = "Adds the ability to set vCard for Multi User Chat rooms";
+      };
+
+      # Extra parameters. Defaulting to prosody default values.
+      # Adding them explicitly to make them visible from the options
+      # documentation.
+      #
+      # See https://prosody.im/doc/modules/mod_muc for more details.
+      roomDefaultPublic = mkOption {
+        type = types.bool;
+        default = true;
+        description = "If set, the MUC rooms will be public by default.";
+      };
+      roomDefaultMembersOnly = mkOption {
+        type = types.bool;
+        default = false;
+        description = "If set, the MUC rooms will only be accessible to the members by default.";
+      };
+      roomDefaultModerated = mkOption {
+        type = types.bool;
+        default = false;
+        description = "If set, the MUC rooms will be moderated by default.";
+      };
+      roomDefaultPublicJids = mkOption {
+        type = types.bool;
+        default = false;
+        description = "If set, the MUC rooms will display the public JIDs by default.";
+      };
+      roomDefaultChangeSubject = mkOption {
+        type = types.bool;
+        default = false;
+        description = "If set, the rooms will display the public JIDs by default.";
+      };
+      roomDefaultHistoryLength = mkOption {
+        type = types.int;
+        default = 20;
+        description = "Number of history message sent to participants by default.";
+      };
+      roomDefaultLanguage = mkOption {
+        type = types.str;
+        default = "en";
+        description = "Default room language.";
+      };
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = "Additional MUC specific configuration";
+      };
+    };
+  };
+
+  uploadHttpOpts = { ... }: {
+    options = {
+      domain = mkOption {
+        type = types.nullOr types.str;
+        description = "Domain name for the http-upload service";
+      };
+      uploadFileSizeLimit = mkOption {
+        type = types.str;
+        default = "50 * 1024 * 1024";
+        description = "Maximum file size, in bytes. Defaults to 50MB.";
+      };
+      uploadExpireAfter = mkOption {
+        type = types.str;
+        default = "60 * 60 * 24 * 7";
+        description = "Max age of a file before it gets deleted, in seconds.";
+      };
+      userQuota = mkOption {
+        type = types.nullOr types.int;
+        default = null;
+        example = 1234;
+        description = ''
+          Maximum size of all uploaded files per user, in bytes. There
+          will be no quota if this option is set to null.
+        '';
+      };
+      httpUploadPath = mkOption {
+        type = types.str;
+        description = ''
+          Directory where the uploaded files will be stored. By
+          default, uploaded files are put in a sub-directory of the
+          default Prosody storage path (usually /var/lib/prosody).
+        '';
+        default = "/var/lib/prosody";
+      };
+    };
+  };
+
   vHostOpts = { ... }: {
 
     options = {
@@ -283,6 +475,27 @@ in
         description = "Whether to enable the prosody server";
       };
 
+      xmppComplianceSuite = mkOption {
+        type = types.bool;
+        default = true;
+        description = ''
+          XEP-0423 defines a set of recommended XEPs to implement
+          for a server. It's generally a good idea to implement this
+          set of extensions if you want to provide your users with a
+          good XMPP experience.
+
+          This NixOS module aims to provide an "advanced server"
+          experience, as defined in the XEP-0423[1] specification.
+
+          Setting this option to true will prevent you from building a
+          NixOS configuration which won't comply with this standard.
+          You can explicitly decide to ignore this standard if you
+          know what you are doing by setting this option to false.
+
+          [1] https://xmpp.org/extensions/xep-0423.html
+        '';
+      };
+
       package = mkOption {
         type = types.package;
         description = "Prosody package to use";
@@ -302,6 +515,12 @@ in
         default = "/var/lib/prosody";
       };
 
+      disco_items = mkOption {
+        type = types.listOf (types.submodule discoOpts);
+        default = [];
+        description = "List of discoverable items you want to advertise.";
+      };
+
       user = mkOption {
         type = types.str;
         default = "prosody";
@@ -320,6 +539,31 @@ in
         description = "Allow account creation";
       };
 
+      # HTTP server-related options
+      httpPorts = mkOption {
+        type = types.listOf types.int;
+        description = "Listening HTTP ports list for this service.";
+        default = [ 5280 ];
+      };
+
+      httpInterfaces = mkOption {
+        type = types.listOf types.str;
+        default = [ "*" "::" ];
+        description = "Interfaces on which the HTTP server will listen on.";
+      };
+
+      httpsPorts = mkOption {
+        type = types.listOf types.int;
+        description = "Listening HTTPS ports list for this service.";
+        default = [ 5281 ];
+      };
+
+      httpsInterfaces = mkOption {
+        type = types.listOf types.str;
+        default = [ "*" "::" ];
+        description = "Interfaces on which the HTTPS server will listen on.";
+      };
+
       c2sRequireEncryption = mkOption {
         type = types.bool;
         default = true;
@@ -387,6 +631,26 @@ in
         description = "Addtional path in which to look find plugins/modules";
       };
 
+      uploadHttp = mkOption {
+        description = ''
+          Configures the Prosody built-in HTTP server to handle user uploads.
+        '';
+        type = types.nullOr (types.submodule uploadHttpOpts);
+        default = null;
+        example = {
+          domain = "uploads.my-xmpp-example-host.org";
+        };
+      };
+
+      muc = mkOption {
+        type = types.listOf (types.submodule mucOpts);
+        default = [ ];
+        example = [ {
+          domain = "conference.my-xmpp-example-host.org";
+        } ];
+        description = "Multi User Chat (MUC) configuration";
+      };
+
       virtualHosts = mkOption {
 
         description = "Define the virtual hosts";
@@ -443,9 +707,44 @@ in
 
   config = mkIf cfg.enable {
 
+    assertions = let
+      genericErrMsg = ''
+
+          Having a server not XEP-0423-compliant might make your XMPP
+          experience terrible. See the NixOS manual for further
+          information.
+
+          If you know what you're doing, you can disable this warning by
+          setting config.services.prosody.xmppComplianceSuite to false.
+      '';
+      errors = [
+        { assertion = (builtins.length cfg.muc > 0) || !cfg.xmppComplianceSuite;
+          message = ''
+            You need to set up at least one MUC domain to comply with
+            XEP-0423.
+          '' + genericErrMsg;}
+        { assertion = cfg.uploadHttp != null || !cfg.xmppComplianceSuite;
+          message = ''
+            You need to set up the uploadHttp module through
+            config.services.prosody.uploadHttp to comply with
+            XEP-0423.
+          '' + genericErrMsg;}
+      ];
+    in errors;
+
     environment.systemPackages = [ cfg.package ];
 
-    environment.etc."prosody/prosody.cfg.lua".text = ''
+    environment.etc."prosody/prosody.cfg.lua".text =
+      let
+        httpDiscoItems = if (cfg.uploadHttp != null)
+            then [{ url = cfg.uploadHttp.domain; description = "HTTP upload endpoint";}]
+            else [];
+        mucDiscoItems = builtins.foldl'
+            (acc: muc: [{ url = muc.domain; description = "${muc.domain} MUC endpoint";}] ++ acc)
+            []
+            cfg.muc;
+        discoItems = cfg.disco_items ++ httpDiscoItems ++ mucDiscoItems;
+      in ''
 
       pidfile = "/run/prosody/prosody.pid"
 
@@ -472,6 +771,10 @@ in
         ${ lib.concatStringsSep "\n" (map (x: "${toLua x};") cfg.extraModules)}
       };
 
+      disco_items = {
+      ${ lib.concatStringsSep "\n" (builtins.map (x: ''{ "${x.url}", "${x.description}"};'') discoItems)} 
+      };
+
       allow_registration = ${toLua cfg.allowRegistration}
 
       c2s_require_encryption = ${toLua cfg.c2sRequireEncryption}
@@ -486,8 +789,44 @@ in
 
       authentication = ${toLua cfg.authentication}
 
+      http_interfaces = ${toLua cfg.httpInterfaces}
+
+      https_interfaces = ${toLua cfg.httpsInterfaces}
+
+      http_ports = ${toLua cfg.httpPorts}
+
+      https_ports = ${toLua cfg.httpsPorts}
+
       ${ cfg.extraConfig }
 
+      ${lib.concatMapStrings (muc: ''
+        Component ${toLua muc.domain} "muc"
+            modules_enabled = { "muc_mam"; ${optionalString muc.vcard_muc ''"vcard_muc";'' } }
+            name = ${toLua muc.name}
+            restrict_room_creation = ${toLua muc.restrictRoomCreation}
+            max_history_messages = ${toLua muc.maxHistoryMessages}
+            muc_room_locking = ${toLua muc.roomLocking}
+            muc_room_lock_timeout = ${toLua muc.roomLockTimeout}
+            muc_tombstones = ${toLua muc.tombstones}
+            muc_tombstone_expiry = ${toLua muc.tombstoneExpiry}
+            muc_room_default_public = ${toLua muc.roomDefaultPublic}
+            muc_room_default_members_only = ${toLua muc.roomDefaultMembersOnly}
+            muc_room_default_moderated = ${toLua muc.roomDefaultModerated}
+            muc_room_default_public_jids = ${toLua muc.roomDefaultPublicJids}
+            muc_room_default_change_subject = ${toLua muc.roomDefaultChangeSubject}
+            muc_room_default_history_length = ${toLua muc.roomDefaultHistoryLength}
+            muc_room_default_language = ${toLua muc.roomDefaultLanguage}
+            ${ muc.extraConfig }
+        '') cfg.muc}
+
+      ${ lib.optionalString (cfg.uploadHttp != null) ''
+        Component ${toLua cfg.uploadHttp.domain} "http_upload"
+            http_upload_file_size_limit = ${cfg.uploadHttp.uploadFileSizeLimit}
+            http_upload_expire_after = ${cfg.uploadHttp.uploadExpireAfter}
+            ${lib.optionalString (cfg.uploadHttp.userQuota != null) "http_upload_quota = ${toLua cfg.uploadHttp.userQuota}"}
+            http_upload_path = ${toLua cfg.uploadHttp.httpUploadPath}
+      ''}
+
       ${ lib.concatStringsSep "\n" (lib.mapAttrsToList (n: v: ''
         VirtualHost "${v.domain}"
           enabled = ${boolToString v.enabled};
@@ -522,9 +861,22 @@ in
         PIDFile = "/run/prosody/prosody.pid";
         ExecStart = "${cfg.package}/bin/prosodyctl start";
         ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+
+        MemoryDenyWriteExecute = true;
+        PrivateDevices = true;
+        PrivateMounts = true;
+        PrivateTmp = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
       };
     };
 
   };
-
+  meta.doc = ./prosody.xml;
 }
diff --git a/nixos/modules/services/networking/prosody.xml b/nixos/modules/services/networking/prosody.xml
new file mode 100644
index 00000000000..7859cb1578b
--- /dev/null
+++ b/nixos/modules/services/networking/prosody.xml
@@ -0,0 +1,88 @@
+<chapter xmlns="http://docbook.org/ns/docbook"
+         xmlns:xlink="http://www.w3.org/1999/xlink"
+         xmlns:xi="http://www.w3.org/2001/XInclude"
+         version="5.0"
+         xml:id="module-services-prosody">
+ <title>Prosody</title>
+ <para>
+  <link xlink:href="https://prosody.im/">Prosody</link> is an open-source, modern XMPP server.
+ </para>
+ <section xml:id="module-services-prosody-basic-usage">
+  <title>Basic usage</title>
+
+  <para>
+    A common struggle for most XMPP newcomers is to find the right set
+    of XMPP Extensions (XEPs) to set up. Forget to activate a few of
+    those and your XMPP experience might turn into a nightmare!
+  </para>
+
+  <para>
+    The XMPP community tackles this problem by creating a meta-XEP
+    listing a decent set of XEPs you should implement. This meta-XEP
+    is issued every year, the 2020 edition being
+    <link xlink:href="https://xmpp.org/extensions/xep-0423.html">XEP-0423</link>.
+  </para>
+  <para>
+    The NixOS Prosody module will implement most of these recommended XEPs out of
+    the box. That being said, two components still require some
+    manual configuration: the
+    <link xlink:href="https://xmpp.org/extensions/xep-0045.html">Multi User Chat (MUC)</link>
+    and the <link xlink:href="https://xmpp.org/extensions/xep-0363.html">HTTP File Upload</link> ones.
+    You'll need to create a DNS subdomain for each of those. The current convention is to name your
+    MUC endpoint <literal>conference.example.org</literal> and your HTTP upload domain <literal>upload.example.org</literal>.
+  </para>
+  <para>
+    A good configuration to start with, including a
+    <link xlink:href="https://xmpp.org/extensions/xep-0045.html">Multi User Chat (MUC)</link>
+    endpoint as well as an <link xlink:href="https://xmpp.org/extensions/xep-0363.html">HTTP File Upload</link>
+    endpoint, will look like this:
+    <programlisting>
+services.prosody = {
+  <link linkend="opt-services.prosody.enable">enable</link> = true;
+  <link linkend="opt-services.prosody.admins">admins</link> = [ "root@example.org" ];
+  <link linkend="opt-services.prosody.ssl.cert">ssl.cert</link> = "/var/lib/acme/example.org/fullchain.pem";
+  <link linkend="opt-services.prosody.ssl.key">ssl.key</link> = "/var/lib/acme/example.org/key.pem";
+  <link linkend="opt-services.prosody.virtualHosts">virtualHosts</link>."example.org" = {
+      <link linkend="opt-services.prosody.virtualHosts._name__.enabled">enabled</link> = true;
+      <link linkend="opt-services.prosody.virtualHosts._name__.domain">domain</link> = "example.org";
+      <link linkend="opt-services.prosody.virtualHosts._name__.ssl.cert">ssl.cert</link> = "/var/lib/acme/example.org/fullchain.pem";
+      <link linkend="opt-services.prosody.virtualHosts._name__.ssl.key">ssl.key</link> = "/var/lib/acme/example.org/key.pem";
+  };
+  <link linkend="opt-services.prosody.muc">muc</link> = [ {
+      <link linkend="opt-services.prosody.muc">domain</link> = "conference.example.org";
+  } ];
+  <link linkend="opt-services.prosody.uploadHttp">uploadHttp</link> = {
+      <link linkend="opt-services.prosody.uploadHttp.domain">domain</link> = "upload.example.org";
+  };
+};</programlisting>
+  </para>
+ </section>
+ <section xml:id="module-services-prosody-letsencrypt">
+  <title>Let's Encrypt Configuration</title>
+ <para>
+   As you can see in the code snippet from the
+   <link linkend="module-services-prosody-basic-usage">previous section</link>,
+   you'll need a single TLS certificate covering your main endpoint,
+   the MUC one as well as the HTTP Upload one. We can generate such a
+   certificate by leveraging the ACME
+   <link linkend="opt-security.acme.certs._name_.extraDomains">extraDomains</link> module option.
+ </para>
+ <para>
+   Provided the setup detailed in the previous section, you'll need the following ACME configuration to generate
+   a TLS certificate for the three endpoints:
+    <programlisting>
+security.acme = {
+  <link linkend="opt-security.acme.email">email</link> = "root@example.org";
+  <link linkend="opt-security.acme.acceptTerms">acceptTerms</link> = true;
+  <link linkend="opt-security.acme.certs">certs</link> = {
+    "example.org" = {
+      <link linkend="opt-security.acme.certs._name_.webroot">webroot</link> = "/var/www/example.org";
+      <link linkend="opt-security.acme.certs._name_.email">email</link> = "root@example.org";
+      <link linkend="opt-security.acme.certs._name_.extraDomains">extraDomains."conference.example.org"</link> = null;
+      <link linkend="opt-security.acme.certs._name_.extraDomains">extraDomains."upload.example.org"</link> = null;
+    };
+  };
+};</programlisting>
+ </para>
+</section>
+</chapter>
diff --git a/nixos/modules/services/networking/quassel.nix b/nixos/modules/services/networking/quassel.nix
index 52ecd90b7c6..da723ec86ad 100644
--- a/nixos/modules/services/networking/quassel.nix
+++ b/nixos/modules/services/networking/quassel.nix
@@ -16,12 +16,7 @@ in
 
     services.quassel = {
 
-      enable = mkOption {
-        default = false;
-        description = ''
-          Whether to run the Quassel IRC client daemon.
-        '';
-      };
+      enable = mkEnableOption "the Quassel IRC client daemon";
 
       certificateFile = mkOption {
         type = types.nullOr types.str;
diff --git a/nixos/modules/services/networking/radvd.nix b/nixos/modules/services/networking/radvd.nix
index 020faa34922..f4b00c9b356 100644
--- a/nixos/modules/services/networking/radvd.nix
+++ b/nixos/modules/services/networking/radvd.nix
@@ -19,6 +19,7 @@ in
   options = {
 
     services.radvd.enable = mkOption {
+      type = types.bool;
       default = false;
       description =
         ''
diff --git a/nixos/modules/services/networking/rdnssd.nix b/nixos/modules/services/networking/rdnssd.nix
index bccab805bee..469504c4317 100644
--- a/nixos/modules/services/networking/rdnssd.nix
+++ b/nixos/modules/services/networking/rdnssd.nix
@@ -17,6 +17,7 @@ in
   options = {
 
     services.rdnssd.enable = mkOption {
+      type = types.bool;
       default = false;
       #default = config.networking.enableIPv6;
       description =
diff --git a/nixos/modules/services/networking/sabnzbd.nix b/nixos/modules/services/networking/sabnzbd.nix
index 62b24d4377f..ff5aef7d1cb 100644
--- a/nixos/modules/services/networking/sabnzbd.nix
+++ b/nixos/modules/services/networking/sabnzbd.nix
@@ -15,10 +15,8 @@ in
 
   options = {
     services.sabnzbd = {
-      enable = mkOption {
-        default = false;
-        description = "Whether to enable the sabnzbd server.";
-      };
+      enable = mkEnableOption "the sabnzbd server";
+
       configFile = mkOption {
         default = "/var/lib/sabnzbd/sabnzbd.ini";
         description = "Path to config file.";
diff --git a/nixos/modules/services/networking/shairport-sync.nix b/nixos/modules/services/networking/shairport-sync.nix
index 2e988e0ca2e..b4b86a2d55b 100644
--- a/nixos/modules/services/networking/shairport-sync.nix
+++ b/nixos/modules/services/networking/shairport-sync.nix
@@ -17,6 +17,7 @@ in
     services.shairport-sync = {
 
       enable = mkOption {
+        type = types.bool;
         default = false;
         description = ''
           Enable the shairport-sync daemon.
diff --git a/nixos/modules/services/networking/skydns.nix b/nixos/modules/services/networking/skydns.nix
index 6ad18bb2240..e79d6de9264 100644
--- a/nixos/modules/services/networking/skydns.nix
+++ b/nixos/modules/services/networking/skydns.nix
@@ -83,7 +83,7 @@ in {
         SKYDNS_NAMESERVERS = concatStringsSep "," cfg.nameservers;
       };
       serviceConfig = {
-        ExecStart = "${cfg.package.bin}/bin/skydns";
+        ExecStart = "${cfg.package}/bin/skydns";
       };
     };
 
diff --git a/nixos/modules/services/networking/ssh/lshd.nix b/nixos/modules/services/networking/ssh/lshd.nix
index eca599afb33..41d0584080e 100644
--- a/nixos/modules/services/networking/ssh/lshd.nix
+++ b/nixos/modules/services/networking/ssh/lshd.nix
@@ -19,6 +19,7 @@ in
     services.lshd = {
 
       enable = mkOption {
+        type = types.bool;
         default = false;
         description = ''
           Whether to enable the GNU lshd SSH2 daemon, which allows
@@ -53,21 +54,25 @@ in
       };
 
       syslog = mkOption {
+        type = types.bool;
         default = true;
         description = ''Whether to enable syslog output.'';
       };
 
       passwordAuthentication = mkOption {
+        type = types.bool;
         default = true;
         description = ''Whether to enable password authentication.'';
       };
 
       publicKeyAuthentication = mkOption {
+        type = types.bool;
         default = true;
         description = ''Whether to enable public key authentication.'';
       };
 
       rootLogin = mkOption {
+        type = types.bool;
         default = false;
         description = ''Whether to enable remote root login.'';
       };
@@ -89,11 +94,13 @@ in
       };
 
       tcpForwarding = mkOption {
+        type = types.bool;
         default = true;
         description = ''Whether to enable TCP/IP forwarding.'';
       };
 
       x11Forwarding = mkOption {
+        type = types.bool;
         default = true;
         description = ''Whether to enable X11 forwarding.'';
       };
diff --git a/nixos/modules/services/networking/tailscale.nix b/nixos/modules/services/networking/tailscale.nix
index 513c42b4011..4d6aeb75ebd 100644
--- a/nixos/modules/services/networking/tailscale.nix
+++ b/nixos/modules/services/networking/tailscale.nix
@@ -37,7 +37,10 @@ in {
         RuntimeDirectoryMode = 755;
 
         StateDirectory = "tailscale";
-        StateDirectoryMode = 700;
+        StateDirectoryMode = 750;
+
+        CacheDirectory = "tailscale";
+        CacheDirectoryMode = 750;
 
         Restart = "on-failure";
       };
diff --git a/nixos/modules/services/networking/tcpcrypt.nix b/nixos/modules/services/networking/tcpcrypt.nix
index 18f2e135124..5a91054e166 100644
--- a/nixos/modules/services/networking/tcpcrypt.nix
+++ b/nixos/modules/services/networking/tcpcrypt.nix
@@ -15,6 +15,7 @@ in
   options = {
 
     networking.tcpcrypt.enable = mkOption {
+      type = types.bool;
       default = false;
       description = ''
         Whether to enable opportunistic TCP encryption. If the other end
diff --git a/nixos/modules/services/networking/thelounge.nix b/nixos/modules/services/networking/thelounge.nix
index 875d8f66169..a1b06703484 100644
--- a/nixos/modules/services/networking/thelounge.nix
+++ b/nixos/modules/services/networking/thelounge.nix
@@ -62,7 +62,6 @@ in {
     systemd.services.thelounge = {
       description = "The Lounge web IRC client";
       wantedBy = [ "multi-user.target" ];
-      environment = { THELOUNGE_HOME = dataDir; };
       preStart = "ln -sf ${pkgs.writeText "config.js" configJsData} ${dataDir}/config.js";
       serviceConfig = {
         User = "thelounge";
diff --git a/nixos/modules/services/networking/wicd.nix b/nixos/modules/services/networking/wicd.nix
index 03c6bd28aab..aa10a50f876 100644
--- a/nixos/modules/services/networking/wicd.nix
+++ b/nixos/modules/services/networking/wicd.nix
@@ -9,6 +9,7 @@ with lib;
   options = {
 
     networking.wicd.enable = mkOption {
+      type = types.bool;
       default = false;
       description = ''
         Whether to start <command>wicd</command>. Wired and
diff --git a/nixos/modules/services/networking/xinetd.nix b/nixos/modules/services/networking/xinetd.nix
index 8dc6f845ed8..2f527ab156a 100644
--- a/nixos/modules/services/networking/xinetd.nix
+++ b/nixos/modules/services/networking/xinetd.nix
@@ -44,12 +44,7 @@ in
 
   options = {
 
-    services.xinetd.enable = mkOption {
-      default = false;
-      description = ''
-        Whether to enable the xinetd super-server daemon.
-      '';
-    };
+    services.xinetd.enable = mkEnableOption "the xinetd super-server daemon";
 
     services.xinetd.extraDefaults = mkOption {
       default = "";
diff --git a/nixos/modules/services/networking/yggdrasil.nix b/nixos/modules/services/networking/yggdrasil.nix
index 9e675ecd6f4..ecd1406b483 100644
--- a/nixos/modules/services/networking/yggdrasil.nix
+++ b/nixos/modules/services/networking/yggdrasil.nix
@@ -83,6 +83,14 @@ in {
         '';
       };
 
+      group = mkOption {
+        type = types.str;
+        default = "root";
+        example = "wheel";
+        description =
+          "Group to grant acces to the Yggdrasil control socket.";
+      };
+
       openMulticastPort = mkOption {
         type = bool;
         default = false;
@@ -144,8 +152,9 @@ in {
         ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
         Restart = "always";
 
+        Group = cfg.group;
         RuntimeDirectory = "yggdrasil";
-        RuntimeDirectoryMode = "0700";
+        RuntimeDirectoryMode = "0750";
         BindReadOnlyPaths = mkIf configFileProvided
           [ "${cfg.configFile}" ];
 
diff --git a/nixos/modules/services/printing/cupsd.nix b/nixos/modules/services/printing/cupsd.nix
index 59306d625e6..e67badfcd29 100644
--- a/nixos/modules/services/printing/cupsd.nix
+++ b/nixos/modules/services/printing/cupsd.nix
@@ -153,6 +153,16 @@ in
         '';
       };
 
+      allowFrom = mkOption {
+        type = types.listOf types.str;
+        default = [ "localhost" ];
+        example = [ "all" ];
+        apply = concatMapStringsSep "\n" (x: "Allow ${x}");
+        description = ''
+          Hosts from which to allow unconditional access.
+        '';
+      };
+
       bindirCmds = mkOption {
         type = types.lines;
         internal = true;
@@ -403,19 +413,19 @@ in
 
         <Location />
           Order allow,deny
-          Allow localhost
+          ${cfg.allowFrom}
         </Location>
 
         <Location /admin>
           Order allow,deny
-          Allow localhost
+          ${cfg.allowFrom}
         </Location>
 
         <Location /admin/conf>
           AuthType Basic
           Require user @SYSTEM
           Order allow,deny
-          Allow localhost
+          ${cfg.allowFrom}
         </Location>
 
         <Policy default>
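A minimal sketch of the new allowFrom option, opening the CUPS web interface to a trusted subnet (the address pattern is illustrative):

  services.printing = {
    enable = true;
    # Each entry is rendered as an "Allow <host>" directive in the generated cupsd.conf.
    allowFrom = [ "localhost" "10.0.0.*" ];
  };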
diff --git a/nixos/modules/services/security/bitwarden_rs/default.nix b/nixos/modules/services/security/bitwarden_rs/default.nix
index a63be0ee766..903a5327037 100644
--- a/nixos/modules/services/security/bitwarden_rs/default.nix
+++ b/nixos/modules/services/security/bitwarden_rs/default.nix
@@ -58,7 +58,7 @@ in {
       default = {};
       example = literalExample ''
         {
-          domain = https://bw.domain.tld:8443;
+          domain = "https://bw.domain.tld:8443";
           signupsAllowed = true;
           rocketPort = 8222;
           rocketLog = "critical";
diff --git a/nixos/modules/services/security/fprot.nix b/nixos/modules/services/security/fprot.nix
index f203f2abc03..3a0b08b3c6d 100644
--- a/nixos/modules/services/security/fprot.nix
+++ b/nixos/modules/services/security/fprot.nix
@@ -10,12 +10,7 @@ in {
 
     services.fprot = {
       updater = {
-        enable = mkOption {
-          default = false;
-          description = ''
-            Whether to enable automatic F-Prot virus definitions database updates.
-          '';
-        };
+        enable = mkEnableOption "automatic F-Prot virus definitions database updates";
 
         productData = mkOption {
           description = ''
diff --git a/nixos/modules/services/security/hologram-agent.nix b/nixos/modules/services/security/hologram-agent.nix
index a5087b0a99b..e37334b3cf5 100644
--- a/nixos/modules/services/security/hologram-agent.nix
+++ b/nixos/modules/services/security/hologram-agent.nix
@@ -43,12 +43,12 @@ in {
       description = "Provide EC2 instance credentials to machines outside of EC2";
       after       = [ "network.target" ];
       wantedBy    = [ "multi-user.target" ];
-      requires    = [ "network-link-dummy0.service" "network-addresses-dummy0.service" ]; 
+      requires    = [ "network-link-dummy0.service" "network-addresses-dummy0.service" ];
       preStart = ''
         /run/current-system/sw/bin/rm -fv /run/hologram.sock
       '';
       serviceConfig = {
-        ExecStart = "${pkgs.hologram.bin}/bin/hologram-agent -debug -conf ${cfgFile} -port ${cfg.httpPort}";
+        ExecStart = "${pkgs.hologram}/bin/hologram-agent -debug -conf ${cfgFile} -port ${cfg.httpPort}";
       };
     };
 
diff --git a/nixos/modules/services/security/hologram-server.nix b/nixos/modules/services/security/hologram-server.nix
index bad02c7440b..4acf6ae0e21 100644
--- a/nixos/modules/services/security/hologram-server.nix
+++ b/nixos/modules/services/security/hologram-server.nix
@@ -123,7 +123,7 @@ in {
       wantedBy    = [ "multi-user.target" ];
 
       serviceConfig = {
-        ExecStart = "${pkgs.hologram.bin}/bin/hologram-server --debug --conf ${cfgFile}";
+        ExecStart = "${pkgs.hologram}/bin/hologram-server --debug --conf ${cfgFile}";
       };
     };
   };
diff --git a/nixos/modules/services/security/oauth2_proxy.nix b/nixos/modules/services/security/oauth2_proxy.nix
index 2abb9ec32ac..d5c5437329e 100644
--- a/nixos/modules/services/security/oauth2_proxy.nix
+++ b/nixos/modules/services/security/oauth2_proxy.nix
@@ -12,7 +12,7 @@ let
   # command-line to launch oauth2_proxy.
   providerSpecificOptions = {
     azure = cfg: {
-      azure.tenant = cfg.azure.tenant;
+      azure-tenant = cfg.azure.tenant;
       resource = cfg.azure.resource;
     };
 
@@ -44,6 +44,7 @@ let
     pass-access-token = passAccessToken;
     pass-basic-auth = passBasicAuth;
     pass-host-header = passHostHeader;
+    reverse-proxy = reverseProxy;
     proxy-prefix = proxyPrefix;
     profile-url = profileURL;
     redeem-url = redeemURL;
@@ -65,8 +66,8 @@ let
   } // lib.optionalAttrs (cfg.htpasswd.file != null) {
     display-htpasswd-file = cfg.htpasswd.displayForm;
   } // lib.optionalAttrs tls.enable {
-    tls-cert = tls.certificate;
-    tls-key = tls.key;
+    tls-cert-file = tls.certificate;
+    tls-key-file = tls.key;
     https-address = tls.httpsAddress;
   } // (getProviderOptions cfg cfg.provider) // cfg.extraConfig;
 
@@ -98,14 +99,21 @@ in
 
     ##############################################
     # PROVIDER configuration
+    # Taken from: https://github.com/pusher/oauth2_proxy/blob/master/providers/providers.go
     provider = mkOption {
       type = types.enum [
         "google"
-        "github"
         "azure"
+        "facebook"
+        "github"
+        "keycloak"
         "gitlab"
         "linkedin"
-        "myusa"
+        "login.gov"
+        "bitbucket"
+        "nextcloud"
+        "digitalocean"
+        "oidc"
       ];
       default = "google";
       description = ''
@@ -433,6 +441,17 @@ in
       '';
     };
 
+    reverseProxy = mkOption {
+      type = types.bool;
+      default = false;
+      description = ''
+        Whether to accept headers such as <literal>X-Real-Ip</literal> set by
+        a reverse proxy. Enable this flag when running behind a reverse proxy
+        so that the client address is logged instead of the reverse proxy's
+        IP address.
+      '';
+    };
+
     proxyPrefix = mkOption {
       type = types.str;
       default = "/oauth2";
@@ -558,7 +577,7 @@ in
       serviceConfig = {
         User = "oauth2_proxy";
         Restart = "always";
-        ExecStart = "${cfg.package.bin}/bin/oauth2_proxy ${configString}";
+        ExecStart = "${cfg.package}/bin/oauth2_proxy ${configString}";
         EnvironmentFile = mkIf (cfg.keyFile != null) cfg.keyFile;
       };
     };
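
Taken together, these hunks rename the TLS flags, widen the provider list, and add a reverse-proxy toggle. A sketch of a deployment behind a reverse proxy; the provider and key file path are placeholders:

    # configuration.nix fragment (sketch)
    { ... }:
    {
      services.oauth2_proxy = {
        enable = true;
        provider = "github";   # any value from the enum above
        reverseProxy = true;   # trust X-Real-Ip and friends set by the proxy
        # Client id/secret are typically supplied out of band via an
        # environment file (cfg.keyFile above).
        keyFile = "/run/keys/oauth2_proxy.env";
      };
    }
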
diff --git a/nixos/modules/services/system/kerberos/default.nix b/nixos/modules/services/system/kerberos/default.nix
index c55241c4cff..9a1e6739901 100644
--- a/nixos/modules/services/system/kerberos/default.nix
+++ b/nixos/modules/services/system/kerberos/default.nix
@@ -51,12 +51,7 @@ in
   ###### interface
   options = {
     services.kerberos_server = {
-      enable = mkOption {
-        default = false;
-        description = ''
-          Enable the kerberos authentification server.
-        '';
-      };
+      enable = lib.mkEnableOption "the Kerberos authentication server";
 
       realms = mkOption {
         type = types.attrsOf (types.submodule realm);
diff --git a/nixos/modules/services/system/localtime.nix b/nixos/modules/services/system/localtime.nix
index 74925c5e2c4..8f8e2e2e933 100644
--- a/nixos/modules/services/system/localtime.nix
+++ b/nixos/modules/services/system/localtime.nix
@@ -8,6 +8,7 @@ in {
   options = {
     services.localtime = {
       enable = mkOption {
+        type = types.bool;
         default = false;
         description = ''
           Enable <literal>localtime</literal>, simple daemon for keeping the system
diff --git a/nixos/modules/services/system/nscd.nix b/nixos/modules/services/system/nscd.nix
index e11f7e049d8..d720f254b81 100644
--- a/nixos/modules/services/system/nscd.nix
+++ b/nixos/modules/services/system/nscd.nix
@@ -7,6 +7,10 @@ let
   nssModulesPath = config.system.nssModules.path;
   cfg = config.services.nscd;
 
+  nscd = if pkgs.stdenv.hostPlatform.libc == "glibc"
+         then pkgs.stdenv.cc.libc.bin
+         else pkgs.glibc.bin;
+
 in
 
 {
@@ -20,7 +24,11 @@ in
       enable = mkOption {
         type = types.bool;
         default = true;
-        description = "Whether to enable the Name Service Cache Daemon.";
+        description = ''
+          Whether to enable the Name Service Cache Daemon.
+          Disabling this is strongly discouraged, as it effectively disables NSS lookups
+          from all non-glibc NSS modules, including the ones provided by systemd.
+        '';
       };
 
       config = mkOption {
@@ -59,16 +67,16 @@ in
         # files. So prefix the ExecStart command with "!" to prevent systemd
         # from dropping privileges early. See ExecStart in systemd.service(5).
         serviceConfig =
-          { ExecStart = "!@${pkgs.glibc.bin}/sbin/nscd nscd";
+          { ExecStart = "!@${nscd}/sbin/nscd nscd";
             Type = "forking";
             DynamicUser = true;
             RuntimeDirectory = "nscd";
             PIDFile = "/run/nscd/nscd.pid";
             Restart = "always";
             ExecReload =
-              [ "${pkgs.glibc.bin}/sbin/nscd --invalidate passwd"
-                "${pkgs.glibc.bin}/sbin/nscd --invalidate group"
-                "${pkgs.glibc.bin}/sbin/nscd --invalidate hosts"
+              [ "${nscd}/sbin/nscd --invalidate passwd"
+                "${nscd}/sbin/nscd --invalidate group"
+                "${nscd}/sbin/nscd --invalidate hosts"
               ];
           };
       };
diff --git a/nixos/modules/services/system/uptimed.nix b/nixos/modules/services/system/uptimed.nix
index 3c9978ab226..1e256c51408 100644
--- a/nixos/modules/services/system/uptimed.nix
+++ b/nixos/modules/services/system/uptimed.nix
@@ -10,6 +10,7 @@ in
   options = {
     services.uptimed = {
       enable = mkOption {
+        type = types.bool;
         default = false;
         description = ''
           Enable <literal>uptimed</literal>, allowing you to track
diff --git a/nixos/modules/services/torrent/deluge.nix b/nixos/modules/services/torrent/deluge.nix
index 0c72505395d..45398cb2613 100644
--- a/nixos/modules/services/torrent/deluge.nix
+++ b/nixos/modules/services/torrent/deluge.nix
@@ -5,6 +5,7 @@ with lib;
 let
   cfg = config.services.deluge;
   cfg_web = config.services.deluge.web;
+  isDeluge1 = versionOlder cfg.package.version "2.0.0";
 
   openFilesLimit = 4096;
   listenPortsDefault = [ 6881 6889 ];
@@ -18,11 +19,11 @@ let
   preStart = if cfg.declarative then ''
     if [ -e ${declarativeLockFile} ]; then
       # Was declarative before, no need to back up anything
-      ln -sf ${configFile} ${configDir}/core.conf
+      ${if isDeluge1 then "ln -sf" else "cp"} ${configFile} ${configDir}/core.conf
       ln -sf ${cfg.authFile} ${configDir}/auth
     else
       # Declarative for the first time, backup stateful files
-      ln -sb --suffix=.stateful ${configFile} ${configDir}/core.conf
+      ${if isDeluge1 then "ln -s" else "cp"} -b --suffix=.stateful ${configFile} ${configDir}/core.conf
       ln -sb --suffix=.stateful ${cfg.authFile} ${configDir}/auth
       echo "Autogenerated file that signifies that this server configuration is managed declaratively by NixOS" \
         > ${declarativeLockFile}
@@ -141,7 +142,15 @@ in {
           description = ''
             Extra packages available at runtime to enable Deluge's plugins. For example,
             extraction utilities are required for the built-in "Extractor" plugin.
-            This always contains unzip, gnutar, xz, p7zip and bzip2.
+            This always contains unzip, gnutar, xz and bzip2.
+          '';
+        };
+
+        package = mkOption {
+          type = types.package;
+          example = literalExample "pkgs.deluge-1_x";
+          description = ''
+            Deluge package to use.
           '';
         };
       };
@@ -170,8 +179,15 @@ in {
 
   config = mkIf cfg.enable {
 
+    services.deluge.package = mkDefault (
+      if versionAtLeast config.system.stateVersion "20.09" then
+        pkgs.deluge-2_x
+      else
+        pkgs.deluge-1_x
+    );
+
     # Provide a default set of `extraPackages`.
-    services.deluge.extraPackages = with pkgs; [ unzip gnutar xz p7zip bzip2 ];
+    services.deluge.extraPackages = with pkgs; [ unzip gnutar xz bzip2 ];
 
     systemd.tmpfiles.rules = [
       "d '${cfg.dataDir}' 0770 ${cfg.user} ${cfg.group}"
@@ -189,10 +205,10 @@ in {
       after = [ "network.target" ];
       description = "Deluge BitTorrent Daemon";
       wantedBy = [ "multi-user.target" ];
-      path = [ pkgs.deluge ] ++ cfg.extraPackages;
+      path = [ cfg.package ] ++ cfg.extraPackages;
       serviceConfig = {
         ExecStart = ''
-          ${pkgs.deluge}/bin/deluged \
+          ${cfg.package}/bin/deluged \
             --do-not-daemonize \
             --config ${configDir}
         '';
@@ -212,10 +228,11 @@ in {
       requires = [ "deluged.service" ];
       description = "Deluge BitTorrent WebUI";
       wantedBy = [ "multi-user.target" ];
-      path = [ pkgs.deluge ];
+      path = [ cfg.package ];
       serviceConfig = {
         ExecStart = ''
-          ${pkgs.deluge}/bin/deluge-web \
+          ${cfg.package}/bin/deluge-web \
+            ${optionalString (!isDeluge1) "--do-not-daemonize"} \
             --config ${configDir} \
             --port ${toString cfg.web.port}
         '';
@@ -234,7 +251,7 @@ in {
       })
     ];
 
-    environment.systemPackages = [ pkgs.deluge ];
+    environment.systemPackages = [ cfg.package ];
 
     users.users = mkIf (cfg.user == "deluge") {
       deluge = {
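
With the new package option the default is derived from system.stateVersion (Deluge 2.x on 20.09 and later), but it can be pinned explicitly, e.g. to stay on the 1.x series:

    # configuration.nix fragment (sketch)
    { pkgs, ... }:
    {
      services.deluge = {
        enable = true;
        # Overrides the stateVersion-based default set above.
        package = pkgs.deluge-1_x;
      };
    }
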
diff --git a/nixos/modules/services/web-apps/dokuwiki.nix b/nixos/modules/services/web-apps/dokuwiki.nix
index 07af7aa0dfe..33a828fa2cb 100644
--- a/nixos/modules/services/web-apps/dokuwiki.nix
+++ b/nixos/modules/services/web-apps/dokuwiki.nix
@@ -3,13 +3,14 @@
 let
 
   inherit (lib) mkEnableOption mkForce mkIf mkMerge mkOption optionalAttrs recursiveUpdate types;
+  inherit (lib) concatMapStringsSep flatten mapAttrs mapAttrs' mapAttrsToList nameValuePair;
 
-  cfg = config.services.dokuwiki;
+  eachSite = config.services.dokuwiki;
 
-  user = config.services.nginx.user;
+  user = "dokuwiki";
   group = config.services.nginx.group;
 
-  dokuwikiAclAuthConfig = pkgs.writeText "acl.auth.php" ''
+  dokuwikiAclAuthConfig = cfg: pkgs.writeText "acl.auth.php" ''
     # acl.auth.php
     # <?php exit()?>
     #
@@ -18,244 +19,353 @@ let
     ${toString cfg.acl}
   '';
 
-  dokuwikiLocalConfig = pkgs.writeText "local.php" ''
+  dokuwikiLocalConfig = cfg: pkgs.writeText "local.php" ''
     <?php
     $conf['savedir'] = '${cfg.stateDir}';
     $conf['superuser'] = '${toString cfg.superUser}';
     $conf['useacl'] = '${toString cfg.aclUse}';
+    $conf['disableactions'] = '${cfg.disableActions}';
     ${toString cfg.extraConfig}
   '';
 
-  dokuwikiPluginsLocalConfig = pkgs.writeText "plugins.local.php" ''
+  dokuwikiPluginsLocalConfig = cfg: pkgs.writeText "plugins.local.php" ''
     <?php
     ${cfg.pluginsConfig}
   '';
 
-in
-{
-  options.services.dokuwiki = {
-    enable = mkEnableOption "DokuWiki web application.";
+  pkg = hostName: cfg: pkgs.stdenv.mkDerivation rec {
+    pname = "dokuwiki-${hostName}";
+    version = src.version;
+    src = cfg.package;
 
-    hostName = mkOption {
-      type = types.str;
-      default = "localhost";
-      description = "FQDN for the instance.";
-    };
+    installPhase = ''
+      mkdir -p $out
+      cp -r * $out/
 
-    stateDir = mkOption {
-      type = types.path;
-      default = "/var/lib/dokuwiki/data";
-      description = "Location of the dokuwiki state directory.";
-    };
+      # symlink the dokuwiki config
+      ln -s ${dokuwikiLocalConfig cfg} $out/share/dokuwiki/local.php
 
-    acl = mkOption {
-      type = types.nullOr types.lines;
-      default = null;
-      example = "*               @ALL               8";
-      description = ''
-        Access Control Lists: see <link xlink:href="https://www.dokuwiki.org/acl"/>
-        Mutually exclusive with services.dokuwiki.aclFile
-        Set this to a value other than null to take precedence over aclFile option.
-      '';
-    };
+      # symlink plugins config
+      ln -s ${dokuwikiPluginsLocalConfig cfg} $out/share/dokuwiki/plugins.local.php
 
-    aclFile = mkOption {
-      type = types.nullOr types.path;
-      default = null;
-      description = ''
-        Location of the dokuwiki acl rules. Mutually exclusive with services.dokuwiki.acl
-        Mutually exclusive with services.dokuwiki.acl which is preferred.
-        Consult documentation <link xlink:href="https://www.dokuwiki.org/acl"/> for further instructions.
-        Example: <link xlink:href="https://github.com/splitbrain/dokuwiki/blob/master/conf/acl.auth.php.dist"/>
-      '';
-    };
+      # symlink acl
+      ln -s ${dokuwikiAclAuthConfig cfg} $out/share/dokuwiki/acl.auth.php
 
-    aclUse = mkOption {
-      type = types.bool;
-      default = true;
-      description = ''
-        Necessary for users to log in into the system.
-        Also limits anonymous users. When disabled,
-        everyone is able to create and edit content.
-      '';
-    };
+      # symlink additional plugin(s) and templates(s)
+      ${concatMapStringsSep "\n" (template: "ln -s ${template} $out/share/dokuwiki/lib/tpl/${template.name}") cfg.templates}
+      ${concatMapStringsSep "\n" (plugin: "ln -s ${plugin} $out/share/dokuwiki/lib/plugins/${plugin.name}") cfg.plugins}
+    '';
+  };
 
-    pluginsConfig = mkOption {
-      type = types.lines;
-      default = ''
-        $plugins['authad'] = 0;
-        $plugins['authldap'] = 0;
-        $plugins['authmysql'] = 0;
-        $plugins['authpgsql'] = 0;
-      '';
-      description = ''
-        List of the dokuwiki (un)loaded plugins.
-      '';
-    };
+  siteOpts = { config, lib, name, ...}: {
+    options = {
+      enable = mkEnableOption "DokuWiki web application";
 
-    superUser = mkOption {
-      type = types.nullOr types.str;
-      default = "@admin";
-      description = ''
-        You can set either a username, a list of usernames (“admin1,admin2”), 
-        or the name of a group by prepending an @ char to the groupname
-        Consult documentation <link xlink:href="https://www.dokuwiki.org/config:superuser"/> for further instructions.
-      '';
-    };
+      package = mkOption {
+        type = types.package;
+        default = pkgs.dokuwiki;
+        description = "Which dokuwiki package to use.";
+      };
+
+      hostName = mkOption {
+        type = types.str;
+        default = "localhost";
+        description = "FQDN for the instance.";
+      };
+
+      stateDir = mkOption {
+        type = types.path;
+        default = "/var/lib/dokuwiki/${name}/data";
+        description = "Location of the dokuwiki state directory.";
+      };
 
-    usersFile = mkOption {
-      type = types.nullOr types.path;
-      default = null;
-      description = ''
-        Location of the dokuwiki users file. List of users. Format:
-        login:passwordhash:Real Name:email:groups,comma,separated 
-        Create passwordHash easily by using:$ mkpasswd -5 password `pwgen 8 1`
-        Example: <link xlink:href="https://github.com/splitbrain/dokuwiki/blob/master/conf/users.auth.php.dist"/>
+      acl = mkOption {
+        type = types.nullOr types.lines;
+        default = null;
+        example = "*               @ALL               8";
+        description = ''
+          Access Control Lists: see <link xlink:href="https://www.dokuwiki.org/acl"/>
+          Mutually exclusive with services.dokuwiki.aclFile
+          Set this to a value other than null to take precedence over aclFile option.
+
+          Warning: Consider using aclFile instead if you do not
+          want to store the ACL in the world-readable Nix store.
         '';
-    };
+      };
 
-    extraConfig = mkOption {
-      type = types.nullOr types.lines;
-      default = null;
-      example = ''
-        $conf['title'] = 'My Wiki';
-        $conf['userewrite'] = 1;
-      '';
-      description = ''
-        DokuWiki configuration. Refer to
-        <link xlink:href="https://www.dokuwiki.org/config"/>
-        for details on supported values.
-      '';
-    };
+      aclFile = mkOption {
+        type = with types; nullOr str;
+        default = if (config.aclUse && config.acl == null) then "/var/lib/dokuwiki/${name}/acl.auth.php" else null;
+        description = ''
+          Location of the dokuwiki acl rules. Mutually exclusive with services.dokuwiki.acl
+          Mutually exclusive with services.dokuwiki.acl which is preferred.
+          Consult documentation <link xlink:href="https://www.dokuwiki.org/acl"/> for further instructions.
+          Example: <link xlink:href="https://github.com/splitbrain/dokuwiki/blob/master/conf/acl.auth.php.dist"/>
+        '';
+        example = "/var/lib/dokuwiki/${name}/acl.auth.php";
+      };
 
-    poolConfig = mkOption {
-      type = with types; attrsOf (oneOf [ str int bool ]);
-      default = {
-        "pm" = "dynamic";
-        "pm.max_children" = 32;
-        "pm.start_servers" = 2;
-        "pm.min_spare_servers" = 2;
-        "pm.max_spare_servers" = 4;
-        "pm.max_requests" = 500;
+      aclUse = mkOption {
+        type = types.bool;
+        default = true;
+        description = ''
+          Necessary for users to log in into the system.
+          Also limits anonymous users. When disabled,
+          everyone is able to create and edit content.
+        '';
       };
-      description = ''
-        Options for the dokuwiki PHP pool. See the documentation on <literal>php-fpm.conf</literal>
-        for details on configuration directives.
-      '';
-    };
 
-    nginx = mkOption {
-      type = types.submodule (
-        recursiveUpdate
-          (import ../web-servers/nginx/vhost-options.nix { inherit config lib; })
-          {
-            # Enable encryption by default,
-            options.forceSSL.default = true;
-            options.enableACME.default = true;
-          }
-      );
-      default = {forceSSL = true; enableACME = true;};
-      example = {
-        serverAliases = [
-          "wiki.\${config.networking.domain}"
-        ];
-        enableACME = false;
+      pluginsConfig = mkOption {
+        type = types.lines;
+        default = ''
+          $plugins['authad'] = 0;
+          $plugins['authldap'] = 0;
+          $plugins['authmysql'] = 0;
+          $plugins['authpgsql'] = 0;
+        '';
+        description = ''
+          List of the dokuwiki (un)loaded plugins.
+        '';
+      };
+
+      superUser = mkOption {
+        type = types.nullOr types.str;
+        default = "@admin";
+        description = ''
+          You can set either a username, a list of usernames (“admin1,admin2”),
+          or the name of a group by prepending an @ character to the group name.
+          Consult documentation <link xlink:href="https://www.dokuwiki.org/config:superuser"/> for further instructions.
+        '';
       };
-      description = ''
-        With this option, you can customize the nginx virtualHost which already has sensible defaults for DokuWiki.
-      '';
+
+      usersFile = mkOption {
+        type = with types; nullOr str;
+        default = if config.aclUse then "/var/lib/dokuwiki/${name}/users.auth.php" else null;
+        description = ''
+          Location of the dokuwiki users file. List of users. Format:
+          login:passwordhash:Real Name:email:groups,comma,separated
+          Create the passwordhash easily by using: $ mkpasswd -5 password `pwgen 8 1`
+          Example: <link xlink:href="https://github.com/splitbrain/dokuwiki/blob/master/conf/users.auth.php.dist"/>
+        '';
+        example = "/var/lib/dokuwiki/${name}/users.auth.php";
+      };
+
+      disableActions = mkOption {
+        type = types.nullOr types.str;
+        default = "";
+        example = "search,register";
+        description = ''
+          Disable individual action modes. Refer to
+          <link xlink:href="https://www.dokuwiki.org/config:action_modes"/>
+          for details on supported values.
+        '';
+      };
+
+      extraConfig = mkOption {
+        type = types.nullOr types.lines;
+        default = null;
+        example = ''
+          $conf['title'] = 'My Wiki';
+          $conf['userewrite'] = 1;
+        '';
+        description = ''
+          DokuWiki configuration. Refer to
+          <link xlink:href="https://www.dokuwiki.org/config"/>
+          for details on supported values.
+        '';
+      };
+
+      plugins = mkOption {
+        type = types.listOf types.path;
+        default = [];
+        description = ''
+          List of paths to additional DokuWiki plugins; each one is linked into
+          the wiki's 'lib/plugins' directory.
+          <note><para>These plugins need to be packaged before use, see example.</para></note>
+        '';
+        example = ''
+              # Let's package the icalevents plugin
+              plugin-icalevents = pkgs.stdenv.mkDerivation {
+                name = "icalevents";
+                # Download the plugin from the dokuwiki site
+                src = pkgs.fetchurl {
+                  url = "https://github.com/real-or-random/dokuwiki-plugin-icalevents/releases/download/2017-06-16/dokuwiki-plugin-icalevents-2017-06-16.zip";
+                  sha256 = "e40ed7dd6bbe7fe3363bbbecb4de481d5e42385b5a0f62f6a6ce6bf3a1f9dfa8";
+                };
+                sourceRoot = ".";
+                # We need unzip to build this package
+                buildInputs = [ pkgs.unzip ];
+                # Installing simply means copying all files to the output directory
+                installPhase = "mkdir -p $out; cp -R * $out/";
+              };
+
+              # And then pass this plugin to the plugin list like this:
+              plugins = [ plugin-icalevents ];
+        '';
+      };
+
+      templates = mkOption {
+        type = types.listOf types.path;
+        default = [];
+        description = ''
+          List of paths to additional DokuWiki templates; each one is linked into
+          the wiki's 'lib/tpl' directory.
+          <note><para>These templates need to be packaged before use, see example.</para></note>
+        '';
+        example = ''
+              # Let's package the bootstrap3 theme
+              template-bootstrap3 = pkgs.stdenv.mkDerivation {
+                name = "bootstrap3";
+                # Download the theme from the dokuwiki site
+                src = pkgs.fetchurl {
+                  url = "https://github.com/giterlizzi/dokuwiki-template-bootstrap3/archive/v2019-05-22.zip";
+                  sha256 = "4de5ff31d54dd61bbccaf092c9e74c1af3a4c53e07aa59f60457a8f00cfb23a6";
+                };
+                # We need unzip to build this package
+                buildInputs = [ pkgs.unzip ];
+                # Installing simply means copying all files to the output directory
+                installPhase = "mkdir -p $out; cp -R * $out/";
+              };
+
+              # And then pass this theme to the template list like this:
+              templates = [ template-bootstrap3 ];
+        '';
+      };
+
+      poolConfig = mkOption {
+        type = with types; attrsOf (oneOf [ str int bool ]);
+        default = {
+          "pm" = "dynamic";
+          "pm.max_children" = 32;
+          "pm.start_servers" = 2;
+          "pm.min_spare_servers" = 2;
+          "pm.max_spare_servers" = 4;
+          "pm.max_requests" = 500;
+        };
+        description = ''
+          Options for the dokuwiki PHP pool. See the documentation on <literal>php-fpm.conf</literal>
+          for details on configuration directives.
+        '';
+      };
+
+      nginx = mkOption {
+        type = types.submodule (
+          recursiveUpdate
+            (import ../web-servers/nginx/vhost-options.nix { inherit config lib; })
+            {
+              # Enable encryption by default,
+              options.forceSSL.default = true;
+              options.enableACME.default = true;
+            }
+        );
+        default = {forceSSL = true; enableACME = true;};
+        example = {
+          serverAliases = [
+            "wiki.\${config.networking.domain}"
+          ];
+          enableACME = false;
+        };
+        description = ''
+          With this option, you can customize the nginx virtualHost which already has sensible defaults for DokuWiki.
+        '';
+      };
+    };
+  };
+in
+{
+  # interface
+  options = {
+    services.dokuwiki = mkOption {
+      type = types.attrsOf (types.submodule siteOpts);
+      default = {};
+      description = "Specification of one or more DokuWiki sites to serve.";
     };
   };
 
   # implementation
 
-  config = mkIf cfg.enable {
-
-    warnings = mkIf (cfg.superUser == null) ["Not setting services.dokuwiki.superUser will impair your ability to administer DokuWiki"];
-
-    assertions = [ 
-      {
-        assertion = cfg.aclUse -> (cfg.acl != null || cfg.aclFile != null);
-        message = "Either services.dokuwiki.acl or services.dokuwiki.aclFile is mandatory when aclUse is true";
-      }
-      {
-        assertion = cfg.usersFile != null -> cfg.aclUse != false;
-        message = "services.dokuwiki.aclUse must be true when usersFile is not null";
-      }
-    ];
-
-    services.phpfpm.pools.dokuwiki = {
-      inherit user;
-      inherit group;
-      phpEnv = {        
-        DOKUWIKI_LOCAL_CONFIG = "${dokuwikiLocalConfig}";
-        DOKUWIKI_PLUGINS_LOCAL_CONFIG = "${dokuwikiPluginsLocalConfig}";
-      } //optionalAttrs (cfg.usersFile != null) {
-        DOKUWIKI_USERS_AUTH_CONFIG = "${cfg.usersFile}";
-      } //optionalAttrs (cfg.aclUse) {
-        DOKUWIKI_ACL_AUTH_CONFIG = if (cfg.acl != null) then "${dokuwikiAclAuthConfig}" else "${toString cfg.aclFile}";
-      };
-      
-      settings = {
-        "listen.mode" = "0660";
-        "listen.owner" = user;
-        "listen.group" = group;
-      } // cfg.poolConfig;
-    };
+  config = mkIf (eachSite != {}) {
+
+    warnings = mapAttrsToList (hostName: cfg: mkIf (cfg.superUser == null) "Not setting services.dokuwiki.${hostName}.superUser will impair your ability to administer DokuWiki") eachSite;
+
+    assertions = flatten (mapAttrsToList (hostName: cfg:
+    [{
+      assertion = cfg.aclUse -> (cfg.acl != null || cfg.aclFile != null);
+      message = "Either services.dokuwiki.${hostName}.acl or services.dokuwiki.${hostName}.aclFile is mandatory when aclUse is true";
+    }
+    {
+      assertion = cfg.usersFile != null -> cfg.aclUse != false;
+      message = "services.dokuwiki.${hostName}.aclUse must be true when usersFile is not null";
+    }
+    ]) eachSite);
+
+    services.phpfpm.pools = mapAttrs' (hostName: cfg: (
+      nameValuePair "dokuwiki-${hostName}" {
+        inherit user;
+        inherit group;
+        phpEnv = {
+          DOKUWIKI_LOCAL_CONFIG = "${dokuwikiLocalConfig cfg}";
+          DOKUWIKI_PLUGINS_LOCAL_CONFIG = "${dokuwikiPluginsLocalConfig cfg}";
+        } // optionalAttrs (cfg.usersFile != null) {
+          DOKUWIKI_USERS_AUTH_CONFIG = "${cfg.usersFile}";
+        } // optionalAttrs (cfg.aclUse) {
+          DOKUWIKI_ACL_AUTH_CONFIG = if (cfg.acl != null) then "${dokuwikiAclAuthConfig cfg}" else "${toString cfg.aclFile}";
+        };
+
+        settings = {
+          "listen.mode" = "0660";
+          "listen.owner" = user;
+          "listen.group" = group;
+        } // cfg.poolConfig;
+      })) eachSite;
 
     services.nginx = {
       enable = true;
-      
-       virtualHosts = {
-        ${cfg.hostName} = mkMerge [ cfg.nginx {
-          root = mkForce "${pkgs.dokuwiki}/share/dokuwiki/";
-          extraConfig = "fastcgi_param HTTPS on;";
-
-          locations."~ /(conf/|bin/|inc/|install.php)" = {
-            extraConfig = "deny all;";
-          };
-
-          locations."~ ^/data/" = {
-            root = "${cfg.stateDir}";
-            extraConfig = "internal;";
-          };
-
-          locations."~ ^/lib.*\.(js|css|gif|png|ico|jpg|jpeg)$" = {
-            extraConfig = "expires 365d;";
-          };
-
-          locations."/" = {
-            priority = 1;
-            index = "doku.php";
-            extraConfig = ''try_files $uri $uri/ @dokuwiki;'';
-          };
-
-          locations."@dokuwiki" = {
-            extraConfig = ''
+      virtualHosts = mapAttrs (hostName: cfg:  mkMerge [ cfg.nginx {
+        root = mkForce "${pkg hostName cfg}/share/dokuwiki";
+        extraConfig = "fastcgi_param HTTPS on;";
+
+        locations."~ /(conf/|bin/|inc/|install.php)" = {
+          extraConfig = "deny all;";
+        };
+
+        locations."~ ^/data/" = {
+          root = "${cfg.stateDir}";
+          extraConfig = "internal;";
+        };
+
+        locations."~ ^/lib.*\.(js|css|gif|png|ico|jpg|jpeg)$" = {
+          extraConfig = "expires 365d;";
+        };
+
+        locations."/" = {
+          priority = 1;
+          index = "doku.php";
+          extraConfig = ''try_files $uri $uri/ @dokuwiki;'';
+        };
+
+        locations."@dokuwiki" = {
+          extraConfig = ''
               # rewrites "doku.php/" out of the URLs if you set the userwrite setting to .htaccess in dokuwiki config page
               rewrite ^/_media/(.*) /lib/exe/fetch.php?media=$1 last;
               rewrite ^/_detail/(.*) /lib/exe/detail.php?media=$1 last;
               rewrite ^/_export/([^/]+)/(.*) /doku.php?do=export_$1&id=$2 last;
               rewrite ^/(.*) /doku.php?id=$1&$args last;
-            '';
-          };
+          '';
+        };
 
-          locations."~ \.php$" = {
-            extraConfig = ''
+        locations."~ \.php$" = {
+          extraConfig = ''
               try_files $uri $uri/ /doku.php;
               include ${pkgs.nginx}/conf/fastcgi_params;
               fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
               fastcgi_param REDIRECT_STATUS 200;
-              fastcgi_pass unix:${config.services.phpfpm.pools.dokuwiki.socket};
+              fastcgi_pass unix:${config.services.phpfpm.pools."dokuwiki-${hostName}".socket};
               fastcgi_param HTTPS on;
-            '';
-          };
-        }];
-      };
-
+          '';
+        };
+      }]) eachSite;
     };
 
-    systemd.tmpfiles.rules = [
+    systemd.tmpfiles.rules = flatten (mapAttrsToList (hostName: cfg: [
       "d ${cfg.stateDir}/attic 0750 ${user} ${group} - -"
       "d ${cfg.stateDir}/cache 0750 ${user} ${group} - -"
       "d ${cfg.stateDir}/index 0750 ${user} ${group} - -"
@@ -266,7 +376,13 @@ in
       "d ${cfg.stateDir}/meta 0750 ${user} ${group} - -"
       "d ${cfg.stateDir}/pages 0750 ${user} ${group} - -"
       "d ${cfg.stateDir}/tmp 0750 ${user} ${group} - -"
-    ];
+    ] ++ lib.optional (cfg.aclFile != null) "C ${cfg.aclFile} 0640 ${user} ${group} - ${pkg hostName cfg}/share/dokuwiki/conf/acl.auth.php.dist"
+    ++ lib.optional (cfg.usersFile != null) "C ${cfg.usersFile} 0640 ${user} ${group} - ${pkg hostName cfg}/share/dokuwiki/conf/users.auth.php.dist"
+    ) eachSite);
 
+    users.users.${user} = {
+      group = group;
+      isSystemUser = true;
+    };
   };
 }
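
The rewrite turns services.dokuwiki into an attribute set of sites, each with its own php-fpm pool, nginx virtual host and state directory under /var/lib/dokuwiki/<name>. A sketch with two independent wikis; the host names are placeholders:

    # configuration.nix fragment (sketch)
    { ... }:
    {
      services.dokuwiki = {
        "wiki.example.org" = {
          enable = true;
          aclUse = true;        # aclFile then defaults to a path under /var/lib/dokuwiki/wiki.example.org
          superUser = "@admin";
        };
        "notes.example.org" = {
          enable = true;
          aclUse = false;       # open wiki: anyone may create and edit content
        };
      };
    }
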
diff --git a/nixos/modules/services/web-apps/mattermost.nix b/nixos/modules/services/web-apps/mattermost.nix
index 853347bf86e..f5c2c356afc 100644
--- a/nixos/modules/services/web-apps/mattermost.nix
+++ b/nixos/modules/services/web-apps/mattermost.nix
@@ -224,7 +224,7 @@ in
         serviceConfig = {
           User = "nobody";
           Group = "nogroup";
-          ExecStart = "${pkgs.matterircd.bin}/bin/matterircd ${concatStringsSep " " cfg.matterircd.parameters}";
+          ExecStart = "${pkgs.matterircd}/bin/matterircd ${concatStringsSep " " cfg.matterircd.parameters}";
           WorkingDirectory = "/tmp";
           PrivateTmp = true;
           Restart = "always";
diff --git a/nixos/modules/services/web-apps/mediawiki.nix b/nixos/modules/services/web-apps/mediawiki.nix
index e9ed53857d8..0a5b6047bb5 100644
--- a/nixos/modules/services/web-apps/mediawiki.nix
+++ b/nixos/modules/services/web-apps/mediawiki.nix
@@ -29,7 +29,7 @@ let
       '') cfg.skins)}
 
       ${concatStringsSep "\n" (mapAttrsToList (k: v: ''
-        ln -s ${v} $out/share/mediawiki/extensions/${k}
+        ln -s ${if v != null then v else "$src/share/mediawiki/extensions/${k}"} $out/share/mediawiki/extensions/${k}
       '') cfg.extensions)}
     '';
   };
@@ -204,17 +204,28 @@ in
         default = {};
         type = types.attrsOf types.path;
         description = ''
-          List of paths whose content is copied to the 'skins'
-          subdirectory of the MediaWiki installation.
+          Attribute set of paths whose content is copied to the <filename>skins</filename>
+          subdirectory of the MediaWiki installation in addition to the default skins.
         '';
       };
 
       extensions = mkOption {
         default = {};
-        type = types.attrsOf types.path;
+        type = types.attrsOf (types.nullOr types.path);
         description = ''
-          List of paths whose content is copied to the 'extensions'
-          subdirectory of the MediaWiki installation.
+          Attribute set of paths whose content is copied to the <filename>extensions</filename>
+          subdirectory of the MediaWiki installation and enabled in configuration.
+
+          Use <literal>null</literal> instead of path to enable extensions that are part of MediaWiki.
+        '';
+        example = literalExample ''
+          {
+            Matomo = pkgs.fetchzip {
+              url = "https://github.com/DaSchTour/matomo-mediawiki-extension/archive/v4.0.1.tar.gz";
+              sha256 = "0g5rd3zp0avwlmqagc59cg9bbkn3r7wx7p6yr80s644mj6dlvs1b";
+            };
+            ParserFunctions = null;
+          }
         '';
       };
 
diff --git a/nixos/modules/services/web-apps/nextcloud.nix b/nixos/modules/services/web-apps/nextcloud.nix
index 9de2fbc0732..f826096bf60 100644
--- a/nixos/modules/services/web-apps/nextcloud.nix
+++ b/nixos/modules/services/web-apps/nextcloud.nix
@@ -11,8 +11,8 @@ let
       base = pkgs.php74;
     in
       base.buildEnv {
-        extensions = e: with e;
-          base.enabledExtensions ++ [
+        extensions = { enabled, all }: with all;
+          enabled ++ [
             apcu redis memcached imagick
           ];
         extraConfig = phpOptionsStr;
@@ -322,12 +322,21 @@ in {
           Please migrate your configuration to config.services.nextcloud.poolSettings.
         '')
         ++ (optional (versionOlder cfg.package.version "18") ''
+          A legacy Nextcloud install (from before NixOS 20.03) may be installed.
+
           You're currently deploying an older version of Nextcloud. This may be needed
-          since Nextcloud doesn't allow major version upgrades across multiple versions (i.e. an
-          upgrade from 16 is possible to 17, but not to 18).
+          since Nextcloud doesn't allow major version upgrades that skip multiple
+          versions (i.e. an upgrade from 16 to 17 is possible, but not from 16 to 18).
+
+          It is assumed that Nextcloud will be upgraded from version 16 to 17.
+
+           * If this is a fresh install, there will be no upgrade to do now.
+
+           * If this server already had Nextcloud installed, first deploy this to your
+             server, and wait until the upgrade to 17 is finished.
 
-          Please deploy this to your server and wait until the migration is finished. After
-          that you can deploy to the latest Nextcloud version available.
+          Then, set `services.nextcloud.package` to `pkgs.nextcloud18` to upgrade to
+          Nextcloud version 18.
         '');
 
       services.nextcloud.package = with pkgs;
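
A sketch of the second deployment step the reworded warning describes, to be applied only after the previous major-version migration has finished:

    # configuration.nix fragment (sketch)
    { pkgs, ... }:
    {
      services.nextcloud.package = pkgs.nextcloud18;
    }
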
diff --git a/nixos/modules/services/web-apps/wordpress.nix b/nixos/modules/services/web-apps/wordpress.nix
index c48a4409737..5fbe53221ae 100644
--- a/nixos/modules/services/web-apps/wordpress.nix
+++ b/nixos/modules/services/web-apps/wordpress.nix
@@ -105,7 +105,7 @@ let
               name = "embed-pdf-viewer-plugin";
               # Download the theme from the wordpress site
               src = pkgs.fetchurl {
-                url = https://downloads.wordpress.org/plugin/embed-pdf-viewer.2.0.3.zip;
+                url = "https://downloads.wordpress.org/plugin/embed-pdf-viewer.2.0.3.zip";
                 sha256 = "1rhba5h5fjlhy8p05zf0p14c9iagfh96y91r36ni0rmk6y891lyd";
               };
               # We need unzip to build this package
@@ -132,7 +132,7 @@ let
               name = "responsive-theme";
               # Download the theme from the wordpress site
               src = pkgs.fetchurl {
-                url = https://downloads.wordpress.org/theme/responsive.3.14.zip;
+                url = "https://downloads.wordpress.org/theme/responsive.3.14.zip";
                 sha256 = "0rjwm811f4aa4q43r77zxlpklyb85q08f9c8ns2akcarrvj5ydx3";
               };
               # We need unzip to build this package
diff --git a/nixos/modules/services/web-servers/apache-httpd/default.nix b/nixos/modules/services/web-servers/apache-httpd/default.nix
index 832c8b30ee9..8abee7130d7 100644
--- a/nixos/modules/services/web-servers/apache-httpd/default.nix
+++ b/nixos/modules/services/web-servers/apache-httpd/default.nix
@@ -12,7 +12,7 @@ let
 
   httpdConf = cfg.configFile;
 
-  php = cfg.phpPackage.override { apacheHttpd = pkg.dev; /* otherwise it only gets .out */ };
+  php = cfg.phpPackage.override { apacheHttpd = pkg; };
 
   phpMajorVersion = lib.versions.major (lib.getVersion php);
 
@@ -41,9 +41,9 @@ let
       "mime" "autoindex" "negotiation" "dir"
       "alias" "rewrite"
       "unixd" "slotmem_shm" "socache_shmcb"
-      "mpm_${cfg.multiProcessingModule}"
+      "mpm_${cfg.mpm}"
     ]
-    ++ (if cfg.multiProcessingModule == "prefork" then [ "cgi" ] else [ "cgid" ])
+    ++ (if cfg.mpm == "prefork" then [ "cgi" ] else [ "cgid" ])
     ++ optional enableHttp2 "http2"
     ++ optional enableSSL "ssl"
     ++ optional enableUserDir "userdir"
@@ -264,7 +264,7 @@ let
 
     PidFile ${runtimeDir}/httpd.pid
 
-    ${optionalString (cfg.multiProcessingModule != "prefork") ''
+    ${optionalString (cfg.mpm != "prefork") ''
       # mod_cgid requires this.
       ScriptSock ${runtimeDir}/cgisock
     ''}
@@ -338,6 +338,7 @@ let
     }
     ''
       cat ${php}/etc/php.ini > $out
+      cat ${php.phpIni} > $out
       echo "$options" >> $out
     '';
 
@@ -349,6 +350,7 @@ in
   imports = [
     (mkRemovedOptionModule [ "services" "httpd" "extraSubservices" ] "Most existing subservices have been ported to the NixOS module system. Please update your configuration accordingly.")
     (mkRemovedOptionModule [ "services" "httpd" "stateDir" ] "The httpd module now uses /run/httpd as a runtime directory.")
+    (mkRenamedOptionModule [ "services" "httpd" "multiProcessingModule" ] [ "services" "httpd" "mpm" ])
 
     # virtualHosts options
     (mkRemovedOptionModule [ "services" "httpd" "documentRoot" ] "Please define a virtual host using `services.httpd.virtualHosts`.")
@@ -453,7 +455,13 @@ in
         type = types.str;
         default = "wwwrun";
         description = ''
-          User account under which httpd runs.
+          User account under which httpd child processes run.
+
+          If you require the main httpd process to run as
+          <literal>root</literal>, add the following configuration:
+          <programlisting>
+          systemd.services.httpd.serviceConfig.User = lib.mkForce "root";
+          </programlisting>
         '';
       };
 
@@ -461,7 +469,7 @@ in
         type = types.str;
         default = "wwwrun";
         description = ''
-          Group under which httpd runs.
+          Group under which httpd child processes run.
         '';
       };
 
@@ -538,20 +546,19 @@ in
         '';
       };
 
-      multiProcessingModule = mkOption {
+      mpm = mkOption {
         type = types.enum [ "event" "prefork" "worker" ];
-        default = "prefork";
+        default = "event";
         example = "worker";
         description =
           ''
             Multi-processing module to be used by Apache. Available
-            modules are <literal>prefork</literal> (the default;
-            handles each request in a separate child process),
-            <literal>worker</literal> (hybrid approach that starts a
-            number of child processes each running a number of
-            threads) and <literal>event</literal> (a recent variant of
-            <literal>worker</literal> that handles persistent
-            connections more efficiently).
+            modules are <literal>prefork</literal> (handles each
+            request in a separate child process), <literal>worker</literal>
+            (hybrid approach that starts a number of child processes
+            each running a number of threads) and <literal>event</literal>
+            (the default; a recent variant of <literal>worker</literal>
+            that handles persistent connections more efficiently).
           '';
       };
 
@@ -651,7 +658,7 @@ in
     services.httpd.phpOptions =
       ''
         ; Needed for PHP's mail() function.
-        sendmail_path = sendmail -t -i
+        sendmail_path = ${pkgs.system-sendmail}/bin/sendmail -t -i
 
         ; Don't advertise PHP
         expose_php = off
@@ -702,9 +709,7 @@ in
         wants = concatLists (map (hostOpts: [ "acme-${hostOpts.hostName}.service" "acme-selfsigned-${hostOpts.hostName}.service" ]) vhostsACME);
         after = [ "network.target" "fs.target" ] ++ map (hostOpts: "acme-selfsigned-${hostOpts.hostName}.service") vhostsACME;
 
-        path =
-          [ pkg pkgs.coreutils pkgs.gnugrep ]
-          ++ optional cfg.enablePHP pkgs.system-sendmail; # Needed for PHP's mail() function.
+        path = [ pkg pkgs.coreutils pkgs.gnugrep ];
 
         environment =
           optionalAttrs cfg.enablePHP { PHPRC = phpIni; }
@@ -724,7 +729,7 @@ in
           ExecStart = "@${pkg}/bin/httpd httpd -f ${httpdConf}";
           ExecStop = "${pkg}/bin/httpd -f ${httpdConf} -k graceful-stop";
           ExecReload = "${pkg}/bin/httpd -f ${httpdConf} -k graceful";
-          User = "root";
+          User = cfg.user;
           Group = cfg.group;
           Type = "forking";
           PIDFile = "${runtimeDir}/httpd.pid";
@@ -732,6 +737,7 @@ in
           RestartSec = "5s";
           RuntimeDirectory = "httpd httpd/runtime";
           RuntimeDirectoryMode = "0750";
+          AmbientCapabilities = [ "CAP_NET_BIND_SERVICE" ];
         };
       };
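
These hunks rename multiProcessingModule to mpm (with event as the new default), run the main httpd process as cfg.user with CAP_NET_BIND_SERVICE instead of root, and point PHP's mail() at system-sendmail. Existing configurations keep working through the rename alias; a new one might read as follows, with adminAddr as a placeholder:

    # configuration.nix fragment (sketch)
    { ... }:
    {
      services.httpd = {
        enable = true;
        adminAddr = "webmaster@example.org";
        # Formerly services.httpd.multiProcessingModule; "event" is now the
        # default, so this is only needed to select prefork or worker instead.
        mpm = "event";
      };
    }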
 
diff --git a/nixos/modules/services/web-servers/apache-httpd/vhost-options.nix b/nixos/modules/services/web-servers/apache-httpd/vhost-options.nix
index 2e806afb42c..173c0f8561c 100644
--- a/nixos/modules/services/web-servers/apache-httpd/vhost-options.nix
+++ b/nixos/modules/services/web-servers/apache-httpd/vhost-options.nix
@@ -137,7 +137,7 @@ in
 
     http2 = mkOption {
       type = types.bool;
-      default = false;
+      default = true;
       description = ''
         Whether to enable HTTP 2. HTTP/2 is supported in all multi-processing modules that come with httpd. <emphasis>However, if you use the prefork mpm, there will
         be severe restrictions.</emphasis> Refer to <link xlink:href="https://httpd.apache.org/docs/2.4/howto/http2.html#mpm-config"/> for details.
diff --git a/nixos/modules/services/web-servers/jboss/default.nix b/nixos/modules/services/web-servers/jboss/default.nix
index d28724281a8..ca5b8635fc0 100644
--- a/nixos/modules/services/web-servers/jboss/default.nix
+++ b/nixos/modules/services/web-servers/jboss/default.nix
@@ -24,6 +24,7 @@ in
     services.jboss = {
 
       enable = mkOption {
+        type = types.bool;
         default = false;
         description = "Whether to enable JBoss. WARNING : this package is outdated and is known to have vulnerabilities.";
       };
@@ -59,6 +60,7 @@ in
       };
 
       useJK = mkOption {
+        type = types.bool;
         default = false;
         description = "Whether to use to connector to the Apache HTTP server";
       };
diff --git a/nixos/modules/services/web-servers/nginx/default.nix b/nixos/modules/services/web-servers/nginx/default.nix
index 8d49dc66eb1..1e9cda7e478 100644
--- a/nixos/modules/services/web-servers/nginx/default.nix
+++ b/nixos/modules/services/web-servers/nginx/default.nix
@@ -187,7 +187,7 @@ let
     then "/etc/nginx/nginx.conf"
     else configFile;
 
-  execCommand = "${cfg.package}/bin/nginx -c '${configPath}' -p '${cfg.stateDir}'";
+  execCommand = "${cfg.package}/bin/nginx -c '${configPath}'";
 
   vhosts = concatStringsSep "\n" (mapAttrsToList (vhostName: vhost:
     let
@@ -463,13 +463,6 @@ in
         '';
       };
 
-      stateDir = mkOption {
-        default = "/var/spool/nginx";
-        description = "
-          Directory holding all state for nginx to run.
-        ";
-      };
-
       user = mkOption {
         type = types.str;
         default = "nginx";
@@ -636,6 +629,13 @@ in
     };
   };
 
+  imports = [
+    (mkRemovedOptionModule [ "services" "nginx" "stateDir" ] ''
+      The Nginx log directory has been moved to /var/log/nginx, the cache directory
+      to /var/cache/nginx. The option services.nginx.stateDir has been removed.
+    '')
+  ];
+
   config = mkIf cfg.enable {
     # TODO: test user supplied config file pases syntax test
 
@@ -680,12 +680,6 @@ in
       }
     ];
 
-    systemd.tmpfiles.rules = [
-      "d '${cfg.stateDir}' 0750 ${cfg.user} ${cfg.group} - -"
-      "d '${cfg.stateDir}/logs' 0750 ${cfg.user} ${cfg.group} - -"
-      "Z '${cfg.stateDir}' - ${cfg.user} ${cfg.group} - -"
-    ];
-
     systemd.services.nginx = {
       description = "Nginx Web Server";
       wantedBy = [ "multi-user.target" ];
@@ -708,6 +702,12 @@ in
         # Runtime directory and mode
         RuntimeDirectory = "nginx";
         RuntimeDirectoryMode = "0750";
+        # Cache directory and mode
+        CacheDirectory = "nginx";
+        CacheDirectoryMode = "0750";
+        # Logs directory and mode
+        LogsDirectory = "nginx";
+        LogsDirectoryMode = "0750";
         # Capabilities
         AmbientCapabilities = [ "CAP_NET_BIND_SERVICE" "CAP_SYS_RESOURCE" ];
       };
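
With stateDir removed, nginx state is kept in systemd-managed directories instead (/run/nginx, /var/cache/nginx, /var/log/nginx). A configuration that previously set the option now simply drops it; leaving it in place aborts evaluation with the removal message above:

    # configuration.nix fragment (sketch)
    { ... }:
    {
      services.nginx.enable = true;
      # services.nginx.stateDir = "/var/spool/nginx";  # removed option
    }
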
diff --git a/nixos/modules/services/web-servers/phpfpm/default.nix b/nixos/modules/services/web-servers/phpfpm/default.nix
index 3db19c781d0..d090885a8ca 100644
--- a/nixos/modules/services/web-servers/phpfpm/default.nix
+++ b/nixos/modules/services/web-servers/phpfpm/default.nix
@@ -209,14 +209,14 @@ in {
              user = "php";
              group = "php";
              phpPackage = pkgs.php;
-             settings = '''
+             settings = {
                "pm" = "dynamic";
                "pm.max_children" = 75;
                "pm.start_servers" = 10;
                "pm.min_spare_servers" = 5;
                "pm.max_spare_servers" = 20;
                "pm.max_requests" = 500;
-             ''';
+             };
            }
          }'';
         description = ''
diff --git a/nixos/modules/services/web-servers/traefik.nix b/nixos/modules/services/web-servers/traefik.nix
index 5b0fc467ea4..4ab7307c3b6 100644
--- a/nixos/modules/services/web-servers/traefik.nix
+++ b/nixos/modules/services/web-servers/traefik.nix
@@ -4,56 +4,102 @@ with lib;
 
 let
   cfg = config.services.traefik;
-  configFile =
-    if cfg.configFile == null then
-      pkgs.runCommand "config.toml" {
-        buildInputs = [ pkgs.remarshal ];
-        preferLocalBuild = true;
-      } ''
-        remarshal -if json -of toml \
-          < ${pkgs.writeText "config.json" (builtins.toJSON cfg.configOptions)} \
-          > $out
-      ''
-    else cfg.configFile;
-
+  jsonValue = with types;
+    let
+      valueType = nullOr (oneOf [
+        bool
+        int
+        float
+        str
+        (lazyAttrsOf valueType)
+        (listOf valueType)
+      ]) // {
+        description = "JSON value";
+        emptyValue.value = { };
+      };
+    in valueType;
+  dynamicConfigFile = if cfg.dynamicConfigFile == null then
+    pkgs.runCommand "config.toml" {
+      buildInputs = [ pkgs.remarshal ];
+      preferLocalBuild = true;
+    } ''
+      remarshal -if json -of toml \
+        < ${
+          pkgs.writeText "dynamic_config.json"
+          (builtins.toJSON cfg.dynamicConfigOptions)
+        } \
+        > $out
+    ''
+  else
+    cfg.dynamicConfigFile;
+  staticConfigFile = if cfg.staticConfigFile == null then
+    pkgs.runCommand "config.toml" {
+      buildInputs = [ pkgs.yj ];
+      preferLocalBuild = true;
+    } ''
+      yj -jt -i \
+        < ${
+          pkgs.writeText "static_config.json" (builtins.toJSON
+            (recursiveUpdate cfg.staticConfigOptions {
+              providers.file.filename = "${dynamicConfigFile}";
+            }))
+        } \
+        > $out
+    ''
+  else
+    cfg.staticConfigFile;
 in {
   options.services.traefik = {
     enable = mkEnableOption "Traefik web server";
 
-    configFile = mkOption {
+    staticConfigFile = mkOption {
       default = null;
-      example = literalExample "/path/to/config.toml";
+      example = literalExample "/path/to/static_config.toml";
       type = types.nullOr types.path;
       description = ''
-        Path to verbatim traefik.toml to use.
-        (Using that option has precedence over <literal>configOptions</literal>)
+        Path to Traefik's static configuration file.
+        (Setting this option takes precedence over <literal>staticConfigOptions</literal> and <literal>dynamicConfigOptions</literal>.)
       '';
     };
 
-    configOptions = mkOption {
+    staticConfigOptions = mkOption {
       description = ''
-        Config for Traefik.
+        Static configuration for Traefik.
       '';
-      type = types.attrs;
-      default = {
-        defaultEntryPoints = ["http"];
-        entryPoints.http.address = ":80";
-      };
+      type = jsonValue;
+      default = { entryPoints.http.address = ":80"; };
       example = {
-        defaultEntrypoints = [ "http" ];
-        web.address = ":8080";
+        entryPoints.web.address = ":8080";
         entryPoints.http.address = ":80";
 
-        file = {};
-        frontends = {
-          frontend1 = {
-            backend = "backend1";
-            routes.test_1.rule = "Host:localhost";
-          };
-        };
-        backends.backend1 = {
-          servers.server1.url = "http://localhost:8000";
+        api = { };
+      };
+    };
+
+    dynamicConfigFile = mkOption {
+      default = null;
+      example = literalExample "/path/to/dynamic_config.toml";
+      type = types.nullOr types.path;
+      description = ''
+        Path to Traefik's dynamic configuration file.
+        (Setting this option takes precedence over <literal>dynamicConfigOptions</literal>.)
+      '';
+    };
+
+    dynamicConfigOptions = mkOption {
+      description = ''
+        Dynamic configuration for Traefik.
+      '';
+      type = jsonValue;
+      default = { };
+      example = {
+        http.routers.router1 = {
+          rule = "Host(`localhost`)";
+          service = "service1";
         };
+
+        http.services.service1.loadBalancer.servers =
+          [{ url = "http://localhost:8080"; }];
       };
     };
 
@@ -61,7 +107,7 @@ in {
       default = "/var/lib/traefik";
       type = types.path;
       description = ''
-      Location for any persistent data traefik creates, ie. acme
+        Location for any persistent data Traefik creates, i.e. ACME certificates.
       '';
     };
 
@@ -84,16 +130,15 @@ in {
   };
 
   config = mkIf cfg.enable {
-    systemd.tmpfiles.rules = [
-      "d '${cfg.dataDir}' 0700 traefik traefik - -"
-    ];
+    systemd.tmpfiles.rules = [ "d '${cfg.dataDir}' 0700 traefik traefik - -" ];
 
     systemd.services.traefik = {
       description = "Traefik web server";
       after = [ "network-online.target" ];
       wantedBy = [ "multi-user.target" ];
       serviceConfig = {
-        ExecStart = ''${cfg.package.bin}/bin/traefik --configfile=${configFile}'';
+        ExecStart =
+          "${cfg.package}/bin/traefik --configfile=${staticConfigFile}";
         Type = "simple";
         User = "traefik";
         Group = cfg.group;
@@ -120,6 +165,6 @@ in {
       isSystemUser = true;
     };
 
-    users.groups.traefik = {};
+    users.groups.traefik = { };
   };
 }
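
The module now mirrors Traefik v2's split between static and dynamic configuration; both sides can be given as Nix values (serialized to TOML) or as verbatim files. A sketch combining the two, with host and backend URL as placeholders:

    # configuration.nix fragment (sketch)
    { ... }:
    {
      services.traefik = {
        enable = true;
        staticConfigOptions = {
          entryPoints.web.address = ":80";
          api = { };   # enable the API/dashboard
        };
        dynamicConfigOptions = {
          http.routers.router1 = {
            rule = "Host(`example.org`)";
            service = "service1";
          };
          http.services.service1.loadBalancer.servers =
            [{ url = "http://localhost:8080"; }];
        };
      };
    }
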
diff --git a/nixos/modules/services/web-servers/unit/default.nix b/nixos/modules/services/web-servers/unit/default.nix
index f8a18954fc9..989866144e1 100644
--- a/nixos/modules/services/web-servers/unit/default.nix
+++ b/nixos/modules/services/web-servers/unit/default.nix
@@ -91,41 +91,47 @@ in {
       description = "Unit App Server";
       after = [ "network.target" ];
       wantedBy = [ "multi-user.target" ];
-      path = with pkgs; [ curl ];
       preStart = ''
-        test -f '${cfg.stateDir}/conf.json' || rm -f '${cfg.stateDir}/conf.json'
+        [ ! -e '${cfg.stateDir}/conf.json' ] || rm -f '${cfg.stateDir}/conf.json'
       '';
       postStart = ''
-        curl -X PUT --data-binary '@${configFile}' --unix-socket '/run/unit/control.unit.sock' 'http://localhost/config'
+        ${pkgs.curl}/bin/curl -X PUT --data-binary '@${configFile}' --unix-socket '/run/unit/control.unit.sock' 'http://localhost/config'
       '';
       serviceConfig = {
+        Type = "forking";
+        PIDFile = "/run/unit/unit.pid";
         ExecStart = ''
           ${cfg.package}/bin/unitd --control 'unix:/run/unit/control.unit.sock' --pid '/run/unit/unit.pid' \
-                                   --log '${cfg.logDir}/unit.log' --state '${cfg.stateDir}' --no-daemon \
+                                   --log '${cfg.logDir}/unit.log' --state '${cfg.stateDir}' \
                                    --user ${cfg.user} --group ${cfg.group}
         '';
-        # User and group
-        User = cfg.user;
-        Group = cfg.group;
-        # Capabilities
-        AmbientCapabilities = [ "CAP_NET_BIND_SERVICE" "CAP_SETGID" "CAP_SETUID" ];
+        ExecStop = ''
+          ${pkgs.curl}/bin/curl -X DELETE --unix-socket '/run/unit/control.unit.sock' 'http://localhost/config'
+        '';
+        # Runtime directory and mode
+        RuntimeDirectory = "unit";
+        RuntimeDirectoryMode = "0750";
+        # Access write directories
+        ReadWritePaths = [ cfg.stateDir cfg.logDir ];
         # Security
         NoNewPrivileges = true;
         # Sandboxing
-        ProtectSystem = "full";
+        ProtectSystem = "strict";
         ProtectHome = true;
-        RuntimeDirectory = "unit";
-        RuntimeDirectoryMode = "0750";
         PrivateTmp = true;
         PrivateDevices = true;
         ProtectHostname = true;
         ProtectKernelTunables = true;
         ProtectKernelModules = true;
         ProtectControlGroups = true;
+        RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" "AF_INET6" ];
         LockPersonality = true;
         MemoryDenyWriteExecute = true;
         RestrictRealtime = true;
+        RestrictSUIDSGID = true;
         PrivateMounts = true;
+        # System Call Filtering
+        SystemCallArchitectures = "native";
       };
     };
 
diff --git a/nixos/modules/services/x11/desktop-managers/enlightenment.nix b/nixos/modules/services/x11/desktop-managers/enlightenment.nix
index 32c9a40e535..1690a7d51a8 100644
--- a/nixos/modules/services/x11/desktop-managers/enlightenment.nix
+++ b/nixos/modules/services/x11/desktop-managers/enlightenment.nix
@@ -23,6 +23,7 @@ in
   options = {
 
     services.xserver.desktopManager.enlightenment.enable = mkOption {
+      type = types.bool;
       default = false;
       description = "Enable the Enlightenment desktop environment.";
     };
diff --git a/nixos/modules/services/x11/desktop-managers/gnome3.nix b/nixos/modules/services/x11/desktop-managers/gnome3.nix
index ac8e70c52bc..bbc7feb2d04 100644
--- a/nixos/modules/services/x11/desktop-managers/gnome3.nix
+++ b/nixos/modules/services/x11/desktop-managers/gnome3.nix
@@ -72,6 +72,7 @@ in
 
     services.xserver.desktopManager.gnome3 = {
       enable = mkOption {
+        type = types.bool;
         default = false;
         description = "Enable Gnome 3 desktop manager.";
       };
diff --git a/nixos/modules/services/x11/desktop-managers/kodi.nix b/nixos/modules/services/x11/desktop-managers/kodi.nix
index e997b9a1134..bdae9c3afdb 100644
--- a/nixos/modules/services/x11/desktop-managers/kodi.nix
+++ b/nixos/modules/services/x11/desktop-managers/kodi.nix
@@ -10,6 +10,7 @@ in
   options = {
     services.xserver.desktopManager.kodi = {
       enable = mkOption {
+        type = types.bool;
         default = false;
         description = "Enable the kodi multimedia center.";
       };
diff --git a/nixos/modules/services/x11/desktop-managers/pantheon.nix b/nixos/modules/services/x11/desktop-managers/pantheon.nix
index 01fe230b8a4..5fcc8590232 100644
--- a/nixos/modules/services/x11/desktop-managers/pantheon.nix
+++ b/nixos/modules/services/x11/desktop-managers/pantheon.nix
@@ -109,7 +109,7 @@ in
 
       # Without this, elementary LightDM greeter will pre-select non-existent `default` session
       # https://github.com/elementary/greeter/issues/368
-      services.xserver.displayManager.defaultSession = "pantheon";
+      services.xserver.displayManager.defaultSession = mkDefault "pantheon";
 
       services.xserver.displayManager.sessionCommands = ''
         if test "$XDG_CURRENT_DESKTOP" = "Pantheon"; then
diff --git a/nixos/modules/services/x11/desktop-managers/pantheon.xml b/nixos/modules/services/x11/desktop-managers/pantheon.xml
index 9541f2cfd4e..7905ceebd9a 100644
--- a/nixos/modules/services/x11/desktop-managers/pantheon.xml
+++ b/nixos/modules/services/x11/desktop-managers/pantheon.xml
@@ -1,7 +1,7 @@
 <chapter xmlns="http://docbook.org/ns/docbook"
          xmlns:xlink="http://www.w3.org/1999/xlink"
          xml:id="chap-pantheon">
- <title>Pantheon Destkop</title>
+ <title>Pantheon Desktop</title>
  <para>
   Pantheon is the desktop environment created for the elementary OS distribution. It is written from scratch in Vala, utilizing GNOME technologies with GTK 3 and Granite.
  </para>
diff --git a/nixos/modules/services/x11/display-managers/gdm.nix b/nixos/modules/services/x11/display-managers/gdm.nix
index d7bef68e5bc..622ea62f3a9 100644
--- a/nixos/modules/services/x11/display-managers/gdm.nix
+++ b/nixos/modules/services/x11/display-managers/gdm.nix
@@ -93,16 +93,17 @@ in
       };
 
       wayland = mkOption {
+        type = types.bool;
         default = true;
         description = ''
           Allow GDM to run on Wayland instead of Xserver.
           Note to enable Wayland with Nvidia you need to
           enable the <option>nvidiaWayland</option>.
         '';
-        type = types.bool;
       };
 
       nvidiaWayland = mkOption {
+        type = types.bool;
         default = false;
         description = ''
           Whether to allow wayland to be used with the proprietary
diff --git a/nixos/modules/services/x11/display-managers/lightdm-greeters/mini.nix b/nixos/modules/services/x11/display-managers/lightdm-greeters/mini.nix
index 0025f9b3603..16d7fdf15cf 100644
--- a/nixos/modules/services/x11/display-managers/lightdm-greeters/mini.nix
+++ b/nixos/modules/services/x11/display-managers/lightdm-greeters/mini.nix
@@ -14,7 +14,9 @@ let
     user = ${cfg.user}
     show-password-label = true
     password-label-text = Password:
+    invalid-password-text = Invalid Password
     show-input-cursor = true
+    password-alignment = right
 
     [greeter-hotkeys]
     mod-key = meta
@@ -26,6 +28,8 @@ let
     [greeter-theme]
     font = Sans
     font-size = 1em
+    font-weight = bold
+    font-style = normal
     text-color = "#080800"
     error-color = "#F8F8F0"
     background-image = "${ldmcfg.background}"
@@ -36,6 +40,8 @@ let
     layout-space = 15
     password-color = "#F8F8F0"
     password-background-color = "#1B1D1E"
+    password-border-color = "#080800"
+    password-border-width = 2px
 
     ${cfg.extraConfig}
     '';
diff --git a/nixos/modules/services/x11/display-managers/startx.nix b/nixos/modules/services/x11/display-managers/startx.nix
index 57046984358..3980203b945 100644
--- a/nixos/modules/services/x11/display-managers/startx.nix
+++ b/nixos/modules/services/x11/display-managers/startx.nix
@@ -15,6 +15,7 @@ in
   options = {
     services.xserver.displayManager.startx = {
       enable = mkOption {
+        type = types.bool;
         default = false;
         description = ''
           Whether to enable the dummy "startx" pseudo-display manager,
diff --git a/nixos/modules/services/x11/gdk-pixbuf.nix b/nixos/modules/services/x11/gdk-pixbuf.nix
index 9ad926369ec..3fd6fed91e1 100644
--- a/nixos/modules/services/x11/gdk-pixbuf.nix
+++ b/nixos/modules/services/x11/gdk-pixbuf.nix
@@ -19,7 +19,7 @@ let
           continue
         fi
         GDK_PIXBUF_MODULEDIR="$module_dir" \
-          ${pkgs.gdk-pixbuf.dev}/bin/gdk-pixbuf-query-loaders
+          ${pkgs.stdenv.hostPlatform.emulator pkgs.buildPackages} ${pkgs.gdk-pixbuf.dev}/bin/gdk-pixbuf-query-loaders
       done
     ) > "$out"
   '';
diff --git a/nixos/modules/services/x11/hardware/digimend.nix b/nixos/modules/services/x11/hardware/digimend.nix
index a9f5640905a..b1b1682f00b 100644
--- a/nixos/modules/services/x11/hardware/digimend.nix
+++ b/nixos/modules/services/x11/hardware/digimend.nix
@@ -16,12 +16,7 @@ in
 
     services.xserver.digimend = {
 
-      enable = mkOption {
-        default = false;
-        description = ''
-          Whether to enable the digimend drivers for Huion/XP-Pen/etc. tablets.
-        '';
-      };
+      enable = mkEnableOption "the digimend drivers for Huion/XP-Pen/etc. tablets";
 
     };
 
diff --git a/nixos/modules/services/x11/hardware/libinput.nix b/nixos/modules/services/x11/hardware/libinput.nix
index f6b0e7c09f5..9548ecb8ef6 100644
--- a/nixos/modules/services/x11/hardware/libinput.nix
+++ b/nixos/modules/services/x11/hardware/libinput.nix
@@ -184,7 +184,11 @@ in {
         ''
           Option "DragLockButtons" "L1 B1 L2 B2"
         '';
-        description = "Additional options for libinput touchpad driver.";
+        description = ''
+          Additional options for libinput touchpad driver. See
+          <citerefentry><refentrytitle>libinput</refentrytitle><manvolnum>4</manvolnum></citerefentry>
+          for available options.
+        '';
       };
 
     };
diff --git a/nixos/modules/services/x11/hardware/wacom.nix b/nixos/modules/services/x11/hardware/wacom.nix
index a27889c36a7..dad2b308d1b 100644
--- a/nixos/modules/services/x11/hardware/wacom.nix
+++ b/nixos/modules/services/x11/hardware/wacom.nix
@@ -15,6 +15,7 @@ in
     services.xserver.wacom = {
 
       enable = mkOption {
+        type = types.bool;
         default = false;
         description = ''
           Whether to enable the Wacom touchscreen/digitizer/tablet.
diff --git a/nixos/modules/services/x11/picom.nix b/nixos/modules/services/x11/picom.nix
index e3bd21be73e..1289edd2904 100644
--- a/nixos/modules/services/x11/picom.nix
+++ b/nixos/modules/services/x11/picom.nix
@@ -1,39 +1,48 @@
 { config, lib, pkgs, ... }:
 
 with lib;
-with builtins;
 
 let
 
   cfg = config.services.picom;
 
-  pairOf = x: with types; addCheck (listOf x) (y: length y == 2);
-
-  floatBetween = a: b: with lib; with types;
-    addCheck str (x: versionAtLeast x a && versionOlder x b);
-
-  toConf = attrs: concatStringsSep "\n"
-    (mapAttrsToList
-      (k: v: let
-        sep = if isAttrs v then ":" else "=";
-        # Basically a tinkered lib.generators.mkKeyValueDefault
-        mkValueString = v:
-          if isBool v        then boolToString v
-          else if isInt v    then toString v
-          else if isFloat v  then toString v
-          else if isString v then ''"${escape [ ''"'' ] v}"''
-          else if isList v   then "[ "
-            + concatMapStringsSep " , " mkValueString v
-            + " ]"
-          else if isAttrs v  then "{ "
-            + concatStringsSep " "
-              (mapAttrsToList
-                (key: value: "${toString key}=${mkValueString value};")
-                v)
-            + " }"
-          else abort "picom.mkValueString: unexpected type (v = ${v})";
-      in "${escape [ sep ] k}${sep}${mkValueString v};")
-      attrs);
+  pairOf = x: with types;
+    addCheck (listOf x) (y: length y == 2)
+    // { description = "pair of ${x.description}"; };
+
+  floatBetween = a: b: with types;
+    let
+      # toString prints floats with hardcoded high precision
+      floatToString = f: builtins.toJSON f;
+    in
+      addCheck float (x: x <= b && x >= a)
+      // { description = "a floating point number in " +
+                         "range [${floatToString a}, ${floatToString b}]"; };
+
+  mkDefaultAttrs = mapAttrs (n: v: mkDefault v);
+
+  # Basically a tinkered lib.generators.mkKeyValueDefault
+  # It either serializes a top-level definition "key: { values };"
+  # or an expression "key = { values };"
+  mkAttrsString = top:
+    mapAttrsToList (k: v:
+      let sep = if (top && isAttrs v) then ":" else "=";
+      in "${escape [ sep ] k}${sep}${mkValueString v};");
+
+  # This serializes a Nix expression to the libconfig format.
+  mkValueString = v:
+         if types.bool.check  v then boolToString v
+    else if types.int.check   v then toString v
+    else if types.float.check v then toString v
+    else if types.str.check   v then "\"${escape [ "\"" ] v}\""
+    else if builtins.isList   v then "[ ${concatMapStringsSep " , " mkValueString v} ]"
+    else if types.attrs.check v then "{ ${concatStringsSep " " (mkAttrsString false v) } }"
+    else throw ''
+                 invalid expression used in option services.picom.settings:
+                 ${v}
+               '';
+
+  toConf = attrs: concatStringsSep "\n" (mkAttrsString true attrs);
 
   configFile = pkgs.writeText "picom.conf" (toConf cfg.settings);
 
@@ -61,7 +70,7 @@ in {
     };
 
     fadeDelta = mkOption {
-      type = types.addCheck types.int (x: x > 0);
+      type = types.ints.positive;
       default = 10;
       example = 5;
       description = ''
@@ -70,12 +79,11 @@ in {
     };
 
     fadeSteps = mkOption {
-      type = pairOf (floatBetween "0.01" "1.01");
-      default = [ "0.028" "0.03" ];
-      example = [ "0.04" "0.04" ];
+      type = pairOf (floatBetween 0.01 1);
+      default = [ 0.028 0.03 ];
+      example = [ 0.04 0.04 ];
       description = ''
         Opacity change between fade steps (in and out).
-        (numbers in range 0.01 - 1.0)
       '';
     };
 
@@ -111,11 +119,11 @@ in {
     };
 
     shadowOpacity = mkOption {
-      type = floatBetween "0.0" "1.01";
-      default = "0.75";
-      example = "0.8";
+      type = floatBetween 0 1;
+      default = 0.75;
+      example = 0.8;
       description = ''
-        Window shadows opacity (number in range 0.0 - 1.0).
+        Window shadows opacity.
       '';
     };
 
@@ -134,29 +142,29 @@ in {
     };
 
     activeOpacity = mkOption {
-      type = floatBetween "0.0" "1.01";
-      default = "1.0";
-      example = "0.8";
+      type = floatBetween 0 1;
+      default = 1.0;
+      example = 0.8;
       description = ''
-        Opacity of active windows (number in range 0.0 - 1.0).
+        Opacity of active windows.
       '';
     };
 
     inactiveOpacity = mkOption {
-      type = floatBetween "0.1" "1.01";
-      default = "1.0";
-      example = "0.8";
+      type = floatBetween 0.1 1;
+      default = 1.0;
+      example = 0.8;
       description = ''
-        Opacity of inactive windows (number in range 0.1 - 1.0).
+        Opacity of inactive windows.
       '';
     };
 
     menuOpacity = mkOption {
-      type = floatBetween "0.0" "1.01";
-      default = "1.0";
-      example = "0.8";
+      type = floatBetween 0 1;
+      default = 1.0;
+      example = 0.8;
       description = ''
-        Opacity of dropdown and popup menu (number in range 0.0 - 1.0).
+        Opacity of dropdown and popup menu.
       '';
     };
 
@@ -210,7 +218,7 @@ in {
     };
 
     refreshRate = mkOption {
-      type = types.addCheck types.int (x: x >= 0);
+      type = types.ints.unsigned;
       default = 0;
       example = 60;
       description = ''
@@ -218,54 +226,69 @@ in {
       '';
     };
 
-    settings = let
-      configTypes = with types; oneOf [ bool int float str ];
-      # types.loaOf converts lists to sets
-      loaOf = t: with types; either (listOf t) (attrsOf t);
+    settings = with types;
+    let
+      scalar = oneOf [ bool int float str ]
+        // { description = "scalar types"; };
+
+      libConfig = oneOf [ scalar (listOf libConfig) (attrsOf libConfig) ]
+        // { description = "libconfig type"; };
+
+      topLevel = attrsOf libConfig
+        // { description = ''
+               libconfig configuration. The format consists of an attributes
+               set (called a group) of settings. Each setting can be a scalar type
+               (boolean, integer, floating point number or string), a list of
+               scalars or a group itself
+             '';
+           };
+
     in mkOption {
-      type = loaOf (types.either configTypes (loaOf (types.either configTypes (loaOf configTypes))));
-      default = {};
+      type = topLevel;
+      default = { };
+      example = literalExample ''
+        blur =
+          { method = "gaussian";
+            size = 10;
+            deviation = 5.0;
+          };
+      '';
       description = ''
-        Additional Picom configuration.
+        Picom settings. Use this option to configure Picom settings not exposed
+        in a NixOS option, or to override one. For the available options see the
+        CONFIGURATION FILES section of <literal>picom(1)</literal>.
       '';
     };
   };
 
   config = mkIf cfg.enable {
-    services.picom.settings = let
-      # Hard conversion to float, literally lib.toInt but toFloat
-      toFloat = str: let
-        may_be_float = builtins.fromJSON str;
-      in if builtins.isFloat may_be_float
-        then may_be_float
-        else throw "Could not convert ${str} to float.";
-    in {
+    services.picom.settings = mkDefaultAttrs {
       # fading
-      fading           = mkDefault cfg.fade;
-      fade-delta       = mkDefault cfg.fadeDelta;
-      fade-in-step     = mkDefault (toFloat (elemAt cfg.fadeSteps 0));
-      fade-out-step    = mkDefault (toFloat (elemAt cfg.fadeSteps 1));
-      fade-exclude     = mkDefault cfg.fadeExclude;
+      fading           = cfg.fade;
+      fade-delta       = cfg.fadeDelta;
+      fade-in-step     = elemAt cfg.fadeSteps 0;
+      fade-out-step    = elemAt cfg.fadeSteps 1;
+      fade-exclude     = cfg.fadeExclude;
 
       # shadows
-      shadow           = mkDefault cfg.shadow;
-      shadow-offset-x  = mkDefault (elemAt cfg.shadowOffsets 0);
-      shadow-offset-y  = mkDefault (elemAt cfg.shadowOffsets 1);
-      shadow-opacity   = mkDefault (toFloat cfg.shadowOpacity);
-      shadow-exclude   = mkDefault cfg.shadowExclude;
+      shadow           = cfg.shadow;
+      shadow-offset-x  = elemAt cfg.shadowOffsets 0;
+      shadow-offset-y  = elemAt cfg.shadowOffsets 1;
+      shadow-opacity   = cfg.shadowOpacity;
+      shadow-exclude   = cfg.shadowExclude;
 
       # opacity
-      active-opacity   = mkDefault (toFloat cfg.activeOpacity);
-      inactive-opacity = mkDefault (toFloat cfg.inactiveOpacity);
+      active-opacity   = cfg.activeOpacity;
+      inactive-opacity = cfg.inactiveOpacity;
 
-      wintypes         = mkDefault cfg.wintypes;
+      wintypes         = cfg.wintypes;
 
-      opacity-rule     = mkDefault cfg.opacityRules;
+      opacity-rule     = cfg.opacityRules;
 
       # other options
-      backend          = mkDefault cfg.backend;
-      vsync            = mkDefault cfg.vSync;
-      refresh-rate     = mkDefault cfg.refreshRate;
+      backend          = cfg.backend;
+      vsync            = cfg.vSync;
+      refresh-rate     = cfg.refreshRate;
     };
 
     systemd.user.services.picom = {
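To make the libconfig mapping above concrete, a small illustrative sketch (the blur group mirrors the literalExample in this diff; the rendered output shown is approximate, following the mkAttrsString/mkValueString serializer):

    services.picom.settings = {
      # a top-level attribute set becomes a libconfig group: `blur:{ ... };`
      blur = {
        method = "gaussian";
        size = 10;
        deviation = 5.0;
      };
      # a list of strings becomes `[ "..." , "..." ]`
      fade-exclude = [ "window_type = 'menu'" ];
    };

which renders roughly as:

    blur:{ method="gaussian"; size=10; deviation=5.000000; };
    fade-exclude=[ "window_type = 'menu'" ];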
diff --git a/nixos/modules/services/x11/window-managers/default.nix b/nixos/modules/services/x11/window-managers/default.nix
index 04a9fc46628..b815c5f16a1 100644
--- a/nixos/modules/services/x11/window-managers/default.nix
+++ b/nixos/modules/services/x11/window-managers/default.nix
@@ -30,6 +30,7 @@ in
     ./sawfish.nix
     ./stumpwm.nix
     ./spectrwm.nix
+    ./tinywm.nix
     ./twm.nix
     ./windowmaker.nix
     ./wmii.nix
diff --git a/nixos/modules/services/x11/window-managers/tinywm.nix b/nixos/modules/services/x11/window-managers/tinywm.nix
new file mode 100644
index 00000000000..8e5d9b9170c
--- /dev/null
+++ b/nixos/modules/services/x11/window-managers/tinywm.nix
@@ -0,0 +1,25 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.xserver.windowManager.tinywm;
+in
+{
+  ###### interface
+  options = {
+    services.xserver.windowManager.tinywm.enable = mkEnableOption "tinywm";
+  };
+
+  ###### implementation
+  config = mkIf cfg.enable {
+    services.xserver.windowManager.session = singleton {
+      name = "tinywm";
+      start = ''
+        ${pkgs.tinywm}/bin/tinywm &
+        waitPID=$!
+      '';
+    };
+    environment.systemPackages = [ pkgs.tinywm ];
+  };
+}
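A minimal usage sketch for the new tinywm module (only the enable line comes from this diff; the "none+tinywm" session name is assumed from the usual NixOS window-manager naming convention):

    services.xserver.enable = true;
    services.xserver.windowManager.tinywm.enable = true;
    # optionally select it as the default session
    services.xserver.displayManager.defaultSession = "none+tinywm";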
diff --git a/nixos/modules/system/activation/no-clone.nix b/nixos/modules/system/activation/no-clone.nix
index 7f458443526..912420347dc 100644
--- a/nixos/modules/system/activation/no-clone.nix
+++ b/nixos/modules/system/activation/no-clone.nix
@@ -4,6 +4,5 @@ with lib;
 
 {
   boot.loader.grub.device = mkOverride 0 "nodev";
-  nesting.children = mkOverride 0 [];
-  nesting.clone = mkOverride 0 [];
+  specialisation = mkOverride 0 {};
 }
diff --git a/nixos/modules/system/activation/top-level.nix b/nixos/modules/system/activation/top-level.nix
index 49693b6f1be..f6739977fa4 100644
--- a/nixos/modules/system/activation/top-level.nix
+++ b/nixos/modules/system/activation/top-level.nix
@@ -11,21 +11,16 @@ let
   # you can provide an easy way to boot the same configuration
   # as you use, but with another kernel
   # !!! fix this
-  cloner = inheritParent: list:
-    map (childConfig:
+  children = mapAttrs (childName: childConfig:
       (import ../../../lib/eval-config.nix {
         inherit baseModules;
         system = config.nixpkgs.initialSystem;
         modules =
-           (optionals inheritParent modules)
+           (optionals childConfig.inheritParentConfig modules)
         ++ [ ./no-clone.nix ]
-        ++ [ childConfig ];
+        ++ [ childConfig.configuration ];
       }).config.system.build.toplevel
-    ) list;
-
-  children =
-     cloner false config.nesting.children
-  ++ cloner true config.nesting.clone;
+    ) config.specialisation;
 
   systemBuilder =
     let
@@ -77,12 +72,9 @@ let
       echo -n "$nixosLabel" > $out/nixos-version
       echo -n "${config.boot.kernelPackages.stdenv.hostPlatform.system}" > $out/system
 
-      mkdir $out/fine-tune
-      childCount=0
-      for i in $children; do
-        childCount=$(( childCount + 1 ))
-        ln -s $i $out/fine-tune/child-$childCount
-      done
+      mkdir $out/specialisation
+      ${concatStringsSep "\n"
+      (mapAttrsToList (name: path: "ln -s ${path} $out/specialisation/${name}") children)}
 
       mkdir $out/bin
       export localeArchive="${config.i18n.glibcLocales}/lib/locale/locale-archive"
@@ -112,7 +104,6 @@ let
     shell = "${pkgs.bash}/bin/sh";
     su = "${pkgs.shadow.su}/bin/su";
 
-    inherit children;
     kernelParams = config.boot.kernelParams;
     installBootLoader =
       config.system.build.installBootLoader
@@ -143,6 +134,11 @@ let
 in
 
 {
+  imports = [
+    (mkRemovedOptionModule [ "nesting" "clone" ] "Use `specialisation.«name» = { inheritParentConfig = true; configuration = { ... }; }` instead.")
+    (mkRemovedOptionModule [ "nesting" "children" ] "Use `specialisation.«name».configuration = { ... }` instead.")
+  ];
+
   options = {
 
     system.build = mkOption {
@@ -154,26 +150,35 @@ in
       '';
     };
 
-    nesting.children = mkOption {
-      default = [];
-      description = ''
-        Additional configurations to build.
-      '';
-    };
-
-    nesting.clone = mkOption {
-      default = [];
+    specialisation = mkOption {
+      default = {};
+      example = lib.literalExample "{ fewJobsManyCores.configuration = { nix.buildCores = 0; nix.maxJobs = 1; }; }";
       description = ''
-        Additional configurations to build based on the current
-        configuration which then has a lower priority.
+        Additional configurations to build. If
+        <literal>inheritParentConfig</literal> is true, the system
+        will be based on the overall system configuration.
 
-        To switch to a cloned configuration (e.g. <literal>child-1</literal>)
-        at runtime, run
+        To switch to a specialised configuration
+        (e.g. <literal>fewJobsManyCores</literal>) at runtime, run:
 
         <programlisting>
-        # sudo /run/current-system/fine-tune/child-1/bin/switch-to-configuration test
+        # sudo /run/current-system/specialisation/fewJobsManyCores/bin/switch-to-configuration test
         </programlisting>
       '';
+      type = types.attrsOf (types.submodule (
+        { ... }: {
+          options.inheritParentConfig = mkOption {
+            type = types.bool;
+            default = true;
+            description = "Include the entire system's configuration. Set to false to make a completely differently configured system.";
+          };
+
+          options.configuration = mkOption {
+            default = {};
+            description = "Arbitrary NixOS configuration options.";
+          };
+        })
+      );
     };
 
     system.boot.loader.id = mkOption {
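For readers migrating from the removed nesting.* options, a short sketch of the replacement interface (the attribute name is illustrative and mirrors the example above):

    specialisation.fewJobsManyCores = {
      inheritParentConfig = true;   # default: extend the parent configuration
      configuration = {
        nix.buildCores = 0;
        nix.maxJobs = 1;
      };
    };

The built system links the result under /run/current-system/specialisation/fewJobsManyCores, matching the switch-to-configuration command shown in the option description.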
diff --git a/nixos/modules/system/boot/initrd-ssh.nix b/nixos/modules/system/boot/initrd-ssh.nix
index 5a334e69056..f7ef2610370 100644
--- a/nixos/modules/system/boot/initrd-ssh.nix
+++ b/nixos/modules/system/boot/initrd-ssh.nix
@@ -55,7 +55,7 @@ in
 
         <screen>
         <prompt># </prompt>ssh-keygen -t rsa -N "" -f /etc/secrets/initrd/ssh_host_rsa_key
-        <prompt># </prompt>ssh-keygen -t ed25519 -N "" -f /etc/secrets/initrd/ssh_host_ed_25519_key
+        <prompt># </prompt>ssh-keygen -t ed25519 -N "" -f /etc/secrets/initrd/ssh_host_ed25519_key
         </screen>
 
         <warning>
@@ -83,6 +83,12 @@ in
         Authorized keys for the root user on initrd.
       '';
     };
+
+    extraConfig = mkOption {
+      type = types.lines;
+      default = "";
+      description = "Verbatim contents of <filename>sshd_config</filename>.";
+    };
   };
 
   imports =
@@ -126,6 +132,8 @@ in
       '' else ''
         UseDNS no
       ''}
+
+      ${cfg.extraConfig}
     '';
   in mkIf (config.boot.initrd.network.enable && cfg.enable) {
     assertions = [
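An illustrative example of the new extraConfig option (the directives shown are ordinary sshd_config keywords, not specific to this module):

    boot.initrd.network.ssh.extraConfig = ''
      MaxAuthTries 3
      LoginGraceTime 20
    '';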
diff --git a/nixos/modules/system/boot/loader/grub/install-grub.pl b/nixos/modules/system/boot/loader/grub/install-grub.pl
index ca0fb0248e0..8df18cbd901 100644
--- a/nixos/modules/system/boot/loader/grub/install-grub.pl
+++ b/nixos/modules/system/boot/loader/grub/install-grub.pl
@@ -409,7 +409,7 @@ $conf .= "$extraEntries\n" unless $extraEntriesBeforeNixOS;
 
 # Find all the children of the current default configuration
 # Do not search for grand children
-my @links = sort (glob "$defaultConfig/fine-tune/*");
+my @links = sort (glob "$defaultConfig/specialisation/*");
 foreach my $link (@links) {
 
     my $entryName = "";
@@ -425,7 +425,8 @@ foreach my $link (@links) {
     if ($cfgName) {
         $entryName = $cfgName;
     } else {
-        $entryName = "($date - $version)";
+        my $linkname = basename($link);
+        $entryName = "($linkname - $date - $version)";
     }
     addEntry("NixOS - $entryName", $link);
 }
diff --git a/nixos/modules/system/boot/loader/init-script/init-script-builder.sh b/nixos/modules/system/boot/loader/init-script/init-script-builder.sh
index 08d4ab14c9c..6f48d2539ac 100644
--- a/nixos/modules/system/boot/loader/init-script/init-script-builder.sh
+++ b/nixos/modules/system/boot/loader/init-script/init-script-builder.sh
@@ -69,7 +69,7 @@ addEntry "NixOS - Default" $defaultConfig ""
 
 # Add all generations of the system profile to the menu, in reverse
 # (most recent to least recent) order.
-for link in $((ls -d $defaultConfig/fine-tune/* ) | sort -n); do
+for link in $((ls -d $defaultConfig/specialisation/* ) | sort -n); do
     date=$(stat --printf="%y\n" $link | sed 's/\..*//')
     addEntry "NixOS - variation" $link ""
 done
diff --git a/nixos/modules/system/boot/loader/raspberrypi/raspberrypi-builder.nix b/nixos/modules/system/boot/loader/raspberrypi/raspberrypi-builder.nix
index 7eb52e3d021..e75aa9d1387 100644
--- a/nixos/modules/system/boot/loader/raspberrypi/raspberrypi-builder.nix
+++ b/nixos/modules/system/boot/loader/raspberrypi/raspberrypi-builder.nix
@@ -3,8 +3,8 @@
 pkgs.substituteAll {
   src = ./raspberrypi-builder.sh;
   isExecutable = true;
-  inherit (pkgs) bash;
-  path = [pkgs.coreutils pkgs.gnused pkgs.gnugrep];
+  inherit (pkgs.buildPackages) bash;
+  path = with pkgs.buildPackages; [coreutils gnused gnugrep];
   firmware = pkgs.raspberrypifw;
   inherit configTxt;
 }
diff --git a/nixos/modules/system/boot/loader/raspberrypi/uboot-builder.nix b/nixos/modules/system/boot/loader/raspberrypi/uboot-builder.nix
index 1dc397e521b..a4352ab9a24 100644
--- a/nixos/modules/system/boot/loader/raspberrypi/uboot-builder.nix
+++ b/nixos/modules/system/boot/loader/raspberrypi/uboot-builder.nix
@@ -20,7 +20,7 @@ let
 
   extlinuxConfBuilder =
     import ../generic-extlinux-compatible/extlinux-conf-builder.nix {
-      inherit pkgs;
+      pkgs = pkgs.buildPackages;
     };
 in
 pkgs.substituteAll {
diff --git a/nixos/modules/system/boot/networkd.nix b/nixos/modules/system/boot/networkd.nix
index 3078f84f6e9..9b34b12e73a 100644
--- a/nixos/modules/system/boot/networkd.nix
+++ b/nixos/modules/system/boot/networkd.nix
@@ -205,7 +205,7 @@ let
       "IPv6HopLimit" "IPv4ProxyARP" "IPv6ProxyNDP" "IPv6ProxyNDPAddress"
       "IPv6PrefixDelegation" "IPv6MTUBytes" "Bridge" "Bond" "VRF" "VLAN"
       "IPVLAN" "MACVLAN" "VXLAN" "Tunnel" "ActiveSlave" "PrimarySlave"
-      "ConfigureWithoutCarrier" "Xfrm"
+      "ConfigureWithoutCarrier" "Xfrm" "KeepConfiguration"
     ])
     # Note: For DHCP the values both, none, v4, v6 are deprecated
     (assertValueOneOf "DHCP" ["yes" "no" "ipv4" "ipv6" "both" "none" "v4" "v6"])
@@ -228,6 +228,7 @@ let
     (assertValueOneOf "ActiveSlave" boolValues)
     (assertValueOneOf "PrimarySlave" boolValues)
     (assertValueOneOf "ConfigureWithoutCarrier" boolValues)
+    (assertValueOneOf "KeepConfiguration" (boolValues ++ ["static" "dhcp-on-stop" "dhcp"]))
   ];
 
   checkAddress = checkUnitConfig "Address" [
@@ -274,15 +275,16 @@ let
     ])
   ];
 
-  checkDhcp = checkUnitConfig "DHCP" [
+  checkDhcpV4 = checkUnitConfig "DHCPv4" [
     (assertOnlyFields [
-      "UseDNS" "UseNTP" "UseMTU" "Anonymize" "SendHostname" "UseHostname"
-      "Hostname" "UseDomains" "UseRoutes" "UseTimezone" "CriticalConnection"
-      "ClientIdentifier" "VendorClassIdentifier" "UserClass" "DUIDType"
-      "DUIDRawData" "IAID" "RequestBroadcast" "RouteMetric" "RouteTable"
-      "ListenPort" "RapidCommit"
+      "UseDNS" "RoutesToDNS" "UseNTP" "UseMTU" "Anonymize" "SendHostname" "UseHostname"
+      "Hostname" "UseDomains" "UseRoutes" "UseTimezone"
+      "ClientIdentifier" "VendorClassIdentifier" "UserClass" "MaxAttempts"
+      "DUIDType" "DUIDRawData" "IAID" "RequestBroadcast" "RouteMetric" "RouteTable"
+      "ListenPort" "SendRelease"
     ])
     (assertValueOneOf "UseDNS" boolValues)
+    (assertValueOneOf "RoutesToDNS" boolValues)
     (assertValueOneOf "UseNTP" boolValues)
     (assertValueOneOf "UseMTU" boolValues)
     (assertValueOneOf "Anonymize" boolValues)
@@ -291,13 +293,50 @@ let
     (assertValueOneOf "UseDomains" ["yes" "no" "route"])
     (assertValueOneOf "UseRoutes" boolValues)
     (assertValueOneOf "UseTimezone" boolValues)
-    (assertValueOneOf "CriticalConnection" boolValues)
+    (assertMinimum "MaxAttempts" 0)
     (assertValueOneOf "RequestBroadcast" boolValues)
     (assertInt "RouteTable")
     (assertMinimum "RouteTable" 0)
+    (assertValueOneOf "SendRelease" boolValues)
+  ];
+
+  checkDhcpV6 = checkUnitConfig "DHCPv6" [
+    (assertOnlyFields [
+      "UseDns" "UseNTP" "RapidCommit" "ForceDHCPv6PDOtherInformation"
+      "PrefixDelegationHint"
+    ])
+    (assertValueOneOf "UseDNS" boolValues)
+    (assertValueOneOf "UseNTP" boolValues)
     (assertValueOneOf "RapidCommit" boolValues)
+    (assertValueOneOf "ForceDHCPv6PDOtherInformation" boolValues)
+  ];
+
+  checkIpv6PrefixDelegation = checkUnitConfig "IPv6PrefixDelegation" [
+    (assertOnlyFields [
+      "Managed"  "OtherInformation"  "RouterLifetimeSec"
+      "RouterPreference"  "EmitDNS"  "DNS"  "EmitDomains"  "Domains"
+      "DNSLifetimeSec"
+    ])
+    (assertValueOneOf "Managed" boolValues)
+    (assertValueOneOf "OtherInformation" boolValues)
+    (assertValueOneOf "RouterPreference" ["high" "medium" "low" "normal" "default"])
+    (assertValueOneOf "EmitDNS" boolValues)
+    (assertValueOneOf "EmitDomains" boolValues)
+    (assertMinimum "DNSLifetimeSec" 0)
+  ];
+
+  checkIpv6Prefix = checkUnitConfig "IPv6Prefix" [
+    (assertOnlyFields [
+      "AddressAutoconfiguration"  "OnLink"  "Prefix"
+      "PreferredLifetimeSec" "ValidLifetimeSec"
+    ])
+    (assertValueOneOf "AddressAutoconfiguration" boolValues)
+    (assertValueOneOf "OnLink" boolValues)
+    (assertMinimum "PreferredLifetimeSec" 0)
+    (assertMinimum "ValidLifetimeSec" 0)
   ];
 
   checkDhcpServer = checkUnitConfig "DHCPServer" [
     (assertOnlyFields [
       "PoolOffset" "PoolSize" "DefaultLeaseTimeSec" "MaxLeaseTimeSec"
@@ -621,6 +660,22 @@ let
     };
   };
 
+  ipv6PrefixOptions = {
+    options = {
+      ipv6PrefixConfig = mkOption {
+        default = {};
+        example = { Prefix = "fd00::/64"; };
+        type = types.addCheck (types.attrsOf unitOption) checkIpv6Prefix;
+        description = ''
+          Each attribute in this set specifies an option in the
+          <literal>[IPv6Prefix]</literal> section of the unit.  See
+          <citerefentry><refentrytitle>systemd.network</refentrytitle>
+          <manvolnum>5</manvolnum></citerefentry> for details.
+        '';
+      };
+    };
+  };
+
 
   networkOptions = commonNetworkOptions // {
 
@@ -636,13 +691,55 @@ let
       '';
     };
 
+    # systemd.network.networks.*.dhcpConfig has been deprecated in favor of ….dhcpV4Config
+    # Produce a nice warning message so users know it is gone.
     dhcpConfig = mkOption {
+      visible = false;
+      apply = _: throw "The option `systemd.network.networks.*.dhcpConfig` can no longer be used since it's been removed. Please use `systemd.network.networks.*.dhcpV4Config` instead.";
+    };
+
+    dhcpV4Config = mkOption {
+      default = {};
+      example = { UseDNS = true; UseRoutes = true; };
+      type = types.addCheck (types.attrsOf unitOption) checkDhcpV4;
+      description = ''
+        Each attribute in this set specifies an option in the
+        <literal>[DHCPv4]</literal> section of the unit.  See
+        <citerefentry><refentrytitle>systemd.network</refentrytitle>
+        <manvolnum>5</manvolnum></citerefentry> for details.
+      '';
+    };
+
+    dhcpV6Config = mkOption {
       default = {};
       example = { UseDNS = true; UseRoutes = true; };
-      type = types.addCheck (types.attrsOf unitOption) checkDhcp;
+      type = types.addCheck (types.attrsOf unitOption) checkDhcpV6;
+      description = ''
+        Each attribute in this set specifies an option in the
+        <literal>[DHCPv6]</literal> section of the unit.  See
+        <citerefentry><refentrytitle>systemd.network</refentrytitle>
+        <manvolnum>5</manvolnum></citerefentry> for details.
+      '';
+    };
+
+    ipv6PrefixDelegationConfig = mkOption {
+      default = {};
+      example = { EmitDNS = true; Managed = true; OtherInformation = true; };
+      type = types.addCheck (types.attrsOf unitOption) checkIpv6PrefixDelegation;
       description = ''
         Each attribute in this set specifies an option in the
-        <literal>[DHCP]</literal> section of the unit.  See
+        <literal>[IPv6PrefixDelegation]</literal> section of the unit.  See
+        <citerefentry><refentrytitle>systemd.network</refentrytitle>
+        <manvolnum>5</manvolnum></citerefentry> for details.
+      '';
+    };
+
+    ipv6Prefixes = mkOption {
+      default = [];
+      example = [ { ipv6PrefixConfig = { AddressAutoconfiguration = true; OnLink = true; }; } ];
+      type = with types; listOf (submodule ipv6PrefixOptions);
+      description = ''
+        A list of ipv6Prefix sections to be added to the unit.  See
         <citerefentry><refentrytitle>systemd.network</refentrytitle>
         <manvolnum>5</manvolnum></citerefentry> for details.
       '';
@@ -973,11 +1070,26 @@ let
           ${concatStringsSep "\n" (map (s: "Tunnel=${s}") def.tunnel)}
           ${concatStringsSep "\n" (map (s: "Xfrm=${s}") def.xfrm)}
 
-          ${optionalString (def.dhcpConfig != { }) ''
-            [DHCP]
-            ${attrsToSection def.dhcpConfig}
+          ${optionalString (def.dhcpV4Config != { }) ''
+            [DHCPv4]
+            ${attrsToSection def.dhcpV4Config}
 
           ''}
+          ${optionalString (def.dhcpV6Config != {}) ''
+            [DHCPv6]
+            ${attrsToSection def.dhcpV6Config}
+
+          ''}
+          ${optionalString (def.ipv6PrefixDelegationConfig != {}) ''
+            [IPv6PrefixDelegation]
+            ${attrsToSection def.ipv6PrefixDelegationConfig}
+
+          ''}
+          ${flip concatMapStrings def.ipv6Prefixes (x: ''
+            [IPv6Prefix]
+            ${attrsToSection x.ipv6PrefixConfig}
+
+          '')}
           ${optionalString (def.dhcpServerConfig != { }) ''
             [DHCPServer]
             ${attrsToSection def.dhcpServerConfig}
@@ -1054,6 +1166,7 @@ in
   };
 
   config = mkMerge [
+
     # .link units are honored by udev, no matter if systemd-networkd is enabled or not.
     {
       systemd.network.units = mapAttrs' (n: v: nameValuePair "${n}.link" (linkToUnit n v)) cfg.links;
@@ -1073,7 +1186,7 @@ in
 
       systemd.services.systemd-networkd = {
         wantedBy = [ "multi-user.target" ];
-        restartTriggers = attrNames unitFiles;
+        restartTriggers = map (x: x.source) (attrValues unitFiles);
         # prevent race condition with interface renaming (#39069)
         requires = [ "systemd-udev-settle.service" ];
         after = [ "systemd-udev-settle.service" ];
diff --git a/nixos/modules/system/boot/resolved.nix b/nixos/modules/system/boot/resolved.nix
index da61c64faf8..b7aaef575ac 100644
--- a/nixos/modules/system/boot/resolved.nix
+++ b/nixos/modules/system/boot/resolved.nix
@@ -138,6 +138,10 @@ in
 
     users.users.resolved.group = "systemd-resolve";
 
+    # add resolve to nss hosts database if enabled and nscd enabled
+    # system.nssModules is configured in nixos/modules/system/boot/systemd.nix
+    system.nssDatabases.hosts = optional config.services.nscd.enable "resolve [!UNAVAIL=return]";
+
     systemd.additionalUpstreamSystemUnits = [
       "systemd-resolved.service"
     ];
diff --git a/nixos/modules/system/boot/stage-1.nix b/nixos/modules/system/boot/stage-1.nix
index 9e3ee5cf0a3..dfd158e2d75 100644
--- a/nixos/modules/system/boot/stage-1.nix
+++ b/nixos/modules/system/boot/stage-1.nix
@@ -137,6 +137,8 @@ let
       ''}
 
       # Copy secrets if needed.
+      #
+      # TODO: move out to a separate script; see #85000.
       ${optionalString (!config.boot.loader.supportsInitrdSecrets)
           (concatStringsSep "\n" (mapAttrsToList (dest: source:
              let source' = if source == null then dest else source; in
@@ -579,6 +581,25 @@ in
         message = "boot.resumeDevice has to be an absolute path."
           + " Old \"x:y\" style is no longer supported.";
       }
+      # TODO: remove when #85000 is fixed
+      { assertion = !config.boot.loader.supportsInitrdSecrets ->
+          all (source:
+            builtins.isPath source ||
+            (builtins.isString source && hasPrefix builtins.storeDir source))
+          (attrValues config.boot.initrd.secrets);
+        message = ''
+          boot.loader.initrd.secrets values must be unquoted paths when
+          using a bootloader that doesn't natively support initrd
+          secrets, e.g.:
+
+            boot.initrd.secrets = {
+              "/etc/secret" = /path/to/secret;
+            };
+
+          Note that this will result in all secrets being stored
+          world-readable in the Nix store!
+        '';
+      }
     ];
 
     system.build =
diff --git a/nixos/modules/system/boot/systemd-lib.nix b/nixos/modules/system/boot/systemd-lib.nix
index a3360291586..fa109394fed 100644
--- a/nixos/modules/system/boot/systemd-lib.nix
+++ b/nixos/modules/system/boot/systemd-lib.nix
@@ -114,7 +114,9 @@ in rec {
         (if isList value then value else [value]))
         as));
 
-  generateUnits = type: units: upstreamUnits: upstreamWants:
+  generateUnits = generateUnits' true;
+
+  generateUnits' = allowCollisions: type: units: upstreamUnits: upstreamWants:
     pkgs.runCommand "${type}-units"
       { preferLocalBuild = true;
         allowSubstitutes = false;
@@ -182,8 +184,13 @@ in rec {
           if [ "$(readlink -f $i/$fn)" = /dev/null ]; then
             ln -sfn /dev/null $out/$fn
           else
-            mkdir -p $out/$fn.d
-            ln -s $i/$fn $out/$fn.d/overrides.conf
+            ${if allowCollisions then ''
+              mkdir -p $out/$fn.d
+              ln -s $i/$fn $out/$fn.d/overrides.conf
+            '' else ''
+              echo "Found multiple derivations configuring $fn!"
+              exit 1
+            ''}
           fi
        else
           ln -fs $i/$fn $out/
diff --git a/nixos/modules/system/boot/systemd-nspawn.nix b/nixos/modules/system/boot/systemd-nspawn.nix
index 1e2435e36f0..06ea5ee49f7 100644
--- a/nixos/modules/system/boot/systemd-nspawn.nix
+++ b/nixos/modules/system/boot/systemd-nspawn.nix
@@ -116,7 +116,7 @@ in {
     in 
       mkMerge [
         (mkIf (cfg != {}) { 
-          environment.etc."systemd/nspawn".source = mkIf (cfg != {}) (generateUnits "nspawn" units [] []);
+          environment.etc."systemd/nspawn".source = mkIf (cfg != {}) (generateUnits' false "nspawn" units [] []);
         })
         {
           systemd.targets.multi-user.wants = [ "machines.target" ];
diff --git a/nixos/modules/system/boot/systemd.nix b/nixos/modules/system/boot/systemd.nix
index 7f207e6c7ef..36a25c4e6c3 100644
--- a/nixos/modules/system/boot/systemd.nix
+++ b/nixos/modules/system/boot/systemd.nix
@@ -201,8 +201,23 @@ let
     ];
 
   makeJobScript = name: text:
-    let mkScriptName =  s: "unit-script-" + (replaceChars [ "\\" "@" ] [ "-" "_" ] (shellEscape s) );
-    in  pkgs.writeTextFile { name = mkScriptName name; executable = true; inherit text; };
+    let
+      scriptName = replaceChars [ "\\" "@" ] [ "-" "_" ] (shellEscape name);
+      out = pkgs.writeTextFile {
+        # The derivation name is different from the script file name
+        # to keep the script file name short to avoid cluttering logs.
+        name = "unit-script-${scriptName}";
+        executable = true;
+        destination = "/bin/${scriptName}";
+        text = ''
+          #!${pkgs.runtimeShell} -e
+          ${text}
+        '';
+        checkPhase = ''
+          ${pkgs.stdenv.shell} -n "$out/bin/${scriptName}"
+        '';
+      };
+    in "${out}/bin/${scriptName}";
 
   unitConfig = { config, options, ... }: {
     config = {
@@ -250,40 +265,28 @@ let
           environment.PATH = config.path;
         }
         (mkIf (config.preStart != "")
-          { serviceConfig.ExecStartPre = makeJobScript "${name}-pre-start" ''
-              #! ${pkgs.runtimeShell} -e
-              ${config.preStart}
-            '';
+          { serviceConfig.ExecStartPre =
+              makeJobScript "${name}-pre-start" config.preStart;
           })
         (mkIf (config.script != "")
-          { serviceConfig.ExecStart = makeJobScript "${name}-start" ''
-              #! ${pkgs.runtimeShell} -e
-              ${config.script}
-            '' + " " + config.scriptArgs;
+          { serviceConfig.ExecStart =
+              makeJobScript "${name}-start" config.script + " " + config.scriptArgs;
           })
         (mkIf (config.postStart != "")
-          { serviceConfig.ExecStartPost = makeJobScript "${name}-post-start" ''
-              #! ${pkgs.runtimeShell} -e
-              ${config.postStart}
-            '';
+          { serviceConfig.ExecStartPost =
+              makeJobScript "${name}-post-start" config.postStart;
           })
         (mkIf (config.reload != "")
-          { serviceConfig.ExecReload = makeJobScript "${name}-reload" ''
-              #! ${pkgs.runtimeShell} -e
-              ${config.reload}
-            '';
+          { serviceConfig.ExecReload =
+              makeJobScript "${name}-reload" config.reload;
           })
         (mkIf (config.preStop != "")
-          { serviceConfig.ExecStop = makeJobScript "${name}-pre-stop" ''
-              #! ${pkgs.runtimeShell} -e
-              ${config.preStop}
-            '';
+          { serviceConfig.ExecStop =
+              makeJobScript "${name}-pre-stop" config.preStop;
           })
         (mkIf (config.postStop != "")
-          { serviceConfig.ExecStopPost = makeJobScript "${name}-post-stop" ''
-              #! ${pkgs.runtimeShell} -e
-              ${config.postStop}
-            '';
+          { serviceConfig.ExecStopPost =
+              makeJobScript "${name}-post-stop" config.postStop;
           })
       ];
   };
@@ -405,6 +408,8 @@ let
     "hibernate" "hybrid-sleep" "suspend-then-hibernate" "lock"
   ];
 
+  proxy_env = config.networking.proxy.envVars;
+
 in
 
 {
@@ -593,17 +598,33 @@ in
         each other's limit. The value may be specified in the following
         units: s, min, h, ms, us. To turn off any kind of rate limiting,
         set either value to 0.
+
+        See <option>services.journald.rateLimitBurst</option> for important
+        considerations when setting this value.
       '';
     };
 
     services.journald.rateLimitBurst = mkOption {
-      default = 1000;
+      default = 10000;
       type = types.int;
       description = ''
         Configures the rate limiting burst limit (number of messages per
         interval) that is applied to all messages generated on the system.
         This rate limiting is applied per-service, so that two services
         which log do not interfere with each other's limit.
+
+        Note that the effective rate limit is multiplied by a factor derived
+        from the available free disk space for the journal, as described in
+        <link xlink:href="https://www.freedesktop.org/software/systemd/man/journald.conf.html">
+        journald.conf(5)</link>.
+
+        Note that the total amount of logs stored is limited by journald settings
+        such as <literal>SystemMaxUse</literal>, which defaults to a 4 GB cap.
+
+        It is thus recommended to compute for what period of time you will be
+        able to store logs when an application logs at the full burst rate.
+        With default settings for log lines that are 100 bytes long, this can
+        amount to just a few hours.
       '';
     };
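As a rough way to do that computation (a sketch under stated assumptions: the journal is capped by SystemMaxUse and a service logs continuously at the full burst rate):

    retention ≈ SystemMaxUse / ((rateLimitBurst / rate-limit interval) × stored bytes per entry)

where the stored size per entry includes journald's per-entry metadata and indexing on top of the raw line length, which is why even short lines can exhaust a multi-gigabyte cap within hours.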
 
@@ -811,6 +832,27 @@ in
 
     system.build.units = cfg.units;
 
+    # Systemd provides various NSS modules to look up dynamic users, locally
+    # configured IP adresses and local container hostnames.
+    # On NixOS, these can only be passed to the NSS system via nscd (and its
+    # LD_LIBRARY_PATH), which is why it's usually a very good idea to have nscd
+    # enabled (also see the config.nscd.enable description).
+    # While there is already an assertion in place complaining loudly about
+    # having nssModules configured and nscd disabled, for some reason we still
+    # check for nscd being enabled before adding to nssModules.
+    system.nssModules = optional config.services.nscd.enable systemd.out;
+    system.nssDatabases = mkIf config.services.nscd.enable {
+      hosts = (mkMerge [
+        [ "mymachines" ]
+        (mkOrder 1600 [ "myhostname" ]) # 1600 to ensure it's always the last
+      ]);
+      passwd = (mkMerge [
+        [ "mymachines" ]
+        (mkAfter [ "systemd" ])
+      ]);
+    };
+
     environment.systemPackages = [ systemd ];
 
     environment.etc = let
@@ -894,6 +936,13 @@ in
       "sysctl.d/50-coredump.conf".source = "${systemd}/example/sysctl.d/50-coredump.conf";
       "sysctl.d/50-default.conf".source = "${systemd}/example/sysctl.d/50-default.conf";
 
+      "tmpfiles.d/00-nixos.conf".text = ''
+        # This file is created automatically and should not be modified.
+        # Please change the option ‘systemd.tmpfiles.rules’ instead.
+
+        ${concatStringsSep "\n" cfg.tmpfiles.rules}
+      '';
+
       "tmpfiles.d/home.conf".source = "${systemd}/example/tmpfiles.d/home.conf";
       "tmpfiles.d/journal-nocow.conf".source = "${systemd}/example/tmpfiles.d/journal-nocow.conf";
       "tmpfiles.d/portables.conf".source = "${systemd}/example/tmpfiles.d/portables.conf";
@@ -906,13 +955,6 @@ in
       "tmpfiles.d/var.conf".source = "${systemd}/example/tmpfiles.d/var.conf";
       "tmpfiles.d/x11.conf".source = "${systemd}/example/tmpfiles.d/x11.conf";
 
-      "tmpfiles.d/nixos.conf".text = ''
-        # This file is created automatically and should not be modified.
-        # Please change the option ‘systemd.tmpfiles.rules’ instead.
-
-        ${concatStringsSep "\n" cfg.tmpfiles.rules}
-      '';
-
       "systemd/system-generators" = { source = hooks "generators" cfg.generators; };
       "systemd/system-shutdown" = { source = hooks "shutdown" cfg.shutdown; };
     });
@@ -1019,6 +1061,7 @@ in
     systemd.targets.remote-fs.unitConfig.X-StopOnReconfiguration = true;
     systemd.targets.network-online.wantedBy = [ "multi-user.target" ];
     systemd.services.systemd-binfmt.wants = [ "proc-sys-fs-binfmt_misc.mount" ];
+    systemd.services.systemd-importd.environment = proxy_env;
 
     # Don't bother with certain units in containers.
     systemd.services.systemd-remount-fs.unitConfig.ConditionVirtualization = "!container";
diff --git a/nixos/modules/tasks/network-interfaces-scripted.nix b/nixos/modules/tasks/network-interfaces-scripted.nix
index 98bae444df0..9720d90217c 100644
--- a/nixos/modules/tasks/network-interfaces-scripted.nix
+++ b/nixos/modules/tasks/network-interfaces-scripted.nix
@@ -237,6 +237,38 @@ let
             '';
           };
 
+        createNetworkLink = i:
+        let
+          deviceDependency = if (config.boot.isContainer || i.name == "lo")
+            then []
+            else [ (subsystemDevice i.name) ];
+        in
+        nameValuePair "network-link-${i.name}"
+        { description = "Link configuration of ${i.name}";
+          wantedBy = [ "network-interfaces.target" ];
+          before = [ "network-interfaces.target" ];
+          bindsTo = deviceDependency;
+          after = [ "network-pre.target" ] ++ deviceDependency;
+          path = [ pkgs.iproute ];
+          serviceConfig = {
+            Type = "oneshot";
+            RemainAfterExit = true;
+          };
+          script =
+            ''
+              echo "Configuring link..."
+            '' + optionalString (i.macAddress != null) ''
+              echo "setting MAC address to ${i.macAddress}..."
+              ip link set "${i.name}" address "${i.macAddress}"
+            '' + optionalString (i.mtu != null) ''
+              echo "setting MTU to ${toString i.mtu}..."
+              ip link set "${i.name}" mtu "${toString i.mtu}"
+            '' + ''
+              echo -n "bringing up interface... "
+              ip link set "${i.name}" up && echo "done" || (echo "failed"; exit 1)
+            '';
+        };
+
         createTunDevice = i: nameValuePair "${i.name}-netdev"
           { description = "Virtual Network Interface ${i.name}";
             bindsTo = [ "dev-net-tun.device" ];
@@ -508,6 +540,7 @@ let
           });
 
       in listToAttrs (
+           map createNetworkLink interfaces ++
            map configureAddrs interfaces ++
            map createTunDevice (filter (i: i.virtual) interfaces))
          // mapAttrs' createBridgeDevice cfg.bridges
diff --git a/nixos/modules/tasks/network-interfaces-systemd.nix b/nixos/modules/tasks/network-interfaces-systemd.nix
index 41deceb000e..23e1e611a71 100644
--- a/nixos/modules/tasks/network-interfaces-systemd.nix
+++ b/nixos/modules/tasks/network-interfaces-systemd.nix
@@ -94,7 +94,12 @@ in
           address = forEach (interfaceIps i)
             (ip: "${ip.address}/${toString ip.prefixLength}");
           networkConfig.IPv6PrivacyExtensions = "kernel";
-        } ];
+          linkConfig = optionalAttrs (i.macAddress != null) {
+            MACAddress = i.macAddress;
+          } // optionalAttrs (i.mtu != null) {
+            MTUBytes = toString i.mtu;
+          };
+        }];
       })))
       (mkMerge (flip mapAttrsToList cfg.bridges (name: bridge: {
         netdevs."40-${name}" = {
diff --git a/nixos/modules/tasks/network-interfaces.nix b/nixos/modules/tasks/network-interfaces.nix
index 63a79abd4eb..44677d417ea 100644
--- a/nixos/modules/tasks/network-interfaces.nix
+++ b/nixos/modules/tasks/network-interfaces.nix
@@ -1031,6 +1031,11 @@ in
         message = ''
           Temporary addresses are only needed when IPv6 is enabled.
         '';
+      })) ++ (forEach interfaces (i: {
+        assertion = (i.virtual && i.virtualType == "tun") -> i.macAddress == null;
+        message = ''
+          Setting a MAC Address for tun device ${i.name} isn't supported.
+        '';
       })) ++ [
         {
           assertion = cfg.hostId == null || (stringLength cfg.hostId == 8 && isHexString cfg.hostId);
@@ -1140,38 +1145,7 @@ in
           ${cfg.localCommands}
         '';
       };
-    } // (listToAttrs (forEach interfaces (i:
-      let
-        deviceDependency = if (config.boot.isContainer || i.name == "lo")
-          then []
-          else [ (subsystemDevice i.name) ];
-      in
-      nameValuePair "network-link-${i.name}"
-      { description = "Link configuration of ${i.name}";
-        wantedBy = [ "network-interfaces.target" ];
-        before = [ "network-interfaces.target" ];
-        bindsTo = deviceDependency;
-        after = [ "network-pre.target" ] ++ deviceDependency;
-        path = [ pkgs.iproute ];
-        serviceConfig = {
-          Type = "oneshot";
-          RemainAfterExit = true;
-        };
-        script =
-          ''
-            echo "Configuring link..."
-          '' + optionalString (i.macAddress != null) ''
-            echo "setting MAC address to ${i.macAddress}..."
-            ip link set "${i.name}" address "${i.macAddress}"
-          '' + optionalString (i.mtu != null) ''
-            echo "setting MTU to ${toString i.mtu}..."
-            ip link set "${i.name}" mtu "${toString i.mtu}"
-          '' + ''
-            echo -n "bringing up interface... "
-            ip link set "${i.name}" up && echo "done" || (echo "failed"; exit 1)
-          '';
-      })));
-
+    };
     services.mstpd = mkIf needsMstpd { enable = true; };
 
     virtualisation.vswitch = mkIf (cfg.vswitches != { }) { enable = true; };
diff --git a/nixos/modules/virtualisation/containers.nix b/nixos/modules/virtualisation/containers.nix
index dad211ef55b..7d184575640 100644
--- a/nixos/modules/virtualisation/containers.nix
+++ b/nixos/modules/virtualisation/containers.nix
@@ -1,824 +1,125 @@
 { config, lib, pkgs, ... }:
-
-with lib;
-
 let
-
-  # The container's init script, a small wrapper around the regular
-  # NixOS stage-2 init script.
-  containerInit = (cfg:
-    let
-      renderExtraVeth = (name: cfg:
-        ''
-        echo "Bringing ${name} up"
-        ip link set dev ${name} up
-        ${optionalString (cfg.localAddress != null) ''
-          echo "Setting ip for ${name}"
-          ip addr add ${cfg.localAddress} dev ${name}
-        ''}
-        ${optionalString (cfg.localAddress6 != null) ''
-          echo "Setting ip6 for ${name}"
-          ip -6 addr add ${cfg.localAddress6} dev ${name}
-        ''}
-        ${optionalString (cfg.hostAddress != null) ''
-          echo "Setting route to host for ${name}"
-          ip route add ${cfg.hostAddress} dev ${name}
-        ''}
-        ${optionalString (cfg.hostAddress6 != null) ''
-          echo "Setting route6 to host for ${name}"
-          ip -6 route add ${cfg.hostAddress6} dev ${name}
-        ''}
-        ''
-        );
-    in
-      pkgs.writeScript "container-init"
-      ''
-        #! ${pkgs.runtimeShell} -e
-
-        # Initialise the container side of the veth pair.
-        if [ -n "$HOST_ADDRESS" ]   || [ -n "$HOST_ADDRESS6" ]  ||
-           [ -n "$LOCAL_ADDRESS" ]  || [ -n "$LOCAL_ADDRESS6" ] ||
-           [ -n "$HOST_BRIDGE" ]; then
-          ip link set host0 name eth0
-          ip link set dev eth0 up
-
-          if [ -n "$LOCAL_ADDRESS" ]; then
-            ip addr add $LOCAL_ADDRESS dev eth0
-          fi
-          if [ -n "$LOCAL_ADDRESS6" ]; then
-            ip -6 addr add $LOCAL_ADDRESS6 dev eth0
-          fi
-          if [ -n "$HOST_ADDRESS" ]; then
-            ip route add $HOST_ADDRESS dev eth0
-            ip route add default via $HOST_ADDRESS
-          fi
-          if [ -n "$HOST_ADDRESS6" ]; then
-            ip -6 route add $HOST_ADDRESS6 dev eth0
-            ip -6 route add default via $HOST_ADDRESS6
-          fi
-
-          ${concatStringsSep "\n" (mapAttrsToList renderExtraVeth cfg.extraVeths)}
-        fi
-
-        # Start the regular stage 1 script.
-        exec "$1"
-      ''
-    );
-
-  nspawnExtraVethArgs = (name: cfg: "--network-veth-extra=${name}");
-
-  startScript = cfg:
-    ''
-      mkdir -p -m 0755 "$root/etc" "$root/var/lib"
-      mkdir -p -m 0700 "$root/var/lib/private" "$root/root" /run/containers
-      if ! [ -e "$root/etc/os-release" ]; then
-        touch "$root/etc/os-release"
-      fi
-
-      if ! [ -e "$root/etc/machine-id" ]; then
-        touch "$root/etc/machine-id"
-      fi
-
-      mkdir -p -m 0755 \
-        "/nix/var/nix/profiles/per-container/$INSTANCE" \
-        "/nix/var/nix/gcroots/per-container/$INSTANCE"
-
-      cp --remove-destination /etc/resolv.conf "$root/etc/resolv.conf"
-
-      if [ "$PRIVATE_NETWORK" = 1 ]; then
-        extraFlags+=" --private-network"
-      fi
-
-      if [ -n "$HOST_ADDRESS" ]  || [ -n "$LOCAL_ADDRESS" ] ||
-         [ -n "$HOST_ADDRESS6" ] || [ -n "$LOCAL_ADDRESS6" ]; then
-        extraFlags+=" --network-veth"
-      fi
-
-      if [ -n "$HOST_PORT" ]; then
-        OIFS=$IFS
-        IFS=","
-        for i in $HOST_PORT
-        do
-            extraFlags+=" --port=$i"
-        done
-        IFS=$OIFS
-      fi
-
-      if [ -n "$HOST_BRIDGE" ]; then
-        extraFlags+=" --network-bridge=$HOST_BRIDGE"
-      fi
-
-      extraFlags+=" ${concatStringsSep " " (mapAttrsToList nspawnExtraVethArgs cfg.extraVeths)}"
-
-      for iface in $INTERFACES; do
-        extraFlags+=" --network-interface=$iface"
-      done
-
-      for iface in $MACVLANS; do
-        extraFlags+=" --network-macvlan=$iface"
-      done
-
-      # If the host is 64-bit and the container is 32-bit, add a
-      # --personality flag.
-      ${optionalString (config.nixpkgs.localSystem.system == "x86_64-linux") ''
-        if [ "$(< ''${SYSTEM_PATH:-/nix/var/nix/profiles/per-container/$INSTANCE/system}/system)" = i686-linux ]; then
-          extraFlags+=" --personality=x86"
-        fi
-      ''}
-
-      # Run systemd-nspawn without startup notification (we'll
-      # wait for the container systemd to signal readiness).
-      exec ${config.systemd.package}/bin/systemd-nspawn \
-        --keep-unit \
-        -M "$INSTANCE" -D "$root" $extraFlags \
-        $EXTRA_NSPAWN_FLAGS \
-        --notify-ready=yes \
-        --bind-ro=/nix/store \
-        --bind-ro=/nix/var/nix/db \
-        --bind-ro=/nix/var/nix/daemon-socket \
-        --bind="/nix/var/nix/profiles/per-container/$INSTANCE:/nix/var/nix/profiles" \
-        --bind="/nix/var/nix/gcroots/per-container/$INSTANCE:/nix/var/nix/gcroots" \
-        ${optionalString (!cfg.ephemeral) "--link-journal=try-guest"} \
-        --setenv PRIVATE_NETWORK="$PRIVATE_NETWORK" \
-        --setenv HOST_BRIDGE="$HOST_BRIDGE" \
-        --setenv HOST_ADDRESS="$HOST_ADDRESS" \
-        --setenv LOCAL_ADDRESS="$LOCAL_ADDRESS" \
-        --setenv HOST_ADDRESS6="$HOST_ADDRESS6" \
-        --setenv LOCAL_ADDRESS6="$LOCAL_ADDRESS6" \
-        --setenv HOST_PORT="$HOST_PORT" \
-        --setenv PATH="$PATH" \
-        ${optionalString cfg.ephemeral "--ephemeral"} \
-        ${if cfg.additionalCapabilities != null && cfg.additionalCapabilities != [] then
-          ''--capability="${concatStringsSep "," cfg.additionalCapabilities}"'' else ""
-        } \
-        ${if cfg.tmpfs != null && cfg.tmpfs != [] then
-          ''--tmpfs=${concatStringsSep " --tmpfs=" cfg.tmpfs}'' else ""
-        } \
-        ${containerInit cfg} "''${SYSTEM_PATH:-/nix/var/nix/profiles/system}/init"
-    '';
-
-  preStartScript = cfg:
-    ''
-      # Clean up existing machined registration and interfaces.
-      machinectl terminate "$INSTANCE" 2> /dev/null || true
-
-      if [ -n "$HOST_ADDRESS" ]  || [ -n "$LOCAL_ADDRESS" ] ||
-         [ -n "$HOST_ADDRESS6" ] || [ -n "$LOCAL_ADDRESS6" ]; then
-        ip link del dev "ve-$INSTANCE" 2> /dev/null || true
-        ip link del dev "vb-$INSTANCE" 2> /dev/null || true
-      fi
-
-      ${concatStringsSep "\n" (
-        mapAttrsToList (name: cfg:
-          ''ip link del dev ${name} 2> /dev/null || true ''
-        ) cfg.extraVeths
-      )}
-   '';
-
-  postStartScript = (cfg:
-    let
-      ipcall = cfg: ipcmd: variable: attribute:
-        if cfg.${attribute} == null then
-          ''
-            if [ -n "${variable}" ]; then
-              ${ipcmd} add ${variable} dev $ifaceHost
-            fi
-          ''
-        else
-          ''${ipcmd} add ${cfg.${attribute}} dev $ifaceHost'';
-      renderExtraVeth = name: cfg:
-        if cfg.hostBridge != null then
-          ''
-            # Add ${name} to bridge ${cfg.hostBridge}
-            ip link set dev ${name} master ${cfg.hostBridge} up
-          ''
-        else
-          ''
-            echo "Bring ${name} up"
-            ip link set dev ${name} up
-            # Set IPs and routes for ${name}
-            ${optionalString (cfg.hostAddress != null) ''
-              ip addr add ${cfg.hostAddress} dev ${name}
-            ''}
-            ${optionalString (cfg.hostAddress6 != null) ''
-              ip -6 addr add ${cfg.hostAddress6} dev ${name}
-            ''}
-            ${optionalString (cfg.localAddress != null) ''
-              ip route add ${cfg.localAddress} dev ${name}
-            ''}
-            ${optionalString (cfg.localAddress6 != null) ''
-              ip -6 route add ${cfg.localAddress6} dev ${name}
-            ''}
-          '';
-    in
-      ''
-        if [ -n "$HOST_ADDRESS" ]  || [ -n "$LOCAL_ADDRESS" ] ||
-           [ -n "$HOST_ADDRESS6" ] || [ -n "$LOCAL_ADDRESS6" ]; then
-          if [ -z "$HOST_BRIDGE" ]; then
-            ifaceHost=ve-$INSTANCE
-            ip link set dev $ifaceHost up
-
-            ${ipcall cfg "ip addr" "$HOST_ADDRESS" "hostAddress"}
-            ${ipcall cfg "ip -6 addr" "$HOST_ADDRESS6" "hostAddress6"}
-            ${ipcall cfg "ip route" "$LOCAL_ADDRESS" "localAddress"}
-            ${ipcall cfg "ip -6 route" "$LOCAL_ADDRESS6" "localAddress6"}
-          fi
-          ${concatStringsSep "\n" (mapAttrsToList renderExtraVeth cfg.extraVeths)}
-        fi
-      ''
-  );
-
-  serviceDirectives = cfg: {
-    ExecReload = pkgs.writeScript "reload-container"
-      ''
-        #! ${pkgs.runtimeShell} -e
-        ${pkgs.nixos-container}/bin/nixos-container run "$INSTANCE" -- \
-          bash --login -c "''${SYSTEM_PATH:-/nix/var/nix/profiles/system}/bin/switch-to-configuration test"
-      '';
-
-    SyslogIdentifier = "container %i";
-
-    EnvironmentFile = "-/etc/containers/%i.conf";
-
-    Type = "notify";
-
-    RuntimeDirectory = lib.optional cfg.ephemeral "containers/%i";
-
-    # Note that on reboot, systemd-nspawn returns 133, so this
-    # unit will be restarted. On poweroff, it returns 0, so the
-    # unit won't be restarted.
-    RestartForceExitStatus = "133";
-    SuccessExitStatus = "133";
-
-    # Some containers take long to start
-    # especially when you automatically start many at once
-    TimeoutStartSec = cfg.timeoutStartSec;
-
-    Restart = "on-failure";
-
-    Slice = "machine.slice";
-    Delegate = true;
-
-    # Hack: we don't want to kill systemd-nspawn, since we call
-    # "machinectl poweroff" in preStop to shut down the
-    # container cleanly. But systemd requires sending a signal
-    # (at least if we want remaining processes to be killed
-    # after the timeout). So send an ignored signal.
-    KillMode = "mixed";
-    KillSignal = "WINCH";
-
-    DevicePolicy = "closed";
-    DeviceAllow = map (d: "${d.node} ${d.modifier}") cfg.allowedDevices;
+  cfg = config.virtualisation.containers;
+
+  inherit (lib) mkOption types;
+
+  # Once https://github.com/NixOS/nixpkgs/pull/75584 is merged, we can use the TOML generator
+  toTOML = name: value: pkgs.runCommandNoCC name {
+    nativeBuildInputs = [ pkgs.remarshal ];
+    value = builtins.toJSON value;
+    passAsFile = [ "value" ];
+  } ''
+    json2toml "$valuePath" "$out"
+  '';
+
+  # Copy configuration files to avoid having the entire sources in the system closure
+  copyFile = filePath: pkgs.runCommandNoCC (builtins.unsafeDiscardStringContext (builtins.baseNameOf filePath)) {} ''
+    cp ${filePath} $out
+  '';
+in
+{
+  meta = {
+    maintainers = [] ++ lib.teams.podman.members;
   };
 
+  options.virtualisation.containers = {
 
-  system = config.nixpkgs.localSystem.system;
-
-  bindMountOpts = { name, ... }: {
-
-    options = {
-      mountPoint = mkOption {
-        example = "/mnt/usb";
-        type = types.str;
-        description = "Mount point on the container file system.";
-      };
-      hostPath = mkOption {
-        default = null;
-        example = "/home/alice";
-        type = types.nullOr types.str;
-        description = "Location of the host path to be mounted.";
-      };
-      isReadOnly = mkOption {
-        default = true;
+    enable =
+      mkOption {
         type = types.bool;
-        description = "Determine whether the mounted path will be accessed in read-only mode.";
+        default = false;
+        description = ''
+          This option enables the common /etc/containers configuration module.
+        '';
       };
-    };
-
-    config = {
-      mountPoint = mkDefault name;
-    };
 
-  };
-
-  allowedDeviceOpts = { ... }: {
-    options = {
-      node = mkOption {
-        example = "/dev/net/tun";
-        type = types.str;
-        description = "Path to device node";
-      };
-      modifier = mkOption {
-        example = "rw";
-        type = types.str;
+    registries = {
+      search = mkOption {
+        type = types.listOf types.str;
+        default = [ "docker.io" "quay.io" ];
         description = ''
-          Device node access modifier. Takes a combination
-          <literal>r</literal> (read), <literal>w</literal> (write), and
-          <literal>m</literal> (mknod). See the
-          <literal>systemd.resource-control(5)</literal> man page for more
-          information.'';
+          List of registries to search.
+        '';
       };
-    };
-  };
-
-
-  mkBindFlag = d:
-               let flagPrefix = if d.isReadOnly then " --bind-ro=" else " --bind=";
-                   mountstr = if d.hostPath != null then "${d.hostPath}:${d.mountPoint}" else "${d.mountPoint}";
-               in flagPrefix + mountstr ;
 
-  mkBindFlags = bs: concatMapStrings mkBindFlag (lib.attrValues bs);
+      insecure = mkOption {
+        default = [];
+        type = types.listOf types.str;
+        description = ''
+          List of insecure registries.
+        '';
+      };
 
-  networkOptions = {
-    hostBridge = mkOption {
-      type = types.nullOr types.str;
-      default = null;
-      example = "br0";
-      description = ''
-        Put the host-side of the veth-pair into the named bridge.
-        Only one of hostAddress* or hostBridge can be given.
-      '';
+      block = mkOption {
+        default = [];
+        type = types.listOf types.str;
+        description = ''
+          List of blocked registries.
+        '';
+      };
     };
 
-    forwardPorts = mkOption {
-      type = types.listOf (types.submodule {
-        options = {
-          protocol = mkOption {
-            type = types.str;
-            default = "tcp";
-            description = "The protocol specifier for port forwarding between host and container";
-          };
-          hostPort = mkOption {
-            type = types.int;
-            description = "Source port of the external interface on host";
-          };
-          containerPort = mkOption {
-            type = types.nullOr types.int;
-            default = null;
-            description = "Target port of container";
+    policy = mkOption {
+      default = {};
+      type = types.attrs;
+      example = lib.literalExample ''
+        {
+          default = [ { type = "insecureAcceptAnything"; } ];
+          transports = {
+            docker-daemon = {
+              "" = [ { type = "insecureAcceptAnything"; } ];
+            };
           };
-        };
-      });
-      default = [];
-      example = [ { protocol = "tcp"; hostPort = 8080; containerPort = 80; } ];
-      description = ''
-        List of forwarded ports from host to container. Each forwarded port
-        is specified by protocol, hostPort and containerPort. By default,
-        protocol is tcp and hostPort and containerPort are assumed to be
-        the same if containerPort is not explicitly given.
-      '';
-    };
-
-
-    hostAddress = mkOption {
-      type = types.nullOr types.str;
-      default = null;
-      example = "10.231.136.1";
-      description = ''
-        The IPv4 address assigned to the host interface.
-        (Not used when hostBridge is set.)
+        }
       '';
-    };
-
-    hostAddress6 = mkOption {
-      type = types.nullOr types.str;
-      default = null;
-      example = "fc00::1";
       description = ''
-        The IPv6 address assigned to the host interface.
-        (Not used when hostBridge is set.)
+        Signature verification policy file.
+        If this option is left empty, the default policy file from
+        <literal>skopeo</literal> is used.
       '';
     };
 
-    localAddress = mkOption {
-      type = types.nullOr types.str;
-      default = null;
-      example = "10.231.136.2";
-      description = ''
-        The IPv4 address assigned to the interface in the container.
-        If a hostBridge is used, this should be given with netmask to access
-        the whole network. Otherwise the default netmask is /32 and routing is
-        set up from localAddress to hostAddress and back.
-      '';
-    };
-
-    localAddress6 = mkOption {
-      type = types.nullOr types.str;
-      default = null;
-      example = "fc00::2";
+    users = mkOption {
+      default = [];
+      type = types.listOf types.str;
       description = ''
-        The IPv6 address assigned to the interface in the container.
-        If a hostBridge is used, this should be given with netmask to access
-        the whole network. Otherwise the default netmask is /128 and routing is
-        set up from localAddress6 to hostAddress6 and back.
+        List of users to set up subuid/subgid mappings for.
+        This is a requirement for running rootless containers.
       '';
     };
 
   };
 
-  dummyConfig =
-    {
-      extraVeths = {};
-      additionalCapabilities = [];
-      ephemeral = false;
-      timeoutStartSec = "15s";
-      allowedDevices = [];
-      hostAddress = null;
-      hostAddress6 = null;
-      localAddress = null;
-      localAddress6 = null;
-      tmpfs = null;
-    };
-
-in
-
-{
-  options = {
-
-    boot.isContainer = mkOption {
-      type = types.bool;
-      default = false;
-      description = ''
-        Whether this NixOS machine is a lightweight container running
-        in another NixOS system.
-      '';
-    };
+  config = lib.mkIf cfg.enable {
 
-    boot.enableContainers = mkOption {
-      type = types.bool;
-      default = !config.boot.isContainer;
-      description = ''
-        Whether to enable support for NixOS containers.
-      '';
+    environment.etc."containers/registries.conf".source = toTOML "registries.conf" {
+      registries = lib.mapAttrs (n: v: { registries = v; }) cfg.registries;
     };
 
-    containers = mkOption {
-      type = types.attrsOf (types.submodule (
-        { config, options, name, ... }:
-        {
-          options = {
-
-            config = mkOption {
-              description = ''
-                A specification of the desired configuration of this
-                container, as a NixOS module.
-              '';
-              type = lib.mkOptionType {
-                name = "Toplevel NixOS config";
-                merge = loc: defs: (import ../../lib/eval-config.nix {
-                  inherit system;
-                  modules =
-                    let
-                      extraConfig = {
-                        _file = "module at ${__curPos.file}:${toString __curPos.line}";
-                        config = {
-                          boot.isContainer = true;
-                          networking.hostName = mkDefault name;
-                          networking.useDHCP = false;
-                          assertions = [
-                            {
-                              assertion =  config.privateNetwork -> stringLength name < 12;
-                              message = ''
-                                Container name `${name}` is too long: When `privateNetwork` is enabled, container names can
-                                not be longer than 11 characters, because the container's interface name is derived from it.
-                                This might be fixed in the future. See https://github.com/NixOS/nixpkgs/issues/38509
-                              '';
-                            }
-                          ];
-                        };
-                      };
-                    in [ extraConfig ] ++ (map (x: x.value) defs);
-                  prefix = [ "containers" name ];
-                }).config;
-              };
-            };
-
-            path = mkOption {
-              type = types.path;
-              example = "/nix/var/nix/profiles/containers/webserver";
-              description = ''
-                As an alternative to specifying
-                <option>config</option>, you can specify the path to
-                the evaluated NixOS system configuration, typically a
-                symlink to a system profile.
-              '';
-            };
-
-            additionalCapabilities = mkOption {
-              type = types.listOf types.str;
-              default = [];
-              example = [ "CAP_NET_ADMIN" "CAP_MKNOD" ];
-              description = ''
-                Grant additional capabilities to the container.  See the
-                capabilities(7) and systemd-nspawn(1) man pages for more
-                information.
-              '';
-            };
-
-            ephemeral = mkOption {
-              type = types.bool;
-              default = false;
-              description = ''
-                Runs container in ephemeral mode with the empty root filesystem at boot.
-                This way container will be bootstrapped from scratch on each boot
-                and will be cleaned up on shutdown leaving no traces behind.
-                Useful for completely stateless, reproducible containers.
-
-                Note that this option might require to do some adjustments to the container configuration,
-                e.g. you might want to set
-                <varname>systemd.network.networks.$interface.dhcpConfig.ClientIdentifier</varname> to "mac"
-                if you use <varname>macvlans</varname> option.
-                This way dhcp client identifier will be stable between the container restarts.
-
-                Note that the container journal will not be linked to the host if this option is enabled.
-              '';
-            };
-
-            enableTun = mkOption {
-              type = types.bool;
-              default = false;
-              description = ''
-                Allows the container to create and setup tunnel interfaces
-                by granting the <literal>NET_ADMIN</literal> capability and
-                enabling access to <literal>/dev/net/tun</literal>.
-              '';
-            };
-
-            privateNetwork = mkOption {
-              type = types.bool;
-              default = false;
-              description = ''
-                Whether to give the container its own private virtual
-                Ethernet interface.  The interface is called
-                <literal>eth0</literal>, and is hooked up to the interface
-                <literal>ve-<replaceable>container-name</replaceable></literal>
-                on the host.  If this option is not set, then the
-                container shares the network interfaces of the host,
-                and can bind to any port on any interface.
-              '';
-            };
-
-            interfaces = mkOption {
-              type = types.listOf types.str;
-              default = [];
-              example = [ "eth1" "eth2" ];
-              description = ''
-                The list of interfaces to be moved into the container.
-              '';
-            };
-
-            macvlans = mkOption {
-              type = types.listOf types.str;
-              default = [];
-              example = [ "eth1" "eth2" ];
-              description = ''
-                The list of host interfaces from which macvlans will be
-                created. For each interface specified, a macvlan interface
-                will be created and moved to the container.
-              '';
-            };
-
-            extraVeths = mkOption {
-              type = with types; attrsOf (submodule { options = networkOptions; });
-              default = {};
-              description = ''
-                Extra veth-pairs to be created for the container
-              '';
-            };
-
-            autoStart = mkOption {
-              type = types.bool;
-              default = false;
-              description = ''
-                Whether the container is automatically started at boot-time.
-              '';
-            };
-
-		    timeoutStartSec = mkOption {
-		      type = types.str;
-		      default = "1min";
-		      description = ''
-		        Time for the container to start. In case of a timeout,
-		        the container processes get killed.
-		        See <citerefentry><refentrytitle>systemd.time</refentrytitle>
-		        <manvolnum>7</manvolnum></citerefentry>
-		        for more information about the format.
-		       '';
-		    };
-
-            bindMounts = mkOption {
-              type = with types; loaOf (submodule bindMountOpts);
-              default = {};
-              example = literalExample ''
-                { "/home" = { hostPath = "/home/alice";
-                              isReadOnly = false; };
-                }
-              '';
-
-              description =
-                ''
-                  An extra list of directories that is bound to the container.
-                '';
-            };
-
-            allowedDevices = mkOption {
-              type = with types; listOf (submodule allowedDeviceOpts);
-              default = [];
-              example = [ { node = "/dev/net/tun"; modifier = "rw"; } ];
-              description = ''
-                A list of device nodes to which the containers has access to.
-              '';
-            };
-
-            tmpfs = mkOption {
-              type = types.listOf types.str;
-              default = [];
-              example = [ "/var" ];
-              description = ''
-                Mounts a set of tmpfs file systems into the container.
-                Multiple paths can be specified.
-                Valid items must conform to the --tmpfs argument
-                of systemd-nspawn. See systemd-nspawn(1) for details.
-              '';
-            };
-
-            extraFlags = mkOption {
-              type = types.listOf types.str;
-              default = [];
-              example = [ "--drop-capability=CAP_SYS_CHROOT" ];
-              description = ''
-                Extra flags passed to the systemd-nspawn command.
-                See systemd-nspawn(1) for details.
-              '';
-            };
-
-          } // networkOptions;
-
-          config = mkMerge
-            [
-              (mkIf options.config.isDefined {
-                path = config.config.system.build.toplevel;
-              })
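+    # Give each configured user a disjoint 65536-wide subuid/subgid range,
+    # starting at offset 100000, as needed for rootless containers.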
+    users.extraUsers = builtins.listToAttrs (
+      (
+        builtins.foldl' (
+          acc: user: {
+            values = acc.values ++ [
+              {
+                name = user;
+                value = {
+                  subUidRanges = [ { startUid = acc.offset; count = 65536; } ];
+                  subGidRanges = [ { startGid = acc.offset; count = 65536; } ];
+                };
+              }
             ];
-        }));
-
-      default = {};
-      example = literalExample
-        ''
-          { webserver =
-              { path = "/nix/var/nix/profiles/webserver";
-              };
-            database =
-              { config =
-                  { config, pkgs, ... }:
-                  { services.postgresql.enable = true;
-                    services.postgresql.package = pkgs.postgresql_9_6;
-
-                    system.stateVersion = "17.03";
-                  };
-              };
+            offset = acc.offset + 65536;
           }
-        '';
-      description = ''
-        A set of NixOS system configurations to be run as lightweight
-        containers.  Each container appears as a service
-        <literal>container-<replaceable>name</replaceable></literal>
-        on the host system, allowing it to be started and stopped via
-        <command>systemctl</command>.
-      '';
-    };
+        )
+        { values = []; offset = 100000; } (lib.unique cfg.users)
+      ).values
+    );
 
+    environment.etc."containers/policy.json".source =
+      if cfg.policy != {} then pkgs.writeText "policy.json" (builtins.toJSON cfg.policy)
+      else copyFile "${pkgs.skopeo.src}/default-policy.json";
   };
 
-
-  config = mkIf (config.boot.enableContainers) (let
-
-    unit = {
-      description = "Container '%i'";
-
-      unitConfig.RequiresMountsFor = "/var/lib/containers/%i";
-
-      path = [ pkgs.iproute ];
-
-      environment = {
-        root = "/var/lib/containers/%i";
-        INSTANCE = "%i";
-      };
-
-      preStart = preStartScript dummyConfig;
-
-      script = startScript dummyConfig;
-
-      postStart = postStartScript dummyConfig;
-
-      preStop = "machinectl poweroff $INSTANCE";
-
-      restartIfChanged = false;
-
-      serviceConfig = serviceDirectives dummyConfig;
-    };
-  in {
-    systemd.targets.multi-user.wants = [ "machines.target" ];
-
-    systemd.services = listToAttrs (filter (x: x.value != null) (
-      # The generic container template used by imperative containers
-      [{ name = "container@"; value = unit; }]
-      # declarative containers
-      ++ (mapAttrsToList (name: cfg: nameValuePair "container@${name}" (let
-          containerConfig = cfg // (
-          if cfg.enableTun then
-            {
-              allowedDevices = cfg.allowedDevices
-                ++ [ { node = "/dev/net/tun"; modifier = "rw"; } ];
-              additionalCapabilities = cfg.additionalCapabilities
-                ++ [ "CAP_NET_ADMIN" ];
-            }
-          else {});
-        in
-          recursiveUpdate unit {
-            preStart = preStartScript containerConfig;
-            script = startScript containerConfig;
-            postStart = postStartScript containerConfig;
-            serviceConfig = serviceDirectives containerConfig;
-            unitConfig.RequiresMountsFor = lib.optional (!containerConfig.ephemeral) "/var/lib/containers/%i";
-            environment.root = if containerConfig.ephemeral then "/run/containers/%i" else "/var/lib/containers/%i";
-          } // (
-          if containerConfig.autoStart then
-            {
-              wantedBy = [ "machines.target" ];
-              wants = [ "network.target" ];
-              after = [ "network.target" ];
-              restartTriggers = [
-                containerConfig.path
-                config.environment.etc."containers/${name}.conf".source
-              ];
-              restartIfChanged = true;
-            }
-          else {})
-      )) config.containers)
-    ));
-
-    # Generate a configuration file in /etc/containers for each
-    # container so that container@.target can get the container
-    # configuration.
-    environment.etc =
-      let mkPortStr = p: p.protocol + ":" + (toString p.hostPort) + ":" + (if p.containerPort == null then toString p.hostPort else toString p.containerPort);
-      in mapAttrs' (name: cfg: nameValuePair "containers/${name}.conf"
-      { text =
-          ''
-            SYSTEM_PATH=${cfg.path}
-            ${optionalString cfg.privateNetwork ''
-              PRIVATE_NETWORK=1
-              ${optionalString (cfg.hostBridge != null) ''
-                HOST_BRIDGE=${cfg.hostBridge}
-              ''}
-              ${optionalString (length cfg.forwardPorts > 0) ''
-                HOST_PORT=${concatStringsSep "," (map mkPortStr cfg.forwardPorts)}
-              ''}
-              ${optionalString (cfg.hostAddress != null) ''
-                HOST_ADDRESS=${cfg.hostAddress}
-              ''}
-              ${optionalString (cfg.hostAddress6 != null) ''
-                HOST_ADDRESS6=${cfg.hostAddress6}
-              ''}
-              ${optionalString (cfg.localAddress != null) ''
-                LOCAL_ADDRESS=${cfg.localAddress}
-              ''}
-              ${optionalString (cfg.localAddress6 != null) ''
-                LOCAL_ADDRESS6=${cfg.localAddress6}
-              ''}
-            ''}
-            INTERFACES="${toString cfg.interfaces}"
-            MACVLANS="${toString cfg.macvlans}"
-            ${optionalString cfg.autoStart ''
-              AUTO_START=1
-            ''}
-            EXTRA_NSPAWN_FLAGS="${mkBindFlags cfg.bindMounts +
-              optionalString (cfg.extraFlags != [])
-                (" " + concatStringsSep " " cfg.extraFlags)}"
-          '';
-      }) config.containers;
-
-    # Generate /etc/hosts entries for the containers.
-    networking.extraHosts = concatStrings (mapAttrsToList (name: cfg: optionalString (cfg.localAddress != null)
-      ''
-        ${head (splitString "/" cfg.localAddress)} ${name}.containers
-      '') config.containers);
-
-    networking.dhcpcd.denyInterfaces = [ "ve-*" "vb-*" ];
-
-    services.udev.extraRules = optionalString config.networking.networkmanager.enable ''
-      # Don't manage interfaces created by nixos-container.
-      ENV{INTERFACE}=="v[eb]-*", ENV{NM_UNMANAGED}="1"
-    '';
-
-    environment.systemPackages = [ pkgs.nixos-container ];
-
-    boot.kernelModules = [
-      "bridge"
-      "macvlan"
-      "tap"
-      "tun"
-    ];
-  });
 }
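
For illustration, a minimal sketch (not part of this diff) of a host configuration using the virtualisation.containers options introduced above; the insecure registry and the user name are made-up examples:

    {
      virtualisation.containers = {
        enable = true;
        registries.search = [ "docker.io" "quay.io" ];
        registries.insecure = [ "registry.example.lan:5000" ];  # assumed local registry
        users = [ "alice" ];  # assumed user; gets a subuid/subgid range for rootless containers
        policy = {
          default = [ { type = "insecureAcceptAnything"; } ];
        };
      };
    }
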
diff --git a/nixos/modules/virtualisation/cri-o.nix b/nixos/modules/virtualisation/cri-o.nix
index 14a435f6c8b..2af4214302d 100644
--- a/nixos/modules/virtualisation/cri-o.nix
+++ b/nixos/modules/virtualisation/cri-o.nix
@@ -4,8 +4,21 @@ with lib;
 
 let
   cfg = config.virtualisation.cri-o;
+
+  # Copy configuration files to avoid having the entire sources in the system closure
+  copyFile = filePath: pkgs.runCommandNoCC (builtins.unsafeDiscardStringContext (builtins.baseNameOf filePath)) {} ''
+    cp ${filePath} $out
+  '';
 in
 {
+  imports = [
+    (mkRenamedOptionModule [ "virtualisation" "cri-o" "registries" ] [ "virtualisation" "containers" "registries" "search" ])
+  ];
+
+  meta = {
+    maintainers = lib.teams.podman.members;
+  };
+
   options.virtualisation.cri-o = {
     enable = mkEnableOption "Container Runtime Interface for OCI (CRI-O)";
 
@@ -32,20 +45,14 @@ in
       default = "/pause";
       description = "Pause command to be executed";
     };
-
-    registries = mkOption {
-      type = types.listOf types.str;
-      default = [ "docker.io" "quay.io" ];
-      description = "Registries to be configured for unqualified image pull";
-    };
   };
 
   config = mkIf cfg.enable {
     environment.systemPackages = with pkgs;
-      [ cri-o cri-tools conmon cni-plugins iptables runc utillinux ];
-    environment.etc."crictl.yaml".text = ''
-      runtime-endpoint: unix:///var/run/crio/crio.sock
-    '';
+      [ cri-o cri-tools conmon iptables runc utillinux ];
+
+    environment.etc."crictl.yaml".source = copyFile "${pkgs.cri-o.src}/crictl.yaml";
+
     environment.etc."crio/crio.conf".text = ''
       [crio]
       storage_driver = "${cfg.storageDriver}"
@@ -53,35 +60,21 @@ in
       [crio.image]
       pause_image = "${cfg.pauseImage}"
       pause_command = "${cfg.pauseCommand}"
-      registries = [
-        ${concatMapStringsSep ", " (x: "\"" + x + "\"") cfg.registries}
-      ]
+
+      [crio.network]
+      plugin_dirs = ["${pkgs.cni-plugins}/bin/"]
+      network_dir = "/etc/cni/net.d/"
 
       [crio.runtime]
       conmon = "${pkgs.conmon}/bin/conmon"
       log_level = "${cfg.logLevel}"
       manage_network_ns_lifecycle = true
     '';
-    environment.etc."containers/policy.json".text = ''
-      {"default": [{"type": "insecureAcceptAnything"}]}
-    '';
-    environment.etc."cni/net.d/20-cri-o-bridge.conf".text = ''
-      {
-        "cniVersion": "0.3.1",
-        "name": "crio-bridge",
-        "type": "bridge",
-        "bridge": "cni0",
-        "isGateway": true,
-        "ipMasq": true,
-        "ipam": {
-          "type": "host-local",
-          "subnet": "10.88.0.0/16",
-          "routes": [
-              { "dst": "0.0.0.0/0" }
-          ]
-        }
-      }
-    '';
+
+    environment.etc."cni/net.d/10-crio-bridge.conf".source = copyFile "${pkgs.cri-o.src}/contrib/cni/10-crio-bridge.conf";
+
+    # Enable common /etc/containers configuration
+    virtualisation.containers.enable = true;
 
     systemd.services.crio = {
       description = "Container Runtime Interface for OCI (CRI-O)";
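
For reference, a minimal sketch of enabling CRI-O after this change; illustrative only, the registry list simply mirrors the shared module's default:

    {
      virtualisation.cri-o.enable = true;
      # The old virtualisation.cri-o.registries option is renamed; unqualified-image
      # registries are now configured through the shared /etc/containers module.
      virtualisation.containers.registries.search = [ "docker.io" "quay.io" ];
    }
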
diff --git a/nixos/modules/virtualisation/ec2-amis.nix b/nixos/modules/virtualisation/ec2-amis.nix
index 3b4e55d39d7..24de8cf1afb 100644
--- a/nixos/modules/virtualisation/ec2-amis.nix
+++ b/nixos/modules/virtualisation/ec2-amis.nix
@@ -291,21 +291,43 @@ let self = {
   "19.03".sa-east-1.hvm-ebs = "ami-0c6a43c6e0ad1f4e2";
   "19.03".ap-south-1.hvm-ebs = "ami-0303deb1b5890f878";
 
-  # 19.09.981.205691b7cbe
-  "19.09".eu-west-1.hvm-ebs = "ami-0ebd3156e21e9642f";
-  "19.09".eu-west-2.hvm-ebs = "ami-02a2b5480a79084b7";
-  "19.09".eu-west-3.hvm-ebs = "ami-09aa175c7588734f7";
-  "19.09".eu-central-1.hvm-ebs = "ami-00a7fafd7e237a330";
-  "19.09".us-east-1.hvm-ebs = "ami-00a8eeaf232a74f84";
-  "19.09".us-east-2.hvm-ebs = "ami-093efd3a57a1e03a8";
-  "19.09".us-west-1.hvm-ebs = "ami-0913e9a2b677fac30";
-  "19.09".us-west-2.hvm-ebs = "ami-02d9a19f77b47882a";
-  "19.09".ca-central-1.hvm-ebs = "ami-0627dd3f7b3627a29";
-  "19.09".ap-southeast-1.hvm-ebs = "ami-083614e4d08f2164d";
-  "19.09".ap-southeast-2.hvm-ebs = "ami-0048c704185ded6dc";
-  "19.09".ap-northeast-1.hvm-ebs = "ami-0329e7fc2d7f60bd0";
-  "19.09".ap-northeast-2.hvm-ebs = "ami-03d4ae7d0b5fc364f";
-  "19.09".ap-south-1.hvm-ebs = "ami-0b599690b35aeef23";
+  # 19.09.2243.84af403f54f
+  "19.09".eu-west-1.hvm-ebs = "ami-071082f0fa035374f";
+  "19.09".eu-west-2.hvm-ebs = "ami-0d9dc33c54d1dc4c3";
+  "19.09".eu-west-3.hvm-ebs = "ami-09566799591d1bfed";
+  "19.09".eu-central-1.hvm-ebs = "ami-015f8efc2be419b79";
+  "19.09".eu-north-1.hvm-ebs = "ami-07fc0a32d885e01ed";
+  "19.09".us-east-1.hvm-ebs = "ami-03330d8b51287412f";
+  "19.09".us-east-2.hvm-ebs = "ami-0518b4c84972e967f";
+  "19.09".us-west-1.hvm-ebs = "ami-06ad07e61a353b4a6";
+  "19.09".us-west-2.hvm-ebs = "ami-0e31e30925cf3ce4e";
+  "19.09".ca-central-1.hvm-ebs = "ami-07df50fc76702a36d";
+  "19.09".ap-southeast-1.hvm-ebs = "ami-0f71ae5d4b0b78d95";
+  "19.09".ap-southeast-2.hvm-ebs = "ami-057bbf2b4bd62d210";
+  "19.09".ap-northeast-1.hvm-ebs = "ami-02a62555ca182fb5b";
+  "19.09".ap-northeast-2.hvm-ebs = "ami-0219dde0e6b7b7b93";
+  "19.09".ap-south-1.hvm-ebs = "ami-066f7f2a895c821a1";
+  "19.09".ap-east-1.hvm-ebs = "ami-055b2348db2827ff1";
+  "19.09".sa-east-1.hvm-ebs = "ami-018aab68377227e06";
 
-  latest = self."19.09";
+  # 20.03.1554.94e39623a49
+  "20.03".eu-west-1.hvm-ebs = "ami-02c34db5766cc7013";
+  "20.03".eu-west-2.hvm-ebs = "ami-0e32bd8c7853883f1";
+  "20.03".eu-west-3.hvm-ebs = "ami-061edb1356c1d69fd";
+  "20.03".eu-central-1.hvm-ebs = "ami-0a1a94722dcbff94c";
+  "20.03".eu-north-1.hvm-ebs = "ami-02699abfacbb6464b";
+  "20.03".us-east-1.hvm-ebs = "ami-0c5e7760748b74e85";
+  "20.03".us-east-2.hvm-ebs = "ami-030296bb256764655";
+  "20.03".us-west-1.hvm-ebs = "ami-050be818e0266b741";
+  "20.03".us-west-2.hvm-ebs = "ami-06562f78dca68eda2";
+  "20.03".ca-central-1.hvm-ebs = "ami-02365684a173255c7";
+  "20.03".ap-southeast-1.hvm-ebs = "ami-0dbf353e168d155f7";
+  "20.03".ap-southeast-2.hvm-ebs = "ami-04c0f3a75f63daddd";
+  "20.03".ap-northeast-1.hvm-ebs = "ami-093d9cc49c191eb6c";
+  "20.03".ap-northeast-2.hvm-ebs = "ami-0087df91a7b6ebd45";
+  "20.03".ap-south-1.hvm-ebs = "ami-0a1a6b569af04af9d";
+  "20.03".ap-east-1.hvm-ebs = "ami-0d18fdd309cdefa86";
+  "20.03".sa-east-1.hvm-ebs = "ami-09859378158ae971d";
+
+  latest = self."20.03";
 }; in self
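
A hypothetical consumer of the AMI table above (the relative import path is an assumption); after this change, latest resolves to the 20.03 images:

    let
      amis = import ./ec2-amis.nix;  # assumed path to the file shown above
    in {
      # Evaluates to "ami-0c5e7760748b74e85" (20.03, us-east-1, hvm-ebs).
      ami = amis.latest.us-east-1.hvm-ebs;
    }
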
diff --git a/nixos/modules/virtualisation/ecs-agent.nix b/nixos/modules/virtualisation/ecs-agent.nix
index fc51b159579..93fefe56d1a 100644
--- a/nixos/modules/virtualisation/ecs-agent.nix
+++ b/nixos/modules/virtualisation/ecs-agent.nix
@@ -38,9 +38,8 @@ in {
         if [ ! -z "$ECS_DATADIR" ]; then
           mkdir -p "$ECS_DATADIR"
         fi
-        ${cfg.package.bin}/bin/agent
+        ${cfg.package}/bin/agent
       '';
     };
   };
 }
-
diff --git a/nixos/modules/virtualisation/hyperv-image.nix b/nixos/modules/virtualisation/hyperv-image.nix
new file mode 100644
index 00000000000..be2f12b7d01
--- /dev/null
+++ b/nixos/modules/virtualisation/hyperv-image.nix
@@ -0,0 +1,69 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.hyperv;
+
+in {
+  options = {
+    hyperv = {
+      baseImageSize = mkOption {
+        type = types.int;
+        default = 2048;
+        description = ''
+          The size of the Hyper-V base image in MiB.
+        '';
+      };
+      vmDerivationName = mkOption {
+        type = types.str;
+        default = "nixos-hyperv-${config.system.nixos.label}-${pkgs.stdenv.hostPlatform.system}";
+        description = ''
+          The name of the derivation for the Hyper-V appliance.
+        '';
+      };
+      vmFileName = mkOption {
+        type = types.str;
+        default = "nixos-${config.system.nixos.label}-${pkgs.stdenv.hostPlatform.system}.vhdx";
+        description = ''
+          The file name of the Hyper-V appliance.
+        '';
+      };
+    };
+  };
+
+  config = {
+    system.build.hypervImage = import ../../lib/make-disk-image.nix {
+      name = cfg.vmDerivationName;
+      postVM = ''
+        ${pkgs.vmTools.qemu}/bin/qemu-img convert -f raw -o subformat=dynamic -O vhdx $diskImage $out/${cfg.vmFileName}
+      '';
+      format = "raw";
+      diskSize = cfg.baseImageSize;
+      partitionTableType = "efi";
+      inherit config lib pkgs;
+    };
+
+    fileSystems."/" = {
+      device = "/dev/disk/by-label/nixos";
+      autoResize = true;
+      fsType = "ext4";
+    };
+
+    fileSystems."/boot" = {
+      device = "/dev/disk/by-label/ESP";
+      fsType = "vfat";
+    };
+
+    boot.growPartition = true;
+
+    boot.loader.grub = {
+      version = 2;
+      device = "nodev";
+      efiSupport = true;
+      efiInstallAsRemovable = true;
+    };
+
+    virtualisation.hypervGuest.enable = true;
+  };
+}
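
A minimal sketch of a configuration that uses the new Hyper-V image module; the image size is an arbitrary example, and modulesPath is the standard NixOS module argument:

    { modulesPath, ... }:
    {
      imports = [ (modulesPath + "/virtualisation/hyperv-image.nix") ];
      hyperv.baseImageSize = 4096;  # MiB; the module default is 2048
    }

The resulting .vhdx is exposed as config.system.build.hypervImage.
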
diff --git a/nixos/modules/virtualisation/lxd.nix b/nixos/modules/virtualisation/lxd.nix
index de48d3a780e..53b89a9f55b 100644
--- a/nixos/modules/virtualisation/lxd.nix
+++ b/nixos/modules/virtualisation/lxd.nix
@@ -108,7 +108,7 @@ in
       '';
 
       serviceConfig = {
-        ExecStart = "@${cfg.package.bin}/bin/lxd lxd --group lxd";
+        ExecStart = "@${cfg.package}/bin/lxd lxd --group lxd";
         Type = "simple";
         KillMode = "process"; # when stopping, leave the containers alone
         LimitMEMLOCK = "infinity";
diff --git a/nixos/modules/virtualisation/nixos-containers.nix b/nixos/modules/virtualisation/nixos-containers.nix
new file mode 100644
index 00000000000..b0fa03917c8
--- /dev/null
+++ b/nixos/modules/virtualisation/nixos-containers.nix
@@ -0,0 +1,844 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  # The container's init script, a small wrapper around the regular
+  # NixOS stage-2 init script.
+  containerInit = (cfg:
+    let
+      renderExtraVeth = (name: cfg:
+        ''
+        echo "Bringing ${name} up"
+        ip link set dev ${name} up
+        ${optionalString (cfg.localAddress != null) ''
+          echo "Setting ip for ${name}"
+          ip addr add ${cfg.localAddress} dev ${name}
+        ''}
+        ${optionalString (cfg.localAddress6 != null) ''
+          echo "Setting ip6 for ${name}"
+          ip -6 addr add ${cfg.localAddress6} dev ${name}
+        ''}
+        ${optionalString (cfg.hostAddress != null) ''
+          echo "Setting route to host for ${name}"
+          ip route add ${cfg.hostAddress} dev ${name}
+        ''}
+        ${optionalString (cfg.hostAddress6 != null) ''
+          echo "Setting route6 to host for ${name}"
+          ip -6 route add ${cfg.hostAddress6} dev ${name}
+        ''}
+        ''
+        );
+    in
+      pkgs.writeScript "container-init"
+      ''
+        #! ${pkgs.runtimeShell} -e
+
+        # Initialise the container side of the veth pair.
+        if [ -n "$HOST_ADDRESS" ]   || [ -n "$HOST_ADDRESS6" ]  ||
+           [ -n "$LOCAL_ADDRESS" ]  || [ -n "$LOCAL_ADDRESS6" ] ||
+           [ -n "$HOST_BRIDGE" ]; then
+          ip link set host0 name eth0
+          ip link set dev eth0 up
+
+          if [ -n "$LOCAL_ADDRESS" ]; then
+            ip addr add $LOCAL_ADDRESS dev eth0
+          fi
+          if [ -n "$LOCAL_ADDRESS6" ]; then
+            ip -6 addr add $LOCAL_ADDRESS6 dev eth0
+          fi
+          if [ -n "$HOST_ADDRESS" ]; then
+            ip route add $HOST_ADDRESS dev eth0
+            ip route add default via $HOST_ADDRESS
+          fi
+          if [ -n "$HOST_ADDRESS6" ]; then
+            ip -6 route add $HOST_ADDRESS6 dev eth0
+            ip -6 route add default via $HOST_ADDRESS6
+          fi
+
+          ${concatStringsSep "\n" (mapAttrsToList renderExtraVeth cfg.extraVeths)}
+        fi
+
+        # Start the regular stage 1 script.
+        exec "$1"
+      ''
+    );
+
+  nspawnExtraVethArgs = (name: cfg: "--network-veth-extra=${name}");
+
+  startScript = cfg:
+    ''
+      mkdir -p -m 0755 "$root/etc" "$root/var/lib"
+      mkdir -p -m 0700 "$root/var/lib/private" "$root/root" /run/containers
+      if ! [ -e "$root/etc/os-release" ]; then
+        touch "$root/etc/os-release"
+      fi
+
+      if ! [ -e "$root/etc/machine-id" ]; then
+        touch "$root/etc/machine-id"
+      fi
+
+      mkdir -p -m 0755 \
+        "/nix/var/nix/profiles/per-container/$INSTANCE" \
+        "/nix/var/nix/gcroots/per-container/$INSTANCE"
+
+      cp --remove-destination /etc/resolv.conf "$root/etc/resolv.conf"
+
+      if [ "$PRIVATE_NETWORK" = 1 ]; then
+        extraFlags+=" --private-network"
+      fi
+
+      if [ -n "$HOST_ADDRESS" ]  || [ -n "$LOCAL_ADDRESS" ] ||
+         [ -n "$HOST_ADDRESS6" ] || [ -n "$LOCAL_ADDRESS6" ]; then
+        extraFlags+=" --network-veth"
+      fi
+
+      if [ -n "$HOST_PORT" ]; then
+        OIFS=$IFS
+        IFS=","
+        for i in $HOST_PORT
+        do
+            extraFlags+=" --port=$i"
+        done
+        IFS=$OIFS
+      fi
+
+      if [ -n "$HOST_BRIDGE" ]; then
+        extraFlags+=" --network-bridge=$HOST_BRIDGE"
+      fi
+
+      extraFlags+=" ${concatStringsSep " " (mapAttrsToList nspawnExtraVethArgs cfg.extraVeths)}"
+
+      for iface in $INTERFACES; do
+        extraFlags+=" --network-interface=$iface"
+      done
+
+      for iface in $MACVLANS; do
+        extraFlags+=" --network-macvlan=$iface"
+      done
+
+      # If the host is 64-bit and the container is 32-bit, add a
+      # --personality flag.
+      ${optionalString (config.nixpkgs.localSystem.system == "x86_64-linux") ''
+        if [ "$(< ''${SYSTEM_PATH:-/nix/var/nix/profiles/per-container/$INSTANCE/system}/system)" = i686-linux ]; then
+          extraFlags+=" --personality=x86"
+        fi
+      ''}
+
+      # Run systemd-nspawn without startup notification (we'll
+      # wait for the container systemd to signal readiness).
+      exec ${config.systemd.package}/bin/systemd-nspawn \
+        --keep-unit \
+        -M "$INSTANCE" -D "$root" $extraFlags \
+        $EXTRA_NSPAWN_FLAGS \
+        --notify-ready=yes \
+        --bind-ro=/nix/store \
+        --bind-ro=/nix/var/nix/db \
+        --bind-ro=/nix/var/nix/daemon-socket \
+        --bind="/nix/var/nix/profiles/per-container/$INSTANCE:/nix/var/nix/profiles" \
+        --bind="/nix/var/nix/gcroots/per-container/$INSTANCE:/nix/var/nix/gcroots" \
+        ${optionalString (!cfg.ephemeral) "--link-journal=try-guest"} \
+        --setenv PRIVATE_NETWORK="$PRIVATE_NETWORK" \
+        --setenv HOST_BRIDGE="$HOST_BRIDGE" \
+        --setenv HOST_ADDRESS="$HOST_ADDRESS" \
+        --setenv LOCAL_ADDRESS="$LOCAL_ADDRESS" \
+        --setenv HOST_ADDRESS6="$HOST_ADDRESS6" \
+        --setenv LOCAL_ADDRESS6="$LOCAL_ADDRESS6" \
+        --setenv HOST_PORT="$HOST_PORT" \
+        --setenv PATH="$PATH" \
+        ${optionalString cfg.ephemeral "--ephemeral"} \
+        ${if cfg.additionalCapabilities != null && cfg.additionalCapabilities != [] then
+          ''--capability="${concatStringsSep "," cfg.additionalCapabilities}"'' else ""
+        } \
+        ${if cfg.tmpfs != null && cfg.tmpfs != [] then
+          ''--tmpfs=${concatStringsSep " --tmpfs=" cfg.tmpfs}'' else ""
+        } \
+        ${containerInit cfg} "''${SYSTEM_PATH:-/nix/var/nix/profiles/system}/init"
+    '';
+
+  preStartScript = cfg:
+    ''
+      # Clean up existing machined registration and interfaces.
+      machinectl terminate "$INSTANCE" 2> /dev/null || true
+
+      if [ -n "$HOST_ADDRESS" ]  || [ -n "$LOCAL_ADDRESS" ] ||
+         [ -n "$HOST_ADDRESS6" ] || [ -n "$LOCAL_ADDRESS6" ]; then
+        ip link del dev "ve-$INSTANCE" 2> /dev/null || true
+        ip link del dev "vb-$INSTANCE" 2> /dev/null || true
+      fi
+
+      ${concatStringsSep "\n" (
+        mapAttrsToList (name: cfg:
+          ''ip link del dev ${name} 2> /dev/null || true ''
+        ) cfg.extraVeths
+      )}
+   '';
+
+  postStartScript = (cfg:
+    let
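+      # Render an `<ipcmd> add ... dev $ifaceHost` command: use the statically
+      # configured address when it is set, otherwise fall back to the runtime
+      # environment variable (guarded so nothing runs when it is empty).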
+      ipcall = cfg: ipcmd: variable: attribute:
+        if cfg.${attribute} == null then
+          ''
+            if [ -n "${variable}" ]; then
+              ${ipcmd} add ${variable} dev $ifaceHost
+            fi
+          ''
+        else
+          ''${ipcmd} add ${cfg.${attribute}} dev $ifaceHost'';
+      renderExtraVeth = name: cfg:
+        if cfg.hostBridge != null then
+          ''
+            # Add ${name} to bridge ${cfg.hostBridge}
+            ip link set dev ${name} master ${cfg.hostBridge} up
+          ''
+        else
+          ''
+            echo "Bring ${name} up"
+            ip link set dev ${name} up
+            # Set IPs and routes for ${name}
+            ${optionalString (cfg.hostAddress != null) ''
+              ip addr add ${cfg.hostAddress} dev ${name}
+            ''}
+            ${optionalString (cfg.hostAddress6 != null) ''
+              ip -6 addr add ${cfg.hostAddress6} dev ${name}
+            ''}
+            ${optionalString (cfg.localAddress != null) ''
+              ip route add ${cfg.localAddress} dev ${name}
+            ''}
+            ${optionalString (cfg.localAddress6 != null) ''
+              ip -6 route add ${cfg.localAddress6} dev ${name}
+            ''}
+          '';
+    in
+      ''
+        if [ -n "$HOST_ADDRESS" ]  || [ -n "$LOCAL_ADDRESS" ] ||
+           [ -n "$HOST_ADDRESS6" ] || [ -n "$LOCAL_ADDRESS6" ]; then
+          if [ -z "$HOST_BRIDGE" ]; then
+            ifaceHost=ve-$INSTANCE
+            ip link set dev $ifaceHost up
+
+            ${ipcall cfg "ip addr" "$HOST_ADDRESS" "hostAddress"}
+            ${ipcall cfg "ip -6 addr" "$HOST_ADDRESS6" "hostAddress6"}
+            ${ipcall cfg "ip route" "$LOCAL_ADDRESS" "localAddress"}
+            ${ipcall cfg "ip -6 route" "$LOCAL_ADDRESS6" "localAddress6"}
+          fi
+          ${concatStringsSep "\n" (mapAttrsToList renderExtraVeth cfg.extraVeths)}
+        fi
+      ''
+  );
+
+  serviceDirectives = cfg: {
+    ExecReload = pkgs.writeScript "reload-container"
+      ''
+        #! ${pkgs.runtimeShell} -e
+        ${pkgs.nixos-container}/bin/nixos-container run "$INSTANCE" -- \
+          bash --login -c "''${SYSTEM_PATH:-/nix/var/nix/profiles/system}/bin/switch-to-configuration test"
+      '';
+
+    SyslogIdentifier = "container %i";
+
+    EnvironmentFile = "-/etc/containers/%i.conf";
+
+    Type = "notify";
+
+    RuntimeDirectory = lib.optional cfg.ephemeral "containers/%i";
+
+    # Note that on reboot, systemd-nspawn returns 133, so this
+    # unit will be restarted. On poweroff, it returns 0, so the
+    # unit won't be restarted.
+    RestartForceExitStatus = "133";
+    SuccessExitStatus = "133";
+
+    # Some containers take a long time to start,
+    # especially when many are started automatically at once.
+    TimeoutStartSec = cfg.timeoutStartSec;
+
+    Restart = "on-failure";
+
+    Slice = "machine.slice";
+    Delegate = true;
+
+    # Hack: we don't want to kill systemd-nspawn, since we call
+    # "machinectl poweroff" in preStop to shut down the
+    # container cleanly. But systemd requires sending a signal
+    # (at least if we want remaining processes to be killed
+    # after the timeout). So send an ignored signal.
+    KillMode = "mixed";
+    KillSignal = "WINCH";
+
+    DevicePolicy = "closed";
+    DeviceAllow = map (d: "${d.node} ${d.modifier}") cfg.allowedDevices;
+  };
+
+
+  system = config.nixpkgs.localSystem.system;
+
+  bindMountOpts = { name, ... }: {
+
+    options = {
+      mountPoint = mkOption {
+        example = "/mnt/usb";
+        type = types.str;
+        description = "Mount point on the container file system.";
+      };
+      hostPath = mkOption {
+        default = null;
+        example = "/home/alice";
+        type = types.nullOr types.str;
+        description = "Location of the host path to be mounted.";
+      };
+      isReadOnly = mkOption {
+        default = true;
+        type = types.bool;
+        description = "Determine whether the mounted path will be accessed in read-only mode.";
+      };
+    };
+
+    config = {
+      mountPoint = mkDefault name;
+    };
+
+  };
+
+  allowedDeviceOpts = { ... }: {
+    options = {
+      node = mkOption {
+        example = "/dev/net/tun";
+        type = types.str;
+        description = "Path to device node";
+      };
+      modifier = mkOption {
+        example = "rw";
+        type = types.str;
+        description = ''
+          Device node access modifier. Takes a combination of
+          <literal>r</literal> (read), <literal>w</literal> (write), and
+          <literal>m</literal> (mknod). See the
+          <literal>systemd.resource-control(5)</literal> man page for more
+          information.'';
+      };
+    };
+  };
+
+
+  mkBindFlag = d:
+               let flagPrefix = if d.isReadOnly then " --bind-ro=" else " --bind=";
+                   mountstr = if d.hostPath != null then "${d.hostPath}:${d.mountPoint}" else "${d.mountPoint}";
+               in flagPrefix + mountstr ;
+
+  mkBindFlags = bs: concatMapStrings mkBindFlag (lib.attrValues bs);
+
+  networkOptions = {
+    hostBridge = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      example = "br0";
+      description = ''
+        Put the host-side of the veth-pair into the named bridge.
+        Only one of hostAddress* or hostBridge can be given.
+      '';
+    };
+
+    forwardPorts = mkOption {
+      type = types.listOf (types.submodule {
+        options = {
+          protocol = mkOption {
+            type = types.str;
+            default = "tcp";
+            description = "The protocol specifier for port forwarding between host and container";
+          };
+          hostPort = mkOption {
+            type = types.int;
+            description = "Source port of the external interface on host";
+          };
+          containerPort = mkOption {
+            type = types.nullOr types.int;
+            default = null;
+            description = "Target port of container";
+          };
+        };
+      });
+      default = [];
+      example = [ { protocol = "tcp"; hostPort = 8080; containerPort = 80; } ];
+      description = ''
+        List of ports forwarded from the host to the container. Each forwarded
+        port is specified by protocol, hostPort and containerPort. The protocol
+        defaults to tcp, and containerPort defaults to hostPort when it is not
+        explicitly given.
+      '';
+    };
+
+
+    hostAddress = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      example = "10.231.136.1";
+      description = ''
+        The IPv4 address assigned to the host interface.
+        (Not used when hostBridge is set.)
+      '';
+    };
+
+    hostAddress6 = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      example = "fc00::1";
+      description = ''
+        The IPv6 address assigned to the host interface.
+        (Not used when hostBridge is set.)
+      '';
+    };
+
+    localAddress = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      example = "10.231.136.2";
+      description = ''
+        The IPv4 address assigned to the interface in the container.
+        If a hostBridge is used, this should include a netmask so the whole
+        network is reachable. Otherwise the default netmask is /32 and routing
+        is set up from localAddress to hostAddress and back.
+      '';
+    };
+
+    localAddress6 = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      example = "fc00::2";
+      description = ''
+        The IPv6 address assigned to the interface in the container.
+        If a hostBridge is used, this should include a netmask so the whole
+        network is reachable. Otherwise the default netmask is /128 and routing
+        is set up from localAddress6 to hostAddress6 and back.
+      '';
+    };
+
+  };
+
+  dummyConfig =
+    {
+      extraVeths = {};
+      additionalCapabilities = [];
+      ephemeral = false;
+      timeoutStartSec = "15s";
+      allowedDevices = [];
+      hostAddress = null;
+      hostAddress6 = null;
+      localAddress = null;
+      localAddress6 = null;
+      tmpfs = null;
+    };
+
+in
+
+{
+  options = {
+
+    boot.isContainer = mkOption {
+      type = types.bool;
+      default = false;
+      description = ''
+        Whether this NixOS machine is a lightweight container running
+        in another NixOS system. If set to true, support for nested
+        containers is disabled by default, but can be re-enabled by
+        setting <option>boot.enableContainers</option> to true.
+      '';
+    };
+
+    boot.enableContainers = mkOption {
+      type = types.bool;
+      default = !config.boot.isContainer;
+      description = ''
+        Whether to enable support for NixOS containers. Defaults to true
+        (at no cost if containers are not actually used), unless the system
+        is itself running as a lightweight container on another host.
+        To enable support for nested containers, this option has to be
+        explicitly set to true (in the outer container).
+      '';
+    };
+
+    containers = mkOption {
+      type = types.attrsOf (types.submodule (
+        { config, options, name, ... }:
+        {
+          options = {
+
+            config = mkOption {
+              description = ''
+                A specification of the desired configuration of this
+                container, as a NixOS module.
+              '';
+              type = let
+                confPkgs = if config.pkgs == null then pkgs else config.pkgs;
+              in lib.mkOptionType {
+                name = "Toplevel NixOS config";
+                merge = loc: defs: (import (confPkgs.path + "/nixos/lib/eval-config.nix") {
+                  inherit system;
+                  pkgs = confPkgs;
+                  baseModules = import (confPkgs.path + "/nixos/modules/module-list.nix");
+                  inherit (confPkgs) lib;
+                  modules =
+                    let
+                      extraConfig = {
+                        _file = "module at ${__curPos.file}:${toString __curPos.line}";
+                        config = {
+                          boot.isContainer = true;
+                          networking.hostName = mkDefault name;
+                          networking.useDHCP = false;
+                          assertions = [
+                            {
+                              assertion = config.privateNetwork -> stringLength name < 12;
+                              message = ''
+                                Container name `${name}` is too long: when `privateNetwork` is enabled, container names
+                                cannot be longer than 11 characters, because the container's interface name is derived from it.
+                                This might be fixed in the future. See https://github.com/NixOS/nixpkgs/issues/38509
+                              '';
+                            }
+                          ];
+                        };
+                      };
+                    in [ extraConfig ] ++ (map (x: x.value) defs);
+                  prefix = [ "containers" name ];
+                }).config;
+              };
+            };
+
+            path = mkOption {
+              type = types.path;
+              example = "/nix/var/nix/profiles/containers/webserver";
+              description = ''
+                As an alternative to specifying
+                <option>config</option>, you can specify the path to
+                the evaluated NixOS system configuration, typically a
+                symlink to a system profile.
+              '';
+            };
+
+            additionalCapabilities = mkOption {
+              type = types.listOf types.str;
+              default = [];
+              example = [ "CAP_NET_ADMIN" "CAP_MKNOD" ];
+              description = ''
+                Grant additional capabilities to the container.  See the
+                capabilities(7) and systemd-nspawn(1) man pages for more
+                information.
+              '';
+            };
+
+            pkgs = mkOption {
+              type = types.nullOr types.attrs;
+              default = null;
+              example = literalExample "pkgs";
+              description = ''
+                Customise which nixpkgs to use for this container.
+              '';
+            };
+
+            ephemeral = mkOption {
+              type = types.bool;
+              default = false;
+              description = ''
+                Runs the container in ephemeral mode with an empty root filesystem at boot.
+                This way the container is bootstrapped from scratch on each boot
+                and cleaned up on shutdown, leaving no traces behind.
+                Useful for completely stateless, reproducible containers.
+
+                Note that this option might require some adjustments to the container configuration,
+                e.g. you might want to set
+                <varname>systemd.network.networks.$interface.dhcpV4Config.ClientIdentifier</varname> to "mac"
+                if you use the <varname>macvlans</varname> option.
+                This way the DHCP client identifier stays stable across container restarts.
+
+                Note that the container journal will not be linked to the host if this option is enabled.
+              '';
+            };
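+            # A sketch of the adjustment mentioned above (the interface and network unit
+            # names are hypothetical; substitute the ones actually used inside the container,
+            # and note that the container must use systemd-networkd for this to apply):
+            #
+            #   containers.example = {
+            #     ephemeral = true;
+            #     macvlans = [ "eth1" ];
+            #     config = { ... }: {
+            #       systemd.network.networks."mv-eth1".dhcpV4Config.ClientIdentifier = "mac";
+            #     };
+            #   };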
+
+            enableTun = mkOption {
+              type = types.bool;
+              default = false;
+              description = ''
+                Allows the container to create and set up tunnel interfaces
+                by granting the <literal>NET_ADMIN</literal> capability and
+                enabling access to <literal>/dev/net/tun</literal>.
+              '';
+            };
+
+            privateNetwork = mkOption {
+              type = types.bool;
+              default = false;
+              description = ''
+                Whether to give the container its own private virtual
+                Ethernet interface.  The interface is called
+                <literal>eth0</literal>, and is hooked up to the interface
+                <literal>ve-<replaceable>container-name</replaceable></literal>
+                on the host.  If this option is not set, then the
+                container shares the network interfaces of the host,
+                and can bind to any port on any interface.
+              '';
+            };
+
+            interfaces = mkOption {
+              type = types.listOf types.str;
+              default = [];
+              example = [ "eth1" "eth2" ];
+              description = ''
+                The list of interfaces to be moved into the container.
+              '';
+            };
+
+            macvlans = mkOption {
+              type = types.listOf types.str;
+              default = [];
+              example = [ "eth1" "eth2" ];
+              description = ''
+                The list of host interfaces from which macvlans will be
+                created. For each interface specified, a macvlan interface
+                will be created and moved to the container.
+              '';
+            };
+
+            extraVeths = mkOption {
+              type = with types; attrsOf (submodule { options = networkOptions; });
+              default = {};
+              description = ''
+                Extra veth-pairs to be created for the container.
+              '';
+            };
+
+            autoStart = mkOption {
+              type = types.bool;
+              default = false;
+              description = ''
+                Whether the container is automatically started at boot-time.
+              '';
+            };
+
+            timeoutStartSec = mkOption {
+              type = types.str;
+              default = "1min";
+              description = ''
+                Time allowed for the container to start. In case of a timeout,
+                the container processes get killed.
+                See <citerefentry><refentrytitle>systemd.time</refentrytitle>
+                <manvolnum>7</manvolnum></citerefentry>
+                for more information about the format.
+              '';
+            };
+
+            bindMounts = mkOption {
+              type = with types; loaOf (submodule bindMountOpts);
+              default = {};
+              example = literalExample ''
+                { "/home" = { hostPath = "/home/alice";
+                              isReadOnly = false; };
+                }
+              '';
+
+              description =
+                ''
+                  An extra set of directories that are bind mounted into the container.
+                '';
+            };
+
+            allowedDevices = mkOption {
+              type = with types; listOf (submodule allowedDeviceOpts);
+              default = [];
+              example = [ { node = "/dev/net/tun"; modifier = "rw"; } ];
+              description = ''
+                A list of device nodes to which the container has access.
+              '';
+            };
+
+            tmpfs = mkOption {
+              type = types.listOf types.str;
+              default = [];
+              example = [ "/var" ];
+              description = ''
+                Mounts a set of tmpfs file systems into the container.
+                Multiple paths can be specified.
+                Valid items must conform to the --tmpfs argument
+                of systemd-nspawn. See systemd-nspawn(1) for details.
+              '';
+            };
+
+            extraFlags = mkOption {
+              type = types.listOf types.str;
+              default = [];
+              example = [ "--drop-capability=CAP_SYS_CHROOT" ];
+              description = ''
+                Extra flags passed to the systemd-nspawn command.
+                See systemd-nspawn(1) for details.
+              '';
+            };
+
+          } // networkOptions;
+
+          config = mkMerge
+            [
+              (mkIf options.config.isDefined {
+                path = config.config.system.build.toplevel;
+              })
+            ];
+        }));
+
+      default = {};
+      example = literalExample
+        ''
+          { webserver =
+              { path = "/nix/var/nix/profiles/webserver";
+              };
+            database =
+              { config =
+                  { config, pkgs, ... }:
+                  { services.postgresql.enable = true;
+                    services.postgresql.package = pkgs.postgresql_9_6;
+
+                    system.stateVersion = "17.03";
+                  };
+              };
+          }
+        '';
+      description = ''
+        A set of NixOS system configurations to be run as lightweight
+        containers.  Each container appears as a service
+        <literal>container-<replaceable>name</replaceable></literal>
+        on the host system, allowing it to be started and stopped via
+        <command>systemctl</command>.
+      '';
+    };
+
+  };
+
+
+  config = mkIf (config.boot.enableContainers) (let
+
+    unit = {
+      description = "Container '%i'";
+
+      unitConfig.RequiresMountsFor = "/var/lib/containers/%i";
+
+      path = [ pkgs.iproute ];
+
+      environment = {
+        root = "/var/lib/containers/%i";
+        INSTANCE = "%i";
+      };
+
+      preStart = preStartScript dummyConfig;
+
+      script = startScript dummyConfig;
+
+      postStart = postStartScript dummyConfig;
+
+      preStop = "machinectl poweroff $INSTANCE";
+
+      restartIfChanged = false;
+
+      serviceConfig = serviceDirectives dummyConfig;
+    };
+  in {
+    systemd.targets.multi-user.wants = [ "machines.target" ];
+
+    systemd.services = listToAttrs (filter (x: x.value != null) (
+      # The generic container template used by imperative containers
+      [{ name = "container@"; value = unit; }]
+      # declarative containers
+      ++ (mapAttrsToList (name: cfg: nameValuePair "container@${name}" (let
+          containerConfig = cfg // (
+          if cfg.enableTun then
+            {
+              allowedDevices = cfg.allowedDevices
+                ++ [ { node = "/dev/net/tun"; modifier = "rw"; } ];
+              additionalCapabilities = cfg.additionalCapabilities
+                ++ [ "CAP_NET_ADMIN" ];
+            }
+          else {});
+        in
+          recursiveUpdate unit {
+            preStart = preStartScript containerConfig;
+            script = startScript containerConfig;
+            postStart = postStartScript containerConfig;
+            serviceConfig = serviceDirectives containerConfig;
+            unitConfig.RequiresMountsFor = lib.optional (!containerConfig.ephemeral) "/var/lib/containers/%i";
+            environment.root = if containerConfig.ephemeral then "/run/containers/%i" else "/var/lib/containers/%i";
+          } // (
+          if containerConfig.autoStart then
+            {
+              wantedBy = [ "machines.target" ];
+              wants = [ "network.target" ];
+              after = [ "network.target" ];
+              restartTriggers = [
+                containerConfig.path
+                config.environment.etc."containers/${name}.conf".source
+              ];
+              restartIfChanged = true;
+            }
+          else {})
+      )) config.containers)
+    ));
+
+    # Generate a configuration file in /etc/containers for each
+    # container so that container@.target can get the container
+    # configuration.
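+    # For illustration only, a generated file for a container with a private
+    # network and one forwarded port might look roughly like this (all values
+    # below are hypothetical):
+    #
+    #   SYSTEM_PATH=/nix/store/...-nixos-system-webserver
+    #   PRIVATE_NETWORK=1
+    #   HOST_PORT=tcp:8080:80
+    #   HOST_ADDRESS=10.231.136.1
+    #   LOCAL_ADDRESS=10.231.136.2
+    #   INTERFACES=""
+    #   MACVLANS=""
+    #   AUTO_START=1
+    #   EXTRA_NSPAWN_FLAGS=""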
+    environment.etc =
+      let mkPortStr = p: p.protocol + ":" + (toString p.hostPort) + ":" + (if p.containerPort == null then toString p.hostPort else toString p.containerPort);
+      in mapAttrs' (name: cfg: nameValuePair "containers/${name}.conf"
+      { text =
+          ''
+            SYSTEM_PATH=${cfg.path}
+            ${optionalString cfg.privateNetwork ''
+              PRIVATE_NETWORK=1
+              ${optionalString (cfg.hostBridge != null) ''
+                HOST_BRIDGE=${cfg.hostBridge}
+              ''}
+              ${optionalString (length cfg.forwardPorts > 0) ''
+                HOST_PORT=${concatStringsSep "," (map mkPortStr cfg.forwardPorts)}
+              ''}
+              ${optionalString (cfg.hostAddress != null) ''
+                HOST_ADDRESS=${cfg.hostAddress}
+              ''}
+              ${optionalString (cfg.hostAddress6 != null) ''
+                HOST_ADDRESS6=${cfg.hostAddress6}
+              ''}
+              ${optionalString (cfg.localAddress != null) ''
+                LOCAL_ADDRESS=${cfg.localAddress}
+              ''}
+              ${optionalString (cfg.localAddress6 != null) ''
+                LOCAL_ADDRESS6=${cfg.localAddress6}
+              ''}
+            ''}
+            INTERFACES="${toString cfg.interfaces}"
+            MACVLANS="${toString cfg.macvlans}"
+            ${optionalString cfg.autoStart ''
+              AUTO_START=1
+            ''}
+            EXTRA_NSPAWN_FLAGS="${mkBindFlags cfg.bindMounts +
+              optionalString (cfg.extraFlags != [])
+                (" " + concatStringsSep " " cfg.extraFlags)}"
+          '';
+      }) config.containers;
+
+    # Generate /etc/hosts entries for the containers.
+    networking.extraHosts = concatStrings (mapAttrsToList (name: cfg: optionalString (cfg.localAddress != null)
+      ''
+        ${head (splitString "/" cfg.localAddress)} ${name}.containers
+      '') config.containers);
+
+    networking.dhcpcd.denyInterfaces = [ "ve-*" "vb-*" ];
+
+    services.udev.extraRules = optionalString config.networking.networkmanager.enable ''
+      # Don't manage interfaces created by nixos-container.
+      ENV{INTERFACE}=="v[eb]-*", ENV{NM_UNMANAGED}="1"
+    '';
+
+    environment.systemPackages = [ pkgs.nixos-container ];
+
+    boot.kernelModules = [
+      "bridge"
+      "macvlan"
+      "tap"
+      "tun"
+    ];
+  });
+}
diff --git a/nixos/modules/virtualisation/docker-containers.nix b/nixos/modules/virtualisation/oci-containers.nix
index 5ab990a3d7c..a46dd65eb49 100644
--- a/nixos/modules/virtualisation/docker-containers.nix
+++ b/nixos/modules/virtualisation/oci-containers.nix
@@ -1,17 +1,20 @@
-{ config, lib, pkgs, ... }:
+{ config, options, lib, pkgs, ... }:
 
 with lib;
 let
-  cfg = config.docker-containers;
+  cfg = config.virtualisation.oci-containers;
+  proxy_env = config.networking.proxy.envVars;
 
-  dockerContainer =
+  defaultBackend = options.virtualisation.oci-containers.backend.default;
+
+  containerOptions =
     { ... }: {
 
       options = {
 
         image = mkOption {
           type = with types; str;
-          description = "Docker image to run.";
+          description = "OCI image to run.";
           example = "library/hello-world";
         };
 
@@ -58,18 +61,19 @@ let
 
         log-driver = mkOption {
           type = types.str;
-          default = "none";
+          default = "journald";
           description = ''
             Logging driver for the container.  The default of
-            <literal>"none"</literal> means that the container's logs will be
-            handled as part of the systemd unit.  Setting this to
-            <literal>"journald"</literal> will result in duplicate logging, but
-            the container's logs will be visible to the <command>docker
-            logs</command> command.
-
-            For more details and a full list of logging drivers, refer to the
-            <link xlink:href="https://docs.docker.com/engine/reference/run/#logging-drivers---log-driver">
-            Docker engine documentation</link>
+            <literal>"journald"</literal> means that the container's logs will be
+            handled as part of the systemd unit.
+
+            For more details and a full list of logging drivers, refer to the respective backend's documentation.
+
+            For Docker:
+            <link xlink:href="https://docs.docker.com/engine/reference/run/#logging-drivers---log-driver">Docker engine documentation</link>
+
+            For Podman:
+            Refer to the docker-run(1) man page.
           '';
         };
 
@@ -172,10 +176,10 @@ let
           description = ''
             Define which other containers this one depends on. They will be added to both After and Requires for the unit.
 
-            Use the same name as the attribute under <literal>services.docker-containers</literal>.
+            Use the same name as the attribute under <literal>virtualisation.oci-containers</literal>.
           '';
           example = literalExample ''
-            services.docker-containers = {
+            virtualisation.oci-containers = {
               node1 = {};
               node2 = {
                 dependsOn = [ "node1" ];
@@ -184,10 +188,10 @@ let
           '';
         };
 
-        extraDockerOptions = mkOption {
+        extraOptions = mkOption {
           type = with types; listOf str;
           default = [];
-          description = "Extra options for <command>docker run</command>.";
+          description = "Extra options for <command>${defaultBackend} run</command>.";
           example = literalExample ''
             ["--network=host"]
           '';
@@ -205,24 +209,31 @@ let
     };
 
   mkService = name: container: let
-    mkAfter = map (x: "docker-${x}.service") container.dependsOn;
-  in rec {
+    dependsOn = map (x: "${cfg.backend}-${x}.service") container.dependsOn;
+  in {
     wantedBy = [] ++ optional (container.autoStart) "multi-user.target";
-    after = [ "docker.service" "docker.socket" ] ++ mkAfter;
-    requires = after;
-    path = [ pkgs.docker ];
+    after = lib.optionals (cfg.backend == "docker") [ "docker.service" "docker.socket" ] ++ dependsOn;
+    requires = dependsOn;
+    environment = proxy_env;
+
+    path =
+      if cfg.backend == "docker" then [ pkgs.docker ]
+      else if cfg.backend == "podman" then [ config.virtualisation.podman.package ]
+      else throw "Unhandled backend: ${cfg.backend}";
 
     preStart = ''
-      docker rm -f ${name} || true
+      ${cfg.backend} rm -f ${name} || true
       ${optionalString (container.imageFile != null) ''
-        docker load -i ${container.imageFile}
+        ${cfg.backend} load -i ${container.imageFile}
         ''}
       '';
-    postStop = "docker rm -f ${name} || true";
-        
+    postStop = "${cfg.backend} rm -f ${name} || true";
+
     serviceConfig = {
+      StandardOutput = "null";
+      StandardError = "null";
       ExecStart = concatStringsSep " \\\n  " ([
-        "${pkgs.docker}/bin/docker run"
+        "${config.system.path}/bin/${cfg.backend} run"
         "--rm"
         "--name=${name}"
         "--log-driver=${container.log-driver}"
@@ -233,12 +244,12 @@ let
         ++ optional (container.user != null) "-u ${escapeShellArg container.user}"
         ++ map (v: "-v ${escapeShellArg v}") container.volumes
         ++ optional (container.workdir != null) "-w ${escapeShellArg container.workdir}"
-        ++ map escapeShellArg container.extraDockerOptions
+        ++ map escapeShellArg container.extraOptions
         ++ [container.image]
         ++ map escapeShellArg container.cmd
       );
 
-      ExecStop = ''${pkgs.bash}/bin/sh -c "[ $SERVICE_RESULT = success ] || docker stop ${name}"'';
+      ExecStop = ''${pkgs.bash}/bin/sh -c "[ $SERVICE_RESULT = success ] || ${cfg.backend} stop ${name}"'';
 
       ### There is no generalized way of supporting `reload` for docker
       ### containers. Some containers may respond well to SIGHUP sent to their
@@ -263,19 +274,50 @@ let
   };
 
 in {
+  imports = [
+    (
+      lib.mkChangedOptionModule
+      [ "docker-containers"  ]
+      [ "virtualisation" "oci-containers" ]
+      (oldcfg: {
+        backend = "docker";
+        containers = lib.mapAttrs (n: v: builtins.removeAttrs (v // {
+          extraOptions = v.extraDockerOptions or [];
+        }) [ "extraDockerOptions" ]) oldcfg.docker-containers;
+      })
+    )
+  ];
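+  # Migration sketch (container name and values are illustrative): a configuration
+  # that previously read
+  #
+  #   docker-containers.hello = {
+  #     image = "library/hello-world";
+  #     extraDockerOptions = [ "--network=host" ];
+  #   };
+  #
+  # is now expressed as
+  #
+  #   virtualisation.oci-containers.backend = "docker";
+  #   virtualisation.oci-containers.containers.hello = {
+  #     image = "library/hello-world";
+  #     extraOptions = [ "--network=host" ];
+  #   };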
+
+  options.virtualisation.oci-containers = {
+
+    backend = mkOption {
+      type = types.enum [ "podman" "docker" ];
+      default =
+        # TODO: Once https://github.com/NixOS/nixpkgs/issues/77925 is resolved default to podman
+        # if versionAtLeast config.system.stateVersion "20.09" then "podman"
+        # else "docker";
+        "docker";
+      description = "The underlying container engine to use, either <literal>docker</literal> or <literal>podman</literal>.";
+    };
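+    # Usage sketch (container name and image are illustrative):
+    #   virtualisation.oci-containers.backend = "podman";
+    #   virtualisation.oci-containers.containers.hello = { image = "library/hello-world"; };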
 
-  options.docker-containers = mkOption {
-    default = {};
-    type = types.attrsOf (types.submodule dockerContainer);
-    description = "Docker containers to run as systemd services.";
-  };
-
-  config = mkIf (cfg != {}) {
-
-    systemd.services = mapAttrs' (n: v: nameValuePair "docker-${n}" (mkService n v)) cfg;
-
-    virtualisation.docker.enable = true;
+    containers = mkOption {
+      default = {};
+      type = types.attrsOf (types.submodule containerOptions);
+      description = "OCI (Docker) containers to run as systemd services.";
+    };
 
   };
 
+  config = lib.mkIf (cfg.containers != {}) (lib.mkMerge [
+    {
+      systemd.services = mapAttrs' (n: v: nameValuePair "${cfg.backend}-${n}" (mkService n v)) cfg.containers;
+    }
+    (lib.mkIf (cfg.backend == "podman") {
+      virtualisation.podman.enable = true;
+    })
+    (lib.mkIf (cfg.backend == "docker") {
+      virtualisation.docker.enable = true;
+    })
+  ]);
+
 }
diff --git a/nixos/modules/virtualisation/podman.nix b/nixos/modules/virtualisation/podman.nix
new file mode 100644
index 00000000000..652850bf500
--- /dev/null
+++ b/nixos/modules/virtualisation/podman.nix
@@ -0,0 +1,123 @@
+{ config, lib, pkgs, ... }:
+let
+  cfg = config.virtualisation.podman;
+
+  inherit (lib) mkOption types;
+
+  podmanPackage = (pkgs.podman.override { inherit (cfg) extraPackages; });
+
+  # Provides a fake "docker" binary mapping to podman
+  dockerCompat = pkgs.runCommandNoCC "${podmanPackage.pname}-docker-compat-${podmanPackage.version}" {
+    outputs = [ "out" "man" ];
+    inherit (podmanPackage) meta;
+  } ''
+    mkdir -p $out/bin
+    ln -s ${podmanPackage}/bin/podman $out/bin/docker
+
+    mkdir -p $man/share/man/man1
+    for f in ${podmanPackage.man}/share/man/man1/*; do
+      basename=$(basename $f | sed s/podman/docker/g)
+      ln -s $f $man/share/man/man1/$basename
+    done
+  '';
+
+  # Copy configuration files to avoid having the entire sources in the system closure
+  copyFile = filePath: pkgs.runCommandNoCC (builtins.unsafeDiscardStringContext (builtins.baseNameOf filePath)) {} ''
+    cp ${filePath} $out
+  '';
+
+in
+{
+  meta = {
+    maintainers = lib.teams.podman.members;
+  };
+
+  options.virtualisation.podman = {
+
+    enable =
+      mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          This option enables Podman, a daemonless container engine for
+          developing, managing, and running OCI containers on your Linux system.
+
+          It is a drop-in replacement for the <command>docker</command> command.
+        '';
+      };
+
+    dockerCompat = mkOption {
+      type = types.bool;
+      default = false;
+      description = ''
+        Create an alias mapping <command>docker</command> to <command>podman</command>.
+      '';
+    };
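+    # A minimal usage sketch, assuming Docker itself is not enabled on the host
+    # (the assertion below rejects the combination):
+    #   virtualisation.podman.enable = true;
+    #   virtualisation.podman.dockerCompat = true;
+    # With this, the `docker` command on the host is a symlink to podman.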
+
+    extraPackages = mkOption {
+      type = with types; listOf package;
+      default = [ ];
+      example = lib.literalExample ''
+        [
+          pkgs.gvisor
+        ]
+      '';
+      description = ''
+        Extra packages to be installed in the Podman wrapper.
+      '';
+    };
+
+    libpod = mkOption {
+      default = {};
+      description = "Libpod configuration";
+      type = types.submodule {
+        options = {
+
+          extraConfig = mkOption {
+            type = types.lines;
+            default = "";
+            description = ''
+              Extra configuration that should be put in the libpod.conf
+              configuration file.
+            '';
+          };
+        };
+      };
+    };
+
+    package = lib.mkOption {
+      type = types.package;
+      default = podmanPackage;
+      internal = true;
+      description = ''
+        The final Podman package (including extra packages).
+      '';
+    };
+
+
+  };
+
+  config = lib.mkIf cfg.enable {
+
+    environment.systemPackages = [ cfg.package ]
+      ++ lib.optional cfg.dockerCompat dockerCompat;
+
+    environment.etc."containers/libpod.conf".text = ''
+      cni_plugin_dir = ["${pkgs.cni-plugins}/bin/"]
+
+    '' + cfg.libpod.extraConfig;
+
+    environment.etc."cni/net.d/87-podman-bridge.conflist".source = copyFile "${pkgs.podman-unwrapped.src}/cni/87-podman-bridge.conflist";
+
+    # Enable common /etc/containers configuration
+    virtualisation.containers.enable = true;
+
+    assertions = [{
+      assertion = cfg.dockerCompat -> !config.virtualisation.docker.enable;
+      message = "Option dockerCompat conflicts with docker";
+    }];
+
+  };
+
+}
diff --git a/nixos/modules/virtualisation/qemu-vm.nix b/nixos/modules/virtualisation/qemu-vm.nix
index 31d332e9f07..ac86330c098 100644
--- a/nixos/modules/virtualisation/qemu-vm.nix
+++ b/nixos/modules/virtualisation/qemu-vm.nix
@@ -189,9 +189,18 @@ let
           mkdir /boot/grub
           echo '(hd0) /dev/vda' > /boot/grub/device.map
 
-          # Install GRUB and generate the GRUB boot menu.
-          touch /etc/NIXOS
+          # This is needed for systemd-boot to find the ESP; udev is not available here to create this symlink
+          mkdir -p /dev/block
+          ln -s /dev/vda2 /dev/block/254:2
+
+          # Set up system profile (normally done by nixos-rebuild / nix-env --set)
           mkdir -p /nix/var/nix/profiles
+          ln -s ${config.system.build.toplevel} /nix/var/nix/profiles/system-1-link
+          ln -s /nix/var/nix/profiles/system-1-link /nix/var/nix/profiles/system
+
+          # Install bootloader
+          touch /etc/NIXOS
+          export NIXOS_INSTALL_BOOTLOADER=1
           ${config.system.build.toplevel}/bin/switch-to-configuration boot
 
           umount /boot
@@ -499,7 +508,7 @@ in
     # FIXME: Consolidate this one day.
     virtualisation.qemu.options = mkMerge [
       (mkIf (pkgs.stdenv.isi686 || pkgs.stdenv.isx86_64) [
-        "-vga std" "-usb" "-device usb-tablet,bus=usb-bus.0"
+        "-usb" "-device usb-tablet,bus=usb-bus.0"
       ])
       (mkIf (pkgs.stdenv.isAarch32 || pkgs.stdenv.isAarch64) [
         "-device virtio-gpu-pci" "-device usb-ehci,id=usb0" "-device usb-kbd" "-device usb-tablet"
diff --git a/nixos/modules/virtualisation/xen-dom0.nix b/nixos/modules/virtualisation/xen-dom0.nix
index 7f0af9901b9..7b2a66c4348 100644
--- a/nixos/modules/virtualisation/xen-dom0.nix
+++ b/nixos/modules/virtualisation/xen-dom0.nix
@@ -103,6 +103,7 @@ in
         };
 
         forwardDns = mkOption {
+          type = types.bool;
           default = false;
           description = ''
             If set to <literal>true</literal>, the DNS queries from the
@@ -135,14 +136,8 @@ in
           };
       };
 
-    virtualisation.xen.trace =
-      mkOption {
-        default = false;
-        description =
-          ''
-            Enable Xen tracing.
-          '';
-      };
+    virtualisation.xen.trace = mkEnableOption "Xen tracing";
+
   };
 
 
diff --git a/nixos/release-combined.nix b/nixos/release-combined.nix
index 02f19610f8a..ece2d091f5a 100644
--- a/nixos/release-combined.nix
+++ b/nixos/release-combined.nix
@@ -20,11 +20,6 @@ let
       else pkgs.lib.mapAttrs (n: v: removeMaintainers v) set
     else set;
 
-  allSupportedNixpkgs = builtins.removeAttrs (removeMaintainers (import ../pkgs/top-level/release.nix {
-    supportedSystems = supportedSystems ++ limitedSupportedSystems;
-    nixpkgs = nixpkgsSrc;
-  })) [ "unstable" ];
-
 in rec {
 
   nixos = removeMaintainers (import ./release.nix {
@@ -55,6 +50,7 @@ in rec {
         (onFullSupported "nixos.dummy")
         (onAllSupported "nixos.iso_minimal")
         (onSystems ["x86_64-linux"] "nixos.iso_plasma5")
+        (onSystems ["x86_64-linux"] "nixos.iso_gnome")
         (onFullSupported "nixos.manual")
         (onSystems ["x86_64-linux"] "nixos.ova")
         (onSystems ["aarch64-linux"] "nixos.sd_image")
@@ -75,6 +71,7 @@ in rec {
         (onFullSupported "nixos.tests.fontconfig-default-fonts")
         (onFullSupported "nixos.tests.gnome3")
         (onFullSupported "nixos.tests.gnome3-xorg")
+        (onFullSupported "nixos.tests.hardened")
         (onSystems ["x86_64-linux"] "nixos.tests.hibernate")
         (onFullSupported "nixos.tests.i3wm")
         (onSystems ["x86_64-linux"] "nixos.tests.installer.btrfsSimple")
@@ -96,6 +93,8 @@ in rec {
         (onFullSupported "nixos.tests.keymap.dvp")
         (onFullSupported "nixos.tests.keymap.neo")
         (onFullSupported "nixos.tests.keymap.qwertz")
+        (onFullSupported "nixos.tests.latestKernel.hardened")
+        (onFullSupported "nixos.tests.latestKernel.login")
         (onFullSupported "nixos.tests.lightdm")
         (onFullSupported "nixos.tests.login")
         (onFullSupported "nixos.tests.misc")
@@ -112,11 +111,13 @@ in rec {
         (onFullSupported "nixos.tests.networking.scripted.sit")
         (onFullSupported "nixos.tests.networking.scripted.static")
         (onFullSupported "nixos.tests.networking.scripted.vlan")
+        (onFullSupported "nixos.tests.systemd-networkd-ipv6-prefix-delegation")
         (onFullSupported "nixos.tests.nfs3.simple")
         (onFullSupported "nixos.tests.nfs4.simple")
         (onFullSupported "nixos.tests.openssh")
         (onFullSupported "nixos.tests.pantheon")
         (onFullSupported "nixos.tests.php.fpm")
+        (onFullSupported "nixos.tests.php.httpd")
         (onFullSupported "nixos.tests.php.pcre")
         (onFullSupported "nixos.tests.plasma5")
         (onFullSupported "nixos.tests.predictable-interface-names.predictableNetworkd")
diff --git a/nixos/release.nix b/nixos/release.nix
index 6107f352971..cf16986b213 100644
--- a/nixos/release.nix
+++ b/nixos/release.nix
@@ -155,6 +155,12 @@ in rec {
     inherit system;
   });
 
+  iso_gnome = forMatchingSystems [ "x86_64-linux" ] (system: makeIso {
+    module = ./modules/installer/cd-dvd/installation-cd-graphical-gnome.nix;
+    type = "gnome";
+    inherit system;
+  });
+
   # A variant with a more recent (but possibly less stable) kernel
   # that might support more hardware.
   iso_minimal_new_kernel = forMatchingSystems [ "x86_64-linux" "aarch64-linux" ] (system: makeIso {
@@ -308,9 +314,9 @@ in rec {
     lapp = makeClosure ({ pkgs, ... }:
       { services.httpd.enable = true;
         services.httpd.adminAddr = "foo@example.org";
+        services.httpd.enablePHP = true;
         services.postgresql.enable = true;
         services.postgresql.package = pkgs.postgresql;
-        environment.systemPackages = [ pkgs.php ];
       });
   };
 }
diff --git a/nixos/tests/acme.nix b/nixos/tests/acme.nix
index e045f3415fa..fc41dc1eb5f 100644
--- a/nixos/tests/acme.nix
+++ b/nixos/tests/acme.nix
@@ -1,5 +1,5 @@
 let
-  commonConfig = ./common/letsencrypt/common.nix;
+  commonConfig = ./common/acme/client;
 
   dnsScript = {writeScript, dnsAddress, bash, curl}: writeScript "dns-hook.sh" ''
     #!${bash}/bin/bash
@@ -12,12 +12,13 @@ let
     fi
   '';
 
-in import ./make-test-python.nix {
+in import ./make-test-python.nix ({ lib, ... }: {
   name = "acme";
+  meta.maintainers = lib.teams.acme.members;
 
   nodes = rec {
-    letsencrypt = { nodes, lib, ... }: {
-      imports = [ ./common/letsencrypt ];
+    acme = { nodes, lib, ... }: {
+      imports = [ ./common/acme/server ];
       networking.nameservers = lib.mkForce [
         nodes.dnsserver.config.networking.primaryIPAddress
       ];
@@ -33,8 +34,7 @@ in import ./make-test-python.nix {
         serviceConfig = {
           ExecStart = "${pkgs.pebble}/bin/pebble-challtestsrv -dns01 ':53' -defaultIPv6 '' -defaultIPv4 '${nodes.webserver.config.networking.primaryIPAddress}'";
           # Required to bind on privileged ports.
-          User = "root";
-          Group = "root";
+          AmbientCapabilities = [ "CAP_NET_BIND_SERVICE" ];
         };
       };
     };
@@ -45,19 +45,16 @@ in import ./make-test-python.nix {
         nodes.dnsserver.config.networking.primaryIPAddress
       ];
       networking.firewall.allowedTCPPorts = [ 80 ];
-      security.acme = {
-        server = "https://acme-v02.api.letsencrypt.org/dir";
-        certs."standalone.com" = {
-            webroot = "/var/lib/acme/acme-challenges";
-        };
+      security.acme.certs."standalone.test" = {
+        webroot = "/var/lib/acme/acme-challenges";
       };
-      systemd.targets."acme-finished-standalone.com" = {};
-      systemd.services."acme-standalone.com" = {
-        wants = [ "acme-finished-standalone.com.target" ];
-        before = [ "acme-finished-standalone.com.target" ];
+      systemd.targets."acme-finished-standalone.test" = {};
+      systemd.services."acme-standalone.test" = {
+        wants = [ "acme-finished-standalone.test.target" ];
+        before = [ "acme-finished-standalone.test.target" ];
       };
       services.nginx.enable = true;
-      services.nginx.virtualHosts."standalone.com" = {
+      services.nginx.virtualHosts."standalone.test" = {
         locations."/.well-known/acme-challenge".root = "/var/lib/acme/acme-challenges";
       };
     };
@@ -71,16 +68,16 @@ in import ./make-test-python.nix {
 
       # A target remains active. Use this to probe the fact that
       # a service fired even though it is not RemainAfterExit
-      systemd.targets."acme-finished-a.example.com" = {};
-      systemd.services."acme-a.example.com" = {
-        wants = [ "acme-finished-a.example.com.target" ];
-        before = [ "acme-finished-a.example.com.target" ];
+      systemd.targets."acme-finished-a.example.test" = {};
+      systemd.services."acme-a.example.test" = {
+        wants = [ "acme-finished-a.example.test.target" ];
+        before = [ "acme-finished-a.example.test.target" ];
         after = [ "nginx.service" ];
       };
 
       services.nginx.enable = true;
 
-      services.nginx.virtualHosts."a.example.com" = {
+      services.nginx.virtualHosts."a.example.test" = {
         enableACME = true;
         forceSSL = true;
         locations."/".root = pkgs.runCommand "docroot" {} ''
@@ -89,54 +86,52 @@ in import ./make-test-python.nix {
         '';
       };
 
-      security.acme.server = "https://acme-v02.api.letsencrypt.org/dir";
-
-      nesting.clone = [
-        ({pkgs, ...}: {
-          systemd.targets."acme-finished-b.example.com" = {};
-          systemd.services."acme-b.example.com" = {
-            wants = [ "acme-finished-b.example.com.target" ];
-            before = [ "acme-finished-b.example.com.target" ];
-            after = [ "nginx.service" ];
-          };
-          services.nginx.virtualHosts."b.example.com" = {
-            enableACME = true;
-            forceSSL = true;
-            locations."/".root = pkgs.runCommand "docroot" {} ''
-              mkdir -p "$out"
-              echo hello world > "$out/index.html"
-            '';
-          };
-        })
-        ({pkgs, config, nodes, lib, ...}: {
-          security.acme.certs."example.com" = {
-            domain = "*.example.com";
-            dnsProvider = "exec";
-            dnsPropagationCheck = false;
-            credentialsFile = with pkgs; writeText "wildcard.env" ''
-              EXEC_PATH=${dnsScript { inherit writeScript bash curl; dnsAddress = nodes.dnsserver.config.networking.primaryIPAddress; }}
-            '';
-            user = config.services.nginx.user;
-            group = config.services.nginx.group;
-          };
-          systemd.targets."acme-finished-example.com" = {};
-          systemd.services."acme-example.com" = {
-            wants = [ "acme-finished-example.com.target" ];
-            before = [ "acme-finished-example.com.target" "nginx.service" ];
-            wantedBy = [ "nginx.service" ];
-          };
-          services.nginx.virtualHosts."c.example.com" = {
-            forceSSL = true;
-            sslCertificate = config.security.acme.certs."example.com".directory + "/cert.pem";
-            sslTrustedCertificate = config.security.acme.certs."example.com".directory + "/full.pem";
-            sslCertificateKey = config.security.acme.certs."example.com".directory + "/key.pem";
-            locations."/".root = pkgs.runCommand "docroot" {} ''
-              mkdir -p "$out"
-              echo hello world > "$out/index.html"
-            '';
-          };
-        })
-      ];
+      security.acme.server = "https://acme.test/dir";
+
+      specialisation.second-cert.configuration = {pkgs, ...}: {
+        systemd.targets."acme-finished-b.example.test" = {};
+        systemd.services."acme-b.example.test" = {
+          wants = [ "acme-finished-b.example.test.target" ];
+          before = [ "acme-finished-b.example.test.target" ];
+          after = [ "nginx.service" ];
+        };
+        services.nginx.virtualHosts."b.example.test" = {
+          enableACME = true;
+          forceSSL = true;
+          locations."/".root = pkgs.runCommand "docroot" {} ''
+            mkdir -p "$out"
+            echo hello world > "$out/index.html"
+          '';
+        };
+      };
+      specialisation.dns-01.configuration = {pkgs, config, nodes, lib, ...}: {
+        security.acme.certs."example.test" = {
+          domain = "*.example.test";
+          dnsProvider = "exec";
+          dnsPropagationCheck = false;
+          credentialsFile = with pkgs; writeText "wildcard.env" ''
+            EXEC_PATH=${dnsScript { inherit writeScript bash curl; dnsAddress = nodes.dnsserver.config.networking.primaryIPAddress; }}
+          '';
+          user = config.services.nginx.user;
+          group = config.services.nginx.group;
+        };
+        systemd.targets."acme-finished-example.test" = {};
+        systemd.services."acme-example.test" = {
+          wants = [ "acme-finished-example.test.target" ];
+          before = [ "acme-finished-example.test.target" "nginx.service" ];
+          wantedBy = [ "nginx.service" ];
+        };
+        services.nginx.virtualHosts."c.example.test" = {
+          forceSSL = true;
+          sslCertificate = config.security.acme.certs."example.test".directory + "/cert.pem";
+          sslTrustedCertificate = config.security.acme.certs."example.test".directory + "/full.pem";
+          sslCertificateKey = config.security.acme.certs."example.test".directory + "/key.pem";
+          locations."/".root = pkgs.runCommand "docroot" {} ''
+            mkdir -p "$out"
+            echo hello world > "$out/index.html"
+          '';
+        };
+      };
     };
 
     client = {nodes, lib, ...}: {
@@ -161,46 +156,44 @@ in import ./make-test-python.nix {
       client.start()
       dnsserver.start()
 
-      letsencrypt.wait_for_unit("default.target")
+      acme.wait_for_unit("default.target")
       dnsserver.wait_for_unit("pebble-challtestsrv.service")
       client.succeed(
-          'curl --data \'{"host": "acme-v02.api.letsencrypt.org", "addresses": ["${nodes.letsencrypt.config.networking.primaryIPAddress}"]}\' http://${nodes.dnsserver.config.networking.primaryIPAddress}:8055/add-a'
+          'curl --data \'{"host": "acme.test", "addresses": ["${nodes.acme.config.networking.primaryIPAddress}"]}\' http://${nodes.dnsserver.config.networking.primaryIPAddress}:8055/add-a'
       )
       client.succeed(
-          'curl --data \'{"host": "standalone.com", "addresses": ["${nodes.acmeStandalone.config.networking.primaryIPAddress}"]}\' http://${nodes.dnsserver.config.networking.primaryIPAddress}:8055/add-a'
+          'curl --data \'{"host": "standalone.test", "addresses": ["${nodes.acmeStandalone.config.networking.primaryIPAddress}"]}\' http://${nodes.dnsserver.config.networking.primaryIPAddress}:8055/add-a'
       )
 
-      letsencrypt.start()
+      acme.start()
       acmeStandalone.start()
 
-      letsencrypt.wait_for_unit("default.target")
-      letsencrypt.wait_for_unit("pebble.service")
+      acme.wait_for_unit("default.target")
+      acme.wait_for_unit("pebble.service")
 
       with subtest("can request certificate with HTTPS-01 challenge"):
           acmeStandalone.wait_for_unit("default.target")
-          acmeStandalone.succeed("systemctl start acme-standalone.com.service")
-          acmeStandalone.wait_for_unit("acme-finished-standalone.com.target")
+          acmeStandalone.succeed("systemctl start acme-standalone.test.service")
+          acmeStandalone.wait_for_unit("acme-finished-standalone.test.target")
 
       client.wait_for_unit("default.target")
 
-      client.succeed("curl https://acme-v02.api.letsencrypt.org:15000/roots/0 > /tmp/ca.crt")
-      client.succeed(
-          "curl https://acme-v02.api.letsencrypt.org:15000/intermediate-keys/0 >> /tmp/ca.crt"
-      )
+      client.succeed("curl https://acme.test:15000/roots/0 > /tmp/ca.crt")
+      client.succeed("curl https://acme.test:15000/intermediate-keys/0 >> /tmp/ca.crt")
 
       with subtest("Can request certificate for nginx service"):
-          webserver.wait_for_unit("acme-finished-a.example.com.target")
+          webserver.wait_for_unit("acme-finished-a.example.test.target")
           client.succeed(
-              "curl --cacert /tmp/ca.crt https://a.example.com/ | grep -qF 'hello world'"
+              "curl --cacert /tmp/ca.crt https://a.example.test/ | grep -qF 'hello world'"
           )
 
       with subtest("Can add another certificate for nginx service"):
           webserver.succeed(
-              "/run/current-system/fine-tune/child-1/bin/switch-to-configuration test"
+              "/run/current-system/specialisation/second-cert/bin/switch-to-configuration test"
           )
-          webserver.wait_for_unit("acme-finished-b.example.com.target")
+          webserver.wait_for_unit("acme-finished-b.example.test.target")
           client.succeed(
-              "curl --cacert /tmp/ca.crt https://b.example.com/ | grep -qF 'hello world'"
+              "curl --cacert /tmp/ca.crt https://b.example.test/ | grep -qF 'hello world'"
           )
 
       with subtest("Can request wildcard certificates using DNS-01 challenge"):
@@ -208,11 +201,11 @@ in import ./make-test-python.nix {
               "${switchToNewServer}"
           )
           webserver.succeed(
-              "/run/current-system/fine-tune/child-2/bin/switch-to-configuration test"
+              "/run/current-system/specialisation/dns-01/bin/switch-to-configuration test"
           )
-          webserver.wait_for_unit("acme-finished-example.com.target")
+          webserver.wait_for_unit("acme-finished-example.test.target")
           client.succeed(
-              "curl --cacert /tmp/ca.crt https://c.example.com/ | grep -qF 'hello world'"
+              "curl --cacert /tmp/ca.crt https://c.example.test/ | grep -qF 'hello world'"
           )
     '';
-}
+})
diff --git a/nixos/tests/all-tests.nix b/nixos/tests/all-tests.nix
index 76ca4941617..5a0c9d1afae 100644
--- a/nixos/tests/all-tests.nix
+++ b/nixos/tests/all-tests.nix
@@ -22,9 +22,8 @@ let
 in
 {
   _3proxy = handleTest ./3proxy.nix {};
-  acme = handleTestOn ["x86_64-linux"] ./acme.nix {};
+  acme = handleTest ./acme.nix {};
   atd = handleTest ./atd.nix {};
-  automysqlbackup = handleTest ./automysqlbackup.nix {};
   avahi = handleTest ./avahi.nix {};
   babeld = handleTest ./babeld.nix {};
   bcachefs = handleTestOn ["x86_64-linux"] ./bcachefs.nix {}; # linux-4.18.2018.10.12 is unsupported on aarch64
@@ -51,7 +50,9 @@ in
   cloud-init = handleTest ./cloud-init.nix {};
   codimd = handleTest ./codimd.nix {};
   consul = handleTest ./consul.nix {};
+  cockroachdb = handleTestOn ["x86_64-linux"] ./cockroachdb.nix {};
   containers-bridge = handleTest ./containers-bridge.nix {};
+  containers-custom-pkgs = handleTest ./containers-custom-pkgs.nix {};
   containers-ephemeral = handleTest ./containers-ephemeral.nix {};
   containers-extra_veth = handleTest ./containers-extra_veth.nix {};
   containers-hosts = handleTest ./containers-hosts.nix {};
@@ -67,8 +68,9 @@ in
   deluge = handleTest ./deluge.nix {};
   dhparams = handleTest ./dhparams.nix {};
   dnscrypt-proxy2 = handleTestOn ["x86_64-linux"] ./dnscrypt-proxy2.nix {};
+  doas = handleTest ./doas.nix {};
   docker = handleTestOn ["x86_64-linux"] ./docker.nix {};
-  docker-containers = handleTestOn ["x86_64-linux"] ./docker-containers.nix {};
+  oci-containers = handleTestOn ["x86_64-linux"] ./oci-containers.nix {};
   docker-edge = handleTestOn ["x86_64-linux"] ./docker-edge.nix {};
   docker-preloader = handleTestOn ["x86_64-linux"] ./docker-preloader.nix {};
   docker-registry = handleTest ./docker-registry.nix {};
@@ -83,6 +85,7 @@ in
   ecryptfs = handleTest ./ecryptfs.nix {};
   ejabberd = handleTest ./xmpp/ejabberd.nix {};
   elk = handleTestOn ["x86_64-linux"] ./elk.nix {};
+  enlightenment = handleTest ./enlightenment.nix {};
   env = handleTest ./env.nix {};
   etcd = handleTestOn ["x86_64-linux"] ./etcd.nix {};
   etcd-cluster = handleTestOn ["x86_64-linux"] ./etcd-cluster.nix {};
@@ -141,11 +144,13 @@ in
   initrdNetwork = handleTest ./initrd-network.nix {};
   installer = handleTest ./installer.nix {};
   iodine = handleTest ./iodine.nix {};
+  ipfs = handleTest ./ipfs.nix {};
   ipv6 = handleTest ./ipv6.nix {};
   jackett = handleTest ./jackett.nix {};
   jellyfin = handleTest ./jellyfin.nix {};
   jenkins = handleTest ./jenkins.nix {};
   jirafeau = handleTest ./jirafeau.nix {};
+  k3s = handleTest ./k3s.nix {};
   kafka = handleTest ./kafka.nix {};
   keepalived = handleTest ./keepalived.nix {};
   kerberos = handleTest ./kerberos/default.nix {};
@@ -159,8 +164,8 @@ in
   # kubernetes.e2e should eventually replace kubernetes.rbac when it works
   #kubernetes.e2e = handleTestOn ["x86_64-linux"] ./kubernetes/e2e.nix {};
   kubernetes.rbac = handleTestOn ["x86_64-linux"] ./kubernetes/rbac.nix {};
+  latestKernel.hardened = handleTest ./hardened.nix { latestKernel = true; };
   latestKernel.login = handleTest ./login.nix { latestKernel = true; };
-  ldap = handleTest ./ldap.nix {};
   leaps = handleTest ./leaps.nix {};
   lidarr = handleTest ./lidarr.nix {};
   lightdm = handleTest ./lightdm.nix {};
@@ -172,6 +177,8 @@ in
   magnetico = handleTest ./magnetico.nix {};
   magic-wormhole-mailbox-server = handleTest ./magic-wormhole-mailbox-server.nix {};
   mailcatcher = handleTest ./mailcatcher.nix {};
+  mariadb-galera-mariabackup = handleTest ./mysql/mariadb-galera-mariabackup.nix {};
+  mariadb-galera-rsync = handleTest ./mysql/mariadb-galera-rsync.nix {};
   mathics = handleTest ./mathics.nix {};
   matomo = handleTest ./matomo.nix {};
   matrix-synapse = handleTest ./matrix-synapse.nix {};
@@ -193,16 +200,17 @@ in
   munin = handleTest ./munin.nix {};
   mutableUsers = handleTest ./mutable-users.nix {};
   mxisd = handleTest ./mxisd.nix {};
-  mysql = handleTest ./mysql.nix {};
-  mysqlBackup = handleTest ./mysql-backup.nix {};
-  mysqlReplication = handleTest ./mysql-replication.nix {};
+  mysql = handleTest ./mysql/mysql.nix {};
+  mysql-autobackup = handleTest ./mysql/mysql-autobackup.nix {};
+  mysql-backup = handleTest ./mysql/mysql-backup.nix {};
+  mysql-replication = handleTest ./mysql/mysql-replication.nix {};
   nagios = handleTest ./nagios.nix {};
   nat.firewall = handleTest ./nat.nix { withFirewall = true; };
   nat.firewall-conntrack = handleTest ./nat.nix { withFirewall = true; withConntrackHelpers = true; };
   nat.standalone = handleTest ./nat.nix { withFirewall = false; };
   ndppd = handleTest ./ndppd.nix {};
   neo4j = handleTest ./neo4j.nix {};
-  nesting = handleTest ./nesting.nix {};
+  specialisation = handleTest ./specialisation.nix {};
   netdata = handleTest ./netdata.nix {};
   networking.networkd = handleTest ./networking.nix { networkd = true; };
   networking.scripted = handleTest ./networking.nix { networkd = false; };
@@ -244,6 +252,7 @@ in
   php = handleTest ./php {};
   plasma5 = handleTest ./plasma5.nix {};
   plotinus = handleTest ./plotinus.nix {};
+  podman = handleTest ./podman.nix {};
   postgis = handleTest ./postgis.nix {};
   postgresql = handleTest ./postgresql.nix {};
   postgresql-wal-receiver = handleTest ./postgresql-wal-receiver.nix {};
@@ -282,6 +291,7 @@ in
   snapper = handleTest ./snapper.nix {};
   solr = handleTest ./solr.nix {};
   spacecookie = handleTest ./spacecookie.nix {};
+  spike = handleTest ./spike.nix {};
   sonarr = handleTest ./sonarr.nix {};
   strongswan-swanctl = handleTest ./strongswan-swanctl.nix {};
   sudo = handleTest ./sudo.nix {};
@@ -291,10 +301,13 @@ in
   syncthing-relay = handleTest ./syncthing-relay.nix {};
   systemd = handleTest ./systemd.nix {};
   systemd-analyze = handleTest ./systemd-analyze.nix {};
+  systemd-boot = handleTestOn ["x86_64-linux"] ./systemd-boot.nix {};
   systemd-confinement = handleTest ./systemd-confinement.nix {};
   systemd-timesyncd = handleTest ./systemd-timesyncd.nix {};
   systemd-networkd-vrf = handleTest ./systemd-networkd-vrf.nix {};
   systemd-networkd = handleTest ./systemd-networkd.nix {};
+  systemd-networkd-dhcpserver = handleTest ./systemd-networkd-dhcpserver.nix {};
+  systemd-networkd-ipv6-prefix-delegation = handleTest ./systemd-networkd-ipv6-prefix-delegation.nix {};
   systemd-nspawn = handleTest ./systemd-nspawn.nix {};
   pdns-recursor = handleTest ./pdns-recursor.nix {};
   taskserver = handleTest ./taskserver.nix {};
@@ -303,12 +316,16 @@ in
   timezone = handleTest ./timezone.nix {};
   tinydns = handleTest ./tinydns.nix {};
   tor = handleTest ./tor.nix {};
+  # traefik test relies on docker-containers
+  traefik = handleTestOn ["x86_64-linux"] ./traefik.nix {};
   transmission = handleTest ./transmission.nix {};
   trac = handleTest ./trac.nix {};
   trilium-server = handleTestOn ["x86_64-linux"] ./trilium-server.nix {};
   trezord = handleTest ./trezord.nix {};
   trickster = handleTest ./trickster.nix {};
+  tuptime = handleTest ./tuptime.nix {};
   udisks2 = handleTest ./udisks2.nix {};
+  unit-php = handleTest ./web-servers/unit-php.nix {};
   upnp = handleTest ./upnp.nix {};
   uwsgi = handleTest ./uwsgi.nix {};
   vault = handleTest ./vault.nix {};
diff --git a/nixos/tests/caddy.nix b/nixos/tests/caddy.nix
index fc10df0c79b..144d83179a1 100644
--- a/nixos/tests/caddy.nix
+++ b/nixos/tests/caddy.nix
@@ -20,35 +20,33 @@ import ./make-test-python.nix ({ pkgs, ... }: {
         }
       '';
 
-      nesting.clone = [
-        {
-          services.caddy.config = lib.mkForce ''
-            http://localhost {
-              gzip
+      specialisation.etag.configuration = {
+        services.caddy.config = lib.mkForce ''
+          http://localhost {
+            gzip
 
-              root ${
-                pkgs.runCommand "testdir2" {} ''
-                  mkdir "$out"
-                  echo changed > "$out/example.html"
-                ''
-              }
+            root ${
+              pkgs.runCommand "testdir2" {} ''
+                mkdir "$out"
+                echo changed > "$out/example.html"
+              ''
             }
-          '';
-        }
+          }
+        '';
+      };
 
-        {
-          services.caddy.config = ''
-            http://localhost:8080 {
-            }
-          '';
-        }
-      ];
+      specialisation.config-reload.configuration = {
+        services.caddy.config = ''
+          http://localhost:8080 {
+          }
+        '';
+      };
     };
   };
 
   testScript = { nodes, ... }: let
-    etagSystem = "${nodes.webserver.config.system.build.toplevel}/fine-tune/child-1";
-    justReloadSystem = "${nodes.webserver.config.system.build.toplevel}/fine-tune/child-2";
+    etagSystem = "${nodes.webserver.config.system.build.toplevel}/specialisation/etag";
+    justReloadSystem = "${nodes.webserver.config.system.build.toplevel}/specialisation/config-reload";
   in ''
     url = "http://localhost/example.html"
     webserver.wait_for_unit("caddy")
@@ -77,7 +75,7 @@ import ./make-test-python.nix ({ pkgs, ... }: {
         assert old_etag != new_etag, "Old ETag {} is the same as {}".format(
             old_etag, new_etag
         )
-    
+
     with subtest("config is reloaded on nixos-rebuild switch"):
         webserver.succeed(
             "${justReloadSystem}/bin/switch-to-configuration test >&2"
diff --git a/nixos/tests/chromium.nix b/nixos/tests/chromium.nix
index fc5d3a5c52f..795b93f6f54 100644
--- a/nixos/tests/chromium.nix
+++ b/nixos/tests/chromium.nix
@@ -37,7 +37,7 @@ mapAttrs (channel: chromiumPkg: makeTest rec {
     </head>
     <body onload="javascript:document.title='startup done'">
       <img src="file://${pkgs.fetchurl {
-        url = "http://nixos.org/logo/nixos-hex.svg";
+        url = "https://nixos.org/logo/nixos-hex.svg";
         sha256 = "07ymq6nw8kc22m7kzxjxldhiq8gzmc7f45kq2bvhbdm0w5s112s4";
       }}" />
     </body>
diff --git a/nixos/tests/cockroachdb.nix b/nixos/tests/cockroachdb.nix
index 496283fddc7..d0cc5e19837 100644
--- a/nixos/tests/cockroachdb.nix
+++ b/nixos/tests/cockroachdb.nix
@@ -1,7 +1,7 @@
 # This performs a full 'end-to-end' test of a multi-node CockroachDB cluster
 # using the built-in 'cockroach workload' command, to simulate a semi-realistic
 # test load. It generally takes anywhere from 3-5 minutes to run and 1-2GB of
-# RAM (though each of 3 workers gets 1GB allocated)
+# RAM (though each of 3 workers gets 2GB allocated)
 #
 # CockroachDB requires synchronized system clocks within a small error window
 # (~500ms by default) on each node in order to maintain a multi-node cluster.
@@ -55,7 +55,7 @@ let
 
     {
       # Bank/TPC-C benchmarks take some memory to complete
-      virtualisation.memorySize = 1024;
+      virtualisation.memorySize = 2048;
 
       # Install the KVM PTP "Virtualized Clock" driver. This allows a /dev/ptp0
       # device to appear as a reference clock, synchronized to the host clock.
@@ -88,6 +88,8 @@ let
       services.cockroachdb.listen.address = myAddr;
       services.cockroachdb.join = lib.mkIf (joinNode != null) joinNode;
 
+      systemd.services.chronyd.unitConfig.ConditionPathExists = "/dev/ptp0";
+
       # Hold startup until Chrony has performed its first measurement (which
       # will probably result in a full timeskip, thanks to makestep)
       systemd.services.cockroachdb.preStart = ''
@@ -95,7 +97,7 @@ let
       '';
     };
 
-in import ./make-test.nix ({ pkgs, ...} : {
+in import ./make-test-python.nix ({ pkgs, ...} : {
   name = "cockroachdb";
   meta.maintainers = with pkgs.stdenv.lib.maintainers;
     [ thoughtpolice ];
@@ -110,17 +112,13 @@ in import ./make-test.nix ({ pkgs, ...} : {
   # there's otherwise no way to guarantee that node1 will start before the others try
   # to join it.
   testScript = ''
-    $node1->start;
-    $node1->waitForUnit("cockroachdb");
-
-    $node2->start;
-    $node2->waitForUnit("cockroachdb");
-
-    $node3->start;
-    $node3->waitForUnit("cockroachdb");
-
-    $node1->mustSucceed("cockroach sql --host=192.168.1.1 --insecure -e 'SHOW ALL CLUSTER SETTINGS' 2>&1");
-    $node1->mustSucceed("cockroach workload init bank 'postgresql://root\@192.168.1.1:26257?sslmode=disable'");
-    $node1->mustSucceed("cockroach workload run bank --duration=1m 'postgresql://root\@192.168.1.1:26257?sslmode=disable'");
+    for node in node1, node2, node3:
+        node.start()
+        node.wait_for_unit("cockroachdb")
+    node1.succeed(
+        "cockroach sql --host=192.168.1.1 --insecure -e 'SHOW ALL CLUSTER SETTINGS' 2>&1",
+        "cockroach workload init bank 'postgresql://root@192.168.1.1:26257?sslmode=disable'",
+        "cockroach workload run bank --duration=1m 'postgresql://root@192.168.1.1:26257?sslmode=disable'",
+    )
   '';
 })
diff --git a/nixos/tests/common/acme/client/default.nix b/nixos/tests/common/acme/client/default.nix
new file mode 100644
index 00000000000..80893da0252
--- /dev/null
+++ b/nixos/tests/common/acme/client/default.nix
@@ -0,0 +1,15 @@
+{ lib, nodes, pkgs, ... }:
+
+let
+  acme-ca = nodes.acme.config.test-support.acme.caCert;
+in
+
+{
+  security.acme = {
+    server = "https://acme.test/dir";
+    email = "hostmaster@example.test";
+    acceptTerms = true;
+  };
+
+  security.pki.certificateFiles = [ acme-ca ];
+}
diff --git a/nixos/tests/common/letsencrypt/default.nix b/nixos/tests/common/acme/server/default.nix
index 110a2520971..1a0ee882572 100644
--- a/nixos/tests/common/letsencrypt/default.nix
+++ b/nixos/tests/common/acme/server/default.nix
@@ -1,27 +1,27 @@
 # The certificate for the ACME service is exported as:
 #
-#   config.test-support.letsencrypt.caCert
+#   config.test-support.acme.caCert
 #
 # This value can be used inside the configuration of other test nodes to inject
 # the snakeoil certificate into security.pki.certificateFiles or into package
 # overlays.
 #
 # Another value that's needed if you don't use a custom resolver (see below for
-# notes on that) is to add the letsencrypt node as a nameserver to every node
+# notes on that) is to add the acme node as a nameserver to every node
 # that needs to acquire certificates using ACME, because otherwise the API host
-# for letsencrypt.org can't be resolved.
+# for acme.test can't be resolved.
 #
 # A configuration example of a full node setup using this would be this:
 #
 # {
-#   letsencrypt = import ./common/letsencrypt;
+#   acme = import ./common/acme/server;
 #
 #   example = { nodes, ... }: {
 #     networking.nameservers = [
-#       nodes.letsencrypt.config.networking.primaryIPAddress
+#       nodes.acme.config.networking.primaryIPAddress
 #     ];
 #     security.pki.certificateFiles = [
-#       nodes.letsencrypt.config.test-support.letsencrypt.caCert
+#       nodes.acme.config.test-support.acme.caCert
 #     ];
 #   };
 # }
@@ -33,9 +33,9 @@
 # override networking.nameservers like this:
 #
 # {
-#   letsencrypt = { nodes, ... }: {
-#     imports = [ ./common/letsencrypt ];
-#     networking.nameservers = [
+#   acme = { nodes, lib, ... }: {
+#     imports = [ ./common/acme/server ];
+#     networking.nameservers = lib.mkForce [
 #       nodes.myresolver.config.networking.primaryIPAddress
 #     ];
 #   };
@@ -55,16 +55,16 @@
 let
   snakeOilCerts = import ./snakeoil-certs.nix;
 
-  wfeDomain = "acme-v02.api.letsencrypt.org";
+  wfeDomain = "acme.test";
   wfeCertFile = snakeOilCerts.${wfeDomain}.cert;
   wfeKeyFile = snakeOilCerts.${wfeDomain}.key;
 
-  siteDomain = "letsencrypt.org";
+  siteDomain = "acme.test";
   siteCertFile = snakeOilCerts.${siteDomain}.cert;
   siteKeyFile = snakeOilCerts.${siteDomain}.key;
   pebble = pkgs.pebble;
   resolver = let
-    message = "You need to define a resolver for the letsencrypt test module.";
+    message = "You need to define a resolver for the acme test module.";
     firstNS = lib.head config.networking.nameservers;
   in if config.networking.nameservers == [] then throw message else firstNS;
 
@@ -76,15 +76,16 @@ let
     httpPort = 80;
     tlsPort = 443;
     ocspResponderURL = "http://0.0.0.0:4002";
+    strict = true;
   };
 
   pebbleConfFile = pkgs.writeText "pebble.conf" (builtins.toJSON pebbleConf);
   pebbleDataDir = "/root/pebble";
 
 in {
-  imports = [ ../resolver.nix ];
+  imports = [ ../../resolver.nix ];
 
-  options.test-support.letsencrypt.caCert = lib.mkOption {
+  options.test-support.acme.caCert = lib.mkOption {
     type = lib.types.path;
     description = ''
       A certificate file to use with the <literal>nodes</literal> attribute to
@@ -98,7 +99,7 @@ in {
       resolver.enable = let
         isLocalResolver = config.networking.nameservers == [ "127.0.0.1" ];
       in lib.mkOverride 900 isLocalResolver;
-      letsencrypt.caCert = snakeOilCerts.ca.cert;
+      acme.caCert = snakeOilCerts.ca.cert;
     };
 
     # This has priority 140, because modules/testing/test-instrumentation.nix
@@ -126,8 +127,7 @@ in {
         '';
         serviceConfig = {
           # Required to bind on privileged ports.
-          User = "root";
-          Group = "root";
+          AmbientCapabilities = [ "CAP_NET_BIND_SERVICE" ];
         };
       };
     };
diff --git a/nixos/tests/common/letsencrypt/mkcerts.nix b/nixos/tests/common/acme/server/mkcerts.nix
index e7ac2bae46b..2474019cbac 100644
--- a/nixos/tests/common/letsencrypt/mkcerts.nix
+++ b/nixos/tests/common/acme/server/mkcerts.nix
@@ -1,10 +1,9 @@
 { pkgs ? import <nixpkgs> {}
 , lib ? pkgs.lib
-
-, domains ? [ "acme-v02.api.letsencrypt.org" "letsencrypt.org" ]
+, domains ? [ "acme.test" ]
 }:
 
-pkgs.runCommand "letsencrypt-snakeoil-ca" {
+pkgs.runCommand "acme-snakeoil-ca" {
   nativeBuildInputs = [ pkgs.openssl ];
 } ''
   addpem() {
diff --git a/nixos/tests/common/letsencrypt/mkcerts.sh b/nixos/tests/common/acme/server/mkcerts.sh
index cc7f8ca650d..cc7f8ca650d 100755
--- a/nixos/tests/common/letsencrypt/mkcerts.sh
+++ b/nixos/tests/common/acme/server/mkcerts.sh
diff --git a/nixos/tests/common/acme/server/snakeoil-certs.nix b/nixos/tests/common/acme/server/snakeoil-certs.nix
new file mode 100644
index 00000000000..fd537c3260f
--- /dev/null
+++ b/nixos/tests/common/acme/server/snakeoil-certs.nix
@@ -0,0 +1,171 @@
+# Generated via mkcerts.sh in the same directory.
+{
+  ca.key = builtins.toFile "ca.key" ''
+    -----BEGIN PRIVATE KEY-----
+    MIIJQwIBADANBgkqhkiG9w0BAQEFAASCCS0wggkpAgEAAoICAQDCnVZGEn68ezXl
+    DWE5gjsCPqutR4nxw/wvIbAxB2Vk2WeQ6HGvt2Jdrz5qer2IXd76YtpQeqd+ffet
+    aLtMeFTr+Xy9yqEpx2AfvmEEcLnuiWbsUGZzsHwW7/4kPgAFBy9TwJn/k892lR6u
+    QYa0QS39CX85kLMZ/LZXUyClIBa+IxT1OovmGqMOr4nGASRQP6d/nnyn41Knat/d
+    tpyaa5zgfYwA6YW6UxcywvBSpMOXM0/82BFZGyALt3nQ+ffmrtKcvMjsNLBFaslV
+    +zYO1PMbLbTCW8SmJTjhzuapXtBHruvoe24133XWlvcP1ylaTx0alwiQWJr1XEOU
+    WLEFTgOTeRyiVDxDunpz+7oGcwzcdOG8nCgd6w0aYaECz1zvS3FYTQz+MiqmEkx6
+    s4bj1U90I0kwUJbeWjjrGO7Y9Qq4i19GafDg7cAMn9eHCiNbNrPj6t/gfaVbCrbk
+    m3ZVjkvLTQ2mb2lv7+tVii45227iNPuNS6lx2FVlr/DXiRrOVfghPvoOxUfXzogJ
+    hZLV4Zki+ycbGQa5w8YMDYCv4c08dKA7AatVhNS60c1zgQNjuWF3BvocSySyGUon
+    VT6h1DYlJ9YAqgqNpedgNR9kpp034SMhB7dj9leB6LRMA+c1fG/T+1lDbkA+vope
+    pt4+30oDcCTYfEifl1HwqNw/bXDm1wIDAQABAoICABPbd/UYaAQVUk93yQbUKe81
+    s9CvbvzTMYUhm9e02Hyszitz/D2gqZHDksvMkFA8u8aylXIGwdZfRglUmV/ZG1kk
+    kLzQ0xbvN/ilNUL9uYsETBMqtPly9YZloHnUNa5NqF+UVGJGk7GWz5WaLANybx3V
+    fTzDbfLl3TkVy0vt9UQbUkUfXyzwZNjXwmgIr8rcY9vasP90a3eXqRX3Tw1Wk6A4
+    TzO8oB994O0WBO150Fc6Lhwvc72yzddENlLDXq8UAXtqq9mmGqJKnhZ+1mo3AkMw
+    q7P1JyCIxcAMm26GtRvLVljXV0x5640kxDrCin6jeeW/qWkJEW6dpmuZjR5scmLI
+    /9n8H+fGzdZH8bOPPotMy12doj3vJqvew3p0eIkmVctYMJKD0j/CWjvKJNE3Yx4O
+    Ls47X/dEypX6anR1HQUXcpd6JfRWdIJANo2Duaz+HYbyA88bHcJL9shFYcjLs3sX
+    R/TvnnKHvw/ud7XBgvLGwGAf/cDEuLI2tv+V7tkMGrMUv+gUJNZaJaCpdt+1iUwO
+    QFq8APyBNn6FFw54TwXWfSjfSNh3geIMLHuErYVu9MIXvB7Yhh+ZvLcfLbmckhAX
+    wb39RRHnCWvnw5Bm9hnsDhqfDsIoP+2wvUkViyHOmrKi8nSJhSk19C8AuQtSVcJg
+    5op+epEmjt70GHt52nuBAoIBAQD2a4Ftp4QxWE2d6oAFI6WPrX7nAwI5/ezCbO/h
+    yoYAn6ucTVnn5/5ITJ8V4WTWZ4lkoZP3YSJiCyBhs8fN63J+RaJ/bFRblHDns1HA
+    2nlMVdNLg6uOfjgUJ8Y6xVM0J2dcFtwIFyK5pfZ7loxMZfvuovg74vDOi2vnO3dO
+    16DP3zUx6B/yIt57CYn8NWTq+MO2bzKUnczUQRx0yEzPOfOmVbcqGP8f7WEdDWXm
+    7scjjN53OPyKzLOVEhOMsUhIMBMO25I9ZpcVkyj3/nj+fFLf/XjOTM00M/S/KnOj
+    RwaWffx6mSYS66qNc5JSsojhIiYyiGVEWIznBpNWDU35y/uXAoIBAQDKLj0dyig2
+    kj1r3HvdgK4sRULqBQFMqE9ylxDmpJxAj6/A8hJ0RCBR57vnIIZMzK4+6K0l3VBJ
+    ukzXJHJLPkZ0Uuo2zLuRLkyjBECH6KYznyTkUVRn50Oq6IoP6WTCfd3Eg+7AKYY1
+    VFo2iR8sxeSQQ+AylFy6QcQ1xPIW30Jj1/LFjrRdRggapPEekpJec0pEqhasT8rR
+    UFhRL2NdZnL5b7ZlsJc7gZKEJgNfxgzaCzloqLcjCgGpOhLKx0fFsNOqHcbIGMwG
+    6wQCOyNghQJ6AZtRD5TYCJow92FchWjoTIaMJ8RjMKQmxpiwM6wQG4J78Hd3mbhf
+    q0hiQhPHaNbBAoIBAFeIeMFq8BpXM7sUwcURlI4lIx8Mgo33FVM7PzsFpfQyw9MR
+    5w3p6vnjvd8X4aoHvVZxzw3hA0WwjiAmrKMJL/KK6d45rP2bDUBBAplvAgeLtTLt
+    4tMLIwCF4HSgA55TIPQlaqO1FDC+M4BTSiMZVxS970/WnZPBEuNgzFDFZ+pvb4X6
+    3t40ZLNwAAQHM4IEPAFiHqWMKGZ9eo5BWIeEHnjHmfjqSDYfLJAVYk1WJIcMUzom
+    lA76CBC8CxW/I94AtcRhWuFUv/Z5/+OYEYLUxtuqPm+J+JrCmf4OJmWppT1wI2+p
+    V00BSeRVWXTm1piieM8ahF5y1hp6y3uV3k0NmKECggEBAMC42Ms3s6NpPSE+99eJ
+    3P0YPJOkl7uByNGbTKH+kW89SDRsy8iGVCSe9892gm5cwU/4LWyljO3qp2qBNG2i
+    /DfP/bCk8bqPXsAZwoWK8DrO3bTCDepJWYhlx40pVkHLBwVXGdOVAXh+YswPY2cj
+    cB9QhDrSj52AKU9z36yLvtY7uBA3Wph6tCjpx2n0H4/m6AmR9LDmEpf5tWYV/OrA
+    SKKaqUw/y7kOZyKOtbKqr/98qYmpIYFF/ZVZZSZkVXcNeoZzgdOlR37ksVqLEsrj
+    nxu7wli/uItBj/FTLjyqcvjUUYDyO1KtwBuyPUPgzYhBIN2Rt9+K6WRQelwnToFL
+    30ECggEBALzozykZj2sr3z8tQQRZuXLGotUFGsQCB8ikeqoeB8FbNNkC+qgflQGv
+    zLRB2KWOvnboc94wVgBJH43xG0HBibZnBhUO8/HBI/WlmyEj9KQ/ZskUK4GVZkB6
+    r/81ASLwH+P/rqrLEjcp1SIPPevjzCWD9VYR5m/qPHLNxStwGSrPjtPzgaFxhq84
+    Jl+YVmNqVlrOKYYfIPh8exPLiTti3wfM61pVYFv56PI2gd5ysMWYnuN+vK0sbmZh
+    cIWwykcKlODIngI7IzYqt8NuIJI0jrYyHgtUw4jaJzdF4mEOplGONxdz15jAGHtg
+    JUsBXFNz132nP4iIr3UKrPedQZijSi4=
+    -----END PRIVATE KEY-----
+  '';
+  ca.cert = builtins.toFile "ca.cert" ''
+    -----BEGIN CERTIFICATE-----
+    MIIFDzCCAvegAwIBAgIUTRDYSWJvmlhwIR3pzVrIQfnboLEwDQYJKoZIhvcNAQEL
+    BQAwFjEUMBIGA1UEAwwLU25ha2VvaWwgQ0EwIBcNMjAwMzIyMjI1NjE3WhgPMjEy
+    MDAyMjcyMjU2MTdaMBYxFDASBgNVBAMMC1NuYWtlb2lsIENBMIICIjANBgkqhkiG
+    9w0BAQEFAAOCAg8AMIICCgKCAgEAwp1WRhJ+vHs15Q1hOYI7Aj6rrUeJ8cP8LyGw
+    MQdlZNlnkOhxr7diXa8+anq9iF3e+mLaUHqnfn33rWi7THhU6/l8vcqhKcdgH75h
+    BHC57olm7FBmc7B8Fu/+JD4ABQcvU8CZ/5PPdpUerkGGtEEt/Ql/OZCzGfy2V1Mg
+    pSAWviMU9TqL5hqjDq+JxgEkUD+nf558p+NSp2rf3bacmmuc4H2MAOmFulMXMsLw
+    UqTDlzNP/NgRWRsgC7d50Pn35q7SnLzI7DSwRWrJVfs2DtTzGy20wlvEpiU44c7m
+    qV7QR67r6HtuNd911pb3D9cpWk8dGpcIkFia9VxDlFixBU4Dk3kcolQ8Q7p6c/u6
+    BnMM3HThvJwoHesNGmGhAs9c70txWE0M/jIqphJMerOG49VPdCNJMFCW3lo46xju
+    2PUKuItfRmnw4O3ADJ/XhwojWzaz4+rf4H2lWwq25Jt2VY5Ly00Npm9pb+/rVYou
+    Odtu4jT7jUupcdhVZa/w14kazlX4IT76DsVH186ICYWS1eGZIvsnGxkGucPGDA2A
+    r+HNPHSgOwGrVYTUutHNc4EDY7lhdwb6HEskshlKJ1U+odQ2JSfWAKoKjaXnYDUf
+    ZKadN+EjIQe3Y/ZXgei0TAPnNXxv0/tZQ25APr6KXqbePt9KA3Ak2HxIn5dR8Kjc
+    P21w5tcCAwEAAaNTMFEwHQYDVR0OBBYEFCIoeYSYjtMiPrmxfHmcrsZkyTpvMB8G
+    A1UdIwQYMBaAFCIoeYSYjtMiPrmxfHmcrsZkyTpvMA8GA1UdEwEB/wQFMAMBAf8w
+    DQYJKoZIhvcNAQELBQADggIBAHPdwOgAxyhIhbqFObNftW8K3sptorB/Fj6jwYCm
+    mHleFueqQnjTHMWsflOjREvQp1M307FWooGj+KQkjwvAyDc/Hmy7WgJxBg9p3vc+
+    /Xf/e7ZfBl8rv7vH8VXW/BC1vVsILdFncrgTrP8/4psV50/cl1F4+nPBiekvvxwZ
+    k+R7SgeSvcWT7YlOG8tm1M3al4F4mWzSRkYjkrXmwRCKAiya9xcGSt0Bob+LoM/O
+    mpDGV/PMC1WAoDc1mMuXN2hSc0n68xMcuFs+dj/nQYn8uL5pzOxpX9560ynKyLDv
+    yOzQlM2VuZ7H2hSIeYOFgrtHJJwhDtzjmUNDQpQdp9Fx+LONQTS1VLCTXND2i/3F
+    10X6PkdnLEn09RiPt5qy20pQkICxoEydmlwpFs32musYfJPdBPkZqZWrwINBv2Wb
+    HfOmEB4xUvXufZ5Ju5icgggBkyNA3PCLo0GZFRrMtvA7i9IXOcXNR+njhKa9246V
+    QQfeWiz05RmIvgShJYVsnZWtael8ni366d+UXypBYncohimyNlAD1n+Bh3z0PvBB
+    +FK4JgOSeouM4SuBHdwmlZ/H0mvfUG81Y8Jbrw0yuRHtuCtX5HpN5GKpZPHDE7aQ
+    fEShVB/GElC3n3DvgK9OJBeVVhYQgUEfJi4rsSxt3cdEI0NrdckUoZbApWVJ3CBc
+    F8Y7
+    -----END CERTIFICATE-----
+  '';
+  "acme.test".key = builtins.toFile "acme.test.key" ''
+    -----BEGIN RSA PRIVATE KEY-----
+    MIIJKAIBAAKCAgEAlgQTZjKfs3aHw0J993k7jFAs+hVRPf//zHMAiUkPKUYPTSl1
+    TxS/bPbhWzSoom00j4SLhGGGhbd+lnvTg0uvKbxskgATfw5clbm1ZN+gx4DuxwjL
+    V3xIxpeSY+PKzs5z8w/k+AJh+zOPyXwH3ut3C+ogp1S/5IhmzV3a/yU/6k0zpGxj
+    N6ZPRTXFrz93I1pPeCkJz90l7tj+2uFc9xtM20NQX52f0Y2oShcG8fKdNZVzuHHk
+    ZXkrZIhou55/nRy2jKgFeD3GQQfa9rwPWrVybQ6tKMMkoazB/Unky9xcTI2LJarf
+    xgHDO9v9yFBvmR4UM8B3kM82NHoENtHaZ2mmiMGZzTEQlf8xwYyHFrqBFIVRWEUr
+    7rr/O5Qr9gIN0T4u367HCexVYAKzbO2P9h75czzjMMoGkbXze9SMQ/ikrxEmwAHg
+    r1Xxh6iQYmgPNk8AR3d9+o2I7WJZMUYZARLnuhVr9BNXv510iqZTqX8lcyL5fEj3
+    ST4Ab+H7rfevZt6NU26iJLBYAjrA2mSvH+wvkboxrgSS8xYPkOW8NLNEbbodzofI
+    pB+SaK53OIk0bj9c1YAgrSNER/TDTgDXrWUNrlfVZ/M7+AEdeU06wi7sVhVif6OB
+    D3OpgKSNjeE6TuJH80Pi5MWugSFBr792Xb6uhVoPiVOFN+qiGB6UkwBgSKkCAwEA
+    AQKCAgAmN7OZfZwh5DiCDhZ5TXFWNba/n16rJOTN+R5R20L5iNetGLrCAs8hu2N+
+    ENRFTPzu8x14BEB5IF4niDRCZq2hPFeMemh9HfOIUV9c63vSV459NkhXaVpA/axV
+    tlqchQwVCB+U70Z28JPZCLgYmnQhnOvktTqNxhIqj5aTGbJGxpQ5d0Nvkfbv8tsB
+    4nE/mGpWel39jqFzT+Tdbjx414Ok+GkpcsacZDJTbbpfOSfD1uc8PgepskzTt8y2
+    v5JTPFVlUAjUsSgouQ+XfCGNQlx8XBjRIaXbal+hX4niRald91FTr0yC7UAHp+vn
+    dFZ586fB526OfbuZctxP+vZhEhFSseQKxHQ0tB8me81xH44daVNr9PPUM69FDT3j
+    ygJaUJjNEG3vVzePCDzhmxTmz2/rAClp77WTWziBWDoA6YWDDzhgNPrXWzLIbZIx
+    ue9ZbGEOh/u5ZzrEXxKCz9FjDe9wQu3TeYUe0M+ejzwWgn7zdWDvjjmtLUUuun2Y
+    wW7WANpu32qvB/V+qssw4O63tbRiwneRCnb8AF2ixgyWr6xyZwch4kacv1KMiixf
+    gO/5GTj7ba5GcdGoktJb29cUEgz13yPd106RsHK4vcggFxfMbOVauNRIo6ddLwyS
+    8UMxLf2i2cToOLkHZrIb8FgimmzRoBd3yYzwVJBydiVcsrHQAQKCAQEAxlzFYCiQ
+    hjEtblGnrkOC7Hx6HvqMelViOkGN8Y9VczG4GhwntmSE2nbpaAKhFBGdLfuSI3tJ
+    Lf24f0IGgAhzPmpo2TjbxPO3YSKFTH71fznVBhtQ1iSxwZ1InXktnuhot6VSDx6A
+    sbHSy1hMFy3nj+Zj5+fQ89tclzBzG9bCShaauO39KrPMwKi6CYoYdGhXBC3+OpHY
+    zBNvmDTxG2kW8L42rlf14EH4pAlgKs4eeZbpcbZ6fXURP2hToHJ8swyKw/1p12WA
+    cc19BKFJXL8nNP4uCf/fI0mVYpytz5KwUzG+z+umDqk+RRCH4mNB28xvEEuEyp/e
+    /C5Is+WrlDAA6QKCAQEAwZsK4AJ/w4Xf4Q/SsnZJO9bfP1ejJjzKElt8rG28JXeb
+    +FjykZZ6vw2gt2Boest2n9N4fBwaRkaHVtVS4iAmaDXozTlcvCLs2rVjPSguuQtW
+    80CKg6+dux+6gFN8IGzDCiX3pWUnhhiXvCcRYEcvgpH6GA5vuCNrXrjH0JFC0kef
+    aaDMGMTbzhc2IIRztmWU4v8YJSSy5KOkIQLWO+7u9aGx9IqT5/z3gx3XrItyl0Bk
+    aQmZEh7JOSyhmGhhf5LdeTLu2YgRw3/tzS+lPMX3+UPw99k9MdTOFn2pww5AdRmg
+    aBIzV+/LBYG0pPRl0D8/6yzGVBPuUDQpmK9Z3gsxwQKCAQEAnNkMZN2Ocd1+6+V7
+    LmtJog9HTSmWXMEZG7FsOJ661Yxx44txx2IyPsCaDNlPXxwSaiKrSo0Yr1oZQd8G
+    XsTPw4HGiETSWijQTulJ99PH8SLck6iTwdBgEhV5LrN75FQnQVdizHu1DUzrvkiC
+    Wi29FWb6howiCEDjNNVln5SwKn83NpVQgyyK8ag4+oQMlDdQ3wgzJ0Ld53hS3Eq4
+    f5EYR6JQgIki7YGcxrB3L0GujTxMONMuhfdEfRvUTGFawwVe0FyYDW7AIrx2Z2vV
+    I5YuvVNjOhrt6OwtSD1VnnWCITaLh8LwmlUu3NOWbudHUzKSe5MLXGEPo95BNKad
+    hl5yyQKCAQBNo0gMJtRnawMpdLfwewDJL1SdSR6S0ePS0r8/Qk4l1D5GrByyB183
+    yFY/0zhyra7nTt1NH9PlhJj3WFqBdZURSzUNP0iR5YuH9R9Twg5ihEqdB6/EOSOO
+    i521okTvl83q/ui9ecAMxUXr3NrZ+hHyUWmyRe/FLub6uCzg1a+vNauWpzXRZPgk
+    QCijh5oDdd7r3JIpKvtWNs01s7aHmDxZYjtDrmK7sDTtboUzm0QbpWXevUuV+aSF
+    +gDfZlRa3WFVHfisYSWGeYG6O7YOlfDoE7fJHGOu3QC8Ai6Wmtt8Wgd6VHokdHO8
+    xJPVZnCBvyt5up3Zz5hMr25S3VazdVfBAoIBAHVteqTGqWpKFxekGwR0RqE30wmN
+    iIEwFhgOZ8sQ+6ViZJZUR4Nn2fchn2jVwF8V8J1GrJbTknqzAwdXtO3FbgfmmyF2
+    9VbS/GgomXhA9vJkM4KK3Iwo/y/nE9hRhtzuVE0QPudz2fyfaDgnWjcNM59064tH
+    88361LVJm3ixyWSBD41UZ7NgWWJX1y2f073vErsfcPpavF5lhn1oSkQnOlgMJsnl
+    24qeuzAgTWu/2rFpIA2EK30Bgvsl3pjJxHwyNDAgklV7C783LIoAHi7VO7tzZ6iF
+    dmD5XLfcUZc3eaB7XehNQKBXDGLJeI5AFmjsHka5GUoitkU2PFrg/3+nJmg=
+    -----END RSA PRIVATE KEY-----
+  '';
+  "acme.test".cert = builtins.toFile "acme.test.cert" ''
+    -----BEGIN CERTIFICATE-----
+    MIIEoTCCAokCAgKaMA0GCSqGSIb3DQEBCwUAMBYxFDASBgNVBAMMC1NuYWtlb2ls
+    IENBMCAXDTIwMDMyMjIyNTYxOFoYDzIxMjAwMjI3MjI1NjE4WjAUMRIwEAYDVQQD
+    DAlhY21lLnRlc3QwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCWBBNm
+    Mp+zdofDQn33eTuMUCz6FVE9///McwCJSQ8pRg9NKXVPFL9s9uFbNKiibTSPhIuE
+    YYaFt36We9ODS68pvGySABN/DlyVubVk36DHgO7HCMtXfEjGl5Jj48rOznPzD+T4
+    AmH7M4/JfAfe63cL6iCnVL/kiGbNXdr/JT/qTTOkbGM3pk9FNcWvP3cjWk94KQnP
+    3SXu2P7a4Vz3G0zbQ1BfnZ/RjahKFwbx8p01lXO4ceRleStkiGi7nn+dHLaMqAV4
+    PcZBB9r2vA9atXJtDq0owyShrMH9SeTL3FxMjYslqt/GAcM72/3IUG+ZHhQzwHeQ
+    zzY0egQ20dpnaaaIwZnNMRCV/zHBjIcWuoEUhVFYRSvuuv87lCv2Ag3RPi7frscJ
+    7FVgArNs7Y/2HvlzPOMwygaRtfN71IxD+KSvESbAAeCvVfGHqJBiaA82TwBHd336
+    jYjtYlkxRhkBEue6FWv0E1e/nXSKplOpfyVzIvl8SPdJPgBv4fut969m3o1TbqIk
+    sFgCOsDaZK8f7C+RujGuBJLzFg+Q5bw0s0Rtuh3Oh8ikH5Jornc4iTRuP1zVgCCt
+    I0RH9MNOANetZQ2uV9Vn8zv4AR15TTrCLuxWFWJ/o4EPc6mApI2N4TpO4kfzQ+Lk
+    xa6BIUGvv3Zdvq6FWg+JU4U36qIYHpSTAGBIqQIDAQABMA0GCSqGSIb3DQEBCwUA
+    A4ICAQBCDs0V4z00Ze6Ask3qDOLAPo4k85QCfItlRZmwl2XbPZq7kbe13MqF2wxx
+    yiLalm6veK+ehU9MYN104hJZnuce5iEcZurk+8A+Pwn1Ifz+oWKVbUtUP3uV8Sm3
+    chktJ2H1bebXtNJE5TwvdHiUkXU9ywQt2FkxiTSl6+eac7JKEQ8lVN/o6uYxF5ds
+    +oIZplb7bv2XxsRCzq55F2tJX7fIzqXrSa+lQTnfLGmDVMAQX4TRB/lx0Gqd1a9y
+    qGfFnZ7xVyW97f6PiL8MoxPfd2I2JzrzGyP/igNbFOW0ho1OwfxVmvZeS7fQSc5e
+    +qu+nwnFfl0S4cHRif3G3zmz8Ryx9LM5TYkH41qePIHxoEO2sV0DgWJvbSjysV2S
+    EU2a31dJ0aZ+z6YtZVpHlujKMVzxVTrqj74trS4LvU5h/9hv7e1gjYdox1TO0HMK
+    mtDfgBevB21Tvxpz67Ijf31HvfTmCerKJEOjGnbYmyYpMeMNSONRDcToWk8sUwvi
+    OWa5jlUFRAxgXNM09vCTPi9aRUhcFqACqfAd6I1NqGVlfplLWrc7SWaSa+PsLfBf
+    4EOZfk8iEKBVeYXNjg+CcD8j8yk/oEs816/jpihIk8haCDRWYWGKyyGnwn6OQb8d
+    MdRO2b7Oi/AAmEF3jMlICqv286GIYK5qTKk2/CKHlOLPnsWEuA==
+    -----END CERTIFICATE-----
+  '';
+}
diff --git a/nixos/tests/common/letsencrypt/common.nix b/nixos/tests/common/letsencrypt/common.nix
deleted file mode 100644
index bd559c8dacc..00000000000
--- a/nixos/tests/common/letsencrypt/common.nix
+++ /dev/null
@@ -1,12 +0,0 @@
-{ lib, nodes, pkgs, ... }: let
-  letsencrypt-ca = nodes.letsencrypt.config.test-support.letsencrypt.caCert;
-in {
-  networking.nameservers = [
-    nodes.letsencrypt.config.networking.primaryIPAddress
-  ];
-
-  security.acme.acceptTerms = true;
-  security.acme.email = "webmaster@example.com";
-
-  security.pki.certificateFiles = [ letsencrypt-ca ];
-}
diff --git a/nixos/tests/common/letsencrypt/snakeoil-certs.nix b/nixos/tests/common/letsencrypt/snakeoil-certs.nix
deleted file mode 100644
index ca4f71ae688..00000000000
--- a/nixos/tests/common/letsencrypt/snakeoil-certs.nix
+++ /dev/null
@@ -1,254 +0,0 @@
-# Generated via mkcert.sh in the same directory.
-{
-  ca.key = builtins.toFile "ca.key" ''
-    -----BEGIN PRIVATE KEY-----
-    MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQDQ0b23I1srJZwR
-    2MMdvSJK5pcwLfrXU+4gEZEnWNyT8yeVweya+8vmNNOlvK3zxf+ZiY/7aQ0RZJMO
-    h2+VdlgHmr2QKhQTf1HwfZA/06FolD3/DcS+DMJMSTVr179/XLndeVVZUqU7tjvB
-    AWKSIS8H2hSF1UOPi9gBDR8MwCP6Qgj8WYhbkt9q47/lO96qAmm6U1F+Q7RYM9ZQ
-    IWI81N0Ms5wJocg7n6S19iV66ePh7APapZFYup61gFGWfahmA217ELIZd56n8yjO
-    F0epb9sC0XpYCDRrYKBWLqPiv+6wvdZtZvALItyIv08ZwXlBkFg3LbAAhPnf0Vxz
-    pYysQmyyyzkgy252n+Sie0kx+B4qm6fOkpfgYlPSVTb2dXx/be/SE08u0a9FO0fZ
-    pkByWEZJUUwngsJgLUa7MorQf3avxozfC25XqvzbieZfSXlA7mOUclZbC/WUFpyj
-    MlyJU2eCQ8wSwsPXl91oxcYlOkuVLgd41gr9pGXQSuKIkrgbfkftjg2tDC+7g7O8
-    qrdF42FjbZjIx/74AasmsGh4GTQtiSkvEnTstioC6aCV44DlJWbBIMvkyawubjUl
-    Ppij0H66Y9Q4tEc/ktc7oGQfqqluyLb43TeobTPHALsNeAYb39rMtBo5DDCUc81s
-    fuDMhMr/oYXKrFstUsg5AY6mJaRG0QIDAQABAoICAF5ZVfmoPOoKzTB3GvmV2iez
-    dj4rmDmwT1gn98iqasdiRtFwVGJWQHNcDQDGdmY9YNZThD2Y4nGoWpVm9jC2zuFo
-    thusF3QTw8cARKvCCBzDVhumce1YwHVNYpi+W2TFValOyBRathN7rBXxdUMHQUOv
-    8jPh/uudyNP4xL2zFs5dBchW/7g4bT/TdYGyglGYU4L/YEPHfXWYvk1oOAW6O8Ig
-    aPElKt5drEMW2yplATSzua4RvtEzSMBDIRn43pxxEgdXrNC67nF9+ULc2+Efi/oD
-    Ad9CncSiXO9zlVK/W655p6e4qd6uOqyCm8/MTegkuub7eplRe8D3zGjoNN4kCQ4S
-    rckVvIDDb6vZk7PKx9F7GWIqaG/YvFFFKO1MrAZg7SguFA6PtGOYAFocT03P6KXT
-    l2SnZQWKyxUAlh4tOBGlRFgGCx/krRIKbgNYn/qk/ezcRl8c7GpOPh+b7Icoq7u3
-    l4tIVBBHqS8uGgtyi+YwuJeht2MV1aEcSkykKLh2ipp8tb6spORJUkhjawDjvxeQ
-    GztN30Xh2riTXYZ0HExVTtJa8jyvFyp/97ptPIJXaVt2A2KIS3sBFHKnpY+/OrQg
-    uUauYgi13WFHsKOxZL9GYGk7Ujd8bw4CEcJFxKY7bhpGVI6Du7NRkUDWN0+0yusI
-    2szCJ7+ZqJkrc1+GrI/RAoIBAQDseAEggOLYZkpU2Pht15ZbxjM9ayT2ANq1+RTu
-    LjJx4gv2/o/XJCfMZCL0b9TJqtYeH+N6G9oDRJ99VIhUPedhWSYdj9Qj+rPd++TS
-    bp+MoSjmfUfxLTDrmFHL7ppquAE65aDy3B5c+OCb0I4X6CILUf0LynBzgl4kdrzN
-    U6BG3Mt0RiGPojlPV82B9ZUF/09YAz7BIz9X3KMhze1Gps5OeGuUnc9O2IAJYkrj
-    ur9H2YlNS4w+IjRLAXSXUqC8bqPZp6WTo1G/rlyAkIRXCGN90uk5JQvXoj9immFO
-    WaylbdcNG3YcGutreYeZL/UIWF6zCdc6pYG0cCBJS6S/RN7FAoIBAQDiERrLuUbV
-    3fx/a8uMeZop6hXtQpF7jlFxqUmza7QSvBuwks4QVJF+qMSiSvKDkCKqZD4qVf4N
-    TMxEj5vNR0PbnmDshyKJNGVjEauKJSb65CFDUcL1eR/A/oJvxiIdN1Z4cPrpnRux
-    /zIfPuYfYHpdz52buxxmlD7bfwYmVKVpnzjB9z0I1CasZ5uqB0Z8H0OLyUu8S4ju
-    RfkKBDMgVl2q96i8ZvX4C1b7XuimIUqv4WHq5+ejcYirgrYtUbBIaDU3/LORcJdy
-    /K76L1/up70RTDUYYm/HKaRy+vMTpUsZJ7Qbh0hrvQkUvNQ1HXjprW2AePIYi33N
-    h3mb1ulqw4idAoIBAQCsn0YjVjNDShkFK4bfmLv4rw2Ezoyi0SjYIsb2wN6uaBfX
-    7SlQIuKywH8L9f9eYMoCH8FNyLs0G4paUbVb2fzpAc1jUzXINiHL8TCvtXXfkV5s
-    NBSqqRTHR+CegMZVFZJATpVZ9PptYHmHBY5VQW5o2SdizhudFxRmhg95zIx6boBP
-    l0q0sfYoR66MKpzpTeG8HFJZZ8O7/iNQcCXAp9B/VEUkrrdBlaaSMyD8cb1lVBZ5
-    SKdOTGXkQ2G7feQ86n/OSiYDSvxIc56vc9BIQKVwmuEKiFLGzXh8ILrcGXaBJVgS
-    B3QHPFeTk5o7Z9j2iJxJEuv9sginkhrfpsrTnhEJAoIBACkrUkTtjd/e2F/gIqaH
-    crLVZX7a06G7rktTuA9LuvR6e1Rxt8Mzk3eMhprDqVyaQCXlsYiGNoj3hm+p84az
-    xsDVG/OXPIveFeSv0ByNXYbtSr12w1lu4ICGGP0ACTBm5oFymc83hFarEdas3r2y
-    FTbGW36D2c04jCXvARCz85fDnlN8kgnskMpu5+NUBdsO2n83fmphGyPBbHQNhb4K
-    3G4JQhplab/tWL7YbufqQi67jdh4uS+Duo75c/HW4ZKeH6r9gzomVf5j0/3N6NuO
-    gpkG1tiE/LQ5ejBSUTgvrvh6yYsF3QN53pB/PuoZXu63Xay62ePsa1GlrVjbD5EY
-    4OUCggEAJFr7F7AQLMJTAxHFLCsZZ0ZZ+tXYclBC4eHPkZ6sD5jvL3KIpW3Q7jXk
-    oIoD/XEX4B+Qe5M3jQJ/Y5ZJETHcgfcHZbDpCKN2WHQgldQbAJiFd4GY1OegdVsr
-    7TC8jh3Q2eYjzL8u4z7LSNI6aQSv1eWE7S1Q5j/sX/YYDR4W3CBMeIUpqoDWpn87
-    czbIRyA/4L0Y/HLpg/ZCbvtJZbsQwYXhyqfbjlm4BRQ6JiC5uEBKvuDRUXToBJta
-    JU8XMm+Ae5Ogrw7P6hg68dWpagfjb7UZ7Zxv+VDsbrU6KsDcyGCAwrrRZou/6KUG
-    Eq4OVTSu/s8gmY94tgbjeOaLUPEPmg==
-    -----END PRIVATE KEY-----
-  '';
-  ca.cert = builtins.toFile "ca.cert" ''
-    -----BEGIN CERTIFICATE-----
-    MIIFDzCCAvegAwIBAgIUU9rbCLTuvaI6gjSsFsJJjfLWIX8wDQYJKoZIhvcNAQEL
-    BQAwFjEUMBIGA1UEAwwLU25ha2VvaWwgQ0EwIBcNMTkxMDE4MDc1NDEyWhgPMjEx
-    OTA5MjQwNzU0MTJaMBYxFDASBgNVBAMMC1NuYWtlb2lsIENBMIICIjANBgkqhkiG
-    9w0BAQEFAAOCAg8AMIICCgKCAgEA0NG9tyNbKyWcEdjDHb0iSuaXMC3611PuIBGR
-    J1jck/MnlcHsmvvL5jTTpbyt88X/mYmP+2kNEWSTDodvlXZYB5q9kCoUE39R8H2Q
-    P9OhaJQ9/w3EvgzCTEk1a9e/f1y53XlVWVKlO7Y7wQFikiEvB9oUhdVDj4vYAQ0f
-    DMAj+kII/FmIW5LfauO/5TveqgJpulNRfkO0WDPWUCFiPNTdDLOcCaHIO5+ktfYl
-    eunj4ewD2qWRWLqetYBRln2oZgNtexCyGXeep/MozhdHqW/bAtF6WAg0a2CgVi6j
-    4r/usL3WbWbwCyLciL9PGcF5QZBYNy2wAIT539Fcc6WMrEJssss5IMtudp/kontJ
-    MfgeKpunzpKX4GJT0lU29nV8f23v0hNPLtGvRTtH2aZAclhGSVFMJ4LCYC1GuzKK
-    0H92r8aM3wtuV6r824nmX0l5QO5jlHJWWwv1lBacozJciVNngkPMEsLD15fdaMXG
-    JTpLlS4HeNYK/aRl0EriiJK4G35H7Y4NrQwvu4OzvKq3ReNhY22YyMf++AGrJrBo
-    eBk0LYkpLxJ07LYqAumgleOA5SVmwSDL5MmsLm41JT6Yo9B+umPUOLRHP5LXO6Bk
-    H6qpbsi2+N03qG0zxwC7DXgGG9/azLQaOQwwlHPNbH7gzITK/6GFyqxbLVLIOQGO
-    piWkRtECAwEAAaNTMFEwHQYDVR0OBBYEFAZcEiVphGxBT4OWXbM6lKu96dvbMB8G
-    A1UdIwQYMBaAFAZcEiVphGxBT4OWXbM6lKu96dvbMA8GA1UdEwEB/wQFMAMBAf8w
-    DQYJKoZIhvcNAQELBQADggIBAGJ5Jnxq1IQ++IRYxCE7r7BqzzF+HTx0EWKkSOmt
-    eSPqeOdhC26hJlclgGZXAF/Xosmn8vkSQMHhj/jr4HI0VF9IyvDUJm8AKsnOgu/7
-    DUey3lEUdOtJpTG9NyTOcrzxToMJ+hWlFLZKxx2dk4FLIvTLjmo1VHM97Bat7XYW
-    IrL9RRIZ25V+eCYtlR7XYjceGFQ0rCdp8SFIQwC6C/AH2tV3b1AJFsND9PcoLu7c
-    //fH+WUQCcD/N0grdC/QCX7AFWzd4rKQ8gjfND4TSYFTSDwW10Mud4kAVhY2P1sY
-    Y3ZpnxWrCHbIZMbszlbMyD+cjsCBnNvOtYGm7pDut/371rllVcB/uOWYWMCtKPoj
-    0elPrwNMrK+P+wceNBCRQO+9gwzB589F2morFTtsob/qtpAygW8Sfl8M+iLWXeYS
-    c3LBLnj0TpgXKRWg7wgIWKSZx9v6pgy70U0qvkjNS1XseUCPf7hfAbxT3xF+37Dw
-    zZRwF4WAWqdnJoOey21mgc+a2DQzqtykA6KfHgCqNFfDbQXPXvNy25DDThbk+paX
-    G2M2EWtr+Nv9s/zm7Xv/pOXlgMFavaj+ikqZ4wfJf6c/sMOdZJtMA4TsYtAJgbc8
-    ts+0eymTq4v5S8/fW51Lbjw6hc1Kcm8k7NbHSi9sEjBfxFLTZNQ5eb4NGr9Od3sU
-    kgwJ
-    -----END CERTIFICATE-----
-  '';
-  "acme-v02.api.letsencrypt.org".key = builtins.toFile "acme-v02.api.letsencrypt.org.key" ''
-    -----BEGIN RSA PRIVATE KEY-----
-    MIIJKQIBAAKCAgEApny0WhfDwEXe6WDTCw8qBuMAPDr88pj6kbhQWfzAW2c0TggJ
-    Etjs9dktENeTpSl14nnLVMiSYIJPYY3KbOIFQH1qDaOuQ7NaOhj9CdMTm5r9bl+C
-    YAyqLIMQ9AAZDhUcQjOy3moiL7ClFHlkFYuEzZBO9DF7hJpfUFIs0Idg50mNoZh/
-    K/fb4P2skNjfCjjomTRUmZHxT6G00ImSTtSaYbN/WHut1xXwJvOoT1nlEA/PghKm
-    JJ9ZuRMSddUJmjL+sT09L8LVkK8CKeHi4r58DHM0D0u8owIFV9qsXd5UvZHaNgvQ
-    4OAWGukMX+TxRuqkUZkaj84vnNL+ttEMl4jedw0ImzNtCOYehDyTPRkfng5PLWMS
-    vWbwyP8jDd2578mSbx5BF7ypYX366+vknjIFyZ5WezcC1pscIHxLoEwuhuf+knN+
-    kFkLOHeYbqQrU6mxSnu9q0hnNvGUkTP0a/1aLOGRfQ5C/pxpE/Rebi8qfM/OJFd4
-    mSxGL93JUTXWAItiIeBnQpIne65/Ska9dWynOEfIb0okdet3kfmNHz3zc17dZ5g4
-    AdOSCgHAlQgFt/Qd8W6xXUe4C5Mfv2ctxRrfQhDwtB6rMByPwzImnciC2h3vCwD3
-    vS/vjUyWICyhZyi2LZDUQz+sCKBXCYYcYh8ThFO40j5x1OnYMq7XQvyl8QkCAwEA
-    AQKCAgBSAfdssWwRF9m3p6QNPIj9H3AMOxpB/azffqTFzsSJwYp4LWkayZPfffy+
-    4RGvN38D8e6ActP3ifjEGu3tOGBR5fUJhujeHEiDea+a2Ug9S9kuNwmnelWQ23bM
-    Wgf9cdSbn4+qEymHyEFolmsAWdsuzri1fHJVXR06GWBNz4GiLA8B3HY4GD1M1Gfe
-    aZVkGagpXyeVBdiR2xuP5VQWVI8/NQWzdiipW/sRlNABVkyI3uDeN4VzYLL3gTeE
-    p021kQz4DSxIjHZacHpmWwhBnIbKMy0fo7TlrqcnIWXqTwv63Q9Zs/RN8NOyqb0Y
-    t1NKFWafcwUsdOnrG9uv/cVwF1FNE8puydaOi8rL1zAeK89JH8NRQ02wohR9w8qy
-    b2tB6DyGMtuqBt8Il6GA16ZoEuaXeayvlsvDEmG1cS9ZwBvfgrVPAmlm2AYdIf5B
-    RHIJu4BJC6Nn2ehVLqxx1QDhog3SOnAsCmcfg5g/fCwxcVMLIhODFoiKYGeMitDG
-    Q4e5JKcOg+RR8PT/n4eY4rUDBGtsR+Nw8S2DWgXmSufyfDtKCjZB4IuLWPS29tNh
-    zF6iYfoiTWzrSs/yqPSKIFpv+PWZwkKSvjdxia6lSBYYEON4W2QICEtiEs+SvcG4
-    0eIqWM+rRmPnJyMfGqX6GCs3rHDQB2VNJPBCYPQalJ/KwZumAQKCAQEA0ezM6qPJ
-    1JM/fddgeQ50h0T9TRXVUTCISxXza+l4NuFt1NdqUOdHsGtbL1JR4GaQUG8qD1/P
-    R39YgnQEQimxpmYLCZkobkwPxTZm9oiMXpcJrlN4PB5evaWShRSv3mgigpt3Wzml
-    Td+2R9RoA/hvF/wEyIvaWznYOyugBC7GXs20dNnZDULhUapeQu7r6JvgmxBOby7S
-    0FbhGplBiSDETzZURqzH/GMJKaJtNgyyVf3Hbg4mZAQDWoBRr+8HxsNbDkxP6e91
-    QrPHy2VZFiaTmJfoxRhyMTn7/JZaLJaUHDOniOsdMj/V7vMCgpfBqh5vR8bKzuPy
-    ZINggpcFPp1IYQKCAQEAywc7AQoktMBCru/3vzBqUveXbR3RKzNyZCTH5CMm3UNH
-    zmblFgqF2nxzNil21GqAXzSwZk5FyHbkeD3yvEZm+bXzsZTDNokAwoiTgyrr2tf8
-    GLMlCHHl5euIh1xHuyg/oKajVGOoXUXK8piqiDpQKd3Zwc6u2oyQlh+gYTPKh+7i
-    ilipkYawoE6teb6JUGpvU+d27INgNhB2oDEXY3pG2PbV+wv229ykSZxh1sJUdDwT
-    a8eTg+3pCGXtOZiJoQTFwKUlD2WYTGqS4Gx6dIJco5k+ZikGNST1JGE64Jl4MZdI
-    rtyvpcYblh5Q14sJGvp4kWYS9tjEM8pA+4Z9th3JqQKCAQEAkidH0+UM1A9gmQCm
-    jiHeR39ky5Jz3f7oJT63J15479yrVxBTWNhtNQrJhXzOvGkr+JQsuF+ANMsYmFql
-    zFqy8KMC9D/JwmD6adeif+o5sHF/r/s1LsYGOAtao4TvnOzrefs7ciwERt+GTSQ4
-    9uq0jgJMYkPcVr9DKI8K7V6ThdW52dECKRVzQiRXVEp7vIsqKUuFECuNYrfaKWai
-    FhLWGkA9FKee5L0e1/naB1N3ph72Bk2btO6GVzAXr2HADEZe0umWiczJ2xLH+3go
-    Oh/JiufYi8ClYFh6dDVJutlrbOcZsV3gCegfzikqijmWABcIavSgpsJVNF2zh7gV
-    Uq62gQKCAQAdO2FHeQpn6/at8WceY/4rC/MFhvGC4tlpidIuCtGhsfo4wZ/iWImF
-    N73u4nF1jBAHpTJwyHxLrLKgjWrRqOFSutvniZ/BzmAJolh63kcvL0Hg3IpMePm8
-    7PivZJ3/WIAwxU1m7SJkq5PY8ho7mwnHvWWI/hU26l42/z68QBS9FawQd0uS5G2x
-    5yIbEU/8ABcfYYhB7XiA0EYEMo1HiWeB/ag5iTN13ILbBmUf4sL+KVgygH3A1RRk
-    XSiWzluij2lZn22ClgIjnoSfQ38uH0bvVzUgyG9YX4XcQxOTGwWvPjT82FGB8NAw
-    ARVqs14QQFfzt1qrp/I38rsAfBDFk+xhAoIBAQCEKNk/oJcy9t/jMIbLcn6z3aCc
-    Fn8GBPSXtFj0t6weN5lHof+cggw4owMFWQQyAXxo/K6NnKNydMPZ5qjtLsHNpbpQ
-    aT1Or0/1YR1bJ8Lo82B4QM++7F761GWQPvE/tyrfPkfkWl92ITIpmnlw4wycRlkq
-    9anI2fnj1nIZwixzE2peb6PcsZU2HOs9uZ5RRd9wia696I7IpNibs4O4J2WTm4va
-    +NeYif3V2g9qwgT0Va0c9/Jlg3b58R0vA8j/VCU5I0TyXpkB3Xapx+pvEdZ3viUL
-    mXZaVotmWjgBXGDtd2VQg2ZiAMXHn3RzXSgV4Z+A/XacRs75h9bNw0ZJYrz1
-    -----END RSA PRIVATE KEY-----
-  '';
-  "acme-v02.api.letsencrypt.org".cert = builtins.toFile "acme-v02.api.letsencrypt.org.cert" ''
-    -----BEGIN CERTIFICATE-----
-    MIIEtDCCApwCAgKaMA0GCSqGSIb3DQEBCwUAMBYxFDASBgNVBAMMC1NuYWtlb2ls
-    IENBMCAXDTE5MTAxODA3NTQxM1oYDzIxMTkwOTI0MDc1NDEzWjAnMSUwIwYDVQQD
-    DBxhY21lLXYwMi5hcGkubGV0c2VuY3J5cHQub3JnMIICIjANBgkqhkiG9w0BAQEF
-    AAOCAg8AMIICCgKCAgEApny0WhfDwEXe6WDTCw8qBuMAPDr88pj6kbhQWfzAW2c0
-    TggJEtjs9dktENeTpSl14nnLVMiSYIJPYY3KbOIFQH1qDaOuQ7NaOhj9CdMTm5r9
-    bl+CYAyqLIMQ9AAZDhUcQjOy3moiL7ClFHlkFYuEzZBO9DF7hJpfUFIs0Idg50mN
-    oZh/K/fb4P2skNjfCjjomTRUmZHxT6G00ImSTtSaYbN/WHut1xXwJvOoT1nlEA/P
-    ghKmJJ9ZuRMSddUJmjL+sT09L8LVkK8CKeHi4r58DHM0D0u8owIFV9qsXd5UvZHa
-    NgvQ4OAWGukMX+TxRuqkUZkaj84vnNL+ttEMl4jedw0ImzNtCOYehDyTPRkfng5P
-    LWMSvWbwyP8jDd2578mSbx5BF7ypYX366+vknjIFyZ5WezcC1pscIHxLoEwuhuf+
-    knN+kFkLOHeYbqQrU6mxSnu9q0hnNvGUkTP0a/1aLOGRfQ5C/pxpE/Rebi8qfM/O
-    JFd4mSxGL93JUTXWAItiIeBnQpIne65/Ska9dWynOEfIb0okdet3kfmNHz3zc17d
-    Z5g4AdOSCgHAlQgFt/Qd8W6xXUe4C5Mfv2ctxRrfQhDwtB6rMByPwzImnciC2h3v
-    CwD3vS/vjUyWICyhZyi2LZDUQz+sCKBXCYYcYh8ThFO40j5x1OnYMq7XQvyl8QkC
-    AwEAATANBgkqhkiG9w0BAQsFAAOCAgEAkx0GLPuCvKSLTHxVLh5tP4jxSGG/zN37
-    PeZLu3QJTdRdRc8bgeOGXAVEVFbqOLTNTsuY1mvpiv2V6wxR6nns+PIHeLY/UOdc
-    mOreKPtMU2dWPp3ybec2Jwii6PhAXZJ26AKintmug1psMw7662crR3SCnn85/CvW
-    192vhr5gM1PqLBIlbsX0tAqxAwBe1YkxBb9vCq8NVghJlKme49xnwGULMTGs15MW
-    hIPx6sW93zwrGiTsDImH49ILGF+NcX1AgAq90nG0j/l5zhDgXGJglX+K1xP99X1R
-    de3I4uoufPa5q+Pjmhy7muL+o4Qt0D0Vm86RqqjTkNPsr7gAJtt66A7TJrYiIoKn
-    GTIBsgM6egeFLLYQsT0ap/59HJismO2Pjx4Jk/jHOkC8TJsXQNRq1Km76VMBnuc0
-    2CMoD9pb38GjUUH94D4hJK4Ls/gJMF3ftKUyR8Sr/LjE6qU6Yj+ZpeEQP4kW9ANq
-    Lv9KSNDQQpRTL4LwGLTGomksLTQEekge7/q4J2TQRZNYJ/mxnrBKRcv9EAMgBMXq
-    Q+7GHtKDv9tJVlMfG/MRD3CMuuSRiT3OVbvMMkFzsPkqxYAP1CqE/JGvh67TzKI+
-    MUfXKehA6TKuxrTVqCtoFIfGaqA9IWyoRTtugYq/xssB9ESeEYGeaM1A9Yueqz+h
-    KkBZO00jHSE=
-    -----END CERTIFICATE-----
-  '';
-  "letsencrypt.org".key = builtins.toFile "letsencrypt.org.key" ''
-    -----BEGIN RSA PRIVATE KEY-----
-    MIIJKgIBAAKCAgEA9dpdPEyzD3/BBds7tA/51s+WmLFyWuFrq4yMd2R+vi5gvK7n
-    lLNVKhYgiTmK2Um+UEpGucJqZHcTSZA1Bz4S/8ND/AI9I6EmwvBinY5/PubxEALk
-    9YiDA+IzH8ZGFM8wXg7fMbbJAsyv+SHAtr2jmCsggrpuD5fgzs2p+F2q0+oVoeFw
-    MAOUdAf2jNtNLEj2Q6MiR5Xq+wFOcRtXlNlXWIX3NrmubO/xOpDNpsyjyYC5Ld+W
-    06MS5bTHSdv56AkUg2PugMChj15TOddEJIK8zPXFTlMYye9SKwjhNUZovfe4xXCa
-    Tj2nmzrcuMKLz+S3sKQeTWjiRcY3w4zTlAbhtGXDjXjhMObrHoWM8e3cTL4NJMvt
-    tNStXficxbeTbIiYu+7dtF0q+iWaZqexc6PdAaIpFZ0XSw+i5iLdQZmBwzY7NLlH
-    pQupfh6ze0qDUVZAMDubo4JKUTBzH6QTuhHx+uUm7Lc8YdNArn7o/vMZDQym1Eia
-    xKxZuCGaqFvq8ZK4nBVsHfcXbhF/XD2HMid3t7ImbREVu9qnc+En+acU/SJaaL3r
-    jMW6HLVMr6+vQrCzYkvLzKYpoUm9D1Kcn6d8Ofxl2iCaY9CkMr5/6J1p1wcTdcN7
-    IVQ/DFBeTDauyWbyZkO/lPoZoakWyXOx9S9tgClzhFmNgRkZv9wN+QguNDcCAwEA
-    AQKCAgEA0ndlacGfaJ1NeN39dmBW2XZMzdrassJXkjx34528gsLhPaXdyobbWXQn
-    1lHUc7+VlNaBRXUR73+gm1FAlDqnuRxIjuy7ukyzCh8PzSG3/PlnVPWlXCzJPAHh
-    EkqCpD3agirpF34LBsKDwxsKB2bBLft9kWxX3DGA2olmAKDvJQs4CaUcjX4DEHHg
-    tyTmJAsyByUYq3/D8a1koZ9ukpadF8NXpxm+ILQoJqLf6vM1I8N2w7atP/BStSLV
-    mH0gq2tajEB4ZPCDXmC5jsKiKz9gsXWUu0CX8AdYqE6pvRnRgQ8Ytq1265QMb+8s
-    FV82oXqDZkyZRFuNmX3fLyDX39kkTcVS37S56Gzk4EzDWE/u2RXCAPeWla2zUFYI
-    hg8X4ZAwbZRODtK2cZTuCZEILM/iKmtSgHC+aQhp18EUAefa7WGrRD4AvbTxH4VF
-    ek60bwISBk5Mhf39MwqIiQxGOFmfLsQReZvzH4jI5zfDXf/0yZ/1SdGeu6+Walt0
-    V81Ua/DB6zshHpeSP74HMuJHZ4DOQfcV/ndyzvoP84pAjenSx6O034OwQTkpoMI/
-    f/2rK8kdzYSL4f//kFMuRLqmAwOmAFYB2oMo0/YaIoQ4vgTHDKTSxj5mbno56GdT
-    huMAVMKskaCSVbyMB/xyQG7senLItVv+HafVk6ChMUbkIjv9zgECggEBAP+ux1RG
-    cETGjK2U3CRoHGxR7FwaX6hkSokG+aFdVLer+WUrZmR8Ccvh2ALpm8K1G6TTk/5X
-    ZeVX4+1VFYDeTHMN8g20usS5mw3v2GF3fGxGLe4q56l4/4kKMZOrSBuWH4niiIKD
-    0QogdzWkpQJ93nMbZxZ5lk+lRZVf3qSm6nzyP468ndrfI57Ov5OUIWZ7KhTUH9IK
-    8/urUk+lEvyzQmNTlt5ZZXRz7cR01K8chx1zevVAyynzSuGjTysaBN7LTT0v3yVu
-    96yKNsxJvuIz2+4qSjhbnN4jH+feN0VsdF3+Qkru0lBmLVgJl4X67XFaAKMDU9yv
-    3alS53Pkol+Dy1cCggEBAPYodofHC1ydoOmCvUAq4oJNtyI4iIOY/ch3sxVhkNyi
-    KBscQqbay/DiXFiNl+NsemzB1PrHzvCaqKcBKw537XzeKqUgYuVLkFGubf9bDhXi
-    wSRcYbU/oNTgiTgXPW8wH60uIoLaiNi1/YjO2zh4GEY/kFqSuD54Y91iFmcC75bv
-    OjCNugnRdpRjOFhaeNx75tdverR37w3APVZuBSv3bJlMPCtaf+fEAKxJxeqCs3Oq
-    rtsw2TQ4TqfE8/w9qPCVv3bQbMbO48SwjxAz47qH2h3qGu3Ov8badeARe+Ou7nuI
-    U13gPuPOhPXIQP/MYOyamPJdFyng1b8vyNsfjOcWMiECggEAEkMgl6NkV3U7DRbp
-    1mvdQ9tiH33+wR9Qt5LY966b43aUHKbJ7Hlzla1u6V5YMsMO02oNUwhZDdWGQShn
-    ncnC+iDP3iy/flenfIpaETQgnfcxRqan31H2Joqk2eBNCTNi001r5K6XmrqQ6TL2
-    WkQ1RFF7vn42vz+VxcKQO4B0lTIUWhSczcpMWAZ6ZocZD6HScqRoFW+U16/39Bpd
-    TdFb944742vNNFEndXXGzy8hc3gRGz1ihX+MJKuuduyn1mX9AVbPAHR5mkhQ+6x0
-    xuFfXxaEMJxSiwdFOyGDHyFM+n2zrHh8ayOxL22X9gjjNspv6zTMo6GoGnUCdSOq
-    eVoHhwKCAQEAot5O3rOB/vuEljwcv7IgQJrvCsNg/8FgWR1p7kGpuXHJG3btWrz1
-    pyH+e9DjqGQD9KWjJ3LAp02NPUJ2nJIZHj9Y8/yjspb2nDTPLt+uSCjKJibBt0ys
-    O219HRGzYjfzHYCi8PVrCggQAk7rmUdMuF4iQutE4ICDgtz9eZbls3YBiFKdvxVK
-    Yg/sHflucmPAbtah13prPyvs6ZzN6zNANYXNYdn1OwHieBwvyWRFG8jY/MorTHPd
-    BwA3drPNbbGHBzQMZNZKub8gSVYr3SU52gUlYCclmIq+50xqLlF2FWIz1q8irVPd
-    gUnIR/eQQbxgaivRwbGze1ZAjUsozVVQQQKCAQEA9uAKU3O06bEUGj+L0G+7R7r/
-    bi2DNi2kLJ7jyq+n0OqcHEQ1zFK4LAPaXY0yMYXieUzhivMGLSNDiubGO2/KxkFF
-    REXUFgYWZYMwrKsUuscybB64cQDwzD0oXrhvEa2PHecdG6AZ63iLcHaaDzyCPID/
-    wtljekLO2jbJ5esXZd016lykFfUd/K4KP1DGyI2Dkq6q0gTc/Y36gDAcPhIWtzna
-    UujYCe3a8DWCElH4geKXaB5ABbV1eJ8Lch599lXJ9Hszem6QNosFsPaHDCcqLS9H
-    yy2WA6CY2LVU7kONN+O0kxs2fVbxIkI+d/LZyX/yIGlkXcAzL07llIlrTAYebQ==
-    -----END RSA PRIVATE KEY-----
-  '';
-  "letsencrypt.org".cert = builtins.toFile "letsencrypt.org.cert" ''
-    -----BEGIN CERTIFICATE-----
-    MIIEpzCCAo8CAgKaMA0GCSqGSIb3DQEBCwUAMBYxFDASBgNVBAMMC1NuYWtlb2ls
-    IENBMCAXDTE5MTAxODA3NTQxNVoYDzIxMTkwOTI0MDc1NDE1WjAaMRgwFgYDVQQD
-    DA9sZXRzZW5jcnlwdC5vcmcwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC
-    AQD12l08TLMPf8EF2zu0D/nWz5aYsXJa4WurjIx3ZH6+LmC8rueUs1UqFiCJOYrZ
-    Sb5QSka5wmpkdxNJkDUHPhL/w0P8Aj0joSbC8GKdjn8+5vEQAuT1iIMD4jMfxkYU
-    zzBeDt8xtskCzK/5IcC2vaOYKyCCum4Pl+DOzan4XarT6hWh4XAwA5R0B/aM200s
-    SPZDoyJHler7AU5xG1eU2VdYhfc2ua5s7/E6kM2mzKPJgLkt35bToxLltMdJ2/no
-    CRSDY+6AwKGPXlM510QkgrzM9cVOUxjJ71IrCOE1Rmi997jFcJpOPaebOty4wovP
-    5LewpB5NaOJFxjfDjNOUBuG0ZcONeOEw5usehYzx7dxMvg0ky+201K1d+JzFt5Ns
-    iJi77t20XSr6JZpmp7Fzo90BoikVnRdLD6LmIt1BmYHDNjs0uUelC6l+HrN7SoNR
-    VkAwO5ujgkpRMHMfpBO6EfH65Sbstzxh00Cufuj+8xkNDKbUSJrErFm4IZqoW+rx
-    kricFWwd9xduEX9cPYcyJ3e3siZtERW72qdz4Sf5pxT9IlpoveuMxboctUyvr69C
-    sLNiS8vMpimhSb0PUpyfp3w5/GXaIJpj0KQyvn/onWnXBxN1w3shVD8MUF5MNq7J
-    ZvJmQ7+U+hmhqRbJc7H1L22AKXOEWY2BGRm/3A35CC40NwIDAQABMA0GCSqGSIb3
-    DQEBCwUAA4ICAQBbJwE+qc0j6JGHWe0TGjv1viJU3WuyJkMRi+ejx0p/k7Ntp5An
-    2wLC7b/lVP/Nh+PKY/iXWn/BErv2MUo4POc1g8svgxsmMMh5KGGieIfGs7xT+JMH
-    dzZZM+pUpIB5fEO5JfjiOEOKDdAvRSs0mTAVYZEokGkXSNWyylvEaA16mHtMgPjo
-    Lm75d0O66RfJDdd/hTl8umGpF7kEGW1qYk2QmuPr7AqOa8na7olL5fMPh6Q7yRqx
-    GIS9JKQ0fWl8Ngk09WfwUN/kEMcp9Jl5iunNRkbpUJIM/lHFkSA7yOFFL+dVWzd4
-    2r+ddJXTFzW8Rwt65l8SV2MEhijEamKva3mqKLIRWxDsfFVT1T04LWFtnzMW4Z29
-    UHF9Pi7XSyKz0Y/Lz31mNTkjJYbOvbnwok8lc3wFWHc+lummZk8IkCq8xfqzwmwX
-    Ow6EV+Q6VaQpOHumQZ12pBBLtL8DyDhWaRUgVy2vYpwYsMYa5BFMcKCynjlSewo9
-    G2hNoW45cQZP1qHltRR9Xad7SaP7iTETDCiR7AWOqSpDipSh9eMfVW97ZbSfz+vl
-    xl8PZEZMTRIIRVXsPP+E8gtDUhUQp2+Vcz8r6q71qslXM09xl/501uaNjCc3hH2R
-    iw2N77Lho1F3FrBbHdML3RYHZI55eC9iQw6R4S+R4b+iWLJoHzHrW61itg==
-    -----END CERTIFICATE-----
-  '';
-}
diff --git a/nixos/tests/common/resolver.nix b/nixos/tests/common/resolver.nix
index 6be8d1d18e6..09a74de20fa 100644
--- a/nixos/tests/common/resolver.nix
+++ b/nixos/tests/common/resolver.nix
@@ -18,7 +18,7 @@
       defining this option needs to be explicitly imported.
 
       The reason this option exists is for the
-      <filename>nixos/tests/common/letsencrypt</filename> module, which
+      <filename>nixos/tests/common/acme/server</filename> module, which
       needs that option to disable the resolver once the user has set its own
       resolver.
     '';
diff --git a/nixos/tests/containers-custom-pkgs.nix b/nixos/tests/containers-custom-pkgs.nix
new file mode 100644
index 00000000000..397a4a905e6
--- /dev/null
+++ b/nixos/tests/containers-custom-pkgs.nix
@@ -0,0 +1,42 @@
+# Test for NixOS' container support.
+
+import ./make-test-python.nix ({ pkgs, lib, ...} : let
+
+  customPkgs = pkgs // {
+    hello = pkgs.hello.overrideAttrs(old: {
+      name = "custom-hello";
+    });
+  };
+
+in {
+  name = "containers-hosts";
+  meta = with lib.maintainers; {
+    maintainers = [ adisbladis ];
+  };
+
+  machine =
+    { ... }:
+    {
+      virtualisation.memorySize = 256;
+      virtualisation.vlans = [];
+
+      containers.simple = {
+        autoStart = true;
+        pkgs = customPkgs;
+        config = {pkgs, config, ... }: {
+          environment.systemPackages = [
+            pkgs.hello
+          ];
+        };
+      };
+
+    };
+
+  testScript = ''
+    start_all()
+    machine.wait_for_unit("default.target")
+    machine.succeed(
+        "test $(nixos-container run simple -- readlink -f /run/current-system/sw/bin/hello) = ${customPkgs.hello}/bin/hello"
+    )
+  '';
+})
diff --git a/nixos/tests/deluge.nix b/nixos/tests/deluge.nix
index 37689c3d913..3cf179a3821 100644
--- a/nixos/tests/deluge.nix
+++ b/nixos/tests/deluge.nix
@@ -5,9 +5,10 @@ import ./make-test-python.nix ({ pkgs, ...} : {
   };
 
   nodes = {
-    simple = {
+    simple1 = {
       services.deluge = {
         enable = true;
+        package = pkgs.deluge-1_x;
         web = {
           enable = true;
           openFirewall = true;
@@ -15,50 +16,92 @@ import ./make-test-python.nix ({ pkgs, ...} : {
       };
     };
 
-    declarative =
-      { ... }:
+    declarative1 = {
+      services.deluge = {
+        enable = true;
+        package = pkgs.deluge-1_x;
+        openFirewall = true;
+        declarative = true;
+        config = {
+          allow_remote = true;
+          download_location = "/var/lib/deluge/my-download";
+          daemon_port = 58846;
+          listen_ports = [ 6881 6889 ];
+        };
+        web = {
+          enable = true;
+          port =  3142;
+        };
+        authFile = pkgs.writeText "deluge-auth" ''
+          localclient:a7bef72a890:10
+          andrew:password:10
+          user3:anotherpass:5
+        '';
+      };
+    };
 
-      {
-        services.deluge = {
+    simple2 = {
+      services.deluge = {
+        enable = true;
+        package = pkgs.deluge-2_x;
+        web = {
           enable = true;
           openFirewall = true;
-          declarative = true;
-          config = {
-            allow_remote = true;
-            download_location = "/var/lib/deluge/my-download";
-            daemon_port = 58846;
-            listen_ports = [ 6881 6889 ];
-          };
-          web = {
-            enable = true;
-            port =  3142;
-          };
-          authFile = pkgs.writeText "deluge-auth" ''
-            localclient:a7bef72a890:10
-            andrew:password:10
-            user3:anotherpass:5
-          '';
         };
-        environment.systemPackages = [ pkgs.deluge ];
       };
+    };
+
+    declarative2 = {
+      services.deluge = {
+        enable = true;
+        package = pkgs.deluge-2_x;
+        openFirewall = true;
+        declarative = true;
+        config = {
+          allow_remote = true;
+          download_location = "/var/lib/deluge/my-download";
+          daemon_port = 58846;
+          listen_ports = [ 6881 6889 ];
+        };
+        web = {
+          enable = true;
+          port =  3142;
+        };
+        authFile = pkgs.writeText "deluge-auth" ''
+          localclient:a7bef72a890:10
+          andrew:password:10
+          user3:anotherpass:5
+        '';
+      };
+    };
 
   };
 
   testScript = ''
     start_all()
 
-    simple.wait_for_unit("deluged")
-    simple.wait_for_unit("delugeweb")
-    simple.wait_for_open_port("8112")
-    declarative.wait_for_unit("network.target")
-    declarative.wait_until_succeeds("curl --fail http://simple:8112")
+    simple1.wait_for_unit("deluged")
+    simple2.wait_for_unit("deluged")
+    simple1.wait_for_unit("delugeweb")
+    simple2.wait_for_unit("delugeweb")
+    simple1.wait_for_open_port("8112")
+    simple2.wait_for_open_port("8112")
+    declarative1.wait_for_unit("network.target")
+    declarative2.wait_for_unit("network.target")
+    declarative1.wait_until_succeeds("curl --fail http://simple1:8112")
+    declarative2.wait_until_succeeds("curl --fail http://simple2:8112")
 
-    declarative.wait_for_unit("deluged")
-    declarative.wait_for_unit("delugeweb")
-    declarative.wait_until_succeeds("curl --fail http://declarative:3142")
-    declarative.succeed("deluge-console 'help' | grep -q 'rm - Remove a torrent'")
-    declarative.succeed(
-        "deluge-console 'connect 127.0.0.1:58846 andrew password; help' | grep -q 'rm - Remove a torrent'"
+    declarative1.wait_for_unit("deluged")
+    declarative2.wait_for_unit("deluged")
+    declarative1.wait_for_unit("delugeweb")
+    declarative2.wait_for_unit("delugeweb")
+    declarative1.wait_until_succeeds("curl --fail http://declarative1:3142")
+    declarative2.wait_until_succeeds("curl --fail http://declarative2:3142")
+    declarative1.succeed(
+        "deluge-console 'connect 127.0.0.1:58846 andrew password; help' | grep -q 'rm.*Remove a torrent'"
+    )
+    declarative2.succeed(
+        "deluge-console 'connect 127.0.0.1:58846 andrew password; help' | grep -q 'rm.*Remove a torrent'"
     )
   '';
 })
diff --git a/nixos/tests/doas.nix b/nixos/tests/doas.nix
new file mode 100644
index 00000000000..9c0a4bdc756
--- /dev/null
+++ b/nixos/tests/doas.nix
@@ -0,0 +1,83 @@
+# Some tests to ensure doas is working properly.
+import ./make-test-python.nix (
+  { lib, ... }: {
+    name = "doas";
+    meta = with lib.maintainers; {
+      maintainers = [ cole-h ];
+    };
+
+    machine =
+      { ... }:
+        {
+          users.groups = { foobar = {}; barfoo = {}; baz = { gid = 1337; }; };
+          users.users = {
+            test0 = { isNormalUser = true; extraGroups = [ "wheel" ]; };
+            test1 = { isNormalUser = true; };
+            test2 = { isNormalUser = true; extraGroups = [ "foobar" ]; };
+            test3 = { isNormalUser = true; extraGroups = [ "barfoo" ]; };
+            test4 = { isNormalUser = true; extraGroups = [ "baz" ]; };
+            test5 = { isNormalUser = true; };
+            test6 = { isNormalUser = true; };
+            test7 = { isNormalUser = true; };
+          };
+
+          security.doas = {
+            enable = true;
+            wheelNeedsPassword = false;
+
+            extraRules = [
+              { users = [ "test1" ]; groups = [ "foobar" ]; }
+              { users = [ "test2" ]; noPass = true; setEnv = [ "CORRECT" "HORSE=BATTERY" ]; }
+              { groups = [ "barfoo" 1337 ]; noPass = true; }
+              { users = [ "test5" ]; noPass = true; keepEnv = true; runAs = "test1"; }
+              { users = [ "test6" ]; noPass = true; keepEnv = true; setEnv = [ "-STAPLE" ]; }
+              { users = [ "test7" ]; noPass = true; setEnv = [ "-SSH_AUTH_SOCK" ]; }
+            ];
+          };
+        };
+
+    testScript = ''
+      with subtest("users in wheel group should have passwordless doas"):
+          machine.succeed('su - test0 -c "doas -u root true"')
+
+      with subtest("test1 user should not be able to use doas without password"):
+          machine.fail('su - test1 -c "doas -n -u root true"')
+
+      with subtest("test2 user should be able to keep some env"):
+          if "CORRECT=1" not in machine.succeed('su - test2 -c "CORRECT=1 doas env"'):
+              raise Exception("failed to keep CORRECT")
+
+          if "HORSE=BATTERY" not in machine.succeed('su - test2 -c "doas env"'):
+              raise Exception("failed to setenv HORSE=BATTERY")
+
+      with subtest("users in group 'barfoo' shouldn't require password"):
+          machine.succeed("doas -u test3 doas -n -u root true")
+
+      with subtest("users in group 'baz' (GID 1337) shouldn't require password"):
+          machine.succeed("doas -u test4 doas -n -u root echo true")
+
+      with subtest("test5 user should be able to run commands under test1"):
+          machine.succeed("doas -u test5 doas -n -u test1 true")
+
+      with subtest("test5 user should not be able to run commands under root"):
+          machine.fail("doas -u test5 doas -n -u root true")
+
+      with subtest("test6 user should be able to keepenv"):
+          envs = ["BATTERY=HORSE", "CORRECT=false"]
+          out = machine.succeed(
+              'su - test6 -c "BATTERY=HORSE CORRECT=false STAPLE=Tr0ub4dor doas env"'
+          )
+
+          if not all(env in out for env in envs):
+              raise Exception("failed to keep BATTERY or CORRECT")
+          if "STAPLE=Tr0ub4dor" in out:
+              raise Exception("failed to exclude STAPLE")
+
+      with subtest("test7 should not have access to SSH_AUTH_SOCK"):
+          if "SSH_AUTH_SOCK=HOLEY" in machine.succeed(
+              'su - test7 -c "SSH_AUTH_SOCK=HOLEY doas env"'
+          ):
+              raise Exception("failed to exclude SSH_AUTH_SOCK")
+    '';
+  }
+)
diff --git a/nixos/tests/docker-containers.nix b/nixos/tests/docker-containers.nix
deleted file mode 100644
index 0e318a52d9f..00000000000
--- a/nixos/tests/docker-containers.nix
+++ /dev/null
@@ -1,27 +0,0 @@
-# Test Docker containers as systemd units
-
-import ./make-test-python.nix ({ pkgs, lib, ... }: {
-  name = "docker-containers";
-  meta = {
-    maintainers = with lib.maintainers; [ benley mkaito ];
-  };
-
-  nodes = {
-    docker = { pkgs, ... }: {
-      virtualisation.docker.enable = true;
-
-      docker-containers.nginx = {
-        image = "nginx-container";
-        imageFile = pkgs.dockerTools.examples.nginx;
-        ports = ["8181:80"];
-      };
-    };
-  };
-
-  testScript = ''
-    start_all()
-    docker.wait_for_unit("docker-nginx.service")
-    docker.wait_for_open_port(8181)
-    docker.wait_until_succeeds("curl http://localhost:8181 | grep Hello")
-  '';
-})
diff --git a/nixos/tests/dokuwiki.nix b/nixos/tests/dokuwiki.nix
index 38bde10f47e..05271919eff 100644
--- a/nixos/tests/dokuwiki.nix
+++ b/nixos/tests/dokuwiki.nix
@@ -1,29 +1,74 @@
-import ./make-test-python.nix ({ lib, ... }:
+import ./make-test-python.nix ({ pkgs, ... }:
 
-with lib;
+let
+  template-bootstrap3 = pkgs.stdenv.mkDerivation {
+    name = "bootstrap3";
+    # Download the theme from the dokuwiki site
+    src = pkgs.fetchurl {
+      url = "https://github.com/giterlizzi/dokuwiki-template-bootstrap3/archive/v2019-05-22.zip";
+      sha256 = "4de5ff31d54dd61bbccaf092c9e74c1af3a4c53e07aa59f60457a8f00cfb23a6";
+    };
+    # We need unzip to build this package
+    buildInputs = [ pkgs.unzip ];
+    # Installing simply means copying all files to the output directory
+    installPhase = "mkdir -p $out; cp -R * $out/";
+  };
+
+
+  # Let's package the icalevents plugin
+  plugin-icalevents = pkgs.stdenv.mkDerivation {
+    name = "icalevents";
+    # Download the plugin from the dokuwiki site
+    src = pkgs.fetchurl {
+      url = "https://github.com/real-or-random/dokuwiki-plugin-icalevents/releases/download/2017-06-16/dokuwiki-plugin-icalevents-2017-06-16.zip";
+      sha256 = "e40ed7dd6bbe7fe3363bbbecb4de481d5e42385b5a0f62f6a6ce6bf3a1f9dfa8";
+    };
+    # We need unzip to build this package
+    buildInputs = [ pkgs.unzip ];
+    sourceRoot = ".";
+    # Installing simply means copying all files to the output directory
+    installPhase = "mkdir -p $out; cp -R * $out/";
+  };
 
-{
+in {
   name = "dokuwiki";
-  meta.maintainers = with maintainers; [ maintainers."1000101" ];
-
-  nodes.machine =
-    { pkgs, ... }:
-    { services.dokuwiki = {
-        enable = true;
-        acl = " ";
-        superUser = null;
-        nginx = {
-          forceSSL = false;
-          enableACME = false;
-        };
-      }; 
+  meta.maintainers = with pkgs.lib.maintainers; [ "1000101" ];
+
+  machine = { ... }: {
+    services.dokuwiki."site1.local" = {
+      aclUse = false;
+      superUser = "admin";
+      nginx = {
+        forceSSL = false;
+        enableACME = false;
+      };
+    };
+    services.dokuwiki."site2.local" = {
+      aclUse = true;
+      superUser = "admin";
+      nginx = {
+        forceSSL = false;
+        enableACME = false;
+      };
+      templates = [ template-bootstrap3 ];
+      plugins = [ plugin-icalevents ];
     };
+    networking.hosts."127.0.0.1" = [ "site1.local" "site2.local" ];
+  };
 
   testScript = ''
-    machine.start()
-    machine.wait_for_unit("phpfpm-dokuwiki.service")
+    site_names = ["site1.local", "site2.local"]
+
+    start_all()
+
+    machine.wait_for_unit("phpfpm-dokuwiki-site1.local.service")
+    machine.wait_for_unit("phpfpm-dokuwiki-site2.local.service")
+
     machine.wait_for_unit("nginx.service")
+
     machine.wait_for_open_port(80)
-    machine.succeed("curl -sSfL http://localhost/ | grep 'DokuWiki'")
+
+    machine.succeed("curl -sSfL http://site1.local/ | grep 'DokuWiki'")
+    machine.succeed("curl -sSfL http://site2.local/ | grep 'DokuWiki'")
   '';
 })
diff --git a/nixos/tests/ec2.nix b/nixos/tests/ec2.nix
index 6aeeb17ba31..5a59d65e602 100644
--- a/nixos/tests/ec2.nix
+++ b/nixos/tests/ec2.nix
@@ -108,7 +108,7 @@ in {
     inherit image;
     sshPublicKey = snakeOilPublicKey;
 
-    # ### http://nixos.org/channels/nixos-unstable nixos
+    # ### https://nixos.org/channels/nixos-unstable nixos
     userData = ''
       { pkgs, ... }:
 
diff --git a/nixos/tests/elk.nix b/nixos/tests/elk.nix
index d3dc6dde135..7e87197ed9f 100644
--- a/nixos/tests/elk.nix
+++ b/nixos/tests/elk.nix
@@ -101,6 +101,7 @@ let
                       prefixed indices. Ignore the error if the filter does not result in an
                       actionable list of indices (ignore_empty_list) and exit cleanly.
                     options:
+                      allow_ilm_indices: true
                       ignore_empty_list: True
                       disable_action: False
                     filters:
diff --git a/nixos/tests/enlightenment.nix b/nixos/tests/enlightenment.nix
new file mode 100644
index 00000000000..5fa8d765dd1
--- /dev/null
+++ b/nixos/tests/enlightenment.nix
@@ -0,0 +1,101 @@
+import ./make-test-python.nix ({ pkgs, ...} :
+{
+  name = "enlightenment";
+
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ romildo ];
+  };
+
+  machine = { ... }:
+  {
+    imports = [ ./common/user-account.nix ];
+    services.xserver.enable = true;
+    services.xserver.desktopManager.enlightenment.enable = true;
+    services.xserver.displayManager.lightdm = {
+      enable = true;
+      autoLogin = {
+        enable = true;
+        user = "alice";
+      };
+    };
+    hardware.pulseaudio.enable = true; # needed for the getfacl test; /dev/snd/* exists without it, but udev doesn't care then
+    virtualisation.memorySize = 1024;
+    environment.systemPackages = [ pkgs.xdotool ];
+    services.acpid.enable = true;
+    services.connman.enable = true;
+    services.connman.package = pkgs.connmanMinimal;
+  };
+
+  enableOCR = true;
+
+  testScript = { nodes, ... }: let
+    user = nodes.machine.config.users.users.alice;
+  in ''
+    with subtest("Ensure x starts"):
+        machine.wait_for_x()
+        machine.wait_for_file("${user.home}/.Xauthority")
+        machine.succeed("xauth merge ${user.home}/.Xauthority")
+
+    with subtest("Check that logging in has given the user ownership of devices"):
+        machine.succeed("getfacl -p /dev/snd/timer | grep -q ${user.name}")
+
+    with subtest("First time wizard"):
+        machine.wait_for_text("Default")  # Language
+        machine.succeed("xdotool mousemove 512 185 click 1")  # Default Language
+        machine.screenshot("wizard1")
+        machine.succeed("xdotool mousemove 512 740 click 1")  # Next
+
+        machine.wait_for_text("English")  # Keyboard (default)
+        machine.screenshot("wizard2")
+        machine.succeed("xdotool mousemove 512 740 click 1")  # Next
+
+        machine.wait_for_text("Standard")  # Profile (default)
+        machine.screenshot("wizard3")
+        machine.succeed("xdotool mousemove 512 740 click 1")  # Next
+
+        machine.wait_for_text("Title")  # Sizing (default)
+        machine.screenshot("wizard4")
+        machine.succeed("xdotool mousemove 512 740 click 1")  # Next
+
+        machine.wait_for_text("clicked")  # Windows Phocus
+        machine.succeed("xdotool mousemove 512 370 click 1")  # Click
+        machine.screenshot("wizard5")
+        machine.succeed("xdotool mousemove 512 740 click 1")  # Next
+
+        machine.wait_for_text("bindings")  # Mouse Modifiers (default)
+        machine.screenshot("wizard6")
+        machine.succeed("xdotool mousemove 512 740 click 1")  # Next
+
+        machine.wait_for_text("Connman")  # Network Management (default)
+        machine.screenshot("wizard7")
+        machine.succeed("xdotool mousemove 512 740 click 1")  # Next
+
+        machine.wait_for_text("BlusZ")  # Bluetooh Management (default)
+        machine.screenshot("wizard8")
+        machine.succeed("xdotool mousemove 512 740 click 1")  # Next
+
+        machine.wait_for_text("Compositing")  # Compositing (default)
+        machine.screenshot("wizard9")
+        machine.succeed("xdotool mousemove 512 740 click 1")  # Next
+
+        machine.wait_for_text("update")  # Updates
+        machine.succeed("xdotool mousemove 512 495 click 1")  # Disable
+        machine.screenshot("wizard10")
+        machine.succeed("xdotool mousemove 512 740 click 1")  # Next
+
+        machine.wait_for_text("taskbar")  # Taskbar
+        machine.succeed("xdotool mousemove 480 410 click 1")  # Enable
+        machine.screenshot("wizard11")
+        machine.succeed("xdotool mousemove 512 740 click 1")  # Next
+
+        machine.wait_for_text("Home")  # The desktop
+        machine.screenshot("wizard12")
+
+    with subtest("Run Terminology"):
+        machine.succeed("terminology &")
+        machine.sleep(5)
+        machine.send_chars("ls --color -alF\n")
+        machine.sleep(2)
+        machine.screenshot("terminology")
+  '';
+})
diff --git a/nixos/tests/flannel.nix b/nixos/tests/flannel.nix
index 9991c5eaa32..7615732c20c 100644
--- a/nixos/tests/flannel.nix
+++ b/nixos/tests/flannel.nix
@@ -1,20 +1,24 @@
-import ./make-test.nix ({ pkgs, ...} : {
+import ./make-test-python.nix ({ lib, ...} : {
   name = "flannel";
 
-  meta = with pkgs.stdenv.lib.maintainers; {
+  meta = with lib.maintainers; {
     maintainers = [ offline ];
   };
 
   nodes = let
-    flannelConfig = {
+    flannelConfig = { pkgs, ... } : {
       services.flannel = {
         enable = true;
+        backend = {
+          Type = "udp";
+          Port = 8285;
+        };
         network = "10.1.0.0/16";
         iface = "eth1";
         etcd.endpoints = ["http://etcd:2379"];
       };
 
-      networking.firewall.allowedUDPPorts = [ 8472 ];
+      networking.firewall.allowedUDPPorts = [ 8285 ];
     };
   in {
     etcd = { ... }: {
@@ -32,25 +36,22 @@ import ./make-test.nix ({ pkgs, ...} : {
       networking.firewall.allowedTCPPorts = [ 2379 ];
     };
 
-    node1 = { ... }: {
-      require = [flannelConfig];
-    };
-
-    node2 = { ... }: {
-      require = [flannelConfig];
-    };
+    node1 = flannelConfig;
+    node2 = flannelConfig;
   };
 
   testScript = ''
-    startAll;
+    start_all()
 
-    $node1->waitForUnit("flannel.service");
-    $node2->waitForUnit("flannel.service");
+    node1.wait_for_unit("flannel.service")
+    node2.wait_for_unit("flannel.service")
 
-    my $ip1 = $node1->succeed("ip -4 addr show flannel.1 | grep -oP '(?<=inet).*(?=/)'");
-    my $ip2 = $node2->succeed("ip -4 addr show flannel.1 | grep -oP '(?<=inet).*(?=/)'");
+    node1.wait_until_succeeds("ip l show dev flannel0")
+    ip1 = node1.succeed("ip -4 addr show flannel0 | grep -oP '(?<=inet).*(?=/)'")
+    node2.wait_until_succeeds("ip l show dev flannel0")
+    ip2 = node2.succeed("ip -4 addr show flannel0 | grep -oP '(?<=inet).*(?=/)'")
 
-    $node1->waitUntilSucceeds("ping -c 1 $ip2");
-    $node2->waitUntilSucceeds("ping -c 1 $ip1");
+    node1.wait_until_succeeds(f"ping -c 1 {ip2}")
+    node2.wait_until_succeeds(f"ping -c 1 {ip1}")
   '';
 })
diff --git a/nixos/tests/gitdaemon.nix b/nixos/tests/gitdaemon.nix
index b610caf06fb..c4a707943ef 100644
--- a/nixos/tests/gitdaemon.nix
+++ b/nixos/tests/gitdaemon.nix
@@ -55,6 +55,9 @@ in {
     with subtest("git daemon starts"):
         server.wait_for_unit("git-daemon.service")
 
+    server.wait_for_unit("network-online.target")
+    client.wait_for_unit("network-online.target")
+
     with subtest("client can clone project.git"):
         client.succeed(
             "git clone git://server/project.git /project",
diff --git a/nixos/tests/gnome3-xorg.nix b/nixos/tests/gnome3-xorg.nix
index f793bb922ad..b59badcd5de 100644
--- a/nixos/tests/gnome3-xorg.nix
+++ b/nixos/tests/gnome3-xorg.nix
@@ -1,7 +1,7 @@
-import ./make-test-python.nix ({ pkgs, ...} : {
+import ./make-test-python.nix ({ pkgs, lib, ...} : {
   name = "gnome3-xorg";
-  meta = with pkgs.stdenv.lib.maintainers; {
-    maintainers = pkgs.gnome3.maintainers;
+  meta = with lib; {
+    maintainers = teams.gnome.members;
   };
 
   machine = { nodes, ... }: let
diff --git a/nixos/tests/gnome3.nix b/nixos/tests/gnome3.nix
index 486c146d8dc..17e72c5f651 100644
--- a/nixos/tests/gnome3.nix
+++ b/nixos/tests/gnome3.nix
@@ -1,7 +1,7 @@
-import ./make-test-python.nix ({ pkgs, ...} : {
+import ./make-test-python.nix ({ pkgs, lib, ...} : {
   name = "gnome3";
-  meta = with pkgs.stdenv.lib.maintainers; {
-    maintainers = pkgs.gnome3.maintainers;
+  meta = with lib; {
+    maintainers = teams.gnome.members;
   };
 
   machine =
diff --git a/nixos/tests/google-oslogin/default.nix b/nixos/tests/google-oslogin/default.nix
index 1977e92e987..97783c81f39 100644
--- a/nixos/tests/google-oslogin/default.nix
+++ b/nixos/tests/google-oslogin/default.nix
@@ -22,6 +22,8 @@ in {
     client = { ... }: {};
   };
   testScript =  ''
+    MOCKUSER = "mockuser_nixos_org"
+    MOCKADMIN = "mockadmin_nixos_org"
     start_all()
 
     server.wait_for_unit("mock-google-metadata.service")
@@ -29,10 +31,10 @@ in {
 
     # mockserver should return a non-expired ssh key for both mockuser and mockadmin
     server.succeed(
-        '${pkgs.google-compute-engine-oslogin}/bin/google_authorized_keys mockuser | grep -q "${snakeOilPublicKey}"'
+        f'${pkgs.google-compute-engine-oslogin}/bin/google_authorized_keys {MOCKUSER} | grep -q "${snakeOilPublicKey}"'
     )
     server.succeed(
-        '${pkgs.google-compute-engine-oslogin}/bin/google_authorized_keys mockadmin | grep -q "${snakeOilPublicKey}"'
+        f'${pkgs.google-compute-engine-oslogin}/bin/google_authorized_keys {MOCKADMIN} | grep -q "${snakeOilPublicKey}"'
     )
 
     # install snakeoil ssh key on the client, and provision .ssh/config file
@@ -50,20 +52,22 @@ in {
     client.fail("ssh ghost@server 'true'")
 
     # we should be able to connect as mockuser
-    client.succeed("ssh mockuser@server 'true'")
+    client.succeed(f"ssh {MOCKUSER}@server 'true'")
     # but we shouldn't be able to sudo
     client.fail(
-        "ssh mockuser@server '/run/wrappers/bin/sudo /run/current-system/sw/bin/id' | grep -q 'root'"
+        f"ssh {MOCKUSER}@server '/run/wrappers/bin/sudo /run/current-system/sw/bin/id' | grep -q 'root'"
     )
 
     # we should also be able to log in as mockadmin
-    client.succeed("ssh mockadmin@server 'true'")
+    client.succeed(f"ssh {MOCKADMIN}@server 'true'")
     # pam_oslogin_admin.so should now have generated a sudoers file
-    server.succeed("find /run/google-sudoers.d | grep -q '/run/google-sudoers.d/mockadmin'")
+    server.succeed(
+        f"find /run/google-sudoers.d | grep -q '/run/google-sudoers.d/{MOCKADMIN}'"
+    )
 
     # and we should be able to sudo
     client.succeed(
-        "ssh mockadmin@server '/run/wrappers/bin/sudo /run/current-system/sw/bin/id' | grep -q 'root'"
+        f"ssh {MOCKADMIN}@server '/run/wrappers/bin/sudo /run/current-system/sw/bin/id' | grep -q 'root'"
     )
   '';
   })
diff --git a/nixos/tests/google-oslogin/server.py b/nixos/tests/google-oslogin/server.py
index bfc527cb97d..5ea9bbd2c96 100644
--- a/nixos/tests/google-oslogin/server.py
+++ b/nixos/tests/google-oslogin/server.py
@@ -7,24 +7,29 @@ import hashlib
 import base64
 
 from http.server import BaseHTTPRequestHandler, HTTPServer
+from urllib.parse import urlparse, parse_qs
 from typing import Dict
 
 SNAKEOIL_PUBLIC_KEY = os.environ['SNAKEOIL_PUBLIC_KEY']
+MOCKUSER="mockuser_nixos_org"
+MOCKADMIN="mockadmin_nixos_org"
 
 
-def w(msg):
+def w(msg: bytes):
     sys.stderr.write(f"{msg}\n")
     sys.stderr.flush()
 
 
-def gen_fingerprint(pubkey):
+def gen_fingerprint(pubkey: str):
     decoded_key = base64.b64decode(pubkey.encode("ascii").split()[1])
     return hashlib.sha256(decoded_key).hexdigest()
 
-def gen_email(username):
+
+def gen_email(username: str):
     """username seems to be a 21 characters long number string, so mimic that in a reproducible way"""
     return str(int(hashlib.sha256(username.encode()).hexdigest(), 16))[0:21]
 
+
 def gen_mockuser(username: str, uid: str, gid: str, home_directory: str, snakeoil_pubkey: str) -> Dict:
     snakeoil_pubkey_fingerprint = gen_fingerprint(snakeoil_pubkey)
     # seems to be a 21 characters long numberstring, so mimic that in a reproducible way
@@ -56,7 +61,8 @@ def gen_mockuser(username: str, uid: str, gid: str, home_directory: str, snakeoi
 
 
 class ReqHandler(BaseHTTPRequestHandler):
-    def _send_json_ok(self, data):
+
+    def _send_json_ok(self, data: dict):
         self.send_response(200)
         self.send_header('Content-type', 'application/json')
         self.end_headers()
@@ -64,29 +70,62 @@ class ReqHandler(BaseHTTPRequestHandler):
         w(out)
         self.wfile.write(out)
 
+    def _send_json_success(self, success=True):
+        self.send_response(200)
+        self.send_header('Content-type', 'application/json')
+        self.end_headers()
+        out = json.dumps({"success": success}).encode()
+        w(out)
+        self.wfile.write(out)
+
+    def _send_404(self):
+        self.send_response(404)
+        self.end_headers()
+
     def do_GET(self):
         p = str(self.path)
-        # mockuser and mockadmin are allowed to login, both use the same snakeoil public key
-        if p == '/computeMetadata/v1/oslogin/users?username=mockuser' \
-            or p == '/computeMetadata/v1/oslogin/users?uid=1009719690':
-            self._send_json_ok(gen_mockuser(username='mockuser', uid='1009719690', gid='1009719690',
-                                            home_directory='/home/mockuser', snakeoil_pubkey=SNAKEOIL_PUBLIC_KEY))
-        elif p == '/computeMetadata/v1/oslogin/users?username=mockadmin' \
-            or p == '/computeMetadata/v1/oslogin/users?uid=1009719691':
-            self._send_json_ok(gen_mockuser(username='mockadmin', uid='1009719691', gid='1009719691',
-                                            home_directory='/home/mockadmin', snakeoil_pubkey=SNAKEOIL_PUBLIC_KEY))
-
-        # mockuser is allowed to login
-        elif p == f"/computeMetadata/v1/oslogin/authorize?email={gen_email('mockuser')}&policy=login":
-            self._send_json_ok({'success': True})
-
-        # mockadmin may also become root
-        elif p == f"/computeMetadata/v1/oslogin/authorize?email={gen_email('mockadmin')}&policy=login" or p == f"/computeMetadata/v1/oslogin/authorize?email={gen_email('mockadmin')}&policy=adminLogin":
-            self._send_json_ok({'success': True})
+        pu = urlparse(p)
+        params = parse_qs(pu.query)
+
+        # users endpoint
+        if pu.path == "/computeMetadata/v1/oslogin/users":
+            # mockuser and mockadmin are allowed to log in; both use the same snakeoil public key
+            if params.get('username') == [MOCKUSER] or params.get('uid') == ["1009719690"]:
+                username = MOCKUSER
+                uid = "1009719690"
+            elif params.get('username') == [MOCKADMIN] or params.get('uid') == ["1009719691"]:
+                username = MOCKADMIN
+                uid = "1009719691"
+            else:
+                self._send_404()
+                return
+
+            self._send_json_ok(gen_mockuser(username=username, uid=uid, gid=uid, home_directory=f"/home/{username}", snakeoil_pubkey=SNAKEOIL_PUBLIC_KEY))
+            return
+
+        # authorize endpoint
+        elif pu.path == "/computeMetadata/v1/oslogin/authorize":
+            # is the user allowed to log in?
+            if params.get("policy") == ["login"]:
+                # mockuser and mockadmin are allowed to log in
+                if params.get('email') == [gen_email(MOCKUSER)] or params.get('email') == [gen_email(MOCKADMIN)]:
+                    self._send_json_success()
+                    return
+                self._send_json_success(False)
+                return
+            # is the user allowed to become root?
+            elif params.get("policy") == ["adminLogin"]:
+                # only mockadmin is allowed to become admin
+                self._send_json_success((params['email'] == [gen_email(MOCKADMIN)]))
+                return
+            # send 404 for other policies
+            else:
+                self._send_404()
+                return
         else:
             sys.stderr.write(f"Unhandled path: {p}\n")
             sys.stderr.flush()
-            self.send_response(501)
+            self.send_response(404)
             self.end_headers()
             self.wfile.write(b'')
 
diff --git a/nixos/tests/hardened.nix b/nixos/tests/hardened.nix
index cbf76f9e558..5ed0dfcf9ab 100644
--- a/nixos/tests/hardened.nix
+++ b/nixos/tests/hardened.nix
@@ -1,4 +1,4 @@
-import ./make-test.nix ({ pkgs, ...} : {
+import ./make-test.nix ({ pkgs, latestKernel ? false, ... } : {
   name = "hardened";
   meta = with pkgs.stdenv.lib.maintainers; {
     maintainers = [ joachifm ];
@@ -10,6 +10,8 @@ import ./make-test.nix ({ pkgs, ...} : {
     { users.users.alice = { isNormalUser = true; extraGroups = [ "proc" ]; };
       users.users.sybil = { isNormalUser = true; group = "wheel"; };
       imports = [ ../modules/profiles/hardened.nix ];
+      boot.kernelPackages =
+        lib.mkIf latestKernel pkgs.linuxPackages_latest_hardened;
       environment.memoryAllocator.provider = "graphene-hardened";
       nix.useSandbox = false;
       virtualisation.emptyDiskImages = [ 4096 ];
@@ -23,7 +25,9 @@ import ./make-test.nix ({ pkgs, ...} : {
           options = [ "noauto" ];
         };
       };
-      boot.extraModulePackages = [ config.boot.kernelPackages.wireguard ];
+      boot.extraModulePackages =
+        optional (versionOlder config.boot.kernelPackages.kernel.version "5.6")
+          config.boot.kernelPackages.wireguard;
       boot.kernelModules = [ "wireguard" ];
     };
 
@@ -76,7 +80,8 @@ import ./make-test.nix ({ pkgs, ...} : {
 
       # Test userns
       subtest "userns", sub {
-          $machine->fail("unshare --user");
+          $machine->succeed("unshare --user true");
+          $machine->fail("su -l alice -c 'unshare --user true'");
       };
 
       # Test dmesg restriction
diff --git a/nixos/tests/hydra/db-migration.nix b/nixos/tests/hydra/db-migration.nix
index aa1c81c9e77..cf74acfd67a 100644
--- a/nixos/tests/hydra/db-migration.nix
+++ b/nixos/tests/hydra/db-migration.nix
@@ -1,8 +1,14 @@
-{ system ? builtins.currentSystem, ... }:
+{ system ? builtins.currentSystem
+, pkgs ? import ../../.. { inherit system; }
+, ...
+}:
 
 let inherit (import ./common.nix { inherit system; }) baseConfig; in
 
-{ mig = import ../make-test-python.nix ({ pkgs, lib, ... }: {
+with import ../../lib/testing-python.nix { inherit system pkgs; };
+with pkgs.lib;
+
+{ mig = makeTest {
     name = "hydra-db-migration";
     meta = with pkgs.stdenv.lib.maintainers; {
       maintainers = [ ma27 ];
@@ -82,5 +88,5 @@ let inherit (import ./common.nix { inherit system; }) baseConfig; in
 
       original.shutdown()
     '';
-  });
+  };
 }
diff --git a/nixos/tests/installed-tests/default.nix b/nixos/tests/installed-tests/default.nix
index a189ef63f22..b6bdfea2277 100644
--- a/nixos/tests/installed-tests/default.nix
+++ b/nixos/tests/installed-tests/default.nix
@@ -91,6 +91,7 @@ in
   ibus = callInstalledTest ./ibus.nix {};
   libgdata = callInstalledTest ./libgdata.nix {};
   glib-testing = callInstalledTest ./glib-testing.nix {};
+  libjcat = callInstalledTest ./libjcat.nix {};
   libxmlb = callInstalledTest ./libxmlb.nix {};
   malcontent = callInstalledTest ./malcontent.nix {};
   ostree = callInstalledTest ./ostree.nix {};
diff --git a/nixos/tests/installed-tests/libjcat.nix b/nixos/tests/installed-tests/libjcat.nix
new file mode 100644
index 00000000000..41493a73089
--- /dev/null
+++ b/nixos/tests/installed-tests/libjcat.nix
@@ -0,0 +1,5 @@
+{ pkgs, makeInstalledTest, ... }:
+
+makeInstalledTest {
+  tested = pkgs.libjcat;
+}
diff --git a/nixos/tests/installer.nix b/nixos/tests/installer.nix
index babde4126c4..eef9abebf9f 100644
--- a/nixos/tests/installer.nix
+++ b/nixos/tests/installer.nix
@@ -29,7 +29,7 @@ let
             boot.loader.grub.splashImage = null;
           ''}
 
-          boot.loader.grub.extraConfig = "serial; terminal_output.serial";
+          boot.loader.grub.extraConfig = "serial; terminal_output serial";
           ${if grubUseEfi then ''
             boot.loader.grub.device = "nodev";
             boot.loader.grub.efiSupport = true;
@@ -65,7 +65,7 @@ let
   # partitions and filesystems.
   testScriptFun = { bootLoader, createPartitions, grubVersion, grubDevice, grubUseEfi
                   , grubIdentifier, preBootCommands, extraConfig
-                  , testCloneConfig
+                  , testSpecialisationConfig
                   }:
     let iface = if grubVersion == 1 then "ide" else "virtio";
         isEfi = bootLoader == "systemd-boot" || (bootLoader == "grub" && grubUseEfi);
@@ -97,7 +97,7 @@ let
 
 
       def create_machine_named(name):
-          return create_machine({**default_flags, "name": "boot-after-install"})
+          return create_machine({**default_flags, "name": name})
 
 
       machine.start()
@@ -220,7 +220,7 @@ let
 
       # Tests for validating clone configuration entries in grub menu
     ''
-    + optionalString testCloneConfig ''
+    + optionalString testSpecialisationConfig ''
       # Reboot Machine
       machine = create_machine_named("clone-default-config")
       ${preBootCommands}
@@ -262,7 +262,7 @@ let
     , bootLoader ? "grub" # either "grub" or "systemd-boot"
     , grubVersion ? 2, grubDevice ? "/dev/vda", grubIdentifier ? "uuid", grubUseEfi ? false
     , enableOCR ? false, meta ? {}
-    , testCloneConfig ? false
+    , testSpecialisationConfig ? false
     }:
     makeTest {
       inherit enableOCR;
@@ -337,7 +337,7 @@ let
       testScript = testScriptFun {
         inherit bootLoader createPartitions preBootCommands
                 grubVersion grubDevice grubIdentifier grubUseEfi extraConfig
-                testCloneConfig;
+                testSpecialisationConfig;
       };
     };
 
@@ -411,11 +411,11 @@ let
     grubUseEfi = true;
   };
 
-  clone-test-extraconfig = {
+  specialisation-test-extraconfig = {
     extraConfig = ''
       environment.systemPackages = [ pkgs.grub2 ];
       boot.loader.grub.configurationName = "Home";
-      nesting.clone = [ {
+      specialisation.work.configuration = {
         boot.loader.grub.configurationName = lib.mkForce "Work";
 
         environment.etc = {
@@ -424,9 +424,9 @@ let
               gitproxy = none for work.com
               ";
         };
-      } ];
+      };
     '';
-    testCloneConfig = true;
+    testSpecialisationConfig = true;
   };
 
 
@@ -440,7 +440,7 @@ in {
   simple = makeInstallerTest "simple" simple-test-config;
 
   # Test cloned configurations with the simple grub configuration
-  simpleClone = makeInstallerTest "simpleClone" (simple-test-config // clone-test-extraconfig);
+  simpleSpecialised = makeInstallerTest "simpleSpecialised" (simple-test-config // specialisation-test-extraconfig);
 
   # Simple GPT/UEFI configuration using systemd-boot with 3 partitions: ESP, swap & root filesystem
   simpleUefiSystemdBoot = makeInstallerTest "simpleUefiSystemdBoot" {
@@ -467,7 +467,7 @@ in {
   simpleUefiGrub = makeInstallerTest "simpleUefiGrub" simple-uefi-grub-config;
 
   # Test cloned configurations with the uefi grub configuration
-  simpleUefiGrubClone = makeInstallerTest "simpleUefiGrubClone" (simple-uefi-grub-config // clone-test-extraconfig);
+  simpleUefiGrubSpecialisation = makeInstallerTest "simpleUefiGrubSpecialisation" (simple-uefi-grub-config // specialisation-test-extraconfig);
 
   # Same as the previous, but now with a separate /boot partition.
   separateBoot = makeInstallerTest "separateBoot" {
@@ -650,6 +650,32 @@ in {
     '';
   };
 
+  bcache = makeInstallerTest "bcache" {
+    createPartitions = ''
+      machine.succeed(
+          "flock /dev/vda parted --script /dev/vda --"
+          + " mklabel msdos"
+          + " mkpart primary ext2 1M 50MB"  # /boot
+          + " mkpart primary 50MB 512MB  "  # swap
+          + " mkpart primary 512MB 1024MB"  # Cache (typically SSD)
+          + " mkpart primary 1024MB -1s ",  # Backing device (typically HDD)
+          "modprobe bcache",
+          "udevadm settle",
+          "make-bcache -B /dev/vda4 -C /dev/vda3",
+          "echo /dev/vda3 > /sys/fs/bcache/register",
+          "echo /dev/vda4 > /sys/fs/bcache/register",
+          "udevadm settle",
+          "mkfs.ext3 -L nixos /dev/bcache0",
+          "mount LABEL=nixos /mnt",
+          "mkfs.ext3 -L boot /dev/vda1",
+          "mkdir /mnt/boot",
+          "mount LABEL=boot /mnt/boot",
+          "mkswap -f /dev/vda2 -L swap",
+          "swapon -L swap",
+      )
+    '';
+  };
+
   # Test a basic install using GRUB 1.
   grub1 = makeInstallerTest "grub1" {
     createPartitions = ''
diff --git a/nixos/tests/iodine.nix b/nixos/tests/iodine.nix
index 8bd9603a6d6..41fb2e7778d 100644
--- a/nixos/tests/iodine.nix
+++ b/nixos/tests/iodine.nix
@@ -1,6 +1,7 @@
 import ./make-test-python.nix (
   { pkgs, ... }: let
     domain = "whatever.example.com";
+    password = "false;foo;exit;withspecialcharacters";
   in
     {
       name = "iodine";
@@ -21,7 +22,7 @@ import ./make-test-python.nix (
               services.iodine.server = {
                 enable = true;
                 ip = "10.53.53.1/24";
-                passwordFile = "${builtins.toFile "password" "foo"}";
+                passwordFile = "${builtins.toFile "password" password}";
                 inherit domain;
               };
 
@@ -41,7 +42,7 @@ import ./make-test-python.nix (
               server = domain;
             };
             systemd.tmpfiles.rules = [
-              "f /root/pw 0666 root root - foo"
+              "f /root/pw 0666 root root - ${password}"
             ];
             environment.systemPackages = [
               pkgs.nagiosPluginsOfficial
diff --git a/nixos/tests/ipfs.nix b/nixos/tests/ipfs.nix
index 3cff7e99ff8..4d721aec0c7 100644
--- a/nixos/tests/ipfs.nix
+++ b/nixos/tests/ipfs.nix
@@ -1,55 +1,25 @@
-
-import ./make-test.nix ({ pkgs, ...} : {
+import ./make-test-python.nix ({ pkgs, ...} : {
   name = "ipfs";
   meta = with pkgs.stdenv.lib.maintainers; {
     maintainers = [ mguentner ];
   };
 
-  nodes = {
-    adder =
-      { ... }:
-      {
-        services.ipfs = {
-          enable = true;
-          defaultMode = "norouting";
-          gatewayAddress = "/ip4/127.0.0.1/tcp/2323";
-          apiAddress = "/ip4/127.0.0.1/tcp/2324";
-        };
-        networking.firewall.allowedTCPPorts = [ 4001 ];
-      };
-    getter =
-      { ... }:
-      {
-        services.ipfs = {
-          enable = true;
-          defaultMode = "norouting";
-          autoMount = true;
-        };
-        networking.firewall.allowedTCPPorts = [ 4001 ];
-      };
+  nodes.machine = { ... }: {
+    services.ipfs = {
+      enable = true;
+      apiAddress = "/ip4/127.0.0.1/tcp/2324";
+    };
   };
 
   testScript = ''
-    startAll;
-    $adder->waitForUnit("ipfs-norouting");
-    $getter->waitForUnit("ipfs-norouting");
-
-    # wait until api is available
-    $adder->waitUntilSucceeds("ipfs --api /ip4/127.0.0.1/tcp/2324 id");
-    my $addrId = $adder->succeed("ipfs --api /ip4/127.0.0.1/tcp/2324 id -f=\"<id>\"");
-    my $addrIp = (split /[ \/]+/, $adder->succeed("ip -o -4 addr show dev eth1"))[3];
-
-    $adder->mustSucceed("[ -n \"\$(ipfs --api /ip4/127.0.0.1/tcp/2324 config Addresses.Gateway | grep /ip4/127.0.0.1/tcp/2323)\" ]");
-
-    # wait until api is available
-    $getter->waitUntilSucceeds("ipfs --api /ip4/127.0.0.1/tcp/5001 id");
-    my $ipfsHash = $adder->mustSucceed("echo fnord | ipfs --api /ip4/127.0.0.1/tcp/2324 add | cut -d' ' -f2");
-    chomp($ipfsHash);
+    start_all()
+    machine.wait_for_unit("ipfs")
 
-    $adder->mustSucceed("[ -n \"\$(echo fnord | ipfs --api /ip4/127.0.0.1/tcp/2324 add | grep added)\" ]");
+    machine.wait_until_succeeds("ipfs --api /ip4/127.0.0.1/tcp/2324 id")
+    ipfs_hash = machine.succeed(
+        "echo fnord | ipfs --api /ip4/127.0.0.1/tcp/2324 add | awk '{ print $2 }'"
+    )
 
-    $getter->mustSucceed("ipfs --api /ip4/127.0.0.1/tcp/5001 swarm connect /ip4/$addrIp/tcp/4001/ipfs/$addrId");
-    $getter->mustSucceed("[ -n \"\$(ipfs --api /ip4/127.0.0.1/tcp/5001 cat /ipfs/$ipfsHash | grep fnord)\" ]");
-    $getter->mustSucceed("[ -n \"$(cat /ipfs/$ipfsHash | grep fnord)\" ]");
-    '';
+    machine.succeed(f"ipfs cat /ipfs/{ipfs_hash.strip()} | grep fnord")
+  '';
 })
diff --git a/nixos/tests/k3s.nix b/nixos/tests/k3s.nix
new file mode 100644
index 00000000000..5bda6f493f0
--- /dev/null
+++ b/nixos/tests/k3s.nix
@@ -0,0 +1,78 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+
+let
+  # A suitable k3s pause image, also used for the test pod
+  pauseImage = pkgs.dockerTools.buildImage {
+    name = "test.local/pause";
+    tag = "local";
+    contents = with pkgs; [ tini coreutils busybox ];
+    config.Entrypoint = [ "/bin/tini" "--" "/bin/sleep" "inf" ];
+  };
+  testPodYaml = pkgs.writeText "test.yml" ''
+    # Don't use the default service account because there's a race where it may
+    # not be created yet; make our own instead.
+    apiVersion: v1
+    kind: ServiceAccount
+    metadata:
+      name: test
+    ---
+    apiVersion: v1
+    kind: Pod
+    metadata:
+      name: test
+    spec:
+      serviceAccountName: test
+      containers:
+      - name: test
+        image: test.local/pause:local
+        imagePullPolicy: Never
+        command: ["sh", "-c", "sleep inf"]
+  '';
+in
+{
+  name = "k3s";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ euank ];
+  };
+
+  nodes = {
+    k3s =
+      { pkgs, ... }: {
+        environment.systemPackages = [ pkgs.k3s pkgs.gzip ];
+
+        # k3s uses enough resources the default vm fails.
+        virtualisation.memorySize = pkgs.lib.mkDefault 1536;
+        virtualisation.diskSize = pkgs.lib.mkDefault 4096;
+
+        services.k3s.enable = true;
+        services.k3s.role = "server";
+        services.k3s.package = pkgs.k3s;
+        # Slightly reduce resource usage
+        services.k3s.extraFlags = "--no-deploy coredns,servicelb,traefik,local-storage,metrics-server --pause-image test.local/pause:local";
+
+        users.users = {
+          noprivs = {
+            isNormalUser = true;
+            description = "Can't access k3s by default";
+            password = "*";
+          };
+        };
+      };
+  };
+
+  testScript = ''
+    start_all()
+
+    k3s.wait_for_unit("k3s")
+    k3s.succeed("k3s kubectl cluster-info")
+    k3s.fail("sudo -u noprivs k3s kubectl cluster-info")
+    # k3s.succeed("k3s check-config") # fails with the current nixos kernel config, uncomment once this passes
+
+    k3s.succeed(
+        "zcat ${pauseImage} | k3s ctr image import -"
+    )
+
+    k3s.succeed("k3s kubectl apply -f ${testPodYaml}")
+    k3s.succeed("k3s kubectl wait --for 'condition=Ready' pod/test")
+  '';
+})
diff --git a/nixos/tests/ldap.nix b/nixos/tests/ldap.nix
deleted file mode 100644
index 74b002fc00e..00000000000
--- a/nixos/tests/ldap.nix
+++ /dev/null
@@ -1,405 +0,0 @@
-import ./make-test-python.nix ({ pkgs, lib, ...} :
-
-let
-  unlines = lib.concatStringsSep "\n";
-  unlinesAttrs = f: as: unlines (lib.mapAttrsToList f as);
-
-  dbDomain = "example.com";
-  dbSuffix = "dc=example,dc=com";
-  dbAdminDn = "cn=admin,${dbSuffix}";
-  dbAdminPwd = "admin-password";
-  # NOTE: slappasswd -h "{SSHA}" -s '${dbAdminPwd}'
-  dbAdminPwdHash = "{SSHA}i7FopSzkFQMrHzDMB1vrtkI0rBnwouP8";
-  ldapUser = "test-ldap-user";
-  ldapUserId = 10000;
-  ldapUserPwd = "user-password";
-  # NOTE: slappasswd -h "{SSHA}" -s '${ldapUserPwd}'
-  ldapUserPwdHash = "{SSHA}v12XICMZNGT6r2KJ26rIkN8Vvvp4QX6i";
-  ldapGroup = "test-ldap-group";
-  ldapGroupId = 10000;
-
-  mkClient = useDaemon:
-    { lib, ... }:
-    {
-      virtualisation.memorySize = 256;
-      virtualisation.vlans = [ 1 ];
-      security.pam.services.su.rootOK = lib.mkForce false;
-      users.ldap.enable = true;
-      users.ldap.daemon = {
-        enable = useDaemon;
-        rootpwmoddn = "cn=admin,${dbSuffix}";
-        rootpwmodpwFile = "/etc/nslcd.rootpwmodpw";
-      };
-      users.ldap.loginPam = true;
-      users.ldap.nsswitch = true;
-      users.ldap.server = "ldap://server";
-      users.ldap.base = "ou=posix,${dbSuffix}";
-      users.ldap.bind = {
-        distinguishedName = "cn=admin,${dbSuffix}";
-        passwordFile = "/etc/ldap/bind.password";
-      };
-      # NOTE: passwords stored in clear in Nix's store, but this is a test.
-      environment.etc."ldap/bind.password".source = pkgs.writeText "password" dbAdminPwd;
-      environment.etc."nslcd.rootpwmodpw".source = pkgs.writeText "rootpwmodpw" dbAdminPwd;
-    };
-in
-
-{
-  name = "ldap";
-  meta = with pkgs.stdenv.lib.maintainers; {
-    maintainers = [ montag451 ];
-  };
-
-  nodes = {
-
-    server =
-      { pkgs, config, ... }:
-      let
-        inherit (config.services) openldap;
-
-        slapdConfig = pkgs.writeText "cn=config.ldif" (''
-          dn: cn=config
-          objectClass: olcGlobal
-          #olcPidFile: /run/slapd/slapd.pid
-          # List of arguments that were passed to the server
-          #olcArgsFile: /run/slapd/slapd.args
-          # Read slapd-config(5) for possible values
-          olcLogLevel: none
-          # The tool-threads parameter sets the actual amount of CPU's
-          # that is used for indexing.
-          olcToolThreads: 1
-
-          dn: olcDatabase={-1}frontend,cn=config
-          objectClass: olcDatabaseConfig
-          objectClass: olcFrontendConfig
-          # The maximum number of entries that is returned for a search operation
-          olcSizeLimit: 500
-          # Allow unlimited access to local connection from the local root user
-          olcAccess: to *
-            by dn.exact=gidNumber=0+uidNumber=0,cn=peercred,cn=external,cn=auth manage
-            by * break
-          # Allow unauthenticated read access for schema and base DN autodiscovery
-          olcAccess: to dn.exact=""
-            by * read
-          olcAccess: to dn.base="cn=Subschema"
-            by * read
-
-          dn: olcDatabase=config,cn=config
-          objectClass: olcDatabaseConfig
-          olcRootDN: cn=admin,cn=config
-          #olcRootPW:
-          # NOTE: access to cn=config, system root can be manager
-          # with SASL mechanism (-Y EXTERNAL) over unix socket (-H ldapi://)
-          olcAccess: to *
-            by dn.exact="gidNumber=0+uidNumber=0,cn=peercred,cn=external,cn=auth" manage
-            by * break
-
-          dn: cn=schema,cn=config
-          objectClass: olcSchemaConfig
-
-          include: file://${pkgs.openldap}/etc/schema/core.ldif
-          include: file://${pkgs.openldap}/etc/schema/cosine.ldif
-          include: file://${pkgs.openldap}/etc/schema/nis.ldif
-          include: file://${pkgs.openldap}/etc/schema/inetorgperson.ldif
-
-          dn: cn=module{0},cn=config
-          objectClass: olcModuleList
-          # Where the dynamically loaded modules are stored
-          #olcModulePath: /usr/lib/ldap
-          olcModuleLoad: back_mdb
-
-          ''
-          + unlinesAttrs (olcSuffix: {conf, ...}:
-              "include: file://" + pkgs.writeText "config.ldif" conf
-            ) slapdDatabases
-          );
-
-        slapdDatabases = {
-          ${dbSuffix} = {
-            conf = ''
-              dn: olcBackend={1}mdb,cn=config
-              objectClass: olcBackendConfig
-
-              dn: olcDatabase={1}mdb,cn=config
-              olcSuffix: ${dbSuffix}
-              olcDbDirectory: ${openldap.dataDir}/${dbSuffix}
-              objectClass: olcDatabaseConfig
-              objectClass: olcMdbConfig
-              # NOTE: checkpoint the database periodically in case of system failure
-              # and to speed up slapd shutdown.
-              olcDbCheckpoint: 512 30
-              # Database max size is 1G
-              olcDbMaxSize: 1073741824
-              olcLastMod: TRUE
-              # NOTE: database superuser. Needed for syncrepl,
-              # and used to auth as admin through a TCP connection.
-              olcRootDN: cn=admin,${dbSuffix}
-              olcRootPW: ${dbAdminPwdHash}
-              #
-              olcDbIndex: objectClass eq
-              olcDbIndex: cn,uid eq
-              olcDbIndex: uidNumber,gidNumber eq
-              olcDbIndex: member,memberUid eq
-              #
-              olcAccess: to attrs=userPassword
-                by self write
-                by anonymous auth
-                by dn="cn=admin,${dbSuffix}" write
-                by dn="gidNumber=0+uidNumber=0,cn=peercred,cn=external,cn=auth" write
-                by * none
-              olcAccess: to attrs=shadowLastChange
-                by self write
-                by dn="cn=admin,${dbSuffix}" write
-                by dn="gidNumber=0+uidNumber=0,cn=peercred,cn=external,cn=auth" write
-                by * none
-              olcAccess: to dn.sub="ou=posix,${dbSuffix}"
-                by self read
-                by dn="cn=admin,${dbSuffix}" read
-                by dn="gidNumber=0+uidNumber=0,cn=peercred,cn=external,cn=auth" read
-              olcAccess: to *
-                by self read
-                by * none
-            '';
-            data = ''
-              dn: ${dbSuffix}
-              objectClass: top
-              objectClass: dcObject
-              objectClass: organization
-              o: ${dbDomain}
-
-              dn: cn=admin,${dbSuffix}
-              objectClass: simpleSecurityObject
-              objectClass: organizationalRole
-              description: ${dbDomain} LDAP administrator
-              roleOccupant: ${dbSuffix}
-              userPassword: ${ldapUserPwdHash}
-
-              dn: ou=posix,${dbSuffix}
-              objectClass: top
-              objectClass: organizationalUnit
-
-              dn: ou=accounts,ou=posix,${dbSuffix}
-              objectClass: top
-              objectClass: organizationalUnit
-
-              dn: ou=groups,ou=posix,${dbSuffix}
-              objectClass: top
-              objectClass: organizationalUnit
-            ''
-            + lib.concatMapStrings posixAccount [
-              { uid=ldapUser; uidNumber=ldapUserId; gidNumber=ldapGroupId; userPassword=ldapUserPwdHash; }
-            ]
-            + lib.concatMapStrings posixGroup [
-              { gid=ldapGroup; gidNumber=ldapGroupId; members=[]; }
-            ];
-          };
-        };
-
-        # NOTE: create a user account using the posixAccount objectClass.
-        posixAccount =
-          { uid
-          , uidNumber ? null
-          , gidNumber ? null
-          , cn ? ""
-          , sn ? ""
-          , userPassword ? ""
-          , loginShell ? "/bin/sh"
-          }: ''
-
-            dn: uid=${uid},ou=accounts,ou=posix,${dbSuffix}
-            objectClass: person
-            objectClass: posixAccount
-            objectClass: shadowAccount
-            cn: ${cn}
-            gecos:
-            ${if gidNumber == null then "#" else "gidNumber: ${toString gidNumber}"}
-            homeDirectory: /home/${uid}
-            loginShell: ${loginShell}
-            sn: ${sn}
-            ${if uidNumber == null then "#" else "uidNumber: ${toString uidNumber}"}
-            ${if userPassword == "" then "#" else "userPassword: ${userPassword}"}
-          '';
-
-        # NOTE: create a group using the posixGroup objectClass.
-        posixGroup =
-          { gid
-          , gidNumber
-          , members
-          }: ''
-
-            dn: cn=${gid},ou=groups,ou=posix,${dbSuffix}
-            objectClass: top
-            objectClass: posixGroup
-            gidNumber: ${toString gidNumber}
-            ${lib.concatMapStrings (member: "memberUid: ${member}\n") members}
-          '';
-      in
-      {
-        virtualisation.memorySize = 256;
-        virtualisation.vlans = [ 1 ];
-        networking.firewall.allowedTCPPorts = [ 389 ];
-        services.openldap.enable = true;
-        services.openldap.dataDir = "/var/db/openldap";
-        services.openldap.configDir = "/var/db/slapd";
-        services.openldap.urlList = [
-          "ldap:///"
-          "ldapi:///"
-        ];
-        systemd.services.openldap = {
-          preStart = ''
-              set -e
-              # NOTE: slapd's config is always re-initialized.
-              rm -rf "${openldap.configDir}"/cn=config \
-                     "${openldap.configDir}"/cn=config.ldif
-              install -D -d -m 0700 -o "${openldap.user}" -g "${openldap.group}" "${openldap.configDir}"
-              # NOTE: olcDbDirectory must be created before adding the config.
-              '' +
-              unlinesAttrs (olcSuffix: {data, ...}: ''
-                # NOTE: database is always re-initialized.
-                rm -rf "${openldap.dataDir}/${olcSuffix}"
-                install -D -d -m 0700 -o "${openldap.user}" -g "${openldap.group}" \
-                 "${openldap.dataDir}/${olcSuffix}"
-                '') slapdDatabases
-              + ''
-              # NOTE: slapd is supposed to be stopped while in preStart,
-              #       hence slap* commands can safely be used.
-              umask 0077
-              ${pkgs.openldap}/bin/slapadd -n 0 \
-               -F "${openldap.configDir}" \
-               -l ${slapdConfig}
-              chown -R "${openldap.user}:${openldap.group}" "${openldap.configDir}"
-              # NOTE: slapadd(8): To populate the config database slapd-config(5),
-              #                   use -n 0 as it is always the first database.
-              #                   It must physically exist on the filesystem prior to this, however.
-            '' +
-            unlinesAttrs (olcSuffix: {data, ...}: ''
-              # NOTE: load database ${olcSuffix}
-              # (as root to avoid depending on sudo or chpst)
-              ${pkgs.openldap}/bin/slapadd \
-               -F "${openldap.configDir}" \
-               -l ${pkgs.writeText "data.ldif" data}
-              '' + ''
-              # NOTE: redundant with default openldap's preStart, but do not harm.
-              chown -R "${openldap.user}:${openldap.group}" \
-               "${openldap.dataDir}/${olcSuffix}"
-            '') slapdDatabases;
-        };
-      };
-
-    client1 = mkClient true; # use nss_pam_ldapd
-    client2 = mkClient false; # use nss_ldap and pam_ldap
-  };
-
-  testScript = ''
-    def expect_script(*commands):
-        script = ";".join(commands)
-        return f"${pkgs.expect}/bin/expect -c '{script}'"
-
-
-    server.start()
-    server.wait_for_unit("default.target")
-
-    with subtest("slapd: auth as database admin with SASL and check a POSIX account"):
-        server.succeed(
-            'test "$(ldapsearch -LLL -H ldapi:// -Y EXTERNAL '
-            + "-b 'uid=${ldapUser},ou=accounts,ou=posix,${dbSuffix}' "
-            + "-s base uidNumber | "
-            + "sed -ne 's/^uidNumber: \\(.*\\)/\\1/p')\" -eq ${toString ldapUserId}"
-        )
-
-    with subtest("slapd: auth as database admin with password and check a POSIX account"):
-        server.succeed(
-            "test \"$(ldapsearch -LLL -H ldap://server -D 'cn=admin,${dbSuffix}' "
-            + "-w '${dbAdminPwd}' -b 'uid=${ldapUser},ou=accounts,ou=posix,${dbSuffix}' "
-            + "-s base uidNumber | "
-            + "sed -ne 's/^uidNumber: \\(.*\\)/\\1/p')\" -eq ${toString ldapUserId}"
-        )
-
-    client1.start()
-    client1.wait_for_unit("default.target")
-
-    with subtest("password: su with password to a POSIX account"):
-        client1.succeed(
-            expect_script(
-                'spawn su "${ldapUser}"',
-                'expect "Password:"',
-                'send "${ldapUserPwd}\n"',
-                'expect "*"',
-                'send "whoami\n"',
-                'expect -ex "${ldapUser}" {exit}',
-                "exit 1",
-            )
-        )
-
-    with subtest("password: change password of a POSIX account as root"):
-        client1.succeed(
-            "chpasswd <<<'${ldapUser}:new-password'",
-            expect_script(
-                'spawn su "${ldapUser}"',
-                'expect "Password:"',
-                'send "new-password\n"',
-                'expect "*"',
-                'send "whoami\n"',
-                'expect -ex "${ldapUser}" {exit}',
-                "exit 1",
-            ),
-            "chpasswd <<<'${ldapUser}:${ldapUserPwd}'",
-        )
-
-    with subtest("password: change password of a POSIX account from itself"):
-        client1.succeed(
-            "chpasswd <<<'${ldapUser}:${ldapUserPwd}' ",
-            expect_script(
-                "spawn su --login ${ldapUser} -c passwd",
-                'expect "Password: "',
-                'send "${ldapUserPwd}\n"',
-                'expect "(current) UNIX password: "',
-                'send "${ldapUserPwd}\n"',
-                'expect "New password: "',
-                'send "new-password\n"',
-                'expect "Retype new password: "',
-                'send "new-password\n"',
-                'expect "passwd: password updated successfully" {exit}',
-                "exit 1",
-            ),
-            expect_script(
-                'spawn su "${ldapUser}"',
-                'expect "Password:"',
-                'send "${ldapUserPwd}\n"',
-                'expect "su: Authentication failure" {exit}',
-                "exit 1",
-            ),
-            expect_script(
-                'spawn su "${ldapUser}"',
-                'expect "Password:"',
-                'send "new-password\n"',
-                'expect "*"',
-                'send "whoami\n"',
-                'expect -ex "${ldapUser}" {exit}',
-                "exit 1",
-            ),
-            "chpasswd <<<'${ldapUser}:${ldapUserPwd}'",
-        )
-
-    client2.start()
-    client2.wait_for_unit("default.target")
-
-    with subtest("NSS"):
-        client1.succeed(
-            "test \"$(id -u    '${ldapUser}')\" -eq ${toString ldapUserId}",
-            "test \"$(id -u -n '${ldapUser}')\" =  '${ldapUser}'",
-            "test \"$(id -g    '${ldapUser}')\" -eq ${toString ldapGroupId}",
-            "test \"$(id -g -n '${ldapUser}')\" =  '${ldapGroup}'",
-            "test \"$(id -u    '${ldapUser}')\" -eq ${toString ldapUserId}",
-            "test \"$(id -u -n '${ldapUser}')\" =  '${ldapUser}'",
-            "test \"$(id -g    '${ldapUser}')\" -eq ${toString ldapGroupId}",
-            "test \"$(id -g -n '${ldapUser}')\" =  '${ldapGroup}'",
-        )
-
-    with subtest("PAM"):
-        client1.succeed(
-            "echo ${ldapUserPwd} | su -l '${ldapUser}' -c true",
-            "echo ${ldapUserPwd} | su -l '${ldapUser}' -c true",
-        )
-  '';
-})
diff --git a/nixos/tests/mediawiki.nix b/nixos/tests/mediawiki.nix
index 9468c1de8cc..008682310cf 100644
--- a/nixos/tests/mediawiki.nix
+++ b/nixos/tests/mediawiki.nix
@@ -8,6 +8,13 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
       services.mediawiki.virtualHost.hostName = "localhost";
       services.mediawiki.virtualHost.adminAddr = "root@example.com";
       services.mediawiki.passwordFile = pkgs.writeText "password" "correcthorsebatterystaple";
+      services.mediawiki.extensions = {
+        Matomo = pkgs.fetchzip {
+          url = "https://github.com/DaSchTour/matomo-mediawiki-extension/archive/v4.0.1.tar.gz";
+          sha256 = "0g5rd3zp0avwlmqagc59cg9bbkn3r7wx7p6yr80s644mj6dlvs1b";
+        };
+        ParserFunctions = null;
+      };
     };
 
   testScript = ''
diff --git a/nixos/tests/minio.nix b/nixos/tests/minio.nix
index 3b061974267..02d1f7aa6c2 100644
--- a/nixos/tests/minio.nix
+++ b/nixos/tests/minio.nix
@@ -44,7 +44,7 @@ in {
 
     # Create a test bucket on the server
     machine.succeed(
-        "mc config host add minio http://localhost:9000 ${accessKey} ${secretKey} S3v4"
+        "mc config host add minio http://localhost:9000 ${accessKey} ${secretKey} --api s3v4"
     )
     machine.succeed("mc mb minio/test-bucket")
     machine.succeed("${minioPythonScript}")
diff --git a/nixos/tests/mysql/mariadb-galera-mariabackup.nix b/nixos/tests/mysql/mariadb-galera-mariabackup.nix
new file mode 100644
index 00000000000..73abf6c555f
--- /dev/null
+++ b/nixos/tests/mysql/mariadb-galera-mariabackup.nix
@@ -0,0 +1,223 @@
+import ./../make-test-python.nix ({ pkgs, ...} :
+
+let
+  mysqlenv-common      = pkgs.buildEnv { name = "mysql-path-env-common";      pathsToLink = [ "/bin" ]; paths = with pkgs; [ bash gawk gnutar inetutils which ]; };
+  mysqlenv-mariabackup = pkgs.buildEnv { name = "mysql-path-env-mariabackup"; pathsToLink = [ "/bin" ]; paths = with pkgs; [ gzip iproute netcat procps pv socat ]; };
+
+in {
+  name = "mariadb-galera-mariabackup";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ izorkin ];
+  };
+
+  # The test creates a Galera cluster with 3 nodes and checks whether mariabackup-based SST works. The cluster is tested by creating a DB and a table on one node,
+  # and checking the table's presence on the other nodes.
+
+  nodes = {
+    galera_01 =
+      { pkgs, ... }:
+      {
+      networking = {
+        interfaces.eth1 = {
+          ipv4.addresses = [
+            { address = "192.168.1.1"; prefixLength = 24; }
+          ];
+        };
+        extraHosts = ''
+          192.168.1.1 galera_01
+          192.168.1.2 galera_02
+          192.168.1.3 galera_03
+        '';
+        firewall.allowedTCPPorts = [ 3306 4444 4567 4568 ];
+        firewall.allowedUDPPorts = [ 4567 ];
+      };
+      users.users.testuser = { };
+      systemd.services.mysql = with pkgs; {
+        path = [ mysqlenv-common mysqlenv-mariabackup ];
+      };
+      services.mysql = {
+        enable = true;
+        package = pkgs.mariadb;
+        ensureDatabases = [ "testdb" ];
+        ensureUsers = [{
+          name = "testuser";
+          ensurePermissions = {
+            "testdb.*" = "ALL PRIVILEGES";
+          };
+        }];
+        initialScript = pkgs.writeText "mariadb-init.sql" ''
+          GRANT ALL PRIVILEGES ON *.* TO 'check_repl'@'localhost' IDENTIFIED BY 'check_pass' WITH GRANT OPTION;
+          FLUSH PRIVILEGES;
+        '';
+        settings = {
+          mysqld = {
+            bind_address = "0.0.0.0";
+          };
+          galera = {
+            wsrep_on = "ON";
+            wsrep_debug = "OFF";
+            wsrep_retry_autocommit = "3";
+            wsrep_provider = "${pkgs.mariadb-galera_25}/lib/galera/libgalera_smm.so";
+            wsrep_cluster_address = "gcomm://";
+            wsrep_cluster_name = "galera";
+            wsrep_node_address = "192.168.1.1";
+            wsrep_node_name = "galera_01";
+            wsrep_sst_method = "mariabackup";
+            wsrep_sst_auth = "check_repl:check_pass";
+            binlog_format = "ROW";
+            enforce_storage_engine = "InnoDB";
+            innodb_autoinc_lock_mode = "2";
+          };
+        };
+      };
+    };
+
+    galera_02 =
+      { pkgs, ... }:
+      {
+      networking = {
+        interfaces.eth1 = {
+          ipv4.addresses = [
+            { address = "192.168.1.2"; prefixLength = 24; }
+          ];
+        };
+        extraHosts = ''
+          192.168.1.1 galera_01
+          192.168.1.2 galera_02
+          192.168.1.3 galera_03
+        '';
+        firewall.allowedTCPPorts = [ 3306 4444 4567 4568 ];
+        firewall.allowedUDPPorts = [ 4567 ];
+      };
+      users.users.testuser = { };
+      systemd.services.mysql = with pkgs; {
+        path = [ mysqlenv-common mysqlenv-mariabackup ];
+      };
+      services.mysql = {
+        enable = true;
+        package = pkgs.mariadb;
+        settings = {
+          mysqld = {
+            bind_address = "0.0.0.0";
+          };
+          galera = {
+            wsrep_on = "ON";
+            wsrep_debug = "OFF";
+            wsrep_retry_autocommit = "3";
+            wsrep_provider = "${pkgs.mariadb-galera_25}/lib/galera/libgalera_smm.so";
+            wsrep_cluster_address = "gcomm://galera_01,galera_02,galera_03";
+            wsrep_cluster_name = "galera";
+            wsrep_node_address = "192.168.1.2";
+            wsrep_node_name = "galera_02";
+            wsrep_sst_method = "mariabackup";
+            wsrep_sst_auth = "check_repl:check_pass";
+            binlog_format = "ROW";
+            enforce_storage_engine = "InnoDB";
+            innodb_autoinc_lock_mode = "2";
+          };
+        };
+      };
+    };
+
+    galera_03 =
+      { pkgs, ... }:
+      {
+      networking = {
+        interfaces.eth1 = {
+          ipv4.addresses = [
+            { address = "192.168.1.3"; prefixLength = 24; }
+          ];
+        };
+        extraHosts = ''
+          192.168.1.1 galera_01
+          192.168.1.2 galera_02
+          192.168.1.3 galera_03
+        '';
+        firewall.allowedTCPPorts = [ 3306 4444 4567 4568 ];
+        firewall.allowedUDPPorts = [ 4567 ];
+      };
+      users.users.testuser = { };
+      systemd.services.mysql = with pkgs; {
+        path = [ mysqlenv-common mysqlenv-mariabackup ];
+      };
+      services.mysql = {
+        enable = true;
+        package = pkgs.mariadb;
+        settings = {
+          mysqld = {
+            bind_address = "0.0.0.0";
+          };
+          galera = {
+            wsrep_on = "ON";
+            wsrep_debug = "OFF";
+            wsrep_retry_autocommit = "3";
+            wsrep_provider = "${pkgs.mariadb-galera_25}/lib/galera/libgalera_smm.so";
+            wsrep_cluster_address = "gcomm://galera_01,galera_02,galera_03";
+            wsrep_cluster_name = "galera";
+            wsrep_node_address = "192.168.1.3";
+            wsrep_node_name = "galera_03";
+            wsrep_sst_method = "mariabackup";
+            wsrep_sst_auth = "check_repl:check_pass";
+            binlog_format = "ROW";
+            enforce_storage_engine = "InnoDB";
+            innodb_autoinc_lock_mode = "2";
+          };
+        };
+      };
+    };
+  };
+
+  testScript = ''
+    galera_01.start()
+    galera_01.wait_for_unit("mysql")
+    galera_01.wait_for_open_port(3306)
+    galera_01.succeed(
+        "sudo -u testuser mysql -u testuser -e 'use testdb; create table db1 (test_id INT, PRIMARY KEY (test_id)) ENGINE = InnoDB;'"
+    )
+    galera_01.succeed(
+        "sudo -u testuser mysql -u testuser -e 'use testdb; insert into db1 values (37);'"
+    )
+    galera_02.start()
+    galera_02.wait_for_unit("mysql")
+    galera_02.wait_for_open_port(3306)
+    galera_03.start()
+    galera_03.wait_for_unit("mysql")
+    galera_03.wait_for_open_port(3306)
+    galera_02.succeed(
+        "sudo -u testuser mysql -u root -e 'use testdb; select test_id from db1;' -N | grep 37"
+    )
+    galera_02.succeed(
+        "sudo -u testuser mysql -u root -e 'use testdb; create table db2 (test_id INT, PRIMARY KEY (test_id)) ENGINE = InnoDB;'"
+    )
+    galera_02.succeed("systemctl stop mysql")
+    galera_01.succeed(
+        "sudo -u testuser mysql -u testuser -e 'use testdb; insert into db2 values (38);'"
+    )
+    galera_03.succeed(
+        "sudo -u testuser mysql -u root -e 'use testdb; create table db3 (test_id INT, PRIMARY KEY (test_id)) ENGINE = InnoDB;'"
+    )
+    galera_01.succeed(
+        "sudo -u testuser mysql -u testuser -e 'use testdb; insert into db3 values (39);'"
+    )
+    galera_02.succeed("systemctl start mysql")
+    galera_02.wait_for_open_port(3306)
+    galera_02.succeed(
+        "sudo -u testuser mysql -u root -e 'show status' -N | grep 'wsrep_cluster_size.*3'"
+    )
+    galera_03.succeed(
+        "sudo -u testuser mysql -u root -e 'show status' -N | grep 'wsrep_local_state_comment.*Synced'"
+    )
+    galera_01.succeed(
+        "sudo -u testuser mysql -u root -e 'use testdb; select test_id from db3;' -N | grep 39"
+    )
+    galera_02.succeed(
+        "sudo -u testuser mysql -u root -e 'use testdb; select test_id from db2;' -N | grep 38"
+    )
+    galera_03.succeed(
+        "sudo -u testuser mysql -u root -e 'use testdb; select test_id from db1;' -N | grep 37"
+    )
+    galera_01.succeed("sudo -u testuser mysql -u testuser -e 'use testdb; drop table db3;'")
+    galera_02.succeed("sudo -u testuser mysql -u root -e 'use testdb; drop table db2;'")
+    galera_03.succeed("sudo -u testuser mysql -u root -e 'use testdb; drop table db1;'")
+  '';
+})
diff --git a/nixos/tests/mysql/mariadb-galera-rsync.nix b/nixos/tests/mysql/mariadb-galera-rsync.nix
new file mode 100644
index 00000000000..cacae4569b5
--- /dev/null
+++ b/nixos/tests/mysql/mariadb-galera-rsync.nix
@@ -0,0 +1,216 @@
+import ./../make-test-python.nix ({ pkgs, ...} :
+
+let
+  mysqlenv-common      = pkgs.buildEnv { name = "mysql-path-env-common";      pathsToLink = [ "/bin" ]; paths = with pkgs; [ bash gawk gnutar inetutils which ]; };
+  mysqlenv-rsync       = pkgs.buildEnv { name = "mysql-path-env-rsync";       pathsToLink = [ "/bin" ]; paths = with pkgs; [ lsof procps rsync stunnel ]; };
+
+in {
+  name = "mariadb-galera-rsync";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ izorkin ];
+  };
+
+  # The test creates a Galera cluster with 3 nodes and checks whether rsync-based SST works. The cluster is tested by creating a DB and a table on one node,
+  # and checking the table's presence on the other nodes.
+
+  nodes = {
+    galera_04 =
+      { pkgs, ... }:
+      {
+      networking = {
+        interfaces.eth1 = {
+          ipv4.addresses = [
+            { address = "192.168.2.1"; prefixLength = 24; }
+          ];
+        };
+        extraHosts = ''
+          192.168.2.1 galera_04
+          192.168.2.2 galera_05
+          192.168.2.3 galera_06
+        '';
+        firewall.allowedTCPPorts = [ 3306 4444 4567 4568 ];
+        firewall.allowedUDPPorts = [ 4567 ];
+      };
+      users.users.testuser = { };
+      systemd.services.mysql = with pkgs; {
+        path = [ mysqlenv-common mysqlenv-rsync ];
+      };
+      services.mysql = {
+        enable = true;
+        package = pkgs.mariadb;
+        ensureDatabases = [ "testdb" ];
+        ensureUsers = [{
+          name = "testuser";
+          ensurePermissions = {
+            "testdb.*" = "ALL PRIVILEGES";
+          };
+        }];
+        settings = {
+          mysqld = {
+            bind_address = "0.0.0.0";
+          };
+          galera = {
+            wsrep_on = "ON";
+            wsrep_debug = "OFF";
+            wsrep_retry_autocommit = "3";
+            wsrep_provider = "${pkgs.mariadb-galera_25}/lib/galera/libgalera_smm.so";
+            wsrep_cluster_address = "gcomm://";
+            wsrep_cluster_name = "galera-rsync";
+            wsrep_node_address = "192.168.2.1";
+            wsrep_node_name = "galera_04";
+            wsrep_sst_method = "rsync";
+            binlog_format = "ROW";
+            enforce_storage_engine = "InnoDB";
+            innodb_autoinc_lock_mode = "2";
+          };
+        };
+      };
+    };
+
+    galera_05 =
+      { pkgs, ... }:
+      {
+      networking = {
+        interfaces.eth1 = {
+          ipv4.addresses = [
+            { address = "192.168.2.2"; prefixLength = 24; }
+          ];
+        };
+        extraHosts = ''
+          192.168.2.1 galera_04
+          192.168.2.2 galera_05
+          192.168.2.3 galera_06
+        '';
+        firewall.allowedTCPPorts = [ 3306 4444 4567 4568 ];
+        firewall.allowedUDPPorts = [ 4567 ];
+      };
+      users.users.testuser = { };
+      systemd.services.mysql = with pkgs; {
+        path = [ mysqlenv-common mysqlenv-rsync ];
+      };
+      services.mysql = {
+        enable = true;
+        package = pkgs.mariadb;
+        settings = {
+          mysqld = {
+            bind_address = "0.0.0.0";
+          };
+          galera = {
+            wsrep_on = "ON";
+            wsrep_debug = "OFF";
+            wsrep_retry_autocommit = "3";
+            wsrep_provider = "${pkgs.mariadb-galera_25}/lib/galera/libgalera_smm.so";
+            wsrep_cluster_address = "gcomm://galera_04,galera_05,galera_06";
+            wsrep_cluster_name = "galera-rsync";
+            wsrep_node_address = "192.168.2.2";
+            wsrep_node_name = "galera_05";
+            wsrep_sst_method = "rsync";
+            binlog_format = "ROW";
+            enforce_storage_engine = "InnoDB";
+            innodb_autoinc_lock_mode = "2";
+          };
+        };
+      };
+    };
+
+    galera_06 =
+      { pkgs, ... }:
+      {
+      networking = {
+        interfaces.eth1 = {
+          ipv4.addresses = [
+            { address = "192.168.2.3"; prefixLength = 24; }
+          ];
+        };
+        extraHosts = ''
+          192.168.2.1 galera_04
+          192.168.2.2 galera_05
+          192.168.2.3 galera_06
+        '';
+        firewall.allowedTCPPorts = [ 3306 4444 4567 4568 ];
+        firewall.allowedUDPPorts = [ 4567 ];
+      };
+      users.users.testuser = { };
+      systemd.services.mysql = with pkgs; {
+        path = [ mysqlenv-common mysqlenv-rsync ];
+      };
+      services.mysql = {
+        enable = true;
+        package = pkgs.mariadb;
+        settings = {
+          mysqld = {
+            bind_address = "0.0.0.0";
+          };
+          galera = {
+            wsrep_on = "ON";
+            wsrep_debug = "OFF";
+            wsrep_retry_autocommit = "3";
+            wsrep_provider = "${pkgs.mariadb-galera_25}/lib/galera/libgalera_smm.so";
+            wsrep_cluster_address = "gcomm://galera_04,galera_05,galera_06";
+            wsrep_cluster_name = "galera-rsync";
+            wsrep_node_address = "192.168.2.3";
+            wsrep_node_name = "galera_06";
+            wsrep_sst_method = "rsync";
+            binlog_format = "ROW";
+            enforce_storage_engine = "InnoDB";
+            innodb_autoinc_lock_mode = "2";
+          };
+        };
+      };
+    };
+  };
+
+  testScript = ''
+    galera_04.start()
+    galera_04.wait_for_unit("mysql")
+    galera_04.wait_for_open_port(3306)
+    galera_04.succeed(
+        "sudo -u testuser mysql -u testuser -e 'use testdb; create table db1 (test_id INT, PRIMARY KEY (test_id)) ENGINE = InnoDB;'"
+    )
+    galera_04.succeed(
+        "sudo -u testuser mysql -u testuser -e 'use testdb; insert into db1 values (41);'"
+    )
+    galera_05.start()
+    galera_05.wait_for_unit("mysql")
+    galera_05.wait_for_open_port(3306)
+    galera_06.start()
+    galera_06.wait_for_unit("mysql")
+    galera_06.wait_for_open_port(3306)
+    galera_05.succeed(
+        "sudo -u testuser mysql -u root -e 'use testdb; select test_id from db1;' -N | grep 41"
+    )
+    galera_05.succeed(
+        "sudo -u testuser mysql -u root -e 'use testdb; create table db2 (test_id INT, PRIMARY KEY (test_id)) ENGINE = InnoDB;'"
+    )
+    galera_05.succeed("systemctl stop mysql")
+    galera_04.succeed(
+        "sudo -u testuser mysql -u testuser -e 'use testdb; insert into db2 values (42);'"
+    )
+    galera_06.succeed(
+        "sudo -u testuser mysql -u root -e 'use testdb; create table db3 (test_id INT, PRIMARY KEY (test_id)) ENGINE = InnoDB;'"
+    )
+    galera_04.succeed(
+        "sudo -u testuser mysql -u testuser -e 'use testdb; insert into db3 values (43);'"
+    )
+    galera_05.succeed("systemctl start mysql")
+    galera_05.wait_for_open_port(3306)
+    galera_05.succeed(
+        "sudo -u testuser mysql -u root -e 'show status' -N | grep 'wsrep_cluster_size.*3'"
+    )
+    galera_06.succeed(
+        "sudo -u testuser mysql -u root -e 'show status' -N | grep 'wsrep_local_state_comment.*Synced'"
+    )
+    galera_04.succeed(
+        "sudo -u testuser mysql -u root -e 'use testdb; select test_id from db3;' -N | grep 43"
+    )
+    galera_05.succeed(
+        "sudo -u testuser mysql -u root -e 'use testdb; select test_id from db2;' -N | grep 42"
+    )
+    galera_06.succeed(
+        "sudo -u testuser mysql -u root -e 'use testdb; select test_id from db1;' -N | grep 41"
+    )
+    galera_04.succeed("sudo -u testuser mysql -u testuser -e 'use testdb; drop table db3;'")
+    galera_05.succeed("sudo -u testuser mysql -u root -e 'use testdb; drop table db2;'")
+    galera_06.succeed("sudo -u testuser mysql -u root -e 'use testdb; drop table db1;'")
+  '';
+})
diff --git a/nixos/tests/automysqlbackup.nix b/nixos/tests/mysql/mysql-autobackup.nix
index 224b93862fb..65576e52a53 100644
--- a/nixos/tests/automysqlbackup.nix
+++ b/nixos/tests/mysql/mysql-autobackup.nix
@@ -1,4 +1,4 @@
-import ./make-test-python.nix ({ pkgs, lib, ... }:
+import ./../make-test-python.nix ({ pkgs, lib, ... }:
 
 {
   name = "automysqlbackup";
diff --git a/nixos/tests/mysql-backup.nix b/nixos/tests/mysql/mysql-backup.nix
index a0595e4d553..c4c1079a8a6 100644
--- a/nixos/tests/mysql-backup.nix
+++ b/nixos/tests/mysql/mysql-backup.nix
@@ -1,5 +1,5 @@
 # Test whether mysqlBackup option works
-import ./make-test-python.nix ({ pkgs, ... } : {
+import ./../make-test-python.nix ({ pkgs, ... } : {
   name = "mysql-backup";
   meta = with pkgs.stdenv.lib.maintainers; {
     maintainers = [ rvl ];
diff --git a/nixos/tests/mysql-replication.nix b/nixos/tests/mysql/mysql-replication.nix
index a2654f041ad..81038dccd94 100644
--- a/nixos/tests/mysql-replication.nix
+++ b/nixos/tests/mysql/mysql-replication.nix
@@ -1,4 +1,4 @@
-import ./make-test-python.nix ({ pkgs, ...} :
+import ./../make-test-python.nix ({ pkgs, ...} :
 
 let
   replicateUser = "replicate";
diff --git a/nixos/tests/mysql.nix b/nixos/tests/mysql/mysql.nix
index 11c1dabf936..d236ce94632 100644
--- a/nixos/tests/mysql.nix
+++ b/nixos/tests/mysql/mysql.nix
@@ -1,4 +1,4 @@
-import ./make-test-python.nix ({ pkgs, ...} : {
+import ./../make-test-python.nix ({ pkgs, ...} : {
   name = "mysql";
   meta = with pkgs.stdenv.lib.maintainers; {
     maintainers = [ eelco shlevy ];
diff --git a/nixos/tests/testdb.sql b/nixos/tests/mysql/testdb.sql
index 3c68c49ae82..3c68c49ae82 100644
--- a/nixos/tests/testdb.sql
+++ b/nixos/tests/mysql/testdb.sql
diff --git a/nixos/tests/nesting.nix b/nixos/tests/nesting.nix
deleted file mode 100644
index a75806b24ff..00000000000
--- a/nixos/tests/nesting.nix
+++ /dev/null
@@ -1,44 +0,0 @@
-import ./make-test-python.nix {
-  name = "nesting";
-  nodes =  {
-    clone = { pkgs, ... }: {
-      environment.systemPackages = [ pkgs.cowsay ];
-      nesting.clone = [
-        ({ pkgs, ... }: {
-          environment.systemPackages = [ pkgs.hello ];
-        })
-      ];
-    };
-    children = { pkgs, ... }: {
-      environment.systemPackages = [ pkgs.cowsay ];
-      nesting.children = [
-        ({ pkgs, ... }: {
-          environment.systemPackages = [ pkgs.hello ];
-        })
-      ];
-    };
-  };
-  testScript = ''
-    clone.wait_for_unit("default.target")
-    clone.succeed("cowsay hey")
-    clone.fail("hello")
-
-    with subtest("Nested clones do inherit from parent"):
-        clone.succeed(
-            "/run/current-system/fine-tune/child-1/bin/switch-to-configuration test"
-        )
-        clone.succeed("cowsay hey")
-        clone.succeed("hello")
-
-    children.wait_for_unit("default.target")
-    children.succeed("cowsay hey")
-    children.fail("hello")
-
-    with subtest("Nested children do not inherit from parent"):
-        children.succeed(
-            "/run/current-system/fine-tune/child-1/bin/switch-to-configuration test"
-        )
-        children.fail("cowsay hey")
-        children.succeed("hello")
-  '';
-}
diff --git a/nixos/tests/networking.nix b/nixos/tests/networking.nix
index 0a6507d2dc8..3d8ab761a44 100644
--- a/nixos/tests/networking.nix
+++ b/nixos/tests/networking.nix
@@ -200,6 +200,7 @@ let
           useDHCP = false;
           interfaces.eth1 = {
             ipv4.addresses = mkOverride 0 [ ];
+            mtu = 1343;
             useDHCP = true;
           };
           interfaces.eth2.ipv4.addresses = mkOverride 0 [ ];
@@ -216,6 +217,9 @@ let
           with subtest("Wait until we have an ip address on each interface"):
               client.wait_until_succeeds("ip addr show dev eth1 | grep -q '192.168.1'")
 
+          with subtest("ensure MTU is set"):
+              assert "mtu 1343" in client.succeed("ip link show dev eth1")
+
           with subtest("Test vlan 1"):
               client.wait_until_succeeds("ping -c 1 192.168.1.1")
               client.wait_until_succeeds("ping -c 1 192.168.1.2")
@@ -455,11 +459,14 @@ let
           ipv4.addresses = [ { address = "192.168.1.1"; prefixLength = 24; } ];
           ipv6.addresses = [ { address = "2001:1470:fffd:2096::"; prefixLength = 64; } ];
           virtual = true;
+          mtu = 1342;
+          macAddress = "02:de:ad:be:ef:01";
         };
         networking.interfaces.tun0 = {
           ipv4.addresses = [ { address = "192.168.1.2"; prefixLength = 24; } ];
           ipv6.addresses = [ { address = "2001:1470:fffd:2097::"; prefixLength = 64; } ];
           virtual = true;
+          mtu = 1343;
         };
       };
 
@@ -471,7 +478,7 @@ let
 
         with subtest("Wait for networking to come up"):
             machine.start()
-            machine.wait_for_unit("network-online.target")
+            machine.wait_for_unit("network.target")
 
         with subtest("Test interfaces set up"):
             list = machine.succeed("ip tuntap list | sort").strip()
@@ -486,7 +493,12 @@ let
             """.format(
                 list, targetList
             )
-
+        with subtest("Test MTU and MAC Address are configured"):
+            assert "mtu 1342" in machine.succeed("ip link show dev tap0")
+            assert "mtu 1343" in machine.succeed("ip link show dev tun0")
+            assert "02:de:ad:be:ef:01" in machine.succeed("ip link show dev tap0")
+      '' # network-addresses-* only exist in scripted networking
+      + optionalString (!networkd) ''
         with subtest("Test interfaces clean up"):
             machine.succeed("systemctl stop network-addresses-tap0")
             machine.sleep(10)
@@ -602,17 +614,17 @@ let
       };
 
       testScript = ''
-        targetIPv4Table = """
-        10.0.0.0/16 proto static scope link mtu 1500 
-        192.168.1.0/24 proto kernel scope link src 192.168.1.2 
-        192.168.2.0/24 via 192.168.1.1 proto static 
-        """.strip()
-
-        targetIPv6Table = """
-        2001:1470:fffd:2097::/64 proto kernel metric 256 pref medium
-        2001:1470:fffd:2098::/64 via fdfd:b3f0::1 proto static metric 1024 pref medium
-        fdfd:b3f0::/48 proto static metric 1024 pref medium
-        """.strip()
+        targetIPv4Table = [
+            "10.0.0.0/16 proto static scope link mtu 1500",
+            "192.168.1.0/24 proto kernel scope link src 192.168.1.2",
+            "192.168.2.0/24 via 192.168.1.1 proto static",
+        ]
+
+        targetIPv6Table = [
+            "2001:1470:fffd:2097::/64 proto kernel metric 256 pref medium",
+            "2001:1470:fffd:2098::/64 via fdfd:b3f0::1 proto static metric 1024 pref medium",
+            "fdfd:b3f0::/48 proto static metric 1024 pref medium",
+        ]
 
         machine.start()
         machine.wait_for_unit("network.target")
@@ -620,9 +632,9 @@ let
         with subtest("test routing tables"):
             ipv4Table = machine.succeed("ip -4 route list dev eth0 | head -n3").strip()
             ipv6Table = machine.succeed("ip -6 route list dev eth0 | head -n3").strip()
-            assert (
-                ipv4Table == targetIPv4Table
-            ), """
+            assert [
+                l.strip() for l in ipv4Table.splitlines()
+            ] == targetIPv4Table, """
               The IPv4 routing table does not match the expected one:
                 Result:
                   {}
@@ -631,9 +643,9 @@ let
               """.format(
                 ipv4Table, targetIPv4Table
             )
-            assert (
-                ipv6Table == targetIPv6Table
-            ), """
+            assert [
+                l.strip() for l in ipv6Table.splitlines()
+            ] == targetIPv6Table, """
               The IPv6 routing table does not match the expected one:
                 Result:
                   {}
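
The assertions added above exercise the new per-interface `mtu` and `macAddress` options; in a host configuration this would look roughly as follows (interface name and values are illustrative):

  networking.interfaces.eth1 = {
    useDHCP = true;
    mtu = 1400;                        # set the link MTU
    macAddress = "02:de:ad:be:ef:01";  # override the MAC address
  };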
diff --git a/nixos/tests/nginx-etag.nix b/nixos/tests/nginx-etag.nix
index e357309d166..63ab2e0c6c2 100644
--- a/nixos/tests/nginx-etag.nix
+++ b/nixos/tests/nginx-etag.nix
@@ -19,7 +19,7 @@ import ./make-test-python.nix {
         '';
       };
 
-      nesting.clone = lib.singleton {
+      specialisation.pass-checks.configuration = {
         services.nginx.virtualHosts.server = {
           root = lib.mkForce (pkgs.runCommandLocal "testdir2" {} ''
             mkdir "$out"
@@ -70,7 +70,7 @@ import ./make-test-python.nix {
 
   testScript = { nodes, ... }: let
     inherit (nodes.server.config.system.build) toplevel;
-    newSystem = "${toplevel}/fine-tune/child-1";
+    newSystem = "${toplevel}/specialisation/pass-checks";
   in ''
     start_all()
 
diff --git a/nixos/tests/nginx.nix b/nixos/tests/nginx.nix
index 7358800a676..18822f09568 100644
--- a/nixos/tests/nginx.nix
+++ b/nixos/tests/nginx.nix
@@ -42,38 +42,35 @@ import ./make-test-python.nix ({ pkgs, ... }: {
 
       services.nginx.enableReload = true;
 
-      nesting.clone = [
-        {
-          services.nginx.virtualHosts.localhost = {
-            root = lib.mkForce (pkgs.runCommand "testdir2" {} ''
-              mkdir "$out"
-              echo content changed > "$out/index.html"
-            '');
-          };
-        }
-
-        {
-          services.nginx.virtualHosts."1.my.test".listen = [ { addr = "127.0.0.1"; port = 8080; }];
-        }
-
-        {
-          services.nginx.package = pkgs.nginxUnstable;
-        }
-
-        {
-          services.nginx.package = pkgs.nginxUnstable;
-          services.nginx.virtualHosts."!@$$(#*%".locations."~@#*$*!)".proxyPass = ";;;";
-        }
-      ];
-    };
+      specialisation.etagSystem.configuration = {
+        services.nginx.virtualHosts.localhost = {
+          root = lib.mkForce (pkgs.runCommand "testdir2" {} ''
+            mkdir "$out"
+            echo content changed > "$out/index.html"
+          '');
+        };
+      };
+
+      specialisation.justReloadSystem.configuration = {
+        services.nginx.virtualHosts."1.my.test".listen = [ { addr = "127.0.0.1"; port = 8080; }];
+      };
 
+      specialisation.reloadRestartSystem.configuration = {
+        services.nginx.package = pkgs.nginxUnstable;
+      };
+
+      specialisation.reloadWithErrorsSystem.configuration = {
+        services.nginx.package = pkgs.nginxUnstable;
+        services.nginx.virtualHosts."!@$$(#*%".locations."~@#*$*!)".proxyPass = ";;;";
+      };
+    };
   };
 
   testScript = { nodes, ... }: let
-    etagSystem = "${nodes.webserver.config.system.build.toplevel}/fine-tune/child-1";
-    justReloadSystem = "${nodes.webserver.config.system.build.toplevel}/fine-tune/child-2";
-    reloadRestartSystem = "${nodes.webserver.config.system.build.toplevel}/fine-tune/child-3";
-    reloadWithErrorsSystem = "${nodes.webserver.config.system.build.toplevel}/fine-tune/child-4";
+    etagSystem = "${nodes.webserver.config.system.build.toplevel}/specialisation/etagSystem";
+    justReloadSystem = "${nodes.webserver.config.system.build.toplevel}/specialisation/justReloadSystem";
+    reloadRestartSystem = "${nodes.webserver.config.system.build.toplevel}/specialisation/reloadRestartSystem";
+    reloadWithErrorsSystem = "${nodes.webserver.config.system.build.toplevel}/specialisation/reloadWithErrorsSystem";
   in ''
     url = "http://localhost/index.html"
 
diff --git a/nixos/tests/oci-containers.nix b/nixos/tests/oci-containers.nix
new file mode 100644
index 00000000000..bb6c019f07c
--- /dev/null
+++ b/nixos/tests/oci-containers.nix
@@ -0,0 +1,43 @@
+{ system ? builtins.currentSystem
+, config ? {}
+, pkgs ? import ../.. { inherit system config; }
+, lib ? pkgs.lib
+}:
+
+let
+
+  inherit (import ../lib/testing-python.nix { inherit system pkgs; }) makeTest;
+
+  mkOCITest = backend: makeTest {
+    name = "oci-containers-${backend}";
+
+    meta = {
+      maintainers = with lib.maintainers; [ adisbladis benley mkaito ];
+    };
+
+    nodes = {
+      ${backend} = { pkgs, ... }: {
+        virtualisation.oci-containers = {
+          inherit backend;
+          containers.nginx = {
+            image = "nginx-container";
+            imageFile = pkgs.dockerTools.examples.nginx;
+            ports = ["8181:80"];
+          };
+        };
+      };
+    };
+
+    testScript = ''
+      start_all()
+      ${backend}.wait_for_unit("${backend}-nginx.service")
+      ${backend}.wait_for_open_port(8181)
+      ${backend}.wait_until_succeeds("curl http://localhost:8181 | grep Hello")
+    '';
+  };
+
+in
+lib.foldl' (attrs: backend: attrs // { ${backend} = mkOCITest backend; }) {} [
+  "docker"
+  "podman"
+]
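
Outside of the test harness, the same module might be used along these lines (backend, container name and image are illustrative):

  virtualisation.oci-containers = {
    backend = "podman";        # or "docker"
    containers.webapp = {
      image = "nginx";
      ports = [ "8080:80" ];   # host:container
    };
  };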
diff --git a/nixos/tests/partition.nix b/nixos/tests/partition.nix
deleted file mode 100644
index 01a08995950..00000000000
--- a/nixos/tests/partition.nix
+++ /dev/null
@@ -1,247 +0,0 @@
-import ./make-test.nix ({ pkgs, ... }:
-
-with pkgs.lib;
-
-let
-  ksExt = pkgs.writeText "ks-ext4" ''
-    clearpart --all --initlabel --drives=vdb
-
-    part /boot --recommended --label=boot --fstype=ext2 --ondisk=vdb
-    part swap --recommended --label=swap --fstype=swap --ondisk=vdb
-    part /nix --size=500 --label=nix --fstype=ext3 --ondisk=vdb
-    part / --recommended --label=root --fstype=ext4 --ondisk=vdb
-  '';
-
-  ksBtrfs = pkgs.writeText "ks-btrfs" ''
-    clearpart --all --initlabel --drives=vdb,vdc
-
-    part swap1 --recommended --label=swap1 --fstype=swap --ondisk=vdb
-    part swap2 --recommended --label=swap2 --fstype=swap --ondisk=vdc
-
-    part btrfs.1 --grow --ondisk=vdb
-    part btrfs.2 --grow --ondisk=vdc
-
-    btrfs / --data=0 --metadata=1 --label=root btrfs.1 btrfs.2
-  '';
-
-  ksF2fs = pkgs.writeText "ks-f2fs" ''
-    clearpart --all --initlabel --drives=vdb
-
-    part swap  --recommended --label=swap --fstype=swap --ondisk=vdb
-    part /boot --recommended --label=boot --fstype=f2fs --ondisk=vdb
-    part /     --recommended --label=root --fstype=f2fs --ondisk=vdb
-  '';
-
-  ksRaid = pkgs.writeText "ks-raid" ''
-    clearpart --all --initlabel --drives=vdb,vdc
-
-    part raid.01 --size=200 --ondisk=vdb
-    part raid.02 --size=200 --ondisk=vdc
-
-    part swap1 --size=500 --label=swap1 --fstype=swap --ondisk=vdb
-    part swap2 --size=500 --label=swap2 --fstype=swap --ondisk=vdc
-
-    part raid.11 --grow --ondisk=vdb
-    part raid.12 --grow --ondisk=vdc
-
-    raid /boot --level=1 --fstype=ext3 --device=md0 raid.01 raid.02
-    raid / --level=1 --fstype=xfs --device=md1 raid.11 raid.12
-  '';
-
-  ksRaidLvmCrypt = pkgs.writeText "ks-lvm-crypt" ''
-    clearpart --all --initlabel --drives=vdb,vdc
-
-    part raid.1 --grow --ondisk=vdb
-    part raid.2 --grow --ondisk=vdc
-
-    raid pv.0 --level=1 --encrypted --passphrase=x --device=md0 raid.1 raid.2
-
-    volgroup nixos pv.0
-
-    logvol /boot --size=200 --fstype=ext3 --name=boot --vgname=nixos
-    logvol swap --size=500 --fstype=swap --name=swap --vgname=nixos
-    logvol / --size=1000 --grow --fstype=ext4 --name=root --vgname=nixos
-  '';
-in {
-  name = "partitiion";
-
-  machine = { pkgs, ... }: {
-    environment.systemPackages = [
-      pkgs.pythonPackages.nixpart0
-      pkgs.file pkgs.btrfs-progs pkgs.xfsprogs pkgs.lvm2
-    ];
-    virtualisation.emptyDiskImages = [ 4096 4096 ];
-  };
-
-  testScript = ''
-    my $diskStart;
-    my @mtab;
-
-    sub getMtab {
-      my $mounts = $machine->succeed("cat /proc/mounts");
-      chomp $mounts;
-      return map [split], split /\n/, $mounts;
-    }
-
-    sub parttest {
-      my ($desc, $code) = @_;
-      $machine->start;
-      $machine->waitForUnit("default.target");
-
-      # Gather mounts and superblock
-      @mtab = getMtab;
-      $diskStart = $machine->succeed("dd if=/dev/vda bs=512 count=1");
-
-      subtest($desc, $code);
-      $machine->shutdown;
-    }
-
-    sub ensureSanity {
-      # Check whether the filesystem in /dev/vda is still intact
-      my $newDiskStart = $machine->succeed("dd if=/dev/vda bs=512 count=1");
-      if ($diskStart ne $newDiskStart) {
-        $machine->log("Something went wrong, the partitioner wrote " .
-                      "something into the first 512 bytes of /dev/vda!");
-        die;
-      }
-
-      # Check whether nixpart has unmounted anything
-      my @currentMtab = getMtab;
-      for my $mount (@mtab) {
-        my $path = $mount->[1];
-        unless (grep { $_->[1] eq $path } @currentMtab) {
-          $machine->log("The partitioner seems to have unmounted $path.");
-          die;
-        }
-      }
-    }
-
-    sub checkMount {
-      my $mounts = $machine->succeed("cat /proc/mounts");
-
-    }
-
-    sub kickstart {
-      $machine->copyFileFromHost($_[0], "/kickstart");
-      $machine->succeed("nixpart -v /kickstart");
-      ensureSanity;
-    }
-
-    sub ensurePartition {
-      my ($name, $match) = @_;
-      my $path = $name =~ /^\// ? $name : "/dev/disk/by-label/$name";
-      my $out = $machine->succeed("file -Ls $path");
-      my @matches = grep(/^$path: .*$match/i, $out);
-      if (!@matches) {
-        $machine->log("Partition on $path was expected to have a " .
-                      "file system that matches $match, but instead has: $out");
-        die;
-      }
-    }
-
-    sub ensureNoPartition {
-      $machine->succeed("test ! -e /dev/$_[0]");
-    }
-
-    sub ensureMountPoint {
-      $machine->succeed("mountpoint $_[0]");
-    }
-
-    sub remountAndCheck {
-      $machine->nest("Remounting partitions:", sub {
-        # XXX: "findmnt -ARunl -oTARGET /mnt" seems to NOT print all mounts!
-        my $getmounts_cmd = "cat /proc/mounts | cut -d' ' -f2 | grep '^/mnt'";
-        # Insert canaries first
-        my $canaries = $machine->succeed($getmounts_cmd . " | while read p;" .
-                                         " do touch \"\$p/canary\";" .
-                                         " echo \"\$p/canary\"; done");
-        # Now unmount manually
-        $machine->succeed($getmounts_cmd . " | tac | xargs -r umount");
-        # /mnt should be empty or non-existing
-        my $found = $machine->succeed("find /mnt -mindepth 1");
-        chomp $found;
-        if ($found) {
-          $machine->log("Cruft found in /mnt:\n$found");
-          die;
-        }
-        # Try to remount with nixpart
-        $machine->succeed("nixpart -vm /kickstart");
-        ensureMountPoint("/mnt");
-        # Check if our beloved canaries are dead
-        chomp $canaries;
-        $machine->nest("Checking canaries:", sub {
-          for my $canary (split /\n/, $canaries) {
-            $machine->succeed("test -e '$canary'");
-          }
-        });
-      });
-    }
-
-    parttest "ext2, ext3 and ext4 filesystems", sub {
-      kickstart("${ksExt}");
-      ensurePartition("boot", "ext2");
-      ensurePartition("swap", "swap");
-      ensurePartition("nix", "ext3");
-      ensurePartition("root", "ext4");
-      ensurePartition("/dev/vdb4", "boot sector");
-      ensureNoPartition("vdb6");
-      ensureNoPartition("vdc1");
-      remountAndCheck;
-      ensureMountPoint("/mnt/boot");
-      ensureMountPoint("/mnt/nix");
-    };
-
-    parttest "btrfs filesystem", sub {
-      $machine->succeed("modprobe btrfs");
-      kickstart("${ksBtrfs}");
-      ensurePartition("swap1", "swap");
-      ensurePartition("swap2", "swap");
-      ensurePartition("/dev/vdb2", "btrfs");
-      ensurePartition("/dev/vdc2", "btrfs");
-      ensureNoPartition("vdb3");
-      ensureNoPartition("vdc3");
-      remountAndCheck;
-    };
-
-    parttest "f2fs filesystem", sub {
-      $machine->succeed("modprobe f2fs");
-      kickstart("${ksF2fs}");
-      ensurePartition("swap", "swap");
-      ensurePartition("boot", "f2fs");
-      ensurePartition("root", "f2fs");
-      remountAndCheck;
-      ensureMountPoint("/mnt/boot", "f2fs");
-    };
-
-    parttest "RAID1 with XFS", sub {
-      kickstart("${ksRaid}");
-      ensurePartition("swap1", "swap");
-      ensurePartition("swap2", "swap");
-      ensurePartition("/dev/md0", "ext3");
-      ensurePartition("/dev/md1", "xfs");
-      ensureNoPartition("vdb4");
-      ensureNoPartition("vdc4");
-      ensureNoPartition("md2");
-      remountAndCheck;
-      ensureMountPoint("/mnt/boot");
-    };
-
-    parttest "RAID1 with LUKS and LVM", sub {
-      kickstart("${ksRaidLvmCrypt}");
-      ensurePartition("/dev/vdb1", "data");
-      ensureNoPartition("vdb2");
-      ensurePartition("/dev/vdc1", "data");
-      ensureNoPartition("vdc2");
-
-      ensurePartition("/dev/md0", "luks");
-      ensureNoPartition("md1");
-
-      ensurePartition("/dev/nixos/boot", "ext3");
-      ensurePartition("/dev/nixos/swap", "swap");
-      ensurePartition("/dev/nixos/root", "ext4");
-
-      remountAndCheck;
-      ensureMountPoint("/mnt/boot");
-    };
-  '';
-})
diff --git a/nixos/tests/php/default.nix b/nixos/tests/php/default.nix
index 9ab14f722d0..ee7a3b56a3e 100644
--- a/nixos/tests/php/default.nix
+++ b/nixos/tests/php/default.nix
@@ -3,5 +3,6 @@
   pkgs ? import ../../.. { inherit system config; }
 }: {
   fpm = import ./fpm.nix { inherit system pkgs; };
+  httpd = import ./httpd.nix { inherit system pkgs; };
   pcre = import ./pcre.nix { inherit system pkgs; };
 }
diff --git a/nixos/tests/php/fpm.nix b/nixos/tests/php/fpm.nix
index e93a3183418..513abd94373 100644
--- a/nixos/tests/php/fpm.nix
+++ b/nixos/tests/php/fpm.nix
@@ -1,6 +1,6 @@
-import ../make-test-python.nix ({pkgs, ...}: {
+import ../make-test-python.nix ({pkgs, lib, ...}: {
   name = "php-fpm-nginx-test";
-  meta.maintainers = with pkgs.stdenv.lib.maintainers; [ etu ];
+  meta.maintainers = lib.teams.php.members;
 
   machine = { config, lib, pkgs, ... }: {
     services.nginx = {
@@ -43,13 +43,11 @@ import ../make-test-python.nix ({pkgs, ...}: {
     machine.wait_for_unit("phpfpm-foobar.service")
 
     # Check so we get an evaluated PHP back
-    assert "PHP Version ${pkgs.php.version}" in machine.succeed("curl -vvv -s http://127.0.0.1:80/")
+    response = machine.succeed("curl -vvv -s http://127.0.0.1:80/")
+    assert "PHP Version ${pkgs.php.version}" in response, "PHP version not detected"
 
     # Check so we have database and some other extensions loaded
-    assert "json" in machine.succeed("curl -vvv -s http://127.0.0.1:80/")
-    assert "opcache" in machine.succeed("curl -vvv -s http://127.0.0.1:80/")
-    assert "pdo_mysql" in machine.succeed("curl -vvv -s http://127.0.0.1:80/")
-    assert "pdo_pgsql" in machine.succeed("curl -vvv -s http://127.0.0.1:80/")
-    assert "pdo_sqlite" in machine.succeed("curl -vvv -s http://127.0.0.1:80/")
+    for ext in ["json", "opcache", "pdo_mysql", "pdo_pgsql", "pdo_sqlite"]:
+        assert ext in response, f"Missing {ext} extension"
   '';
 })
diff --git a/nixos/tests/php/httpd.nix b/nixos/tests/php/httpd.nix
new file mode 100644
index 00000000000..1092e0ecadd
--- /dev/null
+++ b/nixos/tests/php/httpd.nix
@@ -0,0 +1,31 @@
+import ../make-test-python.nix ({pkgs, lib, ...}: {
+  name = "php-httpd-test";
+  meta.maintainers = lib.teams.php.members;
+
+  machine = { config, lib, pkgs, ... }: {
+    services.httpd = {
+      enable = true;
+      adminAddr = "admin@phpfpm";
+      virtualHosts."phpfpm" = let
+        testdir = pkgs.writeTextDir "web/index.php" "<?php phpinfo();";
+      in {
+        documentRoot = "${testdir}/web";
+        locations."/" = {
+          index = "index.php index.html";
+        };
+      };
+      enablePHP = true;
+    };
+  };
+  testScript = { ... }: ''
+    machine.wait_for_unit("httpd.service")
+
+    # Check so we get an evaluated PHP back
+    response = machine.succeed("curl -vvv -s http://127.0.0.1:80/")
+    assert "PHP Version ${pkgs.php.version}" in response, "PHP version not detected"
+
+    # Check so we have database and some other extensions loaded
+    for ext in ["json", "opcache", "pdo_mysql", "pdo_pgsql", "pdo_sqlite"]:
+        assert ext in response, f"Missing {ext} extension"
+  '';
+})
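
A host-configuration sketch of the `enablePHP` setup this test covers (domain and document root are placeholders):

  services.httpd = {
    enable = true;
    enablePHP = true;
    adminAddr = "webmaster@example.test";
    virtualHosts."example.test" = {
      documentRoot = "/var/www/example.test";
      locations."/".index = "index.php index.html";
    };
  };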
diff --git a/nixos/tests/php/pcre.nix b/nixos/tests/php/pcre.nix
index 56a87778579..3dd0964e60f 100644
--- a/nixos/tests/php/pcre.nix
+++ b/nixos/tests/php/pcre.nix
@@ -1,7 +1,9 @@
 let
   testString = "can-use-subgroups";
-in import ../make-test-python.nix ({ ...}: {
+in import ../make-test-python.nix ({lib, ...}: {
   name = "php-httpd-pcre-jit-test";
+  meta.maintainers = lib.teams.php.members;
+
   machine = { lib, pkgs, ... }: {
     time.timeZone = "UTC";
     services.httpd = {
@@ -30,8 +32,8 @@ in import ../make-test-python.nix ({ ...}: {
     ''
       machine.wait_for_unit("httpd.service")
       # Ensure php evaluation by matching on the var_dump syntax
-      assert 'string(${toString (builtins.stringLength testString)}) "${testString}"' in machine.succeed(
-          "curl -vvv -s http://127.0.0.1:80/index.php"
-      )
+      response = machine.succeed("curl -vvv -s http://127.0.0.1:80/index.php")
+      expected = 'string(${toString (builtins.stringLength testString)}) "${testString}"'
+      assert expected in response, "Does not appear to be able to use subgroups."
     '';
 })
diff --git a/nixos/tests/podman.nix b/nixos/tests/podman.nix
new file mode 100644
index 00000000000..283db71d9a4
--- /dev/null
+++ b/nixos/tests/podman.nix
@@ -0,0 +1,60 @@
+# This test runs podman and checks if a simple container starts
+
+import ./make-test-python.nix (
+  { pkgs, lib, ... }: {
+    name = "podman";
+    meta = {
+      maintainers = lib.teams.podman.members;
+    };
+
+    nodes = {
+      podman =
+        { pkgs, ... }:
+        {
+          virtualisation.podman.enable = true;
+          virtualisation.containers.users = [
+            "alice"
+          ];
+
+          users.users.alice = {
+            isNormalUser = true;
+            home = "/home/alice";
+            description = "Alice Foobar";
+          };
+
+        };
+    };
+
+    testScript = ''
+      import shlex
+
+
+      def su_cmd(cmd):
+          cmd = shlex.quote(cmd)
+          return f"su alice -l -c {cmd}"
+
+
+      podman.wait_for_unit("sockets.target")
+      start_all()
+
+
+      with subtest("Run container as root"):
+          podman.succeed("tar cv --files-from /dev/null | podman import - scratchimg")
+          podman.succeed(
+              "podman run -d --name=sleeping -v /nix/store:/nix/store -v /run/current-system/sw/bin:/bin scratchimg /bin/sleep 10"
+          )
+          podman.succeed("podman ps | grep sleeping")
+          podman.succeed("podman stop sleeping")
+
+      with subtest("Run container rootless"):
+          podman.succeed(su_cmd("tar cv --files-from /dev/null | podman import - scratchimg"))
+          podman.succeed(
+              su_cmd(
+                  "podman run -d --name=sleeping -v /nix/store:/nix/store -v /run/current-system/sw/bin:/bin scratchimg /bin/sleep 10"
+              )
+          )
+          podman.succeed(su_cmd("podman ps | grep sleeping"))
+          podman.succeed(su_cmd("podman stop sleeping"))
+    '';
+  }
+)
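
The host side of this test is minimal (the user name is illustrative):

  virtualisation.podman.enable = true;
  # allow this unprivileged user to run rootless containers
  virtualisation.containers.users = [ "alice" ];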
diff --git a/nixos/tests/prometheus.nix b/nixos/tests/prometheus.nix
index 8bfd0c131e6..bce489168f9 100644
--- a/nixos/tests/prometheus.nix
+++ b/nixos/tests/prometheus.nix
@@ -179,7 +179,7 @@ in import ./make-test-python.nix {
     s3.succeed(
         "mc config host add minio "
         + "http://localhost:${toString minioPort} "
-        + "${s3.accessKey} ${s3.secretKey} S3v4",
+        + "${s3.accessKey} ${s3.secretKey} --api s3v4",
         "mc mb minio/thanos-bucket",
     )
 
diff --git a/nixos/tests/redmine.nix b/nixos/tests/redmine.nix
index 73eb684f33a..3866a1f528c 100644
--- a/nixos/tests/redmine.nix
+++ b/nixos/tests/redmine.nix
@@ -3,74 +3,42 @@
   pkgs ? import ../.. { inherit system config; }
 }:
 
-with import ../lib/testing.nix { inherit system pkgs; };
+with import ../lib/testing-python.nix { inherit system pkgs; };
 with pkgs.lib;
 
 let
-  mysqlTest = package: makeTest {
-    machine =
-      { config, pkgs, ... }:
-      { services.redmine.enable = true;
-        services.redmine.package = package;
-        services.redmine.database.type = "mysql2";
-        services.redmine.plugins = {
+  redmineTest = { name, type }: makeTest {
+    name = "redmine-${name}";
+    machine = { config, pkgs, ... }: {
+      services.redmine = {
+        enable = true;
+        package = pkgs.redmine;
+        database.type = type;
+        plugins = {
           redmine_env_auth = pkgs.fetchurl {
             url = "https://github.com/Intera/redmine_env_auth/archive/0.7.zip";
             sha256 = "1xb8lyarc7mpi86yflnlgyllh9hfwb9z304f19dx409gqpia99sc";
           };
         };
-        services.redmine.themes = {
+        themes = {
           dkuk-redmine_alex_skin = pkgs.fetchurl {
             url = "https://bitbucket.org/dkuk/redmine_alex_skin/get/1842ef675ef3.zip";
             sha256 = "0hrin9lzyi50k4w2bd2b30vrf1i4fi1c0gyas5801wn8i7kpm9yl";
           };
         };
       };
+    };
 
     testScript = ''
-      startAll;
-      $machine->waitForUnit('redmine.service');
-      $machine->waitForOpenPort('3000');
-      $machine->succeed("curl --fail http://localhost:3000/");
+      start_all()
+      machine.wait_for_unit("redmine.service")
+      machine.wait_for_open_port(3000)
+      machine.succeed("curl --fail http://localhost:3000/")
     '';
-  };
-
-  pgsqlTest = package: makeTest {
-    machine =
-      { config, pkgs, ... }:
-      { services.redmine.enable = true;
-        services.redmine.package = package;
-        services.redmine.database.type = "postgresql";
-        services.redmine.plugins = {
-          redmine_env_auth = pkgs.fetchurl {
-            url = "https://github.com/Intera/redmine_env_auth/archive/0.7.zip";
-            sha256 = "1xb8lyarc7mpi86yflnlgyllh9hfwb9z304f19dx409gqpia99sc";
-          };
-        };
-        services.redmine.themes = {
-          dkuk-redmine_alex_skin = pkgs.fetchurl {
-            url = "https://bitbucket.org/dkuk/redmine_alex_skin/get/1842ef675ef3.zip";
-            sha256 = "0hrin9lzyi50k4w2bd2b30vrf1i4fi1c0gyas5801wn8i7kpm9yl";
-          };
-        };
-      };
-
-    testScript = ''
-      startAll;
-      $machine->waitForUnit('redmine.service');
-      $machine->waitForOpenPort('3000');
-      $machine->succeed("curl --fail http://localhost:3000/");
-    '';
-  };
-in
-{
-  mysql = mysqlTest pkgs.redmine // {
-    name = "mysql";
-    meta.maintainers = [ maintainers.aanderse ];
-  };
-
-  pgsql = pgsqlTest pkgs.redmine // {
-    name = "pgsql";
+  } // {
     meta.maintainers = [ maintainers.aanderse ];
   };
+in {
+  mysql = redmineTest { name = "mysql"; type = "mysql2"; };
+  pgsql = redmineTest { name = "pgsql"; type = "postgresql"; };
 }
diff --git a/nixos/tests/roundcube.nix b/nixos/tests/roundcube.nix
index 1897b53e283..97e1125694b 100644
--- a/nixos/tests/roundcube.nix
+++ b/nixos/tests/roundcube.nix
@@ -12,6 +12,7 @@ import ./make-test-python.nix ({ pkgs, ...} : {
         database.password = "not production";
         package = pkgs.roundcube.withPlugins (plugins: [ plugins.persistent_login ]);
         plugins = [ "persistent_login" ];
+        dicts = with pkgs.aspellDicts; [ en fr de ];
       };
       services.nginx.virtualHosts.roundcube = {
         forceSSL = false;
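
The new `dicts` option exercised above takes a list of aspell dictionaries; an abridged sketch, with the rest of the Roundcube/nginx setup omitted:

  services.roundcube = {
    enable = true;
    dicts = with pkgs.aspellDicts; [ en fr de ];
  };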
diff --git a/nixos/tests/service-runner.nix b/nixos/tests/service-runner.nix
index adb3fcd36d7..39ae66fe111 100644
--- a/nixos/tests/service-runner.nix
+++ b/nixos/tests/service-runner.nix
@@ -23,7 +23,7 @@ import ./make-test-python.nix ({ pkgs, ... }: {
         machine.fail(f"curl {url}")
         machine.succeed(
             """
-            mkdir -p /run/nginx /var/spool/nginx/logs
+            mkdir -p /run/nginx /var/log/nginx /var/cache/nginx
             ${nodes.machine.config.systemd.services.nginx.runner} &
             echo $!>my-nginx.pid
             """
diff --git a/nixos/tests/specialisation.nix b/nixos/tests/specialisation.nix
new file mode 100644
index 00000000000..b8d4b8279f4
--- /dev/null
+++ b/nixos/tests/specialisation.nix
@@ -0,0 +1,43 @@
+import ./make-test-python.nix {
+  name = "specialisation";
+  nodes =  {
+    inheritconf = { pkgs, ... }: {
+      environment.systemPackages = [ pkgs.cowsay ];
+      specialisation.inheritconf.configuration = { pkgs, ... }: {
+        environment.systemPackages = [ pkgs.hello ];
+      };
+    };
+    noinheritconf = { pkgs, ... }: {
+      environment.systemPackages = [ pkgs.cowsay ];
+      specialisation.noinheritconf = {
+        inheritParentConfig = false;
+        configuration = { pkgs, ... }: {
+          environment.systemPackages = [ pkgs.hello ];
+        };
+      };
+    };
+  };
+  testScript = ''
+    inheritconf.wait_for_unit("default.target")
+    inheritconf.succeed("cowsay hey")
+    inheritconf.fail("hello")
+
+    with subtest("Nested clones do inherit from parent"):
+        inheritconf.succeed(
+            "/run/current-system/specialisation/inheritconf/bin/switch-to-configuration test"
+        )
+        inheritconf.succeed("cowsay hey")
+        inheritconf.succeed("hello")
+
+    noinheritconf.wait_for_unit("default.target")
+    noinheritconf.succeed("cowsay hey")
+    noinheritconf.fail("hello")
+
+    with subtest("Nested children do not inherit from parent"):
+        noinheritconf.succeed(
+            "/run/current-system/specialisation/noinheritconf/bin/switch-to-configuration test"
+        )
+        noinheritconf.fail("cowsay hey")
+        noinheritconf.succeed("hello")
+  '';
+}
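
A sketch of how the options exercised here might appear in a real configuration, together with the activation path the test uses (the `minimal` name is illustrative):

  specialisation.minimal = {
    inheritParentConfig = false;   # start from a fresh configuration
    configuration = { pkgs, ... }: {
      environment.systemPackages = [ pkgs.hello ];
    };
  };

  # activate on a running system:
  #   /run/current-system/specialisation/minimal/bin/switch-to-configuration test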
diff --git a/nixos/tests/systemd-boot.nix b/nixos/tests/systemd-boot.nix
new file mode 100644
index 00000000000..e911c393361
--- /dev/null
+++ b/nixos/tests/systemd-boot.nix
@@ -0,0 +1,31 @@
+{ system ? builtins.currentSystem,
+  config ? {},
+  pkgs ? import ../.. { inherit system config; }
+}:
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
+with pkgs.lib;
+
+makeTest {
+  name = "systemd-boot";
+  meta.maintainers = with pkgs.stdenv.lib.maintainers; [ danielfullmer ];
+
+  machine = { pkgs, lib, ... }: {
+    virtualisation.useBootLoader = true;
+    virtualisation.useEFIBoot = true;
+    boot.loader.systemd-boot.enable = true;
+  };
+
+  testScript = ''
+    machine.start()
+    machine.wait_for_unit("multi-user.target")
+
+    machine.succeed("test -e /boot/loader/entries/nixos-generation-1.conf")
+
+    # Ensure we actually booted using systemd-boot.
+    # The magic number is the vendor UUID used by systemd-boot.
+    machine.succeed(
+        "test -e /sys/firmware/efi/efivars/LoaderEntrySelected-4a67b082-0a4c-41cf-b6c7-440b29bb8c4f"
+    )
+  '';
+}
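
On real hardware the corresponding configuration is presumably just the boot-loader options; `canTouchEfiVariables` is a common companion setting and not part of this test:

  boot.loader.systemd-boot.enable = true;
  boot.loader.efi.canTouchEfiVariables = true;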
diff --git a/nixos/tests/systemd-confinement.nix b/nixos/tests/systemd-confinement.nix
index f22836e227b..ebf6d218fd6 100644
--- a/nixos/tests/systemd-confinement.nix
+++ b/nixos/tests/systemd-confinement.nix
@@ -1,4 +1,4 @@
-import ./make-test.nix {
+import ./make-test-python.nix {
   name = "systemd-confinement";
 
   machine = { pkgs, lib, ... }: let
@@ -17,7 +17,7 @@ import ./make-test.nix {
       exit "''${ret:-1}"
     '';
 
-    mkTestStep = num: { description, config ? {}, testScript }: {
+    mkTestStep = num: { config ? {}, testScript }: {
       systemd.sockets."test${toString num}" = {
         description = "Socket for Test Service ${toString num}";
         wantedBy = [ "sockets.target" ];
@@ -34,52 +34,48 @@ import ./make-test.nix {
         };
       } // removeAttrs config [ "confinement" "serviceConfig" ];
 
-      __testSteps = lib.mkOrder num ''
-        subtest '${lib.escape ["\\" "'"] description}', sub {
-          $machine->succeed('echo ${toString num} > /teststep');
-          ${testScript}
-        };
-      '';
+      __testSteps = lib.mkOrder num (''
+        machine.succeed("echo ${toString num} > /teststep")
+      '' + testScript);
     };
 
   in {
     imports = lib.imap1 mkTestStep [
-      { description = "chroot-only confinement";
-        config.confinement.mode = "chroot-only";
+      { config.confinement.mode = "chroot-only";
         testScript = ''
-          $machine->succeed(
-            'test "$(chroot-exec ls -1 / | paste -sd,)" = bin,nix',
-            'test "$(chroot-exec id -u)" = 0',
-            'chroot-exec chown 65534 /bin',
-          );
+          with subtest("chroot-only confinement"):
+              machine.succeed(
+                  'test "$(chroot-exec ls -1 / | paste -sd,)" = bin,nix',
+                  'test "$(chroot-exec id -u)" = 0',
+                  "chroot-exec chown 65534 /bin",
+              )
         '';
       }
-      { description = "full confinement with APIVFS";
-        testScript = ''
-          $machine->fail(
-            'chroot-exec ls -l /etc',
-            'chroot-exec ls -l /run',
-            'chroot-exec chown 65534 /bin',
-          );
-          $machine->succeed(
-            'test "$(chroot-exec id -u)" = 0',
-            'chroot-exec chown 0 /bin',
-          );
+      { testScript = ''
+          with subtest("full confinement with APIVFS"):
+              machine.fail(
+                  "chroot-exec ls -l /etc",
+                  "chroot-exec ls -l /run",
+                  "chroot-exec chown 65534 /bin",
+              )
+              machine.succeed(
+                  'test "$(chroot-exec id -u)" = 0', "chroot-exec chown 0 /bin",
+              )
         '';
       }
-      { description = "check existence of bind-mounted /etc";
-        config.serviceConfig.BindReadOnlyPaths = [ "/etc" ];
+      { config.serviceConfig.BindReadOnlyPaths = [ "/etc" ];
         testScript = ''
-          $machine->succeed('test -n "$(chroot-exec cat /etc/passwd)"');
+          with subtest("check existence of bind-mounted /etc"):
+              machine.succeed('test -n "$(chroot-exec cat /etc/passwd)"')
         '';
       }
-      { description = "check if User/Group really runs as non-root";
-        config.serviceConfig.User = "chroot-testuser";
+      { config.serviceConfig.User = "chroot-testuser";
         config.serviceConfig.Group = "chroot-testgroup";
         testScript = ''
-          $machine->succeed('chroot-exec ls -l /dev');
-          $machine->succeed('test "$(chroot-exec id -u)" != 0');
-          $machine->fail('chroot-exec touch /bin/test');
+          with subtest("check if User/Group really runs as non-root"):
+              machine.succeed("chroot-exec ls -l /dev")
+              machine.succeed('test "$(chroot-exec id -u)" != 0')
+              machine.fail("chroot-exec touch /bin/test")
         '';
       }
       (let
@@ -87,62 +83,60 @@ import ./make-test.nix {
           target = pkgs.writeText "symlink-target" "got me\n";
         } "ln -s \"$target\" \"$out\"";
       in {
-        description = "check if symlinks are properly bind-mounted";
         config.confinement.packages = lib.singleton symlink;
         testScript = ''
-          $machine->fail('chroot-exec test -e /etc');
-          $machine->succeed('chroot-exec cat ${symlink} >&2');
-          $machine->succeed('test "$(chroot-exec cat ${symlink})" = "got me"');
+          with subtest("check if symlinks are properly bind-mounted"):
+              machine.fail("chroot-exec test -e /etc")
+              machine.succeed(
+                  "chroot-exec cat ${symlink} >&2",
+                  'test "$(chroot-exec cat ${symlink})" = "got me"',
+              )
         '';
       })
-      { description = "check if StateDirectory works";
-        config.serviceConfig.User = "chroot-testuser";
+      { config.serviceConfig.User = "chroot-testuser";
         config.serviceConfig.Group = "chroot-testgroup";
         config.serviceConfig.StateDirectory = "testme";
         testScript = ''
-          $machine->succeed('chroot-exec touch /tmp/canary');
-          $machine->succeed('chroot-exec "echo works > /var/lib/testme/foo"');
-          $machine->succeed('test "$(< /var/lib/testme/foo)" = works');
-          $machine->succeed('test ! -e /tmp/canary');
+          with subtest("check if StateDirectory works"):
+              machine.succeed("chroot-exec touch /tmp/canary")
+              machine.succeed('chroot-exec "echo works > /var/lib/testme/foo"')
+              machine.succeed('test "$(< /var/lib/testme/foo)" = works')
+              machine.succeed("test ! -e /tmp/canary")
         '';
       }
-      { description = "check if /bin/sh works";
-        testScript = ''
-          $machine->succeed(
-            'chroot-exec test -e /bin/sh',
-            'test "$(chroot-exec \'/bin/sh -c "echo bar"\')" = bar',
-          );
+      { testScript = ''
+          with subtest("check if /bin/sh works"):
+              machine.succeed(
+                  "chroot-exec test -e /bin/sh",
+                  'test "$(chroot-exec \'/bin/sh -c "echo bar"\')" = bar',
+              )
         '';
       }
-      { description = "check if suppressing /bin/sh works";
-        config.confinement.binSh = null;
+      { config.confinement.binSh = null;
         testScript = ''
-          $machine->succeed(
-            'chroot-exec test ! -e /bin/sh',
-            'test "$(chroot-exec \'/bin/sh -c "echo foo"\')" != foo',
-          );
+          with subtest("check if suppressing /bin/sh works"):
+              machine.succeed("chroot-exec test ! -e /bin/sh")
+              machine.succeed('test "$(chroot-exec \'/bin/sh -c "echo foo"\')" != foo')
         '';
       }
-      { description = "check if we can set /bin/sh to something different";
-        config.confinement.binSh = "${pkgs.hello}/bin/hello";
+      { config.confinement.binSh = "${pkgs.hello}/bin/hello";
         testScript = ''
-          $machine->succeed(
-            'chroot-exec test -e /bin/sh',
-            'test "$(chroot-exec /bin/sh -g foo)" = foo',
-          );
+          with subtest("check if we can set /bin/sh to something different"):
+              machine.succeed("chroot-exec test -e /bin/sh")
+              machine.succeed('test "$(chroot-exec /bin/sh -g foo)" = foo')
         '';
       }
-      { description = "check if only Exec* dependencies are included";
-        config.environment.FOOBAR = pkgs.writeText "foobar" "eek\n";
+      { config.environment.FOOBAR = pkgs.writeText "foobar" "eek\n";
         testScript = ''
-          $machine->succeed('test "$(chroot-exec \'cat "$FOOBAR"\')" != eek');
+          with subtest("check if only Exec* dependencies are included"):
+              machine.succeed('test "$(chroot-exec \'cat "$FOOBAR"\')" != eek')
         '';
       }
-      { description = "check if all unit dependencies are included";
-        config.environment.FOOBAR = pkgs.writeText "foobar" "eek\n";
+      { config.environment.FOOBAR = pkgs.writeText "foobar" "eek\n";
         config.confinement.fullUnit = true;
         testScript = ''
-          $machine->succeed('test "$(chroot-exec \'cat "$FOOBAR"\')" = eek');
+          with subtest("check if all unit dependencies are included"):
+              machine.succeed('test "$(chroot-exec \'cat "$FOOBAR"\')" = eek')
         '';
       }
     ];
@@ -162,7 +156,6 @@ import ./make-test.nix {
   };
 
   testScript = { nodes, ... }: ''
-    $machine->waitForUnit('multi-user.target');
-    ${nodes.machine.config.__testSteps}
-  '';
+    machine.wait_for_unit("multi-user.target")
+  '' + nodes.machine.config.__testSteps;
 }
diff --git a/nixos/tests/systemd-networkd-dhcpserver.nix b/nixos/tests/systemd-networkd-dhcpserver.nix
new file mode 100644
index 00000000000..f1a2662f8cb
--- /dev/null
+++ b/nixos/tests/systemd-networkd-dhcpserver.nix
@@ -0,0 +1,58 @@
+# This test predominantly tests the systemd-networkd DHCP server by
+# setting up a DHCP server and a client and ensuring they are mutually
+# reachable via the DHCP-allocated address.
+import ./make-test-python.nix ({pkgs, ...}: {
+  name = "systemd-networkd-dhcpserver";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ tomfitzhenry ];
+  };
+  nodes = {
+    router = { config, pkgs, ... }: {
+      virtualisation.vlans = [ 1 ];
+      systemd.services.systemd-networkd.environment.SYSTEMD_LOG_LEVEL = "debug";
+      networking = {
+        useNetworkd = true;
+        useDHCP = false;
+        firewall.enable = false;
+      };
+      systemd.network = {
+        networks = {
+          # systemd-networkd will load the first network unit file
+          # that matches, ordered lexicographically by filename.
+          # /etc/systemd/network/{40-eth1,99-main}.network already
+          # exists. This network unit must be loaded for the test,
+          # however, hence the low-numbered name.
+          "01-eth1" = {
+            name = "eth1";
+            networkConfig = {
+              DHCPServer = true;
+              Address = "10.0.0.1/24";
+            };
+            dhcpServerConfig = {
+              PoolOffset = 100;
+              PoolSize = 1;
+            };
+          };
+        };
+      };
+    };
+
+    client = { config, pkgs, ... }: {
+      virtualisation.vlans = [ 1 ];
+      systemd.services.systemd-networkd.environment.SYSTEMD_LOG_LEVEL = "debug";
+      networking = {
+        useNetworkd = true;
+        useDHCP = false;
+        firewall.enable = false;
+        interfaces.eth1.useDHCP = true;
+      };
+    };
+  };
+  testScript = { ... }: ''
+    start_all()
+    router.wait_for_unit("systemd-networkd-wait-online.service")
+    client.wait_for_unit("systemd-networkd-wait-online.service")
+    client.wait_until_succeeds("ping -c 5 10.0.0.1")
+    router.wait_until_succeeds("ping -c 5 10.0.0.100")
+  '';
+})
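
A minimal host-configuration sketch of the DHCP-server side used above (unit name and pool values are illustrative):

  networking.useNetworkd = true;
  networking.useDHCP = false;
  systemd.network.networks."10-lan" = {
    name = "eth1";
    networkConfig = {
      DHCPServer = true;
      Address = "10.0.0.1/24";
    };
    dhcpServerConfig = {
      PoolOffset = 100;
      PoolSize = 50;
    };
  };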
diff --git a/nixos/tests/systemd-networkd-ipv6-prefix-delegation.nix b/nixos/tests/systemd-networkd-ipv6-prefix-delegation.nix
new file mode 100644
index 00000000000..99cd341eec1
--- /dev/null
+++ b/nixos/tests/systemd-networkd-ipv6-prefix-delegation.nix
@@ -0,0 +1,295 @@
+# This test verifies that we can request and assign IPv6 prefixes from upstream
+# (e.g. ISP) routers.
+# The setup consists of three VMs: one for the ISP, one as your residential
+# router and a third as a client machine in the residential network.
+#
+# There are two VLANs in this test:
+# - VLAN 1 is the connection between the ISP and the router
+# - VLAN 2 is the connection between the router and the client
+
+import ./make-test-python.nix ({pkgs, ...}: {
+  name = "systemd-networkd-ipv6-prefix-delegation";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ andir ];
+  };
+  nodes = {
+
+    # The ISP router's job is to delegate IPv6 prefixes via DHCPv6. Like with
+    # regular IPv6 auto-configuration it will also emit IPv6 router
+    # advertisements (RAs). Those RAs will not carry a prefix but instead
+    # just set the "Other" flag to indicate to the receiving nodes that they
+    # should attempt DHCPv6.
+    #
+    # Note: On the ISP's device we don't really care if we are using networkd in
+    # this example. That being said, we can't use it (yet) as networkd doesn't
+    # implement the serving side of DHCPv6. We will use ISC's well-aged dhcpd6
+    # for that task.
+    isp = { lib, pkgs, ... }: {
+      virtualisation.vlans = [ 1 ];
+      networking = {
+        useDHCP = false;
+        firewall.enable = false;
+        interfaces.eth1.ipv4.addresses = lib.mkForce []; # no need for legacy IP
+        interfaces.eth1.ipv6.addresses = lib.mkForce [
+          { address = "2001:DB8::"; prefixLength = 64; }
+        ];
+      };
+
+      # Since we want to program the routes that we delegate to the "customer"
+      # into our routing table we must have a way to gain the required privs.
+      # This security wrapper will do in our test setup.
+      #
+      # DO NOT COPY THIS TO PRODUCTION AS IS. Think about it at least twice.
+      # Everyone on the "isp" machine will be able to add routes to the kernel.
+      security.wrappers.add-dhcpd-lease = {
+        source = pkgs.writeShellScript "add-dhcpd-lease" ''
+          exec ${pkgs.iproute}/bin/ip -6 route replace "$1" via "$2"
+        '';
+        capabilities = "cap_net_admin+ep";
+      };
+      services = {
+        # Configure the DHCPv6 server
+        #
+        # We will hand out /48 prefixes from the subnet 2001:DB8:F000::/36.
+        # That gives us ~8k prefixes. That should be enough for this test.
+        #
+        # Since (usually) you will not receive a prefix with the router
+        # advertisements we also hand out /128 leases from the range
+        # 2001:DB8:0000:0000:FFFF::/112.
+        dhcpd6 = {
+          enable = true;
+          interfaces = [ "eth1" ];
+          extraConfig = ''
+            subnet6 2001:DB8::/36 {
+              range6 2001:DB8:0000:0000:FFFF:: 2001:DB8:0000:0000:FFFF::FFFF;
+              prefix6 2001:DB8:F000:: 2001:DB8:FFFF:: /48;
+            }
+
+            # This is the secret sauce. We have to extract the prefix and the
+            # next hop when committing the lease to the database. dhcpd6
+            # (rightfully) has no concept of adding routes to the system's
+            # routing table. It really depends on the setup.
+            #
+            # In a production environment your DHCPv6 server is likely not the
+            # router. You might want to consider BGP, custom NetConf calls, …
+            # in those cases.
+            on commit {
+              set IP = pick-first-value(binary-to-ascii(16, 16, ":", substring(option dhcp6.ia-na, 16, 16)), "n/a");
+              set Prefix = pick-first-value(binary-to-ascii(16, 16, ":", suffix(option dhcp6.ia-pd, 16)), "n/a");
+              set PrefixLength = pick-first-value(binary-to-ascii(10, 8, ":", substring(suffix(option dhcp6.ia-pd, 17), 0, 1)), "n/a");
+              log(concat(IP, " ", Prefix, " ", PrefixLength));
+              execute("/run/wrappers/bin/add-dhcpd-lease", concat(Prefix,"/",PrefixLength), IP);
+            }
+          '';
+        };
+
+        # Finally we have to set up the router advertisements. While we could be
+        # using networkd or bird for this task, `radvd` is probably the most
+        # venerable of them all. It was made explicitly for this purpose and
+        # the configuration is much more straightforward than what networkd
+        # requires.
+        # As outlined above we will have to set the `Managed` flag as otherwise
+        # the clients will not know if they should do DHCPv6. (Some do
+        # anyway/always)
+        radvd = {
+          enable = true;
+          config = ''
+            interface eth1 {
+              AdvSendAdvert on;
+              AdvManagedFlag on;
+              AdvOtherConfigFlag off; # we don't really have DNS or NTP or anything like that to distribute
+              prefix ::/64 {
+                AdvOnLink on;
+                AdvAutonomous on;
+              };
+            };
+          '';
+        };
+
+      };
+    };
+
+    # This will be our (residential) router that receives the IPv6 prefix (IA_PD)
+    # and /128 (IA_NA) allocation.
+    #
+    # Here we will actually start using networkd.
+    router = {
+      virtualisation.vlans = [ 1 2 ];
+      systemd.services.systemd-networkd.environment.SYSTEMD_LOG_LEVEL = "debug";
+
+      boot.kernel.sysctl = {
+        # we want to forward packets from the ISP to the client and back.
+        "net.ipv6.conf.all.forwarding" = 1;
+      };
+
+      networking = {
+        useNetworkd = true;
+        useDHCP = false;
+        # Consider enabling this in production and generating firewall rules
+        # for forwarding/input from the configured interfaces so you do not
+        # have to manage them in multiple places.
+        firewall.enable = false;
+      };
+
+      systemd.network = {
+        networks = {
+          # systemd-networkd will load the first network unit file
+          # that matches, ordered lexicographically by filename.
+          # /etc/systemd/network/{40-eth1,99-main}.network already
+          # exists. This network unit must be loaded for the test,
+          # however, hence the low-numbered name.
+
+          # Configuration of the interface to the ISP.
+          # We must accept RAs and request the PD prefix.
+          "01-eth1" = {
+            name = "eth1";
+            networkConfig = {
+              Description = "ISP interface";
+              IPv6AcceptRA = true;
+              #DHCP = false; # no need for legacy IP
+            };
+            linkConfig = {
+              # We care about this interface when talking about being "online".
+              # If this interface is in the `routable` state we can reach
+              # others and they should be able to reach us.
+              RequiredForOnline = "routable";
+            };
+            # This configures the DHCPv6 client part towards the ISPs DHCPv6 server.
+            dhcpV6Config = {
+              # We have to include a request for a prefix in our DHCPv6 client
+              # request packets.
+              # Otherwise the upstream DHCPv6 server wouldn't know if we want a
+              # prefix or not. Note: On some installations it makes sense to
+              # always force that option on the DHCPv6 server since there are
+              # certain CPEs that are just not setting this field but happily
+              # accept the delegated prefix.
+              PrefixDelegationHint  = "::/48";
+            };
+            ipv6PrefixDelegationConfig = {
+              # Let networkd know that we would very much like to use DHCPv6
+              # to obtain the "managed" information. Not sure why they can't
+              # just take that from the upstream RAs.
+              Managed = true;
+            };
+          };
+
+          # Interface to the client. Here we should redistribute a /64 from
+          # the prefix we received from the ISP.
+          "01-eth2" = {
+            name = "eth2";
+            networkConfig = {
+              Description = "Client interface";
+              # the client shouldn't be allowed to send us RAs; that would be weird.
+              IPv6AcceptRA = false;
+
+              # Just delegate prefixes from the DHCPv6 PD pool.
+              # If you also want to distribute a local ULA prefix you want to
+              # set this to `yes` as that includes both static prefixes as well
+              # as PD prefixes.
+              IPv6PrefixDelegation = "dhcpv6";
+            };
+            # finally "act as router" (according to systemd.network(5))
+            ipv6PrefixDelegationConfig = {
+              RouterLifetimeSec = 300; # required, as otherwise no RAs are emitted
+
+              # In a production environment you should consider setting these as well:
+              #EmitDNS = true;
+              #EmitDomains = true;
+              #DNS= = "fe80::1"; # or whatever "well known" IP your router will have on the inside.
+            };
+
+            # This adds a "random" ULA prefix to the interface that is being
+            # advertised to the clients.
+            # Not used in this test.
+            # ipv6Prefixes = [
+            #   {
+            #     ipv6PrefixConfig = {
+            #       AddressAutoconfiguration = true;
+            #       PreferredLifetimeSec = 1800;
+            #       ValidLifetimeSec = 1800;
+            #     };
+            #   }
+            # ];
+          };
+
+          # finally we are going to add a static IPv6 unique local address to
+          # the "lo" interface.  This will serve as ICMPv6 echo target to
+          # verify connectivity from the client to the router.
+          "01-lo" = {
+            name = "lo";
+            addresses = [
+              { addressConfig.Address = "FD42::1/128"; }
+            ];
+          };
+        };
+      };
+
+      # make the network-online target a requirement; we wait for it in our test script
+      systemd.targets.network-online.wantedBy = [ "multi-user.target" ];
+    };
+
+    # This is the client behind the router. We should be receiving router
+    # advertisements for both the ULA and the delegated prefix.
+    # All we have to do is boot with the default (networkd) configuration.
+    client = {
+      virtualisation.vlans = [ 2 ];
+      systemd.services.systemd-networkd.environment.SYSTEMD_LOG_LEVEL = "debug";
+      networking = {
+        useNetworkd = true;
+        useDHCP = false;
+      };
+
+      # make the network-online target a requirement; we wait for it in our test script
+      systemd.targets.network-online.wantedBy = [ "multi-user.target" ];
+    };
+  };
+
+  testScript = ''
+    # First start the router and wait for it to reach a state where we are
+    # certain networkd is up and it is able to send out RAs.
+    router.start()
+    router.wait_for_unit("systemd-networkd.service")
+
+    # After that we can boot the client and wait for the network online target.
+    # Since we only care about IPv6 that should not involve waiting for legacy
+    # IP leases.
+    client.start()
+    client.wait_for_unit("network-online.target")
+
+    # the static address on the router should be reachable from the client
+    client.wait_until_succeeds("ping -6 -c 1 FD42::1")
+
+    # the global IP of the ISP router should still not be reachable
+    router.fail("ping -6 -c 1 2001:DB8::")
+
+    # Once we have internal connectivity boot up the ISP
+    isp.start()
+
+    # Since for the ISP "being online" should have no real meaning we just
+    # wait for the target where all the units have been started.
+    # It probably still takes a few more seconds for all the RA timers to be
+    # fired, etc.
+    isp.wait_for_unit("multi-user.target")
+
+    # wait until the uplink interface has a good status
+    router.wait_for_unit("network-online.target")
+    router.wait_until_succeeds("ping -6 -c1 2001:DB8::")
+
+    # shortly after that the client should have received its global IPv6
+    # address and thus be able to ping the ISP
+    client.wait_until_succeeds("ping -6 -c1 2001:DB8::")
+
+    # verify that we got a globally scoped address in eth1 from the
+    # documentation prefix
+    ip_output = client.succeed("ip --json -6 address show dev eth1")
+
+    import json
+
+    ip_json = json.loads(ip_output)[0]
+    assert any(
+        addr["local"].upper().startswith("2001:DB8:")
+        for addr in ip_json["addr_info"]
+        if addr["scope"] == "global"
+    )
+  '';
+})
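
Stripped of the test scaffolding, the router's two key network units boil down to roughly the following (unit names are illustrative; IPv6 forwarding must also be enabled as in the test):

  # uplink towards the ISP: accept RAs, do DHCPv6 and hint for a /48
  systemd.network.networks."01-wan" = {
    name = "eth1";
    networkConfig.IPv6AcceptRA = true;
    dhcpV6Config.PrefixDelegationHint = "::/48";
    ipv6PrefixDelegationConfig.Managed = true;
  };

  # downstream LAN: advertise a /64 carved out of the delegated prefix
  systemd.network.networks."01-lan" = {
    name = "eth2";
    networkConfig = {
      IPv6AcceptRA = false;
      IPv6PrefixDelegation = "dhcpv6";
    };
    ipv6PrefixDelegationConfig.RouterLifetimeSec = 300;
  };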
diff --git a/nixos/tests/traefik.nix b/nixos/tests/traefik.nix
new file mode 100644
index 00000000000..0e21a7cf843
--- /dev/null
+++ b/nixos/tests/traefik.nix
@@ -0,0 +1,87 @@
+# Test Traefik as a reverse proxy of a local web service
+# and a Docker container.
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "traefik";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ joko ];
+  };
+
+  nodes = {
+    client = { config, pkgs, ... }: {
+      environment.systemPackages = [ pkgs.curl ];
+    };
+    traefik = { config, pkgs, ... }: {
+      docker-containers.nginx = {
+        extraDockerOptions = [
+          "-l" "traefik.enable=true"
+          "-l" "traefik.http.routers.nginx.entrypoints=web"
+          "-l" "traefik.http.routers.nginx.rule=Host(`nginx.traefik.test`)"
+        ];
+        image = "nginx-container";
+        imageFile = pkgs.dockerTools.examples.nginx;
+      };
+
+      networking.firewall.allowedTCPPorts = [ 80 ];
+
+      services.traefik = {
+        enable = true;
+
+        dynamicConfigOptions = {
+          http.routers.simplehttp = {
+            rule = "Host(`simplehttp.traefik.test`)";
+            entryPoints = [ "web" ];
+            service = "simplehttp";
+          };
+
+          http.services.simplehttp = {
+            loadBalancer.servers = [{
+              url = "http://127.0.0.1:8000";
+            }];
+          };
+        };
+
+        staticConfigOptions = {
+          global = {
+            checkNewVersion = false;
+            sendAnonymousUsage = false;
+          };
+
+          entryPoints.web.address = ":80";
+
+          providers.docker.exposedByDefault = false;
+        };
+      };
+
+      systemd.services.simplehttp = {
+        script = "${pkgs.python3}/bin/python -m http.server 8000";
+        serviceConfig.Type = "simple";
+        wantedBy = [ "multi-user.target" ];
+      };
+
+      users.users.traefik.extraGroups = [ "docker" ];
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    traefik.wait_for_unit("docker-nginx.service")
+    traefik.wait_until_succeeds("docker ps | grep nginx-container")
+    traefik.wait_for_unit("simplehttp.service")
+    traefik.wait_for_unit("traefik.service")
+    traefik.wait_for_open_port(80)
+    traefik.wait_for_unit("multi-user.target")
+
+    client.wait_for_unit("multi-user.target")
+
+    with subtest("Check that a container can be reached via Traefik"):
+        assert "Hello from NGINX" in client.succeed(
+            "curl -sSf -H Host:nginx.traefik.test http://traefik/"
+        )
+
+    with subtest("Check that dynamic configuration works"):
+        assert "Directory listing for " in client.succeed(
+            "curl -sSf -H Host:simplehttp.traefik.test http://traefik/"
+        )
+  '';
+})
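
The options above map onto Traefik's static/dynamic configuration split; a reduced sketch routing one local service (hostname and upstream port are illustrative):

  services.traefik = {
    enable = true;
    staticConfigOptions.entryPoints.web.address = ":80";
    dynamicConfigOptions = {
      http.routers.myapp = {
        rule = "Host(`myapp.example.test`)";
        entryPoints = [ "web" ];
        service = "myapp";
      };
      http.services.myapp.loadBalancer.servers = [
        { url = "http://127.0.0.1:8000"; }
      ];
    };
  };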
diff --git a/nixos/tests/tuptime.nix b/nixos/tests/tuptime.nix
new file mode 100644
index 00000000000..36ce2b1ae19
--- /dev/null
+++ b/nixos/tests/tuptime.nix
@@ -0,0 +1,29 @@
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "tuptime";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ evils ];
+  };
+
+  machine = { pkgs, ... }: {
+    imports = [ ../modules/profiles/minimal.nix ];
+    services.tuptime.enable = true;
+  };
+
+  testScript =
+    ''
+      # see if it starts
+      start_all()
+      machine.wait_for_unit("multi-user.target")
+      machine.succeed("tuptime | grep 'System startups:[[:blank:]]*1'")
+      machine.succeed("tuptime | grep 'System uptime:[[:blank:]]*100.0%'")
+      machine.shutdown()
+
+      # restart machine and see if it correctly reports the reboot
+      machine.start()
+      machine.wait_for_unit("multi-user.target")
+      machine.succeed("tuptime | grep 'System startups:[[:blank:]]*2'")
+      machine.succeed("tuptime | grep 'System shutdowns:[[:blank:]]*1 ok'")
+      machine.shutdown()
+    '';
+})
+
diff --git a/nixos/tests/udisks2.nix b/nixos/tests/udisks2.nix
index 64f5b6c40d2..50a02396891 100644
--- a/nixos/tests/udisks2.nix
+++ b/nixos/tests/udisks2.nix
@@ -3,7 +3,7 @@ import ./make-test-python.nix ({ pkgs, ... }:
 let
 
   stick = pkgs.fetchurl {
-    url = "http://nixos.org/~eelco/nix/udisks-test.img.xz";
+    url = "https://nixos.org/~eelco/nix/udisks-test.img.xz";
     sha256 = "0was1xgjkjad91nipzclaz5biv3m4b2nk029ga6nk7iklwi19l8b";
   };
 
diff --git a/nixos/tests/web-servers/unit-php.nix b/nixos/tests/web-servers/unit-php.nix
new file mode 100644
index 00000000000..c6327a1f825
--- /dev/null
+++ b/nixos/tests/web-servers/unit-php.nix
@@ -0,0 +1,47 @@
+import ../make-test-python.nix ({ pkgs, ... }:
+let
+  testdir = pkgs.writeTextDir "www/info.php" "<?php phpinfo();";
+
+in {
+  name = "unit-php-test";
+  meta.maintainers = with pkgs.stdenv.lib.maintainers; [ izorkin ];
+
+  machine = { config, lib, pkgs, ... }: {
+    services.unit = {
+      enable = true;
+      config = ''
+        {
+          "listeners": {
+            "*:9074": {
+              "application": "php_74"
+            }
+          },
+          "applications": {
+            "php_74": {
+              "type": "php 7.4",
+              "processes": 1,
+              "user": "testuser",
+              "group": "testgroup",
+              "root": "${testdir}/www",
+              "index": "info.php"
+            }
+          }
+        }
+      '';
+    };
+    users = {
+      users.testuser = {
+        isNormalUser = false;
+        uid = 1074;
+        group = "testgroup";
+      };
+      groups.testgroup = {
+        gid = 1074;
+      };
+    };
+  };
+  testScript = ''
+    machine.wait_for_unit("unit.service")
+    assert "PHP Version ${pkgs.php74.version}" in machine.succeed("curl -vvv -s http://127.0.0.1:9074/")
+  '';
+})
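The single assertion above fetches the phpinfo() page through curl and greps for the packaged PHP version. The same check can be expressed with just the Python standard library, which avoids depending on curl inside the VM; the sketch below would have to run on the machine itself (or through a forwarded port), and expected_version stands in for the ${pkgs.php74.version} string interpolated in the test.

    # Sketch: fetch the phpinfo() page served by Unit on 127.0.0.1:9074 and
    # pull out the reported PHP version. expected_version is a placeholder
    # for the value the test interpolates from pkgs.php74.version.
    import re
    import urllib.request

    expected_version = "7.4"  # placeholder
    body = urllib.request.urlopen("http://127.0.0.1:9074/", timeout=10).read().decode()
    match = re.search(r"PHP Version ([0-9][0-9A-Za-z.]*)", body)
    assert match and match.group(1).startswith(expected_version), body[:200]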
diff --git a/nixos/tests/xmpp/prosody-mysql.nix b/nixos/tests/xmpp/prosody-mysql.nix
index 0507227021b..9a00bcabf38 100644
--- a/nixos/tests/xmpp/prosody-mysql.nix
+++ b/nixos/tests/xmpp/prosody-mysql.nix
@@ -6,6 +6,11 @@ import ../make-test-python.nix {
       environment.systemPackages = [
         (pkgs.callPackage ./xmpp-sendmessage.nix { connectTo = nodes.server.config.networking.primaryIPAddress; })
       ];
+      networking.extraHosts = ''
+        ${nodes.server.config.networking.primaryIPAddress} example.com
+        ${nodes.server.config.networking.primaryIPAddress} conference.example.com
+        ${nodes.server.config.networking.primaryIPAddress} uploads.example.com
+      '';
     };
     server = { config, pkgs, ... }: {
       nixpkgs.overlays = [
@@ -18,6 +23,8 @@ import ../make-test-python.nix {
       ];
       networking.extraHosts = ''
         ${config.networking.primaryIPAddress} example.com
+        ${config.networking.primaryIPAddress} conference.example.com
+        ${config.networking.primaryIPAddress} uploads.example.com
       '';
       networking.firewall.enable = false;
       services.prosody = {
@@ -39,6 +46,14 @@ import ../make-test-python.nix {
           domain = "example.com";
           enabled = true;
         };
+        muc = [
+          {
+            domain = "conference.example.com";
+          }
+        ];
+        uploadHttp = {
+          domain = "uploads.example.com";
+        };
       };
     };
     mysql = { config, pkgs, ... }: {
diff --git a/nixos/tests/xmpp/prosody.nix b/nixos/tests/xmpp/prosody.nix
index 9d1374bff6b..e7755e24bab 100644
--- a/nixos/tests/xmpp/prosody.nix
+++ b/nixos/tests/xmpp/prosody.nix
@@ -1,27 +1,80 @@
-import ../make-test-python.nix {
-  name = "prosody";
+let
+  cert = pkgs: pkgs.runCommandNoCC "selfSignedCerts" { buildInputs = [ pkgs.openssl ]; } ''
+    openssl req -x509 -newkey rsa:4096 -keyout key.pem -out cert.pem -nodes -subj '/CN=example.com/CN=uploads.example.com/CN=conference.example.com'
+    mkdir -p $out
+    cp key.pem cert.pem $out
+  '';
+  createUsers = pkgs: pkgs.writeScriptBin "create-prosody-users" ''
+    #!${pkgs.bash}/bin/bash
+    set -e
+
+    # Creates and sets passwords for the two XMPP test users.
+    #
+    # Doing this in a bash script instead of in the test script
+    # allows us to easily provision the users when running the
+    # test interactively.
+
+    prosodyctl register cthon98 example.com nothunter2
+    prosodyctl register azurediamond example.com hunter2
+  '';
+  delUsers = pkgs: pkgs.writeScriptBin "delete-prosody-users" ''
+    #!${pkgs.bash}/bin/bash
+    set -e
+
+    # Deletes the test users.
+    #
+    # Doing this in a bash script instead of in the test script
+    # allows us to easily remove the users when running the
+    # test interactively.
 
+    prosodyctl deluser cthon98@example.com
+    prosodyctl deluser azurediamond@example.com
+  '';
+in import ../make-test-python.nix {
+  name = "prosody";
   nodes = {
-    client = { nodes, pkgs, ... }: {
+    client = { nodes, pkgs, config, ... }: {
+      security.pki.certificateFiles = [ "${cert pkgs}/cert.pem" ];
+      console.keyMap = "fr-bepo";
+      networking.extraHosts = ''
+        ${nodes.server.config.networking.primaryIPAddress} example.com
+        ${nodes.server.config.networking.primaryIPAddress} conference.example.com
+        ${nodes.server.config.networking.primaryIPAddress} uploads.example.com
+      '';
       environment.systemPackages = [
         (pkgs.callPackage ./xmpp-sendmessage.nix { connectTo = nodes.server.config.networking.primaryIPAddress; })
       ];
     };
     server = { config, pkgs, ... }: {
+      security.pki.certificateFiles = [ "${cert pkgs}/cert.pem" ];
+      console.keyMap = "fr-bepo";
       networking.extraHosts = ''
         ${config.networking.primaryIPAddress} example.com
+        ${config.networking.primaryIPAddress} conference.example.com
+        ${config.networking.primaryIPAddress} uploads.example.com
       '';
       networking.firewall.enable = false;
+      environment.systemPackages = [
+        (createUsers pkgs)
+        (delUsers pkgs)
+      ];
       services.prosody = {
         enable = true;
-        # TODO: use a self-signed certificate
-        c2sRequireEncryption = false;
-        extraConfig = ''
-          storage = "sql"
-        '';
-        virtualHosts.test = {
+        ssl.cert = "${cert pkgs}/cert.pem";
+        ssl.key = "${cert pkgs}/key.pem";
+        virtualHosts.example = {
           domain = "example.com";
           enabled = true;
+          ssl.cert = "${cert pkgs}/cert.pem";
+          ssl.key = "${cert pkgs}/key.pem";
+        };
+        muc = [
+          {
+            domain = "conference.example.com";
+          }
+        ];
+        uploadHttp = {
+          domain = "uploads.example.com";
         };
       };
     };
@@ -31,16 +84,8 @@ import ../make-test-python.nix {
     server.wait_for_unit("prosody.service")
     server.succeed('prosodyctl status | grep "Prosody is running"')
 
-    # set password to 'nothunter2' (it's asked twice)
-    server.succeed("yes nothunter2 | prosodyctl adduser cthon98@example.com")
-    # set password to 'y'
-    server.succeed("yes | prosodyctl adduser azurediamond@example.com")
-    # correct password to "hunter2"
-    server.succeed("yes hunter2 | prosodyctl passwd azurediamond@example.com")
-
-    client.succeed("send-message")
-
-    server.succeed("prosodyctl deluser cthon98@example.com")
-    server.succeed("prosodyctl deluser azurediamond@example.com")
+    server.succeed("create-prosody-users")
+    client.succeed('send-message 2>&1 | grep "XMPP SCRIPT TEST SUCCESS"')
+    server.succeed("delete-prosody-users")
   '';
 }
diff --git a/nixos/tests/xmpp/xmpp-sendmessage.nix b/nixos/tests/xmpp/xmpp-sendmessage.nix
index 2a075a01813..349b9c6f38e 100644
--- a/nixos/tests/xmpp/xmpp-sendmessage.nix
+++ b/nixos/tests/xmpp/xmpp-sendmessage.nix
@@ -1,46 +1,61 @@
-{ writeScriptBin, python3, connectTo ? "localhost" }:
-writeScriptBin "send-message" ''
-  #!${(python3.withPackages (ps: [ ps.sleekxmpp ])).interpreter}
-  # Based on the sleekxmpp send_client example, look there for more details:
-  # https://github.com/fritzy/SleekXMPP/blob/develop/examples/send_client.py
-  import sleekxmpp
-
-  class SendMsgBot(sleekxmpp.ClientXMPP):
-      """
-      A basic SleekXMPP bot that will log in, send a message,
-      and then log out.
-      """
-      def __init__(self, jid, password, recipient, message):
-          sleekxmpp.ClientXMPP.__init__(self, jid, password)
-
-          self.recipient = recipient
-          self.msg = message
-
-          self.add_event_handler("session_start", self.start, threaded=True)
-
-      def start(self, event):
-          self.send_presence()
-          self.get_roster()
-
-          self.send_message(mto=self.recipient,
-                            mbody=self.msg,
-                            mtype='chat')
-
-          self.disconnect(wait=True)
-
-
-  if __name__ == '__main__':
-      xmpp = SendMsgBot("cthon98@example.com", "nothunter2", "azurediamond@example.com", "hey, if you type in your pw, it will show as stars")
-      xmpp.register_plugin('xep_0030') # Service Discovery
-      xmpp.register_plugin('xep_0199') # XMPP Ping
-
-      # TODO: verify certificate
-      # If you want to verify the SSL certificates offered by a server:
-      # xmpp.ca_certs = "path/to/ca/cert"
-
-      if xmpp.connect(('${connectTo}', 5222)):
-          xmpp.process(block=True)
-      else:
-          print("Unable to connect.")
-          sys.exit(1)
+{ writeScriptBin, writeText, python3, connectTo ? "localhost" }:
+let
+  dummyFile = writeText "dummy-file" ''
+    Dear dog,
+
+    Please find this *really* important attachment.
+
+    Yours truly,
+    John
+  '';
+in writeScriptBin "send-message" ''
+#!${(python3.withPackages (ps: [ ps.slixmpp ])).interpreter}
+import logging
+import sys
+from types import MethodType
+
+from slixmpp import ClientXMPP
+from slixmpp.exceptions import IqError, IqTimeout
+
+
+class CthonTest(ClientXMPP):
+
+    def __init__(self, jid, password):
+        ClientXMPP.__init__(self, jid, password)
+        self.add_event_handler("session_start", self.session_start)
+
+    async def session_start(self, event):
+        log = logging.getLogger(__name__)
+        self.send_presence()
+        self.get_roster()
+        # Sending a test message
+        self.send_message(mto="azurediamond@example.com", mbody="Hello, this is dog.", mtype="chat")
+        log.info('Message sent')
+
+        # Test http upload (XEP_0363)
+        def timeout_callback(arg):
+            log.error("ERROR: Cannot upload file. XEP_0363 seems broken")
+            sys.exit(1)
+        url = await self['xep_0363'].upload_file("${dummyFile}", timeout=10, timeout_callback=timeout_callback)
+        log.info('Upload success!')
+        # Test MUC
+        self.plugin['xep_0045'].join_muc('testMucRoom', 'cthon98', wait=True)
+        log.info('MUC join success!')
+        log.info('XMPP SCRIPT TEST SUCCESS')
+        self.disconnect(wait=True)
+
+
+if __name__ == '__main__':
+    logging.basicConfig(level=logging.DEBUG,
+                        format='%(levelname)-8s %(message)s')
+
+    ct = CthonTest('cthon98@example.com', 'nothunter2')
+    ct.register_plugin('xep_0071')
+    ct.register_plugin('xep_0128')
+    # HTTP Upload
+    ct.register_plugin('xep_0363')
+    # MUC
+    ct.register_plugin('xep_0045')
+    ct.connect(("server", 5222))
+    ct.process(forever=False)
 ''
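The rewritten send-message script only exercises the sending side: the chat message, the HTTP upload and the MUC join are all asserted from cthon98's session, and whether azurediamond actually receives the message is not checked. A companion listener is straightforward to sketch with the same slixmpp calls already used above; it is not part of the test, and the class name is illustrative only.

    #!/usr/bin/env python3
    # Hypothetical receiving side for azurediamond; uses only slixmpp calls
    # that also appear in the script above, plus the standard "message" event.
    import logging
    from slixmpp import ClientXMPP

    class AzureListener(ClientXMPP):
        def __init__(self, jid, password):
            ClientXMPP.__init__(self, jid, password)
            self.add_event_handler("session_start", self.session_start)
            self.add_event_handler("message", self.message)

        async def session_start(self, event):
            self.send_presence()
            self.get_roster()

        def message(self, msg):
            if msg["type"] in ("chat", "normal"):
                logging.info("Received: %s", msg["body"])
                self.disconnect()

    if __name__ == "__main__":
        logging.basicConfig(level=logging.INFO, format="%(levelname)-8s %(message)s")
        xmpp = AzureListener("azurediamond@example.com", "hunter2")
        xmpp.connect(("server", 5222))
        xmpp.process(forever=False)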