Diffstat (limited to 'nixos')
-rw-r--r--nixos/doc/manual/configuration/declarative-packages.xml6
-rw-r--r--nixos/doc/manual/configuration/xfce.xml14
-rw-r--r--nixos/doc/manual/development/option-types.xml90
-rw-r--r--nixos/doc/manual/development/replace-modules.xml4
-rw-r--r--nixos/doc/manual/man-nixos-install.xml62
-rw-r--r--nixos/doc/manual/man-pages.xml2
-rw-r--r--nixos/doc/manual/release-notes/rl-2003.xml96
-rw-r--r--nixos/lib/test-driver/test-driver.py25
-rw-r--r--nixos/lib/testing-python.nix2
-rwxr-xr-xnixos/maintainers/scripts/azure/create-azure.sh4
-rw-r--r--nixos/modules/config/i18n.nix13
-rw-r--r--nixos/modules/config/ldap.nix4
-rw-r--r--nixos/modules/config/pulseaudio.nix27
-rw-r--r--nixos/modules/config/swap.nix2
-rw-r--r--nixos/modules/hardware/opengl.nix6
-rw-r--r--nixos/modules/hardware/usb-wwan.nix13
-rw-r--r--nixos/modules/hardware/video/nvidia.nix9
-rw-r--r--nixos/modules/i18n/input-method/ibus.nix2
-rw-r--r--nixos/modules/installer/cd-dvd/system-tarball-pc.nix5
-rw-r--r--nixos/modules/installer/cd-dvd/system-tarball-sheevaplug.nix5
-rw-r--r--nixos/modules/installer/cd-dvd/system-tarball.nix2
-rw-r--r--nixos/modules/installer/tools/nix-fallback-paths.nix8
-rw-r--r--nixos/modules/installer/tools/nixos-install.sh15
-rw-r--r--nixos/modules/installer/tools/tools.nix10
-rw-r--r--nixos/modules/misc/documentation.nix7
-rw-r--r--nixos/modules/misc/version.nix15
-rw-r--r--nixos/modules/module-list.nix12
-rw-r--r--nixos/modules/programs/bandwhich.nix29
-rw-r--r--nixos/modules/programs/dconf.nix9
-rw-r--r--nixos/modules/programs/liboping.nix22
-rw-r--r--nixos/modules/programs/screen.nix1
-rw-r--r--nixos/modules/programs/shadow.nix28
-rw-r--r--nixos/modules/programs/sway.nix16
-rw-r--r--nixos/modules/programs/traceroute.nix26
-rw-r--r--nixos/modules/programs/way-cooler.nix78
-rw-r--r--nixos/modules/rename.nix7
-rw-r--r--nixos/modules/security/duosec.nix28
-rw-r--r--nixos/modules/security/pam.nix12
-rw-r--r--nixos/modules/security/pam_mount.nix5
-rw-r--r--nixos/modules/security/rtkit.nix5
-rw-r--r--nixos/modules/security/sudo.nix3
-rw-r--r--nixos/modules/services/admin/oxidized.nix2
-rw-r--r--nixos/modules/services/amqp/rabbitmq.nix5
-rw-r--r--nixos/modules/services/audio/mpd.nix26
-rw-r--r--nixos/modules/services/backup/mysql-backup.nix7
-rw-r--r--nixos/modules/services/cluster/kubernetes/default.nix3
-rw-r--r--nixos/modules/services/cluster/kubernetes/pki.nix10
-rw-r--r--nixos/modules/services/continuous-integration/buildbot/master.nix23
-rw-r--r--nixos/modules/services/continuous-integration/buildbot/worker.nix23
-rw-r--r--nixos/modules/services/continuous-integration/buildkite-agent.nix111
-rw-r--r--nixos/modules/services/continuous-integration/gocd-agent/default.nix24
-rw-r--r--nixos/modules/services/continuous-integration/gocd-server/default.nix24
-rw-r--r--nixos/modules/services/continuous-integration/hydra/default.nix4
-rw-r--r--nixos/modules/services/continuous-integration/jenkins/default.nix24
-rw-r--r--nixos/modules/services/continuous-integration/jenkins/slave.nix22
-rw-r--r--nixos/modules/services/databases/cockroachdb.nix14
-rw-r--r--nixos/modules/services/databases/foundationdb.nix14
-rw-r--r--nixos/modules/services/databases/influxdb.nix14
-rw-r--r--nixos/modules/services/databases/memcached.nix7
-rw-r--r--nixos/modules/services/databases/mysql.nix2
-rw-r--r--nixos/modules/services/databases/neo4j.nix3
-rw-r--r--nixos/modules/services/databases/openldap.nix2
-rw-r--r--nixos/modules/services/databases/virtuoso.nix5
-rw-r--r--nixos/modules/services/editors/infinoted.nix15
-rw-r--r--nixos/modules/services/hardware/actkbd.nix2
-rw-r--r--nixos/modules/services/hardware/bluetooth.nix6
-rw-r--r--nixos/modules/services/hardware/sane_extra_backends/brscan4.nix11
-rw-r--r--nixos/modules/services/hardware/tcsd.nix14
-rw-r--r--nixos/modules/services/hardware/tlp.nix15
-rw-r--r--nixos/modules/services/hardware/udev.nix11
-rw-r--r--nixos/modules/services/hardware/usbmuxd.nix15
-rw-r--r--nixos/modules/services/logging/awstats.nix1
-rw-r--r--nixos/modules/services/logging/logcheck.nix7
-rw-r--r--nixos/modules/services/mail/dovecot.nix52
-rw-r--r--nixos/modules/services/mail/dspam.nix14
-rw-r--r--nixos/modules/services/mail/exim.nix6
-rw-r--r--nixos/modules/services/mail/mlmmj.nix6
-rw-r--r--nixos/modules/services/mail/nullmailer.nix7
-rw-r--r--nixos/modules/services/mail/opendkim.nix14
-rw-r--r--nixos/modules/services/mail/postfix.nix26
-rw-r--r--nixos/modules/services/mail/postsrsd.nix14
-rw-r--r--nixos/modules/services/mail/rspamd.nix6
-rw-r--r--nixos/modules/services/mail/spamassassin.nix12
-rw-r--r--nixos/modules/services/misc/apache-kafka.nix3
-rw-r--r--nixos/modules/services/misc/bepasty.nix18
-rw-r--r--nixos/modules/services/misc/cgminer.nix7
-rw-r--r--nixos/modules/services/misc/couchpotato.nix11
-rw-r--r--nixos/modules/services/misc/dictd.nix10
-rw-r--r--nixos/modules/services/misc/etcd.nix3
-rw-r--r--nixos/modules/services/misc/exhibitor.nix3
-rw-r--r--nixos/modules/services/misc/felix.nix10
-rw-r--r--nixos/modules/services/misc/folding-at-home.nix5
-rw-r--r--nixos/modules/services/misc/gitea.nix2
-rw-r--r--nixos/modules/services/misc/gitlab.nix14
-rw-r--r--nixos/modules/services/misc/gpsd.nix10
-rw-r--r--nixos/modules/services/misc/headphones.nix24
-rw-r--r--nixos/modules/services/misc/home-assistant.nix9
-rw-r--r--nixos/modules/services/misc/matrix-synapse.nix12
-rw-r--r--nixos/modules/services/misc/mediatomb.nix24
-rw-r--r--nixos/modules/services/misc/nix-daemon.nix10
-rw-r--r--nixos/modules/services/misc/nixos-manual.nix2
-rw-r--r--nixos/modules/services/misc/octoprint.nix14
-rw-r--r--nixos/modules/services/misc/paperless.nix14
-rw-r--r--nixos/modules/services/misc/redmine.nix16
-rw-r--r--nixos/modules/services/misc/ripple-data-api.nix5
-rw-r--r--nixos/modules/services/misc/rippled.nix5
-rw-r--r--nixos/modules/services/misc/rogue.nix2
-rw-r--r--nixos/modules/services/misc/serviio.nix13
-rw-r--r--nixos/modules/services/misc/sickbeard.nix24
-rw-r--r--nixos/modules/services/misc/siproxd.nix3
-rw-r--r--nixos/modules/services/misc/taskserver/default.nix16
-rw-r--r--nixos/modules/services/misc/uhub.nix24
-rw-r--r--nixos/modules/services/misc/zookeeper.nix3
-rw-r--r--nixos/modules/services/monitoring/collectd.nix7
-rw-r--r--nixos/modules/services/monitoring/datadog-agent.nix21
-rw-r--r--nixos/modules/services/monitoring/dd-agent/dd-agent.nix60
-rw-r--r--nixos/modules/services/monitoring/fusion-inventory.nix3
-rw-r--r--nixos/modules/services/monitoring/graphite.nix3
-rw-r--r--nixos/modules/services/monitoring/heapster.nix3
-rw-r--r--nixos/modules/services/monitoring/munin.nix10
-rw-r--r--nixos/modules/services/monitoring/nagios.nix117
-rw-r--r--nixos/modules/services/monitoring/netdata.nix11
-rw-r--r--nixos/modules/services/monitoring/statsd.nix3
-rw-r--r--nixos/modules/services/monitoring/sysstat.nix12
-rw-r--r--nixos/modules/services/monitoring/telegraf.nix5
-rw-r--r--nixos/modules/services/monitoring/ups.nix45
-rw-r--r--nixos/modules/services/network-filesystems/ceph.nix7
-rw-r--r--nixos/modules/services/network-filesystems/davfs2.nix23
-rw-r--r--nixos/modules/services/network-filesystems/drbd.nix6
-rw-r--r--nixos/modules/services/networking/bind.nix5
-rw-r--r--nixos/modules/services/networking/bitlbee.nix3
-rw-r--r--nixos/modules/services/networking/charybdis.nix6
-rw-r--r--nixos/modules/services/networking/connman.nix32
-rw-r--r--nixos/modules/services/networking/corerad.nix46
-rw-r--r--nixos/modules/services/networking/coturn.nix14
-rw-r--r--nixos/modules/services/networking/dhcpcd.nix6
-rw-r--r--nixos/modules/services/networking/dnschain.nix3
-rw-r--r--nixos/modules/services/networking/dnsmasq.nix3
-rw-r--r--nixos/modules/services/networking/ejabberd.nix14
-rw-r--r--nixos/modules/services/networking/gale.nix5
-rw-r--r--nixos/modules/services/networking/git-daemon.nix12
-rw-r--r--nixos/modules/services/networking/gnunet.nix8
-rw-r--r--nixos/modules/services/networking/hans.nix3
-rw-r--r--nixos/modules/services/networking/i2pd.nix18
-rw-r--r--nixos/modules/services/networking/iodine.nix3
-rw-r--r--nixos/modules/services/networking/ircd-hybrid/default.nix5
-rw-r--r--nixos/modules/services/networking/kippo.nix5
-rw-r--r--nixos/modules/services/networking/kresd.nix35
-rw-r--r--nixos/modules/services/networking/matterbridge.nix13
-rw-r--r--nixos/modules/services/networking/mjpg-streamer.nix9
-rw-r--r--nixos/modules/services/networking/monero.nix8
-rw-r--r--nixos/modules/services/networking/mxisd.nix12
-rw-r--r--nixos/modules/services/networking/namecoind.nix6
-rw-r--r--nixos/modules/services/networking/nat.nix2
-rw-r--r--nixos/modules/services/networking/ndppd.nix20
-rw-r--r--nixos/modules/services/networking/networkmanager.nix102
-rw-r--r--nixos/modules/services/networking/nntp-proxy.nix5
-rw-r--r--nixos/modules/services/networking/nsd.nix8
-rw-r--r--nixos/modules/services/networking/ntp/chrony.nix10
-rw-r--r--nixos/modules/services/networking/ntp/ntpd.nix5
-rw-r--r--nixos/modules/services/networking/ntp/openntpd.nix3
-rw-r--r--nixos/modules/services/networking/owamp.nix7
-rw-r--r--nixos/modules/services/networking/pdnsd.nix6
-rw-r--r--nixos/modules/services/networking/polipo.nix10
-rw-r--r--nixos/modules/services/networking/pppd.nix10
-rw-r--r--nixos/modules/services/networking/prayer.nix11
-rw-r--r--nixos/modules/services/networking/quassel.nix16
-rw-r--r--nixos/modules/services/networking/radicale.nix11
-rw-r--r--nixos/modules/services/networking/shairport-sync.nix5
-rw-r--r--nixos/modules/services/networking/shorewall.nix75
-rw-r--r--nixos/modules/services/networking/shorewall6.nix75
-rw-r--r--nixos/modules/services/networking/shout.nix3
-rw-r--r--nixos/modules/services/networking/smokeping.nix3
-rw-r--r--nixos/modules/services/networking/supybot.nix4
-rw-r--r--nixos/modules/services/networking/syncthing.nix12
-rw-r--r--nixos/modules/services/networking/tcpcrypt.nix3
-rw-r--r--nixos/modules/services/networking/tox-bootstrapd.nix5
-rw-r--r--nixos/modules/services/networking/vsftpd.nix21
-rw-r--r--nixos/modules/services/networking/wpa_supplicant.nix7
-rw-r--r--nixos/modules/services/networking/xandikos.nix148
-rw-r--r--nixos/modules/services/networking/znc/default.nix26
-rw-r--r--nixos/modules/services/printing/cupsd.nix5
-rw-r--r--nixos/modules/services/scheduling/atd.nix10
-rw-r--r--nixos/modules/services/scheduling/fcron.nix5
-rw-r--r--nixos/modules/services/search/hound.nix22
-rw-r--r--nixos/modules/services/search/kibana.nix3
-rw-r--r--nixos/modules/services/search/solr.nix14
-rw-r--r--nixos/modules/services/security/certmgr.nix4
-rw-r--r--nixos/modules/services/security/clamav.nix9
-rw-r--r--nixos/modules/services/security/fprot.nix14
-rw-r--r--nixos/modules/services/security/torify.nix3
-rw-r--r--nixos/modules/services/security/torsocks.nix9
-rw-r--r--nixos/modules/services/system/dbus.nix5
-rw-r--r--nixos/modules/services/system/localtime.nix4
-rw-r--r--nixos/modules/services/torrent/transmission.nix18
-rw-r--r--nixos/modules/services/ttys/agetty.nix3
-rw-r--r--nixos/modules/services/web-apps/frab.nix10
-rw-r--r--nixos/modules/services/web-apps/ihatemoney/default.nix141
-rw-r--r--nixos/modules/services/web-apps/mattermost.nix22
-rw-r--r--nixos/modules/services/web-apps/nextcloud.nix12
-rw-r--r--nixos/modules/services/web-servers/apache-httpd/default.nix19
-rw-r--r--nixos/modules/services/web-servers/nginx/default.nix32
-rw-r--r--nixos/modules/services/web-servers/nginx/gitweb.nix53
-rw-r--r--nixos/modules/services/web-servers/tomcat.nix10
-rw-r--r--nixos/modules/services/web-servers/unit/default.nix14
-rw-r--r--nixos/modules/services/web-servers/uwsgi.nix33
-rw-r--r--nixos/modules/services/x11/desktop-managers/enlightenment.nix5
-rw-r--r--nixos/modules/services/x11/desktop-managers/gnome3.nix12
-rw-r--r--nixos/modules/services/x11/desktop-managers/plasma5.nix5
-rw-r--r--nixos/modules/services/x11/display-managers/default.nix2
-rw-r--r--nixos/modules/services/x11/display-managers/gdm.nix25
-rw-r--r--nixos/modules/services/x11/extra-layouts.nix8
-rw-r--r--nixos/modules/services/x11/hardware/libinput.nix13
-rw-r--r--nixos/modules/services/x11/hardware/multitouch.nix94
-rw-r--r--nixos/modules/services/x11/picom.nix (renamed from nixos/modules/services/x11/compton.nix)32
-rw-r--r--nixos/modules/services/x11/unclutter.nix7
-rw-r--r--nixos/modules/services/x11/xserver.nix34
-rw-r--r--nixos/modules/system/boot/kernel.nix5
-rw-r--r--nixos/modules/system/boot/networkd.nix10
-rw-r--r--nixos/modules/system/boot/systemd-lib.nix8
-rw-r--r--nixos/modules/system/boot/systemd.nix2
-rw-r--r--nixos/modules/tasks/powertop.nix1
-rw-r--r--nixos/modules/virtualisation/containers.nix15
-rw-r--r--nixos/modules/virtualisation/docker-containers.nix2
-rw-r--r--nixos/modules/virtualisation/xen-dom0.nix31
-rw-r--r--nixos/tests/3proxy.nix57
-rw-r--r--nixos/tests/all-tests.nix6
-rw-r--r--nixos/tests/bittorrent.nix155
-rw-r--r--nixos/tests/buildkite-agent.nix23
-rw-r--r--nixos/tests/ceph-multi-node.nix56
-rw-r--r--nixos/tests/ceph-single-node.nix23
-rw-r--r--nixos/tests/certmgr.nix12
-rw-r--r--nixos/tests/chromium.nix2
-rw-r--r--nixos/tests/common/user-account.nix1
-rw-r--r--nixos/tests/corerad.nix71
-rw-r--r--nixos/tests/elk.nix111
-rw-r--r--nixos/tests/gnome3-xorg.nix78
-rw-r--r--nixos/tests/haka.nix10
-rw-r--r--nixos/tests/home-assistant.nix71
-rw-r--r--nixos/tests/hydra/default.nix2
-rw-r--r--nixos/tests/ihatemoney.nix52
-rw-r--r--nixos/tests/initdb.nix26
-rw-r--r--nixos/tests/kafka.nix44
-rw-r--r--nixos/tests/kexec.nix18
-rw-r--r--nixos/tests/mysql.nix18
-rw-r--r--nixos/tests/nagios.nix116
-rw-r--r--nixos/tests/netdata.nix1
-rw-r--r--nixos/tests/nginx-etag.nix89
-rw-r--r--nixos/tests/nginx-sso.nix24
-rw-r--r--nixos/tests/nginx.nix97
-rw-r--r--nixos/tests/postgresql.nix45
-rw-r--r--nixos/tests/xandikos.nix70
252 files changed, 3118 insertions, 1836 deletions
diff --git a/nixos/doc/manual/configuration/declarative-packages.xml b/nixos/doc/manual/configuration/declarative-packages.xml
index 5fb3bcb9f8f..cd84d1951d2 100644
--- a/nixos/doc/manual/configuration/declarative-packages.xml
+++ b/nixos/doc/manual/configuration/declarative-packages.xml
@@ -19,6 +19,12 @@
   <command>nixos-rebuild switch</command>.
  </para>
 
+ <note>
+  <para>
+   Some packages require additional global configuration, such as D-Bus or systemd service registration, so adding them to <xref linkend="opt-environment.systemPackages"/> might not be sufficient. You are advised to check the <link xlink:href="#ch-options">list of options</link> to see whether a NixOS module exists for the package.
+  </para>
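+  <para>
+   For example, for a hypothetical daemon <literal>foo</literal> that ships its own
+   NixOS module, prefer enabling that module over only adding the package:
+  </para>
+<programlisting>
+# instead of: environment.systemPackages = [ pkgs.foo ];
+services.foo.enable = true;
+</programlisting>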
+ </note>
+
  <para>
   You can get a list of the available packages as follows:
 <screen>
diff --git a/nixos/doc/manual/configuration/xfce.xml b/nixos/doc/manual/configuration/xfce.xml
index 027828bb936..7d2862f8b31 100644
--- a/nixos/doc/manual/configuration/xfce.xml
+++ b/nixos/doc/manual/configuration/xfce.xml
@@ -13,15 +13,15 @@
 </programlisting>
  </para>
  <para>
-  Optionally, <emphasis>compton</emphasis> can be enabled for nice graphical
+  Optionally, <emphasis>picom</emphasis> can be enabled for nice graphical
  effects. Some example settings:
 <programlisting>
-<link linkend="opt-services.compton.enable">services.compton</link> = {
-  <link linkend="opt-services.compton.enable">enable</link>          = true;
-  <link linkend="opt-services.compton.fade">fade</link>            = true;
-  <link linkend="opt-services.compton.inactiveOpacity">inactiveOpacity</link> = "0.9";
-  <link linkend="opt-services.compton.shadow">shadow</link>          = true;
-  <link linkend="opt-services.compton.fadeDelta">fadeDelta</link>       = 4;
+<link linkend="opt-services.picom.enable">services.picom</link> = {
+  <link linkend="opt-services.picom.enable">enable</link>          = true;
+  <link linkend="opt-services.picom.fade">fade</link>            = true;
+  <link linkend="opt-services.picom.inactiveOpacity">inactiveOpacity</link> = "0.9";
+  <link linkend="opt-services.picom.shadow">shadow</link>          = true;
+  <link linkend="opt-services.picom.fadeDelta">fadeDelta</link>       = 4;
 };
 </programlisting>
  </para>
diff --git a/nixos/doc/manual/development/option-types.xml b/nixos/doc/manual/development/option-types.xml
index 8fcbb627342..957349ad181 100644
--- a/nixos/doc/manual/development/option-types.xml
+++ b/nixos/doc/manual/development/option-types.xml
@@ -257,14 +257,68 @@
     <listitem>
      <para>
       A set of sub options <replaceable>o</replaceable>.
-      <replaceable>o</replaceable> can be an attribute set or a function
-      returning an attribute set. Submodules are used in composed types to
-      create modular options. Submodule are detailed in
+      <replaceable>o</replaceable> can be an attribute set, a function
+      returning an attribute set, or a path to a file containing such a value. Submodules are used in
+      composed types to create modular options. This is equivalent to
+      <literal>types.submoduleWith { modules = toList o; shorthandOnlyDefinesConfig = true; }</literal>.
+      Submodules are detailed in
       <xref
           linkend='section-option-types-submodule' />.
      </para>
     </listitem>
    </varlistentry>
+   <varlistentry>
+     <term>
+       <varname>types.submoduleWith</varname> {
+        <replaceable>modules</replaceable>,
+        <replaceable>specialArgs</replaceable> ? {},
+        <replaceable>shorthandOnlyDefinesConfig</replaceable> ? false }
+     </term>
+     <listitem>
+       <para>
+         Like <varname>types.submodule</varname>, but more flexible and with better defaults.
+         It has parameters
+         <itemizedlist>
+           <listitem><para>
+             <replaceable>modules</replaceable>
+             A list of modules to use by default for this submodule type. This gets combined
+             with all option definitions to build the final list of modules that will be included.
+             <note><para>
+               Only options defined with this argument are included in rendered documentation.
+             </para></note>
+           </para></listitem>
+           <listitem><para>
+             <replaceable>specialArgs</replaceable>
+             An attribute set of extra arguments to be passed to the module functions.
+             The option <literal>_module.args</literal> should be used instead
+             for most arguments since it allows overriding. <replaceable>specialArgs</replaceable> should only be
+             used for arguments that can&apos;t go through the module fixed-point, because of
+             infinite recursion or other problems. An example is overriding the
+             <varname>lib</varname> argument, because <varname>lib</varname> itself is used
+             to define <literal>_module.args</literal>, which makes using
+             <literal>_module.args</literal> to define it impossible.
+           </para></listitem>
+           <listitem><para>
+             <replaceable>shorthandOnlyDefinesConfig</replaceable>
+             Whether definitions of this type should default to the <literal>config</literal>
+             section of a module (see <xref linkend='ex-module-syntax'/>) if it is an attribute
+             set. Enabling this only has a benefit when the submodule defines an option named
+             <literal>config</literal> or <literal>options</literal>. In such a case it would
+             allow the option to be set with <literal>the-submodule.config = "value"</literal>
+             instead of requiring <literal>the-submodule.config.config = "value"</literal>.
+             This is because all keys are only interpreted as option definitions in the
+             <literal>config</literal> section when modules <emphasis>don&apos;t</emphasis> set the
+             <literal>config</literal> or <literal>options</literal> keys. Enabling this option
+             implicitly puts all attributes in the <literal>config</literal> section.
+           </para>
+           <para>
+             With this option enabled, defining a non-<literal>config</literal> section requires
+             using a function: <literal>the-submodule = { ... }: { options = { ... }; }</literal>.
+           </para></listitem>
+         </itemizedlist>
+       </para>
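+       <para>
+         For instance, a sketch of an option whose values are built from a shared
+         set of modules (the file name and option name are placeholders):
+<programlisting>
+options.instances = lib.mkOption {
+  type = lib.types.attrsOf (lib.types.submoduleWith {
+    modules = [ ./instance-options.nix ];
+    shorthandOnlyDefinesConfig = true;
+  });
+  default = {};
+};
+</programlisting>
+       </para>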
+     </listitem>
+   </varlistentry>
   </variablelist>
  </section>
 
@@ -298,6 +352,36 @@
      An attribute set where all the values are of type
      <replaceable>t</replaceable>. Multiple definitions result in the
      joined attribute set.
+      <note><para>
+       This type is <emphasis>strict</emphasis> in its values, which in turn
+       means attributes cannot depend on other attributes. See <varname>
+       types.lazyAttrsOf</varname> for a lazy version.
+      </para></note>
+     </para>
+    </listitem>
+   </varlistentry>
+   <varlistentry>
+    <term>
+     <varname>types.lazyAttrsOf</varname> <replaceable>t</replaceable>
+    </term>
+    <listitem>
+     <para>
+      An attribute set where all the values are of type
+      <replaceable>t</replaceable>. Multiple definitions result in the
+      joined attribute set. This is the lazy version of
+      <varname>types.attrsOf</varname>, allowing attributes to depend on each other.
+      <warning><para>
+       This version does not fully support conditional definitions! With an
+       option <varname>foo</varname> of this type and a definition
+       <literal>foo.attr = lib.mkIf false 10</literal>, evaluating
+       <literal>foo ? attr</literal> will return <literal>true</literal>
+       even though it should be <literal>false</literal>. Accessing the value will then throw
+       an error. For types <replaceable>t</replaceable> that have an
+       <literal>emptyValue</literal> defined, that value will be returned
+       instead of throwing an error. So if the type of <literal>foo.attr</literal>
+       was <literal>lazyAttrsOf (nullOr int)</literal>, <literal>null</literal>
+       would be returned instead for the same <literal>mkIf false</literal> definition.
+      </para></warning>
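+      For instance, inside a module, one attribute may refer to another
+      (a sketch; option and attribute names are illustrative):
+<programlisting>
+options.settings = lib.mkOption {
+  type = lib.types.lazyAttrsOf lib.types.int;
+  default = {};
+};
+
+config.settings = {
+  basePort  = 8000;
+  adminPort = config.settings.basePort + 1; # with types.attrsOf this self-reference causes infinite recursion
+};
+</programlisting>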
      </para>
     </listitem>
    </varlistentry>
diff --git a/nixos/doc/manual/development/replace-modules.xml b/nixos/doc/manual/development/replace-modules.xml
index 7b103c36d90..b4a466e2294 100644
--- a/nixos/doc/manual/development/replace-modules.xml
+++ b/nixos/doc/manual/development/replace-modules.xml
@@ -6,8 +6,8 @@
  <title>Replace Modules</title>
 
  <para>
-  Modules that are imported can also be disabled. The option declarations and
-  config implementation of a disabled module will be ignored, allowing another
+  Modules that are imported can also be disabled. The option declarations,
+  config implementation and the imports of a disabled module will be ignored, allowing another
  to take its place. This can be used to import a set of modules from another
   channel while keeping the rest of the system on a stable release.
  </para>
diff --git a/nixos/doc/manual/man-nixos-install.xml b/nixos/doc/manual/man-nixos-install.xml
index 45bbd5d81ff..0752c397182 100644
--- a/nixos/doc/manual/man-nixos-install.xml
+++ b/nixos/doc/manual/man-nixos-install.xml
@@ -15,6 +15,26 @@
   <cmdsynopsis>
    <command>nixos-install</command>
    <arg>
+    <group choice='req'>
+     <arg choice='plain'>
+      <option>--verbose</option>
+     </arg>
+     <arg choice='plain'>
+      <option>-v</option>
+     </arg>
+    </group>
+   </arg>
+   <arg>
+    <group choice='req'>
+     <arg choice='plain'>
+      <option>--print-build-logs</option>
+     </arg>
+     <arg choice='plain'>
+      <option>-L</option>
+     </arg>
+    </group>
+   </arg>
+   <arg>
     <arg choice='plain'>
      <option>-I</option>
     </arg>
@@ -36,6 +56,13 @@
    </arg>
 
    <arg>
+     <arg choice='plain'>
+       <option>--channel</option>
+     </arg>
+     <replaceable>channel</replaceable>
+   </arg>
+
+   <arg>
     <arg choice='plain'>
      <option>--no-channel-copy</option>
     </arg>
@@ -107,6 +134,12 @@
      </para>
     </listitem>
     <listitem>
+      <para>
+        It installs the current <quote>nixos</quote> channel in the target's
+        channel profile (unless <option>--no-channel-copy</option> is specified).
+      </para>
+    </listitem>
+    <listitem>
      <para>
       It installs the GRUB boot loader on the device specified in the option
       <option>boot.loader.grub.device</option> (unless
@@ -135,6 +168,23 @@
   </para>
   <variablelist>
    <varlistentry>
+    <term><option>--verbose</option> / <option>-v</option></term>
+    <listitem>
+     <para>Increases the level of verbosity of diagnostic messages
+     printed on standard error.  For each Nix operation, the information
+     printed on standard output is well-defined; any diagnostic
+     information is printed on standard error, never on standard
+     output.</para>
+     <para>Please note that this option may be specified repeatedly.</para>
+    </listitem>
+   </varlistentry>
+   <varlistentry>
+    <term><option>--print-build-logs</option> / <option>-L</option></term>
+    <listitem>
+     <para>Print the full build logs of <command>nix build</command> to stderr.</para>
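+     <para>For example, to run the installation with increased verbosity and
+     full build logs:
+<screen>
+# nixos-install -v -L</screen>
+     </para>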
+    </listitem>
+   </varlistentry>
+   <varlistentry>
     <term>
      <option>--root</option>
     </term>
@@ -166,6 +216,18 @@
     </listitem>
    </varlistentry>
    <varlistentry>
+     <term>
+       <option>--channel</option>
+     </term>
+     <listitem>
+       <para>
+         If this option is provided, do not copy the current
+         <quote>nixos</quote> channel to the target host. Instead, use the
+         specified derivation.
+       </para>
+     </listitem>
+   </varlistentry>
+   <varlistentry>
     <term>
      <option>-I</option>
     </term>
diff --git a/nixos/doc/manual/man-pages.xml b/nixos/doc/manual/man-pages.xml
index f5a1dd2d69f..49acfe7330b 100644
--- a/nixos/doc/manual/man-pages.xml
+++ b/nixos/doc/manual/man-pages.xml
@@ -6,7 +6,7 @@
   <author><personname><firstname>Eelco</firstname><surname>Dolstra</surname></personname>
    <contrib>Author</contrib>
   </author>
-  <copyright><year>2007-2019</year><holder>Eelco Dolstra</holder>
+  <copyright><year>2007-2020</year><holder>Eelco Dolstra</holder>
   </copyright>
  </info>
  <xi:include href="man-configuration.xml" />
diff --git a/nixos/doc/manual/release-notes/rl-2003.xml b/nixos/doc/manual/release-notes/rl-2003.xml
index 1c1c8908064..1eef4f08c4f 100644
--- a/nixos/doc/manual/release-notes/rl-2003.xml
+++ b/nixos/doc/manual/release-notes/rl-2003.xml
@@ -170,6 +170,12 @@ services.xserver.displayManager.defaultSession = "xfce+icewm";
    </listitem>
    <listitem>
     <para>
+     The Way Cooler Wayland compositor has been removed, as the project has been officially canceled.
+     The <literal>way-cooler</literal> attribute and the <literal>programs.way-cooler</literal> options no longer exist.
+    </para>
+   </listitem>
+   <listitem>
+    <para>
      The BEAM package set has been deleted; only the different interpreters remain there.
      You should now use the build tools that come with each language, with sandbox mode disabled.
     </para>
@@ -266,6 +272,14 @@ services.xserver.displayManager.defaultSession = "xfce+icewm";
    </listitem>
    <listitem>
     <para>
+     The <literal>kresd</literal> service deprecates the <literal>interfaces</literal> option
+     in favor of the <literal>listenPlain</literal> option, which requires a full
+     <link xlink:href="https://www.freedesktop.org/software/systemd/man/systemd.socket.html#ListenStream=">systemd.socket-compatible</link>
+     declaration that always includes a port.
+    </para>
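+    <para>
+     For example (a sketch; adjust the addresses to your setup):
+<programlisting>
+services.kresd.listenPlain = [ "127.0.0.1:53" "[::1]:53" ];
+</programlisting>
+    </para>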
+   </listitem>
+   <listitem>
+    <para>
      Virtual console options have been reorganized and can be found under
      a single top-level attribute: <literal>console</literal>.
      The full set of changes is as follows:
@@ -349,6 +363,88 @@ services.xserver.displayManager.defaultSession = "xfce+icewm";
       <link linkend="opt-services.httpd.virtualHosts">services.httpd.virtualHosts.&lt;name&gt;.useACMEHost</link>.
     </para>
    </listitem>
+   <listitem>
+    <para>
+     For NixOS configuration options, the <literal>loaOf</literal> type has
+     been deprecated and will be removed in a future release. In nixpkgs,
+     options of this type will be changed to <literal>attrsOf</literal>
+     instead. If you were using one of these in your configuration, you will
+     see a warning suggesting what changes will be required.
+    </para>
+    <para>
+     For example, <link linkend="opt-users.users">users.users</link> is a
+     <literal>loaOf</literal> option that is commonly used as follows:
+     <programlisting>
+users.users =
+  [ { name = "me";
+      description = "My personal user.";
+      isNormalUser = true;
+    }
+  ];
+     </programlisting>
+     This should be rewritten by removing the list and using the
+     value of <literal>name</literal> as the name of the attribute set:
+     <programlisting>
+users.users.me =
+  { description = "My personal user.";
+    isNormalUser = true;
+  };
+     </programlisting>
+    </para>
+    <para>
+     For more information on this change, have a look at these links:
+     <link xlink:href="https://github.com/NixOS/nixpkgs/issues/1800">issue #1800</link>,
+     <link xlink:href="https://github.com/NixOS/nixpkgs/pull/63103">PR #63103</link>.
+    </para>
+   </listitem>
+   <listitem>
+    <para>
+     For NixOS modules, the types <literal>types.submodule</literal> and <literal>types.submoduleWith</literal> now support
+     paths as allowed values, similar to how <literal>imports</literal> supports paths.
+     Because of this, if you have a module that defines an option of type
+     <literal>either (submodule ...) path</literal>, it will break since a path
+     is now treated as the first type instead of the second. To fix this, change
+     the type to <literal>either path (submodule ...)</literal>.
+    </para>
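+    <para>
+     A sketch of the required change:
+<programlisting>
+# old order, now broken: a path value is treated as a submodule
+type = with types; either (submodule { options = { /* ... */ }; }) path;
+# fixed order
+type = with types; either path (submodule { options = { /* ... */ }; });
+</programlisting>
+    </para>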
+   </listitem>
+   <listitem>
+    <para>
+      The <link linkend="opt-services.buildkite-agent.enable">Buildkite Agent</link>
+      module and corresponding packages have been updated to 3.x.
+      While doing so, the following options have been changed (see the example after this list):
+    </para>
+    <itemizedlist>
+      <listitem>
+       <para>
+         <literal>services.buildkite-agent.meta-data</literal> has been renamed to
+         <link linkend="opt-services.buildkite-agent.tags">services.buildkite-agent.tags</link>,
+         to match upstream's naming for 3.x.
+         Its type has also changed: it now accepts an attrset of strings.
+       </para>
+      </listitem>
+      <listitem>
+       <para>
+         The <literal>services.buildkite-agent.openssh.publicKeyPath</literal> option
+         has been removed, as it's not necessary to deploy public keys to clone private
+         repositories.
+       </para>
+      </listitem>
+      <listitem>
+       <para>
+         <literal>services.buildkite-agent.openssh.privateKeyPath</literal>
+         has been renamed to
+         <link linkend="opt-services.buildkite-agent.privateSshKeyPath">services.buildkite-agent.privateSshKeyPath</link>,
+         as the whole <literal>openssh</literal> section only contained that single option.
+       </para>
+      </listitem>
+      <listitem>
+       <para>
+         <link linkend="opt-services.buildkite-agent.shell">services.buildkite-agent.shell</link>
+         has been introduced, allowing a custom shell to be specified.
+       </para>
+      </listitem>
+    </itemizedlist>
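+    <para>
+      A minimal sketch of the new syntax (values are placeholders and other
+      required settings are omitted):
+    </para>
+<programlisting>
+services.buildkite-agent = {
+  enable = true;
+  tags = { queue = "default"; };                  # formerly meta-data
+  privateSshKeyPath = "/var/keys/buildkite-ssh";  # formerly openssh.privateKeyPath
+};
+</programlisting>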
+   </listitem>
   </itemizedlist>
  </section>
 
diff --git a/nixos/lib/test-driver/test-driver.py b/nixos/lib/test-driver/test-driver.py
index 7e575189209..cf204a2619f 100644
--- a/nixos/lib/test-driver/test-driver.py
+++ b/nixos/lib/test-driver/test-driver.py
@@ -84,7 +84,7 @@ CHAR_TO_KEY = {
 
 # Forward references
 nr_tests: int
-nr_succeeded: int
+failed_tests: list
 log: "Logger"
 machines: "List[Machine]"
 
@@ -221,7 +221,7 @@ class Machine:
             return path
 
         self.state_dir = create_dir("vm-state-{}".format(self.name))
-        self.shared_dir = create_dir("{}/xchg".format(self.state_dir))
+        self.shared_dir = create_dir("shared-xchg")
 
         self.booted = False
         self.connected = False
@@ -576,7 +576,7 @@ class Machine:
         vm_src = pathlib.Path(source)
         with tempfile.TemporaryDirectory(dir=self.shared_dir) as shared_td:
             shared_temp = pathlib.Path(shared_td)
-            vm_shared_temp = pathlib.Path("/tmp/xchg") / shared_temp.name
+            vm_shared_temp = pathlib.Path("/tmp/shared") / shared_temp.name
             vm_intermediate = vm_shared_temp / vm_src.name
             intermediate = shared_temp / vm_src.name
             # Copy the file to the shared directory inside VM
@@ -704,7 +704,8 @@ class Machine:
 
         def process_serial_output() -> None:
             for _line in self.process.stdout:
-                line = _line.decode("unicode_escape").replace("\r", "").rstrip()
+                # Ignore undecodable bytes that may occur in boot menus
+                line = _line.decode(errors="ignore").replace("\r", "").rstrip()
                 eprint("{} # {}".format(self.name, line))
                 self.logger.enqueue({"msg": line, "machine": self.name})
 
@@ -841,23 +842,31 @@ def run_tests() -> None:
             machine.execute("sync")
 
     if nr_tests != 0:
+        nr_succeeded = nr_tests - len(failed_tests)
         eprint("{} out of {} tests succeeded".format(nr_succeeded, nr_tests))
-        if nr_tests > nr_succeeded:
+        if len(failed_tests) > 0:
+            eprint(
+                "The following tests have failed:\n - {}".format(
+                    "\n - ".join(failed_tests)
+                )
+            )
             sys.exit(1)
 
 
 @contextmanager
 def subtest(name: str) -> Iterator[None]:
     global nr_tests
-    global nr_succeeded
+    global failed_tests
 
     with log.nested(name):
         nr_tests += 1
         try:
             yield
-            nr_succeeded += 1
             return True
         except Exception as e:
+            failed_tests.append(
+                'Test "{}" failed with error: "{}"'.format(name, str(e))
+            )
             log.log("error: {}".format(str(e)))
 
     return False
@@ -879,7 +888,7 @@ if __name__ == "__main__":
     exec("\n".join(machine_eval))
 
     nr_tests = 0
-    nr_succeeded = 0
+    failed_tests = []
 
     @atexit.register
     def clean_up() -> None:
diff --git a/nixos/lib/testing-python.nix b/nixos/lib/testing-python.nix
index 3d09be3b6cd..a7f6d792651 100644
--- a/nixos/lib/testing-python.nix
+++ b/nixos/lib/testing-python.nix
@@ -155,7 +155,7 @@ in rec {
             --add-flags "''${vms[*]}" \
             ${lib.optionalString enableOCR
               "--prefix PATH : '${ocrProg}/bin:${imagemagick_tiff}/bin'"} \
-            --run "export testScript=\"\$(cat $out/test-script)\"" \
+            --run "export testScript=\"\$(${coreutils}/bin/cat $out/test-script)\"" \
             --set VLANS '${toString vlans}'
           ln -s ${testDriver}/bin/nixos-test-driver $out/bin/nixos-run-vms
           wrapProgram $out/bin/nixos-run-vms \
diff --git a/nixos/maintainers/scripts/azure/create-azure.sh b/nixos/maintainers/scripts/azure/create-azure.sh
index 2b22cb53661..0558f8dfffc 100755
--- a/nixos/maintainers/scripts/azure/create-azure.sh
+++ b/nixos/maintainers/scripts/azure/create-azure.sh
@@ -1,6 +1,6 @@
-#! /bin/sh -e
+#! /bin/sh -eu
 
-export NIX_PATH=nixpkgs=../../../..
+export NIX_PATH=nixpkgs=$(dirname $(readlink -f $0))/../../../..
 export NIXOS_CONFIG=$(dirname $(readlink -f $0))/../../../modules/virtualisation/azure-image.nix
 export TIMESTAMP=$(date +%Y%m%d%H%M)
 
diff --git a/nixos/modules/config/i18n.nix b/nixos/modules/config/i18n.nix
index 45691f4839c..cc2ddda9d32 100644
--- a/nixos/modules/config/i18n.nix
+++ b/nixos/modules/config/i18n.nix
@@ -80,14 +80,11 @@ with lib;
     };
 
     # ‘/etc/locale.conf’ is used by systemd.
-    environment.etc = singleton
-      { target = "locale.conf";
-        source = pkgs.writeText "locale.conf"
-          ''
-            LANG=${config.i18n.defaultLocale}
-            ${concatStringsSep "\n" (mapAttrsToList (n: v: ''${n}=${v}'') config.i18n.extraLocaleSettings)}
-          '';
-      };
+    environment.etc."locale.conf".source = pkgs.writeText "locale.conf"
+      ''
+        LANG=${config.i18n.defaultLocale}
+        ${concatStringsSep "\n" (mapAttrsToList (n: v: ''${n}=${v}'') config.i18n.extraLocaleSettings)}
+      '';
 
   };
 }
diff --git a/nixos/modules/config/ldap.nix b/nixos/modules/config/ldap.nix
index e008497a2a6..9c8e9d14937 100644
--- a/nixos/modules/config/ldap.nix
+++ b/nixos/modules/config/ldap.nix
@@ -224,7 +224,9 @@ in
 
   config = mkIf cfg.enable {
 
-    environment.etc = optional (!cfg.daemon.enable) ldapConfig;
+    environment.etc = optionalAttrs (!cfg.daemon.enable) {
+      "ldap.conf" = ldapConfig;
+    };
 
     system.activationScripts = mkIf (!cfg.daemon.enable) {
       ldap = stringAfter [ "etc" "groups" "users" ] ''
diff --git a/nixos/modules/config/pulseaudio.nix b/nixos/modules/config/pulseaudio.nix
index 9baad9b5854..048bbb30c73 100644
--- a/nixos/modules/config/pulseaudio.nix
+++ b/nixos/modules/config/pulseaudio.nix
@@ -215,9 +215,8 @@ in {
 
   config = mkMerge [
     {
-      environment.etc = singleton {
-        target = "pulse/client.conf";
-        source = clientConf;
+      environment.etc = {
+        "pulse/client.conf".source = clientConf;
       };
 
       hardware.pulseaudio.configFile = mkDefault "${getBin overriddenPackage}/etc/pulse/default.pa";
@@ -228,19 +227,16 @@ in {
 
       sound.enable = true;
 
-      environment.etc = [
-        { target = "asound.conf";
-          source = alsaConf; }
+      environment.etc = {
+        "asound.conf".source = alsaConf;
 
-        { target = "pulse/daemon.conf";
-          source = writeText "daemon.conf" (lib.generators.toKeyValue {} cfg.daemon.config); }
+        "pulse/daemon.conf".source = writeText "daemon.conf"
+          (lib.generators.toKeyValue {} cfg.daemon.config);
 
-        { target = "openal/alsoft.conf";
-          source = writeText "alsoft.conf" "drivers=pulse"; }
+        "openal/alsoft.conf".source = writeText "alsoft.conf" "drivers=pulse";
 
-        { target = "libao.conf";
-          source = writeText "libao.conf" "default_driver=pulse"; }
-      ];
+        "libao.conf".source = writeText "libao.conf" "default_driver=pulse";
+      };
 
       # Disable flat volumes to enable relative ones
       hardware.pulseaudio.daemon.config.flat-volumes = mkDefault "no";
@@ -275,9 +271,8 @@ in {
     })
 
     (mkIf nonSystemWide {
-      environment.etc = singleton {
-        target = "pulse/default.pa";
-        source = myConfigFile;
+      environment.etc = {
+        "pulse/default.pa".source = myConfigFile;
       };
       systemd.user = {
         services.pulseaudio = {
diff --git a/nixos/modules/config/swap.nix b/nixos/modules/config/swap.nix
index fed3fa3bc7c..d0fc0d4a3ea 100644
--- a/nixos/modules/config/swap.nix
+++ b/nixos/modules/config/swap.nix
@@ -58,7 +58,7 @@ let
       device = mkOption {
         example = "/dev/sda3";
         type = types.str;
-        description = "Path of the device.";
+        description = "Path of the device or swap file.";
       };
 
       label = mkOption {
diff --git a/nixos/modules/hardware/opengl.nix b/nixos/modules/hardware/opengl.nix
index 89dc5008df5..28cddea8b79 100644
--- a/nixos/modules/hardware/opengl.nix
+++ b/nixos/modules/hardware/opengl.nix
@@ -43,11 +43,11 @@ in
         description = ''
           Whether to enable OpenGL drivers. This is needed to enable
           OpenGL support in X11 systems, as well as for Wayland compositors
-          like sway, way-cooler and Weston. It is enabled by default
+          like sway and Weston. It is enabled by default
           by the corresponding modules, so you do not usually have to
           set it yourself, only if there is no module for your wayland
-          compositor of choice. See services.xserver.enable,
-          programs.sway.enable, and programs.way-cooler.enable.
+          compositor of choice. See services.xserver.enable and
+          programs.sway.enable.
         '';
         type = types.bool;
         default = false;
diff --git a/nixos/modules/hardware/usb-wwan.nix b/nixos/modules/hardware/usb-wwan.nix
index 2d20421586a..679a6c6497c 100644
--- a/nixos/modules/hardware/usb-wwan.nix
+++ b/nixos/modules/hardware/usb-wwan.nix
@@ -21,6 +21,19 @@ with lib;
   ###### implementation
 
   config = mkIf config.hardware.usbWwan.enable {
+    # Attaches device specific handlers.
     services.udev.packages = with pkgs; [ usb-modeswitch-data ];
+
+    # Triggered by udev, usb-modeswitch creates systemd services via a
+    # template unit in the usb-modeswitch package.
+    systemd.packages = with pkgs; [ usb-modeswitch ];
+
+    # The systemd service requires the usb-modeswitch-data. The
+    # usb-modeswitch package intends to discover this via the
+    # filesystem at /usr/share/usb_modeswitch, and merge it with user
+    # configuration in /etc/usb_modeswitch.d. Configuring the correct
+    # path in the package is difficult, as it would cause a cyclic
+    # dependency.
+    environment.etc."usb_modeswitch.d".source = "${pkgs.usb-modeswitch-data}/share/usb_modeswitch";
   };
 }
diff --git a/nixos/modules/hardware/video/nvidia.nix b/nixos/modules/hardware/video/nvidia.nix
index fcb30187fa2..1794bb4b433 100644
--- a/nixos/modules/hardware/video/nvidia.nix
+++ b/nixos/modules/hardware/video/nvidia.nix
@@ -198,10 +198,11 @@ in
     # Create /dev/nvidia-uvm when the nvidia-uvm module is loaded.
     services.udev.extraRules =
       ''
-        KERNEL=="nvidia", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidiactl c $(grep nvidia-frontend /proc/devices | cut -d \  -f 1) 255'"
-        KERNEL=="nvidia_modeset", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia-modeset c $(grep nvidia-frontend /proc/devices | cut -d \  -f 1) 254'"
-        KERNEL=="card*", SUBSYSTEM=="drm", DRIVERS=="nvidia", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia%n c $(grep nvidia-frontend /proc/devices | cut -d \  -f 1) %n'"
-        KERNEL=="nvidia_uvm", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia-uvm c $(grep nvidia-uvm /proc/devices | cut -d \  -f 1) 0'"
+        KERNEL=="nvidia", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidiactl c $$(grep nvidia-frontend /proc/devices | cut -d \  -f 1) 255'"
+        KERNEL=="nvidia_modeset", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia-modeset c $$(grep nvidia-frontend /proc/devices | cut -d \  -f 1) 254'"
+        KERNEL=="card*", SUBSYSTEM=="drm", DRIVERS=="nvidia", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia%n c $$(grep nvidia-frontend /proc/devices | cut -d \  -f 1) %n'"
+        KERNEL=="nvidia_uvm", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia-uvm c $$(grep nvidia-uvm /proc/devices | cut -d \  -f 1) 0'"
+        KERNEL=="nvidia_uvm", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia-uvm-tools c $$(grep nvidia-uvm /proc/devices | cut -d \  -f 1) 0'"
       '';
 
     boot.blacklistedKernelModules = [ "nouveau" "nvidiafb" ];
diff --git a/nixos/modules/i18n/input-method/ibus.nix b/nixos/modules/i18n/input-method/ibus.nix
index 810e1643d31..a3d97619fc4 100644
--- a/nixos/modules/i18n/input-method/ibus.nix
+++ b/nixos/modules/i18n/input-method/ibus.nix
@@ -64,6 +64,8 @@ in
     # Without dconf enabled it is impossible to use IBus
     programs.dconf.enable = true;
 
+    programs.dconf.profiles.ibus = "${ibusPackage}/etc/dconf/profile/ibus";
+
     services.dbus.packages = [
       ibusAutostart
     ];
diff --git a/nixos/modules/installer/cd-dvd/system-tarball-pc.nix b/nixos/modules/installer/cd-dvd/system-tarball-pc.nix
index bf8b7deb59e..f2af7dcde3d 100644
--- a/nixos/modules/installer/cd-dvd/system-tarball-pc.nix
+++ b/nixos/modules/installer/cd-dvd/system-tarball-pc.nix
@@ -122,11 +122,10 @@ in
 
   /* fake entry, just to have a happy stage-1. Users
      may boot without having stage-1 though */
-  fileSystems = [
+  fileSystems.fake =
     { mountPoint = "/";
       device = "/dev/something";
-      }
-  ];
+    };
 
   nixpkgs.config = {
     packageOverrides = p: {
diff --git a/nixos/modules/installer/cd-dvd/system-tarball-sheevaplug.nix b/nixos/modules/installer/cd-dvd/system-tarball-sheevaplug.nix
index 90a5128c02a..8408f56f94f 100644
--- a/nixos/modules/installer/cd-dvd/system-tarball-sheevaplug.nix
+++ b/nixos/modules/installer/cd-dvd/system-tarball-sheevaplug.nix
@@ -117,11 +117,10 @@ in
 
   /* fake entry, just to have a happy stage-1. Users
      may boot without having stage-1 though */
-  fileSystems = [
+  fileSystems.fake =
     { mountPoint = "/";
       device = "/dev/something";
-      }
-  ];
+    };
 
   services.mingetty = {
     # Some more help text.
diff --git a/nixos/modules/installer/cd-dvd/system-tarball.nix b/nixos/modules/installer/cd-dvd/system-tarball.nix
index b84096861f5..58098c45535 100644
--- a/nixos/modules/installer/cd-dvd/system-tarball.nix
+++ b/nixos/modules/installer/cd-dvd/system-tarball.nix
@@ -41,7 +41,7 @@ in
 
     # In stage 1 of the boot, mount the CD/DVD as the root FS by label
     # so that we don't need to know its device.
-    fileSystems = [ ];
+    fileSystems = { };
 
     # boot.initrd.availableKernelModules = [ "mvsdio" "reiserfs" "ext3" "ext4" ];
 
diff --git a/nixos/modules/installer/tools/nix-fallback-paths.nix b/nixos/modules/installer/tools/nix-fallback-paths.nix
index d7149b35d4c..c2f2578733b 100644
--- a/nixos/modules/installer/tools/nix-fallback-paths.nix
+++ b/nixos/modules/installer/tools/nix-fallback-paths.nix
@@ -1,6 +1,6 @@
 {
-  x86_64-linux = "/nix/store/6chjfy4j6hjwj5f8zcbbdg02i21x1qsi-nix-2.3.1";
-  i686-linux = "/nix/store/xa8z7fwszjjm4kiwrxfc8xv9c1pzzm7a-nix-2.3.1";
-  aarch64-linux = "/nix/store/8cac1ivcnchlpzmdjby2f71l1fwpnymr-nix-2.3.1";
-  x86_64-darwin = "/nix/store/6639l9815ggdnb4aka22qcjy7p8w4hb9-nix-2.3.1";
+  x86_64-linux = "/nix/store/0q5qnh10m2sfrriszc1ysmggw659q6qm-nix-2.3.2";
+  i686-linux = "/nix/store/i7ad7r5d8a5b3l22hg4a1im2qq05y6vd-nix-2.3.2";
+  aarch64-linux = "/nix/store/bv06pavfw0dbqzr8w3l7s71nx27gnxa0-nix-2.3.2";
+  x86_64-darwin = "/nix/store/x6mnl1nij7y4v5ihlplr4k937ayr403r-nix-2.3.2";
 }
diff --git a/nixos/modules/installer/tools/nixos-install.sh b/nixos/modules/installer/tools/nixos-install.sh
index 8685cb345e1..a3ff3fe2c0c 100644
--- a/nixos/modules/installer/tools/nixos-install.sh
+++ b/nixos/modules/installer/tools/nixos-install.sh
@@ -14,6 +14,8 @@ extraBuildFlags=()
 mountPoint=/mnt
 channelPath=
 system=
+verbosity=()
+buildLogs=
 
 while [ "$#" -gt 0 ]; do
     i="$1"; shift 1
@@ -55,6 +57,12 @@ while [ "$#" -gt 0 ]; do
         --debug)
             set -x
             ;;
+        -v*|--verbose)
+            verbosity+=("$i")
+            ;;
+        -L|--print-build-logs)
+            buildLogs="$i"
+            ;;
         *)
             echo "$0: unknown option \`$i'"
             exit 1
@@ -94,7 +102,7 @@ if [[ -z $system ]]; then
     outLink="$tmpdir/system"
     nix build --out-link "$outLink" --store "$mountPoint" "${extraBuildFlags[@]}" \
         --extra-substituters "$sub" \
-        -f '<nixpkgs/nixos>' system -I "nixos-config=$NIXOS_CONFIG"
+        -f '<nixpkgs/nixos>' system -I "nixos-config=$NIXOS_CONFIG" ${verbosity[@]} ${buildLogs}
     system=$(readlink -f $outLink)
 fi
 
@@ -103,7 +111,7 @@ fi
 # a progress bar.
 nix-env --store "$mountPoint" "${extraBuildFlags[@]}" \
         --extra-substituters "$sub" \
-        -p $mountPoint/nix/var/nix/profiles/system --set "$system"
+        -p $mountPoint/nix/var/nix/profiles/system --set "$system" ${verbosity[@]}
 
 # Copy the NixOS/Nixpkgs sources to the target as the initial contents
 # of the NixOS channel.
@@ -115,7 +123,8 @@ if [[ -z $noChannelCopy ]]; then
         echo "copying channel..."
         mkdir -p $mountPoint/nix/var/nix/profiles/per-user/root
         nix-env --store "$mountPoint" "${extraBuildFlags[@]}" --extra-substituters "$sub" \
-                -p $mountPoint/nix/var/nix/profiles/per-user/root/channels --set "$channelPath" --quiet
+                -p $mountPoint/nix/var/nix/profiles/per-user/root/channels --set "$channelPath" --quiet \
+                ${verbosity[@]}
         install -m 0700 -d $mountPoint/root/.nix-defexpr
         ln -sfn /nix/var/nix/profiles/per-user/root/channels $mountPoint/root/.nix-defexpr/channels
     fi
diff --git a/nixos/modules/installer/tools/tools.nix b/nixos/modules/installer/tools/tools.nix
index e4db39b5c81..5df9c23e6b6 100644
--- a/nixos/modules/installer/tools/tools.nix
+++ b/nixos/modules/installer/tools/tools.nix
@@ -159,10 +159,12 @@ in
         #   extraGroups = [ "wheel" ]; # Enable ‘sudo’ for the user.
         # };
 
-        # This value determines the NixOS release with which your system is to be
-        # compatible, in order to avoid breaking some software such as database
-        # servers. You should change this only after NixOS release notes say you
-        # should.
+        # This value determines the NixOS release from which the default
+        # settings for stateful data, like file locations and database versions,
+        # on your system were taken. It's perfectly fine and recommended to leave
+        # this value at the release version of the first install of this system.
+        # Before changing this value read the documentation for this option
+        # (e.g. man configuration.nix or on https://nixos.org/nixos/options.html).
         system.stateVersion = "${config.system.nixos.release}"; # Did you read the comment?
 
       }
diff --git a/nixos/modules/misc/documentation.nix b/nixos/modules/misc/documentation.nix
index 820553270e3..d09afadd609 100644
--- a/nixos/modules/misc/documentation.nix
+++ b/nixos/modules/misc/documentation.nix
@@ -1,4 +1,4 @@
-{ config, lib, pkgs, baseModules, extraModules, modules, ... }:
+{ config, lib, pkgs, baseModules, extraModules, modules, modulesPath, ... }:
 
 with lib;
 
@@ -22,7 +22,10 @@ let
         scrubbedEval = evalModules {
           modules = [ { nixpkgs.localSystem = config.nixpkgs.localSystem; } ] ++ manualModules;
           args = (config._module.args) // { modules = [ ]; };
-          specialArgs = { pkgs = scrubDerivations "pkgs" pkgs; };
+          specialArgs = {
+            pkgs = scrubDerivations "pkgs" pkgs;
+            inherit modulesPath;
+          };
         };
         scrubDerivations = namePrefix: pkgSet: mapAttrs
           (name: value:
diff --git a/nixos/modules/misc/version.nix b/nixos/modules/misc/version.nix
index 0540b493003..ddbd3963cc5 100644
--- a/nixos/modules/misc/version.nix
+++ b/nixos/modules/misc/version.nix
@@ -61,11 +61,18 @@ in
         configuration defaults in a way incompatible with stateful
         data. For instance, if the default version of PostgreSQL
         changes, the new version will probably be unable to read your
-        existing databases. To prevent such breakage, you can set the
+        existing databases. To prevent such breakage, you should set the
         value of this option to the NixOS release with which you want
-        to be compatible. The effect is that NixOS will option
+        to be compatible. The effect is that NixOS will use
         defaults corresponding to the specified release (such as using
         an older version of PostgreSQL).
+        It's perfectly fine and recommended to leave this value at the
+        release version of the first install of this system.
+        Changing this option will not upgrade your system. In fact, it
+        is meant to stay constant exactly when you upgrade your system.
+        You should only bump this option if you are sure that you can
+        or have migrated all state on your system that is affected
+        by this option.
       '';
     };
 
@@ -84,8 +91,8 @@ in
       # These defaults are set here rather than up there so that
       # changing them would not rebuild the manual
       version = mkDefault (cfg.release + cfg.versionSuffix);
-      revision      = mkIf (pathIsDirectory gitRepo) (mkDefault            gitCommitId);
-      versionSuffix = mkIf (pathIsDirectory gitRepo) (mkDefault (".git." + gitCommitId));
+      revision      = mkIf (pathExists gitRepo) (mkDefault            gitCommitId);
+      versionSuffix = mkIf (pathExists gitRepo) (mkDefault (".git." + gitCommitId));
     };
 
     # Generate /etc/os-release.  See
diff --git a/nixos/modules/module-list.nix b/nixos/modules/module-list.nix
index 914f9a878b0..eadf1d2d89b 100644
--- a/nixos/modules/module-list.nix
+++ b/nixos/modules/module-list.nix
@@ -95,6 +95,7 @@
   ./programs/adb.nix
   ./programs/atop.nix
   ./programs/autojump.nix
+  ./programs/bandwhich.nix
   ./programs/bash/bash.nix
   ./programs/bcc.nix
   ./programs/browserpass.nix
@@ -126,6 +127,7 @@
   ./programs/java.nix
   ./programs/kbdlight.nix
   ./programs/less.nix
+  ./programs/liboping.nix
   ./programs/light.nix
   ./programs/mosh.nix
   ./programs/mininet.nix
@@ -151,13 +153,13 @@
   ./programs/system-config-printer.nix
   ./programs/thefuck.nix
   ./programs/tmux.nix
+  ./programs/traceroute.nix
   ./programs/tsm-client.nix
   ./programs/udevil.nix
   ./programs/usbtop.nix
   ./programs/venus.nix
   ./programs/vim.nix
   ./programs/wavemon.nix
-  ./programs/way-cooler.nix
   ./programs/waybar.nix
   ./programs/wireshark.nix
   ./programs/x2goserver.nix
@@ -576,6 +578,7 @@
   ./services/networking/connman.nix
   ./services/networking/consul.nix
   ./services/networking/coredns.nix
+  ./services/networking/corerad.nix
   ./services/networking/coturn.nix
   ./services/networking/dante.nix
   ./services/networking/ddclient.nix
@@ -691,6 +694,8 @@
   ./services/networking/skydns.nix
   ./services/networking/shadowsocks.nix
   ./services/networking/shairport-sync.nix
+  ./services/networking/shorewall.nix
+  ./services/networking/shorewall6.nix
   ./services/networking/shout.nix
   ./services/networking/sniproxy.nix
   ./services/networking/smokeping.nix
@@ -732,6 +737,7 @@
   ./services/networking/wicd.nix
   ./services/networking/wireguard.nix
   ./services/networking/wpa_supplicant.nix
+  ./services/networking/xandikos.nix
   ./services/networking/xinetd.nix
   ./services/networking/xl2tpd.nix
   ./services/networking/xrdp.nix
@@ -803,6 +809,7 @@
   ./services/web-apps/gotify-server.nix
   ./services/web-apps/icingaweb2/icingaweb2.nix
   ./services/web-apps/icingaweb2/module-monitoring.nix
+  ./services/web-apps/ihatemoney
   ./services/web-apps/limesurvey.nix
   ./services/web-apps/mattermost.nix
   ./services/web-apps/mediawiki.nix
@@ -851,7 +858,7 @@
   ./services/x11/extra-layouts.nix
   ./services/x11/clight.nix
   ./services/x11/colord.nix
-  ./services/x11/compton.nix
+  ./services/x11/picom.nix
   ./services/x11/unclutter.nix
   ./services/x11/unclutter-xfixes.nix
   ./services/x11/desktop-managers/default.nix
@@ -865,7 +872,6 @@
   ./services/x11/display-managers/xpra.nix
   ./services/x11/fractalart.nix
   ./services/x11/hardware/libinput.nix
-  ./services/x11/hardware/multitouch.nix
   ./services/x11/hardware/synaptics.nix
   ./services/x11/hardware/wacom.nix
   ./services/x11/hardware/digimend.nix
diff --git a/nixos/modules/programs/bandwhich.nix b/nixos/modules/programs/bandwhich.nix
new file mode 100644
index 00000000000..5413044f461
--- /dev/null
+++ b/nixos/modules/programs/bandwhich.nix
@@ -0,0 +1,29 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let cfg = config.programs.bandwhich;
+in {
+  meta.maintainers = with maintainers; [ filalex77 ];
+
+  options = {
+    programs.bandwhich = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Whether to add bandwhich to the global environment and configure a
+          setcap wrapper for it.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = with pkgs; [ bandwhich ];
+    security.wrappers.bandwhich = {
+      source = "${pkgs.bandwhich}/bin/bandwhich";
+      capabilities = "cap_net_raw,cap_net_admin+ep";
+    };
+  };
+}
diff --git a/nixos/modules/programs/dconf.nix b/nixos/modules/programs/dconf.nix
index e0e2ffd80cf..6702e8efd1c 100644
--- a/nixos/modules/programs/dconf.nix
+++ b/nixos/modules/programs/dconf.nix
@@ -6,7 +6,10 @@ let
   cfg = config.programs.dconf;
 
   mkDconfProfile = name: path:
-    { source = path; target = "dconf/profile/${name}"; };
+    {
+      name = "dconf/profile/${name}";
+      value.source = path;
+    };
 
 in
 {
@@ -29,8 +32,8 @@ in
   ###### implementation
 
   config = mkIf (cfg.profiles != {} || cfg.enable) {
-    environment.etc = optionals (cfg.profiles != {})
-      (mapAttrsToList mkDconfProfile cfg.profiles);
+    environment.etc = optionalAttrs (cfg.profiles != {})
+      (mapAttrs' mkDconfProfile cfg.profiles);
 
     services.dbus.packages = [ pkgs.dconf ];
 
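A minimal sketch of the attrset-based profiles handling this hunk switches to, assuming programs.dconf.profiles maps a profile name to a profile file (as mkDconfProfile suggests):

  { pkgs, ... }:
  {
    programs.dconf.enable = true;
    # Produces /etc/dconf/profile/user via the mapAttrs' call above.
    programs.dconf.profiles.user = pkgs.writeText "dconf-user-profile" ''
      user-db:user
      system-db:local
    '';
  }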
diff --git a/nixos/modules/programs/liboping.nix b/nixos/modules/programs/liboping.nix
new file mode 100644
index 00000000000..4e4c235ccde
--- /dev/null
+++ b/nixos/modules/programs/liboping.nix
@@ -0,0 +1,22 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.programs.liboping;
+in {
+  options.programs.liboping = {
+    enable = mkEnableOption "liboping";
+  };
+  config = mkIf cfg.enable {
+    environment.systemPackages = with pkgs; [ liboping ];
+    security.wrappers = mkMerge (map (
+      exec: {
+        "${exec}" = {
+          source = "${pkgs.liboping}/bin/${exec}";
+          capabilities = "cap_net_raw+p";
+        };
+      }
+    ) [ "oping" "noping" ]);
+  };
+}
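Usage sketch for the module above, assuming the wrapper directory is on PATH as usual:

  { ... }:
  {
    # oping and noping get cap_net_raw wrappers, so unprivileged users can
    # send ICMP probes without a setuid binary.
    programs.liboping.enable = true;
  }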
diff --git a/nixos/modules/programs/screen.nix b/nixos/modules/programs/screen.nix
index 4fd800dbae7..728a0eb8cea 100644
--- a/nixos/modules/programs/screen.nix
+++ b/nixos/modules/programs/screen.nix
@@ -27,6 +27,7 @@ in
     environment.etc.screenrc.text = cfg.screenrc;
 
     environment.systemPackages = [ pkgs.screen ];
+    security.pam.services.screen = {};
   };
 
 }
diff --git a/nixos/modules/programs/shadow.nix b/nixos/modules/programs/shadow.nix
index 7eaf79d864e..fc352795c01 100644
--- a/nixos/modules/programs/shadow.nix
+++ b/nixos/modules/programs/shadow.nix
@@ -76,22 +76,18 @@ in
         config.users.defaultUserShell;
 
     environment.etc =
-      [ { # /etc/login.defs: global configuration for pwdutils.  You
-          # cannot login without it!
-          source = pkgs.writeText "login.defs" loginDefs;
-          target = "login.defs";
-        }
-
-        { # /etc/default/useradd: configuration for useradd.
-          source = pkgs.writeText "useradd"
-            ''
-              GROUP=100
-              HOME=/home
-              SHELL=${utils.toShellPath config.users.defaultUserShell}
-            '';
-          target = "default/useradd";
-        }
-      ];
+      { # /etc/login.defs: global configuration for pwdutils.  You
+        # cannot login without it!
+        "login.defs".source = pkgs.writeText "login.defs" loginDefs;
+
+        # /etc/default/useradd: configuration for useradd.
+        "default/useradd".source = pkgs.writeText "useradd"
+          ''
+            GROUP=100
+            HOME=/home
+            SHELL=${utils.toShellPath config.users.defaultUserShell}
+          '';
+      };
 
     security.pam.services =
       { chsh = { rootOK = true; };
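The same migration recurs throughout this change set: environment.etc moves from a list of { source; target; } entries to an attribute set keyed by target path. A minimal sketch of the new form:

  { pkgs, ... }:
  {
    # Equivalent to the old { source = ...; target = "default/useradd"; } entry.
    environment.etc."default/useradd".source =
      pkgs.writeText "useradd" "HOME=/home";
  }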
diff --git a/nixos/modules/programs/sway.nix b/nixos/modules/programs/sway.nix
index e2a4018e902..33e252be45f 100644
--- a/nixos/modules/programs/sway.nix
+++ b/nixos/modules/programs/sway.nix
@@ -28,6 +28,7 @@ let
 
   swayPackage = pkgs.sway.override {
     extraSessionCommands = cfg.extraSessionCommands;
+    extraOptions = cfg.extraOptions;
     withBaseWrapper = cfg.wrapperFeatures.base;
     withGtkWrapper = cfg.wrapperFeatures.gtk;
   };
@@ -67,6 +68,21 @@ in {
       '';
     };
 
+    extraOptions = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      example = [
+        "--verbose"
+        "--debug"
+        "--unsupported-gpu"
+        "--my-next-gpu-wont-be-nvidia"
+      ];
+      description = ''
+        Command line arguments passed to Sway at launch. Please DO NOT
+        report issues if you use an unsupported GPU (proprietary drivers).
+      '';
+    };
+
     extraPackages = mkOption {
       type = with types; listOf package;
       default = with pkgs; [
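Usage sketch for the new extraOptions option above:

  { ... }:
  {
    programs.sway.enable = true;
    # Extra flags handed to the sway binary by the wrapper.
    programs.sway.extraOptions = [ "--verbose" ];
  }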
diff --git a/nixos/modules/programs/traceroute.nix b/nixos/modules/programs/traceroute.nix
new file mode 100644
index 00000000000..4eb0be3f0e0
--- /dev/null
+++ b/nixos/modules/programs/traceroute.nix
@@ -0,0 +1,26 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.programs.traceroute;
+in {
+  options = {
+    programs.traceroute = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Whether to configure a setcap wrapper for traceroute.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    security.wrappers.traceroute = {
+      source = "${pkgs.traceroute}/bin/traceroute";
+      capabilities = "cap_net_raw+p";
+    };
+  };
+}
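Usage sketch for the new module above:

  { ... }:
  {
    # Only the cap_net_raw wrapper is created; the wrapped binary lands in
    # /run/wrappers/bin, which is on PATH by default.
    programs.traceroute.enable = true;
  }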
diff --git a/nixos/modules/programs/way-cooler.nix b/nixos/modules/programs/way-cooler.nix
deleted file mode 100644
index f27bd42bd76..00000000000
--- a/nixos/modules/programs/way-cooler.nix
+++ /dev/null
@@ -1,78 +0,0 @@
-{ config, pkgs, lib, ... }:
-
-with lib;
-
-let
-  cfg = config.programs.way-cooler;
-  way-cooler = pkgs.way-cooler;
-
-  wcWrapped = pkgs.writeShellScriptBin "way-cooler" ''
-    ${cfg.extraSessionCommands}
-    exec ${pkgs.dbus}/bin/dbus-run-session ${way-cooler}/bin/way-cooler
-  '';
-  wcJoined = pkgs.symlinkJoin {
-    name = "way-cooler-wrapped";
-    paths = [ wcWrapped way-cooler ];
-  };
-  configFile = readFile "${way-cooler}/etc/way-cooler/init.lua";
-  spawnBar = ''
-    util.program.spawn_at_startup("lemonbar");
-  '';
-in
-{
-  options.programs.way-cooler = {
-    enable = mkEnableOption "way-cooler";
-
-    extraSessionCommands = mkOption {
-      default     = "";
-      type        = types.lines;
-      example = ''
-        export XKB_DEFAULT_LAYOUT=us,de
-        export XKB_DEFAULT_VARIANT=,nodeadkeys
-        export XKB_DEFAULT_OPTIONS=grp:caps_toggle,
-      '';
-      description = ''
-        Shell commands executed just before way-cooler is started.
-      '';
-    };
-
-    extraPackages = mkOption {
-      type = with types; listOf package;
-      default = with pkgs; [
-        westonLite xwayland dmenu
-      ];
-      example = literalExample ''
-        with pkgs; [
-          westonLite xwayland dmenu
-        ]
-      '';
-      description = ''
-        Extra packages to be installed system wide.
-      '';
-    };
-
-    enableBar = mkOption {
-      type = types.bool;
-      default = true;
-      description = ''
-        Whether to enable an unofficial bar.
-      '';
-    };
-  };
-
-  config = mkIf cfg.enable {
-    environment.systemPackages = [ wcJoined ] ++ cfg.extraPackages;
-
-    security.pam.services.wc-lock = {};
-    environment.etc."way-cooler/init.lua".text = ''
-      ${configFile}
-      ${optionalString cfg.enableBar spawnBar}
-    '';
-
-    hardware.opengl.enable = mkDefault true;
-    fonts.enableDefaultFonts = mkDefault true;
-    programs.dconf.enable = mkDefault true;
-  };
-
-  meta.maintainers = with maintainers; [ gnidorah ];
-}
diff --git a/nixos/modules/rename.nix b/nixos/modules/rename.nix
index 7109ab5a109..26de8a18d92 100644
--- a/nixos/modules/rename.nix
+++ b/nixos/modules/rename.nix
@@ -27,6 +27,13 @@ with lib;
     (mkRemovedOptionModule [ "services.osquery" ] "The osquery module has been removed")
     (mkRemovedOptionModule [ "services.fourStore" ] "The fourStore module has been removed")
     (mkRemovedOptionModule [ "services.fourStoreEndpoint" ] "The fourStoreEndpoint module has been removed")
+    (mkRemovedOptionModule [ "programs" "way-cooler" ] ("way-cooler is abandoned by its author: " +
+      "https://way-cooler.org/blog/2020/01/09/way-cooler-post-mortem.html"))
+    (mkRemovedOptionModule [ "services" "xserver" "multitouch" ] ''
+      services.xserver.multitouch (which uses xf86_input_mtrack) has been removed
+      as the underlying package isn't being maintained. Working alternatives are
+      libinput and synaptics.
+    '')
 
     # Do NOT add any option renames here, see top of the file
   ];
diff --git a/nixos/modules/security/duosec.nix b/nixos/modules/security/duosec.nix
index 997328ad9e6..78a82b7154e 100644
--- a/nixos/modules/security/duosec.nix
+++ b/nixos/modules/security/duosec.nix
@@ -25,19 +25,21 @@ let
     accept_env_factor=${boolToStr cfg.acceptEnvFactor}
   '';
 
-  loginCfgFile = optional cfg.ssh.enable
-    { source = pkgs.writeText "login_duo.conf" configFileLogin;
-      mode   = "0600";
-      user   = "sshd";
-      target = "duo/login_duo.conf";
-    };
+  loginCfgFile = optionalAttrs cfg.ssh.enable {
+    "duo/login_duo.conf" =
+      { source = pkgs.writeText "login_duo.conf" configFileLogin;
+        mode   = "0600";
+        user   = "sshd";
+      };
+  };
 
-  pamCfgFile = optional cfg.pam.enable
-    { source = pkgs.writeText "pam_duo.conf" configFilePam;
-      mode   = "0600";
-      user   = "sshd";
-      target = "duo/pam_duo.conf";
-    };
+  pamCfgFile = optionalAttrs cfg.pam.enable {
+    "duo/pam_duo.conf" =
+      { source = pkgs.writeText "pam_duo.conf" configFilePam;
+        mode   = "0600";
+        user   = "sshd";
+      };
+  };
 in
 {
   options = {
@@ -186,7 +188,7 @@ in
      environment.systemPackages = [ pkgs.duo-unix ];
 
      security.wrappers.login_duo.source = "${pkgs.duo-unix.out}/bin/login_duo";
-     environment.etc = loginCfgFile ++ pamCfgFile;
+     environment.etc = loginCfgFile // pamCfgFile;
 
      /* If PAM *and* SSH are enabled, then don't do anything special.
      If PAM isn't used, set the default SSH-only options. */
diff --git a/nixos/modules/security/pam.nix b/nixos/modules/security/pam.nix
index 0adc27c47f0..bfc2a881387 100644
--- a/nixos/modules/security/pam.nix
+++ b/nixos/modules/security/pam.nix
@@ -475,9 +475,9 @@ let
 
   motd = pkgs.writeText "motd" config.users.motd;
 
-  makePAMService = pamService:
-    { source = pkgs.writeText "${pamService.name}.pam" pamService.text;
-      target = "pam.d/${pamService.name}";
+  makePAMService = name: service:
+    { name = "pam.d/${name}";
+      value.source = pkgs.writeText "${name}.pam" service.text;
     };
 
 in
@@ -760,8 +760,7 @@ in
       };
     };
 
-    environment.etc =
-      mapAttrsToList (n: v: makePAMService v) config.security.pam.services;
+    environment.etc = mapAttrs' makePAMService config.security.pam.services;
 
     security.pam.services =
       { other.text =
@@ -777,11 +776,8 @@ in
           '';
 
         # Most of these should be moved to specific modules.
-        cups = {};
-        ftp = {};
         i3lock = {};
         i3lock-color = {};
-        screen = {};
         vlock = {};
         xlock = {};
         xscreensaver = {};
diff --git a/nixos/modules/security/pam_mount.nix b/nixos/modules/security/pam_mount.nix
index 75f58462d13..77e22a96b55 100644
--- a/nixos/modules/security/pam_mount.nix
+++ b/nixos/modules/security/pam_mount.nix
@@ -36,8 +36,7 @@ in
   config = mkIf (cfg.enable || anyPamMount) {
 
     environment.systemPackages = [ pkgs.pam_mount ];
-    environment.etc = [{
-      target = "security/pam_mount.conf.xml";
+    environment.etc."security/pam_mount.conf.xml" = {
       source =
         let
           extraUserVolumes = filterAttrs (n: u: u.cryptHomeLuks != null) config.users.users;
@@ -66,7 +65,7 @@ in
           ${concatStringsSep "\n" cfg.extraVolumes}
           </pam_mount>
           '';
-    }];
+    };
 
   };
 }
diff --git a/nixos/modules/security/rtkit.nix b/nixos/modules/security/rtkit.nix
index f6dda21c600..a7b27cbcf21 100644
--- a/nixos/modules/security/rtkit.nix
+++ b/nixos/modules/security/rtkit.nix
@@ -34,9 +34,8 @@ with lib;
 
     services.dbus.packages = [ pkgs.rtkit ];
 
-    users.users = singleton
-      { name = "rtkit";
-        uid = config.ids.uids.rtkit;
+    users.users.rtkit =
+      { uid = config.ids.uids.rtkit;
         description = "RealtimeKit daemon";
       };
 
diff --git a/nixos/modules/security/sudo.nix b/nixos/modules/security/sudo.nix
index 10ee036be84..d899806ef05 100644
--- a/nixos/modules/security/sudo.nix
+++ b/nixos/modules/security/sudo.nix
@@ -212,7 +212,7 @@ in
 
     security.pam.services.sudo = { sshAgentAuth = true; };
 
-    environment.etc = singleton
+    environment.etc.sudoers =
       { source =
           pkgs.runCommand "sudoers"
           {
@@ -222,7 +222,6 @@ in
           # Make sure that the sudoers file is syntactically valid.
           # (currently disabled - NIXOS-66)
           "${pkgs.buildPackages.sudo}/sbin/visudo -f $src -c && cp $src $out";
-        target = "sudoers";
         mode = "0440";
       };
 
diff --git a/nixos/modules/services/admin/oxidized.nix b/nixos/modules/services/admin/oxidized.nix
index 885eaed1de6..94b44630ba6 100644
--- a/nixos/modules/services/admin/oxidized.nix
+++ b/nixos/modules/services/admin/oxidized.nix
@@ -111,7 +111,7 @@ in
         Restart  = "always";
         WorkingDirectory = cfg.dataDir;
         KillSignal = "SIGKILL";
-        PIDFile = "${cfg.dataDir}.config/oxidized/pid";
+        PIDFile = "${cfg.dataDir}/.config/oxidized/pid";
       };
     };
   };
diff --git a/nixos/modules/services/amqp/rabbitmq.nix b/nixos/modules/services/amqp/rabbitmq.nix
index 697732426cc..35fb49f709a 100644
--- a/nixos/modules/services/amqp/rabbitmq.nix
+++ b/nixos/modules/services/amqp/rabbitmq.nix
@@ -165,7 +165,10 @@ in {
       after = [ "network.target" "epmd.socket" ];
       wants = [ "network.target" "epmd.socket" ];
 
-      path = [ cfg.package pkgs.procps ];
+      path = [
+        cfg.package
+        pkgs.coreutils # mkdir/chown/chmod for preStart
+      ];
 
       environment = {
         RABBITMQ_MNESIA_BASE = "${cfg.dataDir}/mnesia";
diff --git a/nixos/modules/services/audio/mpd.nix b/nixos/modules/services/audio/mpd.nix
index 7932d094197..e20591b5beb 100644
--- a/nixos/modules/services/audio/mpd.nix
+++ b/nixos/modules/services/audio/mpd.nix
@@ -184,19 +184,19 @@ in {
       };
     };
 
-    users.users = optionalAttrs (cfg.user == name) (singleton {
-      inherit uid;
-      inherit name;
-      group = cfg.group;
-      extraGroups = [ "audio" ];
-      description = "Music Player Daemon user";
-      home = "${cfg.dataDir}";
-    });
-
-    users.groups = optionalAttrs (cfg.group == name) (singleton {
-      inherit name;
-      gid = gid;
-    });
+    users.users = optionalAttrs (cfg.user == name) {
+      ${name} = {
+        inherit uid;
+        group = cfg.group;
+        extraGroups = [ "audio" ];
+        description = "Music Player Daemon user";
+        home = "${cfg.dataDir}";
+      };
+    };
+
+    users.groups = optionalAttrs (cfg.group == name) {
+      ${name}.gid = gid;
+    };
   };
 
 }
diff --git a/nixos/modules/services/backup/mysql-backup.nix b/nixos/modules/services/backup/mysql-backup.nix
index dbd5605143f..f58af82773f 100644
--- a/nixos/modules/services/backup/mysql-backup.nix
+++ b/nixos/modules/services/backup/mysql-backup.nix
@@ -84,13 +84,14 @@ in
   };
 
   config = mkIf cfg.enable {
-    users.users = optionalAttrs (cfg.user == defaultUser) (singleton
-      { name = defaultUser;
+    users.users = optionalAttrs (cfg.user == defaultUser) {
+      ${defaultUser} = {
         isSystemUser = true;
         createHome = false;
         home = cfg.location;
         group = "nogroup";
-      });
+      };
+    };
 
     services.mysql.ensureUsers = [{
       name = cfg.user;
diff --git a/nixos/modules/services/cluster/kubernetes/default.nix b/nixos/modules/services/cluster/kubernetes/default.nix
index 3605d036509..3a11a6513a4 100644
--- a/nixos/modules/services/cluster/kubernetes/default.nix
+++ b/nixos/modules/services/cluster/kubernetes/default.nix
@@ -266,8 +266,7 @@ in {
         "d /var/lib/kubernetes 0755 kubernetes kubernetes -"
       ];
 
-      users.users = singleton {
-        name = "kubernetes";
+      users.users.kubernetes = {
         uid = config.ids.uids.kubernetes;
         description = "Kubernetes user";
         extraGroups = [ "docker" ];
diff --git a/nixos/modules/services/cluster/kubernetes/pki.nix b/nixos/modules/services/cluster/kubernetes/pki.nix
index 733479e24c9..4275563f1a3 100644
--- a/nixos/modules/services/cluster/kubernetes/pki.nix
+++ b/nixos/modules/services/cluster/kubernetes/pki.nix
@@ -20,6 +20,7 @@ let
         size = 2048;
     };
     CN = top.masterAddress;
+    hosts = cfg.cfsslAPIExtraSANs;
   });
 
   cfsslAPITokenBaseName = "apitoken.secret";
@@ -66,6 +67,15 @@ in
       type = bool;
     };
 
+    cfsslAPIExtraSANs = mkOption {
+      description = ''
+        Extra X.509 Subject Alternative Names to add to the cfssl API webserver TLS certificate.
+      '';
+      default = [];
+      example = [ "subdomain.example.com" ];
+      type = listOf str;
+    };
+
     genCfsslAPIToken = mkOption {
       description = ''
         Whether to automatically generate cfssl API-token secret,
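Usage sketch for the new cfsslAPIExtraSANs option, assuming the option path is services.kubernetes.pki as in the rest of this module:

  {
    # Extra SAN so the cfssl API certificate stays valid when the master is
    # reached through an alias (the hostname below is only an example).
    services.kubernetes.pki.cfsslAPIExtraSANs = [ "kube-master.example.com" ];
  }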
diff --git a/nixos/modules/services/continuous-integration/buildbot/master.nix b/nixos/modules/services/continuous-integration/buildbot/master.nix
index 9c615fbe885..e3da3092d45 100644
--- a/nixos/modules/services/continuous-integration/buildbot/master.nix
+++ b/nixos/modules/services/continuous-integration/buildbot/master.nix
@@ -222,19 +222,20 @@ in {
   };
 
   config = mkIf cfg.enable {
-    users.groups = optional (cfg.group == "buildbot") {
-      name = "buildbot";
+    users.groups = optionalAttrs (cfg.group == "buildbot") {
+      buildbot = { };
     };
 
-    users.users = optional (cfg.user == "buildbot") {
-      name = "buildbot";
-      description = "Buildbot User.";
-      isNormalUser = true;
-      createHome = true;
-      home = cfg.home;
-      group = cfg.group;
-      extraGroups = cfg.extraGroups;
-      useDefaultShell = true;
+    users.users = optionalAttrs (cfg.user == "buildbot") {
+      buildbot = {
+        description = "Buildbot User.";
+        isNormalUser = true;
+        createHome = true;
+        home = cfg.home;
+        group = cfg.group;
+        extraGroups = cfg.extraGroups;
+        useDefaultShell = true;
+      };
     };
 
     systemd.services.buildbot-master = {
diff --git a/nixos/modules/services/continuous-integration/buildbot/worker.nix b/nixos/modules/services/continuous-integration/buildbot/worker.nix
index 49e04ca3622..52f24b8cee3 100644
--- a/nixos/modules/services/continuous-integration/buildbot/worker.nix
+++ b/nixos/modules/services/continuous-integration/buildbot/worker.nix
@@ -136,19 +136,20 @@ in {
   config = mkIf cfg.enable {
     services.buildbot-worker.workerPassFile = mkDefault (pkgs.writeText "buildbot-worker-password" cfg.workerPass);
 
-    users.groups = optional (cfg.group == "bbworker") {
-      name = "bbworker";
+    users.groups = optionalAttrs (cfg.group == "bbworker") {
+      bbworker = { };
     };
 
-    users.users = optional (cfg.user == "bbworker") {
-      name = "bbworker";
-      description = "Buildbot Worker User.";
-      isNormalUser = true;
-      createHome = true;
-      home = cfg.home;
-      group = cfg.group;
-      extraGroups = cfg.extraGroups;
-      useDefaultShell = true;
+    users.users = optionalAttrs (cfg.user == "bbworker") {
+      bbworker = {
+        description = "Buildbot Worker User.";
+        isNormalUser = true;
+        createHome = true;
+        home = cfg.home;
+        group = cfg.group;
+        extraGroups = cfg.extraGroups;
+        useDefaultShell = true;
+      };
     };
 
     systemd.services.buildbot-worker = {
diff --git a/nixos/modules/services/continuous-integration/buildkite-agent.nix b/nixos/modules/services/continuous-integration/buildkite-agent.nix
index 32f361454bc..418a7bc1a46 100644
--- a/nixos/modules/services/continuous-integration/buildkite-agent.nix
+++ b/nixos/modules/services/continuous-integration/buildkite-agent.nix
@@ -29,6 +29,8 @@ let
     ${concatStringsSep "\n" (mapAttrsToList mkHookEntry (filterAttrs (n: v: v != null) cfg.hooks))}
   '';
 
+  defaultUser = "buildkite-agent";
+
 in
 
 {
@@ -50,12 +52,21 @@ in
       };
 
       runtimePackages = mkOption {
-        default = [ pkgs.bash pkgs.nix ];
-        defaultText = "[ pkgs.bash pkgs.nix ]";
+        default = [ pkgs.bash pkgs.gnutar pkgs.gzip pkgs.git pkgs.nix ];
+        defaultText = "[ pkgs.bash pkgs.gnutar pkgs.gzip pkgs.git pkgs.nix ]";
         description = "Add programs to the buildkite-agent environment";
         type = types.listOf types.package;
       };
 
+      user = mkOption {
+        type = types.str;
+        default = defaultUser;
+        description = ''
+          Set this option when you want to run the buildkite agent as a user
+          other than the default "buildkite-agent" user.
+        '';
+      };
+
       tokenPath = mkOption {
         type = types.path;
         description = ''
@@ -74,13 +85,12 @@ in
         '';
       };
 
-      meta-data = mkOption {
-        type = types.str;
-        default = "";
-        example = "queue=default,docker=true,ruby2=true";
+      tags = mkOption {
+        type = types.attrsOf types.str;
+        default = {};
+        example = { queue = "default"; docker = "true"; ruby2 = "true"; };
         description = ''
-          Meta data for the agent. This is a comma-separated list of
-          <code>key=value</code> pairs.
+          Tags for the agent.
         '';
       };
 
@@ -93,26 +103,20 @@ in
         '';
       };
 
-      openssh =
-        { privateKeyPath = mkOption {
-            type = types.path;
-            description = ''
-              Private agent key.
+      privateSshKeyPath = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        ## maximum care is taken so that secrets (ssh keys and the CI token)
+        ## don't end up in the Nix store.
+        apply = final: if final == null then null else toString final;
 
-              A run-time path to the key file, which is supposed to be provisioned
-              outside of Nix store.
-            '';
-          };
-          publicKeyPath = mkOption {
-            type = types.path;
-            description = ''
-              Public agent key.
-
-              A run-time path to the key file, which is supposed to be provisioned
-              outside of Nix store.
-            '';
-          };
-        };
+        description = ''
+          OpenSSH private key.
+
+          A run-time path to the key file, which is supposed to be provisioned
+          outside of the Nix store.
+      };
 
       hooks = mkHookOptions [
         { name = "checkout";
@@ -181,18 +185,26 @@ in
           instead.
         '';
       };
+
+      shell = mkOption {
+        type = types.str;
+        default = "${pkgs.bash}/bin/bash -e -c";
+        description = ''
+          Command that buildkite-agent 3 will execute when it spawns a shell.
+        '';
+      };
     };
   };
 
   config = mkIf config.services.buildkite-agent.enable {
-    users.users.buildkite-agent =
-      { name = "buildkite-agent";
-        home = cfg.dataDir;
-        createHome = true;
-        description = "Buildkite agent user";
-        extraGroups = [ "keys" ];
-        isSystemUser = true;
-      };
+    users.users.buildkite-agent = mkIf (cfg.user == defaultUser) {
+      name = "buildkite-agent";
+      home = cfg.dataDir;
+      createHome = true;
+      description = "Buildkite agent user";
+      extraGroups = [ "keys" ];
+      isSystemUser = true;
+    };
 
     environment.systemPackages = [ cfg.package ];
 
@@ -210,17 +222,18 @@ in
         ##     don't end up in the Nix store.
         preStart = let
           sshDir = "${cfg.dataDir}/.ssh";
+          tagStr = lib.concatStringsSep "," (lib.mapAttrsToList (name: value: "${name}=${value}") cfg.tags);
         in
-          ''
+          optionalString (cfg.privateSshKeyPath != null) ''
             mkdir -m 0700 -p "${sshDir}"
-            cp -f "${toString cfg.openssh.privateKeyPath}" "${sshDir}/id_rsa"
-            cp -f "${toString cfg.openssh.publicKeyPath}"  "${sshDir}/id_rsa.pub"
-            chmod 600 "${sshDir}"/id_rsa*
-
+            cp -f "${toString cfg.privateSshKeyPath}" "${sshDir}/id_rsa"
+            chmod 600 "${sshDir}"/id_rsa
+          '' + ''
             cat > "${cfg.dataDir}/buildkite-agent.cfg" <<EOF
             token="$(cat ${toString cfg.tokenPath})"
             name="${cfg.name}"
-            meta-data="${cfg.meta-data}"
+            shell="${cfg.shell}"
+            tags="${tagStr}"
             build-path="${cfg.dataDir}/builds"
             hooks-path="${cfg.hooksPath}"
             ${cfg.extraConfig}
@@ -228,11 +241,14 @@ in
           '';
 
         serviceConfig =
-          { ExecStart = "${pkgs.buildkite-agent}/bin/buildkite-agent start --config /var/lib/buildkite-agent/buildkite-agent.cfg";
-            User = "buildkite-agent";
+          { ExecStart = "${cfg.package}/bin/buildkite-agent start --config /var/lib/buildkite-agent/buildkite-agent.cfg";
+            User = cfg.user;
             RestartSec = 5;
             Restart = "on-failure";
             TimeoutSec = 10;
+            # set a long timeout to give buildkite-agent a chance to finish current builds
+            TimeoutStopSec = "2 min";
+            KillMode = "mixed";
           };
       };
 
@@ -246,8 +262,11 @@ in
     ];
   };
   imports = [
-    (mkRenamedOptionModule [ "services" "buildkite-agent" "token" ]                [ "services" "buildkite-agent" "tokenPath" ])
-    (mkRenamedOptionModule [ "services" "buildkite-agent" "openssh" "privateKey" ] [ "services" "buildkite-agent" "openssh" "privateKeyPath" ])
-    (mkRenamedOptionModule [ "services" "buildkite-agent" "openssh" "publicKey" ]  [ "services" "buildkite-agent" "openssh" "publicKeyPath" ])
+    (mkRenamedOptionModule [ "services" "buildkite-agent" "token" ]                    [ "services" "buildkite-agent" "tokenPath" ])
+    (mkRenamedOptionModule [ "services" "buildkite-agent" "openssh" "privateKey" ]     [ "services" "buildkite-agent" "privateSshKeyPath" ])
+    (mkRenamedOptionModule [ "services" "buildkite-agent" "openssh" "privateKeyPath" ] [ "services" "buildkite-agent" "privateSshKeyPath" ])
+    (mkRemovedOptionModule [ "services" "buildkite-agent" "openssh" "publicKey" ]      "SSH public keys aren't necessary to clone private repos.")
+    (mkRemovedOptionModule [ "services" "buildkite-agent" "openssh" "publicKeyPath" ]  "SSH public keys aren't necessary to clone private repos.")
+    (mkRenamedOptionModule [ "services" "buildkite-agent" "meta-data"]                 [ "services" "buildkite-agent" "tags" ])
   ];
 }
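A sketch tying the renamed and added agent options together (the paths below are example values, provisioned outside the Nix store):

  { ... }:
  {
    services.buildkite-agent = {
      enable = true;
      tokenPath = "/var/keys/buildkite-token";
      privateSshKeyPath = "/var/keys/buildkite-ssh";
      # tags replaces the old comma-separated meta-data string.
      tags = { queue = "default"; nix = "true"; };
    };
  }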
diff --git a/nixos/modules/services/continuous-integration/gocd-agent/default.nix b/nixos/modules/services/continuous-integration/gocd-agent/default.nix
index 8126f27c2b0..2e9e1c94857 100644
--- a/nixos/modules/services/continuous-integration/gocd-agent/default.nix
+++ b/nixos/modules/services/continuous-integration/gocd-agent/default.nix
@@ -135,20 +135,20 @@ in {
   };
 
   config = mkIf cfg.enable {
-    users.groups = optional (cfg.group == "gocd-agent") {
-      name = "gocd-agent";
-      gid = config.ids.gids.gocd-agent;
+    users.groups = optionalAttrs (cfg.group == "gocd-agent") {
+      gocd-agent.gid = config.ids.gids.gocd-agent;
     };
 
-    users.users = optional (cfg.user == "gocd-agent") {
-      name = "gocd-agent";
-      description = "gocd-agent user";
-      createHome = true;
-      home = cfg.workDir;
-      group = cfg.group;
-      extraGroups = cfg.extraGroups;
-      useDefaultShell = true;
-      uid = config.ids.uids.gocd-agent;
+    users.users = optionalAttrs (cfg.user == "gocd-agent") {
+      gocd-agent = {
+        description = "gocd-agent user";
+        createHome = true;
+        home = cfg.workDir;
+        group = cfg.group;
+        extraGroups = cfg.extraGroups;
+        useDefaultShell = true;
+        uid = config.ids.uids.gocd-agent;
+      };
     };
 
     systemd.services.gocd-agent = {
diff --git a/nixos/modules/services/continuous-integration/gocd-server/default.nix b/nixos/modules/services/continuous-integration/gocd-server/default.nix
index 8f177da129e..4fa41ac49ed 100644
--- a/nixos/modules/services/continuous-integration/gocd-server/default.nix
+++ b/nixos/modules/services/continuous-integration/gocd-server/default.nix
@@ -143,20 +143,20 @@ in {
   };
 
   config = mkIf cfg.enable {
-    users.groups = optional (cfg.group == "gocd-server") {
-      name = "gocd-server";
-      gid = config.ids.gids.gocd-server;
+    users.groups = optionalAttrs (cfg.group == "gocd-server") {
+      gocd-server.gid = config.ids.gids.gocd-server;
     };
 
-    users.users = optional (cfg.user == "gocd-server") {
-      name = "gocd-server";
-      description = "gocd-server user";
-      createHome = true;
-      home = cfg.workDir;
-      group = cfg.group;
-      extraGroups = cfg.extraGroups;
-      useDefaultShell = true;
-      uid = config.ids.uids.gocd-server;
+    users.users = optionalAttrs (cfg.user == "gocd-server") {
+      gocd-server = {
+        description = "gocd-server user";
+        createHome = true;
+        home = cfg.workDir;
+        group = cfg.group;
+        extraGroups = cfg.extraGroups;
+        useDefaultShell = true;
+        uid = config.ids.uids.gocd-server;
+      };
     };
 
     systemd.services.gocd-server = {
diff --git a/nixos/modules/services/continuous-integration/hydra/default.nix b/nixos/modules/services/continuous-integration/hydra/default.nix
index 30c5550f71c..8b56207590a 100644
--- a/nixos/modules/services/continuous-integration/hydra/default.nix
+++ b/nixos/modules/services/continuous-integration/hydra/default.nix
@@ -167,7 +167,7 @@ in
 
       buildMachinesFiles = mkOption {
         type = types.listOf types.path;
-        default = [ "/etc/nix/machines" ];
+        default = optional (config.nix.buildMachines != []) "/etc/nix/machines";
         example = [ "/etc/nix/machines" "/var/lib/hydra/provisioner/machines" ];
         description = "List of files containing build machines.";
       };
@@ -333,7 +333,7 @@ in
           IN_SYSTEMD = "1"; # to get log severity levels
         };
         serviceConfig =
-          { ExecStart = "@${cfg.package}/bin/hydra-queue-runner hydra-queue-runner -v --option build-use-substitutes ${boolToString cfg.useSubstitutes}";
+          { ExecStart = "@${cfg.package}/bin/hydra-queue-runner hydra-queue-runner -v";
             ExecStopPost = "${cfg.package}/bin/hydra-queue-runner --unlock";
             User = "hydra-queue-runner";
             Restart = "always";
diff --git a/nixos/modules/services/continuous-integration/jenkins/default.nix b/nixos/modules/services/continuous-integration/jenkins/default.nix
index 0ec90671388..1477c471f8a 100644
--- a/nixos/modules/services/continuous-integration/jenkins/default.nix
+++ b/nixos/modules/services/continuous-integration/jenkins/default.nix
@@ -150,20 +150,20 @@ in {
       pkgs.dejavu_fonts
     ];
 
-    users.groups = optional (cfg.group == "jenkins") {
-      name = "jenkins";
-      gid = config.ids.gids.jenkins;
+    users.groups = optionalAttrs (cfg.group == "jenkins") {
+      jenkins.gid = config.ids.gids.jenkins;
     };
 
-    users.users = optional (cfg.user == "jenkins") {
-      name = "jenkins";
-      description = "jenkins user";
-      createHome = true;
-      home = cfg.home;
-      group = cfg.group;
-      extraGroups = cfg.extraGroups;
-      useDefaultShell = true;
-      uid = config.ids.uids.jenkins;
+    users.users = optionalAttrs (cfg.user == "jenkins") {
+      jenkins = {
+        description = "jenkins user";
+        createHome = true;
+        home = cfg.home;
+        group = cfg.group;
+        extraGroups = cfg.extraGroups;
+        useDefaultShell = true;
+        uid = config.ids.uids.jenkins;
+      };
     };
 
     systemd.services.jenkins = {
diff --git a/nixos/modules/services/continuous-integration/jenkins/slave.nix b/nixos/modules/services/continuous-integration/jenkins/slave.nix
index 92deabc3dd3..3c0e6f78e74 100644
--- a/nixos/modules/services/continuous-integration/jenkins/slave.nix
+++ b/nixos/modules/services/continuous-integration/jenkins/slave.nix
@@ -50,19 +50,19 @@ in {
   };
 
   config = mkIf (cfg.enable && !masterCfg.enable) {
-    users.groups = optional (cfg.group == "jenkins") {
-      name = "jenkins";
-      gid = config.ids.gids.jenkins;
+    users.groups = optionalAttrs (cfg.group == "jenkins") {
+      jenkins.gid = config.ids.gids.jenkins;
     };
 
-    users.users = optional (cfg.user == "jenkins") {
-      name = "jenkins";
-      description = "jenkins user";
-      createHome = true;
-      home = cfg.home;
-      group = cfg.group;
-      useDefaultShell = true;
-      uid = config.ids.uids.jenkins;
+    users.users = optionalAttrs (cfg.user == "jenkins") {
+      jenkins = {
+        description = "jenkins user";
+        createHome = true;
+        home = cfg.home;
+        group = cfg.group;
+        useDefaultShell = true;
+        uid = config.ids.uids.jenkins;
+      };
     };
   };
 }
diff --git a/nixos/modules/services/databases/cockroachdb.nix b/nixos/modules/services/databases/cockroachdb.nix
index 268fdcc819f..b6f94a4881a 100644
--- a/nixos/modules/services/databases/cockroachdb.nix
+++ b/nixos/modules/services/databases/cockroachdb.nix
@@ -171,17 +171,17 @@ in
 
     environment.systemPackages = [ crdb ];
 
-    users.users = optionalAttrs (cfg.user == "cockroachdb") (singleton
-      { name        = "cockroachdb";
+    users.users = optionalAttrs (cfg.user == "cockroachdb") {
+      cockroachdb = {
         description = "CockroachDB Server User";
         uid         = config.ids.uids.cockroachdb;
         group       = cfg.group;
-      });
+      };
+    };
 
-    users.groups = optionalAttrs (cfg.group == "cockroachdb") (singleton
-      { name = "cockroachdb";
-        gid  = config.ids.gids.cockroachdb;
-      });
+    users.groups = optionalAttrs (cfg.group == "cockroachdb") {
+      cockroachdb.gid = config.ids.gids.cockroachdb;
+    };
 
     networking.firewall.allowedTCPPorts = lib.optionals cfg.openPorts
       [ cfg.http.port cfg.listen.port ];
diff --git a/nixos/modules/services/databases/foundationdb.nix b/nixos/modules/services/databases/foundationdb.nix
index 8f8d0da7c8d..18727acc7c7 100644
--- a/nixos/modules/services/databases/foundationdb.nix
+++ b/nixos/modules/services/databases/foundationdb.nix
@@ -341,17 +341,17 @@ in
 
     environment.systemPackages = [ pkg ];
 
-    users.users = optionalAttrs (cfg.user == "foundationdb") (singleton
-      { name        = "foundationdb";
+    users.users = optionalAttrs (cfg.user == "foundationdb") {
+      foundationdb = {
         description = "FoundationDB User";
         uid         = config.ids.uids.foundationdb;
         group       = cfg.group;
-      });
+      };
+    };
 
-    users.groups = optionalAttrs (cfg.group == "foundationdb") (singleton
-      { name = "foundationdb";
-        gid  = config.ids.gids.foundationdb;
-      });
+    users.groups = optionalAttrs (cfg.group == "foundationdb") {
+      foundationdb.gid = config.ids.gids.foundationdb;
+    };
 
     networking.firewall.allowedTCPPortRanges = mkIf cfg.openFirewall
       [ { from = cfg.listenPortStart;
diff --git a/nixos/modules/services/databases/influxdb.nix b/nixos/modules/services/databases/influxdb.nix
index 2f176a03872..dd5d69b1147 100644
--- a/nixos/modules/services/databases/influxdb.nix
+++ b/nixos/modules/services/databases/influxdb.nix
@@ -182,15 +182,15 @@ in
         '';
     };
 
-    users.users = optional (cfg.user == "influxdb") {
-      name = "influxdb";
-      uid = config.ids.uids.influxdb;
-      description = "Influxdb daemon user";
+    users.users = optionalAttrs (cfg.user == "influxdb") {
+      influxdb = {
+        uid = config.ids.uids.influxdb;
+        description = "Influxdb daemon user";
+      };
     };
 
-    users.groups = optional (cfg.group == "influxdb") {
-      name = "influxdb";
-      gid = config.ids.gids.influxdb;
+    users.groups = optionalAttrs (cfg.group == "influxdb") {
+      influxdb.gid = config.ids.gids.influxdb;
     };
   };
 
diff --git a/nixos/modules/services/databases/memcached.nix b/nixos/modules/services/databases/memcached.nix
index d1dfdb41bf4..89ff957babf 100644
--- a/nixos/modules/services/databases/memcached.nix
+++ b/nixos/modules/services/databases/memcached.nix
@@ -64,10 +64,9 @@ in
 
   config = mkIf config.services.memcached.enable {
 
-    users.users = optional (cfg.user == "memcached") {
-      name = "memcached";
-      description = "Memcached server user";
-      isSystemUser = true;
+    users.users = optionalAttrs (cfg.user == "memcached") {
+      memcached.description = "Memcached server user";
+      memcached.isSystemUser = true;
     };
 
     environment.systemPackages = [ memcached ];
diff --git a/nixos/modules/services/databases/mysql.nix b/nixos/modules/services/databases/mysql.nix
index 6af32700fc7..8d520b82fb5 100644
--- a/nixos/modules/services/databases/mysql.nix
+++ b/nixos/modules/services/databases/mysql.nix
@@ -320,6 +320,8 @@ in
           Type = if hasNotify then "notify" else "simple";
           RuntimeDirectory = "mysqld";
           RuntimeDirectoryMode = "0755";
+          Restart = "on-abort";
+          RestartSec = "5s";
           # The last two environment variables are used for starting Galera clusters
           ExecStart = "${mysql}/bin/mysqld --defaults-file=/etc/my.cnf ${mysqldOptions} $_WSREP_NEW_CLUSTER $_WSREP_START_POSITION";
           ExecStartPost =
diff --git a/nixos/modules/services/databases/neo4j.nix b/nixos/modules/services/databases/neo4j.nix
index 29a83300ec1..09b453e7584 100644
--- a/nixos/modules/services/databases/neo4j.nix
+++ b/nixos/modules/services/databases/neo4j.nix
@@ -650,8 +650,7 @@ in {
 
       environment.systemPackages = [ cfg.package ];
 
-      users.users = singleton {
-        name = "neo4j";
+      users.users.neo4j = {
         uid = config.ids.uids.neo4j;
         description = "Neo4j daemon user";
         home = cfg.directories.home;
diff --git a/nixos/modules/services/databases/openldap.nix b/nixos/modules/services/databases/openldap.nix
index 5bf57a1bf9c..809f61cfa81 100644
--- a/nixos/modules/services/databases/openldap.nix
+++ b/nixos/modules/services/databases/openldap.nix
@@ -259,6 +259,8 @@ in
           ${openldap.out}/bin/slapadd ${configOpts} -l ${dataFile}
         ''}
         chown -R "${cfg.user}:${cfg.group}" "${cfg.dataDir}"
+
+        ${openldap}/bin/slaptest ${configOpts}
       '';
       serviceConfig.ExecStart =
         "${openldap.out}/libexec/slapd -d '${cfg.logLevel}' " +
diff --git a/nixos/modules/services/databases/virtuoso.nix b/nixos/modules/services/databases/virtuoso.nix
index 6ffc44a5274..0cc027cb1d7 100644
--- a/nixos/modules/services/databases/virtuoso.nix
+++ b/nixos/modules/services/databases/virtuoso.nix
@@ -54,9 +54,8 @@ with lib;
 
   config = mkIf cfg.enable {
 
-    users.users = singleton
-      { name = virtuosoUser;
-        uid = config.ids.uids.virtuoso;
+    users.users.${virtuosoUser} =
+      { uid = config.ids.uids.virtuoso;
         description = "virtuoso user";
         home = stateDir;
       };
diff --git a/nixos/modules/services/editors/infinoted.nix b/nixos/modules/services/editors/infinoted.nix
index be366761694..8b997ccbf66 100644
--- a/nixos/modules/services/editors/infinoted.nix
+++ b/nixos/modules/services/editors/infinoted.nix
@@ -111,14 +111,15 @@ in {
   };
 
   config = mkIf (cfg.enable) {
-    users.users = optional (cfg.user == "infinoted")
-      { name = "infinoted";
-        description = "Infinoted user";
-        group = cfg.group;
-        isSystemUser = true;
+    users.users = optionalAttrs (cfg.user == "infinoted")
+      { infinoted = {
+          description = "Infinoted user";
+          group = cfg.group;
+          isSystemUser = true;
+        };
       };
-    users.groups = optional (cfg.group == "infinoted")
-      { name = "infinoted";
+    users.groups = optionalAttrs (cfg.group == "infinoted")
+      { infinoted = { };
       };
 
     systemd.services.infinoted =
diff --git a/nixos/modules/services/hardware/actkbd.nix b/nixos/modules/services/hardware/actkbd.nix
index 4168140b287..daa407ca1f0 100644
--- a/nixos/modules/services/hardware/actkbd.nix
+++ b/nixos/modules/services/hardware/actkbd.nix
@@ -83,7 +83,7 @@ in
 
           See <command>actkbd</command> <filename>README</filename> for documentation.
 
-          The example shows a piece of what <option>sound.enableMediaKeys</option> does when enabled.
+          The example shows a piece of what <option>sound.mediaKeys.enable</option> does when enabled.
         '';
       };
 
diff --git a/nixos/modules/services/hardware/bluetooth.nix b/nixos/modules/services/hardware/bluetooth.nix
index 11d67418a31..dfa39e7f602 100644
--- a/nixos/modules/services/hardware/bluetooth.nix
+++ b/nixos/modules/services/hardware/bluetooth.nix
@@ -74,9 +74,9 @@ in {
 
     environment.systemPackages = [ bluez-bluetooth ];
 
-    environment.etc = singleton {
-      source = pkgs.writeText "main.conf" (generators.toINI { } cfg.config + optionalString (cfg.extraConfig != null) cfg.extraConfig);
-      target = "bluetooth/main.conf";
+    environment.etc."bluetooth/main.conf"= {
+      source = pkgs.writeText "main.conf"
+        (generators.toINI { } cfg.config + optionalString (cfg.extraConfig != null) cfg.extraConfig);
     };
 
     services.udev.packages = [ bluez-bluetooth ];
diff --git a/nixos/modules/services/hardware/sane_extra_backends/brscan4.nix b/nixos/modules/services/hardware/sane_extra_backends/brscan4.nix
index f6ed4e25e9c..6f49a1ab6d4 100644
--- a/nixos/modules/services/hardware/sane_extra_backends/brscan4.nix
+++ b/nixos/modules/services/hardware/sane_extra_backends/brscan4.nix
@@ -67,11 +67,11 @@ in
 {
   options = {
 
-    hardware.sane.brscan4.enable = 
+    hardware.sane.brscan4.enable =
       mkEnableOption "Brother's brscan4 scan backend" // {
       description = ''
         When enabled, will automatically register the "brscan4" sane
-        backend and bring configuration files to their expected location. 
+        backend and bring configuration files to their expected location.
       '';
     };
 
@@ -95,14 +95,11 @@ in
       pkgs.brscan4
     ];
 
-    environment.etc = singleton {
-      target = "opt/brother/scanner/brscan4";
-      source = "${etcFiles}/etc/opt/brother/scanner/brscan4";
-    };
+    environment.etc."opt/brother/scanner/brscan4" =
+      { source = "${etcFiles}/etc/opt/brother/scanner/brscan4"; };
 
     assertions = [
       { assertion = all (x: !(null != x.ip && null != x.nodename)) netDeviceList;
-          
         message = ''
           When describing a network device as part of the attribute list
           `hardware.sane.brscan4.netDevices`, only one of its `ip` or `nodename`
diff --git a/nixos/modules/services/hardware/tcsd.nix b/nixos/modules/services/hardware/tcsd.nix
index 3876280ee6b..68cb5d791aa 100644
--- a/nixos/modules/services/hardware/tcsd.nix
+++ b/nixos/modules/services/hardware/tcsd.nix
@@ -137,15 +137,15 @@ in
       serviceConfig.ExecStart = "${pkgs.trousers}/sbin/tcsd -f -c ${tcsdConf}";
     };
 
-    users.users = optionalAttrs (cfg.user == "tss") (singleton
-      { name = "tss";
+    users.users = optionalAttrs (cfg.user == "tss") {
+      tss = {
         group = "tss";
         uid = config.ids.uids.tss;
-      });
+      };
+    };
 
-    users.groups = optionalAttrs (cfg.group == "tss") (singleton
-      { name = "tss";
-        gid = config.ids.gids.tss;
-      });
+    users.groups = optionalAttrs (cfg.group == "tss") {
+      tss.gid = config.ids.gids.tss;
+    };
   };
 }
diff --git a/nixos/modules/services/hardware/tlp.nix b/nixos/modules/services/hardware/tlp.nix
index adc1881a525..955a6067799 100644
--- a/nixos/modules/services/hardware/tlp.nix
+++ b/nixos/modules/services/hardware/tlp.nix
@@ -103,13 +103,14 @@ in
 
     services.udev.packages = [ tlp ];
 
-    environment.etc = [{ source = confFile;
-                         target = "default/tlp";
-                       }
-                      ] ++ optional enableRDW {
-                        source = "${tlp}/etc/NetworkManager/dispatcher.d/99tlp-rdw-nm";
-                        target = "NetworkManager/dispatcher.d/99tlp-rdw-nm";
-                      };
+    environment.etc =
+      {
+        "default/tlp".source = confFile;
+      } // optionalAttrs enableRDW {
+        "NetworkManager/dispatcher.d/99tlp-rdw-nm" = {
+          source = "${tlp}/etc/NetworkManager/dispatcher.d/99tlp-rdw-nm";
+        };
+      };
 
     environment.systemPackages = [ tlp ];
 
diff --git a/nixos/modules/services/hardware/udev.nix b/nixos/modules/services/hardware/udev.nix
index 50997bb9bb1..168056a475e 100644
--- a/nixos/modules/services/hardware/udev.nix
+++ b/nixos/modules/services/hardware/udev.nix
@@ -281,13 +281,10 @@ in
     boot.kernelParams = mkIf (!config.networking.usePredictableInterfaceNames) [ "net.ifnames=0" ];
 
     environment.etc =
-      [ { source = udevRules;
-          target = "udev/rules.d";
-        }
-        { source = hwdbBin;
-          target = "udev/hwdb.bin";
-        }
-      ];
+      {
+        "udev/rules.d".source = udevRules;
+        "udev/hwdb.bin".source = hwdbBin;
+      };
 
     system.requiredKernelConfig = with config.lib.kernelConfig; [
       (isEnabled "UNIX")
diff --git a/nixos/modules/services/hardware/usbmuxd.nix b/nixos/modules/services/hardware/usbmuxd.nix
index 39bbcaf4627..11a4b0a858f 100644
--- a/nixos/modules/services/hardware/usbmuxd.nix
+++ b/nixos/modules/services/hardware/usbmuxd.nix
@@ -43,15 +43,16 @@ in
 
   config = mkIf cfg.enable {
 
-    users.users = optional (cfg.user == defaultUserGroup) {
-      name = cfg.user;
-      description = "usbmuxd user";
-      group = cfg.group;
-      isSystemUser = true;
+    users.users = optionalAttrs (cfg.user == defaultUserGroup) {
+      ${cfg.user} = {
+        description = "usbmuxd user";
+        group = cfg.group;
+        isSystemUser = true;
+      };
     };
 
-    users.groups = optional (cfg.group == defaultUserGroup) {
-      name = cfg.group;
+    users.groups = optionalAttrs (cfg.group == defaultUserGroup) {
+      ${cfg.group} = { };
     };
 
     # Give usbmuxd permission for Apple devices
diff --git a/nixos/modules/services/logging/awstats.nix b/nixos/modules/services/logging/awstats.nix
index d51c9a5cffa..5939d7808f7 100644
--- a/nixos/modules/services/logging/awstats.nix
+++ b/nixos/modules/services/logging/awstats.nix
@@ -65,6 +65,7 @@ let
             "ValidHTTPCodes" = "404";
           }
         '';
+        description = "Extra configuration to be appendend to awstats.\${name}.conf.";
       };
 
       webService = {
diff --git a/nixos/modules/services/logging/logcheck.nix b/nixos/modules/services/logging/logcheck.nix
index 6d8be5b926d..4296b2270c2 100644
--- a/nixos/modules/services/logging/logcheck.nix
+++ b/nixos/modules/services/logging/logcheck.nix
@@ -213,13 +213,14 @@ in
         mapAttrsToList writeIgnoreRule cfg.ignore
         ++ mapAttrsToList writeIgnoreCronRule cfg.ignoreCron;
 
-    users.users = optionalAttrs (cfg.user == "logcheck") (singleton
-      { name = "logcheck";
+    users.users = optionalAttrs (cfg.user == "logcheck") {
+      logcheck = {
         uid = config.ids.uids.logcheck;
         shell = "/bin/sh";
         description = "Logcheck user account";
         extraGroups = cfg.extraGroups;
-      });
+      };
+    };
 
     system.activationScripts.logcheck = ''
       mkdir -m 700 -p /var/{lib,lock}/logcheck
diff --git a/nixos/modules/services/mail/dovecot.nix b/nixos/modules/services/mail/dovecot.nix
index 2cda8c49f5e..b5ed2c594f7 100644
--- a/nixos/modules/services/mail/dovecot.nix
+++ b/nixos/modules/services/mail/dovecot.nix
@@ -310,36 +310,32 @@ in
      ++ optional cfg.enablePop3 "pop3"
      ++ optional cfg.enableLmtp "lmtp";
 
-    users.users = [
-      { name = "dovenull";
-        uid = config.ids.uids.dovenull2;
-        description = "Dovecot user for untrusted logins";
-        group = "dovenull";
-      }
-    ] ++ optional (cfg.user == "dovecot2")
-         { name = "dovecot2";
-           uid = config.ids.uids.dovecot2;
+    users.users = {
+      dovenull =
+        { uid = config.ids.uids.dovenull2;
+          description = "Dovecot user for untrusted logins";
+          group = "dovenull";
+        };
+    } // optionalAttrs (cfg.user == "dovecot2") {
+      dovecot2 =
+         { uid = config.ids.uids.dovecot2;
            description = "Dovecot user";
            group = cfg.group;
-         }
-      ++ optional (cfg.createMailUser && cfg.mailUser != null)
-         ({ name = cfg.mailUser;
-            description = "Virtual Mail User";
-         } // optionalAttrs (cfg.mailGroup != null) {
-           group = cfg.mailGroup;
-         });
-
-    users.groups = optional (cfg.group == "dovecot2")
-      { name = "dovecot2";
-        gid = config.ids.gids.dovecot2;
-      }
-    ++ optional (cfg.createMailUser && cfg.mailGroup != null)
-      { name = cfg.mailGroup;
-      }
-    ++ singleton
-      { name = "dovenull";
-        gid = config.ids.gids.dovenull2;
-      };
+         };
+    } // optionalAttrs (cfg.createMailUser && cfg.mailUser != null) {
+      ${cfg.mailUser} =
+        { description = "Virtual Mail User"; } //
+        optionalAttrs (cfg.mailGroup != null)
+          { group = cfg.mailGroup; };
+    };
+
+    users.groups = {
+      dovenull.gid = config.ids.gids.dovenull2;
+    } // optionalAttrs (cfg.group == "dovecot2") {
+      dovecot2.gid = config.ids.gids.dovecot2;
+    } // optionalAttrs (cfg.createMailUser && cfg.mailGroup != null) {
+      ${cfg.mailGroup} = { };
+    };
 
     environment.etc."dovecot/modules".source = modulesDir;
     environment.etc."dovecot/dovecot.conf".source = cfg.configFile;
diff --git a/nixos/modules/services/mail/dspam.nix b/nixos/modules/services/mail/dspam.nix
index 72b8c4c08b9..766ebc8095a 100644
--- a/nixos/modules/services/mail/dspam.nix
+++ b/nixos/modules/services/mail/dspam.nix
@@ -86,16 +86,16 @@ in {
 
   config = mkIf cfg.enable (mkMerge [
     {
-      users.users = optionalAttrs (cfg.user == "dspam") (singleton
-        { name = "dspam";
+      users.users = optionalAttrs (cfg.user == "dspam") {
+        dspam = {
           group = cfg.group;
           uid = config.ids.uids.dspam;
-        });
+        };
+      };
 
-      users.groups = optionalAttrs (cfg.group == "dspam") (singleton
-        { name = "dspam";
-          gid = config.ids.gids.dspam;
-        });
+      users.groups = optionalAttrs (cfg.group == "dspam") {
+        dspam.gid = config.ids.gids.dspam;
+      };
 
       environment.systemPackages = [ dspam ];
 
diff --git a/nixos/modules/services/mail/exim.nix b/nixos/modules/services/mail/exim.nix
index 47812dd1e40..892fbd33214 100644
--- a/nixos/modules/services/mail/exim.nix
+++ b/nixos/modules/services/mail/exim.nix
@@ -87,15 +87,13 @@ in
       systemPackages = [ cfg.package ];
     };
 
-    users.users = singleton {
-      name = cfg.user;
+    users.users.${cfg.user} = {
       description = "Exim mail transfer agent user";
       uid = config.ids.uids.exim;
       group = cfg.group;
     };
 
-    users.groups = singleton {
-      name = cfg.group;
+    users.groups.${cfg.group} = {
       gid = config.ids.gids.exim;
     };
 
diff --git a/nixos/modules/services/mail/mlmmj.nix b/nixos/modules/services/mail/mlmmj.nix
index 7ae00f3e501..d58d93c4214 100644
--- a/nixos/modules/services/mail/mlmmj.nix
+++ b/nixos/modules/services/mail/mlmmj.nix
@@ -94,8 +94,7 @@ in
 
   config = mkIf cfg.enable {
 
-    users.users = singleton {
-      name = cfg.user;
+    users.users.${cfg.user} = {
       description = "mlmmj user";
       home = stateDir;
       createHome = true;
@@ -104,8 +103,7 @@ in
       useDefaultShell = true;
     };
 
-    users.groups = singleton {
-      name = cfg.group;
+    users.groups.${cfg.group} = {
       gid = config.ids.gids.mlmmj;
     };
 
diff --git a/nixos/modules/services/mail/nullmailer.nix b/nixos/modules/services/mail/nullmailer.nix
index 2c2910e0aa9..fe3f8ef9b39 100644
--- a/nixos/modules/services/mail/nullmailer.nix
+++ b/nixos/modules/services/mail/nullmailer.nix
@@ -201,15 +201,12 @@ with lib;
     };
 
     users = {
-      users = singleton {
-        name = cfg.user;
+      users.${cfg.user} = {
         description = "Nullmailer relay-only mta user";
         group = cfg.group;
       };
 
-      groups = singleton {
-        name = cfg.group;
-      };
+      groups.${cfg.group} = { };
     };
 
     systemd.tmpfiles.rules = [
diff --git a/nixos/modules/services/mail/opendkim.nix b/nixos/modules/services/mail/opendkim.nix
index 6431531d5eb..eb6a426684d 100644
--- a/nixos/modules/services/mail/opendkim.nix
+++ b/nixos/modules/services/mail/opendkim.nix
@@ -91,16 +91,16 @@ in {
 
   config = mkIf cfg.enable {
 
-    users.users = optionalAttrs (cfg.user == "opendkim") (singleton
-      { name = "opendkim";
+    users.users = optionalAttrs (cfg.user == "opendkim") {
+      opendkim = {
         group = cfg.group;
         uid = config.ids.uids.opendkim;
-      });
+      };
+    };
 
-    users.groups = optionalAttrs (cfg.group == "opendkim") (singleton
-      { name = "opendkim";
-        gid = config.ids.gids.opendkim;
-      });
+    users.groups = optionalAttrs (cfg.group == "opendkim") {
+      opendkim.gid = config.ids.gids.opendkim;
+    };
 
     environment.systemPackages = [ pkgs.opendkim ];
 
diff --git a/nixos/modules/services/mail/postfix.nix b/nixos/modules/services/mail/postfix.nix
index df438a0c69d..19e11b31d9c 100644
--- a/nixos/modules/services/mail/postfix.nix
+++ b/nixos/modules/services/mail/postfix.nix
@@ -612,10 +612,7 @@ in
     {
 
       environment = {
-        etc = singleton
-          { source = "/var/lib/postfix/conf";
-            target = "postfix";
-          };
+        etc.postfix.source = "/var/lib/postfix/conf";
 
         # This makes it comfortable to run 'postqueue/postdrop' for example.
         systemPackages = [ pkgs.postfix ];
@@ -655,21 +652,20 @@ in
         setgid = true;
       };
 
-      users.users = optional (user == "postfix")
-        { name = "postfix";
-          description = "Postfix mail server user";
-          uid = config.ids.uids.postfix;
-          group = group;
+      users.users = optionalAttrs (user == "postfix")
+        { postfix = {
+            description = "Postfix mail server user";
+            uid = config.ids.uids.postfix;
+            group = group;
+          };
         };
 
       users.groups =
-        optional (group == "postfix")
-        { name = group;
-          gid = config.ids.gids.postfix;
+        optionalAttrs (group == "postfix")
+        { ${group}.gid = config.ids.gids.postfix;
         }
-        ++ optional (setgidGroup == "postdrop")
-        { name = setgidGroup;
-          gid = config.ids.gids.postdrop;
+        // optionalAttrs (setgidGroup == "postdrop")
+        { ${setgidGroup}.gid = config.ids.gids.postdrop;
         };
 
       systemd.services.postfix =
diff --git a/nixos/modules/services/mail/postsrsd.nix b/nixos/modules/services/mail/postsrsd.nix
index 8f12a16906c..2ebc675ab10 100644
--- a/nixos/modules/services/mail/postsrsd.nix
+++ b/nixos/modules/services/mail/postsrsd.nix
@@ -90,16 +90,16 @@ in {
 
     services.postsrsd.domain = mkDefault config.networking.hostName;
 
-    users.users = optionalAttrs (cfg.user == "postsrsd") (singleton
-      { name = "postsrsd";
+    users.users = optionalAttrs (cfg.user == "postsrsd") {
+      postsrsd = {
         group = cfg.group;
         uid = config.ids.uids.postsrsd;
-      });
+      };
+    };
 
-    users.groups = optionalAttrs (cfg.group == "postsrsd") (singleton
-      { name = "postsrsd";
-        gid = config.ids.gids.postsrsd;
-      });
+    users.groups = optionalAttrs (cfg.group == "postsrsd") {
+      postsrsd.gid = config.ids.gids.postsrsd;
+    };
 
     systemd.services.postsrsd = {
       description = "PostSRSd SRS rewriting server";
diff --git a/nixos/modules/services/mail/rspamd.nix b/nixos/modules/services/mail/rspamd.nix
index f156595e6f8..aacdbe2aeed 100644
--- a/nixos/modules/services/mail/rspamd.nix
+++ b/nixos/modules/services/mail/rspamd.nix
@@ -374,15 +374,13 @@ in
     # Allow users to run 'rspamc' and 'rspamadm'.
     environment.systemPackages = [ pkgs.rspamd ];
 
-    users.users = singleton {
-      name = cfg.user;
+    users.users.${cfg.user} = {
       description = "rspamd daemon";
       uid = config.ids.uids.rspamd;
       group = cfg.group;
     };
 
-    users.groups = singleton {
-      name = cfg.group;
+    users.groups.${cfg.group} = {
       gid = config.ids.gids.rspamd;
     };
 
diff --git a/nixos/modules/services/mail/spamassassin.nix b/nixos/modules/services/mail/spamassassin.nix
index 1fe77ce5a0c..75442c7cdb5 100644
--- a/nixos/modules/services/mail/spamassassin.nix
+++ b/nixos/modules/services/mail/spamassassin.nix
@@ -5,7 +5,6 @@ with lib;
 let
   cfg = config.services.spamassassin;
   spamassassin-local-cf = pkgs.writeText "local.cf" cfg.config;
-  spamassassin-init-pre = pkgs.writeText "init.pre" cfg.initPreConf;
 
   spamdEnv = pkgs.buildEnv {
     name = "spamd-env";
@@ -65,8 +64,9 @@ in
       };
 
       initPreConf = mkOption {
-        type = types.str;
+        type = with types; either str path;
         description = "The SpamAssassin init.pre config.";
+        apply = val: if builtins.isPath val then val else pkgs.writeText "init.pre" val;
         default =
         ''
           #
@@ -124,19 +124,17 @@ in
     # Allow users to run 'spamc'.
 
     environment = {
-      etc = singleton { source = spamdEnv; target = "spamassassin"; };
+      etc.spamassassin.source = spamdEnv;
       systemPackages = [ pkgs.spamassassin ];
     };
 
-    users.users = singleton {
-      name = "spamd";
+    users.users.spamd = {
       description = "Spam Assassin Daemon";
       uid = config.ids.uids.spamd;
       group = "spamd";
     };
 
-    users.groups = singleton {
-      name = "spamd";
+    users.groups.spamd = {
       gid = config.ids.gids.spamd;
     };
 
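With the initPreConf change above, the option now accepts either a string or a path; apply normalises both to a store path, so downstream code can use cfg.initPreConf directly. Either of the following (values illustrative) is accepted:

  services.spamassassin.initPreConf = ./init.pre;   # a path is used verbatim
  # or, as before, an inline string that `apply` writes to the store:
  services.spamassassin.initPreConf = ''
    loadplugin Mail::SpamAssassin::Plugin::AWL
  '';
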
diff --git a/nixos/modules/services/misc/apache-kafka.nix b/nixos/modules/services/misc/apache-kafka.nix
index 46308f74dc9..f3a650a260f 100644
--- a/nixos/modules/services/misc/apache-kafka.nix
+++ b/nixos/modules/services/misc/apache-kafka.nix
@@ -124,8 +124,7 @@ in {
 
     environment.systemPackages = [cfg.package];
 
-    users.users = singleton {
-      name = "apache-kafka";
+    users.users.apache-kafka = {
       uid = config.ids.uids.apache-kafka;
       description = "Apache Kafka daemon user";
       home = head cfg.logDirs;
diff --git a/nixos/modules/services/misc/bepasty.nix b/nixos/modules/services/misc/bepasty.nix
index 87d36068144..f69832e5b2b 100644
--- a/nixos/modules/services/misc/bepasty.nix
+++ b/nixos/modules/services/misc/bepasty.nix
@@ -168,16 +168,12 @@ in
         })
     ) cfg.servers;
 
-    users.users = [{
-      uid = config.ids.uids.bepasty;
-      name = user;
-      group = group;
-      home = default_home;
-    }];
-
-    users.groups = [{
-      name = group;
-      gid = config.ids.gids.bepasty;
-    }];
+    users.users.${user} =
+      { uid = config.ids.uids.bepasty;
+        group = group;
+        home = default_home;
+      };
+
+    users.groups.${group}.gid = config.ids.gids.bepasty;
   };
 }
diff --git a/nixos/modules/services/misc/cgminer.nix b/nixos/modules/services/misc/cgminer.nix
index b1cf5a7d110..9fcae645269 100644
--- a/nixos/modules/services/misc/cgminer.nix
+++ b/nixos/modules/services/misc/cgminer.nix
@@ -110,11 +110,12 @@ in
 
   config = mkIf config.services.cgminer.enable {
 
-    users.users = optionalAttrs (cfg.user == "cgminer") (singleton
-      { name = "cgminer";
+    users.users = optionalAttrs (cfg.user == "cgminer") {
+      cgminer = {
         uid = config.ids.uids.cgminer;
         description = "Cgminer user";
-      });
+      };
+    };
 
     environment.systemPackages = [ cfg.package ];
 
diff --git a/nixos/modules/services/misc/couchpotato.nix b/nixos/modules/services/misc/couchpotato.nix
index 528af486b41..f5163cf86cf 100644
--- a/nixos/modules/services/misc/couchpotato.nix
+++ b/nixos/modules/services/misc/couchpotato.nix
@@ -29,17 +29,14 @@ in
       };
     };
 
-    users.users = singleton
-      { name = "couchpotato";
-        group = "couchpotato";
+    users.users.couchpotato =
+      { group = "couchpotato";
         home = "/var/lib/couchpotato/";
         description = "CouchPotato daemon user";
         uid = config.ids.uids.couchpotato;
       };
 
-    users.groups = singleton
-      { name = "couchpotato";
-        gid = config.ids.gids.couchpotato;
-      };
+    users.groups.couchpotato =
+      { gid = config.ids.gids.couchpotato; };
   };
 }
diff --git a/nixos/modules/services/misc/dictd.nix b/nixos/modules/services/misc/dictd.nix
index 8d3e294622d..d175854d2d1 100644
--- a/nixos/modules/services/misc/dictd.nix
+++ b/nixos/modules/services/misc/dictd.nix
@@ -45,18 +45,14 @@ in
     # get the command line client on system path to make some use of the service
     environment.systemPackages = [ pkgs.dict ];
 
-    users.users = singleton
-      { name = "dictd";
-        group = "dictd";
+    users.users.dictd =
+      { group = "dictd";
         description = "DICT.org dictd server";
         home = "${dictdb}/share/dictd";
         uid = config.ids.uids.dictd;
       };
 
-    users.groups = singleton
-      { name = "dictd";
-        gid = config.ids.gids.dictd;
-      };
+    users.groups.dictd.gid = config.ids.gids.dictd;
 
     systemd.services.dictd = {
       description = "DICT.org Dictionary Server";
diff --git a/nixos/modules/services/misc/etcd.nix b/nixos/modules/services/misc/etcd.nix
index e4d5322f9b5..7322e1c080b 100644
--- a/nixos/modules/services/misc/etcd.nix
+++ b/nixos/modules/services/misc/etcd.nix
@@ -186,8 +186,7 @@ in {
 
     environment.systemPackages = [ pkgs.etcdctl ];
 
-    users.users = singleton {
-      name = "etcd";
+    users.users.etcd = {
       uid = config.ids.uids.etcd;
       description = "Etcd daemon user";
       home = cfg.dataDir;
diff --git a/nixos/modules/services/misc/exhibitor.nix b/nixos/modules/services/misc/exhibitor.nix
index 74f4f671f46..f8c79f892da 100644
--- a/nixos/modules/services/misc/exhibitor.nix
+++ b/nixos/modules/services/misc/exhibitor.nix
@@ -410,8 +410,7 @@ in
         sed -i 's/'"$replace_what"'/'"$replace_with"'/g' ${cfg.baseDir}/zookeeper/bin/zk*.sh
       '';
     };
-    users.users = singleton {
-      name = "zookeeper";
+    users.users.zookeeper = {
       uid = config.ids.uids.zookeeper;
       description = "Zookeeper daemon user";
       home = cfg.baseDir;
diff --git a/nixos/modules/services/misc/felix.nix b/nixos/modules/services/misc/felix.nix
index 1c5ece86825..188e45abc58 100644
--- a/nixos/modules/services/misc/felix.nix
+++ b/nixos/modules/services/misc/felix.nix
@@ -47,14 +47,10 @@ in
   ###### implementation
 
   config = mkIf cfg.enable {
-    users.groups = singleton
-      { name = "osgi";
-        gid = config.ids.gids.osgi;
-      };
+    users.groups.osgi.gid = config.ids.gids.osgi;
 
-    users.users = singleton
-      { name = "osgi";
-        uid = config.ids.uids.osgi;
+    users.users.osgi =
+      { uid = config.ids.uids.osgi;
         description = "OSGi user";
         home = "/homeless-shelter";
       };
diff --git a/nixos/modules/services/misc/folding-at-home.nix b/nixos/modules/services/misc/folding-at-home.nix
index 122c89ce068..fd2ea3948f6 100644
--- a/nixos/modules/services/misc/folding-at-home.nix
+++ b/nixos/modules/services/misc/folding-at-home.nix
@@ -42,9 +42,8 @@ in {
 
   config = mkIf cfg.enable {
 
-    users.users = singleton
-      { name = fahUser;
-        uid = config.ids.uids.foldingathome;
+    users.users.${fahUser} =
+      { uid = config.ids.uids.foldingathome;
         description = "Folding@Home user";
         home = stateDir;
       };
diff --git a/nixos/modules/services/misc/gitea.nix b/nixos/modules/services/misc/gitea.nix
index 258476dd9fe..38910a5a005 100644
--- a/nixos/modules/services/misc/gitea.nix
+++ b/nixos/modules/services/misc/gitea.nix
@@ -364,7 +364,7 @@ in
           ''}
           sed -e "s,#secretkey#,$KEY,g" \
               -e "s,#dbpass#,$DBPASS,g" \
-              -e "s,#jwtsecet#,$JWTSECET,g" \
+              -e "s,#jwtsecret#,$JWTSECRET,g" \
               -e "s,#mailerpass#,$MAILERPASSWORD,g" \
               -i ${runConfig}
           chmod 640 ${runConfig} ${secretKey} ${jwtSecret}
diff --git a/nixos/modules/services/misc/gitlab.nix b/nixos/modules/services/misc/gitlab.nix
index 61d0ce0aef8..aa958985379 100644
--- a/nixos/modules/services/misc/gitlab.nix
+++ b/nixos/modules/services/misc/gitlab.nix
@@ -633,20 +633,14 @@ in {
     # Use postfix to send out mails.
     services.postfix.enable = mkDefault true;
 
-    users.users = [
-      { name = cfg.user;
-        group = cfg.group;
+    users.users.${cfg.user} =
+      { group = cfg.group;
         home = "${cfg.statePath}/home";
         shell = "${pkgs.bash}/bin/bash";
         uid = config.ids.uids.gitlab;
-      }
-    ];
+      };
 
-    users.groups = [
-      { name = cfg.group;
-        gid = config.ids.gids.gitlab;
-      }
-    ];
+    users.groups.${cfg.group}.gid = config.ids.gids.gitlab;
 
     systemd.tmpfiles.rules = [
       "d /run/gitlab 0755 ${cfg.user} ${cfg.group} -"
diff --git a/nixos/modules/services/misc/gpsd.nix b/nixos/modules/services/misc/gpsd.nix
index 3bfcb636a3c..f954249942a 100644
--- a/nixos/modules/services/misc/gpsd.nix
+++ b/nixos/modules/services/misc/gpsd.nix
@@ -86,17 +86,13 @@ in
 
   config = mkIf cfg.enable {
 
-    users.users = singleton
-      { name = "gpsd";
-        inherit uid;
+    users.users.gpsd =
+      { inherit uid;
         description = "gpsd daemon user";
         home = "/var/empty";
       };
 
-    users.groups = singleton
-      { name = "gpsd";
-        inherit gid;
-      };
+    users.groups.gpsd = { inherit gid; };
 
     systemd.services.gpsd = {
       description = "GPSD daemon";
diff --git a/nixos/modules/services/misc/headphones.nix b/nixos/modules/services/misc/headphones.nix
index 4a77045be28..3ee0a4458bd 100644
--- a/nixos/modules/services/misc/headphones.nix
+++ b/nixos/modules/services/misc/headphones.nix
@@ -59,19 +59,19 @@ in
 
   config = mkIf cfg.enable {
 
-    users.users = optionalAttrs (cfg.user == name) (singleton {
-      name = name;
-      uid = config.ids.uids.headphones;
-      group = cfg.group;
-      description = "headphones user";
-      home = cfg.dataDir;
-      createHome = true;
-    });
+    users.users = optionalAttrs (cfg.user == name) {
+      ${name} = {
+        uid = config.ids.uids.headphones;
+        group = cfg.group;
+        description = "headphones user";
+        home = cfg.dataDir;
+        createHome = true;
+      };
+    };
 
-    users.groups = optionalAttrs (cfg.group == name) (singleton {
-      name = name;
-      gid = config.ids.gids.headphones;
-    });
+    users.groups = optionalAttrs (cfg.group == name) {
+      ${name}.gid = config.ids.gids.headphones;
+    };
 
     systemd.services.headphones = {
         description = "Headphones Server";
diff --git a/nixos/modules/services/misc/home-assistant.nix b/nixos/modules/services/misc/home-assistant.nix
index 74702c97f55..cc113ca2d0c 100644
--- a/nixos/modules/services/misc/home-assistant.nix
+++ b/nixos/modules/services/misc/home-assistant.nix
@@ -11,6 +11,9 @@ let
     (recursiveUpdate defaultConfig cfg.config) else cfg.config));
   configFile = pkgs.runCommand "configuration.yaml" { preferLocalBuild = true; } ''
     ${pkgs.remarshal}/bin/json2yaml -i ${configJSON} -o $out
+    # Hack to support secrets, which are encoded as custom YAML objects,
+    # https://www.home-assistant.io/docs/configuration/secrets/
+    sed -i -e "s/'\!secret \(.*\)'/\!secret \1/" $out
   '';
 
   lovelaceConfigJSON = pkgs.writeText "ui-lovelace.json"
@@ -98,6 +101,10 @@ in {
         {
           homeassistant = {
             name = "Home";
+            latitude = "!secret latitude";
+            longitude = "!secret longitude";
+            elevation = "!secret elevation";
+            unit_system = "metric";
             time_zone = "UTC";
           };
           frontend = { };
@@ -108,6 +115,8 @@ in {
       description = ''
         Your <filename>configuration.yaml</filename> as a Nix attribute set.
         Beware that setting this option will delete your previous <filename>configuration.yaml</filename>.
+        <link xlink:href="https://www.home-assistant.io/docs/configuration/secrets/">Secrets</link>
+        are encoded as strings as shown in the example.
       '';
     };
 
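The sed hack above turns string values of the form "!secret name" back into Home Assistant !secret tags after the JSON-to-YAML round trip; the secrets themselves live in a secrets.yaml alongside the generated configuration, per the linked documentation. Illustrative use:

  services.home-assistant.config.homeassistant = {
    name = "Home";
    latitude  = "!secret latitude";    # looked up in secrets.yaml at runtime
    longitude = "!secret longitude";
  };
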
diff --git a/nixos/modules/services/misc/matrix-synapse.nix b/nixos/modules/services/misc/matrix-synapse.nix
index 0bda8980720..750f4a292fb 100644
--- a/nixos/modules/services/misc/matrix-synapse.nix
+++ b/nixos/modules/services/misc/matrix-synapse.nix
@@ -657,19 +657,17 @@ in {
   };
 
   config = mkIf cfg.enable {
-    users.users = [
-      { name = "matrix-synapse";
+    users.users.matrix-synapse = {
         group = "matrix-synapse";
         home = cfg.dataDir;
         createHome = true;
         shell = "${pkgs.bash}/bin/bash";
         uid = config.ids.uids.matrix-synapse;
-      } ];
+      };
 
-    users.groups = [
-      { name = "matrix-synapse";
-        gid = config.ids.gids.matrix-synapse;
-      } ];
+    users.groups.matrix-synapse = {
+      gid = config.ids.gids.matrix-synapse;
+    };
 
     services.postgresql = mkIf (usePostgresql && cfg.create_local_database) {
       enable = mkDefault true;
diff --git a/nixos/modules/services/misc/mediatomb.nix b/nixos/modules/services/misc/mediatomb.nix
index 107fb57fe1c..529f584a201 100644
--- a/nixos/modules/services/misc/mediatomb.nix
+++ b/nixos/modules/services/misc/mediatomb.nix
@@ -266,19 +266,19 @@ in {
       serviceConfig.User = "${cfg.user}";
     };
 
-    users.groups = optionalAttrs (cfg.group == "mediatomb") (singleton {
-      name = "mediatomb";
-      gid = gid;
-    });
+    users.groups = optionalAttrs (cfg.group == "mediatomb") {
+      mediatomb.gid = gid;
+    };
 
-    users.users = optionalAttrs (cfg.user == "mediatomb") (singleton {
-      name = "mediatomb";
-      isSystemUser = true;
-      group = cfg.group;
-      home = "${cfg.dataDir}";
-      createHome = true;
-      description = "Mediatomb DLNA Server User";
-    });
+    users.users = optionalAttrs (cfg.user == "mediatomb") {
+      mediatomb = {
+        isSystemUser = true;
+        group = cfg.group;
+        home = "${cfg.dataDir}";
+        createHome = true;
+        description = "Mediatomb DLNA Server User";
+      };
+    };
 
     networking.firewall = {
       allowedUDPPorts = [ 1900 cfg.port ];
diff --git a/nixos/modules/services/misc/nix-daemon.nix b/nixos/modules/services/misc/nix-daemon.nix
index 24780446d50..17c3582db0f 100644
--- a/nixos/modules/services/misc/nix-daemon.nix
+++ b/nixos/modules/services/misc/nix-daemon.nix
@@ -12,8 +12,9 @@ let
 
   isNix23 = versionAtLeast nixVersion "2.3pre";
 
-  makeNixBuildUser = nr:
-    { name = "nixbld${toString nr}";
+  makeNixBuildUser = nr: {
+    name  = "nixbld${toString nr}";
+    value = {
       description = "Nix build user ${toString nr}";
 
       /* For consistency with the setgid(2), setuid(2), and setgroups(2)
@@ -23,8 +24,9 @@ let
       group = "nixbld";
       extraGroups = [ "nixbld" ];
     };
+  };
 
-  nixbldUsers = map makeNixBuildUser (range 1 cfg.nrBuildUsers);
+  nixbldUsers = listToAttrs (map makeNixBuildUser (range 1 cfg.nrBuildUsers));
 
   nixConf =
     assert versionAtLeast nixVersion "2.2";
@@ -445,7 +447,7 @@ in
 
     users.users = nixbldUsers;
 
-    services.xserver.displayManager.hiddenUsers = map ({ name, ... }: name) nixbldUsers;
+    services.xserver.displayManager.hiddenUsers = attrNames nixbldUsers;
 
     system.activationScripts.nix = stringAfter [ "etc" "users" ]
       ''
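
The makeNixBuildUser change produces { name; value; } pairs and folds them with listToAttrs, so nixbldUsers now matches the attrset-typed users.users. A standalone sketch of the same idiom, evaluable with plain builtins:

  let
    makeUser = nr: {
      name  = "nixbld${toString nr}";
      value = { description = "Nix build user ${toString nr}"; };
    };
  in
    builtins.listToAttrs (map makeUser [ 1 2 ])
    # => { nixbld1 = { description = "Nix build user 1"; };
    #      nixbld2 = { description = "Nix build user 2"; }; }
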
diff --git a/nixos/modules/services/misc/nixos-manual.nix b/nixos/modules/services/misc/nixos-manual.nix
index 20ba3d8ef0b..ab73f49d4be 100644
--- a/nixos/modules/services/misc/nixos-manual.nix
+++ b/nixos/modules/services/misc/nixos-manual.nix
@@ -52,7 +52,7 @@ in
       };
     })
     (mkIf (cfg.showManual && cfgd.enable && cfgd.nixos.enable) {
-      boot.extraTTYs = [ "tty${toString cfg.ttyNumber}" ];
+      console.extraTTYs = [ "tty${toString cfg.ttyNumber}" ];
 
       systemd.services.nixos-manual = {
         description = "NixOS Manual";
diff --git a/nixos/modules/services/misc/octoprint.nix b/nixos/modules/services/misc/octoprint.nix
index 8950010773c..651ed374388 100644
--- a/nixos/modules/services/misc/octoprint.nix
+++ b/nixos/modules/services/misc/octoprint.nix
@@ -86,16 +86,16 @@ in
 
   config = mkIf cfg.enable {
 
-    users.users = optionalAttrs (cfg.user == "octoprint") (singleton
-      { name = "octoprint";
+    users.users = optionalAttrs (cfg.user == "octoprint") {
+      octoprint = {
         group = cfg.group;
         uid = config.ids.uids.octoprint;
-      });
+      };
+    };
 
-    users.groups = optionalAttrs (cfg.group == "octoprint") (singleton
-      { name = "octoprint";
-        gid = config.ids.gids.octoprint;
-      });
+    users.groups = optionalAttrs (cfg.group == "octoprint") {
+      octoprint.gid = config.ids.gids.octoprint;
+    };
 
     systemd.tmpfiles.rules = [
       "d '${cfg.stateDir}' - ${cfg.user} ${cfg.group} - -"
diff --git a/nixos/modules/services/misc/paperless.nix b/nixos/modules/services/misc/paperless.nix
index 3985dc0b303..bfaf760fb83 100644
--- a/nixos/modules/services/misc/paperless.nix
+++ b/nixos/modules/services/misc/paperless.nix
@@ -123,9 +123,9 @@ in
   config = mkIf cfg.enable {
 
     systemd.tmpfiles.rules = [
-      "d '${cfg.dataDir}' - ${cfg.user} ${cfg.user} - -"
+      "d '${cfg.dataDir}' - ${cfg.user} ${config.users.users.${cfg.user}.group} - -"
     ] ++ (optional cfg.consumptionDirIsPublic
-      "d '${cfg.consumptionDir}' 777 ${cfg.user} ${cfg.user} - -"
+      "d '${cfg.consumptionDir}' 777 - - - -"
       # If the consumption dir is not created here, it's automatically created by
       # 'manage' with the default permissions.
     );
@@ -169,17 +169,15 @@ in
     };
 
     users = optionalAttrs (cfg.user == defaultUser) {
-      users = [{
-        name = defaultUser;
+      users.${defaultUser} = {
         group = defaultUser;
         uid = config.ids.uids.paperless;
         home = cfg.dataDir;
-      }];
+      };
 
-      groups = [{
-        name = defaultUser;
+      groups.${defaultUser} = {
         gid = config.ids.gids.paperless;
-      }];
+      };
     };
   };
 }
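
The tmpfiles rules above follow the systemd tmpfiles.d field order: type, path, mode, user, group, age, argument, where "-" leaves a field at its default. For example (path and names illustrative):

  systemd.tmpfiles.rules = [
    # type  path                  mode  user       group      age  argument
    "d      '/var/lib/paperless'  -     paperless  paperless  -    -"
  ];
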
diff --git a/nixos/modules/services/misc/redmine.nix b/nixos/modules/services/misc/redmine.nix
index bf9a6914a48..3b8c14d196f 100644
--- a/nixos/modules/services/misc/redmine.nix
+++ b/nixos/modules/services/misc/redmine.nix
@@ -66,7 +66,7 @@ in
         type = types.package;
         default = pkgs.redmine;
         description = "Which Redmine package to use.";
-        example = "pkgs.redmine.override { ruby = pkgs.ruby_2_4; }";
+        example = "pkgs.redmine.override { ruby = pkgs.ruby_2_7; }";
       };
 
       user = mkOption {
@@ -367,17 +367,17 @@ in
 
     };
 
-    users.users = optionalAttrs (cfg.user == "redmine") (singleton
-      { name = "redmine";
+    users.users = optionalAttrs (cfg.user == "redmine") {
+      redmine = {
         group = cfg.group;
         home = cfg.stateDir;
         uid = config.ids.uids.redmine;
-      });
+      };
+    };
 
-    users.groups = optionalAttrs (cfg.group == "redmine") (singleton
-      { name = "redmine";
-        gid = config.ids.gids.redmine;
-      });
+    users.groups = optionalAttrs (cfg.group == "redmine") {
+      redmine.gid = config.ids.gids.redmine;
+    };
 
     warnings = optional (cfg.database.password != "")
       ''config.services.redmine.database.password will be stored as plaintext
diff --git a/nixos/modules/services/misc/ripple-data-api.nix b/nixos/modules/services/misc/ripple-data-api.nix
index 042b496d35e..9fab462f7e3 100644
--- a/nixos/modules/services/misc/ripple-data-api.nix
+++ b/nixos/modules/services/misc/ripple-data-api.nix
@@ -185,9 +185,8 @@ in {
       ];
     };
 
-    users.users = singleton
-      { name = "ripple-data-api";
-        description = "Ripple data api user";
+    users.users.ripple-data-api =
+      { description = "Ripple data api user";
         uid = config.ids.uids.ripple-data-api;
       };
   };
diff --git a/nixos/modules/services/misc/rippled.nix b/nixos/modules/services/misc/rippled.nix
index cdf61730de3..ef34e3a779f 100644
--- a/nixos/modules/services/misc/rippled.nix
+++ b/nixos/modules/services/misc/rippled.nix
@@ -406,9 +406,8 @@ in
 
   config = mkIf cfg.enable {
 
-    users.users = singleton
-      { name = "rippled";
-        description = "Ripple server user";
+    users.users.rippled =
+      { description = "Ripple server user";
         uid = config.ids.uids.rippled;
         home = cfg.databasePath;
         createHome = true;
diff --git a/nixos/modules/services/misc/rogue.nix b/nixos/modules/services/misc/rogue.nix
index aae02e384c9..d56d103b5f3 100644
--- a/nixos/modules/services/misc/rogue.nix
+++ b/nixos/modules/services/misc/rogue.nix
@@ -40,7 +40,7 @@ in
 
   config = mkIf cfg.enable {
 
-    boot.extraTTYs = [ cfg.tty ];
+    console.extraTTYs = [ cfg.tty ];
 
     systemd.services.rogue =
       { description = "Rogue dungeon crawling game";
diff --git a/nixos/modules/services/misc/serviio.nix b/nixos/modules/services/misc/serviio.nix
index 9868192724b..0ead6a81691 100644
--- a/nixos/modules/services/misc/serviio.nix
+++ b/nixos/modules/services/misc/serviio.nix
@@ -63,20 +63,15 @@ in {
       };
     };
 
-    users.users = [
-      {
-        name = "serviio";
-        group = "serviio";
+    users.users.serviio =
+      { group = "serviio";
         home = cfg.dataDir;
         description = "Serviio Media Server User";
         createHome = true;
         isSystemUser = true;
-      }
-    ];
+      };
 
-    users.groups = [
-      { name = "serviio";}
-    ];
+    users.groups.serviio = { };
 
     networking.firewall = {
       allowedTCPPorts = [
diff --git a/nixos/modules/services/misc/sickbeard.nix b/nixos/modules/services/misc/sickbeard.nix
index 5cfbbe516ae..a32dbfa3108 100644
--- a/nixos/modules/services/misc/sickbeard.nix
+++ b/nixos/modules/services/misc/sickbeard.nix
@@ -63,19 +63,19 @@ in
 
   config = mkIf cfg.enable {
 
-    users.users = optionalAttrs (cfg.user == name) (singleton {
-      name = name;
-      uid = config.ids.uids.sickbeard;
-      group = cfg.group;
-      description = "sickbeard user";
-      home = cfg.dataDir;
-      createHome = true;
-    });
+    users.users = optionalAttrs (cfg.user == name) {
+      ${name} = {
+        uid = config.ids.uids.sickbeard;
+        group = cfg.group;
+        description = "sickbeard user";
+        home = cfg.dataDir;
+        createHome = true;
+      };
+    };
 
-    users.groups = optionalAttrs (cfg.group == name) (singleton {
-      name = name;
-      gid = config.ids.gids.sickbeard;
-    });
+    users.groups = optionalAttrs (cfg.group == name) {
+      ${name}.gid = config.ids.gids.sickbeard;
+    };
 
     systemd.services.sickbeard = {
       description = "Sickbeard Server";
diff --git a/nixos/modules/services/misc/siproxd.nix b/nixos/modules/services/misc/siproxd.nix
index dcaf73aca44..ae7b27de8e7 100644
--- a/nixos/modules/services/misc/siproxd.nix
+++ b/nixos/modules/services/misc/siproxd.nix
@@ -161,8 +161,7 @@ in
 
   config = mkIf cfg.enable {
 
-    users.users = singleton {
-      name = "siproxyd";
+    users.users.siproxyd = {
       uid = config.ids.uids.siproxd;
     };
 
diff --git a/nixos/modules/services/misc/taskserver/default.nix b/nixos/modules/services/misc/taskserver/default.nix
index 8a57277fafe..a894caed1a3 100644
--- a/nixos/modules/services/misc/taskserver/default.nix
+++ b/nixos/modules/services/misc/taskserver/default.nix
@@ -368,16 +368,16 @@ in {
     (mkIf cfg.enable {
       environment.systemPackages = [ nixos-taskserver ];
 
-      users.users = optional (cfg.user == "taskd") {
-        name = "taskd";
-        uid = config.ids.uids.taskd;
-        description = "Taskserver user";
-        group = cfg.group;
+      users.users = optionalAttrs (cfg.user == "taskd") {
+        taskd = {
+          uid = config.ids.uids.taskd;
+          description = "Taskserver user";
+          group = cfg.group;
+        };
       };
 
-      users.groups = optional (cfg.group == "taskd") {
-        name = "taskd";
-        gid = config.ids.gids.taskd;
+      users.groups = optionalAttrs (cfg.group == "taskd") {
+        taskd.gid = config.ids.gids.taskd;
       };
 
       services.taskserver.config = {
diff --git a/nixos/modules/services/misc/uhub.nix b/nixos/modules/services/misc/uhub.nix
index 753580c3e40..d1b38831028 100644
--- a/nixos/modules/services/misc/uhub.nix
+++ b/nixos/modules/services/misc/uhub.nix
@@ -41,31 +41,31 @@ in
       enable = mkOption {
         type = types.bool;
         default = false;
-	description = "Whether to enable the uhub ADC hub.";
+        description = "Whether to enable the uhub ADC hub.";
       };
 
       port = mkOption {
         type = types.int;
         default = 1511;
-	description = "TCP port to bind the hub to.";
+        description = "TCP port to bind the hub to.";
       };
 
       address = mkOption {
         type = types.str;
         default = "any";
-	description = "Address to bind the hub to.";
+        description = "Address to bind the hub to.";
       };
 
       enableTLS = mkOption {
         type = types.bool;
         default = false;
-	description = "Whether to enable TLS support.";
+        description = "Whether to enable TLS support.";
       };
 
       hubConfig = mkOption {
         type = types.lines;
         default = "";
-	description = "Contents of uhub configuration file.";
+        description = "Contents of uhub configuration file.";
       };
 
       aclConfig = mkOption {
@@ -77,11 +77,11 @@ in
       plugins = {
 
         authSqlite = {
-	  enable = mkOption {
+          enable = mkOption {
             type = types.bool;
             default = false;
             description = "Whether to enable the Sqlite authentication database plugin";
-	  };
+          };
           file = mkOption {
             type = types.path;
             example = "/var/db/uhub-users";
@@ -161,14 +161,8 @@ in
   config = mkIf cfg.enable {
 
     users = {
-      users = singleton {
-        name = "uhub";
-        uid = config.ids.uids.uhub;
-      };
-      groups = singleton {
-        name = "uhub";
-        gid = config.ids.gids.uhub;
-      };
+      users.uhub.uid = config.ids.uids.uhub;
+      groups.uhub.gid = config.ids.gids.uhub;
     };
 
     systemd.services.uhub = {
diff --git a/nixos/modules/services/misc/zookeeper.nix b/nixos/modules/services/misc/zookeeper.nix
index 5d91e44a199..f6af7c75eba 100644
--- a/nixos/modules/services/misc/zookeeper.nix
+++ b/nixos/modules/services/misc/zookeeper.nix
@@ -146,8 +146,7 @@ in {
       '';
     };
 
-    users.users = singleton {
-      name = "zookeeper";
+    users.users.zookeeper = {
       uid = config.ids.uids.zookeeper;
       description = "Zookeeper daemon user";
       home = cfg.dataDir;
diff --git a/nixos/modules/services/monitoring/collectd.nix b/nixos/modules/services/monitoring/collectd.nix
index 731ac743b7c..ef3663c62e0 100644
--- a/nixos/modules/services/monitoring/collectd.nix
+++ b/nixos/modules/services/monitoring/collectd.nix
@@ -129,9 +129,10 @@ in {
       };
     };
 
-    users.users = optional (cfg.user == "collectd") {
-      name = "collectd";
-      isSystemUser = true;
+    users.users = optionalAttrs (cfg.user == "collectd") {
+      collectd = {
+        isSystemUser = true;
+      };
     };
   };
 }
diff --git a/nixos/modules/services/monitoring/datadog-agent.nix b/nixos/modules/services/monitoring/datadog-agent.nix
index 02a9f316fc3..2c5fe47242e 100644
--- a/nixos/modules/services/monitoring/datadog-agent.nix
+++ b/nixos/modules/services/monitoring/datadog-agent.nix
@@ -22,9 +22,9 @@ let
   # Generate Datadog configuration files for each configured checks.
   # This works because check configurations have predictable paths,
   # and because JSON is a valid subset of YAML.
-  makeCheckConfigs = entries: mapAttrsToList (name: conf: {
-    source = pkgs.writeText "${name}-check-conf.yaml" (builtins.toJSON conf);
-    target = "datadog-agent/conf.d/${name}.d/conf.yaml";
+  makeCheckConfigs = entries: mapAttrs' (name: conf: {
+    name = "datadog-agent/conf.d/${name}.d/conf.yaml";
+    value.source = pkgs.writeText "${name}-check-conf.yaml" (builtins.toJSON conf);
   }) entries;
 
   defaultChecks = {
@@ -34,10 +34,11 @@ let
 
   # Assemble all check configurations and the top-level agent
   # configuration.
-  etcfiles = with pkgs; with builtins; [{
-    source = writeText "datadog.yaml" (toJSON ddConf);
-    target = "datadog-agent/datadog.yaml";
-  }] ++ makeCheckConfigs (cfg.checks // defaultChecks);
+  etcfiles = with pkgs; with builtins;
+  { "datadog-agent/datadog.yaml" = {
+      source = writeText "datadog.yaml" (toJSON ddConf);
+    };
+  } // makeCheckConfigs (cfg.checks // defaultChecks);
 
   # Apply the configured extraIntegrations to the provided agent
   # package. See the documentation of `dd-agent/integrations-core.nix`
@@ -204,7 +205,7 @@ in {
   config = mkIf cfg.enable {
     environment.systemPackages = [ datadogPkg pkgs.sysstat pkgs.procps pkgs.iproute ];
 
-    users.extraUsers.datadog = {
+    users.users.datadog = {
       description = "Datadog Agent User";
       uid = config.ids.uids.datadog;
       group = "datadog";
@@ -212,7 +213,7 @@ in {
       createHome = true;
     };
 
-    users.extraGroups.datadog.gid = config.ids.gids.datadog;
+    users.groups.datadog.gid = config.ids.gids.datadog;
 
     systemd.services = let
       makeService = attrs: recursiveUpdate {
@@ -224,7 +225,7 @@ in {
           Restart = "always";
           RestartSec = 2;
         };
-        restartTriggers = [ datadogPkg ] ++ map (etc: etc.source) etcfiles;
+        restartTriggers = [ datadogPkg ] ++ map (etc: etc.source) (attrValues etcfiles);
       } attrs;
     in {
       datadog-agent = makeService {
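
mapAttrs' (note the prime) maps over an attribute set while also renaming its keys, which is how the per-check configurations above turn into environment.etc entries directly. A hedged standalone illustration, using builtins.toFile in place of pkgs.writeText:

  let
    lib = (import <nixpkgs> {}).lib;
    checks = { disk = { init_config = null; }; };
  in
    lib.mapAttrs' (name: conf: lib.nameValuePair
      "datadog-agent/conf.d/${name}.d/conf.yaml"
      { source = builtins.toFile "${name}-check-conf.yaml" (builtins.toJSON conf); }
    ) checks
    # => { "datadog-agent/conf.d/disk.d/conf.yaml" = { source = /nix/store/...-disk-check-conf.yaml; }; }
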
diff --git a/nixos/modules/services/monitoring/dd-agent/dd-agent.nix b/nixos/modules/services/monitoring/dd-agent/dd-agent.nix
index 5ee6b092a6a..e91717fb205 100644
--- a/nixos/modules/services/monitoring/dd-agent/dd-agent.nix
+++ b/nixos/modules/services/monitoring/dd-agent/dd-agent.nix
@@ -78,37 +78,35 @@ let
   etcfiles =
     let
       defaultConfd = import ./dd-agent-defaults.nix;
-    in (map (f: { source = "${pkgs.dd-agent}/agent/conf.d-system/${f}";
-                  target = "dd-agent/conf.d/${f}";
-                }) defaultConfd) ++ [
-      { source = ddConf;
-        target = "dd-agent/datadog.conf";
-      }
-      { source = diskConfig;
-        target = "dd-agent/conf.d/disk.yaml";
-      }
-      { source = networkConfig;
-        target = "dd-agent/conf.d/network.yaml";
-      } ] ++
-    (optional (cfg.postgresqlConfig != null)
-      { source = postgresqlConfig;
-        target = "dd-agent/conf.d/postgres.yaml";
-      }) ++
-    (optional (cfg.nginxConfig != null)
-      { source = nginxConfig;
-        target = "dd-agent/conf.d/nginx.yaml";
-      }) ++
-    (optional (cfg.mongoConfig != null)
-      { source = mongoConfig;
-        target = "dd-agent/conf.d/mongo.yaml";
-      }) ++
-    (optional (cfg.processConfig != null)
-      { source = processConfig;
-        target = "dd-agent/conf.d/process.yaml";
-      }) ++
-    (optional (cfg.jmxConfig != null)
-      { source = jmxConfig;
-        target = "dd-agent/conf.d/jmx.yaml";
+    in
+      listToAttrs (map (f: {
+        name = "dd-agent/conf.d/${f}";
+        value.source = "${pkgs.dd-agent}/agent/conf.d-system/${f}";
+      }) defaultConfd) //
+      {
+        "dd-agent/datadog.conf".source = ddConf;
+        "dd-agent/conf.d/disk.yaml".source = diskConfig;
+        "dd-agent/conf.d/network.yaml".source = networkConfig;
+      } //
+      (optionalAttrs (cfg.postgresqlConfig != null)
+      {
+        "dd-agent/conf.d/postgres.yaml".source = postgresqlConfig;
+      }) //
+      (optionalAttrs (cfg.nginxConfig != null)
+      {
+        "dd-agent/conf.d/nginx.yaml".source = nginxConfig;
+      }) //
+      (optionalAttrs (cfg.mongoConfig != null)
+      {
+        "dd-agent/conf.d/mongo.yaml".source = mongoConfig;
+      }) //
+      (optionalAttrs (cfg.processConfig != null)
+      {
+        "dd-agent/conf.d/process.yaml".source = processConfig;
+      }) //
+      (optionalAttrs (cfg.jmxConfig != null)
+      {
+        "dd-agent/conf.d/jmx.yaml".source = jmxConfig;
       });
 
 in {
diff --git a/nixos/modules/services/monitoring/fusion-inventory.nix b/nixos/modules/services/monitoring/fusion-inventory.nix
index fe19ed56195..9b65c76ce02 100644
--- a/nixos/modules/services/monitoring/fusion-inventory.nix
+++ b/nixos/modules/services/monitoring/fusion-inventory.nix
@@ -46,8 +46,7 @@ in {
 
   config = mkIf cfg.enable {
 
-    users.users = singleton {
-      name = "fusion-inventory";
+    users.users.fusion-inventory = {
       description = "FusionInventory user";
       isSystemUser = true;
     };
diff --git a/nixos/modules/services/monitoring/graphite.nix b/nixos/modules/services/monitoring/graphite.nix
index f7874af3df2..dd147bb3793 100644
--- a/nixos/modules/services/monitoring/graphite.nix
+++ b/nixos/modules/services/monitoring/graphite.nix
@@ -632,8 +632,7 @@ in {
       cfg.web.enable || cfg.api.enable ||
       cfg.seyren.enable || cfg.pager.enable || cfg.beacon.enable
      ) {
-      users.users = singleton {
-        name = "graphite";
+      users.users.graphite = {
         uid = config.ids.uids.graphite;
         description = "Graphite daemon user";
         home = dataDir;
diff --git a/nixos/modules/services/monitoring/heapster.nix b/nixos/modules/services/monitoring/heapster.nix
index 6da0831b4c5..585632943fd 100644
--- a/nixos/modules/services/monitoring/heapster.nix
+++ b/nixos/modules/services/monitoring/heapster.nix
@@ -49,8 +49,7 @@ in {
       };
     };
 
-    users.users = singleton {
-      name = "heapster";
+    users.users.heapster = {
       uid = config.ids.uids.heapster;
       description = "Heapster user";
     };
diff --git a/nixos/modules/services/monitoring/munin.nix b/nixos/modules/services/monitoring/munin.nix
index 8af0650c738..1ebf7ee6a76 100644
--- a/nixos/modules/services/monitoring/munin.nix
+++ b/nixos/modules/services/monitoring/munin.nix
@@ -317,18 +317,16 @@ in
 
     environment.systemPackages = [ pkgs.munin ];
 
-    users.users = [{
-      name = "munin";
+    users.users.munin = {
       description = "Munin monitoring user";
       group = "munin";
       uid = config.ids.uids.munin;
       home = "/var/lib/munin";
-    }];
+    };
 
-    users.groups = [{
-      name = "munin";
+    users.groups.munin = {
       gid = config.ids.gids.munin;
-    }];
+    };
 
   }) (mkIf nodeCfg.enable {
 
diff --git a/nixos/modules/services/monitoring/nagios.nix b/nixos/modules/services/monitoring/nagios.nix
index 4128bc12030..3ca79dddaf5 100644
--- a/nixos/modules/services/monitoring/nagios.nix
+++ b/nixos/modules/services/monitoring/nagios.nix
@@ -17,32 +17,39 @@ let
       preferLocalBuild = true;
     } "mkdir -p $out; ln -s $nagiosObjectDefs $out/";
 
-  nagiosCfgFile = pkgs.writeText "nagios.cfg"
-    ''
-      # Paths for state and logs.
-      log_file=${nagiosLogDir}/current
-      log_archive_path=${nagiosLogDir}/archive
-      status_file=${nagiosState}/status.dat
-      object_cache_file=${nagiosState}/objects.cache
-      temp_file=${nagiosState}/nagios.tmp
-      lock_file=/run/nagios.lock # Not used I think.
-      state_retention_file=${nagiosState}/retention.dat
-      query_socket=${nagiosState}/nagios.qh
-      check_result_path=${nagiosState}
-      command_file=${nagiosState}/nagios.cmd
-
-      # Configuration files.
-      #resource_file=resource.cfg
-      cfg_dir=${nagiosObjectDefsDir}
-
-      # Uid/gid that the daemon runs under.
-      nagios_user=nagios
-      nagios_group=nagios
-
-      # Misc. options.
-      illegal_macro_output_chars=`~$&|'"<>
-      retain_state_information=1
-    ''; # "
+  nagiosCfgFile = let
+    default = {
+      log_file="${nagiosLogDir}/current";
+      log_archive_path="${nagiosLogDir}/archive";
+      status_file="${nagiosState}/status.dat";
+      object_cache_file="${nagiosState}/objects.cache";
+      temp_file="${nagiosState}/nagios.tmp";
+      lock_file="/run/nagios.lock";
+      state_retention_file="${nagiosState}/retention.dat";
+      query_socket="${nagiosState}/nagios.qh";
+      check_result_path="${nagiosState}";
+      command_file="${nagiosState}/nagios.cmd";
+      cfg_dir="${nagiosObjectDefsDir}";
+      nagios_user="nagios";
+      nagios_group="nagios";
+      illegal_macro_output_chars="`~$&|'\"<>";
+      retain_state_information="1";
+    };
+    lines = mapAttrsToList (key: value: "${key}=${value}") (default // cfg.extraConfig);
+    content = concatStringsSep "\n" lines;
+    file = pkgs.writeText "nagios.cfg" content;
+    validated = pkgs.runCommand "nagios-checked.cfg" {preferLocalBuild=true;} ''
+      cp ${file} nagios.cfg
+      # nagios checks the existence of /var/lib/nagios, but
+      # it does not exist in the build sandbox, so we fake it
+      mkdir lib
+      lib=$(readlink -f lib)
+      sed -i s@=${nagiosState}@=$lib@ nagios.cfg
+      ${pkgs.nagios}/bin/nagios -v nagios.cfg && cp ${file} $out
+    '';
+    defaultCfgFile = if cfg.validateConfig then validated else file;
+  in
+  if cfg.mainConfigFile == null then defaultCfgFile else cfg.mainConfigFile;
 
   # Plain configuration for the Nagios web-interface with no
   # authentication.
@@ -77,16 +84,11 @@ in
     (mkRemovedOptionModule [ "services" "nagios" "urlPath" ] "The urlPath option has been removed as it is hard coded to /nagios in the nagios package.")
   ];
 
+  meta.maintainers = with lib.maintainers; [ symphorien ];
+
   options = {
     services.nagios = {
-      enable = mkOption {
-        default = false;
-        description = "
-          Whether to use <link
-          xlink:href='http://www.nagios.org/'>Nagios</link> to monitor
-          your system or network.
-        ";
-      };
+      enable = mkEnableOption "<link xlink:href='http://www.nagios.org/'>Nagios</link> to monitor your system or network.";
 
       objectDefs = mkOption {
         description = "
@@ -94,12 +96,14 @@ in
           the hosts, host groups, services and contacts for the
           network that you want Nagios to monitor.
         ";
+        type = types.listOf types.path;
+        example = literalExample "[ ./objects.cfg ]";
       };
 
       plugins = mkOption {
         type = types.listOf types.package;
-        default = [pkgs.nagiosPluginsOfficial pkgs.ssmtp];
-        defaultText = "[pkgs.nagiosPluginsOfficial pkgs.ssmtp]";
+        default = with pkgs; [ nagiosPluginsOfficial ssmtp mailutils ];
+        defaultText = "[pkgs.nagiosPluginsOfficial pkgs.ssmtp pkgs.mailutils]";
         description = "
           Packages to be added to the Nagios <envar>PATH</envar>.
           Typically used to add plugins, but can be anything.
@@ -107,14 +111,29 @@ in
       };
 
       mainConfigFile = mkOption {
-        type = types.package;
-        default = nagiosCfgFile;
-        defaultText = "nagiosCfgFile";
+        type = types.nullOr types.package;
+        default = null;
         description = "
-          Derivation for the main configuration file of Nagios.
+          If non-null, overrides the main configuration file of Nagios.
         ";
       };
 
+      extraConfig = mkOption {
+        type = types.attrsOf types.str;
+        example = {
+          debug_level = "-1";
+          debug_file = "/var/log/nagios/debug.log";
+        };
+        default = {};
+        description = "Configuration to add to /etc/nagios.cfg";
+      };
+
+      validateConfig = mkOption {
+        type = types.bool;
+        default = pkgs.stdenv.hostPlatform == pkgs.stdenv.buildPlatform;
+        description = "if true, the syntax of the nagios configuration file is checked at build time";
+      };
+
       cgiConfigFile = mkOption {
         type = types.package;
         default = nagiosCGICfgFile;
@@ -126,6 +145,7 @@ in
       };
 
       enableWebInterface = mkOption {
+        type = types.bool;
         default = false;
         description = "
           Whether to enable the Nagios web interface.  You should also
@@ -164,16 +184,12 @@ in
 
     # This isn't needed, it's just so that the user can type "nagiostats
     # -c /etc/nagios.cfg".
-    environment.etc = [
-      { source = cfg.mainConfigFile;
-        target = "nagios.cfg";
-      }
-    ];
+    environment.etc."nagios.cfg".source = nagiosCfgFile;
 
     environment.systemPackages = [ pkgs.nagios ];
     systemd.services.nagios = {
       description = "Nagios monitoring daemon";
-      path     = [ pkgs.nagios ];
+      path     = [ pkgs.nagios ] ++ cfg.plugins;
       wantedBy = [ "multi-user.target" ];
       after    = [ "network.target" ];
 
@@ -184,14 +200,9 @@ in
         RestartSec = 2;
         LogsDirectory = "nagios";
         StateDirectory = "nagios";
+        ExecStart = "${pkgs.nagios}/bin/nagios /etc/nagios.cfg";
+        X-ReloadIfChanged = nagiosCfgFile;
       };
-
-      script = ''
-        for i in ${toString cfg.plugins}; do
-          export PATH=$i/bin:$i/sbin:$i/libexec:$PATH
-        done
-        exec ${pkgs.nagios}/bin/nagios ${cfg.mainConfigFile}
-      '';
     };
 
     services.httpd.virtualHosts = optionalAttrs cfg.enableWebInterface {
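
The new extraConfig and validateConfig options admit a configuration along these lines (values illustrative; ./objects.cfg is a hypothetical local file):

  services.nagios = {
    enable = true;
    objectDefs = [ ./objects.cfg ];
    extraConfig = {
      debug_level = "-1";
      debug_file  = "/var/log/nagios/debug.log";
    };
    validateConfig = true;   # run `nagios -v` on the generated nagios.cfg at build time
  };
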
diff --git a/nixos/modules/services/monitoring/netdata.nix b/nixos/modules/services/monitoring/netdata.nix
index 3ffde8e9bce..f8225af2042 100644
--- a/nixos/modules/services/monitoring/netdata.nix
+++ b/nixos/modules/services/monitoring/netdata.nix
@@ -179,13 +179,14 @@ in {
       { domain = "netdata"; type = "hard"; item = "nofile"; value = "30000"; }
     ];
 
-    users.users = optional (cfg.user == defaultUser) {
-      name = defaultUser;
-      isSystemUser = true;
+    users.users = optionalAttrs (cfg.user == defaultUser) {
+      ${defaultUser} = {
+        isSystemUser = true;
+      };
     };
 
-    users.groups = optional (cfg.group == defaultUser) {
-      name = defaultUser;
+    users.groups = optionalAttrs (cfg.group == defaultUser) {
+      ${defaultUser} = { };
     };
 
   };
diff --git a/nixos/modules/services/monitoring/statsd.nix b/nixos/modules/services/monitoring/statsd.nix
index ea155821ecc..17836e95a6f 100644
--- a/nixos/modules/services/monitoring/statsd.nix
+++ b/nixos/modules/services/monitoring/statsd.nix
@@ -125,8 +125,7 @@ in
       message = "Only builtin backends (graphite, console, repeater) or backends enumerated in `pkgs.nodePackages` are allowed!";
     }) cfg.backends;
 
-    users.users = singleton {
-      name = "statsd";
+    users.users.statsd = {
       uid = config.ids.uids.statsd;
       description = "Statsd daemon user";
     };
diff --git a/nixos/modules/services/monitoring/sysstat.nix b/nixos/modules/services/monitoring/sysstat.nix
index d668faa53cc..ca2cff82723 100644
--- a/nixos/modules/services/monitoring/sysstat.nix
+++ b/nixos/modules/services/monitoring/sysstat.nix
@@ -5,15 +5,10 @@ let
 in {
   options = {
     services.sysstat = {
-      enable = mkOption {
-        type = types.bool;
-        default = false;
-        description = ''
-          Whether to enable sar system activity collection.
-        '';
-      };
+      enable = mkEnableOption "sar system activity collection";
 
       collect-frequency = mkOption {
+        type = types.str;
         default = "*:00/10";
         description = ''
           OnCalendar specification for sysstat-collect
@@ -21,6 +16,7 @@ in {
       };
 
       collect-args = mkOption {
+        type = types.str;
         default = "1 1";
         description = ''
           Arguments to pass sa1 when collecting statistics
@@ -33,13 +29,13 @@ in {
     systemd.services.sysstat = {
       description = "Resets System Activity Logs";
       wantedBy = [ "multi-user.target" ];
-      preStart = "test -d /var/log/sa || mkdir -p /var/log/sa";
 
       serviceConfig = {
         User = "root";
         RemainAfterExit = true;
         Type = "oneshot";
         ExecStart = "${pkgs.sysstat}/lib/sa/sa1 --boot";
+        LogsDirectory = "sa";
       };
     };
 
diff --git a/nixos/modules/services/monitoring/telegraf.nix b/nixos/modules/services/monitoring/telegraf.nix
index d8786732668..5d131557e8b 100644
--- a/nixos/modules/services/monitoring/telegraf.nix
+++ b/nixos/modules/services/monitoring/telegraf.nix
@@ -63,10 +63,9 @@ in {
       };
     };
 
-    users.users = [{
-      name = "telegraf";
+    users.users.telegraf = {
       uid = config.ids.uids.telegraf;
       description = "telegraf daemon user";
-    }];
+    };
   };
 }
diff --git a/nixos/modules/services/monitoring/ups.nix b/nixos/modules/services/monitoring/ups.nix
index 1bdc4e4410f..a45e806d4ad 100644
--- a/nixos/modules/services/monitoring/ups.nix
+++ b/nixos/modules/services/monitoring/ups.nix
@@ -214,14 +214,12 @@ in
       environment.NUT_STATEPATH = "/var/lib/nut/";
     };
 
-    environment.etc = [
-      { source = pkgs.writeText "nut.conf"
+    environment.etc = {
+      "nut/nut.conf".source = pkgs.writeText "nut.conf"
         ''
           MODE = ${cfg.mode}
         '';
-        target = "nut/nut.conf";
-      }
-      { source = pkgs.writeText "ups.conf"
+      "nut/ups.conf".source = pkgs.writeText "ups.conf"
         ''
           maxstartdelay = ${toString cfg.maxStartDelay}
 
@@ -229,25 +227,15 @@ in
 
           "}
         '';
-        target = "nut/ups.conf";
-      }
-      { source = cfg.schedulerRules;
-        target = "nut/upssched.conf";
-      }
+      "nut/upssched.conf".source = cfg.schedulerRules;
       # These file are containing private informations and thus should not
       # be stored inside the Nix store.
       /*
-      { source = ;
-        target = "nut/upsd.conf";
-      }
-      { source = ;
-        target = "nut/upsd.users";
-      }
-      { source = ;
-        target = "nut/upsmon.conf;
-      }
+      "nut/upsd.conf".source = "";
+      "nut/upsd.users".source = "";
+      "nut/upsmon.conf".source = "";
       */
-    ];
+    };
 
     power.ups.schedulerRules = mkDefault "${pkgs.nut}/etc/upssched.conf.sample";
 
@@ -259,21 +247,16 @@ in
 
 
 /*
-    users.users = [
-      { name = "nut";
-        uid = 84;
+    users.users.nut =
+      { uid = 84;
         home = "/var/lib/nut";
         createHome = true;
         group = "nut";
         description = "UPnP A/V Media Server user";
-      }
-    ];
-
-    users.groups = [
-      { name = "nut";
-        gid = 84;
-      }
-    ];
+      };
+
+    users.groups."nut" =
+      { gid = 84; };
 */
 
   };
diff --git a/nixos/modules/services/network-filesystems/ceph.nix b/nixos/modules/services/network-filesystems/ceph.nix
index 543a7b25d5d..d17959a6a30 100644
--- a/nixos/modules/services/network-filesystems/ceph.nix
+++ b/nixos/modules/services/network-filesystems/ceph.nix
@@ -371,15 +371,14 @@ in
       in
         generators.toINI {} totalConfig;
 
-    users.users = singleton {
-      name = "ceph";
+    users.users.ceph = {
       uid = config.ids.uids.ceph;
       description = "Ceph daemon user";
       group = "ceph";
       extraGroups = [ "disk" ];
     };
-    users.groups = singleton {
-      name = "ceph";
+
+    users.groups.ceph = {
       gid = config.ids.gids.ceph;
     };
 
diff --git a/nixos/modules/services/network-filesystems/davfs2.nix b/nixos/modules/services/network-filesystems/davfs2.nix
index 100d458d536..4b6f85e4a2c 100644
--- a/nixos/modules/services/network-filesystems/davfs2.nix
+++ b/nixos/modules/services/network-filesystems/davfs2.nix
@@ -57,18 +57,19 @@ in
     environment.systemPackages = [ pkgs.davfs2 ];
     environment.etc."davfs2/davfs2.conf".source = cfgFile;
 
-    users.groups = optionalAttrs (cfg.davGroup == "davfs2") (singleton {
-      name = "davfs2";
-      gid = config.ids.gids.davfs2;
-    });
+    users.groups = optionalAttrs (cfg.davGroup == "davfs2") {
+      davfs2.gid = config.ids.gids.davfs2;
+    };
+
+    users.users = optionalAttrs (cfg.davUser == "davfs2") {
+      davfs2 = {
+        createHome = false;
+        group = cfg.davGroup;
+        uid = config.ids.uids.davfs2;
+        description = "davfs2 user";
+      };
+    };
 
-    users.users = optionalAttrs (cfg.davUser == "davfs2") (singleton {
-      name = "davfs2";
-      createHome = false;
-      group = cfg.davGroup;
-      uid = config.ids.uids.davfs2;
-      description = "davfs2 user";
-    });
   };
 
 }
diff --git a/nixos/modules/services/network-filesystems/drbd.nix b/nixos/modules/services/network-filesystems/drbd.nix
index 4ab74ed8e1c..916e7eaaaa9 100644
--- a/nixos/modules/services/network-filesystems/drbd.nix
+++ b/nixos/modules/services/network-filesystems/drbd.nix
@@ -47,10 +47,8 @@ let cfg = config.services.drbd; in
         options drbd usermode_helper=/run/current-system/sw/bin/drbdadm
       '';
 
-    environment.etc = singleton
-      { source = pkgs.writeText "drbd.conf" cfg.config;
-        target = "drbd.conf";
-      };
+    environment.etc."drbd.conf" =
+      { source = pkgs.writeText "drbd.conf" cfg.config; };
 
     systemd.services.drbd = {
       after = [ "systemd-udev.settle.service" "network.target" ];
diff --git a/nixos/modules/services/networking/bind.nix b/nixos/modules/services/networking/bind.nix
index d09c6735e12..e3b95afb3d8 100644
--- a/nixos/modules/services/networking/bind.nix
+++ b/nixos/modules/services/networking/bind.nix
@@ -178,9 +178,8 @@ in
 
     networking.resolvconf.useLocalResolver = mkDefault true;
 
-    users.users = singleton
-      { name = bindUser;
-        uid = config.ids.uids.bind;
+    users.users.${bindUser} =
+      { uid = config.ids.uids.bind;
         description = "BIND daemon user";
       };
 
diff --git a/nixos/modules/services/networking/bitlbee.nix b/nixos/modules/services/networking/bitlbee.nix
index 274b3617160..54fe70f7ccc 100644
--- a/nixos/modules/services/networking/bitlbee.nix
+++ b/nixos/modules/services/networking/bitlbee.nix
@@ -161,8 +161,7 @@ in
 
   config =  mkMerge [
     (mkIf config.services.bitlbee.enable {
-      users.users = singleton {
-        name = "bitlbee";
+      users.users.bitlbee = {
         uid = bitlbeeUid;
         description = "BitlBee user";
         home = "/var/lib/bitlbee";
diff --git a/nixos/modules/services/networking/charybdis.nix b/nixos/modules/services/networking/charybdis.nix
index da26246e703..43829d36e41 100644
--- a/nixos/modules/services/networking/charybdis.nix
+++ b/nixos/modules/services/networking/charybdis.nix
@@ -71,15 +71,13 @@ in
 
   config = mkIf cfg.enable (lib.mkMerge [
     {
-      users.users = singleton {
-        name = cfg.user;
+      users.users.${cfg.user} = {
         description = "Charybdis IRC daemon user";
         uid = config.ids.uids.ircd;
         group = cfg.group;
       };
 
-      users.groups = singleton {
-        name = cfg.group;
+      users.groups.${cfg.group} = {
         gid = config.ids.gids.ircd;
       };
 
diff --git a/nixos/modules/services/networking/connman.nix b/nixos/modules/services/networking/connman.nix
index 8402be939fe..e8eadc4e187 100644
--- a/nixos/modules/services/networking/connman.nix
+++ b/nixos/modules/services/networking/connman.nix
@@ -11,6 +11,7 @@ let
 
     ${cfg.extraConfig}
   '';
+  enableIwd = cfg.wifi.backend == "iwd";
 in {
 
   imports = [
@@ -56,6 +57,17 @@ in {
         '';
       };
 
+      wifi = {
+        backend = mkOption {
+          type = types.enum [ "wpa_supplicant" "iwd" ];
+          default = "wpa_supplicant";
+          description = ''
+            Specify the Wi-Fi backend used.
+            Currently supported are <option>wpa_supplicant</option> or <option>iwd</option>.
+          '';
+        };
+      };
+
       extraFlags = mkOption {
         type = with types; listOf str;
         default = [ ];
@@ -77,9 +89,6 @@ in {
       assertion = !config.networking.useDHCP;
       message = "You can not use services.connman with networking.useDHCP";
     }{
-      assertion = config.networking.wireless.enable;
-      message = "You must use services.connman with networking.wireless";
-    }{
       assertion = !config.networking.networkmanager.enable;
       message = "You can not use services.connman with networking.networkmanager";
     }];
@@ -89,12 +98,18 @@ in {
     systemd.services.connman = {
       description = "Connection service";
       wantedBy = [ "multi-user.target" ];
-      after = [ "syslog.target" ];
+      after = [ "syslog.target" ] ++ optional enableIwd "iwd.service";
+      requires = optional enableIwd "iwd.service";
       serviceConfig = {
         Type = "dbus";
         BusName = "net.connman";
         Restart = "on-failure";
-        ExecStart = "${pkgs.connman}/sbin/connmand --config=${configFile} --nodaemon ${toString cfg.extraFlags}";
+        ExecStart = toString ([
+          "${pkgs.connman}/sbin/connmand"
+          "--config=${configFile}"
+          "--nodaemon"
+        ] ++ optional enableIwd "--wifi=iwd_agent"
+          ++ cfg.extraFlags);
         StandardOutput = "null";
       };
     };
@@ -125,7 +140,12 @@ in {
 
     networking = {
       useDHCP = false;
-      wireless.enable = true;
+      wireless = {
+        enable = mkIf (!enableIwd) true;
+        iwd = mkIf enableIwd {
+          enable = true;
+        };
+      };
       networkmanager.enable = false;
     };
   };
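
With the new backend option, selecting iwd also enables networking.wireless.iwd and orders connman after iwd.service, for example:

  services.connman = {
    enable = true;
    wifi.backend = "iwd";   # passes --wifi=iwd_agent and requires iwd.service
  };
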
diff --git a/nixos/modules/services/networking/corerad.nix b/nixos/modules/services/networking/corerad.nix
new file mode 100644
index 00000000000..1a2c4aec665
--- /dev/null
+++ b/nixos/modules/services/networking/corerad.nix
@@ -0,0 +1,46 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.corerad;
+in {
+  meta = {
+    maintainers = with maintainers; [ mdlayher ];
+  };
+
+  options.services.corerad = {
+    enable = mkEnableOption "CoreRAD IPv6 NDP RA daemon";
+
+    configFile = mkOption {
+      type = types.path;
+      example = literalExample "\"\${pkgs.corerad}/etc/corerad/corerad.toml\"";
+      description = "Path to CoreRAD TOML configuration file.";
+    };
+
+    package = mkOption {
+      default = pkgs.corerad;
+      defaultText = literalExample "pkgs.corerad";
+      type = types.package;
+      description = "CoreRAD package to use.";
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.corerad = {
+      description = "CoreRAD IPv6 NDP RA daemon";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        LimitNPROC = 512;
+        LimitNOFILE = 1048576;
+        CapabilityBoundingSet = "CAP_NET_ADMIN CAP_NET_RAW";
+        AmbientCapabilities = "CAP_NET_ADMIN CAP_NET_RAW";
+        NoNewPrivileges = true;
+        DynamicUser = true;
+        ExecStart = "${getBin cfg.package}/bin/corerad -c=${cfg.configFile}";
+        Restart = "on-failure";
+      };
+    };
+  };
+}
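
The new services.corerad module only needs a TOML configuration file; an illustrative (not normative) setup might look like:

  services.corerad = {
    enable = true;
    configFile = pkgs.writeText "corerad.toml" ''
      [[interfaces]]
      name = "eth0"
      advertise = true
    '';
  };
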
diff --git a/nixos/modules/services/networking/coturn.nix b/nixos/modules/services/networking/coturn.nix
index c430ce5af92..1bfbc307c59 100644
--- a/nixos/modules/services/networking/coturn.nix
+++ b/nixos/modules/services/networking/coturn.nix
@@ -294,16 +294,14 @@ in {
   };
 
   config = mkIf cfg.enable {
-    users.users = [
-      { name = "turnserver";
-        uid = config.ids.uids.turnserver;
+    users.users.turnserver =
+      { uid = config.ids.uids.turnserver;
         description = "coturn TURN server user";
-      } ];
-    users.groups = [
-      { name = "turnserver";
-        gid = config.ids.gids.turnserver;
+      };
+    users.groups.turnserver =
+      { gid = config.ids.gids.turnserver;
         members = [ "turnserver" ];
-      } ];
+      };
 
     systemd.services.coturn = {
       description = "coturn TURN server";
diff --git a/nixos/modules/services/networking/dhcpcd.nix b/nixos/modules/services/networking/dhcpcd.nix
index 7b278603455..6fbc014db71 100644
--- a/nixos/modules/services/networking/dhcpcd.nix
+++ b/nixos/modules/services/networking/dhcpcd.nix
@@ -185,11 +185,7 @@ in
 
     environment.systemPackages = [ dhcpcd ];
 
-    environment.etc =
-      [ { source = exitHook;
-          target = "dhcpcd.exit-hook";
-        }
-      ];
+    environment.etc."dhcpcd.exit-hook".source = exitHook;
 
     powerManagement.resumeCommands = mkIf config.systemd.services.dhcpcd.enable
       ''
diff --git a/nixos/modules/services/networking/dnschain.nix b/nixos/modules/services/networking/dnschain.nix
index 2586f2d74e9..003609ea705 100644
--- a/nixos/modules/services/networking/dnschain.nix
+++ b/nixos/modules/services/networking/dnschain.nix
@@ -147,8 +147,7 @@ in
       '';
     };
 
-    users.users = singleton {
-      name = username;
+    users.users.${username} = {
       description = "DNSChain daemon user";
       home = dataDir;
       createHome = true;
diff --git a/nixos/modules/services/networking/dnsmasq.nix b/nixos/modules/services/networking/dnsmasq.nix
index 714a5903bff..377d7bc5705 100644
--- a/nixos/modules/services/networking/dnsmasq.nix
+++ b/nixos/modules/services/networking/dnsmasq.nix
@@ -86,8 +86,7 @@ in
 
     services.dbus.packages = [ dnsmasq ];
 
-    users.users = singleton {
-      name = "dnsmasq";
+    users.users.dnsmasq = {
       uid = config.ids.uids.dnsmasq;
       description = "Dnsmasq daemon user";
     };
diff --git a/nixos/modules/services/networking/ejabberd.nix b/nixos/modules/services/networking/ejabberd.nix
index 6a38f85c48a..a5af25b983b 100644
--- a/nixos/modules/services/networking/ejabberd.nix
+++ b/nixos/modules/services/networking/ejabberd.nix
@@ -94,18 +94,18 @@ in {
   config = mkIf cfg.enable {
     environment.systemPackages = [ cfg.package ];
 
-    users.users = optionalAttrs (cfg.user == "ejabberd") (singleton
-      { name = "ejabberd";
+    users.users = optionalAttrs (cfg.user == "ejabberd") {
+      ejabberd = {
         group = cfg.group;
         home = cfg.spoolDir;
         createHome = true;
         uid = config.ids.uids.ejabberd;
-      });
+      };
+    };
 
-    users.groups = optionalAttrs (cfg.group == "ejabberd") (singleton
-      { name = "ejabberd";
-        gid = config.ids.gids.ejabberd;
-      });
+    users.groups = optionalAttrs (cfg.group == "ejabberd") {
+      ejabberd.gid = config.ids.gids.ejabberd;
+    };
 
     systemd.services.ejabberd = {
       description = "ejabberd server";
diff --git a/nixos/modules/services/networking/gale.nix b/nixos/modules/services/networking/gale.nix
index 7083d87c407..cb954fd836b 100644
--- a/nixos/modules/services/networking/gale.nix
+++ b/nixos/modules/services/networking/gale.nix
@@ -104,14 +104,13 @@ in
          systemPackages = [ pkgs.gale ];
        };
 
-       users.users = [{
-         name = cfg.user;
+       users.users.${cfg.user} = {
          description = "Gale daemon";
          uid = config.ids.uids.gale;
          group = cfg.group;
          home = home;
          createHome = true;
-       }];
+       };
 
        users.groups = [{
          name = cfg.group;
diff --git a/nixos/modules/services/networking/git-daemon.nix b/nixos/modules/services/networking/git-daemon.nix
index a638a3083fb..6f2e149433f 100644
--- a/nixos/modules/services/networking/git-daemon.nix
+++ b/nixos/modules/services/networking/git-daemon.nix
@@ -104,16 +104,16 @@ in
 
   config = mkIf cfg.enable {
 
-    users.users = if cfg.user != "git" then {} else singleton
-      { name = "git";
+    users.users = optionalAttrs (cfg.user == "git") {
+      git = {
         uid = config.ids.uids.git;
         description = "Git daemon user";
       };
+    };
 
-    users.groups = if cfg.group != "git" then {} else singleton
-      { name = "git";
-        gid = config.ids.gids.git;
-      };
+    users.groups = optionalAttrs (cfg.group == "git") {
+      git.gid = config.ids.gids.git;
+    };
 
     systemd.services.git-daemon = {
       after = [ "network.target" ];
diff --git a/nixos/modules/services/networking/gnunet.nix b/nixos/modules/services/networking/gnunet.nix
index 178a832c166..69d4ed04775 100644
--- a/nixos/modules/services/networking/gnunet.nix
+++ b/nixos/modules/services/networking/gnunet.nix
@@ -42,6 +42,7 @@ in
     services.gnunet = {
 
       enable = mkOption {
+        type = types.bool;
         default = false;
         description = ''
           Whether to run the GNUnet daemon.  GNUnet is GNU's anonymous
@@ -51,6 +52,7 @@ in
 
       fileSharing = {
         quota = mkOption {
+          type = types.int;
           default = 1024;
           description = ''
             Maximum file system usage (in MiB) for file sharing.
@@ -60,6 +62,7 @@ in
 
       udp = {
         port = mkOption {
+          type = types.port;
           default = 2086;  # assigned by IANA
           description = ''
             The UDP port for use by GNUnet.
@@ -69,6 +72,7 @@ in
 
       tcp = {
         port = mkOption {
+          type = types.port;
           default = 2086;  # assigned by IANA
           description = ''
             The TCP port for use by GNUnet.
@@ -78,6 +82,7 @@ in
 
       load = {
         maxNetDownBandwidth = mkOption {
+          type = types.int;
           default = 50000;
           description = ''
             Maximum bandwidth usage (in bits per second) for GNUnet
@@ -86,6 +91,7 @@ in
         };
 
         maxNetUpBandwidth = mkOption {
+          type = types.int;
           default = 50000;
           description = ''
             Maximum bandwidth usage (in bits per second) for GNUnet
@@ -94,6 +100,7 @@ in
         };
 
         hardNetUpBandwidth = mkOption {
+          type = types.int;
           default = 0;
           description = ''
             Hard bandwidth limit (in bits per second) when uploading
@@ -111,6 +118,7 @@ in
       };
 
       extraOptions = mkOption {
+        type = types.lines;
         default = "";
         description = ''
           Additional options that will be copied verbatim in `gnunet.conf'.
diff --git a/nixos/modules/services/networking/hans.nix b/nixos/modules/services/networking/hans.nix
index 4f60300f5ff..8334dc68d62 100644
--- a/nixos/modules/services/networking/hans.nix
+++ b/nixos/modules/services/networking/hans.nix
@@ -135,8 +135,7 @@ in
       };
     };
 
-    users.users = singleton {
-      name = hansUser;
+    users.users.${hansUser} = {
       description = "Hans daemon user";
       isSystemUser = true;
     };
diff --git a/nixos/modules/services/networking/i2pd.nix b/nixos/modules/services/networking/i2pd.nix
index e2c2275b551..326d34f6ca9 100644
--- a/nixos/modules/services/networking/i2pd.nix
+++ b/nixos/modules/services/networking/i2pd.nix
@@ -158,10 +158,10 @@ let
       (sec "addressbook")
       (strOpt "defaulturl" cfg.addressbook.defaulturl)
     ] ++ (optionalEmptyList "subscriptions" cfg.addressbook.subscriptions)
-      ++ (flip map
-      (collect (proto: proto ? port && proto ? address && proto ? name) cfg.proto)
-      (proto: let protoOpts = [
-        (sec proto.name)
+      ++ (flip mapAttrsToList
+      (filterAttrs (name: proto: proto ? port && proto ? address && proto ? name) cfg.proto)
+      (name: proto: let protoOpts = [
+        (sec name)
         (boolOpt "enabled" proto.enable)
         (strOpt "address" proto.address)
         (intOpt "port" proto.port)
@@ -181,10 +181,10 @@ let
 
   tunnelConf = let opts = [
     notice
-    (flip map
-      (collect (tun: tun ? port && tun ? destination) cfg.outTunnels)
-      (tun: let outTunOpts = [
-        (sec tun.name)
+    (flip mapAttrsToList
+      (filterAttrs (name: tun: tun ? port && tun ? destination) cfg.outTunnels)
+      (name: tun: let outTunOpts = [
+        (sec name)
         "type = client"
         (intOpt "port" tun.port)
         (strOpt "destination" tun.destination)
@@ -204,10 +204,10 @@ let
         ++ (if tun ? crypto.tagsToSend then
             optionalNullInt "crypto.tagstosend" tun.crypto.tagsToSend else []);
         in concatStringsSep "\n" outTunOpts))
-    (flip map
-      (collect (tun: tun ? port && tun ? address) cfg.inTunnels)
-      (tun: let inTunOpts = [
-        (sec tun.name)
+    (flip mapAttrsToList
+      (filterAttrs (name: tun: tun ? port && tun ? address) cfg.inTunnels)
+      (name: tun: let inTunOpts = [
+        (sec name)
         "type = server"
         (intOpt "port" tun.port)
         (strOpt "host" tun.address)
diff --git a/nixos/modules/services/networking/iodine.nix b/nixos/modules/services/networking/iodine.nix
index 97b5843bbcf..f9ca26c2796 100644
--- a/nixos/modules/services/networking/iodine.nix
+++ b/nixos/modules/services/networking/iodine.nix
@@ -147,8 +147,7 @@ in
       };
     };
 
-    users.users = singleton {
-      name = iodinedUser;
+    users.users.${iodinedUser} = {
       uid = config.ids.uids.iodined;
       description = "Iodine daemon user";
     };
diff --git a/nixos/modules/services/networking/ircd-hybrid/default.nix b/nixos/modules/services/networking/ircd-hybrid/default.nix
index f5abe61a1ba..b236552eb65 100644
--- a/nixos/modules/services/networking/ircd-hybrid/default.nix
+++ b/nixos/modules/services/networking/ircd-hybrid/default.nix
@@ -112,9 +112,8 @@ in
 
   config = mkIf config.services.ircdHybrid.enable {
 
-    users.users = singleton
-      { name = "ircd";
-        description = "IRCD owner";
+    users.users.ircd =
+      { description = "IRCD owner";
         group = "ircd";
         uid = config.ids.uids.ircd;
       };
diff --git a/nixos/modules/services/networking/kippo.nix b/nixos/modules/services/networking/kippo.nix
index bdea6a1d1ca..553415a2f32 100644
--- a/nixos/modules/services/networking/kippo.nix
+++ b/nixos/modules/services/networking/kippo.nix
@@ -73,12 +73,11 @@ in
         ${cfg.extraConfig}
     '';
 
-    users.users = singleton {
-      name = "kippo";
+    users.users.kippo = {
       description = "kippo web server privilege separation user";
       uid = 108; # why does config.ids.uids.kippo give an error?
     };
-    users.groups = singleton { name = "kippo";gid=108; };
+    users.groups.kippo.gid = 108;
 
     systemd.services.kippo = with pkgs; {
       description = "Kippo Web Server";
diff --git a/nixos/modules/services/networking/kresd.nix b/nixos/modules/services/networking/kresd.nix
index fc516c01230..5eb50a13ca9 100644
--- a/nixos/modules/services/networking/kresd.nix
+++ b/nixos/modules/services/networking/kresd.nix
@@ -13,6 +13,17 @@ in
 {
   meta.maintainers = [ maintainers.vcunat /* upstream developer */ ];
 
+  imports = [
+    (mkChangedOptionModule [ "services" "kresd" "interfaces" ] [ "services" "kresd" "listenPlain" ]
+      (config:
+        let value = getAttrFromPath [ "services" "kresd" "interfaces" ] config;
+        in map
+          (iface: if elem ":" (stringToCharacters iface) then "[${iface}]:53" else "${iface}:53") # Syntax depends on being IPv6 or IPv4.
+          value
+      )
+    )
+  ];
+
   ###### interface
   options.services.kresd = {
     enable = mkOption {
@@ -39,11 +50,12 @@ in
         Directory for caches.  They are intended to survive reboots.
       '';
     };
-    interfaces = mkOption {
+    listenPlain = mkOption {
       type = with types; listOf str;
-      default = [ "::1" "127.0.0.1" ];
+      default = [ "[::1]:53" "127.0.0.1:53" ];
       description = ''
-        What addresses the server should listen on. (UDP+TCP 53)
+        What addresses and ports the server should listen on.
+        For detailed syntax see ListenStream in man systemd.socket.
       '';
     };
     listenTLS = mkOption {
@@ -51,7 +63,7 @@ in
       default = [];
       example = [ "198.51.100.1:853" "[2001:db8::1]:853" "853" ];
       description = ''
-        Addresses on which kresd should provide DNS over TLS (see RFC 7858).
+        Addresses and ports on which kresd should provide DNS over TLS (see RFC 7858).
         For detailed syntax see ListenStream in man systemd.socket.
       '';
     };
@@ -62,24 +74,17 @@ in
   config = mkIf cfg.enable {
     environment.etc."kresd.conf".source = configFile; # not required
 
-    users.users = singleton
-      { name = "kresd";
-        uid = config.ids.uids.kresd;
+    users.users.kresd =
+      { uid = config.ids.uids.kresd;
         group = "kresd";
         description = "Knot-resolver daemon user";
       };
-    users.groups = singleton
-      { name = "kresd";
-        gid = config.ids.gids.kresd;
-      };
+    users.groups.kresd.gid = config.ids.gids.kresd;
 
     systemd.sockets.kresd = rec {
       wantedBy = [ "sockets.target" ];
       before = wantedBy;
-      listenStreams = map
-        # Syntax depends on being IPv6 or IPv4.
-        (iface: if elem ":" (stringToCharacters iface) then "[${iface}]:53" else "${iface}:53")
-        cfg.interfaces;
+      listenStreams = cfg.listenPlain;
       socketConfig = {
         ListenDatagram = listenStreams;
         FreeBind = true;
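
A minimal sketch of the renamed kresd option as it would appear in a NixOS configuration, using the defaults and the listenTLS example from the hunk above:

  services.kresd = {
    enable = true;
    # Addresses and ports in systemd ListenStream syntax.
    listenPlain = [ "[::1]:53" "127.0.0.1:53" ];
    listenTLS = [ "853" ];
  };
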
diff --git a/nixos/modules/services/networking/matterbridge.nix b/nixos/modules/services/networking/matterbridge.nix
index 682eaa6eb29..bad35133459 100644
--- a/nixos/modules/services/networking/matterbridge.nix
+++ b/nixos/modules/services/networking/matterbridge.nix
@@ -92,14 +92,15 @@ in
     warnings = optional options.services.matterbridge.configFile.isDefined
       "The option services.matterbridge.configFile is insecure and should be replaced with services.matterbridge.configPath";
 
-    users.users = optional (cfg.user == "matterbridge")
-      { name = "matterbridge";
-        group = "matterbridge";
-        isSystemUser = true;
+    users.users = optionalAttrs (cfg.user == "matterbridge")
+      { matterbridge = {
+          group = "matterbridge";
+          isSystemUser = true;
+        };
       };
 
-    users.groups = optional (cfg.group == "matterbridge")
-      { name = "matterbridge";
+    users.groups = optionalAttrs (cfg.group == "matterbridge")
+      { matterbridge = { };
       };
 
     systemd.services.matterbridge = {
diff --git a/nixos/modules/services/networking/mjpg-streamer.nix b/nixos/modules/services/networking/mjpg-streamer.nix
index e0a6c112e3c..dbc35e2e71c 100644
--- a/nixos/modules/services/networking/mjpg-streamer.nix
+++ b/nixos/modules/services/networking/mjpg-streamer.nix
@@ -49,10 +49,11 @@ in {
 
   config = mkIf cfg.enable {
 
-    users.users = optional (cfg.user == "mjpg-streamer") {
-      name = "mjpg-streamer";
-      uid = config.ids.uids.mjpg-streamer;
-      group = cfg.group;
+    users.users = optionalAttrs (cfg.user == "mjpg-streamer") {
+      mjpg-streamer = {
+        uid = config.ids.uids.mjpg-streamer;
+        group = cfg.group;
+      };
     };
 
     systemd.services.mjpg-streamer = {
diff --git a/nixos/modules/services/networking/monero.nix b/nixos/modules/services/networking/monero.nix
index 98a3456f639..b9536430868 100644
--- a/nixos/modules/services/networking/monero.nix
+++ b/nixos/modules/services/networking/monero.nix
@@ -197,17 +197,15 @@ in
 
   config = mkIf cfg.enable {
 
-    users.users = singleton {
-      name = "monero";
+    users.users.monero = {
       uid  = config.ids.uids.monero;
       description = "Monero daemon user";
       home = dataDir;
       createHome = true;
     };
 
-    users.groups = singleton {
-      name = "monero";
-      gid  = config.ids.gids.monero;
+    users.groups.monero = {
+      gid = config.ids.gids.monero;
     };
 
     systemd.services.monero = {
diff --git a/nixos/modules/services/networking/mxisd.nix b/nixos/modules/services/networking/mxisd.nix
index a3d61922e57..482d6ff456b 100644
--- a/nixos/modules/services/networking/mxisd.nix
+++ b/nixos/modules/services/networking/mxisd.nix
@@ -93,23 +93,19 @@ in {
   };
 
   config = mkIf cfg.enable {
-    users.users = [
+    users.users.mxisd =
       {
-        name = "mxisd";
         group = "mxisd";
         home = cfg.dataDir;
         createHome = true;
         shell = "${pkgs.bash}/bin/bash";
         uid = config.ids.uids.mxisd;
-      }
-    ];
+      };
 
-    users.groups = [
+    users.groups.mxisd =
       {
-        name = "mxisd";
         gid = config.ids.gids.mxisd;
-      }
-    ];
+      };
 
     systemd.services.mxisd = {
       description = "a federated identity server for the matrix ecosystem";
diff --git a/nixos/modules/services/networking/namecoind.nix b/nixos/modules/services/networking/namecoind.nix
index 43a9a0b2598..ead7f085943 100644
--- a/nixos/modules/services/networking/namecoind.nix
+++ b/nixos/modules/services/networking/namecoind.nix
@@ -154,16 +154,14 @@ in
       config = ${configFile}
     '';
 
-    users.users = singleton {
-      name = "namecoin";
+    users.users.namecoin = {
       uid  = config.ids.uids.namecoin;
       description = "Namecoin daemon user";
       home = dataDir;
       createHome = true;
     };
 
-    users.groups = singleton {
-      name = "namecoin";
+    users.groups.namecoin = {
       gid  = config.ids.gids.namecoin;
     };
 
diff --git a/nixos/modules/services/networking/nat.nix b/nixos/modules/services/networking/nat.nix
index f1238bc6b16..9c658af30f7 100644
--- a/nixos/modules/services/networking/nat.nix
+++ b/nixos/modules/services/networking/nat.nix
@@ -68,7 +68,7 @@ let
           destinationPorts = if (m == null) then throw "bad ip:ports `${fwd.destination}'" else elemAt m 1;
         in ''
           # Allow connections to ${loopbackip}:${toString fwd.sourcePort} from the host itself
-          iptables -w -t nat -A OUTPUT \
+          iptables -w -t nat -A nixos-nat-out \
             -d ${loopbackip} -p ${fwd.proto} \
             --dport ${builtins.toString fwd.sourcePort} \
             -j DNAT --to-destination ${fwd.destination}
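
For context, the corrected rule above applies to loopback port forwards of roughly this shape; the interface names and addresses below are placeholders, not part of the change:

  networking.nat = {
    enable = true;
    externalInterface = "eth0";          # placeholder
    internalInterfaces = [ "ve-+" ];     # placeholder
    forwardPorts = [
      { sourcePort = 8080;
        proto = "tcp";
        destination = "192.168.100.2:80";   # placeholder internal host
        loopbackIPs = [ "203.0.113.1" ];    # placeholder public IP
      }
    ];
  };
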
diff --git a/nixos/modules/services/networking/ndppd.nix b/nixos/modules/services/networking/ndppd.nix
index 92088623517..e015f76f622 100644
--- a/nixos/modules/services/networking/ndppd.nix
+++ b/nixos/modules/services/networking/ndppd.nix
@@ -161,7 +161,25 @@ in {
       documentation = [ "man:ndppd(1)" "man:ndppd.conf(5)" ];
       after = [ "network-pre.target" ];
       wantedBy = [ "multi-user.target" ];
-      serviceConfig.ExecStart = "${pkgs.ndppd}/bin/ndppd -c ${ndppdConf}";
+      serviceConfig = {
+        ExecStart = "${pkgs.ndppd}/bin/ndppd -c ${ndppdConf}";
+
+        # Sandboxing
+        CapabilityBoundingSet = "CAP_NET_RAW CAP_NET_ADMIN";
+        ProtectSystem = "strict";
+        ProtectHome = true;
+        PrivateTmp = true;
+        PrivateDevices = true;
+        ProtectKernelTunables = true;
+        ProtectKernelModules = true;
+        ProtectControlGroups = true;
+        RestrictAddressFamilies = "AF_INET6 AF_PACKET AF_NETLINK";
+        RestrictNamespaces = true;
+        LockPersonality = true;
+        MemoryDenyWriteExecute = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+      };
     };
   };
 }
diff --git a/nixos/modules/services/networking/networkmanager.nix b/nixos/modules/services/networking/networkmanager.nix
index 53029b59067..e817f295a44 100644
--- a/nixos/modules/services/networking/networkmanager.nix
+++ b/nixos/modules/services/networking/networkmanager.nix
@@ -308,6 +308,7 @@ in {
 
                 if [ "$2" != "up" ]; then
                     logger "exit: event $2 != up"
+                    exit
                 fi
 
                 # coreutils and iproute are in PATH too
@@ -361,62 +362,59 @@ in {
       }
     ];
 
-    environment.etc = with pkgs; [
-      { source = configFile;
-        target = "NetworkManager/NetworkManager.conf";
-      }
-      { source = "${networkmanager-openvpn}/lib/NetworkManager/VPN/nm-openvpn-service.name";
-        target = "NetworkManager/VPN/nm-openvpn-service.name";
-      }
-      { source = "${networkmanager-vpnc}/lib/NetworkManager/VPN/nm-vpnc-service.name";
-        target = "NetworkManager/VPN/nm-vpnc-service.name";
-      }
-      { source = "${networkmanager-openconnect}/lib/NetworkManager/VPN/nm-openconnect-service.name";
-        target = "NetworkManager/VPN/nm-openconnect-service.name";
-      }
-      { source = "${networkmanager-fortisslvpn}/lib/NetworkManager/VPN/nm-fortisslvpn-service.name";
-        target = "NetworkManager/VPN/nm-fortisslvpn-service.name";
-      }
-      { source = "${networkmanager-l2tp}/lib/NetworkManager/VPN/nm-l2tp-service.name";
-        target = "NetworkManager/VPN/nm-l2tp-service.name";
-      }
-      { source = "${networkmanager-iodine}/lib/NetworkManager/VPN/nm-iodine-service.name";
-        target = "NetworkManager/VPN/nm-iodine-service.name";
+    environment.etc = with pkgs; {
+      "NetworkManager/NetworkManager.conf".source = configFile;
+
+      "NetworkManager/VPN/nm-openvpn-service.name".source =
+        "${networkmanager-openvpn}/lib/NetworkManager/VPN/nm-openvpn-service.name";
+
+      "NetworkManager/VPN/nm-vpnc-service.name".source =
+        "${networkmanager-vpnc}/lib/NetworkManager/VPN/nm-vpnc-service.name";
+
+      "NetworkManager/VPN/nm-openconnect-service.name".source =
+        "${networkmanager-openconnect}/lib/NetworkManager/VPN/nm-openconnect-service.name";
+
+      "NetworkManager/VPN/nm-fortisslvpn-service.name".source =
+        "${networkmanager-fortisslvpn}/lib/NetworkManager/VPN/nm-fortisslvpn-service.name";
+
+      "NetworkManager/VPN/nm-l2tp-service.name".source =
+        "${networkmanager-l2tp}/lib/NetworkManager/VPN/nm-l2tp-service.name";
+
+      "NetworkManager/VPN/nm-iodine-service.name".source =
+        "${networkmanager-iodine}/lib/NetworkManager/VPN/nm-iodine-service.name";
       }
-    ] ++ optional (cfg.appendNameservers != [] || cfg.insertNameservers != [])
-           { source = overrideNameserversScript;
-             target = "NetworkManager/dispatcher.d/02overridedns";
-           }
-      ++ lib.imap1 (i: s: {
-        inherit (s) source;
-        target = "NetworkManager/dispatcher.d/${dispatcherTypesSubdirMap.${s.type}}03userscript${lib.fixedWidthNumber 4 i}";
-        mode = "0544";
-      }) cfg.dispatcherScripts
-      ++ optional cfg.enableStrongSwan
-           { source = "${pkgs.networkmanager_strongswan}/lib/NetworkManager/VPN/nm-strongswan-service.name";
-             target = "NetworkManager/VPN/nm-strongswan-service.name";
-           };
+      // optionalAttrs (cfg.appendNameservers != [] || cfg.insertNameservers != [])
+         {
+           "NetworkManager/dispatcher.d/02overridedns".source = overrideNameserversScript;
+         }
+      // optionalAttrs cfg.enableStrongSwan
+         {
+           "NetworkManager/VPN/nm-strongswan-service.name".source =
+             "${pkgs.networkmanager_strongswan}/lib/NetworkManager/VPN/nm-strongswan-service.name";
+         }
+      // listToAttrs (lib.imap1 (i: s:
+         {
+            name = "NetworkManager/dispatcher.d/${dispatcherTypesSubdirMap.${s.type}}03userscript${lib.fixedWidthNumber 4 i}";
+            value = { mode = "0544"; inherit (s) source; };
+         }) cfg.dispatcherScripts);
 
     environment.systemPackages = cfg.packages;
 
-    users.groups = [{
-      name = "networkmanager";
-      gid = config.ids.gids.networkmanager;
-    }
-    {
-      name = "nm-openvpn";
-      gid = config.ids.gids.nm-openvpn;
-    }];
-    users.users = [{
-      name = "nm-openvpn";
-      uid = config.ids.uids.nm-openvpn;
-      extraGroups = [ "networkmanager" ];
-    }
-    {
-      name = "nm-iodine";
-      isSystemUser = true;
-      group = "networkmanager";
-    }];
+    users.groups = {
+      networkmanager.gid = config.ids.gids.networkmanager;
+      nm-openvpn.gid = config.ids.gids.nm-openvpn;
+    };
+
+    users.users = {
+      nm-openvpn = {
+        uid = config.ids.uids.nm-openvpn;
+        extraGroups = [ "networkmanager" ];
+      };
+      nm-iodine = {
+        isSystemUser = true;
+        group = "networkmanager";
+      };
+    };
 
     systemd.packages = cfg.packages;
 
diff --git a/nixos/modules/services/networking/nntp-proxy.nix b/nixos/modules/services/networking/nntp-proxy.nix
index d24d6f77a49..cc061bf6e3b 100644
--- a/nixos/modules/services/networking/nntp-proxy.nix
+++ b/nixos/modules/services/networking/nntp-proxy.nix
@@ -210,9 +210,8 @@ in
 
   config = mkIf cfg.enable {
 
-    users.users = singleton
-      { name = proxyUser;
-        uid = config.ids.uids.nntp-proxy;
+    users.users.${proxyUser} =
+      { uid = config.ids.uids.nntp-proxy;
         description = "NNTP-Proxy daemon user";
       };
 
diff --git a/nixos/modules/services/networking/nsd.nix b/nixos/modules/services/networking/nsd.nix
index bc0966e6b8e..344396638a6 100644
--- a/nixos/modules/services/networking/nsd.nix
+++ b/nixos/modules/services/networking/nsd.nix
@@ -899,13 +899,9 @@ in
 
     environment.systemPackages = [ nsdPkg ];
 
-    users.groups = singleton {
-      name = username;
-      gid = config.ids.gids.nsd;
-    };
+    users.groups.${username}.gid = config.ids.gids.nsd;
 
-    users.users = singleton {
-      name = username;
+    users.users.${username} = {
       description = "NSD service user";
       home = stateDir;
       createHome  = true;
diff --git a/nixos/modules/services/networking/ntp/chrony.nix b/nixos/modules/services/networking/ntp/chrony.nix
index c74476c7a15..da9d960cc14 100644
--- a/nixos/modules/services/networking/ntp/chrony.nix
+++ b/nixos/modules/services/networking/ntp/chrony.nix
@@ -79,14 +79,10 @@ in
 
     environment.systemPackages = [ pkgs.chrony ];
 
-    users.groups = singleton
-      { name = "chrony";
-        gid = config.ids.gids.chrony;
-      };
+    users.groups.chrony.gid = config.ids.gids.chrony;
 
-    users.users = singleton
-      { name = "chrony";
-        uid = config.ids.uids.chrony;
+    users.users.chrony =
+      { uid = config.ids.uids.chrony;
         group = "chrony";
         description = "chrony daemon user";
         home = stateDir;
diff --git a/nixos/modules/services/networking/ntp/ntpd.nix b/nixos/modules/services/networking/ntp/ntpd.nix
index 1197c84f045..b5403cb747d 100644
--- a/nixos/modules/services/networking/ntp/ntpd.nix
+++ b/nixos/modules/services/networking/ntp/ntpd.nix
@@ -104,9 +104,8 @@ in
 
     systemd.services.systemd-timedated.environment = { SYSTEMD_TIMEDATED_NTP_SERVICES = "ntpd.service"; };
 
-    users.users = singleton
-      { name = ntpUser;
-        uid = config.ids.uids.ntp;
+    users.users.${ntpUser} =
+      { uid = config.ids.uids.ntp;
         description = "NTP daemon user";
         home = stateDir;
       };
diff --git a/nixos/modules/services/networking/ntp/openntpd.nix b/nixos/modules/services/networking/ntp/openntpd.nix
index 471d15b1687..67a04d48d30 100644
--- a/nixos/modules/services/networking/ntp/openntpd.nix
+++ b/nixos/modules/services/networking/ntp/openntpd.nix
@@ -60,8 +60,7 @@ in
 
     environment.etc."ntpd.conf".text = configFile;
 
-    users.users = singleton {
-      name = "ntp";
+    users.users.ntp = {
       uid = config.ids.uids.ntp;
       description = "OpenNTP daemon user";
       home = "/var/empty";
diff --git a/nixos/modules/services/networking/owamp.nix b/nixos/modules/services/networking/owamp.nix
index dbb2e3b4c40..637ed618b89 100644
--- a/nixos/modules/services/networking/owamp.nix
+++ b/nixos/modules/services/networking/owamp.nix
@@ -17,16 +17,13 @@ in
   ###### implementation
 
   config = mkIf cfg.enable {
-    users.users = singleton {
-      name = "owamp";
+    users.users.owamp = {
       group = "owamp";
       description = "Owamp daemon";
       isSystemUser = true;
     };
 
-    users.groups = singleton {
-      name = "owamp";
-    };
+    users.groups.owamp = { };
 
     systemd.services.owamp = {
       description = "Owamp server";
diff --git a/nixos/modules/services/networking/pdnsd.nix b/nixos/modules/services/networking/pdnsd.nix
index f5b174dd7b7..24b5bbc5104 100644
--- a/nixos/modules/services/networking/pdnsd.nix
+++ b/nixos/modules/services/networking/pdnsd.nix
@@ -62,15 +62,13 @@ in
     };
 
   config = mkIf cfg.enable {
-    users.users = singleton {
-      name = pdnsdUser;
+    users.users.${pdnsdUser} = {
       uid = config.ids.uids.pdnsd;
       group = pdnsdGroup;
       description = "pdnsd user";
     };
 
-    users.groups = singleton {
-      name = pdnsdGroup;
+    users.groups.${pdnsdGroup} = {
       gid = config.ids.gids.pdnsd;
     };
 
diff --git a/nixos/modules/services/networking/polipo.nix b/nixos/modules/services/networking/polipo.nix
index dbe3b738097..1ff9388346b 100644
--- a/nixos/modules/services/networking/polipo.nix
+++ b/nixos/modules/services/networking/polipo.nix
@@ -85,17 +85,15 @@ in
 
   config = mkIf cfg.enable {
 
-    users.users = singleton
-      { name = "polipo";
-        uid = config.ids.uids.polipo;
+    users.users.polipo =
+      { uid = config.ids.uids.polipo;
         description = "Polipo caching proxy user";
         home = "/var/cache/polipo";
         createHome = true;
       };
 
-    users.groups = singleton
-      { name = "polipo";
-        gid = config.ids.gids.polipo;
+    users.groups.polipo =
+      { gid = config.ids.gids.polipo;
         members = [ "polipo" ];
       };
 
diff --git a/nixos/modules/services/networking/pppd.nix b/nixos/modules/services/networking/pppd.nix
index e96c27bd84b..b31bfa64235 100644
--- a/nixos/modules/services/networking/pppd.nix
+++ b/nixos/modules/services/networking/pppd.nix
@@ -64,11 +64,13 @@ in
     enabledConfigs = filter (f: f.enable) (attrValues cfg.peers);
 
     mkEtc = peerCfg: {
-      "ppp/peers/${peerCfg.name}".text = peerCfg.config;
+      name = "ppp/peers/${peerCfg.name}";
+      value.text = peerCfg.config;
     };
 
     mkSystemd = peerCfg: {
-      "pppd-${peerCfg.name}" = {
+      name = "pppd-${peerCfg.name}";
+      value = {
         restartTriggers = [ config.environment.etc."ppp/peers/${peerCfg.name}".source ];
         before = [ "network.target" ];
         wants = [ "network.target" ];
@@ -124,8 +126,8 @@ in
       };
     };
 
-    etcFiles = map mkEtc enabledConfigs;
-    systemdConfigs = map mkSystemd enabledConfigs;
+    etcFiles = listToAttrs (map mkEtc enabledConfigs);
+    systemdConfigs = listToAttrs (map mkSystemd enabledConfigs);
 
   in mkIf cfg.enable {
     environment.etc = mkMerge etcFiles;
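
The conversion leans on lib.listToAttrs, which turns a list of name/value pairs into an attribute set; a tiny illustration with a hypothetical peer name:

  lib.listToAttrs [
    { name = "ppp/peers/demo"; value = { text = "..."; }; }
  ]
  # => { "ppp/peers/demo" = { text = "..."; }; }
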
diff --git a/nixos/modules/services/networking/prayer.nix b/nixos/modules/services/networking/prayer.nix
index c936417e68c..9c9eeba23da 100644
--- a/nixos/modules/services/networking/prayer.nix
+++ b/nixos/modules/services/networking/prayer.nix
@@ -72,17 +72,14 @@ in
   config = mkIf config.services.prayer.enable {
     environment.systemPackages = [ prayer ];
 
-    users.users = singleton
-      { name = prayerUser;
-        uid = config.ids.uids.prayer;
+    users.users.${prayerUser} =
+      { uid = config.ids.uids.prayer;
         description = "Prayer daemon user";
         home = stateDir;
       };
 
-    users.groups = singleton
-      { name = prayerGroup;
-        gid = config.ids.gids.prayer;
-      };
+    users.groups.${prayerGroup} =
+      { gid = config.ids.gids.prayer; };
 
     systemd.services.prayer = {
       wantedBy = [ "multi-user.target" ];
diff --git a/nixos/modules/services/networking/quassel.nix b/nixos/modules/services/networking/quassel.nix
index b495b3948fb..52ecd90b7c6 100644
--- a/nixos/modules/services/networking/quassel.nix
+++ b/nixos/modules/services/networking/quassel.nix
@@ -92,17 +92,21 @@ in
         message = "Quassel needs a certificate file in order to require SSL";
       }];
 
-    users.users = mkIf (cfg.user == null) [
-      { name = "quassel";
+    users.users = optionalAttrs (cfg.user == null) {
+      quassel = {
+        name = "quassel";
         description = "Quassel IRC client daemon";
         group = "quassel";
         uid = config.ids.uids.quassel;
-      }];
+      };
+    };
 
-    users.groups = mkIf (cfg.user == null) [
-      { name = "quassel";
+    users.groups = optionalAttrs (cfg.user == null) {
+      quassel = {
+        name = "quassel";
         gid = config.ids.gids.quassel;
-      }];
+      };
+    };
 
     systemd.tmpfiles.rules = [
       "d '${cfg.dataDir}' - ${user} - - -"
diff --git a/nixos/modules/services/networking/radicale.nix b/nixos/modules/services/networking/radicale.nix
index 1daced4a6c7..30bf22586f8 100644
--- a/nixos/modules/services/networking/radicale.nix
+++ b/nixos/modules/services/networking/radicale.nix
@@ -59,18 +59,15 @@ in
   config = mkIf cfg.enable {
     environment.systemPackages = [ cfg.package ];
 
-    users.users = singleton
-      { name = "radicale";
-        uid = config.ids.uids.radicale;
+    users.users.radicale =
+      { uid = config.ids.uids.radicale;
         description = "radicale user";
         home = "/var/lib/radicale";
         createHome = true;
       };
 
-    users.groups = singleton
-      { name = "radicale";
-        gid = config.ids.gids.radicale;
-      };
+    users.groups.radicale =
+      { gid = config.ids.gids.radicale; };
 
     systemd.services.radicale = {
       description = "A Simple Calendar and Contact Server";
diff --git a/nixos/modules/services/networking/shairport-sync.nix b/nixos/modules/services/networking/shairport-sync.nix
index 68e005ab81d..2e988e0ca2e 100644
--- a/nixos/modules/services/networking/shairport-sync.nix
+++ b/nixos/modules/services/networking/shairport-sync.nix
@@ -55,9 +55,8 @@ in
     services.avahi.publish.enable = true;
     services.avahi.publish.userServices = true;
 
-    users.users = singleton
-      { name = cfg.user;
-        description = "Shairport user";
+    users.users.${cfg.user} =
+      { description = "Shairport user";
         isSystemUser = true;
         createHome = true;
         home = "/var/lib/shairport-sync";
diff --git a/nixos/modules/services/networking/shorewall.nix b/nixos/modules/services/networking/shorewall.nix
new file mode 100644
index 00000000000..0f94d414fcf
--- /dev/null
+++ b/nixos/modules/services/networking/shorewall.nix
@@ -0,0 +1,75 @@
+{ config, lib, pkgs, ... }:
+let
+  types = lib.types;
+  cfg = config.services.shorewall;
+in {
+  options = {
+    services.shorewall = {
+      enable = lib.mkOption {
+        type        = types.bool;
+        default     = false;
+        description = ''
+          Whether to enable Shorewall IPv4 Firewall.
+          <warning>
+            <para>
+            Enabling this service WILL disable the existing NixOS
+            firewall! Default firewall rules provided by packages are not
+            considered at the moment.
+            </para>
+          </warning>
+        '';
+      };
+      package = lib.mkOption {
+        type        = types.package;
+        default     = pkgs.shorewall;
+        defaultText = "pkgs.shorewall";
+        description = "The shorewall package to use.";
+      };
+      configs = lib.mkOption {
+        type        = types.attrsOf types.str;
+        default     = {};
+        description = ''
+          Shorewall configuration files, installed under /etc/shorewall/.
+          The attribute name is the file name and the attribute value is
+          its content.
+        '';
+        apply = lib.mapAttrs (name: text: pkgs.writeText "${name}" text);
+      };
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    systemd.services.firewall.enable = false;
+    systemd.services.shorewall = {
+      description     = "Shorewall IPv4 Firewall";
+      after           = [ "ipset.target" ];
+      before          = [ "network-pre.target" ];
+      wants           = [ "network-pre.target" ];
+      wantedBy        = [ "multi-user.target" ];
+      reloadIfChanged = true;
+      restartTriggers = lib.attrValues cfg.configs;
+      serviceConfig = {
+        Type            = "oneshot";
+        RemainAfterExit = "yes";
+        ExecStart       = "${cfg.package}/bin/shorewall start";
+        ExecReload      = "${cfg.package}/bin/shorewall reload";
+        ExecStop        = "${cfg.package}/bin/shorewall stop";
+      };
+      preStart = ''
+        install -D -d -m 750 /var/lib/shorewall
+        install -D -d -m 755 /var/lock/subsys
+        touch                /var/log/shorewall.log
+        chmod 750            /var/log/shorewall.log
+      '';
+    };
+    environment = {
+      etc = lib.mapAttrsToList
+              (name: file:
+                { source = file;
+                  target = "shorewall/${name}";
+                })
+              cfg.configs;
+      systemPackages = [ cfg.package ];
+    };
+  };
+}
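
A minimal sketch of the new services.shorewall module; the file name and contents below are placeholders, and a working Shorewall setup needs its full set of configuration files (zones, interfaces, policy, rules, ...):

  services.shorewall = {
    enable = true;
    configs = {
      # Each attribute becomes /etc/shorewall/<name>.
      "shorewall.conf" = ''
        STARTUP_ENABLED=Yes
      '';
    };
  };
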
diff --git a/nixos/modules/services/networking/shorewall6.nix b/nixos/modules/services/networking/shorewall6.nix
new file mode 100644
index 00000000000..9c22a037c0b
--- /dev/null
+++ b/nixos/modules/services/networking/shorewall6.nix
@@ -0,0 +1,75 @@
+{ config, lib, pkgs, ... }:
+let
+  types = lib.types;
+  cfg = config.services.shorewall6;
+in {
+  options = {
+    services.shorewall6 = {
+      enable = lib.mkOption {
+        type        = types.bool;
+        default     = false;
+        description = ''
+          Whether to enable Shorewall IPv6 Firewall.
+          <warning>
+            <para>
+            Enabling this service WILL disable the existing NixOS
+            firewall! Default firewall rules provided by packages are not
+            considered at the moment.
+            </para>
+          </warning>
+        '';
+      };
+      package = lib.mkOption {
+        type        = types.package;
+        default     = pkgs.shorewall;
+        defaultText = "pkgs.shorewall";
+        description = "The shorewall package to use.";
+      };
+      configs = lib.mkOption {
+        type        = types.attrsOf types.str;
+        default     = {};
+        description = ''
+          Shorewall configuration files, installed under /etc/shorewall6/.
+          The attribute name is the file name and the attribute value is
+          its content.
+        '';
+        apply = lib.mapAttrs (name: text: pkgs.writeText "${name}" text);
+      };
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    systemd.services.firewall.enable = false;
+    systemd.services.shorewall6 = {
+      description     = "Shorewall IPv6 Firewall";
+      after           = [ "ipset.target" ];
+      before          = [ "network-pre.target" ];
+      wants           = [ "network-pre.target" ];
+      wantedBy        = [ "multi-user.target" ];
+      reloadIfChanged = true;
+      restartTriggers = lib.attrValues cfg.configs;
+      serviceConfig = {
+        Type            = "oneshot";
+        RemainAfterExit = "yes";
+        ExecStart       = "${cfg.package}/bin/shorewall6 start";
+        ExecReload      = "${cfg.package}/bin/shorewall6 reload";
+        ExecStop        = "${cfg.package}/bin/shorewall6 stop";
+      };
+      preStart = ''
+        install -D -d -m 750 /var/lib/shorewall6
+        install -D -d -m 755 /var/lock/subsys
+        touch                /var/log/shorewall6.log
+        chmod 750            /var/log/shorewall6.log
+      '';
+    };
+    environment = {
+      etc = lib.mapAttrsToList
+              (name: file:
+                { source = file;
+                  target = "shorewall6/${name}";
+                })
+              cfg.configs;
+      systemPackages = [ cfg.package ];
+    };
+  };
+}
diff --git a/nixos/modules/services/networking/shout.nix b/nixos/modules/services/networking/shout.nix
index e548ec66962..a808a7f39d0 100644
--- a/nixos/modules/services/networking/shout.nix
+++ b/nixos/modules/services/networking/shout.nix
@@ -82,8 +82,7 @@ in {
   };
 
   config = mkIf cfg.enable {
-    users.users = singleton {
-      name = "shout";
+    users.users.shout = {
       uid = config.ids.uids.shout;
       description = "Shout daemon user";
       home = shoutHome;
diff --git a/nixos/modules/services/networking/smokeping.nix b/nixos/modules/services/networking/smokeping.nix
index b48b0b3a9d6..37ee2a80389 100644
--- a/nixos/modules/services/networking/smokeping.nix
+++ b/nixos/modules/services/networking/smokeping.nix
@@ -280,8 +280,7 @@ in
       fping6.source = "${pkgs.fping}/bin/fping6";
     };
     environment.systemPackages = [ pkgs.fping ];
-    users.users = singleton {
-      name = cfg.user;
+    users.users.${cfg.user} = {
       isNormalUser = false;
       isSystemUser = true;
       uid = config.ids.uids.smokeping;
diff --git a/nixos/modules/services/networking/supybot.nix b/nixos/modules/services/networking/supybot.nix
index 64eb1106832..92c84bd0e1e 100644
--- a/nixos/modules/services/networking/supybot.nix
+++ b/nixos/modules/services/networking/supybot.nix
@@ -45,8 +45,7 @@ in
 
     environment.systemPackages = [ pkgs.pythonPackages.limnoria ];
 
-    users.users = singleton {
-      name = "supybot";
+    users.users.supybot = {
       uid = config.ids.uids.supybot;
       group = "supybot";
       description = "Supybot IRC bot user";
@@ -55,7 +54,6 @@ in
     };
 
     users.groups.supybot = {
-      name = "supybot";
       gid = config.ids.gids.supybot;
     };
 
diff --git a/nixos/modules/services/networking/syncthing.nix b/nixos/modules/services/networking/syncthing.nix
index b3f2af5b179..47b10e408c0 100644
--- a/nixos/modules/services/networking/syncthing.nix
+++ b/nixos/modules/services/networking/syncthing.nix
@@ -112,12 +112,12 @@ in {
               addresses = [ "tcp://192.168.0.10:51820" ];
             };
           };
-          type = types.attrsOf (types.submodule ({ config, ... }: {
+          type = types.attrsOf (types.submodule ({ name, ... }: {
             options = {
 
               name = mkOption {
                 type = types.str;
-                default = config._module.args.name;
+                default = name;
                 description = ''
                   Name of the device
                 '';
@@ -175,7 +175,7 @@ in {
               devices = [ "bigbox" ];
             };
           };
-          type = types.attrsOf (types.submodule ({ config, ... }: {
+          type = types.attrsOf (types.submodule ({ name, ... }: {
             options = {
 
               enable = mkOption {
@@ -190,7 +190,7 @@ in {
 
               path = mkOption {
                 type = types.str;
-                default = config._module.args.name;
+                default = name;
                 description = ''
                   The path to the folder which should be shared.
                 '';
@@ -198,7 +198,7 @@ in {
 
               id = mkOption {
                 type = types.str;
-                default = config._module.args.name;
+                default = name;
                 description = ''
                   The id of the folder. Must be the same on all devices.
                 '';
@@ -206,7 +206,7 @@ in {
 
               label = mkOption {
                 type = types.str;
-                default = config._module.args.name;
+                default = name;
                 description = ''
                   The label of the folder.
                 '';
diff --git a/nixos/modules/services/networking/tcpcrypt.nix b/nixos/modules/services/networking/tcpcrypt.nix
index a0ccb995009..18f2e135124 100644
--- a/nixos/modules/services/networking/tcpcrypt.nix
+++ b/nixos/modules/services/networking/tcpcrypt.nix
@@ -29,8 +29,7 @@ in
 
   config = mkIf cfg.enable {
 
-    users.users = singleton {
-      name = "tcpcryptd";
+    users.users.tcpcryptd = {
       uid = config.ids.uids.tcpcryptd;
       description = "tcpcrypt daemon user";
     };
diff --git a/nixos/modules/services/networking/tox-bootstrapd.nix b/nixos/modules/services/networking/tox-bootstrapd.nix
index 1d349215169..f88e34827d0 100644
--- a/nixos/modules/services/networking/tox-bootstrapd.nix
+++ b/nixos/modules/services/networking/tox-bootstrapd.nix
@@ -56,9 +56,8 @@ in
 
   config = mkIf config.services.toxBootstrapd.enable {
 
-    users.users = singleton
-      { name = "tox-bootstrapd";
-        uid = config.ids.uids.tox-bootstrapd;
+    users.users.tox-bootstrapd =
+      { uid = config.ids.uids.tox-bootstrapd;
         description = "Tox bootstrap daemon user";
         inherit home;
         createHome = true;
diff --git a/nixos/modules/services/networking/vsftpd.nix b/nixos/modules/services/networking/vsftpd.nix
index 90093d9a78d..47990dbb377 100644
--- a/nixos/modules/services/networking/vsftpd.nix
+++ b/nixos/modules/services/networking/vsftpd.nix
@@ -279,21 +279,22 @@ in
         message = "vsftpd: If enableVirtualUsers is true, you need to setup both the userDbPath and localUsers options.";
       }];
 
-    users.users =
-      [ { name = "vsftpd";
-          uid = config.ids.uids.vsftpd;
-          description = "VSFTPD user";
-          home = if cfg.localRoot != null
-                   then cfg.localRoot # <= Necessary for virtual users.
-                   else "/homeless-shelter";
-        }
-      ] ++ optional cfg.anonymousUser
-        { name = "ftp";
+    users.users = {
+      "vsftpd" = {
+        uid = config.ids.uids.vsftpd;
+        description = "VSFTPD user";
+        home = if cfg.localRoot != null
+               then cfg.localRoot # <= Necessary for virtual users.
+               else "/homeless-shelter";
+      };
+    } // optionalAttrs cfg.anonymousUser {
+      "ftp" = { name = "ftp";
           uid = config.ids.uids.ftp;
           group = "ftp";
           description = "Anonymous FTP user";
           home = cfg.anonymousUserHome;
         };
+    };
 
     users.groups.ftp.gid = config.ids.gids.ftp;
 
diff --git a/nixos/modules/services/networking/wpa_supplicant.nix b/nixos/modules/services/networking/wpa_supplicant.nix
index 8f05c3949fb..de0f11595a9 100644
--- a/nixos/modules/services/networking/wpa_supplicant.nix
+++ b/nixos/modules/services/networking/wpa_supplicant.nix
@@ -233,6 +233,7 @@ in {
       path = [ pkgs.wpa_supplicant ];
 
       script = ''
+        iface_args="-s -u -D${cfg.driver} -c ${configFile}"
         ${if ifaces == [] then ''
           for i in $(cd /sys/class/net && echo *); do
             DEVTYPE=
@@ -240,14 +241,14 @@ in {
             if [ -e "$UEVENT_PATH" ]; then
               source "$UEVENT_PATH"
               if [ "$DEVTYPE" = "wlan" -o -e /sys/class/net/$i/wireless ]; then
-                ifaces="$ifaces''${ifaces:+ -N} -i$i"
+                args+="''${args:+ -N} -i$i $iface_args"
               fi
             fi
           done
         '' else ''
-          ifaces="${concatStringsSep " -N " (map (i: "-i${i}") ifaces)}"
+          args="${concatMapStringsSep " -N " (i: "-i${i} $iface_args") ifaces}"
         ''}
-        exec wpa_supplicant -s -u -D${cfg.driver} -c ${configFile} $ifaces
+        exec wpa_supplicant $args
       '';
     };
 
diff --git a/nixos/modules/services/networking/xandikos.nix b/nixos/modules/services/networking/xandikos.nix
new file mode 100644
index 00000000000..87c029156b9
--- /dev/null
+++ b/nixos/modules/services/networking/xandikos.nix
@@ -0,0 +1,148 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.xandikos;
+in
+{
+
+  options = {
+    services.xandikos = {
+      enable = mkEnableOption "Xandikos CalDAV and CardDAV server";
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.xandikos;
+        defaultText = "pkgs.xandikos";
+        description = "The Xandikos package to use.";
+      };
+
+      address = mkOption {
+        type = types.str;
+        default = "localhost";
+        description = ''
+          The IP address on which Xandikos will listen.
+          By default listens on localhost.
+        '';
+      };
+
+      port = mkOption {
+        type = types.port;
+        default = 8080;
+        description = "The port of the Xandikos web application";
+      };
+
+      routePrefix = mkOption {
+        type = types.str;
+        default = "/";
+        description = ''
+          The URL route prefix under which Xandikos is served.
+          Useful when Xandikos is behind a reverse proxy.
+        '';
+      };
+
+      extraOptions = mkOption {
+        default = [];
+        type = types.listOf types.str;
+        example = literalExample ''
+          [ "--autocreate"
+            "--defaults"
+            "--current-user-principal user"
+            "--dump-dav-xml"
+          ]
+        '';
+        description = ''
+          Extra command line arguments to pass to xandikos.
+        '';
+      };
+
+      nginx = mkOption {
+        default = {};
+        description = ''
+          Configuration for nginx reverse proxy.
+        '';
+
+        type = types.submodule {
+          options = {
+            enable = mkOption {
+              type = types.bool;
+              default = false;
+              description = ''
+                Whether to configure nginx as a reverse proxy for Xandikos.
+              '';
+            };
+
+            hostName = mkOption {
+              type = types.str;
+              description = ''
+                The host name to use for the nginx virtual host configuration.
+              '';
+            };
+          };
+        };
+      };
+
+    };
+
+  };
+
+  config = mkIf cfg.enable (
+    mkMerge [
+      {
+        meta.maintainers = [ lib.maintainers."0x4A6F" ];
+
+        systemd.services.xandikos = {
+          description = "A Simple Calendar and Contact Server";
+          after = [ "network.target" ];
+          wantedBy = [ "multi-user.target" ];
+
+          serviceConfig = {
+            User = "xandikos";
+            Group = "xandikos";
+            DynamicUser = "yes";
+            RuntimeDirectory = "xandikos";
+            StateDirectory = "xandikos";
+            StateDirectoryMode = "0700";
+            PrivateDevices = true;
+            # Sandboxing
+            CapabilityBoundingSet = "CAP_NET_RAW CAP_NET_ADMIN";
+            ProtectSystem = "strict";
+            ProtectHome = true;
+            PrivateTmp = true;
+            ProtectKernelTunables = true;
+            ProtectKernelModules = true;
+            ProtectControlGroups = true;
+            RestrictAddressFamilies = "AF_INET AF_INET6 AF_UNIX AF_PACKET AF_NETLINK";
+            RestrictNamespaces = true;
+            LockPersonality = true;
+            MemoryDenyWriteExecute = true;
+            RestrictRealtime = true;
+            RestrictSUIDSGID = true;
+            ExecStart = ''
+              ${cfg.package}/bin/xandikos \
+                --directory /var/lib/xandikos \
+                --listen_address ${cfg.address} \
+                --port ${toString cfg.port} \
+                --route-prefix ${cfg.routePrefix} \
+                ${lib.concatStringsSep " " cfg.extraOptions}
+            '';
+          };
+        };
+      }
+
+      (
+        mkIf cfg.nginx.enable {
+          services.nginx = {
+            enable = true;
+            virtualHosts."${cfg.nginx.hostName}" = {
+              locations."/" = {
+                proxyPass = "http://${cfg.address}:${toString cfg.port}/";
+              };
+            };
+          };
+        }
+      )
+    ]
+  );
+}
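
A minimal sketch of the new services.xandikos module behind its optional nginx reverse proxy; the host name is a placeholder, and the extraOptions values are taken from the example in the module above:

  services.xandikos = {
    enable = true;
    address = "localhost";
    port = 8080;
    extraOptions = [ "--autocreate" "--defaults" ];
    nginx = {
      enable = true;
      hostName = "dav.example.org";   # placeholder
    };
  };
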
diff --git a/nixos/modules/services/networking/znc/default.nix b/nixos/modules/services/networking/znc/default.nix
index 0a9848a4934..a7315896c50 100644
--- a/nixos/modules/services/networking/znc/default.nix
+++ b/nixos/modules/services/networking/znc/default.nix
@@ -287,20 +287,22 @@ in
       '';
     };
 
-    users.users = optional (cfg.user == defaultUser)
-      { name = defaultUser;
-        description = "ZNC server daemon owner";
-        group = defaultUser;
-        uid = config.ids.uids.znc;
-        home = cfg.dataDir;
-        createHome = true;
+    users.users = optionalAttrs (cfg.user == defaultUser) {
+      ${defaultUser} =
+        { description = "ZNC server daemon owner";
+          group = defaultUser;
+          uid = config.ids.uids.znc;
+          home = cfg.dataDir;
+          createHome = true;
+        };
       };
 
-    users.groups = optional (cfg.user == defaultUser)
-      { name = defaultUser;
-        gid = config.ids.gids.znc;
-        members = [ defaultUser ];
-      };
+    users.groups = optionalAttrs (cfg.user == defaultUser) {
+      ${defaultUser} =
+        { gid = config.ids.gids.znc;
+          members = [ defaultUser ];
+        };
+    };
 
   };
 }
diff --git a/nixos/modules/services/printing/cupsd.nix b/nixos/modules/services/printing/cupsd.nix
index cc35be49bc3..59306d625e6 100644
--- a/nixos/modules/services/printing/cupsd.nix
+++ b/nixos/modules/services/printing/cupsd.nix
@@ -288,9 +288,8 @@ in
 
   config = mkIf config.services.printing.enable {
 
-    users.users = singleton
-      { name = "cups";
-        uid = config.ids.uids.cups;
+    users.users.cups =
+      { uid = config.ids.uids.cups;
         group = "lp";
         description = "CUPS printing services";
       };
diff --git a/nixos/modules/services/scheduling/atd.nix b/nixos/modules/services/scheduling/atd.nix
index a32907647a0..93ed9231d3c 100644
--- a/nixos/modules/services/scheduling/atd.nix
+++ b/nixos/modules/services/scheduling/atd.nix
@@ -57,17 +57,13 @@ in
 
     security.pam.services.atd = {};
 
-    users.users = singleton
-      { name = "atd";
-        uid = config.ids.uids.atd;
+    users.users.atd =
+      { uid = config.ids.uids.atd;
         description = "atd user";
         home = "/var/empty";
       };
 
-    users.groups = singleton
-      { name = "atd";
-        gid = config.ids.gids.atd;
-      };
+    users.groups.atd.gid = config.ids.gids.atd;
 
     systemd.services.atd = {
       description = "Job Execution Daemon (atd)";
diff --git a/nixos/modules/services/scheduling/fcron.nix b/nixos/modules/services/scheduling/fcron.nix
index e43ca014e14..42bed21bf25 100644
--- a/nixos/modules/services/scheduling/fcron.nix
+++ b/nixos/modules/services/scheduling/fcron.nix
@@ -86,7 +86,8 @@ in
 
     services.fcron.systab = systemCronJobs;
 
-    environment.etc =
+    environment.etc = listToAttrs
+      (map (x: { name = x.target; value = x; })
       [ (allowdeny "allow" (cfg.allow))
         (allowdeny "deny" cfg.deny)
         # see man 5 fcron.conf
@@ -112,7 +113,7 @@ in
           gid = config.ids.gids.fcron;
           mode = "0644";
         }
-      ];
+      ]);
 
     environment.systemPackages = [ pkgs.fcron ];
     users.users.fcron = {
diff --git a/nixos/modules/services/search/hound.nix b/nixos/modules/services/search/hound.nix
index 6740928db9a..7a44489efe6 100644
--- a/nixos/modules/services/search/hound.nix
+++ b/nixos/modules/services/search/hound.nix
@@ -88,19 +88,19 @@ in {
   };
 
   config = mkIf cfg.enable {
-    users.groups = optional (cfg.group == "hound") {
-      name = "hound";
-      gid = config.ids.gids.hound;
+    users.groups = optionalAttrs (cfg.group == "hound") {
+      hound.gid = config.ids.gids.hound;
     };
 
-    users.users = optional (cfg.user == "hound") {
-      name = "hound";
-      description = "hound code search";
-      createHome = true;
-      home = cfg.home;
-      group = cfg.group;
-      extraGroups = cfg.extraGroups;
-      uid = config.ids.uids.hound;
+    users.users = optionalAttrs (cfg.user == "hound") {
+      hound = {
+        description = "hound code search";
+        createHome = true;
+        home = cfg.home;
+        group = cfg.group;
+        extraGroups = cfg.extraGroups;
+        uid = config.ids.uids.hound;
+      };
     };
 
     systemd.services.hound = {
diff --git a/nixos/modules/services/search/kibana.nix b/nixos/modules/services/search/kibana.nix
index 43a63aa8fdc..2beb265ee5d 100644
--- a/nixos/modules/services/search/kibana.nix
+++ b/nixos/modules/services/search/kibana.nix
@@ -198,8 +198,7 @@ in {
 
     environment.systemPackages = [ cfg.package ];
 
-    users.users = singleton {
-      name = "kibana";
+    users.users.kibana = {
       uid = config.ids.uids.kibana;
       description = "Kibana service user";
       home = cfg.dataDir;
diff --git a/nixos/modules/services/search/solr.nix b/nixos/modules/services/search/solr.nix
index 5ef7d9893a4..b2176225493 100644
--- a/nixos/modules/services/search/solr.nix
+++ b/nixos/modules/services/search/solr.nix
@@ -100,18 +100,18 @@ in
       };
     };
 
-    users.users = optionalAttrs (cfg.user == "solr") (singleton
-      { name = "solr";
+    users.users = optionalAttrs (cfg.user == "solr") {
+      solr = {
         group = cfg.group;
         home = cfg.stateDir;
         createHome = true;
         uid = config.ids.uids.solr;
-      });
+      };
+    };
 
-    users.groups = optionalAttrs (cfg.group == "solr") (singleton
-      { name = "solr";
-        gid = config.ids.gids.solr;
-      });
+    users.groups = optionalAttrs (cfg.group == "solr") {
+      solr.gid = config.ids.gids.solr;
+    };
 
   };
 
diff --git a/nixos/modules/services/security/certmgr.nix b/nixos/modules/services/security/certmgr.nix
index e89078883eb..94c0ba14117 100644
--- a/nixos/modules/services/security/certmgr.nix
+++ b/nixos/modules/services/security/certmgr.nix
@@ -113,7 +113,7 @@ in
         otherCert = "/var/certmgr/specs/other-cert.json";
       }
       '';
-      type = with types; attrsOf (either (submodule {
+      type = with types; attrsOf (either path (submodule {
         options = {
           service = mkOption {
             type = nullOr str;
@@ -148,7 +148,7 @@ in
             description = "certmgr spec request object.";
           };
         };
-    }) path);
+    }));
       description = ''
         Certificate specs as described by:
         <link xlink:href="https://github.com/cloudflare/certmgr#certificate-specs" />
diff --git a/nixos/modules/services/security/clamav.nix b/nixos/modules/services/security/clamav.nix
index ef5bde7907e..aaf6fb0479b 100644
--- a/nixos/modules/services/security/clamav.nix
+++ b/nixos/modules/services/security/clamav.nix
@@ -83,18 +83,15 @@ in
   config = mkIf (cfg.updater.enable || cfg.daemon.enable) {
     environment.systemPackages = [ pkg ];
 
-    users.users = singleton {
-      name = clamavUser;
+    users.users.${clamavUser} = {
       uid = config.ids.uids.clamav;
       group = clamavGroup;
       description = "ClamAV daemon user";
       home = stateDir;
     };
 
-    users.groups = singleton {
-      name = clamavGroup;
-      gid = config.ids.gids.clamav;
-    };
+    users.groups.${clamavGroup} =
+      { gid = config.ids.gids.clamav; };
 
     environment.etc."clamav/freshclam.conf".source = freshclamConfigFile;
     environment.etc."clamav/clamd.conf".source = clamdConfigFile;
diff --git a/nixos/modules/services/security/fprot.nix b/nixos/modules/services/security/fprot.nix
index 47449039146..f203f2abc03 100644
--- a/nixos/modules/services/security/fprot.nix
+++ b/nixos/modules/services/security/fprot.nix
@@ -48,22 +48,18 @@ in {
     services.fprot.updater.licenseKeyfile = mkDefault "${pkgs.fprot}/opt/f-prot/license.key";
 
     environment.systemPackages = [ pkgs.fprot ];
-    environment.etc = singleton {
+    environment.etc."f-prot.conf" = {
       source = "${pkgs.fprot}/opt/f-prot/f-prot.conf";
-      target = "f-prot.conf";
     };
 
-    users.users = singleton
-      { name = fprotUser;
-        uid = config.ids.uids.fprot;
+    users.users.${fprotUser} =
+      { uid = config.ids.uids.fprot;
         description = "F-Prot daemon user";
         home = stateDir;
       };
 
-    users.groups = singleton
-      { name = fprotGroup;
-        gid = config.ids.gids.fprot;
-      };
+    users.groups.${fprotGroup} =
+      { gid = config.ids.gids.fprot; };
 
     services.cron.systemCronJobs = [ "*/${toString cfg.updater.frequency} * * * * root start fprot-updater" ];
 
diff --git a/nixos/modules/services/security/torify.nix b/nixos/modules/services/security/torify.nix
index 08da726437e..39551190dd3 100644
--- a/nixos/modules/services/security/torify.nix
+++ b/nixos/modules/services/security/torify.nix
@@ -25,6 +25,7 @@ in
     services.tor.tsocks = {
 
       enable = mkOption {
+        type = types.bool;
         default = false;
         description = ''
           Whether to build tsocks wrapper script to relay application traffic via Tor.
@@ -40,6 +41,7 @@ in
       };
 
       server = mkOption {
+        type = types.str;
         default = "localhost:9050";
         example = "192.168.0.20";
         description = ''
@@ -48,6 +50,7 @@ in
       };
 
       config = mkOption {
+        type = types.lines;
         default = "";
         description = ''
           Extra configuration. Contents will be added verbatim to TSocks
diff --git a/nixos/modules/services/security/torsocks.nix b/nixos/modules/services/security/torsocks.nix
index c60c745443b..47ac95c4626 100644
--- a/nixos/modules/services/security/torsocks.nix
+++ b/nixos/modules/services/security/torsocks.nix
@@ -112,10 +112,9 @@ in
   config = mkIf cfg.enable {
     environment.systemPackages = [ pkgs.torsocks (wrapTorsocks "torsocks-faster" cfg.fasterServer) ];
 
-    environment.etc =
-      [ { source = pkgs.writeText "torsocks.conf" (configFile cfg.server);
-          target = "tor/torsocks.conf";
-        }
-      ];
+    environment.etc."tor/torsocks.conf" =
+      {
+        source = pkgs.writeText "torsocks.conf" (configFile cfg.server);
+      };
   };
 }
diff --git a/nixos/modules/services/system/dbus.nix b/nixos/modules/services/system/dbus.nix
index 936646a5fd7..4a60fec1ca8 100644
--- a/nixos/modules/services/system/dbus.nix
+++ b/nixos/modules/services/system/dbus.nix
@@ -68,10 +68,7 @@ in
 
     environment.systemPackages = [ pkgs.dbus.daemon pkgs.dbus ];
 
-    environment.etc = singleton
-      { source = configDir;
-        target = "dbus-1";
-      };
+    environment.etc."dbus-1".source = configDir;
 
     users.users.messagebus = {
       uid = config.ids.uids.messagebus;
diff --git a/nixos/modules/services/system/localtime.nix b/nixos/modules/services/system/localtime.nix
index c3c0b432b49..74925c5e2c4 100644
--- a/nixos/modules/services/system/localtime.nix
+++ b/nixos/modules/services/system/localtime.nix
@@ -35,6 +35,10 @@ in {
     # Install the systemd unit.
     systemd.packages = [ pkgs.localtime.out ];
 
+    users.users.localtimed = {
+      description = "Taskserver user";
+    };
+
     systemd.services.localtime = {
       wantedBy = [ "multi-user.target" ];
       serviceConfig.Restart = "on-failure";
diff --git a/nixos/modules/services/torrent/transmission.nix b/nixos/modules/services/torrent/transmission.nix
index f7a88867b61..5ba72e8d773 100644
--- a/nixos/modules/services/torrent/transmission.nix
+++ b/nixos/modules/services/torrent/transmission.nix
@@ -118,7 +118,7 @@ in
       # 1) Only the "transmission" user and group have access to torrents.
       # 2) Optionally update/force specific fields into the configuration file.
       serviceConfig.ExecStartPre = preStart;
-      serviceConfig.ExecStart = "${pkgs.transmission}/bin/transmission-daemon -f --port ${toString config.services.transmission.port}";
+      serviceConfig.ExecStart = "${pkgs.transmission}/bin/transmission-daemon -f --port ${toString config.services.transmission.port} --config-dir ${settingsDir}";
       serviceConfig.ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
       serviceConfig.User = cfg.user;
       serviceConfig.Group = cfg.group;
@@ -129,19 +129,23 @@ in
     # It's useful to have transmission in path, e.g. for remote control
     environment.systemPackages = [ pkgs.transmission ];
 
-    users.users = optionalAttrs (cfg.user == "transmission") (singleton
-      { name = "transmission";
+    users.users = optionalAttrs (cfg.user == "transmission") ({
+      transmission = {
+        name = "transmission";
         group = cfg.group;
         uid = config.ids.uids.transmission;
         description = "Transmission BitTorrent user";
         home = homeDir;
         createHome = true;
-      });
+      };
+    });
 
-    users.groups = optionalAttrs (cfg.group == "transmission") (singleton
-      { name = "transmission";
+    users.groups = optionalAttrs (cfg.group == "transmission") ({
+      transmission = {
+        name = "transmission";
         gid = config.ids.gids.transmission;
-      });
+      };
+    });
 
     # AppArmor profile
     security.apparmor.profiles = mkIf apparmor [
diff --git a/nixos/modules/services/ttys/agetty.nix b/nixos/modules/services/ttys/agetty.nix
index f127d8a0276..f3a629f7af7 100644
--- a/nixos/modules/services/ttys/agetty.nix
+++ b/nixos/modules/services/ttys/agetty.nix
@@ -102,7 +102,7 @@ in
         enable = mkDefault config.boot.isContainer;
       };
 
-    environment.etc = singleton
+    environment.etc.issue =
       { # Friendly greeting on the virtual consoles.
         source = pkgs.writeText "issue" ''
 
@@ -110,7 +110,6 @@ in
           ${config.services.mingetty.helpLine}
 
         '';
-        target = "issue";
       };
 
   };
diff --git a/nixos/modules/services/web-apps/frab.nix b/nixos/modules/services/web-apps/frab.nix
index a9a30b40922..1b5890d6b0c 100644
--- a/nixos/modules/services/web-apps/frab.nix
+++ b/nixos/modules/services/web-apps/frab.nix
@@ -173,15 +173,13 @@ in
   config = mkIf cfg.enable {
     environment.systemPackages = [ frab-rake ];
 
-    users.users = [
-      { name = cfg.user;
-        group = cfg.group;
+    users.users.${cfg.user} =
+      { group = cfg.group;
         home = "${cfg.statePath}";
         isSystemUser = true;
-      }
-    ];
+      };
 
-    users.groups = [ { name = cfg.group; } ];
+    users.groups.${cfg.group} = { };
 
     systemd.tmpfiles.rules = [
       "d '${cfg.statePath}/system/attachments' - ${cfg.user} ${cfg.group} - -"
diff --git a/nixos/modules/services/web-apps/ihatemoney/default.nix b/nixos/modules/services/web-apps/ihatemoney/default.nix
new file mode 100644
index 00000000000..68769ac8c03
--- /dev/null
+++ b/nixos/modules/services/web-apps/ihatemoney/default.nix
@@ -0,0 +1,141 @@
+{ config, pkgs, lib, ... }:
+with lib;
+let
+  cfg = config.services.ihatemoney;
+  user = "ihatemoney";
+  group = "ihatemoney";
+  db = "ihatemoney";
+  python3 = config.services.uwsgi.package.python3;
+  pkg = python3.pkgs.ihatemoney;
+  toBool = x: if x then "True" else "False";
+  configFile = pkgs.writeText "ihatemoney.cfg" ''
+        from secrets import token_hex
+        # load a persistent secret key
+        SECRET_KEY_FILE = "/var/lib/ihatemoney/secret_key"
+        SECRET_KEY = ""
+        try:
+          with open(SECRET_KEY_FILE) as f:
+            SECRET_KEY = f.read()
+        except FileNotFoundError:
+          pass
+        if not SECRET_KEY:
+          print("ihatemoney: generating a new secret key")
+          SECRET_KEY = token_hex(50)
+          with open(SECRET_KEY_FILE, "w") as f:
+            f.write(SECRET_KEY)
+        del token_hex
+        del SECRET_KEY_FILE
+
+        # "normal" configuration
+        DEBUG = False
+        SQLALCHEMY_DATABASE_URI = '${
+          if cfg.backend == "sqlite"
+          then "sqlite:////var/lib/ihatemoney/ihatemoney.sqlite"
+          else "postgresql:///${db}"}'
+        SQLALCHEMY_TRACK_MODIFICATIONS = False
+        MAIL_DEFAULT_SENDER = ("${cfg.defaultSender.name}", "${cfg.defaultSender.email}")
+        ACTIVATE_DEMO_PROJECT = ${toBool cfg.enableDemoProject}
+        ADMIN_PASSWORD = "${toString cfg.adminHashedPassword /*toString null == ""*/}"
+        ALLOW_PUBLIC_PROJECT_CREATION = ${toBool cfg.enablePublicProjectCreation}
+        ACTIVATE_ADMIN_DASHBOARD = ${toBool cfg.enableAdminDashboard}
+
+        ${cfg.extraConfig}
+  '';
+in
+  {
+    options.services.ihatemoney = {
+      enable = mkEnableOption "ihatemoney webapp. Note that this will set uwsgi to emperor mode running as root";
+      backend = mkOption {
+        type = types.enum [ "sqlite" "postgresql" ];
+        default = "sqlite";
+        description = ''
+          The database engine to use for ihatemoney.
+          If <literal>postgresql</literal> is selected, a database called
+          <literal>${db}</literal> will be created. If you later switch to
+          another backend, the database will not be removed.
+        '';
+      };
+      adminHashedPassword = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = "The hashed password of the administrator. To obtain it, run <literal>ihatemoney generate_password_hash</literal>";
+      };
+      uwsgiConfig = mkOption {
+        type = types.attrs;
+        example = {
+          http = ":8000";
+        };
+        description = "Additionnal configuration of the UWSGI vassal running ihatemoney. It should notably specify on which interfaces and ports the vassal should listen.";
+      };
+      defaultSender = {
+        name = mkOption {
+          type = types.str;
+          default = "Budget manager";
+          description = "The display name of the sender of ihatemoney emails";
+        };
+        email = mkOption {
+          type = types.str;
+          default = "ihatemoney@${config.networking.hostName}";
+          description = "The email of the sender of ihatemoney emails";
+        };
+      };
+      enableDemoProject = mkEnableOption "access to the demo project in ihatemoney";
+      enablePublicProjectCreation = mkEnableOption "permission for anyone to create projects in ihatemoney";
+      enableAdminDashboard = mkEnableOption "ihatemoney admin dashboard";
+      extraConfig = mkOption {
+        type = types.str;
+        default = "";
+        description = "Extra configuration appended to ihatemoney's configuration file. It is a python file, so pay attention to indentation.";
+      };
+    };
+    config = mkIf cfg.enable {
+      services.postgresql = mkIf (cfg.backend == "postgresql") {
+        enable = true;
+        ensureDatabases = [ db ];
+        ensureUsers = [ {
+          name = user;
+          ensurePermissions = {
+            "DATABASE ${db}" = "ALL PRIVILEGES";
+          };
+        } ];
+      };
+      systemd.services.postgresql = mkIf (cfg.backend == "postgresql") {
+        wantedBy = [ "uwsgi.service" ];
+        before = [ "uwsgi.service" ];
+      };
+      systemd.tmpfiles.rules = [
+        "d /var/lib/ihatemoney 770 ${user} ${group}"
+      ];
+      users = {
+        users.${user} = {
+          isSystemUser = true;
+          inherit group;
+        };
+        groups.${group} = {};
+      };
+      services.uwsgi = {
+        enable = true;
+        plugins = [ "python3" ];
+        # the vassal needs to be able to setuid
+        user = "root";
+        group = "root";
+        instance = {
+          type = "emperor";
+          vassals.ihatemoney = {
+            type = "normal";
+            strict = true;
+            uid = user;
+            gid = group;
+            # apparently flask uses threads: https://github.com/spiral-project/ihatemoney/commit/c7815e48781b6d3a457eaff1808d179402558f8c
+            enable-threads = true;
+            module = "wsgi:application";
+            chdir = "${pkg}/${pkg.pythonModule.sitePackages}/ihatemoney";
+            env = [ "IHATEMONEY_SETTINGS_FILE_PATH=${configFile}" ];
+            pythonPackages = self: [ self.ihatemoney ];
+          } // cfg.uwsgiConfig;
+        };
+      };
+    };
+  }
+
+
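A minimal usage sketch for the new module, assuming the ":8000" http binding from the uwsgiConfig example above; all values are illustrative:

  services.ihatemoney = {
    enable = true;
    backend = "postgresql";                 # default is "sqlite"
    uwsgiConfig = { http = ":8000"; };      # where the uWSGI vassal listens
    defaultSender.email = "budget@example.org";
  };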
diff --git a/nixos/modules/services/web-apps/mattermost.nix b/nixos/modules/services/web-apps/mattermost.nix
index 8c7fc4056ad..41c52b9653b 100644
--- a/nixos/modules/services/web-apps/mattermost.nix
+++ b/nixos/modules/services/web-apps/mattermost.nix
@@ -146,17 +146,17 @@ in
 
   config = mkMerge [
     (mkIf cfg.enable {
-      users.users = optionalAttrs (cfg.user == "mattermost") (singleton {
-        name = "mattermost";
-        group = cfg.group;
-        uid = config.ids.uids.mattermost;
-        home = cfg.statePath;
-      });
-
-      users.groups = optionalAttrs (cfg.group == "mattermost") (singleton {
-        name = "mattermost";
-        gid = config.ids.gids.mattermost;
-      });
+      users.users = optionalAttrs (cfg.user == "mattermost") {
+        mattermost = {
+          group = cfg.group;
+          uid = config.ids.uids.mattermost;
+          home = cfg.statePath;
+        };
+      };
+
+      users.groups = optionalAttrs (cfg.group == "mattermost") {
+        mattermost.gid = config.ids.gids.mattermost;
+      };
 
       services.postgresql.enable = cfg.localDatabaseCreate;
 
diff --git a/nixos/modules/services/web-apps/nextcloud.nix b/nixos/modules/services/web-apps/nextcloud.nix
index e3a2db398e6..d79f2bb735f 100644
--- a/nixos/modules/services/web-apps/nextcloud.nix
+++ b/nixos/modules/services/web-apps/nextcloud.nix
@@ -62,7 +62,7 @@ in {
     https = mkOption {
       type = types.bool;
       default = false;
-      description = "Enable if there is a TLS terminating proxy in front of nextcloud.";
+      description = "Use https for generated links.";
     };
 
     maxUploadSize = mkOption {
@@ -229,6 +229,15 @@ in {
         '';
       };
 
+      trustedProxies = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        description = ''
+          Trusted proxies to configure if the Nextcloud installation is running
+          behind a reverse proxy, to protect against e.g. IP spoofing.
+        '';
+      };
+
       overwriteProtocol = mkOption {
         type = types.nullOr (types.enum [ "http" "https" ]);
         default = null;
@@ -352,6 +361,7 @@ in {
               ${optionalString (c.dbpassFile != null) "'dbpassword' => nix_read_pwd(),"}
               'dbtype' => '${c.dbtype}',
               'trusted_domains' => ${writePhpArrary ([ cfg.hostName ] ++ c.extraTrustedDomains)},
+              'trusted_proxies' => ${writePhpArrary (c.trustedProxies)},
             ];
           '';
           occInstallCmd = let
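A hedged sketch of how the new trustedProxies option combines with the existing https and overwriteProtocol options for an instance behind a reverse proxy; the address is illustrative:

  services.nextcloud = {
    https = true;                                  # generate https links
    config.trustedProxies = [ "192.168.0.10" ];    # address of the reverse proxy
    config.overwriteProtocol = "https";
  };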
diff --git a/nixos/modules/services/web-servers/apache-httpd/default.nix b/nixos/modules/services/web-servers/apache-httpd/default.nix
index 8e3be316298..fd17e4b54f0 100644
--- a/nixos/modules/services/web-servers/apache-httpd/default.nix
+++ b/nixos/modules/services/web-servers/apache-httpd/default.nix
@@ -567,7 +567,7 @@ in
 
       sslProtocols = mkOption {
         type = types.str;
-        default = "All -SSLv2 -SSLv3 -TLSv1";
+        default = "All -SSLv2 -SSLv3 -TLSv1 -TLSv1.1";
         example = "All -SSLv2 -SSLv3";
         description = "Allowed SSL/TLS protocol versions.";
       };
@@ -606,17 +606,17 @@ in
       }
     ];
 
-    users.users = optionalAttrs (mainCfg.user == "wwwrun") (singleton
-      { name = "wwwrun";
+    users.users = optionalAttrs (mainCfg.user == "wwwrun") {
+      wwwrun = {
         group = mainCfg.group;
         description = "Apache httpd user";
         uid = config.ids.uids.wwwrun;
-      });
+      };
+    };
 
-    users.groups = optionalAttrs (mainCfg.group == "wwwrun") (singleton
-      { name = "wwwrun";
-        gid = config.ids.gids.wwwrun;
-      });
+    users.groups = optionalAttrs (mainCfg.group == "wwwrun") {
+      wwwrun.gid = config.ids.gids.wwwrun;
+    };
 
     security.acme.certs = mapAttrs (name: hostOpts: {
       user = mainCfg.user;
@@ -629,6 +629,9 @@ in
 
     environment.systemPackages = [httpd];
 
+    # required for "apachectl configtest"
+    environment.etc."httpd/httpd.conf".source = httpdConf;
+
     services.httpd.phpOptions =
       ''
         ; Needed for PHP's mail() function.
diff --git a/nixos/modules/services/web-servers/nginx/default.nix b/nixos/modules/services/web-servers/nginx/default.nix
index ada7a25604c..c8602e5975b 100644
--- a/nixos/modules/services/web-servers/nginx/default.nix
+++ b/nixos/modules/services/web-servers/nginx/default.nix
@@ -178,6 +178,8 @@ let
     then "/etc/nginx/nginx.conf"
     else configFile;
 
+  execCommand = "${cfg.package}/bin/nginx -c '${configPath}' -p '${cfg.stateDir}'";
+
   vhosts = concatStringsSep "\n" (mapAttrsToList (vhostName: vhost:
     let
         onlySSL = vhost.onlySSL || vhost.enableSSL;
@@ -682,10 +684,10 @@ in
       stopIfChanged = false;
       preStart = ''
         ${cfg.preStart}
-        ${cfg.package}/bin/nginx -c '${configPath}' -p '${cfg.stateDir}' -t
+        ${execCommand} -t
       '';
       serviceConfig = {
-        ExecStart = "${cfg.package}/bin/nginx -c '${configPath}' -p '${cfg.stateDir}'";
+        ExecStart = execCommand;
         ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
         Restart = "always";
         RestartSec = "10s";
@@ -706,11 +708,18 @@ in
     };
 
     systemd.services.nginx-config-reload = mkIf cfg.enableReload {
-      wantedBy = [ "nginx.service" ];
+      wants = [ "nginx.service" ];
+      wantedBy = [ "multi-user.target" ];
       restartTriggers = [ configFile ];
+      # Commented out, because it can cause extra delays during activation for configurations like:
+      #      services.nginx.virtualHosts."_".locations."/".proxyPass = "http://blabla:3000";
+      # stopIfChanged = false;
+      serviceConfig.Type = "oneshot";
+      serviceConfig.TimeoutSec = 60;
       script = ''
         if ${pkgs.systemd}/bin/systemctl -q is-active nginx.service ; then
-          ${pkgs.systemd}/bin/systemctl reload nginx.service
+          ${execCommand} -t && \
+            ${pkgs.systemd}/bin/systemctl reload nginx.service
         fi
       '';
       serviceConfig.RemainAfterExit = true;
@@ -731,15 +740,16 @@ in
         listToAttrs acmePairs
     );
 
-    users.users = optionalAttrs (cfg.user == "nginx") (singleton
-      { name = "nginx";
+    users.users = optionalAttrs (cfg.user == "nginx") {
+      nginx = {
         group = cfg.group;
         uid = config.ids.uids.nginx;
-      });
+      };
+    };
+
+    users.groups = optionalAttrs (cfg.group == "nginx") {
+      nginx.gid = config.ids.gids.nginx;
+    };
 
-    users.groups = optionalAttrs (cfg.group == "nginx") (singleton
-      { name = "nginx";
-        gid = config.ids.gids.nginx;
-      });
   };
 }
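With the change above, the reload unit first validates the configuration (nginx -t) and only then reloads the running instance. A minimal sketch of the option that enables this path:

  services.nginx = {
    enable = true;
    enableReload = true;   # reload instead of restart when only the config changes
  };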
diff --git a/nixos/modules/services/web-servers/nginx/gitweb.nix b/nixos/modules/services/web-servers/nginx/gitweb.nix
index 272fd148018..f7fb07bb797 100644
--- a/nixos/modules/services/web-servers/nginx/gitweb.nix
+++ b/nixos/modules/services/web-servers/nginx/gitweb.nix
@@ -3,8 +3,9 @@
 with lib;
 
 let
-  cfg = config.services.gitweb;
-  package = pkgs.gitweb.override (optionalAttrs cfg.gitwebTheme {
+  cfg = config.services.nginx.gitweb;
+  gitwebConfig = config.services.gitweb;
+  package = pkgs.gitweb.override (optionalAttrs gitwebConfig.gitwebTheme {
     gitwebTheme = true;
   });
 
@@ -17,13 +18,45 @@ in
       default = false;
       type = types.bool;
       description = ''
-        If true, enable gitweb in nginx. Access it at http://yourserver/gitweb
+        If true, enable gitweb in nginx.
+      '';
+    };
+
+    location = mkOption {
+      default = "/gitweb";
+      type = types.str;
+      description = ''
+        Location to serve gitweb on.
+      '';
+    };
+
+    user = mkOption {
+      default = "nginx";
+      type = types.str;
+      description = ''
+        Existing user that the CGI process will run as. (The default will almost surely do.)
+      '';
+    };
+
+    group = mkOption {
+      default = "nginx";
+      type = types.str;
+      description = ''
+        Group that the CGI process will belong to. (Set to <literal>config.services.gitolite.group</literal> if you are using gitolite.)
+      '';
+    };
+
+    virtualHost = mkOption {
+      default = "_";
+      type = types.str;
+      description = ''
+        nginx virtual host to serve gitweb on. The default is the catch-all virtual host.
       '';
     };
 
   };
 
-  config = mkIf config.services.nginx.gitweb.enable {
+  config = mkIf cfg.enable {
 
     systemd.services.gitweb = {
       description = "GitWeb service";
@@ -32,22 +65,22 @@ in
         FCGI_SOCKET_PATH = "/run/gitweb/gitweb.sock";
       };
       serviceConfig = {
-        User = "nginx";
-        Group = "nginx";
+        User = cfg.user;
+        Group = cfg.group;
         RuntimeDirectory = [ "gitweb" ];
       };
       wantedBy = [ "multi-user.target" ];
     };
 
     services.nginx = {
-      virtualHosts.default = {
-        locations."/gitweb/static/" = {
+      virtualHosts.${cfg.virtualHost} = {
+        locations."${cfg.location}/static/" = {
           alias = "${package}/static/";
         };
-        locations."/gitweb/" = {
+        locations."${cfg.location}/" = {
           extraConfig = ''
             include ${pkgs.nginx}/conf/fastcgi_params;
-            fastcgi_param GITWEB_CONFIG ${cfg.gitwebConfigFile};
+            fastcgi_param GITWEB_CONFIG ${gitwebConfig.gitwebConfigFile};
             fastcgi_pass unix:/run/gitweb/gitweb.sock;
           '';
         };
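A hedged example of the new gitweb options, e.g. serving gitweb on a dedicated virtual host for a gitolite installation (the host name is illustrative):

  services.nginx.gitweb = {
    enable = true;
    location = "/gitweb";
    virtualHost = "git.example.org";
    group = config.services.gitolite.group;   # as suggested in the option description
  };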
diff --git a/nixos/modules/services/web-servers/tomcat.nix b/nixos/modules/services/web-servers/tomcat.nix
index 68261c50324..6d12925829f 100644
--- a/nixos/modules/services/web-servers/tomcat.nix
+++ b/nixos/modules/services/web-servers/tomcat.nix
@@ -194,14 +194,10 @@ in
 
   config = mkIf config.services.tomcat.enable {
 
-    users.groups = singleton
-      { name = "tomcat";
-        gid = config.ids.gids.tomcat;
-      };
+    users.groups.tomcat.gid = config.ids.gids.tomcat;
 
-    users.users = singleton
-      { name = "tomcat";
-        uid = config.ids.uids.tomcat;
+    users.users.tomcat =
+      { uid = config.ids.uids.tomcat;
         description = "Tomcat user";
         home = "/homeless-shelter";
         extraGroups = cfg.extraGroups;
diff --git a/nixos/modules/services/web-servers/unit/default.nix b/nixos/modules/services/web-servers/unit/default.nix
index 8ce65756d1b..b0b837cd192 100644
--- a/nixos/modules/services/web-servers/unit/default.nix
+++ b/nixos/modules/services/web-servers/unit/default.nix
@@ -129,14 +129,14 @@ in {
       };
     };
 
-    users.users = optionalAttrs (cfg.user == "unit") (singleton {
-      name = "unit";
-      group = cfg.group;
+    users.users = optionalAttrs (cfg.user == "unit") {
+      unit.group = cfg.group;
       isSystemUser = true;
-    });
+    };
+
+    users.groups = optionalAttrs (cfg.group == "unit") {
+      unit = { };
+    };
 
-    users.groups = optionalAttrs (cfg.group == "unit") (singleton {
-      name = "unit";
-    });
   };
 }
diff --git a/nixos/modules/services/web-servers/uwsgi.nix b/nixos/modules/services/web-servers/uwsgi.nix
index af70f32f32d..3481b5e6040 100644
--- a/nixos/modules/services/web-servers/uwsgi.nix
+++ b/nixos/modules/services/web-servers/uwsgi.nix
@@ -5,10 +5,6 @@ with lib;
 let
   cfg = config.services.uwsgi;
 
-  uwsgi = pkgs.uwsgi.override {
-    plugins = cfg.plugins;
-  };
-
   buildCfg = name: c:
     let
       plugins =
@@ -23,8 +19,8 @@ let
       python =
         if hasPython2 && hasPython3 then
           throw "`plugins` attribute in UWSGI configuration shouldn't contain both python2 and python3"
-        else if hasPython2 then uwsgi.python2
-        else if hasPython3 then uwsgi.python3
+        else if hasPython2 then cfg.package.python2
+        else if hasPython3 then cfg.package.python3
         else null;
 
       pythonEnv = python.withPackages (c.pythonPackages or (self: []));
@@ -77,6 +73,11 @@ in {
         description = "Where uWSGI communication sockets can live";
       };
 
+      package = mkOption {
+        type = types.package;
+        internal = true;
+      };
+
       instance = mkOption {
         type = types.attrs;
         default = {
@@ -138,7 +139,7 @@ in {
       '';
       serviceConfig = {
         Type = "notify";
-        ExecStart = "${uwsgi}/bin/uwsgi --uid ${cfg.user} --gid ${cfg.group} --json ${buildCfg "server" cfg.instance}/server.json";
+        ExecStart = "${cfg.package}/bin/uwsgi --uid ${cfg.user} --gid ${cfg.group} --json ${buildCfg "server" cfg.instance}/server.json";
         ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
         ExecStop = "${pkgs.coreutils}/bin/kill -INT $MAINPID";
         NotifyAccess = "main";
@@ -146,15 +147,19 @@ in {
       };
     };
 
-    users.users = optionalAttrs (cfg.user == "uwsgi") (singleton
-      { name = "uwsgi";
+    users.users = optionalAttrs (cfg.user == "uwsgi") {
+      uwsgi = {
         group = cfg.group;
         uid = config.ids.uids.uwsgi;
-      });
+      };
+    };
+
+    users.groups = optionalAttrs (cfg.group == "uwsgi") {
+      uwsgi.gid = config.ids.gids.uwsgi;
+    };
 
-    users.groups = optionalAttrs (cfg.group == "uwsgi") (singleton
-      { name = "uwsgi";
-        gid = config.ids.gids.uwsgi;
-      });
+    services.uwsgi.package = pkgs.uwsgi.override {
+      inherit (cfg) plugins;
+    };
   };
 }
diff --git a/nixos/modules/services/x11/desktop-managers/enlightenment.nix b/nixos/modules/services/x11/desktop-managers/enlightenment.nix
index 04e82599b94..26b662a2a64 100644
--- a/nixos/modules/services/x11/desktop-managers/enlightenment.nix
+++ b/nixos/modules/services/x11/desktop-managers/enlightenment.nix
@@ -68,10 +68,7 @@ in
 
     security.wrappers = (import "${e.enlightenment}/e-wrappers.nix").security.wrappers;
 
-    environment.etc = singleton
-      { source = xcfg.xkbDir;
-        target = "X11/xkb";
-      };
+    environment.etc."X11/xkb".source = xcfg.xkbDir;
 
     fonts.fonts = [ pkgs.dejavu_fonts pkgs.ubuntu_font_family ];
 
diff --git a/nixos/modules/services/x11/desktop-managers/gnome3.nix b/nixos/modules/services/x11/desktop-managers/gnome3.nix
index 6d9bd284bc7..ba9906072b3 100644
--- a/nixos/modules/services/x11/desktop-managers/gnome3.nix
+++ b/nixos/modules/services/x11/desktop-managers/gnome3.nix
@@ -144,7 +144,7 @@ in
       services.gnome3.core-shell.enable = true;
       services.gnome3.core-utilities.enable = mkDefault true;
 
-      services.xserver.displayManager.sessionPackages = [ pkgs.gnome3.gnome-session ];
+      services.xserver.displayManager.sessionPackages = [ pkgs.gnome3.gnome-session.sessions ];
 
       environment.extraInit = ''
         ${concatMapStrings (p: ''
@@ -249,11 +249,17 @@ in
       services.system-config-printer.enable = (mkIf config.services.printing.enable (mkDefault true));
       services.telepathy.enable = mkDefault true;
 
-      systemd.packages = with pkgs.gnome3; [ vino gnome-session ];
+      systemd.packages = with pkgs.gnome3; [
+        gnome-session
+        gnome-shell
+        vino
+      ];
 
       services.avahi.enable = mkDefault true;
 
-      xdg.portal.extraPortals = [ pkgs.gnome3.gnome-shell ];
+      xdg.portal.extraPortals = [
+        pkgs.gnome3.gnome-shell
+      ];
 
       services.geoclue2.enable = mkDefault true;
       services.geoclue2.enableDemoAgent = false; # GNOME has its own geoclue agent
diff --git a/nixos/modules/services/x11/desktop-managers/plasma5.nix b/nixos/modules/services/x11/desktop-managers/plasma5.nix
index fce274477b6..2538858ac0f 100644
--- a/nixos/modules/services/x11/desktop-managers/plasma5.nix
+++ b/nixos/modules/services/x11/desktop-managers/plasma5.nix
@@ -183,10 +183,7 @@ in
         "/share"
       ];
 
-      environment.etc = singleton {
-        source = xcfg.xkbDir;
-        target = "X11/xkb";
-      };
+      environment.etc."X11/xkb".source = xcfg.xkbDir;
 
       # Enable GTK applications to load SVG icons
       services.xserver.gdk-pixbuf.modulePackages = [ pkgs.librsvg ];
diff --git a/nixos/modules/services/x11/display-managers/default.nix b/nixos/modules/services/x11/display-managers/default.nix
index 2d809b5cc9f..1efd0739376 100644
--- a/nixos/modules/services/x11/display-managers/default.nix
+++ b/nixos/modules/services/x11/display-managers/default.nix
@@ -380,7 +380,7 @@ in
         wms = filter (s: s.manage == "window") cfg.displayManager.session;
 
         # Script responsible for starting the window manager and the desktop manager.
-        xsession = wm: dm: pkgs.writeScript "xsession" ''
+        xsession = dm: wm: pkgs.writeScript "xsession" ''
           #! ${pkgs.bash}/bin/bash
 
           # Legacy session script used to construct .desktop files from
diff --git a/nixos/modules/services/x11/display-managers/gdm.nix b/nixos/modules/services/x11/display-managers/gdm.nix
index 6630f012f04..325023f4121 100644
--- a/nixos/modules/services/x11/display-managers/gdm.nix
+++ b/nixos/modules/services/x11/display-managers/gdm.nix
@@ -159,22 +159,25 @@ in
           GDM_X_SESSION_WRAPPER = "${xSessionWrapper}";
         };
         execCmd = "exec ${gdm}/bin/gdm";
-        preStart = optionalString config.hardware.pulseaudio.enable ''
-          mkdir -p /run/gdm/.config/pulse
-          ln -sf ${pulseConfig} /run/gdm/.config/pulse/default.pa
-          chown -R gdm:gdm /run/gdm/.config
-        '' + optionalString config.services.gnome3.gnome-initial-setup.enable ''
-          # Create stamp file for gnome-initial-setup to prevent run.
-          mkdir -p /run/gdm/.config
-          cat - > /run/gdm/.config/gnome-initial-setup-done <<- EOF
-          yes
-          EOF
-        '' + optionalString (defaultSessionName != null) ''
+        preStart = optionalString (defaultSessionName != null) ''
           # Set default session in session chooser to a specified values – basically ignore session history.
           ${setSessionScript}/bin/set-session ${cfg.sessionData.autologinSession}
         '';
       };
 
+    systemd.tmpfiles.rules = [
+      "d /run/gdm/.config 0711 gdm gdm -"
+    ] ++ optionals config.hardware.pulseaudio.enable [
+      "L+ /run/gdm/.config/pulse - - - - ${pulseConfig}"
+    ] ++ optionals config.services.gnome3.gnome-initial-setup.enable [
+      # Create stamp file for gnome-initial-setup to prevent it from starting in GDM.
+      "f /run/gdm/.config/gnome-initial-setup-done 0711 gdm gdm - yes"
+    ];
+
+    # Otherwise GDM will not be able to start correctly and display Wayland sessions
+    systemd.packages = with pkgs.gnome3; [ gnome-session gnome-shell ];
+    environment.systemPackages = [ pkgs.gnome3.adwaita-icon-theme ];
+
     systemd.services.display-manager.wants = [
       # Because sd_login_monitor_new requires /run/systemd/machines
       "systemd-machined.service"
diff --git a/nixos/modules/services/x11/extra-layouts.nix b/nixos/modules/services/x11/extra-layouts.nix
index 1af98a1318b..f48216ff446 100644
--- a/nixos/modules/services/x11/extra-layouts.nix
+++ b/nixos/modules/services/x11/extra-layouts.nix
@@ -141,7 +141,7 @@ in
         });
 
         xkbcomp = super.xorg.xkbcomp.overrideAttrs (old: {
-          configureFlags = "--with-xkb-config-root=${self.xkb_patched}/share/X11/xkb";
+          configureFlags = [ "--with-xkb-config-root=${self.xkb_patched}/share/X11/xkb" ];
         });
 
       };
@@ -158,6 +158,12 @@ in
 
     });
 
+    environment.sessionVariables = {
+      # Runtime override supported by multiple libraries, e.g. libxkbcommon.
+      # https://xkbcommon.org/doc/current/group__include-path.html
+      XKB_CONFIG_ROOT = "${pkgs.xkb_patched}/etc/X11/xkb";
+    };
+
     services.xserver = {
       xkbDir = "${pkgs.xkb_patched}/etc/X11/xkb";
       exportConfiguration = config.services.xserver.displayManager.startx.enable;
diff --git a/nixos/modules/services/x11/hardware/libinput.nix b/nixos/modules/services/x11/hardware/libinput.nix
index 71065dfc26b..f6b0e7c09f5 100644
--- a/nixos/modules/services/x11/hardware/libinput.nix
+++ b/nixos/modules/services/x11/hardware/libinput.nix
@@ -198,12 +198,13 @@ in {
 
     environment.systemPackages = [ pkgs.xorg.xf86inputlibinput ];
 
-    environment.etc = [
-      (let cfgPath = "X11/xorg.conf.d/40-libinput.conf"; in {
-        source = pkgs.xorg.xf86inputlibinput.out + "/share/" + cfgPath;
-        target = cfgPath;
-      })
-    ];
+    environment.etc =
+      let cfgPath = "X11/xorg.conf.d/40-libinput.conf";
+      in {
+        ${cfgPath} = {
+          source = pkgs.xorg.xf86inputlibinput.out + "/share/" + cfgPath;
+        };
+      };
 
     services.udev.packages = [ pkgs.libinput.out ];
 
diff --git a/nixos/modules/services/x11/hardware/multitouch.nix b/nixos/modules/services/x11/hardware/multitouch.nix
deleted file mode 100644
index c03bb3b494f..00000000000
--- a/nixos/modules/services/x11/hardware/multitouch.nix
+++ /dev/null
@@ -1,94 +0,0 @@
-{ config, lib, pkgs, ... }:
-
-with lib;
-
-let cfg = config.services.xserver.multitouch;
-    disabledTapConfig = ''
-      Option "MaxTapTime" "0"
-      Option "MaxTapMove" "0"
-      Option "TapButton1" "0"
-      Option "TapButton2" "0"
-      Option "TapButton3" "0"
-    '';
-in {
-
-  options = {
-
-    services.xserver.multitouch = {
-
-      enable = mkOption {
-        default = false;
-        description = "Whether to enable multitouch touchpad support.";
-      };
-
-      invertScroll = mkOption {
-        default = false;
-        type = types.bool;
-        description = "Whether to invert scrolling direction à la OSX Lion";
-      };
-
-      ignorePalm = mkOption {
-        default = false;
-        type = types.bool;
-        description = "Whether to ignore touches detected as being the palm (i.e when typing)";
-      };
-
-      tapButtons = mkOption {
-        type = types.bool;
-        default = true;
-        description = "Whether to enable tap buttons.";
-      };
-
-      buttonsMap = mkOption {
-        type = types.listOf types.int;
-        default = [3 2 0];
-        example = [1 3 2];
-        description = "Remap touchpad buttons.";
-        apply = map toString;
-      };
-
-      additionalOptions = mkOption {
-        type = types.str;
-        default = "";
-        example = ''
-          Option "ScaleDistance" "50"
-          Option "RotateDistance" "60"
-        '';
-        description = ''
-          Additional options for mtrack touchpad driver.
-        '';
-      };
-
-    };
-
-  };
-
-  config = mkIf cfg.enable {
-
-    services.xserver.modules = [ pkgs.xf86_input_mtrack ];
-
-    services.xserver.config =
-      ''
-        # Automatically enable the multitouch driver
-        Section "InputClass"
-          MatchIsTouchpad "on"
-          Identifier "Touchpads"
-          Driver "mtrack"
-          Option "IgnorePalm" "${boolToString cfg.ignorePalm}"
-          Option "ClickFinger1" "${builtins.elemAt cfg.buttonsMap 0}"
-          Option "ClickFinger2" "${builtins.elemAt cfg.buttonsMap 1}"
-          Option "ClickFinger3" "${builtins.elemAt cfg.buttonsMap 2}"
-          ${optionalString (!cfg.tapButtons) disabledTapConfig}
-          ${optionalString cfg.invertScroll ''
-            Option "ScrollUpButton" "5"
-            Option "ScrollDownButton" "4"
-            Option "ScrollLeftButton" "7"
-            Option "ScrollRightButton" "6"
-          ''}
-          ${cfg.additionalOptions}
-        EndSection
-      '';
-
-  };
-
-}
diff --git a/nixos/modules/services/x11/compton.nix b/nixos/modules/services/x11/picom.nix
index 61174672e2d..e3bd21be73e 100644
--- a/nixos/modules/services/x11/compton.nix
+++ b/nixos/modules/services/x11/picom.nix
@@ -5,7 +5,7 @@ with builtins;
 
 let
 
-  cfg = config.services.compton;
+  cfg = config.services.picom;
 
   pairOf = x: with types; addCheck (listOf x) (y: length y == 2);
 
@@ -31,20 +31,24 @@ let
                 (key: value: "${toString key}=${mkValueString value};")
                 v)
             + " }"
-          else abort "compton.mkValueString: unexpected type (v = ${v})";
+          else abort "picom.mkValueString: unexpected type (v = ${v})";
       in "${escape [ sep ] k}${sep}${mkValueString v};")
       attrs);
 
-  configFile = pkgs.writeText "compton.conf" (toConf cfg.settings);
+  configFile = pkgs.writeText "picom.conf" (toConf cfg.settings);
 
 in {
 
-  options.services.compton = {
+  imports = [
+    (mkAliasOptionModule [ "services" "compton" ] [ "services" "picom" ])
+  ];
+
+  options.services.picom = {
     enable = mkOption {
       type = types.bool;
       default = false;
       description = ''
-        Whether of not to enable Compton as the X.org composite manager.
+        Whether or not to enable Picom as the X.org composite manager.
       '';
     };
 
@@ -85,7 +89,7 @@ in {
       ];
       description = ''
         List of conditions of windows that should not be faded.
-        See <literal>compton(1)</literal> man page for more examples.
+        See <literal>picom(1)</literal> man page for more examples.
       '';
     };
 
@@ -125,7 +129,7 @@ in {
       ];
       description = ''
         List of conditions of windows that should have no shadow.
-        See <literal>compton(1)</literal> man page for more examples.
+        See <literal>picom(1)</literal> man page for more examples.
       '';
     };
 
@@ -192,7 +196,7 @@ in {
       apply = x:
         let
           res = x != "none";
-          msg = "The type of services.compton.vSync has changed to bool:"
+          msg = "The type of services.picom.vSync has changed to bool:"
                 + " interpreting ${x} as ${boolToString res}";
         in
           if isBool x then x
@@ -222,13 +226,13 @@ in {
       type = loaOf (types.either configTypes (loaOf (types.either configTypes (loaOf configTypes))));
       default = {};
       description = ''
-        Additional Compton configuration.
+        Additional Picom configuration.
       '';
     };
   };
 
   config = mkIf cfg.enable {
-    services.compton.settings = let
+    services.picom.settings = let
       # Hard conversion to float, literally lib.toInt but toFloat
       toFloat = str: let
         may_be_float = builtins.fromJSON str;
@@ -264,8 +268,8 @@ in {
       refresh-rate     = mkDefault cfg.refreshRate;
     };
 
-    systemd.user.services.compton = {
-      description = "Compton composite manager";
+    systemd.user.services.picom = {
+      description = "Picom composite manager";
       wantedBy = [ "graphical-session.target" ];
       partOf = [ "graphical-session.target" ];
 
@@ -275,13 +279,13 @@ in {
       };
 
       serviceConfig = {
-        ExecStart = "${pkgs.compton}/bin/compton --config ${configFile}";
+        ExecStart = "${pkgs.picom}/bin/picom --config ${configFile}";
         RestartSec = 3;
         Restart = "always";
       };
     };
 
-    environment.systemPackages = [ pkgs.compton ];
+    environment.systemPackages = [ pkgs.picom ];
   };
 
   meta.maintainers = with lib.maintainers; [ rnhmjoj ];
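Thanks to the mkAliasOptionModule call above, existing configurations keep working unchanged. A minimal sketch of both spellings:

  services.picom.enable = true;
  # the legacy path is still accepted through the alias:
  # services.compton.enable = true;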
diff --git a/nixos/modules/services/x11/unclutter.nix b/nixos/modules/services/x11/unclutter.nix
index 2478aaabb79..c0868604a68 100644
--- a/nixos/modules/services/x11/unclutter.nix
+++ b/nixos/modules/services/x11/unclutter.nix
@@ -32,7 +32,7 @@ in {
       default = 1;
     };
 
-    threeshold = mkOption {
+    threshold = mkOption {
       description = "Minimum number of pixels considered cursor movement";
       type = types.int;
       default = 1;
@@ -72,6 +72,11 @@ in {
     };
   };
 
+  imports = [
+    (mkRenamedOptionModule [ "services" "unclutter" "threeshold" ]
+                           [ "services"  "unclutter" "threshold" ])
+  ];
+
   meta.maintainers = with lib.maintainers; [ rnhmjoj ];
 
 }
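With the rename module above, the misspelled option is still accepted (with a warning), while new configurations can use the corrected name. A minimal sketch:

  services.unclutter = {
    enable = true;
    threshold = 1;   # minimum cursor movement, in pixels
  };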
diff --git a/nixos/modules/services/x11/xserver.nix b/nixos/modules/services/x11/xserver.nix
index 1f6ee7cfffd..7029919170a 100644
--- a/nixos/modules/services/x11/xserver.nix
+++ b/nixos/modules/services/x11/xserver.nix
@@ -331,9 +331,9 @@ in
       };
 
       xkbOptions = mkOption {
-        type = types.str;
+        type = types.commas;
         default = "terminate:ctrl_alt_bksp";
-        example = "grp:caps_toggle, grp_led:scroll";
+        example = "grp:caps_toggle,grp_led:scroll";
         description = ''
           X keyboard options; layout switching goes here.
         '';
@@ -590,19 +590,15 @@ in
     ];
 
     environment.etc =
-      (optionals cfg.exportConfiguration
-        [ { source = "${configFile}";
-            target = "X11/xorg.conf";
-          }
+      (optionalAttrs cfg.exportConfiguration
+        {
+          "X11/xorg.conf".source = "${configFile}";
           # -xkbdir command line option does not seems to be passed to xkbcomp.
-          { source = "${cfg.xkbDir}";
-            target = "X11/xkb";
-          }
-        ])
+          "X11/xkb".source = "${cfg.xkbDir}";
+        })
       # localectl looks into 00-keyboard.conf
-      ++ [
-        {
-          text = ''
+      //{
+          "X11/xorg.conf.d/00-keyboard.conf".text = ''
             Section "InputClass"
               Identifier "Keyboard catchall"
               MatchIsKeyboard "on"
@@ -612,16 +608,12 @@ in
               Option "XkbVariant" "${cfg.xkbVariant}"
             EndSection
           '';
-          target = "X11/xorg.conf.d/00-keyboard.conf";
         }
-      ]
       # Needed since 1.18; see https://bugs.freedesktop.org/show_bug.cgi?id=89023#c5
-      ++ (let cfgPath = "/X11/xorg.conf.d/10-evdev.conf"; in
-        [{
-          source = xorg.xf86inputevdev.out + "/share" + cfgPath;
-          target = cfgPath;
-        }]
-      );
+      // (let cfgPath = "/X11/xorg.conf.d/10-evdev.conf"; in
+        {
+          ${cfgPath}.source = xorg.xf86inputevdev.out + "/share" + cfgPath;
+        });
 
     environment.systemPackages =
       [ xorg.xorgserver.out
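The xkbOptions change above replaces the free-form string type with types.commas, which merges values contributed by different modules by joining them with commas; the example drops the space after the comma accordingly. A minimal sketch matching the updated example:

  services.xserver.xkbOptions = "grp:caps_toggle,grp_led:scroll";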
diff --git a/nixos/modules/system/boot/kernel.nix b/nixos/modules/system/boot/kernel.nix
index 8a309f3bc5f..6edb9082e75 100644
--- a/nixos/modules/system/boot/kernel.nix
+++ b/nixos/modules/system/boot/kernel.nix
@@ -256,9 +256,8 @@ in
 
     # Create /etc/modules-load.d/nixos.conf, which is read by
     # systemd-modules-load.service to load required kernel modules.
-    environment.etc = singleton
-      { target = "modules-load.d/nixos.conf";
-        source = kernelModulesConf;
+    environment.etc =
+      { "modules-load.d/nixos.conf".source = kernelModulesConf;
       };
 
     systemd.services.systemd-modules-load =
diff --git a/nixos/modules/system/boot/networkd.nix b/nixos/modules/system/boot/networkd.nix
index 58d914d0810..3e289a63139 100644
--- a/nixos/modules/system/boot/networkd.nix
+++ b/nixos/modules/system/boot/networkd.nix
@@ -872,10 +872,10 @@ let
         '';
     };
 
-  unitFiles = map (name: {
-    target = "systemd/network/${name}";
-    source = "${cfg.units.${name}.unit}/${name}";
-  }) (attrNames cfg.units);
+  unitFiles = listToAttrs (map (name: {
+    name = "systemd/network/${name}";
+    value.source = "${cfg.units.${name}.unit}/${name}";
+  }) (attrNames cfg.units));
 in
 
 {
@@ -938,7 +938,7 @@ in
 
     systemd.services.systemd-networkd = {
       wantedBy = [ "multi-user.target" ];
-      restartTriggers = map (f: f.source) (unitFiles);
+      restartTriggers = attrNames unitFiles;
       # prevent race condition with interface renaming (#39069)
       requires = [ "systemd-udev-settle.service" ];
       after = [ "systemd-udev-settle.service" ];
diff --git a/nixos/modules/system/boot/systemd-lib.nix b/nixos/modules/system/boot/systemd-lib.nix
index 28ad4f121bb..fd1a5b9f62c 100644
--- a/nixos/modules/system/boot/systemd-lib.nix
+++ b/nixos/modules/system/boot/systemd-lib.nix
@@ -147,7 +147,13 @@ in rec {
       done
 
       # Symlink all units provided listed in systemd.packages.
-      for i in ${toString cfg.packages}; do
+      packages="${toString cfg.packages}"
+
+      # Filter duplicate directories
+      declare -A unique_packages
+      for k in $packages ; do unique_packages[$k]=1 ; done
+
+      for i in ''${!unique_packages[@]}; do
         for fn in $i/etc/systemd/${type}/* $i/lib/systemd/${type}/*; do
           if ! [[ "$fn" =~ .wants$ ]]; then
             if [[ -d "$fn" ]]; then
diff --git a/nixos/modules/system/boot/systemd.nix b/nixos/modules/system/boot/systemd.nix
index 7951dcc816a..c438bb216e7 100644
--- a/nixos/modules/system/boot/systemd.nix
+++ b/nixos/modules/system/boot/systemd.nix
@@ -240,7 +240,7 @@ let
   serviceConfig = { name, config, ... }: {
     config = mkMerge
       [ { # Default path for systemd services.  Should be quite minimal.
-          path =
+          path = mkAfter
             [ pkgs.coreutils
               pkgs.findutils
               pkgs.gnugrep
diff --git a/nixos/modules/tasks/powertop.nix b/nixos/modules/tasks/powertop.nix
index 609831506e1..e8064f9fa80 100644
--- a/nixos/modules/tasks/powertop.nix
+++ b/nixos/modules/tasks/powertop.nix
@@ -15,6 +15,7 @@ in {
     systemd.services = {
       powertop = {
         wantedBy = [ "multi-user.target" ];
+        after = [ "multi-user.target" ];
         description = "Powertop tunings";
         path = [ pkgs.kmod ];
         serviceConfig = {
diff --git a/nixos/modules/virtualisation/containers.nix b/nixos/modules/virtualisation/containers.nix
index 09678ce9ea7..02de5801da2 100644
--- a/nixos/modules/virtualisation/containers.nix
+++ b/nixos/modules/virtualisation/containers.nix
@@ -225,12 +225,6 @@ let
           fi
           ${concatStringsSep "\n" (mapAttrsToList renderExtraVeth cfg.extraVeths)}
         fi
-
-        # Get the leader PID so that we can signal it in
-        # preStop. We can't use machinectl there because D-Bus
-        # might be shutting down. FIXME: in systemd 219 we can
-        # just signal systemd-nspawn to do a clean shutdown.
-        machinectl show "$INSTANCE" | sed 's/Leader=\(.*\)/\1/;t;d' > "/run/containers/$INSTANCE.pid"
       ''
   );
 
@@ -715,14 +709,7 @@ in
 
       postStart = postStartScript dummyConfig;
 
-      preStop =
-        ''
-          pid="$(cat /run/containers/$INSTANCE.pid)"
-          if [ -n "$pid" ]; then
-            kill -RTMIN+4 "$pid"
-          fi
-          rm -f "/run/containers/$INSTANCE.pid"
-        '';
+      preStop = "machinectl poweroff $INSTANCE";
 
       restartIfChanged = false;
 
diff --git a/nixos/modules/virtualisation/docker-containers.nix b/nixos/modules/virtualisation/docker-containers.nix
index 59b0943f591..760cb9122a2 100644
--- a/nixos/modules/virtualisation/docker-containers.nix
+++ b/nixos/modules/virtualisation/docker-containers.nix
@@ -186,7 +186,7 @@ let
         ++ map escapeShellArg container.cmd
       );
       ExecStartPre = "-${pkgs.docker}/bin/docker rm -f %n";
-      ExecStop = "${pkgs.docker}/bin/docker stop %n";
+      ExecStop = ''${pkgs.bash}/bin/sh -c "[ $SERVICE_RESULT = success ] || ${pkgs.docker}/bin/docker stop %n"'';
       ExecStopPost = "-${pkgs.docker}/bin/docker rm -f %n";
 
       ### There is no generalized way of supporting `reload` for docker
diff --git a/nixos/modules/virtualisation/xen-dom0.nix b/nixos/modules/virtualisation/xen-dom0.nix
index 6fd54c52758..7f0af9901b9 100644
--- a/nixos/modules/virtualisation/xen-dom0.nix
+++ b/nixos/modules/virtualisation/xen-dom0.nix
@@ -233,26 +233,19 @@ in
 
 
     environment.etc =
-      [ { source = "${cfg.package}/etc/xen/xl.conf";
-          target = "xen/xl.conf";
-        }
-        { source = "${cfg.package}/etc/xen/scripts";
-          target = "xen/scripts";
-        }
-        { text = ''
-            source ${cfg.package}/etc/default/xendomains
-
-            ${cfg.domains.extraConfig}
-          '';
-          target = "default/xendomains";
-        }
-      ]
-      ++ lib.optionals (builtins.compareVersions cfg.package.version "4.10" >= 0) [
+      {
+        "xen/xl.conf".source = "${cfg.package}/etc/xen/xl.conf";
+        "xen/scripts".source = "${cfg.package}/etc/xen/scripts";
+        "default/xendomains".text = ''
+          source ${cfg.package}/etc/default/xendomains
+
+          ${cfg.domains.extraConfig}
+        '';
+      }
+      // optionalAttrs (builtins.compareVersions cfg.package.version "4.10" >= 0) {
         # in V 4.10 oxenstored requires /etc/xen/oxenstored.conf to start
-        { source = "${cfg.package}/etc/xen/oxenstored.conf";
-          target = "xen/oxenstored.conf";
-        }
-      ];
+        "xen/oxenstored.conf".source = "${cfg.package}/etc/xen/oxenstored.conf";
+      };
 
     # Xen provides udev rules.
     services.udev.packages = [ cfg.package ];
diff --git a/nixos/tests/3proxy.nix b/nixos/tests/3proxy.nix
index b8e1dac0e89..3e2061d7e42 100644
--- a/nixos/tests/3proxy.nix
+++ b/nixos/tests/3proxy.nix
@@ -1,4 +1,4 @@
-import ./make-test.nix ({ pkgs, ...} : {
+import ./make-test-python.nix ({ pkgs, ...} : {
   name = "3proxy";
   meta = with pkgs.stdenv.lib.maintainers; {
     maintainers = [ misuzu ];
@@ -134,29 +134,52 @@ import ./make-test.nix ({ pkgs, ...} : {
   };
 
   testScript = ''
-    startAll;
-
-    $peer1->waitForUnit("3proxy.service");
+    peer1.wait_for_unit("3proxy.service")
+    peer1.wait_for_open_port("9999")
 
     # test none auth
-    $peer0->succeed("${pkgs.wget}/bin/wget -e use_proxy=yes -e http_proxy=http://192.168.0.2:3128 -S -O /dev/null http://216.58.211.112:9999");
-    $peer0->succeed("${pkgs.wget}/bin/wget -e use_proxy=yes -e http_proxy=http://192.168.0.2:3128 -S -O /dev/null http://192.168.0.2:9999");
-    $peer0->succeed("${pkgs.wget}/bin/wget -e use_proxy=yes -e http_proxy=http://192.168.0.2:3128 -S -O /dev/null http://127.0.0.1:9999");
+    peer0.succeed(
+        "${pkgs.wget}/bin/wget -e use_proxy=yes -e http_proxy=http://192.168.0.2:3128 -S -O /dev/null http://216.58.211.112:9999"
+    )
+    peer0.succeed(
+        "${pkgs.wget}/bin/wget -e use_proxy=yes -e http_proxy=http://192.168.0.2:3128 -S -O /dev/null http://192.168.0.2:9999"
+    )
+    peer0.succeed(
+        "${pkgs.wget}/bin/wget -e use_proxy=yes -e http_proxy=http://192.168.0.2:3128 -S -O /dev/null http://127.0.0.1:9999"
+    )
 
-    $peer2->waitForUnit("3proxy.service");
+    peer2.wait_for_unit("3proxy.service")
+    peer2.wait_for_open_port("9999")
 
     # test iponly auth
-    $peer0->succeed("${pkgs.wget}/bin/wget -e use_proxy=yes -e http_proxy=http://192.168.0.3:3128 -S -O /dev/null http://216.58.211.113:9999");
-    $peer0->fail("${pkgs.wget}/bin/wget -e use_proxy=yes -e http_proxy=http://192.168.0.3:3128 -S -O /dev/null http://192.168.0.3:9999");
-    $peer0->fail("${pkgs.wget}/bin/wget -e use_proxy=yes -e http_proxy=http://192.168.0.3:3128 -S -O /dev/null http://127.0.0.1:9999");
+    peer0.succeed(
+        "${pkgs.wget}/bin/wget -e use_proxy=yes -e http_proxy=http://192.168.0.3:3128 -S -O /dev/null http://216.58.211.113:9999"
+    )
+    peer0.fail(
+        "${pkgs.wget}/bin/wget -e use_proxy=yes -e http_proxy=http://192.168.0.3:3128 -S -O /dev/null http://192.168.0.3:9999"
+    )
+    peer0.fail(
+        "${pkgs.wget}/bin/wget -e use_proxy=yes -e http_proxy=http://192.168.0.3:3128 -S -O /dev/null http://127.0.0.1:9999"
+    )
 
-    $peer3->waitForUnit("3proxy.service");
+    peer3.wait_for_unit("3proxy.service")
+    peer3.wait_for_open_port("9999")
 
     # test strong auth
-    $peer0->succeed("${pkgs.wget}/bin/wget -e use_proxy=yes -e http_proxy=http://admin:bigsecret\@192.168.0.4:3128 -S -O /dev/null http://216.58.211.114:9999");
-    $peer0->fail("${pkgs.wget}/bin/wget -e use_proxy=yes -e http_proxy=http://admin:bigsecret\@192.168.0.4:3128 -S -O /dev/null http://192.168.0.4:9999");
-    $peer0->fail("${pkgs.wget}/bin/wget -e use_proxy=yes -e http_proxy=http://192.168.0.4:3128 -S -O /dev/null http://216.58.211.114:9999");
-    $peer0->fail("${pkgs.wget}/bin/wget -e use_proxy=yes -e http_proxy=http://192.168.0.4:3128 -S -O /dev/null http://192.168.0.4:9999");
-    $peer0->fail("${pkgs.wget}/bin/wget -e use_proxy=yes -e http_proxy=http://192.168.0.4:3128 -S -O /dev/null http://127.0.0.1:9999");
+    peer0.succeed(
+        "${pkgs.wget}/bin/wget -e use_proxy=yes -e http_proxy=http://admin:bigsecret\@192.168.0.4:3128 -S -O /dev/null http://216.58.211.114:9999"
+    )
+    peer0.fail(
+        "${pkgs.wget}/bin/wget -e use_proxy=yes -e http_proxy=http://admin:bigsecret\@192.168.0.4:3128 -S -O /dev/null http://192.168.0.4:9999"
+    )
+    peer0.fail(
+        "${pkgs.wget}/bin/wget -e use_proxy=yes -e http_proxy=http://192.168.0.4:3128 -S -O /dev/null http://216.58.211.114:9999"
+    )
+    peer0.fail(
+        "${pkgs.wget}/bin/wget -e use_proxy=yes -e http_proxy=http://192.168.0.4:3128 -S -O /dev/null http://192.168.0.4:9999"
+    )
+    peer0.fail(
+        "${pkgs.wget}/bin/wget -e use_proxy=yes -e http_proxy=http://192.168.0.4:3128 -S -O /dev/null http://127.0.0.1:9999"
+    )
   '';
 })
diff --git a/nixos/tests/all-tests.nix b/nixos/tests/all-tests.nix
index 111643ad69c..eb69457fb7e 100644
--- a/nixos/tests/all-tests.nix
+++ b/nixos/tests/all-tests.nix
@@ -33,6 +33,7 @@ in
   bind = handleTest ./bind.nix {};
   bittorrent = handleTest ./bittorrent.nix {};
   #blivet = handleTest ./blivet.nix {};   # broken since 2017-07024
+  buildkite-agent = handleTest ./buildkite-agent.nix {};
   boot = handleTestOn ["x86_64-linux"] ./boot.nix {}; # syslinux is unsupported on aarch64
   boot-stage1 = handleTest ./boot-stage1.nix {};
   borgbackup = handleTest ./borgbackup.nix {};
@@ -61,6 +62,7 @@ in
   containers-portforward = handleTest ./containers-portforward.nix {};
   containers-restart_networking = handleTest ./containers-restart_networking.nix {};
   containers-tmpfs = handleTest ./containers-tmpfs.nix {};
+  corerad = handleTest ./corerad.nix {};
   couchdb = handleTest ./couchdb.nix {};
   deluge = handleTest ./deluge.nix {};
   dhparams = handleTest ./dhparams.nix {};
@@ -122,6 +124,7 @@ in
   i3wm = handleTest ./i3wm.nix {};
   icingaweb2 = handleTest ./icingaweb2.nix {};
   iftop = handleTest ./iftop.nix {};
+  ihatemoney = handleTest ./ihatemoney.nix {};
   incron = handleTest ./incron.nix {};
   influxdb = handleTest ./influxdb.nix {};
   initrd-network-ssh = handleTest ./initrd-network-ssh {};
@@ -179,6 +182,7 @@ in
   mysql = handleTest ./mysql.nix {};
   mysqlBackup = handleTest ./mysql-backup.nix {};
   mysqlReplication = handleTest ./mysql-replication.nix {};
+  nagios = handleTest ./nagios.nix {};
   nat.firewall = handleTest ./nat.nix { withFirewall = true; };
   nat.firewall-conntrack = handleTest ./nat.nix { withFirewall = true; withConntrackHelpers = true; };
   nat.standalone = handleTest ./nat.nix { withFirewall = false; };
@@ -197,6 +201,7 @@ in
   nfs4 = handleTest ./nfs { version = 4; };
   nghttpx = handleTest ./nghttpx.nix {};
   nginx = handleTest ./nginx.nix {};
+  nginx-etag = handleTest ./nginx-etag.nix {};
   nginx-sso = handleTest ./nginx-sso.nix {};
   nix-ssh-serve = handleTest ./nix-ssh-serve.nix {};
   nixos-generate-config = handleTest ./nixos-generate-config.nix {};
@@ -292,6 +297,7 @@ in
   wireguard-generated = handleTest ./wireguard/generated.nix {};
   wireguard-namespaces = handleTest ./wireguard/namespaces.nix {};
   wordpress = handleTest ./wordpress.nix {};
+  xandikos = handleTest ./xandikos.nix {};
   xautolock = handleTest ./xautolock.nix {};
   xfce = handleTest ./xfce.nix {};
   xmonad = handleTest ./xmonad.nix {};
diff --git a/nixos/tests/bittorrent.nix b/nixos/tests/bittorrent.nix
index e5be652c711..0a97d5556a2 100644
--- a/nixos/tests/bittorrent.nix
+++ b/nixos/tests/bittorrent.nix
@@ -18,6 +18,17 @@ let
   externalRouterAddress = "80.100.100.1";
   externalClient2Address = "80.100.100.2";
   externalTrackerAddress = "80.100.100.3";
+
+  transmissionConfig = { ... }: {
+    environment.systemPackages = [ pkgs.transmission ];
+    services.transmission = {
+      enable = true;
+      settings = {
+        dht-enabled = false;
+        message-level = 3;
+      };
+    };
+  };
 in
 
 {
@@ -26,88 +37,79 @@ in
     maintainers = [ domenkozar eelco rob bobvanderlinden ];
   };
 
-  nodes =
-    { tracker =
-        { pkgs, ... }:
-        { environment.systemPackages = [ pkgs.transmission ];
-
-          virtualisation.vlans = [ 1 ];
-          networking.interfaces.eth1.ipv4.addresses = [
-            { address = externalTrackerAddress; prefixLength = 24; }
-          ];
-
-          # We need Apache on the tracker to serve the torrents.
-          services.httpd.enable = true;
-          services.httpd.adminAddr = "foo@example.org";
-          services.httpd.documentRoot = "/tmp";
-
-          networking.firewall.enable = false;
-
-          services.opentracker.enable = true;
-
-          services.transmission.enable = true;
-          services.transmission.settings.dht-enabled = false;
-          services.transmission.settings.port-forwaring-enabled = false;
-        };
-
-      router =
-        { pkgs, nodes, ... }:
-        { virtualisation.vlans = [ 1 2 ];
-          networking.nat.enable = true;
-          networking.nat.internalInterfaces = [ "eth2" ];
-          networking.nat.externalInterface = "eth1";
-          networking.firewall.enable = true;
-          networking.firewall.trustedInterfaces = [ "eth2" ];
-          networking.interfaces.eth0.ipv4.addresses = [];
-          networking.interfaces.eth1.ipv4.addresses = [
-            { address = externalRouterAddress; prefixLength = 24; }
-          ];
-          networking.interfaces.eth2.ipv4.addresses = [
-            { address = internalRouterAddress; prefixLength = 24; }
-          ];
-          services.miniupnpd = {
-            enable = true;
-            externalInterface = "eth1";
-            internalIPs = [ "eth2" ];
-            appendConfig = ''
-              ext_ip=${externalRouterAddress}
-            '';
+  nodes = {
+    tracker = { pkgs, ... }: {
+      imports = [ transmissionConfig ];
+
+      virtualisation.vlans = [ 1 ];
+      networking.firewall.enable = false;
+      networking.interfaces.eth1.ipv4.addresses = [
+        { address = externalTrackerAddress; prefixLength = 24; }
+      ];
+
+      # We need Apache on the tracker to serve the torrents.
+      services.httpd = {
+        enable = true;
+        virtualHosts = {
+          "torrentserver.org" = {
+            adminAddr = "foo@example.org";
+            documentRoot = "/tmp";
           };
         };
+      };
+      services.opentracker.enable = true;
+    };
 
-      client1 =
-        { pkgs, nodes, ... }:
-        { environment.systemPackages = [ pkgs.transmission pkgs.miniupnpc ];
-          virtualisation.vlans = [ 2 ];
-          networking.interfaces.eth0.ipv4.addresses = [];
-          networking.interfaces.eth1.ipv4.addresses = [
-            { address = internalClient1Address; prefixLength = 24; }
-          ];
-          networking.defaultGateway = internalRouterAddress;
-          networking.firewall.enable = false;
-          services.transmission.enable = true;
-          services.transmission.settings.dht-enabled = false;
-          services.transmission.settings.message-level = 3;
-        };
+    router = { pkgs, nodes, ... }: {
+      virtualisation.vlans = [ 1 2 ];
+      networking.nat.enable = true;
+      networking.nat.internalInterfaces = [ "eth2" ];
+      networking.nat.externalInterface = "eth1";
+      networking.firewall.enable = true;
+      networking.firewall.trustedInterfaces = [ "eth2" ];
+      networking.interfaces.eth0.ipv4.addresses = [];
+      networking.interfaces.eth1.ipv4.addresses = [
+        { address = externalRouterAddress; prefixLength = 24; }
+      ];
+      networking.interfaces.eth2.ipv4.addresses = [
+        { address = internalRouterAddress; prefixLength = 24; }
+      ];
+      services.miniupnpd = {
+        enable = true;
+        externalInterface = "eth1";
+        internalIPs = [ "eth2" ];
+        appendConfig = ''
+          ext_ip=${externalRouterAddress}
+        '';
+      };
+    };
 
-      client2 =
-        { pkgs, ... }:
-        { environment.systemPackages = [ pkgs.transmission ];
-          virtualisation.vlans = [ 1 ];
-          networking.interfaces.eth0.ipv4.addresses = [];
-          networking.interfaces.eth1.ipv4.addresses = [
-            { address = externalClient2Address; prefixLength = 24; }
-          ];
-          networking.firewall.enable = false;
-          services.transmission.enable = true;
-          services.transmission.settings.dht-enabled = false;
-          services.transmission.settings.port-forwaring-enabled = false;
-        };
+    client1 = { pkgs, nodes, ... }: {
+      imports = [ transmissionConfig ];
+      environment.systemPackages = [ pkgs.miniupnpc ];
+
+      virtualisation.vlans = [ 2 ];
+      networking.interfaces.eth0.ipv4.addresses = [];
+      networking.interfaces.eth1.ipv4.addresses = [
+        { address = internalClient1Address; prefixLength = 24; }
+      ];
+      networking.defaultGateway = internalRouterAddress;
+      networking.firewall.enable = false;
     };
 
-  testScript =
-    { nodes, ... }:
-    ''
+    client2 = { pkgs, ... }: {
+      imports = [ transmissionConfig ];
+
+      virtualisation.vlans = [ 1 ];
+      networking.interfaces.eth0.ipv4.addresses = [];
+      networking.interfaces.eth1.ipv4.addresses = [
+        { address = externalClient2Address; prefixLength = 24; }
+      ];
+      networking.firewall.enable = false;
+    };
+  };
+
+  testScript = { nodes, ... }: ''
       start_all()
 
       # Wait for network and miniupnpd.
@@ -159,5 +161,4 @@ in
           "cmp /tmp/test.tar.bz2 ${file}"
       )
     '';
-
 })
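
The bittorrent change above deduplicates the per-node Transmission settings into a shared transmissionConfig module that each node pulls in via imports. As a minimal standalone sketch of the same deduplication idea (not part of the patch; node names and option paths are just illustrative Python dictionaries), the shared fragment can be merged into per-node overrides like this:

# Sketch: merge a shared config fragment into per-node overrides,
# mirroring the shared `transmissionConfig` module introduced above.
import copy
import json

shared_transmission = {
    "services": {
        "transmission": {
            "enable": True,
            "settings": {"dht-enabled": False, "message-level": 3},
        }
    }
}

def merge(base, overrides):
    """Recursively merge `overrides` into a copy of `base` (overrides win)."""
    result = copy.deepcopy(base)
    for key, value in overrides.items():
        if isinstance(value, dict) and isinstance(result.get(key), dict):
            result[key] = merge(result[key], value)
        else:
            result[key] = value
    return result

nodes = {
    "tracker": merge(shared_transmission, {"networking": {"firewall": {"enable": False}}}),
    "client2": merge(shared_transmission, {"virtualisation": {"vlans": [1]}}),
}

print(json.dumps(nodes, indent=2))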
diff --git a/nixos/tests/buildkite-agent.nix b/nixos/tests/buildkite-agent.nix
new file mode 100644
index 00000000000..042ce389eb8
--- /dev/null
+++ b/nixos/tests/buildkite-agent.nix
@@ -0,0 +1,23 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+
+{
+  name = "buildkite-agent";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ flokli ];
+  };
+
+  machine = { pkgs, ... }: {
+    services.buildkite-agent = {
+      enable = true;
+      privateSshKeyPath = (import ./ssh-keys.nix pkgs).snakeOilPrivateKey;
+      tokenPath = (pkgs.writeText "my-token" "5678");
+    };
+  };
+
+  testScript = ''
+    # We can't wait for the unit to start up, since we can't actually connect to Buildkite,
+    # but we can check whether the files are set up correctly.
+    machine.wait_for_file("/var/lib/buildkite-agent/buildkite-agent.cfg")
+    machine.wait_for_file("/var/lib/buildkite-agent/.ssh/id_rsa")
+  '';
+})
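
Because the agent cannot reach Buildkite from the sandbox, the test only waits for the generated config and SSH key to appear on disk. A rough standalone sketch of that wait_for_file-style polling is below; the helper is illustrative and is not the NixOS test driver's actual implementation, and the file name is just an example.

# Sketch: poll for a file's existence, as the buildkite-agent test does via wait_for_file().
import os
import tempfile
import time


def wait_for_file(path, timeout=10.0, interval=0.1):
    """Poll until `path` exists or `timeout` seconds elapse."""
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        if os.path.exists(path):
            return True
        time.sleep(interval)
    raise TimeoutError(f"file {path!r} did not appear within {timeout}s")


if __name__ == "__main__":
    with tempfile.TemporaryDirectory() as tmp:
        target = os.path.join(tmp, "buildkite-agent.cfg")
        open(target, "w").close()  # pretend the service wrote its config
        wait_for_file(target)
        print("config file present, as the test expects")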
diff --git a/nixos/tests/ceph-multi-node.nix b/nixos/tests/ceph-multi-node.nix
index 52a0b5caf23..90dd747525d 100644
--- a/nixos/tests/ceph-multi-node.nix
+++ b/nixos/tests/ceph-multi-node.nix
@@ -19,6 +19,12 @@ let
       key = "AQBEEJNac00kExAAXEgy943BGyOpVH1LLlHafQ==";
       uuid = "5e97a838-85b6-43b0-8950-cb56d554d1e5";
     };
+    osd2 = {
+      name = "2";
+      ip = "192.168.1.4";
+      key = "AQAdyhZeIaUlARAAGRoidDAmS6Vkp546UFEf5w==";
+      uuid = "ea999274-13d0-4dd5-9af9-ad25a324f72f";
+    };
   };
   generateCephConfig = { daemonConfig }: {
     enable = true;
@@ -72,35 +78,20 @@ let
     };
   }; };
 
-  networkOsd0 = {
+  networkOsd = osd: {
     dhcpcd.enable = false;
     interfaces.eth1.ipv4.addresses = pkgs.lib.mkOverride 0 [
-      { address = cfg.osd0.ip; prefixLength = 24; }
+      { address = osd.ip; prefixLength = 24; }
     ];
     firewall = {
       allowedTCPPortRanges = [ { from = 6800; to = 7300; } ];
     };
   };
-  cephConfigOsd0 = generateCephConfig { daemonConfig = {
-    osd = {
-      enable = true;
-      daemons = [ cfg.osd0.name ];
-    };
-  }; };
 
-  networkOsd1 = {
-    dhcpcd.enable = false;
-    interfaces.eth1.ipv4.addresses = pkgs.lib.mkOverride 0 [
-      { address = cfg.osd1.ip; prefixLength = 24; }
-    ];
-    firewall = {
-      allowedTCPPortRanges = [ { from = 6800; to = 7300; } ];
-    };
-  };
-  cephConfigOsd1 = generateCephConfig { daemonConfig = {
+  cephConfigOsd = osd: generateCephConfig { daemonConfig = {
     osd = {
       enable = true;
-      daemons = [ cfg.osd1.name ];
+      daemons = [ osd.name ];
     };
   }; };
 
@@ -114,6 +105,7 @@ let
     monA.wait_for_unit("network.target")
     osd0.wait_for_unit("network.target")
     osd1.wait_for_unit("network.target")
+    osd2.wait_for_unit("network.target")
 
     # Bootstrap ceph-mon daemon
     monA.succeed(
@@ -145,8 +137,9 @@ let
     monA.succeed("cp /etc/ceph/ceph.client.admin.keyring /tmp/shared")
     osd0.succeed("cp /tmp/shared/ceph.client.admin.keyring /etc/ceph")
     osd1.succeed("cp /tmp/shared/ceph.client.admin.keyring /etc/ceph")
+    osd2.succeed("cp /tmp/shared/ceph.client.admin.keyring /etc/ceph")
 
-    # Bootstrap both OSDs
+    # Bootstrap OSDs
     osd0.succeed(
         "mkfs.xfs /dev/vdb",
         "mkdir -p /var/lib/ceph/osd/ceph-${cfg.osd0.name}",
@@ -161,6 +154,13 @@ let
         "ceph-authtool --create-keyring /var/lib/ceph/osd/ceph-${cfg.osd1.name}/keyring --name osd.${cfg.osd1.name} --add-key ${cfg.osd1.key}",
         'echo \'{"cephx_secret": "${cfg.osd1.key}"}\' | ceph osd new ${cfg.osd1.uuid} -i -',
     )
+    osd2.succeed(
+        "mkfs.xfs /dev/vdb",
+        "mkdir -p /var/lib/ceph/osd/ceph-${cfg.osd2.name}",
+        "mount /dev/vdb /var/lib/ceph/osd/ceph-${cfg.osd2.name}",
+        "ceph-authtool --create-keyring /var/lib/ceph/osd/ceph-${cfg.osd2.name}/keyring --name osd.${cfg.osd2.name} --add-key ${cfg.osd2.key}",
+        'echo \'{"cephx_secret": "${cfg.osd2.key}"}\' | ceph osd new ${cfg.osd2.uuid} -i -',
+    )
 
     # Initialize the OSDs with regular filestore
     osd0.succeed(
@@ -173,7 +173,12 @@ let
         "chown -R ceph:ceph /var/lib/ceph/osd",
         "systemctl start ceph-osd-${cfg.osd1.name}",
     )
-    monA.wait_until_succeeds("ceph osd stat | grep -e '2 osds: 2 up[^,]*, 2 in'")
+    osd2.succeed(
+        "ceph-osd -i ${cfg.osd2.name} --mkfs --osd-uuid ${cfg.osd2.uuid}",
+        "chown -R ceph:ceph /var/lib/ceph/osd",
+        "systemctl start ceph-osd-${cfg.osd2.name}",
+    )
+    monA.wait_until_succeeds("ceph osd stat | grep -e '3 osds: 3 up[^,]*, 3 in'")
     monA.wait_until_succeeds("ceph -s | grep 'mgr: ${cfg.monA.name}(active,'")
     monA.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
 
@@ -196,16 +201,18 @@ let
     monA.crash()
     osd0.crash()
     osd1.crash()
+    osd2.crash()
 
     # Start it up
     osd0.start()
     osd1.start()
+    osd2.start()
     monA.start()
 
     # Ensure the cluster comes back up again
     monA.succeed("ceph -s | grep 'mon: 1 daemons'")
     monA.wait_until_succeeds("ceph -s | grep 'quorum ${cfg.monA.name}'")
-    monA.wait_until_succeeds("ceph osd stat | grep -e '2 osds: 2 up[^,]*, 2 in'")
+    monA.wait_until_succeeds("ceph osd stat | grep -e '3 osds: 3 up[^,]*, 3 in'")
     monA.wait_until_succeeds("ceph -s | grep 'mgr: ${cfg.monA.name}(active,'")
     monA.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
   '';
@@ -217,8 +224,9 @@ in {
 
   nodes = {
     monA = generateHost { pkgs = pkgs; cephConfig = cephConfigMonA; networkConfig = networkMonA; };
-    osd0 = generateHost { pkgs = pkgs; cephConfig = cephConfigOsd0; networkConfig = networkOsd0; };
-    osd1 = generateHost { pkgs = pkgs; cephConfig = cephConfigOsd1; networkConfig = networkOsd1; };
+    osd0 = generateHost { pkgs = pkgs; cephConfig = cephConfigOsd cfg.osd0; networkConfig = networkOsd cfg.osd0; };
+    osd1 = generateHost { pkgs = pkgs; cephConfig = cephConfigOsd cfg.osd1; networkConfig = networkOsd cfg.osd1; };
+    osd2 = generateHost { pkgs = pkgs; cephConfig = cephConfigOsd cfg.osd2; networkConfig = networkOsd cfg.osd2; };
   };
 
   testScript = testscript;
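
The ceph-multi-node change replaces copy-pasted networkOsd0/networkOsd1 and cephConfigOsd0/cephConfigOsd1 definitions with functions parameterised over an OSD record. A minimal Python sketch of the same generator pattern follows; only osd2's address comes from the patch, the other addresses are placeholders, and the keys are shortened for illustration.

# Sketch: derive per-OSD config from a table of OSD records,
# mirroring the `networkOsd`/`cephConfigOsd` parameterisation above.
osds = {
    "osd0": {"name": "0", "ip": "192.0.2.10"},  # placeholder address
    "osd1": {"name": "1", "ip": "192.0.2.11"},  # placeholder address
    "osd2": {"name": "2", "ip": "192.168.1.4"},
}


def network_osd(osd):
    return {
        "dhcpcd": {"enable": False},
        "interfaces": {"eth1": [{"address": osd["ip"], "prefixLength": 24}]},
        "firewall": {"allowedTCPPortRanges": [{"from": 6800, "to": 7300}]},
    }


def ceph_config_osd(osd):
    return {"osd": {"enable": True, "daemons": [osd["name"]]}}


for host, osd in osds.items():
    print(host, network_osd(osd), ceph_config_osd(osd))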
diff --git a/nixos/tests/ceph-single-node.nix b/nixos/tests/ceph-single-node.nix
index da92a73e14d..1a027e17836 100644
--- a/nixos/tests/ceph-single-node.nix
+++ b/nixos/tests/ceph-single-node.nix
@@ -17,6 +17,11 @@ let
       key = "AQBEEJNac00kExAAXEgy943BGyOpVH1LLlHafQ==";
       uuid = "5e97a838-85b6-43b0-8950-cb56d554d1e5";
     };
+    osd2 = {
+      name = "2";
+      key = "AQAdyhZeIaUlARAAGRoidDAmS6Vkp546UFEf5w==";
+      uuid = "ea999274-13d0-4dd5-9af9-ad25a324f72f";
+    };
   };
   generateCephConfig = { daemonConfig }: {
     enable = true;
@@ -30,7 +35,7 @@ let
   generateHost = { pkgs, cephConfig, networkConfig, ... }: {
     virtualisation = {
       memorySize = 512;
-      emptyDiskImages = [ 20480 20480 ];
+      emptyDiskImages = [ 20480 20480 20480 ];
       vlans = [ 1 ];
     };
 
@@ -65,7 +70,7 @@ let
     };
     osd = {
       enable = true;
-      daemons = [ cfg.osd0.name cfg.osd1.name ];
+      daemons = [ cfg.osd0.name cfg.osd1.name cfg.osd2.name ];
     };
   }; };
 
@@ -104,29 +109,36 @@ let
     monA.wait_until_succeeds("ceph -s | grep 'quorum ${cfg.monA.name}'")
     monA.wait_until_succeeds("ceph -s | grep 'mgr: ${cfg.monA.name}(active,'")
 
-    # Bootstrap both OSDs
+    # Bootstrap OSDs
     monA.succeed(
         "mkfs.xfs /dev/vdb",
         "mkfs.xfs /dev/vdc",
+        "mkfs.xfs /dev/vdd",
         "mkdir -p /var/lib/ceph/osd/ceph-${cfg.osd0.name}",
         "mount /dev/vdb /var/lib/ceph/osd/ceph-${cfg.osd0.name}",
         "mkdir -p /var/lib/ceph/osd/ceph-${cfg.osd1.name}",
         "mount /dev/vdc /var/lib/ceph/osd/ceph-${cfg.osd1.name}",
+        "mkdir -p /var/lib/ceph/osd/ceph-${cfg.osd2.name}",
+        "mount /dev/vdd /var/lib/ceph/osd/ceph-${cfg.osd2.name}",
         "ceph-authtool --create-keyring /var/lib/ceph/osd/ceph-${cfg.osd0.name}/keyring --name osd.${cfg.osd0.name} --add-key ${cfg.osd0.key}",
         "ceph-authtool --create-keyring /var/lib/ceph/osd/ceph-${cfg.osd1.name}/keyring --name osd.${cfg.osd1.name} --add-key ${cfg.osd1.key}",
+        "ceph-authtool --create-keyring /var/lib/ceph/osd/ceph-${cfg.osd2.name}/keyring --name osd.${cfg.osd2.name} --add-key ${cfg.osd2.key}",
         'echo \'{"cephx_secret": "${cfg.osd0.key}"}\' | ceph osd new ${cfg.osd0.uuid} -i -',
         'echo \'{"cephx_secret": "${cfg.osd1.key}"}\' | ceph osd new ${cfg.osd1.uuid} -i -',
+        'echo \'{"cephx_secret": "${cfg.osd2.key}"}\' | ceph osd new ${cfg.osd2.uuid} -i -',
     )
 
     # Initialize the OSDs with regular filestore
     monA.succeed(
         "ceph-osd -i ${cfg.osd0.name} --mkfs --osd-uuid ${cfg.osd0.uuid}",
         "ceph-osd -i ${cfg.osd1.name} --mkfs --osd-uuid ${cfg.osd1.uuid}",
+        "ceph-osd -i ${cfg.osd2.name} --mkfs --osd-uuid ${cfg.osd2.uuid}",
         "chown -R ceph:ceph /var/lib/ceph/osd",
         "systemctl start ceph-osd-${cfg.osd0.name}",
         "systemctl start ceph-osd-${cfg.osd1.name}",
+        "systemctl start ceph-osd-${cfg.osd2.name}",
     )
-    monA.wait_until_succeeds("ceph osd stat | grep -e '2 osds: 2 up[^,]*, 2 in'")
+    monA.wait_until_succeeds("ceph osd stat | grep -e '3 osds: 3 up[^,]*, 3 in'")
     monA.wait_until_succeeds("ceph -s | grep 'mgr: ${cfg.monA.name}(active,'")
     monA.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
 
@@ -161,11 +173,12 @@ let
     monA.wait_for_unit("ceph-mgr-${cfg.monA.name}")
     monA.wait_for_unit("ceph-osd-${cfg.osd0.name}")
     monA.wait_for_unit("ceph-osd-${cfg.osd1.name}")
+    monA.wait_for_unit("ceph-osd-${cfg.osd2.name}")
 
     # Ensure the cluster comes back up again
     monA.succeed("ceph -s | grep 'mon: 1 daemons'")
     monA.wait_until_succeeds("ceph -s | grep 'quorum ${cfg.monA.name}'")
-    monA.wait_until_succeeds("ceph osd stat | grep -e '2 osds: 2 up[^,]*, 2 in'")
+    monA.wait_until_succeeds("ceph osd stat | grep -e '3 osds: 3 up[^,]*, 3 in'")
     monA.wait_until_succeeds("ceph -s | grep 'mgr: ${cfg.monA.name}(active,'")
     monA.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
   '';
diff --git a/nixos/tests/certmgr.nix b/nixos/tests/certmgr.nix
index cb69f35e862..ef32f54400e 100644
--- a/nixos/tests/certmgr.nix
+++ b/nixos/tests/certmgr.nix
@@ -9,8 +9,8 @@ let
     inherit action;
     authority = {
       file = {
-        group = "nobody";
-        owner = "nobody";
+        group = "nginx";
+        owner = "nginx";
         path = "/tmp/${host}-ca.pem";
       };
       label = "www_ca";
@@ -18,14 +18,14 @@ let
       remote = "localhost:8888";
     };
     certificate = {
-      group = "nobody";
-      owner = "nobody";
+      group = "nginx";
+      owner = "nginx";
       path = "/tmp/${host}-cert.pem";
     };
     private_key = {
-      group = "nobody";
+      group = "nginx";
       mode = "0600";
-      owner = "nobody";
+      owner = "nginx";
       path = "/tmp/${host}-key.pem";
     };
     request = {
diff --git a/nixos/tests/chromium.nix b/nixos/tests/chromium.nix
index af5db2a3dbe..a5531d112e3 100644
--- a/nixos/tests/chromium.nix
+++ b/nixos/tests/chromium.nix
@@ -36,7 +36,7 @@ mapAttrs (channel: chromiumPkg: makeTest rec {
     <body onload="javascript:document.title='startup done'">
       <img src="file://${pkgs.fetchurl {
         url = "http://nixos.org/logo/nixos-hex.svg";
-        sha256 = "0wxpp65npdw2cg8m0cxc9qff1sb3b478cxpg1741d8951g948rg8";
+        sha256 = "07ymq6nw8kc22m7kzxjxldhiq8gzmc7f45kq2bvhbdm0w5s112s4";
       }}" />
     </body>
     </html>
diff --git a/nixos/tests/common/user-account.nix b/nixos/tests/common/user-account.nix
index 9cd531a1f96..a57ee2d59ae 100644
--- a/nixos/tests/common/user-account.nix
+++ b/nixos/tests/common/user-account.nix
@@ -4,6 +4,7 @@
     { isNormalUser = true;
       description = "Alice Foobar";
       password = "foobar";
+      uid = 1000;
     };
 
   users.users.bob =
diff --git a/nixos/tests/corerad.nix b/nixos/tests/corerad.nix
new file mode 100644
index 00000000000..68b698857b4
--- /dev/null
+++ b/nixos/tests/corerad.nix
@@ -0,0 +1,71 @@
+import ./make-test-python.nix (
+  {
+    nodes = {
+      router = {config, pkgs, ...}: { 
+        config = {
+          # This machine simulates a router with IPv6 forwarding and a static IPv6 address.
+          boot.kernel.sysctl = {
+            "net.ipv6.conf.all.forwarding" = true;
+          };
+          networking.interfaces.eth1 = {
+            ipv6.addresses = [ { address = "fd00:dead:beef:dead::1"; prefixLength = 64; } ];
+          };
+          services.corerad = {
+            enable = true;
+            # Serve router advertisements to the client machine with prefix information matching
+            # any IPv6 /64 prefixes configured on this interface.
+            configFile = pkgs.writeText "corerad.toml" ''
+              [[interfaces]]
+              name = "eth1"
+              send_advertisements = true
+                [[interfaces.plugins]]
+                name = "prefix"
+                prefix = "::/64"
+            '';
+          };
+        };
+      };
+      client = {config, pkgs, ...}: {
+        # Use IPv6 SLAAC from router advertisements, and install rdisc6 so we can
+        # trigger one immediately.
+        config = {
+          boot.kernel.sysctl = {
+            "net.ipv6.conf.all.autoconf" = true;
+          };
+          environment.systemPackages = with pkgs; [
+            ndisc6
+          ];
+        };
+      };
+    };
+
+    testScript = ''
+      start_all()
+
+      with subtest("Wait for CoreRAD and network ready"):
+          # Ensure networking is online and CoreRAD is ready.
+          router.wait_for_unit("network-online.target")
+          client.wait_for_unit("network-online.target")
+          router.wait_for_unit("corerad.service")
+
+          # Ensure the client can reach the router.
+          client.wait_until_succeeds("ping -c 1 fd00:dead:beef:dead::1")
+
+      with subtest("Verify SLAAC on client"):
+          # Trigger a router solicitation and verify a SLAAC address is assigned from
+          # the prefix configured on the router.
+          client.wait_until_succeeds("rdisc6 -1 -r 10 eth1")
+          client.wait_until_succeeds(
+              "ip -6 addr show dev eth1 | grep -q 'fd00:dead:beef:dead:'"
+          )
+
+          addrs = client.succeed("ip -6 addr show dev eth1")
+
+          assert (
+              "fd00:dead:beef:dead:" in addrs
+          ), "SLAAC prefix was not found in client addresses after router advertisement"
+          assert (
+              "/64 scope global temporary" in addrs
+          ), "SLAAC temporary address was not configured on client after router advertisement"
+    '';
+  })
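
The corerad test asserts that `ip -6 addr show dev eth1` on the client contains the advertised prefix and a temporary SLAAC address. A standalone sketch of those string checks is below, run against a made-up sample of `ip` output (the real test reads the output from the client VM).

# Sketch: the same SLAAC assertions as above, against a fabricated sample.
sample_output = """\
2: eth1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 state UP
    inet6 fd00:dead:beef:dead:1234:5678:9abc:def0/64 scope global temporary dynamic
       valid_lft 86400sec preferred_lft 14400sec
    inet6 fe80::5054:ff:fe12:3456/64 scope link
"""

assert "fd00:dead:beef:dead:" in sample_output, "SLAAC prefix missing"
assert "/64 scope global temporary" in sample_output, "no temporary SLAAC address"
print("sample output contains the advertised prefix and a temporary address")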
diff --git a/nixos/tests/elk.nix b/nixos/tests/elk.nix
index b33d98b85d6..d3dc6dde135 100644
--- a/nixos/tests/elk.nix
+++ b/nixos/tests/elk.nix
@@ -6,20 +6,11 @@
   # NIXPKGS_ALLOW_UNFREE=1 nix-build nixos/tests/elk.nix -A ELK-6 --arg enableUnfree true
 }:
 
-with import ../lib/testing.nix { inherit system pkgs; };
-with pkgs.lib;
-
 let
   esUrl = "http://localhost:9200";
 
-  totalHits = message :
-    "curl --silent --show-error '${esUrl}/_search' -H 'Content-Type: application/json' " +
-    ''-d '{\"query\" : { \"match\" : { \"message\" : \"${message}\"}}}' '' +
-    "| jq .hits.total";
-
   mkElkTest = name : elk :
-   let elasticsearchGe7 = builtins.compareVersions elk.elasticsearch.version "7" >= 0;
-   in makeTest {
+    import ./make-test-python.nix ({
     inherit name;
     meta = with pkgs.stdenv.lib.maintainers; {
       maintainers = [ eelco offline basvandijk ];
@@ -50,15 +41,15 @@ let
                                         elk.journalbeat.version "6" < 0; in {
                 enable = true;
                 package = elk.journalbeat;
-                extraConfig = mkOptionDefault (''
+                extraConfig = pkgs.lib.mkOptionDefault (''
                   logging:
                     to_syslog: true
                     level: warning
                     metrics.enabled: false
                   output.elasticsearch:
                     hosts: [ "127.0.0.1:9200" ]
-                    ${optionalString lt6 "template.enabled: false"}
-                '' + optionalString (!lt6) ''
+                    ${pkgs.lib.optionalString lt6 "template.enabled: false"}
+                '' + pkgs.lib.optionalString (!lt6) ''
                   journalbeat.inputs:
                   - paths: []
                     seek: cursor
@@ -99,8 +90,7 @@ let
               };
 
               elasticsearch-curator = {
-                # The current version of curator (5.6) doesn't support elasticsearch >= 7.0.0.
-                enable = !elasticsearchGe7;
+                enable = true;
                 actionYAML = ''
                 ---
                 actions:
@@ -130,11 +120,23 @@ let
       };
 
     testScript = ''
-      startAll;
+      import json
+
+
+      def total_hits(message):
+          dictionary = {"query": {"match": {"message": message}}}
+          return (
+              "curl --silent --show-error '${esUrl}/_search' "
+              + "-H 'Content-Type: application/json' "
+              + "-d '{}' ".format(json.dumps(dictionary))
+              + "| jq .hits.total"
+          )
+
+
+      start_all()
 
-      # Wait until elasticsearch is listening for connections.
-      $one->waitForUnit("elasticsearch.service");
-      $one->waitForOpenPort(9200);
+      one.wait_for_unit("elasticsearch.service")
+      one.wait_for_open_port(9200)
 
       # Continue as long as the status is not "red". The status is probably
       # "yellow" instead of "green" because we are using a single elasticsearch
@@ -142,42 +144,43 @@ let
       #
       # TODO: extend this test with multiple elasticsearch nodes
       #       and see if the status turns "green".
-      $one->waitUntilSucceeds(
-        "curl --silent --show-error '${esUrl}/_cluster/health' " .
-        "| jq .status | grep -v red");
-
-      # Perform some simple logstash tests.
-      $one->waitForUnit("logstash.service");
-      $one->waitUntilSucceeds("cat /tmp/logstash.out | grep flowers");
-      $one->waitUntilSucceeds("cat /tmp/logstash.out | grep -v dragons");
-
-      # See if kibana is healthy.
-      $one->waitForUnit("kibana.service");
-      $one->waitUntilSucceeds(
-        "curl --silent --show-error 'http://localhost:5601/api/status' " .
-        "| jq .status.overall.state | grep green");
-
-      # See if logstash messages arive in elasticsearch.
-      $one->waitUntilSucceeds("${totalHits "flowers"} | grep -v 0");
-      $one->waitUntilSucceeds("${totalHits "dragons"} | grep 0");
-
-      # Test if a message logged to the journal
-      # is ingested by elasticsearch via journalbeat.
-      $one->waitForUnit("journalbeat.service");
-      $one->execute("echo 'Supercalifragilisticexpialidocious' | systemd-cat");
-      $one->waitUntilSucceeds(
-        "${totalHits "Supercalifragilisticexpialidocious"} | grep -v 0");
-
-    '' + optionalString (!elasticsearchGe7) ''
-      # Test elasticsearch-curator.
-      $one->systemctl("stop logstash");
-      $one->systemctl("start elasticsearch-curator");
-      $one->waitUntilSucceeds(
-        "! curl --silent --show-error '${esUrl}/_cat/indices' " .
-        "| grep logstash | grep -q ^$1");
+      one.wait_until_succeeds(
+          "curl --silent --show-error '${esUrl}/_cluster/health' | jq .status | grep -v red"
+      )
+
+      with subtest("Perform some simple logstash tests"):
+          one.wait_for_unit("logstash.service")
+          one.wait_until_succeeds("cat /tmp/logstash.out | grep flowers")
+          one.wait_until_succeeds("cat /tmp/logstash.out | grep -v dragons")
+
+      with subtest("Kibana is healthy"):
+          one.wait_for_unit("kibana.service")
+          one.wait_until_succeeds(
+              "curl --silent --show-error 'http://localhost:5601/api/status' | jq .status.overall.state | grep green"
+          )
+
+      with subtest("Logstash messages arive in elasticsearch"):
+          one.wait_until_succeeds(total_hits("flowers") + " | grep -v 0")
+          one.wait_until_succeeds(total_hits("dragons") + " | grep 0")
+
+      with subtest(
+          "A message logged to the journal is ingested by elasticsearch via journalbeat"
+      ):
+          one.wait_for_unit("journalbeat.service")
+          one.execute("echo 'Supercalifragilisticexpialidocious' | systemd-cat")
+          one.wait_until_succeeds(
+              total_hits("Supercalifragilisticexpialidocious") + " | grep -v 0"
+          )
+
+      with subtest("Elasticsearch-curator works"):
+          one.systemctl("stop logstash")
+          one.systemctl("start elasticsearch-curator")
+          one.wait_until_succeeds(
+              '! curl --silent --show-error "${esUrl}/_cat/indices" | grep logstash | grep -q ^'
+          )
     '';
-  };
-in mapAttrs mkElkTest {
+  }) {};
+in pkgs.lib.mapAttrs mkElkTest {
   ELK-6 =
     if enableUnfree
     then {
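
The ELK rewrite replaces the Perl totalHits helper, which needed manual backslash escaping, with a Python total_hits() that builds the query body via json.dumps. A standalone sketch of that builder follows; the URL matches the esUrl used in the test, and the function only constructs the command string.

# Sketch of the total_hits() helper introduced above.
import json

ES_URL = "http://localhost:9200"


def total_hits(message):
    query = {"query": {"match": {"message": message}}}
    return (
        f"curl --silent --show-error '{ES_URL}/_search' "
        "-H 'Content-Type: application/json' "
        f"-d '{json.dumps(query)}' "
        "| jq .hits.total"
    )


print(total_hits("flowers"))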
diff --git a/nixos/tests/gnome3-xorg.nix b/nixos/tests/gnome3-xorg.nix
index aa03501f6a5..f793bb922ad 100644
--- a/nixos/tests/gnome3-xorg.nix
+++ b/nixos/tests/gnome3-xorg.nix
@@ -1,41 +1,79 @@
-import ./make-test.nix ({ pkgs, ...} : {
+import ./make-test-python.nix ({ pkgs, ...} : {
   name = "gnome3-xorg";
   meta = with pkgs.stdenv.lib.maintainers; {
     maintainers = pkgs.gnome3.maintainers;
   };
 
-  machine =
-    { ... }:
+  machine = { nodes, ... }: let
+    user = nodes.machine.config.users.users.alice;
+  in
 
     { imports = [ ./common/user-account.nix ];
 
       services.xserver.enable = true;
 
-      services.xserver.displayManager.gdm.enable = false;
-      services.xserver.displayManager.lightdm.enable = true;
-      services.xserver.displayManager.lightdm.autoLogin.enable = true;
-      services.xserver.displayManager.lightdm.autoLogin.user = "alice";
+      services.xserver.displayManager.gdm = {
+        enable = true;
+        autoLogin = {
+          enable = true;
+          user = user.name;
+        };
+      };
+
       services.xserver.desktopManager.gnome3.enable = true;
       services.xserver.displayManager.defaultSession = "gnome-xorg";
 
       virtualisation.memorySize = 1024;
     };
 
-  testScript =
-    ''
-      $machine->waitForX;
+  testScript = { nodes, ... }: let
+    user = nodes.machine.config.users.users.alice;
+    uid = toString user.uid;
+    bus = "DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/${uid}/bus";
+    xauthority = "/run/user/${uid}/gdm/Xauthority";
+    display = "DISPLAY=:0.0";
+    env = "${bus} XAUTHORITY=${xauthority} ${display}";
+    gdbus = "${env} gdbus";
+    su = command: "su - ${user.name} -c '${env} ${command}'";
+
+    # Call JavaScript in GNOME Shell; returns a tuple (success, output), where
+    # `success` is true if the D-Bus call succeeded and `output` is what the
+    # JavaScript evaluates to.
+    eval = "call --session -d org.gnome.Shell -o /org/gnome/Shell -m org.gnome.Shell.Eval";
+
+    # False when startup is done
+    startingUp = su "${gdbus} ${eval} Main.layoutManager._startingUp";
+
+    # Start gnome-terminal
+    gnomeTerminalCommand = su "gnome-terminal";
 
-      # wait for alice to be logged in
-      $machine->waitForUnit("default.target","alice");
+    # Hopefully gnome-terminal's wm class
+    wmClass = su "${gdbus} ${eval} global.display.focus_window.wm_class";
+  in ''
+      with subtest("Login to GNOME Xorg with GDM"):
+          machine.wait_for_x()
+          # Wait for alice to be logged in"
+          machine.wait_for_unit("default.target", "${user.name}")
+          machine.wait_for_file("${xauthority}")
+          machine.succeed("xauth merge ${xauthority}")
+          # Check that logging in has given the user ownership of devices
+          assert "alice" in machine.succeed("getfacl -p /dev/snd/timer")
 
-      # Check that logging in has given the user ownership of devices.
-      $machine->succeed("getfacl -p /dev/snd/timer | grep -q alice");
+      with subtest("Wait for GNOME Shell"):
+          # correct output should be (true, 'false')
+          machine.wait_until_succeeds(
+              "${startingUp} | grep -q 'true,..false'"
+          )
 
-      $machine->succeed("su - alice -c 'DISPLAY=:0.0 gnome-terminal &'");
-      $machine->succeed("xauth merge ~alice/.Xauthority");
-      $machine->waitForWindow(qr/alice.*machine/);
-      $machine->succeed("timeout 900 bash -c 'while read msg; do if [[ \$msg =~ \"GNOME Shell started\" ]]; then break; fi; done < <(journalctl -f)'");
-      $machine->sleep(10);
-      $machine->screenshot("screen");
+      with subtest("Open Gnome Terminal"):
+          machine.succeed(
+              "${gnomeTerminalCommand}"
+          )
+          # correct output should be (true, '"Gnome-terminal"')
+          machine.wait_until_succeeds(
+              "${wmClass} | grep -q  'true,...Gnome-terminal'"
+          )
+          machine.sleep(20)
+          machine.screenshot("screen")
     '';
 })
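
The gnome3-xorg test drives GNOME Shell by composing a `gdbus call ... org.gnome.Shell.Eval` command that runs under the logged-in user's session bus. A standalone sketch that assembles the same command string is below; the user name and uid are the alice/1000 values from the test configuration, and the command is only printed here, not executed.

# Sketch: compose the per-user gdbus Eval command used by the test.
import shlex

user = "alice"
uid = 1000
env = (
    f"DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/{uid}/bus "
    f"XAUTHORITY=/run/user/{uid}/gdm/Xauthority DISPLAY=:0.0"
)
eval_call = (
    "gdbus call --session -d org.gnome.Shell -o /org/gnome/Shell "
    "-m org.gnome.Shell.Eval Main.layoutManager._startingUp"
)
command = f"su - {user} -c {shlex.quote(env + ' ' + eval_call)}"
print(command)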
diff --git a/nixos/tests/haka.nix b/nixos/tests/haka.nix
index 6277ebb4933..3ca19cb0971 100644
--- a/nixos/tests/haka.nix
+++ b/nixos/tests/haka.nix
@@ -1,6 +1,6 @@
 # This test runs haka and probes it with hakactl
 
-import ./make-test.nix ({ pkgs, ...} : {
+import ./make-test-python.nix ({ pkgs, ...} : {
   name = "haka";
   meta = with pkgs.stdenv.lib.maintainers; {
     maintainers = [ tvestelind ];
@@ -15,10 +15,10 @@ import ./make-test.nix ({ pkgs, ...} : {
     };
 
   testScript = ''
-    startAll;
+    start_all()
 
-    $haka->waitForUnit("haka.service");
-    $haka->succeed("hakactl status");
-    $haka->succeed("hakactl stop");
+    haka.wait_for_unit("haka.service")
+    haka.succeed("hakactl status")
+    haka.succeed("hakactl stop")
   '';
 })
diff --git a/nixos/tests/home-assistant.nix b/nixos/tests/home-assistant.nix
index 6b53914fd85..80dca43f1f3 100644
--- a/nixos/tests/home-assistant.nix
+++ b/nixos/tests/home-assistant.nix
@@ -1,11 +1,10 @@
-import ./make-test.nix ({ pkgs, ... }:
+import ./make-test-python.nix ({ pkgs, ... }:
 
 let
   configDir = "/var/lib/foobar";
   apiPassword = "some_secret";
   mqttPassword = "another_secret";
   hassCli = "hass-cli --server http://hass:8123 --password '${apiPassword}'";
-
 in {
   name = "home-assistant";
   meta = with pkgs.stdenv.lib; {
@@ -69,36 +68,44 @@ in {
   };
 
   testScript = ''
-    startAll;
-    $hass->waitForUnit("home-assistant.service");
-
-    # The config is specified using a Nix attribute set,
-    # converted from JSON to YAML, and linked to the config dir
-    $hass->succeed("test -L ${configDir}/configuration.yaml");
-    # The lovelace config is copied because lovelaceConfigWritable = true
-    $hass->succeed("test -f ${configDir}/ui-lovelace.yaml");
-
-    # Check that Home Assistant's web interface and API can be reached
-    $hass->waitForOpenPort(8123);
-    $hass->succeed("curl --fail http://localhost:8123/states");
-    $hass->succeed("curl --fail -H 'x-ha-access: ${apiPassword}' http://localhost:8123/api/ | grep -qF 'API running'");
-
-    # Toggle a binary sensor using MQTT
-    $hass->succeed("curl http://localhost:8123/api/states/binary_sensor.mqtt_binary_sensor -H 'x-ha-access: ${apiPassword}' | grep -qF '\"state\": \"off\"'");
-    $hass->waitUntilSucceeds("mosquitto_pub -V mqttv311 -t home-assistant/test -u homeassistant -P '${mqttPassword}' -m let_there_be_light");
-    $hass->succeed("curl http://localhost:8123/api/states/binary_sensor.mqtt_binary_sensor -H 'x-ha-access: ${apiPassword}' | grep -qF '\"state\": \"on\"'");
-
-    # Toggle a binary sensor using hass-cli
-    $hass->succeed("${hassCli} --output json state get binary_sensor.mqtt_binary_sensor | grep -qF '\"state\": \"on\"'");
-    $hass->succeed("${hassCli} state edit binary_sensor.mqtt_binary_sensor --json='{\"state\": \"off\"}'");
-    $hass->succeed("curl http://localhost:8123/api/states/binary_sensor.mqtt_binary_sensor -H 'x-ha-access: ${apiPassword}' | grep -qF '\"state\": \"off\"'");
-
-    # Print log to ease debugging
-    my $log = $hass->succeed("cat ${configDir}/home-assistant.log");
-    print "\n### home-assistant.log ###\n";
-    print "$log\n";
+    start_all()
+    hass.wait_for_unit("home-assistant.service")
+    with subtest("Check that YAML configuration file is in place"):
+        hass.succeed("test -L ${configDir}/configuration.yaml")
+    with subtest("lovelace config is copied because lovelaceConfigWritable = true"):
+        hass.succeed("test -f ${configDir}/ui-lovelace.yaml")
+    with subtest("Check that Home Assistant's web interface and API can be reached"):
+        hass.wait_for_open_port(8123)
+        hass.succeed("curl --fail http://localhost:8123/states")
+        assert "API running" in hass.succeed(
+            "curl --fail -H 'x-ha-access: ${apiPassword}' http://localhost:8123/api/"
+        )
+    with subtest("Toggle a binary sensor using MQTT"):
+        assert '"state": "off"' in hass.succeed(
+            "curl http://localhost:8123/api/states/binary_sensor.mqtt_binary_sensor -H 'x-ha-access: ${apiPassword}'"
+        )
+        hass.wait_until_succeeds(
+            "mosquitto_pub -V mqttv311 -t home-assistant/test -u homeassistant -P '${mqttPassword}' -m let_there_be_light"
+        )
+        assert '"state": "on"' in hass.succeed(
+            "curl http://localhost:8123/api/states/binary_sensor.mqtt_binary_sensor -H 'x-ha-access: ${apiPassword}'"
+        )
+    with subtest("Toggle a binary sensor using hass-cli"):
+        assert '"state": "on"' in hass.succeed(
+            "${hassCli} --output json state get binary_sensor.mqtt_binary_sensor"
+        )
+        hass.succeed(
+            "${hassCli} state edit binary_sensor.mqtt_binary_sensor --json='{\"state\": \"off\"}'"
+        )
+        assert '"state": "off"' in hass.succeed(
+            "curl http://localhost:8123/api/states/binary_sensor.mqtt_binary_sensor -H 'x-ha-access: ${apiPassword}'"
+        )
+    with subtest("Print log to ease debugging"):
+        output_log = hass.succeed("cat ${configDir}/home-assistant.log")
+        print("\n### home-assistant.log ###\n")
+        print(output_log + "\n")
 
-    # Check that no errors were logged
-    $hass->fail("cat ${configDir}/home-assistant.log | grep -qF ERROR");
+    with subtest("Check that no errors were logged"):
+        assert "ERROR" not in output_log
   '';
 })
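
The rewritten home-assistant script asserts on the raw JSON text returned by the states API. A hedged alternative, sketched below, parses the response with json.loads instead of substring matching; the payload here is fabricated but mirrors the '"state": "off"' fragments the test greps for.

# Sketch (not from the patch): parse the state API response instead of grepping it.
import json

response_text = '{"entity_id": "binary_sensor.mqtt_binary_sensor", "state": "off"}'
state = json.loads(response_text)["state"]
assert state == "off", f"unexpected state {state!r}"
print("binary sensor is", state)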
diff --git a/nixos/tests/hydra/default.nix b/nixos/tests/hydra/default.nix
index 37e14cb19cc..1c0ed3369b1 100644
--- a/nixos/tests/hydra/default.nix
+++ b/nixos/tests/hydra/default.nix
@@ -30,7 +30,7 @@ let
   callTest = f: f { inherit system pkgs; };
 
   hydraPkgs = {
-    inherit (pkgs) nixStable nixUnstable;
+    inherit (pkgs) nixStable nixUnstable nixFlakes;
   };
 
   tests = pkgs.lib.flip pkgs.lib.mapAttrs hydraPkgs (name: nix:
diff --git a/nixos/tests/ihatemoney.nix b/nixos/tests/ihatemoney.nix
new file mode 100644
index 00000000000..14db17fe5e6
--- /dev/null
+++ b/nixos/tests/ihatemoney.nix
@@ -0,0 +1,52 @@
+{ system ? builtins.currentSystem
+, config ? {}
+, pkgs ? import ../.. { inherit system config; }
+}:
+
+let
+  inherit (import ../lib/testing.nix { inherit system pkgs; }) makeTest;
+in
+map (
+  backend: makeTest {
+    name = "ihatemoney-${backend}";
+    machine = { lib, ... }: {
+      services.ihatemoney = {
+        enable = true;
+        enablePublicProjectCreation = true;
+        inherit backend;
+        uwsgiConfig = {
+          http = ":8000";
+        };
+      };
+      boot.cleanTmpDir = true;
+      # ihatemoney needs a local SMTP server, otherwise project creation just crashes
+      services.opensmtpd = {
+        enable = true;
+        serverConfiguration = ''
+          listen on lo
+          action foo relay
+          match from any for any action foo
+        '';
+      };
+    };
+    testScript = ''
+      $machine->waitForOpenPort(8000);
+      $machine->waitForUnit("uwsgi.service");
+      my $return = $machine->succeed("curl -X POST http://localhost:8000/api/projects -d 'name=yay&id=yay&password=yay&contact_email=yay\@example.com'");
+      die "wrong project id $return" unless "\"yay\"\n" eq $return;
+      my $timestamp = $machine->succeed("stat --printf %Y /var/lib/ihatemoney/secret_key");
+      my $owner = $machine->succeed("stat --printf %U:%G /var/lib/ihatemoney/secret_key");
+      die "wrong ownership for the secret key: $owner, is uwsgi running as the right user ?" unless $owner eq "ihatemoney:ihatemoney";
+      $machine->shutdown();
+      $machine->start();
+      $machine->waitForOpenPort(8000);
+      $machine->waitForUnit("uwsgi.service");
+      # check that the database is really persistent
+      print $machine->succeed("curl --basic -u yay:yay http://localhost:8000/api/projects/yay");
+      # check that the secret key is really persistent
+      my $timestamp2 = $machine->succeed("stat --printf %Y /var/lib/ihatemoney/secret_key");
+      die unless $timestamp eq $timestamp2;
+      $machine->succeed("curl http://localhost:8000 | grep ihatemoney");
+    '';
+  }
+) [ "sqlite" "postgresql" ]
diff --git a/nixos/tests/initdb.nix b/nixos/tests/initdb.nix
deleted file mode 100644
index 749d7857a13..00000000000
--- a/nixos/tests/initdb.nix
+++ /dev/null
@@ -1,26 +0,0 @@
-let
-  pkgs = import <nixpkgs> { };
-in
-with import <nixpkgs/nixos/lib/testing.nix> { inherit pkgs; system = builtins.currentSystem; };
-with pkgs.lib;
-
-makeTest {
-    name = "pg-initdb";
-
-    machine = {...}:
-      {
-        documentation.enable = false;
-        services.postgresql.enable = true;
-        services.postgresql.package = pkgs.postgresql_9_6;
-        environment.pathsToLink = [
-          "/share/postgresql"
-        ];
-      };
-
-    testScript = ''
-      $machine->start;
-      $machine->succeed("sudo -u postgres initdb -D /tmp/testpostgres2");
-      $machine->shutdown;
-    '';
-
-  }
\ No newline at end of file
diff --git a/nixos/tests/kafka.nix b/nixos/tests/kafka.nix
index 48ca98da8fa..f3de24e873b 100644
--- a/nixos/tests/kafka.nix
+++ b/nixos/tests/kafka.nix
@@ -3,11 +3,10 @@
   pkgs ? import ../.. { inherit system config; }
 }:
 
-with import ../lib/testing.nix { inherit system pkgs; };
 with pkgs.lib;
 
 let
-  makeKafkaTest = name: kafkaPackage: (makeTest {
+  makeKafkaTest = name: kafkaPackage: (import ./make-test-python.nix ({
     inherit name;
     meta = with pkgs.stdenv.lib.maintainers; {
       maintainers = [ nequissimus ];
@@ -45,24 +44,40 @@ let
     };
 
     testScript = ''
-      startAll;
+      start_all()
 
-      $zookeeper1->waitForUnit("default.target");
-      $zookeeper1->waitForUnit("zookeeper.service");
-      $zookeeper1->waitForOpenPort(2181);
+      zookeeper1.wait_for_unit("default.target")
+      zookeeper1.wait_for_unit("zookeeper.service")
+      zookeeper1.wait_for_open_port(2181)
 
-      $kafka->waitForUnit("default.target");
-      $kafka->waitForUnit("apache-kafka.service");
-      $kafka->waitForOpenPort(9092);
+      kafka.wait_for_unit("default.target")
+      kafka.wait_for_unit("apache-kafka.service")
+      kafka.wait_for_open_port(9092)
 
-      $kafka->waitUntilSucceeds("${kafkaPackage}/bin/kafka-topics.sh --create --zookeeper zookeeper1:2181 --partitions 1 --replication-factor 1 --topic testtopic");
-      $kafka->mustSucceed("echo 'test 1' | ${kafkaPackage}/bin/kafka-console-producer.sh --broker-list localhost:9092 --topic testtopic");
+      kafka.wait_until_succeeds(
+          "${kafkaPackage}/bin/kafka-topics.sh --create "
+          + "--zookeeper zookeeper1:2181 --partitions 1 "
+          + "--replication-factor 1 --topic testtopic"
+      )
+      kafka.succeed(
+          "echo 'test 1' | "
+          + "${kafkaPackage}/bin/kafka-console-producer.sh "
+          + "--broker-list localhost:9092 --topic testtopic"
+      )
     '' + (if name == "kafka_0_9" then ''
-      $kafka->mustSucceed("${kafkaPackage}/bin/kafka-console-consumer.sh --zookeeper zookeeper1:2181 --topic testtopic --from-beginning --max-messages 1 | grep 'test 1'");
+      assert "test 1" in kafka.succeed(
+          "${kafkaPackage}/bin/kafka-console-consumer.sh "
+          + "--zookeeper zookeeper1:2181 --topic testtopic "
+          + "--from-beginning --max-messages 1"
+      )
     '' else ''
-      $kafka->mustSucceed("${kafkaPackage}/bin/kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic testtopic --from-beginning --max-messages 1 | grep 'test 1'");
+      assert "test 1" in kafka.succeed(
+          "${kafkaPackage}/bin/kafka-console-consumer.sh "
+          + "--bootstrap-server localhost:9092 --topic testtopic "
+          + "--from-beginning --max-messages 1"
+      )
     '');
-  });
+  }) {});
 
 in with pkgs; {
   kafka_0_9  = makeKafkaTest "kafka_0_9"  apacheKafka_0_9;
@@ -74,4 +89,5 @@ in with pkgs; {
   kafka_2_1  = makeKafkaTest "kafka_2_1"  apacheKafka_2_1;
   kafka_2_2  = makeKafkaTest "kafka_2_2"  apacheKafka_2_2;
   kafka_2_3  = makeKafkaTest "kafka_2_3"  apacheKafka_2_3;
+  kafka_2_4  = makeKafkaTest "kafka_2_4"  apacheKafka_2_4;
 }
diff --git a/nixos/tests/kexec.nix b/nixos/tests/kexec.nix
index b13b4131091..ec0cd9796b0 100644
--- a/nixos/tests/kexec.nix
+++ b/nixos/tests/kexec.nix
@@ -1,9 +1,15 @@
 # Test whether fast reboots via kexec work.
 
-import ./make-test.nix ({ pkgs, ...} : {
+import ./make-test-python.nix ({ pkgs, lib, ...} : {
   name = "kexec";
-  meta = with pkgs.stdenv.lib.maintainers; {
+  meta = with lib.maintainers; {
     maintainers = [ eelco ];
+    # Currently hangs forever; last output is:
+    #     machine # [   10.239914] dhcpcd[707]: eth0: adding default route via fe80::2
+    #     machine: waiting for the VM to finish booting
+    #     machine # Cannot find the ESP partition mount point.
+    #     machine # [   28.681197] nscd[692]: 692 checking for monitored file `/etc/netgroup': No such file or directory
+    broken = true;
   };
 
   machine = { ... }:
@@ -11,9 +17,9 @@ import ./make-test.nix ({ pkgs, ...} : {
 
   testScript =
     ''
-      $machine->waitForUnit("multi-user.target");
-      $machine->execute("systemctl kexec &");
-      $machine->{connected} = 0;
-      $machine->waitForUnit("multi-user.target");
+      machine.wait_for_unit("multi-user.target")
+      machine.execute("systemctl kexec &")
+      machine.connected = False
+      machine.wait_for_unit("multi-user.target")
     '';
 })
diff --git a/nixos/tests/mysql.nix b/nixos/tests/mysql.nix
index 2c0d212c2f1..924bac84e26 100644
--- a/nixos/tests/mysql.nix
+++ b/nixos/tests/mysql.nix
@@ -27,6 +27,7 @@ import ./make-test-python.nix ({ pkgs, ...} : {
 
       {
         users.users.testuser = { };
+        users.users.testuser2 = { };
         services.mysql.enable = true;
         services.mysql.initialScript = pkgs.writeText "mariadb-init.sql" ''
           ALTER USER root@localhost IDENTIFIED WITH unix_socket;
@@ -34,12 +35,17 @@ import ./make-test-python.nix ({ pkgs, ...} : {
           DELETE FROM mysql.user WHERE user = ''';
           FLUSH PRIVILEGES;
         '';
-        services.mysql.ensureDatabases = [ "testdb" ];
+        services.mysql.ensureDatabases = [ "testdb" "testdb2" ];
         services.mysql.ensureUsers = [{
           name = "testuser";
           ensurePermissions = {
             "testdb.*" = "ALL PRIVILEGES";
           };
+        } {
+          name = "testuser2";
+          ensurePermissions = {
+            "testdb2.*" = "ALL PRIVILEGES";
+          };
         }];
         services.mysql.package = pkgs.mariadb;
       };
@@ -47,7 +53,7 @@ import ./make-test-python.nix ({ pkgs, ...} : {
   };
 
   testScript = ''
-    start_all
+    start_all()
 
     mysql.wait_for_unit("mysql")
     mysql.succeed("echo 'use empty_testdb;' | mysql -u root")
@@ -62,6 +68,14 @@ import ./make-test-python.nix ({ pkgs, ...} : {
     mariadb.succeed(
         "echo 'use testdb; insert into tests values (42);' | sudo -u testuser mysql -u testuser"
     )
+    # Ensure testuser2 is not able to insert into testdb as mysql testuser2
+    mariadb.fail(
+        "echo 'use testdb; insert into tests values (23);' | sudo -u testuser2 mysql -u testuser2"
+    )
+    # Ensure testuser2 is not able to authenticate as mysql testuser
+    mariadb.fail(
+        "echo 'use testdb; insert into tests values (23);' | sudo -u testuser2 mysql -u testuser"
+    )
     mariadb.succeed(
         "echo 'use testdb; select test_id from tests;' | sudo -u testuser mysql -u testuser -N | grep 42"
     )
diff --git a/nixos/tests/nagios.nix b/nixos/tests/nagios.nix
new file mode 100644
index 00000000000..6f5d4447287
--- /dev/null
+++ b/nixos/tests/nagios.nix
@@ -0,0 +1,116 @@
+import ./make-test-python.nix (
+  { pkgs, ... }: {
+    name = "nagios";
+    meta = with pkgs.stdenv.lib.maintainers; {
+      maintainers = [ symphorien ];
+    };
+
+    machine = { lib, ... }: let
+      writer = pkgs.writeShellScript "write" ''
+        set -x
+        echo "$@"  >> /tmp/notifications
+      '';
+    in
+      {
+        # tested service
+        services.sshd.enable = true;
+        # nagios
+        services.nagios = {
+          enable = true;
+          # make state transitions faster
+          extraConfig.interval_length = "5";
+          objectDefs =
+            (map (x: "${pkgs.nagios}/etc/objects/${x}.cfg") [ "templates" "timeperiods" "commands" ]) ++ [
+              (
+                pkgs.writeText "objects.cfg" ''
+                  # notifications are written to /tmp/notifications
+                  define command {
+                  command_name notify-host-by-file
+                  command_line ${writer} "$HOSTNAME is $HOSTSTATE$"
+                  }
+                  define command {
+                  command_name notify-service-by-file
+                  command_line ${writer} "$SERVICEDESC$ is $SERVICESTATE$"
+                  }
+
+                  # nagios boilerplate
+                  define contact {
+                  contact_name                    alice
+                  alias                           alice
+                  host_notifications_enabled      1
+                  service_notifications_enabled   1
+                  service_notification_period     24x7
+                  host_notification_period        24x7
+                  service_notification_options    w,u,c,r,f,s
+                  host_notification_options       d,u,r,f,s
+                  service_notification_commands   notify-service-by-file
+                  host_notification_commands      notify-host-by-file
+                  email                           foo@example.com
+                  }
+                  define contactgroup {
+                  contactgroup_name   admins
+                  alias               Admins
+                  members alice
+                  }
+                  define hostgroup{
+                  hostgroup_name  allhosts
+                  alias  All hosts
+                  }
+
+                  # monitored objects
+                  define host {
+                  use         generic-host
+                  host_name   localhost
+                  alias       localhost
+                  address     localhost
+                  hostgroups  allhosts
+                  contact_groups admins
+                  # make state transitions faster.
+                  max_check_attempts 2
+                  check_interval 1
+                  retry_interval 1
+                  }
+                  define service {
+                  use                 generic-service
+                  host_name           localhost
+                  service_description ssh
+                  check_command       check_ssh
+                  # make state transitions faster.
+                  max_check_attempts 2
+                  check_interval 1
+                  retry_interval 1
+                  }
+                ''
+              )
+            ];
+        };
+      };
+
+    testScript = { ... }: ''
+      with subtest("ensure sshd starts"):
+          machine.wait_for_unit("sshd.service")
+
+
+      with subtest("ensure nagios starts"):
+          machine.wait_for_file("/var/log/nagios/current")
+
+
+      def assert_notify(text):
+          machine.wait_for_file("/tmp/notifications")
+          real = machine.succeed("cat /tmp/notifications").strip()
+          print(f"got {real!r}, expected {text!r}")
+          assert text == real
+
+
+      with subtest("ensure we get a notification when sshd is down"):
+          machine.succeed("systemctl stop sshd")
+          assert_notify("ssh is CRITICAL")
+
+
+      with subtest("ensure tests can succeed"):
+          machine.succeed("systemctl start sshd")
+          machine.succeed("rm /tmp/notifications")
+          assert_notify("ssh is OK")
+    '';
+  }
+)
diff --git a/nixos/tests/netdata.nix b/nixos/tests/netdata.nix
index 8dd5eafb097..4ddc96e8bc2 100644
--- a/nixos/tests/netdata.nix
+++ b/nixos/tests/netdata.nix
@@ -25,6 +25,7 @@ import ./make-test-python.nix ({ pkgs, ...} : {
 
     # check if the netdata main page loads.
     netdata.succeed("curl --fail http://localhost:19999/")
+    netdata.succeed("sleep 4")
 
     # check if netdata can read disk ops for root owned processes.
     # if > 0, successful. verifies both netdata working and
diff --git a/nixos/tests/nginx-etag.nix b/nixos/tests/nginx-etag.nix
new file mode 100644
index 00000000000..e357309d166
--- /dev/null
+++ b/nixos/tests/nginx-etag.nix
@@ -0,0 +1,89 @@
+import ./make-test-python.nix {
+  name = "nginx-etag";
+
+  nodes = {
+    server = { pkgs, lib, ... }: {
+      networking.firewall.enable = false;
+      services.nginx.enable = true;
+      services.nginx.virtualHosts.server = {
+        root = pkgs.runCommandLocal "testdir" {} ''
+          mkdir "$out"
+          cat > "$out/test.js" <<EOF
+          document.getElementById('foobar').setAttribute('foo', 'bar');
+          EOF
+          cat > "$out/index.html" <<EOF
+          <!DOCTYPE html>
+          <div id="foobar">test</div>
+          <script src="test.js"></script>
+          EOF
+        '';
+      };
+
+      nesting.clone = lib.singleton {
+        services.nginx.virtualHosts.server = {
+          root = lib.mkForce (pkgs.runCommandLocal "testdir2" {} ''
+            mkdir "$out"
+            cat > "$out/test.js" <<EOF
+            document.getElementById('foobar').setAttribute('foo', 'yay');
+            EOF
+            cat > "$out/index.html" <<EOF
+            <!DOCTYPE html>
+            <div id="foobar">test</div>
+            <script src="test.js"></script>
+            EOF
+          '');
+        };
+      };
+    };
+
+    client = { pkgs, lib, ... }: {
+      virtualisation.memorySize = 512;
+      environment.systemPackages = let
+        testRunner = pkgs.writers.writePython3Bin "test-runner" {
+          libraries = [ pkgs.python3Packages.selenium ];
+        } ''
+          import os
+          import time
+
+          from selenium.webdriver import Firefox
+          from selenium.webdriver.firefox.options import Options
+
+          options = Options()
+          options.add_argument('--headless')
+          driver = Firefox(options=options)
+
+          driver.implicitly_wait(20)
+          driver.get('http://server/')
+          driver.find_element_by_xpath('//div[@foo="bar"]')
+          open('/tmp/passed_stage1', 'w')
+
+          while not os.path.exists('/tmp/proceed'):
+              time.sleep(0.5)
+
+          driver.get('http://server/')
+          driver.find_element_by_xpath('//div[@foo="yay"]')
+          open('/tmp/passed', 'w')
+        '';
+      in [ pkgs.firefox-unwrapped pkgs.geckodriver testRunner ];
+    };
+  };
+
+  testScript = { nodes, ... }: let
+    inherit (nodes.server.config.system.build) toplevel;
+    newSystem = "${toplevel}/fine-tune/child-1";
+  in ''
+    start_all()
+
+    server.wait_for_unit("nginx.service")
+    client.wait_for_unit("multi-user.target")
+    client.execute("test-runner &")
+    client.wait_for_file("/tmp/passed_stage1")
+
+    server.succeed(
+        "${newSystem}/bin/switch-to-configuration test >&2"
+    )
+    client.succeed("touch /tmp/proceed")
+
+    client.wait_for_file("/tmp/passed")
+  '';
+}
diff --git a/nixos/tests/nginx-sso.nix b/nixos/tests/nginx-sso.nix
index e19992cb6bf..8834fc31c38 100644
--- a/nixos/tests/nginx-sso.nix
+++ b/nixos/tests/nginx-sso.nix
@@ -1,4 +1,4 @@
-import ./make-test.nix ({ pkgs, ... }: {
+import ./make-test-python.nix ({ pkgs, ... }: {
   name = "nginx-sso";
   meta = {
     maintainers = with pkgs.stdenv.lib.maintainers; [ delroth ];
@@ -27,18 +27,22 @@ import ./make-test.nix ({ pkgs, ... }: {
   };
 
   testScript = ''
-    startAll;
+    start_all()
 
-    $machine->waitForUnit("nginx-sso.service");
-    $machine->waitForOpenPort(8080);
+    machine.wait_for_unit("nginx-sso.service")
+    machine.wait_for_open_port(8080)
 
-    # No valid user -> 401.
-    $machine->fail("curl -sSf http://localhost:8080/auth");
+    with subtest("No valid user -> 401"):
+        machine.fail("curl -sSf http://localhost:8080/auth")
 
-    # Valid user but no matching ACL -> 403.
-    $machine->fail("curl -sSf -H 'Authorization: Token MyToken' http://localhost:8080/auth");
+    with subtest("Valid user but no matching ACL -> 403"):
+        machine.fail(
+            "curl -sSf -H 'Authorization: Token MyToken' http://localhost:8080/auth"
+        )
 
-    # Valid user and matching ACL -> 200.
-    $machine->succeed("curl -sSf -H 'Authorization: Token MyToken' -H 'X-Application: MyApp' http://localhost:8080/auth");
+    with subtest("Valid user and matching ACL -> 200"):
+        machine.succeed(
+            "curl -sSf -H 'Authorization: Token MyToken' -H 'X-Application: MyApp' http://localhost:8080/auth"
+        )
   '';
 })
diff --git a/nixos/tests/nginx.nix b/nixos/tests/nginx.nix
index d0b7306ae83..7358800a676 100644
--- a/nixos/tests/nginx.nix
+++ b/nixos/tests/nginx.nix
@@ -4,10 +4,10 @@
 #   2. whether the ETag header is properly generated whenever we're serving
 #      files in Nix store paths
 #   3. nginx doesn't restart on configuration changes (only reloads)
-import ./make-test.nix ({ pkgs, ... }: {
+import ./make-test-python.nix ({ pkgs, ... }: {
   name = "nginx";
   meta = with pkgs.stdenv.lib.maintainers; {
-    maintainers = [ mbbx6spp ];
+    maintainers = [ mbbx6spp danbst ];
   };
 
   nodes = {
@@ -59,6 +59,11 @@ import ./make-test.nix ({ pkgs, ... }: {
         {
           services.nginx.package = pkgs.nginxUnstable;
         }
+
+        {
+          services.nginx.package = pkgs.nginxUnstable;
+          services.nginx.virtualHosts."!@$$(#*%".locations."~@#*$*!)".proxyPass = ";;;";
+        }
       ];
     };
 
@@ -68,44 +73,60 @@ import ./make-test.nix ({ pkgs, ... }: {
     etagSystem = "${nodes.webserver.config.system.build.toplevel}/fine-tune/child-1";
     justReloadSystem = "${nodes.webserver.config.system.build.toplevel}/fine-tune/child-2";
     reloadRestartSystem = "${nodes.webserver.config.system.build.toplevel}/fine-tune/child-3";
+    reloadWithErrorsSystem = "${nodes.webserver.config.system.build.toplevel}/fine-tune/child-4";
   in ''
-    my $url = 'http://localhost/index.html';
-
-    sub checkEtag {
-      my $etag = $webserver->succeed(
-        'curl -v '.$url.' 2>&1 | sed -n -e "s/^< [Ee][Tt][Aa][Gg]: *//p"'
-      );
-      $etag =~ s/\r?\n$//;
-      my $httpCode = $webserver->succeed(
-        'curl -w "%{http_code}" -X HEAD -H \'If-None-Match: '.$etag.'\' '.$url
-      );
-      chomp $httpCode;
-      die "HTTP code is not 304" unless $httpCode == 304;
-      return $etag;
-    }
-
-    $webserver->waitForUnit("nginx");
-    $webserver->waitForOpenPort("80");
-
-    subtest "check ETag if serving Nix store paths", sub {
-      my $oldEtag = checkEtag;
-      $webserver->succeed("${etagSystem}/bin/switch-to-configuration test >&2");
-      $webserver->sleep(1); # race condition
-      my $newEtag = checkEtag;
-      die "Old ETag $oldEtag is the same as $newEtag" if $oldEtag eq $newEtag;
-    };
+    url = "http://localhost/index.html"
 
-    subtest "config is reloaded on nixos-rebuild switch", sub {
-      $webserver->succeed("${justReloadSystem}/bin/switch-to-configuration test >&2");
-      $webserver->waitForOpenPort("8080");
-      $webserver->fail("journalctl -u nginx | grep -q -i stopped");
-      $webserver->succeed("journalctl -u nginx | grep -q -i reloaded");
-    };
 
-    subtest "restart when nginx package changes", sub {
-      $webserver->succeed("${reloadRestartSystem}/bin/switch-to-configuration test >&2");
-      $webserver->waitForUnit("nginx");
-      $webserver->succeed("journalctl -u nginx | grep -q -i stopped");
-    };
+    def check_etag():
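+        # Fetch the ETag of the index page and verify that a conditional request
+        # with it returns 304 Not Modified.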
+        etag = webserver.succeed(
+            f'curl -v {url} 2>&1 | sed -n -e "s/^< etag: *//ip"'
+        ).rstrip()
+        http_code = webserver.succeed(
+            f"curl -w '%{{http_code}}' --head --fail -H 'If-None-Match: {etag}' {url}"
+        )
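+        # curl's --write-out value is appended after the response headers,
+        # so the status code is the last line of the output.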
+        assert http_code.split("\n")[-1] == "304"
+
+        return etag
+
+
+    webserver.wait_for_unit("nginx")
+    webserver.wait_for_open_port(80)
+
+    with subtest("check ETag if serving Nix store paths"):
+        old_etag = check_etag()
+        webserver.succeed(
+            "${etagSystem}/bin/switch-to-configuration test >&2"
+        )
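+        # avoid a race condition: give nginx a moment to pick up the new
+        # configuration before re-checking the ETag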
+        webserver.sleep(1)
+        new_etag = check_etag()
+        assert old_etag != new_etag
+
+    with subtest("config is reloaded on nixos-rebuild switch"):
+        webserver.succeed(
+            "${justReloadSystem}/bin/switch-to-configuration test >&2"
+        )
+        webserver.wait_for_open_port(8080)
+        webserver.fail("journalctl -u nginx | grep -q -i stopped")
+        webserver.succeed("journalctl -u nginx | grep -q -i reloaded")
+
+    with subtest("restart when nginx package changes"):
+        webserver.succeed(
+            "${reloadRestartSystem}/bin/switch-to-configuration test >&2"
+        )
+        webserver.wait_for_unit("nginx")
+        webserver.succeed("journalctl -u nginx | grep -q -i stopped")
+
+    with subtest("nixos-rebuild --switch should fail when there are configuration errors"):
+        webserver.fail(
+            "${reloadWithErrorsSystem}/bin/switch-to-configuration test >&2"
+        )
+        webserver.succeed("[[ $(systemctl is-failed nginx-config-reload) == failed ]]")
+        webserver.succeed("[[ $(systemctl is-failed nginx) == active ]]")
+        # Repeat the switch to make sure the operation is idempotent: during development,
+        # the error was sometimes only reported on the first rebuild and disappeared on
+        # subsequent ones.
+        webserver.fail(
+            "${reloadWithErrorsSystem}/bin/switch-to-configuration test >&2"
+        )
   '';
 })
diff --git a/nixos/tests/postgresql.nix b/nixos/tests/postgresql.nix
index e71c3888288..3201e22555e 100644
--- a/nixos/tests/postgresql.nix
+++ b/nixos/tests/postgresql.nix
@@ -29,11 +29,15 @@ let
 
     machine = {...}:
       {
-        services.postgresql.enable = true;
-        services.postgresql.package = postgresql-package;
+        services.postgresql = {
+          enable = true;
+          package = postgresql-package;
+        };
 
-        services.postgresqlBackup.enable = true;
-        services.postgresqlBackup.databases = optional (!backup-all) "postgres";
+        services.postgresqlBackup = {
+          enable = true;
+          databases = optional (!backup-all) "postgres";
+        };
       };
 
     testScript = let
@@ -49,23 +53,32 @@ let
       machine.start()
       machine.wait_for_unit("postgresql")
 
-      # postgresql should be available just after unit start
-      machine.succeed(
-          "cat ${test-sql} | sudo -u postgres psql"
-      )
-      machine.shutdown()  # make sure that postgresql survive restart (bug #1735)
-      time.sleep(2)
-      machine.start()
-      machine.wait_for_unit("postgresql")
+      with subtest("Postgresql is available just after unit start"):
+          machine.succeed(
+              "cat ${test-sql} | sudo -u postgres psql"
+          )
+
+      with subtest("Postgresql survives restart (bug #1735)"):
+          machine.shutdown()
+          time.sleep(2)
+          machine.start()
+          machine.wait_for_unit("postgresql")
+
       machine.fail(check_count("SELECT * FROM sth;", 3))
       machine.succeed(check_count("SELECT * FROM sth;", 5))
       machine.fail(check_count("SELECT * FROM sth;", 4))
       machine.succeed(check_count("SELECT xpath('/test/text()', doc) FROM xmltest;", 1))
 
-      # Check backup service
-      machine.succeed("systemctl start ${backupService}.service")
-      machine.succeed("zcat /var/backup/postgresql/${backupName}.sql.gz | grep '<test>ok</test>'")
-      machine.succeed("stat -c '%a' /var/backup/postgresql/${backupName}.sql.gz | grep 600")
+      with subtest("Backup service works"):
+          machine.succeed(
+              "systemctl start ${backupService}.service",
+              "zcat /var/backup/postgresql/${backupName}.sql.gz | grep '<test>ok</test>'",
+              "stat -c '%a' /var/backup/postgresql/${backupName}.sql.gz | grep 600",
+          )
+
+      with subtest("Initdb works"):
+          machine.succeed("sudo -u postgres initdb -D /tmp/testpostgres2")
+
       machine.shutdown()
     '';
 
diff --git a/nixos/tests/xandikos.nix b/nixos/tests/xandikos.nix
new file mode 100644
index 00000000000..0fded20ff1a
--- /dev/null
+++ b/nixos/tests/xandikos.nix
@@ -0,0 +1,70 @@
+import ./make-test-python.nix (
+  { pkgs, lib, ... }:
+
+    {
+      name = "xandikos";
+
+      meta.maintainers = [ lib.maintainers."0x4A6F" ];
+
+      nodes = {
+        xandikos_client = {};
+        xandikos_default = {
+          networking.firewall.allowedTCPPorts = [ 8080 ];
+          services.xandikos.enable = true;
+        };
+        xandikos_proxy = {
+          networking.firewall.allowedTCPPorts = [ 80 8080 ];
+          services.xandikos.enable = true;
+          services.xandikos.address = "localhost";
+          services.xandikos.port = 8080;
+          services.xandikos.routePrefix = "/xandikos/";
+          services.xandikos.extraOptions = [
+            "--defaults"
+          ];
+          services.nginx = {
+            enable = true;
+            recommendedProxySettings = true;
+            virtualHosts."xandikos" = {
+              serverName = "xandikos.local";
+              basicAuth.xandikos = "snakeOilPassword";
+              locations."/xandikos/" = {
+                proxyPass = "http://localhost:8080/";
+              };
+            };
+          };
+        };
+      };
+
+      testScript = ''
+        start_all()
+
+        with subtest("Xandikos default"):
+            xandikos_default.wait_for_unit("multi-user.target")
+            xandikos_default.wait_for_unit("xandikos.service")
+            xandikos_default.wait_for_open_port(8080)
+            xandikos_default.succeed("curl --fail http://localhost:8080/")
+            xandikos_default.succeed(
+                "curl -s --fail --location http://localhost:8080/ | grep -qi Xandikos"
+            )
+            xandikos_client.wait_for_unit("network.target")
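+            # The default configuration only listens on localhost, so direct access
+            # from another node is expected to fail.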
+            xandikos_client.fail("curl --fail http://xandikos_default:8080/")
+
+        with subtest("Xandikos proxy"):
+            xandikos_proxy.wait_for_unit("multi-user.target")
+            xandikos_proxy.wait_for_unit("xandikos.service")
+            xandikos_proxy.wait_for_open_port(8080)
+            xandikos_proxy.succeed("curl --fail http://localhost:8080/")
+            xandikos_proxy.succeed(
+                "curl -s --fail --location http://localhost:8080/ | grep -qi Xandikos"
+            )
+            xandikos_client.wait_for_unit("network.target")
+            xandikos_client.fail("curl --fail http://xandikos_proxy:8080/")
+            xandikos_client.succeed(
+                "curl -s --fail -u xandikos:snakeOilPassword -H 'Host: xandikos.local' http://xandikos_proxy/xandikos/ | grep -qi Xandikos"
+            )
+            xandikos_client.succeed(
+                "curl -s --fail -u xandikos:snakeOilPassword -H 'Host: xandikos.local' http://xandikos_proxy/xandikos/user/ | grep -qi Xandikos"
+            )
+      '';
+    }
+)