Diffstat (limited to 'nixos')
-rw-r--r-- nixos/doc/manual/configuration/configuration.xml | 1
-rw-r--r-- nixos/doc/manual/configuration/kubernetes.xml | 127
-rw-r--r-- nixos/doc/manual/release-notes/rl-1903.xml | 124
-rw-r--r-- nixos/maintainers/scripts/cloudstack/cloudstack-image.nix | 23
-rw-r--r-- nixos/modules/config/fonts/fontconfig-penultimate.nix | 4
-rw-r--r-- nixos/modules/config/fonts/fontconfig-ultimate.nix | 2
-rw-r--r-- nixos/modules/config/fonts/fontconfig.nix | 2
-rw-r--r-- nixos/modules/config/fonts/fontdir.nix | 2
-rw-r--r-- nixos/modules/config/nsswitch.nix | 13
-rw-r--r-- nixos/modules/hardware/acpilight.nix | 24
-rw-r--r-- nixos/modules/hardware/ledger.nix | 14
-rw-r--r-- nixos/modules/hardware/video/uvcvideo/uvcdynctrl-udev-rules.nix | 1
-rw-r--r-- nixos/modules/installer/cd-dvd/channel.nix | 2
-rw-r--r-- nixos/modules/installer/cd-dvd/installation-cd-graphical-base.nix | 4
-rw-r--r-- nixos/modules/installer/tools/nixos-rebuild.sh | 9
-rw-r--r-- nixos/modules/misc/documentation.nix | 1
-rw-r--r-- nixos/modules/misc/ids.nix | 6
-rw-r--r-- nixos/modules/module-list.nix | 22
-rw-r--r-- nixos/modules/profiles/graphical.nix | 4
-rw-r--r-- nixos/modules/programs/autojump.nix | 33
-rw-r--r-- nixos/modules/programs/less.nix | 2
-rw-r--r-- nixos/modules/programs/singularity.nix | 21
-rw-r--r-- nixos/modules/programs/ssh.nix | 10
-rw-r--r-- nixos/modules/rename.nix | 11
-rw-r--r-- nixos/modules/security/ca.nix | 1
-rw-r--r-- nixos/modules/security/sudo.nix | 5
-rw-r--r-- nixos/modules/services/audio/squeezelite.nix | 27
-rw-r--r-- nixos/modules/services/cluster/kubernetes/addon-manager.nix | 167
-rw-r--r-- nixos/modules/services/cluster/kubernetes/addons/dashboard.nix (renamed from nixos/modules/services/cluster/kubernetes/dashboard.nix) | 15
-rw-r--r-- nixos/modules/services/cluster/kubernetes/addons/dns.nix (renamed from nixos/modules/services/cluster/kubernetes/dns.nix) | 38
-rw-r--r-- nixos/modules/services/cluster/kubernetes/apiserver.nix | 428
-rw-r--r-- nixos/modules/services/cluster/kubernetes/controller-manager.nix | 162
-rw-r--r-- nixos/modules/services/cluster/kubernetes/default.nix | 1131
-rw-r--r-- nixos/modules/services/cluster/kubernetes/flannel.nix | 134
-rw-r--r-- nixos/modules/services/cluster/kubernetes/kubelet.nix | 358
-rw-r--r-- nixos/modules/services/cluster/kubernetes/pki.nix | 388
-rw-r--r-- nixos/modules/services/cluster/kubernetes/proxy.nix | 82
-rw-r--r-- nixos/modules/services/cluster/kubernetes/scheduler.nix | 94
-rw-r--r-- nixos/modules/services/continuous-integration/buildkite-agent.nix | 2
-rw-r--r-- nixos/modules/services/continuous-integration/gitlab-runner.nix | 1
-rw-r--r-- nixos/modules/services/databases/hbase.nix | 2
-rw-r--r-- nixos/modules/services/databases/influxdb.nix | 1
-rw-r--r-- nixos/modules/services/desktops/pantheon/contractor.nix | 39
-rw-r--r-- nixos/modules/services/desktops/pantheon/files.nix | 36
-rw-r--r-- nixos/modules/services/desktops/tumbler.nix | 50
-rw-r--r-- nixos/modules/services/hardware/acpid.nix | 2
-rw-r--r-- nixos/modules/services/hardware/bolt.nix | 34
-rw-r--r-- nixos/modules/services/hardware/tlp.nix | 1
-rw-r--r-- nixos/modules/services/logging/logcheck.nix | 2
-rw-r--r-- nixos/modules/services/logging/logstash.nix | 5
-rw-r--r-- nixos/modules/services/mail/opensmtpd.nix | 2
-rw-r--r-- nixos/modules/services/misc/beanstalkd.nix | 52
-rw-r--r-- nixos/modules/services/misc/gitlab.nix | 15
-rw-r--r-- nixos/modules/services/misc/gitolite.nix | 2
-rw-r--r-- nixos/modules/services/misc/headphones.nix | 87
-rw-r--r-- nixos/modules/services/misc/home-assistant.nix | 2
-rw-r--r-- nixos/modules/services/misc/jackett.nix | 59
-rw-r--r-- nixos/modules/services/misc/matrix-synapse.nix | 10
-rw-r--r-- nixos/modules/services/misc/nix-daemon.nix | 2
-rw-r--r-- nixos/modules/services/misc/radarr.nix | 59
-rw-r--r-- nixos/modules/services/misc/taskserver/default.nix | 2
-rw-r--r-- nixos/modules/services/misc/zoneminder.nix | 8
-rw-r--r-- nixos/modules/services/monitoring/apcupsd.nix | 2
-rw-r--r-- nixos/modules/services/monitoring/graphite.nix | 6
-rw-r--r-- nixos/modules/services/monitoring/nagios.nix | 6
-rw-r--r-- nixos/modules/services/monitoring/netdata.nix | 2
-rw-r--r-- nixos/modules/services/monitoring/prometheus/default.nix | 2
-rw-r--r-- nixos/modules/services/monitoring/scollector.nix | 2
-rw-r--r-- nixos/modules/services/monitoring/telegraf.nix | 1
-rw-r--r-- nixos/modules/services/monitoring/uptime.nix | 3
-rw-r--r-- nixos/modules/services/network-filesystems/beegfs.nix | 5
-rw-r--r-- nixos/modules/services/network-filesystems/diod.nix | 1
-rw-r--r-- nixos/modules/services/network-filesystems/ipfs.nix | 2
-rw-r--r-- nixos/modules/services/network-filesystems/openafs/client.nix | 4
-rw-r--r-- nixos/modules/services/networking/dnscache.nix | 2
-rw-r--r-- nixos/modules/services/networking/flannel.nix | 54
-rw-r--r-- nixos/modules/services/networking/hylafax/systemd.nix | 2
-rw-r--r-- nixos/modules/services/networking/nix-serve.nix | 9
-rw-r--r-- nixos/modules/services/networking/prayer.nix | 2
-rw-r--r-- nixos/modules/services/networking/quassel.nix | 28
-rw-r--r-- nixos/modules/services/networking/shout.nix | 2
-rw-r--r-- nixos/modules/services/networking/xrdp.nix | 2
-rw-r--r-- nixos/modules/services/printing/cupsd.nix | 5
-rw-r--r-- nixos/modules/services/ttys/kmscon.nix | 2
-rw-r--r-- nixos/modules/services/web-apps/codimd.nix | 2
-rw-r--r-- nixos/modules/services/web-apps/icingaweb2/icingaweb2.nix | 626
-rw-r--r-- nixos/modules/services/web-apps/icingaweb2/module-monitoring.nix | 157
-rw-r--r-- nixos/modules/services/web-apps/matomo-doc.xml | 2
-rw-r--r-- nixos/modules/services/web-apps/tt-rss.nix | 4
-rw-r--r-- nixos/modules/services/web-servers/apache-httpd/default.nix | 16
-rw-r--r-- nixos/modules/services/web-servers/apache-httpd/mediawiki.nix | 7
-rw-r--r-- nixos/modules/services/web-servers/nginx/default.nix | 24
-rw-r--r-- nixos/modules/services/web-servers/phpfpm/default.nix | 10
-rw-r--r-- nixos/modules/services/web-servers/phpfpm/pool-options.nix | 9
-rw-r--r-- nixos/modules/services/web-servers/traefik.nix | 1
-rw-r--r-- nixos/modules/services/x11/desktop-managers/default.nix | 2
-rw-r--r-- nixos/modules/services/x11/desktop-managers/gnome3.nix | 3
-rw-r--r-- nixos/modules/services/x11/desktop-managers/pantheon.nix | 195
-rw-r--r-- nixos/modules/services/x11/display-managers/lightdm-greeters/enso-os.nix | 7
-rw-r--r-- nixos/modules/services/x11/display-managers/lightdm-greeters/gtk.nix | 7
-rw-r--r-- nixos/modules/services/x11/display-managers/lightdm-greeters/pantheon.nix | 47
-rw-r--r-- nixos/modules/services/x11/display-managers/lightdm.nix | 1
-rw-r--r-- nixos/modules/services/x11/display-managers/slim.nix | 2
-rw-r--r-- nixos/modules/services/x11/gdk-pixbuf.nix | 2
-rw-r--r-- nixos/modules/services/x11/xserver.nix | 4
-rw-r--r-- nixos/modules/system/boot/stage-1.nix | 14
-rw-r--r-- nixos/modules/system/boot/systemd.nix | 21
-rw-r--r-- nixos/modules/system/etc/make-etc.sh | 15
-rw-r--r-- nixos/modules/system/etc/setup-etc.pl | 19
-rw-r--r-- nixos/modules/tasks/filesystems/vboxsf.nix | 2
-rw-r--r-- nixos/modules/tasks/kbd.nix | 1
-rw-r--r-- nixos/modules/tasks/network-interfaces.nix | 2
-rw-r--r-- nixos/modules/virtualisation/cloudstack-config.nix | 40
-rw-r--r-- nixos/modules/virtualisation/vmware-guest.nix | 18
-rw-r--r-- nixos/release-combined.nix | 1
-rw-r--r-- nixos/tests/all-tests.nix | 3
-rw-r--r-- nixos/tests/avahi.nix | 2
-rw-r--r-- nixos/tests/beanstalkd.nix | 45
-rw-r--r-- nixos/tests/bittorrent.nix | 2
-rw-r--r-- nixos/tests/containers-bridge.nix | 2
-rw-r--r-- nixos/tests/containers-imperative.nix | 2
-rw-r--r-- nixos/tests/containers-ipv4.nix | 2
-rw-r--r-- nixos/tests/containers-ipv6.nix | 2
-rw-r--r-- nixos/tests/containers-portforward.nix | 2
-rw-r--r-- nixos/tests/elk.nix | 2
-rw-r--r-- nixos/tests/firefox.nix | 2
-rw-r--r-- nixos/tests/firewall.nix | 2
-rw-r--r-- nixos/tests/flannel.nix | 5
-rw-r--r-- nixos/tests/gnome3.nix | 2
-rw-r--r-- nixos/tests/home-assistant.nix | 4
-rw-r--r-- nixos/tests/influxdb.nix | 2
-rw-r--r-- nixos/tests/ipv6.nix | 2
-rw-r--r-- nixos/tests/jenkins.nix | 2
-rw-r--r-- nixos/tests/kexec.nix | 2
-rw-r--r-- nixos/tests/kubernetes/base.nix | 39
-rw-r--r-- nixos/tests/kubernetes/certs.nix | 219
-rw-r--r-- nixos/tests/kubernetes/dns.nix | 15
-rw-r--r-- nixos/tests/kubernetes/kubernetes-common.nix | 57
-rw-r--r-- nixos/tests/kubernetes/rbac.nix | 13
-rw-r--r-- nixos/tests/login.nix | 2
-rw-r--r-- nixos/tests/misc.nix | 2
-rw-r--r-- nixos/tests/mumble.nix | 2
-rw-r--r-- nixos/tests/munin.nix | 2
-rw-r--r-- nixos/tests/mysql-replication.nix | 2
-rw-r--r-- nixos/tests/mysql.nix | 2
-rw-r--r-- nixos/tests/nat.nix | 2
-rw-r--r-- nixos/tests/nfs.nix | 2
-rw-r--r-- nixos/tests/openssh.nix | 2
-rw-r--r-- nixos/tests/pantheon.nix | 55
-rw-r--r-- nixos/tests/phabricator.nix | 2
-rw-r--r-- nixos/tests/printing.nix | 2
-rw-r--r-- nixos/tests/proxy.nix | 2
-rw-r--r-- nixos/tests/quake3.nix | 2
-rw-r--r-- nixos/tests/rabbitmq.nix | 2
-rw-r--r-- nixos/tests/subversion.nix | 2
-rw-r--r-- nixos/tests/tomcat.nix | 2
-rw-r--r-- nixos/tests/trac.nix | 2
-rw-r--r-- nixos/tests/udisks2.nix | 2
-rw-r--r-- nixos/tests/xfce.nix | 2
159 files changed, 4325 insertions, 1570 deletions
diff --git a/nixos/doc/manual/configuration/configuration.xml b/nixos/doc/manual/configuration/configuration.xml
index cebc4122c6c..138d1d86d7f 100644
--- a/nixos/doc/manual/configuration/configuration.xml
+++ b/nixos/doc/manual/configuration/configuration.xml
@@ -23,5 +23,6 @@
  <xi:include href="linux-kernel.xml" />
  <xi:include href="../generated/modules.xml" xpointer="xpointer(//section[@id='modules']/*)" />
  <xi:include href="profiles.xml" />
+ <xi:include href="kubernetes.xml" />
 <!-- Apache; libvirtd virtualisation -->
 </part>
diff --git a/nixos/doc/manual/configuration/kubernetes.xml b/nixos/doc/manual/configuration/kubernetes.xml
new file mode 100644
index 00000000000..ddc026c0c01
--- /dev/null
+++ b/nixos/doc/manual/configuration/kubernetes.xml
@@ -0,0 +1,127 @@
+<chapter xmlns="http://docbook.org/ns/docbook"
+         xmlns:xlink="http://www.w3.org/1999/xlink"
+         xmlns:xi="http://www.w3.org/2001/XInclude"
+         version="5.0"
+         xml:id="sec-kubernetes">
+ <title>Kubernetes</title>
+
+ <para>
+   The NixOS Kubernetes module is a collective term for a handful of
+   individual submodules implementing the Kubernetes cluster components.
+ </para>
+
+ <para>
+   There are generally two ways of enabling Kubernetes on NixOS.
+   One way is to enable and configure cluster components appropriately by hand:
+<programlisting>
+services.kubernetes = {
+  apiserver.enable = true;
+  controllerManager.enable = true;
+  scheduler.enable = true;
+  addonManager.enable = true;
+  proxy.enable = true;
+  flannel.enable = true;
+};
+</programlisting>
+  Another way is to assign cluster roles ("master" and/or "node") to the host.
+  This enables apiserver, controllerManager, scheduler, addonManager,
+  kube-proxy and etcd:
+<programlisting>
+<xref linkend="opt-services.kubernetes.roles"/> = [ "master" ];
+</programlisting>
+  While this will enable the kubelet and kube-proxy only:
+<programlisting>
+<xref linkend="opt-services.kubernetes.roles"/> = [ "node" ];
+</programlisting>
+  Assigning both the master and node roles is useful if you want a single
+  node Kubernetes cluster for dev or testing purposes:
+<programlisting>
+<xref linkend="opt-services.kubernetes.roles"/> = [ "master" "node" ];
+</programlisting>
+  Note: Assigning either role will also default both
+  <xref linkend="opt-services.kubernetes.flannel.enable"/> and
+  <xref linkend="opt-services.kubernetes.easyCerts"/> to true.
+  This sets up flannel as the CNI plugin and activates automatic PKI bootstrapping.
+ </para>
+
+ <para>
+   As of Kubernetes 1.10.x, opening non-TLS-enabled ports on Kubernetes
+   components is deprecated. Therefore, as of NixOS 19.03, all plain HTTP
+   ports are disabled by default.
+   While opening insecure ports is still possible, it is recommended not to
+   bind these to interfaces other than loopback.
+
+   To re-enable the insecure port on the apiserver, see options:
+   <xref linkend="opt-services.kubernetes.apiserver.insecurePort"/>
+   and
+   <xref linkend="opt-services.kubernetes.apiserver.insecureBindAddress"/>
+ </para>
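+
+ <para>
+   A minimal sketch for re-enabling the insecure port while keeping it bound
+   to loopback only (the port number is an arbitrary example):
+<programlisting>
+services.kubernetes.apiserver = {
+  insecurePort = 8080;
+  insecureBindAddress = "127.0.0.1"; # the default; avoid binding more widely
+};
+</programlisting>
+ </para>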
+
+ <note>
+  <para>
+   As of NixOS 19.03, it is mandatory to configure
+   <xref linkend="opt-services.kubernetes.masterAddress"/>.
+   The masterAddress must be resolvable and routable by all cluster nodes.
+   In single node clusters, this can be set to <literal>localhost</literal>.
+  </para>
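+  <para>
+   For example, in a single node cluster:
+<programlisting>
+<xref linkend="opt-services.kubernetes.masterAddress"/> = "localhost";
+</programlisting>
+  </para>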
+ </note>
+
+ <para>
+   Role-based access control (RBAC) authorization mode is enabled by default.
+   This means that anonymous requests to the apiserver secure port will, as
+   expected, result in a permission denied error. All cluster components must
+   therefore be configured with x509 certificates for two-way TLS communication.
+   The x509 certificate subject section determines the roles and permissions
+   granted by the apiserver for cluster-wide or namespaced operations.
+   See also:
+   <link
+     xlink:href="https://kubernetes.io/docs/reference/access-authn-authz/rbac/">
+     Using RBAC Authorization</link>.
+ </para>
+
+  <para>
+   The NixOS kubernetes module provides an option for automatic certificate
+   bootstrapping and configuration,
+    <xref linkend="opt-services.kubernetes.easyCerts"/>.
+   The PKI bootstrapping process involves setting up a certificate authority
+   (CA) daemon (cfssl) on the kubernetes master node. cfssl generates a CA
+   certificate for the cluster and uses it to sign subordinate certificates
+   issued to each of the cluster components. Subsequently, the certmgr daemon
+   monitors active certificates and renews them when needed. For single node
+   Kubernetes clusters, setting <xref linkend="opt-services.kubernetes.easyCerts"/> = true
+   is sufficient and no further action is required. When joining additional
+   node machines to an existing cluster, on the other hand, establishing
+   initial trust is mandatory.
+ </para>
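+
+ <para>
+   As a sketch, a complete single node cluster with automatic PKI could look
+   like this (easyCerts is shown explicitly, although assigning roles already
+   defaults it to true):
+<programlisting>
+services.kubernetes = {
+  roles = [ "master" "node" ];
+  masterAddress = "localhost";
+  easyCerts = true;
+};
+</programlisting>
+ </para>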
+
+ <para>
+   To add new nodes to the cluster:
+   On any (non-master) cluster node where
+   <xref linkend="opt-services.kubernetes.easyCerts"/> is enabled, the helper
+   script <literal>nixos-kubernetes-node-join</literal> is available on PATH.
+   Given a token on stdin, it will copy the token to the kubernetes
+   secrets directory and restart the certmgr service. As requested
+   certificates are issued, the script will restart kubernetes cluster
+   components as needed for them to pick up new keypairs.
+ </para>
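+
+ <para>
+   For example, assuming the bootstrap token generated by easyCerts resides
+   at its default location under <literal>/var/lib/kubernetes/secrets</literal>
+   on the master, a new node could be joined like this:
+<programlisting>
+cat /var/lib/kubernetes/secrets/apitoken.secret \
+  | ssh root@node nixos-kubernetes-node-join
+</programlisting>
+ </para>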
+
+ <note>
+  <para>
+   Multi-master (HA) clusters are not supported by the easyCerts module.
+  </para>
+ </note>
+
+ <para>
+   In order to interact with an RBAC-enabled cluster as an administrator, one
+   needs to have cluster-admin privileges. By default, when easyCerts is
+   enabled, a cluster-admin kubeconfig file is generated and linked into
+   <literal>/etc/kubernetes/cluster-admin.kubeconfig</literal> as determined by
+   <xref linkend="opt-services.kubernetes.pki.etcClusterAdminKubeconfig"/>.
+   <literal>export KUBECONFIG=/etc/kubernetes/cluster-admin.kubeconfig</literal>
+   will make kubectl use this kubeconfig to access and authenticate the cluster.
+   The cluster-admin kubeconfig references an auto-generated keypair owned by
+   root. Thus, only root on the kubernetes master may obtain cluster-admin
+   rights by means of this file.
+ </para>
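+
+ <para>
+   As a quick sanity check (assuming kubectl from the kubernetes package is
+   on PATH, since the module no longer installs it implicitly):
+<programlisting>
+export KUBECONFIG=/etc/kubernetes/cluster-admin.kubeconfig
+kubectl get nodes
+</programlisting>
+ </para>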
+
+</chapter>
diff --git a/nixos/doc/manual/release-notes/rl-1903.xml b/nixos/doc/manual/release-notes/rl-1903.xml
index 508a0882dd2..0a5fe858f07 100644
--- a/nixos/doc/manual/release-notes/rl-1903.xml
+++ b/nixos/doc/manual/release-notes/rl-1903.xml
@@ -23,6 +23,47 @@
     The default Python 3 interpreter is now CPython 3.7 instead of CPython 3.6.
     </para>
    </listitem>
+   <listitem>
+    <para>
+     Added the Pantheon desktop environment.
+     It can be enabled through <varname>services.xserver.desktopManager.pantheon.enable</varname>.
+    </para>
+    <note>
+     <para>
+      <varname>services.xserver.desktopManager.pantheon</varname> enables lightdm
+      as the default display manager, using Pantheon's greeter.
+     </para>
+     <para>
+      This is because of limitations in the screenlocking implementation; without
+      it, the screenlocker would be non-functional.
+     </para>
+     <para>
+      It is therefore recommended to retain this default. However, if you'd like to change it, set:
+     </para>
+     <itemizedlist>
+      <listitem>
+       <para>
+        <option>services.xserver.displayManager.lightdm.enable</option>
+       </para>
+      </listitem>
+      <listitem>
+       <para>
+        <option>services.xserver.displayManager.lightdm.greeters.pantheon.enable</option>
+       </para>
+      </listitem>
+     </itemizedlist>
+     <para>to <literal>false</literal> and enable your preferred display manager.</para>
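+     <para>For example:</para>
+<programlisting>
+services.xserver.displayManager.lightdm.enable = false;
+services.xserver.displayManager.lightdm.greeters.pantheon.enable = false;
+</programlisting>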
+    </note>
+   </listitem>
+   <listitem>
+     <para>
+       A major refactoring of the Kubernetes module has been completed.
+       The refactoring primarily focuses on decoupling components and enhancing
+       security. Two-way TLS and RBAC have been enabled by default for all
+       components, which slightly changes the way the module is configured.
+       See: <xref linkend="sec-kubernetes"/> for details.
+     </para>
+   </listitem>
   </itemizedlist>
  </section>
 
@@ -52,6 +93,18 @@
      in <literal>nixos/modules/virtualisation/google-compute-config.nix</literal>.
     </para>
    </listitem>
+   <listitem>
+    <para>
+     <literal>./services/misc/beanstalkd.nix</literal>
+    </para>
+   </listitem>
+   <listitem>
+    <para>
+     There is a new <varname>services.cockroachdb</varname> module for running
+     CockroachDB databases. NixOS now ships with CockroachDB 2.1.x as well, available
+     on <literal>x86_64-linux</literal> and <literal>aarch64-linux</literal>.
+    </para>
+   </listitem>
   </itemizedlist>
 
   <itemizedlist>
@@ -515,6 +568,20 @@
    </listitem>
    <listitem>
     <para>
+     Symlinks in <filename>/etc</filename> (except <filename>/etc/static</filename>)
+     are now relative instead of absolute. This makes it possible to examine a
+     NixOS container's <filename>/etc</filename> directory from the host system
+     (previously the symlinks pointed to the host's <filename>/etc</filename> when viewed from the host,
+     and to the container's <filename>/etc</filename> when viewed from within the container chroot).
+    </para>
+    <para>
+     This also makes <filename>/etc/os-release</filename> adhere to
+     <link xlink:href="https://www.freedesktop.org/software/systemd/man/os-release.html">the standard</link>
+     for NixOS containers.
+    </para>
+   </listitem>
+   <listitem>
+    <para>
       Flat volumes are now disabled by default in <literal>hardware.pulseaudio</literal>.
       This has been done to prevent applications, which are unaware of this feature, setting
       their volumes to 100% on startup causing harm to your audio hardware and potentially your ears.
@@ -549,6 +616,63 @@
      provisioning.
     </para>
    </listitem>
+   <listitem>
+     <para>
+       The use of insecure ports on kubernetes has been deprecated.
+       Accordingly, the options
+       <varname>services.kubernetes.apiserver.port</varname> and
+       <varname>services.kubernetes.controllerManager.port</varname>
+       have been renamed to <varname>.insecurePort</varname>,
+       and the default of both options has changed to 0 (disabled).
+     </para>
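+     <para>
+       For example, to retain an insecure apiserver port under the new option
+       name (the port number is an arbitrary example):
+     </para>
+<programlisting>
+services.kubernetes.apiserver.insecurePort = 8080;
+</programlisting>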
+    </listitem>
+    <listitem>
+      <para>
+        Note that the default value of
+        <varname>services.kubernetes.apiserver.bindAddress</varname>
+        has changed from 127.0.0.1 to 0.0.0.0, allowing the apiserver to be
+        accessible from outside the master node itself.
+        If the apiserver insecurePort is enabled,
+        it is strongly recommended to bind it only on the loopback interface. See:
+        <varname>services.kubernetes.apiserver.insecureBindAddress</varname>.
+      </para>
+    </listitem>
+    <listitem>
+      <para>
+        The options <varname>services.kubernetes.apiserver.allowPrivileged</varname>
+        and <varname>services.kubernetes.kubelet.allowPrivileged</varname> now
+        default to false, disallowing privileged containers on the cluster.
+      </para>
+    </listitem>
+    <listitem>
+      <para>
+        The kubernetes module no longer adds the kubernetes package to
+        <varname>environment.systemPackages</varname> implicitly.
+      </para>
+    </listitem>
+    <listitem>
+      <para>
+        The <literal>intel</literal> driver has been removed from the default list of
+        <link linkend="opt-services.xserver.videoDrivers">X.org video drivers</link>.
+        The <literal>modesetting</literal> driver should take over automatically;
+        it is better maintained upstream and has fewer problems with advanced X11 features.
+        This can lead to a change in the output names used by <literal>xrandr</literal>.
+        Some performance regressions on some GPU models might happen.
+        Some OpenCL and VA-API applications might also break
+        (Beignet seems to provide OpenCL support with the
+        <literal>modesetting</literal> driver, too).
+        Users who need the removed functionality more than multi-output XRandR are advised
+        to add `intel` to `videoDrivers` and report an issue (or provide additional
+        details in an existing one).
+      </para>
+   </listitem>
+   <listitem>
+     <para>
+       Openmpi has been updated to version 4.0.0, which removes some deprecated MPI-1 symbols.
+       This may break some older applications that still rely on those symbols.
+       An upgrade guide can be found <link xlink:href="https://www.open-mpi.org/faq/?category=mpi-removed">here</link>.
+     </para>
+   </listitem>
   </itemizedlist>
  </section>
 </section>
diff --git a/nixos/maintainers/scripts/cloudstack/cloudstack-image.nix b/nixos/maintainers/scripts/cloudstack/cloudstack-image.nix
new file mode 100644
index 00000000000..37b46db059c
--- /dev/null
+++ b/nixos/maintainers/scripts/cloudstack/cloudstack-image.nix
@@ -0,0 +1,23 @@
+# nix-build '<nixpkgs/nixos>' -A config.system.build.cloudstackImage --arg configuration "{ imports = [ ./nixos/maintainers/scripts/cloudstack/cloudstack-image.nix ]; }"
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+{
+  imports =
+    [ ../../../modules/virtualisation/cloudstack-config.nix ];
+
+  system.build.cloudstackImage = import ../../../lib/make-disk-image.nix {
+    inherit lib config pkgs;
+    diskSize = 8192;
+    format = "qcow2";
+    configFile = pkgs.writeText "configuration.nix"
+      ''
+        {
+          imports = [ <nixpkgs/nixos/modules/virtualisation/cloudstack-config.nix> ];
+        }
+      '';
+  };
+
+}
diff --git a/nixos/modules/config/fonts/fontconfig-penultimate.nix b/nixos/modules/config/fonts/fontconfig-penultimate.nix
index 7e05e77d967..2c18244621a 100644
--- a/nixos/modules/config/fonts/fontconfig-penultimate.nix
+++ b/nixos/modules/config/fonts/fontconfig-penultimate.nix
@@ -55,7 +55,9 @@ let
   localConf = pkgs.writeText "fc-local.conf" cfg.localConf;
 
   # The configuration to be included in /etc/font/
-  penultimateConf = pkgs.runCommand "font-penultimate-conf" {} ''
+  penultimateConf = pkgs.runCommand "font-penultimate-conf" {
+    preferLocalBuild = true;
+    } ''
     support_folder=$out/etc/fonts/conf.d
     latest_folder=$out/etc/fonts/${latestVersion}/conf.d
 
diff --git a/nixos/modules/config/fonts/fontconfig-ultimate.nix b/nixos/modules/config/fonts/fontconfig-ultimate.nix
index 7549dc6c065..45328f3eaf1 100644
--- a/nixos/modules/config/fonts/fontconfig-ultimate.nix
+++ b/nixos/modules/config/fonts/fontconfig-ultimate.nix
@@ -7,7 +7,7 @@ let cfg = config.fonts.fontconfig.ultimate;
     latestVersion  = pkgs.fontconfig.configVersion;
 
     # The configuration to be included in /etc/font/
-    confPkg = pkgs.runCommand "font-ultimate-conf" {} ''
+    confPkg = pkgs.runCommand "font-ultimate-conf" { preferLocalBuild = true; } ''
       support_folder=$out/etc/fonts/conf.d
       latest_folder=$out/etc/fonts/${latestVersion}/conf.d
 
diff --git a/nixos/modules/config/fonts/fontconfig.nix b/nixos/modules/config/fonts/fontconfig.nix
index 12f5ca2e799..d79c43c0b5b 100644
--- a/nixos/modules/config/fonts/fontconfig.nix
+++ b/nixos/modules/config/fonts/fontconfig.nix
@@ -190,7 +190,7 @@ let cfg = config.fonts.fontconfig;
     '';
 
     # fontconfig configuration package
-    confPkg = pkgs.runCommand "fontconfig-conf" {} ''
+    confPkg = pkgs.runCommand "fontconfig-conf" { preferLocalBuild = true; } ''
       support_folder=$out/etc/fonts
       latest_folder=$out/etc/fonts/${latestVersion}
 
diff --git a/nixos/modules/config/fonts/fontdir.nix b/nixos/modules/config/fonts/fontdir.nix
index 180e38f81f4..cc70fbf8744 100644
--- a/nixos/modules/config/fonts/fontdir.nix
+++ b/nixos/modules/config/fonts/fontdir.nix
@@ -4,7 +4,7 @@ with lib;
 
 let
 
-  x11Fonts = pkgs.runCommand "X11-fonts" { } ''
+  x11Fonts = pkgs.runCommand "X11-fonts" { preferLocalBuild = true; } ''
     mkdir -p "$out/share/X11-fonts"
     find ${toString config.fonts.fonts} \
       \( -name fonts.dir -o -name '*.ttf' -o -name '*.otf' \) \
diff --git a/nixos/modules/config/nsswitch.nix b/nixos/modules/config/nsswitch.nix
index b601e908e49..13277fe56e4 100644
--- a/nixos/modules/config/nsswitch.nix
+++ b/nixos/modules/config/nsswitch.nix
@@ -61,6 +61,15 @@ in {
         };
     };
 
+    system.nssHosts = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      example = [ "mdns" ];
+      description = ''
+        List of host entries to configure in <filename>/etc/nsswitch.conf</filename>.
+      '';
+    };
+
   };
 
   config = {
@@ -85,7 +94,7 @@ in {
       group:     ${concatStringsSep " " passwdArray}
       shadow:    ${concatStringsSep " " shadowArray}
 
-      hosts:     ${concatStringsSep " " hostArray}
+      hosts:     ${concatStringsSep " " config.system.nssHosts}
       networks:  files
 
       ethers:    files
@@ -94,6 +103,8 @@ in {
       rpc:       files
     '';
 
+    system.nssHosts = hostArray;
+
     # Systemd provides nss-myhostname to ensure that our hostname
     # always resolves to a valid IP address.  It returns all locally
     # configured IP addresses, or ::1 and 127.0.0.2 as
diff --git a/nixos/modules/hardware/acpilight.nix b/nixos/modules/hardware/acpilight.nix
new file mode 100644
index 00000000000..34e8a222096
--- /dev/null
+++ b/nixos/modules/hardware/acpilight.nix
@@ -0,0 +1,24 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.hardware.acpilight;
+in
+{
+  options = {
+    hardware.acpilight = {
+      enable = mkOption {
+        default = false;
+        type = types.bool;
+        description = ''
+          Enable acpilight.
+          This will allow brightness control via xbacklight from users in the video group
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    services.udev.packages = with pkgs; [ acpilight ];
+  };
+}
diff --git a/nixos/modules/hardware/ledger.nix b/nixos/modules/hardware/ledger.nix
new file mode 100644
index 00000000000..41abe74315a
--- /dev/null
+++ b/nixos/modules/hardware/ledger.nix
@@ -0,0 +1,14 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.hardware.ledger;
+
+in {
+  options.hardware.ledger.enable = mkEnableOption "udev rules for Ledger devices";
+
+  config = mkIf cfg.enable {
+    services.udev.packages = [ pkgs.ledger-udev-rules ];
+  };
+}
diff --git a/nixos/modules/hardware/video/uvcvideo/uvcdynctrl-udev-rules.nix b/nixos/modules/hardware/video/uvcvideo/uvcdynctrl-udev-rules.nix
index 2cf5f13bc15..a808429c999 100644
--- a/nixos/modules/hardware/video/uvcvideo/uvcdynctrl-udev-rules.nix
+++ b/nixos/modules/hardware/video/uvcvideo/uvcdynctrl-udev-rules.nix
@@ -29,6 +29,7 @@ runCommand "uvcdynctrl-udev-rules-${version}"
   ];
   dontPatchELF = true;
   dontStrip = true;
+  preferLocalBuild = true;
 }
 ''
   mkdir -p "$out/lib/udev"
diff --git a/nixos/modules/installer/cd-dvd/channel.nix b/nixos/modules/installer/cd-dvd/channel.nix
index e946c4abc57..ab5e7c0645f 100644
--- a/nixos/modules/installer/cd-dvd/channel.nix
+++ b/nixos/modules/installer/cd-dvd/channel.nix
@@ -13,7 +13,7 @@ let
   # user, as expected by nixos-rebuild/nixos-install. FIXME: merge
   # with make-channel.nix.
   channelSources = pkgs.runCommand "nixos-${config.system.nixos.version}"
-    { }
+    { preferLocalBuild = true; }
     ''
       mkdir -p $out
       cp -prd ${nixpkgs.outPath} $out/nixos
diff --git a/nixos/modules/installer/cd-dvd/installation-cd-graphical-base.nix b/nixos/modules/installer/cd-dvd/installation-cd-graphical-base.nix
index 228ef371d25..917b3758d38 100644
--- a/nixos/modules/installer/cd-dvd/installation-cd-graphical-base.nix
+++ b/nixos/modules/installer/cd-dvd/installation-cd-graphical-base.nix
@@ -31,6 +31,10 @@ with lib;
   # there is no power management backend such as upower).
   powerManagement.enable = true;
 
+  # Enable sound in graphical ISOs.
+  hardware.pulseaudio.enable = true;
+  hardware.pulseaudio.systemWide = true; # Needed since we run plasma as root.
+
   environment.systemPackages = [
     # Include gparted for partitioning disks.
     pkgs.gparted
diff --git a/nixos/modules/installer/tools/nixos-rebuild.sh b/nixos/modules/installer/tools/nixos-rebuild.sh
index 361c2e49e05..27e5b5d8c70 100644
--- a/nixos/modules/installer/tools/nixos-rebuild.sh
+++ b/nixos/modules/installer/tools/nixos-rebuild.sh
@@ -29,7 +29,7 @@ while [ "$#" -gt 0 ]; do
       --help)
         showSyntax
         ;;
-      switch|boot|test|build|dry-build|dry-run|dry-activate|build-vm|build-vm-with-bootloader)
+      switch|boot|test|build|edit|dry-build|dry-run|dry-activate|build-vm|build-vm-with-bootloader)
         if [ "$i" = dry-run ]; then i=dry-build; fi
         action="$i"
         ;;
@@ -227,6 +227,13 @@ if [ -z "$_NIXOS_REBUILD_REEXEC" -a -n "$canRun" -a -z "$fast" ]; then
     fi
 fi
 
+# Find configuration.nix and open editor instead of building.
+if [ "$action" = edit ]; then
+    NIXOS_CONFIG=${NIXOS_CONFIG:-$(nix-instantiate --find-file nixos-config)}
+    exec "${EDITOR:-nano}" "$NIXOS_CONFIG"
+    exit 1
+fi
+
 
 tmpDir=$(mktemp -t -d nixos-rebuild.XXXXXX)
 SSHOPTS="$NIX_SSHOPTS -o ControlMaster=auto -o ControlPath=$tmpDir/ssh-%n -o ControlPersist=60"
diff --git a/nixos/modules/misc/documentation.nix b/nixos/modules/misc/documentation.nix
index 09d53c322fb..9b2e1235b74 100644
--- a/nixos/modules/misc/documentation.nix
+++ b/nixos/modules/misc/documentation.nix
@@ -156,6 +156,7 @@ in
       environment.systemPackages = [ pkgs.man-db ];
       environment.pathsToLink = [ "/share/man" ];
       environment.extraOutputsToInstall = [ "man" ] ++ optional cfg.dev.enable "devman";
+      environment.etc."man.conf".source = "${pkgs.man-db}/etc/man_db.conf";
     })
 
     (mkIf cfg.info.enable {
diff --git a/nixos/modules/misc/ids.nix b/nixos/modules/misc/ids.nix
index d6e6ccaecd2..e78673514e3 100644
--- a/nixos/modules/misc/ids.nix
+++ b/nixos/modules/misc/ids.nix
@@ -272,7 +272,7 @@
       nzbget = 245;
       mosquitto = 246;
       toxvpn = 247;
-      squeezelite = 248;
+      # squeezelite = 248; # DynamicUser = true
       turnserver = 249;
       smokeping = 250;
       gocd-agent = 251;
@@ -290,7 +290,7 @@
       riak-cs = 263;
       infinoted = 264;
       sickbeard = 265;
-      # glance = 266; # unused, removed 2017-12-13
+      headphones = 266;
       couchpotato = 267;
       gogs = 268;
       pdns-recursor = 269;
@@ -590,7 +590,7 @@
       riak-cs = 263;
       infinoted = 264;
       sickbeard = 265;
-      # glance = 266; # unused, removed 2017-12-13
+      headphones = 266;
       couchpotato = 267;
       gogs = 268;
       kresd = 270;
diff --git a/nixos/modules/module-list.nix b/nixos/modules/module-list.nix
index 04bcb41cd07..32b3f14e82d 100644
--- a/nixos/modules/module-list.nix
+++ b/nixos/modules/module-list.nix
@@ -44,6 +44,7 @@
   ./hardware/digitalbitbox.nix
   ./hardware/sensor/iio.nix
   ./hardware/ksm.nix
+  ./hardware/ledger.nix
   ./hardware/mcelog.nix
   ./hardware/network/b43.nix
   ./hardware/nitrokey.nix
@@ -82,6 +83,7 @@
   ./misc/version.nix
   ./programs/adb.nix
   ./programs/atop.nix
+  ./programs/autojump.nix
   ./programs/bash/bash.nix
   ./programs/bcc.nix
   ./programs/blcr.nix
@@ -195,9 +197,17 @@
   ./services/backup/tarsnap.nix
   ./services/backup/znapzend.nix
   ./services/cluster/hadoop/default.nix
+  ./services/cluster/kubernetes/addons/dns.nix
+  ./services/cluster/kubernetes/addons/dashboard.nix
+  ./services/cluster/kubernetes/addon-manager.nix
+  ./services/cluster/kubernetes/apiserver.nix
+  ./services/cluster/kubernetes/controller-manager.nix
   ./services/cluster/kubernetes/default.nix
-  ./services/cluster/kubernetes/dns.nix
-  ./services/cluster/kubernetes/dashboard.nix
+  ./services/cluster/kubernetes/flannel.nix
+  ./services/cluster/kubernetes/kubelet.nix
+  ./services/cluster/kubernetes/pki.nix
+  ./services/cluster/kubernetes/proxy.nix
+  ./services/cluster/kubernetes/scheduler.nix
   ./services/computing/boinc/client.nix
   ./services/computing/torque/server.nix
   ./services/computing/torque/mom.nix
@@ -242,6 +252,8 @@
   ./services/desktops/bamf.nix
   ./services/desktops/dleyna-renderer.nix
   ./services/desktops/dleyna-server.nix
+  ./services/desktops/pantheon/contractor.nix
+  ./services/desktops/pantheon/files.nix
   ./services/desktops/flatpak.nix
   ./services/desktops/geoclue2.nix
   ./services/desktops/gsignond.nix
@@ -267,6 +279,7 @@
   ./services/desktops/gnome3/tracker-miners.nix
   ./services/desktops/profile-sync-daemon.nix
   ./services/desktops/telepathy.nix
+  ./services/desktops/tumbler.nix
   ./services/desktops/zeitgeist.nix
   ./services/development/bloop.nix
   ./services/development/hoogle.nix
@@ -280,6 +293,7 @@
   ./services/hardware/acpid.nix
   ./services/hardware/actkbd.nix
   ./services/hardware/bluetooth.nix
+  ./services/hardware/bolt.nix
   ./services/hardware/brltty.nix
   ./services/hardware/freefall.nix
   ./services/hardware/fwupd.nix
@@ -348,6 +362,7 @@
   ./services/misc/apache-kafka.nix
   ./services/misc/autofs.nix
   ./services/misc/autorandr.nix
+  ./services/misc/beanstalkd.nix
   ./services/misc/bees.nix
   ./services/misc/bepasty.nix
   ./services/misc/canto-daemon.nix
@@ -380,6 +395,7 @@
   ./services/misc/gogs.nix
   ./services/misc/gollum.nix
   ./services/misc/gpsd.nix
+  ./services/misc/headphones.nix
   ./services/misc/home-assistant.nix
   ./services/misc/ihaskell.nix
   ./services/misc/irkerd.nix
@@ -716,6 +732,8 @@
   ./services/web-apps/atlassian/jira.nix
   ./services/web-apps/codimd.nix
   ./services/web-apps/frab.nix
+  ./services/web-apps/icingaweb2/icingaweb2.nix
+  ./services/web-apps/icingaweb2/module-monitoring.nix
   ./services/web-apps/mattermost.nix
   ./services/web-apps/nextcloud.nix
   ./services/web-apps/nexus.nix
diff --git a/nixos/modules/profiles/graphical.nix b/nixos/modules/profiles/graphical.nix
index fba756391b1..649f5564ac6 100644
--- a/nixos/modules/profiles/graphical.nix
+++ b/nixos/modules/profiles/graphical.nix
@@ -14,5 +14,9 @@
     libinput.enable = true; # for touchpad support on many laptops
   };
 
+  # Enable sound in virtualbox appliances.
+  hardware.pulseaudio.enable = true;
+  hardware.pulseaudio.systemWide = true; # Needed since we run plasma as root.
+
   environment.systemPackages = [ pkgs.glxinfo pkgs.firefox ];
 }
diff --git a/nixos/modules/programs/autojump.nix b/nixos/modules/programs/autojump.nix
new file mode 100644
index 00000000000..3a8feec4bb4
--- /dev/null
+++ b/nixos/modules/programs/autojump.nix
@@ -0,0 +1,33 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.programs.autojump;
+  prg = config.programs;
+in
+{
+  options = {
+    programs.autojump = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Whether to enable autojump.
+        '';
+      };
+    };
+  }; 
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    environment.pathsToLink = [ "/share/autojump" ];
+    environment.systemPackages = [ pkgs.autojump ];
+
+    programs.bash.interactiveShellInit = "source ${pkgs.autojump}/share/autojump/autojump.bash"; 
+    programs.zsh.interactiveShellInit = mkIf prg.zsh.enable "source ${pkgs.autojump}/share/autojump/autojump.zsh";
+    programs.fish.interactiveShellInit = mkIf prg.fish.enable "source ${pkgs.autojump}/share/autojump/autojump.fish";
+  };
+}
diff --git a/nixos/modules/programs/less.nix b/nixos/modules/programs/less.nix
index d39103a5805..9fdf99e9c69 100644
--- a/nixos/modules/programs/less.nix
+++ b/nixos/modules/programs/less.nix
@@ -25,7 +25,7 @@ let
   '';
 
   lessKey = pkgs.runCommand "lesskey"
-            { src = pkgs.writeText "lessconfig" configText; }
+            { src = pkgs.writeText "lessconfig" configText; preferLocalBuild = true; }
             "${pkgs.less}/bin/lesskey -o $out $src";
 
 in
diff --git a/nixos/modules/programs/singularity.nix b/nixos/modules/programs/singularity.nix
index 86153d93385..b27e122bd1d 100644
--- a/nixos/modules/programs/singularity.nix
+++ b/nixos/modules/programs/singularity.nix
@@ -3,18 +3,27 @@
 with lib;
 let
   cfg = config.programs.singularity;
+  singularity = pkgs.singularity.overrideAttrs (attrs : {
+    installPhase = attrs.installPhase + ''
+      mv $bin/libexec/singularity/bin/starter-suid $bin/libexec/singularity/bin/starter-suid.orig
+      ln -s /run/wrappers/bin/singularity-suid $bin/libexec/singularity/bin/starter-suid
+    '';
+  });
 in {
   options.programs.singularity = {
     enable = mkEnableOption "Singularity";
   };
 
   config = mkIf cfg.enable {
-      environment.systemPackages = [ pkgs.singularity ];
-      systemd.tmpfiles.rules = [ "d /var/singularity/mnt/session 0770 root root -"
-                                 "d /var/singularity/mnt/final 0770 root root -"
-                                 "d /var/singularity/mnt/overlay 0770 root root -"
-                                 "d /var/singularity/mnt/container 0770 root root -"
-                                 "d /var/singularity/mnt/source 0770 root root -"];
+      environment.systemPackages = [ singularity ];
+      security.wrappers.singularity-suid.source = "${singularity}/libexec/singularity/bin/starter-suid.orig";
+      systemd.tmpfiles.rules = [
+        "d /var/singularity/mnt/session 0770 root root -"
+        "d /var/singularity/mnt/final 0770 root root -"
+        "d /var/singularity/mnt/overlay 0770 root root -"
+        "d /var/singularity/mnt/container 0770 root root -"
+        "d /var/singularity/mnt/source 0770 root root -"
+      ];
   };
 
 }
diff --git a/nixos/modules/programs/ssh.nix b/nixos/modules/programs/ssh.nix
index 4640c1d78d2..46965dd35b7 100644
--- a/nixos/modules/programs/ssh.nix
+++ b/nixos/modules/programs/ssh.nix
@@ -88,7 +88,8 @@ in
         type = types.lines;
         default = "";
         description = ''
-          Extra configuration text appended to <filename>ssh_config</filename>.
+          Extra configuration text prepended to <filename>ssh_config</filename>. Other generated
+          options will be added after a <code>Host *</code> pattern.
           See <citerefentry><refentrytitle>ssh_config</refentrytitle><manvolnum>5</manvolnum></citerefentry>
           for help.
         '';
@@ -203,6 +204,11 @@ in
     # generation in the sshd service.
     environment.etc."ssh/ssh_config".text =
       ''
+        # Custom options from `extraConfig`, to override generated options
+        ${cfg.extraConfig}
+
+        # Generated options from other settings
+        Host *
         AddressFamily ${if config.networking.enableIPv6 then "any" else "inet"}
 
         ${optionalString cfg.setXAuthLocation ''
@@ -213,8 +219,6 @@ in
 
         ${optionalString (cfg.pubkeyAcceptedKeyTypes != []) "PubkeyAcceptedKeyTypes ${concatStringsSep "," cfg.pubkeyAcceptedKeyTypes}"}
         ${optionalString (cfg.hostKeyAlgorithms != []) "HostKeyAlgorithms ${concatStringsSep "," cfg.hostKeyAlgorithms}"}
-
-        ${cfg.extraConfig}
       '';
 
     environment.etc."ssh/ssh_known_hosts".text = knownHostsText;
diff --git a/nixos/modules/rename.nix b/nixos/modules/rename.nix
index 24ab963f718..1e6557e1f0e 100644
--- a/nixos/modules/rename.nix
+++ b/nixos/modules/rename.nix
@@ -40,9 +40,19 @@ with lib;
     (mkRenamedOptionModule [ "services" "kibana" "host" ] [ "services" "kibana" "listenAddress" ])
     (mkRenamedOptionModule [ "services" "kubernetes" "apiserver" "admissionControl" ] [ "services" "kubernetes" "apiserver" "enableAdmissionPlugins" ])
     (mkRenamedOptionModule [ "services" "kubernetes" "apiserver" "address" ] ["services" "kubernetes" "apiserver" "bindAddress"])
+    (mkRenamedOptionModule [ "services" "kubernetes" "apiserver" "port" ] ["services" "kubernetes" "apiserver" "insecurePort"])
     (mkRemovedOptionModule [ "services" "kubernetes" "apiserver" "publicAddress" ] "")
     (mkRenamedOptionModule [ "services" "kubernetes" "addons" "dashboard" "enableRBAC" ] [ "services" "kubernetes" "addons" "dashboard" "rbac" "enable" ])
+    (mkRenamedOptionModule [ "services" "kubernetes" "controllerManager" "address" ] ["services" "kubernetes" "controllerManager" "bindAddress"])
+    (mkRenamedOptionModule [ "services" "kubernetes" "controllerManager" "port" ] ["services" "kubernetes" "controllerManager" "insecurePort"])
+    (mkRenamedOptionModule [ "services" "kubernetes" "etcd" "servers" ] [ "services" "kubernetes" "apiserver" "etcd" "servers" ])
+    (mkRenamedOptionModule [ "services" "kubernetes" "etcd" "keyFile" ] [ "services" "kubernetes" "apiserver" "etcd" "keyFile" ])
+    (mkRenamedOptionModule [ "services" "kubernetes" "etcd" "certFile" ] [ "services" "kubernetes" "apiserver" "etcd" "certFile" ])
+    (mkRenamedOptionModule [ "services" "kubernetes" "etcd" "caFile" ] [ "services" "kubernetes" "apiserver" "etcd" "caFile" ])
+    (mkRemovedOptionModule [ "services" "kubernetes" "kubelet" "applyManifests" ] "")
     (mkRemovedOptionModule [ "services" "kubernetes" "kubelet" "cadvisorPort" ] "")
+    (mkRenamedOptionModule [ "services" "kubernetes" "proxy" "address" ] ["services" "kubernetes" "proxy" "bindAddress"])
+    (mkRemovedOptionModule [ "services" "kubernetes" "verbose" ] "")
     (mkRenamedOptionModule [ "services" "logstash" "address" ] [ "services" "logstash" "listenAddress" ])
     (mkRenamedOptionModule [ "services" "mpd" "network" "host" ] [ "services" "mpd" "network" "listenAddress" ])
     (mkRenamedOptionModule [ "services" "neo4j" "host" ] [ "services" "neo4j" "defaultListenAddress" ])
@@ -59,6 +69,7 @@ with lib;
     (mkRenamedOptionModule [ "services" "statsd" "host" ] [ "services" "statsd" "listenAddress" ])
     (mkRenamedOptionModule [ "services" "subsonic" "host" ] [ "services" "subsonic" "listenAddress" ])
     (mkRenamedOptionModule [ "services" "tor" "relay" "portSpec" ] [ "services" "tor" "relay" "port" ])
+    (mkRenamedOptionModule [ "services" "vmwareGuest" ] [ "virtualisation" "vmware" "guest" ])
     (mkRenamedOptionModule [ "jobs" ] [ "systemd" "services" ])
 
     (mkRenamedOptionModule [ "services" "gitlab" "stateDir" ] [ "services" "gitlab" "statePath" ])
diff --git a/nixos/modules/security/ca.nix b/nixos/modules/security/ca.nix
index 67469be18b4..1c4ee421fc5 100644
--- a/nixos/modules/security/ca.nix
+++ b/nixos/modules/security/ca.nix
@@ -14,6 +14,7 @@ let
     { files =
         cfg.certificateFiles ++
         [ (builtins.toFile "extra.crt" (concatStringsSep "\n" cfg.certificates)) ];
+      preferLocalBuild = true;
      }
     ''
       cat $files > $out
diff --git a/nixos/modules/security/sudo.nix b/nixos/modules/security/sudo.nix
index 69a2a4f8f9a..573588aaeec 100644
--- a/nixos/modules/security/sudo.nix
+++ b/nixos/modules/security/sudo.nix
@@ -215,7 +215,10 @@ in
     environment.etc = singleton
       { source =
           pkgs.runCommand "sudoers"
-          { src = pkgs.writeText "sudoers-in" cfg.configFile; }
+          {
+            src = pkgs.writeText "sudoers-in" cfg.configFile;
+            preferLocalBuild = true;
+          }
           # Make sure that the sudoers file is syntactically valid.
           # (currently disabled - NIXOS-66)
           "${pkgs.buildPackages.sudo}/sbin/visudo -f $src -c && cp $src $out";
diff --git a/nixos/modules/services/audio/squeezelite.nix b/nixos/modules/services/audio/squeezelite.nix
index 57ae3855993..05506f5bcc7 100644
--- a/nixos/modules/services/audio/squeezelite.nix
+++ b/nixos/modules/services/audio/squeezelite.nix
@@ -3,8 +3,7 @@
 with lib;
 
 let
-
-  uid = config.ids.uids.squeezelite;
+  dataDir = "/var/lib/squeezelite";
   cfg = config.services.squeezelite;
 
 in {
@@ -17,14 +16,6 @@ in {
 
       enable = mkEnableOption "Squeezelite, a software Squeezebox emulator";
 
-      dataDir = mkOption {
-        default = "/var/lib/squeezelite";
-        type = types.str;
-        description = ''
-          The directory where Squeezelite stores its name file.
-        '';
-      };
-
       extraArguments = mkOption {
         default = "";
         type = types.str;
@@ -46,22 +37,14 @@ in {
       wantedBy = [ "multi-user.target" ];
       after = [ "network.target" "sound.target" ];
       description = "Software Squeezebox emulator";
-      preStart = "mkdir -p ${cfg.dataDir} && chown -R squeezelite ${cfg.dataDir}";
       serviceConfig = {
-        ExecStart = "${pkgs.squeezelite}/bin/squeezelite -N ${cfg.dataDir}/player-name ${cfg.extraArguments}";
-        User = "squeezelite";
-        PermissionsStartOnly = true;
+        DynamicUser = true;
+        ExecStart = "${pkgs.squeezelite}/bin/squeezelite -N ${dataDir}/player-name ${cfg.extraArguments}";
+        StateDirectory = builtins.baseNameOf dataDir;
+        SupplementaryGroups = "audio";
       };
     };
 
-    users.users.squeezelite= {
-      inherit uid;
-      group = "nogroup";
-      extraGroups = [ "audio" ];
-      description = "Squeezelite user";
-      home = "${cfg.dataDir}";
-    };
-
   };
 
 }
diff --git a/nixos/modules/services/cluster/kubernetes/addon-manager.nix b/nixos/modules/services/cluster/kubernetes/addon-manager.nix
new file mode 100644
index 00000000000..17f2dde31a7
--- /dev/null
+++ b/nixos/modules/services/cluster/kubernetes/addon-manager.nix
@@ -0,0 +1,167 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  top = config.services.kubernetes;
+  cfg = top.addonManager;
+
+  isRBACEnabled = elem "RBAC" top.apiserver.authorizationMode;
+
+  addons = pkgs.runCommand "kubernetes-addons" { } ''
+    mkdir -p $out
+    # since we are mounting the addons to the addon manager, they need to be copied
+    ${concatMapStringsSep ";" (a: "cp -v ${a}/* $out/") (mapAttrsToList (name: addon:
+      pkgs.writeTextDir "${name}.json" (builtins.toJSON addon)
+    ) (cfg.addons))}
+  '';
+in
+{
+  ###### interface
+  options.services.kubernetes.addonManager = with lib.types; {
+
+    bootstrapAddons = mkOption {
+      description = ''
+        Bootstrap addons are like regular addons, but they are applied with cluster-admin rights.
+        They are applied at addon-manager startup only.
+      '';
+      default = { };
+      type = attrsOf attrs;
+      example = literalExample ''
+        {
+          "my-service" = {
+            "apiVersion" = "v1";
+            "kind" = "Service";
+            "metadata" = {
+              "name" = "my-service";
+              "namespace" = "default";
+            };
+            "spec" = { ... };
+          };
+        }
+      '';
+    };
+
+    addons = mkOption {
+      description = "Kubernetes addons (any kind of Kubernetes resource can be an addon).";
+      default = { };
+      type = attrsOf (either attrs (listOf attrs));
+      example = literalExample ''
+        {
+          "my-service" = {
+            "apiVersion" = "v1";
+            "kind" = "Service";
+            "metadata" = {
+              "name" = "my-service";
+              "namespace" = "default";
+            };
+            "spec" = { ... };
+          };
+        }
+        // import &lt;nixpkgs/nixos/modules/services/cluster/kubernetes/addons/dashboard.nix&gt; { cfg = config.services.kubernetes; };
+      '';
+    };
+
+    enable = mkEnableOption "Kubernetes addon manager";
+  };
+
+  ###### implementation
+  config = mkIf cfg.enable {
+    environment.etc."kubernetes/addons".source = "${addons}/";
+
+    systemd.services.kube-addon-manager = {
+      description = "Kubernetes addon manager";
+      wantedBy = [ "kubernetes.target" ];
+      after = [ "kube-apiserver.service" ];
+      environment.ADDON_PATH = "/etc/kubernetes/addons/";
+      path = [ pkgs.gawk ];
+      serviceConfig = {
+        Slice = "kubernetes.slice";
+        ExecStart = "${top.package}/bin/kube-addons";
+        WorkingDirectory = top.dataDir;
+        User = "kubernetes";
+        Group = "kubernetes";
+        Restart = "on-failure";
+        RestartSec = 10;
+      };
+    };
+
+    services.kubernetes.addonManager.bootstrapAddons = mkIf isRBACEnabled
+    (let
+      name = "system:kube-addon-manager";
+      namespace = "kube-system";
+    in
+    {
+
+      kube-addon-manager-r = {
+        apiVersion = "rbac.authorization.k8s.io/v1";
+        kind = "Role";
+        metadata = {
+          inherit name namespace;
+        };
+        rules = [{
+          apiGroups = ["*"];
+          resources = ["*"];
+          verbs = ["*"];
+        }];
+      };
+
+      kube-addon-manager-rb = {
+        apiVersion = "rbac.authorization.k8s.io/v1";
+        kind = "RoleBinding";
+        metadata = {
+          inherit name namespace;
+        };
+        roleRef = {
+          apiGroup = "rbac.authorization.k8s.io";
+          kind = "Role";
+          inherit name;
+        };
+        subjects = [{
+          apiGroup = "rbac.authorization.k8s.io";
+          kind = "User";
+          inherit name;
+        }];
+      };
+
+      kube-addon-manager-cluster-lister-cr = {
+        apiVersion = "rbac.authorization.k8s.io/v1";
+        kind = "ClusterRole";
+        metadata = {
+          name = "${name}:cluster-lister";
+        };
+        rules = [{
+          apiGroups = ["*"];
+          resources = ["*"];
+          verbs = ["list"];
+        }];
+      };
+
+      kube-addon-manager-cluster-lister-crb = {
+        apiVersion = "rbac.authorization.k8s.io/v1";
+        kind = "ClusterRoleBinding";
+        metadata = {
+          name = "${name}:cluster-lister";
+        };
+        roleRef = {
+          apiGroup = "rbac.authorization.k8s.io";
+          kind = "ClusterRole";
+          name = "${name}:cluster-lister";
+        };
+        subjects = [{
+          kind = "User";
+          inherit name;
+        }];
+      };
+    });
+
+    services.kubernetes.pki.certs = {
+      addonManager = top.lib.mkCert {
+        name = "kube-addon-manager";
+        CN = "system:kube-addon-manager";
+        action = "systemctl restart kube-addon-manager.service";
+      };
+    };
+  };
+
+}
diff --git a/nixos/modules/services/cluster/kubernetes/dashboard.nix b/nixos/modules/services/cluster/kubernetes/addons/dashboard.nix
index cbd6e8f7bf7..454e7d35bc0 100644
--- a/nixos/modules/services/cluster/kubernetes/dashboard.nix
+++ b/nixos/modules/services/cluster/kubernetes/addons/dashboard.nix
@@ -8,6 +8,13 @@ in {
   options.services.kubernetes.addons.dashboard = {
     enable = mkEnableOption "kubernetes dashboard addon";
 
+    extraArgs = mkOption {
+      description = "Extra arguments to append to the dashboard cmdline";
+      type = types.listOf types.str;
+      default = [];
+      example = ["--enable-skip-login"];
+    };
+
     rbac = mkOption {
       description = "Role-based access control (RBAC) options";
       default = {};
@@ -31,7 +38,7 @@ in {
     version = mkOption {
       description = "Which version of the kubernetes dashboard to deploy";
       type = types.str;
-      default = "v1.8.3";
+      default = "v1.10.1";
     };
 
     image = mkOption {
@@ -39,9 +46,9 @@ in {
       type = types.attrs;
       default = {
         imageName = "k8s.gcr.io/kubernetes-dashboard-amd64";
-        imageDigest = "sha256:dc4026c1b595435ef5527ca598e1e9c4343076926d7d62b365c44831395adbd0";
+        imageDigest = "sha256:0ae6b69432e78069c5ce2bcde0fe409c5c4d6f0f4d9cd50a17974fea38898747";
         finalImageTag = cfg.version;
-        sha256 = "18ajcg0q1vignfjk2sm4xj4wzphfz8wah69ps8dklqfvv0164mc8";
+        sha256 = "01xrr4pwgr2hcjrjsi3d14ifpzdfbxzqpzxbk2fkbjb9zkv38zxy";
       };
     };
   };
@@ -99,7 +106,7 @@ in {
                     memory = "100Mi";
                   };
                 };
-                args = ["--auto-generate-certificates"];
+                args = ["--auto-generate-certificates"] ++ cfg.extraArgs;
                 volumeMounts = [{
                   name = "tmp-volume";
                   mountPath = "/tmp";
diff --git a/nixos/modules/services/cluster/kubernetes/dns.nix b/nixos/modules/services/cluster/kubernetes/addons/dns.nix
index 5a3e281ea69..8f3234bfc70 100644
--- a/nixos/modules/services/cluster/kubernetes/dns.nix
+++ b/nixos/modules/services/cluster/kubernetes/addons/dns.nix
@@ -3,7 +3,7 @@
 with lib;
 
 let
-  version = "1.2.5";
+  version = "1.3.1";
   cfg = config.services.kubernetes.addons.dns;
   ports = {
     dns = 10053;
@@ -43,9 +43,9 @@ in {
       type = types.attrs;
       default = {
         imageName = "coredns/coredns";
-        imageDigest = "sha256:33c8da20b887ae12433ec5c40bfddefbbfa233d5ce11fb067122e68af30291d6";
+        imageDigest = "sha256:02382353821b12c21b062c59184e227e001079bb13ebd01f9d3270ba0fcbf1e4";
         finalImageTag = version;
-        sha256 = "13q19rgwapv27xcs664dw502254yw4zw63insf6g2danidv2mg6i";
+        sha256 = "0vbylgyxv2jm2mnzk6f28jbsj305zsxmx3jr6ngjq461czcl5fi5";
       };
     };
   };
@@ -54,21 +54,7 @@ in {
     services.kubernetes.kubelet.seedDockerImages =
       singleton (pkgs.dockerTools.pullImage cfg.coredns);
 
-    services.kubernetes.addonManager.addons = {
-      coredns-sa = {
-        apiVersion = "v1";
-        kind = "ServiceAccount";
-        metadata = {
-          labels = {
-            "addonmanager.kubernetes.io/mode" = "Reconcile";
-            "k8s-app" = "kube-dns";
-            "kubernetes.io/cluster-service" = "true";
-          };
-          name = "coredns";
-          namespace = "kube-system";
-        };
-      };
-
+    services.kubernetes.addonManager.bootstrapAddons = {
       coredns-cr = {
         apiVersion = "rbac.authorization.k8s.io/v1beta1";
         kind = "ClusterRole";
@@ -123,6 +109,22 @@ in {
           }
         ];
       };
+    };
+
+    services.kubernetes.addonManager.addons = {
+      coredns-sa = {
+        apiVersion = "v1";
+        kind = "ServiceAccount";
+        metadata = {
+          labels = {
+            "addonmanager.kubernetes.io/mode" = "Reconcile";
+            "k8s-app" = "kube-dns";
+            "kubernetes.io/cluster-service" = "true";
+          };
+          name = "coredns";
+          namespace = "kube-system";
+        };
+      };
 
       coredns-cm = {
         apiVersion = "v1";
diff --git a/nixos/modules/services/cluster/kubernetes/apiserver.nix b/nixos/modules/services/cluster/kubernetes/apiserver.nix
new file mode 100644
index 00000000000..81e45b417de
--- /dev/null
+++ b/nixos/modules/services/cluster/kubernetes/apiserver.nix
@@ -0,0 +1,428 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  top = config.services.kubernetes;
+  cfg = top.apiserver;
+
+  isRBACEnabled = elem "RBAC" cfg.authorizationMode;
+
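+  # Derive the in-cluster apiserver service IP: the first three octets of
+  # serviceClusterIpRange with ".1" appended (assumes a dotted-quad range,
+  # e.g. "10.0.0.0/24" yields "10.0.0.1").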
+  apiserverServiceIP = (concatStringsSep "."
+    (take 3 (splitString "." cfg.serviceClusterIpRange))) + ".1";
+in
+{
+  ###### interface
+  options.services.kubernetes.apiserver = with lib.types; {
+
+    advertiseAddress = mkOption {
+      description = ''
+        Kubernetes apiserver IP address on which to advertise the apiserver
+        to members of the cluster. This address must be reachable by the rest
+        of the cluster.
+      '';
+      default = null;
+      type = nullOr str;
+    };
+
+    allowPrivileged = mkOption {
+      description = "Whether to allow privileged containers on Kubernetes.";
+      default = false;
+      type = bool;
+    };
+
+    authorizationMode = mkOption {
+      description = ''
+        Kubernetes apiserver authorization mode (AlwaysAllow/AlwaysDeny/ABAC/Webhook/RBAC/Node). See
+        <link xlink:href="https://kubernetes.io/docs/reference/access-authn-authz/authorization/"/>
+      '';
+      default = ["RBAC" "Node"]; # Enabling RBAC by default, although kubernetes default is AllowAllow
+      type = listOf (enum ["AlwaysAllow" "AlwaysDeny" "ABAC" "Webhook" "RBAC" "Node"]);
+    };
+
+    authorizationPolicy = mkOption {
+      description = ''
+        Kubernetes apiserver authorization policy file. See
+        <link xlink:href="https://kubernetes.io/docs/reference/access-authn-authz/authorization/"/>
+      '';
+      default = [];
+      type = listOf attrs;
+    };
+
+    basicAuthFile = mkOption {
+      description = ''
+        Kubernetes apiserver basic authentication file. See
+        <link xlink:href="https://kubernetes.io/docs/reference/access-authn-authz/authentication"/>
+      '';
+      default = null;
+      type = nullOr path;
+    };
+
+    bindAddress = mkOption {
+      description = ''
+        The IP address on which to listen for the --secure-port port.
+        The associated interface(s) must be reachable by the rest
+        of the cluster, and by CLI/web clients.
+      '';
+      default = "0.0.0.0";
+      type = str;
+    };
+
+    clientCaFile = mkOption {
+      description = "Kubernetes apiserver CA file for client auth.";
+      default = top.caFile;
+      type = nullOr path;
+    };
+
+    disableAdmissionPlugins = mkOption {
+      description = ''
+        Kubernetes admission control plugins to disable. See
+        <link xlink:href="https://kubernetes.io/docs/admin/admission-controllers/"/>
+      '';
+      default = [];
+      type = listOf str;
+    };
+
+    enable = mkEnableOption "Kubernetes apiserver";
+
+    enableAdmissionPlugins = mkOption {
+      description = ''
+        Kubernetes admission control plugins to enable. See
+        <link xlink:href="https://kubernetes.io/docs/admin/admission-controllers/"/>
+      '';
+      default = [
+        "NamespaceLifecycle" "LimitRanger" "ServiceAccount"
+        "ResourceQuota" "DefaultStorageClass" "DefaultTolerationSeconds"
+        "NodeRestriction"
+      ];
+      example = [
+        "NamespaceLifecycle" "NamespaceExists" "LimitRanger"
+        "SecurityContextDeny" "ServiceAccount" "ResourceQuota"
+        "PodSecurityPolicy" "NodeRestriction" "DefaultStorageClass"
+      ];
+      type = listOf str;
+    };
+
+    etcd = {
+      servers = mkOption {
+        description = "List of etcd servers.";
+        default = ["http://127.0.0.1:2379"];
+        type = types.listOf types.str;
+      };
+
+      keyFile = mkOption {
+        description = "Etcd key file.";
+        default = null;
+        type = types.nullOr types.path;
+      };
+
+      certFile = mkOption {
+        description = "Etcd cert file.";
+        default = null;
+        type = types.nullOr types.path;
+      };
+
+      caFile = mkOption {
+        description = "Etcd ca file.";
+        default = top.caFile;
+        type = types.nullOr types.path;
+      };
+    };
+
+    extraOpts = mkOption {
+      description = "Kubernetes apiserver extra command line options.";
+      default = "";
+      type = str;
+    };
+
+    extraSANs = mkOption {
+      description = "Extra x509 Subject Alternative Names to be added to the kubernetes apiserver tls cert.";
+      default = [];
+      type = listOf str;
+    };
+
+    featureGates = mkOption {
+      description = "List set of feature gates";
+      default = top.featureGates;
+      type = listOf str;
+    };
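+
+    # For example, featureGates = ["TaintBasedEvictions"] is rendered in
+    # ExecStart below as --feature-gates=TaintBasedEvictions=true; this
+    # mechanism can only switch gates on, and the gate name here is
+    # illustrative.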
+
+    insecureBindAddress = mkOption {
+      description = "The IP address on which to serve the --insecure-port.";
+      default = "127.0.0.1";
+      type = str;
+    };
+
+    insecurePort = mkOption {
+      description = "Kubernetes apiserver insecure listening port. (0 = disabled)";
+      default = 0;
+      type = int;
+    };
+
+    kubeletClientCaFile = mkOption {
+      description = "Path to a cert file for connecting to kubelet.";
+      default = top.caFile;
+      type = nullOr path;
+    };
+
+    kubeletClientCertFile = mkOption {
+      description = "Client certificate to use for connections to kubelet.";
+      default = null;
+      type = nullOr path;
+    };
+
+    kubeletClientKeyFile = mkOption {
+      description = "Key to use for connections to kubelet.";
+      default = null;
+      type = nullOr path;
+    };
+
+    kubeletHttps = mkOption {
+      description = "Whether to use https for connections to kubelet.";
+      default = true;
+      type = bool;
+    };
+
+    runtimeConfig = mkOption {
+      description = ''
+        Api runtime configuration. See
+        <link xlink:href="https://kubernetes.io/docs/tasks/administer-cluster/cluster-management/"/>
+      '';
+      default = "authentication.k8s.io/v1beta1=true";
+      example = "api/all=false,api/v1=true";
+      type = str;
+    };
+
+    storageBackend = mkOption {
+      description = ''
+        Kubernetes apiserver storage backend.
+      '';
+      default = "etcd3";
+      type = enum ["etcd2" "etcd3"];
+    };
+
+    securePort = mkOption {
+      description = "Kubernetes apiserver secure port.";
+      default = 6443;
+      type = int;
+    };
+
+    serviceAccountKeyFile = mkOption {
+      description = ''
+        Kubernetes apiserver PEM-encoded x509 RSA private or public key file,
+        used to verify ServiceAccount tokens. By default, the TLS private
+        key file is used.
+      '';
+      default = null;
+      type = nullOr path;
+    };
+
+    serviceClusterIpRange = mkOption {
+      description = ''
+        A CIDR notation IP range from which to assign service cluster IPs.
+        This must not overlap with any IP ranges assigned to nodes for pods.
+      '';
+      default = "10.0.0.0/24";
+      type = str;
+    };
+
+    tlsCertFile = mkOption {
+      description = "Kubernetes apiserver certificate file.";
+      default = null;
+      type = nullOr path;
+    };
+
+    tlsKeyFile = mkOption {
+      description = "Kubernetes apiserver private key file.";
+      default = null;
+      type = nullOr path;
+    };
+
+    tokenAuthFile = mkOption {
+      description = ''
+        Kubernetes apiserver token authentication file. See
+        <link xlink:href="https://kubernetes.io/docs/reference/access-authn-authz/authentication"/>
+      '';
+      default = null;
+      type = nullOr path;
+    };
+
+    verbosity = mkOption {
+      description = ''
+        Optional glog verbosity level for logging statements. See
+        <link xlink:href="https://github.com/kubernetes/community/blob/master/contributors/devel/logging.md"/>
+      '';
+      default = null;
+      type = nullOr int;
+    };
+
+    webhookConfig = mkOption {
+      description = ''
+        Kubernetes apiserver Webhook config file. It uses the kubeconfig file format.
+        See <link xlink:href="https://kubernetes.io/docs/reference/access-authn-authz/webhook/"/>
+      '';
+      default = null;
+      type = nullOr path;
+    };
+
+  };
+
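+  # Usage sketch (values illustrative, not enforced defaults):
+  #
+  #   services.kubernetes.apiserver = {
+  #     enable = true;
+  #     advertiseAddress = "192.168.1.10";
+  #     serviceClusterIpRange = "10.0.0.0/24";
+  #     tlsCertFile = "/var/lib/kubernetes/secrets/kube-apiserver.pem";
+  #     tlsKeyFile = "/var/lib/kubernetes/secrets/kube-apiserver-key.pem";
+  #   };
+  #
+  # With easyCerts enabled, the pki module is meant to supply the TLS
+  # material, so normally only enable and the addresses are set by hand.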
+
+  ###### implementation
+  config = mkMerge [
+
+    (mkIf cfg.enable {
+        systemd.services.kube-apiserver = {
+          description = "Kubernetes APIServer Service";
+          wantedBy = [ "kubernetes.target" ];
+          after = [ "network.target" ];
+          serviceConfig = {
+            Slice = "kubernetes.slice";
+            ExecStart = ''${top.package}/bin/kube-apiserver \
+              --allow-privileged=${boolToString cfg.allowPrivileged} \
+              --authorization-mode=${concatStringsSep "," cfg.authorizationMode} \
+                ${optionalString (elem "ABAC" cfg.authorizationMode)
+                  "--authorization-policy-file=${
+                    pkgs.writeText "kube-auth-policy.jsonl"
+                    (concatMapStringsSep "\n" (l: builtins.toJSON l) cfg.authorizationPolicy)
+                  }"
+                } \
+                ${optionalString (elem "Webhook" cfg.authorizationMode)
+                  "--authorization-webhook-config-file=${cfg.webhookConfig}"
+                } \
+              --bind-address=${cfg.bindAddress} \
+              ${optionalString (cfg.advertiseAddress != null)
+                "--advertise-address=${cfg.advertiseAddress}"} \
+              ${optionalString (cfg.clientCaFile != null)
+                "--client-ca-file=${cfg.clientCaFile}"} \
+              --disable-admission-plugins=${concatStringsSep "," cfg.disableAdmissionPlugins} \
+              --enable-admission-plugins=${concatStringsSep "," cfg.enableAdmissionPlugins} \
+              --etcd-servers=${concatStringsSep "," cfg.etcd.servers} \
+              ${optionalString (cfg.etcd.caFile != null)
+                "--etcd-cafile=${cfg.etcd.caFile}"} \
+              ${optionalString (cfg.etcd.certFile != null)
+                "--etcd-certfile=${cfg.etcd.certFile}"} \
+              ${optionalString (cfg.etcd.keyFile != null)
+                "--etcd-keyfile=${cfg.etcd.keyFile}"} \
+              ${optionalString (cfg.featureGates != [])
+                "--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.featureGates}"} \
+              ${optionalString (cfg.basicAuthFile != null)
+                "--basic-auth-file=${cfg.basicAuthFile}"} \
+              --kubelet-https=${boolToString cfg.kubeletHttps} \
+              ${optionalString (cfg.kubeletClientCaFile != null)
+                "--kubelet-certificate-authority=${cfg.kubeletClientCaFile}"} \
+              ${optionalString (cfg.kubeletClientCertFile != null)
+                "--kubelet-client-certificate=${cfg.kubeletClientCertFile}"} \
+              ${optionalString (cfg.kubeletClientKeyFile != null)
+                "--kubelet-client-key=${cfg.kubeletClientKeyFile}"} \
+              --insecure-bind-address=${cfg.insecureBindAddress} \
+              --insecure-port=${toString cfg.insecurePort} \
+              ${optionalString (cfg.runtimeConfig != "")
+                "--runtime-config=${cfg.runtimeConfig}"} \
+              --secure-port=${toString cfg.securePort} \
+              ${optionalString (cfg.serviceAccountKeyFile!=null)
+                "--service-account-key-file=${cfg.serviceAccountKeyFile}"} \
+              --service-cluster-ip-range=${cfg.serviceClusterIpRange} \
+              --storage-backend=${cfg.storageBackend} \
+              ${optionalString (cfg.tlsCertFile != null)
+                "--tls-cert-file=${cfg.tlsCertFile}"} \
+              ${optionalString (cfg.tlsKeyFile != null)
+                "--tls-private-key-file=${cfg.tlsKeyFile}"} \
+              ${optionalString (cfg.tokenAuthFile != null)
+                "--token-auth-file=${cfg.tokenAuthFile}"} \
+              ${optionalString (cfg.verbosity != null) "--v=${toString cfg.verbosity}"} \
+              ${cfg.extraOpts}
+            '';
+            WorkingDirectory = top.dataDir;
+            User = "kubernetes";
+            Group = "kubernetes";
+            AmbientCapabilities = "cap_net_bind_service";
+            Restart = "on-failure";
+            RestartSec = 5;
+          };
+        };
+
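+        # Single-member etcd defaults derived from masterAddress; all
+        # settings except name use mkDefault, so a multi-member etcd
+        # cluster can still override them.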
+        services.etcd = {
+          clientCertAuth = mkDefault true;
+          peerClientCertAuth = mkDefault true;
+          listenClientUrls = mkDefault ["https://0.0.0.0:2379"];
+          listenPeerUrls = mkDefault ["https://0.0.0.0:2380"];
+          advertiseClientUrls = mkDefault ["https://${top.masterAddress}:2379"];
+          initialCluster = mkDefault ["${top.masterAddress}=https://${top.masterAddress}:2380"];
+          name = top.masterAddress;
+          initialAdvertisePeerUrls = mkDefault ["https://${top.masterAddress}:2380"];
+        };
+
+        services.kubernetes.addonManager.bootstrapAddons = mkIf isRBACEnabled {
+
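+          # Grants the apiserver client identity (CN
+          # "system:kube-apiserver", issued below) admin access to the
+          # kubelet API; under RBAC this is needed for requests that the
+          # apiserver proxies to kubelets, such as kubectl exec and logs.
+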
+          apiserver-kubelet-api-admin-crb = {
+            apiVersion = "rbac.authorization.k8s.io/v1";
+            kind = "ClusterRoleBinding";
+            metadata = {
+              name = "system:kube-apiserver:kubelet-api-admin";
+            };
+            roleRef = {
+              apiGroup = "rbac.authorization.k8s.io";
+              kind = "ClusterRole";
+              name = "system:kubelet-api-admin";
+            };
+            subjects = [{
+              kind = "User";
+              name = "system:kube-apiserver";
+            }];
+          };
+
+        };
+
+      services.kubernetes.pki.certs = with top.lib; {
+        apiServer = mkCert {
+          name = "kube-apiserver";
+          CN = "kubernetes";
+          hosts = [
+                    "kubernetes.default.svc"
+                    "kubernetes.default.svc.${top.addons.dns.clusterDomain}"
+                    cfg.advertiseAddress
+                    top.masterAddress
+                    apiserverServiceIP
+                    "127.0.0.1"
+                  ] ++ cfg.extraSANs;
+          action = "systemctl restart kube-apiserver.service";
+        };
+        apiserverKubeletClient = mkCert {
+          name = "kube-apiserver-kubelet-client";
+          CN = "system:kube-apiserver";
+          action = "systemctl restart kube-apiserver.service";
+        };
+        apiserverEtcdClient = mkCert {
+          name = "kube-apiserver-etcd-client";
+          CN = "etcd-client";
+          action = "systemctl restart kube-apiserver.service";
+        };
+        clusterAdmin = mkCert {
+          name = "cluster-admin";
+          CN = "cluster-admin";
+          fields = {
+            O = "system:masters";
+          };
+          privateKeyOwner = "root";
+        };
+        etcd = mkCert {
+          name = "etcd";
+          CN = top.masterAddress;
+          hosts = [
+                    "etcd.local"
+                    "etcd.${top.addons.dns.clusterDomain}"
+                    top.masterAddress
+                    cfg.advertiseAddress
+                  ];
+          privateKeyOwner = "etcd";
+          action = "systemctl restart etcd.service";
+        };
+      };
+
+    })
+
+  ];
+
+}
diff --git a/nixos/modules/services/cluster/kubernetes/controller-manager.nix b/nixos/modules/services/cluster/kubernetes/controller-manager.nix
new file mode 100644
index 00000000000..dff97f144d5
--- /dev/null
+++ b/nixos/modules/services/cluster/kubernetes/controller-manager.nix
@@ -0,0 +1,162 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  top = config.services.kubernetes;
+  cfg = top.controllerManager;
+in
+{
+  ###### interface
+  options.services.kubernetes.controllerManager = with lib.types; {
+
+    allocateNodeCIDRs = mkOption {
+      description = "Whether to automatically allocate CIDR ranges for cluster nodes.";
+      default = true;
+      type = bool;
+    };
+
+    bindAddress = mkOption {
+      description = "Kubernetes controller manager listening address.";
+      default = "127.0.0.1";
+      type = str;
+    };
+
+    clusterCidr = mkOption {
+      description = "Kubernetes CIDR Range for Pods in cluster.";
+      default = top.clusterCidr;
+      type = str;
+    };
+
+    enable = mkEnableOption "Kubernetes controller manager";
+
+    extraOpts = mkOption {
+      description = "Kubernetes controller manager extra command line options.";
+      default = "";
+      type = str;
+    };
+
+    featureGates = mkOption {
+      description = "List set of feature gates";
+      default = top.featureGates;
+      type = listOf str;
+    };
+
+    insecurePort = mkOption {
+      description = "Kubernetes controller manager insecure listening port.";
+      default = 0;
+      type = int;
+    };
+
+    kubeconfig = top.lib.mkKubeConfigOptions "Kubernetes controller manager";
+
+    leaderElect = mkOption {
+      description = "Whether to start leader election before executing main loop.";
+      type = bool;
+      default = true;
+    };
+
+    rootCaFile = mkOption {
+      description = ''
+        Kubernetes controller manager certificate authority file included in
+        service account's token secret.
+      '';
+      default = top.caFile;
+      type = nullOr path;
+    };
+
+    securePort = mkOption {
+      description = "Kubernetes controller manager secure listening port.";
+      default = 10252;
+      type = int;
+    };
+
+    serviceAccountKeyFile = mkOption {
+      description = ''
+        Kubernetes controller manager PEM-encoded private RSA key file used to
+        sign service account tokens.
+      '';
+      default = null;
+      type = nullOr path;
+    };
+
+    tlsCertFile = mkOption {
+      description = "Kubernetes controller-manager certificate file.";
+      default = null;
+      type = nullOr path;
+    };
+
+    tlsKeyFile = mkOption {
+      description = "Kubernetes controller-manager private key file.";
+      default = null;
+      type = nullOr path;
+    };
+
+    verbosity = mkOption {
+      description = ''
+        Optional glog verbosity level for logging statements. See
+        <link xlink:href="https://github.com/kubernetes/community/blob/master/contributors/devel/logging.md"/>
+      '';
+      default = null;
+      type = nullOr int;
+    };
+
+  };
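+
+  # Usage sketch (path illustrative):
+  #
+  #   services.kubernetes.controllerManager = {
+  #     enable = true;
+  #     serviceAccountKeyFile = "/var/lib/kubernetes/secrets/service-account-key.pem";
+  #   };
+  #
+  # The kubeconfig server defaults to apiserverAddress (see below), so
+  # it rarely needs to be set explicitly.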
+
+  ###### implementation
+  config = mkIf cfg.enable {
+    systemd.services.kube-controller-manager = {
+      description = "Kubernetes Controller Manager Service";
+      wantedBy = [ "kubernetes.target" ];
+      after = [ "kube-apiserver.service" ];
+      serviceConfig = {
+        RestartSec = "30s";
+        Restart = "on-failure";
+        Slice = "kubernetes.slice";
+        ExecStart = ''${top.package}/bin/kube-controller-manager \
+          --allocate-node-cidrs=${boolToString cfg.allocateNodeCIDRs} \
+          --bind-address=${cfg.bindAddress} \
+          ${optionalString (cfg.clusterCidr!=null)
+            "--cluster-cidr=${cfg.clusterCidr}"} \
+          ${optionalString (cfg.featureGates != [])
+            "--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.featureGates}"} \
+          --kubeconfig=${top.lib.mkKubeConfig "kube-controller-manager" cfg.kubeconfig} \
+          --leader-elect=${boolToString cfg.leaderElect} \
+          ${optionalString (cfg.rootCaFile!=null)
+            "--root-ca-file=${cfg.rootCaFile}"} \
+          --port=${toString cfg.insecurePort} \
+          --secure-port=${toString cfg.securePort} \
+          ${optionalString (cfg.serviceAccountKeyFile!=null)
+            "--service-account-private-key-file=${cfg.serviceAccountKeyFile}"} \
+          ${optionalString (cfg.tlsCertFile!=null)
+            "--tls-cert-file=${cfg.tlsCertFile}"} \
+          ${optionalString (cfg.tlsKeyFile!=null)
+            "--tls-key-file=${cfg.tlsKeyFile}"} \
+          ${optionalString (elem "RBAC" top.apiserver.authorizationMode)
+            "--use-service-account-credentials"} \
+          ${optionalString (cfg.verbosity != null) "--v=${toString cfg.verbosity}"} \
+          ${cfg.extraOpts}
+        '';
+        WorkingDirectory = top.dataDir;
+        User = "kubernetes";
+        Group = "kubernetes";
+      };
+      path = top.path;
+    };
+
+    services.kubernetes.pki.certs = with top.lib; {
+      controllerManager = mkCert {
+        name = "kube-controller-manager";
+        CN = "kube-controller-manager";
+        action = "systemctl restart kube-controller-manager.service";
+      };
+      controllerManagerClient = mkCert {
+        name = "kube-controller-manager-client";
+        CN = "system:kube-controller-manager";
+        action = "systemctl restart kube-controller-manager.service";
+      };
+    };
+
+    services.kubernetes.controllerManager.kubeconfig.server = mkDefault top.apiserverAddress;
+  };
+}
diff --git a/nixos/modules/services/cluster/kubernetes/default.nix b/nixos/modules/services/cluster/kubernetes/default.nix
index 6f3c45b29bf..375e33e91b5 100644
--- a/nixos/modules/services/cluster/kubernetes/default.nix
+++ b/nixos/modules/services/cluster/kubernetes/default.nix
@@ -5,74 +5,52 @@ with lib;
 let
   cfg = config.services.kubernetes;
 
-  # YAML config; see:
-  #   https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/
-  #   https://github.com/kubernetes/kubernetes/blob/release-1.10/pkg/kubelet/apis/kubeletconfig/v1beta1/types.go
-  #
-  # TODO: migrate the following flags to this config file
-  #
-  #   --pod-manifest-path
-  #   --address
-  #   --port
-  #   --tls-cert-file
-  #   --tls-private-key-file
-  #   --client-ca-file
-  #   --authentication-token-webhook
-  #   --authentication-token-webhook-cache-ttl
-  #   --authorization-mode
-  #   --healthz-bind-address
-  #   --healthz-port
-  #   --allow-privileged
-  #   --cluster-dns
-  #   --cluster-domain
-  #   --hairpin-mode
-  #   --feature-gates
-  kubeletConfig = pkgs.runCommand "kubelet-config.yaml" { } ''
-    echo > $out ${pkgs.lib.escapeShellArg (builtins.toJSON {
-      kind = "KubeletConfiguration";
-      apiVersion = "kubelet.config.k8s.io/v1beta1";
-      ${if cfg.kubelet.applyManifests then "staticPodPath" else null} =
-        manifests;
-    })}
-  '';
-
-  infraContainer = pkgs.dockerTools.buildImage {
-    name = "pause";
-    tag = "latest";
-    contents = cfg.package.pause;
-    config.Cmd = "/bin/pause";
-  };
-
-  mkKubeConfig = name: cfg: pkgs.writeText "${name}-kubeconfig" (builtins.toJSON {
+  mkKubeConfig = name: conf: pkgs.writeText "${name}-kubeconfig" (builtins.toJSON {
     apiVersion = "v1";
     kind = "Config";
     clusters = [{
       name = "local";
       cluster.certificate-authority = cfg.caFile;
-      cluster.server = cfg.server;
+      cluster.server = conf.server;
     }];
     users = [{
-      name = "kubelet";
+      inherit name;
       user = {
-        client-certificate = cfg.certFile;
-        client-key = cfg.keyFile;
+        client-certificate = conf.certFile;
+        client-key = conf.keyFile;
       };
     }];
     contexts = [{
       context = {
         cluster = "local";
-        user = "kubelet";
+        user = name;
       };
-      current-context = "kubelet-context";
+      current-context = "local";
     }];
   });
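+
+  # The generated kubeconfig is JSON, which kubectl and the kubernetes
+  # components accept directly (JSON is a subset of YAML). For example,
+  # mkKubeConfig "kube-proxy" { server = "https://master:6443"; ... }
+  # yields a config whose user entry is named "kube-proxy".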
 
+  caCert = secret "ca";
+
+  etcdEndpoints = ["https://${cfg.masterAddress}:2379"];
+
+  mkCert = { name, CN, hosts ? [], fields ? {}, action ? "",
+             privateKeyOwner ? "kubernetes" }: rec {
+    inherit name caCert CN hosts fields action;
+    cert = secret name;
+    key = secret "${name}-key";
+    privateKeyOptions = {
+      owner = privateKeyOwner;
+      group = "nogroup";
+      mode = "0600";
+      path = key;
+    };
+  };
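+
+  # For illustration: mkCert { name = "etcd"; CN = cfg.masterAddress; }
+  # resolves cert to "${cfg.secretsPath}/etcd.pem" and key to
+  # "${cfg.secretsPath}/etcd-key.pem" via the secret helper below, with
+  # the private key readable only by its owner (mode 0600).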
+
+  secret = name: "${cfg.secretsPath}/${name}.pem";
+
   mkKubeConfigOptions = prefix: {
     server = mkOption {
       description = "${prefix} kube-apiserver server address.";
-      default = "http://${if cfg.apiserver.advertiseAddress != null
-                          then cfg.apiserver.advertiseAddress
-                          else "127.0.0.1"}:${toString cfg.apiserver.port}";
       type = types.str;
     };
 
@@ -101,66 +79,6 @@ let
     certFile = mkDefault cfg.kubeconfig.certFile;
     keyFile = mkDefault cfg.kubeconfig.keyFile;
   };
-
-  cniConfig =
-    if cfg.kubelet.cni.config != [] && !(isNull cfg.kubelet.cni.configDir) then
-      throw "Verbatim CNI-config and CNI configDir cannot both be set."
-    else if !(isNull cfg.kubelet.cni.configDir) then
-      cfg.kubelet.cni.configDir
-    else
-      (pkgs.buildEnv {
-        name = "kubernetes-cni-config";
-        paths = imap (i: entry:
-          pkgs.writeTextDir "${toString (10+i)}-${entry.type}.conf" (builtins.toJSON entry)
-        ) cfg.kubelet.cni.config;
-      });
-
-  manifests = pkgs.buildEnv {
-    name = "kubernetes-manifests";
-    paths = mapAttrsToList (name: manifest:
-      pkgs.writeTextDir "${name}.json" (builtins.toJSON manifest)
-    ) cfg.kubelet.manifests;
-  };
-
-  addons = pkgs.runCommand "kubernetes-addons" { } ''
-    mkdir -p $out
-    # since we are mounting the addons to the addon manager, they need to be copied
-    ${concatMapStringsSep ";" (a: "cp -v ${a}/* $out/") (mapAttrsToList (name: addon:
-      pkgs.writeTextDir "${name}.json" (builtins.toJSON addon)
-    ) (cfg.addonManager.addons))}
-  '';
-
-  taintOptions = { name, ... }: {
-    options = {
-      key = mkOption {
-        description = "Key of taint.";
-        default = name;
-        type = types.str;
-      };
-      value = mkOption {
-        description = "Value of taint.";
-        type = types.str;
-      };
-      effect = mkOption {
-        description = "Effect of taint.";
-        example = "NoSchedule";
-        type = types.enum ["NoSchedule" "PreferNoSchedule" "NoExecute"];
-      };
-    };
-  };
-
-  taints = concatMapStringsSep "," (v: "${v.key}=${v.value}:${v.effect}") (mapAttrsToList (n: v: v) cfg.kubelet.taints);
-
-  # needed for flannel to pass options to docker
-  mkDockerOpts = pkgs.runCommand "mk-docker-opts" {
-    buildInputs = [ pkgs.makeWrapper ];
-  } ''
-    mkdir -p $out
-    cp ${pkgs.kubernetes.src}/cluster/centos/node/bin/mk-docker-opts.sh $out/mk-docker-opts.sh
-
-    # bashInteractive needed for `compgen`
-    makeWrapper ${pkgs.bashInteractive}/bin/bash $out/mk-docker-opts --add-flags "$out/mk-docker-opts.sh"
-  '';
 in {
 
   ###### interface
@@ -170,8 +88,9 @@ in {
       description = ''
         Kubernetes role that this machine should take.
 
-        Master role will enable etcd, apiserver, scheduler and controller manager
-        services. Node role will enable etcd, docker, kubelet and proxy services.
+        Master role will enable etcd, apiserver, scheduler, controller manager,
+        addon manager, flannel and proxy services.
+        Node role will enable flannel, docker, kubelet and proxy services.
       '';
       default = [];
       type = types.listOf (types.enum ["master" "node"]);
@@ -184,40 +103,17 @@ in {
       defaultText = "pkgs.kubernetes";
     };
 
-    verbose = mkOption {
-      description = "Kubernetes enable verbose mode for debugging.";
-      default = false;
-      type = types.bool;
-    };
-
-    etcd = {
-      servers = mkOption {
-        description = "List of etcd servers. By default etcd is started, except if this option is changed.";
-        default = ["http://127.0.0.1:2379"];
-        type = types.listOf types.str;
-      };
-
-      keyFile = mkOption {
-        description = "Etcd key file.";
-        default = null;
-        type = types.nullOr types.path;
-      };
-
-      certFile = mkOption {
-        description = "Etcd cert file.";
-        default = null;
-        type = types.nullOr types.path;
-      };
+    kubeconfig = mkKubeConfigOptions "Default kubeconfig";
 
-      caFile = mkOption {
-        description = "Etcd ca file.";
-        default = cfg.caFile;
-        type = types.nullOr types.path;
-      };
+    apiserverAddress = mkOption {
+      description = ''
+        Clusterwide accessible address for the kubernetes apiserver,
+        including protocol and optional port.
+      '';
+      example = "https://kubernetes-apiserver.example.com:6443";
+      type = types.str;
     };
 
-    kubeconfig = mkKubeConfigOptions "Default kubeconfig";
-
     caFile = mkOption {
       description = "Default kubernetes certificate authority";
       type = types.nullOr types.path;
@@ -230,549 +126,22 @@ in {
       type = types.path;
     };
 
+    easyCerts = mkOption {
+      description = "Automatically setup x509 certificates and keys for the entire cluster.";
+      default = false;
+      type = types.bool;
+    };
+
     featureGates = mkOption {
-      description = "List set of feature gates";
+      description = "List set of feature gates.";
       default = [];
       type = types.listOf types.str;
     };
 
-    apiserver = {
-      enable = mkOption {
-        description = "Whether to enable Kubernetes apiserver.";
-        default = false;
-        type = types.bool;
-      };
-
-      featureGates = mkOption {
-        description = "List set of feature gates";
-        default = cfg.featureGates;
-        type = types.listOf types.str;
-      };
-
-      bindAddress = mkOption {
-        description = ''
-          The IP address on which to listen for the --secure-port port.
-          The associated interface(s) must be reachable by the rest
-          of the cluster, and by CLI/web clients.
-        '';
-        default = "0.0.0.0";
-        type = types.str;
-      };
-
-      advertiseAddress = mkOption {
-        description = ''
-          Kubernetes apiserver IP address on which to advertise the apiserver
-          to members of the cluster. This address must be reachable by the rest
-          of the cluster.
-        '';
-        default = null;
-        type = types.nullOr types.str;
-      };
-
-      storageBackend = mkOption {
-        description = ''
-          Kubernetes apiserver storage backend.
-        '';
-        default = "etcd3";
-        type = types.enum ["etcd2" "etcd3"];
-      };
-
-      port = mkOption {
-        description = "Kubernetes apiserver listening port.";
-        default = 8080;
-        type = types.int;
-      };
-
-      securePort = mkOption {
-        description = "Kubernetes apiserver secure port.";
-        default = 443;
-        type = types.int;
-      };
-
-      tlsCertFile = mkOption {
-        description = "Kubernetes apiserver certificate file.";
-        default = null;
-        type = types.nullOr types.path;
-      };
-
-      tlsKeyFile = mkOption {
-        description = "Kubernetes apiserver private key file.";
-        default = null;
-        type = types.nullOr types.path;
-      };
-
-      clientCaFile = mkOption {
-        description = "Kubernetes apiserver CA file for client auth.";
-        default = cfg.caFile;
-        type = types.nullOr types.path;
-      };
-
-      tokenAuthFile = mkOption {
-        description = ''
-          Kubernetes apiserver token authentication file. See
-          <link xlink:href="https://kubernetes.io/docs/reference/access-authn-authz/authentication"/>
-        '';
-        default = null;
-        type = types.nullOr types.path;
-      };
-
-      basicAuthFile = mkOption {
-        description = ''
-          Kubernetes apiserver basic authentication file. See
-          <link xlink:href="https://kubernetes.io/docs/reference/access-authn-authz/authentication"/>
-        '';
-        default = pkgs.writeText "users" ''
-          kubernetes,admin,0
-        '';
-        type = types.nullOr types.path;
-      };
-
-      authorizationMode = mkOption {
-        description = ''
-          Kubernetes apiserver authorization mode (AlwaysAllow/AlwaysDeny/ABAC/Webhook/RBAC/Node). See
-          <link xlink:href="https://kubernetes.io/docs/reference/access-authn-authz/authorization/"/>
-        '';
-        default = ["RBAC" "Node"];
-        type = types.listOf (types.enum ["AlwaysAllow" "AlwaysDeny" "ABAC" "Webhook" "RBAC" "Node"]);
-      };
-
-      authorizationPolicy = mkOption {
-        description = ''
-          Kubernetes apiserver authorization policy file. See
-          <link xlink:href="https://kubernetes.io/docs/reference/access-authn-authz/authorization/"/>
-        '';
-        default = [];
-        type = types.listOf types.attrs;
-      };
-
-      webhookConfig = mkOption {
-        description = ''
-          Kubernetes apiserver Webhook config file. It uses the kubeconfig file format.
-          See <link xlink:href="https://kubernetes.io/docs/reference/access-authn-authz/webhook/"/>
-        '';
-        default = null;
-        type = types.nullOr types.path;
-      };
-
-      allowPrivileged = mkOption {
-        description = "Whether to allow privileged containers on Kubernetes.";
-        default = true;
-        type = types.bool;
-      };
-
-      serviceClusterIpRange = mkOption {
-        description = ''
-          A CIDR notation IP range from which to assign service cluster IPs.
-          This must not overlap with any IP ranges assigned to nodes for pods.
-        '';
-        default = "10.0.0.0/24";
-        type = types.str;
-      };
-
-      runtimeConfig = mkOption {
-        description = ''
-          Api runtime configuration. See
-          <link xlink:href="https://kubernetes.io/docs/tasks/administer-cluster/cluster-management/"/>
-        '';
-        default = "authentication.k8s.io/v1beta1=true";
-        example = "api/all=false,api/v1=true";
-        type = types.str;
-      };
-
-      enableAdmissionPlugins = mkOption {
-        description = ''
-          Kubernetes admission control plugins to enable. See
-          <link xlink:href="https://kubernetes.io/docs/admin/admission-controllers/"/>
-        '';
-        default = ["NamespaceLifecycle" "LimitRanger" "ServiceAccount" "ResourceQuota" "DefaultStorageClass" "DefaultTolerationSeconds" "NodeRestriction"];
-        example = [
-          "NamespaceLifecycle" "NamespaceExists" "LimitRanger"
-          "SecurityContextDeny" "ServiceAccount" "ResourceQuota"
-          "PodSecurityPolicy" "NodeRestriction" "DefaultStorageClass"
-        ];
-        type = types.listOf types.str;
-      };
-
-      disableAdmissionPlugins = mkOption {
-        description = ''
-          Kubernetes admission control plugins to disable. See
-          <link xlink:href="https://kubernetes.io/docs/admin/admission-controllers/"/>
-        '';
-        default = [];
-        type = types.listOf types.str;
-      };
-
-      serviceAccountKeyFile = mkOption {
-        description = ''
-          Kubernetes apiserver PEM-encoded x509 RSA private or public key file,
-          used to verify ServiceAccount tokens. By default tls private key file
-          is used.
-        '';
-        default = null;
-        type = types.nullOr types.path;
-      };
-
-      kubeletClientCaFile = mkOption {
-        description = "Path to a cert file for connecting to kubelet.";
-        default = cfg.caFile;
-        type = types.nullOr types.path;
-      };
-
-      kubeletClientCertFile = mkOption {
-        description = "Client certificate to use for connections to kubelet.";
-        default = null;
-        type = types.nullOr types.path;
-      };
-
-      kubeletClientKeyFile = mkOption {
-        description = "Key to use for connections to kubelet.";
-        default = null;
-        type = types.nullOr types.path;
-      };
-
-      kubeletHttps = mkOption {
-        description = "Whether to use https for connections to kubelet.";
-        default = true;
-        type = types.bool;
-      };
-
-      extraOpts = mkOption {
-        description = "Kubernetes apiserver extra command line options.";
-        default = "";
-        type = types.str;
-      };
-    };
-
-    scheduler = {
-      enable = mkOption {
-        description = "Whether to enable Kubernetes scheduler.";
-        default = false;
-        type = types.bool;
-      };
-
-      featureGates = mkOption {
-        description = "List set of feature gates";
-        default = cfg.featureGates;
-        type = types.listOf types.str;
-      };
-
-      address = mkOption {
-        description = "Kubernetes scheduler listening address.";
-        default = "127.0.0.1";
-        type = types.str;
-      };
-
-      port = mkOption {
-        description = "Kubernetes scheduler listening port.";
-        default = 10251;
-        type = types.int;
-      };
-
-      leaderElect = mkOption {
-        description = "Whether to start leader election before executing main loop.";
-        type = types.bool;
-        default = true;
-      };
-
-      kubeconfig = mkKubeConfigOptions "Kubernetes scheduler";
-
-      extraOpts = mkOption {
-        description = "Kubernetes scheduler extra command line options.";
-        default = "";
-        type = types.str;
-      };
-    };
-
-    controllerManager = {
-      enable = mkOption {
-        description = "Whether to enable Kubernetes controller manager.";
-        default = false;
-        type = types.bool;
-      };
-
-      featureGates = mkOption {
-        description = "List set of feature gates";
-        default = cfg.featureGates;
-        type = types.listOf types.str;
-      };
-
-      address = mkOption {
-        description = "Kubernetes controller manager listening address.";
-        default = "127.0.0.1";
-        type = types.str;
-      };
-
-      port = mkOption {
-        description = "Kubernetes controller manager listening port.";
-        default = 10252;
-        type = types.int;
-      };
-
-      leaderElect = mkOption {
-        description = "Whether to start leader election before executing main loop.";
-        type = types.bool;
-        default = true;
-      };
-
-      serviceAccountKeyFile = mkOption {
-        description = ''
-          Kubernetes controller manager PEM-encoded private RSA key file used to
-          sign service account tokens
-        '';
-        default = null;
-        type = types.nullOr types.path;
-      };
-
-      rootCaFile = mkOption {
-        description = ''
-          Kubernetes controller manager certificate authority file included in
-          service account's token secret.
-        '';
-        default = cfg.caFile;
-        type = types.nullOr types.path;
-      };
-
-      kubeconfig = mkKubeConfigOptions "Kubernetes controller manager";
-
-      extraOpts = mkOption {
-        description = "Kubernetes controller manager extra command line options.";
-        default = "";
-        type = types.str;
-      };
-    };
-
-    kubelet = {
-      enable = mkOption {
-        description = "Whether to enable Kubernetes kubelet.";
-        default = false;
-        type = types.bool;
-      };
-
-      featureGates = mkOption {
-        description = "List set of feature gates";
-        default = cfg.featureGates;
-        type = types.listOf types.str;
-      };
-
-      seedDockerImages = mkOption {
-        description = "List of docker images to preload on system";
-        default = [];
-        type = types.listOf types.package;
-      };
-
-      registerNode = mkOption {
-        description = "Whether to auto register kubelet with API server.";
-        default = true;
-        type = types.bool;
-      };
-
-      address = mkOption {
-        description = "Kubernetes kubelet info server listening address.";
-        default = "0.0.0.0";
-        type = types.str;
-      };
-
-      port = mkOption {
-        description = "Kubernetes kubelet info server listening port.";
-        default = 10250;
-        type = types.int;
-      };
-
-      tlsCertFile = mkOption {
-        description = "File containing x509 Certificate for HTTPS.";
-        default = null;
-        type = types.nullOr types.path;
-      };
-
-      tlsKeyFile = mkOption {
-        description = "File containing x509 private key matching tlsCertFile.";
-        default = null;
-        type = types.nullOr types.path;
-      };
-
-      clientCaFile = mkOption {
-        description = "Kubernetes apiserver CA file for client authentication.";
-        default = cfg.caFile;
-        type = types.nullOr types.path;
-      };
-
-      healthz = {
-        bind = mkOption {
-          description = "Kubernetes kubelet healthz listening address.";
-          default = "127.0.0.1";
-          type = types.str;
-        };
-
-        port = mkOption {
-          description = "Kubernetes kubelet healthz port.";
-          default = 10248;
-          type = types.int;
-        };
-      };
-
-      hostname = mkOption {
-        description = "Kubernetes kubelet hostname override.";
-        default = config.networking.hostName;
-        type = types.str;
-      };
-
-      allowPrivileged = mkOption {
-        description = "Whether to allow Kubernetes containers to request privileged mode.";
-        default = true;
-        type = types.bool;
-      };
-
-      clusterDns = mkOption {
-        description = "Use alternative DNS.";
-        default = "10.1.0.1";
-        type = types.str;
-      };
-
-      clusterDomain = mkOption {
-        description = "Use alternative domain.";
-        default = config.services.kubernetes.addons.dns.clusterDomain;
-        type = types.str;
-      };
-
-      networkPlugin = mkOption {
-        description = "Network plugin to use by Kubernetes.";
-        type = types.nullOr (types.enum ["cni" "kubenet"]);
-        default = "kubenet";
-      };
-
-      cni = {
-        packages = mkOption {
-          description = "List of network plugin packages to install.";
-          type = types.listOf types.package;
-          default = [];
-        };
-
-        config = mkOption {
-          description = "Kubernetes CNI configuration.";
-          type = types.listOf types.attrs;
-          default = [];
-          example = literalExample ''
-            [{
-              "cniVersion": "0.2.0",
-              "name": "mynet",
-              "type": "bridge",
-              "bridge": "cni0",
-              "isGateway": true,
-              "ipMasq": true,
-              "ipam": {
-                  "type": "host-local",
-                  "subnet": "10.22.0.0/16",
-                  "routes": [
-                      { "dst": "0.0.0.0/0" }
-                  ]
-              }
-            } {
-              "cniVersion": "0.2.0",
-              "type": "loopback"
-            }]
-          '';
-        };
-
-        configDir = mkOption {
-          description = "Path to Kubernetes CNI configuration directory.";
-          type = types.nullOr types.path;
-          default = null;
-        };
-      };
-
-      manifests = mkOption {
-        description = "List of manifests to bootstrap with kubelet (only pods can be created as manifest entry)";
-        type = types.attrsOf types.attrs;
-        default = {};
-      };
-
-      applyManifests = mkOption {
-        description = "Whether to apply manifests (this is true for master node).";
-        default = false;
-        type = types.bool;
-      };
-
-      unschedulable = mkOption {
-        description = "Whether to set node taint to unschedulable=true as it is the case of node that has only master role.";
-        default = false;
-        type = types.bool;
-      };
-
-      taints = mkOption {
-        description = "Node taints (https://kubernetes.io/docs/concepts/configuration/assign-pod-node/).";
-        default = {};
-        type = types.attrsOf (types.submodule [ taintOptions ]);
-      };
-
-      nodeIp = mkOption {
-        description = "IP address of the node. If set, kubelet will use this IP address for the node.";
-        default = null;
-        type = types.nullOr types.str;
-      };
-
-      kubeconfig = mkKubeConfigOptions "Kubelet";
-
-      extraOpts = mkOption {
-        description = "Kubernetes kubelet extra command line options.";
-        default = "";
-        type = types.str;
-      };
-    };
-
-    proxy = {
-      enable = mkOption {
-        description = "Whether to enable Kubernetes proxy.";
-        default = false;
-        type = types.bool;
-      };
-
-      featureGates = mkOption {
-        description = "List set of feature gates";
-        default = cfg.featureGates;
-        type = types.listOf types.str;
-      };
-
-      address = mkOption {
-        description = "Kubernetes proxy listening address.";
-        default = "0.0.0.0";
-        type = types.str;
-      };
-
-      kubeconfig = mkKubeConfigOptions "Kubernetes proxy";
-
-      extraOpts = mkOption {
-        description = "Kubernetes proxy extra command line options.";
-        default = "";
-        type = types.str;
-      };
-    };
-
-    addonManager = {
-      enable = mkOption {
-        description = "Whether to enable Kubernetes addon manager.";
-        default = false;
-        type = types.bool;
-      };
-
-      addons = mkOption {
-        description = "Kubernetes addons (any kind of Kubernetes resource can be an addon).";
-        default = { };
-        type = types.attrsOf (types.either types.attrs (types.listOf types.attrs));
-        example = literalExample ''
-          {
-            "my-service" = {
-              "apiVersion" = "v1";
-              "kind" = "Service";
-              "metadata" = {
-                "name" = "my-service";
-                "namespace" = "default";
-              };
-              "spec" = { ... };
-            };
-          }
-          // import <nixpkgs/nixos/modules/services/cluster/kubernetes/dashboard.nix> { cfg = config.services.kubernetes; };
-        '';
-      };
+    masterAddress = mkOption {
+      description = "Clusterwide available network address or hostname for the kubernetes master server.";
+      example = "master.example.com";
+      type = types.str;
     };
 
     path = mkOption {
@@ -787,304 +156,75 @@ in {
       type = types.nullOr types.str;
     };
 
-    flannel.enable = mkOption {
-      description = "Whether to enable flannel networking";
-      default = false;
-      type = types.bool;
+    lib = mkOption {
+      description = "Common functions for the kubernetes modules.";
+      default = {
+        inherit mkCert;
+        inherit mkKubeConfig;
+        inherit mkKubeConfigOptions;
+      };
+      type = types.attrs;
     };
 
+    secretsPath = mkOption {
+      description = "Default location for kubernetes secrets. Not a store location.";
+      type = types.path;
+      default = cfg.dataDir + "/secrets";
+    };
   };
 
   ###### implementation
 
   config = mkMerge [
-    (mkIf cfg.kubelet.enable {
-      services.kubernetes.kubelet.seedDockerImages = [infraContainer];
-
-      systemd.services.kubelet-bootstrap = {
-        description = "Boostrap Kubelet";
-        wantedBy = ["kubernetes.target"];
-        after = ["docker.service" "network.target"];
-        path = with pkgs; [ docker ];
-        script = ''
-          ${concatMapStrings (img: ''
-            echo "Seeding docker image: ${img}"
-            docker load <${img}
-          '') cfg.kubelet.seedDockerImages}
-
-          rm /opt/cni/bin/* || true
-          ${concatMapStrings (package: ''
-            echo "Linking cni package: ${package}"
-            ln -fs ${package}/bin/* /opt/cni/bin
-          '') cfg.kubelet.cni.packages}
-        '';
-        serviceConfig = {
-          Slice = "kubernetes.slice";
-          Type = "oneshot";
-        };
-      };
 
-      systemd.services.kubelet = {
-        description = "Kubernetes Kubelet Service";
-        wantedBy = [ "kubernetes.target" ];
-        after = [ "network.target" "docker.service" "kube-apiserver.service" "kubelet-bootstrap.service" ];
-        path = with pkgs; [ gitMinimal openssh docker utillinux iproute ethtool thin-provisioning-tools iptables socat ] ++ cfg.path;
-        serviceConfig = {
-          Slice = "kubernetes.slice";
-          CPUAccounting = true;
-          MemoryAccounting = true;
-          ExecStart = ''${cfg.package}/bin/kubelet \
-            ${optionalString (taints != "")
-              "--register-with-taints=${taints}"} \
-            --kubeconfig=${mkKubeConfig "kubelet" cfg.kubelet.kubeconfig} \
-            --config=${kubeletConfig} \
-            --address=${cfg.kubelet.address} \
-            --port=${toString cfg.kubelet.port} \
-            --register-node=${boolToString cfg.kubelet.registerNode} \
-            ${optionalString (cfg.kubelet.tlsCertFile != null)
-              "--tls-cert-file=${cfg.kubelet.tlsCertFile}"} \
-            ${optionalString (cfg.kubelet.tlsKeyFile != null)
-              "--tls-private-key-file=${cfg.kubelet.tlsKeyFile}"} \
-            ${optionalString (cfg.kubelet.clientCaFile != null)
-              "--client-ca-file=${cfg.kubelet.clientCaFile}"} \
-            --authentication-token-webhook \
-            --authentication-token-webhook-cache-ttl="10s" \
-            --authorization-mode=Webhook \
-            --healthz-bind-address=${cfg.kubelet.healthz.bind} \
-            --healthz-port=${toString cfg.kubelet.healthz.port} \
-            --hostname-override=${cfg.kubelet.hostname} \
-            --allow-privileged=${boolToString cfg.kubelet.allowPrivileged} \
-            --root-dir=${cfg.dataDir} \
-            ${optionalString (cfg.kubelet.clusterDns != "")
-              "--cluster-dns=${cfg.kubelet.clusterDns}"} \
-            ${optionalString (cfg.kubelet.clusterDomain != "")
-              "--cluster-domain=${cfg.kubelet.clusterDomain}"} \
-            --pod-infra-container-image=pause \
-            ${optionalString (cfg.kubelet.networkPlugin != null)
-              "--network-plugin=${cfg.kubelet.networkPlugin}"} \
-            --cni-conf-dir=${cniConfig} \
-            --hairpin-mode=hairpin-veth \
-            ${optionalString (cfg.kubelet.nodeIp != null)
-              "--node-ip=${cfg.kubelet.nodeIp}"} \
-            ${optionalString (cfg.kubelet.featureGates != [])
-              "--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.kubelet.featureGates}"} \
-            ${optionalString cfg.verbose "--v=6 --log_flush_frequency=1s"} \
-            ${cfg.kubelet.extraOpts}
-          '';
-          WorkingDirectory = cfg.dataDir;
-        };
-      };
-
-      # Allways include cni plugins
-      services.kubernetes.kubelet.cni.packages = [pkgs.cni-plugins];
-
-      boot.kernelModules = ["br_netfilter"];
-
-      services.kubernetes.kubelet.kubeconfig = kubeConfigDefaults;
-    })
-
-    (mkIf (cfg.kubelet.applyManifests && cfg.kubelet.enable) {
-      environment.etc = mapAttrs' (name: manifest:
-        nameValuePair "kubernetes/manifests/${name}.json" {
-          text = builtins.toJSON manifest;
-          mode = "0755";
-        }
-      ) cfg.kubelet.manifests;
-    })
-
-    (mkIf (cfg.kubelet.unschedulable && cfg.kubelet.enable) {
-      services.kubernetes.kubelet.taints.unschedulable = {
-        value = "true";
-        effect = "NoSchedule";
-      };
-    })
-
-    (mkIf cfg.apiserver.enable {
-      systemd.services.kube-apiserver = {
-        description = "Kubernetes APIServer Service";
-        wantedBy = [ "kubernetes.target" ];
-        after = [ "network.target" "docker.service" ];
-        serviceConfig = {
-          Slice = "kubernetes.slice";
-          ExecStart = ''${cfg.package}/bin/kube-apiserver \
-            --etcd-servers=${concatStringsSep "," cfg.etcd.servers} \
-            ${optionalString (cfg.etcd.caFile != null)
-              "--etcd-cafile=${cfg.etcd.caFile}"} \
-            ${optionalString (cfg.etcd.certFile != null)
-              "--etcd-certfile=${cfg.etcd.certFile}"} \
-            ${optionalString (cfg.etcd.keyFile != null)
-              "--etcd-keyfile=${cfg.etcd.keyFile}"} \
-            --insecure-port=${toString cfg.apiserver.port} \
-            --bind-address=${cfg.apiserver.bindAddress} \
-            ${optionalString (cfg.apiserver.advertiseAddress != null)
-              "--advertise-address=${cfg.apiserver.advertiseAddress}"} \
-            --allow-privileged=${boolToString cfg.apiserver.allowPrivileged}\
-            ${optionalString (cfg.apiserver.tlsCertFile != null)
-              "--tls-cert-file=${cfg.apiserver.tlsCertFile}"} \
-            ${optionalString (cfg.apiserver.tlsKeyFile != null)
-              "--tls-private-key-file=${cfg.apiserver.tlsKeyFile}"} \
-            ${optionalString (cfg.apiserver.tokenAuthFile != null)
-              "--token-auth-file=${cfg.apiserver.tokenAuthFile}"} \
-            ${optionalString (cfg.apiserver.basicAuthFile != null)
-              "--basic-auth-file=${cfg.apiserver.basicAuthFile}"} \
-            --kubelet-https=${if cfg.apiserver.kubeletHttps then "true" else "false"} \
-            ${optionalString (cfg.apiserver.kubeletClientCaFile != null)
-              "--kubelet-certificate-authority=${cfg.apiserver.kubeletClientCaFile}"} \
-            ${optionalString (cfg.apiserver.kubeletClientCertFile != null)
-              "--kubelet-client-certificate=${cfg.apiserver.kubeletClientCertFile}"} \
-            ${optionalString (cfg.apiserver.kubeletClientKeyFile != null)
-              "--kubelet-client-key=${cfg.apiserver.kubeletClientKeyFile}"} \
-            ${optionalString (cfg.apiserver.clientCaFile != null)
-              "--client-ca-file=${cfg.apiserver.clientCaFile}"} \
-            --authorization-mode=${concatStringsSep "," cfg.apiserver.authorizationMode} \
-            ${optionalString (elem "ABAC" cfg.apiserver.authorizationMode)
-              "--authorization-policy-file=${
-                pkgs.writeText "kube-auth-policy.jsonl"
-                (concatMapStringsSep "\n" (l: builtins.toJSON l) cfg.apiserver.authorizationPolicy)
-              }"
-            } \
-            ${optionalString (elem "Webhook" cfg.apiserver.authorizationMode)
-              "--authorization-webhook-config-file=${cfg.apiserver.webhookConfig}"
-            } \
-            --secure-port=${toString cfg.apiserver.securePort} \
-            --service-cluster-ip-range=${cfg.apiserver.serviceClusterIpRange} \
-            ${optionalString (cfg.apiserver.runtimeConfig != "")
-              "--runtime-config=${cfg.apiserver.runtimeConfig}"} \
-            --enable-admission-plugins=${concatStringsSep "," cfg.apiserver.enableAdmissionPlugins} \
-            --disable-admission-plugins=${concatStringsSep "," cfg.apiserver.disableAdmissionPlugins} \
-            ${optionalString (cfg.apiserver.serviceAccountKeyFile!=null)
-              "--service-account-key-file=${cfg.apiserver.serviceAccountKeyFile}"} \
-            ${optionalString cfg.verbose "--v=6"} \
-            ${optionalString cfg.verbose "--log-flush-frequency=1s"} \
-            --storage-backend=${cfg.apiserver.storageBackend} \
-            ${optionalString (cfg.kubelet.featureGates != [])
-              "--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.kubelet.featureGates}"} \
-            ${cfg.apiserver.extraOpts}
-          '';
-          WorkingDirectory = cfg.dataDir;
-          User = "kubernetes";
-          Group = "kubernetes";
-          AmbientCapabilities = "cap_net_bind_service";
-          Restart = "on-failure";
-          RestartSec = 5;
-        };
-      };
+    (mkIf cfg.easyCerts {
+      services.kubernetes.pki.enable = mkDefault true;
+      services.kubernetes.caFile = caCert;
     })
 
-    (mkIf cfg.scheduler.enable {
-      systemd.services.kube-scheduler = {
-        description = "Kubernetes Scheduler Service";
-        wantedBy = [ "kubernetes.target" ];
-        after = [ "kube-apiserver.service" ];
-        serviceConfig = {
-          Slice = "kubernetes.slice";
-          ExecStart = ''${cfg.package}/bin/kube-scheduler \
-            --address=${cfg.scheduler.address} \
-            --port=${toString cfg.scheduler.port} \
-            --leader-elect=${boolToString cfg.scheduler.leaderElect} \
-            --kubeconfig=${mkKubeConfig "kube-scheduler" cfg.scheduler.kubeconfig} \
-            ${optionalString cfg.verbose "--v=6"} \
-            ${optionalString cfg.verbose "--log-flush-frequency=1s"} \
-            ${optionalString (cfg.scheduler.featureGates != [])
-              "--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.scheduler.featureGates}"} \
-            ${cfg.scheduler.extraOpts}
-          '';
-          WorkingDirectory = cfg.dataDir;
-          User = "kubernetes";
-          Group = "kubernetes";
-        };
-      };
-
-      services.kubernetes.scheduler.kubeconfig = kubeConfigDefaults;
-    })
-
-    (mkIf cfg.controllerManager.enable {
-      systemd.services.kube-controller-manager = {
-        description = "Kubernetes Controller Manager Service";
-        wantedBy = [ "kubernetes.target" ];
-        after = [ "kube-apiserver.service" ];
-        serviceConfig = {
-          RestartSec = "30s";
-          Restart = "on-failure";
-          Slice = "kubernetes.slice";
-          ExecStart = ''${cfg.package}/bin/kube-controller-manager \
-            --address=${cfg.controllerManager.address} \
-            --port=${toString cfg.controllerManager.port} \
-            --kubeconfig=${mkKubeConfig "kube-controller-manager" cfg.controllerManager.kubeconfig} \
-            --leader-elect=${boolToString cfg.controllerManager.leaderElect} \
-            ${if (cfg.controllerManager.serviceAccountKeyFile!=null)
-              then "--service-account-private-key-file=${cfg.controllerManager.serviceAccountKeyFile}"
-              else "--service-account-private-key-file=/var/run/kubernetes/apiserver.key"} \
-            ${if (cfg.controllerManager.rootCaFile!=null)
-              then "--root-ca-file=${cfg.controllerManager.rootCaFile}"
-              else "--root-ca-file=/var/run/kubernetes/apiserver.crt"} \
-            ${if (cfg.clusterCidr!=null)
-              then "--cluster-cidr=${cfg.clusterCidr} --allocate-node-cidrs=true"
-              else "--allocate-node-cidrs=false"} \
-            ${optionalString (cfg.controllerManager.featureGates != [])
-              "--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.controllerManager.featureGates}"} \
-            ${optionalString cfg.verbose "--v=6"} \
-            ${optionalString cfg.verbose "--log-flush-frequency=1s"} \
-            ${cfg.controllerManager.extraOpts}
-          '';
-          WorkingDirectory = cfg.dataDir;
-          User = "kubernetes";
-          Group = "kubernetes";
+    (mkIf (elem "master" cfg.roles) {
+      services.kubernetes.apiserver.enable = mkDefault true;
+      services.kubernetes.scheduler.enable = mkDefault true;
+      services.kubernetes.controllerManager.enable = mkDefault true;
+      services.kubernetes.addonManager.enable = mkDefault true;
+      services.kubernetes.proxy.enable = mkDefault true;
+      services.etcd.enable = true; # Cannot mkDefault because of flannel default options
+      services.kubernetes.kubelet = {
+        enable = mkDefault true;
+        taints = mkIf (!(elem "node" cfg.roles)) {
+          master = {
+            key = "node-role.kubernetes.io/master";
+            value = "true";
+            effect = "NoSchedule";
+          };
         };
-        path = cfg.path;
       };
-
-      services.kubernetes.controllerManager.kubeconfig = kubeConfigDefaults;
     })
 
-    (mkIf cfg.proxy.enable {
-      systemd.services.kube-proxy = {
-        description = "Kubernetes Proxy Service";
-        wantedBy = [ "kubernetes.target" ];
-        after = [ "kube-apiserver.service" ];
-        path = [pkgs.iptables pkgs.conntrack_tools];
-        serviceConfig = {
-          Slice = "kubernetes.slice";
-          ExecStart = ''${cfg.package}/bin/kube-proxy \
-            --kubeconfig=${mkKubeConfig "kube-proxy" cfg.proxy.kubeconfig} \
-            --bind-address=${cfg.proxy.address} \
-            ${optionalString (cfg.proxy.featureGates != [])
-              "--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.proxy.featureGates}"} \
-            ${optionalString cfg.verbose "--v=6"} \
-            ${optionalString cfg.verbose "--log-flush-frequency=1s"} \
-            ${optionalString (cfg.clusterCidr!=null)
-              "--cluster-cidr=${cfg.clusterCidr}"} \
-            ${cfg.proxy.extraOpts}
-          '';
-          WorkingDirectory = cfg.dataDir;
-        };
-      };
-
-      # kube-proxy needs iptables
-      networking.firewall.enable = mkDefault true;
 
-      services.kubernetes.proxy.kubeconfig = kubeConfigDefaults;
+    (mkIf (all (el: el == "master") cfg.roles) {
+      # if this node is only a master make it unschedulable by default
+      services.kubernetes.kubelet.unschedulable = mkDefault true;
     })
 
-    (mkIf (any (el: el == "master") cfg.roles) {
-      virtualisation.docker.enable = mkDefault true;
+    (mkIf (elem "node" cfg.roles) {
       services.kubernetes.kubelet.enable = mkDefault true;
-      services.kubernetes.kubelet.allowPrivileged = mkDefault true;
-      services.kubernetes.kubelet.applyManifests = mkDefault true;
-      services.kubernetes.apiserver.enable = mkDefault true;
-      services.kubernetes.scheduler.enable = mkDefault true;
-      services.kubernetes.controllerManager.enable = mkDefault true;
-      services.etcd.enable = mkDefault (cfg.etcd.servers == ["http://127.0.0.1:2379"]);
-      services.kubernetes.addonManager.enable = mkDefault true;
       services.kubernetes.proxy.enable = mkDefault true;
     })
 
-    # if this node is only a master make it unschedulable by default
-    (mkIf (all (el: el == "master") cfg.roles) {
-      services.kubernetes.kubelet.unschedulable = mkDefault true;
+    # Using "services.kubernetes.roles" will automatically enable easyCerts and flannel
+    (mkIf (cfg.roles != []) {
+      services.kubernetes.flannel.enable = mkDefault true;
+      services.flannel.etcd.endpoints = mkDefault etcdEndpoints;
+      services.kubernetes.easyCerts = mkDefault true;
+    })
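+    # Minimal usage sketch (host name is hypothetical):
+    #   services.kubernetes.roles = [ "master" "node" ];
+    #   services.kubernetes.masterAddress = "kubemaster.local";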
+
+    (mkIf cfg.apiserver.enable {
+      services.kubernetes.pki.etcClusterAdminKubeconfig = mkDefault "kubernetes/cluster-admin.kubeconfig";
+      services.kubernetes.apiserver.etcd.servers = mkDefault etcdEndpoints;
     })
 
-    (mkIf (any (el: el == "node") cfg.roles) {
+    (mkIf cfg.kubelet.enable {
       virtualisation.docker = {
         enable = mkDefault true;
 
@@ -1094,26 +234,18 @@ in {
         # iptables must be disabled for kubernetes
         extraOptions = "--iptables=false --ip-masq=false";
       };
-
-      services.kubernetes.kubelet.enable = mkDefault true;
-      services.kubernetes.proxy.enable = mkDefault true;
     })
 
-    (mkIf cfg.addonManager.enable {
-      environment.etc."kubernetes/addons".source = "${addons}/";
-
-      systemd.services.kube-addon-manager = {
-        description = "Kubernetes addon manager";
-        wantedBy = [ "kubernetes.target" ];
-        after = [ "kube-apiserver.service" ];
-        environment.ADDON_PATH = "/etc/kubernetes/addons/";
-        path = [ pkgs.gawk ];
-        serviceConfig = {
-          Slice = "kubernetes.slice";
-          ExecStart = "${cfg.package}/bin/kube-addons";
-          WorkingDirectory = cfg.dataDir;
-          User = "kubernetes";
-          Group = "kubernetes";
+    (mkIf (cfg.apiserver.enable || cfg.controllerManager.enable) {
+      services.kubernetes.pki.certs = {
+        serviceAccount = mkCert {
+          name = "service-account";
+          CN = "system:service-account-signer";
+          action = ''
+            systemctl reload \
+              kube-apiserver.service \
+              kube-controller-manager.service
+          '';
         };
       };
     })
@@ -1123,7 +255,8 @@ in {
         cfg.scheduler.enable ||
         cfg.controllerManager.enable ||
         cfg.kubelet.enable ||
-        cfg.proxy.enable
+        cfg.proxy.enable ||
+        cfg.addonManager.enable
     ) {
       systemd.targets.kubernetes = {
         description = "Kubernetes";
@@ -1132,11 +265,10 @@ in {
 
       systemd.tmpfiles.rules = [
         "d /opt/cni/bin 0755 root root -"
-        "d /var/run/kubernetes 0755 kubernetes kubernetes -"
+        "d /run/kubernetes 0755 kubernetes kubernetes -"
         "d /var/lib/kubernetes 0755 kubernetes kubernetes -"
       ];
 
-      environment.systemPackages = [ cfg.package ];
       users.users = singleton {
         name = "kubernetes";
         uid = config.ids.uids.kubernetes;
@@ -1148,53 +280,12 @@ in {
       };
       users.groups.kubernetes.gid = config.ids.gids.kubernetes;
 
-			# dns addon is enabled by default
+      # dns addon is enabled by default
       services.kubernetes.addons.dns.enable = mkDefault true;
-    })
 
-    (mkIf cfg.flannel.enable {
-      services.flannel = {
-        enable = mkDefault true;
-        network = mkDefault cfg.clusterCidr;
-        etcd = mkDefault {
-          endpoints = cfg.etcd.servers;
-          inherit (cfg.etcd) caFile certFile keyFile;
-        };
-      };
-
-      services.kubernetes.kubelet = {
-        networkPlugin = mkDefault "cni";
-        cni.config = mkDefault [{
-          name = "mynet";
-          type = "flannel";
-          delegate = {
-            isDefaultGateway = true;
-            bridge = "docker0";
-          };
-        }];
-      };
-
-      systemd.services."mk-docker-opts" = {
-        description = "Pre-Docker Actions";
-        wantedBy = [ "flannel.service" ];
-        before = [ "docker.service" ];
-        after = [ "flannel.service" ];
-        path = [ pkgs.gawk pkgs.gnugrep ];
-        script = ''
-          mkdir -p /run/flannel
-          ${mkDockerOpts}/mk-docker-opts -d /run/flannel/docker
-        '';
-        serviceConfig.Type = "oneshot";
-      };
-      systemd.services.docker.serviceConfig.EnvironmentFile = "/run/flannel/docker";
-
-      # read environment variables generated by mk-docker-opts
-      virtualisation.docker.extraOptions = "$DOCKER_OPTS";
-
-      networking.firewall.allowedUDPPorts = [
-        8285  # flannel udp
-        8472  # flannel vxlan
-      ];
+      services.kubernetes.apiserverAddress = mkDefault ("https://${if cfg.apiserver.advertiseAddress != null
+                          then cfg.apiserver.advertiseAddress
+                          else "${cfg.masterAddress}:${toString cfg.apiserver.securePort}"}");
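+      # note: an explicit apiserver.advertiseAddress is used verbatim here (no
+      # port appended), while the masterAddress fallback gets ":<securePort>"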
     })
   ];
 }
diff --git a/nixos/modules/services/cluster/kubernetes/flannel.nix b/nixos/modules/services/cluster/kubernetes/flannel.nix
new file mode 100644
index 00000000000..93ee2fd65ee
--- /dev/null
+++ b/nixos/modules/services/cluster/kubernetes/flannel.nix
@@ -0,0 +1,134 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  top = config.services.kubernetes;
+  cfg = top.flannel;
+
+  # we want flannel to use kubernetes itself as configuration backend, not direct etcd
+  storageBackend = "kubernetes";
+
+  # needed for flannel to pass options to docker
+  mkDockerOpts = pkgs.runCommand "mk-docker-opts" {
+    buildInputs = [ pkgs.makeWrapper ];
+  } ''
+    mkdir -p $out
+    cp ${pkgs.kubernetes.src}/cluster/centos/node/bin/mk-docker-opts.sh $out/mk-docker-opts.sh
+
+    # bashInteractive needed for `compgen`
+    makeWrapper ${pkgs.bashInteractive}/bin/bash $out/mk-docker-opts --add-flags "$out/mk-docker-opts.sh"
+  '';
+in
+{
+  ###### interface
+  options.services.kubernetes.flannel = {
+    enable = mkEnableOption "flannel networking";
+  };
+
+  ###### implementation
+  config = mkIf cfg.enable {
+    services.flannel = {
+      enable = mkDefault true;
+      network = mkDefault top.clusterCidr;
+      inherit storageBackend;
+      nodeName = config.services.kubernetes.kubelet.hostname;
+    };
+
+    services.kubernetes.kubelet = {
+      networkPlugin = mkDefault "cni";
+      cni.config = mkDefault [{
+        name = "mynet";
+        type = "flannel";
+        delegate = {
+          isDefaultGateway = true;
+          bridge = "docker0";
+        };
+      }];
+    };
+
+    systemd.services."mk-docker-opts" = {
+      description = "Pre-Docker Actions";
+      path = with pkgs; [ gawk gnugrep ];
+      script = ''
+        ${mkDockerOpts}/mk-docker-opts -d /run/flannel/docker
+        systemctl restart docker
+      '';
+      serviceConfig.Type = "oneshot";
+    };
+
+    systemd.paths."flannel-subnet-env" = {
+      wantedBy = [ "flannel.service" ];
+      pathConfig = {
+        PathModified = "/run/flannel/subnet.env";
+        Unit = "mk-docker-opts.service";
+      };
+    };
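+    # the path unit above re-triggers mk-docker-opts.service whenever flannel
+    # rewrites /run/flannel/subnet.env, regenerating /run/flannel/docker and
+    # restarting docker with the fresh subnet options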
+
+    systemd.services.docker = {
+      environment.DOCKER_OPTS = "-b none";
+      serviceConfig.EnvironmentFile = "-/run/flannel/docker";
+    };
+
+    # read environment variables generated by mk-docker-opts
+    virtualisation.docker.extraOptions = "$DOCKER_OPTS";
+
+    networking = {
+      firewall.allowedUDPPorts = [
+        8285  # flannel udp
+        8472  # flannel vxlan
+      ];
+      dhcpcd.denyInterfaces = [ "docker*" "flannel*" ];
+    };
+
+    services.kubernetes.pki.certs = {
+      flannelClient = top.lib.mkCert {
+        name = "flannel-client";
+        CN = "flannel-client";
+        action = "systemctl restart flannel.service";
+      };
+    };
+
+    # give flannel some Kubernetes RBAC permissions if applicable
+    services.kubernetes.addonManager.bootstrapAddons = mkIf ((storageBackend == "kubernetes") && (elem "RBAC" top.apiserver.authorizationMode)) {
+
+      flannel-cr = {
+        apiVersion = "rbac.authorization.k8s.io/v1beta1";
+        kind = "ClusterRole";
+        metadata = { name = "flannel"; };
+        rules = [{
+          apiGroups = [ "" ];
+          resources = [ "pods" ];
+          verbs = [ "get" ];
+        }
+        {
+          apiGroups = [ "" ];
+          resources = [ "nodes" ];
+          verbs = [ "list" "watch" ];
+        }
+        {
+          apiGroups = [ "" ];
+          resources = [ "nodes/status" ];
+          verbs = [ "patch" ];
+        }];
+      };
+
+      flannel-crb = {
+        apiVersion = "rbac.authorization.k8s.io/v1beta1";
+        kind = "ClusterRoleBinding";
+        metadata = { name = "flannel"; };
+        roleRef = {
+          apiGroup = "rbac.authorization.k8s.io";
+          kind = "ClusterRole";
+          name = "flannel";
+        };
+        subjects = [{
+          kind = "User";
+          name = "flannel-client";
+        }];
+      };
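+      # the bound user name matches the CN of the flannel-client certificate
+      # issued above, so flannel's TLS identity picks up these permissions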
+
+    };
+  };
+}
diff --git a/nixos/modules/services/cluster/kubernetes/kubelet.nix b/nixos/modules/services/cluster/kubernetes/kubelet.nix
new file mode 100644
index 00000000000..c94bb28bf7f
--- /dev/null
+++ b/nixos/modules/services/cluster/kubernetes/kubelet.nix
@@ -0,0 +1,358 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  top = config.services.kubernetes;
+  cfg = top.kubelet;
+
+  cniConfig =
+    if cfg.cni.config != [] && cfg.cni.configDir != null then
+      throw "Verbatim CNI-config and CNI configDir cannot both be set."
+    else if cfg.cni.configDir != null then
+      cfg.cni.configDir
+    else
+      (pkgs.buildEnv {
+        name = "kubernetes-cni-config";
+        paths = imap (i: entry:
+          pkgs.writeTextDir "${toString (10+i)}-${entry.type}.conf" (builtins.toJSON entry)
+        ) cfg.cni.config;
+      });
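+  # verbatim CNI entries are rendered to JSON as <10+i>-<type>.conf, so the
+  # kubelet loads them in list order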
+
+  infraContainer = pkgs.dockerTools.buildImage {
+    name = "pause";
+    tag = "latest";
+    contents = top.package.pause;
+    config.Cmd = "/bin/pause";
+  };
+
+  kubeconfig = top.lib.mkKubeConfig "kubelet" cfg.kubeconfig;
+
+  manifests = pkgs.buildEnv {
+    name = "kubernetes-manifests";
+    paths = mapAttrsToList (name: manifest:
+      pkgs.writeTextDir "${name}.json" (builtins.toJSON manifest)
+    ) cfg.manifests;
+  };
+
+  manifestPath = "kubernetes/manifests";
+
+  taintOptions = with lib.types; { name, ... }: {
+    options = {
+      key = mkOption {
+        description = "Key of taint.";
+        default = name;
+        type = str;
+      };
+      value = mkOption {
+        description = "Value of taint.";
+        type = str;
+      };
+      effect = mkOption {
+        description = "Effect of taint.";
+        example = "NoSchedule";
+        type = enum ["NoSchedule" "PreferNoSchedule" "NoExecute"];
+      };
+    };
+  };
+
+  taints = concatMapStringsSep "," (v: "${v.key}=${v.value}:${v.effect}") (attrValues cfg.taints);
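+  # e.g. the master taint defined in default.nix,
+  # { key = "node-role.kubernetes.io/master"; value = "true"; effect = "NoSchedule"; },
+  # renders as "node-role.kubernetes.io/master=true:NoSchedule"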
+in
+{
+  ###### interface
+  options.services.kubernetes.kubelet = with lib.types; {
+
+    address = mkOption {
+      description = "Kubernetes kubelet info server listening address.";
+      default = "0.0.0.0";
+      type = str;
+    };
+
+    allowPrivileged = mkOption {
+      description = "Whether to allow Kubernetes containers to request privileged mode.";
+      default = false;
+      type = bool;
+    };
+
+    clusterDns = mkOption {
+      description = "Use alternative DNS.";
+      default = "10.1.0.1";
+      type = str;
+    };
+
+    clusterDomain = mkOption {
+      description = "Use alternative domain.";
+      default = config.services.kubernetes.addons.dns.clusterDomain;
+      type = str;
+    };
+
+    clientCaFile = mkOption {
+      description = "Kubernetes apiserver CA file for client authentication.";
+      default = top.caFile;
+      type = nullOr path;
+    };
+
+    cni = {
+      packages = mkOption {
+        description = "List of network plugin packages to install.";
+        type = listOf package;
+        default = [];
+      };
+
+      config = mkOption {
+        description = "Kubernetes CNI configuration.";
+        type = listOf attrs;
+        default = [];
+        example = literalExample ''
+          [{
+            "cniVersion": "0.2.0",
+            "name": "mynet",
+            "type": "bridge",
+            "bridge": "cni0",
+            "isGateway": true,
+            "ipMasq": true,
+            "ipam": {
+                "type": "host-local",
+                "subnet": "10.22.0.0/16",
+                "routes": [
+                    { "dst": "0.0.0.0/0" }
+                ]
+            }
+          } {
+            "cniVersion": "0.2.0",
+            "type": "loopback"
+          }]
+        '';
+      };
+
+      configDir = mkOption {
+        description = "Path to Kubernetes CNI configuration directory.";
+        type = nullOr path;
+        default = null;
+      };
+    };
+
+    enable = mkEnableOption "Kubernetes kubelet.";
+
+    extraOpts = mkOption {
+      description = "Kubernetes kubelet extra command line options.";
+      default = "";
+      type = str;
+    };
+
+    featureGates = mkOption {
+      description = "List set of feature gates";
+      default = top.featureGates;
+      type = listOf str;
+    };
+
+    healthz = {
+      bind = mkOption {
+        description = "Kubernetes kubelet healthz listening address.";
+        default = "127.0.0.1";
+        type = str;
+      };
+
+      port = mkOption {
+        description = "Kubernetes kubelet healthz port.";
+        default = 10248;
+        type = int;
+      };
+    };
+
+    hostname = mkOption {
+      description = "Kubernetes kubelet hostname override.";
+      default = config.networking.hostName;
+      type = str;
+    };
+
+    kubeconfig = top.lib.mkKubeConfigOptions "Kubelet";
+
+    manifests = mkOption {
+      description = "List of manifests to bootstrap with kubelet (only pods can be created as manifest entry)";
+      type = attrsOf attrs;
+      default = {};
+    };
+
+    networkPlugin = mkOption {
+      description = "Network plugin to use by Kubernetes.";
+      type = nullOr (enum ["cni" "kubenet"]);
+      default = "kubenet";
+    };
+
+    nodeIp = mkOption {
+      description = "IP address of the node. If set, kubelet will use this IP address for the node.";
+      default = null;
+      type = nullOr str;
+    };
+
+    registerNode = mkOption {
+      description = "Whether to auto register kubelet with API server.";
+      default = true;
+      type = bool;
+    };
+
+    port = mkOption {
+      description = "Kubernetes kubelet info server listening port.";
+      default = 10250;
+      type = int;
+    };
+
+    seedDockerImages = mkOption {
+      description = "List of docker images to preload on system";
+      default = [];
+      type = listOf package;
+    };
+
+    taints = mkOption {
+      description = "Node taints (https://kubernetes.io/docs/concepts/configuration/assign-pod-node/).";
+      default = {};
+      type = attrsOf (submodule [ taintOptions ]);
+    };
+
+    tlsCertFile = mkOption {
+      description = "File containing x509 Certificate for HTTPS.";
+      default = null;
+      type = nullOr path;
+    };
+
+    tlsKeyFile = mkOption {
+      description = "File containing x509 private key matching tlsCertFile.";
+      default = null;
+      type = nullOr path;
+    };
+
+    unschedulable = mkOption {
+      description = "Whether to set node taint to unschedulable=true as it is the case of node that has only master role.";
+      default = false;
+      type = bool;
+    };
+
+    verbosity = mkOption {
+      description = ''
+        Optional glog verbosity level for logging statements. See
+        <link xlink:href="https://github.com/kubernetes/community/blob/master/contributors/devel/logging.md"/>
+      '';
+      default = null;
+      type = nullOr int;
+    };
+
+  };
+
+  ###### implementation
+  config = mkMerge [
+    (mkIf cfg.enable {
+      services.kubernetes.kubelet.seedDockerImages = [infraContainer];
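+      # the locally-built "pause" infra image above is preloaded on boot and
+      # matched by --pod-infra-container-image=pause in ExecStart below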
+
+      systemd.services.kubelet = {
+        description = "Kubernetes Kubelet Service";
+        wantedBy = [ "kubernetes.target" ];
+        after = [ "network.target" "docker.service" "kube-apiserver.service" ];
+        path = with pkgs; [ gitMinimal openssh docker utillinux iproute ethtool thin-provisioning-tools iptables socat ] ++ top.path;
+        preStart = ''
+          ${concatMapStrings (img: ''
+            echo "Seeding docker image: ${img}"
+            docker load <${img}
+          '') cfg.seedDockerImages}
+
+          rm /opt/cni/bin/* || true
+          ${concatMapStrings (package: ''
+            echo "Linking cni package: ${package}"
+            ln -fs ${package}/bin/* /opt/cni/bin
+          '') cfg.cni.packages}
+        '';
+        serviceConfig = {
+          Slice = "kubernetes.slice";
+          CPUAccounting = true;
+          MemoryAccounting = true;
+          Restart = "on-failure";
+          RestartSec = "1000ms";
+          ExecStart = ''${top.package}/bin/kubelet \
+            --address=${cfg.address} \
+            --allow-privileged=${boolToString cfg.allowPrivileged} \
+            --authentication-token-webhook \
+            --authentication-token-webhook-cache-ttl="10s" \
+            --authorization-mode=Webhook \
+            ${optionalString (cfg.clientCaFile != null)
+              "--client-ca-file=${cfg.clientCaFile}"} \
+            ${optionalString (cfg.clusterDns != "")
+              "--cluster-dns=${cfg.clusterDns}"} \
+            ${optionalString (cfg.clusterDomain != "")
+              "--cluster-domain=${cfg.clusterDomain}"} \
+            --cni-conf-dir=${cniConfig} \
+            ${optionalString (cfg.featureGates != [])
+              "--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.featureGates}"} \
+            --hairpin-mode=hairpin-veth \
+            --healthz-bind-address=${cfg.healthz.bind} \
+            --healthz-port=${toString cfg.healthz.port} \
+            --hostname-override=${cfg.hostname} \
+            --kubeconfig=${kubeconfig} \
+            ${optionalString (cfg.networkPlugin != null)
+              "--network-plugin=${cfg.networkPlugin}"} \
+            ${optionalString (cfg.nodeIp != null)
+              "--node-ip=${cfg.nodeIp}"} \
+            --pod-infra-container-image=pause \
+            ${optionalString (cfg.manifests != {})
+              "--pod-manifest-path=/etc/${manifestPath}"} \
+            --port=${toString cfg.port} \
+            --register-node=${boolToString cfg.registerNode} \
+            ${optionalString (taints != "")
+              "--register-with-taints=${taints}"} \
+            --root-dir=${top.dataDir} \
+            ${optionalString (cfg.tlsCertFile != null)
+              "--tls-cert-file=${cfg.tlsCertFile}"} \
+            ${optionalString (cfg.tlsKeyFile != null)
+              "--tls-private-key-file=${cfg.tlsKeyFile}"} \
+            ${optionalString (cfg.verbosity != null) "--v=${toString cfg.verbosity}"} \
+            ${cfg.extraOpts}
+          '';
+          WorkingDirectory = top.dataDir;
+        };
+      };
+
+      # Always include the CNI plugins
+      services.kubernetes.kubelet.cni.packages = [pkgs.cni-plugins];
+
+      boot.kernelModules = ["br_netfilter"];
+
+      services.kubernetes.kubelet.hostname = with config.networking;
+        mkDefault (hostName + optionalString (domain != null) ".${domain}");
+
+      services.kubernetes.pki.certs = with top.lib; {
+        kubelet = mkCert {
+          name = "kubelet";
+          CN = top.kubelet.hostname;
+          action = "systemctl restart kubelet.service";
+        };
+        kubeletClient = mkCert {
+          name = "kubelet-client";
+          CN = "system:node:${top.kubelet.hostname}";
+          fields = {
+            O = "system:nodes";
+          };
+          action = "systemctl restart kubelet.service";
+        };
+      };
+
+      services.kubernetes.kubelet.kubeconfig.server = mkDefault top.apiserverAddress;
+    })
+
+    (mkIf (cfg.enable && cfg.manifests != {}) {
+      environment.etc = mapAttrs' (name: manifest:
+        nameValuePair "${manifestPath}/${name}.json" {
+          text = builtins.toJSON manifest;
+          mode = "0755";
+        }
+      ) cfg.manifests;
+    })
+
+    (mkIf (cfg.unschedulable && cfg.enable) {
+      services.kubernetes.kubelet.taints.unschedulable = {
+        value = "true";
+        effect = "NoSchedule";
+      };
+    })
+
+  ];
+}
diff --git a/nixos/modules/services/cluster/kubernetes/pki.nix b/nixos/modules/services/cluster/kubernetes/pki.nix
new file mode 100644
index 00000000000..38deca23a99
--- /dev/null
+++ b/nixos/modules/services/cluster/kubernetes/pki.nix
@@ -0,0 +1,388 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  top = config.services.kubernetes;
+  cfg = top.pki;
+
+  csrCA = pkgs.writeText "kube-pki-cacert-csr.json" (builtins.toJSON {
+    key = {
+        algo = "rsa";
+        size = 2048;
+    };
+    names = singleton cfg.caSpec;
+  });
+
+  csrCfssl = pkgs.writeText "kube-pki-cfssl-csr.json" (builtins.toJSON {
+    key = {
+        algo = "rsa";
+        size = 2048;
+    };
+    CN = top.masterAddress;
+  });
+
+  cfsslAPITokenBaseName = "apitoken.secret";
+  cfsslAPITokenPath = "${config.services.cfssl.dataDir}/${cfsslAPITokenBaseName}";
+  certmgrAPITokenPath = "${top.secretsPath}/${cfsslAPITokenBaseName}";
+  cfsslAPITokenLength = 32;
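+  # the cfssl preStart script below draws cfsslAPITokenLength / 2 bytes from
+  # /dev/urandom and hex-encodes them, so the token is exactly
+  # cfsslAPITokenLength characters long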
+
+  clusterAdminKubeconfig = with cfg.certs.clusterAdmin;
+    top.lib.mkKubeConfig "cluster-admin" {
+        server = top.apiserverAddress;
+        certFile = cert;
+        keyFile = key;
+    };
+
+  remote = with config.services; "https://${kubernetes.masterAddress}:${toString cfssl.port}";
+in
+{
+  ###### interface
+  options.services.kubernetes.pki = with lib.types; {
+
+    enable = mkEnableOption "Whether to enable easyCert issuer service.";
+
+    certs = mkOption {
+      description = "List of certificate specs to feed to cert generator.";
+      default = {};
+      type = attrs;
+    };
+
+    genCfsslCACert = mkOption {
+      description = ''
+        Whether to automatically generate cfssl CA certificate and key,
+        if they don't exist.
+      '';
+      default = true;
+      type = bool;
+    };
+
+    genCfsslAPICerts = mkOption {
+      description = ''
+        Whether to automatically generate cfssl API webserver TLS cert and key,
+        if they don't exist.
+      '';
+      default = true;
+      type = bool;
+    };
+
+    genCfsslAPIToken = mkOption {
+      description = ''
+        Whether to automatically generate the cfssl API-token secret,
+        if it doesn't exist.
+      '';
+      default = true;
+      type = bool;
+    };
+
+    pkiTrustOnBootstrap = mkOption {
+      description = "Whether to always trust remote cfssl server upon initial PKI bootstrap.";
+      default = true;
+      type = bool;
+    };
+
+    caCertPathPrefix = mkOption {
+      description = ''
+        Path prefix for the CA certificate to be used for cfssl signing.
+        Suffixes ".pem" and "-key.pem" will be automatically appended for
+        the public and private keys respectively.
+      '';
+      default = "${config.services.cfssl.dataDir}/ca";
+      type = str;
+    };
+
+    caSpec = mkOption {
+      description = "Certificate specification for the auto-generated CAcert.";
+      default = {
+        CN = "kubernetes-cluster-ca";
+        O = "NixOS";
+        OU = "services.kubernetes.pki.caSpec";
+        L = "auto-generated";
+      };
+      type = attrs;
+    };
+
+    etcClusterAdminKubeconfig = mkOption {
+      description = ''
+        Symlink a kubeconfig with cluster-admin privileges to environment path
+        (/etc/&lt;path&gt;).
+      '';
+      default = null;
+      type = nullOr str;
+    };
+
+  };
+
+  ###### implementation
+  config = mkIf cfg.enable
+  (let
+    cfsslCertPathPrefix = "${config.services.cfssl.dataDir}/cfssl";
+    cfsslCert = "${cfsslCertPathPrefix}.pem";
+    cfsslKey = "${cfsslCertPathPrefix}-key.pem";
+  in
+  {
+
+    services.cfssl = mkIf (top.apiserver.enable) {
+      enable = true;
+      address = "0.0.0.0";
+      tlsCert = cfsslCert;
+      tlsKey = cfsslKey;
+      configFile = toString (pkgs.writeText "cfssl-config.json" (builtins.toJSON {
+        signing = {
+          profiles = {
+            default = {
+              usages = ["digital signature"];
+              auth_key = "default";
+              expiry = "720h";
+            };
+          };
+        };
+        auth_keys = {
+          default = {
+            type = "standard";
+            key = "file:${cfsslAPITokenPath}";
+          };
+        };
+      }));
+    };
+
+    systemd.services.cfssl.preStart = with pkgs; with config.services.cfssl; mkIf (top.apiserver.enable)
+    (concatStringsSep "\n" [
+      "set -e"
+      (optionalString cfg.genCfsslCACert ''
+        if [ ! -f "${cfg.caCertPathPrefix}.pem" ]; then
+          ${cfssl}/bin/cfssl genkey -initca ${csrCA} | \
+            ${cfssl}/bin/cfssljson -bare ${cfg.caCertPathPrefix}
+        fi
+      '')
+      (optionalString cfg.genCfsslAPICerts ''
+        if [ ! -f "${dataDir}/cfssl.pem" ]; then
+          ${cfssl}/bin/cfssl gencert -ca "${cfg.caCertPathPrefix}.pem" -ca-key "${cfg.caCertPathPrefix}-key.pem" ${csrCfssl} | \
+            ${cfssl}/bin/cfssljson -bare ${cfsslCertPathPrefix}
+        fi
+      '')
+      (optionalString cfg.genCfsslAPIToken ''
+        if [ ! -f "${cfsslAPITokenPath}" ]; then
+          head -c ${toString (cfsslAPITokenLength / 2)} /dev/urandom | od -An -t x | tr -d ' ' >"${cfsslAPITokenPath}"
+        fi
+        chown cfssl "${cfsslAPITokenPath}" && chmod 400 "${cfsslAPITokenPath}"
+      '')]);
+
+    systemd.services.kube-certmgr-bootstrap = {
+      description = "Kubernetes certmgr bootstrapper";
+      wantedBy = [ "certmgr.service" ];
+      after = [ "cfssl.target" ];
+      script = concatStringsSep "\n" [''
+        set -e
+
+        # If there's a cfssl (cert issuer) running locally, don't rely on the
+        # user to manually paste the token in place; just symlink it.
+        # Otherwise, create the target file, ready for the user to insert the token.
+
+        if [ -f "${cfsslAPITokenPath}" ]; then
+          ln -fs "${cfsslAPITokenPath}" "${certmgrAPITokenPath}"
+        else
+          touch "${certmgrAPITokenPath}" && chmod 600 "${certmgrAPITokenPath}"
+        fi
+      ''
+      (optionalString (cfg.pkiTrustOnBootstrap) ''
+        if [ ! -f "${top.caFile}" ] || [ $(cat "${top.caFile}" | wc -c) -lt 1 ]; then
+          ${pkgs.curl}/bin/curl --fail-early -f -kd '{}' ${remote}/api/v1/cfssl/info | \
+            ${pkgs.cfssl}/bin/cfssljson -stdout >${top.caFile}
+        fi
+      '')
+      ];
+      serviceConfig = {
+        RestartSec = "10s";
+        Restart = "on-failure";
+      };
+    };
+
+    services.certmgr = {
+      enable = true;
+      package = pkgs.certmgr-selfsigned;
+      svcManager = "command";
+      specs =
+        let
+          mkSpec = _: cert: {
+            inherit (cert) action;
+            authority = {
+              inherit remote;
+              file.path = cert.caCert;
+              root_ca = cert.caCert;
+              profile = "default";
+              auth_key_file = certmgrAPITokenPath;
+            };
+            certificate = {
+              path = cert.cert;
+            };
+            private_key = cert.privateKeyOptions;
+            request = {
+              inherit (cert) CN hosts;
+              key = {
+                algo = "rsa";
+                size = 2048;
+              };
+              names = [ cert.fields ];
+            };
+          };
+        in
+          mapAttrs mkSpec cfg.certs;
+      };
+
+      #TODO: Get rid of kube-addon-manager in the future for the following reasons
+      # - it is basically just a shell script wrapped around kubectl
+      # - it assumes that it is clusterAdmin or can gain clusterAdmin rights through serviceAccount
+      # - it is designed to be used with k8s system components only
+      # - it would be better with a more Nix-oriented way of managing addons
+      systemd.services.kube-addon-manager = mkIf top.addonManager.enable (mkMerge [{
+        environment.KUBECONFIG = with cfg.certs.addonManager;
+          top.lib.mkKubeConfig "addon-manager" {
+            server = top.apiserverAddress;
+            certFile = cert;
+            keyFile = key;
+          };
+        }
+
+        (optionalAttrs (top.addonManager.bootstrapAddons != {}) {
+          serviceConfig.PermissionsStartOnly = true;
+          preStart = with pkgs;
+          let
+            files = mapAttrsToList (n: v: writeText "${n}.json" (builtins.toJSON v))
+              top.addonManager.bootstrapAddons;
+          in
+          ''
+            export KUBECONFIG=${clusterAdminKubeconfig}
+            ${kubectl}/bin/kubectl apply -f ${concatStringsSep " \\\n -f " files}
+          '';
+        })]);
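+      # bootstrapAddons are applied in preStart with the cluster-admin
+      # kubeconfig, since e.g. flannel's RBAC rules must exist before the
+      # addon-manager's own, less-privileged identity is of any use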
+
+      environment.etc.${cfg.etcClusterAdminKubeconfig}.source = mkIf (cfg.etcClusterAdminKubeconfig != null)
+        clusterAdminKubeconfig;
+
+      environment.systemPackages = mkIf (top.kubelet.enable || top.proxy.enable) [
+      (pkgs.writeScriptBin "nixos-kubernetes-node-join" ''
+        set -e
+        exec 1>&2
+
+        if [ $# -gt 0 ]; then
+          echo "Usage: $(basename $0)"
+          echo ""
+          echo "No args. Apitoken must be provided on stdin."
+          echo "To get the apitoken, execute: 'sudo cat ${certmgrAPITokenPath}' on the master node."
+          exit 1
+        fi
+
+        if [ $(id -u) != 0 ]; then
+          echo "Run as root please."
+          exit 1
+        fi
+
+        read -r token
+        if [ ''${#token} != ${toString cfsslAPITokenLength} ]; then
+          echo "Token must be of length ${toString cfsslAPITokenLength}."
+          exit 1
+        fi
+
+        echo $token > ${certmgrAPITokenPath}
+        chmod 600 ${certmgrAPITokenPath}
+
+        echo "Restarting certmgr..." >&1
+        systemctl restart certmgr
+
+        echo "Waiting for certs to appear..." >&1
+
+        ${optionalString top.kubelet.enable ''
+          while [ ! -f ${cfg.certs.kubelet.cert} ]; do sleep 1; done
+          echo "Restarting kubelet..." >&1
+          systemctl restart kubelet
+        ''}
+
+        ${optionalString top.proxy.enable ''
+          while [ ! -f ${cfg.certs.kubeProxyClient.cert} ]; do sleep 1; done
+          echo "Restarting kube-proxy..." >&1
+          systemctl restart kube-proxy
+        ''}
+
+        ${optionalString top.flannel.enable ''
+          while [ ! -f ${cfg.certs.flannelClient.cert} ]; do sleep 1; done
+          echo "Restarting flannel..." >&1
+          systemctl restart flannel
+        ''}
+
+        echo "Node joined succesfully"
+      '')];
+
+      # Isolate etcd on loopback on the master node;
+      # easyCerts does not support multi-master clusters at the moment anyway.
+      services.etcd = with cfg.certs.etcd; {
+        listenClientUrls = ["https://127.0.0.1:2379"];
+        listenPeerUrls = ["https://127.0.0.1:2380"];
+        advertiseClientUrls = ["https://etcd.local:2379"];
+        initialCluster = ["${top.masterAddress}=https://etcd.local:2380"];
+        initialAdvertisePeerUrls = ["https://etcd.local:2380"];
+        certFile = mkDefault cert;
+        keyFile = mkDefault key;
+        trustedCaFile = mkDefault caCert;
+      };
+      networking.extraHosts = mkIf (config.services.etcd.enable) ''
+        127.0.0.1 etcd.${top.addons.dns.clusterDomain} etcd.local
+      '';
+
+      services.flannel = with cfg.certs.flannelClient; {
+        kubeconfig = top.lib.mkKubeConfig "flannel" {
+          server = top.apiserverAddress;
+          certFile = cert;
+          keyFile = key;
+        };
+      };
+
+      services.kubernetes = {
+
+        apiserver = mkIf top.apiserver.enable (with cfg.certs.apiServer; {
+          etcd = with cfg.certs.apiserverEtcdClient; {
+            servers = ["https://etcd.local:2379"];
+            certFile = mkDefault cert;
+            keyFile = mkDefault key;
+            caFile = mkDefault caCert;
+          };
+          clientCaFile = mkDefault caCert;
+          tlsCertFile = mkDefault cert;
+          tlsKeyFile = mkDefault key;
+          serviceAccountKeyFile = mkDefault cfg.certs.serviceAccount.cert;
+          kubeletClientCaFile = mkDefault caCert;
+          kubeletClientCertFile = mkDefault cfg.certs.apiserverKubeletClient.cert;
+          kubeletClientKeyFile = mkDefault cfg.certs.apiserverKubeletClient.key;
+        });
+        controllerManager = mkIf top.controllerManager.enable {
+          serviceAccountKeyFile = mkDefault cfg.certs.serviceAccount.key;
+          rootCaFile = cfg.certs.controllerManagerClient.caCert;
+          kubeconfig = with cfg.certs.controllerManagerClient; {
+            certFile = mkDefault cert;
+            keyFile = mkDefault key;
+          };
+        };
+        scheduler = mkIf top.scheduler.enable {
+          kubeconfig = with cfg.certs.schedulerClient; {
+            certFile = mkDefault cert;
+            keyFile = mkDefault key;
+          };
+        };
+        kubelet = mkIf top.kubelet.enable {
+          clientCaFile = mkDefault cfg.certs.kubelet.caCert;
+          tlsCertFile = mkDefault cfg.certs.kubelet.cert;
+          tlsKeyFile = mkDefault cfg.certs.kubelet.key;
+          kubeconfig = with cfg.certs.kubeletClient; {
+            certFile = mkDefault cert;
+            keyFile = mkDefault key;
+          };
+        };
+        proxy = mkIf top.proxy.enable {
+          kubeconfig = with cfg.certs.kubeProxyClient; {
+            certFile = mkDefault cert;
+            keyFile = mkDefault key;
+          };
+        };
+      };
+    });
+}
diff --git a/nixos/modules/services/cluster/kubernetes/proxy.nix b/nixos/modules/services/cluster/kubernetes/proxy.nix
new file mode 100644
index 00000000000..83cd3e23100
--- /dev/null
+++ b/nixos/modules/services/cluster/kubernetes/proxy.nix
@@ -0,0 +1,82 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  top = config.services.kubernetes;
+  cfg = top.proxy;
+in
+{
+
+  ###### interface
+  options.services.kubernetes.proxy = with lib.types; {
+
+    bindAddress = mkOption {
+      description = "Kubernetes proxy listening address.";
+      default = "0.0.0.0";
+      type = str;
+    };
+
+    enable = mkEnableOption "Whether to enable Kubernetes proxy.";
+
+    extraOpts = mkOption {
+      description = "Kubernetes proxy extra command line options.";
+      default = "";
+      type = str;
+    };
+
+    featureGates = mkOption {
+      description = "List set of feature gates";
+      default = top.featureGates;
+      type = listOf str;
+    };
+
+    kubeconfig = top.lib.mkKubeConfigOptions "Kubernetes proxy";
+
+    verbosity = mkOption {
+      description = ''
+        Optional glog verbosity level for logging statements. See
+        <link xlink:href="https://github.com/kubernetes/community/blob/master/contributors/devel/logging.md"/>
+      '';
+      default = null;
+      type = nullOr int;
+    };
+
+  };
+
+  ###### implementation
+  config = mkIf cfg.enable {
+    systemd.services.kube-proxy = {
+      description = "Kubernetes Proxy Service";
+      wantedBy = [ "kubernetes.target" ];
+      after = [ "kube-apiserver.service" ];
+      path = with pkgs; [ iptables conntrack_tools ];
+      serviceConfig = {
+        Slice = "kubernetes.slice";
+        ExecStart = ''${top.package}/bin/kube-proxy \
+          --bind-address=${cfg.bindAddress} \
+          ${optionalString (top.clusterCidr!=null)
+            "--cluster-cidr=${top.clusterCidr}"} \
+          ${optionalString (cfg.featureGates != [])
+            "--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.featureGates}"} \
+          --kubeconfig=${top.lib.mkKubeConfig "kube-proxy" cfg.kubeconfig} \
+          ${optionalString (cfg.verbosity != null) "--v=${toString cfg.verbosity}"} \
+          ${cfg.extraOpts}
+        '';
+        WorkingDirectory = top.dataDir;
+        Restart = "on-failure";
+        RestartSec = 5;
+      };
+    };
+
+    services.kubernetes.pki.certs = {
+      kubeProxyClient = top.lib.mkCert {
+        name = "kube-proxy-client";
+        CN = "system:kube-proxy";
+        action = "systemctl restart kube-proxy.service";
+      };
+    };
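+    # the CN "system:kube-proxy" is the user that upstream Kubernetes binds to
+    # the system:node-proxier cluster role by default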
+
+    services.kubernetes.proxy.kubeconfig.server = mkDefault top.apiserverAddress;
+  };
+}
diff --git a/nixos/modules/services/cluster/kubernetes/scheduler.nix b/nixos/modules/services/cluster/kubernetes/scheduler.nix
new file mode 100644
index 00000000000..0305b9aefe5
--- /dev/null
+++ b/nixos/modules/services/cluster/kubernetes/scheduler.nix
@@ -0,0 +1,94 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  top = config.services.kubernetes;
+  cfg = top.scheduler;
+in
+{
+  ###### interface
+  options.services.kubernetes.scheduler = with lib.types; {
+
+    address = mkOption {
+      description = "Kubernetes scheduler listening address.";
+      default = "127.0.0.1";
+      type = str;
+    };
+
+    enable = mkEnableOption "Whether to enable Kubernetes scheduler.";
+
+    extraOpts = mkOption {
+      description = "Kubernetes scheduler extra command line options.";
+      default = "";
+      type = str;
+    };
+
+    featureGates = mkOption {
+      description = "List set of feature gates";
+      default = top.featureGates;
+      type = listOf str;
+    };
+
+    kubeconfig = top.lib.mkKubeConfigOptions "Kubernetes scheduler";
+
+    leaderElect = mkOption {
+      description = "Whether to start leader election before executing main loop.";
+      type = bool;
+      default = true;
+    };
+
+    port = mkOption {
+      description = "Kubernetes scheduler listening port.";
+      default = 10251;
+      type = int;
+    };
+
+    verbosity = mkOption {
+      description = ''
+        Optional glog verbosity level for logging statements. See
+        <link xlink:href="https://github.com/kubernetes/community/blob/master/contributors/devel/logging.md"/>
+      '';
+      default = null;
+      type = nullOr int;
+    };
+
+  };
+
+  ###### implementation
+  config = mkIf cfg.enable {
+    systemd.services.kube-scheduler = {
+      description = "Kubernetes Scheduler Service";
+      wantedBy = [ "kubernetes.target" ];
+      after = [ "kube-apiserver.service" ];
+      serviceConfig = {
+        Slice = "kubernetes.slice";
+        ExecStart = ''${top.package}/bin/kube-scheduler \
+          --address=${cfg.address} \
+          ${optionalString (cfg.featureGates != [])
+            "--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.featureGates}"} \
+          --kubeconfig=${top.lib.mkKubeConfig "kube-scheduler" cfg.kubeconfig} \
+          --leader-elect=${boolToString cfg.leaderElect} \
+          --port=${toString cfg.port} \
+          ${optionalString (cfg.verbosity != null) "--v=${toString cfg.verbosity}"} \
+          ${cfg.extraOpts}
+        '';
+        WorkingDirectory = top.dataDir;
+        User = "kubernetes";
+        Group = "kubernetes";
+        Restart = "on-failure";
+        RestartSec = 5;
+      };
+    };
+
+    services.kubernetes.pki.certs = {
+      schedulerClient = top.lib.mkCert {
+        name = "kube-scheduler-client";
+        CN = "system:kube-scheduler";
+        action = "systemctl restart kube-scheduler.service";
+      };
+    };
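+    # the CN "system:kube-scheduler" matches the user bound to the upstream
+    # system:kube-scheduler cluster role by default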
+
+    services.kubernetes.scheduler.kubeconfig.server = mkDefault top.apiserverAddress;
+  };
+}
diff --git a/nixos/modules/services/continuous-integration/buildkite-agent.nix b/nixos/modules/services/continuous-integration/buildkite-agent.nix
index 9daf391c73c..2136778aff4 100644
--- a/nixos/modules/services/continuous-integration/buildkite-agent.nix
+++ b/nixos/modules/services/continuous-integration/buildkite-agent.nix
@@ -24,7 +24,7 @@ let
       EOF
       chmod 755 $out/${name}
     '';
-  in pkgs.runCommand "buildkite-agent-hooks" {} ''
+  in pkgs.runCommand "buildkite-agent-hooks" { preferLocalBuild = true; } ''
     mkdir $out
     ${concatStringsSep "\n" (mapAttrsToList mkHookEntry (filterAttrs (n: v: v != null) cfg.hooks))}
   '';
diff --git a/nixos/modules/services/continuous-integration/gitlab-runner.nix b/nixos/modules/services/continuous-integration/gitlab-runner.nix
index a0aff1b8b5b..3ceaa6f5ff3 100644
--- a/nixos/modules/services/continuous-integration/gitlab-runner.nix
+++ b/nixos/modules/services/continuous-integration/gitlab-runner.nix
@@ -8,6 +8,7 @@ let
     if (cfg.configFile == null) then
       (pkgs.runCommand "config.toml" {
         buildInputs = [ pkgs.remarshal ];
+        preferLocalBuild = true;
       } ''
         remarshal -if json -of toml \
           < ${pkgs.writeText "config.json" (builtins.toJSON cfg.configOptions)} \
diff --git a/nixos/modules/services/databases/hbase.nix b/nixos/modules/services/databases/hbase.nix
index 4772e897efe..52f2d95b4e0 100644
--- a/nixos/modules/services/databases/hbase.nix
+++ b/nixos/modules/services/databases/hbase.nix
@@ -18,7 +18,7 @@ let
     </configuration>
   '';
 
-  configDir = pkgs.runCommand "hbase-config-dir" {} ''
+  configDir = pkgs.runCommand "hbase-config-dir" { preferLocalBuild = true; } ''
     mkdir -p $out
     cp ${cfg.package}/conf/* $out/
     rm $out/hbase-site.xml
diff --git a/nixos/modules/services/databases/influxdb.nix b/nixos/modules/services/databases/influxdb.nix
index d7a028b25d8..888bf13c3df 100644
--- a/nixos/modules/services/databases/influxdb.nix
+++ b/nixos/modules/services/databases/influxdb.nix
@@ -98,6 +98,7 @@ let
 
   configFile = pkgs.runCommand "config.toml" {
     buildInputs = [ pkgs.remarshal ];
+    preferLocalBuild = true;
   } ''
     remarshal -if json -of toml \
       < ${pkgs.writeText "config.json" (builtins.toJSON configOptions)} \
diff --git a/nixos/modules/services/desktops/pantheon/contractor.nix b/nixos/modules/services/desktops/pantheon/contractor.nix
new file mode 100644
index 00000000000..bd538db7241
--- /dev/null
+++ b/nixos/modules/services/desktops/pantheon/contractor.nix
@@ -0,0 +1,39 @@
+# Contractor
+
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.pantheon.contractor = {
+
+      enable = mkEnableOption "contractor, a desktop-wide extension service used by pantheon";
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf config.services.pantheon.contractor.enable {
+
+    environment.systemPackages = with pkgs.pantheon; [
+      contractor
+      extra-elementary-contracts
+    ];
+
+    services.dbus.packages = [ pkgs.pantheon.contractor ];
+
+    environment.pathsToLink = [
+      "/share/contractor"
+    ];
+
+  };
+
+}
diff --git a/nixos/modules/services/desktops/pantheon/files.nix b/nixos/modules/services/desktops/pantheon/files.nix
new file mode 100644
index 00000000000..2edbe5b3a6d
--- /dev/null
+++ b/nixos/modules/services/desktops/pantheon/files.nix
@@ -0,0 +1,36 @@
+# pantheon files daemon.
+
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.pantheon.files = {
+
+      enable = mkEnableOption "pantheon files daemon";
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf config.services.pantheon.files.enable {
+
+    environment.systemPackages = [
+      pkgs.pantheon.elementary-files
+    ];
+
+    services.dbus.packages = [
+      pkgs.pantheon.elementary-files
+    ];
+
+  };
+
+}
diff --git a/nixos/modules/services/desktops/tumbler.nix b/nixos/modules/services/desktops/tumbler.nix
new file mode 100644
index 00000000000..ccbb6d1434d
--- /dev/null
+++ b/nixos/modules/services/desktops/tumbler.nix
@@ -0,0 +1,50 @@
+# Tumbler
+
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.tumbler;
+  tumbler = cfg.package;
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.tumbler = {
+
+      enable = mkEnableOption "Tumbler, A D-Bus thumbnailer service";
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.xfce4-13.tumbler;
+        description = "Which tumbler package to use";
+        example = pkgs.xfce4-12.tumbler;
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    environment.systemPackages = [
+      tumbler
+    ];
+
+    services.dbus.packages = [
+      tumbler
+    ];
+
+  };
+
+}
diff --git a/nixos/modules/services/hardware/acpid.nix b/nixos/modules/services/hardware/acpid.nix
index 0f05876aee3..4c97485d972 100644
--- a/nixos/modules/services/hardware/acpid.nix
+++ b/nixos/modules/services/hardware/acpid.nix
@@ -21,7 +21,7 @@ let
     };
   };
 
-  acpiConfDir = pkgs.runCommand "acpi-events" {}
+  acpiConfDir = pkgs.runCommand "acpi-events" { preferLocalBuild = true; }
     ''
       mkdir -p $out
       ${
diff --git a/nixos/modules/services/hardware/bolt.nix b/nixos/modules/services/hardware/bolt.nix
new file mode 100644
index 00000000000..32b60af0603
--- /dev/null
+++ b/nixos/modules/services/hardware/bolt.nix
@@ -0,0 +1,34 @@
+# Thunderbolt 3 device manager
+
+{ config, lib, pkgs, ...}:
+
+with lib;
+
+{
+  options = {
+
+    services.hardware.bolt = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Whether to enable Bolt, a userspace daemon to enable
+          security levels for Thunderbolt 3 on GNU/Linux.
+
+          Bolt is used by GNOME 3 to handle Thunderbolt settings.
+        '';
+      };
+
+    };
+
+  };
+
+  config = mkIf config.services.hardware.bolt.enable {
+
+    environment.systemPackages = [ pkgs.bolt ];
+    services.udev.packages = [ pkgs.bolt ];
+    systemd.packages = [ pkgs.bolt ];
+
+  };
+}
diff --git a/nixos/modules/services/hardware/tlp.nix b/nixos/modules/services/hardware/tlp.nix
index b894025c0fd..092ff051a04 100644
--- a/nixos/modules/services/hardware/tlp.nix
+++ b/nixos/modules/services/hardware/tlp.nix
@@ -17,6 +17,7 @@ tlp = pkgs.tlp.override {
 confFile = pkgs.runCommand "tlp"
   { config = cfg.extraConfig;
     passAsFile = [ "config" ];
+    preferLocalBuild = true;
   }
   ''
     cat ${tlp}/etc/default/tlp > $out
diff --git a/nixos/modules/services/logging/logcheck.nix b/nixos/modules/services/logging/logcheck.nix
index 1477d273d5e..9c64160e92b 100644
--- a/nixos/modules/services/logging/logcheck.nix
+++ b/nixos/modules/services/logging/logcheck.nix
@@ -5,7 +5,7 @@ with lib;
 let
   cfg = config.services.logcheck;
 
-  defaultRules = pkgs.runCommand "logcheck-default-rules" {} ''
+  defaultRules = pkgs.runCommand "logcheck-default-rules" { preferLocalBuild = true; } ''
                    cp -prd ${pkgs.logcheck}/etc/logcheck $out
                    chmod u+w $out
                    rm -r $out/logcheck.*
diff --git a/nixos/modules/services/logging/logstash.nix b/nixos/modules/services/logging/logstash.nix
index aa019d855ea..9b707e9deb5 100644
--- a/nixos/modules/services/logging/logstash.nix
+++ b/nixos/modules/services/logging/logstash.nix
@@ -27,7 +27,10 @@ let
 
   logstashSettingsYml = pkgs.writeText "logstash.yml" cfg.extraSettings;
 
-  logstashSettingsDir = pkgs.runCommand "logstash-settings" {inherit logstashSettingsYml;} ''
+  logstashSettingsDir = pkgs.runCommand "logstash-settings" {
+      inherit logstashSettingsYml;
+      preferLocalBuild = true;
+    } ''
     mkdir -p $out
     ln -s $logstashSettingsYml $out/logstash.yml
   '';
diff --git a/nixos/modules/services/mail/opensmtpd.nix b/nixos/modules/services/mail/opensmtpd.nix
index 4276552d4f0..a870550ba50 100644
--- a/nixos/modules/services/mail/opensmtpd.nix
+++ b/nixos/modules/services/mail/opensmtpd.nix
@@ -8,7 +8,7 @@ let
   conf = pkgs.writeText "smtpd.conf" cfg.serverConfiguration;
   args = concatStringsSep " " cfg.extraServerArgs;
 
-  sendmail = pkgs.runCommand "opensmtpd-sendmail" {} ''
+  sendmail = pkgs.runCommand "opensmtpd-sendmail" { preferLocalBuild = true; } ''
     mkdir -p $out/bin
     ln -s ${cfg.package}/sbin/smtpctl $out/bin/sendmail
   '';
diff --git a/nixos/modules/services/misc/beanstalkd.nix b/nixos/modules/services/misc/beanstalkd.nix
new file mode 100644
index 00000000000..8a3e0ab1949
--- /dev/null
+++ b/nixos/modules/services/misc/beanstalkd.nix
@@ -0,0 +1,52 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.beanstalkd;
+  pkg = pkgs.beanstalkd;
+in
+
+{
+  # interface
+
+  options = {
+    services.beanstalkd = {
+      enable = mkEnableOption "Enable the Beanstalk work queue.";
+
+      listen = {
+        port = mkOption {
+          type = types.int;
+          description = "TCP port that will be used to accept client connections.";
+          default = 11300;
+        };
+
+        address = mkOption {
+          type = types.str;
+          description = "IP address to listen on.";
+          default = "127.0.0.1";
+          example = "0.0.0.0";
+        };
+      };
+    };
+  };
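+  # minimal usage sketch:
+  #   services.beanstalkd.enable = true;
+  #   services.beanstalkd.listen = { address = "0.0.0.0"; port = 11300; };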
+
+  # implementation
+
+  config = mkIf cfg.enable {
+
+    environment.systemPackages = [ pkg ];
+
+    systemd.services.beanstalkd = {
+      description = "Beanstalk Work Queue";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        DynamicUser = true;
+        Restart = "always";
+        ExecStart = "${pkg}/bin/beanstalkd -l ${cfg.listen.address} -p ${toString cfg.listen.port}";
+      };
+    };
+
+  };
+}
diff --git a/nixos/modules/services/misc/gitlab.nix b/nixos/modules/services/misc/gitlab.nix
index 25c258ebe13..b8617e48d8e 100644
--- a/nixos/modules/services/misc/gitlab.nix
+++ b/nixos/modules/services/misc/gitlab.nix
@@ -22,7 +22,8 @@ let
       password = cfg.databasePassword;
       username = cfg.databaseUsername;
       encoding = "utf8";
-    };
+      pool = cfg.databasePool;
+    } // cfg.extraDatabaseConfig;
   };
 
   gitalyToml = pkgs.writeText "gitaly.toml" ''
@@ -253,6 +254,18 @@ in {
         description = "Gitlab database user.";
       };
 
+      databasePool = mkOption {
+        type = types.int;
+        default = 5;
+        description = "Database connection pool size.";
+      };
+
+      extraDatabaseConfig = mkOption {
+        type = types.attrs;
+        default = {};
+        description = "Extra configuration in config/database.yml.";
+      };
+
       host = mkOption {
         type = types.str;
         default = config.networking.hostName;
diff --git a/nixos/modules/services/misc/gitolite.nix b/nixos/modules/services/misc/gitolite.nix
index b9c2a966e6f..c7f2a168f8a 100644
--- a/nixos/modules/services/misc/gitolite.nix
+++ b/nixos/modules/services/misc/gitolite.nix
@@ -110,7 +110,7 @@ in
   config = mkIf cfg.enable (
   let
     manageGitoliteRc = cfg.extraGitoliteRc != "";
-    rcDir = pkgs.runCommand "gitolite-rc" { } rcDirScript;
+    rcDir = pkgs.runCommand "gitolite-rc" { preferLocalBuild = true; } rcDirScript;
     rcDirScript =
       ''
         mkdir "$out"
diff --git a/nixos/modules/services/misc/headphones.nix b/nixos/modules/services/misc/headphones.nix
new file mode 100644
index 00000000000..4a77045be28
--- /dev/null
+++ b/nixos/modules/services/misc/headphones.nix
@@ -0,0 +1,87 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  name = "headphones";
+
+  cfg = config.services.headphones;
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+    services.headphones = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = "Whether to enable the headphones server.";
+      };
+      dataDir = mkOption {
+        type = types.path;
+        default = "/var/lib/${name}";
+        description = "Path where to store data files.";
+      };
+      configFile = mkOption {
+        type = types.path;
+        default = "${cfg.dataDir}/config.ini";
+        description = "Path to config file.";
+      };
+      host = mkOption {
+        type = types.str;
+        default = "localhost";
+        description = "Host to listen on.";
+      };
+      port = mkOption {
+        type = types.ints.u16;
+        default = 8181;
+        description = "Port to bind to.";
+      };
+      user = mkOption {
+        type = types.str;
+        default = name;
+        description = "User to run the service as";
+      };
+      group = mkOption {
+        type = types.str;
+        default = name;
+        description = "Group to run the service as";
+      };
+    };
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    users.users = optionalAttrs (cfg.user == name) (singleton {
+      name = name;
+      uid = config.ids.uids.headphones;
+      group = cfg.group;
+      description = "headphones user";
+      home = cfg.dataDir;
+      createHome = true;
+    });
+
+    users.groups = optionalAttrs (cfg.group == name) (singleton {
+      name = name;
+      gid = config.ids.gids.headphones;
+    });
+
+    systemd.services.headphones = {
+        description = "Headphones Server";
+        wantedBy    = [ "multi-user.target" ];
+        after = [ "network.target" ];
+        serviceConfig = {
+          User = cfg.user;
+          Group = cfg.group;
+          ExecStart = "${pkgs.headphones}/bin/headphones --datadir ${cfg.dataDir} --config ${cfg.configFile} --host ${cfg.host} --port ${toString cfg.port}";
+        };
+    };
+  };
+}
diff --git a/nixos/modules/services/misc/home-assistant.nix b/nixos/modules/services/misc/home-assistant.nix
index 4ccfa22c89e..95a7f2ea989 100644
--- a/nixos/modules/services/misc/home-assistant.nix
+++ b/nixos/modules/services/misc/home-assistant.nix
@@ -19,7 +19,7 @@ let
     ${pkgs.remarshal}/bin/json2yaml -i ${lovelaceConfigJSON} -o $out
   '';
 
-  availableComponents = pkgs.home-assistant.availableComponents;
+  availableComponents = cfg.package.availableComponents;
 
   # Given component "parentConfig.platform", returns whether config.parentConfig
   # is a list containing a set with set.platform == "platform".
diff --git a/nixos/modules/services/misc/jackett.nix b/nixos/modules/services/misc/jackett.nix
index 8d1b3d225a4..b18ce2b1f81 100644
--- a/nixos/modules/services/misc/jackett.nix
+++ b/nixos/modules/services/misc/jackett.nix
@@ -4,11 +4,36 @@ with lib;
 
 let
   cfg = config.services.jackett;
+
 in
 {
   options = {
     services.jackett = {
       enable = mkEnableOption "Jackett";
+
+      dataDir = mkOption {
+        type = types.str;
+        default = "/var/lib/jackett/.config/Jackett";
+        description = "The directory where Jackett stores its data files.";
+      };
+
+      openFirewall = mkOption {
+        type = types.bool;
+        default = false;
+        description = "Open ports in the firewall for the Jackett web interface.";
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "jackett";
+        description = "User account under which Jackett runs.";
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "jackett";
+        description = "Group under which Jackett runs.";
+      };
     };
   };
 
@@ -18,30 +43,38 @@ in
       after = [ "network.target" ];
       wantedBy = [ "multi-user.target" ];
       preStart = ''
-        test -d /var/lib/jackett/ || {
-          echo "Creating jackett data directory in /var/lib/jackett/"
-          mkdir -p /var/lib/jackett/
+        test -d ${cfg.dataDir} || {
+          echo "Creating jackett data directory in ${cfg.dataDir}"
+          mkdir -p ${cfg.dataDir}
         }
-        chown -R jackett:jackett /var/lib/jackett/
-        chmod 0700 /var/lib/jackett/
+        chown -R ${cfg.user}:${cfg.group} ${cfg.dataDir}
+        chmod 0700 ${cfg.dataDir}
       '';
 
       serviceConfig = {
         Type = "simple";
-        User = "jackett";
-        Group = "jackett";
+        User = cfg.user;
+        Group = cfg.group;
         PermissionsStartOnly = "true";
-        ExecStart = "${pkgs.jackett}/bin/Jackett";
+        ExecStart = "${pkgs.jackett}/bin/Jackett --NoUpdates --DataFolder '${cfg.dataDir}'";
         Restart = "on-failure";
       };
     };
 
-    users.users.jackett = {
-      uid = config.ids.uids.jackett;
-      home = "/var/lib/jackett";
-      group = "jackett";
+    networking.firewall = mkIf cfg.openFirewall {
+      allowedTCPPorts = [ 9117 ];
+    };
+
+    users.users = mkIf (cfg.user == "jackett") {
+      jackett = {
+        group = cfg.group;
+        home = cfg.dataDir;
+        uid = config.ids.uids.jackett;
+      };
     };
-    users.groups.jackett.gid = config.ids.gids.jackett;
 
+    users.groups = mkIf (cfg.group == "jackett") {
+      jackett.gid = config.ids.gids.jackett;
+    };
   };
 }
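
A minimal sketch using the new options (the dataDir shown is just the default made explicit):

    services.jackett = {
      enable = true;
      openFirewall = true;  # opens TCP 9117 for the web interface
      dataDir = "/var/lib/jackett/.config/Jackett";
    };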
diff --git a/nixos/modules/services/misc/matrix-synapse.nix b/nixos/modules/services/misc/matrix-synapse.nix
index a01e34d7362..87999c3614f 100644
--- a/nixos/modules/services/misc/matrix-synapse.nix
+++ b/nixos/modules/services/misc/matrix-synapse.nix
@@ -651,16 +651,12 @@ in {
 
     services.postgresql.enable = mkIf usePostgresql (mkDefault true);
 
-    systemd.services.matrix-synapse =
-    let
-      python = (pkgs.python3.withPackages (ps: with ps; [ (ps.toPythonModule cfg.package) ]));
-    in
-    {
+    systemd.services.matrix-synapse = {
       description = "Synapse Matrix homeserver";
       after = [ "network.target" "postgresql.service" ];
       wantedBy = [ "multi-user.target" ];
       preStart = ''
-        ${python.interpreter} -m synapse.app.homeserver \
+        ${cfg.package}/bin/homeserver \
           --config-path ${configFile} \
           --keys-directory ${cfg.dataDir} \
           --generate-keys
@@ -691,7 +687,7 @@ in {
         WorkingDirectory = cfg.dataDir;
         PermissionsStartOnly = true;
         ExecStart = ''
-          ${python.interpreter} -m synapse.app.homeserver \
+          ${cfg.package}/bin/homeserver \
             ${ concatMapStringsSep "\n  " (x: "--config-path ${x} \\") ([ configFile ] ++ cfg.extraConfigFiles) }
             --keys-directory ${cfg.dataDir}
         '';
diff --git a/nixos/modules/services/misc/nix-daemon.nix b/nixos/modules/services/misc/nix-daemon.nix
index 9a8116a03e8..665215822af 100644
--- a/nixos/modules/services/misc/nix-daemon.nix
+++ b/nixos/modules/services/misc/nix-daemon.nix
@@ -33,7 +33,7 @@ let
       sh = pkgs.runtimeShell;
       binshDeps = pkgs.writeReferencesToFile sh;
     in
-      pkgs.runCommand "nix.conf" { extraOptions = cfg.extraOptions; } (''
+      pkgs.runCommand "nix.conf" { preferLocalBuild = true; extraOptions = cfg.extraOptions; } (''
         ${optionalString (!isNix20) ''
           extraPaths=$(for i in $(cat ${binshDeps}); do if test -d $i; then echo $i; fi; done)
         ''}
diff --git a/nixos/modules/services/misc/radarr.nix b/nixos/modules/services/misc/radarr.nix
index 1a9fad3883c..9ab26d84832 100644
--- a/nixos/modules/services/misc/radarr.nix
+++ b/nixos/modules/services/misc/radarr.nix
@@ -4,11 +4,36 @@ with lib;
 
 let
   cfg = config.services.radarr;
+
 in
 {
   options = {
     services.radarr = {
       enable = mkEnableOption "Radarr";
+
+      dataDir = mkOption {
+        type = types.str;
+        default = "/var/lib/radarr/.config/Radarr";
+        description = "The directory where Radarr stores its data files.";
+      };
+
+      openFirewall = mkOption {
+        type = types.bool;
+        default = false;
+        description = "Open ports in the firewall for the Radarr web interface.";
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "radarr";
+        description = "User account under which Radarr runs.";
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "radarr";
+        description = "Group under which Radarr runs.";
+      };
     };
   };
 
@@ -18,30 +43,38 @@ in
       after = [ "network.target" ];
       wantedBy = [ "multi-user.target" ];
       preStart = ''
-        test -d /var/lib/radarr/ || {
-          echo "Creating radarr data directory in /var/lib/radarr/"
-          mkdir -p /var/lib/radarr/
+        test -d ${cfg.dataDir} || {
+          echo "Creating radarr data directory in ${cfg.dataDir}"
+          mkdir -p ${cfg.dataDir}
         }
-        chown -R radarr:radarr /var/lib/radarr/
-        chmod 0700 /var/lib/radarr/
+        chown -R ${cfg.user}:${cfg.group} ${cfg.dataDir}
+        chmod 0700 ${cfg.dataDir}
       '';
 
       serviceConfig = {
         Type = "simple";
-        User = "radarr";
-        Group = "radarr";
+        User = cfg.user;
+        Group = cfg.group;
         PermissionsStartOnly = "true";
-        ExecStart = "${pkgs.radarr}/bin/Radarr";
+        ExecStart = "${pkgs.radarr}/bin/Radarr -nobrowser -data='${cfg.dataDir}'";
         Restart = "on-failure";
       };
     };
 
-    users.users.radarr = {
-      uid = config.ids.uids.radarr;
-      home = "/var/lib/radarr";
-      group = "radarr";
+    networking.firewall = mkIf cfg.openFirewall {
+      allowedTCPPorts = [ 7878 ];
+    };
+
+    users.users = mkIf (cfg.user == "radarr") {
+      radarr = {
+        group = cfg.group;
+        home = cfg.dataDir;
+        uid = config.ids.uids.radarr;
+      };
     };
-    users.groups.radarr.gid = config.ids.gids.radarr;
 
+    users.groups = mkIf (cfg.group == "radarr") {
+      radarr.gid = config.ids.gids.radarr;
+    };
   };
 }
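
The same pattern applies to the new Radarr options; a minimal sketch:

    services.radarr = {
      enable = true;
      openFirewall = true;  # opens TCP 7878 for the web interface
      user = "radarr";
      group = "radarr";
    };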
diff --git a/nixos/modules/services/misc/taskserver/default.nix b/nixos/modules/services/misc/taskserver/default.nix
index 5f97abf1871..483bc99ad94 100644
--- a/nixos/modules/services/misc/taskserver/default.nix
+++ b/nixos/modules/services/misc/taskserver/default.nix
@@ -109,7 +109,7 @@ let
   nixos-taskserver = pkgs.pythonPackages.buildPythonApplication {
     name = "nixos-taskserver";
 
-    src = pkgs.runCommand "nixos-taskserver-src" {} ''
+    src = pkgs.runCommand "nixos-taskserver-src" { preferLocalBuild = true; } ''
       mkdir -p "$out"
       cat "${pkgs.substituteAll {
         src = ./helper-tool.py;
diff --git a/nixos/modules/services/misc/zoneminder.nix b/nixos/modules/services/misc/zoneminder.nix
index a40e9e84613..ae7de7850d9 100644
--- a/nixos/modules/services/misc/zoneminder.nix
+++ b/nixos/modules/services/misc/zoneminder.nix
@@ -205,15 +205,13 @@ in {
 
       mysql = lib.mkIf cfg.database.createLocally {
         ensureDatabases = [ cfg.database.name ];
-        ensureUsers = {
+        ensureUsers = [{
           name = cfg.database.username;
-          ensurePermissions = [
-            { "${cfg.database.name}.*" = "ALL PRIVILEGES"; }
-          ];
+          ensurePermissions = { "${cfg.database.name}.*" = "ALL PRIVILEGES"; };
           initialDatabases = [
             { inherit (cfg.database) name; schema = "${pkg}/share/zoneminder/db/zm_create.sql"; }
           ];
-        };
+        }];
       };
 
       nginx = lib.mkIf useNginx {
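
The fix converts ensureUsers from an attrset to the list form that the MySQL module expects; the general shape is (names illustrative):

    services.mysql.ensureUsers = [{
      name = "zoneminder";
      ensurePermissions = { "zoneminder.*" = "ALL PRIVILEGES"; };
    }];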
diff --git a/nixos/modules/services/monitoring/apcupsd.nix b/nixos/modules/services/monitoring/apcupsd.nix
index 7ee870183ca..49957e65290 100644
--- a/nixos/modules/services/monitoring/apcupsd.nix
+++ b/nixos/modules/services/monitoring/apcupsd.nix
@@ -45,7 +45,7 @@ let
 
   eventToShellCmds = event: if builtins.hasAttr event cfg.hooks then (shellCmdsForEventScript event (builtins.getAttr event cfg.hooks)) else "";
 
-  scriptDir = pkgs.runCommand "apcupsd-scriptdir" {} (''
+  scriptDir = pkgs.runCommand "apcupsd-scriptdir" { preferLocalBuild = true; } (''
     mkdir "$out"
     # Copy SCRIPTDIR from apcupsd package
     cp -r ${pkgs.apcupsd}/etc/apcupsd/* "$out"/
diff --git a/nixos/modules/services/monitoring/graphite.nix b/nixos/modules/services/monitoring/graphite.nix
index cdc98b407e9..f59bc56962b 100644
--- a/nixos/modules/services/monitoring/graphite.nix
+++ b/nixos/modules/services/monitoring/graphite.nix
@@ -9,8 +9,10 @@ let
   dataDir = cfg.dataDir;
   staticDir = cfg.dataDir + "/static";
 
-  graphiteLocalSettingsDir = pkgs.runCommand "graphite_local_settings"
-    {inherit graphiteLocalSettings;} ''
+  graphiteLocalSettingsDir = pkgs.runCommand "graphite_local_settings" {
+      inherit graphiteLocalSettings;
+      preferLocalBuild = true;
+    } ''
     mkdir -p $out
     ln -s $graphiteLocalSettings $out/graphite_local_settings.py
   '';
diff --git a/nixos/modules/services/monitoring/nagios.nix b/nixos/modules/services/monitoring/nagios.nix
index 3e1d727b416..e5496209f82 100644
--- a/nixos/modules/services/monitoring/nagios.nix
+++ b/nixos/modules/services/monitoring/nagios.nix
@@ -11,8 +11,10 @@ let
 
   nagiosObjectDefs = cfg.objectDefs;
 
-  nagiosObjectDefsDir = pkgs.runCommand "nagios-objects" {inherit nagiosObjectDefs;}
-    "mkdir -p $out; ln -s $nagiosObjectDefs $out/";
+  nagiosObjectDefsDir = pkgs.runCommand "nagios-objects" {
+      inherit nagiosObjectDefs;
+      preferLocalBuild = true;
+    } "mkdir -p $out; ln -s $nagiosObjectDefs $out/";
 
   nagiosCfgFile = pkgs.writeText "nagios.cfg"
     ''
diff --git a/nixos/modules/services/monitoring/netdata.nix b/nixos/modules/services/monitoring/netdata.nix
index 1d86c5d893d..a49555cf677 100644
--- a/nixos/modules/services/monitoring/netdata.nix
+++ b/nixos/modules/services/monitoring/netdata.nix
@@ -5,7 +5,7 @@ with lib;
 let
   cfg = config.services.netdata;
 
-  wrappedPlugins = pkgs.runCommand "wrapped-plugins" {} ''
+  wrappedPlugins = pkgs.runCommand "wrapped-plugins" { preferLocalBuild = true; } ''
     mkdir -p $out/libexec/netdata/plugins.d
     ln -s /run/wrappers/bin/apps.plugin $out/libexec/netdata/plugins.d/apps.plugin
   '';
diff --git a/nixos/modules/services/monitoring/prometheus/default.nix b/nixos/modules/services/monitoring/prometheus/default.nix
index 1b1503ab5fc..cc703573d8c 100644
--- a/nixos/modules/services/monitoring/prometheus/default.nix
+++ b/nixos/modules/services/monitoring/prometheus/default.nix
@@ -19,7 +19,7 @@ let
 
   # Pretty-print JSON to a file
   writePrettyJSON = name: x:
-    pkgs.runCommand name { } ''
+    pkgs.runCommand name { preferLocalBuild = true; } ''
       echo '${builtins.toJSON x}' | ${pkgs.jq}/bin/jq . > $out
     '';
 
diff --git a/nixos/modules/services/monitoring/scollector.nix b/nixos/modules/services/monitoring/scollector.nix
index 6ecb21d628d..fbded746a5f 100644
--- a/nixos/modules/services/monitoring/scollector.nix
+++ b/nixos/modules/services/monitoring/scollector.nix
@@ -5,7 +5,7 @@ with lib;
 let
   cfg = config.services.scollector;
 
-  collectors = pkgs.runCommand "collectors" {}
+  collectors = pkgs.runCommand "collectors" { preferLocalBuild = true; }
     ''
     mkdir -p $out
     ${lib.concatStringsSep
diff --git a/nixos/modules/services/monitoring/telegraf.nix b/nixos/modules/services/monitoring/telegraf.nix
index 6bfcd7143e1..d8786732668 100644
--- a/nixos/modules/services/monitoring/telegraf.nix
+++ b/nixos/modules/services/monitoring/telegraf.nix
@@ -7,6 +7,7 @@ let
 
   configFile = pkgs.runCommand "config.toml" {
     buildInputs = [ pkgs.remarshal ];
+    preferLocalBuild = true;
   } ''
     remarshal -if json -of toml \
       < ${pkgs.writeText "config.json" (builtins.toJSON cfg.extraConfig)} \
diff --git a/nixos/modules/services/monitoring/uptime.nix b/nixos/modules/services/monitoring/uptime.nix
index b4d3a264010..c0993f3bc2e 100644
--- a/nixos/modules/services/monitoring/uptime.nix
+++ b/nixos/modules/services/monitoring/uptime.nix
@@ -4,7 +4,8 @@ let
 
   cfg = config.services.uptime;
 
-  configDir = pkgs.runCommand "config" {} (if cfg.configFile != null then ''
+  configDir = pkgs.runCommand "config" { preferLocalBuild = true; }
+  (if cfg.configFile != null then ''
     mkdir $out
     ext=`echo ${cfg.configFile} | grep -o \\..*`
     ln -sv ${cfg.configFile} $out/default$ext
diff --git a/nixos/modules/services/network-filesystems/beegfs.nix b/nixos/modules/services/network-filesystems/beegfs.nix
index d9dde3f6bb6..86b1bb9160f 100644
--- a/nixos/modules/services/network-filesystems/beegfs.nix
+++ b/nixos/modules/services/network-filesystems/beegfs.nix
@@ -102,7 +102,10 @@ let
 
   # wrappers to beegfs tools. Avoid typing path of config files
   utilWrappers = mapAttrsToList ( name: cfg:
-      ( pkgs.runCommand "beegfs-utils-${name}" { nativeBuildInputs = [ pkgs.makeWrapper ]; } ''
+    ( pkgs.runCommand "beegfs-utils-${name}" {
+        nativeBuildInputs = [ pkgs.makeWrapper ];
+        preferLocalBuild = true;
+        } ''
         mkdir -p $out/bin
 
         makeWrapper ${pkgs.beegfs}/bin/beegfs-check-servers \
diff --git a/nixos/modules/services/network-filesystems/diod.nix b/nixos/modules/services/network-filesystems/diod.nix
index 556fad4d8ab..063bae6ddb1 100644
--- a/nixos/modules/services/network-filesystems/diod.nix
+++ b/nixos/modules/services/network-filesystems/diod.nix
@@ -153,7 +153,6 @@ in
       after = [ "network.target" ];
       serviceConfig = {
         ExecStart = "${pkgs.diod}/sbin/diod -f -c ${diodConfig}";
-        CapabilityBoundingSet = "cap_net_bind_service+=ep";
       };
     };
   };
diff --git a/nixos/modules/services/network-filesystems/ipfs.nix b/nixos/modules/services/network-filesystems/ipfs.nix
index 602cd50d8f5..d4fa1eccdf3 100644
--- a/nixos/modules/services/network-filesystems/ipfs.nix
+++ b/nixos/modules/services/network-filesystems/ipfs.nix
@@ -19,7 +19,7 @@ let
     "/var/lib/ipfs/.ipfs";
 
   # Wrapping the ipfs binary with the environment variable IPFS_PATH set to dataDir because we can't set it in the user environment
-  wrapped = runCommand "ipfs" { buildInputs = [ makeWrapper ]; } ''
+  wrapped = runCommand "ipfs" { buildInputs = [ makeWrapper ]; preferLocalBuild = true; } ''
     mkdir -p "$out/bin"
     makeWrapper "${ipfs}/bin/ipfs" "$out/bin/ipfs" \
       --set IPFS_PATH ${cfg.dataDir} \
diff --git a/nixos/modules/services/network-filesystems/openafs/client.nix b/nixos/modules/services/network-filesystems/openafs/client.nix
index 240c1392088..93d2d7fcd97 100644
--- a/nixos/modules/services/network-filesystems/openafs/client.nix
+++ b/nixos/modules/services/network-filesystems/openafs/client.nix
@@ -15,7 +15,7 @@ let
 
   clientServDB = pkgs.writeText "client-cellServDB-${cfg.cellName}" (mkCellServDB cfg.cellName cfg.cellServDB);
 
-  afsConfig = pkgs.runCommand "afsconfig" {} ''
+  afsConfig = pkgs.runCommand "afsconfig" { preferLocalBuild = true; } ''
     mkdir -p $out
     echo ${cfg.cellName} > $out/ThisCell
     cat ${cellServDB} ${clientServDB} > $out/CellServDB
@@ -198,7 +198,7 @@ in
 
     environment.etc = {
       clientCellServDB = {
-        source = pkgs.runCommand "CellServDB" {} ''
+        source = pkgs.runCommand "CellServDB" { preferLocalBuild = true; } ''
           cat ${cellServDB} ${clientServDB} > $out
         '';
         target = "openafs/CellServDB";
diff --git a/nixos/modules/services/networking/dnscache.nix b/nixos/modules/services/networking/dnscache.nix
index fc30f50317f..5051fc916d9 100644
--- a/nixos/modules/services/networking/dnscache.nix
+++ b/nixos/modules/services/networking/dnscache.nix
@@ -5,7 +5,7 @@ with lib;
 let
   cfg = config.services.dnscache;
 
-  dnscache-root = pkgs.runCommand "dnscache-root" {} ''
+  dnscache-root = pkgs.runCommand "dnscache-root" { preferLocalBuild = true; } ''
     mkdir -p $out/{servers,ip}
 
     ${concatMapStrings (ip: ''
diff --git a/nixos/modules/services/networking/flannel.nix b/nixos/modules/services/networking/flannel.nix
index b93e28e34ef..ec702cdc6ff 100644
--- a/nixos/modules/services/networking/flannel.nix
+++ b/nixos/modules/services/networking/flannel.nix
@@ -73,11 +73,35 @@ in {
       };
     };
 
+    kubeconfig = mkOption {
+      description = ''
+        Path to the kubeconfig to use for storing flannel configuration via the
+        Kubernetes API.
+      '';
+      type = types.nullOr types.path;
+      default = null;
+    };
+
     network = mkOption {
       description = " IPv4 network in CIDR format to use for the entire flannel network.";
       type = types.str;
     };
 
+    nodeName = mkOption {
+      description = ''
+        Needed when running with Kubernetes as backend, as this cannot be auto-detected.
+      '';
+      type = types.nullOr types.str;
+      default = with config.networking; (hostName + optionalString (!isNull domain) ".${domain}");
+      example = "node1.example.com";
+    };
+
+    storageBackend = mkOption {
+      description = "Determines where flannel stores its configuration at runtime";
+      type = types.enum ["etcd" "kubernetes"];
+      default = "etcd";
+    };
+
     subnetLen = mkOption {
       description = ''
         The size of the subnet allocated to each host. Defaults to 24 (i.e. /24)
@@ -122,17 +146,25 @@ in {
       after = [ "network.target" ];
       environment = {
         FLANNELD_PUBLIC_IP = cfg.publicIp;
+        FLANNELD_IFACE = cfg.iface;
+      } // optionalAttrs (cfg.storageBackend == "etcd") {
         FLANNELD_ETCD_ENDPOINTS = concatStringsSep "," cfg.etcd.endpoints;
         FLANNELD_ETCD_KEYFILE = cfg.etcd.keyFile;
         FLANNELD_ETCD_CERTFILE = cfg.etcd.certFile;
         FLANNELD_ETCD_CAFILE = cfg.etcd.caFile;
-        FLANNELD_IFACE = cfg.iface;
         ETCDCTL_CERT_FILE = cfg.etcd.certFile;
         ETCDCTL_KEY_FILE = cfg.etcd.keyFile;
         ETCDCTL_CA_FILE = cfg.etcd.caFile;
         ETCDCTL_PEERS = concatStringsSep "," cfg.etcd.endpoints;
+      } // optionalAttrs (cfg.storageBackend == "kubernetes") {
+        FLANNELD_KUBE_SUBNET_MGR = "true";
+        FLANNELD_KUBECONFIG_FILE = cfg.kubeconfig;
+        NODE_NAME = cfg.nodeName;
       };
       preStart = ''
+        mkdir -p /run/flannel
+        touch /run/flannel/docker
+      '' + optionalString (cfg.storageBackend == "etcd") ''
         echo "setting network configuration"
         until ${pkgs.etcdctl.bin}/bin/etcdctl set /coreos.com/network/config '${builtins.toJSON networkConfig}'
         do
@@ -140,15 +172,19 @@ in {
           sleep 1
         done
       '';
-      postStart = ''
-        while [ ! -f /run/flannel/subnet.env ]
-        do
-          sleep 1
-        done
-      '';
-      serviceConfig.ExecStart = "${cfg.package}/bin/flannel";
+      serviceConfig = {
+        ExecStart = "${cfg.package}/bin/flannel";
+        Restart = "always";
+        RestartSec = "10s";
+      };
     };
 
-    services.etcd.enable = mkDefault (cfg.etcd.endpoints == ["http://127.0.0.1:2379"]);
+    services.etcd.enable = mkDefault (cfg.storageBackend == "etcd" && cfg.etcd.endpoints == ["http://127.0.0.1:2379"]);
+
+    # for some reason, flannel doesn't let you configure this path
+    # see: https://github.com/coreos/flannel/blob/master/Documentation/configuration.md#configuration
+    environment.etc."kube-flannel/net-conf.json" = mkIf (cfg.storageBackend == "kubernetes") {
+      source = pkgs.writeText "net-conf.json" (builtins.toJSON networkConfig);
+    };
   };
 }
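
A minimal sketch of running flannel with the new Kubernetes storage backend (paths and the network range are illustrative):

    services.flannel = {
      enable = true;
      network = "10.244.0.0/16";
      storageBackend = "kubernetes";
      kubeconfig = "/etc/kubernetes/flannel.kubeconfig";
      nodeName = "node1.example.com";
    };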
diff --git a/nixos/modules/services/networking/hylafax/systemd.nix b/nixos/modules/services/networking/hylafax/systemd.nix
index 91d9c1a37da..ef177e4be34 100644
--- a/nixos/modules/services/networking/hylafax/systemd.nix
+++ b/nixos/modules/services/networking/hylafax/systemd.nix
@@ -41,7 +41,7 @@ let
           "$out/config.${name}"
       '';
     in
-      pkgs.runCommand "hylafax-config-modems" {}
+      pkgs.runCommand "hylafax-config-modems" { preferLocalBuild = true; }
       ''mkdir --parents "$out/" ${concatStringsSep "\n" (mapModems mkLine)}'';
 
   setupSpoolScript = pkgs.substituteAll {
diff --git a/nixos/modules/services/networking/nix-serve.nix b/nixos/modules/services/networking/nix-serve.nix
index e83cad949ae..ca458d089dc 100644
--- a/nixos/modules/services/networking/nix-serve.nix
+++ b/nixos/modules/services/networking/nix-serve.nix
@@ -31,6 +31,15 @@ in
         default = null;
         description = ''
           The path to the file used for signing derivation data.
+          Generate with:
+
+          ```
+          nix-store --generate-binary-cache-key key-name secret-key-file public-key-file
+          ```
+
+          Make sure user `nix-serve` has read access to the private key file.
+
+          For more details see <citerefentry><refentrytitle>nix-store</refentrytitle><manvolnum>1</manvolnum></citerefentry>.
         '';
       };
 
diff --git a/nixos/modules/services/networking/prayer.nix b/nixos/modules/services/networking/prayer.nix
index f63f86496be..c936417e68c 100644
--- a/nixos/modules/services/networking/prayer.nix
+++ b/nixos/modules/services/networking/prayer.nix
@@ -25,7 +25,7 @@ let
     ${cfg.extraConfig}
   '';
 
-  prayerCfg = pkgs.runCommand "prayer.cf" { } ''
+  prayerCfg = pkgs.runCommand "prayer.cf" { preferLocalBuild = true; } ''
     # We have to remove the http_port 80, or it will start a server there
     cat ${prayer}/etc/prayer.cf | grep -v http_port > $out
     cat ${prayerExtraCfg} >> $out
diff --git a/nixos/modules/services/networking/quassel.nix b/nixos/modules/services/networking/quassel.nix
index d850bb8b130..b223a48e055 100644
--- a/nixos/modules/services/networking/quassel.nix
+++ b/nixos/modules/services/networking/quassel.nix
@@ -23,6 +23,22 @@ in
         '';
       };
 
+      certificateFile = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = ''
+          Path to the certificate used for SSL connections with clients.
+        '';
+      };
+
+      requireSSL = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Require SSL for connections from clients.
+        '';
+      };
+
       package = mkOption {
         type = types.package;
         default = pkgs.quasselDaemon;
@@ -71,6 +87,10 @@ in
   ###### implementation
 
   config = mkIf cfg.enable {
+    assertions = [
+      { assertion = cfg.requireSSL -> cfg.certificateFile != null;
+        message = "Quassel needs a certificate file in order to require SSL";
+      }];
 
     users.users = mkIf (cfg.user == null) [
       { name = "quassel";
@@ -98,7 +118,13 @@ in
 
         serviceConfig =
         {
-          ExecStart = "${quassel}/bin/quasselcore --listen=${concatStringsSep '','' cfg.interfaces} --port=${toString cfg.portNumber} --configdir=${cfg.dataDir}";
+          ExecStart = concatStringsSep " " ([
+            "${quassel}/bin/quasselcore"
+            "--listen=${concatStringsSep "," cfg.interfaces}"
+            "--port=${toString cfg.portNumber}"
+            "--configdir=${cfg.dataDir}"
+          ] ++ optional cfg.requireSSL "--require-ssl"
+            ++ optional (cfg.certificateFile != null) "--ssl-cert=${cfg.certificateFile}");
           User = user;
           PermissionsStartOnly = true;
         };
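
With the new options, a core that only accepts SSL connections looks like this (certificate path illustrative; the assertion above guarantees certificateFile is set whenever requireSSL is):

    services.quassel = {
      enable = true;
      requireSSL = true;
      certificateFile = "/var/lib/quassel/quasselCert.pem";
    };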
diff --git a/nixos/modules/services/networking/shout.nix b/nixos/modules/services/networking/shout.nix
index 1ea676d0f92..f511a9af256 100644
--- a/nixos/modules/services/networking/shout.nix
+++ b/nixos/modules/services/networking/shout.nix
@@ -6,7 +6,7 @@ let
   cfg = config.services.shout;
   shoutHome = "/var/lib/shout";
 
-  defaultConfig = pkgs.runCommand "config.js" {} ''
+  defaultConfig = pkgs.runCommand "config.js" { preferLocalBuild = true; } ''
     EDITOR=true ${pkgs.shout}/bin/shout config --home $PWD
     mv config.js $out
   '';
diff --git a/nixos/modules/services/networking/xrdp.nix b/nixos/modules/services/networking/xrdp.nix
index a1c5d879f3c..cc18f6d0064 100644
--- a/nixos/modules/services/networking/xrdp.nix
+++ b/nixos/modules/services/networking/xrdp.nix
@@ -4,7 +4,7 @@ with lib;
 
 let
   cfg = config.services.xrdp;
-  confDir = pkgs.runCommand "xrdp.conf" { } ''
+  confDir = pkgs.runCommand "xrdp.conf" { preferLocalBuild = true; } ''
     mkdir $out
 
     cp ${cfg.package}/etc/xrdp/{km-*,xrdp,sesman,xrdp_keyboard}.ini $out
diff --git a/nixos/modules/services/printing/cupsd.nix b/nixos/modules/services/printing/cupsd.nix
index 3a43ebbb889..854c76cc0a1 100644
--- a/nixos/modules/services/printing/cupsd.nix
+++ b/nixos/modules/services/printing/cupsd.nix
@@ -11,8 +11,9 @@ let
   avahiEnabled = config.services.avahi.enable;
   polkitEnabled = config.security.polkit.enable;
 
-  additionalBackends = pkgs.runCommand "additional-cups-backends" { }
-    ''
+  additionalBackends = pkgs.runCommand "additional-cups-backends" {
+      preferLocalBuild = true;
+    } ''
       mkdir -p $out
       if [ ! -e ${cups.out}/lib/cups/backend/smb ]; then
         mkdir -p $out/lib/cups/backend
diff --git a/nixos/modules/services/ttys/kmscon.nix b/nixos/modules/services/ttys/kmscon.nix
index 82b6a51028e..dc37f9bee4b 100644
--- a/nixos/modules/services/ttys/kmscon.nix
+++ b/nixos/modules/services/ttys/kmscon.nix
@@ -82,7 +82,7 @@ in {
       X-RestartIfChanged=false
     '';
 
-    systemd.units."autovt@.service".unit = pkgs.runCommand "unit" { }
+    systemd.units."autovt@.service".unit = pkgs.runCommand "unit" { preferLocalBuild = true; }
         ''
           mkdir -p $out
           ln -s ${config.systemd.units."kmsconvt@.service".unit}/kmsconvt@.service $out/autovt@.service
diff --git a/nixos/modules/services/web-apps/codimd.nix b/nixos/modules/services/web-apps/codimd.nix
index a0af28eac7c..56e1de17e3c 100644
--- a/nixos/modules/services/web-apps/codimd.nix
+++ b/nixos/modules/services/web-apps/codimd.nix
@@ -6,7 +6,7 @@ let
   cfg = config.services.codimd;
 
   prettyJSON = conf:
-    pkgs.runCommand "codimd-config.json" { } ''
+    pkgs.runCommand "codimd-config.json" { preferLocalBuild = true; } ''
       echo '${builtins.toJSON conf}' | ${pkgs.jq}/bin/jq \
         '{production:del(.[]|nulls)|del(.[][]?|nulls)}' > $out
     '';
diff --git a/nixos/modules/services/web-apps/icingaweb2/icingaweb2.nix b/nixos/modules/services/web-apps/icingaweb2/icingaweb2.nix
new file mode 100644
index 00000000000..ccaa2cff1c2
--- /dev/null
+++ b/nixos/modules/services/web-apps/icingaweb2/icingaweb2.nix
@@ -0,0 +1,626 @@
+{ config, lib, pkgs, ... }: with lib; let
+  cfg = config.services.icingaweb2;
+  poolName = "icingaweb2";
+  phpfpmSocketName = "/var/run/phpfpm/${poolName}.sock";
+
+  formatBool = b: if b then "1" else "0";
+
+  configIni = let
+    config = cfg.generalConfig;
+  in ''
+    [global]
+    show_stacktraces = "${formatBool config.showStacktraces}"
+    show_application_state_messages = "${formatBool config.showApplicationStateMessages}"
+    module_path = "${pkgs.icingaweb2}/modules${optionalString (builtins.length config.modulePath > 0) ":${concatStringsSep ":" config.modulePath}"}"
+    config_backend = "${config.configBackend}"
+    ${optionalString (config.configBackend == "db") ''config_resource = "${config.configResource}"''}
+
+    [logging]
+    log = "${config.log}"
+    ${optionalString (config.log != "none") ''level = "${config.logLevel}"''}
+    ${optionalString (config.log == "php" || config.log == "syslog") ''application = "${config.logApplication}"''}
+    ${optionalString (config.log == "syslog") ''facility = "${config.logFacility}"''}
+    ${optionalString (config.log == "file") ''file = "${config.logFile}"''}
+
+    [themes]
+    default = "${config.themeDefault}"
+    disabled = "${formatBool config.themeDisabled}"
+
+    [authentication]
+    ${optionalString (config.authDefaultDomain != null) ''default_domain = "${config.authDefaultDomain}"''}
+  '';
+
+  resourcesIni = concatStringsSep "\n" (mapAttrsToList (name: config: ''
+    [${name}]
+    type = "${config.type}"
+    ${optionalString (config.type == "db") ''
+      db = "${config.db}"
+      host = "${config.host}"
+      ${optionalString (config.port != null) ''port = "${toString config.port}"''}
+      username = "${config.username}"
+      password = "${config.password}"
+      dbname = "${config.dbname}"
+      ${optionalString (config.charset != null) ''charset = "${config.charset}"''}
+      use_ssl = "${formatBool config.useSSL}"
+      ${optionalString (config.sslCert != null) ''ssl_cert = "${config.sslCert}"''}
+      ${optionalString (config.sslKey != null) ''ssl_key = "${config.sslKey}"''}
+      ${optionalString (config.sslCA != null) ''ssl_ca = "${config.sslCA}"''}
+      ${optionalString (config.sslCApath != null) ''ssl_capath = "${config.sslCApath}"''}
+      ${optionalString (config.sslCipher != null) ''ssl_cipher = "${config.sslCipher}"''}
+    ''}
+    ${optionalString (config.type == "ldap") ''
+      hostname = "${config.host}"
+      ${optionalString (config.port != null) ''port = "${toString config.port}"''}
+      root_dn = "${config.rootDN}"
+      bind_dn = "${config.username}"
+      bind_pw = "${config.password}"
+      encryption = "${config.ldapEncryption}"
+      timeout = "${toString config.ldapTimeout}"
+    ''}
+    ${optionalString (config.type == "ssh") ''
+      user = "${config.username}"
+      private_key = "${config.sshPrivateKey}"
+    ''}
+
+  '') cfg.resources);
+
+  authenticationIni = concatStringsSep "\n" (mapAttrsToList (name: config: ''
+    [${name}]
+    backend = "${config.backend}"
+    ${optionalString (config.domain != null) ''domain = "${config.domain}"''}
+    ${optionalString (config.backend == "external" && config.externalStripRegex != null) ''strip_username_regexp = "${config.externalStripRegex}"''}
+    ${optionalString (config.backend != "external") ''resource = "${config.resource}"''}
+    ${optionalString (config.backend == "ldap" || config.backend == "msldap") ''
+      ${optionalString (config.ldapUserClass != null) ''user_class = "${config.ldapUserClass}"''}
+      ${optionalString (config.ldapUserNameAttr != null) ''user_name_attribute = "${config.ldapUserNameAttr}"''}
+      ${optionalString (config.ldapFilter != null) ''filter = "${config.ldapFilter}"''}
+    ''}
+  '') cfg.authentications);
+
+  groupsIni = concatStringsSep "\n" (mapAttrsToList (name: config: ''
+    [${name}]
+    backend = "${config.backend}"
+    resource = "${config.resource}"
+    ${optionalString (config.backend != "db") ''
+      ${optionalString (config.ldapUserClass != null) ''user_class = "${config.ldapUserClass}"''}
+      ${optionalString (config.ldapUserNameAttr != null) ''user_name_attribute = "${config.ldapUserNameAttr}"''}
+      ${optionalString (config.ldapGroupClass != null) ''group_class = "${config.ldapGroupClass}"''}
+      ${optionalString (config.ldapGroupNameAttr != null) ''group_name_attribute = "${config.ldapGroupNameAttr}"''}
+      ${optionalString (config.ldapGroupFilter != null) ''group_filter = "${config.ldapGroupFilter}"''}
+    ''}
+    ${optionalString (config.backend == "msldap" && config.ldapNestedSearch) ''nested_group_search = "1"''}
+  '') cfg.groupBackends);
+
+  rolesIni = let
+    optionalList = var: attribute: optionalString (builtins.length var > 0) ''${attribute} = "${concatStringsSep "," var}"'';
+  in concatStringsSep "\n" (mapAttrsToList (name: config: ''
+    [${name}]
+    ${optionalList config.users "users"}
+    ${optionalList config.groups "groups"}
+    ${optionalList config.permissions "permissions"}
+    ${concatStringsSep "\n" (mapAttrsToList (key: value: optionalList value key) config.extraAssignments)}
+  '') cfg.roles);
+
+in {
+  options.services.icingaweb2 = with types; {
+    enable = mkEnableOption "the icingaweb2 web interface";
+
+    pool = mkOption {
+      type = str;
+      default = "${poolName}";
+      description = ''
+         Name of existing PHP-FPM pool that is used to run Icingaweb2.
+         If not specified, a pool will automatically created with default values.
+      '';
+    };
+
+    virtualHost = mkOption {
+      type = nullOr str;
+      default = "icingaweb2";
+      description = ''
+        Name of the nginx virtualhost to use and set up. If null, no virtualhost is set up.
+      '';
+    };
+
+    timezone = mkOption {
+      type = str;
+      default = "UTC";
+      example = "Europe/Berlin";
+      description = "PHP-compliant timezone specification";
+    };
+
+    modules = {
+      doc.enable = mkEnableOption "the icingaweb2 doc module";
+      migrate.enable = mkEnableOption "the icingaweb2 migrate module";
+      setup.enable = mkEnableOption "the icingaweb2 setup module";
+      test.enable = mkEnableOption "the icingaweb2 test module";
+      translation.enable = mkEnableOption "the icingaweb2 translation module";
+    };
+
+    modulePackages = mkOption {
+      type = attrsOf package;
+      default = {};
+      example = literalExample ''
+        {
+          "snow" = pkgs.icingaweb2Modules.theme-snow;
+        }
+      '';
+      description = ''
+        Name-package attrset of Icingaweb 2 module packages to enable.
+
+        If you enable modules manually (e.g. via the web ui), they will not be touched.
+      '';
+    };
+
+    generalConfig = {
+      mutable = mkOption {
+        type = bool;
+        default = false;
+        description = ''
+          Make config.ini mutable (e.g. via the web interface).
+          Note that you need to update module_path manually.
+        '';
+      };
+
+      showStacktraces = mkOption {
+        type = bool;
+        default = true;
+        description = "Enable stack traces in the Web UI";
+      };
+
+      showApplicationStateMessages = mkOption {
+        type = bool;
+        default = true;
+        description = "Enable application state messages in the Web UI";
+      };
+
+      modulePath = mkOption {
+        type = listOf str;
+        default = [];
+        description = "List of additional module search paths";
+      };
+
+      configBackend = mkOption {
+        type = enum [ "ini" "db" "none" ];
+        default = "db";
+        description = "Where to store user preferences";
+      };
+
+      configResource = mkOption {
+        type = nullOr str;
+        default = null;
+        description = "Database resource where user preferences are stored (if they are stored in a database)";
+      };
+
+      log = mkOption {
+        type = enum [ "syslog" "php" "file" "none" ];
+        default = "syslog";
+        description = "Logging target";
+      };
+
+      logLevel = mkOption {
+        type = enum [ "ERROR" "WARNING" "INFO" "DEBUG" ];
+        default = "ERROR";
+        description = "Maximum logging level to emit";
+      };
+
+      logApplication = mkOption {
+        type = str;
+        default = "icingaweb2";
+        description = "Application name to log under (syslog and php log)";
+      };
+
+      logFacility = mkOption {
+        type = enum [ "user" "local0" "local1" "local2" "local3" "local4" "local5" "local6" "local7" ];
+        default = "user";
+        description = "Syslog facility to log to";
+      };
+
+      logFile = mkOption {
+        type = str;
+        default = "/var/log/icingaweb2/icingaweb2.log";
+        description = "File to log to";
+      };
+
+      themeDefault = mkOption {
+        type = str;
+        default = "Icinga";
+        description = "Name of the default theme";
+      };
+
+      themeDisabled = mkOption {
+        type = bool;
+        default = false;
+        description = "Disallow users to change the theme";
+      };
+
+      authDefaultDomain = mkOption {
+        type = nullOr str;
+        default = null;
+        description = "Domain for users logging in without a qualified domain";
+      };
+    };
+
+    mutableResources = mkOption {
+      type = bool;
+      default = false;
+      description = "Make resources.ini mutable (e.g. via the web interface)";
+    };
+
+    resources = mkOption {
+      default = {};
+      description = "Icingaweb 2 resources to define";
+      type = attrsOf (submodule ({ name, ... }: {
+        options = {
+          name = mkOption {
+            visible = false;
+            default = name;
+            type = str;
+            description = "Name of this resource";
+          };
+
+          type = mkOption {
+            type = enum [ "db" "ldap" "ssh" ];
+            default = "db";
+            description = "Type of this resouce";
+          };
+
+          db = mkOption {
+            type = enum [ "mysql" "pgsql" ];
+            default = "mysql";
+            description = "Type of this database resource";
+          };
+
+          host = mkOption {
+            type = str;
+            description = "Host to connect to";
+          };
+
+          port = mkOption {
+            type = nullOr port;
+            default = null;
+            description = "Port to connect on";
+          };
+
+          username = mkOption {
+            type = str;
+            description = "Database or SSH user or LDAP bind DN to connect with";
+          };
+
+          password = mkOption {
+            type = str;
+            description = "Password for the database user or LDAP bind DN";
+          };
+
+          dbname = mkOption {
+            type = str;
+            description = "Name of the database to connect to";
+          };
+
+          charset = mkOption {
+            type = nullOr str;
+            default = null;
+            example = "utf8";
+            description = "Database character set to connect with";
+          };
+
+          useSSL = mkOption {
+            type = nullOr bool;
+            default = false;
+            description = "Whether to connect to the database using SSL";
+          };
+
+          sslCert = mkOption {
+            type = nullOr str;
+            default = null;
+            description = "The file path to the SSL certificate. Only available for the mysql database.";
+          };
+
+          sslKey = mkOption {
+            type = nullOr str;
+            default = null;
+            description = "The file path to the SSL key. Only available for the mysql database.";
+          };
+
+          sslCA = mkOption {
+            type = nullOr str;
+            default = null;
+            description = "The file path to the SSL certificate authority. Only available for the mysql database.";
+          };
+
+          sslCApath = mkOption {
+            type = nullOr str;
+            default = null;
+            description = "The file path to the directory that contains the trusted SSL CA certificates in PEM format. Only available for the mysql database.";
+          };
+
+          sslCipher = mkOption {
+            type = nullOr str;
+            default = null;
+            description = "A list of one or more permissible ciphers to use for SSL encryption, in a format understood by OpenSSL. Only available for the mysql database.";
+          };
+
+          rootDN = mkOption {
+            type = str;
+            description = "Root object of the LDAP tree";
+          };
+
+          ldapEncryption = mkOption {
+            type = enum [ "none" "starttls" "ldaps" ];
+            default = "none";
+            description = "LDAP encryption to use";
+          };
+
+          ldapTimeout = mkOption {
+            type = ints.positive;
+            default = 5;
+            description = "Connection timeout for every LDAP connection";
+          };
+
+          sshPrivateKey = mkOption {
+            type = str;
+            description = "The path to the private key of the user";
+          };
+        };
+      }));
+    };
+
+    mutableAuthConfig = mkOption {
+      type = bool;
+      default = true;
+      description = "Make authentication.ini mutable (e.g. via the web interface)";
+    };
+
+    authentications = mkOption {
+      default = {};
+      description = "Icingaweb 2 authentications to define";
+      type = attrsOf (submodule ({ name, ... }: {
+        options = {
+          name = mkOption {
+            visible = false;
+            default = name;
+            type = str;
+            description = "Name of this authentication";
+          };
+
+          backend = mkOption {
+            type = enum [ "external" "ldap" "msldap" "db" ];
+            default = "db";
+            description = "The type of this authentication backend";
+          };
+
+          domain = mkOption {
+            type = nullOr str;
+            default = null;
+            description = "Domain for domain-aware authentication";
+          };
+
+          externalStripRegex = mkOption {
+            type = nullOr str;
+            default = null;
+            description = "Regular expression to strip off specific user name parts";
+          };
+
+          resource = mkOption {
+            type = str;
+            description = "Name of the database/LDAP resource";
+          };
+
+          ldapUserClass = mkOption {
+            type = nullOr str;
+            default = null;
+            description = "LDAP user class";
+          };
+
+          ldapUserNameAttr = mkOption {
+            type = nullOr str;
+            default = null;
+            description = "LDAP attribute which contains the username";
+          };
+
+          ldapFilter = mkOption {
+            type = nullOr str;
+            default = null;
+            description = "LDAP search filter";
+          };
+        };
+      }));
+    };
+
+    mutableGroupsConfig = mkOption {
+      type = bool;
+      default = true;
+      description = "Make groups.ini mutable (e.g. via the web interface)";
+    };
+
+    groupBackends = mkOption {
+      default = {};
+      description = "Icingaweb 2 group backends to define";
+      type = attrsOf (submodule ({ name, ... }: {
+        options = {
+          name = mkOption {
+            visible = false;
+            default = name;
+            type = str;
+            description = "Name of this group backend";
+          };
+
+          backend = mkOption {
+            type = enum [ "ldap" "msldap" "db" ];
+            default = "db";
+            description = "The type of this group backend";
+          };
+
+          resource = mkOption {
+            type = str;
+            description = "Name of the database/LDAP resource";
+          };
+
+          ldapUserClass = mkOption {
+            type = nullOr str;
+            default = null;
+            description = "LDAP user class";
+          };
+
+          ldapUserNameAttr = mkOption {
+            type = nullOr str;
+            default = null;
+            description = "LDAP attribute which contains the username";
+          };
+
+          ldapGroupClass = mkOption {
+            type = nullOr str;
+            default = null;
+            description = "LDAP group class";
+          };
+
+          ldapGroupNameAttr = mkOption {
+            type = nullOr str;
+            default = null;
+            description = "LDAP attribute which contains the groupname";
+          };
+
+          ldapGroupFilter = mkOption {
+            type = nullOr str;
+            default = null;
+            description = "LDAP group search filter";
+          };
+
+          ldapNestedSearch = mkOption {
+            type = bool;
+            default = false;
+            description = "Enable nested group search in Active Directory based on the user";
+          };
+        };
+      }));
+    };
+
+    mutableRolesConfig = mkOption {
+      type = bool;
+      default = true;
+      description = "Make roles.ini mutable (e.g. via the web interface)";
+    };
+
+    roles = mkOption {
+      default = {};
+      description = "Icingaweb 2 roles to define";
+      type = attrsOf (submodule ({ name, ... }: {
+        options = {
+          name = mkOption {
+            visible = false;
+            default = name;
+            type = str;
+            description = "Name of this role";
+          };
+
+          users = mkOption {
+            type = listOf str;
+            default = [];
+            description = "List of users that are assigned to the role";
+          };
+
+          groups = mkOption {
+            type = listOf str;
+            default = [];
+            description = "List of groups that are assigned to the role";
+          };
+
+          permissions = mkOption {
+            type = listOf str;
+            default = [];
+            example = [ "application/share/navigation" "config/*" ];
+            description = "The permissions to grant";
+          };
+
+          extraAssignments = mkOption {
+            type = attrsOf (listOf str);
+            default = {};
+            example = { "monitoring/blacklist/properties" = [ "sla" "customer"]; };
+            description = "Additional assignments of this role";
+          };
+        };
+      }));
+    };
+  };
+
+  config = mkIf cfg.enable {
+    services.phpfpm.poolConfigs = mkIf (cfg.pool == "${poolName}") {
+      "${poolName}" = ''
+        listen = "${phpfpmSocketName}"
+        listen.owner = nginx
+        listen.group = nginx
+        listen.mode = 0600
+        user = icingaweb2
+        pm = dynamic
+        pm.max_children = 75
+        pm.start_servers = 2
+        pm.min_spare_servers = 2
+        pm.max_spare_servers = 10
+      '';
+    };
+
+    services.phpfpm.phpOptions = mkIf (cfg.pool == "${poolName}")
+      ''
+        extension = ${pkgs.phpPackages.imagick}/lib/php/extensions/imagick.so
+        date.timezone = "${cfg.timezone}"
+      '';
+
+    systemd.services."phpfpm-${poolName}".serviceConfig.ReadWritePaths = [ "/etc/icingaweb2" ];
+
+    services.nginx = {
+      enable = true;
+      virtualHosts = mkIf (cfg.virtualHost != null) {
+        "${cfg.virtualHost}" = {
+          root = "${pkgs.icingaweb2}/public";
+
+          extraConfig = ''
+            index index.php;
+            try_files $1 $uri $uri/ /index.php$is_args$args;
+          '';
+
+          locations."~ ..*/.*.php$".extraConfig = ''
+            return 403;
+          '';
+
+          locations."~ ^/index.php(.*)$".extraConfig = ''
+            fastcgi_intercept_errors on;
+            fastcgi_index index.php;
+            include ${config.services.nginx.package}/conf/fastcgi.conf;
+            try_files $uri =404;
+            fastcgi_split_path_info ^(.+\.php)(/.+)$;
+            fastcgi_pass unix:${phpfpmSocketName};
+            fastcgi_param SCRIPT_FILENAME ${pkgs.icingaweb2}/public/index.php;
+          '';
+        };
+      };
+    };
+
+    # /etc/icingaweb2
+    environment.etc = let
+      doModule = name: optionalAttrs (cfg.modules."${name}".enable) (nameValuePair "icingaweb2/enabledModules/${name}" { source = "${pkgs.icingaweb2}/modules/${name}"; });
+    in {}
+      # Module packages
+      // (mapAttrs' (k: v: nameValuePair "icingaweb2/enabledModules/${k}" { source = v; }) cfg.modulePackages)
+      # Built-in modules
+      // doModule "doc"
+      // doModule "migrate"
+      // doModule "setup"
+      // doModule "test"
+      // doModule "translation"
+      # Configs
+      // optionalAttrs (!cfg.generalConfig.mutable) { "icingaweb2/config.ini".text = configIni; }
+      // optionalAttrs (!cfg.mutableResources) { "icingaweb2/resources.ini".text = resourcesIni; }
+      // optionalAttrs (!cfg.mutableAuthConfig) { "icingaweb2/authentication.ini".text = authenticationIni; }
+      // optionalAttrs (!cfg.mutableGroupsConfig) { "icingaweb2/groups.ini".text = groupsIni; }
+      // optionalAttrs (!cfg.mutableRolesConfig) { "icingaweb2/roles.ini".text = rolesIni; };
+
+    # User and group
+    users.groups.icingaweb2 = {};
+    users.users.icingaweb2 = {
+      description = "Icingaweb2 service user";
+      group = "icingaweb2";
+      isSystemUser = true;
+    };
+  };
+}
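
A minimal sketch wiring the new module to a database resource and a matching authentication backend (credentials illustrative):

    services.icingaweb2 = {
      enable = true;
      resources.icinga_ido = {
        type = "db";
        db = "mysql";
        host = "localhost";
        username = "icingaweb2";
        password = "secret";
        dbname = "icingaweb2";
      };
      authentications.icingaweb = {
        backend = "db";
        resource = "icinga_ido";
      };
    };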
diff --git a/nixos/modules/services/web-apps/icingaweb2/module-monitoring.nix b/nixos/modules/services/web-apps/icingaweb2/module-monitoring.nix
new file mode 100644
index 00000000000..167e5e38956
--- /dev/null
+++ b/nixos/modules/services/web-apps/icingaweb2/module-monitoring.nix
@@ -0,0 +1,157 @@
+{ config, lib, pkgs, ... }: with lib; let
+  cfg = config.services.icingaweb2.modules.monitoring;
+
+  configIni = ''
+    [security]
+    protected_customvars = "${concatStringsSep "," cfg.generalConfig.protectedVars}"
+  '';
+
+  backendsIni = let
+    formatBool = b: if b then "1" else "0";
+  in concatStringsSep "\n" (mapAttrsToList (name: config: ''
+    [${name}]
+    type = "ido"
+    resource = "${config.resource}"
+    disabled = "${formatBool config.disabled}"
+  '') cfg.backends);
+
+  transportsIni = concatStringsSep "\n" (mapAttrsToList (name: config: ''
+    [${name}]
+    type = "${config.type}"
+    ${optionalString (config.instance != null) ''instance = "${config.instance}"''}
+    ${optionalString (config.type == "local" || config.type == "remote") ''path = "${config.path}"''}
+    ${optionalString (config.type != "local") ''
+      host = "${config.host}"
+      ${optionalString (config.port != null) ''port = "${toString config.port}"''}
+      user${optionalString (config.type == "api") "name"} = "${config.username}"
+    ''}
+    ${optionalString (config.type == "api") ''password = "${config.password}"''}
+    ${optionalString (config.type == "remote") ''resource = "${config.resource}"''}
+  '') cfg.transports);
+
+in {
+  options.services.icingaweb2.modules.monitoring = with types; {
+    enable = mkOption {
+      type = bool;
+      default = true;
+      description = "Whether to enable the icingaweb2 monitoring module.";
+    };
+
+    generalConfig = {
+      mutable = mkOption {
+        type = bool;
+        default = false;
+        description = "Make config.ini of the monitoring module mutable (e.g. via the web interface).";
+      };
+
+      protectedVars = mkOption {
+        type = listOf str;
+        default = [ "*pw*" "*pass*" "community" ];
+        description = "List of string patterns for custom variables which should be excluded from user’s view.";
+      };
+    };
+
+    mutableBackends = mkOption {
+      type = bool;
+      default = false;
+      description = "Make backends.ini of the monitoring module mutable (e.g. via the web interface).";
+    };
+
+    backends = mkOption {
+      default = { "icinga" = { resource = "icinga_ido"; }; };
+      description = "Monitoring backends to define";
+      type = attrsOf (submodule ({ name, ... }: {
+        options = {
+          name = mkOption {
+            visible = false;
+            default = name;
+            type = str;
+            description = "Name of this backend";
+          };
+
+          resource = mkOption {
+            type = str;
+            description = "Name of the IDO resource";
+          };
+
+          disabled = mkOption {
+            type = bool;
+            default = false;
+            description = "Disable this backend";
+          };
+        };
+      }));
+    };
+
+    mutableTransports = mkOption {
+      type = bool;
+      default = true;
+      description = "Make commandtransports.ini of the monitoring module mutable (e.g. via the web interface).";
+    };
+
+    transports = mkOption {
+      default = {};
+      description = "Command transports to define";
+      type = attrsOf (submodule ({ name, ... }: {
+        options = {
+          name = mkOption {
+            visible = false;
+            default = name;
+            type = str;
+            description = "Name of this transport";
+          };
+
+          type = mkOption {
+            type = enum [ "api" "local" "remote" ];
+            default = "api";
+            description = "Type of  this transport";
+          };
+
+          instance = mkOption {
+            type = nullOr str;
+            default = null;
+            description = "Assign a icinga instance to this transport";
+          };
+
+          path = mkOption {
+            type = str;
+            description = "Path to the socket for local or remote transports";
+          };
+
+          host = mkOption {
+            type = str;
+            description = "Host for the api or remote transport";
+          };
+
+          port = mkOption {
+            type = nullOr str;
+            default = null;
+            description = "Port to connect to for the api or remote transport";
+          };
+
+          username = mkOption {
+            type = str;
+            description = "Username for the api or remote transport";
+          };
+
+          password = mkOption {
+            type = str;
+            description = "Password for the api transport";
+          };
+
+          resource = mkOption {
+            type = str;
+            description = "SSH identity resource for the remote transport";
+          };
+        };
+      }));
+    };
+  };
+
+  config = mkIf (config.services.icingaweb2.enable && cfg.enable) {
+    environment.etc = { "icingaweb2/enabledModules/monitoring" = { source = "${pkgs.icingaweb2}/modules/monitoring"; }; }
+      // optionalAttrs (!cfg.generalConfig.mutable) { "icingaweb2/modules/monitoring/config.ini".text = configIni; }
+      // optionalAttrs (!cfg.mutableBackends) { "icingaweb2/modules/monitoring/backends.ini".text = backendsIni; }
+      // optionalAttrs (!cfg.mutableTransports) { "icingaweb2/modules/monitoring/commandtransports.ini".text = transportsIni; };
+  };
+}
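
The monitoring module is enabled by default alongside icingaweb2; a sketch pointing it at an IDO backend and the Icinga 2 API command transport (credentials illustrative):

    services.icingaweb2.modules.monitoring = {
      backends.icinga.resource = "icinga_ido";
      transports.icinga2 = {
        type = "api";
        host = "localhost";
        username = "icingaweb2";
        password = "secret";
      };
    };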
diff --git a/nixos/modules/services/web-apps/matomo-doc.xml b/nixos/modules/services/web-apps/matomo-doc.xml
index c71c22e810e..20d2de9f418 100644
--- a/nixos/modules/services/web-apps/matomo-doc.xml
+++ b/nixos/modules/services/web-apps/matomo-doc.xml
@@ -47,7 +47,7 @@
   </para>
  </section>
 
- <section>
+ <section xml:id="module-services-matomo-archive-processing">
   <title>Archive Processing</title>
   <para>
    This module comes with the systemd service <literal>matomo-archive-processing.service</literal>
diff --git a/nixos/modules/services/web-apps/tt-rss.nix b/nixos/modules/services/web-apps/tt-rss.nix
index cf6f79c92f4..f7a3daa5fdd 100644
--- a/nixos/modules/services/web-apps/tt-rss.nix
+++ b/nixos/modules/services/web-apps/tt-rss.nix
@@ -40,7 +40,7 @@ let
         else if (cfg.database.passwordFile != null) then
           "file_get_contents('${cfg.database.passwordFile}')"
         else
-          ""
+          "''"
       });
       define('DB_PORT', '${toString dbPort}');
 
@@ -562,7 +562,7 @@ let
           callSql = e:
               if cfg.database.type == "pgsql" then ''
                   ${optionalString (cfg.database.password != null) "PGPASSWORD=${cfg.database.password}"} \
-                  ${optionalString (cfg.database.passwordFile != null) "PGPASSWORD=$(cat ${cfg.database.passwordFile}"}) \
+                  ${optionalString (cfg.database.passwordFile != null) "PGPASSWORD=$(cat ${cfg.database.passwordFile})"} \
                   ${pkgs.sudo}/bin/sudo -u ${cfg.user} ${config.services.postgresql.package}/bin/psql \
                     -U ${cfg.database.user} \
                     ${optionalString (cfg.database.host != null) "-h ${cfg.database.host} --port ${toString dbPort}"} \
diff --git a/nixos/modules/services/web-servers/apache-httpd/default.nix b/nixos/modules/services/web-servers/apache-httpd/default.nix
index bb962334786..1eac5be2f8d 100644
--- a/nixos/modules/services/web-servers/apache-httpd/default.nix
+++ b/nixos/modules/services/web-servers/apache-httpd/default.nix
@@ -217,7 +217,7 @@ let
     ) null ([ cfg ] ++ subservices);
 
     documentRoot = if maybeDocumentRoot != null then maybeDocumentRoot else
-      pkgs.runCommand "empty" {} "mkdir -p $out";
+      pkgs.runCommand "empty" { preferLocalBuild = true; } "mkdir -p $out";
 
     documentRootConf = ''
       DocumentRoot "${documentRoot}"
@@ -376,6 +376,8 @@ let
     Include ${httpd}/conf/extra/httpd-multilang-errordoc.conf
     Include ${httpd}/conf/extra/httpd-languages.conf
 
+    TraceEnable off
+
     ${if enableSSL then sslConf else ""}
 
     # Fascist default - deny access to everything.
@@ -424,6 +426,7 @@ let
   phpIni = pkgs.runCommand "php.ini"
     { options = concatStringsSep "\n"
         ([ mainCfg.phpOptions ] ++ (map (svc: svc.phpOptions) allSubservices));
+      preferLocalBuild = true;
     }
     ''
       cat ${php}/etc/php.ini > $out
@@ -495,8 +498,8 @@ in
         default = false;
         description = ''
           If enabled, each virtual host gets its own
-          <filename>access_log</filename> and
-          <filename>error_log</filename>, namely suffixed by the
+          <filename>access.log</filename> and
+          <filename>error.log</filename>, namely suffixed by the
           <option>hostName</option> of the virtual host.
         '';
       };
@@ -639,8 +642,8 @@ in
 
       sslProtocols = mkOption {
         type = types.str;
-        default = "All -SSLv2 -SSLv3";
-        example = "All -SSLv2 -SSLv3 -TLSv1";
+        default = "All -SSLv2 -SSLv3 -TLSv1";
+        example = "All -SSLv2 -SSLv3";
         description = "Allowed SSL/TLS protocol versions.";
       };
     }
@@ -684,6 +687,9 @@ in
       ''
         ; Needed for PHP's mail() function.
         sendmail_path = sendmail -t -i
+
+        ; Don't advertise PHP
+        expose_php = off
       '' + optionalString (!isNull config.time.timeZone) ''
 
         ; Apparently PHP doesn't use $TZ.
diff --git a/nixos/modules/services/web-servers/apache-httpd/mediawiki.nix b/nixos/modules/services/web-servers/apache-httpd/mediawiki.nix
index e871ae6ff15..6234478014c 100644
--- a/nixos/modules/services/web-servers/apache-httpd/mediawiki.nix
+++ b/nixos/modules/services/web-servers/apache-httpd/mediawiki.nix
@@ -116,9 +116,10 @@ let
       '';
   };
 
-  mediawikiScripts = pkgs.runCommand "mediawiki-${config.id}-scripts"
-    { buildInputs = [ pkgs.makeWrapper ]; }
-    ''
+  mediawikiScripts = pkgs.runCommand "mediawiki-${config.id}-scripts" {
+      buildInputs = [ pkgs.makeWrapper ];
+      preferLocalBuild = true;
+    } ''
       mkdir -p $out/bin
       for i in changePassword.php createAndPromote.php userOptions.php edit.php nukePage.php update.php; do
         makeWrapper ${php}/bin/php $out/bin/mediawiki-${config.id}-$(basename $i .php) \
diff --git a/nixos/modules/services/web-servers/nginx/default.nix b/nixos/modules/services/web-servers/nginx/default.nix
index 6c733f093ba..89dc8b3795e 100644
--- a/nixos/modules/services/web-servers/nginx/default.nix
+++ b/nixos/modules/services/web-servers/nginx/default.nix
@@ -44,7 +44,25 @@ let
     }
   ''));
 
-  configFile = pkgs.writeText "nginx.conf" ''
+  awkFormat = pkgs.writeText "awkFormat-nginx.awk" ''
+    # Strip leading whitespace, then re-indent each line by brace nesting depth.
+    {sub(/^[ \t]+/,"");idx=0}
+    /\{/{ctx++;idx=1}
+    /\}/{ctx--}
+    {id="";for(i=idx;i<ctx;i++)id=sprintf("%s%s", id, "\t");printf "%s%s\n", id, $0}
+  '';
+
+  configFile = pkgs.stdenv.mkDerivation {
+    name = "nginx-config";
+    src = "";
+    phases = [ "installPhase" ];
+    installPhase = ''
+      mkdir $out
+      awk -f ${awkFormat} ${pre-configFile} | sed '/^\s*$/d' > $out/nginx.conf
+    '';
+  };
+
+  pre-configFile = pkgs.writeText "pre-nginx.conf" ''
     user ${cfg.user} ${cfg.group};
     error_log ${cfg.logError};
     daemon off;
@@ -638,10 +656,10 @@ in
       preStart =
         ''
         ${cfg.preStart}
-        ${cfg.package}/bin/nginx -c ${configFile} -p ${cfg.stateDir} -t
+        ${cfg.package}/bin/nginx -c ${configFile}/nginx.conf -p ${cfg.stateDir} -t
         '';
       serviceConfig = {
-        ExecStart = "${cfg.package}/bin/nginx -c ${configFile} -p ${cfg.stateDir}";
+        ExecStart = "${cfg.package}/bin/nginx -c ${configFile}/nginx.conf -p ${cfg.stateDir}";
         ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
         Restart = "always";
         RestartSec = "10s";
diff --git a/nixos/modules/services/web-servers/phpfpm/default.nix b/nixos/modules/services/web-servers/phpfpm/default.nix
index 152c89a2cae..ffafbc5e92f 100644
--- a/nixos/modules/services/web-servers/phpfpm/default.nix
+++ b/nixos/modules/services/web-servers/phpfpm/default.nix
@@ -14,11 +14,13 @@ let
 
   mapPoolConfig = n: p: {
     phpPackage = cfg.phpPackage;
+    phpOptions = cfg.phpOptions;
     config = p;
   };
 
   mapPool = n: p: {
     phpPackage = p.phpPackage;
+    phpOptions = p.phpOptions;
     config = ''
       listen = ${p.listen}
       ${p.extraConfig}
@@ -35,8 +37,9 @@ let
     ${conf}
   '';
 
-  phpIni = pkgs.runCommand "php.ini" {
-    inherit (cfg) phpPackage phpOptions;
+  phpIni = pool: pkgs.runCommand "php.ini" {
+    inherit (pool) phpPackage phpOptions;
+    preferLocalBuild = true;
     nixDefaults = ''
       sendmail_path = "/run/wrappers/bin/sendmail -t -i"
     '';
@@ -156,6 +159,7 @@ in {
         '';
         serviceConfig = let
           cfgFile = fpmCfgFile pool poolConfig.config;
+          iniFile = phpIni poolConfig;
         in {
           Slice = "phpfpm.slice";
           PrivateDevices = true;
@@ -164,7 +168,7 @@ in {
           # XXX: We need AF_NETLINK to make the sendmail SUID binary from postfix work
           RestrictAddressFamilies = "AF_UNIX AF_INET AF_INET6 AF_NETLINK";
           Type = "notify";
-          ExecStart = "${poolConfig.phpPackage}/bin/php-fpm -y ${cfgFile} -c ${phpIni}";
+          ExecStart = "${poolConfig.phpPackage}/bin/php-fpm -y ${cfgFile} -c ${iniFile}";
           ExecReload = "${pkgs.coreutils}/bin/kill -USR2 $MAINPID";
         };
       }
diff --git a/nixos/modules/services/web-servers/phpfpm/pool-options.nix b/nixos/modules/services/web-servers/phpfpm/pool-options.nix
index 40c83cddb95..d9ad7eff71f 100644
--- a/nixos/modules/services/web-servers/phpfpm/pool-options.nix
+++ b/nixos/modules/services/web-servers/phpfpm/pool-options.nix
@@ -25,6 +25,15 @@ with lib; {
       '';
     };
 
+    phpOptions = mkOption {
+      type = types.lines;
+      default = fpmCfg.phpOptions;
+      defaultText = "config.services.phpfpm.phpOptions";
+      description = ''
+        Options appended to the PHP configuration file <filename>php.ini</filename> used for this PHP-FPM pool.
+      '';
+    };
+
     extraConfig = mkOption {
       type = types.lines;
       example = ''
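With phpOptions now resolved per pool, individual pools can override the global PHP settings. A minimal usage sketch (the pool name, socket path, and option values are hypothetical):

    services.phpfpm.pools.example = {
      listen = "/run/phpfpm/example.sock";
      # Overrides the global services.phpfpm.phpOptions for this pool only.
      phpOptions = ''
        memory_limit = 512M
      '';
      extraConfig = ''
        user = nginx
        pm = dynamic
        pm.max_children = 5
      '';
    };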
diff --git a/nixos/modules/services/web-servers/traefik.nix b/nixos/modules/services/web-servers/traefik.nix
index 700202b1d28..4674ed0177e 100644
--- a/nixos/modules/services/web-servers/traefik.nix
+++ b/nixos/modules/services/web-servers/traefik.nix
@@ -8,6 +8,7 @@ let
     if cfg.configFile == null then
       pkgs.runCommand "config.toml" {
         buildInputs = [ pkgs.remarshal ];
+        preferLocalBuild = true;
       } ''
         remarshal -if json -of toml \
           < ${pkgs.writeText "config.json" (builtins.toJSON cfg.configOptions)} \
diff --git a/nixos/modules/services/x11/desktop-managers/default.nix b/nixos/modules/services/x11/desktop-managers/default.nix
index cce35aa28ba..2b1e9169e5f 100644
--- a/nixos/modules/services/x11/desktop-managers/default.nix
+++ b/nixos/modules/services/x11/desktop-managers/default.nix
@@ -20,7 +20,7 @@ in
   imports = [
     ./none.nix ./xterm.nix ./xfce.nix ./plasma5.nix ./lumina.nix
     ./lxqt.nix ./enlightenment.nix ./gnome3.nix ./kodi.nix ./maxx.nix
-    ./mate.nix
+    ./mate.nix ./pantheon.nix
   ];
 
   options = {
diff --git a/nixos/modules/services/x11/desktop-managers/gnome3.nix b/nixos/modules/services/x11/desktop-managers/gnome3.nix
index 7544ba4638a..6255dce8276 100644
--- a/nixos/modules/services/x11/desktop-managers/gnome3.nix
+++ b/nixos/modules/services/x11/desktop-managers/gnome3.nix
@@ -15,7 +15,7 @@ let
     '';
   };
 
-  nixos-gsettings-desktop-schemas = pkgs.runCommand "nixos-gsettings-desktop-schemas" {}
+  nixos-gsettings-desktop-schemas = pkgs.runCommand "nixos-gsettings-desktop-schemas" { preferLocalBuild = true; }
     ''
      mkdir -p $out/share/gsettings-schemas/nixos-gsettings-overrides/glib-2.0/schemas
      cp -rf ${pkgs.gnome3.gsettings-desktop-schemas}/share/gsettings-schemas/gsettings-desktop-schemas*/glib-2.0/schemas/*.xml $out/share/gsettings-schemas/nixos-gsettings-overrides/glib-2.0/schemas
@@ -151,6 +151,7 @@ in {
     services.colord.enable = mkDefault true;
     services.packagekit.enable = mkDefault true;
     hardware.bluetooth.enable = mkDefault true;
+    services.hardware.bolt.enable = mkDefault true;
     services.xserver.libinput.enable = mkDefault true; # for controlling touchpad settings via gnome control center
     services.udev.packages = [ pkgs.gnome3.gnome-settings-daemon ];
     systemd.packages = [ pkgs.gnome3.vino ];
diff --git a/nixos/modules/services/x11/desktop-managers/pantheon.nix b/nixos/modules/services/x11/desktop-managers/pantheon.nix
new file mode 100644
index 00000000000..0f49439bf7c
--- /dev/null
+++ b/nixos/modules/services/x11/desktop-managers/pantheon.nix
@@ -0,0 +1,195 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.xserver.desktopManager.pantheon;
+
+  nixos-gsettings-desktop-schemas = pkgs.pantheon.elementary-gsettings-schemas.override {
+    extraGSettingsOverridePackages = cfg.extraGSettingsOverridePackages;
+    extraGSettingsOverrides = cfg.extraGSettingsOverrides;
+  };
+
+in
+
+{
+  options = {
+
+    services.xserver.desktopManager.pantheon = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = "Enable the pantheon desktop manager";
+      };
+
+      sessionPath = mkOption {
+        default = [];
+        example = literalExample "[ pkgs.gnome3.gpaste ]";
+        description = ''
+          Additional list of packages to be added to the session search path.
+          Useful for GSettings-conditional autostart.
+
+          Note that this should be a last resort; patching the package is preferred (see GPaste).
+        '';
+        apply = list: list ++
+        [
+          pkgs.pantheon.pantheon-agent-geoclue2
+        ];
+      };
+
+      extraGSettingsOverrides = mkOption {
+        default = "";
+        type = types.lines;
+        description = "Additional gsettings overrides.";
+      };
+
+      extraGSettingsOverridePackages = mkOption {
+        default = [];
+        type = types.listOf types.path;
+        description = "List of packages for which gsettings are overridden.";
+      };
+
+      debug = mkEnableOption "gnome-session debug messages";
+
+    };
+
+    environment.pantheon.excludePackages = mkOption {
+      default = [];
+      example = literalExample "[ pkgs.pantheon.elementary-camera ]";
+      type = types.listOf types.package;
+      description = "Which packages pantheon should exclude from the default environment";
+    };
+
+  };
+
+
+  config = mkIf cfg.enable {
+
+    services.xserver.displayManager.extraSessionFilePackages = [ pkgs.pantheon.elementary-session-settings ];
+
+    # Ensure lightdm is used when Pantheon is enabled
+    # Without it, screen locking will be nonfunctional because of the use of lightlocker
+    services.xserver.displayManager.lightdm.enable = mkDefault true;
+    services.xserver.displayManager.lightdm.greeters.pantheon.enable = mkDefault true;
+
+    # If not set manually, the Pantheon session cannot be started
+    # Known issue of https://github.com/NixOS/nixpkgs/pull/43992
+    services.xserver.desktopManager.default = mkForce "pantheon";
+
+    services.xserver.displayManager.sessionCommands = ''
+      if test "$XDG_CURRENT_DESKTOP" = "Pantheon"; then
+          ${concatMapStrings (p: ''
+            if [ -d "${p}/share/gsettings-schemas/${p.name}" ]; then
+              export XDG_DATA_DIRS=$XDG_DATA_DIRS''${XDG_DATA_DIRS:+:}${p}/share/gsettings-schemas/${p.name}
+            fi
+
+            if [ -d "${p}/lib/girepository-1.0" ]; then
+              export GI_TYPELIB_PATH=$GI_TYPELIB_PATH''${GI_TYPELIB_PATH:+:}${p}/lib/girepository-1.0
+              export LD_LIBRARY_PATH=$LD_LIBRARY_PATH''${LD_LIBRARY_PATH:+:}${p}/lib
+            fi
+          '') cfg.sessionPath}
+
+          # Makes Qt applications look less alien
+          export QT_QPA_PLATFORMTHEME=gtk3
+          export QT_STYLE_OVERRIDE=adwaita
+      fi
+    '';
+
+    hardware.bluetooth.enable = mkDefault true;
+    hardware.pulseaudio.enable = mkDefault true;
+    security.polkit.enable = true;
+    services.accounts-daemon.enable = true;
+    services.bamf.enable = true;
+    services.colord.enable = mkDefault true;
+    services.pantheon.files.enable = mkDefault true;
+    services.tumbler.enable = mkDefault true;
+    services.dbus.packages = mkMerge [
+      ([ pkgs.pantheon.switchboard-plug-power ])
+      (mkIf config.services.printing.enable [ pkgs.system-config-printer ])
+    ];
+    services.pantheon.contractor.enable = true;
+    services.geoclue2.enable = mkDefault true;
+    # pantheon has pantheon-agent-geoclue2
+    services.geoclue2.enableDemoAgent = false;
+    services.gnome3.at-spi2-core.enable = true;
+    services.gnome3.evolution-data-server.enable = true;
+    services.gnome3.file-roller.enable = true;
+    # TODO: gnome-keyring's xdg autostarts will still be in the environment (from elementary-session-settings) if disabled forcefully
+    services.gnome3.gnome-keyring.enable = true;
+    services.gnome3.gvfs.enable = true;
+    services.gnome3.rygel.enable = true;
+    services.gsignond.enable = true;
+    services.gsignond.plugins = with pkgs.gsignondPlugins; [ lastfm mail oauth ];
+    services.udev.packages = [ pkgs.pantheon.elementary-settings-daemon ];
+    services.udisks2.enable = true;
+    services.upower.enable = config.powerManagement.enable;
+    services.xserver.libinput.enable = mkDefault true;
+    services.xserver.updateDbusEnvironment = true;
+    services.zeitgeist.enable = true;
+
+    networking.networkmanager.enable = mkDefault true;
+    networking.networkmanager.basePackages =
+      { inherit (pkgs) networkmanager modemmanager wpa_supplicant;
+        inherit (pkgs.gnome3) networkmanager-openvpn networkmanager-vpnc
+                              networkmanager-openconnect networkmanager-fortisslvpn
+                              networkmanager-iodine networkmanager-l2tp; };
+
+    # Override GSettings schemas
+    environment.variables.NIX_GSETTINGS_OVERRIDES_DIR = "${nixos-gsettings-desktop-schemas}/share/gsettings-schemas/nixos-gsettings-overrides/glib-2.0/schemas";
+
+    environment.variables.GNOME_SESSION_DEBUG = optionalString cfg.debug "1";
+
+    environment.variables.GIO_EXTRA_MODULES = [
+      "${lib.getLib pkgs.gnome3.dconf}/lib/gio/modules"
+      "${pkgs.gnome3.glib-networking.out}/lib/gio/modules"
+      "${pkgs.gnome3.gvfs}/lib/gio/modules"
+    ];
+
+    environment.pathsToLink = [
+      # FIXME: modules should link subdirs of `/share` rather than relying on this
+      "/share"
+    ];
+
+    environment.systemPackages = pkgs.pantheon.artwork ++ pkgs.pantheon.desktop ++ pkgs.pantheon.services ++ cfg.sessionPath
+      ++ (pkgs.gnome3.removePackagesByName pkgs.pantheon.apps config.environment.pantheon.excludePackages)
+      ++ (with pkgs.gnome3;
+      [
+        adwaita-icon-theme
+        dconf
+        epiphany
+        evince
+        geary
+        gnome-bluetooth
+        gnome-font-viewer
+        gnome-power-manager
+      ])
+      ++ (with pkgs;
+      [
+        adwaita-qt
+        desktop-file-utils
+        glib
+        glib-networking
+        gnome-menus
+        gtk3.out
+        hicolor-icon-theme
+        lightlocker
+        plank
+        qgnomeplatform
+        shared-mime-info
+        sound-theme-freedesktop
+        xdg-user-dirs
+      ]);
+
+    fonts.fonts = with pkgs; [
+      opensans-ttf
+      roboto-mono
+    ];
+    fonts.fontconfig.defaultFonts = {
+      monospace = [ "Roboto Mono" ];
+      sansSerif = [ "Open Sans" ];
+    };
+
+  };
+
+}
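A minimal sketch of enabling the new desktop (the excluded package mirrors the option's own example):

    services.xserver.enable = true;
    services.xserver.desktopManager.pantheon.enable = true;
    # Optionally trim the default application set:
    environment.pantheon.excludePackages = [ pkgs.pantheon.elementary-camera ];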
diff --git a/nixos/modules/services/x11/display-managers/lightdm-greeters/enso-os.nix b/nixos/modules/services/x11/display-managers/lightdm-greeters/enso-os.nix
index 7c794b1ba17..de128809ce3 100644
--- a/nixos/modules/services/x11/display-managers/lightdm-greeters/enso-os.nix
+++ b/nixos/modules/services/x11/display-managers/lightdm-greeters/enso-os.nix
@@ -12,9 +12,10 @@ let
 
   # We need a few things in the environment for the greeter to run with
   # fonts/icons.
-  wrappedEnsoGreeter = pkgs.runCommand "lightdm-enso-os-greeter"
-    { buildInputs = [ pkgs.makeWrapper ]; }
-    ''
+  wrappedEnsoGreeter = pkgs.runCommand "lightdm-enso-os-greeter" {
+      buildInputs = [ pkgs.makeWrapper ];
+      preferLocalBuild = true;
+    } ''
       # This wrapper ensures that we actually get themes
       makeWrapper ${pkgs.lightdm-enso-os-greeter}/bin/pantheon-greeter \
         $out/greeter \
diff --git a/nixos/modules/services/x11/display-managers/lightdm-greeters/gtk.nix b/nixos/modules/services/x11/display-managers/lightdm-greeters/gtk.nix
index efec943c007..772cc95e84e 100644
--- a/nixos/modules/services/x11/display-managers/lightdm-greeters/gtk.nix
+++ b/nixos/modules/services/x11/display-managers/lightdm-greeters/gtk.nix
@@ -18,9 +18,10 @@ let
   # The default greeter provided with this expression is the GTK greeter.
   # Again, we need a few things in the environment for the greeter to run with
   # fonts/icons.
-  wrappedGtkGreeter = pkgs.runCommand "lightdm-gtk-greeter"
-    { buildInputs = [ pkgs.makeWrapper ]; }
-    ''
+  wrappedGtkGreeter = pkgs.runCommand "lightdm-gtk-greeter" {
+      buildInputs = [ pkgs.makeWrapper ];
+      preferLocalBuild = true;
+    } ''
       # This wrapper ensures that we actually get themes
       makeWrapper ${pkgs.lightdm_gtk_greeter}/sbin/lightdm-gtk-greeter \
         $out/greeter \
diff --git a/nixos/modules/services/x11/display-managers/lightdm-greeters/pantheon.nix b/nixos/modules/services/x11/display-managers/lightdm-greeters/pantheon.nix
new file mode 100644
index 00000000000..05011b999f2
--- /dev/null
+++ b/nixos/modules/services/x11/display-managers/lightdm-greeters/pantheon.nix
@@ -0,0 +1,47 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  dmcfg = config.services.xserver.displayManager;
+  ldmcfg = dmcfg.lightdm;
+  cfg = ldmcfg.greeters.pantheon;
+
+  xgreeters = pkgs.linkFarm "pantheon-greeter-xgreeters" [{
+    path = "${pkgs.pantheon.elementary-greeter}/share/xgreeters/io.elementary.greeter.desktop";
+    name = "io.elementary.greeter.desktop";
+  }];
+
+in
+{
+  options = {
+
+    services.xserver.displayManager.lightdm.greeters.pantheon = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Whether to enable elementary-greeter as the lightdm greeter.
+        '';
+      };
+
+    };
+
+  };
+
+  config = mkIf (ldmcfg.enable && cfg.enable) {
+
+    services.xserver.displayManager.lightdm.greeters.gtk.enable = false;
+
+    services.xserver.displayManager.lightdm.greeter = mkDefault {
+      package = xgreeters;
+      name = "io.elementary.greeter";
+    };
+
+    environment.etc."lightdm/io.elementary.greeter.conf".source = "${pkgs.pantheon.elementary-greeter}/etc/lightdm/io.elementary.greeter.conf";
+    environment.etc."wingpanel.d/io.elementary.greeter.whitelist".source = "${pkgs.pantheon.elementary-default-settings}/etc/wingpanel.d/io.elementary.greeter.whitelist";
+
+  };
+}
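The greeter can also be used on its own, without the full Pantheon desktop (which enables it by default). A minimal sketch:

    services.xserver.displayManager.lightdm.enable = true;
    services.xserver.displayManager.lightdm.greeters.pantheon.enable = true;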
diff --git a/nixos/modules/services/x11/display-managers/lightdm.nix b/nixos/modules/services/x11/display-managers/lightdm.nix
index 567c3ac3454..3ab4f26399f 100644
--- a/nixos/modules/services/x11/display-managers/lightdm.nix
+++ b/nixos/modules/services/x11/display-managers/lightdm.nix
@@ -81,6 +81,7 @@ in
     ./lightdm-greeters/gtk.nix
     ./lightdm-greeters/mini.nix
     ./lightdm-greeters/enso-os.nix
+    ./lightdm-greeters/pantheon.nix
   ];
 
   options = {
diff --git a/nixos/modules/services/x11/display-managers/slim.nix b/nixos/modules/services/x11/display-managers/slim.nix
index 4e411c8ceb0..124660a43f0 100644
--- a/nixos/modules/services/x11/display-managers/slim.nix
+++ b/nixos/modules/services/x11/display-managers/slim.nix
@@ -28,7 +28,7 @@ let
   # Unpack the SLiM theme, or use the default.
   slimThemesDir =
     let
-      unpackedTheme = pkgs.runCommand "slim-theme" {}
+      unpackedTheme = pkgs.runCommand "slim-theme" { preferLocalBuild = true; }
         ''
           mkdir -p $out
           cd $out
diff --git a/nixos/modules/services/x11/gdk-pixbuf.nix b/nixos/modules/services/x11/gdk-pixbuf.nix
index 58faa8e2f9d..2dc8eabd95a 100644
--- a/nixos/modules/services/x11/gdk-pixbuf.nix
+++ b/nixos/modules/services/x11/gdk-pixbuf.nix
@@ -10,7 +10,7 @@ let
 
   # Generate the cache file by running gdk-pixbuf-query-loaders for each
   # package and concatenating the results.
-  loadersCache = pkgs.runCommand "gdk-pixbuf-loaders.cache" {} ''
+  loadersCache = pkgs.runCommand "gdk-pixbuf-loaders.cache" { preferLocalBuild = true; } ''
     (
       for package in ${concatStringsSep " " effectivePackages}; do
         module_dir="$package/${pkgs.gdk_pixbuf.moduleDir}"
diff --git a/nixos/modules/services/x11/xserver.nix b/nixos/modules/services/x11/xserver.nix
index 34ae8c11a3f..d84ab3ced6f 100644
--- a/nixos/modules/services/x11/xserver.nix
+++ b/nixos/modules/services/x11/xserver.nix
@@ -113,6 +113,7 @@ let
     { xfs = optionalString (cfg.useXFS != false)
         ''FontPath "${toString cfg.useXFS}"'';
       inherit (cfg) config;
+      preferLocalBuild = true;
     }
       ''
         echo 'Section "Files"' >> $out
@@ -240,7 +241,7 @@ in
       videoDrivers = mkOption {
         type = types.listOf types.str;
         # !!! We'd like "nv" here, but it segfaults the X server.
-        default = [ "ati" "cirrus" "intel" "vesa" "vmware" "modesetting" ];
+        default = [ "ati" "cirrus" "vesa" "vmware" "modesetting" ];
         example = [
           "ati_unfree" "amdgpu" "amdgpu-pro"
           "nv" "nvidia" "nvidiaLegacy340" "nvidiaLegacy304"
@@ -705,6 +706,7 @@ in
     system.extraDependencies = singleton (pkgs.runCommand "xkb-validated" {
       inherit (cfg) xkbModel layout xkbVariant xkbOptions;
       nativeBuildInputs = [ pkgs.xkbvalidate ];
+      preferLocalBuild = true;
     } ''
       validate "$xkbModel" "$layout" "$xkbVariant" "$xkbOptions"
       touch "$out"
diff --git a/nixos/modules/system/boot/stage-1.nix b/nixos/modules/system/boot/stage-1.nix
index 5e27b24ac44..9984a97bbdd 100644
--- a/nixos/modules/system/boot/stage-1.nix
+++ b/nixos/modules/system/boot/stage-1.nix
@@ -196,9 +196,10 @@ let
     ''; # */
 
 
-  udevRules = pkgs.runCommand "udev-rules"
-    { allowedReferences = [ extraUtils ]; }
-    ''
+  udevRules = pkgs.runCommand "udev-rules" {
+      allowedReferences = [ extraUtils ];
+      preferLocalBuild = true;
+    } ''
       mkdir -p $out
 
       echo 'ENV{LD_LIBRARY_PATH}="${extraUtils}/lib"' > $out/00-env.rules
@@ -298,9 +299,10 @@ let
         { object = pkgs.writeText "mdadm.conf" config.boot.initrd.mdadmConf;
           symlink = "/etc/mdadm.conf";
         }
-        { object = pkgs.runCommand "initrd-kmod-blacklist-ubuntu"
-            { src = "${pkgs.kmod-blacklist-ubuntu}/modprobe.conf"; }
-            ''
+        { object = pkgs.runCommand "initrd-kmod-blacklist-ubuntu" {
+              src = "${pkgs.kmod-blacklist-ubuntu}/modprobe.conf";
+              preferLocalBuild = true;
+            } ''
               target=$out
               ${pkgs.buildPackages.perl}/bin/perl -0pe 's/## file: iwlwifi.conf(.+?)##/##/s;' $src > $out
             '';
diff --git a/nixos/modules/system/boot/systemd.nix b/nixos/modules/system/boot/systemd.nix
index 58812bf33d9..18ee2ef1b8f 100644
--- a/nixos/modules/system/boot/systemd.nix
+++ b/nixos/modules/system/boot/systemd.nix
@@ -593,7 +593,7 @@ in
 
     services.journald.forwardToSyslog = mkOption {
       default = config.services.rsyslogd.enable || config.services.syslog-ng.enable;
-      defaultText = "config.services.rsyslogd.enable || config.services.syslog-ng.enable";
+      defaultText = "services.rsyslogd.enable || services.syslog-ng.enable";
       type = types.bool;
       description = ''
         Whether to forward log messages to syslog.
@@ -650,6 +650,19 @@ in
       '';
     };
 
+    services.logind.lidSwitchExternalPower = mkOption {
+      default = config.services.logind.lidSwitch;
+      defaultText = "services.logind.lidSwitch";
+      example = "ignore";
+      type = logindHandlerType;
+
+      description = ''
+        Specifies what to do when the laptop lid is closed and the system is
+        on external power. By default, the same action is used as for
+        <option>services.logind.lidSwitch</option>.
+      '';
+    };
+
     systemd.user.extraConfig = mkOption {
       default = "";
       type = types.lines;
@@ -747,7 +760,10 @@ in
     environment.etc = let
       # generate contents for /etc/systemd/system-generators from
       # systemd.generators and systemd.generator-packages
-      generators = pkgs.runCommand "system-generators" { packages = cfg.generator-packages; } ''
+      generators = pkgs.runCommand "system-generators" {
+          preferLocalBuild = true;
+          packages = cfg.generator-packages;
+        } ''
         mkdir -p $out
         for package in $packages
         do
@@ -797,6 +813,7 @@ in
         KillUserProcesses=${if config.services.logind.killUserProcesses then "yes" else "no"}
         HandleLidSwitch=${config.services.logind.lidSwitch}
         HandleLidSwitchDocked=${config.services.logind.lidSwitchDocked}
+        HandleLidSwitchExternalPower=${config.services.logind.lidSwitchExternalPower}
         ${config.services.logind.extraConfig}
       '';
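A minimal sketch of the new option in use, keeping a docked laptop awake while on AC but suspending on battery:

    services.logind.lidSwitch = "suspend";
    services.logind.lidSwitchExternalPower = "ignore";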
 
diff --git a/nixos/modules/system/etc/make-etc.sh b/nixos/modules/system/etc/make-etc.sh
index 1ca4c3046f0..9c0520e92fc 100644
--- a/nixos/modules/system/etc/make-etc.sh
+++ b/nixos/modules/system/etc/make-etc.sh
@@ -10,6 +10,11 @@ users_=($users)
 groups_=($groups)
 set +f
 
+# Create relative symlinks, so that the links can be followed if
+# the NixOS installation is not mounted as filesystem root.
+# Absolute symlinks violate the os-release format
+# at https://www.freedesktop.org/software/systemd/man/os-release.html
+# and break e.g. systemd-nspawn and os-prober.
 for ((i = 0; i < ${#targets_[@]}; i++)); do
     source="${sources_[$i]}"
     target="${targets_[$i]}"
@@ -19,14 +24,14 @@ for ((i = 0; i < ${#targets_[@]}; i++)); do
         # If the source name contains '*', perform globbing.
         mkdir -p $out/etc/$target
         for fn in $source; do
-            ln -s "$fn" $out/etc/$target/
+            ln -s --relative "$fn" $out/etc/$target/
         done
 
     else
-        
+
         mkdir -p $out/etc/$(dirname $target)
         if ! [ -e $out/etc/$target ]; then
-            ln -s $source $out/etc/$target
+            ln -s --relative $source $out/etc/$target
         else
             echo "duplicate entry $target -> $source"
             if test "$(readlink $out/etc/$target)" != "$source"; then
@@ -34,13 +39,13 @@ for ((i = 0; i < ${#targets_[@]}; i++)); do
                 exit 1
             fi
         fi
-        
+
         if test "${modes_[$i]}" != symlink; then
             echo "${modes_[$i]}"  > $out/etc/$target.mode
             echo "${users_[$i]}"  > $out/etc/$target.uid
             echo "${groups_[$i]}" > $out/etc/$target.gid
         fi
-        
+
     fi
 done
 
diff --git a/nixos/modules/system/etc/setup-etc.pl b/nixos/modules/system/etc/setup-etc.pl
index eed20065087..82ef49a2a27 100644
--- a/nixos/modules/system/etc/setup-etc.pl
+++ b/nixos/modules/system/etc/setup-etc.pl
@@ -4,6 +4,7 @@ use File::Copy;
 use File::Path;
 use File::Basename;
 use File::Slurp;
+use File::Spec;
 
 my $etc = $ARGV[0] or die;
 my $static = "/etc/static";
@@ -17,6 +18,20 @@ sub atomicSymlink {
     return 1;
 }
 
+# Create relative symlinks, so that the links can be followed if
+# the NixOS installation is not mounted as filesystem root.
+# Absolute symlinks violate the os-release format
+# at https://www.freedesktop.org/software/systemd/man/os-release.html
+# and break e.g. systemd-nspawn and os-prober.
+sub atomicRelativeSymlink {
+    my ($source, $target) = @_;
+    my $tmp = "$target.tmp";
+    unlink $tmp;
+    my $rel = File::Spec->abs2rel($source, dirname $target);
+    symlink $rel, $tmp or return 0;
+    rename $tmp, $target or return 0;
+    return 1;
+}
 
 # Atomically update /etc/static to point at the etc files of the
 # current configuration.
@@ -103,7 +118,7 @@ sub link {
     if (-e "$_.mode") {
         my $mode = read_file("$_.mode"); chomp $mode;
         if ($mode eq "direct-symlink") {
-            atomicSymlink readlink("$static/$fn"), $target or warn;
+            atomicRelativeSymlink readlink("$static/$fn"), $target or warn;
         } else {
             my $uid = read_file("$_.uid"); chomp $uid;
             my $gid = read_file("$_.gid"); chomp $gid;
@@ -117,7 +132,7 @@ sub link {
         push @copied, $fn;
         print CLEAN "$fn\n";
     } elsif (-l "$_") {
-        atomicSymlink "$static/$fn", $target or warn;
+        atomicRelativeSymlink "$static/$fn", $target or warn;
     }
 }
 
diff --git a/nixos/modules/tasks/filesystems/vboxsf.nix b/nixos/modules/tasks/filesystems/vboxsf.nix
index 87f1984f084..5497194f6a8 100644
--- a/nixos/modules/tasks/filesystems/vboxsf.nix
+++ b/nixos/modules/tasks/filesystems/vboxsf.nix
@@ -6,7 +6,7 @@ let
 
   inInitrd = any (fs: fs == "vboxsf") config.boot.initrd.supportedFilesystems;
 
-  package = pkgs.runCommand "mount.vboxsf" {} ''
+  package = pkgs.runCommand "mount.vboxsf" { preferLocalBuild = true; } ''
     mkdir -p $out/bin
     cp ${pkgs.linuxPackages.virtualboxGuestAdditions}/bin/mount.vboxsf $out/bin
   '';
diff --git a/nixos/modules/tasks/kbd.nix b/nixos/modules/tasks/kbd.nix
index fbe42b8e8f0..6d34f897d18 100644
--- a/nixos/modules/tasks/kbd.nix
+++ b/nixos/modules/tasks/kbd.nix
@@ -15,6 +15,7 @@ let
   optimizedKeymap = pkgs.runCommand "keymap" {
     nativeBuildInputs = [ pkgs.buildPackages.kbd ];
     LOADKEYS_KEYMAP_PATH = "${kbdEnv}/share/keymaps/**";
+    preferLocalBuild = true;
   } ''
     loadkeys -b ${optionalString isUnicode "-u"} "${config.i18n.consoleKeyMap}" > $out
   '';
diff --git a/nixos/modules/tasks/network-interfaces.nix b/nixos/modules/tasks/network-interfaces.nix
index 815523093dd..f9b0eb330bf 100644
--- a/nixos/modules/tasks/network-interfaces.nix
+++ b/nixos/modules/tasks/network-interfaces.nix
@@ -995,7 +995,7 @@ in
       '';
 
     environment.etc."hostid" = mkIf (cfg.hostId != null)
-      { source = pkgs.runCommand "gen-hostid" {} ''
+      { source = pkgs.runCommand "gen-hostid" { preferLocalBuild = true; } ''
           hi="${cfg.hostId}"
           ${if pkgs.stdenv.isBigEndian then ''
             echo -ne "\x''${hi:0:2}\x''${hi:2:2}\x''${hi:4:2}\x''${hi:6:2}" > $out
diff --git a/nixos/modules/virtualisation/cloudstack-config.nix b/nixos/modules/virtualisation/cloudstack-config.nix
new file mode 100644
index 00000000000..81c54567627
--- /dev/null
+++ b/nixos/modules/virtualisation/cloudstack-config.nix
@@ -0,0 +1,40 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+{
+  imports = [
+    ../profiles/qemu-guest.nix
+  ];
+
+  config = {
+    fileSystems."/" = {
+      device = "/dev/disk/by-label/nixos";
+      autoResize = true;
+    };
+
+    boot.growPartition = true;
+    boot.kernelParams = [ "console=tty0" ];
+    boot.loader.grub.device = "/dev/vda";
+    boot.loader.timeout = 0;
+
+    # Allow root logins
+    services.openssh = {
+      enable = true;
+      permitRootLogin = "prohibit-password";
+    };
+
+    # Cloud-init configuration.
+    services.cloud-init.enable = true;
+    # Wget is needed for setting the password. This is of little use, as
+    # password-based root login is disabled above.
+    environment.systemPackages = [ pkgs.wget ];
+    # Enable only the CloudStack datasource, for faster boot.
+    environment.etc."cloud/cloud.cfg.d/99_cloudstack.cfg".text = ''
+      datasource:
+        CloudStack: {}
+        None: {}
+      datasource_list: ["CloudStack"]
+    '';
+  };
+}
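A guest configuration can import this profile directly. A minimal sketch, assuming the module is referenced through the standard modulesPath module argument:

    { modulesPath, ... }:
    {
      imports = [ "${modulesPath}/virtualisation/cloudstack-config.nix" ];
    }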
diff --git a/nixos/modules/virtualisation/vmware-guest.nix b/nixos/modules/virtualisation/vmware-guest.nix
index 15c78f14c52..d18778f8158 100644
--- a/nixos/modules/virtualisation/vmware-guest.nix
+++ b/nixos/modules/virtualisation/vmware-guest.nix
@@ -3,19 +3,17 @@
 with lib;
 
 let
-  cfg = config.services.vmwareGuest;
+  cfg = config.virtualisation.vmware.guest;
   open-vm-tools = if cfg.headless then pkgs.open-vm-tools-headless else pkgs.open-vm-tools;
   xf86inputvmmouse = pkgs.xorg.xf86inputvmmouse;
 in
 {
-  options = {
-    services.vmwareGuest = {
-      enable = mkEnableOption "VMWare Guest Support";
-      headless = mkOption {
-        type = types.bool;
-        default = false;
-        description = "Whether to disable X11-related features.";
-      };
+  options.virtualisation.vmware.guest = {
+    enable = mkEnableOption "VMWare Guest Support";
+    headless = mkOption {
+      type = types.bool;
+      default = false;
+      description = "Whether to disable X11-related features.";
     };
   };
 
@@ -25,6 +23,8 @@ in
       message = "VMWare guest is not currently supported on ${pkgs.stdenv.hostPlatform.system}";
     } ];
 
+    boot.initrd.kernelModules = [ "vmw_pvscsi" ];
+
     environment.systemPackages = [ open-vm-tools ];
 
     systemd.services.vmware =
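Configurations using the old option path need a one-line change:

    # before: services.vmwareGuest.enable = true;
    virtualisation.vmware.guest.enable = true;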
diff --git a/nixos/release-combined.nix b/nixos/release-combined.nix
index ea8b92e94f0..6c313f8dd3e 100644
--- a/nixos/release-combined.nix
+++ b/nixos/release-combined.nix
@@ -68,6 +68,7 @@ in rec {
         (all nixos.tests.firefox)
         (all nixos.tests.firewall)
         (except ["aarch64-linux"] nixos.tests.gnome3)
+        (except ["aarch64-linux"] nixos.tests.pantheon)
         nixos.tests.installer.zfsroot.x86_64-linux or [] # ZFS is 64bit only
         (except ["aarch64-linux"] nixos.tests.installer.lvm)
         (except ["aarch64-linux"] nixos.tests.installer.luksroot)
diff --git a/nixos/tests/all-tests.nix b/nixos/tests/all-tests.nix
index 7e207fa419f..65227857a38 100644
--- a/nixos/tests/all-tests.nix
+++ b/nixos/tests/all-tests.nix
@@ -25,6 +25,7 @@ in
   atd = handleTest ./atd.nix {};
   avahi = handleTest ./avahi.nix {};
   bcachefs = handleTestOn ["x86_64-linux"] ./bcachefs.nix {}; # linux-4.18.2018.10.12 is unsupported on aarch64
+  beanstalkd = handleTest ./beanstalkd.nix {};
   beegfs = handleTestOn ["x86_64-linux"] ./beegfs.nix {}; # beegfs is unsupported on aarch64
   bind = handleTest ./bind.nix {};
   bittorrent = handleTest ./bittorrent.nix {};
@@ -73,6 +74,7 @@ in
   ferm = handleTest ./ferm.nix {};
   firefox = handleTest ./firefox.nix {};
   firewall = handleTest ./firewall.nix {};
+  flannel = handleTestOn ["x86_64-linux"] ./flannel.nix {};
   flatpak = handleTest ./flatpak.nix {};
   fsck = handleTest ./fsck.nix {};
   fwupd = handleTestOn ["x86_64-linux"] ./fwupd.nix {}; # libsmbios is unsupported on aarch64
@@ -171,6 +173,7 @@ in
   ostree = handleTest ./ostree.nix {};
   pam-oath-login = handleTest ./pam-oath-login.nix {};
   pam-u2f = handleTest ./pam-u2f.nix {};
+  pantheon = handleTest ./pantheon.nix {};
   peerflix = handleTest ./peerflix.nix {};
   pgjwt = handleTest ./pgjwt.nix {};
   pgmanage = handleTest ./pgmanage.nix {};
diff --git a/nixos/tests/avahi.nix b/nixos/tests/avahi.nix
index dfb60998941..56b21a40155 100644
--- a/nixos/tests/avahi.nix
+++ b/nixos/tests/avahi.nix
@@ -2,7 +2,7 @@
 import ./make-test.nix ({ pkgs, ... } : {
   name = "avahi";
   meta = with pkgs.stdenv.lib.maintainers; {
-    maintainers = [ eelco chaoflow ];
+    maintainers = [ eelco ];
   };
 
   nodes = let
diff --git a/nixos/tests/beanstalkd.nix b/nixos/tests/beanstalkd.nix
new file mode 100644
index 00000000000..fa2fbc2c92a
--- /dev/null
+++ b/nixos/tests/beanstalkd.nix
@@ -0,0 +1,45 @@
+import ./make-test.nix ({ pkgs, lib, ... }:
+
+let
+  pythonEnv = pkgs.python3.withPackages (p: [p.beanstalkc]);
+
+  produce = pkgs.writeScript "produce.py" ''
+    #!${pythonEnv.interpreter}
+    import beanstalkc
+
+    queue = beanstalkc.Connection(host='localhost', port=11300, parse_yaml=False)
+    queue.put(b'this is a job')
+    queue.put(b'this is another job')
+  '';
+
+  consume = pkgs.writeScript "consume.py" ''
+    #!${pythonEnv.interpreter}
+    import beanstalkc
+
+    queue = beanstalkc.Connection(host='localhost', port=11300, parse_yaml=False)
+
+    job = queue.reserve(timeout=0)
+    print(job.body.decode('utf-8'))
+    job.delete()
+  '';
+
+in
+{
+  name = "beanstalkd";
+  meta.maintainers = [ lib.maintainers.aanderse ];
+
+  machine =
+    { ... }:
+    { services.beanstalkd.enable = true;
+    };
+
+  testScript = ''
+    startAll;
+
+    $machine->waitForUnit('beanstalkd.service');
+
+    $machine->succeed("${produce}");
+    $machine->succeed("${consume}") eq "this is a job\n" or die;
+    $machine->succeed("${consume}") eq "this is another job\n" or die;
+  '';
+})
diff --git a/nixos/tests/bittorrent.nix b/nixos/tests/bittorrent.nix
index 27871f72b4e..3b1169a1b7f 100644
--- a/nixos/tests/bittorrent.nix
+++ b/nixos/tests/bittorrent.nix
@@ -23,7 +23,7 @@ in
 {
   name = "bittorrent";
   meta = with pkgs.stdenv.lib.maintainers; {
-    maintainers = [ domenkozar eelco chaoflow rob bobvanderlinden ];
+    maintainers = [ domenkozar eelco rob bobvanderlinden ];
   };
 
   nodes =
diff --git a/nixos/tests/containers-bridge.nix b/nixos/tests/containers-bridge.nix
index 0eae51433d2..38db64eb793 100644
--- a/nixos/tests/containers-bridge.nix
+++ b/nixos/tests/containers-bridge.nix
@@ -10,7 +10,7 @@ in
 import ./make-test.nix ({ pkgs, ...} : {
   name = "containers-bridge";
   meta = with pkgs.stdenv.lib.maintainers; {
-    maintainers = [ aristid aszlig eelco chaoflow kampfschlaefer ];
+    maintainers = [ aristid aszlig eelco kampfschlaefer ];
   };
 
   machine =
diff --git a/nixos/tests/containers-imperative.nix b/nixos/tests/containers-imperative.nix
index 782095a09da..0c101037aa7 100644
--- a/nixos/tests/containers-imperative.nix
+++ b/nixos/tests/containers-imperative.nix
@@ -3,7 +3,7 @@
 import ./make-test.nix ({ pkgs, ...} : {
   name = "containers-imperative";
   meta = with pkgs.stdenv.lib.maintainers; {
-    maintainers = [ aristid aszlig eelco chaoflow kampfschlaefer ];
+    maintainers = [ aristid aszlig eelco kampfschlaefer ];
   };
 
   machine =
diff --git a/nixos/tests/containers-ipv4.nix b/nixos/tests/containers-ipv4.nix
index 5f83a33b107..ace68ff2df8 100644
--- a/nixos/tests/containers-ipv4.nix
+++ b/nixos/tests/containers-ipv4.nix
@@ -3,7 +3,7 @@
 import ./make-test.nix ({ pkgs, ...} : {
   name = "containers-ipv4";
   meta = with pkgs.stdenv.lib.maintainers; {
-    maintainers = [ aristid aszlig eelco chaoflow kampfschlaefer ];
+    maintainers = [ aristid aszlig eelco kampfschlaefer ];
   };
 
   machine =
diff --git a/nixos/tests/containers-ipv6.nix b/nixos/tests/containers-ipv6.nix
index 5866e51b731..a9499d192bd 100644
--- a/nixos/tests/containers-ipv6.nix
+++ b/nixos/tests/containers-ipv6.nix
@@ -8,7 +8,7 @@ in
 import ./make-test.nix ({ pkgs, ...} : {
   name = "containers-ipv6";
   meta = with pkgs.stdenv.lib.maintainers; {
-    maintainers = [ aristid aszlig eelco chaoflow kampfschlaefer ];
+    maintainers = [ aristid aszlig eelco kampfschlaefer ];
   };
 
   machine =
diff --git a/nixos/tests/containers-portforward.nix b/nixos/tests/containers-portforward.nix
index d2dda926fc0..ec8e9629c21 100644
--- a/nixos/tests/containers-portforward.nix
+++ b/nixos/tests/containers-portforward.nix
@@ -10,7 +10,7 @@ in
 import ./make-test.nix ({ pkgs, ...} : {
   name = "containers-portforward";
   meta = with pkgs.stdenv.lib.maintainers; {
-    maintainers = [ aristid aszlig eelco chaoflow kampfschlaefer ianwookim ];
+    maintainers = [ aristid aszlig eelco kampfschlaefer ianwookim ];
   };
 
   machine =
diff --git a/nixos/tests/elk.nix b/nixos/tests/elk.nix
index d787ac97300..e7ae023f3ff 100644
--- a/nixos/tests/elk.nix
+++ b/nixos/tests/elk.nix
@@ -13,7 +13,7 @@ let
   mkElkTest = name : elk : makeTest {
     inherit name;
     meta = with pkgs.stdenv.lib.maintainers; {
-      maintainers = [ eelco chaoflow offline basvandijk ];
+      maintainers = [ eelco offline basvandijk ];
     };
     nodes = {
       one =
diff --git a/nixos/tests/firefox.nix b/nixos/tests/firefox.nix
index 58a80243ea9..f5b946a0881 100644
--- a/nixos/tests/firefox.nix
+++ b/nixos/tests/firefox.nix
@@ -1,7 +1,7 @@
 import ./make-test.nix ({ pkgs, ... }: {
   name = "firefox";
   meta = with pkgs.stdenv.lib.maintainers; {
-    maintainers = [ eelco chaoflow shlevy ];
+    maintainers = [ eelco shlevy ];
   };
 
   machine =
diff --git a/nixos/tests/firewall.nix b/nixos/tests/firewall.nix
index 7207a880d8e..fcf758910e0 100644
--- a/nixos/tests/firewall.nix
+++ b/nixos/tests/firewall.nix
@@ -3,7 +3,7 @@
 import ./make-test.nix ( { pkgs, ... } : {
   name = "firewall";
   meta = with pkgs.stdenv.lib.maintainers; {
-    maintainers = [ eelco chaoflow ];
+    maintainers = [ eelco ];
   };
 
   nodes =
diff --git a/nixos/tests/flannel.nix b/nixos/tests/flannel.nix
index fb66fe28209..0b261a68477 100644
--- a/nixos/tests/flannel.nix
+++ b/nixos/tests/flannel.nix
@@ -21,8 +21,9 @@ import ./make-test.nix ({ pkgs, ...} : rec {
       services = {
         etcd = {
           enable = true;
-          listenClientUrls = ["http://etcd:2379"];
-          listenPeerUrls = ["http://etcd:2380"];
+          listenClientUrls = ["http://0.0.0.0:2379"]; # requires an IP address for binding
+          listenPeerUrls = ["http://0.0.0.0:2380"]; # requires an IP address for binding
+          advertiseClientUrls = ["http://etcd:2379"];
           initialAdvertisePeerUrls = ["http://etcd:2379"];
           initialCluster = ["etcd=http://etcd:2379"];
         };
diff --git a/nixos/tests/gnome3.nix b/nixos/tests/gnome3.nix
index 95694ea4828..b58c9e5a0e3 100644
--- a/nixos/tests/gnome3.nix
+++ b/nixos/tests/gnome3.nix
@@ -1,7 +1,7 @@
 import ./make-test.nix ({ pkgs, ...} : {
   name = "gnome3";
   meta = with pkgs.stdenv.lib.maintainers; {
-    maintainers = [ domenkozar eelco chaoflow lethalman ];
+    maintainers = [ domenkozar eelco lethalman ];
   };
 
   machine =
diff --git a/nixos/tests/home-assistant.nix b/nixos/tests/home-assistant.nix
index a93360b252f..8def0a6f9b9 100644
--- a/nixos/tests/home-assistant.nix
+++ b/nixos/tests/home-assistant.nix
@@ -87,8 +87,8 @@ in {
     $hass->succeed("curl http://localhost:8123/api/states/binary_sensor.mqtt_binary_sensor -H 'x-ha-access: ${apiPassword}' | grep -qF '\"state\": \"on\"'");
 
     # Toggle a binary sensor using hass-cli
-    $hass->succeed("${hassCli} --output json entity get binary_sensor.mqtt_binary_sensor | grep -qF '\"state\": \"on\"'");
-    $hass->succeed("${hassCli} entity edit binary_sensor.mqtt_binary_sensor --json='{\"state\": \"off\"}'");
+    $hass->succeed("${hassCli} --output json state get binary_sensor.mqtt_binary_sensor | grep -qF '\"state\": \"on\"'");
+    $hass->succeed("${hassCli} state edit binary_sensor.mqtt_binary_sensor --json='{\"state\": \"off\"}'");
     $hass->succeed("curl http://localhost:8123/api/states/binary_sensor.mqtt_binary_sensor -H 'x-ha-access: ${apiPassword}' | grep -qF '\"state\": \"off\"'");
 
     # Print log to ease debugging
diff --git a/nixos/tests/influxdb.nix b/nixos/tests/influxdb.nix
index 440049d9511..61201202204 100644
--- a/nixos/tests/influxdb.nix
+++ b/nixos/tests/influxdb.nix
@@ -3,7 +3,7 @@
 import ./make-test.nix ({ pkgs, ...} : {
   name = "influxdb";
   meta = with pkgs.stdenv.lib.maintainers; {
-    maintainers = [ chaoflow offline ];
+    maintainers = [ offline ];
   };
 
   nodes = {
diff --git a/nixos/tests/ipv6.nix b/nixos/tests/ipv6.nix
index 97f348a9bee..14f24c29cfe 100644
--- a/nixos/tests/ipv6.nix
+++ b/nixos/tests/ipv6.nix
@@ -4,7 +4,7 @@
 import ./make-test.nix ({ pkgs, ...} : {
   name = "ipv6";
   meta = with pkgs.stdenv.lib.maintainers; {
-    maintainers = [ eelco chaoflow ];
+    maintainers = [ eelco ];
   };
 
   nodes =
diff --git a/nixos/tests/jenkins.nix b/nixos/tests/jenkins.nix
index 4f2d2085cd1..a6eec411ff2 100644
--- a/nixos/tests/jenkins.nix
+++ b/nixos/tests/jenkins.nix
@@ -6,7 +6,7 @@
 import ./make-test.nix ({ pkgs, ...} : {
   name = "jenkins";
   meta = with pkgs.stdenv.lib.maintainers; {
-    maintainers = [ bjornfor coconnor domenkozar eelco chaoflow ];
+    maintainers = [ bjornfor coconnor domenkozar eelco ];
   };
 
   nodes = {
diff --git a/nixos/tests/kexec.nix b/nixos/tests/kexec.nix
index db596189d46..b13b4131091 100644
--- a/nixos/tests/kexec.nix
+++ b/nixos/tests/kexec.nix
@@ -3,7 +3,7 @@
 import ./make-test.nix ({ pkgs, ...} : {
   name = "kexec";
   meta = with pkgs.stdenv.lib.maintainers; {
-    maintainers = [ eelco chaoflow ];
+    maintainers = [ eelco ];
   };
 
   machine = { ... }:
diff --git a/nixos/tests/kubernetes/base.nix b/nixos/tests/kubernetes/base.nix
index 9d77be13175..ec1a75e74c4 100644
--- a/nixos/tests/kubernetes/base.nix
+++ b/nixos/tests/kubernetes/base.nix
@@ -10,7 +10,6 @@ let
   mkKubernetesBaseTest =
     { name, domain ? "my.zyx", test, machines
     , pkgs ? import <nixpkgs> { inherit system; }
-    , certs ? import ./certs.nix { inherit pkgs; externalDomain = domain; kubelets = attrNames machines; }
     , extraConfiguration ? null }:
     let
       masterName = head (filter (machineName: any (role: role == "master") machines.${machineName}.roles) (attrNames machines));
@@ -20,6 +19,10 @@ let
         ${master.ip}  api.${domain}
         ${concatMapStringsSep "\n" (machineName: "${machines.${machineName}.ip}  ${machineName}.${domain}") (attrNames machines)}
       '';
+      kubectl = with pkgs; runCommand "wrap-kubectl" { buildInputs = [ makeWrapper ]; } ''
+        mkdir -p $out/bin
+        makeWrapper ${pkgs.kubernetes}/bin/kubectl $out/bin/kubectl --set KUBECONFIG "/etc/kubernetes/cluster-admin.kubeconfig"
+      '';
     in makeTest {
       inherit name;
 
@@ -27,6 +30,7 @@ let
         { config, pkgs, lib, nodes, ... }:
           mkMerge [
             {
+              boot.postBootCommands = "rm -fr /var/lib/kubernetes/secrets /tmp/shared/*";
               virtualisation.memorySize = mkDefault 1536;
               virtualisation.diskSize = mkDefault 4096;
               networking = {
@@ -45,34 +49,25 @@ let
                 };
               };
               programs.bash.enableCompletion = true;
-              environment.variables = {
-                ETCDCTL_CERT_FILE = "${certs.worker}/etcd-client.pem";
-                ETCDCTL_KEY_FILE = "${certs.worker}/etcd-client-key.pem";
-                ETCDCTL_CA_FILE = "${certs.worker}/ca.pem";
-                ETCDCTL_PEERS = "https://etcd.${domain}:2379";
-              };
+              environment.systemPackages = [ kubectl ];
               services.flannel.iface = "eth1";
-              services.kubernetes.apiserver.advertiseAddress = master.ip;
+              services.kubernetes = {
+                addons.dashboard.enable = true;
+
+                easyCerts = true;
+                inherit (machine) roles;
+                apiserver = {
+                  securePort = 443;
+                  advertiseAddress = master.ip;
+                };
+                masterAddress = "${masterName}.${config.networking.domain}";
+              };
             }
             (optionalAttrs (any (role: role == "master") machine.roles) {
               networking.firewall.allowedTCPPorts = [
-                2379 2380  # etcd
                 443 # kubernetes apiserver
               ];
-              services.etcd = {
-                enable = true;
-                certFile = "${certs.master}/etcd.pem";
-                keyFile = "${certs.master}/etcd-key.pem";
-                trustedCaFile = "${certs.master}/ca.pem";
-                peerClientCertAuth = true;
-                listenClientUrls = ["https://0.0.0.0:2379"];
-                listenPeerUrls = ["https://0.0.0.0:2380"];
-                advertiseClientUrls = ["https://etcd.${config.networking.domain}:2379"];
-                initialCluster = ["${masterName}=https://etcd.${config.networking.domain}:2380"];
-                initialAdvertisePeerUrls = ["https://etcd.${config.networking.domain}:2380"];
-              };
             })
-            (import ./kubernetes-common.nix { inherit (machine) roles; inherit pkgs config certs; })
             (optionalAttrs (machine ? "extraConfiguration") (machine.extraConfiguration { inherit config pkgs lib nodes; }))
             (optionalAttrs (extraConfiguration != null) (extraConfiguration { inherit config pkgs lib nodes; }))
           ]
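Outside the test framework, the same easyCerts-based cluster setup reduces to a few options. A minimal sketch for a combined master/node machine (the master address and advertise IP are placeholders):

    services.kubernetes = {
      roles = [ "master" "node" ];
      masterAddress = "kubernetes.example.org";
      easyCerts = true;
      apiserver.advertiseAddress = "192.168.1.1";
      addons.dashboard.enable = true;
    };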
diff --git a/nixos/tests/kubernetes/certs.nix b/nixos/tests/kubernetes/certs.nix
deleted file mode 100644
index 85e92f6330c..00000000000
--- a/nixos/tests/kubernetes/certs.nix
+++ /dev/null
@@ -1,219 +0,0 @@
-{
-  pkgs ? import <nixpkgs> {},
-  externalDomain ? "myawesomecluster.cluster.yourdomain.net",
-  serviceClusterIp ? "10.0.0.1",
-  kubelets,
-  ...
-}:
-let
-   runWithCFSSL = name: cmd:
-     let secrets = pkgs.runCommand "${name}-cfss.json" {
-         buildInputs = [ pkgs.cfssl pkgs.jq ];
-         outputs = [ "out" "cert" "key" "csr" ];
-       }
-       ''
-         (
-           echo "${cmd}"
-           cfssl ${cmd} > tmp
-           cat tmp | jq -r .key > $key
-           cat tmp | jq -r .cert > $cert
-           cat tmp | jq -r .csr > $csr
-
-           touch $out
-         ) 2>&1 | fold -w 80 -s
-       '';
-     in {
-       key = secrets.key;
-       cert = secrets.cert;
-       csr = secrets.csr;
-     };
-
-   writeCFSSL = content:
-     pkgs.runCommand content.name {
-      buildInputs = [ pkgs.cfssl pkgs.jq ];
-     } ''
-       mkdir -p $out
-       cd $out
-
-       json=${pkgs.lib.escapeShellArg (builtins.toJSON content)}
-
-       # for a given $field in the $json, treat the associated value as a
-       # file path and substitute the contents thereof into the $json
-       # object.
-       expandFileField() {
-         local field=$1
-         if jq -e --arg field "$field" 'has($field)'; then
-           local path="$(echo "$json" | jq -r ".$field")"
-           json="$(echo "$json" | jq --arg val "$(cat "$path")" ".$field = \$val")"
-         fi
-       }
-
-       expandFileField key
-       expandFileField ca
-       expandFileField cert
-
-       echo "$json" | cfssljson -bare ${content.name}
-     '';
-
-  noCSR = content: pkgs.lib.filterAttrs (n: v: n != "csr") content;
-  noKey = content: pkgs.lib.filterAttrs (n: v: n != "key") content;
-
-  writeFile = content:
-    if pkgs.lib.isDerivation content
-    then content
-    else pkgs.writeText "content" (builtins.toJSON content);
-
-  createServingCertKey = { ca, cn, hosts? [], size ? 2048, name ? cn }:
-    noCSR (
-      (runWithCFSSL name "gencert -ca=${writeFile ca.cert} -ca-key=${writeFile ca.key} -profile=server -config=${writeFile ca.config} ${writeFile {
-        CN = cn;
-        hosts = hosts;
-        key = { algo = "rsa"; inherit size; };
-      }}") // { inherit name; }
-    );
-
-  createClientCertKey = { ca, cn, groups ? [], size ? 2048, name ? cn }:
-    noCSR (
-      (runWithCFSSL name "gencert -ca=${writeFile ca.cert} -ca-key=${writeFile ca.key} -profile=client -config=${writeFile ca.config} ${writeFile {
-        CN = cn;
-        names = map (group: {O = group;}) groups;
-        hosts = [""];
-        key = { algo = "rsa"; inherit size; };
-      }}") // { inherit name; }
-    );
-
-  createSigningCertKey = { C ? "xx", ST ? "x", L ? "x", O ? "x", OU ? "x", CN ? "ca", emailAddress ? "x", expiry ? "43800h", size ? 2048, name ? CN }:
-    (noCSR (runWithCFSSL CN "genkey -initca ${writeFile {
-      key = { algo = "rsa"; inherit size; };
-      names = [{ inherit C ST L O OU CN emailAddress; }];
-    }}")) // {
-      inherit name;
-      config.signing = {
-        default.expiry = expiry;
-        profiles = {
-          server = {
-            inherit expiry;
-            usages = [
-              "signing"
-              "key encipherment"
-              "server auth"
-            ];
-          };
-          client = {
-            inherit expiry;
-            usages = [
-              "signing"
-              "key encipherment"
-              "client auth"
-            ];
-          };
-          peer = {
-            inherit expiry;
-            usages = [
-              "signing"
-              "key encipherment"
-              "server auth"
-              "client auth"
-            ];
-          };
-        };
-      };
-    };
-
-  ca = createSigningCertKey {};
-
-  kube-apiserver = createServingCertKey {
-    inherit ca;
-    cn = "kube-apiserver";
-    hosts = ["kubernetes.default" "kubernetes.default.svc" "localhost" "api.${externalDomain}" serviceClusterIp];
-  };
-
-  kubelet = createServingCertKey {
-    inherit ca;
-    cn = "kubelet";
-    hosts = ["*.${externalDomain}"];
-  };
-
-  service-accounts = createServingCertKey {
-    inherit ca;
-    cn = "kube-service-accounts";
-  };
-
-  etcd = createServingCertKey {
-    inherit ca;
-    cn = "etcd";
-    hosts = ["etcd.${externalDomain}"];
-  };
-
-  etcd-client = createClientCertKey {
-    inherit ca;
-    cn = "etcd-client";
-  };
-
-  kubelet-client = createClientCertKey {
-    inherit ca;
-    cn = "kubelet-client";
-    groups = ["system:masters"];
-  };
-
-  apiserver-client = {
-    kubelet = hostname: createClientCertKey {
-      inherit ca;
-      name = "apiserver-client-kubelet-${hostname}";
-      cn = "system:node:${hostname}.${externalDomain}";
-      groups = ["system:nodes"];
-    };
-
-    kube-proxy = createClientCertKey {
-      inherit ca;
-      name = "apiserver-client-kube-proxy";
-      cn = "system:kube-proxy";
-      groups = ["system:kube-proxy" "system:nodes"];
-    };
-
-    kube-controller-manager = createClientCertKey {
-      inherit ca;
-      name = "apiserver-client-kube-controller-manager";
-      cn = "system:kube-controller-manager";
-      groups = ["system:masters"];
-    };
-
-    kube-scheduler = createClientCertKey {
-      inherit ca;
-      name = "apiserver-client-kube-scheduler";
-      cn = "system:kube-scheduler";
-      groups = ["system:kube-scheduler"];
-    };
-
-    admin = createClientCertKey {
-      inherit ca;
-      cn = "admin";
-      groups = ["system:masters"];
-    };
-  };
-in {
-  master = pkgs.buildEnv {
-    name = "master-keys";
-    paths = [
-      (writeCFSSL (noKey ca))
-      (writeCFSSL kube-apiserver)
-      (writeCFSSL kubelet-client)
-      (writeCFSSL apiserver-client.kube-controller-manager)
-      (writeCFSSL apiserver-client.kube-scheduler)
-      (writeCFSSL service-accounts)
-      (writeCFSSL etcd)
-    ];
-  };
-
-  worker = pkgs.buildEnv {
-    name = "worker-keys";
-    paths = [
-      (writeCFSSL (noKey ca))
-      (writeCFSSL kubelet)
-      (writeCFSSL apiserver-client.kube-proxy)
-      (writeCFSSL etcd-client)
-    ] ++ map (hostname: writeCFSSL (apiserver-client.kubelet hostname)) kubelets;
-  };
-
-  admin = writeCFSSL apiserver-client.admin;
-}
diff --git a/nixos/tests/kubernetes/dns.nix b/nixos/tests/kubernetes/dns.nix
index f25ea5b9ed8..46bcb01a526 100644
--- a/nixos/tests/kubernetes/dns.nix
+++ b/nixos/tests/kubernetes/dns.nix
@@ -71,17 +71,17 @@ let
 
   base = {
     name = "dns";
-    inherit domain certs extraConfiguration;
+    inherit domain extraConfiguration;
   };
 
   singleNodeTest = {
     test = ''
       # prepare machine1 for test
       $machine1->waitUntilSucceeds("kubectl get node machine1.${domain} | grep -w Ready");
-      $machine1->execute("docker load < ${redisImage}");
+      $machine1->waitUntilSucceeds("docker load < ${redisImage}");
       $machine1->waitUntilSucceeds("kubectl create -f ${redisPod}");
       $machine1->waitUntilSucceeds("kubectl create -f ${redisService}");
-      $machine1->execute("docker load < ${probeImage}");
+      $machine1->waitUntilSucceeds("docker load < ${probeImage}");
       $machine1->waitUntilSucceeds("kubectl create -f ${probePod}");
 
       # check if pods are running
@@ -99,13 +99,16 @@ let
 
   multiNodeTest = {
     test = ''
+      # Node token exchange
+      $machine1->waitUntilSucceeds("cp -f /var/lib/cfssl/apitoken.secret /tmp/shared/apitoken.secret");
+      $machine2->waitUntilSucceeds("cat /tmp/shared/apitoken.secret | nixos-kubernetes-node-join");
+
       # prepare machines for test
-      $machine1->waitUntilSucceeds("kubectl get node machine1.${domain} | grep -w Ready");
       $machine1->waitUntilSucceeds("kubectl get node machine2.${domain} | grep -w Ready");
-      $machine2->execute("docker load < ${redisImage}");
+      $machine2->waitUntilSucceeds("docker load < ${redisImage}");
       $machine1->waitUntilSucceeds("kubectl create -f ${redisPod}");
       $machine1->waitUntilSucceeds("kubectl create -f ${redisService}");
-      $machine2->execute("docker load < ${probeImage}");
+      $machine2->waitUntilSucceeds("docker load < ${probeImage}");
       $machine1->waitUntilSucceeds("kubectl create -f ${probePod}");
 
       # check if pods are running
diff --git a/nixos/tests/kubernetes/kubernetes-common.nix b/nixos/tests/kubernetes/kubernetes-common.nix
deleted file mode 100644
index 87c65b88365..00000000000
--- a/nixos/tests/kubernetes/kubernetes-common.nix
+++ /dev/null
@@ -1,57 +0,0 @@
-{ roles, config, pkgs, certs }:
-with pkgs.lib;
-let
-  base = {
-    inherit roles;
-    flannel.enable = true;
-    addons.dashboard.enable = true;
-
-    caFile = "${certs.master}/ca.pem";
-    apiserver = {
-      tlsCertFile = "${certs.master}/kube-apiserver.pem";
-      tlsKeyFile = "${certs.master}/kube-apiserver-key.pem";
-      kubeletClientCertFile = "${certs.master}/kubelet-client.pem";
-      kubeletClientKeyFile = "${certs.master}/kubelet-client-key.pem";
-      serviceAccountKeyFile = "${certs.master}/kube-service-accounts.pem";
-    };
-    etcd = {
-      servers = ["https://etcd.${config.networking.domain}:2379"];
-      certFile = "${certs.worker}/etcd-client.pem";
-      keyFile = "${certs.worker}/etcd-client-key.pem";
-    };
-    kubeconfig = {
-      server = "https://api.${config.networking.domain}";
-    };
-    kubelet = {
-      tlsCertFile = "${certs.worker}/kubelet.pem";
-      tlsKeyFile = "${certs.worker}/kubelet-key.pem";
-      hostname = "${config.networking.hostName}.${config.networking.domain}";
-      kubeconfig = {
-        certFile = "${certs.worker}/apiserver-client-kubelet-${config.networking.hostName}.pem";
-        keyFile = "${certs.worker}/apiserver-client-kubelet-${config.networking.hostName}-key.pem";
-      };
-    };
-    controllerManager = {
-      serviceAccountKeyFile = "${certs.master}/kube-service-accounts-key.pem";
-      kubeconfig = {
-        certFile = "${certs.master}/apiserver-client-kube-controller-manager.pem";
-        keyFile = "${certs.master}/apiserver-client-kube-controller-manager-key.pem";
-      };
-    };
-    scheduler = {
-      kubeconfig = {
-        certFile = "${certs.master}/apiserver-client-kube-scheduler.pem";
-        keyFile = "${certs.master}/apiserver-client-kube-scheduler-key.pem";
-      };
-    };
-    proxy = {
-      kubeconfig = {
-        certFile = "${certs.worker}/apiserver-client-kube-proxy.pem";
-        keyFile = "${certs.worker}//apiserver-client-kube-proxy-key.pem";
-      };
-    };
-  };
-
-in {
-  services.kubernetes = base;
-}
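With kubernetes-common.nix removed, the tests no longer wire certificate paths by hand; certificates are provisioned at runtime and workers join the cluster via the bootstrap token seen in the test scripts above. A minimal sketch of the replacement configuration, assuming the masterAddress and easyCerts options this refactor introduces:

    { config, ... }:
    {
      services.kubernetes = {
        # "master" runs apiserver/controller-manager/scheduler; "node" runs kubelet/proxy
        roles = [ "master" "node" ];
        # host serving the API and the cfssl CA; workers reach it under this name
        masterAddress = "machine1.${config.networking.domain}";
        # let the new pki module generate and distribute certificates at runtime
        easyCerts = true;
      };
    }

A machine configured with roles = [ "node" ] then joins imperatively by piping the master's token into the generated join script, as the multinode tests do: cat apitoken.secret | nixos-kubernetes-node-join.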
diff --git a/nixos/tests/kubernetes/rbac.nix b/nixos/tests/kubernetes/rbac.nix
index 226808c4b26..3ce7adcd0d7 100644
--- a/nixos/tests/kubernetes/rbac.nix
+++ b/nixos/tests/kubernetes/rbac.nix
@@ -96,7 +96,7 @@ let
     test = ''
       $machine1->waitUntilSucceeds("kubectl get node machine1.my.zyx | grep -w Ready");
 
-      $machine1->execute("docker load < ${kubectlImage}");
+      $machine1->waitUntilSucceeds("docker load < ${kubectlImage}");
 
       $machine1->waitUntilSucceeds("kubectl apply -f ${roServiceAccount}");
       $machine1->waitUntilSucceeds("kubectl apply -f ${roRole}");
@@ -105,7 +105,7 @@ let
 
       $machine1->waitUntilSucceeds("kubectl get pod kubectl | grep Running");
 
-      $machine1->succeed("kubectl exec -ti kubectl -- kubectl get pods");
+      $machine1->waitUntilSucceeds("kubectl exec -ti kubectl -- kubectl get pods");
       $machine1->fail("kubectl exec -ti kubectl -- kubectl create -f /kubectl-pod-2.json");
       $machine1->fail("kubectl exec -ti kubectl -- kubectl delete pods -l name=kubectl");
     '';
@@ -113,10 +113,13 @@ let
 
   multinode = base // {
     test = ''
-      $machine1->waitUntilSucceeds("kubectl get node machine1.my.zyx | grep -w Ready");
+      # Node token exchange
+      $machine1->waitUntilSucceeds("cp -f /var/lib/cfssl/apitoken.secret /tmp/shared/apitoken.secret");
+      $machine2->waitUntilSucceeds("cat /tmp/shared/apitoken.secret | nixos-kubernetes-node-join");
+
       $machine1->waitUntilSucceeds("kubectl get node machine2.my.zyx | grep -w Ready");
 
-      $machine2->execute("docker load < ${kubectlImage}");
+      $machine2->waitUntilSucceeds("docker load < ${kubectlImage}");
 
       $machine1->waitUntilSucceeds("kubectl apply -f ${roServiceAccount}");
       $machine1->waitUntilSucceeds("kubectl apply -f ${roRole}");
@@ -125,7 +128,7 @@ let
 
       $machine1->waitUntilSucceeds("kubectl get pod kubectl | grep Running");
 
-      $machine1->succeed("kubectl exec -ti kubectl -- kubectl get pods");
+      $machine1->waitUntilSucceeds("kubectl exec -ti kubectl -- kubectl get pods");
       $machine1->fail("kubectl exec -ti kubectl -- kubectl create -f /kubectl-pod-2.json");
       $machine1->fail("kubectl exec -ti kubectl -- kubectl delete pods -l name=kubectl");
     '';
diff --git a/nixos/tests/login.nix b/nixos/tests/login.nix
index 3dbb494b689..9844ad492e8 100644
--- a/nixos/tests/login.nix
+++ b/nixos/tests/login.nix
@@ -3,7 +3,7 @@ import ./make-test.nix ({ pkgs, latestKernel ? false, ... }:
 {
   name = "login";
   meta = with pkgs.stdenv.lib.maintainers; {
-    maintainers = [ eelco chaoflow ];
+    maintainers = [ eelco ];
   };
 
   machine =
diff --git a/nixos/tests/misc.nix b/nixos/tests/misc.nix
index 3ad55651b11..ca28bc31cf1 100644
--- a/nixos/tests/misc.nix
+++ b/nixos/tests/misc.nix
@@ -3,7 +3,7 @@
 import ./make-test.nix ({ pkgs, ...} : rec {
   name = "misc";
   meta = with pkgs.stdenv.lib.maintainers; {
-    maintainers = [ eelco chaoflow ];
+    maintainers = [ eelco ];
   };
 
   foo = pkgs.writeText "foo" "Hello World";
diff --git a/nixos/tests/mumble.nix b/nixos/tests/mumble.nix
index 8146453bfd5..dadd16fd9a0 100644
--- a/nixos/tests/mumble.nix
+++ b/nixos/tests/mumble.nix
@@ -9,7 +9,7 @@ in
 {
   name = "mumble";
   meta = with pkgs.stdenv.lib.maintainers; {
-    maintainers = [ thoughtpolice eelco chaoflow ];
+    maintainers = [ thoughtpolice eelco ];
   };
 
   nodes = {
diff --git a/nixos/tests/munin.nix b/nixos/tests/munin.nix
index 95cecf17b8c..31374aaf77e 100644
--- a/nixos/tests/munin.nix
+++ b/nixos/tests/munin.nix
@@ -4,7 +4,7 @@
 import ./make-test.nix ({ pkgs, ...} : {
   name = "munin";
   meta = with pkgs.stdenv.lib.maintainers; {
-    maintainers = [ domenkozar eelco chaoflow ];
+    maintainers = [ domenkozar eelco ];
   };
 
   nodes = {
diff --git a/nixos/tests/mysql-replication.nix b/nixos/tests/mysql-replication.nix
index 84d70cf3524..c75a862106f 100644
--- a/nixos/tests/mysql-replication.nix
+++ b/nixos/tests/mysql-replication.nix
@@ -8,7 +8,7 @@ in
 {
   name = "mysql-replication";
   meta = with pkgs.stdenv.lib.maintainers; {
-    maintainers = [ eelco chaoflow shlevy ];
+    maintainers = [ eelco shlevy ];
   };
 
   nodes = {
diff --git a/nixos/tests/mysql.nix b/nixos/tests/mysql.nix
index 7251c4a8649..1a611779366 100644
--- a/nixos/tests/mysql.nix
+++ b/nixos/tests/mysql.nix
@@ -1,7 +1,7 @@
 import ./make-test.nix ({ pkgs, ...} : {
   name = "mysql";
   meta = with pkgs.stdenv.lib.maintainers; {
-    maintainers = [ eelco chaoflow shlevy ];
+    maintainers = [ eelco shlevy ];
   };
 
   nodes = {
diff --git a/nixos/tests/nat.nix b/nixos/tests/nat.nix
index 34229e91311..51d9cf166bb 100644
--- a/nixos/tests/nat.nix
+++ b/nixos/tests/nat.nix
@@ -24,7 +24,7 @@ import ./make-test.nix ({ pkgs, lib, withFirewall, withConntrackHelpers ? false,
     name = "nat" + (if withFirewall then "WithFirewall" else "Standalone")
                  + (lib.optionalString withConntrackHelpers "withConntrackHelpers");
     meta = with pkgs.stdenv.lib.maintainers; {
-      maintainers = [ eelco chaoflow rob ];
+      maintainers = [ eelco rob ];
     };
 
     nodes =
diff --git a/nixos/tests/nfs.nix b/nixos/tests/nfs.nix
index ce171701893..2f655336e75 100644
--- a/nixos/tests/nfs.nix
+++ b/nixos/tests/nfs.nix
@@ -20,7 +20,7 @@ in
 {
   name = "nfs";
   meta = with pkgs.stdenv.lib.maintainers; {
-    maintainers = [ eelco chaoflow  ];
+    maintainers = [ eelco ];
   };
 
   nodes =
diff --git a/nixos/tests/openssh.nix b/nixos/tests/openssh.nix
index c66b90b802d..219a20c5c7e 100644
--- a/nixos/tests/openssh.nix
+++ b/nixos/tests/openssh.nix
@@ -5,7 +5,7 @@ let inherit (import ./ssh-keys.nix pkgs)
 in {
   name = "openssh";
   meta = with pkgs.stdenv.lib.maintainers; {
-    maintainers = [ aszlig eelco chaoflow ];
+    maintainers = [ aszlig eelco ];
   };
 
   nodes = {
diff --git a/nixos/tests/pantheon.nix b/nixos/tests/pantheon.nix
new file mode 100644
index 00000000000..c50f77f8617
--- /dev/null
+++ b/nixos/tests/pantheon.nix
@@ -0,0 +1,55 @@
+import ./make-test.nix ({ pkgs, ...} :
+
+{
+  name = "pantheon";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ worldofpeace ];
+  };
+
+  machine = { ... }:
+
+  {
+    imports = [ ./common/user-account.nix ];
+
+    services.xserver.enable = true;
+    services.xserver.desktopManager.pantheon.enable = true;
+
+    virtualisation.memorySize = 1024;
+  };
+
+  enableOCR = true;
+
+  testScript = { nodes, ... }: let
+    user = nodes.machine.config.users.users.alice;
+  in ''
+    startAll;
+
+    # Wait for display manager to start
+    $machine->waitForText(qr/${user.description}/);
+    $machine->screenshot("lightdm");
+
+    # Log in
+    $machine->sendChars("${user.password}\n");
+    $machine->waitForFile("/home/alice/.Xauthority");
+    $machine->succeed("xauth merge ~alice/.Xauthority");
+
+    # Check if "pantheon-shell" components actually start
+    $machine->waitUntilSucceeds("pgrep gala");
+    $machine->waitForWindow(qr/gala/);
+    $machine->waitUntilSucceeds("pgrep wingpanel");
+    $machine->waitForWindow("wingpanel");
+    $machine->waitUntilSucceeds("pgrep plank");
+    $machine->waitForWindow(qr/plank/);
+
+    # Check that logging in has given the user ownership of devices.
+    $machine->succeed("getfacl /dev/snd/timer | grep -q alice");
+
+    # Open elementary terminal
+    $machine->execute("su - alice -c 'DISPLAY=:0.0 io.elementary.terminal &'");
+    $machine->waitForWindow(qr/io\.elementary\.terminal/);
+
+    # Take a screenshot of the desktop
+    $machine->sleep(20);
+    $machine->screenshot("screen");
+  '';
+})
diff --git a/nixos/tests/phabricator.nix b/nixos/tests/phabricator.nix
index 20b3b838aba..db23331842c 100644
--- a/nixos/tests/phabricator.nix
+++ b/nixos/tests/phabricator.nix
@@ -1,7 +1,7 @@
 import ./make-test.nix ({ pkgs, ... }: {
   name = "phabricator";
   meta = with pkgs.stdenv.lib.maintainers; {
-    maintainers = [ chaoflow ];
+    maintainers = [ ];
   };
 
   nodes = {
diff --git a/nixos/tests/printing.nix b/nixos/tests/printing.nix
index 7026637ead1..caa8131a4b4 100644
--- a/nixos/tests/printing.nix
+++ b/nixos/tests/printing.nix
@@ -3,7 +3,7 @@
 import ./make-test.nix ({pkgs, ... }: {
   name = "printing";
   meta = with pkgs.stdenv.lib.maintainers; {
-    maintainers = [ domenkozar eelco chaoflow jgeerds ];
+    maintainers = [ domenkozar eelco jgeerds ];
   };
 
   nodes = {
diff --git a/nixos/tests/proxy.nix b/nixos/tests/proxy.nix
index 18195312028..1f39e903cdd 100644
--- a/nixos/tests/proxy.nix
+++ b/nixos/tests/proxy.nix
@@ -16,7 +16,7 @@ in
 {
   name = "proxy";
   meta = with pkgs.stdenv.lib.maintainers; {
-    maintainers = [ eelco chaoflow ];
+    maintainers = [ eelco ];
   };
 
   nodes =
diff --git a/nixos/tests/quake3.nix b/nixos/tests/quake3.nix
index 75c82cca63f..fbb798515e1 100644
--- a/nixos/tests/quake3.nix
+++ b/nixos/tests/quake3.nix
@@ -22,7 +22,7 @@ in
 rec {
   name = "quake3";
   meta = with pkgs.stdenv.lib.maintainers; {
-    maintainers = [ domenkozar eelco chaoflow ];
+    maintainers = [ domenkozar eelco ];
   };
 
   # TODO: lcov doesn't work atm
diff --git a/nixos/tests/rabbitmq.nix b/nixos/tests/rabbitmq.nix
index 34ab0578786..bb5932c3641 100644
--- a/nixos/tests/rabbitmq.nix
+++ b/nixos/tests/rabbitmq.nix
@@ -3,7 +3,7 @@
 import ./make-test.nix ({ pkgs, ... }: {
   name = "rabbitmq";
   meta = with pkgs.stdenv.lib.maintainers; {
-    maintainers = [ eelco chaoflow offline ];
+    maintainers = [ eelco offline ];
   };
 
   nodes = {
diff --git a/nixos/tests/subversion.nix b/nixos/tests/subversion.nix
index 6175155cdfc..e7b99b10602 100644
--- a/nixos/tests/subversion.nix
+++ b/nixos/tests/subversion.nix
@@ -34,7 +34,7 @@ in
 {
   name = "subversion";
   meta = with pkgs.stdenv.lib.maintainers; {
-    maintainers = [ eelco chaoflow ];
+    maintainers = [ eelco ];
   };
 
   nodes =
diff --git a/nixos/tests/tomcat.nix b/nixos/tests/tomcat.nix
index af63c7ee8e0..8e7b886dd30 100644
--- a/nixos/tests/tomcat.nix
+++ b/nixos/tests/tomcat.nix
@@ -1,7 +1,7 @@
 import ./make-test.nix ({ pkgs, ...} : {
   name = "tomcat";
   meta = with pkgs.stdenv.lib.maintainers; {
-    maintainers = [ eelco chaoflow ];
+    maintainers = [ eelco ];
   };
 
   nodes = {
diff --git a/nixos/tests/trac.nix b/nixos/tests/trac.nix
index 4599885acde..8ec11ebda2c 100644
--- a/nixos/tests/trac.nix
+++ b/nixos/tests/trac.nix
@@ -1,7 +1,7 @@
 import ./make-test.nix ({ pkgs, ... }: {
   name = "trac";
   meta = with pkgs.stdenv.lib.maintainers; {
-    maintainers = [ eelco chaoflow ];
+    maintainers = [ eelco ];
   };
 
   nodes = {
diff --git a/nixos/tests/udisks2.nix b/nixos/tests/udisks2.nix
index 8bbbe286efc..dcf869908d8 100644
--- a/nixos/tests/udisks2.nix
+++ b/nixos/tests/udisks2.nix
@@ -12,7 +12,7 @@ in
 {
   name = "udisks2";
   meta = with pkgs.stdenv.lib.maintainers; {
-    maintainers = [ eelco chaoflow ];
+    maintainers = [ eelco ];
   };
 
   machine =
diff --git a/nixos/tests/xfce.nix b/nixos/tests/xfce.nix
index 47717e8cf7d..12d8a050d47 100644
--- a/nixos/tests/xfce.nix
+++ b/nixos/tests/xfce.nix
@@ -1,7 +1,7 @@
 import ./make-test.nix ({ pkgs, ...} : {
   name = "xfce";
   meta = with pkgs.stdenv.lib.maintainers; {
-    maintainers = [ eelco chaoflow shlevy ];
+    maintainers = [ eelco shlevy ];
   };
 
   machine =