author    Frederik Rietdijk <fridh@fridh.nl>  2019-02-21 08:02:10 +0100
committer Frederik Rietdijk <fridh@fridh.nl>  2019-02-21 08:02:10 +0100
commit    5871da418fffdbf286f8fa8ad743f7d76a62baa6 (patch)
tree      4f2ee1f353fd6682d3505706d359f1e6d6a64ab5 /nixos
parent    eb22e4c775a9cbed876bfc743a59cc314f11b75b (diff)
parent    e2cd07b9978198696681776405c4e23526d483a7 (diff)
Merge staging into python-unstable
Diffstat (limited to 'nixos')
-rw-r--r--  nixos/doc/manual/configuration/configuration.xml | 1
-rw-r--r--  nixos/doc/manual/configuration/kubernetes.xml | 127
-rw-r--r--  nixos/doc/manual/configuration/wireless.xml | 5
-rw-r--r--  nixos/doc/manual/release-notes/rl-1903.xml | 58
-rw-r--r--  nixos/modules/misc/documentation.nix | 1
-rw-r--r--  nixos/modules/misc/ids.nix | 4
-rw-r--r--  nixos/modules/module-list.nix | 17
-rw-r--r--  nixos/modules/programs/autojump.nix | 33
-rw-r--r--  nixos/modules/programs/singularity.nix | 21
-rw-r--r--  nixos/modules/programs/ssh.nix | 10
-rw-r--r--  nixos/modules/rename.nix | 11
-rw-r--r--  nixos/modules/services/cluster/kubernetes/addon-manager.nix | 167
-rw-r--r--  nixos/modules/services/cluster/kubernetes/addons/dashboard.nix (renamed from nixos/modules/services/cluster/kubernetes/dashboard.nix) | 15
-rw-r--r--  nixos/modules/services/cluster/kubernetes/addons/dns.nix (renamed from nixos/modules/services/cluster/kubernetes/dns.nix) | 38
-rw-r--r--  nixos/modules/services/cluster/kubernetes/apiserver.nix | 428
-rw-r--r--  nixos/modules/services/cluster/kubernetes/controller-manager.nix | 162
-rw-r--r--  nixos/modules/services/cluster/kubernetes/default.nix | 1131
-rw-r--r--  nixos/modules/services/cluster/kubernetes/flannel.nix | 134
-rw-r--r--  nixos/modules/services/cluster/kubernetes/kubelet.nix | 358
-rw-r--r--  nixos/modules/services/cluster/kubernetes/pki.nix | 388
-rw-r--r--  nixos/modules/services/cluster/kubernetes/proxy.nix | 80
-rw-r--r--  nixos/modules/services/cluster/kubernetes/scheduler.nix | 92
-rw-r--r--  nixos/modules/services/hardware/bolt.nix | 34
-rw-r--r--  nixos/modules/services/misc/gitlab.nix | 15
-rw-r--r--  nixos/modules/services/misc/headphones.nix | 87
-rw-r--r--  nixos/modules/services/misc/jackett.nix | 59
-rw-r--r--  nixos/modules/services/misc/radarr.nix | 59
-rw-r--r--  nixos/modules/services/misc/zoneminder.nix | 8
-rw-r--r--  nixos/modules/services/network-filesystems/diod.nix | 1
-rw-r--r--  nixos/modules/services/networking/flannel.nix | 54
-rw-r--r--  nixos/modules/services/networking/quassel.nix | 28
-rw-r--r--  nixos/modules/services/networking/wpa_supplicant.nix | 21
-rw-r--r--  nixos/modules/services/printing/cupsd.nix | 4
-rw-r--r--  nixos/modules/services/web-apps/icingaweb2/icingaweb2.nix | 626
-rw-r--r--  nixos/modules/services/web-apps/icingaweb2/module-monitoring.nix | 157
-rw-r--r--  nixos/modules/services/web-apps/matomo-doc.xml | 32
-rw-r--r--  nixos/modules/services/web-apps/matomo.nix | 99
-rw-r--r--  nixos/modules/services/web-apps/tt-rss.nix | 32
-rw-r--r--  nixos/modules/services/web-servers/apache-httpd/default.nix | 13
-rw-r--r--  nixos/modules/services/web-servers/phpfpm/default.nix | 9
-rw-r--r--  nixos/modules/services/web-servers/phpfpm/pool-options.nix | 9
-rw-r--r--  nixos/modules/services/x11/desktop-managers/gnome3.nix | 1
-rw-r--r--  nixos/modules/system/boot/systemd.nix | 13
-rw-r--r--  nixos/modules/virtualisation/vmware-guest.nix | 18
-rw-r--r--  nixos/tests/all-tests.nix | 1
-rw-r--r--  nixos/tests/flannel.nix | 5
-rw-r--r--  nixos/tests/kubernetes/base.nix | 39
-rw-r--r--  nixos/tests/kubernetes/certs.nix | 219
-rw-r--r--  nixos/tests/kubernetes/dns.nix | 15
-rw-r--r--  nixos/tests/kubernetes/kubernetes-common.nix | 57
-rw-r--r--  nixos/tests/kubernetes/rbac.nix | 13
-rw-r--r--  nixos/tests/printing.nix | 2
-rw-r--r--  nixos/tests/switch-test.nix | 13
53 files changed, 3555 insertions(+), 1469 deletions(-)
diff --git a/nixos/doc/manual/configuration/configuration.xml b/nixos/doc/manual/configuration/configuration.xml
index cebc4122c6c..138d1d86d7f 100644
--- a/nixos/doc/manual/configuration/configuration.xml
+++ b/nixos/doc/manual/configuration/configuration.xml
@@ -23,5 +23,6 @@
  <xi:include href="linux-kernel.xml" />
  <xi:include href="../generated/modules.xml" xpointer="xpointer(//section[@id='modules']/*)" />
  <xi:include href="profiles.xml" />
+ <xi:include href="kubernetes.xml" />
 <!-- Apache; libvirtd virtualisation -->
 </part>
diff --git a/nixos/doc/manual/configuration/kubernetes.xml b/nixos/doc/manual/configuration/kubernetes.xml
new file mode 100644
index 00000000000..ddc026c0c01
--- /dev/null
+++ b/nixos/doc/manual/configuration/kubernetes.xml
@@ -0,0 +1,127 @@
+<chapter xmlns="http://docbook.org/ns/docbook"
+         xmlns:xlink="http://www.w3.org/1999/xlink"
+         xmlns:xi="http://www.w3.org/2001/XInclude"
+         version="5.0"
+         xml:id="sec-kubernetes">
+ <title>Kubernetes</title>
+
+ <para>
+   The NixOS Kubernetes module is a collective term for a handful of
+   individual submodules implementing the Kubernetes cluster components.
+ </para>
+
+ <para>
+   There are generally two ways of enabling Kubernetes on NixOS.
+   One way is to enable and configure cluster components appropriately by hand:
+<programlisting>
+services.kubernetes = {
+  apiserver.enable = true;
+  controllerManager.enable = true;
+  scheduler.enable = true;
+  addonManager.enable = true;
+  proxy.enable = true;
+  flannel.enable = true;
+};
+</programlisting>
+  Another way is to assign cluster roles ("master" and/or "node") to the host.
+  Assigning the "master" role enables the apiserver, controllerManager,
+  scheduler, addonManager, kube-proxy and etcd:
+<programlisting>
+<xref linkend="opt-services.kubernetes.roles"/> = [ "master" ];
+</programlisting>
+  The "node" role, in turn, enables only the kubelet and kube-proxy:
+<programlisting>
+<xref linkend="opt-services.kubernetes.roles"/> = [ "node" ];
+</programlisting>
+  Assigning both the master and node roles is useful for a single-node
+  Kubernetes cluster for development or testing purposes:
+<programlisting>
+<xref linkend="opt-services.kubernetes.roles"/> = [ "master" "node" ];
+</programlisting>
+  Note: Assigning either role will also default both
+  <xref linkend="opt-services.kubernetes.flannel.enable"/> and
+  <xref linkend="opt-services.kubernetes.easyCerts"/> to true.
+  This sets up flannel as the CNI plugin and activates automatic PKI
+  bootstrapping.
+ </para>
+
+ <para>
+   As of Kubernetes 1.10.x, opening non-TLS-enabled ports on Kubernetes
+   components is deprecated. Therefore, as of NixOS 19.03, all plain HTTP
+   ports are disabled by default.
+   While opening insecure ports is still possible, it is recommended to bind
+   them only to the loopback interface.
+
+   To re-enable the insecure port on the apiserver, see the options
+   <xref linkend="opt-services.kubernetes.apiserver.insecurePort"/>
+   and
+   <xref linkend="opt-services.kubernetes.apiserver.insecureBindAddress"/>
+ </para>
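+
+ <para>
+   For example, a minimal sketch that re-enables the insecure apiserver port
+   on loopback only (the port value is illustrative):
+<programlisting>
+services.kubernetes.apiserver = {
+  insecurePort = 8080;
+  insecureBindAddress = "127.0.0.1";
+};
+</programlisting>
+ </para>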
+
+ <note>
+  <para>
+   As of NixOS 19.03, it is mandatory to configure
+   <xref linkend="opt-services.kubernetes.masterAddress"/>.
+   The masterAddress must be resolvable and routable by all cluster nodes.
+   In single-node clusters, it can be set to <literal>localhost</literal>.
+  </para>
+ </note>
+
+ <para>
+   Role-based access control (RBAC) authorization mode is enabled by default.
+   This means that anonymous requests to the apiserver secure port are
+   expected to fail with a permission denied error. All cluster components
+   must therefore be configured with x509 certificates for two-way TLS
+   communication. The x509 certificate subject section determines the roles
+   and permissions granted by the apiserver for cluster-wide or namespaced
+   operations.
+   See also:
+   <link
+     xlink:href="https://kubernetes.io/docs/reference/access-authn-authz/rbac/">
+     Using RBAC Authorization</link>.
+ </para>
+
+  <para>
+   The NixOS kubernetes module provides an option for automatic certificate
+   bootstrapping and configuration,
+    <xref linkend="opt-services.kubernetes.easyCerts"/>.
+   The PKI bootstrapping process involves setting up a certificate authority
+   (CA) daemon (cfssl) on the kubernetes master node. cfssl generates a CA
+   certificate for the cluster and uses it to sign subordinate certificates
+   issued to each of the cluster components. Subsequently, the certmgr daemon
+   monitors active certificates and renews them when needed. For single-node
+   Kubernetes clusters, setting <xref linkend="opt-services.kubernetes.easyCerts"/> = true
+   is sufficient and no further action is required. When joining additional
+   node machines to an existing cluster, on the other hand, establishing
+   initial trust is mandatory.
+ </para>
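+
+ <para>
+   For instance, a minimal single-node sketch combining the options above
+   (all values illustrative):
+<programlisting>
+services.kubernetes = {
+  roles = [ "master" "node" ];
+  masterAddress = "localhost";
+  easyCerts = true;
+};
+</programlisting>
+ </para>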
+
+ <para>
+   To add new nodes to the cluster: on any (non-master) cluster node where
+   <xref linkend="opt-services.kubernetes.easyCerts"/> is enabled, the helper
+   script <literal>nixos-kubernetes-node-join</literal> is available on the
+   PATH. Given a token on stdin, it copies the token to the kubernetes
+   secrets directory and restarts the certmgr service. As the requested
+   certificates are issued, the script restarts kubernetes cluster
+   components as needed for them to pick up the new keypairs.
+ </para>
+
+ <note>
+  <para>
+   Multi-master (HA) clusters are not supported by the easyCerts module.
+  </para>
+ </note>
+
+ <para>
+   In order to interact with an RBAC-enabled cluster as an administrator, one
+   needs to have cluster-admin privileges. By default, when easyCerts is
+   enabled, a cluster-admin kubeconfig file is generated and linked into
+   <literal>/etc/kubernetes/cluster-admin.kubeconfig</literal> as determined by
+   <xref linkend="opt-services.kubernetes.pki.etcClusterAdminKubeconfig"/>.
+   <literal>export KUBECONFIG=/etc/kubernetes/cluster-admin.kubeconfig</literal>
+   will make kubectl use this kubeconfig to access and authenticate to the cluster.
+   The cluster-admin kubeconfig references an auto-generated keypair owned by
+   root. Thus, only root on the kubernetes master may obtain cluster-admin
+   rights by means of this file.
+ </para>
+
+</chapter>
diff --git a/nixos/doc/manual/configuration/wireless.xml b/nixos/doc/manual/configuration/wireless.xml
index 999447234ad..f7e99ff0e35 100644
--- a/nixos/doc/manual/configuration/wireless.xml
+++ b/nixos/doc/manual/configuration/wireless.xml
@@ -29,7 +29,10 @@
   networks are set, it will default to using a configuration file at
   <literal>/etc/wpa_supplicant.conf</literal>. You should edit this file
   yourself to define wireless networks, WPA keys and so on (see
-  wpa_supplicant.conf(5)).
+  <citerefentry>
+    <refentrytitle>wpa_supplicant.conf</refentrytitle>
+    <manvolnum>5</manvolnum>
+  </citerefentry>).
  </para>
 
  <para>
diff --git a/nixos/doc/manual/release-notes/rl-1903.xml b/nixos/doc/manual/release-notes/rl-1903.xml
index 2277edede8e..fd4bb975a05 100644
--- a/nixos/doc/manual/release-notes/rl-1903.xml
+++ b/nixos/doc/manual/release-notes/rl-1903.xml
@@ -54,6 +54,13 @@
      </itemizedlist>
      <para>to <literal>false</literal> and enable your preferred display manager.</para>
     </note>
+     <para>
+       A major refactoring of the Kubernetes module has been completed.
+       The refactoring primarily focuses on decoupling components and
+       enhancing security. Two-way TLS and RBAC are now enabled by default
+       for all components, which slightly changes the way the module is
+       configured. See <xref linkend="sec-kubernetes"/> for details.
+     </para>
    </listitem>
   </itemizedlist>
  </section>
@@ -452,6 +459,23 @@
      <option>services.matomo.package</option> which determines the used
      Matomo version.
     </para>
+    <para>
+     The Matomo module now also comes with the systemd service <literal>matomo-archive-processing.service</literal>
+     and a timer that automatically triggers archive processing every hour.
+     This means that you can safely
+     <link xlink:href="https://matomo.org/docs/setup-auto-archiving/#disable-browser-triggers-for-matomo-archiving-and-limit-matomo-reports-to-updating-every-hour">
+      disable browser triggers for Matomo archiving
+     </link> at <literal>Administration > System > General Settings</literal>.
+    </para>
+    <para>
+     Additionally, you can enable the option to
+     <link xlink:href="https://matomo.org/docs/privacy/#step-2-delete-old-visitors-logs">
+      delete old visitor logs
+     </link> at <literal>Administration > System > Privacy</literal>,
+     but make sure that you run <literal>systemctl start matomo-archive-processing.service</literal>
+     at least once without errors if you have already collected data before,
+     so that the reports get archived before the source data gets deleted.
+    </para>
    </listitem>
    <listitem>
     <para>
@@ -555,6 +579,40 @@
      provisioning.
     </para>
    </listitem>
+   <listitem>
+     <para>
+       The use of insecure ports on kubernetes has been deprecated.
+       Therefore, the options
+       <varname>services.kubernetes.apiserver.port</varname> and
+       <varname>services.kubernetes.controllerManager.port</varname>
+       have been renamed to <varname>.insecurePort</varname>,
+       and the default of both options has changed to 0 (disabled).
+     </para>
+    </listitem>
+    <listitem>
+      <para>
+        Note that the default value of
+        <varname>services.kubernetes.apiserver.bindAddress</varname>
+        has changed from 127.0.0.1 to 0.0.0.0, allowing the apiserver to be
+        accessible from outside the master node itself.
+        If the apiserver insecurePort is enabled,
+        it is strongly recommended to bind it only on the loopback interface. See
+        <varname>services.kubernetes.apiserver.insecureBindAddress</varname>.
+      </para>
+    </listitem>
+    <listitem>
+      <para>
+        The options <varname>services.kubernetes.apiserver.allowPrivileged</varname>
+        and <varname>services.kubernetes.kubelet.allowPrivileged</varname> now
+        default to false, disallowing privileged containers on the cluster.
+      </para>
+    </listitem>
+    <listitem>
+      <para>
+        The kubernetes module no longer adds the kubernetes package to
+        <varname>environment.systemPackages</varname> implicitly.
+      </para>
+    </listitem>
   </itemizedlist>
  </section>
 </section>
diff --git a/nixos/modules/misc/documentation.nix b/nixos/modules/misc/documentation.nix
index 09d53c322fb..9b2e1235b74 100644
--- a/nixos/modules/misc/documentation.nix
+++ b/nixos/modules/misc/documentation.nix
@@ -156,6 +156,7 @@ in
       environment.systemPackages = [ pkgs.man-db ];
       environment.pathsToLink = [ "/share/man" ];
       environment.extraOutputsToInstall = [ "man" ] ++ optional cfg.dev.enable "devman";
+      environment.etc."man.conf".source = "${pkgs.man-db}/etc/man_db.conf";
     })
 
     (mkIf cfg.info.enable {
diff --git a/nixos/modules/misc/ids.nix b/nixos/modules/misc/ids.nix
index d6e6ccaecd2..5f134b51939 100644
--- a/nixos/modules/misc/ids.nix
+++ b/nixos/modules/misc/ids.nix
@@ -290,7 +290,7 @@
       riak-cs = 263;
       infinoted = 264;
       sickbeard = 265;
-      # glance = 266; # unused, removed 2017-12-13
+      headphones = 266;
       couchpotato = 267;
       gogs = 268;
       pdns-recursor = 269;
@@ -590,7 +590,7 @@
       riak-cs = 263;
       infinoted = 264;
       sickbeard = 265;
-      # glance = 266; # unused, removed 2017-12-13
+      headphones = 266;
       couchpotato = 267;
       gogs = 268;
       kresd = 270;
diff --git a/nixos/modules/module-list.nix b/nixos/modules/module-list.nix
index 003b9df95d7..7af6e117c51 100644
--- a/nixos/modules/module-list.nix
+++ b/nixos/modules/module-list.nix
@@ -82,6 +82,7 @@
   ./misc/version.nix
   ./programs/adb.nix
   ./programs/atop.nix
+  ./programs/autojump.nix
   ./programs/bash/bash.nix
   ./programs/bcc.nix
   ./programs/blcr.nix
@@ -195,9 +196,17 @@
   ./services/backup/tarsnap.nix
   ./services/backup/znapzend.nix
   ./services/cluster/hadoop/default.nix
+  ./services/cluster/kubernetes/addons/dns.nix
+  ./services/cluster/kubernetes/addons/dashboard.nix
+  ./services/cluster/kubernetes/addon-manager.nix
+  ./services/cluster/kubernetes/apiserver.nix
+  ./services/cluster/kubernetes/controller-manager.nix
   ./services/cluster/kubernetes/default.nix
-  ./services/cluster/kubernetes/dns.nix
-  ./services/cluster/kubernetes/dashboard.nix
+  ./services/cluster/kubernetes/flannel.nix
+  ./services/cluster/kubernetes/kubelet.nix
+  ./services/cluster/kubernetes/pki.nix
+  ./services/cluster/kubernetes/proxy.nix
+  ./services/cluster/kubernetes/scheduler.nix
   ./services/computing/boinc/client.nix
   ./services/computing/torque/server.nix
   ./services/computing/torque/mom.nix
@@ -283,6 +292,7 @@
   ./services/hardware/acpid.nix
   ./services/hardware/actkbd.nix
   ./services/hardware/bluetooth.nix
+  ./services/hardware/bolt.nix
   ./services/hardware/brltty.nix
   ./services/hardware/freefall.nix
   ./services/hardware/fwupd.nix
@@ -383,6 +393,7 @@
   ./services/misc/gogs.nix
   ./services/misc/gollum.nix
   ./services/misc/gpsd.nix
+  ./services/misc/headphones.nix
   ./services/misc/home-assistant.nix
   ./services/misc/ihaskell.nix
   ./services/misc/irkerd.nix
@@ -719,6 +730,8 @@
   ./services/web-apps/atlassian/jira.nix
   ./services/web-apps/codimd.nix
   ./services/web-apps/frab.nix
+  ./services/web-apps/icingaweb2/icingaweb2.nix
+  ./services/web-apps/icingaweb2/module-monitoring.nix
   ./services/web-apps/mattermost.nix
   ./services/web-apps/nextcloud.nix
   ./services/web-apps/nexus.nix
diff --git a/nixos/modules/programs/autojump.nix b/nixos/modules/programs/autojump.nix
new file mode 100644
index 00000000000..3a8feec4bb4
--- /dev/null
+++ b/nixos/modules/programs/autojump.nix
@@ -0,0 +1,33 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.programs.autojump;
+  prg = config.programs;
+in
+{
+  options = {
+    programs.autojump = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Whether to enable autojump.
+        '';
+      };
+    };
+  }; 
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    environment.pathsToLink = [ "/share/autojump" ];
+    environment.systemPackages = [ pkgs.autojump ];
+
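+    # bash integration is sourced unconditionally (bash is always enabled on
+    # NixOS), while zsh and fish integration is guarded by the respective
+    # shell's enable option.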
+    programs.bash.interactiveShellInit = "source ${pkgs.autojump}/share/autojump/autojump.bash"; 
+    programs.zsh.interactiveShellInit = mkIf prg.zsh.enable "source ${pkgs.autojump}/share/autojump/autojump.zsh";
+    programs.fish.interactiveShellInit = mkIf prg.fish.enable "source ${pkgs.autojump}/share/autojump/autojump.fish";
+  };
+}
diff --git a/nixos/modules/programs/singularity.nix b/nixos/modules/programs/singularity.nix
index 86153d93385..b27e122bd1d 100644
--- a/nixos/modules/programs/singularity.nix
+++ b/nixos/modules/programs/singularity.nix
@@ -3,18 +3,27 @@
 with lib;
 let
   cfg = config.programs.singularity;
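+  # Override the package so its setuid starter points at the NixOS security
+  # wrapper; the original binary is kept as starter-suid.orig and exposed
+  # through security.wrappers below.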
+  singularity = pkgs.singularity.overrideAttrs (attrs : {
+    installPhase = attrs.installPhase + ''
+      mv $bin/libexec/singularity/bin/starter-suid $bin/libexec/singularity/bin/starter-suid.orig
+      ln -s /run/wrappers/bin/singularity-suid $bin/libexec/singularity/bin/starter-suid
+    '';
+  });
 in {
   options.programs.singularity = {
     enable = mkEnableOption "Singularity";
   };
 
   config = mkIf cfg.enable {
-      environment.systemPackages = [ pkgs.singularity ];
-      systemd.tmpfiles.rules = [ "d /var/singularity/mnt/session 0770 root root -"
-                                 "d /var/singularity/mnt/final 0770 root root -"
-                                 "d /var/singularity/mnt/overlay 0770 root root -"
-                                 "d /var/singularity/mnt/container 0770 root root -"
-                                 "d /var/singularity/mnt/source 0770 root root -"];
+      environment.systemPackages = [ singularity ];
+      security.wrappers.singularity-suid.source = "${singularity}/libexec/singularity/bin/starter-suid.orig";
+      systemd.tmpfiles.rules = [
+        "d /var/singularity/mnt/session 0770 root root -"
+        "d /var/singularity/mnt/final 0770 root root -"
+        "d /var/singularity/mnt/overlay 0770 root root -"
+        "d /var/singularity/mnt/container 0770 root root -"
+        "d /var/singularity/mnt/source 0770 root root -"
+      ];
   };
 
 }
diff --git a/nixos/modules/programs/ssh.nix b/nixos/modules/programs/ssh.nix
index 4640c1d78d2..46965dd35b7 100644
--- a/nixos/modules/programs/ssh.nix
+++ b/nixos/modules/programs/ssh.nix
@@ -88,7 +88,8 @@ in
         type = types.lines;
         default = "";
         description = ''
-          Extra configuration text appended to <filename>ssh_config</filename>.
+          Extra configuration text prepended to <filename>ssh_config</filename>. Other generated
+          options will be added after a <code>Host *</code> pattern.
           See <citerefentry><refentrytitle>ssh_config</refentrytitle><manvolnum>5</manvolnum></citerefentry>
           for help.
         '';
@@ -203,6 +204,11 @@ in
     # generation in the sshd service.
     environment.etc."ssh/ssh_config".text =
       ''
+        # Custom options from `extraConfig`, to override generated options
+        ${cfg.extraConfig}
+
+        # Generated options from other settings
+        Host *
         AddressFamily ${if config.networking.enableIPv6 then "any" else "inet"}
 
         ${optionalString cfg.setXAuthLocation ''
@@ -213,8 +219,6 @@ in
 
         ${optionalString (cfg.pubkeyAcceptedKeyTypes != []) "PubkeyAcceptedKeyTypes ${concatStringsSep "," cfg.pubkeyAcceptedKeyTypes}"}
         ${optionalString (cfg.hostKeyAlgorithms != []) "HostKeyAlgorithms ${concatStringsSep "," cfg.hostKeyAlgorithms}"}
-
-        ${cfg.extraConfig}
       '';
 
     environment.etc."ssh/ssh_known_hosts".text = knownHostsText;
diff --git a/nixos/modules/rename.nix b/nixos/modules/rename.nix
index 24ab963f718..1e6557e1f0e 100644
--- a/nixos/modules/rename.nix
+++ b/nixos/modules/rename.nix
@@ -40,9 +40,19 @@ with lib;
     (mkRenamedOptionModule [ "services" "kibana" "host" ] [ "services" "kibana" "listenAddress" ])
     (mkRenamedOptionModule [ "services" "kubernetes" "apiserver" "admissionControl" ] [ "services" "kubernetes" "apiserver" "enableAdmissionPlugins" ])
     (mkRenamedOptionModule [ "services" "kubernetes" "apiserver" "address" ] ["services" "kubernetes" "apiserver" "bindAddress"])
+    (mkRenamedOptionModule [ "services" "kubernetes" "apiserver" "port" ] ["services" "kubernetes" "apiserver" "insecurePort"])
     (mkRemovedOptionModule [ "services" "kubernetes" "apiserver" "publicAddress" ] "")
     (mkRenamedOptionModule [ "services" "kubernetes" "addons" "dashboard" "enableRBAC" ] [ "services" "kubernetes" "addons" "dashboard" "rbac" "enable" ])
+    (mkRenamedOptionModule [ "services" "kubernetes" "controllerManager" "address" ] ["services" "kubernetes" "controllerManager" "bindAddress"])
+    (mkRenamedOptionModule [ "services" "kubernetes" "controllerManager" "port" ] ["services" "kubernetes" "controllerManager" "insecurePort"])
+    (mkRenamedOptionModule [ "services" "kubernetes" "etcd" "servers" ] [ "services" "kubernetes" "apiserver" "etcd" "servers" ])
+    (mkRenamedOptionModule [ "services" "kubernetes" "etcd" "keyFile" ] [ "services" "kubernetes" "apiserver" "etcd" "keyFile" ])
+    (mkRenamedOptionModule [ "services" "kubernetes" "etcd" "certFile" ] [ "services" "kubernetes" "apiserver" "etcd" "certFile" ])
+    (mkRenamedOptionModule [ "services" "kubernetes" "etcd" "caFile" ] [ "services" "kubernetes" "apiserver" "etcd" "caFile" ])
+    (mkRemovedOptionModule [ "services" "kubernetes" "kubelet" "applyManifests" ] "")
     (mkRemovedOptionModule [ "services" "kubernetes" "kubelet" "cadvisorPort" ] "")
+    (mkRenamedOptionModule [ "services" "kubernetes" "proxy" "address" ] ["services" "kubernetes" "proxy" "bindAddress"])
+    (mkRemovedOptionModule [ "services" "kubernetes" "verbose" ] "")
     (mkRenamedOptionModule [ "services" "logstash" "address" ] [ "services" "logstash" "listenAddress" ])
     (mkRenamedOptionModule [ "services" "mpd" "network" "host" ] [ "services" "mpd" "network" "listenAddress" ])
     (mkRenamedOptionModule [ "services" "neo4j" "host" ] [ "services" "neo4j" "defaultListenAddress" ])
@@ -59,6 +69,7 @@ with lib;
     (mkRenamedOptionModule [ "services" "statsd" "host" ] [ "services" "statsd" "listenAddress" ])
     (mkRenamedOptionModule [ "services" "subsonic" "host" ] [ "services" "subsonic" "listenAddress" ])
     (mkRenamedOptionModule [ "services" "tor" "relay" "portSpec" ] [ "services" "tor" "relay" "port" ])
+    (mkRenamedOptionModule [ "services" "vmwareGuest" ] [ "virtualisation" "vmware" "guest" ])
     (mkRenamedOptionModule [ "jobs" ] [ "systemd" "services" ])
 
     (mkRenamedOptionModule [ "services" "gitlab" "stateDir" ] [ "services" "gitlab" "statePath" ])
diff --git a/nixos/modules/services/cluster/kubernetes/addon-manager.nix b/nixos/modules/services/cluster/kubernetes/addon-manager.nix
new file mode 100644
index 00000000000..17f2dde31a7
--- /dev/null
+++ b/nixos/modules/services/cluster/kubernetes/addon-manager.nix
@@ -0,0 +1,167 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  top = config.services.kubernetes;
+  cfg = top.addonManager;
+
+  isRBACEnabled = elem "RBAC" top.apiserver.authorizationMode;
+
+  addons = pkgs.runCommand "kubernetes-addons" { } ''
+    mkdir -p $out
+    # since we are mounting the addons to the addon manager, they need to be copied
+    ${concatMapStringsSep ";" (a: "cp -v ${a}/* $out/") (mapAttrsToList (name: addon:
+      pkgs.writeTextDir "${name}.json" (builtins.toJSON addon)
+    ) (cfg.addons))}
+  '';
+in
+{
+  ###### interface
+  options.services.kubernetes.addonManager = with lib.types; {
+
+    bootstrapAddons = mkOption {
+      description = ''
+        Bootstrap addons are like regular addons, but they are applied with cluster-admin rights.
+        They are applied at addon-manager startup only.
+      '';
+      default = { };
+      type = attrsOf attrs;
+      example = literalExample ''
+        {
+          "my-service" = {
+            "apiVersion" = "v1";
+            "kind" = "Service";
+            "metadata" = {
+              "name" = "my-service";
+              "namespace" = "default";
+            };
+            "spec" = { ... };
+          };
+        }
+      '';
+    };
+
+    addons = mkOption {
+      description = "Kubernetes addons (any kind of Kubernetes resource can be an addon).";
+      default = { };
+      type = attrsOf (either attrs (listOf attrs));
+      example = literalExample ''
+        {
+          "my-service" = {
+            "apiVersion" = "v1";
+            "kind" = "Service";
+            "metadata" = {
+              "name" = "my-service";
+              "namespace" = "default";
+            };
+            "spec" = { ... };
+          };
+        }
+        // import <nixpkgs/nixos/modules/services/cluster/kubernetes/dashboard.nix> { cfg = config.services.kubernetes; };
+      '';
+    };
+
+    enable = mkEnableOption "Whether to enable Kubernetes addon manager.";
+  };
+
+  ###### implementation
+  config = mkIf cfg.enable {
+    environment.etc."kubernetes/addons".source = "${addons}/";
+
+    systemd.services.kube-addon-manager = {
+      description = "Kubernetes addon manager";
+      wantedBy = [ "kubernetes.target" ];
+      after = [ "kube-apiserver.service" ];
+      environment.ADDON_PATH = "/etc/kubernetes/addons/";
+      path = [ pkgs.gawk ];
+      serviceConfig = {
+        Slice = "kubernetes.slice";
+        ExecStart = "${top.package}/bin/kube-addons";
+        WorkingDirectory = top.dataDir;
+        User = "kubernetes";
+        Group = "kubernetes";
+        Restart = "on-failure";
+        RestartSec = 10;
+      };
+    };
+
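+    # With RBAC enabled, grant the addon manager user full rights within the
+    # kube-system namespace plus list-only rights cluster-wide.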
+    services.kubernetes.addonManager.bootstrapAddons = mkIf isRBACEnabled
+    (let
+      name = "system:kube-addon-manager";
+      namespace = "kube-system";
+    in
+    {
+
+      kube-addon-manager-r = {
+        apiVersion = "rbac.authorization.k8s.io/v1";
+        kind = "Role";
+        metadata = {
+          inherit name namespace;
+        };
+        rules = [{
+          apiGroups = ["*"];
+          resources = ["*"];
+          verbs = ["*"];
+        }];
+      };
+
+      kube-addon-manager-rb = {
+        apiVersion = "rbac.authorization.k8s.io/v1";
+        kind = "RoleBinding";
+        metadata = {
+          inherit name namespace;
+        };
+        roleRef = {
+          apiGroup = "rbac.authorization.k8s.io";
+          kind = "Role";
+          inherit name;
+        };
+        subjects = [{
+          apiGroup = "rbac.authorization.k8s.io";
+          kind = "User";
+          inherit name;
+        }];
+      };
+
+      kube-addon-manager-cluster-lister-cr = {
+        apiVersion = "rbac.authorization.k8s.io/v1";
+        kind = "ClusterRole";
+        metadata = {
+          name = "${name}:cluster-lister";
+        };
+        rules = [{
+          apiGroups = ["*"];
+          resources = ["*"];
+          verbs = ["list"];
+        }];
+      };
+
+      kube-addon-manager-cluster-lister-crb = {
+        apiVersion = "rbac.authorization.k8s.io/v1";
+        kind = "ClusterRoleBinding";
+        metadata = {
+          name = "${name}:cluster-lister";
+        };
+        roleRef = {
+          apiGroup = "rbac.authorization.k8s.io";
+          kind = "ClusterRole";
+          name = "${name}:cluster-lister";
+        };
+        subjects = [{
+          kind = "User";
+          inherit name;
+        }];
+      };
+    });
+
+    services.kubernetes.pki.certs = {
+      addonManager = top.lib.mkCert {
+        name = "kube-addon-manager";
+        CN = "system:kube-addon-manager";
+        action = "systemctl restart kube-addon-manager.service";
+      };
+    };
+  };
+
+}
diff --git a/nixos/modules/services/cluster/kubernetes/dashboard.nix b/nixos/modules/services/cluster/kubernetes/addons/dashboard.nix
index cbd6e8f7bf7..454e7d35bc0 100644
--- a/nixos/modules/services/cluster/kubernetes/dashboard.nix
+++ b/nixos/modules/services/cluster/kubernetes/addons/dashboard.nix
@@ -8,6 +8,13 @@ in {
   options.services.kubernetes.addons.dashboard = {
     enable = mkEnableOption "kubernetes dashboard addon";
 
+    extraArgs = mkOption {
+      description = "Extra arguments to append to the dashboard cmdline";
+      type = types.listOf types.str;
+      default = [];
+      example = ["--enable-skip-login"];
+    };
+
     rbac = mkOption {
       description = "Role-based access control (RBAC) options";
       default = {};
@@ -31,7 +38,7 @@ in {
     version = mkOption {
       description = "Which version of the kubernetes dashboard to deploy";
       type = types.str;
-      default = "v1.8.3";
+      default = "v1.10.1";
     };
 
     image = mkOption {
@@ -39,9 +46,9 @@ in {
       type = types.attrs;
       default = {
         imageName = "k8s.gcr.io/kubernetes-dashboard-amd64";
-        imageDigest = "sha256:dc4026c1b595435ef5527ca598e1e9c4343076926d7d62b365c44831395adbd0";
+        imageDigest = "sha256:0ae6b69432e78069c5ce2bcde0fe409c5c4d6f0f4d9cd50a17974fea38898747";
         finalImageTag = cfg.version;
-        sha256 = "18ajcg0q1vignfjk2sm4xj4wzphfz8wah69ps8dklqfvv0164mc8";
+        sha256 = "01xrr4pwgr2hcjrjsi3d14ifpzdfbxzqpzxbk2fkbjb9zkv38zxy";
       };
     };
   };
@@ -99,7 +106,7 @@ in {
                     memory = "100Mi";
                   };
                 };
-                args = ["--auto-generate-certificates"];
+                args = ["--auto-generate-certificates"] ++ cfg.extraArgs;
                 volumeMounts = [{
                   name = "tmp-volume";
                   mountPath = "/tmp";
diff --git a/nixos/modules/services/cluster/kubernetes/dns.nix b/nixos/modules/services/cluster/kubernetes/addons/dns.nix
index 5a3e281ea69..8f3234bfc70 100644
--- a/nixos/modules/services/cluster/kubernetes/dns.nix
+++ b/nixos/modules/services/cluster/kubernetes/addons/dns.nix
@@ -3,7 +3,7 @@
 with lib;
 
 let
-  version = "1.2.5";
+  version = "1.3.1";
   cfg = config.services.kubernetes.addons.dns;
   ports = {
     dns = 10053;
@@ -43,9 +43,9 @@ in {
       type = types.attrs;
       default = {
         imageName = "coredns/coredns";
-        imageDigest = "sha256:33c8da20b887ae12433ec5c40bfddefbbfa233d5ce11fb067122e68af30291d6";
+        imageDigest = "sha256:02382353821b12c21b062c59184e227e001079bb13ebd01f9d3270ba0fcbf1e4";
         finalImageTag = version;
-        sha256 = "13q19rgwapv27xcs664dw502254yw4zw63insf6g2danidv2mg6i";
+        sha256 = "0vbylgyxv2jm2mnzk6f28jbsj305zsxmx3jr6ngjq461czcl5fi5";
       };
     };
   };
@@ -54,21 +54,7 @@ in {
     services.kubernetes.kubelet.seedDockerImages =
       singleton (pkgs.dockerTools.pullImage cfg.coredns);
 
-    services.kubernetes.addonManager.addons = {
-      coredns-sa = {
-        apiVersion = "v1";
-        kind = "ServiceAccount";
-        metadata = {
-          labels = {
-            "addonmanager.kubernetes.io/mode" = "Reconcile";
-            "k8s-app" = "kube-dns";
-            "kubernetes.io/cluster-service" = "true";
-          };
-          name = "coredns";
-          namespace = "kube-system";
-        };
-      };
-
+    services.kubernetes.addonManager.bootstrapAddons = {
       coredns-cr = {
         apiVersion = "rbac.authorization.k8s.io/v1beta1";
         kind = "ClusterRole";
@@ -123,6 +109,22 @@ in {
           }
         ];
       };
+    };
+
+    services.kubernetes.addonManager.addons = {
+      coredns-sa = {
+        apiVersion = "v1";
+        kind = "ServiceAccount";
+        metadata = {
+          labels = {
+            "addonmanager.kubernetes.io/mode" = "Reconcile";
+            "k8s-app" = "kube-dns";
+            "kubernetes.io/cluster-service" = "true";
+          };
+          name = "coredns";
+          namespace = "kube-system";
+        };
+      };
 
       coredns-cm = {
         apiVersion = "v1";
diff --git a/nixos/modules/services/cluster/kubernetes/apiserver.nix b/nixos/modules/services/cluster/kubernetes/apiserver.nix
new file mode 100644
index 00000000000..81e45b417de
--- /dev/null
+++ b/nixos/modules/services/cluster/kubernetes/apiserver.nix
@@ -0,0 +1,428 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  top = config.services.kubernetes;
+  cfg = top.apiserver;
+
+  isRBACEnabled = elem "RBAC" cfg.authorizationMode;
+
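+  # First address of the service cluster IP range, conventionally assigned to
+  # the apiserver's "kubernetes" service (e.g. "10.0.0.0/24" -> "10.0.0.1").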
+  apiserverServiceIP = (concatStringsSep "." (
+    take 3 (splitString "." cfg.serviceClusterIpRange
+  )) + ".1");
+in
+{
+  ###### interface
+  options.services.kubernetes.apiserver = with lib.types; {
+
+    advertiseAddress = mkOption {
+      description = ''
+        Kubernetes apiserver IP address on which to advertise the apiserver
+        to members of the cluster. This address must be reachable by the rest
+        of the cluster.
+      '';
+      default = null;
+      type = nullOr str;
+    };
+
+    allowPrivileged = mkOption {
+      description = "Whether to allow privileged containers on Kubernetes.";
+      default = false;
+      type = bool;
+    };
+
+    authorizationMode = mkOption {
+      description = ''
+        Kubernetes apiserver authorization mode (AlwaysAllow/AlwaysDeny/ABAC/Webhook/RBAC/Node). See
+        <link xlink:href="https://kubernetes.io/docs/reference/access-authn-authz/authorization/"/>
+      '';
+      default = ["RBAC" "Node"]; # Enabling RBAC by default, although kubernetes default is AllowAllow
+      type = listOf (enum ["AlwaysAllow" "AlwaysDeny" "ABAC" "Webhook" "RBAC" "Node"]);
+    };
+
+    authorizationPolicy = mkOption {
+      description = ''
+        Kubernetes apiserver authorization policy file. See
+        <link xlink:href="https://kubernetes.io/docs/reference/access-authn-authz/authorization/"/>
+      '';
+      default = [];
+      type = listOf attrs;
+    };
+
+    basicAuthFile = mkOption {
+      description = ''
+        Kubernetes apiserver basic authentication file. See
+        <link xlink:href="https://kubernetes.io/docs/reference/access-authn-authz/authentication"/>
+      '';
+      default = null;
+      type = nullOr path;
+    };
+
+    bindAddress = mkOption {
+      description = ''
+        The IP address on which to listen for the --secure-port port.
+        The associated interface(s) must be reachable by the rest
+        of the cluster, and by CLI/web clients.
+      '';
+      default = "0.0.0.0";
+      type = str;
+    };
+
+    clientCaFile = mkOption {
+      description = "Kubernetes apiserver CA file for client auth.";
+      default = top.caFile;
+      type = nullOr path;
+    };
+
+    disableAdmissionPlugins = mkOption {
+      description = ''
+        Kubernetes admission control plugins to disable. See
+        <link xlink:href="https://kubernetes.io/docs/admin/admission-controllers/"/>
+      '';
+      default = [];
+      type = listOf str;
+    };
+
+    enable = mkEnableOption "Kubernetes apiserver";
+
+    enableAdmissionPlugins = mkOption {
+      description = ''
+        Kubernetes admission control plugins to enable. See
+        <link xlink:href="https://kubernetes.io/docs/admin/admission-controllers/"/>
+      '';
+      default = [
+        "NamespaceLifecycle" "LimitRanger" "ServiceAccount"
+        "ResourceQuota" "DefaultStorageClass" "DefaultTolerationSeconds"
+        "NodeRestriction"
+      ];
+      example = [
+        "NamespaceLifecycle" "NamespaceExists" "LimitRanger"
+        "SecurityContextDeny" "ServiceAccount" "ResourceQuota"
+        "PodSecurityPolicy" "NodeRestriction" "DefaultStorageClass"
+      ];
+      type = listOf str;
+    };
+
+    etcd = {
+      servers = mkOption {
+        description = "List of etcd servers.";
+        default = ["http://127.0.0.1:2379"];
+        type = types.listOf types.str;
+      };
+
+      keyFile = mkOption {
+        description = "Etcd key file.";
+        default = null;
+        type = types.nullOr types.path;
+      };
+
+      certFile = mkOption {
+        description = "Etcd cert file.";
+        default = null;
+        type = types.nullOr types.path;
+      };
+
+      caFile = mkOption {
+        description = "Etcd ca file.";
+        default = top.caFile;
+        type = types.nullOr types.path;
+      };
+    };
+
+    extraOpts = mkOption {
+      description = "Kubernetes apiserver extra command line options.";
+      default = "";
+      type = str;
+    };
+
+    extraSANs = mkOption {
+      description = "Extra x509 Subject Alternative Names to be added to the kubernetes apiserver tls cert.";
+      default = [];
+      type = listOf str;
+    };
+
+    featureGates = mkOption {
+      description = "List set of feature gates";
+      default = top.featureGates;
+      type = listOf str;
+    };
+
+    insecureBindAddress = mkOption {
+      description = "The IP address on which to serve the --insecure-port.";
+      default = "127.0.0.1";
+      type = str;
+    };
+
+    insecurePort = mkOption {
+      description = "Kubernetes apiserver insecure listening port. (0 = disabled)";
+      default = 0;
+      type = int;
+    };
+
+    kubeletClientCaFile = mkOption {
+      description = "Path to a cert file for connecting to kubelet.";
+      default = top.caFile;
+      type = nullOr path;
+    };
+
+    kubeletClientCertFile = mkOption {
+      description = "Client certificate to use for connections to kubelet.";
+      default = null;
+      type = nullOr path;
+    };
+
+    kubeletClientKeyFile = mkOption {
+      description = "Key to use for connections to kubelet.";
+      default = null;
+      type = nullOr path;
+    };
+
+    kubeletHttps = mkOption {
+      description = "Whether to use https for connections to kubelet.";
+      default = true;
+      type = bool;
+    };
+
+    runtimeConfig = mkOption {
+      description = ''
+        Api runtime configuration. See
+        <link xlink:href="https://kubernetes.io/docs/tasks/administer-cluster/cluster-management/"/>
+      '';
+      default = "authentication.k8s.io/v1beta1=true";
+      example = "api/all=false,api/v1=true";
+      type = str;
+    };
+
+    storageBackend = mkOption {
+      description = ''
+        Kubernetes apiserver storage backend.
+      '';
+      default = "etcd3";
+      type = enum ["etcd2" "etcd3"];
+    };
+
+    securePort = mkOption {
+      description = "Kubernetes apiserver secure port.";
+      default = 6443;
+      type = int;
+    };
+
+    serviceAccountKeyFile = mkOption {
+      description = ''
+        Kubernetes apiserver PEM-encoded x509 RSA private or public key file,
+        used to verify ServiceAccount tokens. By default, the TLS private key
+        file is used.
+      '';
+      default = null;
+      type = nullOr path;
+    };
+
+    serviceClusterIpRange = mkOption {
+      description = ''
+        A CIDR notation IP range from which to assign service cluster IPs.
+        This must not overlap with any IP ranges assigned to nodes for pods.
+      '';
+      default = "10.0.0.0/24";
+      type = str;
+    };
+
+    tlsCertFile = mkOption {
+      description = "Kubernetes apiserver certificate file.";
+      default = null;
+      type = nullOr path;
+    };
+
+    tlsKeyFile = mkOption {
+      description = "Kubernetes apiserver private key file.";
+      default = null;
+      type = nullOr path;
+    };
+
+    tokenAuthFile = mkOption {
+      description = ''
+        Kubernetes apiserver token authentication file. See
+        <link xlink:href="https://kubernetes.io/docs/reference/access-authn-authz/authentication"/>
+      '';
+      default = null;
+      type = nullOr path;
+    };
+
+    verbosity = mkOption {
+      description = ''
+        Optional glog verbosity level for logging statements. See
+        <link xlink:href="https://github.com/kubernetes/community/blob/master/contributors/devel/logging.md"/>
+      '';
+      default = null;
+      type = nullOr int;
+    };
+
+    webhookConfig = mkOption {
+      description = ''
+        Kubernetes apiserver Webhook config file. It uses the kubeconfig file format.
+        See <link xlink:href="https://kubernetes.io/docs/reference/access-authn-authz/webhook/"/>
+      '';
+      default = null;
+      type = nullOr path;
+    };
+
+  };
+
+
+  ###### implementation
+  config = mkMerge [
+
+    (mkIf cfg.enable {
+        systemd.services.kube-apiserver = {
+          description = "Kubernetes APIServer Service";
+          wantedBy = [ "kubernetes.target" ];
+          after = [ "network.target" ];
+          serviceConfig = {
+            Slice = "kubernetes.slice";
+            ExecStart = ''${top.package}/bin/kube-apiserver \
+              --allow-privileged=${boolToString cfg.allowPrivileged} \
+              --authorization-mode=${concatStringsSep "," cfg.authorizationMode} \
+                ${optionalString (elem "ABAC" cfg.authorizationMode)
+                  "--authorization-policy-file=${
+                    pkgs.writeText "kube-auth-policy.jsonl"
+                    (concatMapStringsSep "\n" (l: builtins.toJSON l) cfg.authorizationPolicy)
+                  }"
+                } \
+                ${optionalString (elem "Webhook" cfg.authorizationMode)
+                  "--authorization-webhook-config-file=${cfg.webhookConfig}"
+                } \
+              --bind-address=${cfg.bindAddress} \
+              ${optionalString (cfg.advertiseAddress != null)
+                "--advertise-address=${cfg.advertiseAddress}"} \
+              ${optionalString (cfg.clientCaFile != null)
+                "--client-ca-file=${cfg.clientCaFile}"} \
+              --disable-admission-plugins=${concatStringsSep "," cfg.disableAdmissionPlugins} \
+              --enable-admission-plugins=${concatStringsSep "," cfg.enableAdmissionPlugins} \
+              --etcd-servers=${concatStringsSep "," cfg.etcd.servers} \
+              ${optionalString (cfg.etcd.caFile != null)
+                "--etcd-cafile=${cfg.etcd.caFile}"} \
+              ${optionalString (cfg.etcd.certFile != null)
+                "--etcd-certfile=${cfg.etcd.certFile}"} \
+              ${optionalString (cfg.etcd.keyFile != null)
+                "--etcd-keyfile=${cfg.etcd.keyFile}"} \
+              ${optionalString (cfg.featureGates != [])
+                "--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.featureGates}"} \
+              ${optionalString (cfg.basicAuthFile != null)
+                "--basic-auth-file=${cfg.basicAuthFile}"} \
+              --kubelet-https=${boolToString cfg.kubeletHttps} \
+              ${optionalString (cfg.kubeletClientCaFile != null)
+                "--kubelet-certificate-authority=${cfg.kubeletClientCaFile}"} \
+              ${optionalString (cfg.kubeletClientCertFile != null)
+                "--kubelet-client-certificate=${cfg.kubeletClientCertFile}"} \
+              ${optionalString (cfg.kubeletClientKeyFile != null)
+                "--kubelet-client-key=${cfg.kubeletClientKeyFile}"} \
+              --insecure-bind-address=${cfg.insecureBindAddress} \
+              --insecure-port=${toString cfg.insecurePort} \
+              ${optionalString (cfg.runtimeConfig != "")
+                "--runtime-config=${cfg.runtimeConfig}"} \
+              --secure-port=${toString cfg.securePort} \
+              ${optionalString (cfg.serviceAccountKeyFile!=null)
+                "--service-account-key-file=${cfg.serviceAccountKeyFile}"} \
+              --service-cluster-ip-range=${cfg.serviceClusterIpRange} \
+              --storage-backend=${cfg.storageBackend} \
+              ${optionalString (cfg.tlsCertFile != null)
+                "--tls-cert-file=${cfg.tlsCertFile}"} \
+              ${optionalString (cfg.tlsKeyFile != null)
+                "--tls-private-key-file=${cfg.tlsKeyFile}"} \
+              ${optionalString (cfg.tokenAuthFile != null)
+                "--token-auth-file=${cfg.tokenAuthFile}"} \
+              ${optionalString (cfg.verbosity != null) "--v=${toString cfg.verbosity}"} \
+              ${cfg.extraOpts}
+            '';
+            WorkingDirectory = top.dataDir;
+            User = "kubernetes";
+            Group = "kubernetes";
+            AmbientCapabilities = "cap_net_bind_service";
+            Restart = "on-failure";
+            RestartSec = 5;
+          };
+        };
+
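+        # Co-locate a TLS-enabled single-member etcd with the apiserver; all
+        # settings except the node name use mkDefault and can be overridden.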
+        services.etcd = {
+          clientCertAuth = mkDefault true;
+          peerClientCertAuth = mkDefault true;
+          listenClientUrls = mkDefault ["https://0.0.0.0:2379"];
+          listenPeerUrls = mkDefault ["https://0.0.0.0:2380"];
+          advertiseClientUrls = mkDefault ["https://${top.masterAddress}:2379"];
+          initialCluster = mkDefault ["${top.masterAddress}=https://${top.masterAddress}:2380"];
+          name = top.masterAddress;
+          initialAdvertisePeerUrls = mkDefault ["https://${top.masterAddress}:2380"];
+        };
+
+        services.kubernetes.addonManager.bootstrapAddons = mkIf isRBACEnabled {
+
+          apiserver-kubelet-api-admin-crb = {
+            apiVersion = "rbac.authorization.k8s.io/v1";
+            kind = "ClusterRoleBinding";
+            metadata = {
+              name = "system:kube-apiserver:kubelet-api-admin";
+            };
+            roleRef = {
+              apiGroup = "rbac.authorization.k8s.io";
+              kind = "ClusterRole";
+              name = "system:kubelet-api-admin";
+            };
+            subjects = [{
+              kind = "User";
+              name = "system:kube-apiserver";
+            }];
+          };
+
+        };
+
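+      # Certificates requested from the easyCerts PKI: the apiserver serving
+      # cert, client certs for kubelet and etcd access, the root-owned
+      # cluster-admin client cert, and the etcd server cert.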
+      services.kubernetes.pki.certs = with top.lib; {
+        apiServer = mkCert {
+          name = "kube-apiserver";
+          CN = "kubernetes";
+          hosts = [
+                    "kubernetes.default.svc"
+                    "kubernetes.default.svc.${top.addons.dns.clusterDomain}"
+                    cfg.advertiseAddress
+                    top.masterAddress
+                    apiserverServiceIP
+                    "127.0.0.1"
+                  ] ++ cfg.extraSANs;
+          action = "systemctl restart kube-apiserver.service";
+        };
+        apiserverKubeletClient = mkCert {
+          name = "kube-apiserver-kubelet-client";
+          CN = "system:kube-apiserver";
+          action = "systemctl restart kube-apiserver.service";
+        };
+        apiserverEtcdClient = mkCert {
+          name = "kube-apiserver-etcd-client";
+          CN = "etcd-client";
+          action = "systemctl restart kube-apiserver.service";
+        };
+        clusterAdmin = mkCert {
+          name = "cluster-admin";
+          CN = "cluster-admin";
+          fields = {
+            O = "system:masters";
+          };
+          privateKeyOwner = "root";
+        };
+        etcd = mkCert {
+          name = "etcd";
+          CN = top.masterAddress;
+          hosts = [
+                    "etcd.local"
+                    "etcd.${top.addons.dns.clusterDomain}"
+                    top.masterAddress
+                    cfg.advertiseAddress
+                  ];
+          privateKeyOwner = "etcd";
+          action = "systemctl restart etcd.service";
+        };
+      };
+
+    })
+
+  ];
+
+}
diff --git a/nixos/modules/services/cluster/kubernetes/controller-manager.nix b/nixos/modules/services/cluster/kubernetes/controller-manager.nix
new file mode 100644
index 00000000000..dff97f144d5
--- /dev/null
+++ b/nixos/modules/services/cluster/kubernetes/controller-manager.nix
@@ -0,0 +1,162 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  top = config.services.kubernetes;
+  cfg = top.controllerManager;
+in
+{
+  ###### interface
+  options.services.kubernetes.controllerManager = with lib.types; {
+
+    allocateNodeCIDRs = mkOption {
+      description = "Whether to automatically allocate CIDR ranges for cluster nodes.";
+      default = true;
+      type = bool;
+    };
+
+    bindAddress = mkOption {
+      description = "Kubernetes controller manager listening address.";
+      default = "127.0.0.1";
+      type = str;
+    };
+
+    clusterCidr = mkOption {
+      description = "Kubernetes CIDR Range for Pods in cluster.";
+      default = top.clusterCidr;
+      type = str;
+    };
+
+    enable = mkEnableOption "Kubernetes controller manager.";
+
+    extraOpts = mkOption {
+      description = "Kubernetes controller manager extra command line options.";
+      default = "";
+      type = str;
+    };
+
+    featureGates = mkOption {
+      description = "List set of feature gates";
+      default = top.featureGates;
+      type = listOf str;
+    };
+
+    insecurePort = mkOption {
+      description = "Kubernetes controller manager insecure listening port.";
+      default = 0;
+      type = int;
+    };
+
+    kubeconfig = top.lib.mkKubeConfigOptions "Kubernetes controller manager";
+
+    leaderElect = mkOption {
+      description = "Whether to start leader election before executing main loop.";
+      type = bool;
+      default = true;
+    };
+
+    rootCaFile = mkOption {
+      description = ''
+        Kubernetes controller manager certificate authority file included in
+        service account's token secret.
+      '';
+      default = top.caFile;
+      type = nullOr path;
+    };
+
+    securePort = mkOption {
+      description = "Kubernetes controller manager secure listening port.";
+      default = 10252;
+      type = int;
+    };
+
+    serviceAccountKeyFile = mkOption {
+      description = ''
+        Kubernetes controller manager PEM-encoded private RSA key file used to
+        sign service account tokens.
+      '';
+      default = null;
+      type = nullOr path;
+    };
+
+    tlsCertFile = mkOption {
+      description = "Kubernetes controller-manager certificate file.";
+      default = null;
+      type = nullOr path;
+    };
+
+    tlsKeyFile = mkOption {
+      description = "Kubernetes controller-manager private key file.";
+      default = null;
+      type = nullOr path;
+    };
+
+    verbosity = mkOption {
+      description = ''
+        Optional glog verbosity level for logging statements. See
+        <link xlink:href="https://github.com/kubernetes/community/blob/master/contributors/devel/logging.md"/>
+      '';
+      default = null;
+      type = nullOr int;
+    };
+
+  };
+
+  ###### implementation
+  config = mkIf cfg.enable {
+    systemd.services.kube-controller-manager = {
+      description = "Kubernetes Controller Manager Service";
+      wantedBy = [ "kubernetes.target" ];
+      after = [ "kube-apiserver.service" ];
+      serviceConfig = {
+        RestartSec = "30s";
+        Restart = "on-failure";
+        Slice = "kubernetes.slice";
+        ExecStart = ''${top.package}/bin/kube-controller-manager \
+          --allocate-node-cidrs=${boolToString cfg.allocateNodeCIDRs} \
+          --bind-address=${cfg.bindAddress} \
+          ${optionalString (cfg.clusterCidr!=null)
+            "--cluster-cidr=${cfg.clusterCidr}"} \
+          ${optionalString (cfg.featureGates != [])
+            "--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.featureGates}"} \
+          --kubeconfig=${top.lib.mkKubeConfig "kube-controller-manager" cfg.kubeconfig} \
+          --leader-elect=${boolToString cfg.leaderElect} \
+          ${optionalString (cfg.rootCaFile!=null)
+            "--root-ca-file=${cfg.rootCaFile}"} \
+          --port=${toString cfg.insecurePort} \
+          --secure-port=${toString cfg.securePort} \
+          ${optionalString (cfg.serviceAccountKeyFile!=null)
+            "--service-account-private-key-file=${cfg.serviceAccountKeyFile}"} \
+          ${optionalString (cfg.tlsCertFile!=null)
+            "--tls-cert-file=${cfg.tlsCertFile}"} \
+          ${optionalString (cfg.tlsKeyFile!=null)
+            "--tls-key-file=${cfg.tlsKeyFile}"} \
+          ${optionalString (elem "RBAC" top.apiserver.authorizationMode)
+            "--use-service-account-credentials"} \
+          ${optionalString (cfg.verbosity != null) "--v=${toString cfg.verbosity}"} \
+          ${cfg.extraOpts}
+        '';
+        WorkingDirectory = top.dataDir;
+        User = "kubernetes";
+        Group = "kubernetes";
+      };
+      path = top.path;
+    };
+
+    services.kubernetes.pki.certs = with top.lib; {
+      controllerManager = mkCert {
+        name = "kube-controller-manager";
+        CN = "kube-controller-manager";
+        action = "systemctl restart kube-controller-manager.service";
+      };
+      controllerManagerClient = mkCert {
+        name = "kube-controller-manager-client";
+        CN = "system:kube-controller-manager";
+        action = "systemctl restart kube-controller-manager.service";
+      };
+    };
+
+    services.kubernetes.controllerManager.kubeconfig.server = mkDefault top.apiserverAddress;
+  };
+}
diff --git a/nixos/modules/services/cluster/kubernetes/default.nix b/nixos/modules/services/cluster/kubernetes/default.nix
index 6f3c45b29bf..375e33e91b5 100644
--- a/nixos/modules/services/cluster/kubernetes/default.nix
+++ b/nixos/modules/services/cluster/kubernetes/default.nix
@@ -5,74 +5,52 @@ with lib;
 let
   cfg = config.services.kubernetes;
 
-  # YAML config; see:
-  #   https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/
-  #   https://github.com/kubernetes/kubernetes/blob/release-1.10/pkg/kubelet/apis/kubeletconfig/v1beta1/types.go
-  #
-  # TODO: migrate the following flags to this config file
-  #
-  #   --pod-manifest-path
-  #   --address
-  #   --port
-  #   --tls-cert-file
-  #   --tls-private-key-file
-  #   --client-ca-file
-  #   --authentication-token-webhook
-  #   --authentication-token-webhook-cache-ttl
-  #   --authorization-mode
-  #   --healthz-bind-address
-  #   --healthz-port
-  #   --allow-privileged
-  #   --cluster-dns
-  #   --cluster-domain
-  #   --hairpin-mode
-  #   --feature-gates
-  kubeletConfig = pkgs.runCommand "kubelet-config.yaml" { } ''
-    echo > $out ${pkgs.lib.escapeShellArg (builtins.toJSON {
-      kind = "KubeletConfiguration";
-      apiVersion = "kubelet.config.k8s.io/v1beta1";
-      ${if cfg.kubelet.applyManifests then "staticPodPath" else null} =
-        manifests;
-    })}
-  '';
-
-  infraContainer = pkgs.dockerTools.buildImage {
-    name = "pause";
-    tag = "latest";
-    contents = cfg.package.pause;
-    config.Cmd = "/bin/pause";
-  };
-
-  mkKubeConfig = name: cfg: pkgs.writeText "${name}-kubeconfig" (builtins.toJSON {
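+  # Render a per-component kubeconfig as JSON in the store, pointing a user
+  # named after the component at the "local" cluster.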
+  mkKubeConfig = name: conf: pkgs.writeText "${name}-kubeconfig" (builtins.toJSON {
     apiVersion = "v1";
     kind = "Config";
     clusters = [{
       name = "local";
       cluster.certificate-authority = cfg.caFile;
-      cluster.server = cfg.server;
+      cluster.server = conf.server;
     }];
     users = [{
-      name = "kubelet";
+      inherit name;
       user = {
-        client-certificate = cfg.certFile;
-        client-key = cfg.keyFile;
+        client-certificate = conf.certFile;
+        client-key = conf.keyFile;
       };
     }];
     contexts = [{
       context = {
         cluster = "local";
-        user = "kubelet";
+        user = name;
       };
-      current-context = "kubelet-context";
+      current-context = "local";
     }];
   });
 
+  caCert = secret "ca";
+
+  etcdEndpoints = ["https://${cfg.masterAddress}:2379"];
+
+  mkCert = { name, CN, hosts ? [], fields ? {}, action ? "",
+             privateKeyOwner ? "kubernetes" }: rec {
+    inherit name caCert CN hosts fields action;
+    cert = secret name;
+    key = secret "${name}-key";
+    privateKeyOptions = {
+      owner = privateKeyOwner;
+      group = "nogroup";
+      mode = "0600";
+      path = key;
+    };
+  };
+
+  secret = name: "${cfg.secretsPath}/${name}.pem";
+
   mkKubeConfigOptions = prefix: {
     server = mkOption {
       description = "${prefix} kube-apiserver server address.";
-      default = "http://${if cfg.apiserver.advertiseAddress != null
-                          then cfg.apiserver.advertiseAddress
-                          else "127.0.0.1"}:${toString cfg.apiserver.port}";
       type = types.str;
     };
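
For orientation, a sketch of invoking the reworked helper (server and paths
are hypothetical); the result is a store file with one cluster ("local"), one
user named after the first argument, and one context joining the two:

  config.services.kubernetes.lib.mkKubeConfig "kube-proxy" {
    server   = "https://master.example.com:6443";
    certFile = "/var/lib/kubernetes/secrets/kube-proxy-client.pem";
    keyFile  = "/var/lib/kubernetes/secrets/kube-proxy-client-key.pem";
  }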
 
@@ -101,66 +79,6 @@ let
     certFile = mkDefault cfg.kubeconfig.certFile;
     keyFile = mkDefault cfg.kubeconfig.keyFile;
   };
-
-  cniConfig =
-    if cfg.kubelet.cni.config != [] && !(isNull cfg.kubelet.cni.configDir) then
-      throw "Verbatim CNI-config and CNI configDir cannot both be set."
-    else if !(isNull cfg.kubelet.cni.configDir) then
-      cfg.kubelet.cni.configDir
-    else
-      (pkgs.buildEnv {
-        name = "kubernetes-cni-config";
-        paths = imap (i: entry:
-          pkgs.writeTextDir "${toString (10+i)}-${entry.type}.conf" (builtins.toJSON entry)
-        ) cfg.kubelet.cni.config;
-      });
-
-  manifests = pkgs.buildEnv {
-    name = "kubernetes-manifests";
-    paths = mapAttrsToList (name: manifest:
-      pkgs.writeTextDir "${name}.json" (builtins.toJSON manifest)
-    ) cfg.kubelet.manifests;
-  };
-
-  addons = pkgs.runCommand "kubernetes-addons" { } ''
-    mkdir -p $out
-    # since we are mounting the addons to the addon manager, they need to be copied
-    ${concatMapStringsSep ";" (a: "cp -v ${a}/* $out/") (mapAttrsToList (name: addon:
-      pkgs.writeTextDir "${name}.json" (builtins.toJSON addon)
-    ) (cfg.addonManager.addons))}
-  '';
-
-  taintOptions = { name, ... }: {
-    options = {
-      key = mkOption {
-        description = "Key of taint.";
-        default = name;
-        type = types.str;
-      };
-      value = mkOption {
-        description = "Value of taint.";
-        type = types.str;
-      };
-      effect = mkOption {
-        description = "Effect of taint.";
-        example = "NoSchedule";
-        type = types.enum ["NoSchedule" "PreferNoSchedule" "NoExecute"];
-      };
-    };
-  };
-
-  taints = concatMapStringsSep "," (v: "${v.key}=${v.value}:${v.effect}") (mapAttrsToList (n: v: v) cfg.kubelet.taints);
-
-  # needed for flannel to pass options to docker
-  mkDockerOpts = pkgs.runCommand "mk-docker-opts" {
-    buildInputs = [ pkgs.makeWrapper ];
-  } ''
-    mkdir -p $out
-    cp ${pkgs.kubernetes.src}/cluster/centos/node/bin/mk-docker-opts.sh $out/mk-docker-opts.sh
-
-    # bashInteractive needed for `compgen`
-    makeWrapper ${pkgs.bashInteractive}/bin/bash $out/mk-docker-opts --add-flags "$out/mk-docker-opts.sh"
-  '';
 in {
 
   ###### interface
@@ -170,8 +88,9 @@ in {
       description = ''
         Kubernetes role that this machine should take.
 
-        Master role will enable etcd, apiserver, scheduler and controller manager
-        services. Node role will enable etcd, docker, kubelet and proxy services.
+        Master role will enable etcd, apiserver, scheduler, controller manager,
+        addon manager, flannel and proxy services.
+        Node role will enable flannel, docker, kubelet and proxy services.
       '';
       default = [];
       type = types.listOf (types.enum ["master" "node"]);
@@ -184,40 +103,17 @@ in {
       defaultText = "pkgs.kubernetes";
     };
 
-    verbose = mkOption {
-      description = "Kubernetes enable verbose mode for debugging.";
-      default = false;
-      type = types.bool;
-    };
-
-    etcd = {
-      servers = mkOption {
-        description = "List of etcd servers. By default etcd is started, except if this option is changed.";
-        default = ["http://127.0.0.1:2379"];
-        type = types.listOf types.str;
-      };
-
-      keyFile = mkOption {
-        description = "Etcd key file.";
-        default = null;
-        type = types.nullOr types.path;
-      };
-
-      certFile = mkOption {
-        description = "Etcd cert file.";
-        default = null;
-        type = types.nullOr types.path;
-      };
+    kubeconfig = mkKubeConfigOptions "Default kubeconfig";
 
-      caFile = mkOption {
-        description = "Etcd ca file.";
-        default = cfg.caFile;
-        type = types.nullOr types.path;
-      };
+    apiserverAddress = mkOption {
+      description = ''
+        Clusterwide accessible address for the kubernetes apiserver,
+        including protocol and optional port.
+      '';
+      example = "https://kubernetes-apiserver.example.com:6443";
+      type = types.str;
     };
 
-    kubeconfig = mkKubeConfigOptions "Default kubeconfig";
-
     caFile = mkOption {
       description = "Default kubernetes certificate authority";
       type = types.nullOr types.path;
@@ -230,549 +126,22 @@ in {
       type = types.path;
     };
 
+    easyCerts = mkOption {
+      description = "Automatically setup x509 certificates and keys for the entire cluster.";
+      default = false;
+      type = types.bool;
+    };
+
     featureGates = mkOption {
-      description = "List set of feature gates";
+      description = "List set of feature gates.";
       default = [];
       type = types.listOf types.str;
     };
 
-    apiserver = {
-      enable = mkOption {
-        description = "Whether to enable Kubernetes apiserver.";
-        default = false;
-        type = types.bool;
-      };
-
-      featureGates = mkOption {
-        description = "List set of feature gates";
-        default = cfg.featureGates;
-        type = types.listOf types.str;
-      };
-
-      bindAddress = mkOption {
-        description = ''
-          The IP address on which to listen for the --secure-port port.
-          The associated interface(s) must be reachable by the rest
-          of the cluster, and by CLI/web clients.
-        '';
-        default = "0.0.0.0";
-        type = types.str;
-      };
-
-      advertiseAddress = mkOption {
-        description = ''
-          Kubernetes apiserver IP address on which to advertise the apiserver
-          to members of the cluster. This address must be reachable by the rest
-          of the cluster.
-        '';
-        default = null;
-        type = types.nullOr types.str;
-      };
-
-      storageBackend = mkOption {
-        description = ''
-          Kubernetes apiserver storage backend.
-        '';
-        default = "etcd3";
-        type = types.enum ["etcd2" "etcd3"];
-      };
-
-      port = mkOption {
-        description = "Kubernetes apiserver listening port.";
-        default = 8080;
-        type = types.int;
-      };
-
-      securePort = mkOption {
-        description = "Kubernetes apiserver secure port.";
-        default = 443;
-        type = types.int;
-      };
-
-      tlsCertFile = mkOption {
-        description = "Kubernetes apiserver certificate file.";
-        default = null;
-        type = types.nullOr types.path;
-      };
-
-      tlsKeyFile = mkOption {
-        description = "Kubernetes apiserver private key file.";
-        default = null;
-        type = types.nullOr types.path;
-      };
-
-      clientCaFile = mkOption {
-        description = "Kubernetes apiserver CA file for client auth.";
-        default = cfg.caFile;
-        type = types.nullOr types.path;
-      };
-
-      tokenAuthFile = mkOption {
-        description = ''
-          Kubernetes apiserver token authentication file. See
-          <link xlink:href="https://kubernetes.io/docs/reference/access-authn-authz/authentication"/>
-        '';
-        default = null;
-        type = types.nullOr types.path;
-      };
-
-      basicAuthFile = mkOption {
-        description = ''
-          Kubernetes apiserver basic authentication file. See
-          <link xlink:href="https://kubernetes.io/docs/reference/access-authn-authz/authentication"/>
-        '';
-        default = pkgs.writeText "users" ''
-          kubernetes,admin,0
-        '';
-        type = types.nullOr types.path;
-      };
-
-      authorizationMode = mkOption {
-        description = ''
-          Kubernetes apiserver authorization mode (AlwaysAllow/AlwaysDeny/ABAC/Webhook/RBAC/Node). See
-          <link xlink:href="https://kubernetes.io/docs/reference/access-authn-authz/authorization/"/>
-        '';
-        default = ["RBAC" "Node"];
-        type = types.listOf (types.enum ["AlwaysAllow" "AlwaysDeny" "ABAC" "Webhook" "RBAC" "Node"]);
-      };
-
-      authorizationPolicy = mkOption {
-        description = ''
-          Kubernetes apiserver authorization policy file. See
-          <link xlink:href="https://kubernetes.io/docs/reference/access-authn-authz/authorization/"/>
-        '';
-        default = [];
-        type = types.listOf types.attrs;
-      };
-
-      webhookConfig = mkOption {
-        description = ''
-          Kubernetes apiserver Webhook config file. It uses the kubeconfig file format.
-          See <link xlink:href="https://kubernetes.io/docs/reference/access-authn-authz/webhook/"/>
-        '';
-        default = null;
-        type = types.nullOr types.path;
-      };
-
-      allowPrivileged = mkOption {
-        description = "Whether to allow privileged containers on Kubernetes.";
-        default = true;
-        type = types.bool;
-      };
-
-      serviceClusterIpRange = mkOption {
-        description = ''
-          A CIDR notation IP range from which to assign service cluster IPs.
-          This must not overlap with any IP ranges assigned to nodes for pods.
-        '';
-        default = "10.0.0.0/24";
-        type = types.str;
-      };
-
-      runtimeConfig = mkOption {
-        description = ''
-          Api runtime configuration. See
-          <link xlink:href="https://kubernetes.io/docs/tasks/administer-cluster/cluster-management/"/>
-        '';
-        default = "authentication.k8s.io/v1beta1=true";
-        example = "api/all=false,api/v1=true";
-        type = types.str;
-      };
-
-      enableAdmissionPlugins = mkOption {
-        description = ''
-          Kubernetes admission control plugins to enable. See
-          <link xlink:href="https://kubernetes.io/docs/admin/admission-controllers/"/>
-        '';
-        default = ["NamespaceLifecycle" "LimitRanger" "ServiceAccount" "ResourceQuota" "DefaultStorageClass" "DefaultTolerationSeconds" "NodeRestriction"];
-        example = [
-          "NamespaceLifecycle" "NamespaceExists" "LimitRanger"
-          "SecurityContextDeny" "ServiceAccount" "ResourceQuota"
-          "PodSecurityPolicy" "NodeRestriction" "DefaultStorageClass"
-        ];
-        type = types.listOf types.str;
-      };
-
-      disableAdmissionPlugins = mkOption {
-        description = ''
-          Kubernetes admission control plugins to disable. See
-          <link xlink:href="https://kubernetes.io/docs/admin/admission-controllers/"/>
-        '';
-        default = [];
-        type = types.listOf types.str;
-      };
-
-      serviceAccountKeyFile = mkOption {
-        description = ''
-          Kubernetes apiserver PEM-encoded x509 RSA private or public key file,
-          used to verify ServiceAccount tokens. By default tls private key file
-          is used.
-        '';
-        default = null;
-        type = types.nullOr types.path;
-      };
-
-      kubeletClientCaFile = mkOption {
-        description = "Path to a cert file for connecting to kubelet.";
-        default = cfg.caFile;
-        type = types.nullOr types.path;
-      };
-
-      kubeletClientCertFile = mkOption {
-        description = "Client certificate to use for connections to kubelet.";
-        default = null;
-        type = types.nullOr types.path;
-      };
-
-      kubeletClientKeyFile = mkOption {
-        description = "Key to use for connections to kubelet.";
-        default = null;
-        type = types.nullOr types.path;
-      };
-
-      kubeletHttps = mkOption {
-        description = "Whether to use https for connections to kubelet.";
-        default = true;
-        type = types.bool;
-      };
-
-      extraOpts = mkOption {
-        description = "Kubernetes apiserver extra command line options.";
-        default = "";
-        type = types.str;
-      };
-    };
-
-    scheduler = {
-      enable = mkOption {
-        description = "Whether to enable Kubernetes scheduler.";
-        default = false;
-        type = types.bool;
-      };
-
-      featureGates = mkOption {
-        description = "List set of feature gates";
-        default = cfg.featureGates;
-        type = types.listOf types.str;
-      };
-
-      address = mkOption {
-        description = "Kubernetes scheduler listening address.";
-        default = "127.0.0.1";
-        type = types.str;
-      };
-
-      port = mkOption {
-        description = "Kubernetes scheduler listening port.";
-        default = 10251;
-        type = types.int;
-      };
-
-      leaderElect = mkOption {
-        description = "Whether to start leader election before executing main loop.";
-        type = types.bool;
-        default = true;
-      };
-
-      kubeconfig = mkKubeConfigOptions "Kubernetes scheduler";
-
-      extraOpts = mkOption {
-        description = "Kubernetes scheduler extra command line options.";
-        default = "";
-        type = types.str;
-      };
-    };
-
-    controllerManager = {
-      enable = mkOption {
-        description = "Whether to enable Kubernetes controller manager.";
-        default = false;
-        type = types.bool;
-      };
-
-      featureGates = mkOption {
-        description = "List set of feature gates";
-        default = cfg.featureGates;
-        type = types.listOf types.str;
-      };
-
-      address = mkOption {
-        description = "Kubernetes controller manager listening address.";
-        default = "127.0.0.1";
-        type = types.str;
-      };
-
-      port = mkOption {
-        description = "Kubernetes controller manager listening port.";
-        default = 10252;
-        type = types.int;
-      };
-
-      leaderElect = mkOption {
-        description = "Whether to start leader election before executing main loop.";
-        type = types.bool;
-        default = true;
-      };
-
-      serviceAccountKeyFile = mkOption {
-        description = ''
-          Kubernetes controller manager PEM-encoded private RSA key file used to
-          sign service account tokens
-        '';
-        default = null;
-        type = types.nullOr types.path;
-      };
-
-      rootCaFile = mkOption {
-        description = ''
-          Kubernetes controller manager certificate authority file included in
-          service account's token secret.
-        '';
-        default = cfg.caFile;
-        type = types.nullOr types.path;
-      };
-
-      kubeconfig = mkKubeConfigOptions "Kubernetes controller manager";
-
-      extraOpts = mkOption {
-        description = "Kubernetes controller manager extra command line options.";
-        default = "";
-        type = types.str;
-      };
-    };
-
-    kubelet = {
-      enable = mkOption {
-        description = "Whether to enable Kubernetes kubelet.";
-        default = false;
-        type = types.bool;
-      };
-
-      featureGates = mkOption {
-        description = "List set of feature gates";
-        default = cfg.featureGates;
-        type = types.listOf types.str;
-      };
-
-      seedDockerImages = mkOption {
-        description = "List of docker images to preload on system";
-        default = [];
-        type = types.listOf types.package;
-      };
-
-      registerNode = mkOption {
-        description = "Whether to auto register kubelet with API server.";
-        default = true;
-        type = types.bool;
-      };
-
-      address = mkOption {
-        description = "Kubernetes kubelet info server listening address.";
-        default = "0.0.0.0";
-        type = types.str;
-      };
-
-      port = mkOption {
-        description = "Kubernetes kubelet info server listening port.";
-        default = 10250;
-        type = types.int;
-      };
-
-      tlsCertFile = mkOption {
-        description = "File containing x509 Certificate for HTTPS.";
-        default = null;
-        type = types.nullOr types.path;
-      };
-
-      tlsKeyFile = mkOption {
-        description = "File containing x509 private key matching tlsCertFile.";
-        default = null;
-        type = types.nullOr types.path;
-      };
-
-      clientCaFile = mkOption {
-        description = "Kubernetes apiserver CA file for client authentication.";
-        default = cfg.caFile;
-        type = types.nullOr types.path;
-      };
-
-      healthz = {
-        bind = mkOption {
-          description = "Kubernetes kubelet healthz listening address.";
-          default = "127.0.0.1";
-          type = types.str;
-        };
-
-        port = mkOption {
-          description = "Kubernetes kubelet healthz port.";
-          default = 10248;
-          type = types.int;
-        };
-      };
-
-      hostname = mkOption {
-        description = "Kubernetes kubelet hostname override.";
-        default = config.networking.hostName;
-        type = types.str;
-      };
-
-      allowPrivileged = mkOption {
-        description = "Whether to allow Kubernetes containers to request privileged mode.";
-        default = true;
-        type = types.bool;
-      };
-
-      clusterDns = mkOption {
-        description = "Use alternative DNS.";
-        default = "10.1.0.1";
-        type = types.str;
-      };
-
-      clusterDomain = mkOption {
-        description = "Use alternative domain.";
-        default = config.services.kubernetes.addons.dns.clusterDomain;
-        type = types.str;
-      };
-
-      networkPlugin = mkOption {
-        description = "Network plugin to use by Kubernetes.";
-        type = types.nullOr (types.enum ["cni" "kubenet"]);
-        default = "kubenet";
-      };
-
-      cni = {
-        packages = mkOption {
-          description = "List of network plugin packages to install.";
-          type = types.listOf types.package;
-          default = [];
-        };
-
-        config = mkOption {
-          description = "Kubernetes CNI configuration.";
-          type = types.listOf types.attrs;
-          default = [];
-          example = literalExample ''
-            [{
-              "cniVersion": "0.2.0",
-              "name": "mynet",
-              "type": "bridge",
-              "bridge": "cni0",
-              "isGateway": true,
-              "ipMasq": true,
-              "ipam": {
-                  "type": "host-local",
-                  "subnet": "10.22.0.0/16",
-                  "routes": [
-                      { "dst": "0.0.0.0/0" }
-                  ]
-              }
-            } {
-              "cniVersion": "0.2.0",
-              "type": "loopback"
-            }]
-          '';
-        };
-
-        configDir = mkOption {
-          description = "Path to Kubernetes CNI configuration directory.";
-          type = types.nullOr types.path;
-          default = null;
-        };
-      };
-
-      manifests = mkOption {
-        description = "List of manifests to bootstrap with kubelet (only pods can be created as manifest entry)";
-        type = types.attrsOf types.attrs;
-        default = {};
-      };
-
-      applyManifests = mkOption {
-        description = "Whether to apply manifests (this is true for master node).";
-        default = false;
-        type = types.bool;
-      };
-
-      unschedulable = mkOption {
-        description = "Whether to set node taint to unschedulable=true as it is the case of node that has only master role.";
-        default = false;
-        type = types.bool;
-      };
-
-      taints = mkOption {
-        description = "Node taints (https://kubernetes.io/docs/concepts/configuration/assign-pod-node/).";
-        default = {};
-        type = types.attrsOf (types.submodule [ taintOptions ]);
-      };
-
-      nodeIp = mkOption {
-        description = "IP address of the node. If set, kubelet will use this IP address for the node.";
-        default = null;
-        type = types.nullOr types.str;
-      };
-
-      kubeconfig = mkKubeConfigOptions "Kubelet";
-
-      extraOpts = mkOption {
-        description = "Kubernetes kubelet extra command line options.";
-        default = "";
-        type = types.str;
-      };
-    };
-
-    proxy = {
-      enable = mkOption {
-        description = "Whether to enable Kubernetes proxy.";
-        default = false;
-        type = types.bool;
-      };
-
-      featureGates = mkOption {
-        description = "List set of feature gates";
-        default = cfg.featureGates;
-        type = types.listOf types.str;
-      };
-
-      address = mkOption {
-        description = "Kubernetes proxy listening address.";
-        default = "0.0.0.0";
-        type = types.str;
-      };
-
-      kubeconfig = mkKubeConfigOptions "Kubernetes proxy";
-
-      extraOpts = mkOption {
-        description = "Kubernetes proxy extra command line options.";
-        default = "";
-        type = types.str;
-      };
-    };
-
-    addonManager = {
-      enable = mkOption {
-        description = "Whether to enable Kubernetes addon manager.";
-        default = false;
-        type = types.bool;
-      };
-
-      addons = mkOption {
-        description = "Kubernetes addons (any kind of Kubernetes resource can be an addon).";
-        default = { };
-        type = types.attrsOf (types.either types.attrs (types.listOf types.attrs));
-        example = literalExample ''
-          {
-            "my-service" = {
-              "apiVersion" = "v1";
-              "kind" = "Service";
-              "metadata" = {
-                "name" = "my-service";
-                "namespace" = "default";
-              };
-              "spec" = { ... };
-            };
-          }
-          // import <nixpkgs/nixos/modules/services/cluster/kubernetes/dashboard.nix> { cfg = config.services.kubernetes; };
-        '';
-      };
+    masterAddress = mkOption {
+      description = "Clusterwide available network address or hostname for the kubernetes master server.";
+      example = "master.example.com";
+      type = types.str;
     };
 
     path = mkOption {
@@ -787,304 +156,75 @@ in {
       type = types.nullOr types.str;
     };
 
-    flannel.enable = mkOption {
-      description = "Whether to enable flannel networking";
-      default = false;
-      type = types.bool;
+    lib = mkOption {
+      description = "Common functions for the kubernetes modules.";
+      default = {
+        inherit mkCert;
+        inherit mkKubeConfig;
+        inherit mkKubeConfigOptions;
+      };
+      type = types.attrs;
     };
 
+    secretsPath = mkOption {
+      description = "Default location for kubernetes secrets. Not a store location.";
+      type = types.path;
+      default = cfg.dataDir + "/secrets";
+    };
   };
 
   ###### implementation
 
   config = mkMerge [
-    (mkIf cfg.kubelet.enable {
-      services.kubernetes.kubelet.seedDockerImages = [infraContainer];
-
-      systemd.services.kubelet-bootstrap = {
-        description = "Boostrap Kubelet";
-        wantedBy = ["kubernetes.target"];
-        after = ["docker.service" "network.target"];
-        path = with pkgs; [ docker ];
-        script = ''
-          ${concatMapStrings (img: ''
-            echo "Seeding docker image: ${img}"
-            docker load <${img}
-          '') cfg.kubelet.seedDockerImages}
-
-          rm /opt/cni/bin/* || true
-          ${concatMapStrings (package: ''
-            echo "Linking cni package: ${package}"
-            ln -fs ${package}/bin/* /opt/cni/bin
-          '') cfg.kubelet.cni.packages}
-        '';
-        serviceConfig = {
-          Slice = "kubernetes.slice";
-          Type = "oneshot";
-        };
-      };
 
-      systemd.services.kubelet = {
-        description = "Kubernetes Kubelet Service";
-        wantedBy = [ "kubernetes.target" ];
-        after = [ "network.target" "docker.service" "kube-apiserver.service" "kubelet-bootstrap.service" ];
-        path = with pkgs; [ gitMinimal openssh docker utillinux iproute ethtool thin-provisioning-tools iptables socat ] ++ cfg.path;
-        serviceConfig = {
-          Slice = "kubernetes.slice";
-          CPUAccounting = true;
-          MemoryAccounting = true;
-          ExecStart = ''${cfg.package}/bin/kubelet \
-            ${optionalString (taints != "")
-              "--register-with-taints=${taints}"} \
-            --kubeconfig=${mkKubeConfig "kubelet" cfg.kubelet.kubeconfig} \
-            --config=${kubeletConfig} \
-            --address=${cfg.kubelet.address} \
-            --port=${toString cfg.kubelet.port} \
-            --register-node=${boolToString cfg.kubelet.registerNode} \
-            ${optionalString (cfg.kubelet.tlsCertFile != null)
-              "--tls-cert-file=${cfg.kubelet.tlsCertFile}"} \
-            ${optionalString (cfg.kubelet.tlsKeyFile != null)
-              "--tls-private-key-file=${cfg.kubelet.tlsKeyFile}"} \
-            ${optionalString (cfg.kubelet.clientCaFile != null)
-              "--client-ca-file=${cfg.kubelet.clientCaFile}"} \
-            --authentication-token-webhook \
-            --authentication-token-webhook-cache-ttl="10s" \
-            --authorization-mode=Webhook \
-            --healthz-bind-address=${cfg.kubelet.healthz.bind} \
-            --healthz-port=${toString cfg.kubelet.healthz.port} \
-            --hostname-override=${cfg.kubelet.hostname} \
-            --allow-privileged=${boolToString cfg.kubelet.allowPrivileged} \
-            --root-dir=${cfg.dataDir} \
-            ${optionalString (cfg.kubelet.clusterDns != "")
-              "--cluster-dns=${cfg.kubelet.clusterDns}"} \
-            ${optionalString (cfg.kubelet.clusterDomain != "")
-              "--cluster-domain=${cfg.kubelet.clusterDomain}"} \
-            --pod-infra-container-image=pause \
-            ${optionalString (cfg.kubelet.networkPlugin != null)
-              "--network-plugin=${cfg.kubelet.networkPlugin}"} \
-            --cni-conf-dir=${cniConfig} \
-            --hairpin-mode=hairpin-veth \
-            ${optionalString (cfg.kubelet.nodeIp != null)
-              "--node-ip=${cfg.kubelet.nodeIp}"} \
-            ${optionalString (cfg.kubelet.featureGates != [])
-              "--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.kubelet.featureGates}"} \
-            ${optionalString cfg.verbose "--v=6 --log_flush_frequency=1s"} \
-            ${cfg.kubelet.extraOpts}
-          '';
-          WorkingDirectory = cfg.dataDir;
-        };
-      };
-
-      # Allways include cni plugins
-      services.kubernetes.kubelet.cni.packages = [pkgs.cni-plugins];
-
-      boot.kernelModules = ["br_netfilter"];
-
-      services.kubernetes.kubelet.kubeconfig = kubeConfigDefaults;
-    })
-
-    (mkIf (cfg.kubelet.applyManifests && cfg.kubelet.enable) {
-      environment.etc = mapAttrs' (name: manifest:
-        nameValuePair "kubernetes/manifests/${name}.json" {
-          text = builtins.toJSON manifest;
-          mode = "0755";
-        }
-      ) cfg.kubelet.manifests;
-    })
-
-    (mkIf (cfg.kubelet.unschedulable && cfg.kubelet.enable) {
-      services.kubernetes.kubelet.taints.unschedulable = {
-        value = "true";
-        effect = "NoSchedule";
-      };
-    })
-
-    (mkIf cfg.apiserver.enable {
-      systemd.services.kube-apiserver = {
-        description = "Kubernetes APIServer Service";
-        wantedBy = [ "kubernetes.target" ];
-        after = [ "network.target" "docker.service" ];
-        serviceConfig = {
-          Slice = "kubernetes.slice";
-          ExecStart = ''${cfg.package}/bin/kube-apiserver \
-            --etcd-servers=${concatStringsSep "," cfg.etcd.servers} \
-            ${optionalString (cfg.etcd.caFile != null)
-              "--etcd-cafile=${cfg.etcd.caFile}"} \
-            ${optionalString (cfg.etcd.certFile != null)
-              "--etcd-certfile=${cfg.etcd.certFile}"} \
-            ${optionalString (cfg.etcd.keyFile != null)
-              "--etcd-keyfile=${cfg.etcd.keyFile}"} \
-            --insecure-port=${toString cfg.apiserver.port} \
-            --bind-address=${cfg.apiserver.bindAddress} \
-            ${optionalString (cfg.apiserver.advertiseAddress != null)
-              "--advertise-address=${cfg.apiserver.advertiseAddress}"} \
-            --allow-privileged=${boolToString cfg.apiserver.allowPrivileged}\
-            ${optionalString (cfg.apiserver.tlsCertFile != null)
-              "--tls-cert-file=${cfg.apiserver.tlsCertFile}"} \
-            ${optionalString (cfg.apiserver.tlsKeyFile != null)
-              "--tls-private-key-file=${cfg.apiserver.tlsKeyFile}"} \
-            ${optionalString (cfg.apiserver.tokenAuthFile != null)
-              "--token-auth-file=${cfg.apiserver.tokenAuthFile}"} \
-            ${optionalString (cfg.apiserver.basicAuthFile != null)
-              "--basic-auth-file=${cfg.apiserver.basicAuthFile}"} \
-            --kubelet-https=${if cfg.apiserver.kubeletHttps then "true" else "false"} \
-            ${optionalString (cfg.apiserver.kubeletClientCaFile != null)
-              "--kubelet-certificate-authority=${cfg.apiserver.kubeletClientCaFile}"} \
-            ${optionalString (cfg.apiserver.kubeletClientCertFile != null)
-              "--kubelet-client-certificate=${cfg.apiserver.kubeletClientCertFile}"} \
-            ${optionalString (cfg.apiserver.kubeletClientKeyFile != null)
-              "--kubelet-client-key=${cfg.apiserver.kubeletClientKeyFile}"} \
-            ${optionalString (cfg.apiserver.clientCaFile != null)
-              "--client-ca-file=${cfg.apiserver.clientCaFile}"} \
-            --authorization-mode=${concatStringsSep "," cfg.apiserver.authorizationMode} \
-            ${optionalString (elem "ABAC" cfg.apiserver.authorizationMode)
-              "--authorization-policy-file=${
-                pkgs.writeText "kube-auth-policy.jsonl"
-                (concatMapStringsSep "\n" (l: builtins.toJSON l) cfg.apiserver.authorizationPolicy)
-              }"
-            } \
-            ${optionalString (elem "Webhook" cfg.apiserver.authorizationMode)
-              "--authorization-webhook-config-file=${cfg.apiserver.webhookConfig}"
-            } \
-            --secure-port=${toString cfg.apiserver.securePort} \
-            --service-cluster-ip-range=${cfg.apiserver.serviceClusterIpRange} \
-            ${optionalString (cfg.apiserver.runtimeConfig != "")
-              "--runtime-config=${cfg.apiserver.runtimeConfig}"} \
-            --enable-admission-plugins=${concatStringsSep "," cfg.apiserver.enableAdmissionPlugins} \
-            --disable-admission-plugins=${concatStringsSep "," cfg.apiserver.disableAdmissionPlugins} \
-            ${optionalString (cfg.apiserver.serviceAccountKeyFile!=null)
-              "--service-account-key-file=${cfg.apiserver.serviceAccountKeyFile}"} \
-            ${optionalString cfg.verbose "--v=6"} \
-            ${optionalString cfg.verbose "--log-flush-frequency=1s"} \
-            --storage-backend=${cfg.apiserver.storageBackend} \
-            ${optionalString (cfg.kubelet.featureGates != [])
-              "--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.kubelet.featureGates}"} \
-            ${cfg.apiserver.extraOpts}
-          '';
-          WorkingDirectory = cfg.dataDir;
-          User = "kubernetes";
-          Group = "kubernetes";
-          AmbientCapabilities = "cap_net_bind_service";
-          Restart = "on-failure";
-          RestartSec = 5;
-        };
-      };
+    (mkIf cfg.easyCerts {
+      services.kubernetes.pki.enable = mkDefault true;
+      services.kubernetes.caFile = caCert;
     })
 
-    (mkIf cfg.scheduler.enable {
-      systemd.services.kube-scheduler = {
-        description = "Kubernetes Scheduler Service";
-        wantedBy = [ "kubernetes.target" ];
-        after = [ "kube-apiserver.service" ];
-        serviceConfig = {
-          Slice = "kubernetes.slice";
-          ExecStart = ''${cfg.package}/bin/kube-scheduler \
-            --address=${cfg.scheduler.address} \
-            --port=${toString cfg.scheduler.port} \
-            --leader-elect=${boolToString cfg.scheduler.leaderElect} \
-            --kubeconfig=${mkKubeConfig "kube-scheduler" cfg.scheduler.kubeconfig} \
-            ${optionalString cfg.verbose "--v=6"} \
-            ${optionalString cfg.verbose "--log-flush-frequency=1s"} \
-            ${optionalString (cfg.scheduler.featureGates != [])
-              "--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.scheduler.featureGates}"} \
-            ${cfg.scheduler.extraOpts}
-          '';
-          WorkingDirectory = cfg.dataDir;
-          User = "kubernetes";
-          Group = "kubernetes";
-        };
-      };
-
-      services.kubernetes.scheduler.kubeconfig = kubeConfigDefaults;
-    })
-
-    (mkIf cfg.controllerManager.enable {
-      systemd.services.kube-controller-manager = {
-        description = "Kubernetes Controller Manager Service";
-        wantedBy = [ "kubernetes.target" ];
-        after = [ "kube-apiserver.service" ];
-        serviceConfig = {
-          RestartSec = "30s";
-          Restart = "on-failure";
-          Slice = "kubernetes.slice";
-          ExecStart = ''${cfg.package}/bin/kube-controller-manager \
-            --address=${cfg.controllerManager.address} \
-            --port=${toString cfg.controllerManager.port} \
-            --kubeconfig=${mkKubeConfig "kube-controller-manager" cfg.controllerManager.kubeconfig} \
-            --leader-elect=${boolToString cfg.controllerManager.leaderElect} \
-            ${if (cfg.controllerManager.serviceAccountKeyFile!=null)
-              then "--service-account-private-key-file=${cfg.controllerManager.serviceAccountKeyFile}"
-              else "--service-account-private-key-file=/var/run/kubernetes/apiserver.key"} \
-            ${if (cfg.controllerManager.rootCaFile!=null)
-              then "--root-ca-file=${cfg.controllerManager.rootCaFile}"
-              else "--root-ca-file=/var/run/kubernetes/apiserver.crt"} \
-            ${if (cfg.clusterCidr!=null)
-              then "--cluster-cidr=${cfg.clusterCidr} --allocate-node-cidrs=true"
-              else "--allocate-node-cidrs=false"} \
-            ${optionalString (cfg.controllerManager.featureGates != [])
-              "--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.controllerManager.featureGates}"} \
-            ${optionalString cfg.verbose "--v=6"} \
-            ${optionalString cfg.verbose "--log-flush-frequency=1s"} \
-            ${cfg.controllerManager.extraOpts}
-          '';
-          WorkingDirectory = cfg.dataDir;
-          User = "kubernetes";
-          Group = "kubernetes";
+    (mkIf (elem "master" cfg.roles) {
+      services.kubernetes.apiserver.enable = mkDefault true;
+      services.kubernetes.scheduler.enable = mkDefault true;
+      services.kubernetes.controllerManager.enable = mkDefault true;
+      services.kubernetes.addonManager.enable = mkDefault true;
+      services.kubernetes.proxy.enable = mkDefault true;
+      services.etcd.enable = true; # Cannot mkDefault because of flannel default options
+      services.kubernetes.kubelet = {
+        enable = mkDefault true;
+        taints = mkIf (!(elem "node" cfg.roles)) {
+          master = {
+            key = "node-role.kubernetes.io/master";
+            value = "true";
+            effect = "NoSchedule";
+          };
         };
-        path = cfg.path;
       };
-
-      services.kubernetes.controllerManager.kubeconfig = kubeConfigDefaults;
     })
 
-    (mkIf cfg.proxy.enable {
-      systemd.services.kube-proxy = {
-        description = "Kubernetes Proxy Service";
-        wantedBy = [ "kubernetes.target" ];
-        after = [ "kube-apiserver.service" ];
-        path = [pkgs.iptables pkgs.conntrack_tools];
-        serviceConfig = {
-          Slice = "kubernetes.slice";
-          ExecStart = ''${cfg.package}/bin/kube-proxy \
-            --kubeconfig=${mkKubeConfig "kube-proxy" cfg.proxy.kubeconfig} \
-            --bind-address=${cfg.proxy.address} \
-            ${optionalString (cfg.proxy.featureGates != [])
-              "--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.proxy.featureGates}"} \
-            ${optionalString cfg.verbose "--v=6"} \
-            ${optionalString cfg.verbose "--log-flush-frequency=1s"} \
-            ${optionalString (cfg.clusterCidr!=null)
-              "--cluster-cidr=${cfg.clusterCidr}"} \
-            ${cfg.proxy.extraOpts}
-          '';
-          WorkingDirectory = cfg.dataDir;
-        };
-      };
-
-      # kube-proxy needs iptables
-      networking.firewall.enable = mkDefault true;
 
-      services.kubernetes.proxy.kubeconfig = kubeConfigDefaults;
+    (mkIf (all (el: el == "master") cfg.roles) {
+      # if this node is only a master make it unschedulable by default
+      services.kubernetes.kubelet.unschedulable = mkDefault true;
     })
 
-    (mkIf (any (el: el == "master") cfg.roles) {
-      virtualisation.docker.enable = mkDefault true;
+    (mkIf (elem "node" cfg.roles) {
       services.kubernetes.kubelet.enable = mkDefault true;
-      services.kubernetes.kubelet.allowPrivileged = mkDefault true;
-      services.kubernetes.kubelet.applyManifests = mkDefault true;
-      services.kubernetes.apiserver.enable = mkDefault true;
-      services.kubernetes.scheduler.enable = mkDefault true;
-      services.kubernetes.controllerManager.enable = mkDefault true;
-      services.etcd.enable = mkDefault (cfg.etcd.servers == ["http://127.0.0.1:2379"]);
-      services.kubernetes.addonManager.enable = mkDefault true;
       services.kubernetes.proxy.enable = mkDefault true;
     })
 
-    # if this node is only a master make it unschedulable by default
-    (mkIf (all (el: el == "master") cfg.roles) {
-      services.kubernetes.kubelet.unschedulable = mkDefault true;
+    # Using "services.kubernetes.roles" will automatically enable easyCerts and flannel
+    (mkIf (cfg.roles != []) {
+      services.kubernetes.flannel.enable = mkDefault true;
+      services.flannel.etcd.endpoints = mkDefault etcdEndpoints;
+      services.kubernetes.easyCerts = mkDefault true;
+    })
+
+    (mkIf cfg.apiserver.enable {
+      services.kubernetes.pki.etcClusterAdminKubeconfig = mkDefault "kubernetes/cluster-admin.kubeconfig";
+      services.kubernetes.apiserver.etcd.servers = mkDefault etcdEndpoints;
     })
 
-    (mkIf (any (el: el == "node") cfg.roles) {
+    (mkIf cfg.kubelet.enable {
       virtualisation.docker = {
         enable = mkDefault true;
 
@@ -1094,26 +234,18 @@ in {
         # iptables must be disabled for kubernetes
         extraOptions = "--iptables=false --ip-masq=false";
       };
-
-      services.kubernetes.kubelet.enable = mkDefault true;
-      services.kubernetes.proxy.enable = mkDefault true;
     })
 
-    (mkIf cfg.addonManager.enable {
-      environment.etc."kubernetes/addons".source = "${addons}/";
-
-      systemd.services.kube-addon-manager = {
-        description = "Kubernetes addon manager";
-        wantedBy = [ "kubernetes.target" ];
-        after = [ "kube-apiserver.service" ];
-        environment.ADDON_PATH = "/etc/kubernetes/addons/";
-        path = [ pkgs.gawk ];
-        serviceConfig = {
-          Slice = "kubernetes.slice";
-          ExecStart = "${cfg.package}/bin/kube-addons";
-          WorkingDirectory = cfg.dataDir;
-          User = "kubernetes";
-          Group = "kubernetes";
+    (mkIf (cfg.apiserver.enable || cfg.controllerManager.enable) {
+      services.kubernetes.pki.certs = {
+        serviceAccount = mkCert {
+          name = "service-account";
+          CN = "system:service-account-signer";
+          action = ''
+            systemctl reload \
+              kube-apiserver.service \
+              kube-controller-manager.service
+          '';
         };
       };
     })
@@ -1123,7 +255,8 @@ in {
         cfg.scheduler.enable ||
         cfg.controllerManager.enable ||
         cfg.kubelet.enable ||
-        cfg.proxy.enable
+        cfg.proxy.enable ||
+        cfg.addonManager.enable
     ) {
       systemd.targets.kubernetes = {
         description = "Kubernetes";
@@ -1132,11 +265,10 @@ in {
 
       systemd.tmpfiles.rules = [
         "d /opt/cni/bin 0755 root root -"
-        "d /var/run/kubernetes 0755 kubernetes kubernetes -"
+        "d /run/kubernetes 0755 kubernetes kubernetes -"
         "d /var/lib/kubernetes 0755 kubernetes kubernetes -"
       ];
 
-      environment.systemPackages = [ cfg.package ];
       users.users = singleton {
         name = "kubernetes";
         uid = config.ids.uids.kubernetes;
@@ -1148,53 +280,12 @@ in {
       };
       users.groups.kubernetes.gid = config.ids.gids.kubernetes;
 
-			# dns addon is enabled by default
+      # dns addon is enabled by default
       services.kubernetes.addons.dns.enable = mkDefault true;
-    })
 
-    (mkIf cfg.flannel.enable {
-      services.flannel = {
-        enable = mkDefault true;
-        network = mkDefault cfg.clusterCidr;
-        etcd = mkDefault {
-          endpoints = cfg.etcd.servers;
-          inherit (cfg.etcd) caFile certFile keyFile;
-        };
-      };
-
-      services.kubernetes.kubelet = {
-        networkPlugin = mkDefault "cni";
-        cni.config = mkDefault [{
-          name = "mynet";
-          type = "flannel";
-          delegate = {
-            isDefaultGateway = true;
-            bridge = "docker0";
-          };
-        }];
-      };
-
-      systemd.services."mk-docker-opts" = {
-        description = "Pre-Docker Actions";
-        wantedBy = [ "flannel.service" ];
-        before = [ "docker.service" ];
-        after = [ "flannel.service" ];
-        path = [ pkgs.gawk pkgs.gnugrep ];
-        script = ''
-          mkdir -p /run/flannel
-          ${mkDockerOpts}/mk-docker-opts -d /run/flannel/docker
-        '';
-        serviceConfig.Type = "oneshot";
-      };
-      systemd.services.docker.serviceConfig.EnvironmentFile = "/run/flannel/docker";
-
-      # read environment variables generated by mk-docker-opts
-      virtualisation.docker.extraOptions = "$DOCKER_OPTS";
-
-      networking.firewall.allowedUDPPorts = [
-        8285  # flannel udp
-        8472  # flannel vxlan
-      ];
+      services.kubernetes.apiserverAddress = mkDefault ("https://${if cfg.apiserver.advertiseAddress != null
+                          then cfg.apiserver.advertiseAddress
+                          else "${cfg.masterAddress}:${toString cfg.apiserver.securePort}"}");
     })
   ];
 }
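
Taken together, the slimmed-down default.nix reduces a working single-node
cluster to the role options; a minimal sketch, with a hypothetical host name:

  services.kubernetes = {
    roles = [ "master" "node" ];         # pulls in apiserver, kubelet, flannel, ...
    masterAddress = "kube.example.com";  # hypothetical
  };

Because roles is non-empty, easyCerts and flannel default to enabled, so the
cluster PKI and the pod network are bootstrapped without further options.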
diff --git a/nixos/modules/services/cluster/kubernetes/flannel.nix b/nixos/modules/services/cluster/kubernetes/flannel.nix
new file mode 100644
index 00000000000..93ee2fd65ee
--- /dev/null
+++ b/nixos/modules/services/cluster/kubernetes/flannel.nix
@@ -0,0 +1,134 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  top = config.services.kubernetes;
+  cfg = top.flannel;
+
+  # we want flannel to use kubernetes itself as configuration backend, not direct etcd
+  storageBackend = "kubernetes";
+
+  # needed for flannel to pass options to docker
+  mkDockerOpts = pkgs.runCommand "mk-docker-opts" {
+    buildInputs = [ pkgs.makeWrapper ];
+  } ''
+    mkdir -p $out
+    cp ${pkgs.kubernetes.src}/cluster/centos/node/bin/mk-docker-opts.sh $out/mk-docker-opts.sh
+
+    # bashInteractive needed for `compgen`
+    makeWrapper ${pkgs.bashInteractive}/bin/bash $out/mk-docker-opts --add-flags "$out/mk-docker-opts.sh"
+  '';
+in
+{
+  ###### interface
+  options.services.kubernetes.flannel = {
+    enable = mkEnableOption "enable flannel networking";
+  };
+
+  ###### implementation
+  config = mkIf cfg.enable {
+    services.flannel = {
+      enable = mkDefault true;
+      network = mkDefault top.clusterCidr;
+      inherit storageBackend;
+      nodeName = config.services.kubernetes.kubelet.hostname;
+    };
+
+    services.kubernetes.kubelet = {
+      networkPlugin = mkDefault "cni";
+      cni.config = mkDefault [{
+        name = "mynet";
+        type = "flannel";
+        delegate = {
+          isDefaultGateway = true;
+          bridge = "docker0";
+        };
+      }];
+    };
+
+    systemd.services."mk-docker-opts" = {
+      description = "Pre-Docker Actions";
+      path = with pkgs; [ gawk gnugrep ];
+      script = ''
+        ${mkDockerOpts}/mk-docker-opts -d /run/flannel/docker
+        systemctl restart docker
+      '';
+      serviceConfig.Type = "oneshot";
+    };
+
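+    # Watch flannel's generated subnet.env: whenever flannel rewrites it,
+    # trigger mk-docker-opts.service, which regenerates the docker options
+    # and restarts docker with matching bridge settings.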
+    systemd.paths."flannel-subnet-env" = {
+      wantedBy = [ "flannel.service" ];
+      pathConfig = {
+        PathModified = "/run/flannel/subnet.env";
+        Unit = "mk-docker-opts.service";
+      };
+    };
+
+    systemd.services.docker = {
+      environment.DOCKER_OPTS = "-b none";
+      serviceConfig.EnvironmentFile = "-/run/flannel/docker";
+    };
+
+    # read environment variables generated by mk-docker-opts
+    virtualisation.docker.extraOptions = "$DOCKER_OPTS";
+
+    networking = {
+      firewall.allowedUDPPorts = [
+        8285  # flannel udp
+        8472  # flannel vxlan
+      ];
+      dhcpcd.denyInterfaces = [ "docker*" "flannel*" ];
+    };
+
+    services.kubernetes.pki.certs = {
+      flannelClient = top.lib.mkCert {
+        name = "flannel-client";
+        CN = "flannel-client";
+        action = "systemctl restart flannel.service";
+      };
+    };
+
+    # give flannel some kubernetes rbac permissions if applicable
+    services.kubernetes.addonManager.bootstrapAddons = mkIf ((storageBackend == "kubernetes") && (elem "RBAC" top.apiserver.authorizationMode)) {
+
+      flannel-cr = {
+        apiVersion = "rbac.authorization.k8s.io/v1beta1";
+        kind = "ClusterRole";
+        metadata = { name = "flannel"; };
+        rules = [{
+          apiGroups = [ "" ];
+          resources = [ "pods" ];
+          verbs = [ "get" ];
+        }
+        {
+          apiGroups = [ "" ];
+          resources = [ "nodes" ];
+          verbs = [ "list" "watch" ];
+        }
+        {
+          apiGroups = [ "" ];
+          resources = [ "nodes/status" ];
+          verbs = [ "patch" ];
+        }];
+      };
+
+      flannel-crb = {
+        apiVersion = "rbac.authorization.k8s.io/v1beta1";
+        kind = "ClusterRoleBinding";
+        metadata = { name = "flannel"; };
+        roleRef = {
+          apiGroup = "rbac.authorization.k8s.io";
+          kind = "ClusterRole";
+          name = "flannel";
+        };
+        subjects = [{
+          kind = "User";
+          name = "flannel-client";
+        }];
+      };
+
+    };
+  };
+}
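
Outside of the role shortcuts, the network layer can now be toggled on its
own; a one-line sketch:

  services.kubernetes.flannel.enable = true;

Since storageBackend is fixed to "kubernetes", the flannel ClusterRole and
ClusterRoleBinding above are installed automatically whenever the apiserver
runs with RBAC authorization.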
diff --git a/nixos/modules/services/cluster/kubernetes/kubelet.nix b/nixos/modules/services/cluster/kubernetes/kubelet.nix
new file mode 100644
index 00000000000..c94bb28bf7f
--- /dev/null
+++ b/nixos/modules/services/cluster/kubernetes/kubelet.nix
@@ -0,0 +1,358 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  top = config.services.kubernetes;
+  cfg = top.kubelet;
+
+  cniConfig =
+    if cfg.cni.config != [] && !(isNull cfg.cni.configDir) then
+      throw "Verbatim CNI-config and CNI configDir cannot both be set."
+    else if !(isNull cfg.cni.configDir) then
+      cfg.cni.configDir
+    else
+      (pkgs.buildEnv {
+        name = "kubernetes-cni-config";
+        paths = imap (i: entry:
+          pkgs.writeTextDir "${toString (10+i)}-${entry.type}.conf" (builtins.toJSON entry)
+        ) cfg.cni.config;
+      });
+
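+  # Local build of the "pause" pod-infra image; it is added to
+  # seedDockerImages below, so nodes never have to pull it from a registry.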
+  infraContainer = pkgs.dockerTools.buildImage {
+    name = "pause";
+    tag = "latest";
+    contents = top.package.pause;
+    config.Cmd = "/bin/pause";
+  };
+
+  kubeconfig = top.lib.mkKubeConfig "kubelet" cfg.kubeconfig;
+
+  manifests = pkgs.buildEnv {
+    name = "kubernetes-manifests";
+    paths = mapAttrsToList (name: manifest:
+      pkgs.writeTextDir "${name}.json" (builtins.toJSON manifest)
+    ) cfg.manifests;
+  };
+
+  manifestPath = "kubernetes/manifests";
+
+  taintOptions = with lib.types; { name, ... }: {
+    options = {
+      key = mkOption {
+        description = "Key of taint.";
+        default = name;
+        type = str;
+      };
+      value = mkOption {
+        description = "Value of taint.";
+        type = str;
+      };
+      effect = mkOption {
+        description = "Effect of taint.";
+        example = "NoSchedule";
+        type = enum ["NoSchedule" "PreferNoSchedule" "NoExecute"];
+      };
+    };
+  };
+
+  taints = concatMapStringsSep "," (v: "${v.key}=${v.value}:${v.effect}") (mapAttrsToList (n: v: v) cfg.taints);
+in
+{
+  ###### interface
+  options.services.kubernetes.kubelet = with lib.types; {
+
+    address = mkOption {
+      description = "Kubernetes kubelet info server listening address.";
+      default = "0.0.0.0";
+      type = str;
+    };
+
+    allowPrivileged = mkOption {
+      description = "Whether to allow Kubernetes containers to request privileged mode.";
+      default = false;
+      type = bool;
+    };
+
+    clusterDns = mkOption {
+      description = "Use alternative DNS.";
+      default = "10.1.0.1";
+      type = str;
+    };
+
+    clusterDomain = mkOption {
+      description = "Use alternative domain.";
+      default = config.services.kubernetes.addons.dns.clusterDomain;
+      type = str;
+    };
+
+    clientCaFile = mkOption {
+      description = "Kubernetes apiserver CA file for client authentication.";
+      default = top.caFile;
+      type = nullOr path;
+    };
+
+    cni = {
+      packages = mkOption {
+        description = "List of network plugin packages to install.";
+        type = listOf package;
+        default = [];
+      };
+
+      config = mkOption {
+        description = "Kubernetes CNI configuration.";
+        type = listOf attrs;
+        default = [];
+        example = literalExample ''
+          [{
+            cniVersion = "0.2.0";
+            name = "mynet";
+            type = "bridge";
+            bridge = "cni0";
+            isGateway = true;
+            ipMasq = true;
+            ipam = {
+              type = "host-local";
+              subnet = "10.22.0.0/16";
+              routes = [
+                { dst = "0.0.0.0/0"; }
+              ];
+            };
+          } {
+            cniVersion = "0.2.0";
+            type = "loopback";
+          }]
+        '';
+      };
+
+      configDir = mkOption {
+        description = "Path to Kubernetes CNI configuration directory.";
+        type = nullOr path;
+        default = null;
+      };
+    };
+
+    enable = mkEnableOption "Kubernetes kubelet.";
+
+    extraOpts = mkOption {
+      description = "Kubernetes kubelet extra command line options.";
+      default = "";
+      type = str;
+    };
+
+    featureGates = mkOption {
+      description = "List set of feature gates";
+      default = top.featureGates;
+      type = listOf str;
+    };
+
+    healthz = {
+      bind = mkOption {
+        description = "Kubernetes kubelet healthz listening address.";
+        default = "127.0.0.1";
+        type = str;
+      };
+
+      port = mkOption {
+        description = "Kubernetes kubelet healthz port.";
+        default = 10248;
+        type = int;
+      };
+    };
+
+    hostname = mkOption {
+      description = "Kubernetes kubelet hostname override.";
+      default = config.networking.hostName;
+      type = str;
+    };
+
+    kubeconfig = top.lib.mkKubeConfigOptions "Kubelet";
+
+    manifests = mkOption {
+      description = "List of manifests to bootstrap with kubelet (only pods can be created as manifest entry)";
+      type = attrsOf attrs;
+      default = {};
+    };
+
+    networkPlugin = mkOption {
+      description = "Network plugin to use by Kubernetes.";
+      type = nullOr (enum ["cni" "kubenet"]);
+      default = "kubenet";
+    };
+
+    nodeIp = mkOption {
+      description = "IP address of the node. If set, kubelet will use this IP address for the node.";
+      default = null;
+      type = nullOr str;
+    };
+
+    registerNode = mkOption {
+      description = "Whether to auto register kubelet with API server.";
+      default = true;
+      type = bool;
+    };
+
+    port = mkOption {
+      description = "Kubernetes kubelet info server listening port.";
+      default = 10250;
+      type = int;
+    };
+
+    seedDockerImages = mkOption {
+      description = "List of docker images to preload on system";
+      default = [];
+      type = listOf package;
+    };
+
+    taints = mkOption {
+      description = "Node taints (https://kubernetes.io/docs/concepts/configuration/assign-pod-node/).";
+      default = {};
+      type = attrsOf (submodule [ taintOptions ]);
+    };
+
+    tlsCertFile = mkOption {
+      description = "File containing x509 Certificate for HTTPS.";
+      default = null;
+      type = nullOr path;
+    };
+
+    tlsKeyFile = mkOption {
+      description = "File containing x509 private key matching tlsCertFile.";
+      default = null;
+      type = nullOr path;
+    };
+
+    unschedulable = mkOption {
+      description = "Whether to set node taint to unschedulable=true as it is the case of node that has only master role.";
+      default = false;
+      type = bool;
+    };
+
+    verbosity = mkOption {
+      description = ''
+        Optional glog verbosity level for logging statements. See
+        <link xlink:href="https://github.com/kubernetes/community/blob/master/contributors/devel/logging.md"/>
+      '';
+      default = null;
+      type = nullOr int;
+    };
+
+  };
+
+  ###### implementation
+  config = mkMerge [
+    (mkIf cfg.enable {
+      services.kubernetes.kubelet.seedDockerImages = [infraContainer];
+
+      systemd.services.kubelet = {
+        description = "Kubernetes Kubelet Service";
+        wantedBy = [ "kubernetes.target" ];
+        after = [ "network.target" "docker.service" "kube-apiserver.service" ];
+        path = with pkgs; [ gitMinimal openssh docker utillinux iproute ethtool thin-provisioning-tools iptables socat ] ++ top.path;
+        preStart = ''
+          ${concatMapStrings (img: ''
+            echo "Seeding docker image: ${img}"
+            docker load <${img}
+          '') cfg.seedDockerImages}
+
+          rm /opt/cni/bin/* || true
+          ${concatMapStrings (package: ''
+            echo "Linking cni package: ${package}"
+            ln -fs ${package}/bin/* /opt/cni/bin
+          '') cfg.cni.packages}
+        '';
+        serviceConfig = {
+          Slice = "kubernetes.slice";
+          CPUAccounting = true;
+          MemoryAccounting = true;
+          Restart = "on-failure";
+          RestartSec = "1000ms";
+          ExecStart = ''${top.package}/bin/kubelet \
+            --address=${cfg.address} \
+            --allow-privileged=${boolToString cfg.allowPrivileged} \
+            --authentication-token-webhook \
+            --authentication-token-webhook-cache-ttl="10s" \
+            --authorization-mode=Webhook \
+            ${optionalString (cfg.clientCaFile != null)
+              "--client-ca-file=${cfg.clientCaFile}"} \
+            ${optionalString (cfg.clusterDns != "")
+              "--cluster-dns=${cfg.clusterDns}"} \
+            ${optionalString (cfg.clusterDomain != "")
+              "--cluster-domain=${cfg.clusterDomain}"} \
+            --cni-conf-dir=${cniConfig} \
+            ${optionalString (cfg.featureGates != [])
+              "--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.featureGates}"} \
+            --hairpin-mode=hairpin-veth \
+            --healthz-bind-address=${cfg.healthz.bind} \
+            --healthz-port=${toString cfg.healthz.port} \
+            --hostname-override=${cfg.hostname} \
+            --kubeconfig=${kubeconfig} \
+            ${optionalString (cfg.networkPlugin != null)
+              "--network-plugin=${cfg.networkPlugin}"} \
+            ${optionalString (cfg.nodeIp != null)
+              "--node-ip=${cfg.nodeIp}"} \
+            --pod-infra-container-image=pause \
+            ${optionalString (cfg.manifests != {})
+              "--pod-manifest-path=/etc/${manifestPath}"} \
+            --port=${toString cfg.port} \
+            --register-node=${boolToString cfg.registerNode} \
+            ${optionalString (taints != "")
+              "--register-with-taints=${taints}"} \
+            --root-dir=${top.dataDir} \
+            ${optionalString (cfg.tlsCertFile != null)
+              "--tls-cert-file=${cfg.tlsCertFile}"} \
+            ${optionalString (cfg.tlsKeyFile != null)
+              "--tls-private-key-file=${cfg.tlsKeyFile}"} \
+            ${optionalString (cfg.verbosity != null) "--v=${toString cfg.verbosity}"} \
+            ${cfg.extraOpts}
+          '';
+          WorkingDirectory = top.dataDir;
+        };
+      };
+
+      # Always include CNI plugins
+      services.kubernetes.kubelet.cni.packages = [pkgs.cni-plugins];
+
+      boot.kernelModules = ["br_netfilter"];
+
+      services.kubernetes.kubelet.hostname = with config.networking;
+        mkDefault (hostName + optionalString (!isNull domain) ".${domain}");
+
+      services.kubernetes.pki.certs = with top.lib; {
+        kubelet = mkCert {
+          name = "kubelet";
+          CN = top.kubelet.hostname;
+          action = "systemctl restart kubelet.service";
+
+        };
+        kubeletClient = mkCert {
+          name = "kubelet-client";
+          CN = "system:node:${top.kubelet.hostname}";
+          fields = {
+            O = "system:nodes";
+          };
+          action = "systemctl restart kubelet.service";
+        };
+      };
+
+      services.kubernetes.kubelet.kubeconfig.server = mkDefault top.apiserverAddress;
+    })
+
+    (mkIf (cfg.enable && cfg.manifests != {}) {
+      environment.etc = mapAttrs' (name: manifest:
+        nameValuePair "${manifestPath}/${name}.json" {
+          text = builtins.toJSON manifest;
+          mode = "0755";
+        }
+      ) cfg.manifests;
+    })
+
+    (mkIf (cfg.unschedulable && cfg.enable) {
+      services.kubernetes.kubelet.taints.unschedulable = {
+        value = "true";
+        effect = "NoSchedule";
+      };
+    })
+
+  ];
+}
diff --git a/nixos/modules/services/cluster/kubernetes/pki.nix b/nixos/modules/services/cluster/kubernetes/pki.nix
new file mode 100644
index 00000000000..38deca23a99
--- /dev/null
+++ b/nixos/modules/services/cluster/kubernetes/pki.nix
@@ -0,0 +1,388 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  top = config.services.kubernetes;
+  cfg = top.pki;
+
+  csrCA = pkgs.writeText "kube-pki-cacert-csr.json" (builtins.toJSON {
+    key = {
+        algo = "rsa";
+        size = 2048;
+    };
+    names = singleton cfg.caSpec;
+  });
+
+  csrCfssl = pkgs.writeText "kube-pki-cfssl-csr.json" (builtins.toJSON {
+    key = {
+        algo = "rsa";
+        size = 2048;
+    };
+    CN = top.masterAddress;
+  });
+
+  cfsslAPITokenBaseName = "apitoken.secret";
+  cfsslAPITokenPath = "${config.services.cfssl.dataDir}/${cfsslAPITokenBaseName}";
+  certmgrAPITokenPath = "${top.secretsPath}/${cfsslAPITokenBaseName}";
+  cfsslAPITokenLength = 32;
+
+  clusterAdminKubeconfig = with cfg.certs.clusterAdmin;
+    top.lib.mkKubeConfig "cluster-admin" {
+        server = top.apiserverAddress;
+        certFile = cert;
+        keyFile = key;
+    };
+
+  remote = with config.services; "https://${kubernetes.masterAddress}:${toString cfssl.port}";
+in
+{
+  ###### interface
+  options.services.kubernetes.pki = with lib.types; {
+
+    enable = mkEnableOption "Whether to enable easyCert issuer service.";
+
+    certs = mkOption {
+      description = "List of certificate specs to feed to cert generator.";
+      default = {};
+      type = attrs;
+    };
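+
+    # A hedged example of a cert spec, following the mkCert calls used elsewhere
+    # in these modules (name, CN and action values are illustrative):
+    #   services.kubernetes.pki.certs.myClient = top.lib.mkCert {
+    #     name = "my-client";
+    #     CN = "system:my-client";
+    #     action = "systemctl restart my-client.service";
+    #   };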
+
+    genCfsslCACert = mkOption {
+      description = ''
+        Whether to automatically generate cfssl CA certificate and key,
+        if they don't exist.
+      '';
+      default = true;
+      type = bool;
+    };
+
+    genCfsslAPICerts = mkOption {
+      description = ''
+        Whether to automatically generate cfssl API webserver TLS cert and key,
+        if they don't exist.
+      '';
+      default = true;
+      type = bool;
+    };
+
+    genCfsslAPIToken = mkOption {
+      description = ''
+        Whether to automatically generate cfssl API-token secret,
+        if it doesn't exist.
+      '';
+      default = true;
+      type = bool;
+    };
+
+    pkiTrustOnBootstrap = mkOption {
+      description = "Whether to always trust remote cfssl server upon initial PKI bootstrap.";
+      default = true;
+      type = bool;
+    };
+
+    caCertPathPrefix = mkOption {
+      description = ''
+        Path prefix for the CA certificate to be used for cfssl signing.
+        Suffixes ".pem" and "-key.pem" will be automatically appended for
+        the public and private keys respectively.
+      '';
+      default = "${config.services.cfssl.dataDir}/ca";
+      type = str;
+    };
+
+    caSpec = mkOption {
+      description = "Certificate specification for the auto-generated CAcert.";
+      default = {
+        CN = "kubernetes-cluster-ca";
+        O = "NixOS";
+        OU = "services.kubernetes.pki.caSpec";
+        L = "auto-generated";
+      };
+      type = attrs;
+    };
+
+    etcClusterAdminKubeconfig = mkOption {
+      description = ''
+        Symlink a kubeconfig with cluster-admin privileges to environment path
+        (/etc/&lt;path&gt;).
+      '';
+      default = null;
+      type = nullOr str;
+    };
+
+  };
+
+  ###### implementation
+  config = mkIf cfg.enable
+  (let
+    cfsslCertPathPrefix = "${config.services.cfssl.dataDir}/cfssl";
+    cfsslCert = "${cfsslCertPathPrefix}.pem";
+    cfsslKey = "${cfsslCertPathPrefix}-key.pem";
+  in
+  {
+
+    services.cfssl = mkIf (top.apiserver.enable) {
+      enable = true;
+      address = "0.0.0.0";
+      tlsCert = cfsslCert;
+      tlsKey = cfsslKey;
+      configFile = toString (pkgs.writeText "cfssl-config.json" (builtins.toJSON {
+        signing = {
+          profiles = {
+            default = {
+              usages = ["digital signature"];
+              auth_key = "default";
+              expiry = "720h";
+            };
+          };
+        };
+        auth_keys = {
+          default = {
+            type = "standard";
+            key = "file:${cfsslAPITokenPath}";
+          };
+        };
+      }));
+    };
+
+    systemd.services.cfssl.preStart = with pkgs; with config.services.cfssl; mkIf (top.apiserver.enable)
+    (concatStringsSep "\n" [
+      "set -e"
+      (optionalString cfg.genCfsslCACert ''
+        if [ ! -f "${cfg.caCertPathPrefix}.pem" ]; then
+          ${cfssl}/bin/cfssl genkey -initca ${csrCA} | \
+            ${cfssl}/bin/cfssljson -bare ${cfg.caCertPathPrefix}
+        fi
+      '')
+      (optionalString cfg.genCfsslAPICerts ''
+        if [ ! -f "${dataDir}/cfssl.pem" ]; then
+          ${cfssl}/bin/cfssl gencert -ca "${cfg.caCertPathPrefix}.pem" -ca-key "${cfg.caCertPathPrefix}-key.pem" ${csrCfssl} | \
+            ${cfssl}/bin/cfssljson -bare ${cfsslCertPathPrefix}
+        fi
+      '')
+      (optionalString cfg.genCfsslAPIToken ''
+        if [ ! -f "${cfsslAPITokenPath}" ]; then
+          head -c ${toString (cfsslAPITokenLength / 2)} /dev/urandom | od -An -t x | tr -d ' ' >"${cfsslAPITokenPath}"
+        fi
+        chown cfssl "${cfsslAPITokenPath}" && chmod 400 "${cfsslAPITokenPath}"
+      '')]);
+
+    systemd.services.kube-certmgr-bootstrap = {
+      description = "Kubernetes certmgr bootstrapper";
+      wantedBy = [ "certmgr.service" ];
+      after = [ "cfssl.target" ];
+      script = concatStringsSep "\n" [''
+        set -e
+
+        # If there's a cfssl (cert issuer) running locally, don't rely on the user
+        # to paste the token in place manually; just symlink it.
+        # Otherwise, create the target file, ready for the user to insert the token.
+
+        if [ -f "${cfsslAPITokenPath}" ]; then
+          ln -fs "${cfsslAPITokenPath}" "${certmgrAPITokenPath}"
+        else
+          touch "${certmgrAPITokenPath}" && chmod 600 "${certmgrAPITokenPath}"
+        fi
+      ''
+      (optionalString (cfg.pkiTrustOnBootstrap) ''
+        if [ ! -f "${top.caFile}" ] || [ $(cat "${top.caFile}" | wc -c) -lt 1 ]; then
+          ${pkgs.curl}/bin/curl --fail-early -f -kd '{}' ${remote}/api/v1/cfssl/info | \
+            ${pkgs.cfssl}/bin/cfssljson -stdout >${top.caFile}
+        fi
+      '')
+      ];
+      serviceConfig = {
+        RestartSec = "10s";
+        Restart = "on-failure";
+      };
+    };
+
+    services.certmgr = {
+      enable = true;
+      package = pkgs.certmgr-selfsigned;
+      svcManager = "command";
+      specs =
+        let
+          mkSpec = _: cert: {
+            inherit (cert) action;
+            authority = {
+              inherit remote;
+              file.path = cert.caCert;
+              root_ca = cert.caCert;
+              profile = "default";
+              auth_key_file = certmgrAPITokenPath;
+            };
+            certificate = {
+              path = cert.cert;
+            };
+            private_key = cert.privateKeyOptions;
+            request = {
+              inherit (cert) CN hosts;
+              key = {
+                algo = "rsa";
+                size = 2048;
+              };
+              names = [ cert.fields ];
+            };
+          };
+        in
+          mapAttrs mkSpec cfg.certs;
+      };
+
+      #TODO: Get rid of kube-addon-manager in the future for the following reasons
+      # - it is basically just a shell script wrapped around kubectl
+      # - it assumes that it is clusterAdmin or can gain clusterAdmin rights through serviceAccount
+      # - it is designed to be used with k8s system components only
+      # - it would be better with a more Nix-oriented way of managing addons
+      systemd.services.kube-addon-manager = mkIf top.addonManager.enable (mkMerge [{
+        environment.KUBECONFIG = with cfg.certs.addonManager;
+          top.lib.mkKubeConfig "addon-manager" {
+            server = top.apiserverAddress;
+            certFile = cert;
+            keyFile = key;
+          };
+        }
+
+        (optionalAttrs (top.addonManager.bootstrapAddons != {}) {
+          serviceConfig.PermissionsStartOnly = true;
+          preStart = with pkgs;
+          let
+            files = mapAttrsToList (n: v: writeText "${n}.json" (builtins.toJSON v))
+              top.addonManager.bootstrapAddons;
+          in
+          ''
+            export KUBECONFIG=${clusterAdminKubeconfig}
+            ${kubectl}/bin/kubectl apply -f ${concatStringsSep " \\\n -f " files}
+          '';
+        })]);
+
+      environment.etc.${cfg.etcClusterAdminKubeconfig}.source = mkIf (!isNull cfg.etcClusterAdminKubeconfig)
+        clusterAdminKubeconfig;
+
+      environment.systemPackages = mkIf (top.kubelet.enable || top.proxy.enable) [
+      (pkgs.writeScriptBin "nixos-kubernetes-node-join" ''
+        set -e
+        exec 1>&2
+
+        if [ $# -gt 0 ]; then
+          echo "Usage: $(basename $0)"
+          echo ""
+          echo "No args. Apitoken must be provided on stdin."
+          echo "To get the apitoken, execute: 'sudo cat ${certmgrAPITokenPath}' on the master node."
+          exit 1
+        fi
+
+        if [ $(id -u) != 0 ]; then
+          echo "Run as root please."
+          exit 1
+        fi
+
+        read -r token
+        if [ ''${#token} != ${toString cfsslAPITokenLength} ]; then
+          echo "Token must be of length ${toString cfsslAPITokenLength}."
+          exit 1
+        fi
+
+        echo $token > ${certmgrAPITokenPath}
+        chmod 600 ${certmgrAPITokenPath}
+
+        echo "Restarting certmgr..." >&1
+        systemctl restart certmgr
+
+        echo "Waiting for certs to appear..." >&1
+
+        ${optionalString top.kubelet.enable ''
+          while [ ! -f ${cfg.certs.kubelet.cert} ]; do sleep 1; done
+          echo "Restarting kubelet..." >&1
+          systemctl restart kubelet
+        ''}
+
+        ${optionalString top.proxy.enable ''
+          while [ ! -f ${cfg.certs.kubeProxyClient.cert} ]; do sleep 1; done
+          echo "Restarting kube-proxy..." >&1
+          systemctl restart kube-proxy
+        ''}
+
+        ${optionalString top.flannel.enable ''
+          while [ ! -f ${cfg.certs.flannelClient.cert} ]; do sleep 1; done
+          echo "Restarting flannel..." >&1
+          systemctl restart flannel
+        ''}
+
+        echo "Node joined succesfully"
+      '')];
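+
+      # Typical join flow (a sketch; the token location depends on secretsPath):
+      # read the token from certmgrAPITokenPath on the master node, then on the
+      # joining node run, as root:
+      #   echo <apitoken> | nixos-kubernetes-node-join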
+
+      # Isolate etcd on loopback at the master node;
+      # easyCerts doesn't support multi-master clusters at the moment anyway.
+      services.etcd = with cfg.certs.etcd; {
+        listenClientUrls = ["https://127.0.0.1:2379"];
+        listenPeerUrls = ["https://127.0.0.1:2380"];
+        advertiseClientUrls = ["https://etcd.local:2379"];
+        initialCluster = ["${top.masterAddress}=https://etcd.local:2380"];
+        initialAdvertisePeerUrls = ["https://etcd.local:2380"];
+        certFile = mkDefault cert;
+        keyFile = mkDefault key;
+        trustedCaFile = mkDefault caCert;
+      };
+      networking.extraHosts = mkIf (config.services.etcd.enable) ''
+        127.0.0.1 etcd.${top.addons.dns.clusterDomain} etcd.local
+      '';
+
+      services.flannel = with cfg.certs.flannelClient; {
+        kubeconfig = top.lib.mkKubeConfig "flannel" {
+          server = top.apiserverAddress;
+          certFile = cert;
+          keyFile = key;
+        };
+      };
+
+      services.kubernetes = {
+
+        apiserver = mkIf top.apiserver.enable (with cfg.certs.apiServer; {
+          etcd = with cfg.certs.apiserverEtcdClient; {
+            servers = ["https://etcd.local:2379"];
+            certFile = mkDefault cert;
+            keyFile = mkDefault key;
+            caFile = mkDefault caCert;
+          };
+          clientCaFile = mkDefault caCert;
+          tlsCertFile = mkDefault cert;
+          tlsKeyFile = mkDefault key;
+          serviceAccountKeyFile = mkDefault cfg.certs.serviceAccount.cert;
+          kubeletClientCaFile = mkDefault caCert;
+          kubeletClientCertFile = mkDefault cfg.certs.apiserverKubeletClient.cert;
+          kubeletClientKeyFile = mkDefault cfg.certs.apiserverKubeletClient.key;
+        });
+        controllerManager = mkIf top.controllerManager.enable {
+          serviceAccountKeyFile = mkDefault cfg.certs.serviceAccount.key;
+          rootCaFile = cfg.certs.controllerManagerClient.caCert;
+          kubeconfig = with cfg.certs.controllerManagerClient; {
+            certFile = mkDefault cert;
+            keyFile = mkDefault key;
+          };
+        };
+        scheduler = mkIf top.scheduler.enable {
+          kubeconfig = with cfg.certs.schedulerClient; {
+            certFile = mkDefault cert;
+            keyFile = mkDefault key;
+          };
+        };
+        kubelet = mkIf top.kubelet.enable {
+          clientCaFile = mkDefault cfg.certs.kubelet.caCert;
+          tlsCertFile = mkDefault cfg.certs.kubelet.cert;
+          tlsKeyFile = mkDefault cfg.certs.kubelet.key;
+          kubeconfig = with cfg.certs.kubeletClient; {
+            certFile = mkDefault cert;
+            keyFile = mkDefault key;
+          };
+        };
+        proxy = mkIf top.proxy.enable {
+          kubeconfig = with cfg.certs.kubeProxyClient; {
+            certFile = mkDefault cert;
+            keyFile = mkDefault key;
+          };
+        };
+      };
+    });
+}
diff --git a/nixos/modules/services/cluster/kubernetes/proxy.nix b/nixos/modules/services/cluster/kubernetes/proxy.nix
new file mode 100644
index 00000000000..6bcf2eaca82
--- /dev/null
+++ b/nixos/modules/services/cluster/kubernetes/proxy.nix
@@ -0,0 +1,80 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  top = config.services.kubernetes;
+  cfg = top.proxy;
+in
+{
+
+  ###### interface
+  options.services.kubernetes.proxy = with lib.types; {
+
+    bindAddress = mkOption {
+      description = "Kubernetes proxy listening address.";
+      default = "0.0.0.0";
+      type = str;
+    };
+
+    enable = mkEnableOption "Whether to enable Kubernetes proxy.";
+
+    extraOpts = mkOption {
+      description = "Kubernetes proxy extra command line options.";
+      default = "";
+      type = str;
+    };
+
+    featureGates = mkOption {
+      description = "List set of feature gates";
+      default = top.featureGates;
+      type = listOf str;
+    };
+
+    kubeconfig = top.lib.mkKubeConfigOptions "Kubernetes proxy";
+
+    verbosity = mkOption {
+      description = ''
+        Optional glog verbosity level for logging statements. See
+        <link xlink:href="https://github.com/kubernetes/community/blob/master/contributors/devel/logging.md"/>
+      '';
+      default = null;
+      type = nullOr int;
+    };
+
+  };
+
+  ###### implementation
+  config = mkIf cfg.enable {
+    systemd.services.kube-proxy = {
+      description = "Kubernetes Proxy Service";
+      wantedBy = [ "kubernetes.target" ];
+      after = [ "kube-apiserver.service" ];
+      path = with pkgs; [ iptables conntrack_tools ];
+      serviceConfig = {
+        Slice = "kubernetes.slice";
+        ExecStart = ''${top.package}/bin/kube-proxy \
+          --bind-address=${cfg.bindAddress} \
+          ${optionalString (top.clusterCidr!=null)
+            "--cluster-cidr=${top.clusterCidr}"} \
+          ${optionalString (cfg.featureGates != [])
+            "--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.featureGates}"} \
+          --kubeconfig=${top.lib.mkKubeConfig "kube-proxy" cfg.kubeconfig} \
+          ${optionalString (cfg.verbosity != null) "--v=${toString cfg.verbosity}"} \
+          ${cfg.extraOpts}
+        '';
+        WorkingDirectory = top.dataDir;
+      };
+    };
+
+    services.kubernetes.pki.certs = {
+      kubeProxyClient = top.lib.mkCert {
+        name = "kube-proxy-client";
+        CN = "system:kube-proxy";
+        action = "systemctl restart kube-proxy.service";
+      };
+    };
+
+    services.kubernetes.proxy.kubeconfig.server = mkDefault top.apiserverAddress;
+  };
+}
diff --git a/nixos/modules/services/cluster/kubernetes/scheduler.nix b/nixos/modules/services/cluster/kubernetes/scheduler.nix
new file mode 100644
index 00000000000..655e6f8b6e2
--- /dev/null
+++ b/nixos/modules/services/cluster/kubernetes/scheduler.nix
@@ -0,0 +1,92 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  top = config.services.kubernetes;
+  cfg = top.scheduler;
+in
+{
+  ###### interface
+  options.services.kubernetes.scheduler = with lib.types; {
+
+    address = mkOption {
+      description = "Kubernetes scheduler listening address.";
+      default = "127.0.0.1";
+      type = str;
+    };
+
+    enable = mkEnableOption "Whether to enable Kubernetes scheduler.";
+
+    extraOpts = mkOption {
+      description = "Kubernetes scheduler extra command line options.";
+      default = "";
+      type = str;
+    };
+
+    featureGates = mkOption {
+      description = "List set of feature gates";
+      default = top.featureGates;
+      type = listOf str;
+    };
+
+    kubeconfig = top.lib.mkKubeConfigOptions "Kubernetes scheduler";
+
+    leaderElect = mkOption {
+      description = "Whether to start leader election before executing main loop.";
+      type = bool;
+      default = true;
+    };
+
+    port = mkOption {
+      description = "Kubernetes scheduler listening port.";
+      default = 10251;
+      type = int;
+    };
+
+    verbosity = mkOption {
+      description = ''
+        Optional glog verbosity level for logging statements. See
+        <link xlink:href="https://github.com/kubernetes/community/blob/master/contributors/devel/logging.md"/>
+      '';
+      default = null;
+      type = nullOr int;
+    };
+
+  };
+
+  ###### implementation
+  config = mkIf cfg.enable {
+    systemd.services.kube-scheduler = {
+      description = "Kubernetes Scheduler Service";
+      wantedBy = [ "kubernetes.target" ];
+      after = [ "kube-apiserver.service" ];
+      serviceConfig = {
+        Slice = "kubernetes.slice";
+        ExecStart = ''${top.package}/bin/kube-scheduler \
+          --address=${cfg.address} \
+          ${optionalString (cfg.featureGates != [])
+            "--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.featureGates}"} \
+          --kubeconfig=${top.lib.mkKubeConfig "kube-scheduler" cfg.kubeconfig} \
+          --leader-elect=${boolToString cfg.leaderElect} \
+          --port=${toString cfg.port} \
+          ${optionalString (cfg.verbosity != null) "--v=${toString cfg.verbosity}"} \
+          ${cfg.extraOpts}
+        '';
+        WorkingDirectory = top.dataDir;
+        User = "kubernetes";
+        Group = "kubernetes";
+      };
+    };
+
+    services.kubernetes.pki.certs = {
+      schedulerClient = top.lib.mkCert {
+        name = "kube-scheduler-client";
+        CN = "system:kube-scheduler";
+        action = "systemctl restart kube-scheduler.service";
+      };
+    };
+
+    services.kubernetes.scheduler.kubeconfig.server = mkDefault top.apiserverAddress;
+  };
+}
diff --git a/nixos/modules/services/hardware/bolt.nix b/nixos/modules/services/hardware/bolt.nix
new file mode 100644
index 00000000000..32b60af0603
--- /dev/null
+++ b/nixos/modules/services/hardware/bolt.nix
@@ -0,0 +1,34 @@
+# Thunderbolt 3 device manager
+
+{ config, lib, pkgs, ...}:
+
+with lib;
+
+{
+  options = {
+
+    services.hardware.bolt = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Whether to enable Bolt, a userspace daemon to enable
+          security levels for Thunderbolt 3 on GNU/Linux.
+
+          Bolt is used by GNOME 3 to handle Thunderbolt settings.
+        '';
+      };
+
+    };
+
+  };
+
+  config = mkIf config.services.hardware.bolt.enable {
+
+    environment.systemPackages = [ pkgs.bolt ];
+    services.udev.packages = [ pkgs.bolt ];
+    systemd.packages = [ pkgs.bolt ];
+
+  };
+}
diff --git a/nixos/modules/services/misc/gitlab.nix b/nixos/modules/services/misc/gitlab.nix
index 25c258ebe13..b8617e48d8e 100644
--- a/nixos/modules/services/misc/gitlab.nix
+++ b/nixos/modules/services/misc/gitlab.nix
@@ -22,7 +22,8 @@ let
       password = cfg.databasePassword;
       username = cfg.databaseUsername;
       encoding = "utf8";
-    };
+      pool = cfg.databasePool;
+    } // cfg.extraDatabaseConfig;
   };
 
   gitalyToml = pkgs.writeText "gitaly.toml" ''
@@ -253,6 +254,18 @@ in {
         description = "Gitlab database user.";
       };
 
+      databasePool = mkOption {
+        type = types.int;
+        default = 5;
+        description = "Database connection pool size.";
+      };
+
+      extraDatabaseConfig = mkOption {
+        type = types.attrs;
+        default = {};
+        description = "Extra configuration in config/database.yml.";
+      };
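+
+      # Hypothetical example: any key accepted in a Rails database.yml can be
+      # merged in, e.g.
+      #   services.gitlab.extraDatabaseConfig = { sslmode = "require"; };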
+
       host = mkOption {
         type = types.str;
         default = config.networking.hostName;
diff --git a/nixos/modules/services/misc/headphones.nix b/nixos/modules/services/misc/headphones.nix
new file mode 100644
index 00000000000..4a77045be28
--- /dev/null
+++ b/nixos/modules/services/misc/headphones.nix
@@ -0,0 +1,87 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  name = "headphones";
+
+  cfg = config.services.headphones;
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+    services.headphones = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = "Whether to enable the headphones server.";
+      };
+      dataDir = mkOption {
+        type = types.path;
+        default = "/var/lib/${name}";
+        description = "Path where to store data files.";
+      };
+      configFile = mkOption {
+        type = types.path;
+        default = "${cfg.dataDir}/config.ini";
+        description = "Path to config file.";
+      };
+      host = mkOption {
+        type = types.str;
+        default = "localhost";
+        description = "Host to listen on.";
+      };
+      port = mkOption {
+        type = types.ints.u16;
+        default = 8181;
+        description = "Port to bind to.";
+      };
+      user = mkOption {
+        type = types.str;
+        default = name;
+        description = "User to run the service as";
+      };
+      group = mkOption {
+        type = types.str;
+        default = name;
+        description = "Group to run the service as";
+      };
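+
+      # Example (illustrative): listen on all interfaces on the default port:
+      #   services.headphones = {
+      #     enable = true;
+      #     host = "0.0.0.0";
+      #   };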
+    };
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    users.users = optionalAttrs (cfg.user == name) (singleton {
+      name = name;
+      uid = config.ids.uids.headphones;
+      group = cfg.group;
+      description = "headphones user";
+      home = cfg.dataDir;
+      createHome = true;
+    });
+
+    users.groups = optionalAttrs (cfg.group == name) (singleton {
+      name = name;
+      gid = config.ids.gids.headphones;
+    });
+
+    systemd.services.headphones = {
+        description = "Headphones Server";
+        wantedBy    = [ "multi-user.target" ];
+        after = [ "network.target" ];
+        serviceConfig = {
+          User = cfg.user;
+          Group = cfg.group;
+          ExecStart = "${pkgs.headphones}/bin/headphones --datadir ${cfg.dataDir} --config ${cfg.configFile} --host ${cfg.host} --port ${toString cfg.port}";
+        };
+    };
+  };
+}
diff --git a/nixos/modules/services/misc/jackett.nix b/nixos/modules/services/misc/jackett.nix
index 8d1b3d225a4..b18ce2b1f81 100644
--- a/nixos/modules/services/misc/jackett.nix
+++ b/nixos/modules/services/misc/jackett.nix
@@ -4,11 +4,36 @@ with lib;
 
 let
   cfg = config.services.jackett;
+
 in
 {
   options = {
     services.jackett = {
       enable = mkEnableOption "Jackett";
+
+      dataDir = mkOption {
+        type = types.str;
+        default = "/var/lib/jackett/.config/Jackett";
+        description = "The directory where Jackett stores its data files.";
+      };
+
+      openFirewall = mkOption {
+        type = types.bool;
+        default = false;
+        description = "Open ports in the firewall for the Jackett web interface.";
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "jackett";
+        description = "User account under which Jackett runs.";
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "jackett";
+        description = "Group under which Jackett runs.";
+      };
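+
+      # Example (illustrative values): expose the web UI and run under a shared
+      # media account:
+      #   services.jackett = {
+      #     enable = true;
+      #     openFirewall = true;  # opens TCP 9117
+      #     user = "media";
+      #     group = "media";
+      #   };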
     };
   };
 
@@ -18,30 +43,38 @@ in
       after = [ "network.target" ];
       wantedBy = [ "multi-user.target" ];
       preStart = ''
-        test -d /var/lib/jackett/ || {
-          echo "Creating jackett data directory in /var/lib/jackett/"
-          mkdir -p /var/lib/jackett/
+        test -d ${cfg.dataDir} || {
+          echo "Creating jackett data directory in ${cfg.dataDir}"
+          mkdir -p ${cfg.dataDir}
         }
-        chown -R jackett:jackett /var/lib/jackett/
-        chmod 0700 /var/lib/jackett/
+        chown -R ${cfg.user}:${cfg.group} ${cfg.dataDir}
+        chmod 0700 ${cfg.dataDir}
       '';
 
       serviceConfig = {
         Type = "simple";
-        User = "jackett";
-        Group = "jackett";
+        User = cfg.user;
+        Group = cfg.group;
         PermissionsStartOnly = "true";
-        ExecStart = "${pkgs.jackett}/bin/Jackett";
+        ExecStart = "${pkgs.jackett}/bin/Jackett --NoUpdates --DataFolder '${cfg.dataDir}'";
         Restart = "on-failure";
       };
     };
 
-    users.users.jackett = {
-      uid = config.ids.uids.jackett;
-      home = "/var/lib/jackett";
-      group = "jackett";
+    networking.firewall = mkIf cfg.openFirewall {
+      allowedTCPPorts = [ 9117 ];
+    };
+
+    users.users = mkIf (cfg.user == "jackett") {
+      jackett = {
+        group = cfg.group;
+        home = cfg.dataDir;
+        uid = config.ids.uids.jackett;
+      };
     };
-    users.groups.jackett.gid = config.ids.gids.jackett;
 
+    users.groups = mkIf (cfg.group == "jackett") {
+      jackett.gid = config.ids.gids.jackett;
+    };
   };
 }
diff --git a/nixos/modules/services/misc/radarr.nix b/nixos/modules/services/misc/radarr.nix
index 1a9fad3883c..9ab26d84832 100644
--- a/nixos/modules/services/misc/radarr.nix
+++ b/nixos/modules/services/misc/radarr.nix
@@ -4,11 +4,36 @@ with lib;
 
 let
   cfg = config.services.radarr;
+
 in
 {
   options = {
     services.radarr = {
       enable = mkEnableOption "Radarr";
+
+      dataDir = mkOption {
+        type = types.str;
+        default = "/var/lib/radarr/.config/Radarr";
+        description = "The directory where Radarr stores its data files.";
+      };
+
+      openFirewall = mkOption {
+        type = types.bool;
+        default = false;
+        description = "Open ports in the firewall for the Radarr web interface.";
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "radarr";
+        description = "User account under which Radarr runs.";
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "radarr";
+        description = "Group under which Radarr runs.";
+      };
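+
+      # Example (illustrative values), analogous to the Jackett module:
+      #   services.radarr = {
+      #     enable = true;
+      #     openFirewall = true;  # opens TCP 7878
+      #   };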
     };
   };
 
@@ -18,30 +43,38 @@ in
       after = [ "network.target" ];
       wantedBy = [ "multi-user.target" ];
       preStart = ''
-        test -d /var/lib/radarr/ || {
-          echo "Creating radarr data directory in /var/lib/radarr/"
-          mkdir -p /var/lib/radarr/
+        test -d ${cfg.dataDir} || {
+          echo "Creating radarr data directory in ${cfg.dataDir}"
+          mkdir -p ${cfg.dataDir}
         }
-        chown -R radarr:radarr /var/lib/radarr/
-        chmod 0700 /var/lib/radarr/
+        chown -R ${cfg.user}:${cfg.group} ${cfg.dataDir}
+        chmod 0700 ${cfg.dataDir}
       '';
 
       serviceConfig = {
         Type = "simple";
-        User = "radarr";
-        Group = "radarr";
+        User = cfg.user;
+        Group = cfg.group;
         PermissionsStartOnly = "true";
-        ExecStart = "${pkgs.radarr}/bin/Radarr";
+        ExecStart = "${pkgs.radarr}/bin/Radarr -nobrowser -data='${cfg.dataDir}'";
         Restart = "on-failure";
       };
     };
 
-    users.users.radarr = {
-      uid = config.ids.uids.radarr;
-      home = "/var/lib/radarr";
-      group = "radarr";
+    networking.firewall = mkIf cfg.openFirewall {
+      allowedTCPPorts = [ 7878 ];
+    };
+
+    users.users = mkIf (cfg.user == "radarr") {
+      radarr = {
+        group = cfg.group;
+        home = cfg.dataDir;
+        uid = config.ids.uids.radarr;
+      };
     };
-    users.groups.radarr.gid = config.ids.gids.radarr;
 
+    users.groups = mkIf (cfg.group == "radarr") {
+      radarr.gid = config.ids.gids.radarr;
+    };
   };
 }
diff --git a/nixos/modules/services/misc/zoneminder.nix b/nixos/modules/services/misc/zoneminder.nix
index a40e9e84613..ae7de7850d9 100644
--- a/nixos/modules/services/misc/zoneminder.nix
+++ b/nixos/modules/services/misc/zoneminder.nix
@@ -205,15 +205,13 @@ in {
 
       mysql = lib.mkIf cfg.database.createLocally {
         ensureDatabases = [ cfg.database.name ];
-        ensureUsers = {
+        ensureUsers = [{
           name = cfg.database.username;
-          ensurePermissions = [
-            { "${cfg.database.name}.*" = "ALL PRIVILEGES"; }
-          ];
+          ensurePermissions = { "${cfg.database.name}.*" = "ALL PRIVILEGES"; };
           initialDatabases = [
             { inherit (cfg.database) name; schema = "${pkg}/share/zoneminder/db/zm_create.sql"; }
           ];
-        };
+        }];
       };
 
       nginx = lib.mkIf useNginx {
diff --git a/nixos/modules/services/network-filesystems/diod.nix b/nixos/modules/services/network-filesystems/diod.nix
index 556fad4d8ab..063bae6ddb1 100644
--- a/nixos/modules/services/network-filesystems/diod.nix
+++ b/nixos/modules/services/network-filesystems/diod.nix
@@ -153,7 +153,6 @@ in
       after = [ "network.target" ];
       serviceConfig = {
         ExecStart = "${pkgs.diod}/sbin/diod -f -c ${diodConfig}";
-        CapabilityBoundingSet = "cap_net_bind_service+=ep";
       };
     };
   };
diff --git a/nixos/modules/services/networking/flannel.nix b/nixos/modules/services/networking/flannel.nix
index b93e28e34ef..ec702cdc6ff 100644
--- a/nixos/modules/services/networking/flannel.nix
+++ b/nixos/modules/services/networking/flannel.nix
@@ -73,11 +73,35 @@ in {
       };
     };
 
+    kubeconfig = mkOption {
+      description = ''
+        Path to kubeconfig to use for storing flannel config using the
+        Kubernetes API.
+      '';
+      type = types.nullOr types.path;
+      default = null;
+    };
+
     network = mkOption {
       description = " IPv4 network in CIDR format to use for the entire flannel network.";
       type = types.str;
     };
 
+    nodeName = mkOption {
+      description = ''
+        Needed when running with the Kubernetes storage backend, as this cannot be auto-detected.
+      '';
+      type = types.nullOr types.str;
+      default = with config.networking; (hostName + optionalString (!isNull domain) ".${domain}");
+      example = "node1.example.com";
+    };
+
+    storageBackend = mkOption {
+      description = "Determines where flannel stores its configuration at runtime";
+      type = types.enum ["etcd" "kubernetes"];
+      default = "etcd";
+    };
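+
+    # A minimal sketch of the Kubernetes storage backend (kubeconfig path and
+    # node name are illustrative):
+    #   services.flannel = {
+    #     storageBackend = "kubernetes";
+    #     kubeconfig = "/etc/kubernetes/flannel.kubeconfig";
+    #     nodeName = "node1.example.com";
+    #   };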
+
     subnetLen = mkOption {
       description = ''
         The size of the subnet allocated to each host. Defaults to 24 (i.e. /24)
@@ -122,17 +146,25 @@ in {
       after = [ "network.target" ];
       environment = {
         FLANNELD_PUBLIC_IP = cfg.publicIp;
+        FLANNELD_IFACE = cfg.iface;
+      } // optionalAttrs (cfg.storageBackend == "etcd") {
         FLANNELD_ETCD_ENDPOINTS = concatStringsSep "," cfg.etcd.endpoints;
         FLANNELD_ETCD_KEYFILE = cfg.etcd.keyFile;
         FLANNELD_ETCD_CERTFILE = cfg.etcd.certFile;
         FLANNELD_ETCD_CAFILE = cfg.etcd.caFile;
-        FLANNELD_IFACE = cfg.iface;
         ETCDCTL_CERT_FILE = cfg.etcd.certFile;
         ETCDCTL_KEY_FILE = cfg.etcd.keyFile;
         ETCDCTL_CA_FILE = cfg.etcd.caFile;
         ETCDCTL_PEERS = concatStringsSep "," cfg.etcd.endpoints;
+      } // optionalAttrs (cfg.storageBackend == "kubernetes") {
+        FLANNELD_KUBE_SUBNET_MGR = "true";
+        FLANNELD_KUBECONFIG_FILE = cfg.kubeconfig;
+        NODE_NAME = cfg.nodeName;
       };
       preStart = ''
+        mkdir -p /run/flannel
+        touch /run/flannel/docker
+      '' + optionalString (cfg.storageBackend == "etcd") ''
         echo "setting network configuration"
         until ${pkgs.etcdctl.bin}/bin/etcdctl set /coreos.com/network/config '${builtins.toJSON networkConfig}'
         do
@@ -140,15 +172,19 @@ in {
           sleep 1
         done
       '';
-      postStart = ''
-        while [ ! -f /run/flannel/subnet.env ]
-        do
-          sleep 1
-        done
-      '';
-      serviceConfig.ExecStart = "${cfg.package}/bin/flannel";
+      serviceConfig = {
+        ExecStart = "${cfg.package}/bin/flannel";
+        Restart = "always";
+        RestartSec = "10s";
+      };
     };
 
-    services.etcd.enable = mkDefault (cfg.etcd.endpoints == ["http://127.0.0.1:2379"]);
+    services.etcd.enable = mkDefault (cfg.storageBackend == "etcd" && cfg.etcd.endpoints == ["http://127.0.0.1:2379"]);
+
+    # for some reason, flannel doesn't let you configure this path
+    # see: https://github.com/coreos/flannel/blob/master/Documentation/configuration.md#configuration
+    environment.etc."kube-flannel/net-conf.json" = mkIf (cfg.storageBackend == "kubernetes") {
+      source = pkgs.writeText "net-conf.json" (builtins.toJSON networkConfig);
+    };
   };
 }
diff --git a/nixos/modules/services/networking/quassel.nix b/nixos/modules/services/networking/quassel.nix
index d850bb8b130..b223a48e055 100644
--- a/nixos/modules/services/networking/quassel.nix
+++ b/nixos/modules/services/networking/quassel.nix
@@ -23,6 +23,22 @@ in
         '';
       };
 
+      certificateFile = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = ''
+          Path to the certificate used for SSL connections with clients.
+        '';
+      };
+
+      requireSSL = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Require SSL for connections from clients.
+        '';
+      };
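+
+      # A minimal sketch of an SSL-enforcing core (certificate path illustrative):
+      #   services.quassel = {
+      #     enable = true;
+      #     certificateFile = "/var/lib/quassel/quasselCert.pem";
+      #     requireSSL = true;
+      #   };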
+
       package = mkOption {
         type = types.package;
         default = pkgs.quasselDaemon;
@@ -71,6 +87,10 @@ in
   ###### implementation
 
   config = mkIf cfg.enable {
+    assertions = [
+      { assertion = cfg.requireSSL -> cfg.certificateFile != null;
+        message = "Quassel needs a certificate file in order to require SSL";
+      }];
 
     users.users = mkIf (cfg.user == null) [
       { name = "quassel";
@@ -98,7 +118,13 @@ in
 
         serviceConfig =
         {
-          ExecStart = "${quassel}/bin/quasselcore --listen=${concatStringsSep '','' cfg.interfaces} --port=${toString cfg.portNumber} --configdir=${cfg.dataDir}";
+          ExecStart = concatStringsSep " " ([
+            "${quassel}/bin/quasselcore"
+            "--listen=${concatStringsSep "," cfg.interfaces}"
+            "--port=${toString cfg.portNumber}"
+            "--configdir=${cfg.dataDir}"
+          ] ++ optional cfg.requireSSL "--require-ssl"
+            ++ optional (cfg.certificateFile != null) "--ssl-cert=${cfg.certificateFile}");
           User = user;
           PermissionsStartOnly = true;
         };
diff --git a/nixos/modules/services/networking/wpa_supplicant.nix b/nixos/modules/services/networking/wpa_supplicant.nix
index 8622212f085..cdfe98aa034 100644
--- a/nixos/modules/services/networking/wpa_supplicant.nix
+++ b/nixos/modules/services/networking/wpa_supplicant.nix
@@ -86,7 +86,12 @@ in {
               '';
               description = ''
                 Use this option to configure advanced authentication methods like EAP.
-                See wpa_supplicant.conf(5) for example configurations.
+                See
+                <citerefentry>
+                  <refentrytitle>wpa_supplicant.conf</refentrytitle>
+                  <manvolnum>5</manvolnum>
+                </citerefentry>
+                for example configurations.
 
                 Mutually exclusive with <varname>psk</varname> and <varname>pskRaw</varname>.
               '';
@@ -122,7 +127,12 @@ in {
               '';
               description = ''
                 Extra configuration lines appended to the network block.
-                See wpa_supplicant.conf(5) for available options.
+                See
+                <citerefentry>
+                  <refentrytitle>wpa_supplicant.conf</refentrytitle>
+                  <manvolnum>5</manvolnum>
+                </citerefentry>
+                for available options.
               '';
             };
 
@@ -174,7 +184,12 @@ in {
         '';
         description = ''
           Extra lines appended to the configuration file.
-          See wpa_supplicant.conf(5) for available options.
+          See
+          <citerefentry>
+            <refentrytitle>wpa_supplicant.conf</refentrytitle>
+            <manvolnum>5</manvolnum>
+          </citerefentry>
+          for available options.
         '';
       };
     };
diff --git a/nixos/modules/services/printing/cupsd.nix b/nixos/modules/services/printing/cupsd.nix
index 1031d6f3d7e..3a43ebbb889 100644
--- a/nixos/modules/services/printing/cupsd.nix
+++ b/nixos/modules/services/printing/cupsd.nix
@@ -316,6 +316,10 @@ in
             mkdir -m 0755 -p ${cfg.tempDir}
 
             mkdir -m 0755 -p /var/lib/cups
+            # While cups will automatically create self-signed certificates if accessed via TLS,
+            # this directory to store the certificates needs to be created manually.
+            mkdir -m 0700 -p /var/lib/cups/ssl
+
             # Backwards compatibility
             if [ ! -L /etc/cups ]; then
               mv /etc/cups/* /var/lib/cups
diff --git a/nixos/modules/services/web-apps/icingaweb2/icingaweb2.nix b/nixos/modules/services/web-apps/icingaweb2/icingaweb2.nix
new file mode 100644
index 00000000000..ccaa2cff1c2
--- /dev/null
+++ b/nixos/modules/services/web-apps/icingaweb2/icingaweb2.nix
@@ -0,0 +1,626 @@
+{ config, lib, pkgs, ... }: with lib; let
+  cfg = config.services.icingaweb2;
+  poolName = "icingaweb2";
+  phpfpmSocketName = "/var/run/phpfpm/${poolName}.sock";
+
+  formatBool = b: if b then "1" else "0";
+
+  configIni = let
+    config = cfg.generalConfig;
+  in ''
+    [global]
+    show_stacktraces = "${formatBool config.showStacktraces}"
+    show_application_state_messages = "${formatBool config.showApplicationStateMessages}"
+    module_path = "${pkgs.icingaweb2}/modules${optionalString (builtins.length config.modulePath > 0) ":${concatStringsSep ":" config.modulePath}"}"
+    config_backend = "${config.configBackend}"
+    ${optionalString (config.configBackend == "db") ''config_resource = "${config.configResource}"''}
+
+    [logging]
+    log = "${config.log}"
+    ${optionalString (config.log != "none") ''level = "${config.logLevel}"''}
+    ${optionalString (config.log == "php" || config.log == "syslog") ''application = "${config.logApplication}"''}
+    ${optionalString (config.log == "syslog") ''facility = "${config.logFacility}"''}
+    ${optionalString (config.log == "file") ''file = "${config.logFile}"''}
+
+    [themes]
+    default = "${config.themeDefault}"
+    disabled = "${formatBool config.themeDisabled}"
+
+    [authentication]
+    ${optionalString (config.authDefaultDomain != null) ''default_domain = "${config.authDefaultDomain}"''}
+  '';
+
+  resourcesIni = concatStringsSep "\n" (mapAttrsToList (name: config: ''
+    [${name}]
+    type = "${config.type}"
+    ${optionalString (config.type == "db") ''
+      db = "${config.db}"
+      host = "${config.host}"
+      ${optionalString (config.port != null) ''port = "${toString config.port}"''}
+      username = "${config.username}"
+      password = "${config.password}"
+      dbname = "${config.dbname}"
+      ${optionalString (config.charset != null) ''charset = "${config.charset}"''}
+      use_ssl = "${formatBool config.useSSL}"
+      ${optionalString (config.sslCert != null) ''ssl_cert = "${config.sslCert}"''}
+      ${optionalString (config.sslKey != null) ''ssl_key = "${config.sslKey}"''}
+      ${optionalString (config.sslCA != null) ''ssl_ca = "${config.sslCA}"''}
+      ${optionalString (config.sslCApath != null) ''ssl_capath = "${config.sslCApath}"''}
+      ${optionalString (config.sslCipher != null) ''ssl_cipher = "${config.sslCipher}"''}
+    ''}
+    ${optionalString (config.type == "ldap") ''
+      hostname = "${config.host}"
+      ${optionalString (config.port != null) ''port = "${toString config.port}"''}
+      root_dn = "${config.rootDN}"
+      bind_dn = "${config.username}"
+      bind_pw = "${config.password}"
+      encryption = "${config.ldapEncryption}"
+      timeout = "${toString config.ldapTimeout}"
+    ''}
+    ${optionalString (config.type == "ssh") ''
+      user = "${config.username}"
+      private_key = "${config.sshPrivateKey}"
+    ''}
+
+  '') cfg.resources);
+
+  authenticationIni = concatStringsSep "\n" (mapAttrsToList (name: config: ''
+    [${name}]
+    backend = "${config.backend}"
+    ${optionalString (config.domain != null) ''domain = "${config.domain}"''}
+    ${optionalString (config.backend == "external" && config.externalStripRegex != null) ''strip_username_regexp = "${config.externalStripRegex}"''}
+    ${optionalString (config.backend != "external") ''resource = "${config.resource}"''}
+    ${optionalString (config.backend == "ldap" || config.backend == "msldap") ''
+      ${optionalString (config.ldapUserClass != null) ''user_class = "${config.ldapUserClass}"''}
+      ${optionalString (config.ldapUserNameAttr != null) ''user_name_attribute = "${config.ldapUserNameAttr}"''}
+      ${optionalString (config.ldapFilter != null) ''filter = "${config.ldapFilter}"''}
+    ''}
+  '') cfg.authentications);
+
+  groupsIni = concatStringsSep "\n" (mapAttrsToList (name: config: ''
+    [${name}]
+    backend = "${config.backend}"
+    resource = "${config.resource}"
+    ${optionalString (config.backend != "db") ''
+      ${optionalString (config.ldapUserClass != null) ''user_class = "${config.ldapUserClass}"''}
+      ${optionalString (config.ldapUserNameAttr != null) ''user_name_attribute = "${config.ldapUserNameAttr}"''}
+      ${optionalString (config.ldapGroupClass != null) ''group_class = "${config.ldapGroupClass}"''}
+      ${optionalString (config.ldapGroupNameAttr != null) ''group_name_attribute = "${config.ldapGroupNameAttr}"''}
+      ${optionalString (config.ldapGroupFilter != null) ''group_filter = "${config.ldapGroupFilter}"''}
+    ''}
+    ${optionalString (config.backend == "msldap" && config.ldapNestedSearch) ''nested_group_search = "1"''}
+  '') cfg.groupBackends);
+
+  rolesIni = let
+    optionalList = var: attribute: optionalString (builtins.length var > 0) ''${attribute} = "${concatStringsSep "," var}"'';
+  in concatStringsSep "\n" (mapAttrsToList (name: config: ''
+    [${name}]
+    ${optionalList config.users "users"}
+    ${optionalList config.groups "groups"}
+    ${optionalList config.permissions "permissions"}
+    ${optionalList config.permissions "permissions"}
+    ${concatStringsSep "\n" (mapAttrsToList (key: value: optionalList value key) config.extraAssignments)}
+  '') cfg.roles);
+
+in {
+  options.services.icingaweb2 = with types; {
+    enable = mkEnableOption "the icingaweb2 web interface";
+
+    pool = mkOption {
+      type = str;
+      default = "${poolName}";
+      description = ''
+         Name of an existing PHP-FPM pool that is used to run Icingaweb2.
+         If not specified, a pool will automatically be created with default values.
+      '';
+    };
+
+    virtualHost = mkOption {
+      type = nullOr str;
+      default = "icingaweb2";
+      description = ''
+        Name of the nginx virtualhost to use and set up. If null, no virtualhost is set up.
+      '';
+    };
+
+    timezone = mkOption {
+      type = str;
+      default = "UTC";
+      example = "Europe/Berlin";
+      description = "PHP-compliant timezone specification";
+    };
+
+    modules = {
+      doc.enable = mkEnableOption "the icingaweb2 doc module";
+      migrate.enable = mkEnableOption "the icingaweb2 migrate module";
+      setup.enable = mkEnableOption "the icingaweb2 setup module";
+      test.enable = mkEnableOption "the icingaweb2 test module";
+      translation.enable = mkEnableOption "the icingaweb2 translation module";
+    };
+
+    modulePackages = mkOption {
+      type = attrsOf package;
+      default = {};
+      example = literalExample ''
+        {
+          "snow" = pkgs.icingaweb2Modules.theme-snow;
+        }
+      '';
+      description = ''
+        Name-package attrset of Icingaweb 2 module packages to enable.
+
+        If you enable modules manually (e.g. via the web UI), they will not be touched.
+      '';
+    };
+
+    generalConfig = {
+      mutable = mkOption {
+        type = bool;
+        default = false;
+        description = ''
+          Make config.ini mutable (e.g. via the web interface).
+          Note that you need to update module_path manually.
+        '';
+      };
+
+      showStacktraces = mkOption {
+        type = bool;
+        default = true;
+        description = "Enable stack traces in the Web UI";
+      };
+
+      showApplicationStateMessages = mkOption {
+        type = bool;
+        default = true;
+        description = "Enable application state messages in the Web UI";
+      };
+
+      modulePath = mkOption {
+        type = listOf str;
+        default = [];
+        description = "List of additional module search paths";
+      };
+
+      configBackend = mkOption {
+        type = enum [ "ini" "db" "none" ];
+        default = "db";
+        description = "Where to store user preferences";
+      };
+
+      configResource = mkOption {
+        type = nullOr str;
+        default = null;
+        description = "Database resource where user preferences are stored (if they are stored in a database)";
+      };
+
+      log = mkOption {
+        type = enum [ "syslog" "php" "file" "none" ];
+        default = "syslog";
+        description = "Logging target";
+      };
+
+      logLevel = mkOption {
+        type = enum [ "ERROR" "WARNING" "INFO" "DEBUG" ];
+        default = "ERROR";
+        description = "Maximum logging level to emit";
+      };
+
+      logApplication = mkOption {
+        type = str;
+        default = "icingaweb2";
+        description = "Application name to log under (syslog and php log)";
+      };
+
+      logFacility = mkOption {
+        type = enum [ "user" "local0" "local1" "local2" "local3" "local4" "local5" "local6" "local7" ];
+        default = "user";
+        description = "Syslog facility to log to";
+      };
+
+      logFile = mkOption {
+        type = str;
+        default = "/var/log/icingaweb2/icingaweb2.log";
+        description = "File to log to";
+      };
+
+      themeDefault = mkOption {
+        type = str;
+        default = "Icinga";
+        description = "Name of the default theme";
+      };
+
+      themeDisabled = mkOption {
+        type = bool;
+        default = false;
+        description = "Disallow users to change the theme";
+      };
+
+      authDefaultDomain = mkOption {
+        type = nullOr str;
+        default = null;
+        description = "Domain for users logging in without a qualified domain";
+      };
+    };
+
+    mutableResources = mkOption {
+      type = bool;
+      default = false;
+      description = "Make resources.ini mutable (e.g. via the web interface)";
+    };
+
+    resources = mkOption {
+      default = {};
+      description = "Icingaweb 2 resources to define";
+      type = attrsOf (submodule ({ name, ... }: {
+        options = {
+          name = mkOption {
+            visible = false;
+            default = name;
+            type = str;
+            description = "Name of this resource";
+          };
+
+          type = mkOption {
+            type = enum [ "db" "ldap" "ssh" ];
+            default = "db";
+            description = "Type of this resouce";
+          };
+
+          db = mkOption {
+            type = enum [ "mysql" "pgsql" ];
+            default = "mysql";
+            description = "Type of this database resource";
+          };
+
+          host = mkOption {
+            type = str;
+            description = "Host to connect to";
+          };
+
+          port = mkOption {
+            type = nullOr port;
+            default = null;
+            description = "Port to connect on";
+          };
+
+          username = mkOption {
+            type = str;
+            description = "Database or SSH user or LDAP bind DN to connect with";
+          };
+
+          password = mkOption {
+            type = str;
+            description = "Password for the database user or LDAP bind DN";
+          };
+
+          dbname = mkOption {
+            type = str;
+            description = "Name of the database to connect to";
+          };
+
+          charset = mkOption {
+            type = nullOr str;
+            default = null;
+            example = "utf8";
+            description = "Database character set to connect with";
+          };
+
+          useSSL = mkOption {
+            type = nullOr bool;
+            default = false;
+            description = "Whether to connect to the database using SSL";
+          };
+
+          sslCert = mkOption {
+            type = nullOr str;
+            default = null;
+            description = "The file path to the SSL certificate. Only available for the mysql database.";
+          };
+
+          sslKey = mkOption {
+            type = nullOr str;
+            default = null;
+            description = "The file path to the SSL key. Only available for the mysql database.";
+          };
+
+          sslCA = mkOption {
+            type = nullOr str;
+            default = null;
+            description = "The file path to the SSL certificate authority. Only available for the mysql database.";
+          };
+
+          sslCApath = mkOption {
+            type = nullOr str;
+            default = null;
+            description = "The file path to the directory that contains the trusted SSL CA certificates in PEM format. Only available for the mysql database.";
+          };
+
+          sslCipher = mkOption {
+            type = nullOr str;
+            default = null;
+            description = "A list of one or more permissible ciphers to use for SSL encryption, in a format understood by OpenSSL. Only available for the mysql database.";
+          };
+
+          rootDN = mkOption {
+            type = str;
+            description = "Root object of the LDAP tree";
+          };
+
+          ldapEncryption = mkOption {
+            type = enum [ "none" "starttls" "ldaps" ];
+            default = "none";
+            description = "LDAP encryption to use";
+          };
+
+          ldapTimeout = mkOption {
+            type = ints.positive;
+            default = 5;
+            description = "Connection timeout for every LDAP connection";
+          };
+
+          sshPrivateKey = mkOption {
+            type = str;
+            description = "The path to the private key of the user";
+          };
+        };
+      }));
+    };
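+
+    # A hedged example of a MySQL resource (all values illustrative):
+    #   services.icingaweb2.resources.icingaweb_db = {
+    #     type = "db";
+    #     db = "mysql";
+    #     host = "localhost";
+    #     username = "icingaweb2";
+    #     password = "secret";
+    #     dbname = "icingaweb2";
+    #   };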
+
+    mutableAuthConfig = mkOption {
+      type = bool;
+      default = true;
+      description = "Make authentication.ini mutable (e.g. via the web interface)";
+    };
+
+    authentications = mkOption {
+      default = {};
+      description = "Icingaweb 2 authentications to define";
+      type = attrsOf (submodule ({ name, ... }: {
+        options = {
+          name = mkOption {
+            visible = false;
+            default = name;
+            type = str;
+            description = "Name of this authentication";
+          };
+
+          backend = mkOption {
+            type = enum [ "external" "ldap" "msldap" "db" ];
+            default = "db";
+            description = "The type of this authentication backend";
+          };
+
+          domain = mkOption {
+            type = nullOr str;
+            default = null;
+            description = "Domain for domain-aware authentication";
+          };
+
+          externalStripRegex = mkOption {
+            type = nullOr str;
+            default = null;
+            description = "Regular expression to strip off specific user name parts";
+          };
+
+          resource = mkOption {
+            type = str;
+            description = "Name of the database/LDAP resource";
+          };
+
+          ldapUserClass = mkOption {
+            type = nullOr str;
+            default = null;
+            description = "LDAP user class";
+          };
+
+          ldapUserNameAttr = mkOption {
+            type = nullOr str;
+            default = null;
+            description = "LDAP attribute which contains the username";
+          };
+
+          ldapFilter = mkOption {
+            type = nullOr str;
+            default = null;
+            description = "LDAP search filter";
+          };
+        };
+      }));
+    };
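+
+    # Example (illustrative): a database-backed authentication referencing the
+    # resource sketched above:
+    #   services.icingaweb2.authentications.icingaweb = {
+    #     backend = "db";
+    #     resource = "icingaweb_db";
+    #   };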
+
+    mutableGroupsConfig = mkOption {
+      type = bool;
+      default = true;
+      description = "Make groups.ini mutable (e.g. via the web interface)";
+    };
+
+    groupBackends = mkOption {
+      default = {};
+      description = "Icingaweb 2 group backends to define";
+      type = attrsOf (submodule ({ name, ... }: {
+        options = {
+          name = mkOption {
+            visible = false;
+            default = name;
+            type = str;
+            description = "Name of this group backend";
+          };
+
+          backend = mkOption {
+            type = enum [ "ldap" "msldap" "db" ];
+            default = "db";
+            description = "The type of this group backend";
+          };
+
+          resource = mkOption {
+            type = str;
+            description = "Name of the database/LDAP resource";
+          };
+
+          ldapUserClass = mkOption {
+            type = nullOr str;
+            default = null;
+            description = "LDAP user class";
+          };
+
+          ldapUserNameAttr = mkOption {
+            type = nullOr str;
+            default = null;
+            description = "LDAP attribute which contains the username";
+          };
+
+          ldapGroupClass = mkOption {
+            type = nullOr str;
+            default = null;
+            description = "LDAP group class";
+          };
+
+          ldapGroupNameAttr = mkOption {
+            type = nullOr str;
+            default = null;
+            description = "LDAP attribute which contains the groupname";
+          };
+
+          ldapGroupFilter = mkOption {
+            type = nullOr str;
+            default = null;
+            description = "LDAP group search filter";
+          };
+
+          ldapNestedSearch = mkOption {
+            type = bool;
+            default = false;
+            description = "Enable nested group search in Active Directory based on the user";
+          };
+        };
+      }));
+    };
+
+    mutableRolesConfig = mkOption {
+      type = bool;
+      default = true;
+      description = "Make roles.ini mutable (e.g. via the web interface)";
+    };
+
+    roles = mkOption {
+      default = {};
+      description = "Icingaweb 2 roles to define";
+      type = attrsOf (submodule ({ name, ... }: {
+        options = {
+          name = mkOption {
+            visible = false;
+            default = name;
+            type = str;
+            description = "Name of this role";
+          };
+
+          users = mkOption {
+            type = listOf str;
+            default = [];
+            description = "List of users that are assigned to the role";
+          };
+
+          groups = mkOption {
+            type = listOf str;
+            default = [];
+            description = "List of groups that are assigned to the role";
+          };
+
+          permissions = mkOption {
+            type = listOf str;
+            default = [];
+            example = [ "application/share/navigation" "config/*" ];
+            description = "The permissions to grant";
+          };
+
+          extraAssignments = mkOption {
+            type = attrsOf (listOf str);
+            default = {};
+            example = { "monitoring/blacklist/properties" = [ "sla" "customer"]; };
+            description = "Additional assignments of this role";
+          };
+        };
+      }));
+    };
+  };
+
+  config = mkIf cfg.enable {
+    services.phpfpm.poolConfigs = mkIf (cfg.pool == poolName) {
+      "${poolName}" = ''
+        listen = "${phpfpmSocketName}"
+        listen.owner = nginx
+        listen.group = nginx
+        listen.mode = 0600
+        user = icingaweb2
+        pm = dynamic
+        pm.max_children = 75
+        pm.start_servers = 2
+        pm.min_spare_servers = 2
+        pm.max_spare_servers = 10
+      '';
+    };
+
+    services.phpfpm.phpOptions = mkIf (cfg.pool == "${poolName}")
+      ''
+        extension = ${pkgs.phpPackages.imagick}/lib/php/extensions/imagick.so
+        date.timezone = "${cfg.timezone}"
+      '';
+
+    systemd.services."phpfpm-${poolName}".serviceConfig.ReadWritePaths = [ "/etc/icingaweb2" ];
+
+    services.nginx = {
+      enable = true;
+      virtualHosts = mkIf (cfg.virtualHost != null) {
+        "${cfg.virtualHost}" = {
+          root = "${pkgs.icingaweb2}/public";
+
+          extraConfig = ''
+            index index.php;
+            try_files $uri $uri/ /index.php$is_args$args;
+          '';
+
+          locations."~ ..*/.*.php$".extraConfig = ''
+            return 403;
+          '';
+
+          locations."~ ^/index.php(.*)$".extraConfig = ''
+            fastcgi_intercept_errors on;
+            fastcgi_index index.php;
+            include ${config.services.nginx.package}/conf/fastcgi.conf;
+            try_files $uri =404;
+            fastcgi_split_path_info ^(.+\.php)(/.+)$;
+            fastcgi_pass unix:${phpfpmSocketName};
+            fastcgi_param SCRIPT_FILENAME ${pkgs.icingaweb2}/public/index.php;
+          '';
+        };
+      };
+    };
+
+    # /etc/icingaweb2
+    environment.etc = let
+      doModule = name: optionalAttrs (cfg.modules."${name}".enable) { "icingaweb2/enabledModules/${name}".source = "${pkgs.icingaweb2}/modules/${name}"; };
+    in {}
+      # Module packages
+      // (mapAttrs' (k: v: nameValuePair "icingaweb2/enabledModules/${k}" { source = v; }) cfg.modulePackages)
+      # Built-in modules
+      // doModule "doc"
+      // doModule "migrate"
+      // doModule "setup"
+      // doModule "test"
+      // doModule "translation"
+      # Configs
+      // optionalAttrs (!cfg.generalConfig.mutable) { "icingaweb2/config.ini".text = configIni; }
+      // optionalAttrs (!cfg.mutableResources) { "icingaweb2/resources.ini".text = resourcesIni; }
+      // optionalAttrs (!cfg.mutableAuthConfig) { "icingaweb2/authentication.ini".text = authenticationIni; }
+      // optionalAttrs (!cfg.mutableGroupsConfig) { "icingaweb2/groups.ini".text = groupsIni; }
+      // optionalAttrs (!cfg.mutableRolesConfig) { "icingaweb2/roles.ini".text = rolesIni; };
+
+    # User and group
+    users.groups.icingaweb2 = {};
+    users.users.icingaweb2 = {
+      description = "Icingaweb2 service user";
+      group = "icingaweb2";
+      isSystemUser = true;
+    };
+  };
+}
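
For context, a minimal sketch of how the options added above could be used (host name, resource name and user names are placeholders, not values from this change; the declarative authentications and roles are only written out because the corresponding mutable*Config options are set to false):

    {
      services.icingaweb2 = {
        enable = true;
        virtualHost = "icinga.example.org";   # placeholder host name
        timezone = "UTC";

        mutableAuthConfig = false;            # write authentication.ini from `authentications`
        authentications.icingaweb = {
          backend = "db";
          resource = "icingaweb_db";          # must match an entry in `resources`
        };

        mutableRolesConfig = false;           # write roles.ini from `roles`
        roles.admins = {
          users = [ "admin" ];
          permissions = [ "*" ];
        };
      };
    }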
diff --git a/nixos/modules/services/web-apps/icingaweb2/module-monitoring.nix b/nixos/modules/services/web-apps/icingaweb2/module-monitoring.nix
new file mode 100644
index 00000000000..167e5e38956
--- /dev/null
+++ b/nixos/modules/services/web-apps/icingaweb2/module-monitoring.nix
@@ -0,0 +1,157 @@
+{ config, lib, pkgs, ... }: with lib; let
+  cfg = config.services.icingaweb2.modules.monitoring;
+
+  configIni = ''
+    [security]
+    protected_customvars = "${concatStringsSep "," cfg.generalConfig.protectedVars}"
+  '';
+
+  backendsIni = let
+    formatBool = b: if b then "1" else "0";
+  in concatStringsSep "\n" (mapAttrsToList (name: config: ''
+    [${name}]
+    type = "ido"
+    resource = "${config.resource}"
+    disabled = "${formatBool config.disabled}"
+  '') cfg.backends);
+
+  transportsIni = concatStringsSep "\n" (mapAttrsToList (name: config: ''
+    [${name}]
+    type = "${config.type}"
+    ${optionalString (config.instance != null) ''instance = "${config.instance}"''}
+    ${optionalString (config.type == "local" || config.type == "remote") ''path = "${config.path}"''}
+    ${optionalString (config.type != "local") ''
+      host = "${config.host}"
+      ${optionalString (config.port != null) ''port = "${toString config.port}"''}
+      user${optionalString (config.type == "api") "name"} = "${config.username}"
+    ''}
+    ${optionalString (config.type == "api") ''password = "${config.password}"''}
+    ${optionalString (config.type == "remote") ''resource = "${config.resource}"''}
+  '') cfg.transports);
+
+in {
+  options.services.icingaweb2.modules.monitoring = with types; {
+    enable = mkOption {
+      type = bool;
+      default = true;
+      description = "Whether to enable the icingaweb2 monitoring module.";
+    };
+
+    generalConfig = {
+      mutable = mkOption {
+        type = bool;
+        default = false;
+        description = "Make config.ini of the monitoring module mutable (e.g. via the web interface).";
+      };
+
+      protectedVars = mkOption {
+        type = listOf str;
+        default = [ "*pw*" "*pass*" "community" ];
+        description = "List of string patterns for custom variables which should be excluded from user’s view.";
+      };
+    };
+
+    mutableBackends = mkOption {
+      type = bool;
+      default = false;
+      description = "Make backends.ini of the monitoring module mutable (e.g. via the web interface).";
+    };
+
+    backends = mkOption {
+      default = { "icinga" = { resource = "icinga_ido"; }; };
+      description = "Monitoring backends to define";
+      type = attrsOf (submodule ({ name, ... }: {
+        options = {
+          name = mkOption {
+            visible = false;
+            default = name;
+            type = str;
+            description = "Name of this backend";
+          };
+
+          resource = mkOption {
+            type = str;
+            description = "Name of the IDO resource";
+          };
+
+          disabled = mkOption {
+            type = bool;
+            default = false;
+            description = "Disable this backend";
+          };
+        };
+      }));
+    };
+
+    mutableTransports = mkOption {
+      type = bool;
+      default = true;
+      description = "Make commandtransports.ini of the monitoring module mutable (e.g. via the web interface).";
+    };
+
+    transports = mkOption {
+      default = {};
+      description = "Command transports to define";
+      type = attrsOf (submodule ({ name, ... }: {
+        options = {
+          name = mkOption {
+            visible = false;
+            default = name;
+            type = str;
+            description = "Name of this transport";
+          };
+
+          type = mkOption {
+            type = enum [ "api" "local" "remote" ];
+            default = "api";
+            description = "Type of  this transport";
+          };
+
+          instance = mkOption {
+            type = nullOr str;
+            default = null;
+            description = "Assign a icinga instance to this transport";
+          };
+
+          path = mkOption {
+            type = str;
+            description = "Path to the socket for local or remote transports";
+          };
+
+          host = mkOption {
+            type = str;
+            description = "Host for the api or remote transport";
+          };
+
+          port = mkOption {
+            type = nullOr str;
+            default = null;
+            description = "Port to connect to for the api or remote transport";
+          };
+
+          username = mkOption {
+            type = str;
+            description = "Username for the api or remote transport";
+          };
+
+          password = mkOption {
+            type = str;
+            description = "Password for the api transport";
+          };
+
+          resource = mkOption {
+            type = str;
+            description = "SSH identity resource for the remote transport";
+          };
+        };
+      }));
+    };
+  };
+
+  config = mkIf (config.services.icingaweb2.enable && cfg.enable) {
+    environment.etc = { "icingaweb2/enabledModules/monitoring" = { source = "${pkgs.icingaweb2}/modules/monitoring"; }; }
+      // optionalAttrs (!cfg.generalConfig.mutable) { "icingaweb2/modules/monitoring/config.ini".text = configIni; }
+      // optionalAttrs (!cfg.mutableBackends) { "icingaweb2/modules/monitoring/backends.ini".text = backendsIni; }
+      // optionalAttrs (!cfg.mutableTransports) { "icingaweb2/modules/monitoring/commandtransports.ini".text = transportsIni; };
+  };
+}
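
A usage sketch for the monitoring module added here (resource name, host and credentials are placeholders; commandtransports.ini is only generated declaratively because mutableTransports is set to false, while backends.ini is declarative by default):

    {
      services.icingaweb2.modules.monitoring = {
        enable = true;
        backends.icinga.resource = "icinga_ido";   # name of an IDO resource

        mutableTransports = false;                 # write commandtransports.ini declaratively
        transports.api = {
          type = "api";
          host = "localhost";
          username = "icingaweb2";
          password = "changeme";                   # placeholder; note this ends up world-readable in the store
        };
      };
    }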
diff --git a/nixos/modules/services/web-apps/matomo-doc.xml b/nixos/modules/services/web-apps/matomo-doc.xml
index 510a335edc3..20d2de9f418 100644
--- a/nixos/modules/services/web-apps/matomo-doc.xml
+++ b/nixos/modules/services/web-apps/matomo-doc.xml
@@ -12,15 +12,15 @@
  An automatic setup is not supported by Matomo, so you need to configure Matomo
   itself in the browser-based Matomo setup.
  </para>
+
  <section xml:id="module-services-matomo-database-setup">
   <title>Database Setup</title>
-
   <para>
   You also need to configure a MariaDB or MySQL database and user for Matomo
    yourself, and enter those credentials in your browser. You can use
    passwordless database authentication via the UNIX_SOCKET authentication
    plugin with the following SQL commands:
-<programlisting>
+   <programlisting>
         # For MariaDB
         INSTALL PLUGIN unix_socket SONAME 'auth_socket';
         CREATE DATABASE matomo;
@@ -32,7 +32,7 @@
         CREATE DATABASE matomo;
         CREATE USER 'matomo'@'localhost' IDENTIFIED WITH auth_socket;
         GRANT ALL PRIVILEGES ON matomo.* TO 'matomo'@'localhost';
-      </programlisting>
+   </programlisting>
    Then fill in <literal>matomo</literal> as database user and database name,
    and leave the password field blank. This authentication works by allowing
    only the <literal>matomo</literal> unix user to authenticate as the
@@ -46,9 +46,30 @@
    database is not on the same host.
   </para>
  </section>
+
+ <section xml:id="module-services-matomo-archive-processing">
+  <title>Archive Processing</title>
+  <para>
+   This module comes with the systemd service <literal>matomo-archive-processing.service</literal>
+   and a timer that automatically triggers archive processing every hour.
+   This means that you can safely
+   <link xlink:href="https://matomo.org/docs/setup-auto-archiving/#disable-browser-triggers-for-matomo-archiving-and-limit-matomo-reports-to-updating-every-hour">
+    disable browser triggers for Matomo archiving
+   </link> at <literal>Administration > System > General Settings</literal>.
+  </para>
+  <para>
+   With automatic archive processing, you can now also enable the option to
+   <link xlink:href="https://matomo.org/docs/privacy/#step-2-delete-old-visitors-logs">
+    delete old visitor logs
+   </link> at <literal>Administration > System > Privacy</literal>,
+   but make sure that you run <literal>systemctl start matomo-archive-processing.service</literal>
+   at least once without errors if you have already collected data before,
+   so that the reports get archived before the source data gets deleted.
+  </para>
+ </section>
+
  <section xml:id="module-services-matomo-backups">
   <title>Backup</title>
-
   <para>
    You only need to take backups of your MySQL database and the
    <filename>/var/lib/matomo/config/config.ini.php</filename> file. Use a user
@@ -57,9 +78,9 @@
    <link xlink:href="https://matomo.org/faq/how-to-install/faq_138/" />.
   </para>
  </section>
+
  <section xml:id="module-services-matomo-issues">
   <title>Issues</title>
-
   <itemizedlist>
    <listitem>
     <para>
@@ -76,6 +97,7 @@
    </listitem>
   </itemizedlist>
  </section>
+
  <section xml:id="module-services-matomo-other-web-servers">
 <title>Using Web Servers other than nginx</title>
 
diff --git a/nixos/modules/services/web-apps/matomo.nix b/nixos/modules/services/web-apps/matomo.nix
index 9fddf832074..14aca45a342 100644
--- a/nixos/modules/services/web-apps/matomo.nix
+++ b/nixos/modules/services/web-apps/matomo.nix
@@ -23,20 +23,24 @@ in {
   options = {
     services.matomo = {
       # NixOS PR for database setup: https://github.com/NixOS/nixpkgs/pull/6963
-      # matomo issue for automatic matomo setup: https://github.com/matomo-org/matomo/issues/10257
-      # TODO: find a nice way to do this when more NixOS MySQL and / or matomo automatic setup stuff is implemented.
+      # Matomo issue for automatic Matomo setup: https://github.com/matomo-org/matomo/issues/10257
+      # TODO: find a nice way to do this when more NixOS MySQL and / or Matomo automatic setup stuff is implemented.
       enable = mkOption {
         type = types.bool;
         default = false;
         description = ''
-          Enable matomo web analytics with php-fpm backend.
+          Enable Matomo web analytics with php-fpm backend.
           Either the nginx option or the webServerUser option is mandatory.
         '';
       };
 
       package = mkOption {
         type = types.package;
-        description = "Matomo package to use";
+        description = ''
+          Matomo package for the service to use.
+          This can be used to point to newer releases from nixos-unstable,
+          as they don't get backported if they are not security-relevant.
+        '';
         default = pkgs.matomo;
         defaultText = "pkgs.matomo";
       };
@@ -45,12 +49,25 @@ in {
         type = types.nullOr types.str;
         default = null;
         example = "lighttpd";
-        # TODO: piwik.php might get renamed to matomo.php in future releases
         description = ''
-          Name of the web server user that forwards requests to the ${phpSocket} fastcgi socket for matomo if the nginx
+          Name of the web server user that forwards requests to the ${phpSocket} fastcgi socket for Matomo if the nginx
           option is not used. Either this option or the nginx option is mandatory.
           If you want to use another webserver than nginx, you need to set this to that server's user
-          and pass fastcgi requests to `index.php` and `piwik.php` to this socket.
+          and pass fastcgi requests to `index.php`, `matomo.php` and `piwik.php` (legacy name) to this socket.
+        '';
+      };
+
+      periodicArchiveProcessing = mkOption {
+        type = types.bool;
+        default = true;
+        description = ''
+          Enable periodic archive processing, which generates aggregated reports from the visits.
+
+          This means that you can safely disable browser triggers for Matomo archiving,
+          and safely enable the deletion of old visitor logs.
+          Before deleting visitor logs, though,
+          make sure that you run <literal>systemctl start matomo-archive-processing.service</literal>
+          at least once without errors if you have already collected data before.
         '';
       };
 
@@ -69,7 +86,7 @@ in {
           catch_workers_output = yes
         '';
         description = ''
-          Settings for phpfpm's process manager. You might need to change this depending on the load for matomo.
+          Settings for phpfpm's process manager. You might need to change this depending on the load for Matomo.
         '';
       };
 
@@ -79,7 +96,7 @@ in {
             (import ../web-servers/nginx/vhost-options.nix { inherit config lib; })
             {
               # enable encryption by default,
-              # as sensitive login and matomo data should not be transmitted in clear text.
+              # as sensitive login and Matomo data should not be transmitted in clear text.
               options.forceSSL.default = true;
               options.enableACME.default = true;
             }
@@ -94,7 +111,7 @@ in {
           enableACME = false;
         };
         description = ''
-            With this option, you can customize an nginx virtualHost which already has sensible defaults for matomo.
+            With this option, you can customize an nginx virtualHost which already has sensible defaults for Matomo.
             Either this option or the webServerUser option is mandatory.
             Set this to {} to just enable the virtualHost if you don't need any customization.
             If enabled, then by default, the <option>serverName</option> is
@@ -124,29 +141,30 @@ in {
     };
     users.groups.${user} = {};
 
-    systemd.services.matomo_setup_update = {
-      # everything needs to set up and up to date before matomo php files are executed
+    systemd.services.matomo-setup-update = {
+      # everything needs to be set up and up to date before Matomo PHP files are executed
       requiredBy = [ "${phpExecutionUnit}.service" ];
       before = [ "${phpExecutionUnit}.service" ];
       # the update part of the script can only work if the database is already up and running
       requires = [ databaseService ];
       after = [ databaseService ];
       path = [ cfg.package ];
+      environment.PIWIK_USER_PATH = dataDir;
       serviceConfig = {
         Type = "oneshot";
         User = user;
+        # hide config.ini.php in particular from others
         UMask = "0007";
         # TODO: might get renamed to MATOMO_USER_PATH in future versions
-        Environment = "PIWIK_USER_PATH=${dataDir}";
         # chown + chmod in preStart needs root
         PermissionsStartOnly = true;
       };
+
       # correct ownership and permissions in case they're not correct anymore,
       # e.g. after restoring from backup or moving from another system.
       # Note that ${dataDir}/config/config.ini.php might contain the MySQL password.
       preStart = ''
-        # migrate data from piwik to matomo folder
+        # migrate data from the piwik directory to the Matomo directory
         if [ -d ${deprecatedDataDir} ]; then
           echo "Migrating from ${deprecatedDataDir} to ${dataDir}"
           mv -T ${deprecatedDataDir} ${dataDir}
@@ -155,7 +173,7 @@ in {
         chmod -R ug+rwX,o-rwx ${dataDir}
         '';
       script = ''
-            # Use User-Private Group scheme to protect matomo data, but allow administration / backup via matomo group
+            # Use User-Private Group scheme to protect Matomo data, but allow administration / backup via 'matomo' group
             # Copy config folder
             chmod g+s "${dataDir}"
             cp -r "${cfg.package}/config" "${dataDir}/"
@@ -169,8 +187,39 @@ in {
       '';
     };
 
+    # If this is run regularly via the timer,
+    # 'Browser trigger archiving' can be disabled in Matomo UI > Settings > General Settings.
+    systemd.services.matomo-archive-processing = {
+      description = "Archive Matomo reports";
+      # the archiving can only work if the database is already up and running
+      requires = [ databaseService ];
+      after = [ databaseService ];
+
+      # TODO: might get renamed to MATOMO_USER_PATH in future versions
+      environment.PIWIK_USER_PATH = dataDir;
+      serviceConfig = {
+        Type = "oneshot";
+        User = user;
+        UMask = "0007";
+        CPUSchedulingPolicy = "idle";
+        IOSchedulingClass = "idle";
+        ExecStart = "${cfg.package}/bin/matomo-console core:archive --url=https://${user}.${fqdn}";
+      };
+    };
+
+    systemd.timers.matomo-archive-processing = mkIf cfg.periodicArchiveProcessing {
+      description = "Automatically archive Matomo reports every hour";
+
+      wantedBy = [ "timers.target" ];
+      timerConfig = {
+        OnCalendar = "hourly";
+        Persistent = "yes";
+        AccuracySec = "10m";
+      };
+    };
+
     systemd.services.${phpExecutionUnit} = {
-      # stop phpfpm on package upgrade, do database upgrade via matomo_setup_update, and then restart
+      # stop phpfpm on package upgrade, do database upgrade via matomo-setup-update, and then restart
       restartTriggers = [ cfg.package ];
       # stop config.ini.php from getting written with read permission for others
       serviceConfig.UMask = "0007";
@@ -200,13 +249,13 @@ in {
       # https://fralef.me/piwik-hardening-with-nginx-and-php-fpm.html
       # https://github.com/perusio/piwik-nginx
       "${user}.${fqdn}" = mkMerge [ cfg.nginx {
+        # don't allow the root to be overridden easily, as that will almost certainly break Matomo.
+        # don't allow to override the root easily, as it will almost certainly break Matomo.
         # disadvantage: not shown as default in docs.
         root = mkForce "${cfg.package}/share";
 
         # define locations here instead of as the submodule option's default
         # so that they can easily be extended with additional locations if required
-        # without needing to redefine the matomo ones.
+        # without needing to redefine the Matomo ones.
         # disadvantage: not shown as default in docs.
         locations."/" = {
           index = "index.php";
@@ -215,8 +264,11 @@ in {
         locations."= /index.php".extraConfig = ''
           fastcgi_pass unix:${phpSocket};
         '';
-        # TODO: might get renamed to matomo.php in future versions
-        # allow piwik.php for tracking
+        # allow matomo.php for tracking
+        locations."= /matomo.php".extraConfig = ''
+          fastcgi_pass unix:${phpSocket};
+        '';
+        # allow piwik.php for tracking (deprecated name)
         locations."= /piwik.php".extraConfig = ''
           fastcgi_pass unix:${phpSocket};
         '';
@@ -237,8 +289,11 @@ in {
         locations."= /robots.txt".extraConfig = ''
           return 200 "User-agent: *\nDisallow: /\n";
         '';
-        # TODO: might get renamed to matomo.js in future versions
-        # let browsers cache piwik.js
+        # let browsers cache matomo.js
+        locations."= /matomo.js".extraConfig = ''
+          expires 1M;
+        '';
+        # let browsers cache piwik.js (deprecated name)
         locations."= /piwik.js".extraConfig = ''
           expires 1M;
         '';
diff --git a/nixos/modules/services/web-apps/tt-rss.nix b/nixos/modules/services/web-apps/tt-rss.nix
index e043ce4b581..f7a3daa5fdd 100644
--- a/nixos/modules/services/web-apps/tt-rss.nix
+++ b/nixos/modules/services/web-apps/tt-rss.nix
@@ -40,7 +40,7 @@ let
         else if (cfg.database.passwordFile != null) then
           "file_get_contents('${cfg.database.passwordFile}')"
         else
-          ""
+          "''"
       });
       define('DB_PORT', '${toString dbPort}');
 
@@ -53,7 +53,17 @@ let
       define('SINGLE_USER_MODE', ${boolToString cfg.singleUserMode});
 
       define('SIMPLE_UPDATE_MODE', ${boolToString cfg.simpleUpdateMode});
-      define('CHECK_FOR_UPDATES', ${boolToString cfg.checkForUpdates});
+
+      // Never check for updates - the running version of the code should be
+      // controlled entirely by the version of TT-RSS active in the current Nix
+      // profile. If TT-RSS updates itself to a version requiring a database
+      // schema upgrade, and then the systemd tt-rss.service is restarted, the
+      // old code copied from the Nix store will overwrite the updated version,
+      // causing the code to detect the need for a schema "upgrade" (since the
+      // schema version in the database is different from the one in the code), but the
+      // update schema operation in TT-RSS will do nothing because the schema
+      // version in the database is newer than that in the code.
+      define('CHECK_FOR_UPDATES', false);
 
       define('FORCE_ARTICLE_PURGE', ${toString cfg.forceArticlePurge});
       define('SESSION_COOKIE_LIFETIME', ${toString cfg.sessionCookieLifetime});
@@ -414,14 +424,6 @@ let
         '';
       };
 
-      checkForUpdates = mkOption {
-        type = types.bool;
-        default = true;
-        description = ''
-          Check for updates automatically if running Git version
-        '';
-      };
-
       enableGZipOutput = mkOption {
         type = types.bool;
         default = true;
@@ -489,6 +491,14 @@ let
     };
   };
 
+  imports = [
+    (mkRemovedOptionModule ["services" "tt-rss" "checkForUpdates"] ''
+      This option was removed because setting this to true will cause TT-RSS
+      to be unable to start if an automatic update of the code in
+      services.tt-rss.root leads to a database schema upgrade that is not
+      supported by the code active in the Nix store.
+    '')
+  ];
 
   ###### implementation
 
@@ -552,7 +562,7 @@ let
           callSql = e:
               if cfg.database.type == "pgsql" then ''
                   ${optionalString (cfg.database.password != null) "PGPASSWORD=${cfg.database.password}"} \
-                  ${optionalString (cfg.database.passwordFile != null) "PGPASSWORD=$(cat ${cfg.database.passwordFile}"}) \
+                  ${optionalString (cfg.database.passwordFile != null) "PGPASSWORD=$(cat ${cfg.database.passwordFile})"} \
                   ${pkgs.sudo}/bin/sudo -u ${cfg.user} ${config.services.postgresql.package}/bin/psql \
                     -U ${cfg.database.user} \
                     ${optionalString (cfg.database.host != null) "-h ${cfg.database.host} --port ${toString dbPort}"} \
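The corrected PGPASSWORD quoting above matters when the database password is supplied via a file; a minimal sketch, assuming the module's usual enable option (the secret path is a placeholder and should point outside the Nix store):

    {
      services.tt-rss = {
        enable = true;
        database = {
          type = "pgsql";
          passwordFile = "/run/keys/tt-rss-db-password";   # placeholder secret path
        };
      };
    }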
diff --git a/nixos/modules/services/web-servers/apache-httpd/default.nix b/nixos/modules/services/web-servers/apache-httpd/default.nix
index bb962334786..3fd19d425c7 100644
--- a/nixos/modules/services/web-servers/apache-httpd/default.nix
+++ b/nixos/modules/services/web-servers/apache-httpd/default.nix
@@ -376,6 +376,8 @@ let
     Include ${httpd}/conf/extra/httpd-multilang-errordoc.conf
     Include ${httpd}/conf/extra/httpd-languages.conf
 
+    TraceEnable off
+
     ${if enableSSL then sslConf else ""}
 
     # Fascist default - deny access to everything.
@@ -495,8 +497,8 @@ in
         default = false;
         description = ''
           If enabled, each virtual host gets its own
-          <filename>access_log</filename> and
-          <filename>error_log</filename>, namely suffixed by the
+          <filename>access.log</filename> and
+          <filename>error.log</filename>, namely suffixed by the
           <option>hostName</option> of the virtual host.
         '';
       };
@@ -639,8 +641,8 @@ in
 
       sslProtocols = mkOption {
         type = types.str;
-        default = "All -SSLv2 -SSLv3";
-        example = "All -SSLv2 -SSLv3 -TLSv1";
+        default = "All -SSLv2 -SSLv3 -TLSv1";
+        example = "All -SSLv2 -SSLv3";
         description = "Allowed SSL/TLS protocol versions.";
       };
     }
@@ -684,6 +686,9 @@ in
       ''
         ; Needed for PHP's mail() function.
         sendmail_path = sendmail -t -i
+
+        ; Don't advertise PHP
+        expose_php = off
       '' + optionalString (!isNull config.time.timeZone) ''
 
         ; Apparently PHP doesn't use $TZ.
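Since the default now disables TLSv1, deployments that still have to serve legacy clients can restore the previous behaviour via the option's example value; a sketch, assuming the option is set at the services.httpd level as in this module:

    {
      services.httpd.sslProtocols = "All -SSLv2 -SSLv3";   # re-allows TLSv1 (the old default)
    }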
diff --git a/nixos/modules/services/web-servers/phpfpm/default.nix b/nixos/modules/services/web-servers/phpfpm/default.nix
index 152c89a2cae..97c730061bd 100644
--- a/nixos/modules/services/web-servers/phpfpm/default.nix
+++ b/nixos/modules/services/web-servers/phpfpm/default.nix
@@ -14,11 +14,13 @@ let
 
   mapPoolConfig = n: p: {
     phpPackage = cfg.phpPackage;
+    phpOptions = cfg.phpOptions;
     config = p;
   };
 
   mapPool = n: p: {
     phpPackage = p.phpPackage;
+    phpOptions = p.phpOptions;
     config = ''
       listen = ${p.listen}
       ${p.extraConfig}
@@ -35,8 +37,8 @@ let
     ${conf}
   '';
 
-  phpIni = pkgs.runCommand "php.ini" {
-    inherit (cfg) phpPackage phpOptions;
+  phpIni = pool: pkgs.runCommand "php.ini" {
+    inherit (pool) phpPackage phpOptions;
     nixDefaults = ''
       sendmail_path = "/run/wrappers/bin/sendmail -t -i"
     '';
@@ -156,6 +158,7 @@ in {
         '';
         serviceConfig = let
           cfgFile = fpmCfgFile pool poolConfig.config;
+          iniFile = phpIni poolConfig;
         in {
           Slice = "phpfpm.slice";
           PrivateDevices = true;
@@ -164,7 +167,7 @@ in {
           # XXX: We need AF_NETLINK to make the sendmail SUID binary from postfix work
           RestrictAddressFamilies = "AF_UNIX AF_INET AF_INET6 AF_NETLINK";
           Type = "notify";
-          ExecStart = "${poolConfig.phpPackage}/bin/php-fpm -y ${cfgFile} -c ${phpIni}";
+          ExecStart = "${poolConfig.phpPackage}/bin/php-fpm -y ${cfgFile} -c ${iniFile}";
           ExecReload = "${pkgs.coreutils}/bin/kill -USR2 $MAINPID";
         };
       }
diff --git a/nixos/modules/services/web-servers/phpfpm/pool-options.nix b/nixos/modules/services/web-servers/phpfpm/pool-options.nix
index 40c83cddb95..d9ad7eff71f 100644
--- a/nixos/modules/services/web-servers/phpfpm/pool-options.nix
+++ b/nixos/modules/services/web-servers/phpfpm/pool-options.nix
@@ -25,6 +25,15 @@ with lib; {
       '';
     };
 
+    phpOptions = mkOption {
+      type = types.lines;
+      default = fpmCfg.phpOptions;
+      defaultText = "config.services.phpfpm.phpOptions";
+      description = ''
+        "Options appended to the PHP configuration file <filename>php.ini</filename> used for this PHP-FPM pool."
+      '';
+    };
+
     extraConfig = mkOption {
       type = types.lines;
       example = ''
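With the new per-pool phpOptions, one pool can override the global PHP settings while the others keep inheriting them; a sketch (pool name, socket path and option values are placeholders):

    {
      services.phpfpm.phpOptions = ''
        date.timezone = "UTC"
      '';
      services.phpfpm.pools.example = {
        listen = "/run/phpfpm/example.sock";   # placeholder socket path
        phpOptions = ''
          date.timezone = "UTC"
          memory_limit = 512M                  # pool-specific settings; replace the global phpOptions
        '';
        extraConfig = ''
          pm = dynamic
          pm.max_children = 4
          pm.start_servers = 1
          pm.min_spare_servers = 1
          pm.max_spare_servers = 2
        '';
      };
    }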
diff --git a/nixos/modules/services/x11/desktop-managers/gnome3.nix b/nixos/modules/services/x11/desktop-managers/gnome3.nix
index 7544ba4638a..31ff60019ae 100644
--- a/nixos/modules/services/x11/desktop-managers/gnome3.nix
+++ b/nixos/modules/services/x11/desktop-managers/gnome3.nix
@@ -151,6 +151,7 @@ in {
     services.colord.enable = mkDefault true;
     services.packagekit.enable = mkDefault true;
     hardware.bluetooth.enable = mkDefault true;
+    services.hardware.bolt.enable = mkDefault true;
     services.xserver.libinput.enable = mkDefault true; # for controlling touchpad settings via gnome control center
     services.udev.packages = [ pkgs.gnome3.gnome-settings-daemon ];
     systemd.packages = [ pkgs.gnome3.vino ];
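The Thunderbolt daemon enabled here for GNOME can also be turned on independently of the desktop environment; a one-line sketch:

    { services.hardware.bolt.enable = true; }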
diff --git a/nixos/modules/system/boot/systemd.nix b/nixos/modules/system/boot/systemd.nix
index 58812bf33d9..9fdef0251d7 100644
--- a/nixos/modules/system/boot/systemd.nix
+++ b/nixos/modules/system/boot/systemd.nix
@@ -650,6 +650,18 @@ in
       '';
     };
 
+    services.logind.lidSwitchExternalPower = mkOption {
+      default = config.services.logind.lidSwitch;
+      example = "ignore";
+      type = logindHandlerType;
+
+      description = ''
+        Specifies what to do when the laptop lid is closed and the system is
+        on external power. By default, the action specified in
+        <option>services.logind.lidSwitch</option> is used.
+      '';
+    };
+
     systemd.user.extraConfig = mkOption {
       default = "";
       type = types.lines;
@@ -797,6 +809,7 @@ in
         KillUserProcesses=${if config.services.logind.killUserProcesses then "yes" else "no"}
         HandleLidSwitch=${config.services.logind.lidSwitch}
         HandleLidSwitchDocked=${config.services.logind.lidSwitchDocked}
+        HandleLidSwitchExternalPower=${config.services.logind.lidSwitchExternalPower}
         ${config.services.logind.extraConfig}
       '';
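A sketch combining the new option with the existing one, e.g. to suspend on lid close only when running on battery:

    {
      services.logind.lidSwitch = "suspend";
      services.logind.lidSwitchExternalPower = "ignore";   # stay awake while on external power
    }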
 
diff --git a/nixos/modules/virtualisation/vmware-guest.nix b/nixos/modules/virtualisation/vmware-guest.nix
index 15c78f14c52..d18778f8158 100644
--- a/nixos/modules/virtualisation/vmware-guest.nix
+++ b/nixos/modules/virtualisation/vmware-guest.nix
@@ -3,19 +3,17 @@
 with lib;
 
 let
-  cfg = config.services.vmwareGuest;
+  cfg = config.virtualisation.vmware.guest;
   open-vm-tools = if cfg.headless then pkgs.open-vm-tools-headless else pkgs.open-vm-tools;
   xf86inputvmmouse = pkgs.xorg.xf86inputvmmouse;
 in
 {
-  options = {
-    services.vmwareGuest = {
-      enable = mkEnableOption "VMWare Guest Support";
-      headless = mkOption {
-        type = types.bool;
-        default = false;
-        description = "Whether to disable X11-related features.";
-      };
+  options.virtualisation.vmware.guest = {
+    enable = mkEnableOption "VMWare Guest Support";
+    headless = mkOption {
+      type = types.bool;
+      default = false;
+      description = "Whether to disable X11-related features.";
     };
   };
 
@@ -25,6 +23,8 @@ in
       message = "VMWare guest is not currently supported on ${pkgs.stdenv.hostPlatform.system}";
     } ];
 
+    boot.initrd.kernelModules = [ "vmw_pvscsi" ];
+
     environment.systemPackages = [ open-vm-tools ];
 
     systemd.services.vmware =
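After this rename, guest support is enabled under the new option path; a sketch (headless is only needed on machines without X11):

    {
      virtualisation.vmware.guest.enable = true;
      virtualisation.vmware.guest.headless = true;   # skip X11-related features
    }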
diff --git a/nixos/tests/all-tests.nix b/nixos/tests/all-tests.nix
index fe4ca5772c6..3d8fea95a50 100644
--- a/nixos/tests/all-tests.nix
+++ b/nixos/tests/all-tests.nix
@@ -73,6 +73,7 @@ in
   ferm = handleTest ./ferm.nix {};
   firefox = handleTest ./firefox.nix {};
   firewall = handleTest ./firewall.nix {};
+  flannel = handleTestOn ["x86_64-linux"] ./flannel.nix {};
   flatpak = handleTest ./flatpak.nix {};
   fsck = handleTest ./fsck.nix {};
   fwupd = handleTestOn ["x86_64-linux"] ./fwupd.nix {}; # libsmbios is unsupported on aarch64
diff --git a/nixos/tests/flannel.nix b/nixos/tests/flannel.nix
index fb66fe28209..0b261a68477 100644
--- a/nixos/tests/flannel.nix
+++ b/nixos/tests/flannel.nix
@@ -21,8 +21,9 @@ import ./make-test.nix ({ pkgs, ...} : rec {
       services = {
         etcd = {
           enable = true;
-          listenClientUrls = ["http://etcd:2379"];
-          listenPeerUrls = ["http://etcd:2380"];
+          listenClientUrls = ["http://0.0.0.0:2379"]; # requires an IP address for binding
+          listenPeerUrls = ["http://0.0.0.0:2380"]; # requires an IP address for binding
+          advertiseClientUrls = ["http://etcd:2379"];
           initialAdvertisePeerUrls = ["http://etcd:2379"];
           initialCluster = ["etcd=http://etcd:2379"];
         };
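The distinction made in this fix also applies outside the test: listen URLs must be addresses the daemon can bind to, while advertise URLs may use host names; a sketch mirroring the test configuration:

    {
      services.etcd = {
        enable = true;
        listenClientUrls = [ "http://0.0.0.0:2379" ];   # bind address, not a host name
        listenPeerUrls = [ "http://0.0.0.0:2380" ];
        advertiseClientUrls = [ "http://etcd:2379" ];   # host names are fine when advertising
      };
    }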
diff --git a/nixos/tests/kubernetes/base.nix b/nixos/tests/kubernetes/base.nix
index 9d77be13175..ec1a75e74c4 100644
--- a/nixos/tests/kubernetes/base.nix
+++ b/nixos/tests/kubernetes/base.nix
@@ -10,7 +10,6 @@ let
   mkKubernetesBaseTest =
     { name, domain ? "my.zyx", test, machines
     , pkgs ? import <nixpkgs> { inherit system; }
-    , certs ? import ./certs.nix { inherit pkgs; externalDomain = domain; kubelets = attrNames machines; }
     , extraConfiguration ? null }:
     let
       masterName = head (filter (machineName: any (role: role == "master") machines.${machineName}.roles) (attrNames machines));
@@ -20,6 +19,10 @@ let
         ${master.ip}  api.${domain}
         ${concatMapStringsSep "\n" (machineName: "${machines.${machineName}.ip}  ${machineName}.${domain}") (attrNames machines)}
       '';
+      kubectl = with pkgs; runCommand "wrap-kubectl" { buildInputs = [ makeWrapper ]; } ''
+        mkdir -p $out/bin
+        makeWrapper ${pkgs.kubernetes}/bin/kubectl $out/bin/kubectl --set KUBECONFIG "/etc/kubernetes/cluster-admin.kubeconfig"
+      '';
     in makeTest {
       inherit name;
 
@@ -27,6 +30,7 @@ let
         { config, pkgs, lib, nodes, ... }:
           mkMerge [
             {
+              boot.postBootCommands = "rm -fr /var/lib/kubernetes/secrets /tmp/shared/*";
               virtualisation.memorySize = mkDefault 1536;
               virtualisation.diskSize = mkDefault 4096;
               networking = {
@@ -45,34 +49,25 @@ let
                 };
               };
               programs.bash.enableCompletion = true;
-              environment.variables = {
-                ETCDCTL_CERT_FILE = "${certs.worker}/etcd-client.pem";
-                ETCDCTL_KEY_FILE = "${certs.worker}/etcd-client-key.pem";
-                ETCDCTL_CA_FILE = "${certs.worker}/ca.pem";
-                ETCDCTL_PEERS = "https://etcd.${domain}:2379";
-              };
+              environment.systemPackages = [ kubectl ];
               services.flannel.iface = "eth1";
-              services.kubernetes.apiserver.advertiseAddress = master.ip;
+              services.kubernetes = {
+                addons.dashboard.enable = true;
+
+                easyCerts = true;
+                inherit (machine) roles;
+                apiserver = {
+                  securePort = 443;
+                  advertiseAddress = master.ip;
+                };
+                masterAddress = "${masterName}.${config.networking.domain}";
+              };
             }
             (optionalAttrs (any (role: role == "master") machine.roles) {
               networking.firewall.allowedTCPPorts = [
-                2379 2380  # etcd
                 443 # kubernetes apiserver
               ];
-              services.etcd = {
-                enable = true;
-                certFile = "${certs.master}/etcd.pem";
-                keyFile = "${certs.master}/etcd-key.pem";
-                trustedCaFile = "${certs.master}/ca.pem";
-                peerClientCertAuth = true;
-                listenClientUrls = ["https://0.0.0.0:2379"];
-                listenPeerUrls = ["https://0.0.0.0:2380"];
-                advertiseClientUrls = ["https://etcd.${config.networking.domain}:2379"];
-                initialCluster = ["${masterName}=https://etcd.${config.networking.domain}:2380"];
-                initialAdvertisePeerUrls = ["https://etcd.${config.networking.domain}:2380"];
-              };
             })
-            (import ./kubernetes-common.nix { inherit (machine) roles; inherit pkgs config certs; })
             (optionalAttrs (machine ? "extraConfiguration") (machine.extraConfiguration { inherit config pkgs lib nodes; }))
             (optionalAttrs (extraConfiguration != null) (extraConfiguration { inherit config pkgs lib nodes; }))
           ]
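Outside the test harness, the same easyCerts-based setup could be written as follows (domain, address and roles are placeholders for a single combined master/node machine):

    {
      services.kubernetes = {
        easyCerts = true;                     # generate the cluster PKI automatically
        roles = [ "master" "node" ];
        masterAddress = "machine1.my.zyx";    # placeholder FQDN of the master
        apiserver = {
          securePort = 443;
          advertiseAddress = "192.168.1.1";   # placeholder IP of the master
        };
        addons.dashboard.enable = true;
      };
    }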
diff --git a/nixos/tests/kubernetes/certs.nix b/nixos/tests/kubernetes/certs.nix
deleted file mode 100644
index 85e92f6330c..00000000000
--- a/nixos/tests/kubernetes/certs.nix
+++ /dev/null
@@ -1,219 +0,0 @@
-{
-  pkgs ? import <nixpkgs> {},
-  externalDomain ? "myawesomecluster.cluster.yourdomain.net",
-  serviceClusterIp ? "10.0.0.1",
-  kubelets,
-  ...
-}:
-let
-   runWithCFSSL = name: cmd:
-     let secrets = pkgs.runCommand "${name}-cfss.json" {
-         buildInputs = [ pkgs.cfssl pkgs.jq ];
-         outputs = [ "out" "cert" "key" "csr" ];
-       }
-       ''
-         (
-           echo "${cmd}"
-           cfssl ${cmd} > tmp
-           cat tmp | jq -r .key > $key
-           cat tmp | jq -r .cert > $cert
-           cat tmp | jq -r .csr > $csr
-
-           touch $out
-         ) 2>&1 | fold -w 80 -s
-       '';
-     in {
-       key = secrets.key;
-       cert = secrets.cert;
-       csr = secrets.csr;
-     };
-
-   writeCFSSL = content:
-     pkgs.runCommand content.name {
-      buildInputs = [ pkgs.cfssl pkgs.jq ];
-     } ''
-       mkdir -p $out
-       cd $out
-
-       json=${pkgs.lib.escapeShellArg (builtins.toJSON content)}
-
-       # for a given $field in the $json, treat the associated value as a
-       # file path and substitute the contents thereof into the $json
-       # object.
-       expandFileField() {
-         local field=$1
-         if jq -e --arg field "$field" 'has($field)'; then
-           local path="$(echo "$json" | jq -r ".$field")"
-           json="$(echo "$json" | jq --arg val "$(cat "$path")" ".$field = \$val")"
-         fi
-       }
-
-       expandFileField key
-       expandFileField ca
-       expandFileField cert
-
-       echo "$json" | cfssljson -bare ${content.name}
-     '';
-
-  noCSR = content: pkgs.lib.filterAttrs (n: v: n != "csr") content;
-  noKey = content: pkgs.lib.filterAttrs (n: v: n != "key") content;
-
-  writeFile = content:
-    if pkgs.lib.isDerivation content
-    then content
-    else pkgs.writeText "content" (builtins.toJSON content);
-
-  createServingCertKey = { ca, cn, hosts? [], size ? 2048, name ? cn }:
-    noCSR (
-      (runWithCFSSL name "gencert -ca=${writeFile ca.cert} -ca-key=${writeFile ca.key} -profile=server -config=${writeFile ca.config} ${writeFile {
-        CN = cn;
-        hosts = hosts;
-        key = { algo = "rsa"; inherit size; };
-      }}") // { inherit name; }
-    );
-
-  createClientCertKey = { ca, cn, groups ? [], size ? 2048, name ? cn }:
-    noCSR (
-      (runWithCFSSL name "gencert -ca=${writeFile ca.cert} -ca-key=${writeFile ca.key} -profile=client -config=${writeFile ca.config} ${writeFile {
-        CN = cn;
-        names = map (group: {O = group;}) groups;
-        hosts = [""];
-        key = { algo = "rsa"; inherit size; };
-      }}") // { inherit name; }
-    );
-
-  createSigningCertKey = { C ? "xx", ST ? "x", L ? "x", O ? "x", OU ? "x", CN ? "ca", emailAddress ? "x", expiry ? "43800h", size ? 2048, name ? CN }:
-    (noCSR (runWithCFSSL CN "genkey -initca ${writeFile {
-      key = { algo = "rsa"; inherit size; };
-      names = [{ inherit C ST L O OU CN emailAddress; }];
-    }}")) // {
-      inherit name;
-      config.signing = {
-        default.expiry = expiry;
-        profiles = {
-          server = {
-            inherit expiry;
-            usages = [
-              "signing"
-              "key encipherment"
-              "server auth"
-            ];
-          };
-          client = {
-            inherit expiry;
-            usages = [
-              "signing"
-              "key encipherment"
-              "client auth"
-            ];
-          };
-          peer = {
-            inherit expiry;
-            usages = [
-              "signing"
-              "key encipherment"
-              "server auth"
-              "client auth"
-            ];
-          };
-        };
-      };
-    };
-
-  ca = createSigningCertKey {};
-
-  kube-apiserver = createServingCertKey {
-    inherit ca;
-    cn = "kube-apiserver";
-    hosts = ["kubernetes.default" "kubernetes.default.svc" "localhost" "api.${externalDomain}" serviceClusterIp];
-  };
-
-  kubelet = createServingCertKey {
-    inherit ca;
-    cn = "kubelet";
-    hosts = ["*.${externalDomain}"];
-  };
-
-  service-accounts = createServingCertKey {
-    inherit ca;
-    cn = "kube-service-accounts";
-  };
-
-  etcd = createServingCertKey {
-    inherit ca;
-    cn = "etcd";
-    hosts = ["etcd.${externalDomain}"];
-  };
-
-  etcd-client = createClientCertKey {
-    inherit ca;
-    cn = "etcd-client";
-  };
-
-  kubelet-client = createClientCertKey {
-    inherit ca;
-    cn = "kubelet-client";
-    groups = ["system:masters"];
-  };
-
-  apiserver-client = {
-    kubelet = hostname: createClientCertKey {
-      inherit ca;
-      name = "apiserver-client-kubelet-${hostname}";
-      cn = "system:node:${hostname}.${externalDomain}";
-      groups = ["system:nodes"];
-    };
-
-    kube-proxy = createClientCertKey {
-      inherit ca;
-      name = "apiserver-client-kube-proxy";
-      cn = "system:kube-proxy";
-      groups = ["system:kube-proxy" "system:nodes"];
-    };
-
-    kube-controller-manager = createClientCertKey {
-      inherit ca;
-      name = "apiserver-client-kube-controller-manager";
-      cn = "system:kube-controller-manager";
-      groups = ["system:masters"];
-    };
-
-    kube-scheduler = createClientCertKey {
-      inherit ca;
-      name = "apiserver-client-kube-scheduler";
-      cn = "system:kube-scheduler";
-      groups = ["system:kube-scheduler"];
-    };
-
-    admin = createClientCertKey {
-      inherit ca;
-      cn = "admin";
-      groups = ["system:masters"];
-    };
-  };
-in {
-  master = pkgs.buildEnv {
-    name = "master-keys";
-    paths = [
-      (writeCFSSL (noKey ca))
-      (writeCFSSL kube-apiserver)
-      (writeCFSSL kubelet-client)
-      (writeCFSSL apiserver-client.kube-controller-manager)
-      (writeCFSSL apiserver-client.kube-scheduler)
-      (writeCFSSL service-accounts)
-      (writeCFSSL etcd)
-    ];
-  };
-
-  worker = pkgs.buildEnv {
-    name = "worker-keys";
-    paths = [
-      (writeCFSSL (noKey ca))
-      (writeCFSSL kubelet)
-      (writeCFSSL apiserver-client.kube-proxy)
-      (writeCFSSL etcd-client)
-    ] ++ map (hostname: writeCFSSL (apiserver-client.kubelet hostname)) kubelets;
-  };
-
-  admin = writeCFSSL apiserver-client.admin;
-}
diff --git a/nixos/tests/kubernetes/dns.nix b/nixos/tests/kubernetes/dns.nix
index f25ea5b9ed8..46bcb01a526 100644
--- a/nixos/tests/kubernetes/dns.nix
+++ b/nixos/tests/kubernetes/dns.nix
@@ -71,17 +71,17 @@ let
 
   base = {
     name = "dns";
-    inherit domain certs extraConfiguration;
+    inherit domain extraConfiguration;
   };
 
   singleNodeTest = {
     test = ''
       # prepare machine1 for test
       $machine1->waitUntilSucceeds("kubectl get node machine1.${domain} | grep -w Ready");
-      $machine1->execute("docker load < ${redisImage}");
+      $machine1->waitUntilSucceeds("docker load < ${redisImage}");
       $machine1->waitUntilSucceeds("kubectl create -f ${redisPod}");
       $machine1->waitUntilSucceeds("kubectl create -f ${redisService}");
-      $machine1->execute("docker load < ${probeImage}");
+      $machine1->waitUntilSucceeds("docker load < ${probeImage}");
       $machine1->waitUntilSucceeds("kubectl create -f ${probePod}");
 
       # check if pods are running
@@ -99,13 +99,16 @@ let
 
   multiNodeTest = {
     test = ''
+      # Node token exchange
+      $machine1->waitUntilSucceeds("cp -f /var/lib/cfssl/apitoken.secret /tmp/shared/apitoken.secret");
+      $machine2->waitUntilSucceeds("cat /tmp/shared/apitoken.secret | nixos-kubernetes-node-join");
+
       # prepare machines for test
-      $machine1->waitUntilSucceeds("kubectl get node machine1.${domain} | grep -w Ready");
       $machine1->waitUntilSucceeds("kubectl get node machine2.${domain} | grep -w Ready");
-      $machine2->execute("docker load < ${redisImage}");
+      $machine2->waitUntilSucceeds("docker load < ${redisImage}");
       $machine1->waitUntilSucceeds("kubectl create -f ${redisPod}");
       $machine1->waitUntilSucceeds("kubectl create -f ${redisService}");
-      $machine2->execute("docker load < ${probeImage}");
+      $machine2->waitUntilSucceeds("docker load < ${probeImage}");
       $machine1->waitUntilSucceeds("kubectl create -f ${probePod}");
 
       # check if pods are running
diff --git a/nixos/tests/kubernetes/kubernetes-common.nix b/nixos/tests/kubernetes/kubernetes-common.nix
deleted file mode 100644
index 87c65b88365..00000000000
--- a/nixos/tests/kubernetes/kubernetes-common.nix
+++ /dev/null
@@ -1,57 +0,0 @@
-{ roles, config, pkgs, certs }:
-with pkgs.lib;
-let
-  base = {
-    inherit roles;
-    flannel.enable = true;
-    addons.dashboard.enable = true;
-
-    caFile = "${certs.master}/ca.pem";
-    apiserver = {
-      tlsCertFile = "${certs.master}/kube-apiserver.pem";
-      tlsKeyFile = "${certs.master}/kube-apiserver-key.pem";
-      kubeletClientCertFile = "${certs.master}/kubelet-client.pem";
-      kubeletClientKeyFile = "${certs.master}/kubelet-client-key.pem";
-      serviceAccountKeyFile = "${certs.master}/kube-service-accounts.pem";
-    };
-    etcd = {
-      servers = ["https://etcd.${config.networking.domain}:2379"];
-      certFile = "${certs.worker}/etcd-client.pem";
-      keyFile = "${certs.worker}/etcd-client-key.pem";
-    };
-    kubeconfig = {
-      server = "https://api.${config.networking.domain}";
-    };
-    kubelet = {
-      tlsCertFile = "${certs.worker}/kubelet.pem";
-      tlsKeyFile = "${certs.worker}/kubelet-key.pem";
-      hostname = "${config.networking.hostName}.${config.networking.domain}";
-      kubeconfig = {
-        certFile = "${certs.worker}/apiserver-client-kubelet-${config.networking.hostName}.pem";
-        keyFile = "${certs.worker}/apiserver-client-kubelet-${config.networking.hostName}-key.pem";
-      };
-    };
-    controllerManager = {
-      serviceAccountKeyFile = "${certs.master}/kube-service-accounts-key.pem";
-      kubeconfig = {
-        certFile = "${certs.master}/apiserver-client-kube-controller-manager.pem";
-        keyFile = "${certs.master}/apiserver-client-kube-controller-manager-key.pem";
-      };
-    };
-    scheduler = {
-      kubeconfig = {
-        certFile = "${certs.master}/apiserver-client-kube-scheduler.pem";
-        keyFile = "${certs.master}/apiserver-client-kube-scheduler-key.pem";
-      };
-    };
-    proxy = {
-      kubeconfig = {
-        certFile = "${certs.worker}/apiserver-client-kube-proxy.pem";
-        keyFile = "${certs.worker}//apiserver-client-kube-proxy-key.pem";
-      };
-    };
-  };
-
-in {
-  services.kubernetes = base;
-}
diff --git a/nixos/tests/kubernetes/rbac.nix b/nixos/tests/kubernetes/rbac.nix
index 226808c4b26..3ce7adcd0d7 100644
--- a/nixos/tests/kubernetes/rbac.nix
+++ b/nixos/tests/kubernetes/rbac.nix
@@ -96,7 +96,7 @@ let
     test = ''
       $machine1->waitUntilSucceeds("kubectl get node machine1.my.zyx | grep -w Ready");
 
-      $machine1->execute("docker load < ${kubectlImage}");
+      $machine1->waitUntilSucceeds("docker load < ${kubectlImage}");
 
       $machine1->waitUntilSucceeds("kubectl apply -f ${roServiceAccount}");
       $machine1->waitUntilSucceeds("kubectl apply -f ${roRole}");
@@ -105,7 +105,7 @@ let
 
       $machine1->waitUntilSucceeds("kubectl get pod kubectl | grep Running");
 
-      $machine1->succeed("kubectl exec -ti kubectl -- kubectl get pods");
+      $machine1->waitUntilSucceeds("kubectl exec -ti kubectl -- kubectl get pods");
       $machine1->fail("kubectl exec -ti kubectl -- kubectl create -f /kubectl-pod-2.json");
       $machine1->fail("kubectl exec -ti kubectl -- kubectl delete pods -l name=kubectl");
     '';
@@ -113,10 +113,13 @@ let
 
   multinode = base // {
     test = ''
-      $machine1->waitUntilSucceeds("kubectl get node machine1.my.zyx | grep -w Ready");
+      # Node token exchange
+      $machine1->waitUntilSucceeds("cp -f /var/lib/cfssl/apitoken.secret /tmp/shared/apitoken.secret");
+      $machine2->waitUntilSucceeds("cat /tmp/shared/apitoken.secret | nixos-kubernetes-node-join");
+
       $machine1->waitUntilSucceeds("kubectl get node machine2.my.zyx | grep -w Ready");
 
-      $machine2->execute("docker load < ${kubectlImage}");
+      $machine2->waitUntilSucceeds("docker load < ${kubectlImage}");
 
       $machine1->waitUntilSucceeds("kubectl apply -f ${roServiceAccount}");
       $machine1->waitUntilSucceeds("kubectl apply -f ${roRole}");
@@ -125,7 +128,7 @@ let
 
       $machine1->waitUntilSucceeds("kubectl get pod kubectl | grep Running");
 
-      $machine1->succeed("kubectl exec -ti kubectl -- kubectl get pods");
+      $machine1->waitUntilSucceeds("kubectl exec -ti kubectl -- kubectl get pods");
       $machine1->fail("kubectl exec -ti kubectl -- kubectl create -f /kubectl-pod-2.json");
       $machine1->fail("kubectl exec -ti kubectl -- kubectl delete pods -l name=kubectl");
     '';
diff --git a/nixos/tests/printing.nix b/nixos/tests/printing.nix
index d85abf3c105..7026637ead1 100644
--- a/nixos/tests/printing.nix
+++ b/nixos/tests/printing.nix
@@ -39,6 +39,8 @@ import ./make-test.nix ({pkgs, ... }: {
       $client->waitForUnit("cups.service");
       $client->sleep(10); # wait until cups is fully initialized
       $client->succeed("lpstat -r") =~ /scheduler is running/ or die;
+      # check that local encrypted connections work without error
+      $client->succeed("lpstat -E -r") =~ /scheduler is running/ or die;
       # Test that UNIX socket is used for connections.
       $client->succeed("lpstat -H") =~ "/var/run/cups/cups.sock" or die;
       # Test that HTTP server is available too.
diff --git a/nixos/tests/switch-test.nix b/nixos/tests/switch-test.nix
index 32010838e67..0dba3697980 100644
--- a/nixos/tests/switch-test.nix
+++ b/nixos/tests/switch-test.nix
@@ -18,8 +18,17 @@ import ./make-test.nix ({ pkgs, ...} : {
   testScript = {nodes, ...}: let
     originalSystem = nodes.machine.config.system.build.toplevel;
     otherSystem = nodes.other.config.system.build.toplevel;
+
+    # Ensure failures propagate via pipefail; otherwise a failure of
+    # switch-to-configuration is hidden by the success of `tee`.
+    stderrRunner = pkgs.writeScript "stderr-runner" ''
+      #! ${pkgs.stdenv.shell}
+      set -e
+      set -o pipefail
+      exec env -i "$@" | tee /dev/stderr
+    '';
   in ''
-    $machine->succeed("env -i ${originalSystem}/bin/switch-to-configuration test | tee /dev/stderr");
-    $machine->succeed("env -i ${otherSystem}/bin/switch-to-configuration test | tee /dev/stderr");
+    $machine->succeed("${stderrRunner} ${originalSystem}/bin/switch-to-configuration test");
+    $machine->succeed("${stderrRunner} ${otherSystem}/bin/switch-to-configuration test");
   '';
 })