author     Jan Tojnar <jtojnar@gmail.com>    2019-09-06 03:21:30 +0200
committer  Jan Tojnar <jtojnar@gmail.com>    2019-09-06 03:21:30 +0200
commit     306cc9b31182a6c2c7304e7cbf52c0f88e6a03c3 (patch)
tree       cc8f1a2bdb1fd9f5513e9f2ef9f32982213a90a6 /nixos
parent     e5dece4cbffc4d9859ef90d1489a16cfe52821a9 (diff)
parent     cdf426488b5dc3a7c051d7ad1c90c07dc0c3a89f (diff)
Merge branch 'staging-next' into staging
Diffstat (limited to 'nixos')
-rw-r--r--  nixos/doc/manual/configuration/customizing-packages.xml | 6
-rw-r--r--  nixos/doc/manual/release-notes/rl-1703.xml | 2
-rw-r--r--  nixos/doc/manual/release-notes/rl-1909.xml | 22
-rw-r--r--  nixos/maintainers/scripts/ec2/amazon-image.nix | 28
-rwxr-xr-x  nixos/maintainers/scripts/ec2/create-amis.sh | 525
-rw-r--r--  nixos/modules/config/gtk/gtk-icon-cache.nix | 2
-rw-r--r--  nixos/modules/installer/tools/nix-fallback-paths.nix | 8
-rw-r--r--  nixos/modules/module-list.nix | 1
-rw-r--r--  nixos/modules/programs/plotinus.nix | 2
-rw-r--r--  nixos/modules/programs/plotinus.xml | 4
-rw-r--r--  nixos/modules/rename.nix | 1
-rw-r--r--  nixos/modules/services/cluster/kubernetes/addon-manager.nix | 85
-rw-r--r--  nixos/modules/services/cluster/kubernetes/addons/dashboard.nix | 36
-rw-r--r--  nixos/modules/services/cluster/kubernetes/apiserver.nix | 48
-rw-r--r--  nixos/modules/services/cluster/kubernetes/controller-manager.nix | 39
-rw-r--r--  nixos/modules/services/cluster/kubernetes/default.nix | 25
-rw-r--r--  nixos/modules/services/cluster/kubernetes/flannel.nix | 73
-rw-r--r--  nixos/modules/services/cluster/kubernetes/kubelet.nix | 93
-rw-r--r--  nixos/modules/services/cluster/kubernetes/pki.nix | 166
-rw-r--r--  nixos/modules/services/cluster/kubernetes/proxy.nix | 37
-rw-r--r--  nixos/modules/services/cluster/kubernetes/scheduler.nix | 34
-rw-r--r--  nixos/modules/services/databases/postgresql.nix | 4
-rw-r--r--  nixos/modules/services/editors/emacs.xml | 6
-rw-r--r--  nixos/modules/services/misc/zookeeper.nix | 1
-rw-r--r--  nixos/modules/services/network-filesystems/ceph.nix | 110
-rw-r--r--  nixos/modules/services/web-servers/darkhttpd.nix | 2
-rw-r--r--  nixos/modules/services/x11/desktop-managers/enlightenment.nix | 4
-rw-r--r--  nixos/modules/services/x11/desktop-managers/mate.nix | 2
-rw-r--r--  nixos/modules/services/x11/desktop-managers/xfce.nix | 6
-rw-r--r--  nixos/modules/services/x11/desktop-managers/xfce4-14.nix | 4
-rw-r--r--  nixos/modules/virtualisation/amazon-image.nix | 14
-rw-r--r--  nixos/modules/virtualisation/amazon-options.nix | 9
-rw-r--r--  nixos/modules/virtualisation/railcar.nix | 125
-rw-r--r--  nixos/release.nix | 16
-rw-r--r--  nixos/tests/ceph.nix | 52
-rw-r--r--  nixos/tests/kubernetes/base.nix | 5
-rw-r--r--  nixos/tests/kubernetes/dns.nix | 3
-rw-r--r--  nixos/tests/kubernetes/rbac.nix | 4
-rw-r--r--  nixos/tests/quake3.nix | 4
39 files changed, 756 insertions, 852 deletions
diff --git a/nixos/doc/manual/configuration/customizing-packages.xml b/nixos/doc/manual/configuration/customizing-packages.xml
index 03b5bb53197..34e6ab4b24d 100644
--- a/nixos/doc/manual/configuration/customizing-packages.xml
+++ b/nixos/doc/manual/configuration/customizing-packages.xml
@@ -24,8 +24,8 @@
  <para>
   Apart from high-level options, it’s possible to tweak a package in almost
   arbitrary ways, such as changing or disabling dependencies of a package. For
-  instance, the Emacs package in Nixpkgs by default has a dependency on GTK+ 2.
-  If you want to build it against GTK+ 3, you can specify that as follows:
+  instance, the Emacs package in Nixpkgs by default has a dependency on GTK 2.
+  If you want to build it against GTK 3, you can specify that as follows:
 <programlisting>
 <xref linkend="opt-environment.systemPackages"/> = [ (pkgs.emacs.override { gtk = pkgs.gtk3; }) ];
 </programlisting>
@@ -33,7 +33,7 @@
   function that produces Emacs, with the original arguments amended by the set
   of arguments specified by you. So here the function argument
   <varname>gtk</varname> gets the value <literal>pkgs.gtk3</literal>, causing
-  Emacs to depend on GTK+ 3. (The parentheses are necessary because in Nix,
+  Emacs to depend on GTK 3. (The parentheses are necessary because in Nix,
   function application binds more weakly than list construction, so without
   them, <xref linkend="opt-environment.systemPackages"/> would be a list with
   two elements.)
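
To restate the parenthesization point concretely, here is a minimal sketch in plain Nix, assuming only pkgs in scope, as in any configuration.nix:

    environment.systemPackages = [
      # One element: Emacs rebuilt against GTK 3. Without the parentheses,
      # Nix would parse this as a two-element list containing the function
      # pkgs.emacs.override followed by the attribute set.
      (pkgs.emacs.override { gtk = pkgs.gtk3; })
    ];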
diff --git a/nixos/doc/manual/release-notes/rl-1703.xml b/nixos/doc/manual/release-notes/rl-1703.xml
index 86f4a1ccfb7..14b31b232e9 100644
--- a/nixos/doc/manual/release-notes/rl-1703.xml
+++ b/nixos/doc/manual/release-notes/rl-1703.xml
@@ -730,7 +730,7 @@ in
    </listitem>
    <listitem>
     <para>
-     <literal>jre</literal> now defaults to GTK+ UI by default. This improves
+     <literal>jre</literal> now defaults to the GTK UI. This improves
      visual consistency and makes Java follow system font style, improving the
      situation on HighDPI displays. This has a cost of increased closure size;
      for server and other headless workloads it's recommended to use
diff --git a/nixos/doc/manual/release-notes/rl-1909.xml b/nixos/doc/manual/release-notes/rl-1909.xml
index c0b7cc48a46..ff1bd628ad2 100644
--- a/nixos/doc/manual/release-notes/rl-1909.xml
+++ b/nixos/doc/manual/release-notes/rl-1909.xml
@@ -422,6 +422,12 @@
        It was not useful except for debugging purposes and was confusingly set as default in some circumstances.
      </para>
    </listitem>
+   <listitem>
+    <para>
+     The WeeChat plugin <literal>pkgs.weechatScripts.weechat-xmpp</literal> has been removed, as it no longer
+     receives updates from upstream and depends on outdated Python 2-based modules.
+    </para>
+   </listitem>
 
   </itemizedlist>
  </section>
@@ -710,6 +716,22 @@
        <literal>nix-shell -p altcoins.dogecoin</literal>, etc.
      </para>
    </listitem>
+   <listitem>
+     <para>
+       Ceph has been upgraded to v14.2.1.
+       See the <link xlink:href="https://ceph.com/releases/v14-2-0-nautilus-released/">release notes</link> for details.
+       The mgr dashboard and OSDs backed by loop devices are no longer explicitly supported by the package and module.
+       Note: there have been some issues with python-cherrypy, which is used by the dashboard
+       and prometheus mgr modules (and possibly others), hence 0000-dont-check-cherrypy-version.patch.
+     </para>
+    </listitem>
+    <listitem>
+     <para>
+      <literal>pkgs.weechat</literal> is now compiled against <literal>pkgs.python3</literal>.
+      WeeChat also recommends <link xlink:href="https://weechat.org/scripts/python3/">using Python 3</link>
+      in its documentation.
+     </para>
+    </listitem>
   </itemizedlist>
  </section>
 </section>
diff --git a/nixos/maintainers/scripts/ec2/amazon-image.nix b/nixos/maintainers/scripts/ec2/amazon-image.nix
index 88d95e67544..31e15537179 100644
--- a/nixos/maintainers/scripts/ec2/amazon-image.nix
+++ b/nixos/maintainers/scripts/ec2/amazon-image.nix
@@ -17,7 +17,7 @@ in {
     name = mkOption {
       type = types.str;
       description = "The name of the generated derivation";
-      default = "nixos-disk-image";
+      default = "nixos-amazon-image-${config.system.nixos.label}-${pkgs.stdenv.hostPlatform.system}";
     };
 
     contents = mkOption {
@@ -42,7 +42,7 @@ in {
 
     format = mkOption {
       type = types.enum [ "raw" "qcow2" "vpc" ];
-      default = "qcow2";
+      default = "vpc";
       description = "The image format to output";
     };
   };
@@ -51,7 +51,9 @@ in {
     inherit lib config;
     inherit (cfg) contents format name;
     pkgs = import ../../../.. { inherit (pkgs) system; }; # ensure we use the regular qemu-kvm package
-    partitionTableType = if config.ec2.hvm then "legacy" else "none";
+    partitionTableType = if config.ec2.efi then "efi"
+                         else if config.ec2.hvm then "legacy"
+                         else "none";
     diskSize = cfg.sizeMB;
     fsType = "ext4";
     configFile = pkgs.writeText "configuration.nix"
@@ -61,7 +63,27 @@ in {
           ${optionalString config.ec2.hvm ''
             ec2.hvm = true;
           ''}
+          ${optionalString config.ec2.efi ''
+            ec2.efi = true;
+          ''}
         }
       '';
+    postVM = ''
+      extension=''${diskImage##*.}
+      friendlyName=$out/${cfg.name}.$extension
+      mv "$diskImage" "$friendlyName"
+      diskImage=$friendlyName
+
+      mkdir -p $out/nix-support
+      echo "file ${cfg.format} $diskImage" >> $out/nix-support/hydra-build-products
+
+      ${pkgs.jq}/bin/jq -n \
+        --arg label ${lib.escapeShellArg config.system.nixos.label} \
+        --arg system ${lib.escapeShellArg pkgs.stdenv.hostPlatform.system} \
+        --arg logical_bytes "$(${pkgs.qemu}/bin/qemu-img info --output json "$diskImage" | ${pkgs.jq}/bin/jq '."virtual-size"')" \
+        --arg file "$diskImage" \
+        '$ARGS.named' \
+        > $out/nix-support/image-info.json
+    '';
   };
 }
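
For orientation, a hypothetical configuration sketch for building such an image. The amazonImage option prefix and the sizeMB value are assumptions inferred from cfg.sizeMB and cfg.format in this module:

    { config, pkgs, ... }:
    {
      imports = [ <nixpkgs/nixos/maintainers/scripts/ec2/amazon-image.nix> ];
      ec2.hvm = true;              # selects the "legacy" partition table above
      # ec2.efi = true;            # would select the "efi" table added in this diff
      amazonImage.format = "vpc";  # the new default; a VHD that EC2 can import
      amazonImage.sizeMB = 8192;   # assumed value for cfg.sizeMB
    }

The build output then carries nix-support/image-info.json, which the rewritten create-amis.sh below consumes.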
diff --git a/nixos/maintainers/scripts/ec2/create-amis.sh b/nixos/maintainers/scripts/ec2/create-amis.sh
index 790cc6cbc53..c4149e3e8ff 100755
--- a/nixos/maintainers/scripts/ec2/create-amis.sh
+++ b/nixos/maintainers/scripts/ec2/create-amis.sh
@@ -1,279 +1,296 @@
 #!/usr/bin/env nix-shell
-#! nix-shell -i bash -p qemu ec2_ami_tools jq ec2_api_tools awscli
+#!nix-shell -p awscli -p jq -p qemu -i bash
+
+# Uploads and registers NixOS images built from the
+# <nixos/release.nix> amazonImage attribute. Images are uploaded and
+# registered via a home region, and then copied to other regions.
+
+# The home region requires an S3 bucket and a "vmimport" IAM role
+# with access to the S3 bucket.  Configuration of the vmimport role is
+# documented in
+# https://docs.aws.amazon.com/vm-import/latest/userguide/vmimport-image-import.html
+
+# set -x
+set -euo pipefail
+
+# configuration
+state_dir=/home/deploy/amis/ec2-images
+home_region=eu-west-1
+bucket=nixos-amis
+
+regions=(eu-west-1 eu-west-2 eu-west-3 eu-central-1
+         us-east-1 us-east-2 us-west-1 us-west-2
+         ca-central-1
+         ap-southeast-1 ap-southeast-2 ap-northeast-1 ap-northeast-2
+         ap-south-1 ap-east-1
+         sa-east-1)
+
+log() {
+    echo "$@" >&2
+}
+
+if [ -z "$1" ]; then
+    log "Usage: ./upload-amazon-image.sh IMAGE_OUTPUT"
+    exit 1
+fi
+
+# result of the amazon-image from nixos/release.nix
+store_path=$1
+
+if [ ! -e "$store_path" ]; then
+    log "Store path: $store_path does not exist, fetching..."
+    nix-store --realise "$store_path"
+fi
+
+if [ ! -d "$store_path" ]; then
+    log "store_path: $store_path is not a directory. aborting"
+    exit 1
+fi
+
+read_image_info() {
+    if [ ! -e "$store_path/nix-support/image-info.json" ]; then
+        log "Image missing metadata"
+        exit 1
+    fi
+    jq -r "$1" "$store_path/nix-support/image-info.json"
+}
+
+# We handle a single image per invocation, store all attributes in
+# globals for convenience.
+image_label=$(read_image_info .label)
+image_system=$(read_image_info .system)
+image_file=$(read_image_info .file)
+image_logical_bytes=$(read_image_info .logical_bytes)
+
+# Derived attributes
+
+image_logical_gigabytes=$((($image_logical_bytes-1)/1024/1024/1024+1)) # Round to the next GB
+
+case "$image_system" in
+    aarch64-linux)
+        amazon_arch=arm64
+        ;;
+    x86_64-linux)
+        amazon_arch=x86_64
+        ;;
+    *)
+        log "Unknown system: $image_system"
+        exit 1
+esac
+
+image_name="NixOS-${image_label}-${image_system}"
+image_description="NixOS ${image_label} ${image_system}"
+
+log "Image Details:"
+log " Name: $image_name"
+log " Description: $image_description"
+log " Size (gigabytes): $image_logical_gigabytes"
+log " System: $image_system"
+log " Amazon Arch: $amazon_arch"
+
+read_state() {
+    local state_key=$1
+    local type=$2
+
+    cat "$state_dir/$state_key.$type" 2>/dev/null || true
+}
+
+write_state() {
+    local state_key=$1
+    local type=$2
+    local val=$3
+
+    mkdir -p $state_dir
+    echo "$val" > "$state_dir/$state_key.$type"
+}
+
+wait_for_import() {
+    local region=$1
+    local task_id=$2
+    local state snapshot_id
+    log "Waiting for import task $task_id to be completed"
+    while true; do
+        read state progress snapshot_id < <(
+            aws ec2 describe-import-snapshot-tasks --region $region --import-task-ids "$task_id" | \
+                jq -r '.ImportSnapshotTasks[].SnapshotTaskDetail | "\(.Status) \(.Progress) \(.SnapshotId)"'
+        )
+        log " ... state=$state progress=$progress snapshot_id=$snapshot_id"
+        case "$state" in
+            active)
+                sleep 10
+                ;;
+            completed)
+                echo "$snapshot_id"
+                return
+                ;;
+            *)
+                log "Unexpected snapshot import state: '${state}'"
+                exit 1
+                ;;
+        esac
+    done
+}
+
+wait_for_image() {
+    local region=$1
+    local ami_id=$2
+    local state
+    log "Waiting for image $ami_id to be available"
+
+    while true; do
+        read state < <(
+            aws ec2 describe-images --image-ids "$ami_id" --region $region | \
+                jq -r ".Images[].State"
+        )
+        log " ... state=$state"
+        case "$state" in
+            pending)
+                sleep 10
+                ;;
+            available)
+                return
+                ;;
+            *)
+                log "Unexpected AMI state: '${state}'"
+                exit 1
+                ;;
+        esac
+    done
+}
+
+
+make_image_public() {
+    local region=$1
+    local ami_id=$2
 
-# To start with do: nix-shell -p awscli --run "aws configure"
+    wait_for_image $region "$ami_id"
 
-set -e
-set -o pipefail
+    log "Making image $ami_id public"
 
-version=$(nix-instantiate --eval --strict '<nixpkgs>' -A lib.version | sed s/'"'//g)
-major=${version:0:5}
-echo "NixOS version is $version ($major)"
+    aws ec2 modify-image-attribute \
+        --image-id "$ami_id" --region "$region" --launch-permission 'Add={Group=all}' >&2
+}
 
-stateDir=/home/deploy/amis/ec2-image-$version
-echo "keeping state in $stateDir"
-mkdir -p $stateDir
+upload_image() {
+    local region=$1
 
-rm -f ec2-amis.nix
+    local aws_path=${image_file#/}
 
-types="hvm"
-stores="ebs"
-regions="eu-west-1 eu-west-2 eu-west-3 eu-central-1 us-east-1 us-east-2 us-west-1 us-west-2 ca-central-1 ap-southeast-1 ap-southeast-2 ap-northeast-1 ap-northeast-2 sa-east-1 ap-south-1"
+    local state_key="$region.$image_label.$image_system"
+    local task_id=$(read_state "$state_key" task_id)
+    local snapshot_id=$(read_state "$state_key" snapshot_id)
+    local ami_id=$(read_state "$state_key" ami_id)
 
-for type in $types; do
-    link=$stateDir/$type
-    imageFile=$link/nixos.qcow2
-    system=x86_64-linux
-    arch=x86_64
+    if [ -z "$task_id" ]; then
+        log "Checking for image on S3"
+        if ! aws s3 ls --region "$region" "s3://${bucket}/${aws_path}" >&2; then
+            log "Image missing from aws, uploading"
+            aws s3 cp --region $region "$image_file" "s3://${bucket}/${aws_path}" >&2
+        fi
 
-    # Build the image.
-    if ! [ -L $link ]; then
-        if [ $type = pv ]; then hvmFlag=false; else hvmFlag=true; fi
+        log "Importing image from S3 path s3://$bucket/$aws_path"
 
-        echo "building image type '$type'..."
-        nix-build -o $link \
-            '<nixpkgs/nixos>' \
-            -A config.system.build.amazonImage \
-            --arg configuration "{ imports = [ <nixpkgs/nixos/maintainers/scripts/ec2/amazon-image.nix> ]; ec2.hvm = $hvmFlag; }"
+        task_id=$(aws ec2 import-snapshot --disk-container "{
+          \"Description\": \"nixos-image-${image_label}-${image_system}\",
+          \"Format\": \"vhd\",
+          \"UserBucket\": {
+              \"S3Bucket\": \"$bucket\",
+              \"S3Key\": \"$aws_path\"
+          }
+        }" --region $region | jq -r '.ImportTaskId')
+
+        write_state "$state_key" task_id "$task_id"
     fi
 
-    for store in $stores; do
-
-        bucket=nixos-amis
-        bucketDir="$version-$type-$store"
-
-        prevAmi=
-        prevRegion=
-
-        for region in $regions; do
-
-            name=nixos-$version-$arch-$type-$store
-            description="NixOS $system $version ($type-$store)"
-
-            amiFile=$stateDir/$region.$type.$store.ami-id
-
-            if ! [ -e $amiFile ]; then
-
-                echo "doing $name in $region..."
-
-                if [ -n "$prevAmi" ]; then
-                    ami=$(aws ec2 copy-image \
-                        --region "$region" \
-                        --source-region "$prevRegion" --source-image-id "$prevAmi" \
-                        --name "$name" --description "$description" | jq -r '.ImageId')
-                    if [ "$ami" = null ]; then break; fi
-                else
-
-                    if [ $store = s3 ]; then
-
-                        # Bundle the image.
-                        imageDir=$stateDir/$type-bundled
-
-                        # Convert the image to raw format.
-                        rawFile=$stateDir/$type.raw
-                        if ! [ -e $rawFile ]; then
-                            qemu-img convert -f qcow2 -O raw $imageFile $rawFile.tmp
-                            mv $rawFile.tmp $rawFile
-                        fi
-
-                        if ! [ -d $imageDir ]; then
-                            rm -rf $imageDir.tmp
-                            mkdir -p $imageDir.tmp
-                            ec2-bundle-image \
-                                -d $imageDir.tmp \
-                                -i $rawFile --arch $arch \
-                                --user "$AWS_ACCOUNT" -c "$EC2_CERT" -k "$EC2_PRIVATE_KEY"
-                            mv $imageDir.tmp $imageDir
-                        fi
-
-                        # Upload the bundle to S3.
-                        if ! [ -e $imageDir/uploaded ]; then
-                            echo "uploading bundle to S3..."
-                            ec2-upload-bundle \
-                                -m $imageDir/$type.raw.manifest.xml \
-                                -b "$bucket/$bucketDir" \
-                                -a "$AWS_ACCESS_KEY_ID" -s "$AWS_SECRET_ACCESS_KEY" \
-                                --location EU
-                            touch $imageDir/uploaded
-                        fi
-
-                        extraFlags="--image-location $bucket/$bucketDir/$type.raw.manifest.xml"
-
-                    else
-
-                        # Convert the image to vhd format so we don't have
-                        # to upload a huge raw image.
-                        vhdFile=$stateDir/$type.vhd
-                        if ! [ -e $vhdFile ]; then
-                            qemu-img convert -f qcow2 -O vpc $imageFile $vhdFile.tmp
-                            mv $vhdFile.tmp $vhdFile
-                        fi
-
-                        vhdFileLogicalBytes="$(qemu-img info "$vhdFile" | grep ^virtual\ size: | cut -f 2 -d \(  | cut -f 1 -d \ )"
-                        vhdFileLogicalGigaBytes=$(((vhdFileLogicalBytes-1)/1024/1024/1024+1)) # Round to the next GB
-
-                        echo "Disk size is $vhdFileLogicalBytes bytes. Will be registered as $vhdFileLogicalGigaBytes GB."
-
-                        taskId=$(cat $stateDir/$region.$type.task-id 2> /dev/null || true)
-                        volId=$(cat $stateDir/$region.$type.vol-id 2> /dev/null || true)
-                        snapId=$(cat $stateDir/$region.$type.snap-id 2> /dev/null || true)
-
-                        # Import the VHD file.
-                        if [ -z "$snapId" -a -z "$volId" -a -z "$taskId" ]; then
-                            echo "importing $vhdFile..."
-                            taskId=$(ec2-import-volume $vhdFile --no-upload -f vhd \
-                                -O "$AWS_ACCESS_KEY_ID" -W "$AWS_SECRET_ACCESS_KEY" \
-                                -o "$AWS_ACCESS_KEY_ID" -w "$AWS_SECRET_ACCESS_KEY" \
-                                --region "$region" -z "${region}a" \
-                                --bucket "$bucket" --prefix "$bucketDir/" \
-                                | tee /dev/stderr \
-                                | sed 's/.*\(import-vol-[0-9a-z]\+\).*/\1/ ; t ; d')
-                            echo -n "$taskId" > $stateDir/$region.$type.task-id
-                        fi
-
-                        if [ -z "$snapId" -a -z "$volId" ]; then
-                            ec2-resume-import  $vhdFile -t "$taskId" --region "$region" \
-                                -O "$AWS_ACCESS_KEY_ID" -W "$AWS_SECRET_ACCESS_KEY" \
-                                -o "$AWS_ACCESS_KEY_ID" -w "$AWS_SECRET_ACCESS_KEY"
-                        fi
-
-                        # Wait for the volume creation to finish.
-                        if [ -z "$snapId" -a -z "$volId" ]; then
-                            echo "waiting for import to finish..."
-                            while true; do
-                                volId=$(aws ec2 describe-conversion-tasks --conversion-task-ids "$taskId" --region "$region" | jq -r .ConversionTasks[0].ImportVolume.Volume.Id)
-                                if [ "$volId" != null ]; then break; fi
-                                sleep 10
-                            done
-
-                            echo -n "$volId" > $stateDir/$region.$type.vol-id
-                        fi
-
-                        # Delete the import task.
-                        if [ -n "$volId" -a -n "$taskId" ]; then
-                            echo "removing import task..."
-                            ec2-delete-disk-image -t "$taskId" --region "$region" \
-                                -O "$AWS_ACCESS_KEY_ID" -W "$AWS_SECRET_ACCESS_KEY" \
-                                -o "$AWS_ACCESS_KEY_ID" -w "$AWS_SECRET_ACCESS_KEY" || true
-                            rm -f $stateDir/$region.$type.task-id
-                        fi
-
-                        # Create a snapshot.
-                        if [ -z "$snapId" ]; then
-                            echo "creating snapshot..."
-                            # FIXME: this can fail with InvalidVolume.NotFound. Eventual consistency yay.
-                            snapId=$(aws ec2 create-snapshot --volume-id "$volId" --region "$region" --description "$description" | jq -r .SnapshotId)
-                            if [ "$snapId" = null ]; then exit 1; fi
-                            echo -n "$snapId" > $stateDir/$region.$type.snap-id
-                        fi
-
-                        # Wait for the snapshot to finish.
-                        echo "waiting for snapshot to finish..."
-                        while true; do
-                            status=$(aws ec2 describe-snapshots --snapshot-ids "$snapId" --region "$region" | jq -r .Snapshots[0].State)
-                            if [ "$status" = completed ]; then break; fi
-                            sleep 10
-                        done
-
-                        # Delete the volume.
-                        if [ -n "$volId" ]; then
-                            echo "deleting volume..."
-                            aws ec2 delete-volume --volume-id "$volId" --region "$region" || true
-                            rm -f $stateDir/$region.$type.vol-id
-                        fi
-
-                        blockDeviceMappings="DeviceName=/dev/sda1,Ebs={SnapshotId=$snapId,VolumeSize=$vhdFileLogicalGigaBytes,DeleteOnTermination=true,VolumeType=gp2}"
-                        extraFlags=""
-
-                        if [ $type = pv ]; then
-                            extraFlags+=" --root-device-name /dev/sda1"
-                        else
-                            extraFlags+=" --root-device-name /dev/sda1"
-                            extraFlags+=" --sriov-net-support simple"
-                            extraFlags+=" --ena-support"
-                        fi
-
-                        blockDeviceMappings+=" DeviceName=/dev/sdb,VirtualName=ephemeral0"
-                        blockDeviceMappings+=" DeviceName=/dev/sdc,VirtualName=ephemeral1"
-                        blockDeviceMappings+=" DeviceName=/dev/sdd,VirtualName=ephemeral2"
-                        blockDeviceMappings+=" DeviceName=/dev/sde,VirtualName=ephemeral3"
-                    fi
-
-                    if [ $type = hvm ]; then
-                        extraFlags+=" --sriov-net-support simple"
-                        extraFlags+=" --ena-support"
-                    fi
-
-                    # Register the AMI.
-                    if [ $type = pv ]; then
-                        kernel=$(aws ec2 describe-images --owner amazon --filters "Name=name,Values=pv-grub-hd0_1.05-$arch.gz" | jq -r .Images[0].ImageId)
-                        if [ "$kernel" = null ]; then break; fi
-                        echo "using PV-GRUB kernel $kernel"
-                        extraFlags+=" --virtualization-type paravirtual --kernel $kernel"
-                    else
-                        extraFlags+=" --virtualization-type hvm"
-                    fi
-
-                    ami=$(aws ec2 register-image \
-                        --name "$name" \
-                        --description "$description" \
-                        --region "$region" \
-                        --architecture "$arch" \
-                        --block-device-mappings $blockDeviceMappings \
-                        $extraFlags | jq -r .ImageId)
-                    if [ "$ami" = null ]; then break; fi
-                fi
-
-                echo -n "$ami" > $amiFile
-                echo "created AMI $ami of type '$type' in $region..."
-
-            else
-                ami=$(cat $amiFile)
-            fi
-
-            echo "region = $region, type = $type, store = $store, ami = $ami"
-
-            if [ -z "$prevAmi" ]; then
-                prevAmi="$ami"
-                prevRegion="$region"
-            fi
-        done
+    if [ -z "$snapshot_id" ]; then
+        snapshot_id=$(wait_for_import "$region" "$task_id")
+        write_state "$state_key" snapshot_id "$snapshot_id"
+    fi
 
-    done
+    if [ -z "$ami_id" ]; then
+        log "Registering snapshot $snapshot_id as AMI"
+
+        local block_device_mappings=(
+            "DeviceName=/dev/sda1,Ebs={SnapshotId=$snapshot_id,VolumeSize=$image_logical_gigabytes,DeleteOnTermination=true,VolumeType=gp2}"
+        )
+
+        local extra_flags=(
+            --root-device-name /dev/sda1
+            --sriov-net-support simple
+            --ena-support
+            --virtualization-type hvm
+        )
+
+        block_device_mappings+=(DeviceName=/dev/sdb,VirtualName=ephemeral0)
+        block_device_mappings+=(DeviceName=/dev/sdc,VirtualName=ephemeral1)
+        block_device_mappings+=(DeviceName=/dev/sdd,VirtualName=ephemeral2)
+        block_device_mappings+=(DeviceName=/dev/sde,VirtualName=ephemeral3)
+
+        ami_id=$(
+            aws ec2 register-image \
+                --name "$image_name" \
+                --description "$image_description" \
+                --region $region \
+                --architecture $amazon_arch \
+                --block-device-mappings "${block_device_mappings[@]}" \
+                "${extra_flags[@]}" \
+                | jq -r '.ImageId'
+              )
+
+        write_state "$state_key" ami_id "$ami_id"
+    fi
 
-done
+    make_image_public $region "$ami_id"
 
-for type in $types; do
-    link=$stateDir/$type
-    system=x86_64-linux
-    arch=x86_64
+    echo "$ami_id"
+}
 
-    for store in $stores; do
+copy_to_region() {
+    local region=$1
+    local from_region=$2
+    local from_ami_id=$3
 
-        for region in $regions; do
+    state_key="$region.$image_label.$image_system"
+    ami_id=$(read_state "$state_key" ami_id)
 
-            name=nixos-$version-$arch-$type-$store
-            amiFile=$stateDir/$region.$type.$store.ami-id
-            ami=$(cat $amiFile)
+    if [ -z "$ami_id" ]; then
+        log "Copying $from_ami_id to $region"
+        ami_id=$(
+            aws ec2 copy-image \
+                --region "$region" \
+                --source-region "$from_region" \
+                --source-image-id "$from_ami_id" \
+                --name "$image_name" \
+                --description "$image_description" \
+                | jq -r '.ImageId'
+              )
 
-            echo "region = $region, type = $type, store = $store, ami = $ami"
+        write_state "$state_key" ami_id "$ami_id"
+    fi
 
-            echo -n "waiting for AMI..."
-            while true; do
-                status=$(aws ec2 describe-images --image-ids "$ami" --region "$region" | jq -r .Images[0].State)
-                if [ "$status" = available ]; then break; fi
-                sleep 10
-                echo -n '.'
-            done
-            echo
+    make_image_public $region "$ami_id"
+
+    echo "$ami_id"
+}
 
-            # Make the image public.
-            aws ec2 modify-image-attribute \
-                --image-id "$ami" --region "$region" --launch-permission 'Add={Group=all}'
+upload_all() {
+    home_image_id=$(upload_image "$home_region")
+    jq -n \
+       --arg key "$home_region.$image_system" \
+       --arg value "$home_image_id" \
+       '$ARGS.named'
 
-            echo "  \"$major\".$region.$type-$store = \"$ami\";" >> ec2-amis.nix
-        done
+    for region in "${regions[@]}"; do
+        if [ "$region" = "$home_region" ]; then
+            continue
+        fi
+        copied_image_id=$(copy_to_region "$region" "$home_region" "$home_image_id")
 
+        jq -n \
+           --arg key "$region.$image_system" \
+           --arg value "$copied_image_id" \
+           '$ARGS.named'
     done
+}
 
-done
+upload_all | jq --slurp from_entries
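
Where the old script appended attributes to ec2-amis.nix, the rewrite prints a JSON object mapping "region.system" keys to AMI ids. A hypothetical Nix sketch for consuming that output, should an attribute set still be wanted:

    let
      # Hypothetical: ./amis.json holds the captured stdout of create-amis.sh.
      amis = builtins.fromJSON (builtins.readFile ./amis.json);
    in
      # Keys have the shape "region.system", so quoting is required:
      amis."eu-west-1.x86_64-linux"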
diff --git a/nixos/modules/config/gtk/gtk-icon-cache.nix b/nixos/modules/config/gtk/gtk-icon-cache.nix
index 9c5d993b9c5..86a6bfb5af4 100644
--- a/nixos/modules/config/gtk/gtk-icon-cache.nix
+++ b/nixos/modules/config/gtk/gtk-icon-cache.nix
@@ -7,7 +7,7 @@ with lib;
       type = types.bool;
       default = config.services.xserver.enable;
       description = ''
-        Whether to build icon theme caches for GTK+ applications.
+        Whether to build icon theme caches for GTK applications.
       '';
     };
   };
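
A one-line usage sketch; the option path gtk.iconCache.enable is an assumption based on this file living under modules/config/gtk:

    gtk.iconCache.enable = true;  # the default tracks config.services.xserver.enable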
diff --git a/nixos/modules/installer/tools/nix-fallback-paths.nix b/nixos/modules/installer/tools/nix-fallback-paths.nix
index b9ab2053c41..2673887d2b9 100644
--- a/nixos/modules/installer/tools/nix-fallback-paths.nix
+++ b/nixos/modules/installer/tools/nix-fallback-paths.nix
@@ -1,6 +1,6 @@
 {
-  x86_64-linux = "/nix/store/hbhdjn5ik3byg642d1m11k3k3s0kn3py-nix-2.2.2";
-  i686-linux = "/nix/store/fz5cikwvj3n0a6zl44h6l2z3cin64mda-nix-2.2.2";
-  aarch64-linux = "/nix/store/2gba4cyl4wvxzfbhmli90jy4n5aj0kjj-nix-2.2.2";
-  x86_64-darwin = "/nix/store/87i4fp46jfw9yl8c7i9gx75m5yph7irl-nix-2.2.2";
+  x86_64-linux = "/nix/store/3ds3cgji9vjxdbgp10av6smyym1126d1-nix-2.3";
+  i686-linux = "/nix/store/ln1ndqvfpc9cdl03vqxi6kvlxm9wfv9g-nix-2.3";
+  aarch64-linux = "/nix/store/n8a1rwzrp20qcr2c4hvyn6c5q9zx8csw-nix-2.3";
+  x86_64-darwin = "/nix/store/jq6npmpld02sz4rgniz0qrsdfnm6j17a-nix-2.3";
 }
diff --git a/nixos/modules/module-list.nix b/nixos/modules/module-list.nix
index 290c29993b5..fe28cf7fa49 100644
--- a/nixos/modules/module-list.nix
+++ b/nixos/modules/module-list.nix
@@ -948,6 +948,7 @@
   ./virtualisation/openvswitch.nix
   ./virtualisation/parallels-guest.nix
   ./virtualisation/qemu-guest-agent.nix
+  ./virtualisation/railcar.nix
   ./virtualisation/rkt.nix
   ./virtualisation/virtualbox-guest.nix
   ./virtualisation/virtualbox-host.nix
diff --git a/nixos/modules/programs/plotinus.nix b/nixos/modules/programs/plotinus.nix
index 065e72d6c37..e3549c79588 100644
--- a/nixos/modules/programs/plotinus.nix
+++ b/nixos/modules/programs/plotinus.nix
@@ -18,7 +18,7 @@ in
       enable = mkOption {
         default = false;
         description = ''
-          Whether to enable the Plotinus GTK+3 plugin.  Plotinus provides a
+          Whether to enable the Plotinus GTK 3 plugin. Plotinus provides a
           popup (triggered by Ctrl-Shift-P) to search the menus of a
           compatible application.
         '';
diff --git a/nixos/modules/programs/plotinus.xml b/nixos/modules/programs/plotinus.xml
index 902cd89e0c4..8fc8c22c6d7 100644
--- a/nixos/modules/programs/plotinus.xml
+++ b/nixos/modules/programs/plotinus.xml
@@ -13,10 +13,10 @@
   <link xlink:href="https://github.com/p-e-w/plotinus"/>
  </para>
  <para>
-  Plotinus is a searchable command palette in every modern GTK+ application.
+  Plotinus is a searchable command palette in every modern GTK application.
  </para>
  <para>
-  When in a GTK+3 application and Plotinus is enabled, you can press
+  When in a GTK 3 application and Plotinus is enabled, you can press
   <literal>Ctrl+Shift+P</literal> to open the command palette. The command
  palette provides a searchable list of all menu items in the application.
  </para>
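
A minimal sketch of enabling the module documented here, via the option defined in plotinus.nix above:

    # Enables the Plotinus GTK 3 plugin system-wide; Ctrl+Shift+P then opens
    # the searchable command palette in compatible applications.
    programs.plotinus.enable = true;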
diff --git a/nixos/modules/rename.nix b/nixos/modules/rename.nix
index 9e0ab60ca67..1fa91f05030 100644
--- a/nixos/modules/rename.nix
+++ b/nixos/modules/rename.nix
@@ -34,6 +34,7 @@ with lib;
     (mkRenamedOptionModule [ "services" "kubernetes" "etcd" "caFile" ] [ "services" "kubernetes" "apiserver" "etcd" "caFile" ])
     (mkRemovedOptionModule [ "services" "kubernetes" "kubelet" "applyManifests" ] "")
     (mkRemovedOptionModule [ "services" "kubernetes" "kubelet" "cadvisorPort" ] "")
+    (mkRemovedOptionModule [ "services" "kubernetes" "kubelet" "allowPrivileged" ] "")
     (mkRenamedOptionModule [ "services" "kubernetes" "proxy" "address" ] ["services" "kubernetes" "proxy" "bindAddress"])
     (mkRemovedOptionModule [ "services" "kubernetes" "verbose" ] "")
     (mkRenamedOptionModule [ "services" "logstash" "address" ] [ "services" "logstash" "listenAddress" ])
diff --git a/nixos/modules/services/cluster/kubernetes/addon-manager.nix b/nixos/modules/services/cluster/kubernetes/addon-manager.nix
index ad7d17c9c28..17f2dde31a7 100644
--- a/nixos/modules/services/cluster/kubernetes/addon-manager.nix
+++ b/nixos/modules/services/cluster/kubernetes/addon-manager.nix
@@ -62,50 +62,19 @@ in
       '';
     };
 
-    enable = mkEnableOption "Kubernetes addon manager";
-
-    kubeconfig = top.lib.mkKubeConfigOptions "Kubernetes addon manager";
-    bootstrapAddonsKubeconfig = top.lib.mkKubeConfigOptions "Kubernetes addon manager bootstrap";
+    enable = mkEnableOption "Whether to enable Kubernetes addon manager.";
   };
 
   ###### implementation
-  config = let
-
-    addonManagerPaths = filter (a: a != null) [
-      cfg.kubeconfig.caFile
-      cfg.kubeconfig.certFile
-      cfg.kubeconfig.keyFile
-    ];
-    bootstrapAddonsPaths = filter (a: a != null) [
-      cfg.bootstrapAddonsKubeconfig.caFile
-      cfg.bootstrapAddonsKubeconfig.certFile
-      cfg.bootstrapAddonsKubeconfig.keyFile
-    ];
-
-  in mkIf cfg.enable {
+  config = mkIf cfg.enable {
     environment.etc."kubernetes/addons".source = "${addons}/";
 
-    #TODO: Get rid of kube-addon-manager in the future for the following reasons
-    # - it is basically just a shell script wrapped around kubectl
-    # - it assumes that it is clusterAdmin or can gain clusterAdmin rights through serviceAccount
-    # - it is designed to be used with k8s system components only
-    # - it would be better with a more Nix-oriented way of managing addons
     systemd.services.kube-addon-manager = {
       description = "Kubernetes addon manager";
       wantedBy = [ "kubernetes.target" ];
-      after = [ "kube-node-online.target" ];
-      before = [ "kubernetes.target" ];
-      environment = {
-        ADDON_PATH = "/etc/kubernetes/addons/";
-        KUBECONFIG = top.lib.mkKubeConfig "kube-addon-manager" cfg.kubeconfig;
-      };
-      path = with pkgs; [ gawk kubectl ];
-      preStart = ''
-        until kubectl -n kube-system get serviceaccounts/default 2>/dev/null; do
-          echo kubectl -n kube-system get serviceaccounts/default: exit status $?
-          sleep 2
-        done
-      '';
+      after = [ "kube-apiserver.service" ];
+      environment.ADDON_PATH = "/etc/kubernetes/addons/";
+      path = [ pkgs.gawk ];
       serviceConfig = {
         Slice = "kubernetes.slice";
         ExecStart = "${top.package}/bin/kube-addons";
@@ -115,52 +84,8 @@ in
         Restart = "on-failure";
         RestartSec = 10;
       };
-      unitConfig.ConditionPathExists = addonManagerPaths;
     };
 
-    systemd.paths.kube-addon-manager = {
-      wantedBy = [ "kube-addon-manager.service" ];
-      pathConfig = {
-        PathExists = addonManagerPaths;
-        PathChanged = addonManagerPaths;
-      };
-    };
-
-    services.kubernetes.addonManager.kubeconfig.server = mkDefault top.apiserverAddress;
-
-    systemd.services.kube-addon-manager-bootstrap = mkIf (top.apiserver.enable && top.addonManager.bootstrapAddons != {}) {
-      wantedBy = [ "kube-control-plane-online.target" ];
-      after = [ "kube-apiserver.service" ];
-      before = [ "kube-control-plane-online.target" ];
-      path = [ pkgs.kubectl ];
-      environment = {
-        KUBECONFIG = top.lib.mkKubeConfig "kube-addon-manager-bootstrap" cfg.bootstrapAddonsKubeconfig;
-      };
-      preStart = with pkgs; let
-        files = mapAttrsToList (n: v: writeText "${n}.json" (builtins.toJSON v))
-          cfg.bootstrapAddons;
-      in ''
-        until kubectl auth can-i '*' '*' -q 2>/dev/null; do
-          echo kubectl auth can-i '*' '*': exit status $?
-          sleep 2
-        done
-
-        kubectl apply -f ${concatStringsSep " \\\n -f " files}
-      '';
-      script = "echo Ok";
-      unitConfig.ConditionPathExists = bootstrapAddonsPaths;
-    };
-
-    systemd.paths.kube-addon-manager-bootstrap = {
-      wantedBy = [ "kube-addon-manager-bootstrap.service" ];
-      pathConfig = {
-        PathExists = bootstrapAddonsPaths;
-        PathChanged = bootstrapAddonsPaths;
-      };
-    };
-
-    services.kubernetes.addonManager.bootstrapAddonsKubeconfig.server = mkDefault top.apiserverAddress;
-
     services.kubernetes.addonManager.bootstrapAddons = mkIf isRBACEnabled
     (let
      name = "system:kube-addon-manager";
diff --git a/nixos/modules/services/cluster/kubernetes/addons/dashboard.nix b/nixos/modules/services/cluster/kubernetes/addons/dashboard.nix
index 5117726bee9..70f96d75a46 100644
--- a/nixos/modules/services/cluster/kubernetes/addons/dashboard.nix
+++ b/nixos/modules/services/cluster/kubernetes/addons/dashboard.nix
@@ -169,23 +169,6 @@ in {
         };
       };
 
-      kubernetes-dashboard-cm = {
-        apiVersion = "v1";
-        kind = "ConfigMap";
-        metadata = {
-          labels = {
-            k8s-app = "kubernetes-dashboard";
-            # Allows editing resource and makes sure it is created first.
-            "addonmanager.kubernetes.io/mode" = "EnsureExists";
-          };
-          name = "kubernetes-dashboard-settings";
-          namespace = "kube-system";
-        };
-      };
-    };
-
-    services.kubernetes.addonManager.bootstrapAddons = mkMerge [{
-
       kubernetes-dashboard-sa = {
         apiVersion = "v1";
         kind = "ServiceAccount";
@@ -227,9 +210,20 @@ in {
         };
         type = "Opaque";
       };
-    }
-
-    (optionalAttrs cfg.rbac.enable
+      kubernetes-dashboard-cm = {
+        apiVersion = "v1";
+        kind = "ConfigMap";
+        metadata = {
+          labels = {
+            k8s-app = "kubernetes-dashboard";
+            # Allows editing resource and makes sure it is created first.
+            "addonmanager.kubernetes.io/mode" = "EnsureExists";
+          };
+          name = "kubernetes-dashboard-settings";
+          namespace = "kube-system";
+        };
+      };
+    } // (optionalAttrs cfg.rbac.enable
       (let
         subjects = [{
           kind = "ServiceAccount";
@@ -329,6 +323,6 @@ in {
             inherit subjects;
           };
         })
-    ))];
+    ));
   };
 }
diff --git a/nixos/modules/services/cluster/kubernetes/apiserver.nix b/nixos/modules/services/cluster/kubernetes/apiserver.nix
index f293dd79f42..33796bf2e08 100644
--- a/nixos/modules/services/cluster/kubernetes/apiserver.nix
+++ b/nixos/modules/services/cluster/kubernetes/apiserver.nix
@@ -290,32 +290,11 @@ in
   ###### implementation
   config = mkMerge [
 
-    (let
-
-      apiserverPaths = filter (a: a != null) [
-        cfg.clientCaFile
-        cfg.etcd.caFile
-        cfg.etcd.certFile
-        cfg.etcd.keyFile
-        cfg.kubeletClientCaFile
-        cfg.kubeletClientCertFile
-        cfg.kubeletClientKeyFile
-        cfg.serviceAccountKeyFile
-        cfg.tlsCertFile
-        cfg.tlsKeyFile
-      ];
-      etcdPaths = filter (a: a != null) [
-        config.services.etcd.trustedCaFile
-        config.services.etcd.certFile
-        config.services.etcd.keyFile
-      ];
-
-    in mkIf cfg.enable {
+    (mkIf cfg.enable {
         systemd.services.kube-apiserver = {
           description = "Kubernetes APIServer Service";
-          wantedBy = [ "kube-control-plane-online.target" ];
-          after = [ "certmgr.service" ];
-          before = [ "kube-control-plane-online.target" ];
+          wantedBy = [ "kubernetes.target" ];
+          after = [ "network.target" ];
           serviceConfig = {
             Slice = "kubernetes.slice";
             ExecStart = ''${top.package}/bin/kube-apiserver \
@@ -386,15 +365,6 @@ in
             Restart = "on-failure";
             RestartSec = 5;
           };
-          unitConfig.ConditionPathExists = apiserverPaths;
-        };
-
-        systemd.paths.kube-apiserver = mkIf top.apiserver.enable {
-          wantedBy = [ "kube-apiserver.service" ];
-          pathConfig = {
-            PathExists = apiserverPaths;
-            PathChanged = apiserverPaths;
-          };
         };
 
         services.etcd = {
@@ -408,18 +378,6 @@ in
           initialAdvertisePeerUrls = mkDefault ["https://${top.masterAddress}:2380"];
         };
 
-        systemd.services.etcd = {
-          unitConfig.ConditionPathExists = etcdPaths;
-        };
-
-        systemd.paths.etcd = {
-          wantedBy = [ "etcd.service" ];
-          pathConfig = {
-            PathExists = etcdPaths;
-            PathChanged = etcdPaths;
-          };
-        };
-
         services.kubernetes.addonManager.bootstrapAddons = mkIf isRBACEnabled {
 
           apiserver-kubelet-api-admin-crb = {
diff --git a/nixos/modules/services/cluster/kubernetes/controller-manager.nix b/nixos/modules/services/cluster/kubernetes/controller-manager.nix
index b94e8bd86d4..0b73d090f24 100644
--- a/nixos/modules/services/cluster/kubernetes/controller-manager.nix
+++ b/nixos/modules/services/cluster/kubernetes/controller-manager.nix
@@ -104,31 +104,11 @@ in
   };
 
   ###### implementation
-  config = let
-
-    controllerManagerPaths = filter (a: a != null) [
-      cfg.kubeconfig.caFile
-      cfg.kubeconfig.certFile
-      cfg.kubeconfig.keyFile
-      cfg.rootCaFile
-      cfg.serviceAccountKeyFile
-      cfg.tlsCertFile
-      cfg.tlsKeyFile
-    ];
-
-  in mkIf cfg.enable {
-    systemd.services.kube-controller-manager = rec {
+  config = mkIf cfg.enable {
+    systemd.services.kube-controller-manager = {
       description = "Kubernetes Controller Manager Service";
-      wantedBy = [ "kube-control-plane-online.target" ];
+      wantedBy = [ "kubernetes.target" ];
       after = [ "kube-apiserver.service" ];
-      before = [ "kube-control-plane-online.target" ];
-      environment.KUBECONFIG = top.lib.mkKubeConfig "kube-controller-manager" cfg.kubeconfig;
-      preStart = ''
-        until kubectl auth can-i get /api -q 2>/dev/null; do
-          echo kubectl auth can-i get /api: exit status $?
-          sleep 2
-        done
-      '';
       serviceConfig = {
         RestartSec = "30s";
         Restart = "on-failure";
@@ -140,7 +120,7 @@ in
             "--cluster-cidr=${cfg.clusterCidr}"} \
           ${optionalString (cfg.featureGates != [])
             "--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.featureGates}"} \
-          --kubeconfig=${environment.KUBECONFIG} \
+          --kubeconfig=${top.lib.mkKubeConfig "kube-controller-manager" cfg.kubeconfig} \
           --leader-elect=${boolToString cfg.leaderElect} \
           ${optionalString (cfg.rootCaFile!=null)
             "--root-ca-file=${cfg.rootCaFile}"} \
@@ -161,16 +141,7 @@ in
         User = "kubernetes";
         Group = "kubernetes";
       };
-      path = top.path ++ [ pkgs.kubectl ];
-      unitConfig.ConditionPathExists = controllerManagerPaths;
-    };
-
-    systemd.paths.kube-controller-manager = {
-      wantedBy = [ "kube-controller-manager.service" ];
-      pathConfig = {
-        PathExists = controllerManagerPaths;
-        PathChanged = controllerManagerPaths;
-      };
+      path = top.path;
     };
 
     services.kubernetes.pki.certs = with top.lib; {
diff --git a/nixos/modules/services/cluster/kubernetes/default.nix b/nixos/modules/services/cluster/kubernetes/default.nix
index 823cc1c35f4..3790ac9b691 100644
--- a/nixos/modules/services/cluster/kubernetes/default.nix
+++ b/nixos/modules/services/cluster/kubernetes/default.nix
@@ -256,29 +256,6 @@ in {
         wantedBy = [ "multi-user.target" ];
       };
 
-      systemd.targets.kube-control-plane-online = {
-        wantedBy = [ "kubernetes.target" ];
-        before = [ "kubernetes.target" ];
-      };
-
-      systemd.services.kube-control-plane-online = {
-        description = "Kubernetes control plane is online";
-        wantedBy = [ "kube-control-plane-online.target" ];
-        after = [ "kube-scheduler.service" "kube-controller-manager.service" ];
-        before = [ "kube-control-plane-online.target" ];
-        path = [ pkgs.curl ];
-        preStart = ''
-          until curl -Ssf ${cfg.apiserverAddress}/healthz do
-            echo curl -Ssf ${cfg.apiserverAddress}/healthz: exit status $?
-            sleep 3
-          done
-        '';
-        script = "echo Ok";
-        serviceConfig = {
-          TimeoutSec = "500";
-        };
-      };
-
       systemd.tmpfiles.rules = [
         "d /opt/cni/bin 0755 root root -"
         "d /run/kubernetes 0755 kubernetes kubernetes -"
@@ -302,8 +279,6 @@ in {
       services.kubernetes.apiserverAddress = mkDefault ("https://${if cfg.apiserver.advertiseAddress != null
                           then cfg.apiserver.advertiseAddress
                           else "${cfg.masterAddress}:${toString cfg.apiserver.securePort}"}");
-
-      services.kubernetes.kubeconfig.server = mkDefault cfg.apiserverAddress;
     })
   ];
 }
diff --git a/nixos/modules/services/cluster/kubernetes/flannel.nix b/nixos/modules/services/cluster/kubernetes/flannel.nix
index d9437427d6d..74d10d68437 100644
--- a/nixos/modules/services/cluster/kubernetes/flannel.nix
+++ b/nixos/modules/services/cluster/kubernetes/flannel.nix
@@ -14,36 +14,25 @@ let
     buildInputs = [ pkgs.makeWrapper ];
   } ''
     mkdir -p $out
-    cp ${pkgs.kubernetes.src}/cluster/centos/node/bin/mk-docker-opts.sh $out/mk-docker-opts.sh
 
     # bashInteractive needed for `compgen`
-    makeWrapper ${pkgs.bashInteractive}/bin/bash $out/mk-docker-opts --add-flags "$out/mk-docker-opts.sh"
+    makeWrapper ${pkgs.bashInteractive}/bin/bash $out/mk-docker-opts --add-flags "${pkgs.kubernetes}/bin/mk-docker-opts.sh"
   '';
 in
 {
   ###### interface
   options.services.kubernetes.flannel = {
-    enable = mkEnableOption "flannel networking";
-    kubeconfig = top.lib.mkKubeConfigOptions "Kubernetes flannel";
+    enable = mkEnableOption "enable flannel networking";
   };
 
   ###### implementation
-  config = let
-
-    flannelPaths = filter (a: a != null) [
-      cfg.kubeconfig.caFile
-      cfg.kubeconfig.certFile
-      cfg.kubeconfig.keyFile
-    ];
-    kubeconfig = top.lib.mkKubeConfig "flannel" cfg.kubeconfig;
-
-  in mkIf cfg.enable {
+  config = mkIf cfg.enable {
     services.flannel = {
 
       enable = mkDefault true;
       network = mkDefault top.clusterCidr;
-      inherit storageBackend kubeconfig;
-      nodeName = top.kubelet.hostname;
+      inherit storageBackend;
+      nodeName = config.services.kubernetes.kubelet.hostname;
     };
 
     services.kubernetes.kubelet = {
@@ -58,66 +47,24 @@ in
       }];
     };
 
-    systemd.services.mk-docker-opts = {
+    systemd.services."mk-docker-opts" = {
       description = "Pre-Docker Actions";
-      wantedBy = [ "flannel.target" ];
-      before = [ "flannel.target" ];
       path = with pkgs; [ gawk gnugrep ];
       script = ''
         ${mkDockerOpts}/mk-docker-opts -d /run/flannel/docker
         systemctl restart docker
       '';
-      unitConfig.ConditionPathExists = [ "/run/flannel/subnet.env" ];
       serviceConfig.Type = "oneshot";
     };
 
-    systemd.paths.flannel-subnet-env = {
-      wantedBy = [ "mk-docker-opts.service" ];
-      pathConfig = {
-        PathExists = [ "/run/flannel/subnet.env" ];
-        PathChanged = [ "/run/flannel/subnet.env" ];
-        Unit = "mk-docker-opts.service";
-      };
-    };
-
-    systemd.targets.flannel = {
-      wantedBy = [ "kube-node-online.target" ];
-      before = [ "kube-node-online.target" ];
-    };
-
-    systemd.services.flannel = {
-      wantedBy = [ "flannel.target" ];
-      after = [ "kubelet.target" ];
-      before = [ "flannel.target" ];
-      path = with pkgs; [ iptables kubectl ];
-      environment.KUBECONFIG = kubeconfig;
-      preStart = let
-        args = [
-          "--selector=kubernetes.io/hostname=${top.kubelet.hostname}"
-          # flannel exits if node is not registered yet, before that there is no podCIDR
-          "--output=jsonpath={.items[0].spec.podCIDR}"
-          # if jsonpath cannot be resolved exit with status 1
-          "--allow-missing-template-keys=false"
-        ];
-      in ''
-        until kubectl get nodes ${concatStringsSep " " args} 2>/dev/null; do
-          echo Waiting for ${top.kubelet.hostname} to be RegisteredNode
-          sleep 1
-        done
-      '';
-      unitConfig.ConditionPathExists = flannelPaths;
-    };
-
-    systemd.paths.flannel = {
+    systemd.paths."flannel-subnet-env" = {
       wantedBy = [ "flannel.service" ];
       pathConfig = {
-        PathExists = flannelPaths;
-        PathChanged = flannelPaths;
+        PathModified = "/run/flannel/subnet.env";
+        Unit = "mk-docker-opts.service";
       };
     };
 
-    services.kubernetes.flannel.kubeconfig.server = mkDefault top.apiserverAddress;
-
     systemd.services.docker = {
       environment.DOCKER_OPTS = "-b none";
       serviceConfig.EnvironmentFile = "-/run/flannel/docker";
@@ -144,6 +91,7 @@ in
 
    # give flannel some kubernetes rbac permissions if applicable
     services.kubernetes.addonManager.bootstrapAddons = mkIf ((storageBackend == "kubernetes") && (elem "RBAC" top.apiserver.authorizationMode)) {
+
       flannel-cr = {
         apiVersion = "rbac.authorization.k8s.io/v1beta1";
         kind = "ClusterRole";
@@ -179,6 +127,7 @@ in
           name = "flannel-client";
         }];
       };
+
     };
   };
 }
diff --git a/nixos/modules/services/cluster/kubernetes/kubelet.nix b/nixos/modules/services/cluster/kubernetes/kubelet.nix
index 4c5df96bcc6..250da4c807e 100644
--- a/nixos/modules/services/cluster/kubernetes/kubelet.nix
+++ b/nixos/modules/services/cluster/kubernetes/kubelet.nix
@@ -61,12 +61,6 @@ in
       type = str;
     };
 
-    allowPrivileged = mkOption {
-      description = "Whether to allow Kubernetes containers to request privileged mode.";
-      default = false;
-      type = bool;
-    };
-
     clusterDns = mkOption {
       description = "Use alternative DNS.";
       default = "10.1.0.1";
@@ -234,28 +228,21 @@ in
 
   ###### implementation
   config = mkMerge [
-    (let
-
-      kubeletPaths = filter (a: a != null) [
-        cfg.kubeconfig.caFile
-        cfg.kubeconfig.certFile
-        cfg.kubeconfig.keyFile
-        cfg.clientCaFile
-        cfg.tlsCertFile
-        cfg.tlsKeyFile
-      ];
-
-    in mkIf cfg.enable {
+    (mkIf cfg.enable {
       services.kubernetes.kubelet.seedDockerImages = [infraContainer];
 
       systemd.services.kubelet = {
         description = "Kubernetes Kubelet Service";
-        wantedBy = [ "kubelet.target" ];
-        after = [ "kube-control-plane-online.target" ];
-        before = [ "kubelet.target" ];
+        wantedBy = [ "kubernetes.target" ];
+        after = [ "network.target" "docker.service" "kube-apiserver.service" ];
         path = with pkgs; [ gitMinimal openssh docker utillinux iproute ethtool thin-provisioning-tools iptables socat ] ++ top.path;
         preStart = ''
-          rm -f /opt/cni/bin/* || true
+          ${concatMapStrings (img: ''
+            echo "Seeding docker image: ${img}"
+            docker load <${img}
+          '') cfg.seedDockerImages}
+
+          rm /opt/cni/bin/* || true
           ${concatMapStrings (package: ''
             echo "Linking cni package: ${package}"
             ln -fs ${package}/bin/* /opt/cni/bin
@@ -269,7 +256,6 @@ in
           RestartSec = "1000ms";
           ExecStart = ''${top.package}/bin/kubelet \
             --address=${cfg.address} \
-            --allow-privileged=${boolToString cfg.allowPrivileged} \
             --authentication-token-webhook \
             --authentication-token-webhook-cache-ttl="10s" \
             --authorization-mode=Webhook \
@@ -308,56 +294,6 @@ in
           '';
           WorkingDirectory = top.dataDir;
         };
-        unitConfig.ConditionPathExists = kubeletPaths;
-      };
-
-      systemd.paths.kubelet = {
-        wantedBy =  [ "kubelet.service" ];
-        pathConfig = {
-          PathExists = kubeletPaths;
-          PathChanged = kubeletPaths;
-        };
-      };
-
-      systemd.services.docker.before = [ "kubelet.service" ];
-
-      systemd.services.docker-seed-images = {
-        wantedBy = [ "docker.service" ];
-        after = [ "docker.service" ];
-        before = [ "kubelet.service" ];
-        path = with pkgs; [ docker ];
-        preStart = ''
-          ${concatMapStrings (img: ''
-            echo "Seeding docker image: ${img}"
-            docker load <${img}
-          '') cfg.seedDockerImages}
-        '';
-        script = "echo Ok";
-        serviceConfig.Type = "oneshot";
-        serviceConfig.RemainAfterExit = true;
-        serviceConfig.Slice = "kubernetes.slice";
-      };
-
-      systemd.services.kubelet-online = {
-        wantedBy = [ "kube-node-online.target" ];
-        after = [ "flannel.target" "kubelet.target" ];
-        before = [ "kube-node-online.target" ];
-        # it is complicated. flannel needs kubelet to run the pause container before
-        # it discusses the node CIDR with apiserver and afterwards configures and restarts
-        # dockerd. Until then prevent creating any pods because they have to be recreated anyway
-        # because the network of docker0 has been changed by flannel.
-        script = let
-          docker-env = "/run/flannel/docker";
-          flannel-date = "stat --print=%Y ${docker-env}";
-          docker-date = "systemctl show --property=ActiveEnterTimestamp --value docker";
-        in ''
-          until test -f ${docker-env} ; do sleep 1 ; done
-          while test `${flannel-date}` -gt `date +%s --date="$(${docker-date})"` ; do
-            sleep 1
-          done
-        '';
-        serviceConfig.Type = "oneshot";
-        serviceConfig.Slice = "kubernetes.slice";
       };
 
      # Always include cni plugins
@@ -404,16 +340,5 @@ in
       };
     })
 
-    {
-      systemd.targets.kubelet = {
-        wantedBy = [ "kube-node-online.target" ];
-        before = [ "kube-node-online.target" ];
-      };
-
-      systemd.targets.kube-node-online = {
-        wantedBy = [ "kubernetes.target" ];
-        before = [ "kubernetes.target" ];
-      };
-    }
   ];
 }
diff --git a/nixos/modules/services/cluster/kubernetes/pki.nix b/nixos/modules/services/cluster/kubernetes/pki.nix
index 47384ae50a0..733479e24c9 100644
--- a/nixos/modules/services/cluster/kubernetes/pki.nix
+++ b/nixos/modules/services/cluster/kubernetes/pki.nix
@@ -27,11 +27,12 @@ let
   certmgrAPITokenPath = "${top.secretsPath}/${cfsslAPITokenBaseName}";
   cfsslAPITokenLength = 32;
 
-  clusterAdminKubeconfig = with cfg.certs.clusterAdmin; {
-    server = top.apiserverAddress;
-    certFile = cert;
-    keyFile = key;
-  };
+  clusterAdminKubeconfig = with cfg.certs.clusterAdmin;
+    top.lib.mkKubeConfig "cluster-admin" {
+        server = top.apiserverAddress;
+        certFile = cert;
+        keyFile = key;
+    };
 
   remote = with config.services; "https://${kubernetes.masterAddress}:${toString cfssl.port}";
 in
@@ -118,11 +119,6 @@ in
     cfsslCertPathPrefix = "${config.services.cfssl.dataDir}/cfssl";
     cfsslCert = "${cfsslCertPathPrefix}.pem";
     cfsslKey = "${cfsslCertPathPrefix}-key.pem";
-
-    certmgrPaths = [
-      top.caFile
-      certmgrAPITokenPath
-    ];
   in
   {
 
@@ -172,40 +168,13 @@ in
         chown cfssl "${cfsslAPITokenPath}" && chmod 400 "${cfsslAPITokenPath}"
       '')]);
 
-    systemd.targets.cfssl-online = {
-      wantedBy = [ "network-online.target" ];
-      after = [ "cfssl.service" "network-online.target" "cfssl-online.service" ];
-    };
-
-    systemd.services.cfssl-online = {
-      description = "Wait for ${remote} to be reachable.";
-      wantedBy = [ "cfssl-online.target" ];
-      before = [ "cfssl-online.target" ];
-      path = [ pkgs.curl ];
-      preStart = ''
-        until curl --fail-early -fskd '{}' ${remote}/api/v1/cfssl/info -o /dev/null; do
-          echo curl ${remote}/api/v1/cfssl/info: exit status $?
-          sleep 2
-        done
-      '';
-      script = "echo Ok";
-      serviceConfig = {
-        TimeoutSec = "300";
-      };
-    };
-
     systemd.services.kube-certmgr-bootstrap = {
       description = "Kubernetes certmgr bootstrapper";
-      wantedBy = [ "cfssl-online.target" ];
-      after = [ "cfssl-online.target" ];
-      before = [ "certmgr.service" ];
-      path = with pkgs; [ curl cfssl ];
+      wantedBy = [ "certmgr.service" ];
+      after = [ "cfssl.target" ];
       script = concatStringsSep "\n" [''
         set -e
 
-        mkdir -p $(dirname ${certmgrAPITokenPath})
-        mkdir -p $(dirname ${top.caFile})
-
         # If there's a cfssl (cert issuer) running locally, then don't rely on the
         # user to manually paste it in place. Just symlink.
         # Otherwise, create the target file, ready for users to insert the token.
@@ -217,18 +186,15 @@ in
         fi
       ''
       (optionalString (cfg.pkiTrustOnBootstrap) ''
-        if [ ! -s "${top.caFile}" ]; then
-          until test -s ${top.caFile}.json; do
-            sleep 2
-            curl --fail-early -fskd '{}' ${remote}/api/v1/cfssl/info -o ${top.caFile}.json
-          done
-          cfssljson -f ${top.caFile}.json -stdout >${top.caFile}
-          rm ${top.caFile}.json
+        if [ ! -f "${top.caFile}" ] || [ $(cat "${top.caFile}" | wc -c) -lt 1 ]; then
+          ${pkgs.curl}/bin/curl --fail-early -f -kd '{}' ${remote}/api/v1/cfssl/info | \
+            ${pkgs.cfssl}/bin/cfssljson -stdout >${top.caFile}
         fi
       '')
       ];
       serviceConfig = {
-        TimeoutSec = "500";
+        RestartSec = "10s";
+        Restart = "on-failure";
       };
     };
 
@@ -264,28 +230,35 @@ in
           mapAttrs mkSpec cfg.certs;
       };
 
-      systemd.services.certmgr = {
-        wantedBy = [ "cfssl-online.target" ];
-        after = [ "cfssl-online.target" "kube-certmgr-bootstrap.service" ];
-        preStart = ''
-          while ! test -s ${certmgrAPITokenPath} ; do
-            sleep 1
-            echo Waiting for ${certmgrAPITokenPath}
-          done
-        '';
-        unitConfig.ConditionPathExists = certmgrPaths;
-      };
-
-      systemd.paths.certmgr = {
-        wantedBy = [ "certmgr.service" ];
-        pathConfig = {
-          PathExists = certmgrPaths;
-          PathChanged = certmgrPaths;
-        };
-      };
-
-      environment.etc.${cfg.etcClusterAdminKubeconfig}.source = mkIf (cfg.etcClusterAdminKubeconfig != null)
-        (top.lib.mkKubeConfig "cluster-admin" clusterAdminKubeconfig);
+      #TODO: Get rid of kube-addon-manager in the future for the following reasons
+      # - it is basically just a shell script wrapped around kubectl
+      # - it assumes that it is clusterAdmin or can gain clusterAdmin rights through serviceAccount
+      # - it is designed to be used with k8s system components only
+      # - it would be better with a more Nix-oriented way of managing addons
+      systemd.services.kube-addon-manager = mkIf top.addonManager.enable (mkMerge [{
+        environment.KUBECONFIG = with cfg.certs.addonManager;
+          top.lib.mkKubeConfig "addon-manager" {
+            server = top.apiserverAddress;
+            certFile = cert;
+            keyFile = key;
+          };
+        }
+
+        (optionalAttrs (top.addonManager.bootstrapAddons != {}) {
+          serviceConfig.PermissionsStartOnly = true;
+          preStart = with pkgs;
+          let
+            files = mapAttrsToList (n: v: writeText "${n}.json" (builtins.toJSON v))
+              top.addonManager.bootstrapAddons;
+          in
+          ''
+            export KUBECONFIG=${clusterAdminKubeconfig}
+            ${kubectl}/bin/kubectl apply -f ${concatStringsSep " \\\n -f " files}
+          '';
+        })]);
+
+      environment.etc.${cfg.etcClusterAdminKubeconfig}.source = mkIf (!isNull cfg.etcClusterAdminKubeconfig)
+        clusterAdminKubeconfig;
 
       environment.systemPackages = mkIf (top.kubelet.enable || top.proxy.enable) [
       (pkgs.writeScriptBin "nixos-kubernetes-node-join" ''
@@ -311,22 +284,38 @@ in
           exit 1
         fi
 
-        do_restart=$(test -s ${certmgrAPITokenPath} && echo -n y || echo -n n)
-
         echo $token > ${certmgrAPITokenPath}
         chmod 600 ${certmgrAPITokenPath}
 
-        if [ y = $do_restart ]; then
-          echo "Restarting certmgr..." >&1
-          systemctl restart certmgr
-        fi
+        echo "Restarting certmgr..." >&1
+        systemctl restart certmgr
+
+        echo "Waiting for certs to appear..." >&1
+
+        ${optionalString top.kubelet.enable ''
+          while [ ! -f ${cfg.certs.kubelet.cert} ]; do sleep 1; done
+          echo "Restarting kubelet..." >&1
+          systemctl restart kubelet
+        ''}
+
+        ${optionalString top.proxy.enable ''
+          while [ ! -f ${cfg.certs.kubeProxyClient.cert} ]; do sleep 1; done
+          echo "Restarting kube-proxy..." >&1
+          systemctl restart kube-proxy
+        ''}
 
-        echo "Node joined succesfully" >&1
+        ${optionalString top.flannel.enable ''
+          while [ ! -f ${cfg.certs.flannelClient.cert} ]; do sleep 1; done
+          echo "Restarting flannel..." >&1
+          systemctl restart flannel
+        ''}
+
+        echo "Node joined succesfully"
       '')];
 
       # isolate etcd on loopback at the master node
       # easyCerts doesn't support multimaster clusters anyway atm.
-      services.etcd = mkIf top.apiserver.enable (with cfg.certs.etcd; {
+      services.etcd = with cfg.certs.etcd; {
         listenClientUrls = ["https://127.0.0.1:2379"];
         listenPeerUrls = ["https://127.0.0.1:2380"];
         advertiseClientUrls = ["https://etcd.local:2379"];
@@ -335,11 +324,19 @@ in
         certFile = mkDefault cert;
         keyFile = mkDefault key;
         trustedCaFile = mkDefault caCert;
-      });
+      };
       networking.extraHosts = mkIf (config.services.etcd.enable) ''
         127.0.0.1 etcd.${top.addons.dns.clusterDomain} etcd.local
       '';
 
+      services.flannel = with cfg.certs.flannelClient; {
+        kubeconfig = top.lib.mkKubeConfig "flannel" {
+          server = top.apiserverAddress;
+          certFile = cert;
+          keyFile = key;
+        };
+      };
+
       services.kubernetes = {
 
         apiserver = mkIf top.apiserver.enable (with cfg.certs.apiServer; {
@@ -359,13 +356,6 @@ in
           proxyClientCertFile = mkDefault cfg.certs.apiserverProxyClient.cert;
           proxyClientKeyFile = mkDefault cfg.certs.apiserverProxyClient.key;
         });
-        addonManager = mkIf top.addonManager.enable {
-          kubeconfig = with cfg.certs.addonManager; {
-            certFile = mkDefault cert;
-            keyFile = mkDefault key;
-          };
-          bootstrapAddonsKubeconfig = clusterAdminKubeconfig;
-        };
         controllerManager = mkIf top.controllerManager.enable {
           serviceAccountKeyFile = mkDefault cfg.certs.serviceAccount.key;
           rootCaFile = cfg.certs.controllerManagerClient.caCert;
@@ -374,12 +364,6 @@ in
             keyFile = mkDefault key;
           };
         };
-        flannel = mkIf top.flannel.enable {
-          kubeconfig = with cfg.certs.flannelClient; {
-            certFile = cert;
-            keyFile = key;
-          };
-        };
         scheduler = mkIf top.scheduler.enable {
           kubeconfig = with cfg.certs.schedulerClient; {
             certFile = mkDefault cert;
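
The reworked nixos-kubernetes-node-join script above reads the cfssl API token on stdin, restarts certmgr, and then waits for the kubelet, kube-proxy and flannel certificates before restarting those services. Its intended invocation, as exercised by the kubernetes tests later in this diff:

    # on the master
    cp -f /var/lib/cfssl/apitoken.secret /tmp/shared/apitoken.secret
    # on the joining node
    cat /tmp/shared/apitoken.secret | nixos-kubernetes-node-join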
diff --git a/nixos/modules/services/cluster/kubernetes/proxy.nix b/nixos/modules/services/cluster/kubernetes/proxy.nix
index 23f4d97b703..bd4bf04ea83 100644
--- a/nixos/modules/services/cluster/kubernetes/proxy.nix
+++ b/nixos/modules/services/cluster/kubernetes/proxy.nix
@@ -45,28 +45,12 @@ in
   };
 
   ###### implementation
-  config = let
-
-    proxyPaths = filter (a: a != null) [
-      cfg.kubeconfig.caFile
-      cfg.kubeconfig.certFile
-      cfg.kubeconfig.keyFile
-    ];
-
-  in mkIf cfg.enable {
-    systemd.services.kube-proxy = rec {
+  config = mkIf cfg.enable {
+    systemd.services.kube-proxy = {
       description = "Kubernetes Proxy Service";
-      wantedBy = [ "kube-node-online.target" ];
-      after = [ "kubelet-online.service" ];
-      before = [ "kube-node-online.target" ];
-      environment.KUBECONFIG = top.lib.mkKubeConfig "kube-proxy" cfg.kubeconfig;
-      path = with pkgs; [ iptables conntrack_tools kubectl ];
-      preStart = ''
-        until kubectl auth can-i get nodes/${top.kubelet.hostname} -q 2>/dev/null; do
-          echo kubectl auth can-i get nodes/${top.kubelet.hostname}: exit status $?
-          sleep 2
-        done
-      '';
+      wantedBy = [ "kubernetes.target" ];
+      after = [ "kube-apiserver.service" ];
+      path = with pkgs; [ iptables conntrack_tools ];
       serviceConfig = {
         Slice = "kubernetes.slice";
         ExecStart = ''${top.package}/bin/kube-proxy \
@@ -75,7 +59,7 @@ in
             "--cluster-cidr=${top.clusterCidr}"} \
           ${optionalString (cfg.featureGates != [])
             "--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.featureGates}"} \
-          --kubeconfig=${environment.KUBECONFIG} \
+          --kubeconfig=${top.lib.mkKubeConfig "kube-proxy" cfg.kubeconfig} \
           ${optionalString (cfg.verbosity != null) "--v=${toString cfg.verbosity}"} \
           ${cfg.extraOpts}
         '';
@@ -83,15 +67,6 @@ in
         Restart = "on-failure";
         RestartSec = 5;
       };
-      unitConfig.ConditionPathExists = proxyPaths;
-    };
-
-    systemd.paths.kube-proxy = {
-      wantedBy = [ "kube-proxy.service" ];
-      pathConfig = {
-        PathExists = proxyPaths;
-        PathChanged = proxyPaths;
-      };
     };
 
     services.kubernetes.pki.certs = {
diff --git a/nixos/modules/services/cluster/kubernetes/scheduler.nix b/nixos/modules/services/cluster/kubernetes/scheduler.nix
index a0e48454295..5f6113227d9 100644
--- a/nixos/modules/services/cluster/kubernetes/scheduler.nix
+++ b/nixos/modules/services/cluster/kubernetes/scheduler.nix
@@ -56,35 +56,18 @@ in
   };
 
   ###### implementation
-  config =  let
-
-    schedulerPaths = filter (a: a != null) [
-      cfg.kubeconfig.caFile
-      cfg.kubeconfig.certFile
-      cfg.kubeconfig.keyFile
-    ];
-
-  in mkIf cfg.enable {
-    systemd.services.kube-scheduler = rec {
+  config = mkIf cfg.enable {
+    systemd.services.kube-scheduler = {
       description = "Kubernetes Scheduler Service";
-      wantedBy = [ "kube-control-plane-online.target" ];
+      wantedBy = [ "kubernetes.target" ];
       after = [ "kube-apiserver.service" ];
-      before = [ "kube-control-plane-online.target" ];
-      environment.KUBECONFIG = top.lib.mkKubeConfig "kube-scheduler" cfg.kubeconfig;
-      path = [ pkgs.kubectl ];
-      preStart = ''
-        until kubectl auth can-i get /api -q 2>/dev/null; do
-          echo kubectl auth can-i get /api: exit status $?
-          sleep 2
-        done
-      '';
       serviceConfig = {
         Slice = "kubernetes.slice";
         ExecStart = ''${top.package}/bin/kube-scheduler \
           --address=${cfg.address} \
           ${optionalString (cfg.featureGates != [])
             "--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.featureGates}"} \
-          --kubeconfig=${environment.KUBECONFIG} \
+          --kubeconfig=${top.lib.mkKubeConfig "kube-scheduler" cfg.kubeconfig} \
           --leader-elect=${boolToString cfg.leaderElect} \
           --port=${toString cfg.port} \
           ${optionalString (cfg.verbosity != null) "--v=${toString cfg.verbosity}"} \
@@ -96,15 +79,6 @@ in
         Restart = "on-failure";
         RestartSec = 5;
       };
-      unitConfig.ConditionPathExists = schedulerPaths;
-    };
-
-    systemd.paths.kube-scheduler = {
-      wantedBy = [ "kube-scheduler.service" ];
-      pathConfig = {
-        PathExists = schedulerPaths;
-        PathChanged = schedulerPaths;
-      };
     };
 
     services.kubernetes.pki.certs = {
diff --git a/nixos/modules/services/databases/postgresql.nix b/nixos/modules/services/databases/postgresql.nix
index 10250bb5193..1ed4d3290ce 100644
--- a/nixos/modules/services/databases/postgresql.nix
+++ b/nixos/modules/services/databases/postgresql.nix
@@ -81,6 +81,10 @@ in
         default = "";
         description = ''
           Defines the mapping from system users to database users.
+
+          The general form is:
+
+          map-name system-username database-username
         '';
       };
 
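
A short example of the mapping format documented above (map and user names are hypothetical):

    services.postgresql.identMap = ''
      # map-name     system-username  database-username
      superuser-map  root             postgres
    '';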
diff --git a/nixos/modules/services/editors/emacs.xml b/nixos/modules/services/editors/emacs.xml
index 8ced302bad1..03483f69fa2 100644
--- a/nixos/modules/services/editors/emacs.xml
+++ b/nixos/modules/services/editors/emacs.xml
@@ -59,7 +59,7 @@
        <para>
         The latest stable version of Emacs 25 using the
         <link
-                xlink:href="http://www.gtk.org">GTK+ 2</link>
+                xlink:href="http://www.gtk.org">GTK 2</link>
         widget toolkit.
        </para>
       </listitem>
@@ -321,7 +321,7 @@ https://nixos.org/nixpkgs/manual/#sec-modify-via-packageOverrides
    <para>
     If you want, you can tweak the Emacs package itself from your
     <filename>emacs.nix</filename>. For example, if you want to have a
-    GTK+3-based Emacs instead of the default GTK+2-based binary and remove the
+    GTK 3-based Emacs instead of the default GTK 2-based binary and remove the
    automatically generated <filename>emacs.desktop</filename> (useful if you
     only use <command>emacsclient</command>), you can change your file
     <filename>emacs.nix</filename> in this way:
@@ -349,7 +349,7 @@ in [...]
 
    <para>
     After building this file as shown in <xref linkend="ex-emacsNix" />, you
-    will get an GTK3-based Emacs binary pre-loaded with your favorite packages.
+    will get a GTK 3-based Emacs binary pre-loaded with your favorite packages.
    </para>
   </section>
  </section>
diff --git a/nixos/modules/services/misc/zookeeper.nix b/nixos/modules/services/misc/zookeeper.nix
index 50c84e3c6b8..5d91e44a199 100644
--- a/nixos/modules/services/misc/zookeeper.nix
+++ b/nixos/modules/services/misc/zookeeper.nix
@@ -121,6 +121,7 @@ in {
 
     systemd.tmpfiles.rules = [
       "d '${cfg.dataDir}' 0700 zookeeper - - -"
+      "Z '${cfg.dataDir}' 0700 zookeeper - - -"
     ];
 
     systemd.services.zookeeper = {
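
In tmpfiles.d(5) terms, the existing d rule only creates dataDir with the given owner and mode, while the added Z rule also recursively adjusts ownership and permissions of anything already present beneath it, so state left over from earlier deployments ends up owned by zookeeper as well.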
diff --git a/nixos/modules/services/network-filesystems/ceph.nix b/nixos/modules/services/network-filesystems/ceph.nix
index 0191b0640f0..3dc5b8feef6 100644
--- a/nixos/modules/services/network-filesystems/ceph.nix
+++ b/nixos/modules/services/network-filesystems/ceph.nix
@@ -3,18 +3,18 @@
 with lib;
 
 let
-  ceph = pkgs.ceph;
   cfg  = config.services.ceph;
+
   # function that translates "camelCaseOptions" to "camel case options", credits to tilpner in #nixos@freenode
-  translateOption = replaceStrings upperChars (map (s: " ${s}") lowerChars);
-  generateDaemonList = (daemonType: daemons: extraServiceConfig:
-    mkMerge (
-      map (daemon:
-        { "ceph-${daemonType}-${daemon}" = generateServiceFile daemonType daemon cfg.global.clusterName ceph extraServiceConfig; }
-      ) daemons
-    )
-  );
-  generateServiceFile = (daemonType: daemonId: clusterName: ceph: extraServiceConfig: {
+  expandCamelCase = replaceStrings upperChars (map (s: " ${s}") lowerChars);
+  expandCamelCaseAttrs = mapAttrs' (name: value: nameValuePair (expandCamelCase name) value);
+
+  makeServices = (daemonType: daemonIds: extraServiceConfig:
+    mkMerge (map (daemonId:
+      { "ceph-${daemonType}-${daemonId}" = makeService daemonType daemonId cfg.global.clusterName pkgs.ceph extraServiceConfig; })
+      daemonIds));
+
+  makeService = (daemonType: daemonId: clusterName: ceph: extraServiceConfig: {
     enable = true;
     description = "Ceph ${builtins.replaceStrings lowerChars upperChars daemonType} daemon ${daemonId}";
     after = [ "network-online.target" "time-sync.target" ] ++ optional (daemonType == "osd") "ceph-mon.target";
@@ -34,23 +34,29 @@ let
       Restart = "on-failure";
       StartLimitBurst = "5";
       StartLimitInterval = "30min";
-      ExecStart = "${ceph.out}/bin/${if daemonType == "rgw" then "radosgw" else "ceph-${daemonType}"} -f --cluster ${clusterName} --id ${if daemonType == "rgw" then "client.${daemonId}" else daemonId} --setuser ceph --setgroup ceph";
+      ExecStart = ''${ceph.out}/bin/${if daemonType == "rgw" then "radosgw" else "ceph-${daemonType}"} \
+                    -f --cluster ${clusterName} --id ${daemonId} --setuser ceph \
+                    --setgroup ${if daemonType == "osd" then "disk" else "ceph"}'';
     } // extraServiceConfig
-      // optionalAttrs (daemonType == "osd") { ExecStartPre = "${ceph.out}/libexec/ceph/ceph-osd-prestart.sh --id ${daemonId} --cluster ${clusterName}"; };
-    } // optionalAttrs (builtins.elem daemonType [ "mds" "mon" "rgw" "mgr" ]) { preStart = ''
+      // optionalAttrs (daemonType == "osd") { ExecStartPre = ''${ceph.lib}/libexec/ceph/ceph-osd-prestart.sh \
+                                                              --id ${daemonId} --cluster ${clusterName}''; };
+    } // optionalAttrs (builtins.elem daemonType [ "mds" "mon" "rgw" "mgr" ]) {
+      preStart = ''
         daemonPath="/var/lib/ceph/${if daemonType == "rgw" then "radosgw" else daemonType}/${clusterName}-${daemonId}"
-        if [ ! -d ''$daemonPath ]; then
-          mkdir -m 755 -p ''$daemonPath
-          chown -R ceph:ceph ''$daemonPath
+        if [ ! -d $daemonPath ]; then
+          mkdir -m 755 -p $daemonPath
+          chown -R ceph:ceph $daemonPath
         fi
       '';
     } // optionalAttrs (daemonType == "osd") { path = [ pkgs.getopt ]; }
   );
-  generateTargetFile = (daemonType:
+
+  makeTarget = (daemonType:
     {
       "ceph-${daemonType}" = {
         description = "Ceph target allowing to start/stop all ceph-${daemonType} services at once";
         partOf = [ "ceph.target" ];
+        wantedBy = [ "ceph.target" ];
         before = [ "ceph.target" ];
       };
     }
@@ -82,6 +88,14 @@ in
         '';
       };
 
+      mgrModulePath = mkOption {
+        type = types.path;
+        default = "${pkgs.ceph.lib}/lib/ceph/mgr";
+        description = ''
+          Path at which to find ceph-mgr modules.
+        '';
+      };
+
       monInitialMembers = mkOption {
         type = with types; nullOr commas;
         default = null;
@@ -157,6 +171,27 @@ in
           A comma-separated list of subnets that will be used as cluster networks in the cluster.
         '';
       };
+
+      rgwMimeTypesFile = mkOption {
+        type = with types; nullOr path;
+        default = "${pkgs.mime-types}/etc/mime.types";
+        description = ''
+          Path to mime types used by radosgw.
+        '';
+      };
+    };
+
+    extraConfig = mkOption {
+      type = with types; attrsOf str;
+      default = {};
+      example = ''
+        {
+          "ms bind ipv6" = "true";
+        };
+      '';
+      description = ''
+        Extra configuration to add to the global section. Use for setting values that are common to all daemons in the cluster.
+      '';
     };
 
     mgr = {
@@ -216,6 +251,7 @@ in
           to the id part in ceph i.e. [ "name1" ] would result in osd.name1
         '';
       };
+
       extraConfig = mkOption {
         type = with types; attrsOf str;
         default = {
@@ -296,9 +332,6 @@ in
       { assertion = cfg.global.fsid != "";
         message = "fsid has to be set to a valid uuid for the cluster to function";
       }
-      { assertion = cfg.mgr.enable == true;
-        message = "ceph 12.x requires atleast 1 MGR daemon enabled for the cluster to function";
-      }
       { assertion = cfg.mon.enable == true -> cfg.mon.daemons != [];
         message = "have to set id of atleast one MON if you're going to enable Monitor";
       }
@@ -317,14 +350,12 @@ in
       ''Not setting up a list of members in monInitialMembers requires that you set the host variable for each mon daemon or else the cluster won't function'';
 
     environment.etc."ceph/ceph.conf".text = let
-      # Translate camelCaseOptions to the expected camel case option for ceph.conf
-      translatedGlobalConfig = mapAttrs' (name: value: nameValuePair (translateOption name) value) cfg.global;
       # Merge the extraConfig set for mgr daemons, as mgr don't have their own section
-      globalAndMgrConfig = translatedGlobalConfig // optionalAttrs cfg.mgr.enable cfg.mgr.extraConfig;
+      globalSection = expandCamelCaseAttrs (cfg.global // cfg.extraConfig // optionalAttrs cfg.mgr.enable cfg.mgr.extraConfig);
       # Remove all name-value pairs with null values from the attribute set to avoid making empty sections in the ceph.conf
-      globalConfig = mapAttrs' (name: value: nameValuePair (translateOption name) value) (filterAttrs (name: value: value != null) globalAndMgrConfig);
+      globalSection' = filterAttrs (name: value: value != null) globalSection;
       totalConfig = {
-          global = globalConfig;
+          global = globalSection';
         } // optionalAttrs (cfg.mon.enable && cfg.mon.extraConfig != {}) { mon = cfg.mon.extraConfig; }
           // optionalAttrs (cfg.mds.enable && cfg.mds.extraConfig != {}) { mds = cfg.mds.extraConfig; }
           // optionalAttrs (cfg.osd.enable && cfg.osd.extraConfig != {}) { osd = cfg.osd.extraConfig; }
@@ -336,8 +367,9 @@ in
       name = "ceph";
       uid = config.ids.uids.ceph;
       description = "Ceph daemon user";
+      group = "ceph";
+      extraGroups = [ "disk" ];
     };
-
     users.groups = singleton {
       name = "ceph";
       gid = config.ids.gids.ceph;
@@ -345,22 +377,26 @@ in
 
     systemd.services = let
       services = []
-        ++ optional cfg.mon.enable (generateDaemonList "mon" cfg.mon.daemons { RestartSec = "10"; })
-        ++ optional cfg.mds.enable (generateDaemonList "mds" cfg.mds.daemons { StartLimitBurst = "3"; })
-        ++ optional cfg.osd.enable (generateDaemonList "osd" cfg.osd.daemons { StartLimitBurst = "30"; RestartSec = "20s"; })
-        ++ optional cfg.rgw.enable (generateDaemonList "rgw" cfg.rgw.daemons { })
-        ++ optional cfg.mgr.enable (generateDaemonList "mgr" cfg.mgr.daemons { StartLimitBurst = "3"; });
+        ++ optional cfg.mon.enable (makeServices "mon" cfg.mon.daemons { RestartSec = "10"; })
+        ++ optional cfg.mds.enable (makeServices "mds" cfg.mds.daemons { StartLimitBurst = "3"; })
+        ++ optional cfg.osd.enable (makeServices "osd" cfg.osd.daemons { StartLimitBurst = "30";
+                                                                         RestartSec = "20s";
+                                                                         PrivateDevices = "no"; # osd needs disk access
+                                                                       })
+        ++ optional cfg.rgw.enable (makeServices "rgw" cfg.rgw.daemons { })
+        ++ optional cfg.mgr.enable (makeServices "mgr" cfg.mgr.daemons { StartLimitBurst = "3"; });
       in
         mkMerge services;
 
     systemd.targets = let
       targets = [
-        { ceph = { description = "Ceph target allowing to start/stop all ceph service instances at once"; }; }
-      ] ++ optional cfg.mon.enable (generateTargetFile "mon")
-        ++ optional cfg.mds.enable (generateTargetFile "mds")
-        ++ optional cfg.osd.enable (generateTargetFile "osd")
-        ++ optional cfg.rgw.enable (generateTargetFile "rgw")
-        ++ optional cfg.mgr.enable (generateTargetFile "mgr");
+        { "ceph" = { description = "Ceph target allowing to start/stop all ceph service instances at once";
+                     wantedBy = [ "multi-user.target" ]; }; }
+      ] ++ optional cfg.mon.enable (makeTarget "mon")
+        ++ optional cfg.mds.enable (makeTarget "mds")
+        ++ optional cfg.osd.enable (makeTarget "osd")
+        ++ optional cfg.rgw.enable (makeTarget "rgw")
+        ++ optional cfg.mgr.enable (makeTarget "mgr");
       in
         mkMerge targets;
 
diff --git a/nixos/modules/services/web-servers/darkhttpd.nix b/nixos/modules/services/web-servers/darkhttpd.nix
index 80870118c33..d6649fd472d 100644
--- a/nixos/modules/services/web-servers/darkhttpd.nix
+++ b/nixos/modules/services/web-servers/darkhttpd.nix
@@ -67,7 +67,7 @@ in {
       wantedBy = [ "multi-user.target" ];
       serviceConfig = {
         DynamicUser = true;
-        ExecStart = "${cfg.package}/bin/darkhttpd ${args}";
+        ExecStart = "${pkgs.darkhttpd}/bin/darkhttpd ${args}";
         AmbientCapabilities = lib.mkIf (cfg.port < 1024) [ "CAP_NET_BIND_SERVICE" ];
         Restart = "on-failure";
         RestartSec = "2s";
diff --git a/nixos/modules/services/x11/desktop-managers/enlightenment.nix b/nixos/modules/services/x11/desktop-managers/enlightenment.nix
index 527e4b18045..9914b668709 100644
--- a/nixos/modules/services/x11/desktop-managers/enlightenment.nix
+++ b/nixos/modules/services/x11/desktop-managers/enlightenment.nix
@@ -31,7 +31,7 @@ in
       e.efl e.enlightenment
       e.terminology e.econnman
       pkgs.xorg.xauth # used by kdesu
-      pkgs.gtk2 # To get GTK+'s themes.
+      pkgs.gtk2 # To get GTK's themes.
       pkgs.tango-icon-theme
 
       pkgs.gnome2.gnome_icon_theme
@@ -48,7 +48,7 @@ in
     services.xserver.desktopManager.session = [
     { name = "Enlightenment";
       start = ''
-        # Set GTK_DATA_PREFIX so that GTK+ can find the themes
+        # Set GTK_DATA_PREFIX so that GTK can find the themes
         export GTK_DATA_PREFIX=${config.system.path}
         # find theme engines
         export GTK_PATH=${config.system.path}/lib/gtk-3.0:${config.system.path}/lib/gtk-2.0
diff --git a/nixos/modules/services/x11/desktop-managers/mate.nix b/nixos/modules/services/x11/desktop-managers/mate.nix
index 6a2aa650c0b..a9ca945fc66 100644
--- a/nixos/modules/services/x11/desktop-managers/mate.nix
+++ b/nixos/modules/services/x11/desktop-managers/mate.nix
@@ -48,7 +48,7 @@ in
       name = "mate";
       bgSupport = true;
       start = ''
-        # Set GTK_DATA_PREFIX so that GTK+ can find the themes
+        # Set GTK_DATA_PREFIX so that GTK can find the themes
         export GTK_DATA_PREFIX=${config.system.path}
 
         # Find theme engines
diff --git a/nixos/modules/services/x11/desktop-managers/xfce.nix b/nixos/modules/services/x11/desktop-managers/xfce.nix
index 1102f73d1ac..e3249aef50c 100644
--- a/nixos/modules/services/x11/desktop-managers/xfce.nix
+++ b/nixos/modules/services/x11/desktop-managers/xfce.nix
@@ -48,7 +48,7 @@ in
 
   config = mkIf cfg.enable {
     environment.systemPackages = with pkgs.xfce // pkgs; [
-      # Get GTK+ themes and gtk-update-icon-cache
+      # Get GTK themes and gtk-update-icon-cache
       gtk2.out
 
       # Supplies some abstract icons such as:
@@ -107,10 +107,10 @@ in
       start = ''
         ${cfg.extraSessionCommands}
 
-        # Set GTK_PATH so that GTK+ can find the theme engines.
+        # Set GTK_PATH so that GTK can find the theme engines.
         export GTK_PATH="${config.system.path}/lib/gtk-2.0:${config.system.path}/lib/gtk-3.0"
 
-        # Set GTK_DATA_PREFIX so that GTK+ can find the Xfce themes.
+        # Set GTK_DATA_PREFIX so that GTK can find the Xfce themes.
         export GTK_DATA_PREFIX=${config.system.path}
 
         ${pkgs.runtimeShell} ${pkgs.xfce.xinitrc} &
diff --git a/nixos/modules/services/x11/desktop-managers/xfce4-14.nix b/nixos/modules/services/x11/desktop-managers/xfce4-14.nix
index 16329c093f9..55c88223e78 100644
--- a/nixos/modules/services/x11/desktop-managers/xfce4-14.nix
+++ b/nixos/modules/services/x11/desktop-managers/xfce4-14.nix
@@ -114,10 +114,10 @@ in
       name = "xfce4-14";
       bgSupport = true;
       start = ''
-        # Set GTK_PATH so that GTK+ can find the theme engines.
+        # Set GTK_PATH so that GTK can find the theme engines.
         export GTK_PATH="${config.system.path}/lib/gtk-2.0:${config.system.path}/lib/gtk-3.0"
 
-        # Set GTK_DATA_PREFIX so that GTK+ can find the Xfce themes.
+        # Set GTK_DATA_PREFIX so that GTK can find the Xfce themes.
         export GTK_DATA_PREFIX=${config.system.path}
 
         ${pkgs.runtimeShell} ${pkgs.xfce4-14.xinitrc} &
diff --git a/nixos/modules/virtualisation/amazon-image.nix b/nixos/modules/virtualisation/amazon-image.nix
index 0c4ad90b4eb..aadfc5add35 100644
--- a/nixos/modules/virtualisation/amazon-image.nix
+++ b/nixos/modules/virtualisation/amazon-image.nix
@@ -25,6 +25,9 @@ in
       { assertion = cfg.hvm;
         message = "Paravirtualized EC2 instances are no longer supported.";
       }
+      { assertion = cfg.efi -> cfg.hvm;
+        message = "EC2 instances using EFI must be HVM instances.";
+      }
     ];
 
     boot.growPartition = cfg.hvm;
@@ -35,6 +38,11 @@ in
       autoResize = true;
     };
 
+    fileSystems."/boot" = mkIf cfg.efi {
+      device = "/dev/disk/by-label/ESP";
+      fsType = "vfat";
+    };
+
     boot.extraModulePackages = [
       config.boot.kernelPackages.ena
     ];
@@ -50,8 +58,10 @@ in
 
     # Generate a GRUB menu.  Amazon's pv-grub uses this to boot our kernel/initrd.
     boot.loader.grub.version = if cfg.hvm then 2 else 1;
-    boot.loader.grub.device = if cfg.hvm then "/dev/xvda" else "nodev";
+    boot.loader.grub.device = if (cfg.hvm && !cfg.efi) then "/dev/xvda" else "nodev";
     boot.loader.grub.extraPerEntryConfig = mkIf (!cfg.hvm) "root (hd0)";
+    boot.loader.grub.efiSupport = cfg.efi;
+    boot.loader.grub.efiInstallAsRemovable = cfg.efi;
     boot.loader.timeout = 0;
 
     boot.initrd.network.enable = true;
@@ -137,7 +147,7 @@ in
     networking.timeServers = [ "169.254.169.123" ];
 
     # udisks has become too bloated to have in a headless system
-    # (e.g. it depends on GTK+).
+    # (e.g. it depends on GTK).
     services.udisks2.enable = false;
   };
 }
diff --git a/nixos/modules/virtualisation/amazon-options.nix b/nixos/modules/virtualisation/amazon-options.nix
index 15de8638bba..2e807131e93 100644
--- a/nixos/modules/virtualisation/amazon-options.nix
+++ b/nixos/modules/virtualisation/amazon-options.nix
@@ -1,4 +1,4 @@
-{ config, lib, ... }:
+{ config, lib, pkgs, ... }:
 {
   options = {
     ec2 = {
@@ -9,6 +9,13 @@
          Whether the EC2 instance is an HVM instance.
         '';
       };
+      efi = lib.mkOption {
+        default = pkgs.stdenv.hostPlatform.isAarch64;
+        internal = true;
+        description = ''
+          Whether the EC2 instance is using EFI.
+        '';
+      };
     };
   };
 }
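
Since ec2.efi is internal and defaults to true only on aarch64, x86_64 images keep booting via legacy GRUB; forcing EFI elsewhere is possible, but per the new assertion above only together with HVM. A sketch:

    { ec2.hvm = true; ec2.efi = true; }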
diff --git a/nixos/modules/virtualisation/railcar.nix b/nixos/modules/virtualisation/railcar.nix
new file mode 100644
index 00000000000..8b643e3b6d6
--- /dev/null
+++ b/nixos/modules/virtualisation/railcar.nix
@@ -0,0 +1,125 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.railcar;
+  generateUnit = name: containerConfig:
+    let
+      container = pkgs.ociTools.buildContainer {
+        args = [
+          (pkgs.writeShellScript "run.sh" containerConfig.cmd).outPath
+        ];
+      };
+    in
+      nameValuePair "railcar-${name}" {
+        enable = true;
+        wantedBy = [ "multi-user.target" ];
+        serviceConfig = {
+            ExecStart = ''
+              ${cfg.package}/bin/railcar -r ${cfg.stateDir} run ${name} -b ${container}
+            '';
+            Type = containerConfig.runType;
+          };
+      };
+  mount = with types; (submodule {
+    options = {
+      type = mkOption {
+        type = string;
+        default = "none";
+        description = ''
+          The type of the filesystem to be mounted.
+          Linux: filesystem types supported by the kernel as listed in 
+          `/proc/filesystems` (e.g., "minix", "ext2", "ext3", "jfs", "xfs", 
+          "reiserfs", "msdos", "proc", "nfs", "iso9660"). For bind mounts 
+          (when options include either bind or rbind), the type is a dummy,
+          often "none" (not listed in /proc/filesystems).
+        '';
+      };
+      source = mkOption {
+        type = string;
+        description = "Source for the in-container mount";
+      };
+      options = mkOption {
+        type = loaOf (string);
+        default = [ "bind" ];
+        description = ''
+          Mount options of the filesystem to be used.
+        
+          Supported options are listed in the mount(8) man page. Note that
+          both filesystem-independent and filesystem-specific options 
+          are listed.
+        '';
+      };
+    };
+  });
+in
+{
+  options.services.railcar = {
+    enable = mkEnableOption "railcar";
+
+    containers = mkOption {
+      default = {};
+      description = "Declarative container configuration";
+      type = with types; loaOf (submodule ({ name, config, ... }: {
+        options = {
+          cmd = mkOption {
+            type = types.string;
+            description = "Command or script to run inside the container";
+          };
+
+          mounts = mkOption {
+            type = with types; attrsOf mount;
+            default = {};
+            description = ''
+              A set of mounts inside the container.
+
+              The defaults have been chosen for simple bind mounts, meaning
+              that you only need to provide the "source" parameter.
+            '';
+            example = ''
+              { "/data" = { source = "/var/lib/data"; }; }
+            '';
+          };
+
+          runType = mkOption {
+            type = types.string;
+            default = "oneshot";
+            description = "The systemd service run type";
+          };
+
+          os = mkOption {
+            type = types.string;
+            default = "linux";
+            description = "OS type of the container";
+          };
+
+          arch = mkOption {
+            type = types.string;
+            default = "x86_64";
+            description = "Computer architecture type of the container";
+          };
+        };
+      }));
+    };
+
+    stateDir = mkOption {
+      type = types.path;
+      default = ''/var/railcar'';
+      description = "Railcar persistent state directory";
+    };
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.railcar;
+      description = "Railcar package to use";
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services = flip mapAttrs' cfg.containers (name: containerConfig:
+      generateUnit name containerConfig
+    );
+  };
+}
+
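
A minimal configuration exercising the new module might look like this (container name, command and mount source are illustrative):

    services.railcar = {
      enable = true;
      containers.hello = {
        cmd = "echo hello from railcar";
        # the mount defaults give a plain bind mount, so source alone suffices
        mounts."/data" = { source = "/var/lib/hello"; };
      };
    };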
diff --git a/nixos/release.nix b/nixos/release.nix
index df2c52ccd0b..c7c60965890 100644
--- a/nixos/release.nix
+++ b/nixos/release.nix
@@ -196,6 +196,22 @@ in rec {
   );
 
 
+  # A disk image that can be imported to Amazon EC2 and registered as an AMI
+  amazonImage = forMatchingSystems [ "x86_64-linux" "aarch64-linux" ] (system:
+
+    with import nixpkgs { inherit system; };
+
+    hydraJob ((import lib/eval-config.nix {
+      inherit system;
+      modules =
+        [ versionModule
+          ./maintainers/scripts/ec2/amazon-image.nix
+        ];
+    }).config.system.build.amazonImage)
+
+  );
+
+
   # Ensure that all packages used by the minimal NixOS config end up in the channel.
   dummy = forAllSystems (system: pkgs.runCommand "dummy"
     { toplevel = (import lib/eval-config.nix {
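
With this job in place the image can also be built straight from a checkout; assuming the attribute layout produced by forMatchingSystems above, that is:

    nix-build nixos/release.nix -A amazonImage.x86_64-linux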
diff --git a/nixos/tests/ceph.nix b/nixos/tests/ceph.nix
index 8722ea33ec5..0706b68075c 100644
--- a/nixos/tests/ceph.nix
+++ b/nixos/tests/ceph.nix
@@ -1,4 +1,4 @@
-import ./make-test.nix ({pkgs, ...}: {
+import ./make-test.nix ({pkgs, lib, ...}: {
   name = "All-in-one-basic-ceph-cluster";
   meta = with pkgs.stdenv.lib.maintainers; {
     maintainers = [ lejonet ];
@@ -7,6 +7,7 @@ import ./make-test.nix ({pkgs, ...}: {
   nodes = {
     aio = { pkgs, ... }: {
       virtualisation = {
+        memorySize = 1536;
         emptyDiskImages = [ 20480 20480 ];
         vlans = [ 1 ];
       };
@@ -24,9 +25,6 @@ import ./make-test.nix ({pkgs, ...}: {
         ceph
         xfsprogs
       ];
-      nixpkgs.config.packageOverrides = super: {
-        ceph = super.ceph.override({ nss = super.nss; libxfs = super.libxfs; libaio = super.libaio; jemalloc = super.jemalloc; });
-      };
 
       boot.kernelModules = [ "xfs" ];
 
@@ -51,6 +49,9 @@ import ./make-test.nix ({pkgs, ...}: {
         enable = true;
         daemons = [ "0" "1" ];
       };
+
+      # So that we don't have to battle systemd when bootstrapping
+      systemd.targets.ceph.wantedBy = lib.mkForce [];
     };
   };
 
@@ -61,24 +62,26 @@ import ./make-test.nix ({pkgs, ...}: {
 
     # Create the ceph-related directories
     $aio->mustSucceed(
-      "mkdir -p /var/lib/ceph/mgr/ceph-aio/",
-      "mkdir -p /var/lib/ceph/mon/ceph-aio/",
-      "mkdir -p /var/lib/ceph/osd/ceph-{0..1}/",
-      "chown ceph:ceph -R /var/lib/ceph/"
+      "mkdir -p /var/lib/ceph/mgr/ceph-aio",
+      "mkdir -p /var/lib/ceph/mon/ceph-aio",
+      "mkdir -p /var/lib/ceph/osd/ceph-{0,1}",
+      "chown ceph:ceph -R /var/lib/ceph/",
+      "mkdir -p /etc/ceph",
+      "chown ceph:ceph -R /etc/ceph"
     );
 
     # Bootstrap ceph-mon daemon
     $aio->mustSucceed(
-      "mkdir -p /var/lib/ceph/bootstrap-osd && chown ceph:ceph /var/lib/ceph/bootstrap-osd",
       "sudo -u ceph ceph-authtool --create-keyring /tmp/ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *'",
-      "ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring --gen-key -n client.admin --set-uid=0 --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow *' --cap mgr 'allow *'",
-      "ceph-authtool /tmp/ceph.mon.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring",
-            "monmaptool --create --add aio 192.168.1.1 --fsid 066ae264-2a5d-4729-8001-6ad265f50b03 /tmp/monmap",
+      "sudo -u ceph ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring --gen-key -n client.admin --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow *' --cap mgr 'allow *'",
+      "sudo -u ceph ceph-authtool /tmp/ceph.mon.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring",
+      "monmaptool --create --add aio 192.168.1.1 --fsid 066ae264-2a5d-4729-8001-6ad265f50b03 /tmp/monmap",
       "sudo -u ceph ceph-mon --mkfs -i aio --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring",
-      "touch /var/lib/ceph/mon/ceph-aio/done",
+      "sudo -u ceph touch /var/lib/ceph/mon/ceph-aio/done",
       "systemctl start ceph-mon-aio"
     );
     $aio->waitForUnit("ceph-mon-aio");
+    $aio->mustSucceed("ceph mon enable-msgr2");
 
     # Can't check ceph status until a mon is up
     $aio->succeed("ceph -s | grep 'mon: 1 daemons'");
@@ -90,6 +93,7 @@ import ./make-test.nix ({pkgs, ...}: {
     );
     $aio->waitForUnit("ceph-mgr-aio");
     $aio->waitUntilSucceeds("ceph -s | grep 'quorum aio'");
+    $aio->waitUntilSucceeds("ceph -s | grep 'mgr: aio(active,'");
 
     # Bootstrap both OSDs
     $aio->mustSucceed(
@@ -112,8 +116,8 @@ import ./make-test.nix ({pkgs, ...}: {
       "systemctl start ceph-osd-1"
     );
 
-    $aio->waitUntilSucceeds("ceph osd stat | grep '2 osds: 2 up, 2 in'");
-    $aio->waitUntilSucceeds("ceph -s | grep 'mgr: aio(active)'");
+    $aio->waitUntilSucceeds("ceph osd stat | grep -e '2 osds: 2 up[^,]*, 2 in'");
+    $aio->waitUntilSucceeds("ceph -s | grep 'mgr: aio(active,'");
     $aio->waitUntilSucceeds("ceph -s | grep 'HEALTH_OK'");
 
     $aio->mustSucceed(
@@ -135,5 +139,23 @@ import ./make-test.nix ({pkgs, ...}: {
       "ceph osd pool ls | grep 'aio-test'",
       "ceph osd pool delete aio-other-test aio-other-test --yes-i-really-really-mean-it"
     );
+
+    # Although we disable the target in the config, we still want to test that it works as intended
+    $aio->mustSucceed(
+      "systemctl stop ceph-osd-0",
+      "systemctl stop ceph-osd-1",
+      "systemctl stop ceph-mgr-aio",
+      "systemctl stop ceph-mon-aio"
+    );
+    $aio->succeed("systemctl start ceph.target");
+    $aio->waitForUnit("ceph-mon-aio");
+    $aio->waitForUnit("ceph-mgr-aio");
+    $aio->waitForUnit("ceph-osd-0");
+    $aio->waitForUnit("ceph-osd-1");
+    $aio->succeed("ceph -s | grep 'mon: 1 daemons'");
+    $aio->waitUntilSucceeds("ceph -s | grep 'quorum aio'");
+    $aio->waitUntilSucceeds("ceph osd stat | grep -e '2 osds: 2 up[^,]*, 2 in'");
+    $aio->waitUntilSucceeds("ceph -s | grep 'mgr: aio(active,'");
+    $aio->waitUntilSucceeds("ceph -s | grep 'HEALTH_OK'");
   '';
 })
diff --git a/nixos/tests/kubernetes/base.nix b/nixos/tests/kubernetes/base.nix
index f5307f253a5..f21634c4ffb 100644
--- a/nixos/tests/kubernetes/base.nix
+++ b/nixos/tests/kubernetes/base.nix
@@ -30,10 +30,7 @@ let
         { config, pkgs, lib, nodes, ... }:
           mkMerge [
             {
-              boot = {
-                postBootCommands = "rm -fr /var/lib/kubernetes/secrets /tmp/shared/*";
-                kernel.sysctl = { "fs.inotify.max_user_instances" = 256; };
-              };
+              boot.postBootCommands = "rm -fr /var/lib/kubernetes/secrets /tmp/shared/*";
               virtualisation.memorySize = mkDefault 1536;
               virtualisation.diskSize = mkDefault 4096;
               networking = {
diff --git a/nixos/tests/kubernetes/dns.nix b/nixos/tests/kubernetes/dns.nix
index e7db0a58ab6..46bcb01a526 100644
--- a/nixos/tests/kubernetes/dns.nix
+++ b/nixos/tests/kubernetes/dns.nix
@@ -77,7 +77,6 @@ let
   singleNodeTest = {
     test = ''
       # prepare machine1 for test
-      $machine1->waitForUnit("kubernetes.target");
       $machine1->waitUntilSucceeds("kubectl get node machine1.${domain} | grep -w Ready");
       $machine1->waitUntilSucceeds("docker load < ${redisImage}");
       $machine1->waitUntilSucceeds("kubectl create -f ${redisPod}");
@@ -103,8 +102,6 @@ let
       # Node token exchange
       $machine1->waitUntilSucceeds("cp -f /var/lib/cfssl/apitoken.secret /tmp/shared/apitoken.secret");
       $machine2->waitUntilSucceeds("cat /tmp/shared/apitoken.secret | nixos-kubernetes-node-join");
-      $machine1->waitForUnit("kubernetes.target");
-      $machine2->waitForUnit("kubernetes.target");
 
       # prepare machines for test
       $machine1->waitUntilSucceeds("kubectl get node machine2.${domain} | grep -w Ready");
diff --git a/nixos/tests/kubernetes/rbac.nix b/nixos/tests/kubernetes/rbac.nix
index 967fe506004..3ce7adcd0d7 100644
--- a/nixos/tests/kubernetes/rbac.nix
+++ b/nixos/tests/kubernetes/rbac.nix
@@ -94,8 +94,6 @@ let
 
   singlenode = base // {
     test = ''
-      $machine1->waitForUnit("kubernetes.target");
-
       $machine1->waitUntilSucceeds("kubectl get node machine1.my.zyx | grep -w Ready");
 
       $machine1->waitUntilSucceeds("docker load < ${kubectlImage}");
@@ -118,8 +116,6 @@ let
       # Node token exchange
       $machine1->waitUntilSucceeds("cp -f /var/lib/cfssl/apitoken.secret /tmp/shared/apitoken.secret");
       $machine2->waitUntilSucceeds("cat /tmp/shared/apitoken.secret | nixos-kubernetes-node-join");
-      $machine1->waitForUnit("kubernetes.target");
-      $machine2->waitForUnit("kubernetes.target");
 
       $machine1->waitUntilSucceeds("kubectl get node machine2.my.zyx | grep -w Ready");
 
diff --git a/nixos/tests/quake3.nix b/nixos/tests/quake3.nix
index d3e59a32373..4253ce4a867 100644
--- a/nixos/tests/quake3.nix
+++ b/nixos/tests/quake3.nix
@@ -12,9 +12,9 @@ let
 
   # Only allow the demo data to be used (only if it's unfreeRedistributable).
   unfreePredicate = pkg: with pkgs.lib; let
-    allowDrvPredicates = [ "quake3-demo" "quake3-pointrelease" ];
+    allowPackageNames = [ "quake3-demodata" "quake3-pointrelease" ];
     allowLicenses = [ pkgs.lib.licenses.unfreeRedistributable ];
-  in any (flip hasPrefix pkg.name) allowDrvPredicates &&
+  in elem pkg.pname allowPackageNames &&
      elem (pkg.meta.license or null) allowLicenses;
 
 in