Diffstat (limited to 'nixos/lib')
-rw-r--r--  nixos/lib/build-vms.nix                                          113
-rw-r--r--  nixos/lib/default.nix                                             33
-rw-r--r--  nixos/lib/eval-cacheable-options.nix                              53
-rw-r--r--  nixos/lib/eval-config-minimal.nix                                 49
-rw-r--r--  nixos/lib/eval-config.nix                                        111
-rw-r--r--  nixos/lib/from-env.nix                                             4
-rw-r--r--  nixos/lib/make-channel.nix                                        31
-rw-r--r--  nixos/lib/make-disk-image.nix                                    443
-rw-r--r--  nixos/lib/make-ext4-fs.nix                                        86
-rw-r--r--  nixos/lib/make-iso9660-image.nix                                  65
-rw-r--r--  nixos/lib/make-iso9660-image.sh                                  139
-rw-r--r--  nixos/lib/make-options-doc/default.nix                           169
-rw-r--r--  nixos/lib/make-options-doc/generateAsciiDoc.py                    37
-rw-r--r--  nixos/lib/make-options-doc/generateCommonMark.py                  27
-rw-r--r--  nixos/lib/make-options-doc/mergeJSON.py                           93
-rw-r--r--  nixos/lib/make-options-doc/options-to-docbook.xsl                246
-rw-r--r--  nixos/lib/make-options-doc/optionsJSONtoXML.nix                    6
-rw-r--r--  nixos/lib/make-options-doc/postprocess-option-descriptions.xsl  115
-rw-r--r--  nixos/lib/make-options-doc/sortXML.py                             27
-rw-r--r--  nixos/lib/make-squashfs.nix                                       35
-rw-r--r--  nixos/lib/make-system-tarball.nix                                 56
-rw-r--r--  nixos/lib/make-system-tarball.sh                                  57
-rw-r--r--  nixos/lib/make-zfs-image.nix                                     333
-rw-r--r--  nixos/lib/qemu-common.nix                                         32
-rw-r--r--  nixos/lib/systemd-lib.nix                                        440
-rw-r--r--  nixos/lib/systemd-unit-options.nix                               552
-rw-r--r--  nixos/lib/test-driver/default.nix                                 32
-rw-r--r--  nixos/lib/test-driver/setup.py                                    13
-rwxr-xr-x  nixos/lib/test-driver/test_driver/__init__.py                    128
-rw-r--r--  nixos/lib/test-driver/test_driver/driver.py                      225
-rw-r--r--  nixos/lib/test-driver/test_driver/logger.py                      101
-rw-r--r--  nixos/lib/test-driver/test_driver/machine.py                     988
-rw-r--r--  nixos/lib/test-driver/test_driver/polling_condition.py            77
-rw-r--r--  nixos/lib/test-driver/test_driver/vlan.py                         58
-rw-r--r--  nixos/lib/testing-python.nix                                     251
-rw-r--r--  nixos/lib/utils.nix                                              201
36 files changed, 5426 insertions, 0 deletions
diff --git a/nixos/lib/build-vms.nix b/nixos/lib/build-vms.nix
new file mode 100644
index 00000000000..05d9ce89dbd
--- /dev/null
+++ b/nixos/lib/build-vms.nix
@@ -0,0 +1,113 @@
+{ system
+, # Use a minimal kernel?
+  minimal ? false
+, # Ignored
+  config ? null
+, # Nixpkgs, for qemu, lib and more
+  pkgs, lib
+, # !!! See comment about args in lib/modules.nix
+  specialArgs ? {}
+, # NixOS configuration to add to the VMs
+  extraConfigurations ? []
+}:
+
+with lib;
+
+rec {
+
+  inherit pkgs;
+
+  # Build a virtual network from an attribute set `{ machine1 =
+  # config1; ... machineN = configN; }', where `machineX' is the
+  # hostname and `configX' is a NixOS system configuration.  Each
+  # machine is given an arbitrary IP address in the virtual network.
+  buildVirtualNetwork =
+    nodes: let nodesOut = mapAttrs (n: buildVM nodesOut) (assignIPAddresses nodes); in nodesOut;
+
+
+  buildVM =
+    nodes: configurations:
+
+    import ./eval-config.nix {
+      inherit system specialArgs;
+      modules = configurations ++ extraConfigurations;
+      baseModules =  (import ../modules/module-list.nix) ++
+        [ ../modules/virtualisation/qemu-vm.nix
+          ../modules/testing/test-instrumentation.nix # !!! should only get added for automated test runs
+          { key = "no-manual"; documentation.nixos.enable = false; }
+          { key = "no-revision";
+            # Make the revision metadata constant, in order to avoid needless retesting.
+            # The human version (e.g. 21.05-pre) is left as is, because it is useful
+            # for external modules that test with e.g. nixosTest and rely on that
+            # version number.
+            config.system.nixos.revision = mkForce "constant-nixos-revision";
+          }
+          { key = "nodes"; _module.args.nodes = nodes; }
+        ] ++ optional minimal ../modules/testing/minimal-kernel.nix;
+    };
+
+
+  # Given an attribute set { machine1 = config1; ... machineN =
+  # configN; }, sequentially assign each machine an IP address in the
+  # 192.168.<vlan>.0/24 range of every virtual network (VLAN) it is
+  # attached to, and set the hostname to the attribute name.
+  assignIPAddresses = nodes:
+
+    let
+
+      machines = attrNames nodes;
+
+      machinesNumbered = zipLists machines (range 1 254);
+
+      nodes_ = forEach machinesNumbered (m: nameValuePair m.fst
+        [ ( { config, nodes, ... }:
+            let
+              interfacesNumbered = zipLists config.virtualisation.vlans (range 1 255);
+              interfaces = forEach interfacesNumbered ({ fst, snd }:
+                nameValuePair "eth${toString snd}" { ipv4.addresses =
+                  [ { address = "192.168.${toString fst}.${toString m.snd}";
+                      prefixLength = 24;
+                  } ];
+                });
+
+              networkConfig =
+                { networking.hostName = mkDefault m.fst;
+
+                  networking.interfaces = listToAttrs interfaces;
+
+                  networking.primaryIPAddress =
+                    optionalString (interfaces != []) (head (head interfaces).value.ipv4.addresses).address;
+
+                  # Put the IP addresses of all VMs in this machine's
+                  # /etc/hosts file.  If a machine has multiple
+                  # interfaces, use the IP address corresponding to
+                  # the first interface (i.e. the first network in its
+                  # virtualisation.vlans option).
+                  networking.extraHosts = flip concatMapStrings machines
+                    (m': let config = (getAttr m' nodes).config; in
+                      optionalString (config.networking.primaryIPAddress != "")
+                        ("${config.networking.primaryIPAddress} " +
+                         optionalString (config.networking.domain != null)
+                           "${config.networking.hostName}.${config.networking.domain} " +
+                         "${config.networking.hostName}\n"));
+
+                  virtualisation.qemu.options =
+                    let qemu-common = import ../lib/qemu-common.nix { inherit lib pkgs; };
+                    in flip concatMap interfacesNumbered
+                      ({ fst, snd }: qemu-common.qemuNICFlags snd fst m.snd);
+                };
+
+              in
+                { key = "ip-address";
+                  config = networkConfig // {
+                    # Expose the networkConfig items for tests like nixops
+                    # that need to recreate the network config.
+                    system.build.networkConfig = networkConfig;
+                  };
+                }
+          )
+          (getAttr m.fst nodes)
+        ] );
+
+    in listToAttrs nodes_;
+
+}
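
As a rough, hypothetical sketch (not part of this change), `buildVirtualNetwork` could be invoked as follows; the node names, their options and the relative path are illustrative:

    let
      pkgs = import <nixpkgs> { };
      vms = import ./nixos/lib/build-vms.nix {
        system = builtins.currentSystem;
        inherit pkgs;
        inherit (pkgs) lib;
      };
    in
      # Yields { server = <evaluated NixOS configuration>; client = ...; }, with the
      # IP addresses, /etc/hosts entries and QEMU NIC flags described above filled in.
      vms.buildVirtualNetwork {
        server = { ... }: { services.openssh.enable = true; };
        client = { ... }: { };
      }
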
diff --git a/nixos/lib/default.nix b/nixos/lib/default.nix
new file mode 100644
index 00000000000..2b3056e0145
--- /dev/null
+++ b/nixos/lib/default.nix
@@ -0,0 +1,33 @@
+let
+  # The warning is in a top-level let binding so it is only printed once.
+  minimalModulesWarning = warn "lib.nixos.evalModules is experimental and subject to change. See nixos/lib/default.nix" null;
+  inherit (nonExtendedLib) warn;
+  nonExtendedLib = import ../../lib;
+in
+{ # Optional. Allows an extended `lib` to be used instead of the regular Nixpkgs lib.
+  lib ? nonExtendedLib,
+
+  # Feature flags allow you to opt in to unfinished code. These may change some
+  # behavior or disable warnings.
+  featureFlags ? {},
+
+  # This file itself is rather new, so we accept unknown parameters to be forward
+  # compatible. This is generally not recommended, because typos go undetected.
+  ...
+}:
+let
+  seqIf = cond: if cond then builtins.seq else a: b: b;
+  # If cond, force `a` before returning any attr
+  seqAttrsIf = cond: a: lib.mapAttrs (_: v: seqIf cond a v);
+
+  eval-config-minimal = import ./eval-config-minimal.nix { inherit lib; };
+in
+/*
+  This attribute set appears as lib.nixos in the flake, or can be imported
+  using a binding like `nixosLib = import (nixpkgs + "/nixos/lib") { }`.
+*/
+{
+  inherit (seqAttrsIf (!featureFlags?minimalModules) minimalModulesWarning eval-config-minimal)
+    evalModules
+    ;
+}
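
A hypothetical sketch of the import described in the comment above; `nixpkgs` and `./my-module.nix` are placeholders:

    let
      nixosLib = import (nixpkgs + "/nixos/lib") {
        # Opting in to the feature flag silences the experimental-API warning above.
        featureFlags.minimalModules = { };
      };
    in
      nixosLib.evalModules {
        modules = [ ./my-module.nix ];
      }
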
diff --git a/nixos/lib/eval-cacheable-options.nix b/nixos/lib/eval-cacheable-options.nix
new file mode 100644
index 00000000000..c3ba2ce6637
--- /dev/null
+++ b/nixos/lib/eval-cacheable-options.nix
@@ -0,0 +1,53 @@
+{ libPath
+, pkgsLibPath
+, nixosPath
+, modules
+, stateVersion
+, release
+}:
+
+let
+  lib = import libPath;
+  modulesPath = "${nixosPath}/modules";
+  # dummy pkgs set that contains no packages, only `pkgs.lib` from the full set.
+  # not having `pkgs.lib` causes all users of `pkgs.formats` to fail.
+  pkgs = import pkgsLibPath {
+    inherit lib;
+    pkgs = null;
+  };
+  utils = import "${nixosPath}/lib/utils.nix" {
+    inherit config lib;
+    pkgs = null;
+  };
+  # this is used both as a module and as specialArgs.
+  # as a module it sets the _module special values, as specialArgs it makes `config`
+  # unusable. this causes documentation attributes depending on `config` to fail.
+  config = {
+    _module.check = false;
+    _module.args = {};
+    system.stateVersion = stateVersion;
+  };
+  eval = lib.evalModules {
+    modules = (map (m: "${modulesPath}/${m}") modules) ++ [
+      config
+    ];
+    specialArgs = {
+      inherit config pkgs utils;
+    };
+  };
+  docs = import "${nixosPath}/doc/manual" {
+    pkgs = pkgs // {
+      inherit lib;
+      # duplicate of the declaration in all-packages.nix
+      buildPackages.nixosOptionsDoc = attrs:
+        (import "${nixosPath}/lib/make-options-doc")
+          ({ inherit pkgs lib; } // attrs);
+    };
+    config = config.config;
+    options = eval.options;
+    version = release;
+    revision = "release-${release}";
+    prefix = modulesPath;
+  };
+in
+  docs.optionsNix
diff --git a/nixos/lib/eval-config-minimal.nix b/nixos/lib/eval-config-minimal.nix
new file mode 100644
index 00000000000..d45b9ffd426
--- /dev/null
+++ b/nixos/lib/eval-config-minimal.nix
@@ -0,0 +1,49 @@
+
+# DO NOT IMPORT. Use nixpkgsFlake.lib.nixos, or import (nixpkgs + "/nixos/lib")
+{ lib }: # read -^
+
+let
+
+  /*
+    Invoke NixOS. Unlike traditional NixOS, this does not include all modules.
+    Any such modules have to be explicitly added via the `modules` parameter,
+    or imported using `imports` in a module.
+
+    A minimal module list improves NixOS evaluation performance and allows
+    modules to be independently usable, supporting new use cases.
+
+    Parameters:
+
+      modules:        A list of modules that constitute the configuration.
+
+      specialArgs:    An attribute set of module arguments. Unlike
+                      `config._module.args`, these are available for use in
+                      `imports`.
+                      `config._module.args` should be preferred when possible.
+
+    Return:
+
+      An attribute set containing `config.system.build.toplevel` among other
+      attributes. See `lib.evalModules` in the Nixpkgs library.
+
+   */
+  evalModules = {
+    prefix ? [],
+    modules ? [],
+    specialArgs ? {},
+  }:
+  # NOTE: Regular NixOS currently does use this function! Don't break it!
+  #       Ideally we don't diverge, unless we learn that we should.
+  #       In other words, only the public interface of nixos.evalModules
+  #       is experimental.
+  lib.evalModules {
+    inherit prefix modules;
+    specialArgs = {
+      modulesPath = builtins.toString ../modules;
+    } // specialArgs;
+  };
+
+in
+{
+  inherit evalModules;
+}
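
A hypothetical sketch of the `specialArgs` behaviour documented above: unlike `config._module.args`, values passed this way are available for use in `imports`. The attribute name `exampleInputs` is illustrative:

    nixpkgsFlake.lib.nixos.evalModules {
      modules = [
        ({ exampleInputs, ... }: {
          # Allowed because exampleInputs comes from specialArgs, not _module.args.
          imports = [ exampleInputs.extraModule ];
        })
      ];
      specialArgs = {
        exampleInputs.extraModule = { };  # an empty placeholder module
      };
    }
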
diff --git a/nixos/lib/eval-config.nix b/nixos/lib/eval-config.nix
new file mode 100644
index 00000000000..2daaa8a1186
--- /dev/null
+++ b/nixos/lib/eval-config.nix
@@ -0,0 +1,111 @@
+# From an end-user configuration file (`configuration.nix'), build a NixOS
+# configuration object (`config') from which we can retrieve option
+# values.
+
+# !!! Please think twice before adding to this argument list!
+# Ideally eval-config.nix would be an extremely thin wrapper
+# around lib.evalModules, so that modular systems that have nixos configs
+# as subcomponents (e.g. the container feature, or nixops if network
+# expressions are ever made modular at the top level) can just use
+# types.submodule instead of using eval-config.nix
+evalConfigArgs@
+{ # !!! system can be set modularly, would be nice to remove
+  system ? builtins.currentSystem
+, # !!! is this argument needed any more? The pkgs argument can
+  # be set modularly anyway.
+  pkgs ? null
+, # !!! what do we gain by making this configurable?
+  baseModules ? import ../modules/module-list.nix
+, # !!! See comment about args in lib/modules.nix
+  extraArgs ? {}
+, # !!! See comment about args in lib/modules.nix
+  specialArgs ? {}
+, modules
+, modulesLocation ? (builtins.unsafeGetAttrPos "modules" evalConfigArgs).file or null
+, # !!! See comment about check in lib/modules.nix
+  check ? true
+, prefix ? []
+, lib ? import ../../lib
+, extraModules ? let e = builtins.getEnv "NIXOS_EXTRA_MODULE_PATH";
+                 in if e == "" then [] else [(import e)]
+}:
+
+let pkgs_ = pkgs;
+in
+
+let
+  evalModulesMinimal = (import ./default.nix {
+    inherit lib;
+    # Implicit use of feature is noted in implementation.
+    featureFlags.minimalModules = { };
+  }).evalModules;
+
+  pkgsModule = rec {
+    _file = ./eval-config.nix;
+    key = _file;
+    config = {
+      # Explicit `nixpkgs.system` or `nixpkgs.localSystem` should override
+      # this.  Since the latter defaults to the former, the former should
+      # default to the argument. That way this new default could propagate all
+      # the way through, but has the last priority behind everything else.
+      nixpkgs.system = lib.mkDefault system;
+
+      # Stash the value of the `system` argument. When using `nesting.children`
+      # we want to have the same default value behavior (immediately above)
+      # without any interference from the user's configuration.
+      nixpkgs.initialSystem = system;
+
+      _module.args.pkgs = lib.mkIf (pkgs_ != null) (lib.mkForce pkgs_);
+    };
+  };
+
+  withWarnings = x:
+    lib.warnIf (evalConfigArgs?extraArgs) "The extraArgs argument to eval-config.nix is deprecated. Please set config._module.args instead."
+    lib.warnIf (evalConfigArgs?check) "The check argument to eval-config.nix is deprecated. Please set config._module.check instead."
+    x;
+
+  legacyModules =
+    lib.optional (evalConfigArgs?extraArgs) {
+      config = {
+        _module.args = extraArgs;
+      };
+    }
+    ++ lib.optional (evalConfigArgs?check) {
+      config = {
+        _module.check = lib.mkDefault check;
+      };
+    };
+
+  allUserModules =
+    let
+      # Add the invoking file (or specified modulesLocation) as error message location
+      # for modules that don't have their own locations; presumably inline modules.
+      locatedModules =
+        if modulesLocation == null then
+          modules
+        else
+          map (lib.setDefaultModuleLocation modulesLocation) modules;
+    in
+      locatedModules ++ legacyModules;
+
+  noUserModules = evalModulesMinimal ({
+    inherit prefix specialArgs;
+    modules = baseModules ++ extraModules ++ [ pkgsModule modulesModule ];
+  });
+
+  # Extra arguments that are useful for constructing a similar configuration.
+  modulesModule = {
+    config = {
+      _module.args = {
+        inherit noUserModules baseModules extraModules modules;
+      };
+    };
+  };
+
+  nixosWithUserModules = noUserModules.extendModules { modules = allUserModules; };
+
+in
+withWarnings nixosWithUserModules // {
+  inherit extraArgs;
+  inherit (nixosWithUserModules._module.args) pkgs;
+}
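
A hypothetical end-user invocation; the system value and configuration path are illustrative:

    let
      eval = import ./nixos/lib/eval-config.nix {
        system = "x86_64-linux";
        modules = [ ./configuration.nix ];
      };
    in
      # Option values are read from eval.config, e.g. the built system closure:
      eval.config.system.build.toplevel
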
diff --git a/nixos/lib/from-env.nix b/nixos/lib/from-env.nix
new file mode 100644
index 00000000000..6bd71e40e9a
--- /dev/null
+++ b/nixos/lib/from-env.nix
@@ -0,0 +1,4 @@
+# TODO: remove this file. There is lib.maybeEnv now
+name: default:
+let value = builtins.getEnv name; in
+if value == "" then default else value
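A hypothetical use, falling back to a default when the environment variable is unset or empty:

    (import ./nixos/lib/from-env.nix) "NIXOS_CONFIG" "/etc/nixos/configuration.nix"
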
diff --git a/nixos/lib/make-channel.nix b/nixos/lib/make-channel.nix
new file mode 100644
index 00000000000..9b920b989fc
--- /dev/null
+++ b/nixos/lib/make-channel.nix
@@ -0,0 +1,31 @@
+/* Build a channel tarball. These contain, in addition to the nixpkgs
+ * expressions themselves, files that indicate the version of nixpkgs
+ * that they represent.
+ */
+{ pkgs, nixpkgs, version, versionSuffix }:
+
+pkgs.releaseTools.makeSourceTarball {
+  name = "nixos-channel";
+
+  src = nixpkgs;
+
+  officialRelease = false; # FIXME: fix this in makeSourceTarball
+  inherit version versionSuffix;
+
+  buildInputs = [ pkgs.nix ];
+
+  distPhase = ''
+    rm -rf .git
+    echo -n $VERSION_SUFFIX > .version-suffix
+    echo -n ${nixpkgs.rev or nixpkgs.shortRev} > .git-revision
+    releaseName=nixos-$VERSION$VERSION_SUFFIX
+    mkdir -p $out/tarballs
+    cp -prd . ../$releaseName
+    chmod -R u+w ../$releaseName
+    ln -s . ../$releaseName/nixpkgs # hack to make ‘<nixpkgs>’ work
+    NIX_STATE_DIR=$TMPDIR nix-env -f ../$releaseName/default.nix -qaP --meta --xml \* > /dev/null
+    cd ..
+    chmod -R u+w $releaseName
+    tar cfJ $out/tarballs/$releaseName.tar.xz $releaseName
+  '';
+}
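
A hypothetical invocation; `nixpkgsSrc` stands for a nixpkgs source tree that carries `.rev`/`.shortRev` (e.g. a flake input), and the version strings are illustrative:

    import ./nixos/lib/make-channel.nix {
      inherit pkgs;
      nixpkgs = nixpkgsSrc;
      version = "21.11";
      versionSuffix = ".git.0000000";
    }
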
diff --git a/nixos/lib/make-disk-image.nix b/nixos/lib/make-disk-image.nix
new file mode 100644
index 00000000000..15302ae8241
--- /dev/null
+++ b/nixos/lib/make-disk-image.nix
@@ -0,0 +1,443 @@
+{ pkgs
+, lib
+
+, # The NixOS configuration to be installed onto the disk image.
+  config
+
+, # The size of the disk, in megabytes.
+  # if "auto" size is calculated based on the contents copied to it and
+  #   additionalSpace is taken into account.
+  diskSize ? "auto"
+
+, # additional disk space to be added to the image if diskSize "auto"
+  # is used
+  additionalSpace ? "512M"
+
+, # size of the boot partition, is only used if partitionTableType is
+  # either "efi" or "hybrid"
+  # This will be undersized slightly, as this is actually the offset of
+  # the end of the partition. Generally it will be 1MiB smaller.
+  bootSize ? "256M"
+
+, # The files and directories to be placed in the target file system.
+  # This is a list of attribute sets {source, target, mode, user, group} where
+  # `source' is the file system object (regular file or directory) to be
+  # grafted in the file system at path `target', `mode' is a string containing
+  # the permissions that will be set (ex. "755"), `user' and `group' are the
+  # user and group name that will be set as owner of the files.
+  # `mode', `user', and `group' are optional.
+  # When setting one of `user' or `group', the other needs to be set too.
+  contents ? []
+
+, # Type of partition table to use; either "legacy", "efi", or "none".
+  # For "efi" images, the GPT partition table is used and a mandatory ESP
+  #   partition of reasonable size is created in addition to the root partition.
+  # For "legacy", the msdos partition table is used and a single large root
+  #   partition is created.
+  # For "legacy+gpt", the GPT partition table is used, a 1MiB no-fs partition for
+  #   use by the bootloader is created, and a single large root partition is
+  #   created.
+  # For "hybrid", the GPT partition table is used and a mandatory ESP
+  #   partition of reasonable size is created in addition to the root partition.
+  #   Also a legacy MBR will be present.
+  # For "none", no partition table is created. Enabling `installBootLoader`
+  #   most likely fails as GRUB will probably refuse to install.
+  partitionTableType ? "legacy"
+
+, # Whether to invoke `switch-to-configuration boot` during image creation
+  installBootLoader ? true
+
+, # The root file system type.
+  fsType ? "ext4"
+
+, # Filesystem label
+  label ? if onlyNixStore then "nix-store" else "nixos"
+
+, # The initial NixOS configuration file to be copied to
+  # /etc/nixos/configuration.nix.
+  configFile ? null
+
+, # Shell code executed after the VM has finished.
+  postVM ? ""
+
+, # Copy the contents of the Nix store to the root of the image and
+  # skip further setup. Incompatible with `contents`,
+  # `installBootLoader` and `configFile`.
+  onlyNixStore ? false
+
+, name ? "nixos-disk-image"
+
+, # Disk image format, one of qcow2, qcow2-compressed, vdi, vpc, raw.
+  format ? "raw"
+
+, # Whether a nix channel based on the current source tree should be
+  # made available inside the image. Useful for interactive use of nix
+  # utils, but changes the hash of the image when the sources are
+  # updated.
+  copyChannel ? true
+
+, # Additional store paths to copy to the image's store.
+  additionalPaths ? []
+}:
+
+assert partitionTableType == "legacy" || partitionTableType == "legacy+gpt" || partitionTableType == "efi" || partitionTableType == "hybrid" || partitionTableType == "none";
+# We use -E offset=X below, which is only supported by e2fsprogs
+assert partitionTableType != "none" -> fsType == "ext4";
+# Either both or none of {user,group} need to be set
+assert lib.all
+         (attrs: ((attrs.user  or null) == null)
+              == ((attrs.group or null) == null))
+         contents;
+assert onlyNixStore -> contents == [] && configFile == null && !installBootLoader;
+
+with lib;
+
+let format' = format; in let
+
+  format = if format' == "qcow2-compressed" then "qcow2" else format';
+
+  compress = optionalString (format' == "qcow2-compressed") "-c";
+
+  filename = "nixos." + {
+    qcow2 = "qcow2";
+    vdi   = "vdi";
+    vpc   = "vhd";
+    raw   = "img";
+  }.${format} or format;
+
+  rootPartition = { # switch-case
+    legacy = "1";
+    "legacy+gpt" = "2";
+    efi = "2";
+    hybrid = "3";
+  }.${partitionTableType};
+
+  partitionDiskScript = { # switch-case
+    legacy = ''
+      parted --script $diskImage -- \
+        mklabel msdos \
+        mkpart primary ext4 1MiB -1
+    '';
+    "legacy+gpt" = ''
+      parted --script $diskImage -- \
+        mklabel gpt \
+        mkpart no-fs 1MB 2MB \
+        set 1 bios_grub on \
+        align-check optimal 1 \
+        mkpart primary ext4 2MB -1 \
+        align-check optimal 2 \
+        print
+    '';
+    efi = ''
+      parted --script $diskImage -- \
+        mklabel gpt \
+        mkpart ESP fat32 8MiB ${bootSize} \
+        set 1 boot on \
+        mkpart primary ext4 ${bootSize} -1
+    '';
+    hybrid = ''
+      parted --script $diskImage -- \
+        mklabel gpt \
+        mkpart ESP fat32 8MiB ${bootSize} \
+        set 1 boot on \
+        mkpart no-fs 0 1024KiB \
+        set 2 bios_grub on \
+        mkpart primary ext4 ${bootSize} -1
+    '';
+    none = "";
+  }.${partitionTableType};
+
+  nixpkgs = cleanSource pkgs.path;
+
+  # FIXME: merge with channel.nix / make-channel.nix.
+  channelSources = pkgs.runCommand "nixos-${config.system.nixos.version}" {} ''
+    mkdir -p $out
+    cp -prd ${nixpkgs.outPath} $out/nixos
+    chmod -R u+w $out/nixos
+    if [ ! -e $out/nixos/nixpkgs ]; then
+      ln -s . $out/nixos/nixpkgs
+    fi
+    rm -rf $out/nixos/.git
+    echo -n ${config.system.nixos.versionSuffix} > $out/nixos/.version-suffix
+  '';
+
+  binPath = with pkgs; makeBinPath (
+    [ rsync
+      util-linux
+      parted
+      e2fsprogs
+      lkl
+      config.system.build.nixos-install
+      config.system.build.nixos-enter
+      nix
+    ] ++ stdenv.initialPath);
+
+  # I'm preserving the line below because I'm going to search for it across nixpkgs to consolidate
+  # image building logic. The comment right below this now appears in 4 different places in nixpkgs :)
+  # !!! should use XML.
+  sources = map (x: x.source) contents;
+  targets = map (x: x.target) contents;
+  modes   = map (x: x.mode  or "''") contents;
+  users   = map (x: x.user  or "''") contents;
+  groups  = map (x: x.group or "''") contents;
+
+  basePaths = [ config.system.build.toplevel ]
+    ++ lib.optional copyChannel channelSources;
+
+  additionalPaths' = subtractLists basePaths additionalPaths;
+
+  closureInfo = pkgs.closureInfo {
+    rootPaths = basePaths ++ additionalPaths';
+  };
+
+  blockSize = toString (4 * 1024); # ext4fs block size (not block device sector size)
+
+  prepareImage = ''
+    export PATH=${binPath}
+
+    # Yes, mkfs.ext4 takes different units in different contexts. Fun.
+    sectorsToKilobytes() {
+      echo $(( ( "$1" * 512 ) / 1024 ))
+    }
+
+    sectorsToBytes() {
+      echo $(( "$1" * 512  ))
+    }
+
+    # Given lines of numbers, adds them together
+    sum_lines() {
+      local acc=0
+      while read -r number; do
+        acc=$((acc+number))
+      done
+      echo "$acc"
+    }
+
+    mebibyte=$(( 1024 * 1024 ))
+
+    # Approximate percentage of reserved space in an ext4 fs over 512MiB.
+    # 0.05208587646484375
+    #  × 1000, integer part: 52
+    compute_fudge() {
+      echo $(( $1 * 52 / 1000 ))
+    }
+
+    mkdir $out
+
+    root="$PWD/root"
+    mkdir -p $root
+
+    # Copy arbitrary other files into the image
+    # Semi-shamelessly copied from make-etc.sh. I (@copumpkin) shall factor this stuff out as part of
+    # https://github.com/NixOS/nixpkgs/issues/23052.
+    set -f
+    sources_=(${concatStringsSep " " sources})
+    targets_=(${concatStringsSep " " targets})
+    modes_=(${concatStringsSep " " modes})
+    set +f
+
+    for ((i = 0; i < ''${#targets_[@]}; i++)); do
+      source="''${sources_[$i]}"
+      target="''${targets_[$i]}"
+      mode="''${modes_[$i]}"
+
+      if [ -n "$mode" ]; then
+        rsync_chmod_flags="--chmod=$mode"
+      else
+        rsync_chmod_flags=""
+      fi
+      # Unfortunately cptofs only supports modes, not ownership, so we can't use
+      # rsync's --chown option. Instead, we change the ownerships in the
+      # VM script with chown.
+      rsync_flags="-a --no-o --no-g $rsync_chmod_flags"
+      if [[ "$source" =~ '*' ]]; then
+        # If the source name contains '*', perform globbing.
+        mkdir -p $root/$target
+        for fn in $source; do
+          rsync $rsync_flags "$fn" $root/$target/
+        done
+      else
+        mkdir -p $root/$(dirname $target)
+        if ! [ -e $root/$target ]; then
+          rsync $rsync_flags $source $root/$target
+        else
+          echo "duplicate entry $target -> $source"
+          exit 1
+        fi
+      fi
+    done
+
+    export HOME=$TMPDIR
+
+    # Provide a Nix database so that nixos-install can copy closures.
+    export NIX_STATE_DIR=$TMPDIR/state
+    nix-store --load-db < ${closureInfo}/registration
+
+    chmod 755 "$TMPDIR"
+    echo "running nixos-install..."
+    nixos-install --root $root --no-bootloader --no-root-passwd \
+      --system ${config.system.build.toplevel} \
+      ${if copyChannel then "--channel ${channelSources}" else "--no-channel-copy"} \
+      --substituters ""
+
+    ${optionalString (additionalPaths' != []) ''
+      nix --extra-experimental-features nix-command copy --to $root --no-check-sigs ${concatStringsSep " " additionalPaths'}
+    ''}
+
+    diskImage=nixos.raw
+
+    ${if diskSize == "auto" then ''
+      ${if partitionTableType == "efi" || partitionTableType == "hybrid" then ''
+        # Add the GPT at the end
+        gptSpace=$(( 512 * 34 * 1 ))
+        # Normally we'd need to account for alignment and things, if bootSize
+        # represented the actual size of the boot partition. But it instead
+        # represents the offset at which it ends.
+        # So we know bootSize is the reserved space in front of the partition.
+        reservedSpace=$(( gptSpace + $(numfmt --from=iec '${bootSize}') ))
+      '' else if partitionTableType == "legacy+gpt" then ''
+        # Add the GPT at the end
+        gptSpace=$(( 512 * 34 * 1 ))
+        # And include the bios_grub partition; the ext4 partition starts at 2MB exactly.
+        reservedSpace=$(( gptSpace + 2 * mebibyte ))
+      '' else if partitionTableType == "legacy" then ''
+        # Add the 1MiB aligned reserved space (includes MBR)
+        reservedSpace=$(( mebibyte ))
+      '' else ''
+        reservedSpace=0
+      ''}
+      additionalSpace=$(( $(numfmt --from=iec '${additionalSpace}') + reservedSpace ))
+
+      # Compute required space in filesystem blocks
+      diskUsage=$(find . ! -type d -print0 | du --files0-from=- --apparent-size --block-size "${blockSize}" | cut -f1 | sum_lines)
+      # Each inode takes space!
+      numInodes=$(find . | wc -l)
+      # Convert to bytes, inodes take two blocks each!
+      diskUsage=$(( (diskUsage + 2 * numInodes) * ${blockSize} ))
+      # Then increase the required space to account for the reserved blocks.
+      fudge=$(compute_fudge $diskUsage)
+      requiredFilesystemSpace=$(( diskUsage + fudge ))
+
+      diskSize=$(( requiredFilesystemSpace  + additionalSpace ))
+
+      # Round up to the nearest mebibyte.
+      # This ensures whole 512 bytes sector sizes in the disk image
+      # and helps towards aligning partitions optimally.
+      if (( diskSize % mebibyte )); then
+        diskSize=$(( ( diskSize / mebibyte + 1) * mebibyte ))
+      fi
+
+      truncate -s "$diskSize" $diskImage
+
+      printf "Automatic disk size...\n"
+      printf "  Closure space use: %d bytes\n" $diskUsage
+      printf "  fudge: %d bytes\n" $fudge
+      printf "  Filesystem size needed: %d bytes\n" $requiredFilesystemSpace
+      printf "  Additional space: %d bytes\n" $additionalSpace
+      printf "  Disk image size: %d bytes\n" $diskSize
+    '' else ''
+      truncate -s ${toString diskSize}M $diskImage
+    ''}
+
+    ${partitionDiskScript}
+
+    ${if partitionTableType != "none" then ''
+      # Get start & length of the root partition in sectors to $START and $SECTORS.
+      eval $(partx $diskImage -o START,SECTORS --nr ${rootPartition} --pairs)
+
+      mkfs.${fsType} -b ${blockSize} -F -L ${label} $diskImage -E offset=$(sectorsToBytes $START) $(sectorsToKilobytes $SECTORS)K
+    '' else ''
+      mkfs.${fsType} -b ${blockSize} -F -L ${label} $diskImage
+    ''}
+
+    echo "copying staging root to image..."
+    cptofs -p ${optionalString (partitionTableType != "none") "-P ${rootPartition}"} \
+           -t ${fsType} \
+           -i $diskImage \
+           $root${optionalString onlyNixStore builtins.storeDir}/* / ||
+      (echo >&2 "ERROR: cptofs failed. diskSize might be too small for closure."; exit 1)
+  '';
+
+  moveOrConvertImage = ''
+    ${if format == "raw" then ''
+      mv $diskImage $out/${filename}
+    '' else ''
+      ${pkgs.qemu}/bin/qemu-img convert -f raw -O ${format} ${compress} $diskImage $out/${filename}
+    ''}
+    diskImage=$out/${filename}
+  '';
+
+  buildImage = pkgs.vmTools.runInLinuxVM (
+    pkgs.runCommand name {
+      preVM = prepareImage;
+      buildInputs = with pkgs; [ util-linux e2fsprogs dosfstools ];
+      postVM = moveOrConvertImage + postVM;
+      memSize = 1024;
+    } ''
+      export PATH=${binPath}:$PATH
+
+      rootDisk=${if partitionTableType != "none" then "/dev/vda${rootPartition}" else "/dev/vda"}
+
+      # Some tools assume these exist
+      ln -s vda /dev/xvda
+      ln -s vda /dev/sda
+      # make systemd-boot find ESP without udev
+      mkdir /dev/block
+      ln -s /dev/vda1 /dev/block/254:1
+
+      mountPoint=/mnt
+      mkdir $mountPoint
+      mount $rootDisk $mountPoint
+
+      # Create the ESP and mount it. Unlike e2fsprogs, mkfs.vfat doesn't support an
+      # '-E offset=X' option, so we can't do this outside the VM.
+      ${optionalString (partitionTableType == "efi" || partitionTableType == "hybrid") ''
+        mkdir -p /mnt/boot
+        mkfs.vfat -n ESP /dev/vda1
+        mount /dev/vda1 /mnt/boot
+      ''}
+
+      # Install a configuration.nix
+      mkdir -p /mnt/etc/nixos
+      ${optionalString (configFile != null) ''
+        cp ${configFile} /mnt/etc/nixos/configuration.nix
+      ''}
+
+      ${lib.optionalString installBootLoader ''
+        # Set up core system link, GRUB, etc.
+        NIXOS_INSTALL_BOOTLOADER=1 nixos-enter --root $mountPoint -- /nix/var/nix/profiles/system/bin/switch-to-configuration boot
+
+        # The above script generates a random machine-id, and we don't want to bake a single ID into all our images
+        rm -f $mountPoint/etc/machine-id
+      ''}
+
+      # Set the ownerships of the contents. The modes are set in preVM.
+      # No globbing on targets, so no need to set -f
+      targets_=(${concatStringsSep " " targets})
+      users_=(${concatStringsSep " " users})
+      groups_=(${concatStringsSep " " groups})
+      for ((i = 0; i < ''${#targets_[@]}; i++)); do
+        target="''${targets_[$i]}"
+        user="''${users_[$i]}"
+        group="''${groups_[$i]}"
+        if [ -n "$user$group" ]; then
+          # We have to nixos-enter since we need to use the user and group of the VM
+          nixos-enter --root $mountPoint -- chown -R "$user:$group" "$target"
+        fi
+      done
+
+      umount -R /mnt
+
+      # Make sure resize2fs works. Note that resize2fs has stricter criteria for resizing than a normal
+      # mount, so the `-c 0` and `-i 0` don't affect it. Setting it to `now` doesn't produce deterministic
+      # output, of course, but we can fix that when/if we start making images deterministic.
+      ${optionalString (fsType == "ext4") ''
+        tune2fs -T now -c 0 -i 0 $rootDisk
+      ''}
+    ''
+  );
+in
+  if onlyNixStore then
+    pkgs.runCommand name {}
+      (prepareImage + moveOrConvertImage + postVM)
+  else buildImage
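
A hypothetical invocation, where `config` is the `.config` of an eval-config.nix result and all other values are illustrative:

    import ./nixos/lib/make-disk-image.nix {
      inherit pkgs lib config;
      format = "qcow2-compressed";     # raw image converted and compressed by qemu-img
      partitionTableType = "hybrid";   # GPT with an ESP plus a legacy MBR
      diskSize = "auto";               # size computed from the closure ...
      additionalSpace = "1024M";       # ... plus this much slack
    }
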
diff --git a/nixos/lib/make-ext4-fs.nix b/nixos/lib/make-ext4-fs.nix
new file mode 100644
index 00000000000..416beeb32f2
--- /dev/null
+++ b/nixos/lib/make-ext4-fs.nix
@@ -0,0 +1,86 @@
+# Builds an ext4 image containing a populated /nix/store with the closure
+# of store paths passed in the storePaths parameter, in addition to the
+# contents of a directory that can be populated with commands. The
+# generated image is sized to only fit its contents, with the expectation
+# that a script resizes the filesystem at boot time.
+{ pkgs
+, lib
+# List of derivations to be included
+, storePaths
+# Whether or not to compress the resulting image with zstd
+, compressImage ? false, zstd
+# Shell commands to populate the ./files directory.
+# All files in that directory are copied to the root of the FS.
+, populateImageCommands ? ""
+, volumeLabel
+, uuid ? "44444444-4444-4444-8888-888888888888"
+, e2fsprogs
+, libfaketime
+, perl
+, fakeroot
+}:
+
+let
+  sdClosureInfo = pkgs.buildPackages.closureInfo { rootPaths = storePaths; };
+in
+pkgs.stdenv.mkDerivation {
+  name = "ext4-fs.img${lib.optionalString compressImage ".zst"}";
+
+  nativeBuildInputs = [ e2fsprogs.bin libfaketime perl fakeroot ]
+  ++ lib.optional compressImage zstd;
+
+  buildCommand =
+    ''
+      ${if compressImage then "img=temp.img" else "img=$out"}
+      (
+      mkdir -p ./files
+      ${populateImageCommands}
+      )
+
+      echo "Preparing store paths for image..."
+
+      # Create nix/store before copying path
+      mkdir -p ./rootImage/nix/store
+
+      xargs -I % cp -a --reflink=auto % -t ./rootImage/nix/store/ < ${sdClosureInfo}/store-paths
+      (
+        GLOBIGNORE=".:.."
+        shopt -u dotglob
+
+        for f in ./files/*; do
+            cp -a --reflink=auto -t ./rootImage/ "$f"
+        done
+      )
+
+      # Also include a manifest of the closures in a format suitable for nix-store --load-db
+      cp ${sdClosureInfo}/registration ./rootImage/nix-path-registration
+
+      # Make a crude approximation of the size of the target image.
+      # If the script starts failing, increase the fudge factors here.
+      numInodes=$(find ./rootImage | wc -l)
+      numDataBlocks=$(du -s -c -B 4096 --apparent-size ./rootImage | tail -1 | awk '{ print int($1 * 1.10) }')
+      bytes=$((2 * 4096 * $numInodes + 4096 * $numDataBlocks))
+      echo "Creating an EXT4 image of $bytes bytes (numInodes=$numInodes, numDataBlocks=$numDataBlocks)"
+
+      truncate -s $bytes $img
+
+      faketime -f "1970-01-01 00:00:01" fakeroot mkfs.ext4 -L ${volumeLabel} -U ${uuid} -d ./rootImage $img
+
+      export EXT2FS_NO_MTAB_OK=yes
+      # I have ended up with corrupted images sometimes; I suspect that happens when the build machine's disk gets full during the build.
+      if ! fsck.ext4 -n -f $img; then
+        echo "--- Fsck failed for EXT4 image of $bytes bytes (numInodes=$numInodes, numDataBlocks=$numDataBlocks) ---"
+        cat errorlog
+        return 1
+      fi
+
+      # We may want to shrink the file system and resize the image to
+      # get rid of the unnecessary slack here--but see
+      # https://github.com/NixOS/nixpkgs/issues/125121 for caveats.
+
+      if [ ${builtins.toString compressImage} ]; then
+        echo "Compressing image"
+        zstd -v --no-progress ./$img -o $out
+      fi
+    '';
+}
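
A hypothetical invocation via callPackage; `config` again stands for an evaluated NixOS configuration, and the label, store path and populate commands are illustrative:

    pkgs.callPackage ./nixos/lib/make-ext4-fs.nix {
      storePaths = [ config.system.build.toplevel ];
      volumeLabel = "NIXOS_SD";
      compressImage = true;
      populateImageCommands = ''
        mkdir -p ./files/boot
      '';
    }
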
diff --git a/nixos/lib/make-iso9660-image.nix b/nixos/lib/make-iso9660-image.nix
new file mode 100644
index 00000000000..549530965f6
--- /dev/null
+++ b/nixos/lib/make-iso9660-image.nix
@@ -0,0 +1,65 @@
+{ stdenv, closureInfo, xorriso, syslinux, libossp_uuid
+
+, # The file name of the resulting ISO image.
+  isoName ? "cd.iso"
+
+, # The files and directories to be placed in the ISO file system.
+  # This is a list of attribute sets {source, target} where `source'
+  # is the file system object (regular file or directory) to be
+  # grafted in the file system at path `target'.
+  contents
+
+, # In addition to `contents', the closure of the store paths listed
+  # in `storeContents' are also placed in the Nix store of the CD.
+  # This is a list of attribute sets {object, symlink} where `object'
+  # is a store path whose closure will be copied, and `symlink' is a
+  # symlink to `object' that will be added to the CD.
+  storeContents ? []
+
+, # Whether this should be an El-Torito bootable CD.
+  bootable ? false
+
+, # Whether this should be an efi-bootable El-Torito CD.
+  efiBootable ? false
+
+, # Whether this should be a hybrid CD (bootable from USB as well as CD).
+  usbBootable ? false
+
+, # The path (in the ISO file system) of the boot image.
+  bootImage ? ""
+
+, # The path (in the ISO file system) of the efi boot image.
+  efiBootImage ? ""
+
+, # The path (outside the ISO file system) of the isohybrid-mbr image.
+  isohybridMbrImage ? ""
+
+, # Whether to compress the resulting ISO image with zstd.
+  compressImage ? false, zstd
+
+, # The volume ID.
+  volumeID ? ""
+}:
+
+assert bootable -> bootImage != "";
+assert efiBootable -> efiBootImage != "";
+assert usbBootable -> isohybridMbrImage != "";
+
+stdenv.mkDerivation {
+  name = isoName;
+  builder = ./make-iso9660-image.sh;
+  nativeBuildInputs = [ xorriso syslinux zstd libossp_uuid ];
+
+  inherit isoName bootable bootImage compressImage volumeID efiBootImage efiBootable isohybridMbrImage usbBootable;
+
+  # !!! should use XML.
+  sources = map (x: x.source) contents;
+  targets = map (x: x.target) contents;
+
+  # !!! should use XML.
+  objects = map (x: x.object) storeContents;
+  symlinks = map (x: x.symlink) storeContents;
+
+  # For obtaining the closure of `storeContents'.
+  closureInfo = closureInfo { rootPaths = map (x: x.object) storeContents; };
+}
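
A hypothetical invocation via callPackage; `config` stands for an evaluated NixOS configuration, and the contents/storeContents entries are illustrative:

    pkgs.callPackage ./nixos/lib/make-iso9660-image.nix {
      isoName = "example.iso";
      volumeID = "EXAMPLE";
      contents = [
        { source = ./README.md; target = "/README.md"; }
      ];
      storeContents = [
        { object = config.system.build.toplevel; symlink = "/run/current-system"; }
      ];
    }
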
diff --git a/nixos/lib/make-iso9660-image.sh b/nixos/lib/make-iso9660-image.sh
new file mode 100644
index 00000000000..9273b8d3db8
--- /dev/null
+++ b/nixos/lib/make-iso9660-image.sh
@@ -0,0 +1,139 @@
+source $stdenv/setup
+
+sources_=($sources)
+targets_=($targets)
+
+objects=($objects)
+symlinks=($symlinks)
+
+
+# Remove the initial slash from a path, since xorriso's mkisofs emulation likes it that way.
+stripSlash() {
+    res="$1"
+    if test "${res:0:1}" = /; then res=${res:1}; fi
+}
+
+# Escape potential equal signs (=) with backslash (\=)
+escapeEquals() {
+    echo "$1" | sed -e 's/\\/\\\\/g' -e 's/=/\\=/g'
+}
+
+# Queues a file/directory to be placed on the ISO.
+# An entry consists of a local source path (argument 2) and
+# a destination path on the ISO (argument 1).
+addPath() {
+    target="$1"
+    source="$2"
+    echo "$(escapeEquals "$target")=$(escapeEquals "$source")" >> pathlist
+}
+
+stripSlash "$bootImage"; bootImage="$res"
+
+
+if test -n "$bootable"; then
+
+    # The -boot-info-table option modifies the $bootImage file, so
+    # find it in `contents' and make a copy of it (since the original
+    # is read-only in the Nix store...).
+    for ((i = 0; i < ${#targets_[@]}; i++)); do
+        stripSlash "${targets_[$i]}"
+        if test "$res" = "$bootImage"; then
+            echo "copying the boot image ${sources_[$i]}"
+            cp "${sources_[$i]}" boot.img
+            chmod u+w boot.img
+            sources_[$i]=boot.img
+        fi
+    done
+
+    isoBootFlags="-eltorito-boot ${bootImage}
+                  -eltorito-catalog .boot.cat
+                  -no-emul-boot -boot-load-size 4 -boot-info-table
+                  --sort-weight 1 /isolinux" # Make sure isolinux is near the beginning of the ISO
+fi
+
+if test -n "$usbBootable"; then
+    usbBootFlags="-isohybrid-mbr ${isohybridMbrImage}"
+fi
+
+if test -n "$efiBootable"; then
+    efiBootFlags="-eltorito-alt-boot
+                  -e $efiBootImage
+                  -no-emul-boot
+                  -isohybrid-gpt-basdat"
+fi
+
+touch pathlist
+
+
+# Add the individual files.
+for ((i = 0; i < ${#targets_[@]}; i++)); do
+    stripSlash "${targets_[$i]}"
+    addPath "$res" "${sources_[$i]}"
+done
+
+
+# Add the closures of the top-level store objects.
+for i in $(< $closureInfo/store-paths); do
+    addPath "${i:1}" "$i"
+done
+
+
+# Also include a manifest of the closures in a format suitable for
+# nix-store --load-db.
+if [[ ${#objects[*]} != 0 ]]; then
+    cp $closureInfo/registration nix-path-registration
+    addPath "nix-path-registration" "nix-path-registration"
+fi
+
+
+# Add symlinks to the top-level store objects.
+for ((n = 0; n < ${#objects[*]}; n++)); do
+    object=${objects[$n]}
+    symlink=${symlinks[$n]}
+    if test "$symlink" != "none"; then
+        mkdir -p $(dirname ./$symlink)
+        ln -s $object ./$symlink
+        addPath "$symlink" "./$symlink"
+    fi
+done
+
+mkdir -p $out/iso
+
+# daed2280-b91e-42c0-aed6-82c825ca41f3 is an arbitrary namespace, to prevent
+# independent applications from generating the same UUID for the same value.
+# (the chance of that being problematic seems pretty slim here, but that's how
+# version-5 UUIDs work)
+xorriso="xorriso
+ -boot_image any gpt_disk_guid=$(uuid -v 5 daed2280-b91e-42c0-aed6-82c825ca41f3 $out | tr -d -)
+ -volume_date all_file_dates =$SOURCE_DATE_EPOCH
+ -as mkisofs
+ -iso-level 3
+ -volid ${volumeID}
+ -appid nixos
+ -publisher nixos
+ -graft-points
+ -full-iso9660-filenames
+ -joliet
+ ${isoBootFlags}
+ ${usbBootFlags}
+ ${efiBootFlags}
+ -r
+ -path-list pathlist
+ --sort-weight 0 /
+"
+
+$xorriso -output $out/iso/$isoName
+
+if test -n "$compressImage"; then
+    echo "Compressing image..."
+    zstd -T$NIX_BUILD_CORES --rm $out/iso/$isoName
+fi
+
+mkdir -p $out/nix-support
+echo $system > $out/nix-support/system
+
+if test -n "$compressImage"; then
+    echo "file iso $out/iso/$isoName.zst" >> $out/nix-support/hydra-build-products
+else
+    echo "file iso $out/iso/$isoName" >> $out/nix-support/hydra-build-products
+fi
diff --git a/nixos/lib/make-options-doc/default.nix b/nixos/lib/make-options-doc/default.nix
new file mode 100644
index 00000000000..57652dd5db1
--- /dev/null
+++ b/nixos/lib/make-options-doc/default.nix
@@ -0,0 +1,169 @@
+/* Generate JSON, XML and DocBook documentation for given NixOS options.
+
+   Minimal example:
+
+    { pkgs,  }:
+
+    let
+      eval = import (pkgs.path + "/nixos/lib/eval-config.nix") {
+        baseModules = [
+          ../module.nix
+        ];
+        modules = [];
+      };
+    in pkgs.nixosOptionsDoc {
+      options = eval.options;
+    }
+
+*/
+{ pkgs
+, lib
+, options
+, transformOptions ? lib.id  # function for additional transformations of the options
+, revision ? "" # Specify revision for the options
+# a set of options the docs we are generating will be merged into, as if by recursiveUpdate.
+# used to split the options doc build into a static part (nixos/modules) and a dynamic part
+# (non-nixos modules imported via configuration.nix, other module sources).
+, baseOptionsJSON ? null
+# instead of printing warnings for e.g. options with missing descriptions (which may be lost
+# by nix build unless -L is given), emit errors and fail the build
+, warningsAreErrors ? true
+}:
+
+let
+  # Make a value safe for JSON. Functions are replaced by the string "<function>",
+  # derivations are replaced with an attrset
+  # { _type = "derivation"; name = <name of that derivation>; }.
+  # We need to handle derivations specially because consumers want to know about them,
+  # but we can't easily use the type,name subset of keys (since type is often used as
+  # a module option and might cause confusion). Use _type,name instead to the same
+  # effect, since _type is already used by the module system.
+  substSpecial = x:
+    if lib.isDerivation x then { _type = "derivation"; name = x.name; }
+    else if builtins.isAttrs x then lib.mapAttrs (name: substSpecial) x
+    else if builtins.isList x then map substSpecial x
+    else if lib.isFunction x then "<function>"
+    else x;
+
+  optionsList = lib.flip map optionsListVisible
+   (opt: transformOptions opt
+    // lib.optionalAttrs (opt ? example) { example = substSpecial opt.example; }
+    // lib.optionalAttrs (opt ? default) { default = substSpecial opt.default; }
+    // lib.optionalAttrs (opt ? type) { type = substSpecial opt.type; }
+    // lib.optionalAttrs (opt ? relatedPackages && opt.relatedPackages != []) { relatedPackages = genRelatedPackages opt.relatedPackages opt.name; }
+   );
+
+  # Generate DocBook documentation for a list of packages. This is
+  # what the `relatedPackages` option of `mkOption` from
+  # ../../../lib/options.nix influences.
+  #
+  # Each element of `relatedPackages` can be either
+  # - a string:  that will be interpreted as an attribute name from `pkgs` and turned into a link
+  #              to search.nixos.org,
+  # - a list:    that will be interpreted as an attribute path from `pkgs` and turned into a link
+  #              to search.nixos.org,
+  # - an attrset: that can specify `name`, `path`, `comment`
+  #   (either of `name`, `path` is required, the rest are optional).
+  #
+  # NOTE: No checks against `pkgs` are made to ensure that the referenced package actually exists.
+  # Such checks are not compatible with option docs caching.
+  genRelatedPackages = packages: optName:
+    let
+      unpack = p: if lib.isString p then { name = p; }
+                  else if lib.isList p then { path = p; }
+                  else p;
+      describe = args:
+        let
+          title = args.title or null;
+          name = args.name or (lib.concatStringsSep "." args.path);
+        in ''
+          <listitem>
+            <para>
+              <link xlink:href="https://search.nixos.org/packages?show=${name}&amp;sort=relevance&amp;query=${name}">
+                <literal>${lib.optionalString (title != null) "${title} aka "}pkgs.${name}</literal>
+              </link>
+            </para>
+            ${lib.optionalString (args ? comment) "<para>${args.comment}</para>"}
+          </listitem>
+        '';
+    in "<itemizedlist>${lib.concatStringsSep "\n" (map (p: describe (unpack p)) packages)}</itemizedlist>";
+
+  # Remove invisible and internal options.
+  optionsListVisible = lib.filter (opt: opt.visible && !opt.internal) (lib.optionAttrSetToDocList options);
+
+  optionsNix = builtins.listToAttrs (map (o: { name = o.name; value = removeAttrs o ["name" "visible" "internal"]; }) optionsList);
+
+in rec {
+  inherit optionsNix;
+
+  optionsAsciiDoc = pkgs.runCommand "options.adoc" {} ''
+    ${pkgs.python3Minimal}/bin/python ${./generateAsciiDoc.py} \
+      < ${optionsJSON}/share/doc/nixos/options.json \
+      > $out
+  '';
+
+  optionsCommonMark = pkgs.runCommand "options.md" {} ''
+    ${pkgs.python3Minimal}/bin/python ${./generateCommonMark.py} \
+      < ${optionsJSON}/share/doc/nixos/options.json \
+      > $out
+  '';
+
+  optionsJSON = pkgs.runCommand "options.json"
+    { meta.description = "List of NixOS options in JSON format";
+      buildInputs = [ pkgs.brotli ];
+      options = builtins.toFile "options.json"
+        (builtins.unsafeDiscardStringContext (builtins.toJSON optionsNix));
+    }
+    ''
+      # Export list of options in different format.
+      dst=$out/share/doc/nixos
+      mkdir -p $dst
+
+      ${
+        if baseOptionsJSON == null
+          then "cp $options $dst/options.json"
+          else ''
+            ${pkgs.python3Minimal}/bin/python ${./mergeJSON.py} \
+              ${lib.optionalString warningsAreErrors "--warnings-are-errors"} \
+              ${baseOptionsJSON} $options \
+              > $dst/options.json
+          ''
+      }
+
+      brotli -9 < $dst/options.json > $dst/options.json.br
+
+      mkdir -p $out/nix-support
+      echo "file json $dst/options.json" >> $out/nix-support/hydra-build-products
+      echo "file json-br $dst/options.json.br" >> $out/nix-support/hydra-build-products
+    '';
+
+  # Convert options.json into an XML file.
+  # The actual generation of the xml file is done in nix purely for the convenience
+  # of not having to generate the xml some other way
+  optionsXML = pkgs.runCommand "options.xml" {} ''
+    export NIX_STORE_DIR=$TMPDIR/store
+    export NIX_STATE_DIR=$TMPDIR/state
+    ${pkgs.nix}/bin/nix-instantiate \
+      --eval --xml --strict ${./optionsJSONtoXML.nix} \
+      --argstr file ${optionsJSON}/share/doc/nixos/options.json \
+      > "$out"
+  '';
+
+  optionsDocBook = pkgs.runCommand "options-docbook.xml" {} ''
+    optionsXML=${optionsXML}
+    if grep /nixpkgs/nixos/modules $optionsXML; then
+      echo "The manual appears to depend on the location of Nixpkgs, which is bad"
+      echo "since this prevents sharing via the NixOS channel.  This is typically"
+      echo "caused by an option default that refers to a relative path (see above"
+      echo "for hints about the offending path)."
+      exit 1
+    fi
+
+    ${pkgs.python3Minimal}/bin/python ${./sortXML.py} $optionsXML sorted.xml
+    ${pkgs.libxslt.bin}/bin/xsltproc \
+      --stringparam revision '${revision}' \
+      -o intermediate.xml ${./options-to-docbook.xsl} sorted.xml
+    ${pkgs.libxslt.bin}/bin/xsltproc \
+      -o "$out" ${./postprocess-option-descriptions.xsl} intermediate.xml
+  '';
+}
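
A hypothetical option declaration showing the three `relatedPackages` forms described in the genRelatedPackages comment above (the option and package names are illustrative):

    lib.mkOption {
      type = lib.types.bool;
      default = false;
      description = "Whether to enable the example service.";
      relatedPackages = [
        "nginx"                                                       # attribute name in pkgs
        [ "python3Packages" "requests" ]                              # attribute path in pkgs
        { name = "redis"; comment = "Used as the cache backend."; }   # attrset with name/path/comment
      ];
    }
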
diff --git a/nixos/lib/make-options-doc/generateAsciiDoc.py b/nixos/lib/make-options-doc/generateAsciiDoc.py
new file mode 100644
index 00000000000..48eadd248c5
--- /dev/null
+++ b/nixos/lib/make-options-doc/generateAsciiDoc.py
@@ -0,0 +1,37 @@
+import json
+import sys
+
+options = json.load(sys.stdin)
+# TODO: declarations: link to github
+for (name, value) in options.items():
+    print(f'== {name}')
+    print()
+    print(value['description'])
+    print()
+    print('[discrete]')
+    print('=== details')
+    print()
+    print(f'Type:: {value["type"]}')
+    if 'default' in value:
+        print('Default::')
+        print('+')
+        print('----')
+        print(json.dumps(value['default'], ensure_ascii=False, separators=(',', ':')))
+        print('----')
+        print()
+    else:
+        print('No Default:: {blank}')
+    if value['readOnly']:
+        print('Read Only:: {blank}')
+    else:
+        print()
+    if 'example' in value:
+        print('Example::')
+        print('+')
+        print('----')
+        print(json.dumps(value['example'], ensure_ascii=False, separators=(',', ':')))
+        print('----')
+        print()
+    else:
+        print('No Example:: {blank}')
+    print()
diff --git a/nixos/lib/make-options-doc/generateCommonMark.py b/nixos/lib/make-options-doc/generateCommonMark.py
new file mode 100644
index 00000000000..404e53b0df9
--- /dev/null
+++ b/nixos/lib/make-options-doc/generateCommonMark.py
@@ -0,0 +1,27 @@
+import json
+import sys
+
+options = json.load(sys.stdin)
+for (name, value) in options.items():
+    print('##', name.replace('<', '\\<').replace('>', '\\>'))
+    print(value['description'])
+    print()
+    if 'type' in value:
+        print('*_Type_*:')
+        print(value['type'])
+        print()
+    print()
+    if 'default' in value:
+        print('*_Default_*')
+        print('```')
+        print(json.dumps(value['default'], ensure_ascii=False, separators=(',', ':')))
+        print('```')
+    print()
+    print()
+    if 'example' in value:
+        print('*_Example_*')
+        print('```')
+        print(json.dumps(value['example'], ensure_ascii=False, separators=(',', ':')))
+        print('```')
+    print()
+    print()
diff --git a/nixos/lib/make-options-doc/mergeJSON.py b/nixos/lib/make-options-doc/mergeJSON.py
new file mode 100644
index 00000000000..8e2ea322dc8
--- /dev/null
+++ b/nixos/lib/make-options-doc/mergeJSON.py
@@ -0,0 +1,93 @@
+import collections
+import json
+import sys
+from typing import Any, Dict, List
+
+JSON = Dict[str, Any]
+
+class Key:
+    def __init__(self, path: List[str]):
+        self.path = path
+    def __hash__(self):
+        result = 0
+        for id in self.path:
+            result ^= hash(id)
+        return result
+    def __eq__(self, other):
+        return type(self) is type(other) and self.path == other.path
+
+Option = collections.namedtuple('Option', ['name', 'value'])
+
+# pivot a dict of options keyed by their display name to a dict keyed by their path
+def pivot(options: Dict[str, JSON]) -> Dict[Key, Option]:
+    result: Dict[Key, Option] = dict()
+    for (name, opt) in options.items():
+        result[Key(opt['loc'])] = Option(name, opt)
+    return result
+
+# pivot back to indexed-by-full-name
+# like the docbook build, we'll just fail if multiple options with differing locs
+# render to the same option name.
+def unpivot(options: Dict[Key, Option]) -> Dict[str, JSON]:
+    result: Dict[str, Dict] = dict()
+    for (key, opt) in options.items():
+        if opt.name in result:
+            raise RuntimeError(
+                'multiple options with colliding ids found',
+                opt.name,
+                result[opt.name]['loc'],
+                opt.value['loc'],
+            )
+        result[opt.name] = opt.value
+    return result
+
+warningsAreErrors = sys.argv[1] == "--warnings-are-errors"
+optOffset = 1 if warningsAreErrors else 0
+options = pivot(json.load(open(sys.argv[1 + optOffset], 'r')))
+overrides = pivot(json.load(open(sys.argv[2 + optOffset], 'r')))
+
+# fix up declaration paths in lazy options, since we don't eval them from a full nixpkgs dir
+for (k, v) in options.items():
+    v.value['declarations'] = list(map(lambda s: f'nixos/modules/{s}', v.value['declarations']))
+
+# merge both descriptions
+for (k, v) in overrides.items():
+    cur = options.setdefault(k, v).value
+    for (ok, ov) in v.value.items():
+        if ok == 'declarations':
+            decls = cur[ok]
+            for d in ov:
+                if d not in decls:
+                    decls += [d]
+        elif ok == "type":
+            # ignore types of placeholder options
+            if ov != "_unspecified" or cur[ok] == "_unspecified":
+                cur[ok] = ov
+        elif ov is not None or cur.get(ok, None) is None:
+            cur[ok] = ov
+
+severity = "error" if warningsAreErrors else "warning"
+
+# check that every option has a description
+hasWarnings = False
+for (k, v) in options.items():
+    if v.value.get('description', None) is None:
+        hasWarnings = True
+        print(f"\x1b[1;31m{severity}: option {v.name} has no description\x1b[0m", file=sys.stderr)
+        v.value['description'] = "This option has no description."
+    if v.value.get('type', "unspecified") == "unspecified":
+        hasWarnings = True
+        print(
+            f"\x1b[1;31m{severity}: option {v.name} has no type. Please specify a valid type, see " +
+            "https://nixos.org/manual/nixos/stable/index.html#sec-option-types\x1b[0m", file=sys.stderr)
+
+if hasWarnings and warningsAreErrors:
+    print(
+        "\x1b[1;31m" +
+        "Treating warnings as errors. Set documentation.nixos.options.warningsAreErrors " +
+        "to false to ignore these warnings." +
+        "\x1b[0m",
+        file=sys.stderr)
+    sys.exit(1)
+
+json.dump(unpivot(options), fp=sys.stdout)
diff --git a/nixos/lib/make-options-doc/options-to-docbook.xsl b/nixos/lib/make-options-doc/options-to-docbook.xsl
new file mode 100644
index 00000000000..b286f7b5e2c
--- /dev/null
+++ b/nixos/lib/make-options-doc/options-to-docbook.xsl
@@ -0,0 +1,246 @@
+<?xml version="1.0"?>
+
+<xsl:stylesheet version="1.0"
+                xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
+                xmlns:str="http://exslt.org/strings"
+                xmlns:xlink="http://www.w3.org/1999/xlink"
+                xmlns:nixos="tag:nixos.org"
+                xmlns="http://docbook.org/ns/docbook"
+                extension-element-prefixes="str"
+                >
+
+  <xsl:output method='xml' encoding="UTF-8" />
+
+  <xsl:param name="revision" />
+  <xsl:param name="program" />
+
+
+  <xsl:template match="/expr/list">
+    <appendix xml:id="appendix-configuration-options">
+      <title>Configuration Options</title>
+      <variablelist xml:id="configuration-variable-list">
+        <xsl:for-each select="attrs">
+          <xsl:variable name="id" select="concat('opt-', str:replace(str:replace(str:replace(str:replace(attr[@name = 'name']/string/@value, '*', '_'), '&lt;', '_'), '>', '_'), ':', '_'))" />
+          <varlistentry>
+            <term xlink:href="#{$id}">
+              <xsl:attribute name="xml:id"><xsl:value-of select="$id"/></xsl:attribute>
+              <option>
+                <xsl:value-of select="attr[@name = 'name']/string/@value" />
+              </option>
+            </term>
+
+            <listitem>
+
+              <nixos:option-description>
+                <para>
+                  <xsl:value-of disable-output-escaping="yes"
+                                select="attr[@name = 'description']/string/@value" />
+                </para>
+              </nixos:option-description>
+
+              <xsl:if test="attr[@name = 'type']">
+                <para>
+                  <emphasis>Type:</emphasis>
+                  <xsl:text> </xsl:text>
+                  <xsl:value-of select="attr[@name = 'type']/string/@value"/>
+                  <xsl:if test="attr[@name = 'readOnly']/bool/@value = 'true'">
+                    <xsl:text> </xsl:text>
+                    <emphasis>(read only)</emphasis>
+                  </xsl:if>
+                </para>
+              </xsl:if>
+
+              <xsl:if test="attr[@name = 'default']">
+                <para>
+                  <emphasis>Default:</emphasis>
+                  <xsl:text> </xsl:text>
+                  <xsl:apply-templates select="attr[@name = 'default']/*" mode="top" />
+                </para>
+              </xsl:if>
+
+              <xsl:if test="attr[@name = 'example']">
+                <para>
+                  <emphasis>Example:</emphasis>
+                  <xsl:text> </xsl:text>
+                  <xsl:apply-templates select="attr[@name = 'example']/*" mode="top" />
+                </para>
+              </xsl:if>
+
+              <xsl:if test="attr[@name = 'relatedPackages']">
+                <para>
+                  <emphasis>Related packages:</emphasis>
+                  <xsl:text> </xsl:text>
+                  <xsl:value-of disable-output-escaping="yes"
+                                select="attr[@name = 'relatedPackages']/string/@value" />
+                </para>
+              </xsl:if>
+
+              <xsl:if test="count(attr[@name = 'declarations']/list/*) != 0">
+                <para>
+                  <emphasis>Declared by:</emphasis>
+                </para>
+                <xsl:apply-templates select="attr[@name = 'declarations']" />
+              </xsl:if>
+
+              <xsl:if test="count(attr[@name = 'definitions']/list/*) != 0">
+                <para>
+                  <emphasis>Defined by:</emphasis>
+                </para>
+                <xsl:apply-templates select="attr[@name = 'definitions']" />
+              </xsl:if>
+
+            </listitem>
+
+          </varlistentry>
+
+        </xsl:for-each>
+
+      </variablelist>
+    </appendix>
+  </xsl:template>
+
+
+  <xsl:template match="attrs[attr[@name = '_type' and string[@value = 'literalExpression']]]" mode = "top">
+    <xsl:choose>
+      <xsl:when test="contains(attr[@name = 'text']/string/@value, '&#010;')">
+        <programlisting><xsl:value-of select="attr[@name = 'text']/string/@value" /></programlisting>
+      </xsl:when>
+      <xsl:otherwise>
+        <literal><xsl:value-of select="attr[@name = 'text']/string/@value" /></literal>
+      </xsl:otherwise>
+    </xsl:choose>
+  </xsl:template>
+
+
+  <xsl:template match="attrs[attr[@name = '_type' and string[@value = 'literalDocBook']]]" mode = "top">
+    <xsl:value-of disable-output-escaping="yes" select="attr[@name = 'text']/string/@value" />
+  </xsl:template>
+
+
+  <xsl:template match="string[contains(@value, '&#010;')]" mode="top">
+    <programlisting>
+      <xsl:text>''&#010;</xsl:text>
+      <xsl:value-of select='str:replace(str:replace(@value, "&apos;&apos;", "&apos;&apos;&apos;"), "${", "&apos;&apos;${")' />
+      <xsl:text>''</xsl:text>
+    </programlisting>
+  </xsl:template>
+
+
+  <xsl:template match="*" mode="top">
+    <literal><xsl:apply-templates select="." /></literal>
+  </xsl:template>
+
+
+  <xsl:template match="null">
+    <xsl:text>null</xsl:text>
+  </xsl:template>
+
+
+  <xsl:template match="string">
+    <xsl:choose>
+      <xsl:when test="(contains(@value, '&quot;') or contains(@value, '\')) and not(contains(@value, '&#010;'))">
+        <xsl:text>''</xsl:text><xsl:value-of select='str:replace(str:replace(@value, "&apos;&apos;", "&apos;&apos;&apos;"), "${", "&apos;&apos;${")' /><xsl:text>''</xsl:text>
+      </xsl:when>
+      <xsl:otherwise>
+        <xsl:text>"</xsl:text><xsl:value-of select="str:replace(str:replace(str:replace(str:replace(@value, '\', '\\'), '&quot;', '\&quot;'), '&#010;', '\n'), '${', '\${')" /><xsl:text>"</xsl:text>
+      </xsl:otherwise>
+    </xsl:choose>
+  </xsl:template>
+
+
+  <xsl:template match="int">
+    <xsl:value-of select="@value" />
+  </xsl:template>
+
+
+  <xsl:template match="bool[@value = 'true']">
+    <xsl:text>true</xsl:text>
+  </xsl:template>
+
+
+  <xsl:template match="bool[@value = 'false']">
+    <xsl:text>false</xsl:text>
+  </xsl:template>
+
+
+  <xsl:template match="list">
+    [
+    <xsl:for-each select="*">
+      <xsl:apply-templates select="." />
+      <xsl:text> </xsl:text>
+    </xsl:for-each>
+    ]
+  </xsl:template>
+
+
+  <xsl:template match="attrs[attr[@name = '_type' and string[@value = 'literalExpression']]]">
+    <xsl:value-of select="attr[@name = 'text']/string/@value" />
+  </xsl:template>
+
+
+  <xsl:template match="attrs">
+    {
+    <xsl:for-each select="attr">
+      <xsl:value-of select="@name" />
+      <xsl:text> = </xsl:text>
+      <xsl:apply-templates select="*" /><xsl:text>; </xsl:text>
+    </xsl:for-each>
+    }
+  </xsl:template>
+
+
+  <xsl:template match="attrs[attr[@name = '_type' and string[@value = 'derivation']]]">
+    <replaceable>(build of <xsl:value-of select="attr[@name = 'name']/string/@value" />)</replaceable>
+  </xsl:template>
+
+  <xsl:template match="attr[@name = 'declarations' or @name = 'definitions']">
+    <simplelist>
+      <xsl:for-each select="list/string">
+        <member><filename>
+          <!-- Hyperlink the filename either to the nixpkgs GitHub
+          repository (if it’s a module and we have a revision number),
+          or to the local filesystem. -->
+          <xsl:choose>
+            <xsl:when test="not(starts-with(@value, '/'))">
+              <xsl:choose>
+                <xsl:when test="$revision = 'local'">
+                  <xsl:attribute name="xlink:href">https://github.com/NixOS/nixpkgs/blob/master/<xsl:value-of select="@value"/></xsl:attribute>
+                </xsl:when>
+                <xsl:otherwise>
+                  <xsl:attribute name="xlink:href">https://github.com/NixOS/nixpkgs/blob/<xsl:value-of select="$revision"/>/<xsl:value-of select="@value"/></xsl:attribute>
+                </xsl:otherwise>
+              </xsl:choose>
+            </xsl:when>
+            <xsl:when test="$revision != 'local' and $program = 'nixops' and contains(@value, '/nix/')">
+              <xsl:attribute name="xlink:href">https://github.com/NixOS/nixops/blob/<xsl:value-of select="$revision"/>/nix/<xsl:value-of select="substring-after(@value, '/nix/')"/></xsl:attribute>
+            </xsl:when>
+            <xsl:otherwise>
+              <xsl:attribute name="xlink:href">file://<xsl:value-of select="@value"/></xsl:attribute>
+            </xsl:otherwise>
+          </xsl:choose>
+          <!-- Print the filename and make it user-friendly by replacing the
+          /nix/store/<hash> prefix by the default location of nixos
+          sources. -->
+          <xsl:choose>
+            <xsl:when test="not(starts-with(@value, '/'))">
+              &lt;nixpkgs/<xsl:value-of select="@value"/>&gt;
+            </xsl:when>
+            <xsl:when test="contains(@value, 'nixops') and contains(@value, '/nix/')">
+              &lt;nixops/<xsl:value-of select="substring-after(@value, '/nix/')"/>&gt;
+            </xsl:when>
+            <xsl:otherwise>
+              <xsl:value-of select="@value" />
+            </xsl:otherwise>
+          </xsl:choose>
+        </filename></member>
+      </xsl:for-each>
+    </simplelist>
+  </xsl:template>
+
+
+  <xsl:template match="function">
+    <xsl:text>λ</xsl:text>
+  </xsl:template>
+
+
+</xsl:stylesheet>
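
The xml:id generation near the top of this stylesheet replaces characters that are not usable in IDs ('*', '<', '>', ':') with underscores before prefixing "opt-". A rough Python equivalent of those nested str:replace calls, for illustration only (the helper name is hypothetical):

    def option_id(name: str) -> str:
        # mirror the four str:replace calls in the stylesheet
        for c in "*<>:":
            name = name.replace(c, "_")
        return "opt-" + name

    # option_id("services.foo.<name>.enable") == "opt-services.foo._name_.enable"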
diff --git a/nixos/lib/make-options-doc/optionsJSONtoXML.nix b/nixos/lib/make-options-doc/optionsJSONtoXML.nix
new file mode 100644
index 00000000000..ba50c5f898b
--- /dev/null
+++ b/nixos/lib/make-options-doc/optionsJSONtoXML.nix
@@ -0,0 +1,6 @@
+{ file }:
+
+builtins.attrValues
+  (builtins.mapAttrs
+    (name: def: def // { inherit name; })
+    (builtins.fromJSON (builtins.readFile file)))
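
This expression reshapes the options JSON (an attrset keyed by option name) into a list of attrsets that each carry their own name, ready to be rendered with builtins.toXML. A rough Python sketch of the same reshaping, with an illustrative input:

    import json

    # Illustrative input; the real file is the generated options JSON.
    options = json.loads('{"services.foo.enable": {"type": "boolean"}}')
    as_list = [dict(definition, name=name) for name, definition in options.items()]
    # as_list == [{"type": "boolean", "name": "services.foo.enable"}]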
diff --git a/nixos/lib/make-options-doc/postprocess-option-descriptions.xsl b/nixos/lib/make-options-doc/postprocess-option-descriptions.xsl
new file mode 100644
index 00000000000..1201c7612c2
--- /dev/null
+++ b/nixos/lib/make-options-doc/postprocess-option-descriptions.xsl
@@ -0,0 +1,115 @@
+<?xml version="1.0"?>
+
+<xsl:stylesheet version="1.0"
+                xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
+                xmlns:str="http://exslt.org/strings"
+                xmlns:exsl="http://exslt.org/common"
+                xmlns:db="http://docbook.org/ns/docbook"
+                xmlns:nixos="tag:nixos.org"
+                extension-element-prefixes="str exsl">
+  <xsl:output method='xml' encoding="UTF-8" />
+
+  <xsl:template match="@*|node()">
+    <xsl:copy>
+      <xsl:apply-templates select="@*|node()" />
+    </xsl:copy>
+  </xsl:template>
+
+  <xsl:template name="break-up-description">
+    <xsl:param name="input" />
+    <xsl:param name="buffer" />
+
+    <!-- Every time we have two newlines following each other, we want to
+         break it into </para><para>. -->
+    <xsl:variable name="parbreak" select="'&#xa;&#xa;'" />
+
+    <!-- Similar to "(head:tail) = input" in Haskell. -->
+    <xsl:variable name="head" select="$input[1]" />
+    <xsl:variable name="tail" select="$input[position() &gt; 1]" />
+
+    <xsl:choose>
+      <xsl:when test="$head/self::text() and contains($head, $parbreak)">
+        <!-- If the haystack provided to str:split() directly starts or
+             ends with $parbreak, it doesn't generate a <token/> for that,
+             so we are doing this here. -->
+        <xsl:variable name="splitted-raw">
+          <xsl:if test="starts-with($head, $parbreak)"><token /></xsl:if>
+          <xsl:for-each select="str:split($head, $parbreak)">
+            <token><xsl:value-of select="node()" /></token>
+          </xsl:for-each>
+          <!-- Something like ends-with($head, $parbreak), but there is
+               no ends-with() in XSLT, so we need to use substring(). -->
+          <xsl:if test="
+            substring($head, string-length($head) -
+                             string-length($parbreak) + 1) = $parbreak
+          "><token /></xsl:if>
+        </xsl:variable>
+        <xsl:variable name="splitted"
+                      select="exsl:node-set($splitted-raw)/token" />
+        <!-- The buffer we had so far didn't contain any text nodes that
+             contain a $parbreak, so we can put the buffer along with the
+             first token of $splitted into a para element. -->
+        <para xmlns="http://docbook.org/ns/docbook">
+          <xsl:apply-templates select="exsl:node-set($buffer)" />
+          <xsl:apply-templates select="$splitted[1]/node()" />
+        </para>
+        <!-- We have already emitted the first splitted result, so the
+             last result is going to be set as the new $buffer later
+             because its contents may not be directly followed up by a
+             $parbreak. -->
+        <xsl:for-each select="$splitted[position() &gt; 1
+                              and position() &lt; last()]">
+          <para xmlns="http://docbook.org/ns/docbook">
+            <xsl:apply-templates select="node()" />
+          </para>
+        </xsl:for-each>
+        <xsl:call-template name="break-up-description">
+          <xsl:with-param name="input" select="$tail" />
+          <xsl:with-param name="buffer" select="$splitted[last()]/node()" />
+        </xsl:call-template>
+      </xsl:when>
+      <!-- Either non-text node or one without $parbreak, which we just
+           want to buffer and continue recursing. -->
+      <xsl:when test="$input">
+        <xsl:call-template name="break-up-description">
+          <xsl:with-param name="input" select="$tail" />
+          <!-- This essentially appends $head to $buffer. -->
+          <xsl:with-param name="buffer">
+            <xsl:if test="$buffer">
+              <xsl:for-each select="exsl:node-set($buffer)">
+                <xsl:apply-templates select="." />
+              </xsl:for-each>
+            </xsl:if>
+            <xsl:apply-templates select="$head" />
+          </xsl:with-param>
+        </xsl:call-template>
+      </xsl:when>
+      <!-- No more $input, just put the remaining $buffer in a para. -->
+      <xsl:otherwise>
+        <para xmlns="http://docbook.org/ns/docbook">
+          <xsl:apply-templates select="exsl:node-set($buffer)" />
+        </para>
+      </xsl:otherwise>
+    </xsl:choose>
+  </xsl:template>
+
+  <xsl:template match="nixos:option-description">
+    <xsl:choose>
+      <!--
+        Only process nodes that are comprised of a single <para/> element,
+        because if that's not the case the description already contains
+        </para><para> in between and we need no further processing.
+      -->
+      <xsl:when test="count(db:para) > 1">
+        <xsl:apply-templates select="node()" />
+      </xsl:when>
+      <xsl:otherwise>
+        <xsl:call-template name="break-up-description">
+          <xsl:with-param name="input"
+                          select="exsl:node-set(db:para/node())" />
+        </xsl:call-template>
+      </xsl:otherwise>
+    </xsl:choose>
+  </xsl:template>
+
+</xsl:stylesheet>
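
The break-up-description template above splits a description at blank lines into separate <para> elements while preserving inline DocBook markup. Ignoring the inline-markup handling, the core idea looks like this in Python (illustration only):

    description = "First paragraph.\n\nSecond paragraph."
    paras = ["<para>%s</para>" % chunk for chunk in description.split("\n\n")]
    # paras == ["<para>First paragraph.</para>", "<para>Second paragraph.</para>"]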
diff --git a/nixos/lib/make-options-doc/sortXML.py b/nixos/lib/make-options-doc/sortXML.py
new file mode 100644
index 00000000000..e63ff3538b3
--- /dev/null
+++ b/nixos/lib/make-options-doc/sortXML.py
@@ -0,0 +1,27 @@
+import xml.etree.ElementTree as ET
+import sys
+
+tree = ET.parse(sys.argv[1])
+# the xml tree is of the form
+# <expr><list> {all options, each an attrs} </list></expr>
+options = list(tree.getroot().find('list'))
+
+def sortKey(opt):
+    def order(s):
+        if s.startswith("enable"):
+            return 0
+        if s.startswith("package"):
+            return 1
+        return 2
+
+    return [
+        (order(p.attrib['value']), p.attrib['value'])
+        for p in opt.findall('attr[@name="loc"]/list/string')
+    ]
+
+options.sort(key=sortKey)
+
+doc = ET.Element("expr")
+newOptions = ET.SubElement(doc, "list")
+newOptions.extend(options)
+ET.ElementTree(doc).write(sys.argv[2], encoding='utf-8')
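
The effect of sortKey is that, within each level of the option path, enable* options sort first, package* options second, and everything else alphabetically. A small self-contained illustration with made-up option locations:

    def order(s):
        return 0 if s.startswith("enable") else 1 if s.startswith("package") else 2

    locs = [
        ["services", "foo", "settings"],
        ["services", "foo", "enable"],
        ["services", "foo", "package"],
    ]
    locs.sort(key=lambda loc: [(order(p), p) for p in loc])
    # locs is now ordered: ...enable, ...package, ...settings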
diff --git a/nixos/lib/make-squashfs.nix b/nixos/lib/make-squashfs.nix
new file mode 100644
index 00000000000..170d315fb75
--- /dev/null
+++ b/nixos/lib/make-squashfs.nix
@@ -0,0 +1,35 @@
+{ stdenv, squashfsTools, closureInfo
+
+, # The root directory of the squashfs filesystem is filled with the
+  # closures of the Nix store paths listed here.
+  storeContents ? []
+, # Compression parameters.
+  # For zstd compression you can use "zstd -Xcompression-level 6".
+  comp ? "xz -Xdict-size 100%"
+}:
+
+stdenv.mkDerivation {
+  name = "squashfs.img";
+
+  nativeBuildInputs = [ squashfsTools ];
+
+  buildCommand =
+    ''
+      closureInfo=${closureInfo { rootPaths = storeContents; }}
+
+      # Also include a manifest of the closures in a format suitable
+      # for nix-store --load-db.
+      cp $closureInfo/registration nix-path-registration
+
+      # 64 cores on i686 does not work
+      # fails with FATAL ERROR: mangle2:: xz compress failed with error code 5
+      if ((NIX_BUILD_CORES > 48)); then
+        NIX_BUILD_CORES=48
+      fi
+
+      # Generate the squashfs image.
+      mksquashfs nix-path-registration $(cat $closureInfo/store-paths) $out \
+        -no-hardlinks -keep-as-directory -all-root -b 1048576 -comp ${comp} \
+        -processors $NIX_BUILD_CORES
+    '';
+}
diff --git a/nixos/lib/make-system-tarball.nix b/nixos/lib/make-system-tarball.nix
new file mode 100644
index 00000000000..dab168f4a48
--- /dev/null
+++ b/nixos/lib/make-system-tarball.nix
@@ -0,0 +1,56 @@
+{ stdenv, closureInfo, pixz
+
+, # The file name of the resulting tarball
+  fileName ? "nixos-system-${stdenv.hostPlatform.system}"
+
+, # The files and directories to be placed in the tarball.
+  # This is a list of attribute sets {source, target} where `source'
+  # is the file system object (regular file or directory) to be
+  # grafted in the file system at path `target'.
+  contents
+
+, # In addition to `contents', the closure of the store paths listed
+  # in `packages' are also placed in the Nix store of the tarball.  This is
+  # a list of attribute sets {object, symlink} where `object' is a
+  # store path whose closure will be copied, and `symlink' is a
+  # symlink to `object' that will be added to the tarball.
+  storeContents ? []
+
+  # Extra commands to be executed before archiving files
+, extraCommands ? ""
+
+  # Extra tar arguments
+, extraArgs ? ""
+  # Command used for compression
+, compressCommand ? "pixz"
+  # Extension for the compressed tarball
+, compressionExtension ? ".xz"
+  # extra inputs, like the compressor to use
+, extraInputs ? [ pixz ]
+}:
+
+let
+  symlinks = map (x: x.symlink) storeContents;
+  objects = map (x: x.object) storeContents;
+in
+
+stdenv.mkDerivation {
+  name = "tarball";
+  builder = ./make-system-tarball.sh;
+  nativeBuildInputs = extraInputs;
+
+  inherit fileName extraArgs extraCommands compressCommand;
+
+  # !!! should use XML.
+  sources = map (x: x.source) contents;
+  targets = map (x: x.target) contents;
+
+  # !!! should use XML.
+  inherit symlinks objects;
+
+  closureInfo = closureInfo {
+    rootPaths = objects;
+  };
+
+  extension = compressionExtension;
+}
diff --git a/nixos/lib/make-system-tarball.sh b/nixos/lib/make-system-tarball.sh
new file mode 100644
index 00000000000..1a0017a1799
--- /dev/null
+++ b/nixos/lib/make-system-tarball.sh
@@ -0,0 +1,57 @@
+source $stdenv/setup
+
+sources_=($sources)
+targets_=($targets)
+
+objects=($objects)
+symlinks=($symlinks)
+
+
+# Remove the initial slash from a path, so that entries end up relative to the tarball root.
+stripSlash() {
+    res="$1"
+    if test "${res:0:1}" = /; then res=${res:1}; fi
+}
+
+# Add the individual files.
+for ((i = 0; i < ${#targets_[@]}; i++)); do
+    stripSlash "${targets_[$i]}"
+    mkdir -p "$(dirname "$res")"
+    cp -a "${sources_[$i]}" "$res"
+done
+
+
+# Add the closures of the top-level store objects.
+chmod +w .
+mkdir -p nix/store
+for i in $(< $closureInfo/store-paths); do
+    cp -a "$i" "${i:1}"
+done
+
+
+# TODO tar ruxo
+# Also include a manifest of the closures in a format suitable for
+# nix-store --load-db.
+cp $closureInfo/registration nix-path-registration
+
+# Add symlinks to the top-level store objects.
+for ((n = 0; n < ${#objects[*]}; n++)); do
+    object=${objects[$n]}
+    symlink=${symlinks[$n]}
+    if test "$symlink" != "none"; then
+        mkdir -p $(dirname ./$symlink)
+        ln -s $object ./$symlink
+    fi
+done
+
+$extraCommands
+
+mkdir -p $out/tarball
+
+rm env-vars
+
+time tar --sort=name --mtime='@1' --owner=0 --group=0 --numeric-owner -c * $extraArgs | $compressCommand > $out/tarball/$fileName.tar${extension}
+
+mkdir -p $out/nix-support
+echo $system > $out/nix-support/system
+echo "file system-tarball $out/tarball/$fileName.tar${extension}" > $out/nix-support/hydra-build-products
diff --git a/nixos/lib/make-zfs-image.nix b/nixos/lib/make-zfs-image.nix
new file mode 100644
index 00000000000..a84732aa117
--- /dev/null
+++ b/nixos/lib/make-zfs-image.nix
@@ -0,0 +1,333 @@
+# Note: This is a private API, internal to NixOS. Its interface is subject
+# to change without notice.
+#
+# The result of this builder is two disk images:
+#
+#  * `boot` - a small disk formatted with FAT to be used for /boot. FAT is
+#    chosen to support EFI.
+#  * `root` - a larger disk with a zpool taking the entire disk.
+#
+# This two-disk approach is taken to satisfy ZFS's requirements for
+# autoexpand.
+#
+# # Why doesn't autoexpand work with ZFS in a partition?
+#
+# When ZFS owns the whole disk, it doesn’t really use a partition: it has
+# a marker partition at the start and a marker partition at the end of
+# the disk.
+#
+# If ZFS is constrained to a partition, ZFS leaves expanding the partition
+# up to the user. Obviously, the user may not choose to do so.
+#
+# Once the user expands the partition, calling zpool online -e expands the
+# vdev to use the whole partition. It doesn’t happen automatically
+# presumably because zed doesn’t get an event saying its partition grew,
+# whereas it can and does get an event saying the whole disk it is on is
+# now larger.
+{ lib
+, pkgs
+, # The NixOS configuration to be installed onto the disk image.
+  config
+
+, # The size of the FAT boot disk, in megabytes.
+  bootSize ? 1024
+
+, # The size of the root disk, in megabytes.
+  rootSize ? 2048
+
+, # The name of the ZFS pool
+  rootPoolName ? "tank"
+
+, # zpool properties
+  rootPoolProperties ? {
+    autoexpand = "on";
+  }
+, # pool-wide filesystem properties
+  rootPoolFilesystemProperties ? {
+    acltype = "posixacl";
+    atime = "off";
+    compression = "on";
+    mountpoint = "legacy";
+    xattr = "sa";
+  }
+
+, # datasets, with per-attribute options:
+  # mount: (optional) mount point in the VM
+  # properties: (optional) ZFS properties on the dataset, like filesystemProperties
+  # Notes:
+  # 1. datasets will be created from shorter to longer names as a simple topo-sort
+  # 2. you should define a root dataset's mount as `/`
+  datasets ? { }
+
+, # The files and directories to be placed in the target file system.
+  # This is a list of attribute sets {source, target} where `source'
+  # is the file system object (regular file or directory) to be
+  # grafted in the file system at path `target'.
+  contents ? []
+
+, # The initial NixOS configuration file to be copied to
+  # /etc/nixos/configuration.nix. This configuration will be embedded
+  # inside a configuration which includes the described ZFS fileSystems.
+  configFile ? null
+
+, # Shell code executed after the VM has finished.
+  postVM ? ""
+
+, name ? "nixos-disk-image"
+
+, # Disk image format, one of qcow2, qcow2-compressed, vdi, vpc, raw.
+  format ? "raw"
+
+, # Include a copy of Nixpkgs in the disk image
+  includeChannel ? true
+}:
+let
+  formatOpt = if format == "qcow2-compressed" then "qcow2" else format;
+
+  compress = lib.optionalString (format == "qcow2-compressed") "-c";
+
+  filenameSuffix = "." + {
+    qcow2 = "qcow2";
+    vdi = "vdi";
+    vpc = "vhd";
+    raw = "img";
+  }.${formatOpt} or formatOpt;
+  bootFilename = "nixos.boot${filenameSuffix}";
+  rootFilename = "nixos.root${filenameSuffix}";
+
+  # FIXME: merge with channel.nix / make-channel.nix.
+  channelSources =
+    let
+      nixpkgs = lib.cleanSource pkgs.path;
+    in
+      pkgs.runCommand "nixos-${config.system.nixos.version}" {} ''
+        mkdir -p $out
+        cp -prd ${nixpkgs.outPath} $out/nixos
+        chmod -R u+w $out/nixos
+        if [ ! -e $out/nixos/nixpkgs ]; then
+          ln -s . $out/nixos/nixpkgs
+        fi
+        rm -rf $out/nixos/.git
+        echo -n ${config.system.nixos.versionSuffix} > $out/nixos/.version-suffix
+      '';
+
+  closureInfo = pkgs.closureInfo {
+    rootPaths = [ config.system.build.toplevel ]
+    ++ (lib.optional includeChannel channelSources);
+  };
+
+  modulesTree = pkgs.aggregateModules
+    (with config.boot.kernelPackages; [ kernel zfs ]);
+
+  tools = lib.makeBinPath (
+    with pkgs; [
+      config.system.build.nixos-enter
+      config.system.build.nixos-install
+      dosfstools
+      e2fsprogs
+      gptfdisk
+      nix
+      parted
+      utillinux
+      zfs
+    ]
+  );
+
+  hasDefinedMount  = disk: ((disk.mount or null) != null);
+
+  stringifyProperties = prefix: properties: lib.concatStringsSep " \\\n" (
+    lib.mapAttrsToList
+      (
+        property: value: "${prefix} ${lib.escapeShellArg property}=${lib.escapeShellArg value}"
+      )
+      properties
+  );
+
+  featuresToProperties = features:
+    lib.listToAttrs
+      (builtins.map (feature: {
+        name = "feature@${feature}";
+        value = "enabled";
+      }) features);
+
+  createDatasets =
+    let
+      datasetlist = lib.mapAttrsToList lib.nameValuePair datasets;
+      sorted = lib.sort (left: right: (lib.stringLength left.name) < (lib.stringLength right.name)) datasetlist;
+      cmd = { name, value }:
+        let
+          properties = stringifyProperties "-o" (value.properties or {});
+        in
+          "zfs create -p ${properties} ${name}";
+    in
+      lib.concatMapStringsSep "\n" cmd sorted;
+
+  mountDatasets =
+    let
+      datasetlist = lib.mapAttrsToList lib.nameValuePair datasets;
+      mounts = lib.filter ({ value, ... }: hasDefinedMount value) datasetlist;
+      sorted = lib.sort (left: right: (lib.stringLength left.value.mount) < (lib.stringLength right.value.mount)) mounts;
+      cmd = { name, value }:
+        ''
+          mkdir -p /mnt${lib.escapeShellArg value.mount}
+          mount -t zfs ${name} /mnt${lib.escapeShellArg value.mount}
+        '';
+    in
+      lib.concatMapStringsSep "\n" cmd sorted;
+
+  unmountDatasets =
+    let
+      datasetlist = lib.mapAttrsToList lib.nameValuePair datasets;
+      mounts = lib.filter ({ value, ... }: hasDefinedMount value) datasetlist;
+      sorted = lib.sort (left: right: (lib.stringLength left.value.mount) > (lib.stringLength right.value.mount)) mounts;
+      cmd = { name, value }:
+        ''
+          umount /mnt${lib.escapeShellArg value.mount}
+        '';
+    in
+      lib.concatMapStringsSep "\n" cmd sorted;
+
+
+  fileSystemsCfgFile =
+    let
+      mountable = lib.filterAttrs (_: value: hasDefinedMount value) datasets;
+    in
+      pkgs.runCommand "filesystem-config.nix" {
+        buildInputs = with pkgs; [ jq nixpkgs-fmt ];
+        filesystems = builtins.toJSON {
+          fileSystems = lib.mapAttrs'
+            (
+              dataset: attrs:
+                {
+                  name = attrs.mount;
+                  value = {
+                    fsType = "zfs";
+                    device = "${dataset}";
+                  };
+                }
+            )
+            mountable;
+        };
+        passAsFile = [ "filesystems" ];
+      } ''
+      (
+        echo "builtins.fromJSON '''"
+        jq . < "$filesystemsPath"
+        echo "'''"
+      ) > $out
+
+      nixpkgs-fmt $out
+    '';
+
+  mergedConfig =
+    if configFile == null
+    then fileSystemsCfgFile
+    else
+      pkgs.runCommand "configuration.nix" {
+        buildInputs = with pkgs; [ nixpkgs-fmt ];
+      }
+        ''
+          (
+            echo '{ imports = ['
+            printf "(%s)\n" "$(cat ${fileSystemsCfgFile})";
+            printf "(%s)\n" "$(cat ${configFile})";
+            echo ']; }'
+          ) > $out
+
+          nixpkgs-fmt $out
+        '';
+
+  image = (
+    pkgs.vmTools.override {
+      rootModules =
+        [ "zfs" "9p" "9pnet_virtio" "virtio_pci" "virtio_blk" ] ++
+          (pkgs.lib.optional pkgs.stdenv.hostPlatform.isx86 "rtc_cmos");
+      kernel = modulesTree;
+    }
+  ).runInLinuxVM (
+    pkgs.runCommand name
+      {
+        QEMU_OPTS = "-drive file=$bootDiskImage,if=virtio,cache=unsafe,werror=report"
+         + " -drive file=$rootDiskImage,if=virtio,cache=unsafe,werror=report";
+        preVM = ''
+          PATH=$PATH:${pkgs.qemu_kvm}/bin
+          mkdir $out
+          bootDiskImage=boot.raw
+          qemu-img create -f raw $bootDiskImage ${toString bootSize}M
+
+          rootDiskImage=root.raw
+          qemu-img create -f raw $rootDiskImage ${toString rootSize}M
+        '';
+
+        postVM = ''
+          ${if formatOpt == "raw" then ''
+          mv $bootDiskImage $out/${bootFilename}
+          mv $rootDiskImage $out/${rootFilename}
+        '' else ''
+          ${pkgs.qemu}/bin/qemu-img convert -f raw -O ${formatOpt} ${compress} $bootDiskImage $out/${bootFilename}
+          ${pkgs.qemu}/bin/qemu-img convert -f raw -O ${formatOpt} ${compress} $rootDiskImage $out/${rootFilename}
+        ''}
+          bootDiskImage=$out/${bootFilename}
+          rootDiskImage=$out/${rootFilename}
+          set -x
+          ${postVM}
+        '';
+      } ''
+      export PATH=${tools}:$PATH
+      set -x
+
+      cp -sv /dev/vda /dev/sda
+      cp -sv /dev/vda /dev/xvda
+
+      parted --script /dev/vda -- \
+        mklabel gpt \
+        mkpart no-fs 1MiB 2MiB \
+        set 1 bios_grub on \
+        align-check optimal 1 \
+        mkpart ESP fat32 2MiB -1MiB \
+        align-check optimal 2 \
+        print
+
+      sfdisk --dump /dev/vda
+
+
+      zpool create \
+        ${stringifyProperties "  -o" rootPoolProperties} \
+        ${stringifyProperties "  -O" rootPoolFilesystemProperties} \
+        ${rootPoolName} /dev/vdb
+      parted --script /dev/vdb -- print
+
+      ${createDatasets}
+      ${mountDatasets}
+
+      mkdir -p /mnt/boot
+      mkfs.vfat -n ESP /dev/vda2
+      mount /dev/vda2 /mnt/boot
+
+      mount
+
+      # Install a configuration.nix
+      mkdir -p /mnt/etc/nixos
+      # `cat` so it is mutable on the fs
+      cat ${mergedConfig} > /mnt/etc/nixos/configuration.nix
+
+      export NIX_STATE_DIR=$TMPDIR/state
+      nix-store --load-db < ${closureInfo}/registration
+
+      nixos-install \
+        --root /mnt \
+        --no-root-passwd \
+        --system ${config.system.build.toplevel} \
+        --substituters "" \
+        ${lib.optionalString includeChannel ''--channel ${channelSources}''}
+
+      df -h
+
+      umount /mnt/boot
+      ${unmountDatasets}
+
+      zpool export ${rootPoolName}
+    ''
+  );
+in
+image
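
The stringifyProperties helper above turns the rootPoolProperties/rootPoolFilesystemProperties attrsets into backslash-continued -o/-O flags for zpool create. A rough Python sketch of that formatting, for illustration only (the function name is hypothetical; Nix iterates attributes in name order, hence the sorted()):

    import shlex

    def stringify_properties(prefix, properties):
        return " \\\n".join(
            "%s %s=%s" % (prefix, shlex.quote(k), shlex.quote(v))
            for k, v in sorted(properties.items())
        )

    print(stringify_properties("-o", {"autoexpand": "on"}))
    # -o autoexpand=on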
diff --git a/nixos/lib/qemu-common.nix b/nixos/lib/qemu-common.nix
new file mode 100644
index 00000000000..20bbe9ff5d9
--- /dev/null
+++ b/nixos/lib/qemu-common.nix
@@ -0,0 +1,32 @@
+# QEMU-related utilities shared between various Nix expressions.
+{ lib, pkgs }:
+
+let
+  zeroPad = n:
+    lib.optionalString (n < 16) "0" +
+      (if n > 255
+       then throw "Can't have more than 255 nets or nodes!"
+       else lib.toHexString n);
+in
+
+rec {
+  qemuNicMac = net: machine: "52:54:00:12:${zeroPad net}:${zeroPad machine}";
+
+  qemuNICFlags = nic: net: machine:
+    [ "-device virtio-net-pci,netdev=vlan${toString nic},mac=${qemuNicMac net machine}"
+      ''-netdev vde,id=vlan${toString nic},sock="$QEMU_VDE_SOCKET_${toString net}"''
+    ];
+
+  qemuSerialDevice = if pkgs.stdenv.hostPlatform.isx86 || pkgs.stdenv.hostPlatform.isRiscV then "ttyS0"
+        else if (with pkgs.stdenv.hostPlatform; isAarch32 || isAarch64 || isPower) then "ttyAMA0"
+        else throw "Unknown QEMU serial device for system '${pkgs.stdenv.hostPlatform.system}'";
+
+  qemuBinary = qemuPkg: {
+    x86_64-linux = "${qemuPkg}/bin/qemu-kvm -cpu max";
+    armv7l-linux = "${qemuPkg}/bin/qemu-system-arm -enable-kvm -machine virt -cpu host";
+    aarch64-linux = "${qemuPkg}/bin/qemu-system-aarch64 -enable-kvm -machine virt,gic-version=host -cpu host";
+    powerpc64le-linux = "${qemuPkg}/bin/qemu-system-ppc64 -machine powernv";
+    powerpc64-linux = "${qemuPkg}/bin/qemu-system-ppc64 -machine powernv";
+    x86_64-darwin = "${qemuPkg}/bin/qemu-kvm -cpu max";
+  }.${pkgs.stdenv.hostPlatform.system} or "${qemuPkg}/bin/qemu-kvm";
+}
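
qemuNicMac derives a deterministic MAC address from the VLAN and machine numbers by zero-padded hex encoding, and zeroPad refuses values above 255. A rough Python equivalent (illustration only; the function name is hypothetical):

    def qemu_nic_mac(net: int, machine: int) -> str:
        if net > 255 or machine > 255:
            raise ValueError("Can't have more than 255 nets or nodes!")
        return "52:54:00:12:%02X:%02X" % (net, machine)

    # qemu_nic_mac(1, 2) == "52:54:00:12:01:02"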
diff --git a/nixos/lib/systemd-lib.nix b/nixos/lib/systemd-lib.nix
new file mode 100644
index 00000000000..a472d97f5cc
--- /dev/null
+++ b/nixos/lib/systemd-lib.nix
@@ -0,0 +1,440 @@
+{ config, lib, pkgs }:
+
+with lib;
+
+let
+  cfg = config.systemd;
+  lndir = "${pkgs.buildPackages.xorg.lndir}/bin/lndir";
+  systemd = cfg.package;
+in rec {
+
+  shellEscape = s: (replaceChars [ "\\" ] [ "\\\\" ] s);
+
+  mkPathSafeName = lib.replaceChars ["@" ":" "\\" "[" "]"] ["-" "-" "-" "" ""];
+
+  # a type for options that take a unit name
+  unitNameType = types.strMatching "[a-zA-Z0-9@%:_.\\-]+[.](service|socket|device|mount|automount|swap|target|path|timer|scope|slice)";
+
+  makeUnit = name: unit:
+    if unit.enable then
+      pkgs.runCommand "unit-${mkPathSafeName name}"
+        { preferLocalBuild = true;
+          allowSubstitutes = false;
+          inherit (unit) text;
+        }
+        ''
+          mkdir -p $out
+          echo -n "$text" > $out/${shellEscape name}
+        ''
+    else
+      pkgs.runCommand "unit-${mkPathSafeName name}-disabled"
+        { preferLocalBuild = true;
+          allowSubstitutes = false;
+        }
+        ''
+          mkdir -p $out
+          ln -s /dev/null $out/${shellEscape name}
+        '';
+
+  boolValues = [true false "yes" "no"];
+
+  digits = map toString (range 0 9);
+
+  isByteFormat = s:
+    let
+      l = reverseList (stringToCharacters s);
+      suffix = head l;
+      nums = tail l;
+    in elem suffix (["K" "M" "G" "T"] ++ digits)
+      && all (num: elem num digits) nums;
+
+  assertByteFormat = name: group: attr:
+    optional (attr ? ${name} && ! isByteFormat attr.${name})
+      "Systemd ${group} field `${name}' must be in byte format [0-9]+[KMGT].";
+
+  hexChars = stringToCharacters "0123456789abcdefABCDEF";
+
+  isMacAddress = s: stringLength s == 17
+    && flip all (splitString ":" s) (bytes:
+      all (byte: elem byte hexChars) (stringToCharacters bytes)
+    );
+
+  assertMacAddress = name: group: attr:
+    optional (attr ? ${name} && ! isMacAddress attr.${name})
+      "Systemd ${group} field `${name}' must be a valid mac address.";
+
+  isPort = i: i >= 0 && i <= 65535;
+
+  assertPort = name: group: attr:
+    optional (attr ? ${name} && ! isPort attr.${name})
+      "Error on the systemd ${group} field `${name}': ${attr.name} is not a valid port number.";
+
+  assertValueOneOf = name: values: group: attr:
+    optional (attr ? ${name} && !elem attr.${name} values)
+      "Systemd ${group} field `${name}' cannot have value `${toString attr.${name}}'.";
+
+  assertHasField = name: group: attr:
+    optional (!(attr ? ${name}))
+      "Systemd ${group} field `${name}' must exist.";
+
+  assertRange = name: min: max: group: attr:
+    optional (attr ? ${name} && !(min <= attr.${name} && max >= attr.${name}))
+      "Systemd ${group} field `${name}' is outside the range [${toString min},${toString max}]";
+
+  assertMinimum = name: min: group: attr:
+    optional (attr ? ${name} && attr.${name} < min)
+      "Systemd ${group} field `${name}' must be greater than or equal to ${toString min}";
+
+  assertOnlyFields = fields: group: attr:
+    let badFields = filter (name: ! elem name fields) (attrNames attr); in
+    optional (badFields != [ ])
+      "Systemd ${group} has extra fields [${concatStringsSep " " badFields}].";
+
+  assertInt = name: group: attr:
+    optional (attr ? ${name} && !isInt attr.${name})
+      "Systemd ${group} field `${name}' is not an integer";
+
+  checkUnitConfig = group: checks: attrs: let
+    # We're applied at the top-level type (attrsOf unitOption), so the actual
+    # unit options might contain attributes from mkOverride and mkIf that we need to
+    # convert into single values before checking them.
+    defs = mapAttrs (const (v:
+      if v._type or "" == "override" then v.content
+      else if v._type or "" == "if" then v.content
+      else v
+    )) attrs;
+    errors = concatMap (c: c group defs) checks;
+  in if errors == [] then true
+     else builtins.trace (concatStringsSep "\n" errors) false;
+
+  toOption = x:
+    if x == true then "true"
+    else if x == false then "false"
+    else toString x;
+
+  attrsToSection = as:
+    concatStrings (concatLists (mapAttrsToList (name: value:
+      map (x: ''
+          ${name}=${toOption x}
+        '')
+        (if isList value then value else [value]))
+        as));
+
+  generateUnits = generateUnits' true;
+
+  generateUnits' = allowCollisions: type: units: upstreamUnits: upstreamWants:
+    pkgs.runCommand "${type}-units"
+      { preferLocalBuild = true;
+        allowSubstitutes = false;
+      } ''
+      mkdir -p $out
+
+      # Copy the upstream systemd units we're interested in.
+      for i in ${toString upstreamUnits}; do
+        fn=${cfg.package}/example/systemd/${type}/$i
+        if ! [ -e $fn ]; then echo "missing $fn"; false; fi
+        if [ -L $fn ]; then
+          target="$(readlink "$fn")"
+          if [ ''${target:0:3} = ../ ]; then
+            ln -s "$(readlink -f "$fn")" $out/
+          else
+            cp -pd $fn $out/
+          fi
+        else
+          ln -s $fn $out/
+        fi
+      done
+
+      # Copy .wants links, but only those that point to units that
+      # we're interested in.
+      for i in ${toString upstreamWants}; do
+        fn=${cfg.package}/example/systemd/${type}/$i
+        if ! [ -e $fn ]; then echo "missing $fn"; false; fi
+        x=$out/$(basename $fn)
+        mkdir $x
+        for i in $fn/*; do
+          y=$x/$(basename $i)
+          cp -pd $i $y
+          if ! [ -e $y ]; then rm $y; fi
+        done
+      done
+
+      # Symlink all units provided by the packages listed in systemd.packages.
+      packages="${toString cfg.packages}"
+
+      # Filter duplicate directories
+      declare -A unique_packages
+      for k in $packages ; do unique_packages[$k]=1 ; done
+
+      for i in ''${!unique_packages[@]}; do
+        for fn in $i/etc/systemd/${type}/* $i/lib/systemd/${type}/*; do
+          if ! [[ "$fn" =~ .wants$ ]]; then
+            if [[ -d "$fn" ]]; then
+              targetDir="$out/$(basename "$fn")"
+              mkdir -p "$targetDir"
+              ${lndir} "$fn" "$targetDir"
+            else
+              ln -s $fn $out/
+            fi
+          fi
+        done
+      done
+
+      # Symlink all units defined by systemd.units. If these are also
+      # provided by systemd or systemd.packages, then add them as
+      # <unit-name>.d/overrides.conf, which makes them extend the
+      # upstream unit.
+      for i in ${toString (mapAttrsToList (n: v: v.unit) units)}; do
+        fn=$(basename $i/*)
+        if [ -e $out/$fn ]; then
+          if [ "$(readlink -f $i/$fn)" = /dev/null ]; then
+            ln -sfn /dev/null $out/$fn
+          else
+            ${if allowCollisions then ''
+              mkdir -p $out/$fn.d
+              ln -s $i/$fn $out/$fn.d/overrides.conf
+            '' else ''
+              echo "Found multiple derivations configuring $fn!"
+              exit 1
+            ''}
+          fi
+       else
+          ln -fs $i/$fn $out/
+        fi
+      done
+
+      # Create service aliases from aliases option.
+      ${concatStrings (mapAttrsToList (name: unit:
+          concatMapStrings (name2: ''
+            ln -sfn '${name}' $out/'${name2}'
+          '') unit.aliases) units)}
+
+      # Create .wants and .requires symlinks from the wantedBy and
+      # requiredBy options.
+      ${concatStrings (mapAttrsToList (name: unit:
+          concatMapStrings (name2: ''
+            mkdir -p $out/'${name2}.wants'
+            ln -sfn '../${name}' $out/'${name2}.wants'/
+          '') unit.wantedBy) units)}
+
+      ${concatStrings (mapAttrsToList (name: unit:
+          concatMapStrings (name2: ''
+            mkdir -p $out/'${name2}.requires'
+            ln -sfn '../${name}' $out/'${name2}.requires'/
+          '') unit.requiredBy) units)}
+
+      ${optionalString (type == "system") ''
+        # Stupid misc. symlinks.
+        ln -s ${cfg.defaultUnit} $out/default.target
+        ln -s ${cfg.ctrlAltDelUnit} $out/ctrl-alt-del.target
+        ln -s rescue.target $out/kbrequest.target
+
+        mkdir -p $out/getty.target.wants/
+        ln -s ../autovt@tty1.service $out/getty.target.wants/
+
+        ln -s ../remote-fs.target $out/multi-user.target.wants/
+      ''}
+    ''; # */
+
+  makeJobScript = name: text:
+    let
+      scriptName = replaceChars [ "\\" "@" ] [ "-" "_" ] (shellEscape name);
+      out = (pkgs.writeShellScriptBin scriptName ''
+        set -e
+        ${text}
+      '').overrideAttrs (_: {
+        # The derivation name is different from the script file name
+        # to keep the script file name short to avoid cluttering logs.
+        name = "unit-script-${scriptName}";
+      });
+    in "${out}/bin/${scriptName}";
+
+  unitConfig = { config, options, ... }: {
+    config = {
+      unitConfig =
+        optionalAttrs (config.requires != [])
+          { Requires = toString config.requires; }
+        // optionalAttrs (config.wants != [])
+          { Wants = toString config.wants; }
+        // optionalAttrs (config.after != [])
+          { After = toString config.after; }
+        // optionalAttrs (config.before != [])
+          { Before = toString config.before; }
+        // optionalAttrs (config.bindsTo != [])
+          { BindsTo = toString config.bindsTo; }
+        // optionalAttrs (config.partOf != [])
+          { PartOf = toString config.partOf; }
+        // optionalAttrs (config.conflicts != [])
+          { Conflicts = toString config.conflicts; }
+        // optionalAttrs (config.requisite != [])
+          { Requisite = toString config.requisite; }
+        // optionalAttrs (config.restartTriggers != [])
+          { X-Restart-Triggers = toString config.restartTriggers; }
+        // optionalAttrs (config.reloadTriggers != [])
+          { X-Reload-Triggers = toString config.reloadTriggers; }
+        // optionalAttrs (config.description != "") {
+          Description = config.description; }
+        // optionalAttrs (config.documentation != []) {
+          Documentation = toString config.documentation; }
+        // optionalAttrs (config.onFailure != []) {
+          OnFailure = toString config.onFailure; }
+        // optionalAttrs (options.startLimitIntervalSec.isDefined) {
+          StartLimitIntervalSec = toString config.startLimitIntervalSec;
+        } // optionalAttrs (options.startLimitBurst.isDefined) {
+          StartLimitBurst = toString config.startLimitBurst;
+        };
+    };
+  };
+
+  serviceConfig = { name, config, ... }: {
+    config = mkMerge
+      [ { # Default path for systemd services.  Should be quite minimal.
+          path = mkAfter
+            [ pkgs.coreutils
+              pkgs.findutils
+              pkgs.gnugrep
+              pkgs.gnused
+              systemd
+            ];
+          environment.PATH = "${makeBinPath config.path}:${makeSearchPathOutput "bin" "sbin" config.path}";
+        }
+        (mkIf (config.preStart != "")
+          { serviceConfig.ExecStartPre =
+              [ (makeJobScript "${name}-pre-start" config.preStart) ];
+          })
+        (mkIf (config.script != "")
+          { serviceConfig.ExecStart =
+              makeJobScript "${name}-start" config.script + " " + config.scriptArgs;
+          })
+        (mkIf (config.postStart != "")
+          { serviceConfig.ExecStartPost =
+              [ (makeJobScript "${name}-post-start" config.postStart) ];
+          })
+        (mkIf (config.reload != "")
+          { serviceConfig.ExecReload =
+              makeJobScript "${name}-reload" config.reload;
+          })
+        (mkIf (config.preStop != "")
+          { serviceConfig.ExecStop =
+              makeJobScript "${name}-pre-stop" config.preStop;
+          })
+        (mkIf (config.postStop != "")
+          { serviceConfig.ExecStopPost =
+              makeJobScript "${name}-post-stop" config.postStop;
+          })
+      ];
+  };
+
+  mountConfig = { config, ... }: {
+    config = {
+      mountConfig =
+        { What = config.what;
+          Where = config.where;
+        } // optionalAttrs (config.type != "") {
+          Type = config.type;
+        } // optionalAttrs (config.options != "") {
+          Options = config.options;
+        };
+    };
+  };
+
+  automountConfig = { config, ... }: {
+    config = {
+      automountConfig =
+        { Where = config.where;
+        };
+    };
+  };
+
+  commonUnitText = def: ''
+      [Unit]
+      ${attrsToSection def.unitConfig}
+    '';
+
+  targetToUnit = name: def:
+    { inherit (def) aliases wantedBy requiredBy enable;
+      text =
+        ''
+          [Unit]
+          ${attrsToSection def.unitConfig}
+        '';
+    };
+
+  serviceToUnit = name: def:
+    { inherit (def) aliases wantedBy requiredBy enable;
+      text = commonUnitText def +
+        ''
+          [Service]
+          ${let env = cfg.globalEnvironment // def.environment;
+            in concatMapStrings (n:
+              let s = optionalString (env.${n} != null)
+                "Environment=${builtins.toJSON "${n}=${env.${n}}"}\n";
+              # systemd max line length is now 1MiB
+              # https://github.com/systemd/systemd/commit/e6dde451a51dc5aaa7f4d98d39b8fe735f73d2af
+              in if stringLength s >= 1048576 then throw "The value of the environment variable ‘${n}’ in systemd service ‘${name}.service’ is too long." else s) (attrNames env)}
+          ${if def.reloadIfChanged then ''
+            X-ReloadIfChanged=true
+          '' else if !def.restartIfChanged then ''
+            X-RestartIfChanged=false
+          '' else ""}
+          ${optionalString (!def.stopIfChanged) "X-StopIfChanged=false"}
+          ${attrsToSection def.serviceConfig}
+        '';
+    };
+
+  socketToUnit = name: def:
+    { inherit (def) aliases wantedBy requiredBy enable;
+      text = commonUnitText def +
+        ''
+          [Socket]
+          ${attrsToSection def.socketConfig}
+          ${concatStringsSep "\n" (map (s: "ListenStream=${s}") def.listenStreams)}
+          ${concatStringsSep "\n" (map (s: "ListenDatagram=${s}") def.listenDatagrams)}
+        '';
+    };
+
+  timerToUnit = name: def:
+    { inherit (def) aliases wantedBy requiredBy enable;
+      text = commonUnitText def +
+        ''
+          [Timer]
+          ${attrsToSection def.timerConfig}
+        '';
+    };
+
+  pathToUnit = name: def:
+    { inherit (def) aliases wantedBy requiredBy enable;
+      text = commonUnitText def +
+        ''
+          [Path]
+          ${attrsToSection def.pathConfig}
+        '';
+    };
+
+  mountToUnit = name: def:
+    { inherit (def) aliases wantedBy requiredBy enable;
+      text = commonUnitText def +
+        ''
+          [Mount]
+          ${attrsToSection def.mountConfig}
+        '';
+    };
+
+  automountToUnit = name: def:
+    { inherit (def) aliases wantedBy requiredBy enable;
+      text = commonUnitText def +
+        ''
+          [Automount]
+          ${attrsToSection def.automountConfig}
+        '';
+    };
+
+  sliceToUnit = name: def:
+    { inherit (def) aliases wantedBy requiredBy enable;
+      text = commonUnitText def +
+        ''
+          [Slice]
+          ${attrsToSection def.sliceConfig}
+        '';
+    };
+}
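
attrsToSection, used by all of the *ToUnit converters above, renders an attrset as key=value lines and repeats the key for list values. A rough Python sketch of that rendering (illustration only; boolean conversion as done by toOption is omitted):

    def attrs_to_section(attrs):
        lines = []
        for name, value in attrs.items():
            values = value if isinstance(value, list) else [value]
            for v in values:
                lines.append("%s=%s" % (name, v))
        return "".join(line + "\n" for line in lines)

    print(attrs_to_section({"Description": "demo",
                            "After": ["network.target", "time-sync.target"]}), end="")
    # Description=demo
    # After=network.target
    # After=time-sync.target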
diff --git a/nixos/lib/systemd-unit-options.nix b/nixos/lib/systemd-unit-options.nix
new file mode 100644
index 00000000000..8029ba0e3f6
--- /dev/null
+++ b/nixos/lib/systemd-unit-options.nix
@@ -0,0 +1,552 @@
+{ lib, systemdUtils }:
+
+with systemdUtils.lib;
+with lib;
+
+let
+  checkService = checkUnitConfig "Service" [
+    (assertValueOneOf "Type" [
+      "exec" "simple" "forking" "oneshot" "dbus" "notify" "idle"
+    ])
+    (assertValueOneOf "Restart" [
+      "no" "on-success" "on-failure" "on-abnormal" "on-abort" "always"
+    ])
+  ];
+
+in rec {
+
+  unitOption = mkOptionType {
+    name = "systemd option";
+    merge = loc: defs:
+      let
+        defs' = filterOverrides defs;
+        defs'' = getValues defs';
+      in
+        if isList (head defs'')
+        then concatLists defs''
+        else mergeEqualOption loc defs';
+  };
+
+  sharedOptions = {
+
+    enable = mkOption {
+      default = true;
+      type = types.bool;
+      description = ''
+        If set to false, this unit will be a symlink to
+        /dev/null. This is primarily useful to prevent specific
+        template instances
+        (e.g. <literal>serial-getty@ttyS0</literal>) from being
+        started. Note that <literal>enable=true</literal> does not
+        make a unit start by default at boot; if you want that, see
+        <literal>wantedBy</literal>.
+      '';
+    };
+
+    requiredBy = mkOption {
+      default = [];
+      type = types.listOf unitNameType;
+      description = ''
+        Units that require (i.e. depend on and need to go down with)
+        this unit. The discussion under <literal>wantedBy</literal>
+        applies here as well: inverse <literal>.requires</literal>
+        symlinks are established.
+      '';
+    };
+
+    wantedBy = mkOption {
+      default = [];
+      type = types.listOf unitNameType;
+      description = ''
+        Units that want (i.e. depend on) this unit. The standard way
+        to make a unit start by default at boot is to set this option
+        to <literal>[ "multi-user.target" ]</literal>. That's despite
+        the fact that the systemd.unit(5) manpage says this option
+        goes in the <literal>[Install]</literal> section that controls
+        the behaviour of <literal>systemctl enable</literal>. Since
+        such a process is stateful and thus contrary to the design of
+        NixOS, setting this option instead causes the equivalent
+        inverse <literal>.wants</literal> symlink to be present,
+        establishing the same desired relationship in a stateless way.
+      '';
+    };
+
+    aliases = mkOption {
+      default = [];
+      type = types.listOf unitNameType;
+      description = "Aliases of that unit.";
+    };
+
+  };
+
+  concreteUnitOptions = sharedOptions // {
+
+    text = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      description = "Text of this systemd unit.";
+    };
+
+    unit = mkOption {
+      internal = true;
+      description = "The generated unit.";
+    };
+
+  };
+
+  commonUnitOptions = sharedOptions // {
+
+    description = mkOption {
+      default = "";
+      type = types.singleLineStr;
+      description = "Description of this unit used in systemd messages and progress indicators.";
+    };
+
+    documentation = mkOption {
+      default = [];
+      type = types.listOf types.str;
+      description = "A list of URIs referencing documentation for this unit or its configuration.";
+    };
+
+    requires = mkOption {
+      default = [];
+      type = types.listOf unitNameType;
+      description = ''
+        Start the specified units when this unit is started, and stop
+        this unit when the specified units are stopped or fail.
+      '';
+    };
+
+    wants = mkOption {
+      default = [];
+      type = types.listOf unitNameType;
+      description = ''
+        Start the specified units when this unit is started.
+      '';
+    };
+
+    after = mkOption {
+      default = [];
+      type = types.listOf unitNameType;
+      description = ''
+        If the specified units are started at the same time as
+        this unit, delay this unit until they have started.
+      '';
+    };
+
+    before = mkOption {
+      default = [];
+      type = types.listOf unitNameType;
+      description = ''
+        If the specified units are started at the same time as
+        this unit, delay them until this unit has started.
+      '';
+    };
+
+    bindsTo = mkOption {
+      default = [];
+      type = types.listOf unitNameType;
+      description = ''
+        Like ‘requires’, but in addition, if the specified units
+        unexpectedly disappear, this unit will be stopped as well.
+      '';
+    };
+
+    partOf = mkOption {
+      default = [];
+      type = types.listOf unitNameType;
+      description = ''
+        If the specified units are stopped or restarted, then this
+        unit is stopped or restarted as well.
+      '';
+    };
+
+    conflicts = mkOption {
+      default = [];
+      type = types.listOf unitNameType;
+      description = ''
+        If the specified units are started, then this unit is stopped
+        and vice versa.
+      '';
+    };
+
+    requisite = mkOption {
+      default = [];
+      type = types.listOf unitNameType;
+      description = ''
+        Similar to requires. However, if the units listed here are not
+        already started, they will not be started and the transaction
+        will fail.
+      '';
+    };
+
+    unitConfig = mkOption {
+      default = {};
+      example = { RequiresMountsFor = "/data"; };
+      type = types.attrsOf unitOption;
+      description = ''
+        Each attribute in this set specifies an option in the
+        <literal>[Unit]</literal> section of the unit.  See
+        <citerefentry><refentrytitle>systemd.unit</refentrytitle>
+        <manvolnum>5</manvolnum></citerefentry> for details.
+      '';
+    };
+
+    restartTriggers = mkOption {
+      default = [];
+      type = types.listOf types.unspecified;
+      description = ''
+        An arbitrary list of items such as derivations.  If any item
+        in the list changes between reconfigurations, the service will
+        be restarted.
+      '';
+    };
+
+    reloadTriggers = mkOption {
+      default = [];
+      type = types.listOf unitOption;
+      description = ''
+        An arbitrary list of items such as derivations.  If any item
+        in the list changes between reconfigurations, the service will
+        be reloaded.  If anything but a reload trigger changes in the
+        unit file, the unit will be restarted instead.
+      '';
+    };
+
+    onFailure = mkOption {
+      default = [];
+      type = types.listOf unitNameType;
+      description = ''
+        A list of one or more units that are activated when
+        this unit enters the "failed" state.
+      '';
+    };
+
+    startLimitBurst = mkOption {
+       type = types.int;
+       description = ''
+         Configure unit start rate limiting. Units which are started
+         more than startLimitBurst times within the time span defined by
+         startLimitIntervalSec are not permitted to start any more.
+       '';
+    };
+
+    startLimitIntervalSec = mkOption {
+       type = types.int;
+       description = ''
+         Configure unit start rate limiting. Units which are started
+         more than startLimitBurst times within the time span defined by
+         startLimitIntervalSec are not permitted to start any more.
+       '';
+    };
+
+  };
+
+
+  serviceOptions = commonUnitOptions // {
+
+    environment = mkOption {
+      default = {};
+      type = with types; attrsOf (nullOr (oneOf [ str path package ]));
+      example = { PATH = "/foo/bar/bin"; LANG = "nl_NL.UTF-8"; };
+      description = "Environment variables passed to the service's processes.";
+    };
+
+    path = mkOption {
+      default = [];
+      type = with types; listOf (oneOf [ package str ]);
+      description = ''
+        Packages added to the service's <envar>PATH</envar>
+        environment variable.  Both the <filename>bin</filename>
+        and <filename>sbin</filename> subdirectories of each
+        package are added.
+      '';
+    };
+
+    serviceConfig = mkOption {
+      default = {};
+      example =
+        { RestartSec = 5;
+        };
+      type = types.addCheck (types.attrsOf unitOption) checkService;
+      description = ''
+        Each attribute in this set specifies an option in the
+        <literal>[Service]</literal> section of the unit.  See
+        <citerefentry><refentrytitle>systemd.service</refentrytitle>
+        <manvolnum>5</manvolnum></citerefentry> for details.
+      '';
+    };
+
+    script = mkOption {
+      type = types.lines;
+      default = "";
+      description = "Shell commands executed as the service's main process.";
+    };
+
+    scriptArgs = mkOption {
+      type = types.str;
+      default = "";
+      description = "Arguments passed to the main process script.";
+    };
+
+    preStart = mkOption {
+      type = types.lines;
+      default = "";
+      description = ''
+        Shell commands executed before the service's main process
+        is started.
+      '';
+    };
+
+    postStart = mkOption {
+      type = types.lines;
+      default = "";
+      description = ''
+        Shell commands executed after the service's main process
+        is started.
+      '';
+    };
+
+    reload = mkOption {
+      type = types.lines;
+      default = "";
+      description = ''
+        Shell commands executed when the service's main process
+        is reloaded.
+      '';
+    };
+
+    preStop = mkOption {
+      type = types.lines;
+      default = "";
+      description = ''
+        Shell commands executed to stop the service.
+      '';
+    };
+
+    postStop = mkOption {
+      type = types.lines;
+      default = "";
+      description = ''
+        Shell commands executed after the service's main process
+        has exited.
+      '';
+    };
+
+    restartIfChanged = mkOption {
+      type = types.bool;
+      default = true;
+      description = ''
+        Whether the service should be restarted during a NixOS
+        configuration switch if its definition has changed.
+      '';
+    };
+
+    reloadIfChanged = mkOption {
+      type = types.bool;
+      default = false;
+      description = ''
+        Whether the service should be reloaded during a NixOS
+        configuration switch if its definition has changed.  If
+        enabled, the value of <option>restartIfChanged</option> is
+        ignored.
+
+        This option is deprecated in favor of
+        <option>reloadTriggers</option>, which allows more granular
+        control over when a service is reloaded and when it is
+        restarted.
+      '';
+    };
+
+    stopIfChanged = mkOption {
+      type = types.bool;
+      default = true;
+      description = ''
+        If set, a changed unit is restarted by calling
+        <command>systemctl stop</command> in the old configuration,
+        then <command>systemctl start</command> in the new one.
+        Otherwise, it is restarted in a single step using
+        <command>systemctl restart</command> in the new configuration.
+        The latter is less correct because it runs the
+        <literal>ExecStop</literal> commands from the new
+        configuration.
+      '';
+    };
+
+    startAt = mkOption {
+      type = with types; either str (listOf str);
+      default = [];
+      example = "Sun 14:00:00";
+      description = ''
+        Automatically start this unit at the given date/time, which
+        must be in the format described in
+        <citerefentry><refentrytitle>systemd.time</refentrytitle>
+        <manvolnum>7</manvolnum></citerefentry>.  This is equivalent
+        to adding a corresponding timer unit with
+        <option>OnCalendar</option> set to the value given here.
+      '';
+      apply = v: if isList v then v else [ v ];
+    };
+
+  };
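+
+  # Illustrative example (not part of the original file): a service defined
+  # with the options above; the service name and commands are made up for
+  # the example.
+  #
+  #   systemd.services.my-backup = {
+  #     path = [ pkgs.rsync ];
+  #     script = "rsync -a /home/ /backup/";
+  #     serviceConfig.Type = "oneshot";
+  #     startAt = "Sun 14:00:00";
+  #   };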
+
+
+  socketOptions = commonUnitOptions // {
+
+    listenStreams = mkOption {
+      default = [];
+      type = types.listOf types.str;
+      example = [ "0.0.0.0:993" "/run/my-socket" ];
+      description = ''
+        For each item in this list, a <literal>ListenStream</literal>
+        option in the <literal>[Socket]</literal> section will be created.
+      '';
+    };
+
+    listenDatagrams = mkOption {
+      default = [];
+      type = types.listOf types.str;
+      example = [ "0.0.0.0:993" "/run/my-socket" ];
+      description = ''
+        For each item in this list, a <literal>ListenDatagram</literal>
+        option in the <literal>[Socket]</literal> section will be created.
+      '';
+    };
+
+    socketConfig = mkOption {
+      default = {};
+      example = { ListenStream = "/run/my-socket"; };
+      type = types.attrsOf unitOption;
+      description = ''
+        Each attribute in this set specifies an option in the
+        <literal>[Socket]</literal> section of the unit.  See
+        <citerefentry><refentrytitle>systemd.socket</refentrytitle>
+        <manvolnum>5</manvolnum></citerefentry> for details.
+      '';
+    };
+
+  };
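+
+  # Illustrative example (not part of the original file): a socket unit using
+  # the options above; the unit name and socket path are made up for the
+  # example.
+  #
+  #   systemd.sockets.my-daemon = {
+  #     listenStreams = [ "/run/my-daemon.sock" ];
+  #     socketConfig.SocketMode = "0660";
+  #   };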
+
+
+  timerOptions = commonUnitOptions // {
+
+    timerConfig = mkOption {
+      default = {};
+      example = { OnCalendar = "Sun 14:00:00"; Unit = "foo.service"; };
+      type = types.attrsOf unitOption;
+      description = ''
+        Each attribute in this set specifies an option in the
+        <literal>[Timer]</literal> section of the unit.  See
+        <citerefentry><refentrytitle>systemd.timer</refentrytitle>
+        <manvolnum>5</manvolnum></citerefentry> and
+        <citerefentry><refentrytitle>systemd.time</refentrytitle>
+        <manvolnum>7</manvolnum></citerefentry> for details.
+      '';
+    };
+
+  };
+
+
+  pathOptions = commonUnitOptions // {
+
+    pathConfig = mkOption {
+      default = {};
+      example = { PathChanged = "/some/path"; Unit = "changedpath.service"; };
+      type = types.attrsOf unitOption;
+      description = ''
+        Each attribute in this set specifies an option in the
+        <literal>[Path]</literal> section of the unit.  See
+        <citerefentry><refentrytitle>systemd.path</refentrytitle>
+        <manvolnum>5</manvolnum></citerefentry> for details.
+      '';
+    };
+
+  };
+
+
+  mountOptions = commonUnitOptions // {
+
+    what = mkOption {
+      example = "/dev/sda1";
+      type = types.str;
+      description = "Absolute path of device node, file or other resource. (Mandatory)";
+    };
+
+    where = mkOption {
+      example = "/mnt";
+      type = types.str;
+      description = ''
+        Absolute path of the directory to use as the mount point.
+        It will be created if it doesn't exist. (Mandatory)
+      '';
+    };
+
+    type = mkOption {
+      default = "";
+      example = "ext4";
+      type = types.str;
+      description = "File system type.";
+    };
+
+    options = mkOption {
+      default = "";
+      example = "noatime";
+      type = types.commas;
+      description = "Options used to mount the file system.";
+    };
+
+    mountConfig = mkOption {
+      default = {};
+      example = { DirectoryMode = "0775"; };
+      type = types.attrsOf unitOption;
+      description = ''
+        Each attribute in this set specifies an option in the
+        <literal>[Mount]</literal> section of the unit.  See
+        <citerefentry><refentrytitle>systemd.mount</refentrytitle>
+        <manvolnum>5</manvolnum></citerefentry> for details.
+      '';
+    };
+  };
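+
+  # Illustrative example (not part of the original file): a mount unit using
+  # the options above; the device label and mount point are made up for the
+  # example.
+  #
+  #   systemd.mounts = [{
+  #     what = "/dev/disk/by-label/data";
+  #     where = "/mnt/data";
+  #     type = "ext4";
+  #     options = "noatime";
+  #   }];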
+
+  automountOptions = commonUnitOptions // {
+
+    where = mkOption {
+      example = "/mnt";
+      type = types.str;
+      description = ''
+        Absolute path of the directory to use as the mount point.
+        It will be created if it doesn't exist. (Mandatory)
+      '';
+    };
+
+    automountConfig = mkOption {
+      default = {};
+      example = { DirectoryMode = "0775"; };
+      type = types.attrsOf unitOption;
+      description = ''
+        Each attribute in this set specifies an option in the
+        <literal>[Automount]</literal> section of the unit.  See
+        <citerefentry><refentrytitle>systemd.automount</refentrytitle>
+        <manvolnum>5</manvolnum></citerefentry> for details.
+      '';
+    };
+  };
+
+  targetOptions = commonUnitOptions;
+
+  sliceOptions = commonUnitOptions // {
+
+    sliceConfig = mkOption {
+      default = {};
+      example = { MemoryMax = "2G"; };
+      type = types.attrsOf unitOption;
+      description = ''
+        Each attribute in this set specifies an option in the
+        <literal>[Slice]</literal> section of the unit.  See
+        <citerefentry><refentrytitle>systemd.slice</refentrytitle>
+        <manvolnum>5</manvolnum></citerefentry> for details.
+      '';
+    };
+
+  };
+
+}
diff --git a/nixos/lib/test-driver/default.nix b/nixos/lib/test-driver/default.nix
new file mode 100644
index 00000000000..3aee9134318
--- /dev/null
+++ b/nixos/lib/test-driver/default.nix
@@ -0,0 +1,32 @@
+{ lib
+, python3Packages
+, enableOCR ? false
+, qemu_pkg ? qemu_test
+, coreutils
+, imagemagick_light
+, libtiff
+, netpbm
+, qemu_test
+, socat
+, tesseract4
+, vde2
+}:
+
+python3Packages.buildPythonApplication rec {
+  pname = "nixos-test-driver";
+  version = "1.1";
+  src = ./.;
+
+  propagatedBuildInputs = [ coreutils netpbm python3Packages.colorama python3Packages.ptpython qemu_pkg socat vde2 ]
+    ++ (lib.optionals enableOCR [ imagemagick_light tesseract4 ]);
+
+  doCheck = true;
+  checkInputs = with python3Packages; [ mypy pylint black ];
+  checkPhase = ''
+    mypy --disallow-untyped-defs \
+          --no-implicit-optional \
+          --ignore-missing-imports ${src}/test_driver
+    pylint --errors-only --enable=unused-import ${src}/test_driver
+    black --check --diff ${src}/test_driver
+  '';
+}
diff --git a/nixos/lib/test-driver/setup.py b/nixos/lib/test-driver/setup.py
new file mode 100644
index 00000000000..476c7b2dab2
--- /dev/null
+++ b/nixos/lib/test-driver/setup.py
@@ -0,0 +1,13 @@
+from setuptools import setup, find_packages
+
+setup(
+  name="nixos-test-driver",
+  version='1.1',
+  packages=find_packages(),
+  entry_points={
+    "console_scripts": [
+      "nixos-test-driver=test_driver:main",
+      "generate-driver-symbols=test_driver:generate_driver_symbols"
+    ]
+  },
+)
diff --git a/nixos/lib/test-driver/test_driver/__init__.py b/nixos/lib/test-driver/test_driver/__init__.py
new file mode 100755
index 00000000000..61d91c9ed65
--- /dev/null
+++ b/nixos/lib/test-driver/test_driver/__init__.py
@@ -0,0 +1,128 @@
+from pathlib import Path
+import argparse
+import ptpython.repl
+import os
+import time
+
+from test_driver.logger import rootlog
+from test_driver.driver import Driver
+
+
+class EnvDefault(argparse.Action):
+    """An argpars Action that takes values from the specified
+    environment variable as the flags default value.
+    """
+
+    def __init__(self, envvar, required=False, default=None, nargs=None, **kwargs):  # type: ignore
+        if not default and envvar:
+            if envvar in os.environ:
+                if nargs is not None and (nargs.isdigit() or nargs in ["*", "+"]):
+                    default = os.environ[envvar].split()
+                else:
+                    default = os.environ[envvar]
+                kwargs["help"] = (
+                    kwargs["help"] + f" (default from environment: {default})"
+                )
+        if required and default:
+            required = False
+        super(EnvDefault, self).__init__(
+            default=default, required=required, nargs=nargs, **kwargs
+        )
+
+    def __call__(self, parser, namespace, values, option_string=None):  # type: ignore
+        setattr(namespace, self.dest, values)
+
+
+def writeable_dir(arg: str) -> Path:
+    """Raises an ArgumentTypeError if the given argument isn't a writeable directory
+    Note: We want to fail as early as possible if a directory isn't writeable,
+    since an executed nixos-test could fail (very late) because of the test-driver
+    writing in a directory without proper permissions.
+    """
+    path = Path(arg)
+    if not path.is_dir():
+        raise argparse.ArgumentTypeError("{0} is not a directory".format(path))
+    if not os.access(path, os.W_OK):
+        raise argparse.ArgumentTypeError(
+            "{0} is not a writeable directory".format(path)
+        )
+    return path
+
+
+def main() -> None:
+    arg_parser = argparse.ArgumentParser(prog="nixos-test-driver")
+    arg_parser.add_argument(
+        "-K",
+        "--keep-vm-state",
+        help="re-use a VM state coming from a previous run",
+        action="store_true",
+    )
+    arg_parser.add_argument(
+        "-I",
+        "--interactive",
+        help="drop into a python repl and run the tests interactively",
+        action=argparse.BooleanOptionalAction,
+    )
+    arg_parser.add_argument(
+        "--start-scripts",
+        metavar="START-SCRIPT",
+        action=EnvDefault,
+        envvar="startScripts",
+        nargs="*",
+        help="start scripts for participating virtual machines",
+    )
+    arg_parser.add_argument(
+        "--vlans",
+        metavar="VLAN",
+        action=EnvDefault,
+        envvar="vlans",
+        nargs="*",
+        help="vlans to span by the driver",
+    )
+    arg_parser.add_argument(
+        "-o",
+        "--output_directory",
+        help="""The path to the directory where outputs copied from the VM will be placed.
+                By e.g. Machine.copy_from_vm or Machine.screenshot""",
+        default=Path.cwd(),
+        type=writeable_dir,
+    )
+    arg_parser.add_argument(
+        "testscript",
+        action=EnvDefault,
+        envvar="testScript",
+        help="the test script to run",
+        type=Path,
+    )
+
+    args = arg_parser.parse_args()
+
+    if not args.keep_vm_state:
+        rootlog.info("Machine state will be reset. To keep it, pass --keep-vm-state")
+
+    with Driver(
+        args.start_scripts,
+        args.vlans,
+        args.testscript.read_text(),
+        args.output_directory.resolve(),
+        args.keep_vm_state,
+    ) as driver:
+        if args.interactive:
+            ptpython.repl.embed(driver.test_symbols(), {})
+        else:
+            tic = time.time()
+            driver.run_tests()
+            toc = time.time()
+            rootlog.info(f"test script finished in {(toc-tic):.2f}s")
+
+
+def generate_driver_symbols() -> None:
+    """
+    This generates a file with symbols of the test-driver code that can be used
+    in user's test scripts. That list is then used by pyflakes to lint those
+    scripts.
+    """
+    d = Driver([], [], "", Path())
+    test_symbols = d.test_symbols()
+    with open("driver-symbols", "w") as fp:
+        fp.write(",".join(test_symbols.keys()))
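+
+
+# Illustrative invocation (not part of the original code); the script paths
+# below are made up for the example. The wrapper generated by the NixOS test
+# framework normally supplies these values via environment variables instead:
+#
+#   nixos-test-driver --keep-vm-state \
+#       --start-scripts ./run-my-machine-vm \
+#       --vlans 1 \
+#       ./test-script.py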
diff --git a/nixos/lib/test-driver/test_driver/driver.py b/nixos/lib/test-driver/test_driver/driver.py
new file mode 100644
index 00000000000..880b1c5fdec
--- /dev/null
+++ b/nixos/lib/test-driver/test_driver/driver.py
@@ -0,0 +1,225 @@
+from contextlib import contextmanager
+from pathlib import Path
+from typing import Any, Dict, Iterator, List, Union, Optional, Callable, ContextManager
+import os
+import tempfile
+
+from test_driver.logger import rootlog
+from test_driver.machine import Machine, NixStartScript, retry
+from test_driver.vlan import VLan
+from test_driver.polling_condition import PollingCondition
+
+
+def get_tmp_dir() -> Path:
+    """Returns a temporary directory that is defined by TMPDIR, TEMP, TMP or CWD
+    Raises an exception in case the retrieved temporary directory is not writeable
+    See https://docs.python.org/3/library/tempfile.html#tempfile.gettempdir
+    """
+    tmp_dir = Path(tempfile.gettempdir())
+    tmp_dir.mkdir(mode=0o700, exist_ok=True)
+    if not tmp_dir.is_dir():
+        raise NotADirectoryError(
+            "The directory defined by TMPDIR, TEMP, TMP or CWD: {0} is not a directory".format(
+                tmp_dir
+            )
+        )
+    if not os.access(tmp_dir, os.W_OK):
+        raise PermissionError(
+            "The directory defined by TMPDIR, TEMP, TMP, or CWD: {0} is not writeable".format(
+                tmp_dir
+            )
+        )
+    return tmp_dir
+
+
+class Driver:
+    """A handle to the driver that sets up the environment
+    and runs the tests"""
+
+    tests: str
+    vlans: List[VLan]
+    machines: List[Machine]
+    polling_conditions: List[PollingCondition]
+
+    def __init__(
+        self,
+        start_scripts: List[str],
+        vlans: List[int],
+        tests: str,
+        out_dir: Path,
+        keep_vm_state: bool = False,
+    ):
+        self.tests = tests
+        self.out_dir = out_dir
+
+        tmp_dir = get_tmp_dir()
+
+        with rootlog.nested("start all VLans"):
+            self.vlans = [VLan(nr, tmp_dir) for nr in vlans]
+
+        def cmd(scripts: List[str]) -> Iterator[NixStartScript]:
+            for s in scripts:
+                yield NixStartScript(s)
+
+        self.polling_conditions = []
+
+        self.machines = [
+            Machine(
+                start_command=cmd,
+                keep_vm_state=keep_vm_state,
+                name=cmd.machine_name,
+                tmp_dir=tmp_dir,
+                callbacks=[self.check_polling_conditions],
+                out_dir=self.out_dir,
+            )
+            for cmd in cmd(start_scripts)
+        ]
+
+    def __enter__(self) -> "Driver":
+        return self
+
+    def __exit__(self, *_: Any) -> None:
+        with rootlog.nested("cleanup"):
+            for machine in self.machines:
+                machine.release()
+
+    def subtest(self, name: str) -> Iterator[None]:
+        """Group logs under a given test name"""
+        with rootlog.nested(name):
+            try:
+                yield
+                return True
+            except Exception as e:
+                rootlog.error(f'Test "{name}" failed with error: "{e}"')
+                raise e
+
+    def test_symbols(self) -> Dict[str, Any]:
+        @contextmanager
+        def subtest(name: str) -> Iterator[None]:
+            return self.subtest(name)
+
+        general_symbols = dict(
+            start_all=self.start_all,
+            test_script=self.test_script,
+            machines=self.machines,
+            vlans=self.vlans,
+            driver=self,
+            log=rootlog,
+            os=os,
+            create_machine=self.create_machine,
+            subtest=subtest,
+            run_tests=self.run_tests,
+            join_all=self.join_all,
+            retry=retry,
+            serial_stdout_off=self.serial_stdout_off,
+            serial_stdout_on=self.serial_stdout_on,
+            polling_condition=self.polling_condition,
+            Machine=Machine,  # for typing
+        )
+        machine_symbols = {m.name: m for m in self.machines}
+        # If there's exactly one machine, make it available under the name
+        # "machine", even if it's not called that.
+        if len(self.machines) == 1:
+            (machine_symbols["machine"],) = self.machines
+        vlan_symbols = {
+            f"vlan{v.nr}": self.vlans[idx] for idx, v in enumerate(self.vlans)
+        }
+        print(
+            "additionally exposed symbols:\n    "
+            + ", ".join(map(lambda m: m.name, self.machines))
+            + ",\n    "
+            + ", ".join(map(lambda v: f"vlan{v.nr}", self.vlans))
+            + ",\n    "
+            + ", ".join(list(general_symbols.keys()))
+        )
+        return {**general_symbols, **machine_symbols, **vlan_symbols}
+
+    def test_script(self) -> None:
+        """Run the test script"""
+        with rootlog.nested("run the VM test script"):
+            symbols = self.test_symbols()  # call eagerly
+            exec(self.tests, symbols, None)
+
+    def run_tests(self) -> None:
+        """Run the test script (for non-interactive test runs)"""
+        self.test_script()
+        # TODO: Collect coverage data
+        for machine in self.machines:
+            if machine.is_up():
+                machine.execute("sync")
+
+    def start_all(self) -> None:
+        """Start all machines"""
+        with rootlog.nested("start all VMs"):
+            for machine in self.machines:
+                machine.start()
+
+    def join_all(self) -> None:
+        """Wait for all machines to shut down"""
+        with rootlog.nested("wait for all VMs to finish"):
+            for machine in self.machines:
+                machine.wait_for_shutdown()
+
+    def create_machine(self, args: Dict[str, Any]) -> Machine:
+        rootlog.warning(
+            "Using legacy create_machine(), please instantiate the"
+            "Machine class directly, instead"
+        )
+
+        tmp_dir = get_tmp_dir()
+
+        if args.get("startCommand"):
+            start_command: str = args.get("startCommand", "")
+            cmd = NixStartScript(start_command)
+            name = args.get("name", cmd.machine_name)
+        else:
+            cmd = Machine.create_startcommand(args)  # type: ignore
+            name = args.get("name", "machine")
+
+        return Machine(
+            tmp_dir=tmp_dir,
+            out_dir=self.out_dir,
+            start_command=cmd,
+            name=name,
+            keep_vm_state=args.get("keep_vm_state", False),
+            allow_reboot=args.get("allow_reboot", False),
+        )
+
+    def serial_stdout_on(self) -> None:
+        rootlog._print_serial_logs = True
+
+    def serial_stdout_off(self) -> None:
+        rootlog._print_serial_logs = False
+
+    def check_polling_conditions(self) -> None:
+        for condition in self.polling_conditions:
+            condition.maybe_raise()
+
+    def polling_condition(
+        self,
+        fun_: Optional[Callable] = None,
+        *,
+        seconds_interval: float = 2.0,
+        description: Optional[str] = None,
+    ) -> Union[Callable[[Callable], ContextManager], ContextManager]:
+        driver = self
+
+        class Poll:
+            def __init__(self, fun: Callable):
+                self.condition = PollingCondition(
+                    fun,
+                    seconds_interval,
+                    description,
+                )
+
+            def __enter__(self) -> None:
+                driver.polling_conditions.append(self.condition)
+
+            def __exit__(self, a, b, c) -> None:  # type: ignore
+                res = driver.polling_conditions.pop()
+                assert res is self.condition
+
+        if fun_ is None:
+            return Poll
+        else:
+            return Poll(fun_)
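+
+    # Illustrative usage from a test script (not part of the original code);
+    # the machine name and commands are made up for the example.
+    #
+    #   @polling_condition
+    #   def server_is_up():
+    #       machine.succeed("systemctl is-active my-server")
+    #
+    #   with server_is_up:
+    #       machine.succeed("curl --fail http://localhost/")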
diff --git a/nixos/lib/test-driver/test_driver/logger.py b/nixos/lib/test-driver/test_driver/logger.py
new file mode 100644
index 00000000000..5b3091a5129
--- /dev/null
+++ b/nixos/lib/test-driver/test_driver/logger.py
@@ -0,0 +1,101 @@
+from colorama import Style
+from contextlib import contextmanager
+from typing import Any, Dict, Iterator
+from queue import Queue, Empty
+from xml.sax.saxutils import XMLGenerator
+import codecs
+import os
+import sys
+import time
+import unicodedata
+
+
+class Logger:
+    def __init__(self) -> None:
+        self.logfile = os.environ.get("LOGFILE", "/dev/null")
+        self.logfile_handle = codecs.open(self.logfile, "wb")
+        self.xml = XMLGenerator(self.logfile_handle, encoding="utf-8")
+        self.queue: "Queue[Dict[str, str]]" = Queue()
+
+        self.xml.startDocument()
+        self.xml.startElement("logfile", attrs={})
+
+        self._print_serial_logs = True
+
+    @staticmethod
+    def _eprint(*args: object, **kwargs: Any) -> None:
+        print(*args, file=sys.stderr, **kwargs)
+
+    def close(self) -> None:
+        self.xml.endElement("logfile")
+        self.xml.endDocument()
+        self.logfile_handle.close()
+
+    def sanitise(self, message: str) -> str:
+        return "".join(ch for ch in message if unicodedata.category(ch)[0] != "C")
+
+    def maybe_prefix(self, message: str, attributes: Dict[str, str]) -> str:
+        if "machine" in attributes:
+            return "{}: {}".format(attributes["machine"], message)
+        return message
+
+    def log_line(self, message: str, attributes: Dict[str, str]) -> None:
+        self.xml.startElement("line", attributes)
+        self.xml.characters(message)
+        self.xml.endElement("line")
+
+    def info(self, *args, **kwargs) -> None:  # type: ignore
+        self.log(*args, **kwargs)
+
+    def warning(self, *args, **kwargs) -> None:  # type: ignore
+        self.log(*args, **kwargs)
+
+    def error(self, *args, **kwargs) -> None:  # type: ignore
+        self.log(*args, **kwargs)
+        sys.exit(1)
+
+    def log(self, message: str, attributes: Dict[str, str] = {}) -> None:
+        self._eprint(self.maybe_prefix(message, attributes))
+        self.drain_log_queue()
+        self.log_line(message, attributes)
+
+    def log_serial(self, message: str, machine: str) -> None:
+        self.enqueue({"msg": message, "machine": machine, "type": "serial"})
+        if self._print_serial_logs:
+            self._eprint(
+                Style.DIM + "{} # {}".format(machine, message) + Style.RESET_ALL
+            )
+
+    def enqueue(self, item: Dict[str, str]) -> None:
+        self.queue.put(item)
+
+    def drain_log_queue(self) -> None:
+        try:
+            while True:
+                item = self.queue.get_nowait()
+                msg = self.sanitise(item["msg"])
+                del item["msg"]
+                self.log_line(msg, item)
+        except Empty:
+            pass
+
+    @contextmanager
+    def nested(self, message: str, attributes: Dict[str, str] = {}) -> Iterator[None]:
+        self._eprint(self.maybe_prefix(message, attributes))
+
+        self.xml.startElement("nest", attrs={})
+        self.xml.startElement("head", attributes)
+        self.xml.characters(message)
+        self.xml.endElement("head")
+
+        tic = time.time()
+        self.drain_log_queue()
+        yield
+        self.drain_log_queue()
+        toc = time.time()
+        self.log("(finished: {}, in {:.2f} seconds)".format(message, toc - tic))
+
+        self.xml.endElement("nest")
+
+
+rootlog = Logger()
diff --git a/nixos/lib/test-driver/test_driver/machine.py b/nixos/lib/test-driver/test_driver/machine.py
new file mode 100644
index 00000000000..569a0f3c61e
--- /dev/null
+++ b/nixos/lib/test-driver/test_driver/machine.py
@@ -0,0 +1,988 @@
+from contextlib import _GeneratorContextManager
+from pathlib import Path
+from queue import Queue
+from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple
+import base64
+import io
+import os
+import queue
+import re
+import shlex
+import shutil
+import socket
+import subprocess
+import sys
+import tempfile
+import threading
+import time
+
+from test_driver.logger import rootlog
+
+CHAR_TO_KEY = {
+    "A": "shift-a",
+    "N": "shift-n",
+    "-": "0x0C",
+    "_": "shift-0x0C",
+    "B": "shift-b",
+    "O": "shift-o",
+    "=": "0x0D",
+    "+": "shift-0x0D",
+    "C": "shift-c",
+    "P": "shift-p",
+    "[": "0x1A",
+    "{": "shift-0x1A",
+    "D": "shift-d",
+    "Q": "shift-q",
+    "]": "0x1B",
+    "}": "shift-0x1B",
+    "E": "shift-e",
+    "R": "shift-r",
+    ";": "0x27",
+    ":": "shift-0x27",
+    "F": "shift-f",
+    "S": "shift-s",
+    "'": "0x28",
+    '"': "shift-0x28",
+    "G": "shift-g",
+    "T": "shift-t",
+    "`": "0x29",
+    "~": "shift-0x29",
+    "H": "shift-h",
+    "U": "shift-u",
+    "\\": "0x2B",
+    "|": "shift-0x2B",
+    "I": "shift-i",
+    "V": "shift-v",
+    ",": "0x33",
+    "<": "shift-0x33",
+    "J": "shift-j",
+    "W": "shift-w",
+    ".": "0x34",
+    ">": "shift-0x34",
+    "K": "shift-k",
+    "X": "shift-x",
+    "/": "0x35",
+    "?": "shift-0x35",
+    "L": "shift-l",
+    "Y": "shift-y",
+    " ": "spc",
+    "M": "shift-m",
+    "Z": "shift-z",
+    "\n": "ret",
+    "!": "shift-0x02",
+    "@": "shift-0x03",
+    "#": "shift-0x04",
+    "$": "shift-0x05",
+    "%": "shift-0x06",
+    "^": "shift-0x07",
+    "&": "shift-0x08",
+    "*": "shift-0x09",
+    "(": "shift-0x0A",
+    ")": "shift-0x0B",
+}
+
+
+def make_command(args: list) -> str:
+    return " ".join(map(shlex.quote, (map(str, args))))
+
+
+def _perform_ocr_on_screenshot(
+    screenshot_path: str, model_ids: Iterable[int]
+) -> List[str]:
+    if shutil.which("tesseract") is None:
+        raise Exception("OCR requested but enableOCR is false")
+
+    magick_args = (
+        "-filter Catrom -density 72 -resample 300 "
+        + "-contrast -normalize -despeckle -type grayscale "
+        + "-sharpen 1 -posterize 3 -negate -gamma 100 "
+        + "-blur 1x65535"
+    )
+
+    tess_args = f"-c debug_file=/dev/null --psm 11"
+
+    cmd = f"convert {magick_args} {screenshot_path} tiff:{screenshot_path}.tiff"
+    ret = subprocess.run(cmd, shell=True, capture_output=True)
+    if ret.returncode != 0:
+        raise Exception(f"TIFF conversion failed with exit code {ret.returncode}")
+
+    model_results = []
+    for model_id in model_ids:
+        cmd = f"tesseract {screenshot_path}.tiff - {tess_args} --oem {model_id}"
+        ret = subprocess.run(cmd, shell=True, capture_output=True)
+        if ret.returncode != 0:
+            raise Exception(f"OCR failed with exit code {ret.returncode}")
+        model_results.append(ret.stdout.decode("utf-8"))
+
+    return model_results
+
+
+def retry(fn: Callable, timeout: int = 900) -> None:
+    """Call the given function repeatedly, with 1 second intervals,
+    until it returns True or a timeout is reached.
+    """
+
+    for _ in range(timeout):
+        if fn(False):
+            return
+        time.sleep(1)
+
+    if not fn(True):
+        raise Exception(f"action timed out after {timeout} seconds")
+
+
+class StartCommand:
+    """The Base Start Command knows how to append the necesary
+    runtime qemu options as determined by a particular test driver
+    run. Any such start command is expected to happily receive and
+    append additional qemu args.
+    """
+
+    _cmd: str
+
+    def cmd(
+        self,
+        monitor_socket_path: Path,
+        shell_socket_path: Path,
+        allow_reboot: bool = False,  # TODO: unused, legacy?
+    ) -> str:
+        display_opts = ""
+        display_available = any(x in os.environ for x in ["DISPLAY", "WAYLAND_DISPLAY"])
+        if not display_available:
+            display_opts += " -nographic"
+
+        # qemu options
+        qemu_opts = ""
+        qemu_opts += (
+            ""
+            if allow_reboot
+            else " -no-reboot"
+            " -device virtio-serial"
+            " -device virtconsole,chardev=shell"
+            " -device virtio-rng-pci"
+            " -serial stdio"
+        )
+        # TODO: qemu script already captures this env variable, legacy?
+        qemu_opts += " " + os.environ.get("QEMU_OPTS", "")
+
+        return (
+            f"{self._cmd}"
+            f" -monitor unix:{monitor_socket_path}"
+            f" -chardev socket,id=shell,path={shell_socket_path}"
+            f"{qemu_opts}"
+            f"{display_opts}"
+        )
+
+    @staticmethod
+    def build_environment(
+        state_dir: Path,
+        shared_dir: Path,
+    ) -> dict:
+        # We make a copy to not update the current environment
+        env = dict(os.environ)
+        env.update(
+            {
+                "TMPDIR": str(state_dir),
+                "SHARED_DIR": str(shared_dir),
+                "USE_TMPDIR": "1",
+            }
+        )
+        return env
+
+    def run(
+        self,
+        state_dir: Path,
+        shared_dir: Path,
+        monitor_socket_path: Path,
+        shell_socket_path: Path,
+    ) -> subprocess.Popen:
+        return subprocess.Popen(
+            self.cmd(monitor_socket_path, shell_socket_path),
+            stdin=subprocess.DEVNULL,
+            stdout=subprocess.PIPE,
+            stderr=subprocess.STDOUT,
+            shell=True,
+            cwd=state_dir,
+            env=self.build_environment(state_dir, shared_dir),
+        )
+
+
+class NixStartScript(StartCommand):
+    """A start script from nixos/modules/virtualiation/qemu-vm.nix
+    that also satisfies the requirement of the BaseStartCommand.
+    These Nix commands have the particular charactersitic that the
+    machine name can be extracted out of them via a regex match.
+    (Admittedly a _very_ implicit contract, evtl. TODO fix)
+    """
+
+    def __init__(self, script: str):
+        self._cmd = script
+
+    @property
+    def machine_name(self) -> str:
+        match = re.search("run-(.+)-vm$", self._cmd)
+        name = "machine"
+        if match:
+            name = match.group(1)
+        return name
+
+
+class LegacyStartCommand(StartCommand):
+    """Used in some places to create an ad-hoc machine instead of
+    using nix test instrumentation + module system for that purpose.
+    Legacy.
+    """
+
+    def __init__(
+        self,
+        netBackendArgs: Optional[str] = None,
+        netFrontendArgs: Optional[str] = None,
+        hda: Optional[Tuple[Path, str]] = None,
+        cdrom: Optional[str] = None,
+        usb: Optional[str] = None,
+        bios: Optional[str] = None,
+        qemuBinary: Optional[str] = None,
+        qemuFlags: Optional[str] = None,
+    ):
+        if qemuBinary is not None:
+            self._cmd = qemuBinary
+        else:
+            self._cmd = "qemu-kvm"
+
+        self._cmd += " -m 384"
+
+        # networking
+        net_backend = "-netdev user,id=net0"
+        net_frontend = "-device virtio-net-pci,netdev=net0"
+        if netBackendArgs is not None:
+            net_backend += "," + netBackendArgs
+        if netFrontendArgs is not None:
+            net_frontend += "," + netFrontendArgs
+        self._cmd += f" {net_backend} {net_frontend}"
+
+        # hda
+        hda_cmd = ""
+        if hda is not None:
+            hda_path = hda[0].resolve()
+            hda_interface = hda[1]
+            if hda_interface == "scsi":
+                hda_cmd += (
+                    f" -drive id=hda,file={hda_path},werror=report,if=none"
+                    " -device scsi-hd,drive=hda"
+                )
+            else:
+                hda_cmd += f" -drive file={hda_path},if={hda_interface},werror=report"
+        self._cmd += hda_cmd
+
+        # cdrom
+        if cdrom is not None:
+            self._cmd += f" -cdrom {cdrom}"
+
+        # usb
+        usb_cmd = ""
+        if usb is not None:
+            # https://github.com/qemu/qemu/blob/master/docs/usb2.txt
+            usb_cmd += (
+                " -device usb-ehci"
+                f" -drive id=usbdisk,file={usb},if=none,readonly"
+                " -device usb-storage,drive=usbdisk "
+            )
+        self._cmd += usb_cmd
+
+        # bios
+        if bios is not None:
+            self._cmd += f" -bios {bios}"
+
+        # qemu flags
+        if qemuFlags is not None:
+            self._cmd += f" {qemuFlags}"
+
+
+class Machine:
+    """A handle to the machine with this name, that also knows how to manage
+    the machine lifecycle with the help of a start script / command."""
+
+    name: str
+    out_dir: Path
+    tmp_dir: Path
+    shared_dir: Path
+    state_dir: Path
+    monitor_path: Path
+    shell_path: Path
+
+    start_command: StartCommand
+    keep_vm_state: bool
+    allow_reboot: bool
+
+    process: Optional[subprocess.Popen]
+    pid: Optional[int]
+    monitor: Optional[socket.socket]
+    shell: Optional[socket.socket]
+    serial_thread: Optional[threading.Thread]
+
+    booted: bool
+    connected: bool
+    # Store the last serial console lines for use
+    # by wait_for_console_text
+    last_lines: Queue = Queue()
+    callbacks: List[Callable]
+
+    def __repr__(self) -> str:
+        return f"<Machine '{self.name}'>"
+
+    def __init__(
+        self,
+        out_dir: Path,
+        tmp_dir: Path,
+        start_command: StartCommand,
+        name: str = "machine",
+        keep_vm_state: bool = False,
+        allow_reboot: bool = False,
+        callbacks: Optional[List[Callable]] = None,
+    ) -> None:
+        self.out_dir = out_dir
+        self.tmp_dir = tmp_dir
+        self.keep_vm_state = keep_vm_state
+        self.allow_reboot = allow_reboot
+        self.name = name
+        self.start_command = start_command
+        self.callbacks = callbacks if callbacks is not None else []
+
+        # set up directories
+        self.shared_dir = self.tmp_dir / "shared-xchg"
+        self.shared_dir.mkdir(mode=0o700, exist_ok=True)
+
+        self.state_dir = self.tmp_dir / f"vm-state-{self.name}"
+        self.monitor_path = self.state_dir / "monitor"
+        self.shell_path = self.state_dir / "shell"
+        if (not self.keep_vm_state) and self.state_dir.exists():
+            self.cleanup_statedir()
+        self.state_dir.mkdir(mode=0o700, exist_ok=True)
+
+        self.process = None
+        self.pid = None
+        self.monitor = None
+        self.shell = None
+        self.serial_thread = None
+
+        self.booted = False
+        self.connected = False
+
+    @staticmethod
+    def create_startcommand(args: Dict[str, str]) -> StartCommand:
+        rootlog.warning(
+            "Using legacy create_startcommand(),"
+            "please use proper nix test vm instrumentation, instead"
+            "to generate the appropriate nixos test vm qemu startup script"
+        )
+        hda = None
+        if args.get("hda"):
+            hda_arg: str = args.get("hda", "")
+            hda_arg_path: Path = Path(hda_arg)
+            hda = (hda_arg_path, args.get("hdaInterface", ""))
+        return LegacyStartCommand(
+            netBackendArgs=args.get("netBackendArgs"),
+            netFrontendArgs=args.get("netFrontendArgs"),
+            hda=hda,
+            cdrom=args.get("cdrom"),
+            usb=args.get("usb"),
+            bios=args.get("bios"),
+            qemuBinary=args.get("qemuBinary"),
+            qemuFlags=args.get("qemuFlags"),
+        )
+
+    def is_up(self) -> bool:
+        return self.booted and self.connected
+
+    def log(self, msg: str) -> None:
+        rootlog.log(msg, {"machine": self.name})
+
+    def log_serial(self, msg: str) -> None:
+        rootlog.log_serial(msg, self.name)
+
+    def nested(self, msg: str, attrs: Dict[str, str] = {}) -> _GeneratorContextManager:
+        my_attrs = {"machine": self.name}
+        my_attrs.update(attrs)
+        return rootlog.nested(msg, my_attrs)
+
+    def wait_for_monitor_prompt(self) -> str:
+        with self.nested("waiting for monitor prompt"):
+            assert self.monitor is not None
+            answer = ""
+            while True:
+                undecoded_answer = self.monitor.recv(1024)
+                if not undecoded_answer:
+                    break
+                answer += undecoded_answer.decode()
+                if answer.endswith("(qemu) "):
+                    break
+            return answer
+
+    def send_monitor_command(self, command: str) -> str:
+        self.run_callbacks()
+        with self.nested("sending monitor command: {}".format(command)):
+            message = ("{}\n".format(command)).encode()
+            assert self.monitor is not None
+            self.monitor.send(message)
+            return self.wait_for_monitor_prompt()
+
+    def wait_for_unit(self, unit: str, user: Optional[str] = None) -> None:
+        """Wait for a systemd unit to get into "active" state.
+        Throws exceptions on "failed" and "inactive" states as well as
+        after timing out.
+        """
+
+        def check_active(_: Any) -> bool:
+            info = self.get_unit_info(unit, user)
+            state = info["ActiveState"]
+            if state == "failed":
+                raise Exception('unit "{}" reached state "{}"'.format(unit, state))
+
+            if state == "inactive":
+                status, jobs = self.systemctl("list-jobs --full 2>&1", user)
+                if "No jobs" in jobs:
+                    info = self.get_unit_info(unit, user)
+                    if info["ActiveState"] == state:
+                        raise Exception(
+                            (
+                                'unit "{}" is inactive and there ' "are no pending jobs"
+                            ).format(unit)
+                        )
+
+            return state == "active"
+
+        with self.nested(
+            "waiting for unit {}{}".format(
+                unit, f" with user {user}" if user is not None else ""
+            )
+        ):
+            retry(check_active)
+
+    def get_unit_info(self, unit: str, user: Optional[str] = None) -> Dict[str, str]:
+        status, lines = self.systemctl('--no-pager show "{}"'.format(unit), user)
+        if status != 0:
+            raise Exception(
+                'retrieving systemctl info for unit "{}" {} failed with exit code {}'.format(
+                    unit, "" if user is None else 'under user "{}"'.format(user), status
+                )
+            )
+
+        line_pattern = re.compile(r"^([^=]+)=(.*)$")
+
+        def tuple_from_line(line: str) -> Tuple[str, str]:
+            match = line_pattern.match(line)
+            assert match is not None
+            return match[1], match[2]
+
+        return dict(
+            tuple_from_line(line)
+            for line in lines.split("\n")
+            if line_pattern.match(line)
+        )
+
+    def systemctl(self, q: str, user: Optional[str] = None) -> Tuple[int, str]:
+        if user is not None:
+            q = q.replace("'", "\\'")
+            return self.execute(
+                (
+                    "su -l {} --shell /bin/sh -c "
+                    "$'XDG_RUNTIME_DIR=/run/user/`id -u` "
+                    "systemctl --user {}'"
+                ).format(user, q)
+            )
+        return self.execute("systemctl {}".format(q))
+
+    def require_unit_state(self, unit: str, require_state: str = "active") -> None:
+        with self.nested(
+            "checking if unit ‘{}’ has reached state '{}'".format(unit, require_state)
+        ):
+            info = self.get_unit_info(unit)
+            state = info["ActiveState"]
+            if state != require_state:
+                raise Exception(
+                    "Expected unit ‘{}’ to to be in state ".format(unit)
+                    + "'{}' but it is in state ‘{}’".format(require_state, state)
+                )
+
+    def _next_newline_closed_block_from_shell(self) -> str:
+        assert self.shell
+        output_buffer = []
+        while True:
+            # This receives up to 4096 bytes from the socket
+            chunk = self.shell.recv(4096)
+            if not chunk:
+                # Probably a broken pipe, return the output we have
+                break
+
+            decoded = chunk.decode()
+            output_buffer += [decoded]
+            if decoded[-1] == "\n":
+                break
+        return "".join(output_buffer)
+
+    def execute(
+        self, command: str, check_return: bool = True, timeout: Optional[int] = 900
+    ) -> Tuple[int, str]:
+        self.run_callbacks()
+        self.connect()
+
+        if timeout is not None:
+            command = "timeout {} sh -c {}".format(timeout, shlex.quote(command))
+
+        out_command = f"( set -euo pipefail; {command} ) | (base64 --wrap 0; echo)\n"
+        assert self.shell
+        self.shell.send(out_command.encode())
+
+        # Get the output
+        output = base64.b64decode(self._next_newline_closed_block_from_shell())
+
+        if not check_return:
+            return (-1, output.decode())
+
+        # Get the return code
+        self.shell.send("echo ${PIPESTATUS[0]}\n".encode())
+        rc = int(self._next_newline_closed_block_from_shell().strip())
+
+        return (rc, output.decode())
+
+    def shell_interact(self) -> None:
+        """Allows you to interact with the guest shell
+
+        Should only be used during test development, not in the production test."""
+        self.connect()
+        self.log("Terminal is ready (there is no initial prompt):")
+
+        assert self.shell
+        subprocess.run(
+            ["socat", "READLINE,prompt=$ ", f"FD:{self.shell.fileno()}"],
+            pass_fds=[self.shell.fileno()],
+        )
+
+    def succeed(self, *commands: str, timeout: Optional[int] = None) -> str:
+        """Execute each command and check that it succeeds."""
+        output = ""
+        for command in commands:
+            with self.nested("must succeed: {}".format(command)):
+                (status, out) = self.execute(command, timeout=timeout)
+                if status != 0:
+                    self.log("output: {}".format(out))
+                    raise Exception(
+                        "command `{}` failed (exit code {})".format(command, status)
+                    )
+                output += out
+        return output
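+
+    # Illustrative usage from a test script (not part of the original code);
+    # the command shown is made up for the example:
+    #
+    #   machine.succeed("systemctl is-system-running --wait")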
+
+    def fail(self, *commands: str, timeout: Optional[int] = None) -> str:
+        """Execute each command and check that it fails."""
+        output = ""
+        for command in commands:
+            with self.nested("must fail: {}".format(command)):
+                (status, out) = self.execute(command, timeout=timeout)
+                if status == 0:
+                    raise Exception(
+                        "command `{}` unexpectedly succeeded".format(command)
+                    )
+                output += out
+        return output
+
+    def wait_until_succeeds(self, command: str, timeout: int = 900) -> str:
+        """Wait until a command returns success and return its output.
+        Throws an exception on timeout.
+        """
+        output = ""
+
+        def check_success(_: Any) -> bool:
+            nonlocal output
+            status, output = self.execute(command, timeout=timeout)
+            return status == 0
+
+        with self.nested("waiting for success: {}".format(command)):
+            retry(check_success, timeout)
+            return output
+
+    def wait_until_fails(self, command: str, timeout: int = 900) -> str:
+        """Wait until a command returns failure.
+        Throws an exception on timeout.
+        """
+        output = ""
+
+        def check_failure(_: Any) -> bool:
+            nonlocal output
+            status, output = self.execute(command, timeout=timeout)
+            return status != 0
+
+        with self.nested("waiting for failure: {}".format(command)):
+            retry(check_failure)
+            return output
+
+    def wait_for_shutdown(self) -> None:
+        if not self.booted:
+            return
+
+        with self.nested("waiting for the VM to power off"):
+            sys.stdout.flush()
+            assert self.process
+            self.process.wait()
+
+            self.pid = None
+            self.booted = False
+            self.connected = False
+
+    def get_tty_text(self, tty: str) -> str:
+        status, output = self.execute(
+            "fold -w$(stty -F /dev/tty{0} size | "
+            "awk '{{print $2}}') /dev/vcs{0}".format(tty)
+        )
+        return output
+
+    def wait_until_tty_matches(self, tty: str, regexp: str) -> None:
+        """Wait until the visible output on the chosen TTY matches regular
+        expression. Throws an exception on timeout.
+        """
+        matcher = re.compile(regexp)
+
+        def tty_matches(last: bool) -> bool:
+            text = self.get_tty_text(tty)
+            if last:
+                self.log(
+                    f"Last chance to match /{regexp}/ on TTY{tty}, "
+                    f"which currently contains: {text}"
+                )
+            return len(matcher.findall(text)) > 0
+
+        with self.nested("waiting for {} to appear on tty {}".format(regexp, tty)):
+            retry(tty_matches)
+
+    def send_chars(self, chars: List[str]) -> None:
+        with self.nested("sending keys ‘{}‘".format(chars)):
+            for char in chars:
+                self.send_key(char)
+
+    def wait_for_file(self, filename: str) -> None:
+        """Waits until the file exists in machine's file system."""
+
+        def check_file(_: Any) -> bool:
+            status, _ = self.execute("test -e {}".format(filename))
+            return status == 0
+
+        with self.nested("waiting for file ‘{}‘".format(filename)):
+            retry(check_file)
+
+    def wait_for_open_port(self, port: int) -> None:
+        def port_is_open(_: Any) -> bool:
+            status, _ = self.execute("nc -z localhost {}".format(port))
+            return status == 0
+
+        with self.nested("waiting for TCP port {}".format(port)):
+            retry(port_is_open)
+
+    def wait_for_closed_port(self, port: int) -> None:
+        def port_is_closed(_: Any) -> bool:
+            status, _ = self.execute("nc -z localhost {}".format(port))
+            return status != 0
+
+        with self.nested("waiting for TCP port {} to be closed"):
+            retry(port_is_closed)
+
+    def start_job(self, jobname: str, user: Optional[str] = None) -> Tuple[int, str]:
+        return self.systemctl("start {}".format(jobname), user)
+
+    def stop_job(self, jobname: str, user: Optional[str] = None) -> Tuple[int, str]:
+        return self.systemctl("stop {}".format(jobname), user)
+
+    def wait_for_job(self, jobname: str) -> None:
+        self.wait_for_unit(jobname)
+
+    def connect(self) -> None:
+        if self.connected:
+            return
+
+        with self.nested("waiting for the VM to finish booting"):
+            self.start()
+
+            assert self.shell
+
+            tic = time.time()
+            self.shell.recv(1024)
+            # TODO: Timeout
+            toc = time.time()
+
+            self.log("connected to guest root shell")
+            self.log("(connecting took {:.2f} seconds)".format(toc - tic))
+            self.connected = True
+
+    def screenshot(self, filename: str) -> None:
+        word_pattern = re.compile(r"^\w+$")
+        if word_pattern.match(filename):
+            filename = os.path.join(self.out_dir, "{}.png".format(filename))
+        tmp = "{}.ppm".format(filename)
+
+        with self.nested(
+            "making screenshot {}".format(filename),
+            {"image": os.path.basename(filename)},
+        ):
+            self.send_monitor_command("screendump {}".format(tmp))
+            ret = subprocess.run("pnmtopng {} > {}".format(tmp, filename), shell=True)
+            os.unlink(tmp)
+            if ret.returncode != 0:
+                raise Exception("Cannot convert screenshot")
+
+    def copy_from_host_via_shell(self, source: str, target: str) -> None:
+        """Copy a file from the host into the guest by piping it over the
+        shell into the destination file. Works without a host-guest shared folder.
+        Prefer copy_from_host whenever possible.
+        """
+        with open(source, "rb") as fh:
+            content_b64 = base64.b64encode(fh.read()).decode()
+            self.succeed(
+                f"mkdir -p $(dirname {target})",
+                f"echo -n {content_b64} | base64 -d > {target}",
+            )
+
+    def copy_from_host(self, source: str, target: str) -> None:
+        """Copy a file from the host into the guest via the `shared_dir` shared
+        among all the VMs (using a temporary directory).
+        """
+        host_src = Path(source)
+        vm_target = Path(target)
+        with tempfile.TemporaryDirectory(dir=self.shared_dir) as shared_td:
+            shared_temp = Path(shared_td)
+            host_intermediate = shared_temp / host_src.name
+            vm_shared_temp = Path("/tmp/shared") / shared_temp.name
+            vm_intermediate = vm_shared_temp / host_src.name
+
+            self.succeed(make_command(["mkdir", "-p", vm_shared_temp]))
+            if host_src.is_dir():
+                shutil.copytree(host_src, host_intermediate)
+            else:
+                shutil.copy(host_src, host_intermediate)
+            self.succeed(make_command(["mkdir", "-p", vm_target.parent]))
+            self.succeed(make_command(["cp", "-r", vm_intermediate, vm_target]))
+
+    def copy_from_vm(self, source: str, target_dir: str = "") -> None:
+        """Copy a file from the VM (specified by an in-VM source path) to a path
+        relative to `$out`. The file is copied via the `shared_dir` shared among
+        all the VMs (using a temporary directory).
+        """
+        # Compute the source, target, and intermediate shared file names
+        vm_src = Path(source)
+        with tempfile.TemporaryDirectory(dir=self.shared_dir) as shared_td:
+            shared_temp = Path(shared_td)
+            vm_shared_temp = Path("/tmp/shared") / shared_temp.name
+            vm_intermediate = vm_shared_temp / vm_src.name
+            intermediate = shared_temp / vm_src.name
+            # Copy the file to the shared directory inside VM
+            self.succeed(make_command(["mkdir", "-p", vm_shared_temp]))
+            self.succeed(make_command(["cp", "-r", vm_src, vm_intermediate]))
+            abs_target = self.out_dir / target_dir / vm_src.name
+            abs_target.parent.mkdir(exist_ok=True, parents=True)
+            # Copy the file from the shared directory outside VM
+            if intermediate.is_dir():
+                shutil.copytree(intermediate, abs_target)
+            else:
+                shutil.copy(intermediate, abs_target)
+
+    def dump_tty_contents(self, tty: str) -> None:
+        """Debugging: Dump the contents of the TTY<n>"""
+        self.execute("fold -w 80 /dev/vcs{} | systemd-cat".format(tty))
+
+    def _get_screen_text_variants(self, model_ids: Iterable[int]) -> List[str]:
+        with tempfile.TemporaryDirectory() as tmpdir:
+            screenshot_path = os.path.join(tmpdir, "ppm")
+            self.send_monitor_command(f"screendump {screenshot_path}")
+            return _perform_ocr_on_screenshot(screenshot_path, model_ids)
+
+    def get_screen_text_variants(self) -> List[str]:
+        return self._get_screen_text_variants([0, 1, 2])
+
+    def get_screen_text(self) -> str:
+        return self._get_screen_text_variants([2])[0]
+
+    def wait_for_text(self, regex: str) -> None:
+        def screen_matches(last: bool) -> bool:
+            variants = self.get_screen_text_variants()
+            for text in variants:
+                if re.search(regex, text) is not None:
+                    return True
+
+            if last:
+                self.log("Last OCR attempt failed. Text was: {}".format(variants))
+
+            return False
+
+        with self.nested("waiting for {} to appear on screen".format(regex)):
+            retry(screen_matches)
+
+    def wait_for_console_text(self, regex: str) -> None:
+        with self.nested("waiting for {} to appear on console".format(regex)):
+            # Buffer the console output; this is needed
+            # to match multiline regexes.
+            console = io.StringIO()
+            while True:
+                try:
+                    console.write(self.last_lines.get())
+                except queue.Empty:
+                    self.sleep(1)
+                    continue
+                console.seek(0)
+                matches = re.search(regex, console.read())
+                if matches is not None:
+                    return
+
+    def send_key(self, key: str) -> None:
+        key = CHAR_TO_KEY.get(key, key)
+        self.send_monitor_command("sendkey {}".format(key))
+        time.sleep(0.01)
+
+    def start(self) -> None:
+        if self.booted:
+            return
+
+        self.log("starting vm")
+
+        def clear(path: Path) -> Path:
+            if path.exists():
+                path.unlink()
+            return path
+
+        def create_socket(path: Path) -> socket.socket:
+            s = socket.socket(family=socket.AF_UNIX, type=socket.SOCK_STREAM)
+            s.bind(str(path))
+            s.listen(1)
+            return s
+
+        monitor_socket = create_socket(clear(self.monitor_path))
+        shell_socket = create_socket(clear(self.shell_path))
+        self.process = self.start_command.run(
+            self.state_dir,
+            self.shared_dir,
+            self.monitor_path,
+            self.shell_path,
+        )
+        self.monitor, _ = monitor_socket.accept()
+        self.shell, _ = shell_socket.accept()
+
+        # Store the last serial console lines for use
+        # by wait_for_console_text
+        self.last_lines: Queue = Queue()
+
+        def process_serial_output() -> None:
+            assert self.process
+            assert self.process.stdout
+            for _line in self.process.stdout:
+                # Ignore undecodable bytes that may occur in boot menus
+                line = _line.decode(errors="ignore").replace("\r", "").rstrip()
+                self.last_lines.put(line)
+                self.log_serial(line)
+
+        self.serial_thread = threading.Thread(target=process_serial_output)
+        self.serial_thread.start()
+
+        self.wait_for_monitor_prompt()
+
+        self.pid = self.process.pid
+        self.booted = True
+
+        self.log("QEMU running (pid {})".format(self.pid))
+
+    def cleanup_statedir(self) -> None:
+        shutil.rmtree(self.state_dir)
+        rootlog.log(f"deleting VM state directory {self.state_dir}")
+        rootlog.log("if you want to keep the VM state, pass --keep-vm-state")
+
+    def shutdown(self) -> None:
+        if not self.booted:
+            return
+
+        assert self.shell
+        self.shell.send("poweroff\n".encode())
+        self.wait_for_shutdown()
+
+    def crash(self) -> None:
+        if not self.booted:
+            return
+
+        self.log("forced crash")
+        self.send_monitor_command("quit")
+        self.wait_for_shutdown()
+
+    def wait_for_x(self) -> None:
+        """Wait until it is possible to connect to the X server.  Note that
+        testing the existence of /tmp/.X11-unix/X0 is insufficient.
+        """
+
+        def check_x(_: Any) -> bool:
+            cmd = (
+                "journalctl -b SYSLOG_IDENTIFIER=systemd | "
+                + 'grep "Reached target Current graphical"'
+            )
+            status, _ = self.execute(cmd)
+            if status != 0:
+                return False
+            status, _ = self.execute("[ -e /tmp/.X11-unix/X0 ]")
+            return status == 0
+
+        with self.nested("waiting for the X11 server"):
+            retry(check_x)
+
+    def get_window_names(self) -> List[str]:
+        return self.succeed(
+            r"xwininfo -root -tree | sed 's/.*0x[0-9a-f]* \"\([^\"]*\)\".*/\1/; t; d'"
+        ).splitlines()
+
+    def wait_for_window(self, regexp: str) -> None:
+        pattern = re.compile(regexp)
+
+        def window_is_visible(last_try: bool) -> bool:
+            names = self.get_window_names()
+            if last_try:
+                self.log(
+                    "Last chance to match {} on the window list,".format(regexp)
+                    + " which currently contains: "
+                    + ", ".join(names)
+                )
+            return any(pattern.search(name) for name in names)
+
+        with self.nested("waiting for a window to appear"):
+            retry(window_is_visible)
+
+    def sleep(self, secs: int) -> None:
+        # We want to sleep in *guest* time, not *host* time.
+        self.succeed(f"sleep {secs}")
+
+    def forward_port(self, host_port: int = 8080, guest_port: int = 80) -> None:
+        """Forward a TCP port on the host to a TCP port on the guest.
+        Useful during interactive testing.
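+
+        For example (illustrative port numbers), forwarding host port 2222 to
+        the guest's SSH port:
+
+            machine.forward_port(host_port=2222, guest_port=22)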
+        """
+        self.send_monitor_command(
+            "hostfwd_add tcp::{}-:{}".format(host_port, guest_port)
+        )
+
+    def block(self) -> None:
+        """Make the machine unreachable by shutting down eth1 (the multicast
+        interface used to talk to the other VMs).  We keep eth0 up so that
+        the test driver can continue to talk to the machine.
+        """
+        self.send_monitor_command("set_link virtio-net-pci.1 off")
+
+    def unblock(self) -> None:
+        """Make the machine reachable."""
+        self.send_monitor_command("set_link virtio-net-pci.1 on")
+
+    def release(self) -> None:
+        if self.pid is None:
+            return
+        rootlog.info(f"kill machine (pid {self.pid})")
+        assert self.process
+        assert self.shell
+        assert self.monitor
+        assert self.serial_thread
+
+        self.process.terminate()
+        self.shell.close()
+        self.monitor.close()
+        self.serial_thread.join()
+
+    def run_callbacks(self) -> None:
+        for callback in self.callbacks:
+            callback()
diff --git a/nixos/lib/test-driver/test_driver/polling_condition.py b/nixos/lib/test-driver/test_driver/polling_condition.py
new file mode 100644
index 00000000000..459845452fa
--- /dev/null
+++ b/nixos/lib/test-driver/test_driver/polling_condition.py
@@ -0,0 +1,77 @@
+from typing import Callable, Optional
+import time
+
+from .logger import rootlog
+
+
+class PollingConditionFailed(Exception):
+    pass
+
+
+class PollingCondition:
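+    """A condition that the test driver re-evaluates periodically while it is
+    waiting for something else.  Checks are rate-limited to at most one every
+    `seconds_interval` seconds and are skipped while the condition is entered
+    as a context manager.
+
+    A minimal stand-alone sketch (in test scripts these objects are usually
+    constructed by the driver rather than by hand):
+
+        pc = PollingCondition(lambda: True, description="always true")
+        pc.maybe_raise()  # raises PollingConditionFailed if the check fails
+    """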
+    condition: Callable[[], bool]
+    seconds_interval: float
+    description: Optional[str]
+
+    last_called: float
+    entered: bool
+
+    def __init__(
+        self,
+        condition: Callable[[], Optional[bool]],
+        seconds_interval: float = 2.0,
+        description: Optional[str] = None,
+    ):
+        self.condition = condition  # type: ignore
+        self.seconds_interval = seconds_interval
+
+        if description is None:
+            if condition.__doc__:
+                self.description = condition.__doc__
+            else:
+                self.description = condition.__name__
+        else:
+            self.description = str(description)
+
+        self.last_called = float("-inf")
+        self.entered = False
+
+    def check(self) -> bool:
+        if self.entered or not self.overdue:
+            return True
+
+        with self, rootlog.nested(self.nested_message):
+            rootlog.info(f"Time since last: {time.monotonic() - self.last_called:.2f}s")
+            try:
+                res = self.condition()  # type: ignore
+            except Exception:
+                res = False
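+            # A condition that returns None (rather than an explicit bool)
+            # counts as success; only an explicit False or an exception fails.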
+            res = res is None or res
+            rootlog.info(self.status_message(res))
+            return res
+
+    def maybe_raise(self) -> None:
+        if not self.check():
+            raise PollingConditionFailed(self.status_message(False))
+
+    def status_message(self, status: bool) -> str:
+        return f"Polling condition {'succeeded' if status else 'failed'}: {self.description}"
+
+    @property
+    def nested_message(self) -> str:
+        nested_message = ["Checking polling condition"]
+        if self.description is not None:
+            nested_message.append(repr(self.description))
+
+        return " ".join(nested_message)
+
+    @property
+    def overdue(self) -> bool:
+        return self.last_called + self.seconds_interval < time.monotonic()
+
+    def __enter__(self) -> None:
+        self.entered = True
+
+    def __exit__(self, exc_type, exc_value, traceback) -> None:  # type: ignore
+        self.entered = False
+        self.last_called = time.monotonic()
diff --git a/nixos/lib/test-driver/test_driver/vlan.py b/nixos/lib/test-driver/test_driver/vlan.py
new file mode 100644
index 00000000000..e5c8f07b4ed
--- /dev/null
+++ b/nixos/lib/test-driver/test_driver/vlan.py
@@ -0,0 +1,58 @@
+from pathlib import Path
+import io
+import os
+import pty
+import subprocess
+
+from test_driver.logger import rootlog
+
+
+class VLan:
+    """This class handles a VLAN that the run-vm scripts identify via its
+    number handles. The network's lifetime equals the object's lifetime.
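+
+    Constructing a VLan starts a vde_switch process and exports its control
+    socket directory via the QEMU_VDE_SOCKET_<nr> environment variable, so
+    that the per-VM start scripts can attach the VMs' network interfaces to
+    this switch.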
+    """
+
+    nr: int
+    socket_dir: Path
+
+    process: subprocess.Popen
+    pid: int
+    fd: io.TextIOBase
+
+    def __repr__(self) -> str:
+        return f"<Vlan Nr. {self.nr}>"
+
+    def __init__(self, nr: int, tmp_dir: Path):
+        self.nr = nr
+        self.socket_dir = tmp_dir / f"vde{self.nr}.ctl"
+
+        # TODO: don't side-effect environment here
+        os.environ[f"QEMU_VDE_SOCKET_{self.nr}"] = str(self.socket_dir)
+
+        rootlog.info("start vlan")
+        pty_master, pty_slave = pty.openpty()
+
+        self.process = subprocess.Popen(
+            ["vde_switch", "-s", self.socket_dir, "--dirmode", "0700"],
+            stdin=pty_slave,
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE,
+            shell=False,
+        )
+        self.pid = self.process.pid
+        self.fd = os.fdopen(pty_master, "w")
+        self.fd.write("version\n")
+
+        # TODO: the Perl version checked whether this could be read from
+        # and, if not, died.  Here we could hang forever.  Fix it.
+        assert self.process.stdout is not None
+        self.process.stdout.readline()
+        if not (self.socket_dir / "ctl").exists():
+            rootlog.error("cannot start vde_switch")
+
+        rootlog.info(f"running vlan (pid {self.pid})")
+
+    def __del__(self) -> None:
+        rootlog.info(f"kill vlan (pid {self.pid})")
+        self.fd.close()
+        self.process.terminate()
diff --git a/nixos/lib/testing-python.nix b/nixos/lib/testing-python.nix
new file mode 100644
index 00000000000..0d3c3a89e78
--- /dev/null
+++ b/nixos/lib/testing-python.nix
@@ -0,0 +1,251 @@
+{ system
+, pkgs ? import ../.. { inherit system config; }
+  # Use a minimal kernel?
+, minimal ? false
+  # Ignored
+, config ? { }
+  # !!! See comment about args in lib/modules.nix
+, specialArgs ? { }
+  # Modules to add to each VM
+, extraConfigurations ? [ ]
+}:
+
+with pkgs;
+
+rec {
+
+  inherit pkgs;
+
+  # Run an automated test suite in the given virtual network.
+  runTests = { driver, driverInteractive, pos }:
+    stdenv.mkDerivation {
+      name = "vm-test-run-${driver.testName}";
+
+      requiredSystemFeatures = [ "kvm" "nixos-test" ];
+
+      buildCommand =
+        ''
+          mkdir -p $out
+
+          # effectively mute the XMLLogger
+          export LOGFILE=/dev/null
+
+          ${driver}/bin/nixos-test-driver -o $out
+        '';
+
+      passthru = driver.passthru // {
+        inherit driver driverInteractive;
+      };
+
+      inherit pos; # for better debugging
+    };
+
+  # Generate convenience wrappers for running the test driver.
+  # The vlans, vms and test script are passed as defaults through environment
+  # variables; the test script is also instantiated with the nodes if it is a
+  # function (contract).
+  setupDriverForTest = {
+      testScript
+    , testName
+    , nodes
+    , qemu_pkg ? pkgs.qemu_test
+    , enableOCR ? false
+    , skipLint ? false
+    , passthru ? {}
+    , interactive ? false
+  }:
+    let
+      # Reifies and correctly wraps the python test driver for
+      # the respective qemu version and with or without ocr support
+      testDriver = pkgs.callPackage ./test-driver {
+        inherit enableOCR qemu_pkg;
+        imagemagick_light = imagemagick_light.override { inherit libtiff; };
+        tesseract4 = tesseract4.override { enableLanguages = [ "eng" ]; };
+      };
+
+
+      testDriverName =
+        let
+          # The path to the vm monitor socket is typically built like this:
+          #   /tmp/nix-build-vm-test-run-$name.drv-0/vm-state-machine/monitor
+          # The max filename length of a unix domain socket is 108 bytes.
+          # This means $name can at most be 50 bytes long.
+          maxTestNameLen = 50;
+          testNameLen = builtins.stringLength testName;
+        in with builtins;
+          if testNameLen > maxTestNameLen then
+            abort
+              ("The name of the test '${testName}' must not be longer than ${toString maxTestNameLen} " +
+                "it's currently ${toString testNameLen} characters long.")
+          else
+            "nixos-test-driver-${testName}";
+
+      vlans = map (m: m.config.virtualisation.vlans) (lib.attrValues nodes);
+      vms = map (m: m.config.system.build.vm) (lib.attrValues nodes);
+
+      nodeHostNames = let
+        nodesList = map (c: c.config.system.name) (lib.attrValues nodes);
+      in nodesList ++ lib.optional (lib.length nodesList == 1) "machine";
+
+      # TODO: This is an implementation error and needs fixing
+      # the testing framework cannot legitimately restrict hostnames further
+      # beyond RFC1035
+      invalidNodeNames = lib.filter
+        (node: builtins.match "^[A-Za-z_]([A-Za-z0-9_]+)?$" node == null)
+        nodeHostNames;
+
+      testScript' =
+        # Call the test script with the computed nodes.
+        if lib.isFunction testScript
+        then testScript { inherit nodes; }
+        else testScript;
+
+    in
+    if lib.length invalidNodeNames > 0 then
+      throw ''
+        Cannot create machines out of (${lib.concatStringsSep ", " invalidNodeNames})!
+        All machines are referenced as Python variables in the testing framework,
+        so special characters in their names would break the script.
+
+        This is an IMPLEMENTATION ERROR and needs to be fixed. Meanwhile,
+        please stick to alphanumeric chars and underscores as separators.
+      ''
+    else lib.warnIf skipLint "Linting is disabled" (runCommand testDriverName
+      {
+        inherit testName;
+        nativeBuildInputs = [ makeWrapper ];
+        testScript = testScript';
+        preferLocalBuild = true;
+        passthru = passthru // {
+          inherit nodes;
+        };
+      }
+      ''
+        mkdir -p $out/bin
+
+        vmStartScripts=($(for i in ${toString vms}; do echo $i/bin/run-*-vm; done))
+        echo -n "$testScript" > $out/test-script
+        ln -s ${testDriver}/bin/nixos-test-driver $out/bin/nixos-test-driver
+
+        ${testDriver}/bin/generate-driver-symbols
+        ${lib.optionalString (!skipLint) ''
+          PYFLAKES_BUILTINS="$(
+            echo -n ${lib.escapeShellArg (lib.concatStringsSep "," nodeHostNames)},
+            < ${lib.escapeShellArg "driver-symbols"}
+          )" ${python3Packages.pyflakes}/bin/pyflakes $out/test-script
+        ''}
+
+        # set defaults through environment
+        # see: ./test-driver/test-driver.py argparse implementation
+        wrapProgram $out/bin/nixos-test-driver \
+          --set startScripts "''${vmStartScripts[*]}" \
+          --set testScript "$out/test-script" \
+          --set vlans '${toString vlans}' \
+          ${lib.optionalString (interactive) "--add-flags --interactive"}
+      '');
+
+  # Make a full-blown test
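+  #
+  # A minimal usage sketch (the node configuration and test script below are
+  # illustrative):
+  #
+  #   makeTest {
+  #     name = "example";
+  #     nodes.machine = { ... }: { };
+  #     testScript = ''
+  #       machine.wait_for_unit("multi-user.target")
+  #     '';
+  #   }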
+  makeTest =
+    { testScript
+    , enableOCR ? false
+    , name ? "unnamed"
+      # Skip linting (mainly intended for faster dev cycles)
+    , skipLint ? false
+    , passthru ? {}
+    , # Position used in error messages and for meta.position
+      pos ?
+        (if t.meta.description or null != null
+          then builtins.unsafeGetAttrPos "description" t.meta
+          else builtins.unsafeGetAttrPos "testScript" t)
+    , ...
+    } @ t:
+    let
+      nodes = qemu_pkg:
+        let
+          testScript' =
+            # Call the test script with the computed nodes.
+            if lib.isFunction testScript
+            then testScript { nodes = nodes qemu_pkg; }
+            else testScript;
+
+          build-vms = import ./build-vms.nix {
+            inherit system lib pkgs minimal specialArgs;
+            extraConfigurations = extraConfigurations ++ [(
+              { config, ... }:
+              {
+                virtualisation.qemu.package = qemu_pkg;
+
+                # Make sure all derivations referenced by the test
+                # script are available on the nodes. When the store is
+                # accessed through 9p, this isn't important, since
+                # everything in the store is available to the guest,
+                # but when building a root image it is, as all paths
+                # that should be available to the guest has to be
+                # copied to the image.
+                virtualisation.additionalPaths =
+                  lib.optional
+                    # A testScript may evaluate nodes, which has caused
+                    # infinite recursions. The demand cycle involves:
+                    #   testScript -->
+                    #   nodes -->
+                    #   toplevel -->
+                    #   additionalPaths -->
+                    #   hasContext testScript' -->
+                    #   testScript (ad infinitum)
+                    # If we don't need to build an image, we can break this
+                    # cycle by short-circuiting when useNixStoreImage is false.
+                    (config.virtualisation.useNixStoreImage && builtins.hasContext testScript')
+                    (pkgs.writeStringReferencesToFile testScript');
+
+                # Ensure we do not use aliases. Ideally this is only set
+                # when the test framework is used by Nixpkgs NixOS tests.
+                nixpkgs.config.allowAliases = false;
+              }
+            )];
+          };
+        in
+          build-vms.buildVirtualNetwork (
+              t.nodes or (if t ? machine then { machine = t.machine; } else { })
+          );
+
+      driver = setupDriverForTest {
+        inherit testScript enableOCR skipLint passthru;
+        testName = name;
+        qemu_pkg = pkgs.qemu_test;
+        nodes = nodes pkgs.qemu_test;
+      };
+      driverInteractive = setupDriverForTest {
+        inherit testScript enableOCR skipLint passthru;
+        testName = name;
+        qemu_pkg = pkgs.qemu;
+        nodes = nodes pkgs.qemu;
+        interactive = true;
+      };
+
+      test =
+        let
+          passMeta = drv: drv // lib.optionalAttrs (t ? meta) {
+            meta = (drv.meta or { }) // t.meta;
+          };
+        in passMeta (runTests { inherit driver pos driverInteractive; });
+
+    in
+      test // {
+        inherit test driver driverInteractive nodes;
+      };
+
+  abortForFunction = functionName: abort ''The ${functionName} function was
+    removed because it is not an essential part of the NixOS testing
+    infrastructure. It had no usage in NixOS or Nixpkgs and it had no designated
+    maintainer. You are free to reintroduce it by documenting it in the manual
+    and adding yourself as maintainer. It was removed in
+    https://github.com/NixOS/nixpkgs/pull/137013
+  '';
+
+  runInMachine = abortForFunction "runInMachine";
+
+  runInMachineWithX = abortForFunction "runInMachineWithX";
+
+  simpleTest = as: (makeTest as).test;
+
+}
diff --git a/nixos/lib/utils.nix b/nixos/lib/utils.nix
new file mode 100644
index 00000000000..ae68c3920c5
--- /dev/null
+++ b/nixos/lib/utils.nix
@@ -0,0 +1,201 @@
+{ lib, config, pkgs }: with lib;
+
+rec {
+
+  # Copy configuration files to avoid having the entire sources in the system closure
+  copyFile = filePath: pkgs.runCommand (builtins.unsafeDiscardStringContext (builtins.baseNameOf filePath)) {} ''
+    cp ${filePath} $out
+  '';
+
+  # Check whether a fileSystem is needed for boot.  NOTE: Make sure
+  # pathsNeededForBoot is closed under the parent relationship, i.e. if /a/b/c
+  # is in the list, put /a and /a/b in as well.
+  pathsNeededForBoot = [ "/" "/nix" "/nix/store" "/var" "/var/log" "/var/lib" "/var/lib/nixos" "/etc" "/usr" ];
+  fsNeededForBoot = fs: fs.neededForBoot || elem fs.mountPoint pathsNeededForBoot;
+
+  # Check whether `b` depends on `a` as a fileSystem
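+  # For example (illustrative filesystem attrsets, showing only the fields
+  # that are inspected below):
+  #   fsBefore { mountPoint = "/";    device = "/dev/sda1"; depends = [ ]; }
+  #            { mountPoint = "/nix"; device = "/dev/sda2"; depends = [ ]; }
+  #   => true, because "/" (after normalisation) is a prefix of "/nix/".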
+  fsBefore = a: b:
+    let
+      # normalisePath adds a slash at the end of the path if it didn't already
+      # have one.
+      #
+      # The reason slashes are added at the end of each path is to prevent `b`
+      # from accidentally depending on `a` in cases like
+      #    a = { mountPoint = "/aaa"; ... }
+      #    b = { device     = "/aaaa"; ... }
+      # Here a.mountPoint *is* a prefix of b.device even though a.mountPoint is
+      # *not* a parent of b.device. If we add a slash at the end of each string,
+      # though, this is not a problem: "/aaa/" is not a prefix of "/aaaa/".
+      normalisePath = path: "${path}${optionalString (!(hasSuffix "/" path)) "/"}";
+      normalise = mount: mount // { device = normalisePath (toString mount.device);
+                                    mountPoint = normalisePath mount.mountPoint;
+                                    depends = map normalisePath mount.depends;
+                                  };
+
+      a' = normalise a;
+      b' = normalise b;
+
+    in hasPrefix a'.mountPoint b'.device
+    || hasPrefix a'.mountPoint b'.mountPoint
+    || any (hasPrefix a'.mountPoint) b'.depends;
+
+  # Escape a path according to the systemd rules, e.g. /dev/xyzzy
+  # becomes dev-xyzzy.  FIXME: slow.
+  escapeSystemdPath = s:
+   replaceChars ["/" "-" " "] ["-" "\\x2d" "\\x20"]
+   (removePrefix "/" s);
+
+  # Quotes an argument for use in Exec* service lines.
+  # systemd accepts "-quoted strings with escape sequences, toJSON produces
+  # a subset of these.
+  # Additionally we escape % to disallow expansion of % specifiers. Any lone ;
+  # in the input will be turned into ";" and thus lose its special meaning.
+  # Every $ is escaped to $$; this makes it unnecessary to disable environment
+  # substitution for the directive.
+  escapeSystemdExecArg = arg:
+    let
+      s = if builtins.isPath arg then "${arg}"
+        else if builtins.isString arg then arg
+        else if builtins.isInt arg || builtins.isFloat arg then toString arg
+        else throw "escapeSystemdExecArg only allows strings, paths and numbers";
+    in
+      replaceChars [ "%" "$" ] [ "%%" "$$" ] (builtins.toJSON s);
+
+  # Quotes a list of arguments into a single string for use in a Exec*
+  # line.
+  escapeSystemdExecArgs = concatMapStringsSep " " escapeSystemdExecArg;
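+  # For example:
+  #   escapeSystemdExecArgs [ "/bin/sh" "-c" "echo 100%" ]
+  #   => ''"/bin/sh" "-c" "echo 100%%"''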
+
+  # Returns a system path for a given shell package
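+  # (for example, a shell package that sets shellPath = "/bin/zsh" yields
+  # "/run/current-system/sw/bin/zsh")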
+  toShellPath = shell:
+    if types.shellPackage.check shell then
+      "/run/current-system/sw${shell.shellPath}"
+    else if types.package.check shell then
+      throw "${shell} is not a shell package"
+    else
+      shell;
+
+  /* Recurse into a list or an attrset, searching for attrs named like
+     the value of the "attr" parameter, and return an attrset where the
+     names are the corresponding jq path where the attrs were found and
+     the values are the values of the attrs.
+
+     Example:
+       recursiveGetAttrWithJqPrefix {
+         example = [
+           {
+             irrelevant = "not interesting";
+           }
+           {
+             ignored = "ignored attr";
+             relevant = {
+               secret = {
+                 _secret = "/path/to/secret";
+               };
+             };
+           }
+         ];
+       } "_secret" -> { ".example[1].relevant.secret" = "/path/to/secret"; }
+  */
+  recursiveGetAttrWithJqPrefix = item: attr:
+    let
+      recurse = prefix: item:
+        if item ? ${attr} then
+          nameValuePair prefix item.${attr}
+        else if isAttrs item then
+          map (name: recurse (prefix + "." + name) item.${name}) (attrNames item)
+        else if isList item then
+          imap0 (index: item: recurse (prefix + "[${toString index}]") item) item
+        else
+          [];
+    in listToAttrs (flatten (recurse "" item));
+
+  /* Takes an attrset and a file path and generates a bash snippet that
+     outputs a JSON file at the file path with all instances of
+
+     { _secret = "/path/to/secret" }
+
+     in the attrset replaced with the contents of the file
+     "/path/to/secret" in the output JSON.
+
+     When a configuration option accepts an attrset that is finally
+     converted to JSON, this makes it possible to let the user define
+     arbitrary secret values.
+
+     Example:
+       If the file "/path/to/secret" contains the string
+       "topsecretpassword1234",
+
+       genJqSecretsReplacementSnippet {
+         example = [
+           {
+             irrelevant = "not interesting";
+           }
+           {
+             ignored = "ignored attr";
+             relevant = {
+               secret = {
+                 _secret = "/path/to/secret";
+               };
+             };
+           }
+         ];
+       } "/path/to/output.json"
+
+       would generate a snippet that, when run, outputs the following
+       JSON file at "/path/to/output.json":
+
+       {
+         "example": [
+           {
+             "irrelevant": "not interesting"
+           },
+           {
+             "ignored": "ignored attr",
+             "relevant": {
+               "secret": "topsecretpassword1234"
+             }
+           }
+         ]
+       }
+  */
+  genJqSecretsReplacementSnippet = genJqSecretsReplacementSnippet' "_secret";
+
+  # Like genJqSecretsReplacementSnippet, but allows the name of the
+  # attr which identifies the secret to be changed.
+  genJqSecretsReplacementSnippet' = attr: set: output:
+    let
+      secrets = recursiveGetAttrWithJqPrefix set attr;
+    in ''
+      if [[ -h '${output}' ]]; then
+        rm '${output}'
+      fi
+
+      inherit_errexit_enabled=0
+      shopt -pq inherit_errexit && inherit_errexit_enabled=1
+      shopt -s inherit_errexit
+    ''
+    + concatStringsSep
+        "\n"
+        (imap1 (index: name: ''
+                  secret${toString index}=$(<'${secrets.${name}}')
+                  export secret${toString index}
+                '')
+               (attrNames secrets))
+    + "\n"
+    + "${pkgs.jq}/bin/jq >'${output}' '"
+    + concatStringsSep
+      " | "
+      (imap1 (index: name: ''${name} = $ENV.secret${toString index}'')
+             (attrNames secrets))
+    + ''
+      ' <<'EOF'
+      ${builtins.toJSON set}
+      EOF
+      (( ! $inherit_errexit_enabled )) && shopt -u inherit_errexit
+    '';
+
+  systemdUtils = {
+    lib = import ./systemd-lib.nix { inherit lib config pkgs; };
+    unitOptions = import ./systemd-unit-options.nix { inherit lib systemdUtils; };
+  };
+}