Diffstat (limited to 'nixos/modules/tasks/filesystems')
-rw-r--r--  nixos/modules/tasks/filesystems/apfs.nix           |  22
-rw-r--r--  nixos/modules/tasks/filesystems/bcachefs.nix       |  65
-rw-r--r--  nixos/modules/tasks/filesystems/btrfs.nix          | 149
-rw-r--r--  nixos/modules/tasks/filesystems/cifs.nix           |  25
-rw-r--r--  nixos/modules/tasks/filesystems/ecryptfs.nix       |  24
-rw-r--r--  nixos/modules/tasks/filesystems/exfat.nix          |  13
-rw-r--r--  nixos/modules/tasks/filesystems/ext.nix            |  22
-rw-r--r--  nixos/modules/tasks/filesystems/f2fs.nix           |  25
-rw-r--r--  nixos/modules/tasks/filesystems/glusterfs.nix      |  11
-rw-r--r--  nixos/modules/tasks/filesystems/jfs.nix            |  19
-rw-r--r--  nixos/modules/tasks/filesystems/nfs.nix            | 135
-rw-r--r--  nixos/modules/tasks/filesystems/ntfs.nix           |  11
-rw-r--r--  nixos/modules/tasks/filesystems/reiserfs.nix       |  25
-rw-r--r--  nixos/modules/tasks/filesystems/unionfs-fuse.nix   |  32
-rw-r--r--  nixos/modules/tasks/filesystems/vboxsf.nix         |  23
-rw-r--r--  nixos/modules/tasks/filesystems/vfat.nix           |  25
-rw-r--r--  nixos/modules/tasks/filesystems/xfs.nix            |  30
-rw-r--r--  nixos/modules/tasks/filesystems/zfs.nix            | 795
18 files changed, 1451 insertions, 0 deletions
diff --git a/nixos/modules/tasks/filesystems/apfs.nix b/nixos/modules/tasks/filesystems/apfs.nix
new file mode 100644
index 00000000000..2f2be351df6
--- /dev/null
+++ b/nixos/modules/tasks/filesystems/apfs.nix
@@ -0,0 +1,22 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  inInitrd = any (fs: fs == "apfs") config.boot.initrd.supportedFilesystems;
+
+in
+
+{
+  config = mkIf (any (fs: fs == "apfs") config.boot.supportedFilesystems) {
+
+    system.fsPackages = [ pkgs.apfsprogs ];
+
+    boot.extraModulePackages = [ config.boot.kernelPackages.apfs ];
+
+    boot.initrd.kernelModules = mkIf inInitrd [ "apfs" ];
+
+    # Don't copy apfsck into the initramfs since it does not support repairing the filesystem
+  };
+}
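
As a usage sketch (not part of the diff above): enabling APFS support and mounting a volume could look like the snippet below. The mount point and UUID are placeholders, and mounting read-only is a cautious choice here, not something the module requires.

    # configuration.nix (sketch)
    {
      boot.supportedFilesystems = [ "apfs" ];

      fileSystems."/mnt/macos" = {
        device = "/dev/disk/by-uuid/XXXX-XXXX";  # placeholder UUID
        fsType = "apfs";
        options = [ "ro" ];                      # cautious choice, not required by the module
      };
    }
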
diff --git a/nixos/modules/tasks/filesystems/bcachefs.nix b/nixos/modules/tasks/filesystems/bcachefs.nix
new file mode 100644
index 00000000000..ac41ba5f93a
--- /dev/null
+++ b/nixos/modules/tasks/filesystems/bcachefs.nix
@@ -0,0 +1,65 @@
+{ config, lib, pkgs, utils, ... }:
+
+with lib;
+
+let
+
+  bootFs = filterAttrs (n: fs: (fs.fsType == "bcachefs") && (utils.fsNeededForBoot fs)) config.fileSystems;
+
+  commonFunctions = ''
+    prompt() {
+        local name="$1"
+        printf "enter passphrase for $name: "
+    }
+    tryUnlock() {
+        local name="$1"
+        local path="$2"
+        if bcachefs unlock -c $path > /dev/null 2> /dev/null; then    # test for encryption
+            prompt $name
+            until bcachefs unlock $path 2> /dev/null; do              # repeat until successfully unlocked
+                printf "unlocking failed!\n"
+                prompt $name
+            done
+            printf "unlocking successful.\n"
+        fi
+    }
+  '';
+
+  openCommand = name: fs:
+    let
+      # we need only unlock one device manually, and cannot pass multiple at once
+      # remove this adaptation when bcachefs implements mounting by filesystem uuid
+      # also, implement automatic waiting for the constituent devices when that happens
+      # bcachefs does not support mounting devices with colons in the path, so neither do we (see #49671)
+      firstDevice = head (splitString ":" fs.device);
+    in
+      ''
+        tryUnlock ${name} ${firstDevice}
+      '';
+
+in
+
+{
+  config = mkIf (elem "bcachefs" config.boot.supportedFilesystems) (mkMerge [
+    {
+      system.fsPackages = [ pkgs.bcachefs-tools ];
+
+      # use kernel package with bcachefs support until it's in mainline
+      boot.kernelPackages = pkgs.linuxPackages_testing_bcachefs;
+    }
+
+    (mkIf ((elem "bcachefs" config.boot.initrd.supportedFilesystems) || (bootFs != {})) {
+      # chacha20 and poly1305 are required only for decryption attempts
+      boot.initrd.availableKernelModules = [ "bcachefs" "sha256" "chacha20" "poly1305" ];
+
+      boot.initrd.extraUtilsCommands = ''
+        copy_bin_and_libs ${pkgs.bcachefs-tools}/bin/bcachefs
+      '';
+      boot.initrd.extraUtilsCommandsTest = ''
+        $out/bin/bcachefs version
+      '';
+
+      boot.initrd.postDeviceCommands = commonFunctions + concatStrings (mapAttrsToList openCommand bootFs);
+    })
+  ]);
+}
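
A rough sketch of declaring a multi-device bcachefs filesystem (device paths are placeholders). The colon-separated device list and the fact that only the first device is unlocked follow the comments in openCommand above; declaring the fsType is normally enough to trigger the module's mkIf condition, since NixOS adds declared fsTypes to boot.supportedFilesystems (behaviour of the general filesystems module, not shown in this diff).

    # configuration.nix (sketch)
    {
      fileSystems."/" = {
        device = "/dev/sda1:/dev/sdb1";  # placeholder member devices; colons separate them
        fsType = "bcachefs";
      };
    }
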
diff --git a/nixos/modules/tasks/filesystems/btrfs.nix b/nixos/modules/tasks/filesystems/btrfs.nix
new file mode 100644
index 00000000000..ae1dab5b8d8
--- /dev/null
+++ b/nixos/modules/tasks/filesystems/btrfs.nix
@@ -0,0 +1,149 @@
+{ config, lib, pkgs, utils, ... }:
+
+with lib;
+
+let
+
+  inInitrd = any (fs: fs == "btrfs") config.boot.initrd.supportedFilesystems;
+  inSystem = any (fs: fs == "btrfs") config.boot.supportedFilesystems;
+
+  cfgScrub = config.services.btrfs.autoScrub;
+
+  enableAutoScrub = cfgScrub.enable;
+  enableBtrfs = inInitrd || inSystem || enableAutoScrub;
+
+in
+
+{
+  options = {
+    # One could also do regular btrfs balances, but that shouldn't be necessary
+    # during normal usage and as long as the filesystems aren't filled near capacity
+    services.btrfs.autoScrub = {
+      enable = mkEnableOption "regular btrfs scrub";
+
+      fileSystems = mkOption {
+        type = types.listOf types.path;
+        example = [ "/" ];
+        description = ''
+          List of paths to btrfs filesystems to regularly call <command>btrfs scrub</command> on.
+          Defaults to all mount points with btrfs filesystems.
+          If you mount a filesystem multiple times or additionally mount subvolumes,
+          you need to manually specify this list to avoid scrubbing multiple times.
+        '';
+      };
+
+      interval = mkOption {
+        default = "monthly";
+        type = types.str;
+        example = "weekly";
+        description = ''
+          Systemd calendar expression for when to scrub btrfs filesystems.
+          The recommended period is a month but could be less
+          (<citerefentry><refentrytitle>btrfs-scrub</refentrytitle>
+          <manvolnum>8</manvolnum></citerefentry>).
+          See
+          <citerefentry><refentrytitle>systemd.time</refentrytitle>
+          <manvolnum>7</manvolnum></citerefentry>
+          for more information on the syntax.
+        '';
+      };
+
+    };
+  };
+
+  config = mkMerge [
+    (mkIf enableBtrfs {
+      system.fsPackages = [ pkgs.btrfs-progs ];
+
+      boot.initrd.kernelModules = mkIf inInitrd [ "btrfs" ];
+      boot.initrd.availableKernelModules = mkIf inInitrd (
+        [ "crc32c" ]
+        ++ optionals (config.boot.kernelPackages.kernel.kernelAtLeast "5.5") [
+          # Needed for mounting filesystems with new checksums
+          "xxhash_generic"
+          "blake2b_generic"
+          "sha256_generic" # Should be baked into our kernel, just to be sure
+        ]
+      );
+
+      boot.initrd.extraUtilsCommands = mkIf inInitrd
+      ''
+        copy_bin_and_libs ${pkgs.btrfs-progs}/bin/btrfs
+        ln -sv btrfs $out/bin/btrfsck
+        ln -sv btrfsck $out/bin/fsck.btrfs
+      '';
+
+      boot.initrd.extraUtilsCommandsTest = mkIf inInitrd
+      ''
+        $out/bin/btrfs --version
+      '';
+
+      boot.initrd.postDeviceCommands = mkIf inInitrd
+      ''
+        btrfs device scan
+      '';
+    })
+
+    (mkIf enableAutoScrub {
+      assertions = [
+        {
+          assertion = cfgScrub.enable -> (cfgScrub.fileSystems != []);
+          message = ''
+            If 'services.btrfs.autoScrub' is enabled, you need to have at least one
+            btrfs file system mounted via 'fileSystems' or specify a list manually
+            in 'services.btrfs.autoScrub.fileSystems'.
+          '';
+        }
+      ];
+
+      # This will yield duplicated units if the user mounts a filesystem multiple times
+      # or additionally mounts subvolumes, but going the other way around via devices would
+      # yield duplicated units when a filesystem spans multiple devices.
+      # This way around seems like the more sensible default.
+      services.btrfs.autoScrub.fileSystems = mkDefault (mapAttrsToList (name: fs: fs.mountPoint)
+      (filterAttrs (name: fs: fs.fsType == "btrfs") config.fileSystems));
+
+      # TODO: Did not manage to do it via the usual btrfs-scrub@.timer/.service
+      # template units due to problems enabling the parameterized units,
+      # so settled with many units and templating via nix for now.
+      # https://github.com/NixOS/nixpkgs/pull/32496#discussion_r156527544
+      systemd.timers = let
+        scrubTimer = fs: let
+          fs' = utils.escapeSystemdPath fs;
+        in nameValuePair "btrfs-scrub-${fs'}" {
+          description = "regular btrfs scrub timer on ${fs}";
+
+          wantedBy = [ "timers.target" ];
+          timerConfig = {
+            OnCalendar = cfgScrub.interval;
+            AccuracySec = "1d";
+            Persistent = true;
+          };
+        };
+      in listToAttrs (map scrubTimer cfgScrub.fileSystems);
+
+      systemd.services = let
+        scrubService = fs: let
+          fs' = utils.escapeSystemdPath fs;
+        in nameValuePair "btrfs-scrub-${fs'}" {
+          description = "btrfs scrub on ${fs}";
+          # scrub prevents suspend2ram or proper shutdown
+          conflicts = [ "shutdown.target" "sleep.target" ];
+          before = [ "shutdown.target" "sleep.target" ];
+
+          serviceConfig = {
+            # simple and not oneshot, otherwise ExecStop is not used
+            Type = "simple";
+            Nice = 19;
+            IOSchedulingClass = "idle";
+            ExecStart = "${pkgs.btrfs-progs}/bin/btrfs scrub start -B ${fs}";
+            # if the service is stopped before scrub end, cancel it
+            ExecStop  = pkgs.writeShellScript "btrfs-scrub-maybe-cancel" ''
+              (${pkgs.btrfs-progs}/bin/btrfs scrub status ${fs} | ${pkgs.gnugrep}/bin/grep finished) || ${pkgs.btrfs-progs}/bin/btrfs scrub cancel ${fs}
+            '';
+          };
+        };
+      in listToAttrs (map scrubService cfgScrub.fileSystems);
+    })
+  ];
+}
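
An illustrative configuration for the scrub options defined above; the mount point and disk label are placeholders.

    # configuration.nix (sketch)
    {
      fileSystems."/data" = {
        device = "/dev/disk/by-label/data";  # placeholder label
        fsType = "btrfs";
      };

      services.btrfs.autoScrub = {
        enable = true;
        interval = "weekly";                 # systemd calendar expression, see systemd.time(7)
        fileSystems = [ "/data" ];           # optional; defaults to all btrfs mount points
      };
    }
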
diff --git a/nixos/modules/tasks/filesystems/cifs.nix b/nixos/modules/tasks/filesystems/cifs.nix
new file mode 100644
index 00000000000..47ba0c03c56
--- /dev/null
+++ b/nixos/modules/tasks/filesystems/cifs.nix
@@ -0,0 +1,25 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  inInitrd = any (fs: fs == "cifs") config.boot.initrd.supportedFilesystems;
+
+in
+
+{
+  config = {
+
+    system.fsPackages = mkIf (any (fs: fs == "cifs") config.boot.supportedFilesystems) [ pkgs.cifs-utils ];
+
+    boot.initrd.availableKernelModules = mkIf inInitrd
+      [ "cifs" "nls_utf8" "hmac" "md4" "ecb" "des_generic" "sha256" ];
+
+    boot.initrd.extraUtilsCommands = mkIf inInitrd
+      ''
+        copy_bin_and_libs ${pkgs.cifs-utils}/sbin/mount.cifs
+      '';
+
+  };
+}
diff --git a/nixos/modules/tasks/filesystems/ecryptfs.nix b/nixos/modules/tasks/filesystems/ecryptfs.nix
new file mode 100644
index 00000000000..8138e659161
--- /dev/null
+++ b/nixos/modules/tasks/filesystems/ecryptfs.nix
@@ -0,0 +1,24 @@
+{ config, lib, pkgs, ... }:
+# TODO: make ecryptfs work in initramfs?
+
+with lib;
+
+{
+  config = mkIf (any (fs: fs == "ecryptfs") config.boot.supportedFilesystems) {
+    system.fsPackages = [ pkgs.ecryptfs ];
+    security.wrappers = {
+      "mount.ecryptfs_private" =
+        { setuid = true;
+          owner = "root";
+          group = "root";
+          source = "${pkgs.ecryptfs.out}/bin/mount.ecryptfs_private";
+        };
+      "umount.ecryptfs_private" =
+        { setuid = true;
+          owner = "root";
+          group = "root";
+          source = "${pkgs.ecryptfs.out}/bin/umount.ecryptfs_private";
+        };
+    };
+  };
+}
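
A minimal sketch of enabling the module. The setuid wrappers above exist so that unprivileged users can run mount.ecryptfs_private and umount.ecryptfs_private for their own private directories; setting such a directory up in the first place is assumed and not covered by this module.

    # configuration.nix (sketch)
    {
      boot.supportedFilesystems = [ "ecryptfs" ];
    }
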
diff --git a/nixos/modules/tasks/filesystems/exfat.nix b/nixos/modules/tasks/filesystems/exfat.nix
new file mode 100644
index 00000000000..540b9b91c3e
--- /dev/null
+++ b/nixos/modules/tasks/filesystems/exfat.nix
@@ -0,0 +1,13 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+{
+  config = mkIf (any (fs: fs == "exfat") config.boot.supportedFilesystems) {
+    system.fsPackages = if config.boot.kernelPackages.kernelOlder "5.7" then [
+      pkgs.exfat # FUSE
+    ] else [
+      pkgs.exfatprogs # non-FUSE
+    ];
+  };
+}
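
For illustration, a removable exFAT drive could be declared as follows (label and options are placeholders). The module above picks the FUSE implementation (pkgs.exfat) on kernels older than 5.7 and pkgs.exfatprogs for the in-kernel driver otherwise.

    # configuration.nix (sketch)
    {
      fileSystems."/media/usb" = {
        device = "/dev/disk/by-label/USBDRIVE";  # placeholder label
        fsType = "exfat";
        options = [ "nofail" ];                  # don't block boot on a missing drive
      };
    }
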
diff --git a/nixos/modules/tasks/filesystems/ext.nix b/nixos/modules/tasks/filesystems/ext.nix
new file mode 100644
index 00000000000..a14a3ac3854
--- /dev/null
+++ b/nixos/modules/tasks/filesystems/ext.nix
@@ -0,0 +1,22 @@
+{ pkgs, ... }:
+
+{
+  config = {
+
+    system.fsPackages = [ pkgs.e2fsprogs ];
+
+    # As of kernel 4.3, there is no separate ext3 driver (they're also handled by ext4.ko)
+    boot.initrd.availableKernelModules = [ "ext2" "ext4" ];
+
+    boot.initrd.extraUtilsCommands =
+      ''
+        # Copy e2fsck and friends.
+        copy_bin_and_libs ${pkgs.e2fsprogs}/sbin/e2fsck
+        copy_bin_and_libs ${pkgs.e2fsprogs}/sbin/tune2fs
+        ln -sv e2fsck $out/bin/fsck.ext2
+        ln -sv e2fsck $out/bin/fsck.ext3
+        ln -sv e2fsck $out/bin/fsck.ext4
+      '';
+
+  };
+}
diff --git a/nixos/modules/tasks/filesystems/f2fs.nix b/nixos/modules/tasks/filesystems/f2fs.nix
new file mode 100644
index 00000000000..a305235979a
--- /dev/null
+++ b/nixos/modules/tasks/filesystems/f2fs.nix
@@ -0,0 +1,25 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  inInitrd = any (fs: fs == "f2fs") config.boot.initrd.supportedFilesystems;
+  fileSystems = filter (x: x.fsType == "f2fs") config.system.build.fileSystems;
+in
+{
+  config = mkIf (any (fs: fs == "f2fs") config.boot.supportedFilesystems) {
+
+    system.fsPackages = [ pkgs.f2fs-tools ];
+
+    boot.initrd.availableKernelModules = mkIf inInitrd [ "f2fs" "crc32" ];
+
+    boot.initrd.extraUtilsCommands = mkIf inInitrd ''
+      copy_bin_and_libs ${pkgs.f2fs-tools}/sbin/fsck.f2fs
+      ${optionalString (any (fs: fs.autoResize) fileSystems) ''
+        # We need resize.f2fs from f2fs-tools to resize filesystems
+        copy_bin_and_libs ${pkgs.f2fs-tools}/sbin/resize.f2fs
+      ''}
+
+    '';
+  };
+}
diff --git a/nixos/modules/tasks/filesystems/glusterfs.nix b/nixos/modules/tasks/filesystems/glusterfs.nix
new file mode 100644
index 00000000000..e8c7fa8efba
--- /dev/null
+++ b/nixos/modules/tasks/filesystems/glusterfs.nix
@@ -0,0 +1,11 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+{
+  config = mkIf (any (fs: fs == "glusterfs") config.boot.supportedFilesystems) {
+
+    system.fsPackages = [ pkgs.glusterfs ];
+
+  };
+}
diff --git a/nixos/modules/tasks/filesystems/jfs.nix b/nixos/modules/tasks/filesystems/jfs.nix
new file mode 100644
index 00000000000..fc3905c7dc2
--- /dev/null
+++ b/nixos/modules/tasks/filesystems/jfs.nix
@@ -0,0 +1,19 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  inInitrd = any (fs: fs == "jfs") config.boot.initrd.supportedFilesystems;
+in
+{
+  config = mkIf (any (fs: fs == "jfs") config.boot.supportedFilesystems) {
+
+    system.fsPackages = [ pkgs.jfsutils ];
+
+    boot.initrd.kernelModules = mkIf inInitrd [ "jfs" ];
+
+    boot.initrd.extraUtilsCommands = mkIf inInitrd ''
+      copy_bin_and_libs ${pkgs.jfsutils}/sbin/fsck.jfs
+    '';
+  };
+}
diff --git a/nixos/modules/tasks/filesystems/nfs.nix b/nixos/modules/tasks/filesystems/nfs.nix
new file mode 100644
index 00000000000..38c3920a78a
--- /dev/null
+++ b/nixos/modules/tasks/filesystems/nfs.nix
@@ -0,0 +1,135 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  inInitrd = any (fs: fs == "nfs") config.boot.initrd.supportedFilesystems;
+
+  nfsStateDir = "/var/lib/nfs";
+
+  rpcMountpoint = "${nfsStateDir}/rpc_pipefs";
+
+  format = pkgs.formats.ini {};
+
+  idmapdConfFile = format.generate "idmapd.conf" cfg.idmapd.settings;
+  nfsConfFile = pkgs.writeText "nfs.conf" cfg.extraConfig;
+  requestKeyConfFile = pkgs.writeText "request-key.conf" ''
+    create id_resolver * * ${pkgs.nfs-utils}/bin/nfsidmap -t 600 %k %d
+  '';
+
+  cfg = config.services.nfs;
+
+in
+
+{
+  ###### interface
+
+  options = {
+    services.nfs = {
+      idmapd.settings = mkOption {
+        type = format.type;
+        default = {};
+        description = ''
+          libnfsidmap configuration. Refer to
+          <link xlink:href="https://linux.die.net/man/5/idmapd.conf"/>
+          for details.
+        '';
+        example = literalExpression ''
+          {
+            Translation = {
+              GSS-Methods = "static,nsswitch";
+            };
+            Static = {
+              "root/hostname.domain.com@REALM.COM" = "root";
+            };
+          }
+        '';
+      };
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = ''
+          Extra nfs-utils configuration.
+        '';
+      };
+    };
+  };
+
+  ###### implementation
+
+  config = mkIf (any (fs: fs == "nfs" || fs == "nfs4") config.boot.supportedFilesystems) {
+
+    services.rpcbind.enable = true;
+
+    services.nfs.idmapd.settings = {
+      General = mkMerge [
+        { Pipefs-Directory = rpcMountpoint; }
+        (mkIf (config.networking.domain != null) { Domain = config.networking.domain; })
+      ];
+      Mapping = {
+        Nobody-User = "nobody";
+        Nobody-Group = "nogroup";
+      };
+      Translation = {
+        Method = "nsswitch";
+      };
+    };
+
+    system.fsPackages = [ pkgs.nfs-utils ];
+
+    boot.initrd.kernelModules = mkIf inInitrd [ "nfs" ];
+
+    systemd.packages = [ pkgs.nfs-utils ];
+
+    environment.systemPackages = [ pkgs.keyutils ];
+
+    environment.etc = {
+      "idmapd.conf".source = idmapdConfFile;
+      "nfs.conf".source = nfsConfFile;
+      "request-key.conf".source = requestKeyConfFile;
+    };
+
+    systemd.services.nfs-blkmap =
+      { restartTriggers = [ nfsConfFile ];
+      };
+
+    systemd.targets.nfs-client =
+      { wantedBy = [ "multi-user.target" "remote-fs.target" ];
+      };
+
+    systemd.services.nfs-idmapd =
+      { restartTriggers = [ idmapdConfFile ];
+      };
+
+    systemd.services.nfs-mountd =
+      { restartTriggers = [ nfsConfFile ];
+        enable = mkDefault false;
+      };
+
+    systemd.services.nfs-server =
+      { restartTriggers = [ nfsConfFile ];
+        enable = mkDefault false;
+      };
+
+    systemd.services.auth-rpcgss-module =
+      {
+        unitConfig.ConditionPathExists = [ "" "/etc/krb5.keytab" ];
+      };
+
+    systemd.services.rpc-gssd =
+      { restartTriggers = [ nfsConfFile ];
+        unitConfig.ConditionPathExists = [ "" "/etc/krb5.keytab" ];
+      };
+
+    systemd.services.rpc-statd =
+      { restartTriggers = [ nfsConfFile ];
+
+        preStart =
+          ''
+            mkdir -p /var/lib/nfs/{sm,sm.bak}
+          '';
+      };
+
+  };
+}
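
A sketch of an NFS client mount using the options above; the server name, export path, mount options and ID-mapping domain are placeholders.

    # configuration.nix (sketch)
    {
      fileSystems."/mnt/share" = {
        device = "fileserver:/export/share";  # placeholder host and export
        fsType = "nfs";
        options = [ "nfsvers=4.2" "noauto" "x-systemd.automount" ];
      };

      # Overrides the value otherwise derived from networking.domain (see above).
      services.nfs.idmapd.settings.General.Domain = "example.org";
    }
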
diff --git a/nixos/modules/tasks/filesystems/ntfs.nix b/nixos/modules/tasks/filesystems/ntfs.nix
new file mode 100644
index 00000000000..c40d2a1a80b
--- /dev/null
+++ b/nixos/modules/tasks/filesystems/ntfs.nix
@@ -0,0 +1,11 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+{
+  config = mkIf (any (fs: fs == "ntfs" || fs == "ntfs-3g") config.boot.supportedFilesystems) {
+
+    system.fsPackages = [ pkgs.ntfs3g ];
+
+  };
+}
diff --git a/nixos/modules/tasks/filesystems/reiserfs.nix b/nixos/modules/tasks/filesystems/reiserfs.nix
new file mode 100644
index 00000000000..ab4c43e2ab8
--- /dev/null
+++ b/nixos/modules/tasks/filesystems/reiserfs.nix
@@ -0,0 +1,25 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  inInitrd = any (fs: fs == "reiserfs") config.boot.initrd.supportedFilesystems;
+
+in
+
+{
+  config = mkIf (any (fs: fs == "reiserfs") config.boot.supportedFilesystems) {
+
+    system.fsPackages = [ pkgs.reiserfsprogs ];
+
+    boot.initrd.kernelModules = mkIf inInitrd [ "reiserfs" ];
+
+    boot.initrd.extraUtilsCommands = mkIf inInitrd
+      ''
+        copy_bin_and_libs ${pkgs.reiserfsprogs}/sbin/reiserfsck
+        ln -s reiserfsck $out/bin/fsck.reiserfs
+      '';
+
+  };
+}
diff --git a/nixos/modules/tasks/filesystems/unionfs-fuse.nix b/nixos/modules/tasks/filesystems/unionfs-fuse.nix
new file mode 100644
index 00000000000..f54f3559c34
--- /dev/null
+++ b/nixos/modules/tasks/filesystems/unionfs-fuse.nix
@@ -0,0 +1,32 @@
+{ config, pkgs, lib, ... }:
+
+{
+  config = lib.mkMerge [
+
+    (lib.mkIf (lib.any (fs: fs == "unionfs-fuse") config.boot.initrd.supportedFilesystems) {
+      boot.initrd.kernelModules = [ "fuse" ];
+
+      boot.initrd.extraUtilsCommands = ''
+        copy_bin_and_libs ${pkgs.fuse}/sbin/mount.fuse
+        copy_bin_and_libs ${pkgs.unionfs-fuse}/bin/unionfs
+        substitute ${pkgs.unionfs-fuse}/sbin/mount.unionfs-fuse $out/bin/mount.unionfs-fuse \
+          --replace '${pkgs.bash}/bin/bash' /bin/sh \
+          --replace '${pkgs.fuse}/sbin' /bin \
+          --replace '${pkgs.unionfs-fuse}/bin' /bin
+        chmod +x $out/bin/mount.unionfs-fuse
+      '';
+
+      boot.initrd.postDeviceCommands = ''
+          # Hacky!!! fuse hard-codes the path to mount
+          mkdir -p /nix/store/eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee-${pkgs.util-linux.name}-bin/bin
+          ln -s $(which mount) /nix/store/eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee-${pkgs.util-linux.name}-bin/bin
+          ln -s $(which umount) /nix/store/eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee-${pkgs.util-linux.name}-bin/bin
+        '';
+    })
+
+    (lib.mkIf (lib.any (fs: fs == "unionfs-fuse") config.boot.supportedFilesystems) {
+      system.fsPackages = [ pkgs.unionfs-fuse ];
+    })
+
+  ];
+}
diff --git a/nixos/modules/tasks/filesystems/vboxsf.nix b/nixos/modules/tasks/filesystems/vboxsf.nix
new file mode 100644
index 00000000000..5497194f6a8
--- /dev/null
+++ b/nixos/modules/tasks/filesystems/vboxsf.nix
@@ -0,0 +1,23 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  inInitrd = any (fs: fs == "vboxsf") config.boot.initrd.supportedFilesystems;
+
+  package = pkgs.runCommand "mount.vboxsf" { preferLocalBuild = true; } ''
+    mkdir -p $out/bin
+    cp ${pkgs.linuxPackages.virtualboxGuestAdditions}/bin/mount.vboxsf $out/bin
+  '';
+in
+
+{
+  config = mkIf (any (fs: fs == "vboxsf") config.boot.supportedFilesystems) {
+
+    system.fsPackages = [ package ];
+
+    boot.initrd.kernelModules = mkIf inInitrd [ "vboxsf" ];
+
+  };
+}
diff --git a/nixos/modules/tasks/filesystems/vfat.nix b/nixos/modules/tasks/filesystems/vfat.nix
new file mode 100644
index 00000000000..958e27ae8a3
--- /dev/null
+++ b/nixos/modules/tasks/filesystems/vfat.nix
@@ -0,0 +1,25 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  inInitrd = any (fs: fs == "vfat") config.boot.initrd.supportedFilesystems;
+
+in
+
+{
+  config = mkIf (any (fs: fs == "vfat") config.boot.supportedFilesystems) {
+
+    system.fsPackages = [ pkgs.dosfstools ];
+
+    boot.initrd.kernelModules = mkIf inInitrd [ "vfat" "nls_cp437" "nls_iso8859-1" ];
+
+    boot.initrd.extraUtilsCommands = mkIf inInitrd
+      ''
+        copy_bin_and_libs ${pkgs.dosfstools}/sbin/dosfsck
+        ln -sv dosfsck $out/bin/fsck.vfat
+      '';
+
+  };
+}
diff --git a/nixos/modules/tasks/filesystems/xfs.nix b/nixos/modules/tasks/filesystems/xfs.nix
new file mode 100644
index 00000000000..98038701ca5
--- /dev/null
+++ b/nixos/modules/tasks/filesystems/xfs.nix
@@ -0,0 +1,30 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  inInitrd = any (fs: fs == "xfs") config.boot.initrd.supportedFilesystems;
+
+in
+
+{
+  config = mkIf (any (fs: fs == "xfs") config.boot.supportedFilesystems) {
+
+    system.fsPackages = [ pkgs.xfsprogs.bin ];
+
+    boot.initrd.availableKernelModules = mkIf inInitrd [ "xfs" "crc32c" ];
+
+    boot.initrd.extraUtilsCommands = mkIf inInitrd
+      ''
+        copy_bin_and_libs ${pkgs.xfsprogs.bin}/bin/fsck.xfs
+        copy_bin_and_libs ${pkgs.xfsprogs.bin}/bin/xfs_repair
+      '';
+
+    # Trick just to set 'sh' after the extraUtils nuke-refs.
+    boot.initrd.extraUtilsCommandsTest = mkIf inInitrd
+      ''
+        sed -i -e 's,^#!.*,#!'$out/bin/sh, $out/bin/fsck.xfs
+      '';
+  };
+}
diff --git a/nixos/modules/tasks/filesystems/zfs.nix b/nixos/modules/tasks/filesystems/zfs.nix
new file mode 100644
index 00000000000..3bc0dedec00
--- /dev/null
+++ b/nixos/modules/tasks/filesystems/zfs.nix
@@ -0,0 +1,795 @@
+{ config, lib, options, pkgs, utils, ... }:
+#
+# TODO: zfs tunables
+
+with utils;
+with lib;
+
+let
+
+  cfgZfs = config.boot.zfs;
+  optZfs = options.boot.zfs;
+  cfgExpandOnBoot = config.services.zfs.expandOnBoot;
+  cfgSnapshots = config.services.zfs.autoSnapshot;
+  cfgSnapFlags = cfgSnapshots.flags;
+  cfgScrub = config.services.zfs.autoScrub;
+  cfgTrim = config.services.zfs.trim;
+  cfgZED = config.services.zfs.zed;
+
+  inInitrd = any (fs: fs == "zfs") config.boot.initrd.supportedFilesystems;
+  inSystem = any (fs: fs == "zfs") config.boot.supportedFilesystems;
+
+  autosnapPkg = pkgs.zfstools.override {
+    zfs = cfgZfs.package;
+  };
+
+  zfsAutoSnap = "${autosnapPkg}/bin/zfs-auto-snapshot";
+
+  datasetToPool = x: elemAt (splitString "/" x) 0;
+
+  fsToPool = fs: datasetToPool fs.device;
+
+  zfsFilesystems = filter (x: x.fsType == "zfs") config.system.build.fileSystems;
+
+  allPools = unique ((map fsToPool zfsFilesystems) ++ cfgZfs.extraPools);
+
+  rootPools = unique (map fsToPool (filter fsNeededForBoot zfsFilesystems));
+
+  dataPools = unique (filter (pool: !(elem pool rootPools)) allPools);
+
+  snapshotNames = [ "frequent" "hourly" "daily" "weekly" "monthly" ];
+
+  # When importing ZFS pools, there's one difficulty: These scripts may run
+  # before the backing devices (physical HDDs, etc.) of the pool have been
+  # scanned and initialized.
+  #
+  # An attempted import with all devices missing will just fail, and can be
+  # retried, but an import where e.g. two out of three disks in a three-way
+  # mirror are missing, will succeed. This is a problem: When the missing disks
+  # are later discovered, they won't be automatically set online, rendering the
+  # pool redundancy-less (and far slower) until such time as the system reboots.
+  #
+  # The solution is the below. poolReady checks the status of an un-imported
+  # pool, to see if *every* device is available -- in which case the pool will be
+  # in state ONLINE, as opposed to DEGRADED, FAULTED or MISSING.
+  #
+  # The import scripts then loop over this, waiting until the pool is ready or a
+  # sufficient amount of time has passed that we can assume it won't be. In the
+  # latter case it makes one last attempt at importing, allowing the system to
+  # (eventually) boot even with a degraded pool.
+  importLib = {zpoolCmd, awkCmd, cfgZfs}: ''
+    poolReady() {
+      pool="$1"
+      state="$("${zpoolCmd}" import 2>/dev/null | "${awkCmd}" "/pool: $pool/ { found = 1 }; /state:/ { if (found == 1) { print \$2; exit } }; END { if (found == 0) { print \"MISSING\" } }")"
+      if [[ "$state" = "ONLINE" ]]; then
+        return 0
+      else
+        echo "Pool $pool in state $state, waiting"
+        return 1
+      fi
+    }
+    poolImported() {
+      pool="$1"
+      "${zpoolCmd}" list "$pool" >/dev/null 2>/dev/null
+    }
+    poolImport() {
+      pool="$1"
+      "${zpoolCmd}" import -d "${cfgZfs.devNodes}" -N $ZFS_FORCE "$pool"
+    }
+  '';
+
+  zedConf = generators.toKeyValue {
+    mkKeyValue = generators.mkKeyValueDefault {
+      mkValueString = v:
+        if isInt           v then toString v
+        else if isString   v then "\"${v}\""
+        else if true  ==   v then "1"
+        else if false ==   v then "0"
+        else if isList     v then "\"" + (concatStringsSep " " v) + "\""
+        else err "this value is" (toString v);
+    } "=";
+  } cfgZED.settings;
+in
+
+{
+
+  imports = [
+    (mkRemovedOptionModule [ "boot" "zfs" "enableLegacyCrypto" ] "The corresponding package was removed from nixpkgs.")
+  ];
+
+  ###### interface
+
+  options = {
+    boot.zfs = {
+      package = mkOption {
+        readOnly = true;
+        type = types.package;
+        default = if config.boot.zfs.enableUnstable then pkgs.zfsUnstable else pkgs.zfs;
+        defaultText = literalExpression "if config.boot.zfs.enableUnstable then pkgs.zfsUnstable else pkgs.zfs";
+        description = "Configured ZFS userland tools package.";
+      };
+
+      enabled = mkOption {
+        readOnly = true;
+        type = types.bool;
+        default = inInitrd || inSystem;
+        defaultText = literalDocBook "<literal>true</literal> if ZFS filesystem support is enabled";
+        description = "True if ZFS filesystem support is enabled";
+      };
+
+      enableUnstable = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Use the unstable zfs package. This might be an option if the latest
+          kernel is not yet supported by a published release of ZFS. Enabling
+          this option will install a development version of ZFS on Linux. The
+          version will have already passed an extensive test suite, but it is
+          more likely to hit an undiscovered bug compared to running a released
+          version of ZFS on Linux.
+          '';
+      };
+
+      extraPools = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        example = [ "tank" "data" ];
+        description = ''
+          Name or GUID of extra ZFS pools that you wish to import during boot.
+
+          Usually this is not necessary. Instead, you should set the mountpoint property
+          of ZFS filesystems to <literal>legacy</literal> and add the ZFS filesystems to
+          NixOS's <option>fileSystems</option> option, which makes NixOS automatically
+          import the associated pool.
+
+          However, in some cases (e.g. if you have many filesystems) it may be preferable
+          to exclusively use ZFS commands to manage filesystems. If so, since NixOS/systemd
+          will not be managing those filesystems, you will need to specify the ZFS pool here
+          so that NixOS automatically imports it on every boot.
+        '';
+      };
+
+      devNodes = mkOption {
+        type = types.path;
+        default = "/dev/disk/by-id";
+        description = ''
+          Name of directory from which to import ZFS devices.
+
+          This should be a path under /dev containing stable names for all devices needed, as
+          import may fail if device nodes are renamed concurrently with a device failing.
+        '';
+      };
+
+      forceImportRoot = mkOption {
+        type = types.bool;
+        default = true;
+        description = ''
+          Forcibly import the ZFS root pool(s) during early boot.
+
+          This is enabled by default for backwards compatibility purposes, but it is highly
+          recommended to disable this option, as it bypasses some of the safeguards ZFS uses
+          to protect your ZFS pools.
+
+          If you set this option to <literal>false</literal> and NixOS subsequently fails to
+          boot because it cannot import the root pool, you should boot with the
+          <literal>zfs_force=1</literal> option as a kernel parameter (e.g. by manually
+          editing the kernel params in grub during boot). You should only need to do this
+          once.
+        '';
+      };
+
+      forceImportAll = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Forcibly import all ZFS pool(s).
+
+          If you set this option to <literal>false</literal> and NixOS subsequently fails to
+          import your non-root ZFS pool(s), you should manually import each pool with
+          "zpool import -f &lt;pool-name&gt;", and then reboot. You should only need to do
+          this once.
+        '';
+      };
+
+      requestEncryptionCredentials = mkOption {
+        type = types.either types.bool (types.listOf types.str);
+        default = true;
+        example = [ "tank" "data" ];
+        description = ''
+          If true, encryption keys or passwords for all encrypted datasets are
+          requested on import. To decrypt only selected datasets, supply a list
+          of dataset names instead. For root pools the encryption key can be
+          supplied either via an interactive prompt (keylocation=prompt) or
+          from a file (keylocation=file://).
+        '';
+      };
+    };
+
+    services.zfs.autoSnapshot = {
+      enable = mkOption {
+        default = false;
+        type = types.bool;
+        description = ''
+          Enable the (OpenSolaris-compatible) ZFS auto-snapshotting service.
+          Note that you must set the <literal>com.sun:auto-snapshot</literal>
+          property to <literal>true</literal> on all datasets which you wish
+          to auto-snapshot.
+
+          You can override a child dataset to use, or not use auto-snapshotting
+          by setting its flag with the given interval:
+          <literal>zfs set com.sun:auto-snapshot:weekly=false DATASET</literal>
+        '';
+      };
+
+      flags = mkOption {
+        default = "-k -p";
+        example = "-k -p --utc";
+        type = types.str;
+        description = ''
+          Flags to pass to the zfs-auto-snapshot command.
+
+          Run <literal>zfs-auto-snapshot</literal> (without any arguments) to
+          see available flags.
+
+          If it's not too inconvenient for snapshots to have timestamps in UTC,
+          it is suggested that you append <literal>--utc</literal> to the list
+          of default options (see example).
+
+          Otherwise, snapshot names can cause name conflicts or apparent time
+          reversals due to daylight saving time, timezone, or other date/time changes.
+        '';
+      };
+
+      frequent = mkOption {
+        default = 4;
+        type = types.int;
+        description = ''
+          Number of frequent (15-minute) auto-snapshots that you wish to keep.
+        '';
+      };
+
+      hourly = mkOption {
+        default = 24;
+        type = types.int;
+        description = ''
+          Number of hourly auto-snapshots that you wish to keep.
+        '';
+      };
+
+      daily = mkOption {
+        default = 7;
+        type = types.int;
+        description = ''
+          Number of daily auto-snapshots that you wish to keep.
+        '';
+      };
+
+      weekly = mkOption {
+        default = 4;
+        type = types.int;
+        description = ''
+          Number of weekly auto-snapshots that you wish to keep.
+        '';
+      };
+
+      monthly = mkOption {
+        default = 12;
+        type = types.int;
+        description = ''
+          Number of monthly auto-snapshots that you wish to keep.
+        '';
+      };
+    };
+
+    services.zfs.trim = {
+      enable = mkOption {
+        description = "Whether to enable periodic TRIM on all ZFS pools.";
+        default = true;
+        example = false;
+        type = types.bool;
+      };
+
+      interval = mkOption {
+        default = "weekly";
+        type = types.str;
+        example = "daily";
+        description = ''
+          How often we run trim. For most desktop and server systems
+          a sufficient trimming frequency is once a week.
+
+          The format is described in
+          <citerefentry><refentrytitle>systemd.time</refentrytitle>
+          <manvolnum>7</manvolnum></citerefentry>.
+        '';
+      };
+    };
+
+    services.zfs.autoScrub = {
+      enable = mkEnableOption "periodic scrubbing of ZFS pools";
+
+      interval = mkOption {
+        default = "Sun, 02:00";
+        type = types.str;
+        example = "daily";
+        description = ''
+          Systemd calendar expression when to scrub ZFS pools. See
+          <citerefentry><refentrytitle>systemd.time</refentrytitle>
+          <manvolnum>7</manvolnum></citerefentry>.
+        '';
+      };
+
+      pools = mkOption {
+        default = [];
+        type = types.listOf types.str;
+        example = [ "tank" ];
+        description = ''
+          List of ZFS pools to periodically scrub. If empty, all pools
+          will be scrubbed.
+        '';
+      };
+    };
+
+    services.zfs.expandOnBoot = mkOption {
+      type = types.either (types.enum [ "disabled" "all" ]) (types.listOf types.str);
+      default = "disabled";
+      example = [ "tank" "dozer" ];
+      description = ''
+        After importing, expand each device in the specified pools.
+
+        Set the value to the plain string "all" to expand all pools on boot:
+
+            services.zfs.expandOnBoot = "all";
+
+        or set the value to a list of pools to expand the disks of specific pools:
+
+            services.zfs.expandOnBoot = [ "tank" "dozer" ];
+      '';
+    };
+
+    services.zfs.zed = {
+      enableMail = mkEnableOption "ZED's ability to send emails" // {
+        default = cfgZfs.package.enableMail;
+        defaultText = literalExpression "config.${optZfs.package}.enableMail";
+      };
+
+      settings = mkOption {
+        type = with types; attrsOf (oneOf [ str int bool (listOf str) ]);
+        example = literalExpression ''
+          {
+            ZED_DEBUG_LOG = "/tmp/zed.debug.log";
+
+            ZED_EMAIL_ADDR = [ "root" ];
+            ZED_EMAIL_PROG = "mail";
+            ZED_EMAIL_OPTS = "-s '@SUBJECT@' @ADDRESS@";
+
+            ZED_NOTIFY_INTERVAL_SECS = 3600;
+            ZED_NOTIFY_VERBOSE = false;
+
+            ZED_USE_ENCLOSURE_LEDS = true;
+            ZED_SCRUB_AFTER_RESILVER = false;
+          }
+        '';
+        description = ''
+          ZFS Event Daemon /etc/zfs/zed.d/zed.rc content
+
+          See
+          <citerefentry><refentrytitle>zed</refentrytitle><manvolnum>8</manvolnum></citerefentry>
+          for details on ZED and the scripts in /etc/zfs/zed.d, which document the possible variables.
+        '';
+      };
+    };
+  };
+
+  ###### implementation
+
+  config = mkMerge [
+    (mkIf cfgZfs.enabled {
+      assertions = [
+        {
+          assertion = cfgZED.enableMail -> cfgZfs.package.enableMail;
+          message = ''
+            To allow ZED to send emails, ZFS needs to be configured to enable
+            this. To do so, one must override the `zfs` package and set
+            `enableMail` to true.
+          '';
+        }
+        {
+          assertion = config.networking.hostId != null;
+          message = "ZFS requires networking.hostId to be set";
+        }
+        {
+          assertion = !cfgZfs.forceImportAll || cfgZfs.forceImportRoot;
+          message = "If you enable boot.zfs.forceImportAll, you must also enable boot.zfs.forceImportRoot";
+        }
+      ];
+
+      boot = {
+        kernelModules = [ "zfs" ];
+
+        extraModulePackages = [
+          (if config.boot.zfs.enableUnstable then
+            config.boot.kernelPackages.zfsUnstable
+           else
+            config.boot.kernelPackages.zfs)
+        ];
+      };
+
+      boot.initrd = mkIf inInitrd {
+        kernelModules = [ "zfs" ] ++ optional (!cfgZfs.enableUnstable) "spl";
+        extraUtilsCommands =
+          ''
+            copy_bin_and_libs ${cfgZfs.package}/sbin/zfs
+            copy_bin_and_libs ${cfgZfs.package}/sbin/zdb
+            copy_bin_and_libs ${cfgZfs.package}/sbin/zpool
+          '';
+        extraUtilsCommandsTest = mkIf inInitrd
+          ''
+            $out/bin/zfs --help >/dev/null 2>&1
+            $out/bin/zpool --help >/dev/null 2>&1
+          '';
+        postDeviceCommands = concatStringsSep "\n" ([''
+            ZFS_FORCE="${optionalString cfgZfs.forceImportRoot "-f"}"
+
+            for o in $(cat /proc/cmdline); do
+              case $o in
+                zfs_force|zfs_force=1)
+                  ZFS_FORCE="-f"
+                  ;;
+              esac
+            done
+          ''] ++ [(importLib {
+            # See comments at importLib definition.
+            zpoolCmd = "zpool";
+            awkCmd = "awk";
+            inherit cfgZfs;
+          })] ++ (map (pool: ''
+            echo -n "importing root ZFS pool \"${pool}\"..."
+            # Retry the import until it succeeds, because the devices needed may not have been discovered yet.
+            if ! poolImported "${pool}"; then
+              for trial in `seq 1 60`; do
+                poolReady "${pool}" > /dev/null && msg="$(poolImport "${pool}" 2>&1)" && break
+                sleep 1
+                echo -n .
+              done
+              echo
+              if [[ -n "$msg" ]]; then
+                echo "$msg";
+              fi
+              poolImported "${pool}" || poolImport "${pool}"  # Try one last time, e.g. to import a degraded pool.
+            fi
+            ${if isBool cfgZfs.requestEncryptionCredentials
+              then optionalString cfgZfs.requestEncryptionCredentials ''
+                zfs load-key -a
+              ''
+              else concatMapStrings (fs: ''
+                zfs load-key ${fs}
+              '') cfgZfs.requestEncryptionCredentials}
+        '') rootPools));
+      };
+
+      # TODO FIXME: see https://github.com/NixOS/nixpkgs/pull/99386#issuecomment-798813567. To avoid breaking
+      # people's bootloaders, and because probably not everybody reads the release notes that thoroughly, also
+      # check inSystem here.
+      boot.loader.grub = mkIf (inInitrd || inSystem) {
+        zfsSupport = true;
+      };
+
+      services.zfs.zed.settings = {
+        ZED_EMAIL_PROG = mkIf cfgZED.enableMail (mkDefault "${pkgs.mailutils}/bin/mail");
+        PATH = lib.makeBinPath [
+          cfgZfs.package
+          pkgs.coreutils
+          pkgs.curl
+          pkgs.gawk
+          pkgs.gnugrep
+          pkgs.gnused
+          pkgs.nettools
+          pkgs.util-linux
+        ];
+      };
+
+      environment.etc = genAttrs
+        (map
+          (file: "zfs/zed.d/${file}")
+          [
+            "all-syslog.sh"
+            "pool_import-led.sh"
+            "resilver_finish-start-scrub.sh"
+            "statechange-led.sh"
+            "vdev_attach-led.sh"
+            "zed-functions.sh"
+            "data-notify.sh"
+            "resilver_finish-notify.sh"
+            "scrub_finish-notify.sh"
+            "statechange-notify.sh"
+            "vdev_clear-led.sh"
+          ]
+        )
+        (file: { source = "${cfgZfs.package}/etc/${file}"; })
+      // {
+        "zfs/zed.d/zed.rc".text = zedConf;
+        "zfs/zpool.d".source = "${cfgZfs.package}/etc/zfs/zpool.d/";
+      };
+
+      system.fsPackages = [ cfgZfs.package ]; # XXX: needed? zfs doesn't have (need) a fsck
+      environment.systemPackages = [ cfgZfs.package ]
+        ++ optional cfgSnapshots.enable autosnapPkg; # so the user can run the command to see flags
+
+      services.udev.packages = [ cfgZfs.package ]; # to hook zvol naming, etc.
+      systemd.packages = [ cfgZfs.package ];
+
+      systemd.services = let
+        getPoolFilesystems = pool:
+          filter (x: x.fsType == "zfs" && (fsToPool x) == pool) config.system.build.fileSystems;
+
+        getPoolMounts = pool:
+          let
+            mountPoint = fs: escapeSystemdPath fs.mountPoint;
+          in
+            map (x: "${mountPoint x}.mount") (getPoolFilesystems pool);
+
+        createImportService = pool:
+          nameValuePair "zfs-import-${pool}" {
+            description = "Import ZFS pool \"${pool}\"";
+            # we need systemd-udev-settle until https://github.com/zfsonlinux/zfs/pull/4943 is merged
+            requires = [ "systemd-udev-settle.service" ];
+            after = [
+              "systemd-udev-settle.service"
+              "systemd-modules-load.service"
+              "systemd-ask-password-console.service"
+            ];
+            wantedBy = (getPoolMounts pool) ++ [ "local-fs.target" ];
+            before = (getPoolMounts pool) ++ [ "local-fs.target" ];
+            unitConfig = {
+              DefaultDependencies = "no";
+            };
+            serviceConfig = {
+              Type = "oneshot";
+              RemainAfterExit = true;
+            };
+            environment.ZFS_FORCE = optionalString cfgZfs.forceImportAll "-f";
+            script = (importLib {
+              # See comments at importLib definition.
+              zpoolCmd = "${cfgZfs.package}/sbin/zpool";
+              awkCmd = "${pkgs.gawk}/bin/awk";
+              inherit cfgZfs;
+            }) + ''
+              poolImported "${pool}" && exit
+              echo -n "importing ZFS pool \"${pool}\"..."
+              # Retry the import until it succeeds, because the devices needed may not have been discovered yet.
+              for trial in `seq 1 60`; do
+                poolReady "${pool}" && poolImport "${pool}" && break
+                sleep 1
+              done
+              poolImported "${pool}" || poolImport "${pool}"  # Try one last time, e.g. to import a degraded pool.
+              if poolImported "${pool}"; then
+                ${optionalString (if isBool cfgZfs.requestEncryptionCredentials
+                                  then cfgZfs.requestEncryptionCredentials
+                                  else cfgZfs.requestEncryptionCredentials != []) ''
+                  ${cfgZfs.package}/sbin/zfs list -rHo name,keylocation ${pool} | while IFS=$'\t' read ds kl; do
+                    {
+                      ${optionalString (!isBool cfgZfs.requestEncryptionCredentials) ''
+                         if ! echo '${concatStringsSep "\n" cfgZfs.requestEncryptionCredentials}' | grep -qFx "$ds"; then
+                           continue
+                         fi
+                       ''}
+                    case "$kl" in
+                      none )
+                        ;;
+                      prompt )
+                        ${config.systemd.package}/bin/systemd-ask-password "Enter key for $ds:" | ${cfgZfs.package}/sbin/zfs load-key "$ds"
+                        ;;
+                      * )
+                        ${cfgZfs.package}/sbin/zfs load-key "$ds"
+                        ;;
+                    esac
+                    } < /dev/null # To protect while read ds kl in case anything reads stdin
+                  done
+                ''}
+                echo "Successfully imported ${pool}"
+              else
+                exit 1
+              fi
+            '';
+          };
+
+        # This forces a sync of any ZFS pools prior to poweroff, even if they're set
+        # to sync=disabled.
+        createSyncService = pool:
+          nameValuePair "zfs-sync-${pool}" {
+            description = "Sync ZFS pool \"${pool}\"";
+            wantedBy = [ "shutdown.target" ];
+            unitConfig = {
+              DefaultDependencies = false;
+            };
+            serviceConfig = {
+              Type = "oneshot";
+              RemainAfterExit = true;
+            };
+            script = ''
+              ${cfgZfs.package}/sbin/zfs set nixos:shutdown-time="$(date)" "${pool}"
+            '';
+          };
+
+        createZfsService = serv:
+          nameValuePair serv {
+            after = [ "systemd-modules-load.service" ];
+            wantedBy = [ "zfs.target" ];
+          };
+
+      in listToAttrs (map createImportService dataPools ++
+                      map createSyncService allPools ++
+                      map createZfsService [ "zfs-mount" "zfs-share" "zfs-zed" ]);
+
+      systemd.targets.zfs-import =
+        let
+          services = map (pool: "zfs-import-${pool}.service") dataPools;
+        in
+          {
+            requires = services;
+            after = services;
+            wantedBy = [ "zfs.target" ];
+          };
+
+      systemd.targets.zfs.wantedBy = [ "multi-user.target" ];
+    })
+
+    (mkIf (cfgZfs.enabled && cfgExpandOnBoot != "disabled") {
+      systemd.services."zpool-expand@" = {
+        description = "Expand ZFS pools";
+        after = [ "zfs.target" ];
+
+        serviceConfig = {
+          Type = "oneshot";
+          RemainAfterExit = true;
+        };
+
+        scriptArgs = "%i";
+        path = [ pkgs.gawk cfgZfs.package ];
+
+        # ZFS has no way of enumerating just the devices in a pool in a form
+        # that 'zpool online -e' accepts. Thus, we use a somewhat unusual
+        # approach: a 'zpool status -c' script that tags device lines with a
+        # magic identifier so they can be picked out.
+        # See: https://github.com/openzfs/zfs/issues/12505
+        script = let
+          # This UUID has been chosen at random and is to provide a
+          # collision-proof, predictable token to search for
+          magicIdentifier = "NIXOS-ZFS-ZPOOL-DEVICE-IDENTIFIER-37108bec-aff6-4b58-9e5e-53c7c9766f05";
+          zpoolScripts = pkgs.writeShellScriptBin "device-highlighter" ''
+            echo "${magicIdentifier}"
+          '';
+        in ''
+          pool=$1
+
+          echo "Expanding all devices for $pool."
+
+          # Make our device-highlighter script available via ZPOOL_SCRIPTS_PATH
+          export ZPOOL_SCRIPTS_PATH=${zpoolScripts}/bin
+
+          # Enable running our precisely specified zpool script as root
+          export ZPOOL_SCRIPTS_AS_ROOT=1
+
+          devices() (
+            zpool status -c device-highlighter "$pool" \
+             | awk '($2 == "ONLINE" && $6 == "${magicIdentifier}") { print $1; }'
+          )
+
+          for device in $(devices); do
+            echo "Attempting to expand $device of $pool..."
+            if ! zpool online -e "$pool" "$device"; then
+              echo "Failed to expand '$device' of '$pool'."
+            fi
+          done
+        '';
+      };
+
+      systemd.services."zpool-expand-pools" =
+        let
+          # Create a string, to be interpolated in a bash script
+          # which enumerates all of the pools to expand.
+          # If the option is set to the string "all", we want to dynamically
+          # expand every pool. Otherwise we want to enumerate
+          # just the specifically provided list of pools.
+          poolListProvider = if cfgExpandOnBoot == "all"
+            then "$(zpool list -H | awk '{print $1}')"
+            else lib.escapeShellArgs cfgExpandOnBoot;
+        in
+        {
+          description = "Expand specified ZFS pools";
+          wantedBy = [ "default.target" ];
+          after = [ "zfs.target" ];
+
+          serviceConfig = {
+            Type = "oneshot";
+            RemainAfterExit = true;
+          };
+
+          path = [ pkgs.gawk cfgZfs.package ];
+
+          script = ''
+            for pool in ${poolListProvider}; do
+              systemctl start --no-block "zpool-expand@$pool"
+            done
+          '';
+        };
+    })
+
+    (mkIf (cfgZfs.enabled && cfgSnapshots.enable) {
+      systemd.services = let
+                           descr = name: if name == "frequent" then "15 mins"
+                                    else if name == "hourly" then "hour"
+                                    else if name == "daily" then "day"
+                                    else if name == "weekly" then "week"
+                                    else if name == "monthly" then "month"
+                                    else throw "unknown snapshot name";
+                           numSnapshots = name: builtins.getAttr name cfgSnapshots;
+                         in builtins.listToAttrs (map (snapName:
+                              {
+                                name = "zfs-snapshot-${snapName}";
+                                value = {
+                                  description = "ZFS auto-snapshotting every ${descr snapName}";
+                                  after = [ "zfs-import.target" ];
+                                  serviceConfig = {
+                                    Type = "oneshot";
+                                    ExecStart = "${zfsAutoSnap} ${cfgSnapFlags} ${snapName} ${toString (numSnapshots snapName)}";
+                                  };
+                                  restartIfChanged = false;
+                                };
+                              }) snapshotNames);
+
+      systemd.timers = let
+                         timer = name: if name == "frequent" then "*:0,15,30,45" else name;
+                       in builtins.listToAttrs (map (snapName:
+                            {
+                              name = "zfs-snapshot-${snapName}";
+                              value = {
+                                wantedBy = [ "timers.target" ];
+                                timerConfig = {
+                                  OnCalendar = timer snapName;
+                                  Persistent = "yes";
+                                };
+                              };
+                            }) snapshotNames);
+    })
+
+    (mkIf (cfgZfs.enabled && cfgScrub.enable) {
+      systemd.services.zfs-scrub = {
+        description = "ZFS pools scrubbing";
+        after = [ "zfs-import.target" ];
+        serviceConfig = {
+          Type = "oneshot";
+        };
+        script = ''
+          ${cfgZfs.package}/bin/zpool scrub ${
+            if cfgScrub.pools != [] then
+              (concatStringsSep " " cfgScrub.pools)
+            else
+              "$(${cfgZfs.package}/bin/zpool list -H -o name)"
+            }
+        '';
+      };
+
+      systemd.timers.zfs-scrub = {
+        wantedBy = [ "timers.target" ];
+        after = [ "multi-user.target" ]; # Apparently scrubbing before boot is complete hangs the system? #53583
+        timerConfig = {
+          OnCalendar = cfgScrub.interval;
+          Persistent = "yes";
+        };
+      };
+    })
+
+    (mkIf (cfgZfs.enabled && cfgTrim.enable) {
+      systemd.services.zpool-trim = {
+        description = "ZFS pools trim";
+        after = [ "zfs-import.target" ];
+        path = [ cfgZfs.package ];
+        startAt = cfgTrim.interval;
+        # By default we ignore errors returned by the trim command, in case:
+        # - HDDs are mixed with SSDs
+        # - An SSD in a pool is already being trimmed.
+        # - There are only HDDs, and a failing unit would leave the system in a degraded state.
+        serviceConfig.ExecStart = "${pkgs.runtimeShell} -c 'for pool in $(zpool list -H -o name); do zpool trim $pool;  done || true' ";
+      };
+
+      systemd.timers.zpool-trim.timerConfig.Persistent = "yes";
+    })
+  ];
+}
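
Finally, a rough sketch tying several of the ZFS options above together. Pool and dataset names and the hostId value are placeholders, not recommendations.

    # configuration.nix (sketch)
    {
      networking.hostId = "8425e349";                  # required by the assertion above; example value

      boot.zfs.extraPools = [ "tank" ];                # import a pool not referenced in fileSystems
      boot.zfs.forceImportRoot = false;                # recommended once the setup is known to work
      boot.zfs.requestEncryptionCredentials = [ "tank/secret" ];

      services.zfs.autoSnapshot = {
        enable = true;
        flags = "-k -p --utc";                         # --utc as suggested in the option description
      };
      services.zfs.autoScrub.enable = true;
      services.zfs.trim.interval = "weekly";
      services.zfs.expandOnBoot = [ "tank" ];
    }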