Diffstat (limited to 'nixos/modules/services/network-filesystems')
-rw-r--r--  nixos/modules/services/network-filesystems/cachefilesd.nix             63
-rw-r--r--  nixos/modules/services/network-filesystems/ceph.nix                   406
-rw-r--r--  nixos/modules/services/network-filesystems/davfs2.nix                  93
-rw-r--r--  nixos/modules/services/network-filesystems/diod.nix                   159
-rw-r--r--  nixos/modules/services/network-filesystems/drbd.nix                    63
-rw-r--r--  nixos/modules/services/network-filesystems/glusterfs.nix              208
-rw-r--r--  nixos/modules/services/network-filesystems/ipfs.nix                   311
-rw-r--r--  nixos/modules/services/network-filesystems/kbfs.nix                   118
-rw-r--r--  nixos/modules/services/network-filesystems/litestream/default.nix     100
-rw-r--r--  nixos/modules/services/network-filesystems/litestream/litestream.xml   65
-rw-r--r--  nixos/modules/services/network-filesystems/moosefs.nix                249
-rw-r--r--  nixos/modules/services/network-filesystems/netatalk.nix                97
-rw-r--r--  nixos/modules/services/network-filesystems/nfsd.nix                   175
-rw-r--r--  nixos/modules/services/network-filesystems/openafs/client.nix         252
-rw-r--r--  nixos/modules/services/network-filesystems/openafs/lib.nix             33
-rw-r--r--  nixos/modules/services/network-filesystems/openafs/server.nix         269
-rw-r--r--  nixos/modules/services/network-filesystems/orangefs/client.nix         96
-rw-r--r--  nixos/modules/services/network-filesystems/orangefs/server.nix        225
-rw-r--r--  nixos/modules/services/network-filesystems/rsyncd.nix                 128
-rw-r--r--  nixos/modules/services/network-filesystems/samba-wsdd.nix             124
-rw-r--r--  nixos/modules/services/network-filesystems/samba.nix                  252
-rw-r--r--  nixos/modules/services/network-filesystems/tahoe.nix                  366
-rw-r--r--  nixos/modules/services/network-filesystems/u9fs.nix                    78
-rw-r--r--  nixos/modules/services/network-filesystems/webdav-server-rs.nix       144
-rw-r--r--  nixos/modules/services/network-filesystems/webdav.nix                 107
-rw-r--r--  nixos/modules/services/network-filesystems/xtreemfs.nix               495
-rw-r--r--  nixos/modules/services/network-filesystems/yandex-disk.nix            116
27 files changed, 4792 insertions, 0 deletions
diff --git a/nixos/modules/services/network-filesystems/cachefilesd.nix b/nixos/modules/services/network-filesystems/cachefilesd.nix
new file mode 100644
index 00000000000..229c9665419
--- /dev/null
+++ b/nixos/modules/services/network-filesystems/cachefilesd.nix
@@ -0,0 +1,63 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.cachefilesd;
+
+  cfgFile = pkgs.writeText "cachefilesd.conf" ''
+    dir ${cfg.cacheDir}
+    ${cfg.extraConfig}
+  '';
+
+in
+
+{
+  options = {
+    services.cachefilesd = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = "Whether to enable cachefilesd network filesystems caching daemon.";
+      };
+
+      cacheDir = mkOption {
+        type = types.str;
+        default = "/var/cache/fscache";
+        description = "Directory to contain filesystem cache.";
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        example = "brun 10%";
+        description = "Additional configuration file entries. See cachefilesd.conf(5) for more information.";
+      };
+
+    };
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    boot.kernelModules = [ "cachefiles" ];
+
+    systemd.services.cachefilesd = {
+      description = "Local network file caching management daemon";
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        Type = "exec";
+        ExecStart = "${pkgs.cachefilesd}/bin/cachefilesd -n -f ${cfgFile}";
+        Restart = "on-failure";
+        PrivateTmp = true;
+      };
+    };
+
+    systemd.tmpfiles.rules = [
+      "d ${cfg.cacheDir} 0700 root root - -"
+    ];
+  };
+}
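As a usage sketch (not part of this diff), the module above could be enabled from a NixOS configuration as follows; the brun/bcull culling thresholds are illustrative values documented in cachefilesd.conf(5):

  {
    services.cachefilesd = {
      enable = true;
      cacheDir = "/var/cache/fscache";
      extraConfig = ''
        brun 10%
        bcull 7%
      '';
    };
  }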
diff --git a/nixos/modules/services/network-filesystems/ceph.nix b/nixos/modules/services/network-filesystems/ceph.nix
new file mode 100644
index 00000000000..7a1444decaf
--- /dev/null
+++ b/nixos/modules/services/network-filesystems/ceph.nix
@@ -0,0 +1,406 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg  = config.services.ceph;
+
+  # function that translates "camelCaseOptions" to "camel case options", credits to tilpner in #nixos@freenode
+  expandCamelCase = replaceStrings upperChars (map (s: " ${s}") lowerChars);
+  expandCamelCaseAttrs = mapAttrs' (name: value: nameValuePair (expandCamelCase name) value);
+
+  makeServices = (daemonType: daemonIds:
+    mkMerge (map (daemonId:
+      { "ceph-${daemonType}-${daemonId}" = makeService daemonType daemonId cfg.global.clusterName pkgs.ceph; })
+      daemonIds));
+
+  makeService = (daemonType: daemonId: clusterName: ceph:
+    let
+      stateDirectory = "ceph/${if daemonType == "rgw" then "radosgw" else daemonType}/${clusterName}-${daemonId}"; in {
+    enable = true;
+    description = "Ceph ${builtins.replaceStrings lowerChars upperChars daemonType} daemon ${daemonId}";
+    after = [ "network-online.target" "time-sync.target" ] ++ optional (daemonType == "osd") "ceph-mon.target";
+    wants = [ "network-online.target" "time-sync.target" ];
+    partOf = [ "ceph-${daemonType}.target" ];
+    wantedBy = [ "ceph-${daemonType}.target" ];
+
+    path = [ pkgs.getopt ];
+
+    # Don't start services that are not yet initialized
+    unitConfig.ConditionPathExists = "/var/lib/${stateDirectory}/keyring";
+    startLimitBurst =
+      if daemonType == "osd" then 30 else if lib.elem daemonType ["mgr" "mds"] then 3 else 5;
+    startLimitIntervalSec = 60 * 30;  # 30 mins
+
+    serviceConfig = {
+      LimitNOFILE = 1048576;
+      LimitNPROC = 1048576;
+      Environment = "CLUSTER=${clusterName}";
+      ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+      PrivateDevices = "yes";
+      PrivateTmp = "true";
+      ProtectHome = "true";
+      ProtectSystem = "full";
+      Restart = "on-failure";
+      StateDirectory = stateDirectory;
+      User = "ceph";
+      Group = if daemonType == "osd" then "disk" else "ceph";
+      ExecStart = ''${ceph.out}/bin/${if daemonType == "rgw" then "radosgw" else "ceph-${daemonType}"} \
+                    -f --cluster ${clusterName} --id ${daemonId}'';
+    } // optionalAttrs (daemonType == "osd") {
+      ExecStartPre = "${ceph.lib}/libexec/ceph/ceph-osd-prestart.sh --id ${daemonId} --cluster ${clusterName}";
+      RestartSec = "20s";
+      PrivateDevices = "no"; # osd needs disk access
+    } // optionalAttrs ( daemonType == "mon") {
+      RestartSec = "10";
+    };
+  });
+
+  makeTarget = (daemonType:
+    {
+      "ceph-${daemonType}" = {
+        description = "Ceph target allowing to start/stop all ceph-${daemonType} services at once";
+        partOf = [ "ceph.target" ];
+        wantedBy = [ "ceph.target" ];
+        before = [ "ceph.target" ];
+        unitConfig.StopWhenUnneeded = true;
+      };
+    }
+  );
+in
+{
+  options.services.ceph = {
+    # Ceph has a monolithic configuration file but different sections for
+    # each daemon, a separate client section and a global section
+    enable = mkEnableOption "Ceph global configuration";
+
+    global = {
+      fsid = mkOption {
+        type = types.str;
+        example = ''
+          433a2193-4f8a-47a0-95d2-209d7ca2cca5
+        '';
+        description = ''
+          Filesystem ID, a generated UUID. It must be generated and set before
+          attempting to start a cluster.
+        '';
+      };
+
+      clusterName = mkOption {
+        type = types.str;
+        default = "ceph";
+        description = ''
+          Name of cluster
+        '';
+      };
+
+      mgrModulePath = mkOption {
+        type = types.path;
+        default = "${pkgs.ceph.lib}/lib/ceph/mgr";
+        defaultText = literalExpression ''"''${pkgs.ceph.lib}/lib/ceph/mgr"'';
+        description = ''
+          Path at which to find ceph-mgr modules.
+        '';
+      };
+
+      monInitialMembers = mkOption {
+        type = with types; nullOr commas;
+        default = null;
+        example = ''
+          node0, node1, node2
+        '';
+        description = ''
+          List of hosts that will be used as monitors at startup.
+        '';
+      };
+
+      monHost = mkOption {
+        type = with types; nullOr commas;
+        default = null;
+        example = ''
+          10.10.0.1, 10.10.0.2, 10.10.0.3
+        '';
+        description = ''
+          List of hostname shortnames/IP addresses of the initial monitors.
+        '';
+      };
+
+      maxOpenFiles = mkOption {
+        type = types.int;
+        default = 131072;
+        description = ''
+          Max open files for each OSD daemon.
+        '';
+      };
+
+      authClusterRequired = mkOption {
+        type = types.enum [ "cephx" "none" ];
+        default = "cephx";
+        description = ''
+          Requires daemons to authenticate with each other within the cluster.
+        '';
+      };
+
+      authServiceRequired = mkOption {
+        type = types.enum [ "cephx" "none" ];
+        default = "cephx";
+        description = ''
+          Enables requiring clients to authenticate with the cluster to access services in the cluster (e.g. radosgw, mds or osd).
+        '';
+      };
+
+      authClientRequired = mkOption {
+        type = types.enum [ "cephx" "none" ];
+        default = "cephx";
+        description = ''
+          Enables requiring the cluster to authenticate itself to the client.
+        '';
+      };
+
+      publicNetwork = mkOption {
+        type = with types; nullOr commas;
+        default = null;
+        example = ''
+          10.20.0.0/24, 192.168.1.0/24
+        '';
+        description = ''
+          A comma-separated list of subnets that will be used as public networks in the cluster.
+        '';
+      };
+
+      clusterNetwork = mkOption {
+        type = with types; nullOr commas;
+        default = null;
+        example = ''
+          10.10.0.0/24, 192.168.0.0/24
+        '';
+        description = ''
+          A comma-separated list of subnets that will be used as cluster networks in the cluster.
+        '';
+      };
+
+      rgwMimeTypesFile = mkOption {
+        type = with types; nullOr path;
+        default = "${pkgs.mailcap}/etc/mime.types";
+        defaultText = literalExpression ''"''${pkgs.mailcap}/etc/mime.types"'';
+        description = ''
+          Path to mime types used by radosgw.
+        '';
+      };
+    };
+
+    extraConfig = mkOption {
+      type = with types; attrsOf str;
+      default = {};
+      example = {
+        "ms bind ipv6" = "true";
+      };
+      description = ''
+        Extra configuration to add to the global section. Use for setting values that are common for all daemons in the cluster.
+      '';
+    };
+
+    mgr = {
+      enable = mkEnableOption "Ceph MGR daemon";
+      daemons = mkOption {
+        type = with types; listOf str;
+        default = [];
+        example = [ "name1" "name2" ];
+        description = ''
+          A list of names for manager daemons that should have a service created. The names correspond
+          to the id part in ceph, i.e. [ "name1" ] would result in mgr.name1.
+        '';
+      };
+      extraConfig = mkOption {
+        type = with types; attrsOf str;
+        default = {};
+        description = ''
+          Extra configuration to add to the global section for manager daemons.
+        '';
+      };
+    };
+
+    mon = {
+      enable = mkEnableOption "Ceph MON daemon";
+      daemons = mkOption {
+        type = with types; listOf str;
+        default = [];
+        example = [ "name1" "name2" ];
+        description = ''
+          A list of monitor daemons that should have a service created. The names correspond
+          to the id part in ceph, i.e. [ "name1" ] would result in mon.name1.
+        '';
+      };
+      extraConfig = mkOption {
+        type = with types; attrsOf str;
+        default = {};
+        description = ''
+          Extra configuration to add to the monitor section.
+        '';
+      };
+    };
+
+    osd = {
+      enable = mkEnableOption "Ceph OSD daemon";
+      daemons = mkOption {
+        type = with types; listOf str;
+        default = [];
+        example = [ "name1" "name2" ];
+        description = ''
+          A list of OSD daemons that should have a service created. The names correspond
+          to the id part in ceph, i.e. [ "name1" ] would result in osd.name1.
+        '';
+      };
+
+      extraConfig = mkOption {
+        type = with types; attrsOf str;
+        default = {
+          "osd journal size" = "10000";
+          "osd pool default size" = "3";
+          "osd pool default min size" = "2";
+          "osd pool default pg num" = "200";
+          "osd pool default pgp num" = "200";
+          "osd crush chooseleaf type" = "1";
+        };
+        description = ''
+          Extra configuration to add to the OSD section.
+        '';
+      };
+    };
+
+    mds = {
+      enable = mkEnableOption "Ceph MDS daemon";
+      daemons = mkOption {
+        type = with types; listOf str;
+        default = [];
+        example = [ "name1" "name2" ];
+        description = ''
+          A list of metadata service daemons that should have a service created. The names correspond
+          to the id part in ceph, i.e. [ "name1" ] would result in mds.name1.
+        '';
+      };
+      extraConfig = mkOption {
+        type = with types; attrsOf str;
+        default = {};
+        description = ''
+          Extra configuration to add to the MDS section.
+        '';
+      };
+    };
+
+    rgw = {
+      enable = mkEnableOption "Ceph RadosGW daemon";
+      daemons = mkOption {
+        type = with types; listOf str;
+        default = [];
+        example = [ "name1" "name2" ];
+        description = ''
+          A list of rados gateway daemons that should have a service created. The names correspond
+          to the id part in ceph, i.e. [ "name1" ] would result in client.name1. Unlike OSD, MGR or
+          MON daemons, radosgw daemons are not members of the cluster; they are simply Ceph daemons
+          that use the cluster as a backend.
+        '';
+      };
+    };
+
+    client = {
+      enable = mkEnableOption "Ceph client configuration";
+      extraConfig = mkOption {
+        type = with types; attrsOf (attrsOf str);
+        default = {};
+        example = literalExpression ''
+          {
+            # This would create a section for a radosgw daemon named node0 and related
+            # configuration for it
+            "client.radosgw.node0" = { "some config option" = "true"; };
+          };
+        '';
+        description = ''
+          Extra configuration to add to the client section. Configuration for rados gateways
+          would be added here, with their own sections, see example.
+        '';
+      };
+    };
+  };
+
+  config = mkIf config.services.ceph.enable {
+    assertions = [
+      { assertion = cfg.global.fsid != "";
+        message = "fsid has to be set to a valid uuid for the cluster to function";
+      }
+      { assertion = cfg.mon.enable == true -> cfg.mon.daemons != [];
+        message = "have to set id of atleast one MON if you're going to enable Monitor";
+      }
+      { assertion = cfg.mds.enable == true -> cfg.mds.daemons != [];
+        message = "have to set id of atleast one MDS if you're going to enable Metadata Service";
+      }
+      { assertion = cfg.osd.enable == true -> cfg.osd.daemons != [];
+        message = "have to set id of atleast one OSD if you're going to enable OSD";
+      }
+      { assertion = cfg.mgr.enable == true -> cfg.mgr.daemons != [];
+        message = "have to set id of atleast one MGR if you're going to enable MGR";
+      }
+    ];
+
+    warnings = optional (cfg.global.monInitialMembers == null)
+      "Not setting up a list of members in monInitialMembers requires that you set the host variable for each mon daemon or else the cluster won't function";
+
+    environment.etc."ceph/ceph.conf".text = let
+      # Merge the extraConfig set for mgr daemons, as mgr don't have their own section
+      globalSection = expandCamelCaseAttrs (cfg.global // cfg.extraConfig // optionalAttrs cfg.mgr.enable cfg.mgr.extraConfig);
+      # Remove all name-value pairs with null values from the attribute set to avoid making empty sections in the ceph.conf
+      globalSection' = filterAttrs (name: value: value != null) globalSection;
+      totalConfig = {
+          global = globalSection';
+        } // optionalAttrs (cfg.mon.enable && cfg.mon.extraConfig != {}) { mon = cfg.mon.extraConfig; }
+          // optionalAttrs (cfg.mds.enable && cfg.mds.extraConfig != {}) { mds = cfg.mds.extraConfig; }
+          // optionalAttrs (cfg.osd.enable && cfg.osd.extraConfig != {}) { osd = cfg.osd.extraConfig; }
+          // optionalAttrs (cfg.client.enable && cfg.client.extraConfig != {})  cfg.client.extraConfig;
+      in
+        generators.toINI {} totalConfig;
+
+    users.users.ceph = {
+      uid = config.ids.uids.ceph;
+      description = "Ceph daemon user";
+      group = "ceph";
+      extraGroups = [ "disk" ];
+    };
+
+    users.groups.ceph = {
+      gid = config.ids.gids.ceph;
+    };
+
+    systemd.services = let
+      services = []
+        ++ optional cfg.mon.enable (makeServices "mon" cfg.mon.daemons)
+        ++ optional cfg.mds.enable (makeServices "mds" cfg.mds.daemons)
+        ++ optional cfg.osd.enable (makeServices "osd" cfg.osd.daemons)
+        ++ optional cfg.rgw.enable (makeServices "rgw" cfg.rgw.daemons)
+        ++ optional cfg.mgr.enable (makeServices "mgr" cfg.mgr.daemons);
+      in
+        mkMerge services;
+
+    systemd.targets = let
+      targets = [
+        { ceph = {
+          description = "Ceph target allowing to start/stop all ceph service instances at once";
+          wantedBy = [ "multi-user.target" ];
+          unitConfig.StopWhenUnneeded = true;
+        }; } ]
+        ++ optional cfg.mon.enable (makeTarget "mon")
+        ++ optional cfg.mds.enable (makeTarget "mds")
+        ++ optional cfg.osd.enable (makeTarget "osd")
+        ++ optional cfg.rgw.enable (makeTarget "rgw")
+        ++ optional cfg.mgr.enable (makeTarget "mgr");
+      in
+        mkMerge targets;
+
+    systemd.tmpfiles.rules = [
+      "d /etc/ceph - ceph ceph - -"
+      "d /run/ceph 0770 ceph ceph -"
+      "d /var/lib/ceph - ceph ceph - -"]
+    ++ optionals cfg.mgr.enable [ "d /var/lib/ceph/mgr - ceph ceph - -"]
+    ++ optionals cfg.mon.enable [ "d /var/lib/ceph/mon - ceph ceph - -"]
+    ++ optionals cfg.osd.enable [ "d /var/lib/ceph/osd - ceph ceph - -"];
+  };
+}
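A hedged sketch of how these options compose (the fsid, host names and daemon ids below are hypothetical). Note that each unit carries ConditionPathExists on its keyring, so daemons only start once their state directories have been initialized with the ceph tooling:

  {
    services.ceph = {
      enable = true;
      global = {
        fsid = "433a2193-4f8a-47a0-95d2-209d7ca2cca5";
        monInitialMembers = "node0";
        monHost = "10.10.0.1";
      };
      mon = { enable = true; daemons = [ "node0" ]; };
      mgr = { enable = true; daemons = [ "node0" ]; };
      osd = { enable = true; daemons = [ "0" "1" ]; };
    };
  }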
diff --git a/nixos/modules/services/network-filesystems/davfs2.nix b/nixos/modules/services/network-filesystems/davfs2.nix
new file mode 100644
index 00000000000..8cf314fe63a
--- /dev/null
+++ b/nixos/modules/services/network-filesystems/davfs2.nix
@@ -0,0 +1,93 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.davfs2;
+  cfgFile = pkgs.writeText "davfs2.conf" ''
+    dav_user ${cfg.davUser}
+    dav_group ${cfg.davGroup}
+    ${cfg.extraConfig}
+  '';
+in
+{
+  options.services.davfs2 = {
+    enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = ''
+        Whether to enable davfs2.
+      '';
+    };
+
+    davUser = mkOption {
+      type = types.str;
+      default = "davfs2";
+      description = ''
+        When invoked by root, the mount.davfs daemon will run as this user.
+        Value must be given as name, not as numerical id.
+      '';
+    };
+
+    davGroup = mkOption {
+      type = types.str;
+      default = "davfs2";
+      description = ''
+        The group of the running mount.davfs daemon. Ordinary users must be
+        member of this group in order to mount a davfs2 file system. Value must
+        be given as name, not as numerical id.
+      '';
+    };
+
+    extraConfig = mkOption {
+      type = types.lines;
+      default = "";
+      example = ''
+        kernel_fs coda
+        proxy foo.bar:8080
+        use_locks 0
+      '';
+      description = ''
+        Extra lines appended to the configuration of davfs2.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ pkgs.davfs2 ];
+    environment.etc."davfs2/davfs2.conf".source = cfgFile;
+
+    users.groups = optionalAttrs (cfg.davGroup == "davfs2") {
+      davfs2.gid = config.ids.gids.davfs2;
+    };
+
+    users.users = optionalAttrs (cfg.davUser == "davfs2") {
+      davfs2 = {
+        createHome = false;
+        group = cfg.davGroup;
+        uid = config.ids.uids.davfs2;
+        description = "davfs2 user";
+      };
+    };
+
+    security.wrappers."mount.davfs" = {
+      program = "mount.davfs";
+      source = "${pkgs.davfs2}/bin/mount.davfs";
+      owner = "root";
+      group = cfg.davGroup;
+      setuid = true;
+      permissions = "u+rx,g+x";
+    };
+
+    security.wrappers."umount.davfs" = {
+      program = "umount.davfs";
+      source = "${pkgs.davfs2}/bin/umount.davfs";
+      owner = "root";
+      group = cfg.davGroup;
+      setuid = true;
+      permissions = "u+rx,g+x";
+    };
+
+  };
+
+}
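Since the module installs setuid mount.davfs/umount.davfs wrappers owned by the davfs2 group, an ordinary user in that group can mount WebDAV shares. A sketch under assumed names (the user alice and the server URL are hypothetical):

  {
    services.davfs2.enable = true;
    users.users.alice.extraGroups = [ "davfs2" ];

    fileSystems."/mnt/dav" = {
      device = "https://dav.example.com/files";
      fsType = "davfs";
      options = [ "noauto" "user" "rw" ];
    };
  }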
diff --git a/nixos/modules/services/network-filesystems/diod.nix b/nixos/modules/services/network-filesystems/diod.nix
new file mode 100644
index 00000000000..063bae6ddb1
--- /dev/null
+++ b/nixos/modules/services/network-filesystems/diod.nix
@@ -0,0 +1,159 @@
+{ config, lib, pkgs, ... }:
+with lib;
+let
+  cfg = config.services.diod;
+
+  diodBool = b: if b then "1" else "0";
+
+  diodConfig = pkgs.writeText "diod.conf" ''
+    allsquash = ${diodBool cfg.allsquash}
+    auth_required = ${diodBool cfg.authRequired}
+    exportall = ${diodBool cfg.exportall}
+    exportopts = "${concatStringsSep "," cfg.exportopts}"
+    exports = { ${concatStringsSep ", " (map (s: ''"${s}"'' ) cfg.exports)} }
+    listen = { ${concatStringsSep ", " (map (s: ''"${s}"'' ) cfg.listen)} }
+    logdest = "${cfg.logdest}"
+    nwthreads = ${toString cfg.nwthreads}
+    squashuser = "${cfg.squashuser}"
+    statfs_passthru = ${diodBool cfg.statfsPassthru}
+    userdb = ${diodBool cfg.userdb}
+    ${cfg.extraConfig}
+  '';
+in
+{
+  options = {
+    services.diod = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = "Whether to enable the diod 9P file server.";
+      };
+
+      listen = mkOption {
+        type = types.listOf types.str;
+        default = [ "0.0.0.0:564" ];
+        description = ''
+          [ "IP:PORT" [,"IP:PORT",...] ]
+          List the interfaces and ports that diod should listen on.
+        '';
+      };
+
+      exports = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        description = ''
+          List the file systems that clients will be allowed to mount. All paths should
+          be fully qualified. The exports table can include two types of element:
+          a string element (as above),
+          or an alternate table element form { path="/path", opts="ro" }.
+          In the alternate form, the (optional) opts attribute is a comma-separated list
+          of export options. The two table element forms can be mixed in the exports
+          table. Note that although diod will not traverse file system boundaries for a
+          given mount due to inode uniqueness constraints, subdirectories of a file
+          system can be separately exported.
+        '';
+      };
+
+      exportall = mkOption {
+        type = types.bool;
+        default = true;
+        description = ''
+          Export all file systems listed in /proc/mounts. If new file systems are mounted
+          after diod has started, they will become immediately mountable. If there is a
+          duplicate entry for a file system in the exports list, any options listed in
+          the exports entry will apply.
+        '';
+      };
+
+      exportopts = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        description = ''
+          Establish a default set of export options. These are overridden, not appended
+          to, by opts attributes in an "exports" entry.
+        '';
+      };
+
+      nwthreads = mkOption {
+        type = types.int;
+        default = 16;
+        description = ''
+          Sets the (fixed) number of worker threads created to handle 9P
+          requests for a unique aname.
+        '';
+      };
+
+      authRequired = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Whether clients must authenticate with a valid MUNGE credential in order to connect.
+        '';
+      };
+
+      userdb = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          This option disables password/group lookups. It allows any uid to attach and
+          assumes gid=uid, and supplementary groups contain only the primary gid.
+        '';
+      };
+
+      allsquash = mkOption {
+        type = types.bool;
+        default = true;
+        description = ''
+          Remap all users to "nobody". The attaching user need not be present in the
+          password file.
+        '';
+      };
+
+      squashuser = mkOption {
+        type = types.str;
+        default = "nobody";
+        description = ''
+          Change the squash user. The squash user must be present in the password file.
+        '';
+      };
+
+      logdest = mkOption {
+        type = types.str;
+        default = "syslog:daemon:err";
+        description = ''
+          Set the destination for logging.
+          The value has the form of "syslog:facility:level" or "filename".
+        '';
+      };
+
+
+      statfsPassthru = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          This option configures statfs to return the host file system's type
+          rather than V9FS_MAGIC.
+        '';
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = "Extra configuration options for diod.conf.";
+      };
+    };
+  };
+
+  config = mkIf config.services.diod.enable {
+    environment.systemPackages = [ pkgs.diod ];
+
+    systemd.services.diod = {
+      description = "diod 9P file server";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      serviceConfig = {
+        ExecStart = "${pkgs.diod}/sbin/diod -f -c ${diodConfig}";
+      };
+    };
+  };
+}
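A minimal sketch of exporting a directory over 9P with this module (the export path is hypothetical; with allsquash left at its default of true, all attaching users are remapped to the squash user):

  {
    services.diod = {
      enable = true;
      listen = [ "0.0.0.0:564" ];
      exports = [ "/srv/shared" ];
      authRequired = false;
    };
  }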
diff --git a/nixos/modules/services/network-filesystems/drbd.nix b/nixos/modules/services/network-filesystems/drbd.nix
new file mode 100644
index 00000000000..c730e0b34e9
--- /dev/null
+++ b/nixos/modules/services/network-filesystems/drbd.nix
@@ -0,0 +1,63 @@
+# Support for DRBD, the Distributed Replicated Block Device.
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let cfg = config.services.drbd; in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.drbd.enable = mkOption {
+      default = false;
+      type = types.bool;
+      description = ''
+        Whether to enable support for DRBD, the Distributed Replicated
+        Block Device.
+      '';
+    };
+
+    services.drbd.config = mkOption {
+      default = "";
+      type = types.lines;
+      description = ''
+        Contents of the <filename>drbd.conf</filename> configuration file.
+      '';
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    environment.systemPackages = [ pkgs.drbd ];
+
+    services.udev.packages = [ pkgs.drbd ];
+
+    boot.kernelModules = [ "drbd" ];
+
+    boot.extraModprobeConfig =
+      ''
+        options drbd usermode_helper=/run/current-system/sw/bin/drbdadm
+      '';
+
+    environment.etc."drbd.conf" =
+      { source = pkgs.writeText "drbd.conf" cfg.config; };
+
+    systemd.services.drbd = {
+      after = [ "systemd-udev.settle.service" "network.target" ];
+      wants = [ "systemd-udev.settle.service" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        ExecStart = "${pkgs.drbd}/sbin/drbdadm up all";
+        ExecStop = "${pkgs.drbd}/sbin/drbdadm down all";
+      };
+    };
+  };
+}
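A sketch of what services.drbd.config might contain for one replicated resource; host names, devices and addresses are illustrative, and the unit above brings the resource up via drbdadm up all:

  {
    services.drbd = {
      enable = true;
      config = ''
        resource r0 {
          device    /dev/drbd0;
          disk      /dev/sdb1;
          meta-disk internal;
          on alpha { address 10.0.0.1:7789; }
          on beta  { address 10.0.0.2:7789; }
        }
      '';
    };
  }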
diff --git a/nixos/modules/services/network-filesystems/glusterfs.nix b/nixos/modules/services/network-filesystems/glusterfs.nix
new file mode 100644
index 00000000000..38be098de5d
--- /dev/null
+++ b/nixos/modules/services/network-filesystems/glusterfs.nix
@@ -0,0 +1,208 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  inherit (pkgs) glusterfs rsync;
+
+  tlsCmd = if (cfg.tlsSettings != null) then
+  ''
+    mkdir -p /var/lib/glusterd
+    touch /var/lib/glusterd/secure-access
+  ''
+  else
+  ''
+    rm -f /var/lib/glusterd/secure-access
+  '';
+
+  restartTriggers = if (cfg.tlsSettings != null) then [
+    config.environment.etc."ssl/glusterfs.pem".source
+    config.environment.etc."ssl/glusterfs.key".source
+    config.environment.etc."ssl/glusterfs.ca".source
+  ] else [];
+
+  cfg = config.services.glusterfs;
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.glusterfs = {
+
+      enable = mkEnableOption "GlusterFS Daemon";
+
+      logLevel = mkOption {
+        type = types.enum ["DEBUG" "INFO" "WARNING" "ERROR" "CRITICAL" "TRACE" "NONE"];
+        description = "Log level used by the GlusterFS daemon";
+        default = "INFO";
+      };
+
+      useRpcbind = mkOption {
+        type = types.bool;
+        description = ''
+          Enable use of rpcbind. This is required for Gluster's NFS functionality.
+
+          You may want to turn it off to reduce the attack surface for DDoS reflection attacks.
+
+          See https://davelozier.com/glusterfs-and-rpcbind-portmap-ddos-reflection-attacks/
+          and https://bugzilla.redhat.com/show_bug.cgi?id=1426842 for details.
+        '';
+        default = true;
+      };
+
+      enableGlustereventsd = mkOption {
+        type = types.bool;
+        description = "Whether to enable the GlusterFS Events Daemon";
+        default = true;
+      };
+
+      killMode = mkOption {
+        type = types.enum ["control-group" "process" "mixed" "none"];
+        description = ''
+          The systemd KillMode to use for glusterd.
+
+          glusterd spawns other daemons like gsyncd.
+          If you want these to stop when glusterd is stopped (e.g. to ensure
+          that NixOS config changes are reflected even for these sub-daemons),
+          set this to 'control-group'.
+          If however you want running volume processes (glusterfsd) and thus
+          gluster mounts not to be interrupted when glusterd is restarted
+          (for example, when you want to restart them manually at a later time),
+          set this to 'process'.
+        '';
+        default = "control-group";
+      };
+
+      stopKillTimeout = mkOption {
+        type = types.str;
+        description = ''
+          The systemd TimeoutStopSec to use.
+
+          After this time after having been asked to shut down, glusterd
+          (and depending on the killMode setting also its child processes)
+          are killed by systemd.
+
+          The default is set low because GlusterFS (as of 3.10) is known
+          not to tell its children (like gsyncd) to terminate at all.
+        '';
+        default = "5s";
+      };
+
+      extraFlags = mkOption {
+        type = types.listOf types.str;
+        description = "Extra flags passed to the GlusterFS daemon";
+        default = [];
+      };
+
+      tlsSettings = mkOption {
+        description = ''
+          Make the server communicate via TLS.
+          This means it will only connect to other gluster
+          servers having certificates signed by the same CA.
+
+          Enabling this will create a file <filename>/var/lib/glusterd/secure-access</filename>.
+          Disabling will delete this file again.
+
+          See also: https://gluster.readthedocs.io/en/latest/Administrator%20Guide/SSL/
+        '';
+        default = null;
+        type = types.nullOr (types.submodule {
+          options = {
+            tlsKeyPath = mkOption {
+              type = types.str;
+              description = "Path to the private key used for TLS.";
+            };
+
+            tlsPem = mkOption {
+              type = types.path;
+              description = "Path to the certificate used for TLS.";
+            };
+
+            caCert = mkOption {
+              type = types.path;
+              description = "Path certificate authority used to sign the cluster certificates.";
+            };
+          };
+        });
+      };
+    };
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ pkgs.glusterfs ];
+
+    services.rpcbind.enable = cfg.useRpcbind;
+
+    environment.etc = mkIf (cfg.tlsSettings != null) {
+      "ssl/glusterfs.pem".source = cfg.tlsSettings.tlsPem;
+      "ssl/glusterfs.key".source = cfg.tlsSettings.tlsKeyPath;
+      "ssl/glusterfs.ca".source = cfg.tlsSettings.caCert;
+    };
+
+    systemd.services.glusterd = {
+      inherit restartTriggers;
+
+      description = "GlusterFS, a clustered file-system server";
+
+      wantedBy = [ "multi-user.target" ];
+
+      requires = lib.optional cfg.useRpcbind "rpcbind.service";
+      after = [ "network.target" ] ++ lib.optional cfg.useRpcbind "rpcbind.service";
+
+      preStart = ''
+        install -m 0755 -d /var/log/glusterfs
+      ''
+      # The copying of hooks is due to upstream bug https://bugzilla.redhat.com/show_bug.cgi?id=1452761
+      + ''
+        mkdir -p /var/lib/glusterd/hooks/
+        ${rsync}/bin/rsync -a ${glusterfs}/var/lib/glusterd/hooks/ /var/lib/glusterd/hooks/
+
+        ${tlsCmd}
+      ''
+      # `glusterfind` needs dirs that upstream installs at `make install` phase
+      # https://github.com/gluster/glusterfs/blob/v3.10.2/tools/glusterfind/Makefile.am#L16-L17
+      + ''
+        mkdir -p /var/lib/glusterd/glusterfind/.keys
+        mkdir -p /var/lib/glusterd/hooks/1/delete/post/
+      '';
+
+      serviceConfig = {
+        LimitNOFILE=65536;
+        ExecStart="${glusterfs}/sbin/glusterd --no-daemon --log-level=${cfg.logLevel} ${toString cfg.extraFlags}";
+        KillMode=cfg.killMode;
+        TimeoutStopSec=cfg.stopKillTimeout;
+      };
+    };
+
+    systemd.services.glustereventsd = mkIf cfg.enableGlustereventsd {
+      inherit restartTriggers;
+
+      description = "Gluster Events Notifier";
+
+      wantedBy = [ "multi-user.target" ];
+
+      after = [ "network.target" ];
+
+      preStart = ''
+        install -m 0755 -d /var/log/glusterfs
+      '';
+
+      # glustereventsd uses the `gluster` executable
+      path = [ glusterfs ];
+
+      serviceConfig = {
+        Type="simple";
+        PIDFile="/run/glustereventsd.pid";
+        ExecStart="${glusterfs}/sbin/glustereventsd --pid-file /run/glustereventsd.pid";
+        ExecReload="/bin/kill -SIGUSR2 $MAINPID";
+        KillMode="control-group";
+      };
+    };
+  };
+}
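A sketch combining the killMode and tlsSettings options above (the certificate paths are hypothetical; enabling tlsSettings makes the preStart script create /var/lib/glusterd/secure-access):

  {
    services.glusterfs = {
      enable = true;
      killMode = "process"; # keep glusterfsd volume processes alive across glusterd restarts
      tlsSettings = {
        tlsKeyPath = "/etc/secrets/glusterfs.key";
        tlsPem = "/etc/secrets/glusterfs.pem";
        caCert = "/etc/secrets/glusterfs.ca";
      };
    };
  }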
diff --git a/nixos/modules/services/network-filesystems/ipfs.nix b/nixos/modules/services/network-filesystems/ipfs.nix
new file mode 100644
index 00000000000..17da020bf3e
--- /dev/null
+++ b/nixos/modules/services/network-filesystems/ipfs.nix
@@ -0,0 +1,311 @@
+{ config, lib, pkgs, options, ... }:
+with lib;
+let
+  cfg = config.services.ipfs;
+  opt = options.services.ipfs;
+
+  ipfsFlags = toString ([
+    (optionalString cfg.autoMount "--mount")
+    (optionalString cfg.enableGC "--enable-gc")
+    (optionalString (cfg.serviceFdlimit != null) "--manage-fdlimit=false")
+    (optionalString (cfg.defaultMode == "offline") "--offline")
+    (optionalString (cfg.defaultMode == "norouting") "--routing=none")
+  ] ++ cfg.extraFlags);
+
+  profile =
+    if cfg.localDiscovery
+    then "local-discovery"
+    else "server";
+
+  splitMultiaddr = addrRaw: lib.tail (lib.splitString "/" addrRaw);
+
+  multiaddrToListenStream = addrRaw:
+    let
+      addr = splitMultiaddr addrRaw;
+      s = builtins.elemAt addr;
+    in
+    if s 0 == "ip4" && s 2 == "tcp"
+    then "${s 1}:${s 3}"
+    else if s 0 == "ip6" && s 2 == "tcp"
+    then "[${s 1}]:${s 3}"
+    else if s 0 == "unix"
+    then "/${lib.concatStringsSep "/" (lib.tail addr)}"
+    else null; # not valid for listen stream, skip
+
+  multiaddrToListenDatagram = addrRaw:
+    let
+      addr = splitMultiaddr addrRaw;
+      s = builtins.elemAt addr;
+    in
+    if s 0 == "ip4" && s 2 == "udp"
+    then "${s 1}:${s 3}"
+    else if s 0 == "ip6" && s 2 == "udp"
+    then "[${s 1}]:${s 3}"
+    else null; # not valid for listen datagram, skip
+
+in
+{
+
+  ###### interface
+
+  options = {
+
+    services.ipfs = {
+
+      enable = mkEnableOption "Interplanetary File System (WARNING: may cause severe network degredation)";
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.ipfs;
+        defaultText = literalExpression "pkgs.ipfs";
+        description = "Which IPFS package to use.";
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "ipfs";
+        description = "User under which the IPFS daemon runs";
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "ipfs";
+        description = "Group under which the IPFS daemon runs";
+      };
+
+      dataDir = mkOption {
+        type = types.str;
+        default =
+          if versionAtLeast config.system.stateVersion "17.09"
+          then "/var/lib/ipfs"
+          else "/var/lib/ipfs/.ipfs";
+        defaultText = literalExpression ''
+          if versionAtLeast config.system.stateVersion "17.09"
+          then "/var/lib/ipfs"
+          else "/var/lib/ipfs/.ipfs"
+        '';
+        description = "The data dir for IPFS";
+      };
+
+      defaultMode = mkOption {
+        type = types.enum [ "online" "offline" "norouting" ];
+        default = "online";
+        description = "systemd service that is enabled by default";
+      };
+
+      autoMount = mkOption {
+        type = types.bool;
+        default = false;
+        description = "Whether IPFS should try to mount /ipfs and /ipns at startup.";
+      };
+
+      autoMigrate = mkOption {
+        type = types.bool;
+        default = true;
+        description = "Whether IPFS should try to run the fs-repo-migration at startup.";
+      };
+
+      ipfsMountDir = mkOption {
+        type = types.str;
+        default = "/ipfs";
+        description = "Where to mount the IPFS namespace to";
+      };
+
+      ipnsMountDir = mkOption {
+        type = types.str;
+        default = "/ipns";
+        description = "Where to mount the IPNS namespace to";
+      };
+
+      gatewayAddress = mkOption {
+        type = types.str;
+        default = "/ip4/127.0.0.1/tcp/8080";
+        description = "Where the IPFS Gateway can be reached";
+      };
+
+      apiAddress = mkOption {
+        type = types.str;
+        default = "/ip4/127.0.0.1/tcp/5001";
+        description = "Where IPFS exposes its API to";
+      };
+
+      swarmAddress = mkOption {
+        type = types.listOf types.str;
+        default = [
+          "/ip4/0.0.0.0/tcp/4001"
+          "/ip6/::/tcp/4001"
+          "/ip4/0.0.0.0/udp/4001/quic"
+          "/ip6/::/udp/4001/quic"
+        ];
+        description = "Where IPFS listens for incoming p2p connections";
+      };
+
+      enableGC = mkOption {
+        type = types.bool;
+        default = false;
+        description = "Whether to enable automatic garbage collection";
+      };
+
+      emptyRepo = mkOption {
+        type = types.bool;
+        default = false;
+        description = "If set to true, the repo won't be initialized with help files";
+      };
+
+      extraConfig = mkOption {
+        type = types.attrs;
+        description = ''
+          Attrset of daemon configuration to set using <command>ipfs config</command>, every time the daemon starts.
+          These are applied last, so may override configuration set by other options in this module.
+          Keep in mind that this configuration is stateful; i.e., unsetting anything in here does not reset the value to the default!
+        '';
+        default = { };
+        example = {
+          Datastore.StorageMax = "100GB";
+          Discovery.MDNS.Enabled = false;
+          Bootstrap = [
+            "/ip4/128.199.219.111/tcp/4001/ipfs/QmSoLSafTMBsPKadTEgaXctDQVcqN88CNLHXMkTNwMKPnu"
+            "/ip4/162.243.248.213/tcp/4001/ipfs/QmSoLueR4xBeUbY9WZ9xGUUxunbKWcrNFTDAadQJmocnWm"
+          ];
+          Swarm.AddrFilters = null;
+        };
+
+      };
+
+      extraFlags = mkOption {
+        type = types.listOf types.str;
+        description = "Extra flags passed to the IPFS daemon";
+        default = [ ];
+      };
+
+      localDiscovery = mkOption {
+        type = types.bool;
+        description = ''Whether to enable local discovery for the ipfs daemon.
+          This will allow ipfs to scan ports on your local network. Some hosting services will ban you if you do this.
+        '';
+        default = false;
+      };
+
+      serviceFdlimit = mkOption {
+        type = types.nullOr types.int;
+        default = null;
+        description = "The fdlimit for the IPFS systemd unit or <literal>null</literal> to have the daemon attempt to manage it";
+        example = 64 * 1024;
+      };
+
+      startWhenNeeded = mkOption {
+        type = types.bool;
+        default = false;
+        description = "Whether to use socket activation to start IPFS when needed.";
+      };
+
+    };
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ cfg.package ];
+    environment.variables.IPFS_PATH = cfg.dataDir;
+
+    # https://github.com/lucas-clemente/quic-go/wiki/UDP-Receive-Buffer-Size
+    boot.kernel.sysctl."net.core.rmem_max" = mkDefault 2500000;
+
+    programs.fuse = mkIf cfg.autoMount {
+      userAllowOther = true;
+    };
+
+    users.users = mkIf (cfg.user == "ipfs") {
+      ipfs = {
+        group = cfg.group;
+        home = cfg.dataDir;
+        createHome = false;
+        uid = config.ids.uids.ipfs;
+        description = "IPFS daemon user";
+        packages = [
+          pkgs.ipfs-migrator
+        ];
+      };
+    };
+
+    users.groups = mkIf (cfg.group == "ipfs") {
+      ipfs.gid = config.ids.gids.ipfs;
+    };
+
+    systemd.tmpfiles.rules = [
+      "d '${cfg.dataDir}' - ${cfg.user} ${cfg.group} - -"
+    ] ++ optionals cfg.autoMount [
+      "d '${cfg.ipfsMountDir}' - ${cfg.user} ${cfg.group} - -"
+      "d '${cfg.ipnsMountDir}' - ${cfg.user} ${cfg.group} - -"
+    ];
+
+    systemd.packages = [ cfg.package ];
+
+    systemd.services.ipfs = {
+      path = [ "/run/wrappers" cfg.package ];
+      environment.IPFS_PATH = cfg.dataDir;
+
+      preStart = ''
+        if [[ ! -f "$IPFS_PATH/config" ]]; then
+          ipfs init ${optionalString cfg.emptyRepo "-e"} --profile=${profile}
+        else
+          # After an unclean shutdown this file may exist which will cause the config command to attempt to talk to the daemon. This will hang forever if systemd is holding our sockets open.
+          rm -vf "$IPFS_PATH/api"
+
+          ipfs --offline config profile apply ${profile}
+        fi
+      '' + optionalString cfg.autoMount ''
+        ipfs --offline config Mounts.FuseAllowOther --json true
+        ipfs --offline config Mounts.IPFS ${cfg.ipfsMountDir}
+        ipfs --offline config Mounts.IPNS ${cfg.ipnsMountDir}
+      '' + optionalString cfg.autoMigrate ''
+        ${pkgs.ipfs-migrator}/bin/fs-repo-migrations -to '${cfg.package.repoVersion}' -y
+      '' + ''
+        ipfs --offline config show \
+          | ${pkgs.jq}/bin/jq '. * $extraConfig' --argjson extraConfig ${
+              escapeShellArg (builtins.toJSON ({
+                Addresses.API = cfg.apiAddress;
+                Addresses.Gateway = cfg.gatewayAddress;
+                Addresses.Swarm = cfg.swarmAddress;
+              } // cfg.extraConfig))
+            } \
+          | ipfs --offline config replace -
+      '';
+      serviceConfig = {
+        ExecStart = [ "" "${cfg.package}/bin/ipfs daemon ${ipfsFlags}" ];
+        User = cfg.user;
+        Group = cfg.group;
+      } // optionalAttrs (cfg.serviceFdlimit != null) { LimitNOFILE = cfg.serviceFdlimit; };
+    } // optionalAttrs (!cfg.startWhenNeeded) {
+      wantedBy = [ "default.target" ];
+    };
+
+    systemd.sockets.ipfs-gateway = {
+      wantedBy = [ "sockets.target" ];
+      socketConfig = {
+        ListenStream =
+          let
+            fromCfg = multiaddrToListenStream cfg.gatewayAddress;
+          in
+          [ "" ] ++ lib.optional (fromCfg != null) fromCfg;
+        ListenDatagram =
+          let
+            fromCfg = multiaddrToListenDatagram cfg.gatewayAddress;
+          in
+          [ "" ] ++ lib.optional (fromCfg != null) fromCfg;
+      };
+    };
+
+    systemd.sockets.ipfs-api = {
+      wantedBy = [ "sockets.target" ];
+      # We also include "%t/ipfs.sock" because there is no way to put the "%t"
+      # in the multiaddr.
+      socketConfig.ListenStream =
+        let
+          fromCfg = multiaddrToListenStream cfg.apiAddress;
+        in
+        [ "" "%t/ipfs.sock" ] ++ lib.optional (fromCfg != null) fromCfg;
+    };
+
+  };
+}
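A sketch tying the address options to the socket units above: the multiaddrToListenStream helper maps e.g. "/ip4/127.0.0.1/tcp/8080" to the ListenStream value "127.0.0.1:8080", so with startWhenNeeded the daemon is launched on first contact. The StorageMax value is illustrative:

  {
    services.ipfs = {
      enable = true;
      startWhenNeeded = true;
      apiAddress = "/ip4/127.0.0.1/tcp/5001";
      gatewayAddress = "/ip4/127.0.0.1/tcp/8080";
      extraConfig.Datastore.StorageMax = "50GB";
    };
  }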
diff --git a/nixos/modules/services/network-filesystems/kbfs.nix b/nixos/modules/services/network-filesystems/kbfs.nix
new file mode 100644
index 00000000000..a43ac656f66
--- /dev/null
+++ b/nixos/modules/services/network-filesystems/kbfs.nix
@@ -0,0 +1,118 @@
+{ config, lib, pkgs, ... }:
+with lib;
+let
+  inherit (config.security) wrapperDir;
+  cfg = config.services.kbfs;
+
+in {
+
+  ###### interface
+
+  options = {
+
+    services.kbfs = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = "Whether to mount the Keybase filesystem.";
+      };
+
+      enableRedirector = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Whether to enable the Keybase root redirector service, allowing
+          any user to access KBFS files via <literal>/keybase</literal>,
+          which will show different contents depending on the requester.
+        '';
+      };
+
+      mountPoint = mkOption {
+        type = types.str;
+        default = "%h/keybase";
+        example = "/keybase";
+        description = "Mountpoint for the Keybase filesystem.";
+      };
+
+      extraFlags = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        example = [
+          "-label kbfs"
+          "-mount-type normal"
+        ];
+        description = ''
+          Additional flags to pass to the Keybase filesystem on launch.
+        '';
+      };
+
+    };
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable (mkMerge [
+    {
+      # Upstream: https://github.com/keybase/client/blob/master/packaging/linux/systemd/kbfs.service
+      systemd.user.services.kbfs = {
+        description = "Keybase File System";
+
+        # Note that the "Requires" directive will cause a unit to be restarted whenever its dependency is restarted.
+        # Do not issue a hard dependency on keybase, because kbfs can reconnect to a restarted service.
+        # Do not issue a hard dependency on keybase-redirector, because it's ok if it fails (e.g., if it is disabled).
+        wants = [ "keybase.service" ] ++ optional cfg.enableRedirector "keybase-redirector.service";
+        path = [ "/run/wrappers" ];
+        unitConfig.ConditionUser = "!@system";
+
+        serviceConfig = {
+          Type = "notify";
+          # Keybase notifies from a forked process
+          EnvironmentFile = [
+            "-%E/keybase/keybase.autogen.env"
+            "-%E/keybase/keybase.env"
+          ];
+          ExecStartPre = [
+            "${pkgs.coreutils}/bin/mkdir -p \"${cfg.mountPoint}\""
+            "-${wrapperDir}/fusermount -uz \"${cfg.mountPoint}\""
+          ];
+          ExecStart = "${pkgs.kbfs}/bin/kbfsfuse ${toString cfg.extraFlags} \"${cfg.mountPoint}\"";
+          ExecStop = "${wrapperDir}/fusermount -uz \"${cfg.mountPoint}\"";
+          Restart = "on-failure";
+          PrivateTmp = true;
+        };
+        wantedBy = [ "default.target" ];
+      };
+
+      services.keybase.enable = true;
+
+      environment.systemPackages = [ pkgs.kbfs ];
+    }
+
+    (mkIf cfg.enableRedirector {
+      security.wrappers."keybase-redirector".source = "${pkgs.kbfs}/bin/redirector";
+
+      systemd.tmpfiles.rules = [ "d /keybase 0755 root root 0" ];
+
+      # Upstream: https://github.com/keybase/client/blob/master/packaging/linux/systemd/keybase-redirector.service
+      systemd.user.services.keybase-redirector = {
+        description = "Keybase Root Redirector for KBFS";
+        wants = [ "keybase.service" ];
+        unitConfig.ConditionUser = "!@system";
+
+        serviceConfig = {
+          EnvironmentFile = [
+            "-%E/keybase/keybase.autogen.env"
+            "-%E/keybase/keybase.env"
+          ];
+          # Note: The /keybase mount point is not currently configurable upstream.
+          ExecStart = "${wrapperDir}/keybase-redirector /keybase";
+          Restart = "on-failure";
+          PrivateTmp = true;
+        };
+
+        wantedBy = [ "default.target" ];
+      };
+    })
+  ]);
+}
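A sketch of a per-user KBFS mount with the root redirector enabled (the flags mirror the option example above):

  {
    services.kbfs = {
      enable = true;
      enableRedirector = true;   # provides /keybase for all users
      mountPoint = "%h/keybase"; # %h expands to the user's home directory
      extraFlags = [ "-label kbfs" ];
    };
  }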
diff --git a/nixos/modules/services/network-filesystems/litestream/default.nix b/nixos/modules/services/network-filesystems/litestream/default.nix
new file mode 100644
index 00000000000..51eb920d778
--- /dev/null
+++ b/nixos/modules/services/network-filesystems/litestream/default.nix
@@ -0,0 +1,100 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.litestream;
+  settingsFormat = pkgs.formats.yaml {};
+in
+{
+  options.services.litestream = {
+    enable = mkEnableOption "litestream";
+
+    package = mkOption {
+      description = "Package to use.";
+      default = pkgs.litestream;
+      defaultText = literalExpression "pkgs.litestream";
+      type = types.package;
+    };
+
+    settings = mkOption {
+      description = ''
+        See the <link xlink:href="https://litestream.io/reference/config/">documentation</link>.
+      '';
+      type = settingsFormat.type;
+      example = {
+        dbs = [
+          {
+            path = "/var/lib/db1";
+            replicas = [
+              {
+                url = "s3://mybkt.litestream.io/db1";
+              }
+            ];
+          }
+        ];
+      };
+    };
+
+    environmentFile = mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      example = "/run/secrets/litestream";
+      description = ''
+        Environment file as defined in <citerefentry>
+        <refentrytitle>systemd.exec</refentrytitle><manvolnum>5</manvolnum>
+        </citerefentry>.
+
+        Secrets may be passed to the service without adding them to the
+        world-readable Nix store, by specifying placeholder variables as
+        the option value in Nix and setting these variables accordingly in the
+        environment file.
+
+        By default, Litestream will perform environment variable expansion
+        within the config file before reading it. Any references to ''$VAR or
+        ''${VAR} formatted variables will be replaced with their environment
+        variable values. If no value is set then it will be replaced with an
+        empty string.
+
+        <programlisting>
+          # Content of the environment file
+          LITESTREAM_ACCESS_KEY_ID=AKIAxxxxxxxxxxxxxxxx
+          LITESTREAM_SECRET_ACCESS_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx/xxxxxxxxx
+        </programlisting>
+
+        Note that this file needs to be available on the host on which
+        this exporter is running.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ cfg.package ];
+    environment.etc = {
+      "litestream.yml" = {
+        source = settingsFormat.generate "litestream-config.yaml" cfg.settings;
+      };
+    };
+
+    systemd.services.litestream = {
+      description = "Litestream";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "networking.target" ];
+      serviceConfig = {
+        EnvironmentFile = mkIf (cfg.environmentFile != null) cfg.environmentFile;
+        ExecStart = "${cfg.package}/bin/litestream replicate";
+        Restart = "always";
+        User = "litestream";
+        Group = "litestream";
+      };
+    };
+
+    users.users.litestream = {
+      description = "Litestream user";
+      group = "litestream";
+      isSystemUser = true;
+    };
+    users.groups.litestream = {};
+  };
+  meta.doc = ./litestream.xml;
+}
diff --git a/nixos/modules/services/network-filesystems/litestream/litestream.xml b/nixos/modules/services/network-filesystems/litestream/litestream.xml
new file mode 100644
index 00000000000..598f9be8cf6
--- /dev/null
+++ b/nixos/modules/services/network-filesystems/litestream/litestream.xml
@@ -0,0 +1,65 @@
+<chapter xmlns="http://docbook.org/ns/docbook"
+         xmlns:xlink="http://www.w3.org/1999/xlink"
+         xmlns:xi="http://www.w3.org/2001/XInclude"
+         version="5.0"
+         xml:id="module-services-litestream">
+ <title>Litestream</title>
+ <para>
+  <link xlink:href="https://litestream.io/">Litestream</link> is a standalone streaming
+  replication tool for SQLite.
+ </para>
+
+ <section xml:id="module-services-litestream-configuration">
+  <title>Configuration</title>
+
+  <para>
+   The Litestream service is managed by a dedicated user named <literal>litestream</literal>,
+   which needs permission to access the database file. Here's an example config which grants
+   the required permissions to access the <link linkend="opt-services.grafana.database.path">
+   grafana database</link>:
+<programlisting>
+{ pkgs, ... }:
+{
+  users.users.litestream.extraGroups = [ "grafana" ];
+
+  systemd.services.grafana.serviceConfig.ExecStartPost = "+" + pkgs.writeShellScript "grant-grafana-permissions" ''
+    timeout=10
+
+    while [ ! -f /var/lib/grafana/data/grafana.db ];
+    do
+      if [ "$timeout" == 0 ]; then
+        echo "ERROR: Timeout while waiting for /var/lib/grafana/data/grafana.db."
+        exit 1
+      fi
+
+      sleep 1
+
+      ((timeout--))
+    done
+
+    find /var/lib/grafana -type d -exec chmod -v 775 {} \;
+    find /var/lib/grafana -type f -exec chmod -v 660 {} \;
+  '';
+
+  services.litestream = {
+    enable = true;
+
+    environmentFile = "/run/secrets/litestream";
+
+    settings = {
+      dbs = [
+        {
+          path = "/var/lib/grafana/data/grafana.db";
+          replicas = [{
+            url = "s3://mybkt.litestream.io/grafana";
+          }];
+        }
+      ];
+    };
+  };
+}
+</programlisting>
+  </para>
+ </section>
+
+</chapter>
diff --git a/nixos/modules/services/network-filesystems/moosefs.nix b/nixos/modules/services/network-filesystems/moosefs.nix
new file mode 100644
index 00000000000..88b2ada37e7
--- /dev/null
+++ b/nixos/modules/services/network-filesystems/moosefs.nix
@@ -0,0 +1,249 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.moosefs;
+
+  mfsUser = if cfg.runAsUser then "moosefs" else "root";
+
+  settingsFormat = let
+    listSep = " ";
+    allowedTypes = with types; [ bool int float str ];
+    valueToString = val:
+        if isList val then concatStringsSep listSep (map (x: valueToString x) val)
+        else if isBool val then (if val then "1" else "0")
+        else toString val;
+
+    in {
+      type = with types; let
+        valueType = oneOf ([
+          (listOf valueType)
+        ] ++ allowedTypes) // {
+          description = "Flat key-value file";
+        };
+      in attrsOf valueType;
+
+      generate = name: value:
+        pkgs.writeText name ( lib.concatStringsSep "\n" (
+          lib.mapAttrsToList (key: val: "${key} = ${valueToString val}") value ));
+    };
+
+
+  initTool = pkgs.writeShellScriptBin "mfsmaster-init" ''
+    if [ ! -e ${cfg.master.settings.DATA_PATH}/metadata.mfs ]; then
+      cp ${pkgs.moosefs}/var/mfs/metadata.mfs.empty ${cfg.master.settings.DATA_PATH}
+      chmod +w ${cfg.master.settings.DATA_PATH}/metadata.mfs.empty
+      ${pkgs.moosefs}/bin/mfsmaster -a -c ${masterCfg} start
+      ${pkgs.moosefs}/bin/mfsmaster -c ${masterCfg} stop
+      rm ${cfg.master.settings.DATA_PATH}/metadata.mfs.empty
+    fi
+  '';
+
+  # master config file
+  masterCfg = settingsFormat.generate
+    "mfsmaster.cfg" cfg.master.settings;
+
+  # metalogger config file
+  metaloggerCfg = settingsFormat.generate
+    "mfsmetalogger.cfg" cfg.metalogger.settings;
+
+  # chunkserver config file
+  chunkserverCfg = settingsFormat.generate
+    "mfschunkserver.cfg" cfg.chunkserver.settings;
+
+  # generic template for all daemons
+  systemdService = name: extraConfig: configFile: {
+    wantedBy = [ "multi-user.target" ];
+    wants = [ "network-online.target" ];
+    after = [ "network.target" "network-online.target" ];
+
+    serviceConfig = {
+      Type = "forking";
+      ExecStart  = "${pkgs.moosefs}/bin/mfs${name} -c ${configFile} start";
+      ExecStop   = "${pkgs.moosefs}/bin/mfs${name} -c ${configFile} stop";
+      ExecReload = "${pkgs.moosefs}/bin/mfs${name} -c ${configFile} reload";
+      PIDFile = "${cfg."${name}".settings.DATA_PATH}/.mfs${name}.lock";
+    } // extraConfig;
+  };
+
+in {
+  ###### interface
+
+  options = {
+    services.moosefs = {
+      masterHost = mkOption {
+        type = types.str;
+        description = "IP or DNS name of master host.";
+      };
+
+      runAsUser = mkOption {
+        type = types.bool;
+        default = true;
+        description = "Run daemons as user moosefs instead of root.";
+      };
+
+      client.enable = mkEnableOption "the MooseFS client";
+
+      master = {
+        enable = mkOption {
+          type = types.bool;
+          description = ''
+            Enable Moosefs master daemon.
+
+            You need to run <literal>mfsmaster-init</literal> on a freshly installed master server to
+            initialize the <literal>DATA_PATH</literal> directory.
+          '';
+          default = false;
+        };
+
+        exports = mkOption {
+          type = with types; listOf str;
+          description = "Paths to export (see mfsexports.cfg).";
+          example = [
+            "* / rw,alldirs,admin,maproot=0:0"
+            "* . rw"
+          ];
+        };
+
+        openFirewall = mkOption {
+          type = types.bool;
+          description = "Whether to automatically open the necessary ports in the firewall.";
+          default = false;
+        };
+
+        settings = mkOption {
+          type = types.submodule {
+            freeformType = settingsFormat.type;
+
+            options.DATA_PATH = mkOption {
+              type = types.str;
+              default = "/var/lib/mfs";
+              description = "Data storage directory.";
+            };
+          };
+
+          description = "Contents of config file (mfsmaster.cfg).";
+        };
+      };
+
+      metalogger = {
+        enable = mkEnableOption "Moosefs metalogger daemon.";
+
+        settings = mkOption {
+          type = types.submodule {
+            freeformType = settingsFormat.type;
+
+            options.DATA_PATH = mkOption {
+              type = types.str;
+              default = "/var/lib/mfs";
+              description = "Data storage directory";
+            };
+          };
+
+          description = "Contents of metalogger config file (mfsmetalogger.cfg).";
+        };
+      };
+
+      chunkserver = {
+        enable = mkEnableOption "Moosefs chunkserver daemon.";
+
+        openFirewall = mkOption {
+          type = types.bool;
+          description = "Whether to automatically open the necessary ports in the firewall.";
+          default = false;
+        };
+
+        hdds = mkOption {
+          type = with types; listOf str;
+          description = "Mount points to be used by chunkserver for storage (see mfshdd.cfg).";
+          example = [ "/mnt/hdd1" ];
+        };
+
+        settings = mkOption {
+          type = types.submodule {
+            freeformType = settingsFormat.type;
+
+            options.DATA_PATH = mkOption {
+              type = types.str;
+              default = "/var/lib/mfs";
+              description = "Directory for lock file.";
+            };
+          };
+
+          description = "Contents of chunkserver config file (mfschunkserver.cfg).";
+        };
+      };
+    };
+  };
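+  # A minimal single-host sketch (host name and disk paths are illustrative):
+  #   services.moosefs = {
+  #     masterHost = "localhost";
+  #     client.enable = true;
+  #     master = {
+  #       enable = true;
+  #       exports = [ "* / rw,alldirs,maproot=0:0" ];
+  #     };
+  #     chunkserver = {
+  #       enable = true;
+  #       hdds = [ "/var/lib/mfs/hdd1" ];
+  #     };
+  #   };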
+
+  ###### implementation
+
+  config = mkIf ( cfg.client.enable || cfg.master.enable || cfg.metalogger.enable || cfg.chunkserver.enable ) {
+
+    warnings = [ ( mkIf (!cfg.runAsUser) "Running moosefs services as root is not recommended.") ];
+
+    # Service settings
+    services.moosefs = {
+      master.settings = mkIf cfg.master.enable {
+        WORKING_USER = mfsUser;
+        EXPORTS_FILENAME = toString ( pkgs.writeText "mfsexports.cfg"
+          (concatStringsSep "\n" cfg.master.exports));
+      };
+
+      metalogger.settings = mkIf cfg.metalogger.enable {
+        WORKING_USER = mfsUser;
+        MASTER_HOST = cfg.masterHost;
+      };
+
+      chunkserver.settings = mkIf cfg.chunkserver.enable {
+        WORKING_USER = mfsUser;
+        MASTER_HOST = cfg.masterHost;
+        HDD_CONF_FILENAME = toString ( pkgs.writeText "mfshdd.cfg"
+          (concatStringsSep "\n" cfg.chunkserver.hdds));
+      };
+    };
+
+    # Create system user account for daemons
+    users = mkIf ( cfg.runAsUser && ( cfg.master.enable || cfg.metalogger.enable || cfg.chunkserver.enable ) ) {
+      users.moosefs = {
+        isSystemUser = true;
+        description = "moosefs daemon user";
+        group = "moosefs";
+      };
+      groups.moosefs = {};
+    };
+
+    environment.systemPackages =
+      (lib.optional cfg.client.enable pkgs.moosefs) ++
+      (lib.optional cfg.master.enable initTool);
+
+    networking.firewall.allowedTCPPorts =
+      (lib.optionals cfg.master.openFirewall [ 9419 9420 9421 ]) ++
+      (lib.optional cfg.chunkserver.openFirewall 9422);
+
+    # Ensure storage directories exist
+    systemd.tmpfiles.rules =
+         optional cfg.master.enable "d ${cfg.master.settings.DATA_PATH} 0700 ${mfsUser} ${mfsUser}"
+      ++ optional cfg.metalogger.enable "d ${cfg.metalogger.settings.DATA_PATH} 0700 ${mfsUser} ${mfsUser}"
+      ++ optional cfg.chunkserver.enable "d ${cfg.chunkserver.settings.DATA_PATH} 0700 ${mfsUser} ${mfsUser}";
+
+    # Service definitions
+    systemd.services.mfs-master = mkIf cfg.master.enable
+    ( systemdService "master" {
+      TimeoutStartSec = 1800;
+      TimeoutStopSec = 1800;
+      Restart = "no";
+    } masterCfg );
+
+    systemd.services.mfs-metalogger = mkIf cfg.metalogger.enable
+      ( systemdService "metalogger" { Restart = "on-abnormal"; } metaloggerCfg );
+
+    systemd.services.mfs-chunkserver = mkIf cfg.chunkserver.enable
+      ( systemdService "chunkserver" { Restart = "on-abnormal"; } chunkserverCfg );
+  };
+}
diff --git a/nixos/modules/services/network-filesystems/netatalk.nix b/nixos/modules/services/network-filesystems/netatalk.nix
new file mode 100644
index 00000000000..06a36eb30c2
--- /dev/null
+++ b/nixos/modules/services/network-filesystems/netatalk.nix
@@ -0,0 +1,97 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.netatalk;
+  settingsFormat = pkgs.formats.ini { };
+  afpConfFile = settingsFormat.generate "afp.conf" cfg.settings;
+in {
+  options = {
+    services.netatalk = {
+
+      enable = mkEnableOption "the Netatalk AFP fileserver";
+
+      port = mkOption {
+        type = types.port;
+        default = 548;
+        description = "TCP port to be used for AFP.";
+      };
+
+      settings = mkOption {
+        inherit (settingsFormat) type;
+        default = { };
+        example = {
+          Global = { "uam list" = "uams_guest.so"; };
+          Homes = {
+            path = "afp-data";
+            "basedir regex" = "/home";
+          };
+          example-volume = {
+            path = "/srv/volume";
+            "read only" = true;
+          };
+        };
+        description = ''
+          Configuration for Netatalk. See
+          <citerefentry><refentrytitle>afp.conf</refentrytitle>
+          <manvolnum>5</manvolnum></citerefentry>.
+        '';
+      };
+
+      extmap = mkOption {
+        type = types.lines;
+        default = "";
+        description = ''
+          File name extension mappings.
+          See <citerefentry><refentrytitle>extmap.conf</refentrytitle>
+          <manvolnum>5</manvolnum></citerefentry> for more information.
+        '';
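+        # An illustrative mapping; see extmap.conf(5) for the exact format:
+        example = ''
+          .pdf "PDF " "CARO"
+        '';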
+      };
+
+    };
+  };
+
+  imports = (map (option:
+    mkRemovedOptionModule [ "services" "netatalk" option ]
+    "This option was removed in favor of `services.netatalk.settings`.") [
+      "extraConfig"
+      "homes"
+      "volumes"
+    ]);
+
+  config = mkIf cfg.enable {
+
+    services.netatalk.settings.Global = {
+      "afp port" = toString cfg.port;
+      "extmap file" = "${pkgs.writeText "extmap.conf" cfg.extmap}";
+    };
+
+    systemd.services.netatalk = {
+      description = "Netatalk AFP fileserver for Macintosh clients";
+      unitConfig.Documentation =
+        "man:afp.conf(5) man:netatalk(8) man:afpd(8) man:cnid_metad(8) man:cnid_dbd(8)";
+      after = [ "network.target" "avahi-daemon.service" ];
+      wantedBy = [ "multi-user.target" ];
+
+      path = [ pkgs.netatalk ];
+
+      serviceConfig = {
+        Type = "forking";
+        GuessMainPID = "no";
+        PIDFile = "/run/lock/netatalk";
+        ExecStart = "${pkgs.netatalk}/sbin/netatalk -F ${afpConfFile}";
+        ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+        ExecStop = "${pkgs.coreutils}/bin/kill -TERM $MAINPID";
+        Restart = "always";
+        RestartSec = 1;
+        StateDirectory = [ "netatalk/CNID" ];
+      };
+
+    };
+
+    security.pam.services.netatalk.unixAuth = true;
+
+  };
+
+}
diff --git a/nixos/modules/services/network-filesystems/nfsd.nix b/nixos/modules/services/network-filesystems/nfsd.nix
new file mode 100644
index 00000000000..1b62bfa8203
--- /dev/null
+++ b/nixos/modules/services/network-filesystems/nfsd.nix
@@ -0,0 +1,175 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.nfs.server;
+
+  exports = pkgs.writeText "exports" cfg.exports;
+
+in
+
+{
+  imports = [
+    (mkRenamedOptionModule [ "services" "nfs" "lockdPort" ] [ "services" "nfs" "server" "lockdPort" ])
+    (mkRenamedOptionModule [ "services" "nfs" "statdPort" ] [ "services" "nfs" "server" "statdPort" ])
+  ];
+
+  ###### interface
+
+  options = {
+
+    services.nfs = {
+
+      server = {
+        enable = mkOption {
+          type = types.bool;
+          default = false;
+          description = ''
+            Whether to enable the kernel's NFS server.
+          '';
+        };
+
+        extraNfsdConfig = mkOption {
+          type = types.str;
+          default = "";
+          description = ''
+            Extra configuration options for the [nfsd] section of /etc/nfs.conf.
+          '';
+        };
+
+        exports = mkOption {
+          type = types.lines;
+          default = "";
+          description = ''
+            Contents of the /etc/exports file.  See
+            <citerefentry><refentrytitle>exports</refentrytitle>
+            <manvolnum>5</manvolnum></citerefentry> for the format.
+          '';
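+          # An illustrative export entry (hypothetical path and subnet):
+          example = ''
+            /srv/nfs  192.168.1.0/24(rw,no_subtree_check)
+          '';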
+        };
+
+        hostName = mkOption {
+          type = types.nullOr types.str;
+          default = null;
+          description = ''
+            Hostname or address on which NFS requests will be accepted.
+            Default is all.  See the <option>-H</option> option in
+            <citerefentry><refentrytitle>nfsd</refentrytitle>
+            <manvolnum>8</manvolnum></citerefentry>.
+          '';
+        };
+
+        nproc = mkOption {
+          type = types.int;
+          default = 8;
+          description = ''
+            Number of NFS server threads.  Defaults to the recommended value of 8.
+          '';
+        };
+
+        createMountPoints = mkOption {
+          type = types.bool;
+          default = false;
+          description = "Whether to create the mount points in the exports file at startup time.";
+        };
+
+        mountdPort = mkOption {
+          type = types.nullOr types.int;
+          default = null;
+          example = 4002;
+          description = ''
+            Use fixed port for rpc.mountd, useful if server is behind firewall.
+          '';
+        };
+
+        lockdPort = mkOption {
+          type = types.nullOr types.int;
+          default = null;
+          example = 4001;
+          description = ''
+            Use a fixed port for the NFS lock manager kernel module
+            (<literal>lockd/nlockmgr</literal>).  This is useful if the
+            NFS server is behind a firewall.
+          '';
+        };
+
+        statdPort = mkOption {
+          type = types.nullOr types.int;
+          default = null;
+          example = 4000;
+          description = ''
+            Use a fixed port for <command>rpc.statd</command>. This is
+            useful if the NFS server is behind a firewall.
+          '';
+        };
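+        # When pinning mountd, lockd and statd to fixed ports on a firewalled
+        # server, the ports still have to be opened; a sketch (111 is rpcbind,
+        # 2049 is nfsd, the rest mirror the examples above):
+        #   networking.firewall.allowedTCPPorts = [ 111 2049 4000 4001 4002 ];
+        #   networking.firewall.allowedUDPPorts = [ 111 2049 4000 4001 4002 ];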
+
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    services.nfs.extraConfig = ''
+      [nfsd]
+      threads=${toString cfg.nproc}
+      ${optionalString (cfg.hostName != null) "host=${cfg.hostName}"}
+      ${cfg.extraNfsdConfig}
+
+      [mountd]
+      ${optionalString (cfg.mountdPort != null) "port=${toString cfg.mountdPort}"}
+
+      [statd]
+      ${optionalString (cfg.statdPort != null) "port=${toString cfg.statdPort}"}
+
+      [lockd]
+      ${optionalString (cfg.lockdPort != null) ''
+        port=${toString cfg.lockdPort}
+        udp-port=${toString cfg.lockdPort}
+      ''}
+    '';
+
+    services.rpcbind.enable = true;
+
+    boot.supportedFilesystems = [ "nfs" ]; # needed for statd and idmapd
+
+    environment.etc.exports.source = exports;
+
+    systemd.services.nfs-server =
+      { enable = true;
+        wantedBy = [ "multi-user.target" ];
+
+        preStart =
+          ''
+            mkdir -p /var/lib/nfs/v4recovery
+          '';
+      };
+
+    systemd.services.nfs-mountd =
+      { enable = true;
+        restartTriggers = [ exports ];
+
+        preStart =
+          ''
+            mkdir -p /var/lib/nfs
+
+            ${optionalString cfg.createMountPoints
+              ''
+                # create export directories:
+                # skip comments, take first col which may either be a quoted
+                # "foo bar" or just foo (-> man export)
+                sed '/^#.*/d;s/^"\([^"]*\)".*/\1/;t;s/[ ].*//' ${exports} \
+                | xargs -d '\n' mkdir -p
+              ''
+            }
+          '';
+      };
+
+  };
+
+}
diff --git a/nixos/modules/services/network-filesystems/openafs/client.nix b/nixos/modules/services/network-filesystems/openafs/client.nix
new file mode 100644
index 00000000000..c8cc5052c2a
--- /dev/null
+++ b/nixos/modules/services/network-filesystems/openafs/client.nix
@@ -0,0 +1,252 @@
+{ config, lib, pkgs, ... }:
+
+# openafsMod, openafsBin, mkCellServDB
+with import ./lib.nix { inherit config lib pkgs; };
+
+let
+  inherit (lib) getBin literalExpression mkOption mkIf optionalString singleton types;
+
+  cfg = config.services.openafsClient;
+
+  cellServDB = pkgs.fetchurl {
+    url = "http://dl.central.org/dl/cellservdb/CellServDB.2018-05-14";
+    sha256 = "1wmjn6mmyy2r8p10nlbdzs4nrqxy8a9pjyrdciy5nmppg4053rk2";
+  };
+
+  clientServDB = pkgs.writeText "client-cellServDB-${cfg.cellName}" (mkCellServDB cfg.cellName cfg.cellServDB);
+
+  afsConfig = pkgs.runCommand "afsconfig" { preferLocalBuild = true; } ''
+    mkdir -p $out
+    echo ${cfg.cellName} > $out/ThisCell
+    cat ${cellServDB} ${clientServDB} > $out/CellServDB
+    echo "${cfg.mountPoint}:${cfg.cache.directory}:${toString cfg.cache.blocks}" > $out/cacheinfo
+  '';
+
+in
+{
+  ###### interface
+
+  options = {
+
+    services.openafsClient = {
+
+      enable = mkOption {
+        default = false;
+        type = types.bool;
+        description = "Whether to enable the OpenAFS client.";
+      };
+
+      afsdb = mkOption {
+        default = true;
+        type = types.bool;
+        description = "Resolve cells via AFSDB DNS records.";
+      };
+
+      cellName = mkOption {
+        default = "";
+        type = types.str;
+        description = "Cell name.";
+        example = "grand.central.org";
+      };
+
+      cellServDB = mkOption {
+        default = [];
+        type = with types; listOf (submodule { options = cellServDBConfig; });
+        description = ''
+          This cell's database server records, added to the global
+          CellServDB. See the CellServDB(5) man page for syntax. Ignored when
+          <literal>afsdb</literal> is set to <literal>true</literal>.
+        '';
+        example = [
+          { ip = "1.2.3.4"; dnsname = "first.afsdb.server.dns.fqdn.org"; }
+          { ip = "2.3.4.5"; dnsname = "second.afsdb.server.dns.fqdn.org"; }
+        ];
+      };
+
+      cache = {
+        blocks = mkOption {
+          default = 100000;
+          type = types.int;
+          description = "Cache size in 1KB blocks.";
+        };
+
+        chunksize = mkOption {
+          default = 0;
+          type = types.ints.between 0 30;
+          description = ''
+            Size of each cache chunk given in powers of
+            2. <literal>0</literal> resets the chunk size to its default
+            values (13 (8 KB) for memcache, 18-20 (256 KB to 1 MB) for
+            diskcache). Maximum value is 30. Important performance
+            parameter. Set to higher values when dealing with large files.
+          '';
+        };
+
+        directory = mkOption {
+          default = "/var/cache/openafs";
+          type = types.str;
+          description = "Cache directory.";
+        };
+
+        diskless = mkOption {
+          default = false;
+          type = types.bool;
+          description = ''
+            Use in-memory cache for diskless machines. Has no real
+            performance benefit anymore.
+          '';
+        };
+      };
+
+      crypt = mkOption {
+        default = true;
+        type = types.bool;
+        description = "Whether to enable (weak) protocol encryption.";
+      };
+
+      daemons = mkOption {
+        default = 2;
+        type = types.int;
+        description = ''
+          Number of daemons to serve user requests. Numbers higher than 6
+          usually do not increase performance. Default is sufficient for up
+          to five concurrent users.
+        '';
+      };
+
+      fakestat = mkOption {
+        default = false;
+        type = types.bool;
+        description = ''
+          Return fake data on stat() calls. If <literal>true</literal>,
+          always do so. If <literal>false</literal>, only do so for
+          cross-cell mounts (as these are potentially expensive).
+        '';
+      };
+
+      inumcalc = mkOption {
+        default = "compat";
+        type = types.strMatching "compat|md5";
+        description = ''
+          Inode calculation method. <literal>compat</literal> is
+          computationally less expensive, but <literal>md5</literal> greatly
+          reduces the likelihood of inode collisions in larger scenarios
+          involving multiple cells mounted into one AFS space.
+        '';
+      };
+
+      mountPoint = mkOption {
+        default = "/afs";
+        type = types.str;
+        description = ''
+          Mountpoint of the AFS file tree, conventionally
+          <literal>/afs</literal>. When set to a different value, only
+          cross-cells that use the same value can be accessed.
+        '';
+      };
+
+      packages = {
+        module = mkOption {
+          default = config.boot.kernelPackages.openafs;
+          defaultText = literalExpression "config.boot.kernelPackages.openafs";
+          type = types.package;
+          description = "OpenAFS kernel module package. MUST match the userland package!";
+        };
+        programs = mkOption {
+          default = getBin pkgs.openafs;
+          defaultText = literalExpression "getBin pkgs.openafs";
+          type = types.package;
+          description = "OpenAFS programs package. MUST match the kernel module package!";
+        };
+      };
+
+      sparse = mkOption {
+        default = true;
+        type = types.bool;
+        description = "Minimal cell list in /afs.";
+      };
+
+      startDisconnected = mkOption {
+        default = false;
+        type = types.bool;
+        description = ''
+          Start up in disconnected mode.  You need to execute
+          <literal>fs disco online</literal> (as root) to switch to
+          connected mode. Useful for roaming devices.
+        '';
+      };
+
+    };
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    assertions = [
+      { assertion = cfg.afsdb || cfg.cellServDB != [];
+        message = "You should specify all cell-local database servers in config.services.openafsClient.cellServDB or set config.services.openafsClient.afsdb.";
+      }
+      { assertion = cfg.cellName != "";
+        message = "You must specify the local cell name in config.services.openafsClient.cellName.";
+      }
+    ];
+
+    environment.systemPackages = [ openafsBin ];
+
+    environment.etc = {
+      clientCellServDB = {
+        source = pkgs.runCommand "CellServDB" { preferLocalBuild = true; } ''
+          cat ${cellServDB} ${clientServDB} > $out
+        '';
+        target = "openafs/CellServDB";
+        mode = "0644";
+      };
+      clientCell = {
+        text = ''
+          ${cfg.cellName}
+        '';
+        target = "openafs/ThisCell";
+        mode = "0644";
+      };
+    };
+
+    systemd.services.afsd = {
+      description = "AFS client";
+      wantedBy = [ "multi-user.target" ];
+      after = singleton (if cfg.startDisconnected then "network.target" else "network-online.target");
+      serviceConfig = { RemainAfterExit = true; };
+      restartIfChanged = false;
+
+      preStart = ''
+        mkdir -p -m 0755 ${cfg.mountPoint}
+        mkdir -m 0700 -p ${cfg.cache.directory}
+        ${pkgs.kmod}/bin/insmod ${openafsMod}/lib/modules/*/extra/openafs/libafs.ko.xz
+        ${openafsBin}/sbin/afsd \
+          -mountdir ${cfg.mountPoint} \
+          -confdir ${afsConfig} \
+          ${optionalString (!cfg.cache.diskless) "-cachedir ${cfg.cache.directory}"} \
+          -blocks ${toString cfg.cache.blocks} \
+          -chunksize ${toString cfg.cache.chunksize} \
+          ${optionalString cfg.cache.diskless "-memcache"} \
+          -inumcalc ${cfg.inumcalc} \
+          ${if cfg.fakestat then "-fakestat-all" else "-fakestat"} \
+          ${if cfg.sparse then "-dynroot-sparse" else "-dynroot"} \
+          ${optionalString cfg.afsdb "-afsdb"}
+        ${openafsBin}/bin/fs setcrypt ${if cfg.crypt then "on" else "off"}
+        ${optionalString cfg.startDisconnected "${openafsBin}/bin/fs discon offline"}
+      '';
+
+      # Doing this in preStop, because after these commands AFS is basically
+      # stopped, so systemd has nothing to do, just noticing it.  If done in
+      # postStop, then we get a hang + kernel oops, because AFS can't be
+      # stopped simply by sending signals to processes.
+      preStop = ''
+        ${pkgs.util-linux}/bin/umount ${cfg.mountPoint}
+        ${openafsBin}/sbin/afsd -shutdown
+        ${pkgs.kmod}/sbin/rmmod libafs
+      '';
+    };
+  };
+}
diff --git a/nixos/modules/services/network-filesystems/openafs/lib.nix b/nixos/modules/services/network-filesystems/openafs/lib.nix
new file mode 100644
index 00000000000..e068ee761c2
--- /dev/null
+++ b/nixos/modules/services/network-filesystems/openafs/lib.nix
@@ -0,0 +1,33 @@
+{ config, lib, ...}:
+
+let
+  inherit (lib) concatStringsSep mkOption types;
+
+in {
+
+  mkCellServDB = cellName: db: ''
+    >${cellName}
+  '' + (concatStringsSep "\n" (map (dbm: if (dbm.ip != "" && dbm.dnsname != "") then dbm.ip + " #" + dbm.dnsname else "")
+                                   db))
+     + "\n";
+
+  # CellServDB configuration type
+  cellServDBConfig = {
+    ip = mkOption {
+      type = types.str;
+      default = "";
+      example = "1.2.3.4";
+      description = "IP Address of a database server";
+    };
+    dnsname = mkOption {
+      type = types.str;
+      default = "";
+      example = "afs.example.org";
+      description = "DNS full-qualified domain name of a database server";
+    };
+  };
+
+  openafsMod = config.services.openafsClient.packages.module;
+  openafsBin = config.services.openafsClient.packages.programs;
+  openafsSrv = config.services.openafsServer.package;
+}
diff --git a/nixos/modules/services/network-filesystems/openafs/server.nix b/nixos/modules/services/network-filesystems/openafs/server.nix
new file mode 100644
index 00000000000..9c974335def
--- /dev/null
+++ b/nixos/modules/services/network-filesystems/openafs/server.nix
@@ -0,0 +1,269 @@
+{ config, lib, pkgs, ... }:
+
+# openafsBin, openafsSrv, mkCellServDB
+with import ./lib.nix { inherit config lib pkgs; };
+
+let
+  inherit (lib) concatStringsSep literalExpression mkIf mkOption optionalString types;
+
+  bosConfig = pkgs.writeText "BosConfig" (''
+    restrictmode 1
+    restarttime 16 0 0 0 0
+    checkbintime 3 0 5 0 0
+  '' + (optionalString cfg.roles.database.enable ''
+    bnode simple vlserver 1
+    parm ${openafsSrv}/libexec/openafs/vlserver ${optionalString cfg.dottedPrincipals "-allow-dotted-principals"} ${cfg.roles.database.vlserverArgs}
+    end
+    bnode simple ptserver 1
+    parm ${openafsSrv}/libexec/openafs/ptserver ${optionalString cfg.dottedPrincipals "-allow-dotted-principals"} ${cfg.roles.database.ptserverArgs}
+    end
+  '') + (optionalString cfg.roles.fileserver.enable ''
+    bnode dafs dafs 1
+    parm ${openafsSrv}/libexec/openafs/dafileserver ${optionalString cfg.dottedPrincipals "-allow-dotted-principals"} -udpsize ${udpSizeStr} ${cfg.roles.fileserver.fileserverArgs}
+    parm ${openafsSrv}/libexec/openafs/davolserver ${optionalString cfg.dottedPrincipals "-allow-dotted-principals"} -udpsize ${udpSizeStr} ${cfg.roles.fileserver.volserverArgs}
+    parm ${openafsSrv}/libexec/openafs/salvageserver ${cfg.roles.fileserver.salvageserverArgs}
+    parm ${openafsSrv}/libexec/openafs/dasalvager ${cfg.roles.fileserver.salvagerArgs}
+    end
+  '') + (optionalString (cfg.roles.database.enable && cfg.roles.backup.enable) ''
+    bnode simple buserver 1
+    parm ${openafsSrv}/libexec/openafs/buserver ${cfg.roles.backup.buserverArgs} ${optionalString (cfg.roles.backup.cellServDB != []) "-cellservdb /etc/openafs/backup/"}
+    end
+  ''));
+
+  netInfo = if (cfg.advertisedAddresses != []) then
+    pkgs.writeText "NetInfo" ((concatStringsSep "\nf " cfg.advertisedAddresses) + "\n")
+  else null;
+
+  buCellServDB = pkgs.writeText "backup-cellServDB-${cfg.cellName}" (mkCellServDB cfg.cellName cfg.roles.backup.cellServDB);
+
+  cfg = config.services.openafsServer;
+
+  udpSizeStr = toString cfg.udpPacketSize;
+
+in {
+
+  options = {
+
+    services.openafsServer = {
+
+      enable = mkOption {
+        default = false;
+        type = types.bool;
+        description = ''
+          Whether to enable the OpenAFS server. An OpenAFS server needs a
+          complex setup. So, be aware that enabling this service and setting
+          some options does not give you a turn-key-ready solution. You need
+          at least a running Kerberos 5 setup, as OpenAFS relies on it for
+          authentication. See the Guide "QuickStartUnix" coming with
+          <literal>pkgs.openafs.doc</literal> for complete setup
+          instructions.
+        '';
+      };
+
+      advertisedAddresses = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        description = "List of IP addresses this server is advertised under. See NetInfo(5)";
+      };
+
+      cellName = mkOption {
+        default = "";
+        type = types.str;
+        description = "Cell name, this server will serve.";
+        example = "grand.central.org";
+      };
+
+      cellServDB = mkOption {
+        default = [];
+        type = with types; listOf (submodule [ { options = cellServDBConfig;} ]);
+        description = "Definition of all cell-local database server machines.";
+      };
+
+      package = mkOption {
+        default = pkgs.openafs.server or pkgs.openafs;
+        defaultText = literalExpression "pkgs.openafs.server or pkgs.openafs";
+        type = types.package;
+        description = "OpenAFS package for the server binaries";
+      };
+
+      roles = {
+        fileserver = {
+          enable = mkOption {
+            default = true;
+            type = types.bool;
+            description = "Fileserver role, serves files and volumes from its local storage.";
+          };
+
+          fileserverArgs = mkOption {
+            default = "-vattachpar 128 -vhashsize 11 -L -rxpck 400 -cb 1000000";
+            type = types.str;
+            description = "Arguments to the dafileserver process. See its man page.";
+          };
+
+          volserverArgs = mkOption {
+            default = "";
+            type = types.str;
+            description = "Arguments to the davolserver process. See its man page.";
+            example = "-sync never";
+          };
+
+          salvageserverArgs = mkOption {
+            default = "";
+            type = types.str;
+            description = "Arguments to the salvageserver process. See its man page.";
+            example = "-showlog";
+          };
+
+          salvagerArgs = mkOption {
+            default = "";
+            type = types.str;
+            description = "Arguments to the dasalvager process. See its man page.";
+            example = "-showlog -showmounts";
+          };
+        };
+
+        database = {
+          enable = mkOption {
+            default = true;
+            type = types.bool;
+            description = ''
+              Database server role, maintains the Volume Location Database,
+              Protection Database (and Backup Database, see
+              <literal>backup</literal> role). There can be multiple
+              servers in the database role for replication, which then need
+              reliable network connection to each other.
+
+              Servers in this role appear in AFSDB DNS records or the
+              CellServDB.
+            '';
+          };
+
+          vlserverArgs = mkOption {
+            default = "";
+            type = types.str;
+            description = "Arguments to the vlserver process. See its man page.";
+            example = "-rxbind";
+          };
+
+          ptserverArgs = mkOption {
+            default = "";
+            type = types.str;
+            description = "Arguments to the ptserver process. See its man page.";
+            example = "-restricted -default_access S---- S-M---";
+          };
+        };
+
+        backup = {
+          enable = mkOption {
+            default = false;
+            type = types.bool;
+            description = ''
+              Backup server role. Use in conjunction with the
+              <literal>database</literal> role to maintain the Backup
+              Database. Normally only used in conjunction with tape storage
+              or IBM's Tivoli Storage Manager.
+            '';
+          };
+
+          buserverArgs = mkOption {
+            default = "";
+            type = types.str;
+            description = "Arguments to the buserver process. See its man page.";
+            example = "-p 8";
+          };
+
+          cellServDB = mkOption {
+            default = [];
+            type = with types; listOf (submodule [ { options = cellServDBConfig;} ]);
+            description = ''
+              Definition of all cell-local backup database server machines.
+              Use this when your cell uses less backup database servers than
+              other database server machines.
+            '';
+          };
+        };
+      };
+
+      dottedPrincipals = mkOption {
+        default = false;
+        type = types.bool;
+        description = ''
+          If enabled, allow principal names containing dots (.). Enabling
+          this has security implications!
+        '';
+      };
+
+      udpPacketSize = mkOption {
+        default = 1310720;
+        type = types.int;
+        description = ''
+          UDP packet size to use in Bytes. Higher values can speed up
+          communications. The default of 1.25 MB is sufficient in most
+          cases. Make sure to increase the kernel's UDP buffer size
+          accordingly via <literal>net.core(w|r|opt)mem_max</literal>
+          sysctl.
+        '';
+      };
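+      # To benefit from large packets the kernel's UDP buffers usually have
+      # to be raised to match, e.g. (a sketch using standard NixOS options):
+      #   boot.kernel.sysctl."net.core.rmem_max" = 1310720;
+      #   boot.kernel.sysctl."net.core.wmem_max" = 1310720;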
+
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+
+    assertions = [
+      { assertion = cfg.cellServDB != [];
+        message = "You must specify all cell-local database servers in config.services.openafsServer.cellServDB.";
+      }
+      { assertion = cfg.cellName != "";
+        message = "You must specify the local cell name in config.services.openafsServer.cellName.";
+      }
+    ];
+
+    environment.systemPackages = [ openafsBin ];
+
+    environment.etc = {
+      bosConfig = {
+        source = bosConfig;
+        target = "openafs/BosConfig";
+        mode = "0644";
+      };
+      cellServDB = {
+        text = mkCellServDB cfg.cellName cfg.cellServDB;
+        target = "openafs/server/CellServDB";
+        mode = "0644";
+      };
+      thisCell = {
+        text = cfg.cellName;
+        target = "openafs/server/ThisCell";
+        mode = "0644";
+      };
+      buCellServDB = {
+        enable = (cfg.roles.backup.cellServDB != []);
+        text = mkCellServDB cfg.cellName cfg.roles.backup.cellServDB;
+        target = "openafs/backup/CellServDB";
+      };
+    };
+
+    systemd.services = {
+      openafs-server = {
+        description = "OpenAFS server";
+        after = [ "network.target" ];
+        wantedBy = [ "multi-user.target" ];
+        restartIfChanged = false;
+        unitConfig.ConditionPathExists = [
+          "|/etc/openafs/server/KeyFileExt"
+        ];
+        preStart = ''
+          mkdir -m 0755 -p /var/openafs
+          ${optionalString (netInfo != null) "cp ${netInfo} /var/openafs/netInfo"}
+          ${optionalString (cfg.roles.backup.cellServDB != []) "cp ${buCellServDB} /var/openafs/backup-CellServDB"}
+        '';
+        serviceConfig = {
+          ExecStart = "${openafsBin}/bin/bosserver -nofork";
+          ExecStop = "${openafsBin}/bin/bos shutdown localhost -wait -localauth";
+        };
+      };
+    };
+  };
+}
diff --git a/nixos/modules/services/network-filesystems/orangefs/client.nix b/nixos/modules/services/network-filesystems/orangefs/client.nix
new file mode 100644
index 00000000000..36ea5af2168
--- /dev/null
+++ b/nixos/modules/services/network-filesystems/orangefs/client.nix
@@ -0,0 +1,96 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.orangefs.client;
+
+in {
+  ###### interface
+
+  options = {
+    services.orangefs.client = {
+      enable = mkEnableOption "OrangeFS client daemon";
+
+      extraOptions = mkOption {
+        type = with types; listOf str;
+        default = [];
+        description = "Extra command line options for pvfs2-client.";
+      };
+
+      fileSystems = mkOption {
+        description = ''
+          The orangefs file systems to be mounted.
+          This option is preferred over using <option>fileSystems</option> directly, since
+          the pvfs client service needs to be running for it to be mounted.
+        '';
+
+        example = [{
+          mountPoint = "/orangefs";
+          target = "tcp://server:3334/orangefs";
+        }];
+
+        type = with types; listOf (submodule ({ ... } : {
+          options = {
+
+            mountPoint = mkOption {
+              type = types.str;
+              default = "/orangefs";
+              description = "Mount point.";
+            };
+
+            options = mkOption {
+              type = with types; listOf str;
+              default = [];
+              description = "Mount options";
+            };
+
+            target = mkOption {
+              type = types.str;
+              example = "tcp://server:3334/orangefs";
+              description = "Target URL";
+            };
+          };
+        }));
+      };
+    };
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ pkgs.orangefs ];
+
+    boot.supportedFilesystems = [ "pvfs2" ];
+    boot.kernelModules = [ "orangefs" ];
+
+    systemd.services.orangefs-client = {
+      requires = [ "network-online.target" ];
+      after = [ "network-online.target" ];
+
+      serviceConfig = {
+        Type = "simple";
+
+        ExecStart = ''
+          ${pkgs.orangefs}/bin/pvfs2-client-core \
+            --logtype=syslog ${concatStringsSep " " cfg.extraOptions}
+        '';
+
+        TimeoutStopSec = "120";
+      };
+    };
+
+    systemd.mounts = map (fs: {
+      requires = [ "orangefs-client.service" ];
+      after = [ "orangefs-client.service" ];
+      bindsTo = [ "orangefs-client.service" ];
+      wantedBy = [ "remote-fs.target" ];
+      type = "pvfs2";
+      options = concatStringsSep "," fs.options;
+      what = fs.target;
+      where = fs.mountPoint;
+    }) cfg.fileSystems;
+  };
+}
+
diff --git a/nixos/modules/services/network-filesystems/orangefs/server.nix b/nixos/modules/services/network-filesystems/orangefs/server.nix
new file mode 100644
index 00000000000..621c2fe8f78
--- /dev/null
+++ b/nixos/modules/services/network-filesystems/orangefs/server.nix
@@ -0,0 +1,225 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.orangefs.server;
+
+  aliases = mapAttrsToList (alias: url: alias) cfg.servers;
+
+  # Maximum handle number is 2^63 - 2
+  maxHandle = 9223372036854775806;
+
+  # One range of handles for each meta/data instance
+  handleStep = maxHandle / (length aliases) / 2;
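+  # With two aliases, for instance, the meta handle ranges below come out as
+  # 3..(handleStep + 2) and (handleStep + 3)..(2 * handleStep + 2); the data
+  # ranges follow in the upper half of the handle space.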
+
+  fileSystems = mapAttrsToList (name: fs: ''
+    <FileSystem>
+      Name ${name}
+      ID ${toString fs.id}
+      RootHandle ${toString fs.rootHandle}
+
+      ${fs.extraConfig}
+
+      <MetaHandleRanges>
+      ${concatStringsSep "\n" (
+          imap0 (i: alias:
+            let
+              begin = i * handleStep + 3;
+              end = begin + handleStep - 1;
+            in "Range ${alias} ${toString begin}-${toString end}") aliases
+       )}
+      </MetaHandleRanges>
+
+      <DataHandleRanges>
+      ${concatStringsSep "\n" (
+          imap0 (i: alias:
+            let
+              begin = i * handleStep + 3 + (length aliases) * handleStep;
+              end = begin + handleStep - 1;
+            in "Range ${alias} ${toString begin}-${toString end}") aliases
+       )}
+      </DataHandleRanges>
+
+      <StorageHints>
+      TroveSyncMeta ${if fs.troveSyncMeta then "yes" else "no"}
+      TroveSyncData ${if fs.troveSyncData then "yes" else "no"}
+      ${fs.extraStorageHints}
+      </StorageHints>
+
+    </FileSystem>
+  '') cfg.fileSystems;
+
+  configFile = ''
+    <Defaults>
+    LogType ${cfg.logType}
+    DataStorageSpace ${cfg.dataStorageSpace}
+    MetaDataStorageSpace ${cfg.metadataStorageSpace}
+
+    BMIModules ${concatStringsSep "," cfg.BMIModules}
+    ${cfg.extraDefaults}
+    </Defaults>
+
+    ${cfg.extraConfig}
+
+    <Aliases>
+    ${concatStringsSep "\n" (mapAttrsToList (alias: url: "Alias ${alias} ${url}") cfg.servers)}
+    </Aliases>
+
+    ${concatStringsSep "\n" fileSystems}
+  '';
+
+in {
+  ###### interface
+
+  options = {
+    services.orangefs.server = {
+      enable = mkEnableOption "OrangeFS server";
+
+      logType = mkOption {
+        type = with types; enum [ "file" "syslog" ];
+        default = "syslog";
+        description = "Destination for log messages.";
+      };
+
+      dataStorageSpace = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        example = "/data/storage";
+        description = "Directory for data storage.";
+      };
+
+      metadataStorageSpace = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        example = "/data/meta";
+        description = "Directory for meta data storage.";
+      };
+
+      BMIModules = mkOption {
+        type = with types; listOf str;
+        default = [ "bmi_tcp" ];
+        example = [ "bmi_tcp" "bmi_ib"];
+        description = "List of BMI modules to load.";
+      };
+
+      extraDefaults = mkOption {
+        type = types.lines;
+        default = "";
+        description = "Extra config for <literal>&lt;Defaults&gt;</literal> section.";
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = "Extra config for the global section.";
+      };
+
+      servers = mkOption {
+        type = with types; attrsOf types.str;
+        default = {};
+        example = {
+          node1 = "tcp://node1:3334";
+          node2 = "tcp://node2:3334";
+        };
+        description = "URLs for storage server including port. The attribute names define the server alias.";
+      };
+
+      fileSystems = mkOption {
+        description = ''
+          These options will create the <literal>&lt;FileSystem&gt;</literal> sections of config file.
+        '';
+        default = { orangefs = {}; };
+        example = literalExpression ''
+          {
+            fs1 = {
+              id = 101;
+            };
+
+            fs2 = {
+              id = 102;
+            };
+          }
+        '';
+        type = with types; attrsOf (submodule ({ ... } : {
+          options = {
+            id = mkOption {
+              type = types.int;
+              default = 1;
+              description = "File system ID (must be unique within configuration).";
+            };
+
+            rootHandle = mkOption {
+              type = types.int;
+              default = 3;
+              description = "File system root ID.";
+            };
+
+            extraConfig = mkOption {
+              type = types.lines;
+              default = "";
+              description = "Extra config for <literal>&lt;FileSystem&gt;</literal> section.";
+            };
+
+            troveSyncMeta = mkOption {
+              type = types.bool;
+              default = true;
+              description = "Sync meta data.";
+            };
+
+            troveSyncData = mkOption {
+              type = types.bool;
+              default = false;
+              description = "Sync data.";
+            };
+
+            extraStorageHints = mkOption {
+              type = types.lines;
+              default = "";
+              description = "Extra config for <literal>&lt;StorageHints&gt;</literal> section.";
+            };
+          };
+        }));
+      };
+    };
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ pkgs.orangefs ];
+
+    # The orangefs daemon will run as this unprivileged user.
+    users.users.orangefs = {
+      isSystemUser = true;
+      group = "orangfs";
+    };
+    users.groups.orangefs = {};
+
+    # To format the file system, the config file is needed on disk.
+    environment.etc."orangefs/server.conf" = {
+      text = configFile;
+      user = "orangefs";
+      group = "orangefs";
+    };
+
+    systemd.services.orangefs-server = {
+      wantedBy = [ "multi-user.target" ];
+      requires = [ "network-online.target" ];
+      after = [ "network-online.target" ];
+
+      serviceConfig = {
+        # Run as "simple" in forground mode.
+        # This is more reliable
+        ExecStart = ''
+          ${pkgs.orangefs}/bin/pvfs2-server -d \
+            /etc/orangefs/server.conf
+        '';
+        TimeoutStopSec = "120";
+        User = "orangefs";
+        Group = "orangefs";
+      };
+    };
+  };
+
+}
diff --git a/nixos/modules/services/network-filesystems/rsyncd.nix b/nixos/modules/services/network-filesystems/rsyncd.nix
new file mode 100644
index 00000000000..e72f9b54cd6
--- /dev/null
+++ b/nixos/modules/services/network-filesystems/rsyncd.nix
@@ -0,0 +1,128 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.rsyncd;
+  settingsFormat = pkgs.formats.ini { };
+  configFile = settingsFormat.generate "rsyncd.conf" cfg.settings;
+in {
+  options = {
+    services.rsyncd = {
+
+      enable = mkEnableOption "the rsync daemon";
+
+      port = mkOption {
+        default = 873;
+        type = types.port;
+        description = "TCP port the daemon will listen on.";
+      };
+
+      settings = mkOption {
+        inherit (settingsFormat) type;
+        default = { };
+        example = {
+          global = {
+            uid = "nobody";
+            gid = "nobody";
+            "use chroot" = true;
+            "max connections" = 4;
+          };
+          ftp = {
+            path = "/var/ftp/./pub";
+            comment = "whole ftp area";
+          };
+          cvs = {
+            path = "/data/cvs";
+            comment = "CVS repository (requires authentication)";
+            "auth users" = [ "tridge" "susan" ];
+            "secrets file" = "/etc/rsyncd.secrets";
+          };
+        };
+        description = ''
+          Configuration for rsyncd. See
+          <citerefentry><refentrytitle>rsyncd.conf</refentrytitle>
+          <manvolnum>5</manvolnum></citerefentry>.
+        '';
+      };
+
+      socketActivated = mkOption {
+        default = false;
+        type = types.bool;
+        description =
+          "If enabled Rsync will be socket-activated rather than run persistently.";
+      };
+
+    };
+  };
+
+  imports = (map (option:
+    mkRemovedOptionModule [ "services" "rsyncd" option ]
+    "This option was removed in favor of `services.rsyncd.settings`.") [
+      "address"
+      "extraConfig"
+      "motd"
+      "user"
+      "group"
+    ]);
+
+  config = mkIf cfg.enable {
+
+    services.rsyncd.settings.global.port = toString cfg.port;
+
+    systemd = let
+      serviceConfigSecurity = {
+        ProtectSystem = "full";
+        PrivateDevices = "on";
+        NoNewPrivileges = "on";
+      };
+    in {
+      services.rsync = {
+        enable = !cfg.socketActivated;
+        aliases = [ "rsyncd.service" ];
+
+        description = "fast remote file copy program daemon";
+        after = [ "network.target" ];
+        documentation = [ "man:rsync(1)" "man:rsyncd.conf(5)" ];
+
+        serviceConfig = serviceConfigSecurity // {
+          ExecStart =
+            "${pkgs.rsync}/bin/rsync --daemon --no-detach --config=${configFile}";
+          RestartSec = 1;
+        };
+
+        wantedBy = [ "multi-user.target" ];
+      };
+
+      services."rsync@" = {
+        description = "fast remote file copy program daemon";
+        after = [ "network.target" ];
+
+        serviceConfig = serviceConfigSecurity // {
+          ExecStart = "${pkgs.rsync}/bin/rsync --daemon --config=${configFile}";
+          StandardInput = "socket";
+          StandardOutput = "inherit";
+          StandardError = "journal";
+        };
+      };
+
+      sockets.rsync = {
+        enable = cfg.socketActivated;
+
+        description = "socket for fast remote file copy program daemon";
+        conflicts = [ "rsync.service" ];
+
+        listenStreams = [ (toString cfg.port) ];
+        socketConfig.Accept = true;
+
+        wantedBy = [ "sockets.target" ];
+      };
+    };
+
+  };
+
+  meta.maintainers = with lib.maintainers; [ ehmry ];
+
+  # TODO: socket activated rsyncd
+
+}
diff --git a/nixos/modules/services/network-filesystems/samba-wsdd.nix b/nixos/modules/services/network-filesystems/samba-wsdd.nix
new file mode 100644
index 00000000000..800ef448d37
--- /dev/null
+++ b/nixos/modules/services/network-filesystems/samba-wsdd.nix
@@ -0,0 +1,124 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.samba-wsdd;
+
+in {
+  options = {
+    services.samba-wsdd = {
+      enable = mkEnableOption ''
+        Web Services Dynamic Discovery host daemon. This enables (Samba) hosts, like your local NAS device,
+        to be found by Web Service Discovery Clients like Windows.
+        <note>
+          <para>If you use the firewall consider adding the following:</para>
+          <programlisting>
+            networking.firewall.allowedTCPPorts = [ 5357 ];
+            networking.firewall.allowedUDPPorts = [ 3702 ];
+          </programlisting>
+        </note>
+      '';
+      interface = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        example = "eth0";
+        description = "Interface or address to use.";
+      };
+      hoplimit = mkOption {
+        type = types.nullOr types.int;
+        default = null;
+        example = 2;
+        description = "Hop limit for multicast packets (default = 1).";
+      };
+      workgroup = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        example = "HOME";
+        description = "Set workgroup name (default WORKGROUP).";
+      };
+      hostname = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        example = "FILESERVER";
+        description = "Override (NetBIOS) hostname to be used (default hostname).";
+      };
+      domain = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = "Set domain name (disables workgroup).";
+      };
+      discovery = mkOption {
+        type = types.bool;
+        default = false;
+        description = "Enable discovery operation mode.";
+      };
+      listen = mkOption {
+        type = types.str;
+        default = "/run/wsdd/wsdd.sock";
+        description = "Listen on path or localhost port in discovery mode.";
+      };
+      extraOptions = mkOption {
+        type = types.listOf types.str;
+        default = [ "--shortlog" ];
+        example = [ "--verbose" "--no-http" "--ipv4only" "--no-host" ];
+        description = "Additional wsdd options.";
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+
+    environment.systemPackages = [ pkgs.wsdd ];
+
+    systemd.services.samba-wsdd = {
+      description = "Web Services Dynamic Discovery host daemon";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        DynamicUser = true;
+        Type = "simple";
+        ExecStart = ''
+          ${pkgs.wsdd}/bin/wsdd ${optionalString (cfg.interface != null) "--interface '${cfg.interface}'"} \
+                                ${optionalString (cfg.hoplimit != null) "--hoplimit '${toString cfg.hoplimit}'"} \
+                                ${optionalString (cfg.workgroup != null) "--workgroup '${cfg.workgroup}'"} \
+                                ${optionalString (cfg.hostname != null) "--hostname '${cfg.hostname}'"} \
+                                ${optionalString (cfg.domain != null) "--domain '${cfg.domain}'"} \
+                                ${optionalString cfg.discovery "--discovery --listen '${cfg.listen}'"} \
+                                ${escapeShellArgs cfg.extraOptions}
+        '';
+        # Runtime directory and mode
+        RuntimeDirectory = "wsdd";
+        RuntimeDirectoryMode = "0750";
+        # Access write directories
+        UMask = "0027";
+        # Capabilities
+        CapabilityBoundingSet = "";
+        # Security
+        NoNewPrivileges = true;
+        # Sandboxing
+        ProtectSystem = "strict";
+        ProtectHome = true;
+        PrivateTmp = true;
+        PrivateDevices = true;
+        PrivateUsers = false;
+        ProtectHostname = true;
+        ProtectClock = true;
+        ProtectKernelTunables = true;
+        ProtectKernelModules = true;
+        ProtectKernelLogs = true;
+        ProtectControlGroups = true;
+        RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" "AF_INET6" "AF_NETLINK" ];
+        RestrictNamespaces = true;
+        LockPersonality = true;
+        MemoryDenyWriteExecute = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        PrivateMounts = true;
+        # System Call Filtering
+        SystemCallArchitectures = "native";
+        SystemCallFilter = "~@cpu-emulation @debug @mount @obsolete @privileged @resources";
+      };
+    };
+  };
+}
diff --git a/nixos/modules/services/network-filesystems/samba.nix b/nixos/modules/services/network-filesystems/samba.nix
new file mode 100644
index 00000000000..9ed755d0465
--- /dev/null
+++ b/nixos/modules/services/network-filesystems/samba.nix
@@ -0,0 +1,252 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  smbToString = x: if builtins.typeOf x == "bool"
+                   then boolToString x
+                   else toString x;
+
+  cfg = config.services.samba;
+
+  samba = cfg.package;
+
+  shareConfig = name:
+    let share = getAttr name cfg.shares; in
+    "[${name}]\n " + (smbToString (
+       map
+         (key: "${key} = ${smbToString (getAttr key share)}\n")
+         (attrNames share)
+    ));
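+  # For illustration, shares = { public = { path = "/srv/public"; "read only" = true; }; }
+  # renders roughly as:
+  #   [public]
+  #    path = /srv/public
+  #    read only = true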
+
+  configFile = pkgs.writeText "smb.conf"
+    (if cfg.configText != null then cfg.configText else
+    ''
+      [global]
+      security = ${cfg.securityType}
+      passwd program = /run/wrappers/bin/passwd %u
+      invalid users = ${smbToString cfg.invalidUsers}
+
+      ${cfg.extraConfig}
+
+      ${smbToString (map shareConfig (attrNames cfg.shares))}
+    '');
+
+  # This may include nss_ldap, needed for samba if it has to use ldap.
+  nssModulesPath = config.system.nssModules.path;
+
+  daemonService = appName: args:
+    { description = "Samba Service Daemon ${appName}";
+
+      after = [ (mkIf (cfg.enableNmbd && "${appName}" == "smbd") "samba-nmbd.service") ];
+      requiredBy = [ "samba.target" ];
+      partOf = [ "samba.target" ];
+
+      environment = {
+        LD_LIBRARY_PATH = nssModulesPath;
+        LOCALE_ARCHIVE = "/run/current-system/sw/lib/locale/locale-archive";
+      };
+
+      serviceConfig = {
+        ExecStart = "${samba}/sbin/${appName} --foreground --no-process-group ${args}";
+        ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+        LimitNOFILE = 16384;
+        PIDFile = "/run/${appName}.pid";
+        Type = "notify";
+        NotifyAccess = "all"; #may not do anything...
+      };
+      unitConfig.RequiresMountsFor = "/var/lib/samba";
+
+      restartTriggers = [ configFile ];
+    };
+
+in
+
+{
+  imports = [
+    (mkRemovedOptionModule [ "services" "samba" "defaultShare" ] "")
+    (mkRemovedOptionModule [ "services" "samba" "syncPasswordsByPam" ] "This option has been removed by upstream, see https://bugzilla.samba.org/show_bug.cgi?id=10669#c10")
+  ];
+
+  ###### interface
+
+  options = {
+
+    # !!! clean up the descriptions.
+
+    services.samba = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Whether to enable Samba, which provides file and print
+          services to Windows clients through the SMB/CIFS protocol.
+
+          <note>
+            <para>If you use the firewall consider adding the following:</para>
+          <programlisting>
+            services.samba.openFirewall = true;
+          </programlisting>
+          </note>
+        '';
+      };
+
+      openFirewall = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Whether to automatically open the necessary ports in the firewall.
+        '';
+      };
+
+      enableNmbd = mkOption {
+        type = types.bool;
+        default = true;
+        description = ''
+          Whether to enable Samba's nmbd, which replies to NetBIOS over IP name
+          service requests. It also participates in the browsing protocols
+          which make up the Windows "Network Neighborhood" view.
+        '';
+      };
+
+      enableWinbindd = mkOption {
+        type = types.bool;
+        default = true;
+        description = ''
+          Whether to enable Samba's winbindd, which provides a number of services
+          to the Name Service Switch capability found in most modern C libraries,
+          to arbitrary applications via PAM and ntlm_auth and to Samba itself.
+        '';
+      };
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.samba;
+        defaultText = literalExpression "pkgs.samba";
+        example = literalExpression "pkgs.samba4Full";
+        description = ''
+          Defines which package should be used for the samba server.
+        '';
+      };
+
+      invalidUsers = mkOption {
+        type = types.listOf types.str;
+        default = [ "root" ];
+        description = ''
+          List of users who are denied login via Samba.
+        '';
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = ''
+          Additional global section and extra section lines go in here.
+        '';
+        example = ''
+          guest account = nobody
+          map to guest = bad user
+        '';
+      };
+
+      configText = mkOption {
+        type = types.nullOr types.lines;
+        default = null;
+        description = ''
+          Verbatim contents of smb.conf. If null (default), use the
+          autogenerated file from NixOS instead.
+        '';
+      };
+
+      securityType = mkOption {
+        type = types.str;
+        default = "user";
+        description = "Samba security type.";
+      };
+
+      nsswins = mkOption {
+        default = false;
+        type = types.bool;
+        description = ''
+          Whether to enable the WINS NSS (Name Service Switch) plug-in.
+          Enabling it allows applications to resolve WINS/NetBIOS names (a.k.a.
+          Windows machine names) by transparently querying the winbindd daemon.
+        '';
+      };
+
+      shares = mkOption {
+        default = {};
+        description = ''
+          A set describing shared resources.
+          See <command>man smb.conf</command> for options.
+        '';
+        type = types.attrsOf (types.attrsOf types.unspecified);
+        example = literalExpression ''
+          { public =
+            { path = "/srv/public";
+              "read only" = true;
+              browseable = "yes";
+              "guest ok" = "yes";
+              comment = "Public samba share.";
+            };
+          }
+        '';
+      };
+
+    };
+
+  };
+
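+  # A minimal usage sketch (hypothetical host configuration, not part of
+  # this module): enable the server, open the firewall and export one
+  # read-only guest share.
+  #
+  #   services.samba = {
+  #     enable = true;
+  #     openFirewall = true;
+  #     shares.public = {
+  #       path = "/srv/public";
+  #       "read only" = true;
+  #       "guest ok" = "yes";
+  #     };
+  #   };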
+
+  ###### implementation
+
+  config = mkMerge
+    [ { assertions =
+          [ { assertion = cfg.nsswins -> cfg.enableWinbindd;
+              message   = "If services.samba.nsswins is enabled, then services.samba.enableWinbindd must also be enabled.";
+            }
+          ];
+        # Always provide a smb.conf to shut up programs like smbclient and smbspool.
+        environment.etc."samba/smb.conf".source = mkOptionDefault (
+          if cfg.enable then configFile
+          else pkgs.writeText "smb-dummy.conf" "# Samba is disabled."
+        );
+      }
+
+      (mkIf cfg.enable {
+
+        system.nssModules = optional cfg.nsswins samba;
+        system.nssDatabases.hosts = optional cfg.nsswins "wins";
+
+        systemd = {
+          targets.samba = {
+            description = "Samba Server";
+            after = [ "network.target" ];
+            wantedBy = [ "multi-user.target" ];
+          };
+          # Refer to https://github.com/samba-team/samba/tree/master/packaging/systemd
+          # for correct use with systemd
+          services = {
+            samba-smbd = daemonService "smbd" "";
+            samba-nmbd = mkIf cfg.enableNmbd (daemonService "nmbd" "");
+            samba-winbindd = mkIf cfg.enableWinbindd (daemonService "winbindd" "");
+          };
+          tmpfiles.rules = [
+            "d /var/lock/samba - - - - -"
+            "d /var/log/samba - - - - -"
+            "d /var/cache/samba - - - - -"
+            "d /var/lib/samba/private - - - - -"
+          ];
+        };
+
+        security.pam.services.samba = {};
+        environment.systemPackages = [ cfg.package ];
+
+        networking.firewall.allowedTCPPorts = mkIf cfg.openFirewall [ 139 445 ];
+        networking.firewall.allowedUDPPorts = mkIf cfg.openFirewall [ 137 138 ];
+      })
+    ];
+
+}
diff --git a/nixos/modules/services/network-filesystems/tahoe.nix b/nixos/modules/services/network-filesystems/tahoe.nix
new file mode 100644
index 00000000000..5426463dffa
--- /dev/null
+++ b/nixos/modules/services/network-filesystems/tahoe.nix
@@ -0,0 +1,366 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.services.tahoe;
+in
+  {
+    options.services.tahoe = {
+      introducers = mkOption {
+        default = {};
+        type = with types; attrsOf (submodule {
+          options = {
+            nickname = mkOption {
+              type = types.str;
+              description = ''
+                The nickname of this Tahoe introducer.
+              '';
+            };
+            tub.port = mkOption {
+              default = 3458;
+              type = types.int;
+              description = ''
+                The port on which the introducer will listen.
+              '';
+            };
+            tub.location = mkOption {
+              default = null;
+              type = types.nullOr types.str;
+              description = ''
+                The external location that the introducer should listen on.
+
+                If specified, the port should be included.
+              '';
+            };
+            package = mkOption {
+              default = pkgs.tahoelafs;
+              defaultText = literalExpression "pkgs.tahoelafs";
+              type = types.package;
+              description = ''
+                The package to use for the Tahoe LAFS daemon.
+              '';
+            };
+          };
+        });
+        description = ''
+          The Tahoe introducers.
+        '';
+      };
+      nodes = mkOption {
+        default = {};
+        type = with types; attrsOf (submodule {
+          options = {
+            nickname = mkOption {
+              type = types.str;
+              description = ''
+                The nickname of this Tahoe node.
+              '';
+            };
+            tub.port = mkOption {
+              default = 3457;
+              type = types.int;
+              description = ''
+                The port on which the tub will listen.
+
+                This is the correct setting to tweak if you want Tahoe's storage
+                system to listen on a different port.
+              '';
+            };
+            tub.location = mkOption {
+              default = null;
+              type = types.nullOr types.str;
+              description = ''
+                The external location that the node should listen on.
+
+                This is the setting to tweak if there are multiple interfaces
+                and you want to alter which interface Tahoe is advertising.
+
+                If specified, the port should be included.
+              '';
+            };
+            web.port = mkOption {
+              default = 3456;
+              type = types.int;
+              description = ''
+                The port on which the Web server will listen.
+
+                This is the correct setting to tweak if you want Tahoe's WUI to
+                listen on a different port.
+              '';
+            };
+            client.introducer = mkOption {
+              default = null;
+              type = types.nullOr types.str;
+              description = ''
+                The furl for a Tahoe introducer node.
+
+                Like all furls, keep this safe and don't share it.
+              '';
+            };
+            client.helper = mkOption {
+              default = null;
+              type = types.nullOr types.str;
+              description = ''
+                The furl for a Tahoe helper node.
+
+                Like all furls, keep this safe and don't share it.
+              '';
+            };
+            client.shares.needed = mkOption {
+              default = 3;
+              type = types.int;
+              description = ''
+                The number of shares required to reconstitute a file.
+              '';
+            };
+            client.shares.happy = mkOption {
+              default = 7;
+              type = types.int;
+              description = ''
+                The number of distinct storage servers that must hold shares
+                for an upload to be considered successful.
+              '';
+            };
+            client.shares.total = mkOption {
+              default = 10;
+              type = types.int;
+              description = ''
+                The total number of shares created for each file.
+              '';
+            };
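+            # With the defaults above, each file is erasure-coded into 10
+            # shares, any 3 of which suffice to reconstruct it; an upload is
+            # considered healthy once shares reach 7 distinct servers.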
+            storage.enable = mkEnableOption "storage service";
+            storage.reservedSpace = mkOption {
+              default = "1G";
+              type = types.str;
+              description = ''
+                The amount of filesystem space to leave free, i.e. not to use
+                for storage.
+              '';
+            };
+            helper.enable = mkEnableOption "helper service";
+            sftpd.enable = mkEnableOption "SFTP service";
+            sftpd.port = mkOption {
+              default = null;
+              type = types.nullOr types.int;
+              description = ''
+                The port on which the SFTP server will listen.
+
+                This is the correct setting to tweak if you want Tahoe's SFTP
+                daemon to listen on a different port.
+              '';
+            };
+            sftpd.hostPublicKeyFile = mkOption {
+              default = null;
+              type = types.nullOr types.path;
+              description = ''
+                Path to the SSH host public key.
+              '';
+            };
+            sftpd.hostPrivateKeyFile = mkOption {
+              default = null;
+              type = types.nullOr types.path;
+              description = ''
+                Path to the SSH host private key.
+              '';
+            };
+            sftpd.accounts.file = mkOption {
+              default = null;
+              type = types.nullOr types.path;
+              description = ''
+                Path to the accounts file.
+              '';
+            };
+            sftpd.accounts.url = mkOption {
+              default = null;
+              type = types.nullOr types.str;
+              description = ''
+                URL of the accounts server.
+              '';
+            };
+            package = mkOption {
+              default = pkgs.tahoelafs;
+              defaultText = literalExpression "pkgs.tahoelafs";
+              type = types.package;
+              description = ''
+                The package to use for the Tahoe LAFS daemon.
+              '';
+            };
+          };
+        });
+        description = ''
+          The Tahoe nodes.
+        '';
+      };
+    };
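+    # A hedged usage sketch (hypothetical nickname and furl): one node that
+    # connects to an introducer and offers local storage.
+    #
+    #   services.tahoe.nodes.storage1 = {
+    #     nickname = "storage1";
+    #     client.introducer = "pb://<tubid>@<host>:<port>/introducer";
+    #     storage.enable = true;
+    #   };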
+    config = mkMerge [
+      (mkIf (cfg.introducers != {}) {
+        environment = {
+          etc = flip mapAttrs' cfg.introducers (node: settings:
+            nameValuePair "tahoe-lafs/introducer-${node}.cfg" {
+              mode = "0444";
+              text = ''
+                # This configuration is generated by Nix. Edit at your own
+                # peril; here be dragons.
+
+                [node]
+                nickname = ${settings.nickname}
+                tub.port = ${toString settings.tub.port}
+                ${optionalString (settings.tub.location != null)
+                  "tub.location = ${settings.tub.location}"}
+              '';
+            });
+          # Actually require Tahoe, so that we will have it installed.
+          systemPackages = flip mapAttrsToList cfg.introducers (node: settings:
+            settings.package
+          );
+        };
+        # Open up the firewall.
+        # networking.firewall.allowedTCPPorts = flip mapAttrsToList cfg.introducers
+        #   (node: settings: settings.tub.port);
+        systemd.services = flip mapAttrs' cfg.introducers (node: settings:
+          let
+            pidfile = "/run/tahoe.introducer-${node}.pid";
+            # This is a directory, but it has no trailing slash. Tahoe commands
+            # get antsy when there's a trailing slash.
+            nodedir = "/var/db/tahoe-lafs/introducer-${node}";
+          in nameValuePair "tahoe.introducer-${node}" {
+            description = "Tahoe LAFS node ${node}";
+            wantedBy = [ "multi-user.target" ];
+            path = [ settings.package ];
+            restartTriggers = [
+              config.environment.etc."tahoe-lafs/introducer-${node}.cfg".source ];
+            serviceConfig = {
+              Type = "simple";
+              PIDFile = pidfile;
+              # Believe it or not, Tahoe is very brittle about the order of
+              # arguments to $(tahoe run). The node directory must come first,
+              # and arguments which alter Twisted's behavior come afterwards.
+              ExecStart = ''
+                ${settings.package}/bin/tahoe run ${lib.escapeShellArg nodedir} --pidfile=${lib.escapeShellArg pidfile}
+              '';
+            };
+            preStart = ''
+              if [ ! -d ${lib.escapeShellArg nodedir} ]; then
+                mkdir -p /var/db/tahoe-lafs
+                # See https://github.com/NixOS/nixpkgs/issues/25273
+                tahoe create-introducer \
+                  --hostname="${config.networking.hostName}" \
+                  ${lib.escapeShellArg nodedir}
+              fi
+
+              # Tahoe has created a predefined tahoe.cfg which we must now
+              # scribble over.
+              # XXX I thought that a symlink would work here, but it doesn't, so
+              # we must do this on every prestart. Fixes welcome.
+              # rm ${nodedir}/tahoe.cfg
+              # ln -s /etc/tahoe-lafs/introducer-${node}.cfg ${nodedir}/tahoe.cfg
+              cp /etc/tahoe-lafs/introducer-"${node}".cfg ${lib.escapeShellArg nodedir}/tahoe.cfg
+            '';
+          });
+        users.users = flip mapAttrs' cfg.introducers (node: _:
+          nameValuePair "tahoe.introducer-${node}" {
+            description = "Tahoe node user for introducer ${node}";
+            isSystemUser = true;
+          });
+      })
+      (mkIf (cfg.nodes != {}) {
+        environment = {
+          etc = flip mapAttrs' cfg.nodes (node: settings:
+            nameValuePair "tahoe-lafs/${node}.cfg" {
+              mode = "0444";
+              text = ''
+                # This configuration is generated by Nix. Edit at your own
+                # peril; here be dragons.
+
+                [node]
+                nickname = ${settings.nickname}
+                tub.port = ${toString settings.tub.port}
+                ${optionalString (settings.tub.location != null)
+                  "tub.location = ${settings.tub.location}"}
+                # This is a Twisted endpoint. Twisted Web doesn't work on
+                # non-TCP. ~ C.
+                web.port = tcp:${toString settings.web.port}
+
+                [client]
+                ${optionalString (settings.client.introducer != null)
+                  "introducer.furl = ${settings.client.introducer}"}
+                ${optionalString (settings.client.helper != null)
+                  "helper.furl = ${settings.client.helper}"}
+
+                shares.needed = ${toString settings.client.shares.needed}
+                shares.happy = ${toString settings.client.shares.happy}
+                shares.total = ${toString settings.client.shares.total}
+
+                [storage]
+                enabled = ${boolToString settings.storage.enable}
+                reserved_space = ${settings.storage.reservedSpace}
+
+                [helper]
+                enabled = ${boolToString settings.helper.enable}
+
+                [sftpd]
+                enabled = ${boolToString settings.sftpd.enable}
+                ${optionalString (settings.sftpd.port != null)
+                  "port = ${toString settings.sftpd.port}"}
+                ${optionalString (settings.sftpd.hostPublicKeyFile != null)
+                  "host_pubkey_file = ${settings.sftpd.hostPublicKeyFile}"}
+                ${optionalString (settings.sftpd.hostPrivateKeyFile != null)
+                  "host_privkey_file = ${settings.sftpd.hostPrivateKeyFile}"}
+                ${optionalString (settings.sftpd.accounts.file != null)
+                  "accounts.file = ${settings.sftpd.accounts.file}"}
+                ${optionalString (settings.sftpd.accounts.url != null)
+                  "accounts.url = ${settings.sftpd.accounts.url}"}
+              '';
+            });
+          # Actually require Tahoe, so that we will have it installed.
+          systemPackages = flip mapAttrsToList cfg.nodes (node: settings:
+            settings.package
+          );
+        };
+        # Open up the firewall.
+        # networking.firewall.allowedTCPPorts = flip mapAttrsToList cfg.nodes
+        #   (node: settings: settings.tub.port);
+        systemd.services = flip mapAttrs' cfg.nodes (node: settings:
+          let
+            pidfile = "/run/tahoe.${node}.pid";
+            # This is a directory, but it has no trailing slash. Tahoe commands
+            # get antsy when there's a trailing slash.
+            nodedir = "/var/db/tahoe-lafs/${node}";
+          in nameValuePair "tahoe.${node}" {
+            description = "Tahoe LAFS node ${node}";
+            wantedBy = [ "multi-user.target" ];
+            path = [ settings.package ];
+            restartTriggers = [
+              config.environment.etc."tahoe-lafs/${node}.cfg".source ];
+            serviceConfig = {
+              Type = "simple";
+              PIDFile = pidfile;
+              # Believe it or not, Tahoe is very brittle about the order of
+              # arguments to $(tahoe run). The node directory must come first,
+              # and arguments which alter Twisted's behavior come afterwards.
+              ExecStart = ''
+                ${settings.package}/bin/tahoe run ${lib.escapeShellArg nodedir} --pidfile=${lib.escapeShellArg pidfile}
+              '';
+            };
+            preStart = ''
+              if [ ! -d ${lib.escapeShellArg nodedir} ]; then
+                mkdir -p /var/db/tahoe-lafs
+                tahoe create-node --hostname=localhost ${lib.escapeShellArg nodedir}
+              fi
+
+              # Tahoe has created a predefined tahoe.cfg which we must now
+              # scribble over.
+              # XXX I thought that a symlink would work here, but it doesn't, so
+              # we must do this on every prestart. Fixes welcome.
+              # rm ${nodedir}/tahoe.cfg
+              # ln -s /etc/tahoe-lafs/${lib.escapeShellArg node}.cfg ${nodedir}/tahoe.cfg
+              cp /etc/tahoe-lafs/${lib.escapeShellArg node}.cfg ${lib.escapeShellArg nodedir}/tahoe.cfg
+            '';
+          });
+        users.users = flip mapAttrs' cfg.nodes (node: _:
+          nameValuePair "tahoe.${node}" {
+            description = "Tahoe node user for node ${node}";
+            isSystemUser = true;
+          });
+      })
+    ];
+  }
diff --git a/nixos/modules/services/network-filesystems/u9fs.nix b/nixos/modules/services/network-filesystems/u9fs.nix
new file mode 100644
index 00000000000..77961b78cad
--- /dev/null
+++ b/nixos/modules/services/network-filesystems/u9fs.nix
@@ -0,0 +1,78 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.u9fs;
+in
+{
+
+  options = {
+
+    services.u9fs = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = "Whether to run the u9fs 9P server for Unix.";
+      };
+
+      listenStreams = mkOption {
+        type = types.listOf types.str;
+        default = [ "564" ];
+        example = [ "192.168.16.1:564" ];
+        description = ''
+          Sockets to listen for clients on.
+          See <command>man 5 systemd.socket</command> for socket syntax.
+        '';
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "nobody";
+        description = "User to run u9fs under.";
+      };
+
+      extraArgs = mkOption {
+        type = types.str;
+        default = "";
+        example = "-a none";
+        description = ''
+          Extra arguments to pass on invocation;
+          see <command>man 4 u9fs</command>.
+        '';
+      };
+
+    };
+
+  };
+
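+  # Usage sketch (hypothetical address): serve 9P without authentication on
+  # a single interface; each accepted connection spawns one u9fs@ instance.
+  #
+  #   services.u9fs = {
+  #     enable = true;
+  #     listenStreams = [ "192.168.16.1:564" ];
+  #     extraArgs = "-a none";
+  #   };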
+  config = mkIf cfg.enable {
+
+    systemd = {
+      sockets.u9fs = {
+        description = "U9fs Listening Socket";
+        wantedBy = [ "sockets.target" ];
+        after = [ "network.target" ];
+        inherit (cfg) listenStreams;
+        socketConfig.Accept = "yes";
+      };
+      services."u9fs@" = {
+        description = "9P Protocol Server";
+        reloadIfChanged = true;
+        requires = [ "u9fs.socket" ];
+        serviceConfig =
+          { ExecStart = "-${pkgs.u9fs}/bin/u9fs ${cfg.extraArgs}";
+            StandardInput = "socket";
+            StandardError = "journal";
+            User = cfg.user;
+            AmbientCapabilities = "cap_setuid cap_setgid";
+          };
+      };
+    };
+
+  };
+
+}
diff --git a/nixos/modules/services/network-filesystems/webdav-server-rs.nix b/nixos/modules/services/network-filesystems/webdav-server-rs.nix
new file mode 100644
index 00000000000..1c5c299cb67
--- /dev/null
+++ b/nixos/modules/services/network-filesystems/webdav-server-rs.nix
@@ -0,0 +1,144 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.services.webdav-server-rs;
+  format = pkgs.formats.toml { };
+  settings = recursiveUpdate
+    {
+      server.uid = config.users.users."${cfg.user}".uid;
+      server.gid = config.users.groups."${cfg.group}".gid;
+    }
+    cfg.settings;
+in
+{
+  options = {
+    services.webdav-server-rs = {
+      enable = mkEnableOption "WebDAV server";
+
+      user = mkOption {
+        type = types.str;
+        default = "webdav";
+        description = "User to run under when setuid is not enabled.";
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "webdav";
+        description = "Group to run under when setuid is not enabled.";
+      };
+
+      settings = mkOption {
+        type = format.type;
+        default = { };
+        description = ''
+          Attrset that is converted and passed as the config file. The
+          available options are documented
+          <link xlink:href="https://github.com/miquels/webdav-server-rs/blob/master/webdav-server.toml">here</link>.
+        '';
+        example = literalExpression ''
+          {
+            server.listen = [ "0.0.0.0:4918" "[::]:4918" ];
+            accounts = {
+              auth-type = "htpasswd.default";
+              acct-type = "unix";
+            };
+            htpasswd.default = {
+              htpasswd = "/etc/htpasswd";
+            };
+            location = [
+              {
+                route = [ "/public/*path" ];
+                directory = "/srv/public";
+                handler = "filesystem";
+                methods = [ "webdav-ro" ];
+                autoindex = true;
+                auth = "false";
+              }
+              {
+                route = [ "/user/:user/*path" ];
+                directory = "~";
+                handler = "filesystem";
+                methods = [ "webdav-rw" ];
+                autoindex = true;
+                auth = "true";
+                setuid = true;
+              }
+            ];
+          }
+        '';
+      };
+
+      configFile = mkOption {
+        type = types.path;
+        default = format.generate "webdav-server.toml" settings;
+        defaultText = "Config file generated from services.webdav-server-rs.settings";
+        description = ''
+          Path to the config file. If this option is set, it will override any
+          configuration done in services.webdav-server-rs.settings.
+        '';
+        example = "/etc/webdav-server.toml";
+      };
+    };
+  };
+
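+  # A hedged note on the settings example above: the htpasswd file it
+  # references can be created with e.g. the apacheHttpd tools
+  # (`htpasswd -B -c /etc/htpasswd alice`); keep it out of the Nix store.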
+  config = mkIf cfg.enable {
+    assertions = [
+      {
+        assertion = hasAttr cfg.user config.users.users && config.users.users."${cfg.user}".uid != null;
+        message = "users.users.${cfg.user} and users.users.${cfg.user}.uid must be defined.";
+      }
+      {
+        assertion = hasAttr cfg.group config.users.groups && config.users.groups."${cfg.group}".gid != null;
+        message = "users.groups.${cfg.group} and users.groups.${cfg.group}.gid must be defined.";
+      }
+    ];
+
+    users.users = optionalAttrs (cfg.user == "webdav") {
+      webdav = {
+        description = "WebDAV user";
+        group = cfg.group;
+        uid = config.ids.uids.webdav;
+      };
+    };
+
+    users.groups = optionalAttrs (cfg.group == "webdav") {
+      webdav.gid = config.ids.gids.webdav;
+    };
+
+    systemd.services.webdav-server-rs = {
+      description = "WebDAV server";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        ExecStart = "${pkgs.webdav-server-rs}/bin/webdav-server -c ${cfg.configFile}";
+
+        CapabilityBoundingSet = [
+          "CAP_SETUID"
+          "CAP_SETGID"
+        ];
+
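+        # Forbid executing anything outside the Nix store.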
+        NoExecPaths = [ "/" ];
+        ExecPaths = [ "/nix/store" ];
+
+        # At startup, the program detects whether it is running as root and,
+        # if so, uses that privilege to switch the process uid to the
+        # respective unix user whenever a user logs in. Maybe we can enable
+        # DynamicUser in the future, once the program can detect the
+        # CAP_SETUID and CAP_SETGID capabilities instead.
+
+        NoNewPrivileges = true;
+        PrivateDevices = true;
+        PrivateTmp = true;
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectSystem = true;
+      };
+    };
+  };
+
+  meta.maintainers = with maintainers; [ pmy ];
+}
diff --git a/nixos/modules/services/network-filesystems/webdav.nix b/nixos/modules/services/network-filesystems/webdav.nix
new file mode 100644
index 00000000000..a810af40fd4
--- /dev/null
+++ b/nixos/modules/services/network-filesystems/webdav.nix
@@ -0,0 +1,107 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.services.webdav;
+  format = pkgs.formats.yaml { };
+in
+{
+  options = {
+    services.webdav = {
+      enable = mkEnableOption "WebDAV server";
+
+      user = mkOption {
+        type = types.str;
+        default = "webdav";
+        description = "User account under which WebDAV runs.";
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "webdav";
+        description = "Group under which WebDAV runs.";
+      };
+
+      settings = mkOption {
+        type = format.type;
+        default = { };
+        description = ''
+          Attrset that is converted and passed as the config file. The
+          available options are documented
+          <link xlink:href="https://github.com/hacdias/webdav">here</link>.
+
+          This program supports reading username and password configuration
+          from environment variables, so it's strongly recommended to store
+          username and password in a separate
+          <link xlink:href="https://www.freedesktop.org/software/systemd/man/systemd.exec.html#EnvironmentFile=">EnvironmentFile</link>.
+          This prevents adding secrets to the world-readable Nix store.
+        '';
+        example = literalExpression ''
+          {
+              address = "0.0.0.0";
+              port = 8080;
+              scope = "/srv/public";
+              modify = true;
+              auth = true;
+              users = [
+                {
+                  username = "{env}ENV_USERNAME";
+                  password = "{env}ENV_PASSWORD";
+                }
+              ];
+          }
+        '';
+      };
+
+      configFile = mkOption {
+        type = types.path;
+        default = format.generate "webdav.yaml" cfg.settings;
+        defaultText = "Config file generated from services.webdav.settings";
+        description = ''
+          Path to the config file. If this option is set, it will override any
+          configuration done in services.webdav.settings.
+        '';
+        example = "/etc/webdav/config.yaml";
+      };
+
+      environmentFile = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        description = ''
+          Environment file as defined in <citerefentry>
+          <refentrytitle>systemd.exec</refentrytitle><manvolnum>5</manvolnum>
+          </citerefentry>.
+        '';
+      };
+    };
+  };
+
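+  # Usage sketch: with the `{env}` placeholders from the settings example
+  # above, a matching environment file (hypothetical contents, referenced
+  # via services.webdav.environmentFile) would contain:
+  #
+  #   ENV_USERNAME=alice
+  #   ENV_PASSWORD=correct-horse-battery-staple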
+  config = mkIf cfg.enable {
+    users.users = mkIf (cfg.user == "webdav") {
+      webdav = {
+        description = "WebDAV daemon user";
+        group = cfg.group;
+        uid = config.ids.uids.webdav;
+      };
+    };
+
+    users.groups = mkIf (cfg.group == "webdav") {
+      webdav.gid = config.ids.gids.webdav;
+    };
+
+    systemd.services.webdav = {
+      description = "WebDAV server";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        ExecStart = "${pkgs.webdav}/bin/webdav -c ${cfg.configFile}";
+        Restart = "on-failure";
+        User = cfg.user;
+        Group = cfg.group;
+        EnvironmentFile = mkIf (cfg.environmentFile != null) [ cfg.environmentFile ];
+      };
+    };
+  };
+
+  meta.maintainers = with maintainers; [ pmy ];
+}
diff --git a/nixos/modules/services/network-filesystems/xtreemfs.nix b/nixos/modules/services/network-filesystems/xtreemfs.nix
new file mode 100644
index 00000000000..fc072311578
--- /dev/null
+++ b/nixos/modules/services/network-filesystems/xtreemfs.nix
@@ -0,0 +1,495 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.xtreemfs;
+
+  xtreemfs = pkgs.xtreemfs;
+
+  home = cfg.homeDir;
+
+  startupScript = class: configPath: pkgs.writeScript "xtreemfs-service.sh" ''
+    #! ${pkgs.runtimeShell}
+    JAVA_HOME="${pkgs.jdk}"
+    JAVADIR="${xtreemfs}/share/java"
+    JAVA_CALL="$JAVA_HOME/bin/java -ea -cp $JAVADIR/XtreemFS.jar:$JAVADIR/BabuDB.jar:$JAVADIR/Flease.jar:$JAVADIR/protobuf-java-2.5.0.jar:$JAVADIR/Foundation.jar:$JAVADIR/jdmkrt.jar:$JAVADIR/jdmktk.jar:$JAVADIR/commons-codec-1.3.jar"
+    $JAVA_CALL ${class} ${configPath}
+  '';
+
+  dirReplicationConfig = pkgs.writeText "xtreemfs-dir-replication-plugin.properties" ''
+    babudb.repl.backupDir = ${home}/server-repl-dir
+    plugin.jar = ${xtreemfs}/share/java/BabuDB_replication_plugin.jar
+    babudb.repl.dependency.0 = ${xtreemfs}/share/java/Flease.jar
+
+    ${cfg.dir.replication.extraConfig}
+  '';
+
+  dirConfig = pkgs.writeText "xtreemfs-dir-config.properties" ''
+    uuid = ${cfg.dir.uuid}
+    listen.port = ${toString cfg.dir.port}
+    ${optionalString (cfg.dir.address != "") "listen.address = ${cfg.dir.address}"}
+    http_port = ${toString cfg.dir.httpPort}
+    babudb.baseDir = ${home}/dir/database
+    babudb.logDir = ${home}/dir/db-log
+    babudb.sync = ${if cfg.dir.replication.enable then "FDATASYNC" else cfg.dir.syncMode}
+
+    ${optionalString cfg.dir.replication.enable "babudb.plugin.0 = ${dirReplicationConfig}"}
+
+    ${cfg.dir.extraConfig}
+  '';
+
+  mrcReplicationConfig = pkgs.writeText "xtreemfs-mrc-replication-plugin.properties" ''
+    babudb.repl.backupDir = ${home}/server-repl-mrc
+    plugin.jar = ${xtreemfs}/share/java/BabuDB_replication_plugin.jar
+    babudb.repl.dependency.0 = ${xtreemfs}/share/java/Flease.jar
+
+    ${cfg.mrc.replication.extraConfig}
+  '';
+
+  mrcConfig = pkgs.writeText "xtreemfs-mrc-config.properties" ''
+    uuid = ${cfg.mrc.uuid}
+    listen.port = ${toString cfg.mrc.port}
+    ${optionalString (cfg.mrc.address != "") "listen.address = ${cfg.mrc.address}"}
+    http_port = ${toString cfg.mrc.httpPort}
+    babudb.baseDir = ${home}/mrc/database
+    babudb.logDir = ${home}/mrc/db-log
+    babudb.sync = ${if cfg.mrc.replication.enable then "FDATASYNC" else cfg.mrc.syncMode}
+
+    ${optionalString cfg.mrc.replication.enable "babudb.plugin.0 = ${mrcReplicationConfig}"}
+
+    ${cfg.mrc.extraConfig}
+  '';
+
+  osdConfig = pkgs.writeText "xtreemfs-osd-config.properties" ''
+    uuid = ${cfg.osd.uuid}
+    listen.port = ${toString cfg.osd.port}
+    ${optionalString (cfg.osd.address != "") "listen.address = ${cfg.osd.address}"}
+    http_port = ${toString cfg.osd.httpPort}
+    object_dir = ${home}/osd/
+
+    ${cfg.osd.extraConfig}
+  '';
+
+  optionalDir = optionals cfg.dir.enable ["xtreemfs-dir.service"];
+
+  systemdOptionalDependencies = {
+    after = [ "network.target" ] ++ optionalDir;
+    wantedBy = [ "multi-user.target" ] ++ optionalDir;
+  };
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.xtreemfs = {
+
+      enable = mkEnableOption "XtreemFS";
+
+      homeDir = mkOption {
+        type = types.path;
+        default = "/var/lib/xtreemfs";
+        description = ''
+          XtreemFS home dir for the xtreemfs user.
+        '';
+      };
+
+      dir = {
+        enable = mkOption {
+          type = types.bool;
+          default = true;
+          description = ''
+            Whether to enable XtreemFS DIR service.
+          '';
+        };
+
+        uuid = mkOption {
+          example = "eacb6bab-f444-4ebf-a06a-3f72d7465e40";
+          type = types.str;
+          description = ''
+            Must be set to a unique identifier, preferably a UUID according to
+            RFC 4122. UUIDs can be generated with `uuidgen` command, found in
+            the `util-linux` package.
+          '';
+        };
+        port = mkOption {
+          default = 32638;
+          type = types.port;
+          description = ''
+            The port to listen on for incoming connections (TCP).
+          '';
+        };
+        address = mkOption {
+          type = types.str;
+          example = "127.0.0.1";
+          default = "";
+          description = ''
+            If specified, it defines the interface to listen on. If not
+            specified, the service will listen on all interfaces (any).
+          '';
+        };
+        httpPort = mkOption {
+          default = 30638;
+          type = types.port;
+          description = ''
+            Specifies the listen port for the HTTP service that returns the
+            status page.
+          '';
+        };
+        syncMode = mkOption {
+          type = types.enum [ "ASYNC" "SYNC_WRITE_METADATA" "SYNC_WRITE" "FDATASYNC" "FSYNC" ];
+          default = "FSYNC";
+          example = "FDATASYNC";
+          description = ''
+            The sync mode influences how operations are committed to the disk
+            log before the operation is acknowledged to the caller.
+
+            - ASYNC: writes to the disk log are buffered in memory by the operating system. This is the fastest mode but will lead to data loss in case of a crash, kernel panic or power failure.
+            - SYNC_WRITE_METADATA: opens the file with O_SYNC so the system will not buffer any writes. The operation is acknowledged once the data has been safely written to disk. This mode is slow but offers maximum data safety. However, BabuDB cannot influence the disk drive caches; this depends on the OS and hard disk model.
+            - SYNC_WRITE: similar to SYNC_WRITE_METADATA but opens the file with O_DSYNC, which means that only the data is committed to disk. This can lead to some data loss depending on the implementation of the underlying file system. Linux does not implement this mode.
+            - FDATASYNC: similar to SYNC_WRITE but opens the file in asynchronous mode and calls fdatasync() after writing the data to disk.
+            - FSYNC: similar to SYNC_WRITE_METADATA but opens the file in asynchronous mode and calls fsync() after writing the data to disk.
+
+            For best throughput use ASYNC, for maximum data safety use FSYNC.
+
+            (If xtreemfs.dir.replication.enable is true then FDATASYNC is forced)
+          '';
+        };
+        extraConfig = mkOption {
+          type = types.lines;
+          default = "";
+          example = ''
+            # specify whether SSL is required
+            ssl.enabled = true
+            ssl.service_creds.pw = passphrase
+            ssl.service_creds.container = pkcs12
+            ssl.service_creds = /etc/xos/xtreemfs/truststore/certs/dir.p12
+            ssl.trusted_certs = /etc/xos/xtreemfs/truststore/certs/trusted.jks
+            ssl.trusted_certs.pw = jks_passphrase
+            ssl.trusted_certs.container = jks
+          '';
+          description = ''
+            Configuration of XtreemFS DIR service.
+            WARNING: configuration is saved as plaintext inside nix store.
+            For more options: http://www.xtreemfs.org/xtfs-guide-1.5.1/index.html
+          '';
+        };
+        replication = {
+          enable = mkEnableOption "XtreemFS DIR replication plugin";
+          extraConfig = mkOption {
+            type = types.lines;
+            example = ''
+              # participants of the replication including this replica
+              babudb.repl.participant.0 = 192.168.0.10
+              babudb.repl.participant.0.port = 35676
+              babudb.repl.participant.1 = 192.168.0.11
+              babudb.repl.participant.1.port = 35676
+              babudb.repl.participant.2 = 192.168.0.12
+              babudb.repl.participant.2.port = 35676
+
+              # number of servers that at least have to be up to date
+              # To have a fault-tolerant system, this value has to be set to the
+              # majority of nodes i.e., if you have three replicas, set this to 2
+              # Please note that a setup with two nodes provides no fault-tolerance.
+              babudb.repl.sync.n = 2
+
+              # specify whether SSL is required
+              babudb.ssl.enabled = true
+
+              babudb.ssl.protocol = tlsv12
+
+              # server credentials for SSL handshakes
+              babudb.ssl.service_creds = /etc/xos/xtreemfs/truststore/certs/osd.p12
+              babudb.ssl.service_creds.pw = passphrase
+              babudb.ssl.service_creds.container = pkcs12
+
+              # trusted certificates for SSL handshakes
+              babudb.ssl.trusted_certs = /etc/xos/xtreemfs/truststore/certs/trusted.jks
+              babudb.ssl.trusted_certs.pw = jks_passphrase
+              babudb.ssl.trusted_certs.container = jks
+
+              babudb.ssl.authenticationWithoutEncryption = false
+            '';
+            description = ''
+              Configuration of XtreemFS DIR replication plugin.
+              WARNING: configuration is saved as plaintext inside nix store.
+              For more options: http://www.xtreemfs.org/xtfs-guide-1.5.1/index.html
+            '';
+          };
+        };
+      };
+
+      mrc = {
+        enable = mkOption {
+          type = types.bool;
+          default = true;
+          description = ''
+            Whether to enable XtreemFS MRC service.
+          '';
+        };
+
+        uuid = mkOption {
+          example = "eacb6bab-f444-4ebf-a06a-3f72d7465e41";
+          type = types.str;
+          description = ''
+            Must be set to a unique identifier, preferably a UUID according to
+            RFC 4122. UUIDs can be generated with `uuidgen` command, found in
+            the `util-linux` package.
+          '';
+        };
+        port = mkOption {
+          default = 32636;
+          type = types.port;
+          description = ''
+            The port to listen on for incoming connections (TCP).
+          '';
+        };
+        address = mkOption {
+          example = "127.0.0.1";
+          type = types.str;
+          default = "";
+          description = ''
+            If specified, it defines the interface to listen on. If not
+            specified, the service will listen on all interfaces (any).
+          '';
+        };
+        httpPort = mkOption {
+          default = 30636;
+          type = types.port;
+          description = ''
+            Specifies the listen port for the HTTP service that returns the
+            status page.
+          '';
+        };
+        syncMode = mkOption {
+          default = "FSYNC";
+          type = types.enum [ "ASYNC" "SYNC_WRITE_METADATA" "SYNC_WRITE" "FDATASYNC" "FSYNC" ];
+          example = "FDATASYNC";
+          description = ''
+            The sync mode influences how operations are committed to the disk
+            log before the operation is acknowledged to the caller.
+
+            - ASYNC: writes to the disk log are buffered in memory by the operating system. This is the fastest mode but will lead to data loss in case of a crash, kernel panic or power failure.
+            - SYNC_WRITE_METADATA: opens the file with O_SYNC so the system will not buffer any writes. The operation is acknowledged once the data has been safely written to disk. This mode is slow but offers maximum data safety. However, BabuDB cannot influence the disk drive caches; this depends on the OS and hard disk model.
+            - SYNC_WRITE: similar to SYNC_WRITE_METADATA but opens the file with O_DSYNC, which means that only the data is committed to disk. This can lead to some data loss depending on the implementation of the underlying file system. Linux does not implement this mode.
+            - FDATASYNC: similar to SYNC_WRITE but opens the file in asynchronous mode and calls fdatasync() after writing the data to disk.
+            - FSYNC: similar to SYNC_WRITE_METADATA but opens the file in asynchronous mode and calls fsync() after writing the data to disk.
+
+            For best throughput use ASYNC, for maximum data safety use FSYNC.
+
+            (If xtreemfs.mrc.replication.enable is true then FDATASYNC is forced)
+          '';
+        };
+        extraConfig = mkOption {
+          type = types.lines;
+          example = ''
+            osd_check_interval = 300
+            no_atime = true
+            local_clock_renewal = 0
+            remote_time_sync = 30000
+            authentication_provider = org.xtreemfs.common.auth.NullAuthProvider
+
+            # shared secret between the MRC and all OSDs
+            capability_secret = iNG8UuQJrJ6XVDTe
+
+            dir_service.host = 192.168.0.10
+            dir_service.port = 32638
+
+            # if replication is enabled
+            dir_service.1.host = 192.168.0.11
+            dir_service.1.port = 32638
+            dir_service.2.host = 192.168.0.12
+            dir_service.2.port = 32638
+
+            # specify whether SSL is required
+            ssl.enabled = true
+            ssl.protocol = tlsv12
+            ssl.service_creds.pw = passphrase
+            ssl.service_creds.container = pkcs12
+            ssl.service_creds = /etc/xos/xtreemfs/truststore/certs/mrc.p12
+            ssl.trusted_certs = /etc/xos/xtreemfs/truststore/certs/trusted.jks
+            ssl.trusted_certs.pw = jks_passphrase
+            ssl.trusted_certs.container = jks
+          '';
+          description = ''
+            Configuration of XtreemFS MRC service.
+            WARNING: configuration is saved as plaintext inside nix store.
+            For more options: http://www.xtreemfs.org/xtfs-guide-1.5.1/index.html
+          '';
+        };
+        replication = {
+          enable = mkEnableOption "XtreemFS MRC replication plugin";
+          extraConfig = mkOption {
+            type = types.lines;
+            example = ''
+              # participants of the replication including this replica
+              babudb.repl.participant.0 = 192.168.0.10
+              babudb.repl.participant.0.port = 35678
+              babudb.repl.participant.1 = 192.168.0.11
+              babudb.repl.participant.1.port = 35678
+              babudb.repl.participant.2 = 192.168.0.12
+              babudb.repl.participant.2.port = 35678
+
+              # number of servers that at least have to be up to date
+              # To have a fault-tolerant system, this value has to be set to the
+              # majority of nodes i.e., if you have three replicas, set this to 2
+              # Please note that a setup with two nodes provides no fault-tolerance.
+              babudb.repl.sync.n = 2
+
+              # specify whether SSL is required
+              babudb.ssl.enabled = true
+
+              babudb.ssl.protocol = tlsv12
+
+              # server credentials for SSL handshakes
+              babudb.ssl.service_creds = /etc/xos/xtreemfs/truststore/certs/osd.p12
+              babudb.ssl.service_creds.pw = passphrase
+              babudb.ssl.service_creds.container = pkcs12
+
+              # trusted certificates for SSL handshakes
+              babudb.ssl.trusted_certs = /etc/xos/xtreemfs/truststore/certs/trusted.jks
+              babudb.ssl.trusted_certs.pw = jks_passphrase
+              babudb.ssl.trusted_certs.container = jks
+
+              babudb.ssl.authenticationWithoutEncryption = false
+            '';
+            description = ''
+              Configuration of XtreemFS MRC replication plugin.
+              WARNING: configuration is saved as plaintext inside nix store.
+              For more options: http://www.xtreemfs.org/xtfs-guide-1.5.1/index.html
+            '';
+          };
+        };
+      };
+
+      osd = {
+        enable = mkOption {
+          type = types.bool;
+          default = true;
+          description = ''
+            Whether to enable XtreemFS OSD service.
+          '';
+        };
+
+        uuid = mkOption {
+          example = "eacb6bab-f444-4ebf-a06a-3f72d7465e42";
+          type = types.str;
+          description = ''
+            Must be set to a unique identifier, preferably a UUID according to
+            RFC 4122. UUIDs can be generated with `uuidgen` command, found in
+            the `util-linux` package.
+          '';
+        };
+        port = mkOption {
+          default = 32640;
+          type = types.port;
+          description = ''
+            The port to listen on for incoming connections (TCP and UDP).
+          '';
+        };
+        address = mkOption {
+          example = "127.0.0.1";
+          type = types.str;
+          default = "";
+          description = ''
+            If specified, it defines the interface to listen on. If not
+            specified, the service will listen on all interfaces (any).
+          '';
+        };
+        httpPort = mkOption {
+          default = 30640;
+          type = types.port;
+          description = ''
+            Specifies the listen port for the HTTP service that returns the
+            status page.
+          '';
+        };
+        extraConfig = mkOption {
+          type = types.lines;
+          example = ''
+            local_clock_renewal = 0
+            remote_time_sync = 30000
+            report_free_space = true
+            capability_secret = iNG8UuQJrJ6XVDTe
+
+            dir_service.host = 192.168.0.10
+            dir_service.port = 32638
+
+            # if replication is used
+            dir_service.1.host = 192.168.0.11
+            dir_service.1.port = 32638
+            dir_service.2.host = 192.168.0.12
+            dir_service.2.port = 32638
+
+            # specify whether SSL is required
+            ssl.enabled = true
+            ssl.service_creds.pw = passphrase
+            ssl.service_creds.container = pkcs12
+            ssl.service_creds = /etc/xos/xtreemfs/truststore/certs/osd.p12
+            ssl.trusted_certs = /etc/xos/xtreemfs/truststore/certs/trusted.jks
+            ssl.trusted_certs.pw = jks_passphrase
+            ssl.trusted_certs.container = jks
+          '';
+          description = ''
+            Configuration of XtreemFS OSD service.
+            WARNING: configuration is saved as plaintext inside nix store.
+            For more options: http://www.xtreemfs.org/xtfs-guide-1.5.1/index.html
+          '';
+        };
+      };
+    };
+
+  };
+
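+  # A hedged usage sketch reusing the example UUIDs from the options above:
+  # a single host running all three services, with the MRC and OSD finding
+  # the local DIR via extraConfig.
+  #
+  #   services.xtreemfs = {
+  #     enable = true;
+  #     dir.uuid = "eacb6bab-f444-4ebf-a06a-3f72d7465e40";
+  #     mrc.uuid = "eacb6bab-f444-4ebf-a06a-3f72d7465e41";
+  #     mrc.extraConfig = ''
+  #       dir_service.host = 127.0.0.1
+  #       dir_service.port = 32638
+  #     '';
+  #     osd.uuid = "eacb6bab-f444-4ebf-a06a-3f72d7465e42";
+  #     osd.extraConfig = ''
+  #       dir_service.host = 127.0.0.1
+  #       dir_service.port = 32638
+  #     '';
+  #   };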
+
+  ###### implementation
+
+  config = lib.mkIf cfg.enable {
+
+    environment.systemPackages = [ xtreemfs ];
+
+    users.users.xtreemfs =
+      { uid = config.ids.uids.xtreemfs;
+        description = "XtreemFS user";
+        createHome = true;
+        home = home;
+      };
+
+    users.groups.xtreemfs =
+      { gid = config.ids.gids.xtreemfs;
+      };
+
+    systemd.services.xtreemfs-dir = mkIf cfg.dir.enable {
+      description = "XtreemFS-DIR Server";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        User = "xtreemfs";
+        ExecStart = "${startupScript "org.xtreemfs.dir.DIR" dirConfig}";
+      };
+    };
+
+    systemd.services.xtreemfs-mrc = mkIf cfg.mrc.enable ({
+      description = "XtreemFS-MRC Server";
+      serviceConfig = {
+        User = "xtreemfs";
+        ExecStart = "${startupScript "org.xtreemfs.mrc.MRC" mrcConfig}";
+      };
+    } // systemdOptionalDependencies);
+
+    systemd.services.xtreemfs-osd = mkIf cfg.osd.enable ({
+      description = "XtreemFS-OSD Server";
+      serviceConfig = {
+        User = "xtreemfs";
+        ExecStart = "${startupScript "org.xtreemfs.osd.OSD" osdConfig}";
+      };
+    } // systemdOptionalDependencies);
+
+  };
+
+}
diff --git a/nixos/modules/services/network-filesystems/yandex-disk.nix b/nixos/modules/services/network-filesystems/yandex-disk.nix
new file mode 100644
index 00000000000..a5b1f9d4ab6
--- /dev/null
+++ b/nixos/modules/services/network-filesystems/yandex-disk.nix
@@ -0,0 +1,116 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.yandex-disk;
+
+  dir = "/var/lib/yandex-disk";
+
+  u = if cfg.user != null then cfg.user else "yandexdisk";
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.yandex-disk = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Whether to enable the Yandex.Disk client. See https://disk.yandex.ru/
+        '';
+      };
+
+      username = mkOption {
+        default = "";
+        type = types.str;
+        description = ''
+          Your yandex.com login name.
+        '';
+      };
+
+      password = mkOption {
+        default = "";
+        type = types.str;
+        description = ''
+          Your yandex.com password. Warning: it will be world-readable in /nix/store.
+        '';
+      };
+
+      user = mkOption {
+        default = null;
+        type = types.nullOr types.str;
+        description = ''
+          The user the yandex-disk daemon should run as.
+        '';
+      };
+
+      directory = mkOption {
+        type = types.path;
+        default = "/home/Yandex.Disk";
+        description = "The directory to use for Yandex.Disk storage.";
+      };
+
+      excludes = mkOption {
+        default = "";
+        type = types.commas;
+        example = "data,backup";
+        description = ''
+          Comma-separated list of directories which are excluded from synchronization.
+        '';
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    users.users = mkIf (cfg.user == null) [ {
+      name = u;
+      uid = config.ids.uids.yandexdisk;
+      group = "nogroup";
+      home = dir;
+    } ];
+
+    systemd.services.yandex-disk = {
+      description = "Yandex.Disk client daemon";
+
+      after = [ "network.target" ];
+
+      wantedBy = [ "multi-user.target" ];
+
+      # FIXME: have to specify ${directory} here as well
+      unitConfig.RequiresMountsFor = dir;
+
+      script = ''
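+        # Create the private state directory and hand it to the service user.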
+        mkdir -p -m 700 ${dir}
+        chown ${u} ${dir}
+
+        if ! test -d "${cfg.directory}" ; then
+          (mkdir -p -m 755 ${cfg.directory} && chown ${u} ${cfg.directory}) ||
+            exit 1
+        fi
+
+        ${pkgs.su}/bin/su -s ${pkgs.runtimeShell} ${u} \
+          -c '${pkgs.yandex-disk}/bin/yandex-disk token -p ${cfg.password} ${cfg.username} ${dir}/token'
+
+        ${pkgs.su}/bin/su -s ${pkgs.runtimeShell} ${u} \
+          -c '${pkgs.yandex-disk}/bin/yandex-disk start --no-daemon -a ${dir}/token -d ${cfg.directory} --exclude-dirs=${cfg.excludes}'
+      '';
+
+    };
+  };
+
+}
+