Diffstat (limited to 'nixos/modules/services/cluster/hadoop/hdfs.nix')
-rw-r--r--  nixos/modules/services/cluster/hadoop/hdfs.nix | 295 ++++++++++++-----------
1 file changed, 148 insertions(+), 147 deletions(-)
diff --git a/nixos/modules/services/cluster/hadoop/hdfs.nix b/nixos/modules/services/cluster/hadoop/hdfs.nix
index be667aa82d8..325a002ad32 100644
--- a/nixos/modules/services/cluster/hadoop/hdfs.nix
+++ b/nixos/modules/services/cluster/hadoop/hdfs.nix
@@ -1,191 +1,191 @@
-{ config, lib, pkgs, ...}:
+{ config, lib, pkgs, ... }:
 with lib;
 let
   cfg = config.services.hadoop;
+
+  # Config files for hadoop services
   hadoopConf = "${import ./conf.nix { inherit cfg pkgs lib; }}/";
-  restartIfChanged = mkOption {
-    type = types.bool;
-    description = ''
-      Automatically restart the service on config change.
-      This can be set to false to defer restarts on clusters running critical applications.
-      Please consider the security implications of inadvertently running an older version,
-      and the possibility of unexpected behavior caused by inconsistent versions across a cluster when disabling this option.
-    '';
-    default = false;
-  };
+
+  # Generator for HDFS service options
+  hadoopServiceOption = { serviceName, firewallOption ? true, extraOpts ? null }: {
+    enable = mkEnableOption serviceName;
+    restartIfChanged = mkOption {
+      type = types.bool;
+      description = ''
+        Automatically restart the service on config change.
+        This can be set to false to defer restarts on clusters running critical applications.
+        Please consider the security implications of inadvertently running an older version,
+        and the possibility of unexpected behavior caused by inconsistent versions across a cluster when disabling this option.
+      '';
+      default = false;
+    };
+    extraFlags = mkOption {
+      type = with types; listOf str;
+      default = [];
+      description = "Extra command line flags to pass to ${serviceName}";
+      example = [
+        "-Dcom.sun.management.jmxremote"
+        "-Dcom.sun.management.jmxremote.port=8010"
+      ];
+    };
+    extraEnv = mkOption {
+      type = with types; attrsOf str;
+      default = {};
+      description = "Extra environment variables for ${serviceName}";
+    };
+  } // (optionalAttrs firewallOption {
+    openFirewall = mkOption {
+      type = types.bool;
+      default = false;
+      description = "Open firewall ports for ${serviceName}.";
+    };
+  }) // (optionalAttrs (extraOpts != null) extraOpts);
+
+  # Generator for HDFS service configs
+  hadoopServiceConfig =
+    { name
+    , serviceOptions ? cfg.hdfs."${toLower name}"
+    , description ? "Hadoop HDFS ${name}"
+    , User ? "hdfs"
+    , allowedTCPPorts ? [ ]
+    , preStart ? ""
+    , environment ? { }
+    , extraConfig ? { }
+    }: (
+
+      mkIf serviceOptions.enable (mkMerge [{
+        systemd.services."hdfs-${toLower name}" = {
+          inherit description preStart;
+          environment = environment // serviceOptions.extraEnv;
+          wantedBy = [ "multi-user.target" ];
+          inherit (serviceOptions) restartIfChanged;
+          serviceConfig = {
+            inherit User;
+            SyslogIdentifier = "hdfs-${toLower name}";
+            ExecStart = "${cfg.package}/bin/hdfs --config ${hadoopConf} ${toLower name} ${escapeShellArgs serviceOptions.extraFlags}";
+            Restart = "always";
+          };
+        };
+
+        services.hadoop.gatewayRole.enable = true;
+
+        networking.firewall.allowedTCPPorts = mkIf
+          ((builtins.hasAttr "openFirewall" serviceOptions) && serviceOptions.openFirewall)
+          allowedTCPPorts;
+      } extraConfig])
+    );
+
 in
 {
   options.services.hadoop.hdfs = {
-    namenode = {
-      enable = mkEnableOption "Whether to run the HDFS NameNode";
+
+    namenode = hadoopServiceOption { serviceName = "HDFS NameNode"; } // {
       formatOnInit = mkOption {
         type = types.bool;
         default = false;
         description = ''
-          Format HDFS namenode on first start. This is useful for quickly spinning up ephemeral HDFS clusters with a single namenode.
-          For HA clusters, initialization involves multiple steps across multiple nodes. Follow [this guide](https://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-hdfs/HDFSHighAvailabilityWithQJM.html)
-          to initialize an HA cluster manually.
-        '';
-      };
-      inherit restartIfChanged;
-      openFirewall = mkOption {
-        type = types.bool;
-        default = true;
-        description = ''
-          Open firewall ports for namenode
-        '';
-      };
-    };
-    datanode = {
-      enable = mkEnableOption "Whether to run the HDFS DataNode";
-      inherit restartIfChanged;
-      openFirewall = mkOption {
-        type = types.bool;
-        default = true;
-        description = ''
-          Open firewall ports for datanode
+          Format HDFS namenode on first start. This is useful for quickly spinning up
+          ephemeral HDFS clusters with a single namenode.
+          For HA clusters, initialization involves multiple steps across multiple nodes.
+          Follow this guide to initialize an HA cluster manually:
+          <link xlink:href="https://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-hdfs/HDFSHighAvailabilityWithQJM.html"/>
         '';
       };
     };
-    journalnode = {
-      enable = mkEnableOption "Whether to run the HDFS JournalNode";
-      inherit restartIfChanged;
-      openFirewall = mkOption {
-        type = types.bool;
-        default = true;
-        description = ''
-          Open firewall ports for journalnode
-        '';
+
+    datanode = hadoopServiceOption { serviceName = "HDFS DataNode"; } // {
+      dataDirs = mkOption {
+        default = null;
+        description = "Tier and path definitions for datanode storage.";
+        type = with types; nullOr (listOf (submodule {
+          options = {
+            type = mkOption {
+              type = enum [ "SSD" "DISK" "ARCHIVE" "RAM_DISK" ];
+              description = ''
+                Storage types ([SSD]/[DISK]/[ARCHIVE]/[RAM_DISK]) for HDFS storage policies.
+              '';
+            };
+            path = mkOption {
+              type = path;
+              example = "/var/lib/hadoop/hdfs/dn";
+              description = "Determines where on the local filesystem a data node should store its blocks.";
+            };
+          };
+        }));
       };
     };
-    zkfc = {
-      enable = mkEnableOption "Whether to run the HDFS ZooKeeper failover controller";
-      inherit restartIfChanged;
+
+    journalnode = hadoopServiceOption { serviceName = "HDFS JournalNode"; };
+
+    zkfc = hadoopServiceOption {
+      serviceName = "HDFS ZooKeeper failover controller";
+      firewallOption = false;
     };
-    httpfs = {
-      enable = mkEnableOption "Whether to run the HDFS HTTPfs server";
+
+    httpfs = hadoopServiceOption { serviceName = "HDFS HTTPFS"; } // {
       tempPath = mkOption {
         type = types.path;
         default = "/tmp/hadoop/httpfs";
-        description = ''
-          HTTPFS_TEMP path used by HTTPFS
-        '';
-      };
-      inherit restartIfChanged;
-      openFirewall = mkOption {
-        type = types.bool;
-        default = true;
-        description = ''
-          Open firewall ports for HTTPFS
-        '';
+        description = "HTTPFS_TEMP path used by HTTPFS";
       };
     };
+
   };
 
   config = mkMerge [
-    (mkIf cfg.hdfs.namenode.enable {
-      systemd.services.hdfs-namenode = {
-        description = "Hadoop HDFS NameNode";
-        wantedBy = [ "multi-user.target" ];
-        inherit (cfg.hdfs.namenode) restartIfChanged;
-
-        preStart = (mkIf cfg.hdfs.namenode.formatOnInit ''
-          ${cfg.package}/bin/hdfs --config ${hadoopConf} namenode -format -nonInteractive || true
-        '');
-
-        serviceConfig = {
-          User = "hdfs";
-          SyslogIdentifier = "hdfs-namenode";
-          ExecStart = "${cfg.package}/bin/hdfs --config ${hadoopConf} namenode";
-          Restart = "always";
-        };
-      };
-
-      networking.firewall.allowedTCPPorts = (mkIf cfg.hdfs.namenode.openFirewall [
+    (hadoopServiceConfig {
+      name = "NameNode";
+      allowedTCPPorts = [
         9870 # namenode.http-address
         8020 # namenode.rpc-address
-        8022 # namenode. servicerpc-address
-      ]);
+        8022 # namenode.servicerpc-address
+        8019 # dfs.ha.zkfc.port
+      ];
+      preStart = (mkIf cfg.hdfs.namenode.formatOnInit
+        "${cfg.package}/bin/hdfs --config ${hadoopConf} namenode -format -nonInteractive || true"
+      );
     })
-    (mkIf cfg.hdfs.datanode.enable {
-      systemd.services.hdfs-datanode = {
-        description = "Hadoop HDFS DataNode";
-        wantedBy = [ "multi-user.target" ];
-        inherit (cfg.hdfs.datanode) restartIfChanged;
-
-        serviceConfig = {
-          User = "hdfs";
-          SyslogIdentifier = "hdfs-datanode";
-          ExecStart = "${cfg.package}/bin/hdfs --config ${hadoopConf} datanode";
-          Restart = "always";
-        };
-      };
-      networking.firewall.allowedTCPPorts = (mkIf cfg.hdfs.datanode.openFirewall [
+
+    (hadoopServiceConfig {
+      name = "DataNode";
+      # port numbers for datanode changed between hadoop 2 and 3
+      allowedTCPPorts = if versionAtLeast cfg.package.version "3" then [
         9864 # datanode.http.address
         9866 # datanode.address
         9867 # datanode.ipc.address
-      ]);
+      ] else [
+        50075 # datanode.http.address
+        50010 # datanode.address
+        50020 # datanode.ipc.address
+      ];
+      extraConfig.services.hadoop.hdfsSiteInternal."dfs.datanode.data.dir" = let d = cfg.hdfs.datanode.dataDirs; in
+        if (d != null) then (concatMapStringsSep "," (x: "[" + x.type + "]file://" + x.path) d) else d;
    })
-    (mkIf cfg.hdfs.journalnode.enable {
-      systemd.services.hdfs-journalnode = {
-        description = "Hadoop HDFS JournalNode";
-        wantedBy = [ "multi-user.target" ];
-        inherit (cfg.hdfs.journalnode) restartIfChanged;
-
-        serviceConfig = {
-          User = "hdfs";
-          SyslogIdentifier = "hdfs-journalnode";
-          ExecStart = "${cfg.package}/bin/hdfs --config ${hadoopConf} journalnode";
-          Restart = "always";
-        };
-      };
-      networking.firewall.allowedTCPPorts = (mkIf cfg.hdfs.journalnode.openFirewall [
+
+    (hadoopServiceConfig {
+      name = "JournalNode";
+      allowedTCPPorts = [
         8480 # dfs.journalnode.http-address
         8485 # dfs.journalnode.rpc-address
-      ]);
+      ];
     })
-    (mkIf cfg.hdfs.zkfc.enable {
-      systemd.services.hdfs-zkfc = {
-        description = "Hadoop HDFS ZooKeeper failover controller";
-        wantedBy = [ "multi-user.target" ];
-        inherit (cfg.hdfs.zkfc) restartIfChanged;
-
-        serviceConfig = {
-          User = "hdfs";
-          SyslogIdentifier = "hdfs-zkfc";
-          ExecStart = "${cfg.package}/bin/hdfs --config ${hadoopConf} zkfc";
-          Restart = "always";
-        };
-      };
-    })
-    (mkIf cfg.hdfs.httpfs.enable {
-      systemd.services.hdfs-httpfs = {
-        description = "Hadoop httpfs";
-        wantedBy = [ "multi-user.target" ];
-        inherit (cfg.hdfs.httpfs) restartIfChanged;
-
-        environment.HTTPFS_TEMP = cfg.hdfs.httpfs.tempPath;
-        preStart = ''
-          mkdir -p $HTTPFS_TEMP
-        '';
+    (hadoopServiceConfig {
+      name = "zkfc";
+      description = "Hadoop HDFS ZooKeeper failover controller";
+    })
 
-        serviceConfig = {
-          User = "httpfs";
-          SyslogIdentifier = "hdfs-httpfs";
-          ExecStart = "${cfg.package}/bin/hdfs --config ${hadoopConf} httpfs";
-          Restart = "always";
-        };
-      };
-      networking.firewall.allowedTCPPorts = (mkIf cfg.hdfs.httpfs.openFirewall [
+    (hadoopServiceConfig {
+      name = "HTTPFS";
+      environment.HTTPFS_TEMP = cfg.hdfs.httpfs.tempPath;
+      preStart = "mkdir -p $HTTPFS_TEMP";
+      User = "httpfs";
+      allowedTCPPorts = [
         14000 # httpfs.http.port
-      ]);
+      ];
     })
-    (mkIf (
-      cfg.hdfs.namenode.enable || cfg.hdfs.datanode.enable || cfg.hdfs.journalnode.enable || cfg.hdfs.zkfc.enable
-    ) {
+
+    (mkIf cfg.gatewayRole.enable {
       users.users.hdfs = {
         description = "Hadoop HDFS user";
        group = "hadoop";
@@ -199,5 +199,6 @@ in
       isSystemUser = true;
     };
   })
+
   ];
 }
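
Usage sketch (not part of the commit): the options produced by hadoopServiceOption above can be set from a NixOS configuration roughly as follows. The data paths and JVM flag values are illustrative assumptions; only the option names and port numbers come from the diff. Note that the generated openFirewall option defaults to false, whereas the per-service options it replaces defaulted to true, so configurations that relied on automatically opened ports now need to set it explicitly.

    # Hypothetical single-node configuration exercising the refactored options.
    # Paths and flags below are made-up examples, not defaults from the module.
    {
      services.hadoop = {
        hdfs.namenode = {
          enable = true;
          formatOnInit = true;  # single-namenode clusters only; HA clusters are initialized manually
          openFirewall = true;  # opens 9870, 8020, 8022 and 8019 as listed in the diff
        };
        hdfs.datanode = {
          enable = true;
          openFirewall = true;
          # Tiered storage: the generator above renders this list as
          # dfs.datanode.data.dir = "[SSD]file:///ssd/dn,[DISK]file:///data/dn"
          dataDirs = [
            { type = "SSD";  path = "/ssd/dn"; }
            { type = "DISK"; path = "/data/dn"; }
          ];
          extraFlags = [ "-Dcom.sun.management.jmxremote" ];
        };
      };
    }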