nixos/services.hadoop.hdfs: remove with lib;

commit e7e4c15a19
parent decec5eaa3
Author: Felix Buehler
Date:   2024-12-08 13:18:22 +01:00
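
This module previously opened the whole lib namespace with a top-level
"with lib;", so helpers such as mkOption, mkIf and types were pulled into
scope implicitly. The commit removes that and qualifies every use explicitly
(lib.mkOption, lib.mkIf, lib.types, ...), following the nixpkgs convention of
avoiding "with lib;" in modules: explicit prefixes make the origin of each
identifier obvious and avoid accidental shadowing. A minimal sketch of the
pattern (simplified illustration, not a line from this module):

    # before: identifiers resolved through the implicit "with lib;" scope
    { lib, ... }:
    with lib;
    {
      options.demo.enable = mkEnableOption "demo";
    }

    # after: each helper is qualified explicitly
    { lib, ... }:
    {
      options.demo.enable = lib.mkEnableOption "demo";
    }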

nixos/modules/services/cluster/hadoop/hdfs.nix

@@ -1,5 +1,4 @@
 { config, lib, pkgs, ... }:
-with lib;
 let
   cfg = config.services.hadoop;
@@ -8,9 +7,9 @@ let
   # Generator for HDFS service options
   hadoopServiceOption = { serviceName, firewallOption ? true, extraOpts ? null }: {
-    enable = mkEnableOption serviceName;
-    restartIfChanged = mkOption {
-      type = types.bool;
+    enable = lib.mkEnableOption serviceName;
+    restartIfChanged = lib.mkOption {
+      type = lib.types.bool;
       description = ''
         Automatically restart the service on config change.
         This can be set to false to defer restarts on clusters running critical applications.
@@ -19,8 +18,8 @@ let
       '';
       default = false;
     };
-    extraFlags = mkOption{
-      type = with types; listOf str;
+    extraFlags = lib.mkOption{
+      type = with lib.types; listOf str;
       default = [];
       description = "Extra command line flags to pass to ${serviceName}";
       example = [
@@ -28,23 +27,23 @@ let
         "-Dcom.sun.management.jmxremote.port=8010"
       ];
     };
-    extraEnv = mkOption{
-      type = with types; attrsOf str;
+    extraEnv = lib.mkOption{
+      type = with lib.types; attrsOf str;
       default = {};
       description = "Extra environment variables for ${serviceName}";
     };
-  } // (optionalAttrs firewallOption {
-    openFirewall = mkOption {
-      type = types.bool;
+  } // (lib.optionalAttrs firewallOption {
+    openFirewall = lib.mkOption {
+      type = lib.types.bool;
       default = false;
       description = "Open firewall ports for ${serviceName}.";
     };
-  }) // (optionalAttrs (extraOpts != null) extraOpts);
+  }) // (lib.optionalAttrs (extraOpts != null) extraOpts);
 
   # Generator for HDFS service configs
   hadoopServiceConfig =
     { name
-    , serviceOptions ? cfg.hdfs."${toLower name}"
+    , serviceOptions ? cfg.hdfs."${lib.toLower name}"
     , description ? "Hadoop HDFS ${name}"
     , User ? "hdfs"
     , allowedTCPPorts ? [ ]
@@ -53,23 +52,23 @@ let
     , extraConfig ? { }
     }: (
-      mkIf serviceOptions.enable ( mkMerge [{
-        systemd.services."hdfs-${toLower name}" = {
+      lib.mkIf serviceOptions.enable ( lib.mkMerge [{
+        systemd.services."hdfs-${lib.toLower name}" = {
           inherit description preStart;
           environment = environment // serviceOptions.extraEnv;
           wantedBy = [ "multi-user.target" ];
           inherit (serviceOptions) restartIfChanged;
           serviceConfig = {
             inherit User;
-            SyslogIdentifier = "hdfs-${toLower name}";
-            ExecStart = "${cfg.package}/bin/hdfs --config ${hadoopConf} ${toLower name} ${escapeShellArgs serviceOptions.extraFlags}";
+            SyslogIdentifier = "hdfs-${lib.toLower name}";
+            ExecStart = "${cfg.package}/bin/hdfs --config ${hadoopConf} ${lib.toLower name} ${lib.escapeShellArgs serviceOptions.extraFlags}";
             Restart = "always";
           };
         };
 
         services.hadoop.gatewayRole.enable = true;
 
-        networking.firewall.allowedTCPPorts = mkIf
+        networking.firewall.allowedTCPPorts = lib.mkIf
           ((builtins.hasAttr "openFirewall" serviceOptions) && serviceOptions.openFirewall)
           allowedTCPPorts;
       } extraConfig])
@@ -80,8 +79,8 @@ in
   options.services.hadoop.hdfs = {
     namenode = hadoopServiceOption { serviceName = "HDFS NameNode"; } // {
-      formatOnInit = mkOption {
-        type = types.bool;
+      formatOnInit = lib.mkOption {
+        type = lib.types.bool;
         default = false;
         description = ''
           Format HDFS namenode on first start. This is useful for quickly spinning up
@@ -94,18 +93,18 @@ in
     };
 
     datanode = hadoopServiceOption { serviceName = "HDFS DataNode"; } // {
-      dataDirs = mkOption {
+      dataDirs = lib.mkOption {
         default = null;
         description = "Tier and path definitions for datanode storage.";
-        type = with types; nullOr (listOf (submodule {
+        type = with lib.types; nullOr (listOf (submodule {
           options = {
-            type = mkOption {
+            type = lib.mkOption {
               type = enum [ "SSD" "DISK" "ARCHIVE" "RAM_DISK" ];
               description = ''
                 Storage types ([SSD]/[DISK]/[ARCHIVE]/[RAM_DISK]) for HDFS storage policies.
               '';
             };
-            path = mkOption {
+            path = lib.mkOption {
               type = path;
               example = [ "/var/lib/hadoop/hdfs/dn" ];
               description = "Determines where on the local filesystem a data node should store its blocks.";
@@ -123,8 +122,8 @@ in
     };
 
     httpfs = hadoopServiceOption { serviceName = "HDFS JournalNode"; } // {
-      tempPath = mkOption {
-        type = types.path;
+      tempPath = lib.mkOption {
+        type = lib.types.path;
         default = "/tmp/hadoop/httpfs";
         description = "HTTPFS_TEMP path used by HTTPFS";
       };
@@ -132,7 +131,7 @@ in
   };
 
-  config = mkMerge [
+  config = lib.mkMerge [
     (hadoopServiceConfig {
       name = "NameNode";
       allowedTCPPorts = [
@@ -141,7 +140,7 @@ in
         8022 # namenode.servicerpc-address
         8019 # dfs.ha.zkfc.port
       ];
-      preStart = (mkIf cfg.hdfs.namenode.formatOnInit
+      preStart = (lib.mkIf cfg.hdfs.namenode.formatOnInit
         "${cfg.package}/bin/hdfs --config ${hadoopConf} namenode -format -nonInteractive || true"
       );
     })
@@ -149,7 +148,7 @@ in
     (hadoopServiceConfig {
       name = "DataNode";
       # port numbers for datanode changed between hadoop 2 and 3
-      allowedTCPPorts = if versionAtLeast cfg.package.version "3" then [
+      allowedTCPPorts = if lib.versionAtLeast cfg.package.version "3" then [
         9864 # datanode.http.address
         9866 # datanode.address
         9867 # datanode.ipc.address
@@ -158,8 +157,8 @@ in
         50010 # datanode.address
         50020 # datanode.ipc.address
       ];
-      extraConfig.services.hadoop.hdfsSiteInternal."dfs.datanode.data.dir" = mkIf (cfg.hdfs.datanode.dataDirs!= null)
-        (concatMapStringsSep "," (x: "["+x.type+"]file://"+x.path) cfg.hdfs.datanode.dataDirs);
+      extraConfig.services.hadoop.hdfsSiteInternal."dfs.datanode.data.dir" = lib.mkIf (cfg.hdfs.datanode.dataDirs!= null)
+        (lib.concatMapStringsSep "," (x: "["+x.type+"]file://"+x.path) cfg.hdfs.datanode.dataDirs);
     })
 
     (hadoopServiceConfig {
@@ -185,14 +184,14 @@ in
       ];
     })
 
-    (mkIf cfg.gatewayRole.enable {
+    (lib.mkIf cfg.gatewayRole.enable {
       users.users.hdfs = {
         description = "Hadoop HDFS user";
         group = "hadoop";
         uid = config.ids.uids.hdfs;
       };
     })
 
-    (mkIf cfg.hdfs.httpfs.enable {
+    (lib.mkIf cfg.hdfs.httpfs.enable {
       users.users.httpfs = {
         description = "Hadoop HTTPFS user";
         group = "hadoop";