# nixpkgs/nixos/modules/services/computing/slurm/slurm.nix

{ config, lib, pkgs, ... }:

with lib;

let

  cfg = config.services.slurm;

  # configuration file can be generated by http://slurm.schedmd.com/configurator.html
  configFile = pkgs.writeTextDir "slurm.conf"
    ''
      ${optionalString (cfg.controlMachine != null) ''controlMachine=${cfg.controlMachine}''}
      ${optionalString (cfg.controlAddr != null) ''controlAddr=${cfg.controlAddr}''}
      ${optionalString (cfg.nodeName != null) ''nodeName=${cfg.nodeName}''}
      ${optionalString (cfg.partitionName != null) ''partitionName=${cfg.partitionName}''}
      PlugStackConfig=${plugStackConfig}
      ProctrackType=${cfg.procTrackType}
      ${cfg.extraConfig}
    '';

  plugStackConfig = pkgs.writeTextDir "plugstack.conf"
    ''
      ${optionalString cfg.enableSrunX11 ''optional ${pkgs.slurm-spank-x11}/lib/x11.so''}
      ${cfg.extraPlugstackConfig}
    '';

  cgroupConfig = pkgs.writeTextDir "cgroup.conf"
    ''
      ${cfg.extraCgroupConfig}
    '';

  # slurm expects some additional config files to be
  # in the same directory as slurm.conf
  etcSlurm = pkgs.symlinkJoin {
    name = "etc-slurm";
    paths = [ configFile cgroupConfig plugStackConfig ];
  };
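
  # As a rough sketch (not output copied from a real build): with
  #   controlMachine = "control1"
  #   nodeName       = "node[1-3] CPUs=4 State=UNKNOWN"
  #   partitionName  = "debug Nodes=node[1-3] Default=YES MaxTime=INFINITE State=UP"
  # the generated slurm.conf would contain roughly
  #   controlMachine=control1
  #   controlAddr=control1
  #   nodeName=node[1-3] CPUs=4 State=UNKNOWN
  #   partitionName=debug Nodes=node[1-3] Default=YES MaxTime=INFINITE State=UP
  #   PlugStackConfig=/nix/store/...-plugstack.conf
  #   ProctrackType=proctrack/linuxproc
  # where the hostnames and node parameters are made-up example values.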

in

{

  ###### interface

  options = {

    services.slurm = {

      server = {
        enable = mkOption {
          type = types.bool;
          default = false;
          description = ''
            Whether to enable the slurm control daemon.
            Note that the standard authentication method is "munge".
            The "munge" service needs to be provided with a password file in order for
            slurm to work properly (see <literal>services.munge.password</literal>).
          '';
        };
      };
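
      # A minimal control-node configuration might look like this (a sketch;
      # the hostname and key path are arbitrary example values):
      #
      #   services.slurm.server.enable = true;
      #   services.slurm.controlMachine = "control1";
      #   services.munge.password = "/etc/munge/munge.key";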

      client = {
        enable = mkEnableOption "slurm client daemon";
      };

      enableStools = mkOption {
        type = types.bool;
        default = false;
        description = ''
          Whether to provide a slurm.conf file.
          Enable this option if you do not run a slurm daemon on this host
          (i.e. <literal>server.enable</literal> and <literal>client.enable</literal> are <literal>false</literal>)
          but you still want to run slurm commands from this host.
        '';
      };
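
      # A login/submission host that runs no slurm daemon but should still be
      # able to use commands such as sbatch, squeue or srun would set (sketch;
      # the hostname is an arbitrary example):
      #
      #   services.slurm.enableStools = true;
      #   services.slurm.controlMachine = "control1";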

      package = mkOption {
        type = types.package;
        default = pkgs.slurm;
        defaultText = "pkgs.slurm";
        example = literalExample "pkgs.slurm-full";
        description = ''
          The package to use for slurm binaries.
        '';
      };

      controlMachine = mkOption {
        type = types.nullOr types.str;
        default = null;
        example = null;
        description = ''
          The short hostname of the machine where SLURM control functions are
          executed (i.e. the name returned by the command "hostname -s", use "tux001"
          rather than "tux001.my.com").
        '';
      };

      controlAddr = mkOption {
        type = types.nullOr types.str;
        default = cfg.controlMachine;
        example = null;
        description = ''
          Name that ControlMachine should be referred to in establishing a
          communications path.
        '';
      };

      nodeName = mkOption {
        type = types.nullOr types.str;
        default = null;
        example = "linux[1-32] CPUs=1 State=UNKNOWN";
        description = ''
          Name that SLURM uses to refer to a node (or base partition for BlueGene
          systems). Typically this would be the string that "/bin/hostname -s"
          returns. Note that the node's parameters have to be written after the name.
        '';
      };

      partitionName = mkOption {
        type = types.nullOr types.str;
        default = null;
        example = "debug Nodes=linux[1-32] Default=YES MaxTime=INFINITE State=UP";
        description = ''
          Name by which the partition may be referenced. Note that the partition's
          parameters have to be written after the name.
        '';
      };
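
      # For a small homogeneous cluster the two options above are typically set
      # together, e.g. (hostnames and parameters are illustrative only):
      #
      #   services.slurm.nodeName = "node[1-3] CPUs=4 State=UNKNOWN";
      #   services.slurm.partitionName = "debug Nodes=node[1-3] Default=YES MaxTime=INFINITE State=UP";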

      enableSrunX11 = mkOption {
        default = false;
        type = types.bool;
        description = ''
          If enabled, srun will accept the option "--x11" to allow for X11 forwarding
          from within an interactive session or a batch job. This activates the
          slurm-spank-x11 module. Note that this option also enables
          <literal>services.openssh.forwardX11</literal> on the client.
        '';
      };
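
      # With this enabled, an interactive X11 session can be requested along the
      # lines of `srun --x11 xterm` (the command is only an illustration).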

      procTrackType = mkOption {
        type = types.str;
        default = "proctrack/linuxproc";
        description = ''
          Plugin to be used for process tracking on a job step basis.
          The slurmd daemon uses this mechanism to identify all processes
          which are children of processes it spawns for a user job step.
        '';
      };
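
      # Switching to cgroup based process tracking might look like this
      # (a sketch; CgroupAutomount is only one example cgroup.conf parameter):
      #
      #   services.slurm.procTrackType = "proctrack/cgroup";
      #   services.slurm.extraCgroupConfig = ''
      #     CgroupAutomount=yes
      #   '';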

      extraConfig = mkOption {
        default = "";
        type = types.lines;
        description = ''
          Extra configuration options that will be added verbatim at
          the end of the slurm configuration file.
        '';
      };

      extraPlugstackConfig = mkOption {
        default = "";
        type = types.lines;
        description = ''
          Extra configuration that will be added to the end of <literal>plugstack.conf</literal>.
        '';
      };

      extraCgroupConfig = mkOption {
        default = "";
        type = types.lines;
        description = ''
          Extra configuration for <literal>cgroup.conf</literal>. This file is
          used when <literal>procTrackType=proctrack/cgroup</literal>.
        '';
      };

    };

  };

  ###### implementation

  config =
    let
      wrappedSlurm = pkgs.stdenv.mkDerivation {
        name = "wrappedSlurm";

        propagatedBuildInputs = [ cfg.package etcSlurm ];

        builder = pkgs.writeText "builder.sh" ''
          source $stdenv/setup
          mkdir -p $out/bin
          find ${getBin cfg.package}/bin -type f -executable | while read EXE
          do
            exename="$(basename $EXE)"
            wrappername="$out/bin/$exename"
            # Each wrapper points SLURM_CONF at the generated config directory
            # unless the caller already has SLURM_CONF set in its environment.
            cat > "$wrappername" <<EOT
          #!/bin/sh
          if [ -z "\$SLURM_CONF" ]
          then
            SLURM_CONF="${etcSlurm}/slurm.conf" "$EXE" "\$@"
          else
            "$EXE" "\$@"
          fi
          EOT
            chmod +x "$wrappername"
          done
        '';
      };
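
      # Each generated wrapper is a small shell script of roughly this shape
      # (store paths shortened, sinfo used as an example binary):
      #
      #   #!/bin/sh
      #   if [ -z "$SLURM_CONF" ]
      #   then
      #     SLURM_CONF="/nix/store/...-etc-slurm/slurm.conf" "/nix/store/.../bin/sinfo" "$@"
      #   else
      #     "/nix/store/.../bin/sinfo" "$@"
      #   fi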

    in mkIf (cfg.enableStools || cfg.client.enable || cfg.server.enable) {

      environment.systemPackages = [ wrappedSlurm ];

      services.munge.enable = mkDefault true;

      systemd.services.slurmd = mkIf (cfg.client.enable) {
        path = with pkgs; [ wrappedSlurm coreutils ]
          ++ lib.optional cfg.enableSrunX11 slurm-spank-x11;

        wantedBy = [ "multi-user.target" ];
        after = [ "systemd-tmpfiles-clean.service" ];

        serviceConfig = {
          Type = "forking";
          ExecStart = "${wrappedSlurm}/bin/slurmd";
          PIDFile = "/run/slurmd.pid";
          ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
        };

        preStart = ''
          mkdir -p /var/spool
        '';
      };

      services.openssh.forwardX11 = mkIf cfg.client.enable (mkDefault true);

      systemd.services.slurmctld = mkIf (cfg.server.enable) {
        path = with pkgs; [ wrappedSlurm munge coreutils ]
          ++ lib.optional cfg.enableSrunX11 slurm-spank-x11;

        wantedBy = [ "multi-user.target" ];
        after = [ "network.target" "munged.service" ];
        requires = [ "munged.service" ];

        serviceConfig = {
          Type = "forking";
          ExecStart = "${wrappedSlurm}/bin/slurmctld";
          PIDFile = "/run/slurmctld.pid";
          ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
        };
      };

    };

}