nvidia-docker: support config.toml as an attrset argument

Someone Serge 2024-01-10 18:43:07 +00:00
parent 336e2218e0
commit 5e7c297ba2
4 changed files with 48 additions and 29 deletions
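In short, the derivation now accepts the runtime config either as a Nix attrset (configTemplate), serialized to TOML at build time, or as a pre-rendered file (configTemplatePath). A minimal sketch of the two call styles; the binding names and the my-config.toml path are illustrative, not part of this commit:

  # Attrset form: rendered to config.toml with (formats.toml { }).generate.
  toolkit-from-attrs = self.callPackage ./. {
    containerRuntimePath = "${docker}/libexec/docker/docker";
    configTemplate = { disable-require = false; };
  };

  # File form: a pre-rendered config.toml is copied verbatim instead.
  toolkit-from-file = self.callPackage ./. {
    containerRuntimePath = "${docker}/libexec/docker/docker";
    configTemplate = null;
    configTemplatePath = ./my-config.toml;  # hypothetical file
  };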

View File

@@ -5,10 +5,16 @@
, buildGoModule
, linkFarm
, writeShellScript
+, formats
, containerRuntimePath
, configTemplate
+, configTemplatePath ? null
, libnvidia-container
}:
+assert configTemplate != null -> (lib.isAttrs configTemplate && configTemplatePath == null);
+assert configTemplatePath != null -> (lib.isStringLike configTemplatePath && configTemplate == null);
let
isolatedContainerRuntimePath = linkFarm "isolated_container_runtime_path" [
{
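The two asserts added above make the arguments mutually exclusive: exactly one of configTemplate (an attrset) and configTemplatePath (a path or string) may be non-null. A sketch of what the contract accepts and rejects, with pkg standing in for either toolkit package:

  # Accepted: attrset only.
  ok1 = pkg.override { configTemplate = { disable-require = true; }; };

  # Accepted: pre-rendered file only (the attrset default must be nulled out).
  ok2 = pkg.override { configTemplate = null; configTemplatePath = ./config.toml; };

  # Rejected by the first assert: both given at once.
  bad = pkg.override {
    configTemplate = { disable-require = true; };
    configTemplatePath = ./config.toml;
  };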
@@ -23,6 +29,8 @@ let
echo >&2 "$(tput setaf 3)warning: \$XDG_CONFIG_HOME=$XDG_CONFIG_HOME$(tput sgr 0)"
fi
'';
+configToml = if configTemplatePath != null then configTemplatePath else (formats.toml { }).generate "config.toml" configTemplate;
in
buildGoModule rec {
pname = "container-toolkit/container-toolkit";
@@ -82,7 +90,7 @@ buildGoModule rec {
--prefix PATH : ${isolatedContainerRuntimePath}:${libnvidia-container}/bin \
--set-default XDG_CONFIG_HOME $out/etc
-cp ${configTemplate} $out/etc/nvidia-container-runtime/config.toml
+cp ${configToml} $out/etc/nvidia-container-runtime/config.toml
substituteInPlace $out/etc/nvidia-container-runtime/config.toml \
--subst-var-by glibcbin ${lib.getBin glibc}
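formats.toml here is the stock nixpkgs format generator: (formats.toml { }).generate renders an attrset to a TOML file in the store, mapping nested attrsets to TOML tables. Roughly:

  (formats.toml { }).generate "config.toml" {
    disable-require = false;
    nvidia-container-cli = {
      ldcache = "/tmp/ld.so.cache";
      load-kmods = true;
    };
  }
  # produces a store file whose contents are roughly:
  #   disable-require = false
  #   [nvidia-container-cli]
  #   ldcache = "/tmp/ld.so.cache"
  #   load-kmods = true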

View File

@@ -7,16 +7,53 @@
symlinkJoin,
}:
# Note this scope isn't recursed into, at the time of writing.
lib.makeScope newScope (
self: {
# The config is only exposed as an attrset so that the user may reach the
# deafult values, for inspectability purposes.
dockerConfig = {
disable-require = false;
#swarm-resource = "DOCKER_RESOURCE_GPU"
nvidia-container-cli = {
#root = "/run/nvidia/driver";
#path = "/usr/bin/nvidia-container-cli";
environment = [ ];
#debug = "/var/log/nvidia-container-runtime-hook.log";
ldcache = "/tmp/ld.so.cache";
load-kmods = true;
#no-cgroups = false;
#user = "root:video";
ldconfig = "@@glibcbin@/bin/ldconfig";
};
};
nvidia-container-toolkit-docker = self.callPackage ./. {
containerRuntimePath = "${docker}/libexec/docker/docker";
-configTemplate = ../nvidia-docker/config.toml;
+configTemplate = self.dockerConfig;
};
+podmanConfig = {
+  disable-require = true;
+  #swarm-resource = "DOCKER_RESOURCE_GPU";
+  nvidia-container-cli = {
+    #root = "/run/nvidia/driver";
+    #path = "/usr/bin/nvidia-container-cli";
+    environment = [ ];
+    #debug = "/var/log/nvidia-container-runtime-hook.log";
+    ldcache = "/tmp/ld.so.cache";
+    load-kmods = true;
+    no-cgroups = true;
+    #user = "root:video";
+    ldconfig = "@@glibcbin@/bin/ldconfig";
+  };
+};
nvidia-container-toolkit-podman = self.nvidia-container-toolkit-docker.override {
containerRuntimePath = lib.getExe runc;
-configTemplate = ../nvidia-podman/config.toml;
+configTemplate = self.podmanConfig;
};
nvidia-docker = symlinkJoin {
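Since the defaults are now plain attrsets exposed on the scope, individual settings can be tweaked with lib.recursiveUpdate rather than vendoring a whole TOML file. A hedged usage sketch, assuming the scope is reachable as nvidiaCtk (a hypothetical name) and lib is nixpkgs' lib:

  nvidiaCtk.nvidia-container-toolkit-docker.override {
    configTemplate = lib.recursiveUpdate nvidiaCtk.dockerConfig {
      nvidia-container-cli.no-cgroups = true;
    };
  }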

View File

@@ -1,13 +0,0 @@
-disable-require = false
-#swarm-resource = "DOCKER_RESOURCE_GPU"
-[nvidia-container-cli]
-#root = "/run/nvidia/driver"
-#path = "/usr/bin/nvidia-container-cli"
-environment = []
-#debug = "/var/log/nvidia-container-runtime-hook.log"
-ldcache = "/tmp/ld.so.cache"
-load-kmods = true
-#no-cgroups = false
-#user = "root:video"
-ldconfig = "@@glibcbin@/bin/ldconfig"

View File

@@ -1,13 +0,0 @@
-disable-require = true
-#swarm-resource = "DOCKER_RESOURCE_GPU"
-[nvidia-container-cli]
-#root = "/run/nvidia/driver"
-#path = "/usr/bin/nvidia-container-cli"
-environment = []
-#debug = "/var/log/nvidia-container-runtime-hook.log"
-ldcache = "/tmp/ld.so.cache"
-load-kmods = true
-no-cgroups = true
-#user = "root:video"
-ldconfig = "@@glibcbin@/bin/ldconfig"
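Downstream users who carried a patched copy of these deleted files can keep a verbatim TOML by passing configTemplatePath and nulling out the attrset default, for example (the local path is hypothetical):

  nvidia-container-toolkit-podman.override {
    configTemplate = null;
    configTemplatePath = ./nvidia-podman-config.toml;  # hypothetical local copy
  }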