Merge pull request #280087 from SomeoneSerge/fix/nvidia-docker-runtime

nvidia-docker: unbreak the runc symlink

The runc symlink assembled via linkFarm could end up dangling; the build
now fails early if it does not resolve (readlink --canonicalize-existing),
the docker and podman container runtime paths are fixed up, and the static
config.toml templates become Nix attrsets rendered with formats.toml in a
new package scope (packages.nix).

Authored by Someone on 2024-01-11 04:28:13 +00:00; committed by GitHub as
commit 093f4f59b4.
6 changed files with 104 additions and 52 deletions

pkgs/applications/virtualization/libnvidia-container/default.nix

@@ -116,6 +116,7 @@ stdenv.mkDerivation rec {
     description = "NVIDIA container runtime library";
     license = licenses.asl20;
     platforms = platforms.linux;
+    mainProgram = "nvidia-container-cli";
     maintainers = with maintainers; [ cpcloud ];
   };
 }
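
A note on the mainProgram line: meta.mainProgram is what lets lib.getExe resolve a package's primary binary, and the new packages.nix below uses lib.getExe runc the same way. A minimal sketch, assuming libnvidia-container is in scope; cliPath is an illustrative name:

    { lib, libnvidia-container }:
    {
      # lib.getExe consults meta.mainProgram (falling back to pname with a
      # warning), so with the line added above this resolves to
      # "${libnvidia-container}/bin/nvidia-container-cli".
      cliPath = lib.getExe libnvidia-container;
    }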

pkgs/applications/virtualization/nvidia-container-toolkit/default.nix

@@ -5,10 +5,16 @@
 , buildGoModule
 , linkFarm
 , writeShellScript
+, formats
 , containerRuntimePath
 , configTemplate
+, configTemplatePath ? null
 , libnvidia-container
 }:
+
+assert configTemplate != null -> (lib.isAttrs configTemplate && configTemplatePath == null);
+assert configTemplatePath != null -> (lib.isStringLike configTemplatePath && configTemplate == null);
+
 let
   isolatedContainerRuntimePath = linkFarm "isolated_container_runtime_path" [
     {
@@ -23,6 +29,8 @@ let
       echo >&2 "$(tput setaf 3)warning: \$XDG_CONFIG_HOME=$XDG_CONFIG_HOME$(tput sgr 0)"
     fi
   '';
+
+  configToml = if configTemplatePath != null then configTemplatePath else (formats.toml { }).generate "config.toml" configTemplate;
 in
 buildGoModule rec {
   pname = "container-toolkit/container-toolkit";
@@ -47,6 +55,14 @@ buildGoModule rec {
   nativeBuildInputs = [ makeWrapper ];
+
+  preConfigure = ''
+    # Ensure the runc symlink isn't broken:
+    if ! readlink --quiet --canonicalize-existing "${isolatedContainerRuntimePath}/runc" ; then
+      echo "${isolatedContainerRuntimePath}/runc: broken symlink" >&2
+      exit 1
+    fi
+  '';
   checkFlags =
     let
       skippedTests = [
@@ -74,7 +90,7 @@ buildGoModule rec {
       --prefix PATH : ${isolatedContainerRuntimePath}:${libnvidia-container}/bin \
       --set-default XDG_CONFIG_HOME $out/etc
-    cp ${configTemplate} $out/etc/nvidia-container-runtime/config.toml
+    cp ${configToml} $out/etc/nvidia-container-runtime/config.toml
     substituteInPlace $out/etc/nvidia-container-runtime/config.toml \
       --subst-var-by glibcbin ${lib.getBin glibc}
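
The new configToml derivation renders the configTemplate attrset to TOML at build time. A minimal sketch of the same mechanism, assuming pkgs from a nixpkgs import; the settings shown are a trimmed illustration:

    let
      pkgs = import <nixpkgs> { };
      settings = {
        disable-require = false;
        nvidia-container-cli = {
          environment = [ ];
          load-kmods = true;
        };
      };
    in
    # generate writes settings out as a config.toml file in the Nix store,
    # replacing the hand-maintained templates deleted further below:
    (pkgs.formats.toml { }).generate "config.toml" settings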

pkgs/applications/virtualization/nvidia-container-toolkit/packages.nix (new file)

@@ -0,0 +1,79 @@
{
  lib,
  newScope,
  docker,
  libnvidia-container,
  runc,
  symlinkJoin,
}:

# Note this scope isn't recursed into, at the time of writing.
lib.makeScope newScope (
  self: {
    # The config is only exposed as an attrset so that the user may reach the
    # default values, for inspectability purposes.
    dockerConfig = {
      disable-require = false;
      #swarm-resource = "DOCKER_RESOURCE_GPU"

      nvidia-container-cli = {
        #root = "/run/nvidia/driver";
        #path = "/usr/bin/nvidia-container-cli";
        environment = [ ];
        #debug = "/var/log/nvidia-container-runtime-hook.log";
        ldcache = "/tmp/ld.so.cache";
        load-kmods = true;
        #no-cgroups = false;
        #user = "root:video";
        ldconfig = "@@glibcbin@/bin/ldconfig";
      };
    };

    nvidia-container-toolkit-docker = self.callPackage ./. {
      containerRuntimePath = "${docker}/libexec/docker/docker";
      configTemplate = self.dockerConfig;
    };

    podmanConfig = {
      disable-require = true;
      #swarm-resource = "DOCKER_RESOURCE_GPU";

      nvidia-container-cli = {
        #root = "/run/nvidia/driver";
        #path = "/usr/bin/nvidia-container-cli";
        environment = [ ];
        #debug = "/var/log/nvidia-container-runtime-hook.log";
        ldcache = "/tmp/ld.so.cache";
        load-kmods = true;
        no-cgroups = true;
        #user = "root:video";
        ldconfig = "@@glibcbin@/bin/ldconfig";
      };
    };

    nvidia-container-toolkit-podman = self.nvidia-container-toolkit-docker.override {
      containerRuntimePath = lib.getExe runc;
      configTemplate = self.podmanConfig;
    };

    nvidia-docker = symlinkJoin {
      name = "nvidia-docker";
      paths = [
        libnvidia-container
        self.nvidia-docker-unwrapped
        self.nvidia-container-toolkit-docker
      ];
      inherit (self.nvidia-docker-unwrapped) meta;
    };

    nvidia-docker-unwrapped = self.callPackage ../nvidia-docker { };

    nvidia-podman = symlinkJoin {
      name = "nvidia-podman";
      paths = [
        libnvidia-container
        self.nvidia-container-toolkit-podman
      ];
      inherit (self.nvidia-container-toolkit-podman) meta;
    };
  }
)
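
For context: lib.makeScope builds a package set whose members are wired together through self.callPackage, and anything built with callPackage gains an .override, which is exactly what nvidia-container-toolkit-podman uses above to swap containerRuntimePath and configTemplate. A minimal self-contained sketch of the pattern, with hypothetical greeting/greeter members:

    let
      pkgs = import <nixpkgs> { };
      myScope = pkgs.lib.makeScope pkgs.newScope (self: {
        # Plain values participate in injection just like packages do:
        greeting = "hello from the scope";
        # self.callPackage fills `greeting` from the scope and
        # writeShellScriptBin from pkgs:
        greeter = self.callPackage
          ({ writeShellScriptBin, greeting }:
            writeShellScriptBin "greet" ''echo "${greeting}"'')
          { };
      });
    in
    # callPackage members can be overridden, mirroring the podman variant:
    myScope.greeter.override { greeting = "overridden"; }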

pkgs/applications/virtualization/nvidia-docker/config.toml (deleted)

@@ -1,13 +0,0 @@
disable-require = false
#swarm-resource = "DOCKER_RESOURCE_GPU"
[nvidia-container-cli]
#root = "/run/nvidia/driver"
#path = "/usr/bin/nvidia-container-cli"
environment = []
#debug = "/var/log/nvidia-container-runtime-hook.log"
ldcache = "/tmp/ld.so.cache"
load-kmods = true
#no-cgroups = false
#user = "root:video"
ldconfig = "@@glibcbin@/bin/ldconfig"

pkgs/applications/virtualization/nvidia-podman/config.toml (deleted)

@@ -1,13 +0,0 @@
disable-require = true
#swarm-resource = "DOCKER_RESOURCE_GPU"
[nvidia-container-cli]
#root = "/run/nvidia/driver"
#path = "/usr/bin/nvidia-container-cli"
environment = []
#debug = "/var/log/nvidia-container-runtime-hook.log"
ldcache = "/tmp/ld.so.cache"
load-kmods = true
no-cgroups = true
#user = "root:video"
ldconfig = "@@glibcbin@/bin/ldconfig"

pkgs/top-level/all-packages.nix

@@ -24168,31 +24168,13 @@ with pkgs;
   nv-codec-headers-11 = callPackage ../development/libraries/nv-codec-headers/11_x.nix { };
   nv-codec-headers-12 = callPackage ../development/libraries/nv-codec-headers/12_x.nix { };

-  mkNvidiaContainerPkg = { name, containerRuntimePath, configTemplate, additionalPaths ? [] }:
-    let
-      nvidia-container-toolkit = callPackage ../applications/virtualization/nvidia-container-toolkit {
-        inherit containerRuntimePath configTemplate;
-      };
-    in symlinkJoin {
-      inherit name;
-      paths = [
-        libnvidia-container
-        nvidia-container-toolkit
-      ] ++ additionalPaths;
-    };
-
-  nvidia-docker = mkNvidiaContainerPkg {
-    name = "nvidia-docker";
-    containerRuntimePath = "${docker}/libexec/docker/runc";
-    configTemplate = ../applications/virtualization/nvidia-docker/config.toml;
-    additionalPaths = [ (callPackage ../applications/virtualization/nvidia-docker { }) ];
-  };
-
-  nvidia-podman = mkNvidiaContainerPkg {
-    name = "nvidia-podman";
-    containerRuntimePath = "${runc}/bin/runc";
-    configTemplate = ../applications/virtualization/nvidia-podman/config.toml;
-  };
+  nvidiaCtkPackages =
+    callPackage ../applications/virtualization/nvidia-container-toolkit/packages.nix
+      { };
+
+  inherit (nvidiaCtkPackages)
+    nvidia-docker
+    nvidia-podman
+    ;

   nvidia-vaapi-driver = lib.hiPrio (callPackage ../development/libraries/nvidia-vaapi-driver { });
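
Consumers are unaffected by the refactor: nvidia-docker and nvidia-podman still exist at the top level, they are simply re-exported from the scope now. A minimal sketch, assuming evaluation from a checkout of this nixpkgs tree:

    let
      pkgs = import ./. { };  # this nixpkgs checkout
    in
    # Both attributes now come from nvidiaCtkPackages (packages.nix)
    # rather than mkNvidiaContainerPkg:
    [ pkgs.nvidia-docker pkgs.nvidia-podman ]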