mirror of
https://github.com/NixOS/nixpkgs.git
synced 2024-12-01 19:33:03 +00:00
058e8f5ef1
Since version 4.1.0, podman has support for CDI, which is the recommended way to expose GPUs to containers with podman. More information: https://web.archive.org/web/20240729183805/https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html#configuring-podman
45 lines
1.2 KiB
Nix
45 lines
1.2 KiB
Nix
{
  lib,
  newScope,
  symlinkJoin,
}:

# Package scope for the nvidia-docker tooling.
# Note this scope isn't recursed into, at the time of writing.
lib.makeScope newScope (
  self: {

    # The config is only exposed as an attrset so that the user may reach the
    # default values, for inspectability purposes.
    # Commented-out attributes document optional settings and their usual
    # values without enabling them.
    dockerConfig = {
      disable-require = false;
      #swarm-resource = "DOCKER_RESOURCE_GPU"

      nvidia-container-cli = {
        #root = "/run/nvidia/driver";
        #path = "/usr/bin/nvidia-container-cli";
        environment = [ ];
        #debug = "/var/log/nvidia-container-runtime-hook.log";
        ldcache = "/tmp/ld.so.cache";
        load-kmods = true;
        #no-cgroups = false;
        #user = "root:video";
        # "@@glibcbin@" is a template placeholder substituted at build time
        # by package.nix — TODO confirm against ./package.nix.
        ldconfig = "@@glibcbin@/bin/ldconfig";
      };
    };

    # Toolkit built from ./package.nix with the Docker-flavoured config above.
    nvidia-container-toolkit-docker = self.callPackage ./package.nix {
      configTemplate = self.dockerConfig;
    };

    # User-facing package: the unwrapped nvidia-docker binaries joined with
    # the container toolkit, sharing the unwrapped package's meta.
    nvidia-docker = symlinkJoin {
      name = "nvidia-docker";
      paths = [
        self.nvidia-docker-unwrapped
        self.nvidia-container-toolkit-docker
      ];
      inherit (self.nvidia-docker-unwrapped) meta;
    };

    nvidia-docker-unwrapped = self.callPackage ./nvidia-docker.nix { };
  }
)