# nixpkgs/pkgs/development/python-modules/pytorch/default.nix

{ fetchurl, buildPythonPackage, pythonOlder,
  cudaSupport ? false, cudatoolkit ? null, cudnn ? null,
  fetchFromGitHub, lib, numpy, pyyaml, cffi, typing, cmake, hypothesis,
  linkFarm, symlinkJoin,
  utillinux, which }:
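
# cuDNN can only be used together with the CUDA toolkit, and enabling
# cudaSupport requires a cudatoolkit to be provided.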
assert cudnn == null || cudatoolkit != null;
assert !cudaSupport || cudatoolkit != null;
let
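  # cudatoolkit is split into multiple outputs; join its out and lib outputs
  # so the build finds the CUDA headers and libraries under a single prefix.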
  cudatoolkit_joined = symlinkJoin {
    name = "${cudatoolkit.name}-unsplit";
    paths = [ cudatoolkit.out cudatoolkit.lib ];
  };

  # Normally libcuda.so.1 is provided at runtime by nvidia-x11 via
  # LD_LIBRARY_PATH=/run/opengl-driver/lib. We only use the stub
  # libcuda.so from cudatoolkit for running tests, so that we don't have
  # to recompile pytorch on every update to nvidia-x11 or the kernel.
  cudaStub = linkFarm "cuda-stub" [{
    name = "libcuda.so.1";
    path = "${cudatoolkit}/lib/stubs/libcuda.so";
  }];
  cudaStubEnv = lib.optionalString cudaSupport
    "LD_LIBRARY_PATH=${cudaStub}\${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH} ";

in buildPythonPackage rec {
  version = "1.0.0";
  pname = "pytorch";

  src = fetchFromGitHub {
    owner = "pytorch";
    repo = "pytorch";
    rev = "v${version}";
    fetchSubmodules = true;
    sha256 = "076cpbig4sywn9vv674c0xdg832sdrd5pk1d0725pjkm436kpvlm";
  };

  patches = [
    # Skip two tests that are only meant to run on multi-GPU systems
    (fetchurl {
      url = "https://github.com/pytorch/pytorch/commit/bfa666eb0deebac21b03486e26642fd70d66e478.patch";
      sha256 = "1fgblcj02gjc0y62svwc5gnml879q3x2z7m69c9gax79dpr37s9i";
    })
  ];
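
  # When CUDA support is enabled, build with the gcc that cudatoolkit.cc
  # provides and, if cuDNN is available, point the build at its headers.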
  preConfigure = lib.optionalString cudaSupport ''
    export CC=${cudatoolkit.cc}/bin/gcc CXX=${cudatoolkit.cc}/bin/g++
  '' + lib.optionalString (cudaSupport && cudnn != null) ''
    export CUDNN_INCLUDE_DIR=${cudnn}/include
  '';
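
  # strip2 drops the first two RPATH entries of the installed libcaffe2
  # libraries and prepends $ORIGIN instead, so they resolve their sibling
  # libraries relative to their own location.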
  preFixup = ''
    function join_by { local IFS="$1"; shift; echo "$*"; }
    function strip2 {
      IFS=':'
      read -ra RP <<< $(patchelf --print-rpath $1)
      IFS=' '
      RP_NEW=$(join_by : ''${RP[@]:2})
      patchelf --set-rpath \$ORIGIN:''${RP_NEW} "$1"
    }
    for f in $(find ''${out} -name 'libcaffe2*.so')
    do
      strip2 $f
    done
  '';

  # Override the (weirdly) wrong version set by default. See
  # https://github.com/NixOS/nixpkgs/pull/52437#issuecomment-449718038
  # https://github.com/pytorch/pytorch/blob/v1.0.0/setup.py#L267
  PYTORCH_BUILD_VERSION = version;
  PYTORCH_BUILD_NUMBER = 0;

  buildInputs = [
    cmake
    numpy.blas
    utillinux
    which
  ] ++ lib.optionals cudaSupport [ cudatoolkit_joined cudnn ];

  propagatedBuildInputs = [
    cffi
    numpy
    pyyaml
  ] ++ lib.optional (pythonOlder "3.5") typing;

  checkInputs = [ hypothesis ];
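
  # Run the upstream test suite, excluding the listed test groups. When CUDA
  # support is enabled, LD_LIBRARY_PATH points at the stub libcuda.so (see
  # cudaStub above) so the tests can run without the real driver.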
  checkPhase = ''
    ${cudaStubEnv}python test/run_test.py --exclude dataloader sparse torch utils thd_distributed distributed cpp_extensions
  '';

  meta = {
    description = "Tensors and Dynamic neural networks in Python with strong GPU acceleration";
    homepage = https://pytorch.org/;
    license = lib.licenses.bsd3;
    platforms = lib.platforms.linux;
    maintainers = with lib.maintainers; [ teh ];
  };
}