nixpkgs/pkgs/development/python-modules/pytorch/bin.nix
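# Binary (wheel) build of PyTorch: the upstream wheel is fetched and its shared
# libraries are patched (store rpath plus OpenGL/CUDA driver runpath) so they
# resolve from the Nix store instead of an FHS layout.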

{ lib, stdenv
, buildPythonPackage
, fetchurl
, isPy37
, isPy38
, isPy39
, isPy310
, python
, addOpenGLRunpath
, future
, numpy
, patchelf
, pyyaml
, requests
, setuptools
, typing-extensions
}:
let
  pyVerNoDot = builtins.replaceStrings [ "." ] [ "" ] python.pythonVersion;
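  # ./binary-hashes.nix is applied to `version` (defined below) and must return
  # an attribute set keyed by "<system>-<pythonVersionWithoutDot>", where each
  # value is an argument set accepted by fetchurl. A minimal sketch of the
  # expected shape (illustrative URL and hash, not the real values):
  #
  #   version: {
  #     x86_64-linux-39 = {
  #       url = "https://download.pytorch.org/whl/torch-1.11.0-cp39-cp39-linux_x86_64.whl";
  #       hash = "sha256-...";
  #     };
  #   }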
  srcs = import ./binary-hashes.nix version;
  unsupported = throw "Unsupported system";
  version = "1.11.0";
in buildPythonPackage {
  inherit version;
  pname = "pytorch";
  # Don't forget to update pytorch to the same version.

  format = "wheel";
  disabled = !(isPy37 || isPy38 || isPy39 || isPy310);
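  # For example, Python 3.9 on x86_64-linux looks up srcs."x86_64-linux-39";
  # a missing attribute aborts evaluation with "Unsupported system".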
  src = fetchurl srcs."${stdenv.system}-${pyVerNoDot}" or unsupported;

  nativeBuildInputs = [
    addOpenGLRunpath
    patchelf
  ];

  propagatedBuildInputs = [
    future
    numpy
    pyyaml
    requests
    setuptools
    typing-extensions
  ];
  postInstall = ''
    # ONNX conversion
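    # ($out/bin is assumed to contain only the wheel's ONNX conversion entry
    # points, e.g. convert-caffe2-to-onnx; removing it keeps the package
    # library-only.)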
    rm -rf $out/bin
  '';

  postFixup = let
    rpath = lib.makeLibraryPath [ stdenv.cc.cc.lib ];
  in ''
    find $out/${python.sitePackages}/torch/lib -type f \( -name '*.so' -or -name '*.so.*' \) | while read lib; do
      echo "setting rpath for $lib..."
      patchelf --set-rpath "${rpath}:$out/${python.sitePackages}/torch/lib" "$lib"
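      # addOpenGLRunpath additionally adds the host driver's library directory
      # (/run/opengl-driver/lib on NixOS) so driver-provided libraries such as
      # libcuda.so can be found at run time.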
      addOpenGLRunpath "$lib"
    done
  '';

  # The wheel binaries are not stripped: stripping breaks them with
  # `ImportError: libtorch_cuda_cpp.so: ELF load command address/offset not properly aligned`.
  dontStrip = true;
  pythonImportsCheck = [ "torch" ];

  meta = with lib; {
    description = "Open source, prototype-to-production deep learning platform";
    homepage = "https://pytorch.org/";
    changelog = "https://github.com/pytorch/pytorch/releases/tag/v${version}";
    # Includes CUDA and Intel MKL, but redistribution of the binaries is not restricted.
    # https://docs.nvidia.com/cuda/eula/index.html
    # https://www.intel.com/content/www/us/en/developer/articles/license/onemkl-license-faq.html
    license = licenses.bsd3;
    sourceProvenance = with sourceTypes; [ binaryNativeCode ];
    platforms = platforms.linux ++ platforms.darwin;
    hydraPlatforms = [ ]; # output size 3.2G on 1.11.0
    maintainers = with maintainers; [ junjihashimoto ];
  };
}
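# A minimal usage sketch, assuming this file is exposed as
# python3Packages.pytorch-bin (the attribute name is an assumption, not defined
# here):
#
#   nix-shell -p 'python3.withPackages (ps: [ ps.pytorch-bin ])' \
#     --run 'python -c "import torch; print(torch.__version__)"'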