openai-whisper: 20230124 -> 20230314

- add cudaSupport option
- add required dependencies: numba, scipy, tiktoken
- add CUDA-related dependencies only when cudaSupport = true
https://github.com/openai/whisper/blob/v20230314/CHANGELOG.md
Author: MayNiklas
Date:   2023-04-10 16:42:24 +02:00
Commit: 79d0a1812a (parent e663a3a17a)
2 changed files with 35 additions and 5 deletions
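With this change, CUDA support can be enabled per package via an override. A minimal consumer-side sketch (not part of this commit; the shell.nix wrapper and attribute path are assumptions):

  # shell.nix sketch: Python environment with whisper built against torchWithCuda
  with import <nixpkgs> { config.allowUnfree = true; };
  mkShell {
    packages = [
      (python3.withPackages (ps: [
        (ps.openai-whisper.override { cudaSupport = true; })
      ]))
    ];
  }

Left at the default cudaSupport = false, the package keeps building without any unfree CUDA dependencies.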

pkgs/development/python-modules/openai-whisper/default.nix

@@ -2,6 +2,7 @@
, fetchFromGitHub
, buildPythonPackage
, substituteAll
, cudaSupport ? false
# runtime
, ffmpeg
@@ -9,10 +10,15 @@
# propagates
, numpy
, torch
, torchWithCuda
, tqdm
, more-itertools
, transformers
, ffmpeg-python
, numba
, openai-triton
, scipy
, tiktoken
# tests
, pytestCheckHook
@@ -20,14 +26,14 @@
buildPythonPackage rec {
pname = "whisper";
version = "20230124";
version = "20230314";
format = "setuptools";
src = fetchFromGitHub {
owner = "openai";
repo = pname;
rev = "refs/tags/v${version}";
hash = "sha256-+3fs/EXK5NGlISuMTk7r2ZZ4tNFKbNFNkVS2LmHBvwk=";
hash = "sha256-qQCELjRFeRCT1k1CBc3netRtFvt+an/EbkrgnmiX/mc=";
};
patches = [
@@ -39,13 +45,31 @@ buildPythonPackage rec {
propagatedBuildInputs = [
numpy
torch
tqdm
more-itertools
transformers
ffmpeg-python
numba
scipy
tiktoken
] ++ lib.optionals (!cudaSupport) [
torch
] ++ lib.optionals (cudaSupport) [
openai-triton
torchWithCuda
];
postPatch = ''
substituteInPlace requirements.txt \
--replace "tiktoken==0.3.1" "tiktoken>=0.3.1"
''
# openai-triton is only needed for CUDA support.
# triton needs CUDA to be built.
# -> by making it optional, we can build whisper without unfree packages enabled
+ lib.optionalString (!cudaSupport) ''
sed -i '/if sys.platform.startswith("linux") and platform.machine() == "x86_64":/{N;d}' setup.py
'';
preCheck = ''
export HOME=$TMPDIR
'';
@@ -56,14 +80,18 @@ buildPythonPackage rec {
disabledTests = [
# requires network access to download models
"test_tokenizer"
"test_transcribe"
# requires NVIDIA drivers
"test_dtw_cuda_equivalence"
"test_median_filter_equivalence"
];
meta = with lib; {
changelog = "https://github.com/openai/whisper/blob/v${version}/CHANGELOG.md";
description = "General-purpose speech recognition model";
homepage = "https://github.com/openai/whisper";
license = licenses.mit;
maintainers = with maintainers; [ hexa ];
};
}
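To exercise both branches of the new conditional from a nixpkgs checkout, an invocation along these lines should work (command sketch; the attribute path and the --arg config mechanism are standard nixpkgs usage rather than part of this commit, and the CUDA variant relies on the python-packages.nix wiring shown below):

  # default build: torch without CUDA, no unfree packages required
  nix-build -A python3Packages.openai-whisper

  # CUDA build: pulls in torchWithCuda and openai-triton, hence allowUnfree
  nix-build -A python3Packages.openai-whisper \
    --arg config '{ allowUnfree = true; cudaSupport = true; }'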

pkgs/top-level/python-packages.nix

@@ -6813,7 +6813,9 @@
openai-triton = callPackage ../development/python-modules/openai-triton { llvmPackages = pkgs.llvmPackages_rocm; };
openai-whisper = callPackage ../development/python-modules/openai-whisper { };
openai-whisper = callPackage ../development/python-modules/openai-whisper {
cudaSupport = pkgs.config.cudaSupport or false;
};
openant = callPackage ../development/python-modules/openant { };
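Because the callPackage site defaults to pkgs.config.cudaSupport, the flag can also be flipped globally rather than per package. A NixOS configuration sketch (a common nixpkgs convention, shown here as an assumption rather than something this commit adds):

  # configuration.nix sketch: enable CUDA for every package that honours the flag
  nixpkgs.config = {
    allowUnfree = true;
    cudaSupport = true;
  };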