nixpkgs/pkgs/by-name/pi/pig/package.nix

{
  lib,
  stdenv,
  fetchurl,
  makeWrapper,
  hadoop,
  jre,
  bash,
}:

stdenv.mkDerivation rec {
  pname = "pig";
  version = "0.17.0";
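
  # fetchurl resolves the mirror://apache scheme against the Apache mirror
  # list maintained in nixpkgs, so the download does not depend on a single host.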
  src = fetchurl {
    url = "mirror://apache/pig/${pname}-${version}/${pname}-${version}.tar.gz";
    sha256 = "1wwpg0w47f49rnivn2d26vrxgyfl9gpqx3vmzbl5lhx6x5l3fqbd";
  };

  nativeBuildInputs = [ makeWrapper ];
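
  # The release tarball ships prebuilt jars and launcher scripts, so there is
  # nothing to compile: installation is copying the tree and wrapping the
  # executables.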
  installPhase = ''
    mkdir -p $out
    mv * $out

    # no need for the Windows batch script or the Python wrapper
    rm $out/bin/pig.cmd $out/bin/pig.py
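
    # wrapProgram comes from makeWrapper; it replaces each launcher with a
    # shell wrapper that prepends the given PATH entries and exports the
    # variables Pig needs to find its JRE and Hadoop installation.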
    for n in $out/{bin,sbin}"/"*; do
      wrapProgram $n \
        --prefix PATH : "${
          lib.makeBinPath [
            jre
            bash
          ]
        }" \
        --set JAVA_HOME "${jre}" --set HADOOP_PREFIX "${hadoop}"
    done
  '';

  meta = with lib; {
    homepage = "https://pig.apache.org/";
    description = "High-level language for Apache Hadoop";
    mainProgram = "pig";
    license = licenses.asl20;
    longDescription = ''
      Apache Pig is a platform for analyzing large data sets that consists of a
      high-level language for expressing data analysis programs, coupled with
      infrastructure for evaluating these programs. The salient property of Pig
      programs is that their structure is amenable to substantial parallelization,
      which in turn enables them to handle very large data sets.
    '';
    platforms = platforms.linux;
    maintainers = [ ];
  };
}
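
A quick smoke test, as a sketch: from a nixpkgs checkout, build the top-level
attribute and run the wrapped launcher (the attribute name pig and the
-version flag are assumptions, not taken from this file):

    nix-build -A pig
    ./result/bin/pig -version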