mirror of https://github.com/NixOS/nixpkgs.git
Commit 2ed8ee4b4d: Merge branch 'staging' into fix/glibc-libgcc_s
.github/CODEOWNERS (vendored, 24 changes)
@ -47,12 +47,15 @@
|
||||
/nixos/doc/manual/man-nixos-option.xml @nbp
|
||||
/nixos/modules/installer/tools/nixos-option.sh @nbp
|
||||
|
||||
# NixOS modules
|
||||
/nixos/modules @Infinisil
|
||||
|
||||
# Python-related code and docs
|
||||
/maintainers/scripts/update-python-libraries @FRidh
|
||||
/pkgs/top-level/python-packages.nix @FRidh
|
||||
/pkgs/development/interpreters/python @FRidh
|
||||
/pkgs/development/python-modules @FRidh
|
||||
/doc/languages-frameworks/python.md @FRidh
|
||||
/doc/languages-frameworks/python.section.md @FRidh
|
||||
|
||||
# Haskell
|
||||
/pkgs/development/compilers/ghc @peti @ryantm @basvandijk
|
||||
@ -61,13 +64,18 @@
|
||||
/pkgs/development/haskell-modules/generic-builder.nix @peti @ryantm @basvandijk
|
||||
/pkgs/development/haskell-modules/hoogle.nix @peti @ryantm @basvandijk
|
||||
|
||||
# Perl
|
||||
/pkgs/development/interpreters/perl @volth
|
||||
/pkgs/top-level/perl-packages.nix @volth
|
||||
/pkgs/development/perl-modules @volth
|
||||
|
||||
# R
|
||||
/pkgs/applications/science/math/R @peti
|
||||
/pkgs/development/r-modules @peti
|
||||
|
||||
# Ruby
|
||||
/pkgs/development/interpreters/ruby @zimbatm
|
||||
/pkgs/development/ruby-modules @zimbatm
|
||||
/pkgs/development/interpreters/ruby @alyssais @zimbatm
|
||||
/pkgs/development/ruby-modules @alyssais @zimbatm
|
||||
|
||||
# Rust
|
||||
/pkgs/development/compilers/rust @Mic92 @LnL7
|
||||
@ -113,3 +121,13 @@
|
||||
/nixos/modules/services/databases/postgresql.xml @thoughtpolice
|
||||
/nixos/modules/services/databases/postgresql.nix @thoughtpolice
|
||||
/nixos/tests/postgresql.nix @thoughtpolice
|
||||
|
||||
# Dhall
|
||||
/pkgs/development/dhall-modules @Gabriel439 @Profpatsch
|
||||
/pkgs/development/interpreters/dhall @Gabriel439 @Profpatsch
|
||||
|
||||
# Idris
|
||||
/pkgs/development/idris-modules @Infinisil
|
||||
|
||||
# Bazel
|
||||
/pkgs/development/tools/build-managers/bazel @mboes @Profpatsch
|
||||
|
.github/PULL_REQUEST_TEMPLATE.md (vendored, 1 change)
@ -14,6 +14,7 @@
|
||||
- [ ] Tested compilation of all pkgs that depend on this change using `nix-shell -p nox --run "nox-review wip"`
|
||||
- [ ] Tested execution of all binary files (usually in `./result/bin/`)
|
||||
- [ ] Determined the impact on package closure size (by running `nix path-info -S` before and after)
|
||||
- [ ] Assured whether relevant documentation is up to date
|
||||
- [ ] Fits [CONTRIBUTING.md](https://github.com/NixOS/nixpkgs/blob/master/.github/CONTRIBUTING.md).
|
||||
|
||||
---
|
||||
|
.gitignore (vendored, 3 changes)
@ -13,4 +13,5 @@ result-*
|
||||
.DS_Store
|
||||
|
||||
/pkgs/development/libraries/qt-5/*/tmp/
|
||||
/pkgs/desktops/kde-5/*/tmp/
|
||||
/pkgs/desktops/kde-5/*/tmp/
|
||||
/pkgs/development/mobile/androidenv/xml/*
|
||||
|
@ -20,7 +20,7 @@ release and `nixos-unstable` for the latest successful build of master:
|
||||
% git rebase channels/nixos-18.09
|
||||
```
|
||||
|
||||
For pull-requests, please rebase onto nixpkgs `master`.
|
||||
For pull requests, please rebase onto nixpkgs `master`.
|
||||
|
||||
[NixOS](https://nixos.org/nixos/) Linux distribution source code is located inside
|
||||
`nixos/` folder.
|
||||
|
@ -9,8 +9,10 @@ debug:
|
||||
|
||||
.PHONY: format
|
||||
format:
|
||||
find . -iname '*.xml' -type f -print0 | xargs -0 -I{} -n1 \
|
||||
xmlformat --config-file "$$XMLFORMAT_CONFIG" -i {}
|
||||
find . -iname '*.xml' -type f | while read f; do \
|
||||
echo $$f ;\
|
||||
xmlformat --config-file "$$XMLFORMAT_CONFIG" -i $$f ;\
|
||||
done
|
||||
|
||||
.PHONY: fix-misc-xml
|
||||
fix-misc-xml:
|
||||
|
@ -56,25 +56,30 @@ foo { arg = ...; }
|
||||
or list elements should be aligned:
|
||||
<programlisting>
|
||||
# A long list.
|
||||
list =
|
||||
[ elem1
|
||||
elem2
|
||||
elem3
|
||||
];
|
||||
list = [
|
||||
elem1
|
||||
elem2
|
||||
elem3
|
||||
];
|
||||
|
||||
# A long attribute set.
|
||||
attrs =
|
||||
{ attr1 = short_expr;
|
||||
attr2 =
|
||||
if true then big_expr else big_expr;
|
||||
};
|
||||
|
||||
# Alternatively:
|
||||
attrs = {
|
||||
attr1 = short_expr;
|
||||
attr2 =
|
||||
if true then big_expr else big_expr;
|
||||
};
|
||||
|
||||
# Combined
|
||||
listOfAttrs = [
|
||||
{
|
||||
attr1 = 3;
|
||||
attr2 = "fff";
|
||||
}
|
||||
{
|
||||
attr1 = 5;
|
||||
attr2 = "ggg";
|
||||
}
|
||||
];
|
||||
</programlisting>
|
||||
</para>
|
||||
</listitem>
|
||||
@ -191,6 +196,23 @@ args.stdenv.mkDerivation (args // {
|
||||
<section xml:id="sec-package-naming">
|
||||
<title>Package naming</title>
|
||||
|
||||
<para>
|
||||
The key words
|
||||
<emphasis>must</emphasis>,
|
||||
<emphasis>must not</emphasis>,
|
||||
<emphasis>required</emphasis>,
|
||||
<emphasis>shall</emphasis>,
|
||||
<emphasis>shall not</emphasis>,
|
||||
<emphasis>should</emphasis>,
|
||||
<emphasis>should not</emphasis>,
|
||||
<emphasis>recommended</emphasis>,
|
||||
<emphasis>may</emphasis>,
|
||||
and <emphasis>optional</emphasis> in this section
|
||||
are to be interpreted as described in
|
||||
<link xlink:href="https://tools.ietf.org/html/rfc2119">RFC 2119</link>.
|
||||
Only <emphasis>emphasized</emphasis> words are to be interpreted in this way.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
In Nixpkgs, there are generally three different names associated with a
|
||||
package:
|
||||
@ -231,14 +253,15 @@ args.stdenv.mkDerivation (args // {
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>
|
||||
Generally, try to stick to the upstream package name.
|
||||
The <literal>name</literal> attribute <emphasis>should</emphasis>
|
||||
be identical to the upstream package name.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
Don’t use uppercase letters in the <literal>name</literal> attribute
|
||||
— e.g., <literal>"mplayer-1.0rc2"</literal> instead of
|
||||
<literal>"MPlayer-1.0rc2"</literal>.
|
||||
The <literal>name</literal> attribute <emphasis>must not</emphasis>
|
||||
contain uppercase letters — e.g., <literal>"mplayer-1.0rc2"</literal>
|
||||
instead of <literal>"MPlayer-1.0rc2"</literal>.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
@ -252,14 +275,14 @@ args.stdenv.mkDerivation (args // {
|
||||
<para>
|
||||
If a package is not a release but a commit from a repository, then the
|
||||
version part of the name <emphasis>must</emphasis> be the date of that
|
||||
(fetched) commit. The date must be in <literal>"YYYY-MM-DD"</literal>
|
||||
(fetched) commit. The date <emphasis>must</emphasis> be in <literal>"YYYY-MM-DD"</literal>
|
||||
format. Also append <literal>"unstable"</literal> to the name - e.g.,
|
||||
<literal>"pkgname-unstable-2014-09-23"</literal>.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
Dashes in the package name should be preserved in new variable names,
|
||||
Dashes in the package name <emphasis>should</emphasis> be preserved in new variable names,
|
||||
rather than converted to underscores or camel cased — e.g.,
|
||||
<varname>http-parser</varname> instead of <varname>http_parser</varname>
|
||||
or <varname>httpParser</varname>. The hyphenated style is preferred in
|
||||
@ -268,7 +291,7 @@ args.stdenv.mkDerivation (args // {
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
If there are multiple versions of a package, this should be reflected in
|
||||
If there are multiple versions of a package, this <emphasis>should</emphasis> be reflected in
|
||||
the variable names in <filename>all-packages.nix</filename>, e.g.
|
||||
<varname>json-c-0-9</varname> and <varname>json-c-0-11</varname>. If
|
||||
there is an obvious “default” version, make an attribute like
|
||||
@ -791,7 +814,7 @@ args.stdenv.mkDerivation (args // {
|
||||
|
||||
<para>
|
||||
There are multiple ways to fetch a package source in nixpkgs. The general
|
||||
guideline is that you should package sources with a high degree of
|
||||
guideline is that you should package reproducible sources with a high degree of
|
||||
availability. Right now there is only one fetcher which has mirroring
|
||||
support and that is <literal>fetchurl</literal>. Note that you should also
|
||||
prefer protocols which have a corresponding proxy environment variable.
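For illustration, a minimal <literal>fetchurl</literal> call using a
mirror-aware URL could look like this (the package name and hash are
placeholders, not taken from the manual):
<programlisting>
src = fetchurl {
  url = "mirror://gnu/hello/hello-2.10.tar.gz";
  # hash obtained beforehand, e.g. with nix-prefetch-url
  sha256 = "0000000000000000000000000000000000000000000000000000";
};
</programlisting>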
|
||||
@ -853,6 +876,123 @@ src = fetchFromGitHub {
|
||||
</itemizedlist>
|
||||
</para>
|
||||
</section>
|
||||
<section xml:id="sec-source-hashes">
|
||||
<title>Obtaining source hash</title>
|
||||
|
||||
<para>
|
||||
The preferred source hash type is sha256. There are several ways to get it.
|
||||
</para>
|
||||
|
||||
<orderedlist>
|
||||
<listitem>
|
||||
<para>
|
||||
Prefetch URL (with <literal>nix-prefetch-<replaceable>XXX</replaceable>
|
||||
<replaceable>URL</replaceable></literal>, where
|
||||
<replaceable>XXX</replaceable> is one of <literal>url</literal>,
|
||||
<literal>git</literal>, <literal>hg</literal>, <literal>cvs</literal>,
|
||||
<literal>bzr</literal>, <literal>svn</literal>). The hash is printed to
stdout.
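For example (the URL here is purely illustrative):
<screen>
$ nix-prefetch-url https://example.org/foo-1.0.tar.gz
</screen>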
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
Prefetch by package source (with <literal>nix-prefetch-url
|
||||
'<nixpkgs>' -A <replaceable>PACKAGE</replaceable>.src</literal>,
|
||||
where <replaceable>PACKAGE</replaceable> is the package attribute name). The
hash is printed to stdout.
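For example (assuming the package attribute <literal>hello</literal>):
<screen>
$ nix-prefetch-url '<nixpkgs>' -A hello.src
</screen>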
|
||||
</para>
|
||||
<para>
|
||||
This works well when you've upgraded an existing package version and want to
find out the new hash, but it is useless if the package can't be accessed by
attribute or the package has multiple sources (<literal>.srcs</literal>,
architecture-dependent sources, etc.).
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
Upstream provided hash: use it when upstream provides
|
||||
<literal>sha256</literal> or <literal>sha512</literal> (when upstream
|
||||
provides <literal>md5</literal>, don't use it, compute
|
||||
<literal>sha256</literal> instead).
|
||||
</para>
|
||||
<para>
|
||||
A little nuance is that <literal>nix-prefetch-*</literal> tools produce
hashes encoded in <literal>base32</literal>, while upstream usually provides
hexadecimal (<literal>base16</literal>) encoding. Fetchers understand both
formats. Nixpkgs does not standardize on any one format.
|
||||
</para>
|
||||
<para>
|
||||
You can convert between formats with nix-hash, for example:
|
||||
<screen>
|
||||
$ nix-hash --type sha256 --to-base32 <replaceable>HASH</replaceable>
|
||||
</screen>
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
Extracting the hash from a local source tarball can be done with
<literal>sha256sum</literal>. Use <literal>nix-prefetch-url
file:///path/to/tarball</literal> if you want the base32 hash.
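For example (with a hypothetical local tarball path):
<screen>
$ sha256sum /path/to/foo-1.0.tar.gz
$ nix-prefetch-url file:///path/to/foo-1.0.tar.gz
</screen>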
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
Fake hash: set a fake hash in the package expression, perform the build, and
extract the correct hash from the error Nix prints.
|
||||
</para>
|
||||
<para>
|
||||
For package updates, it is enough to change one character of the existing hash to make it fake.
|
||||
For new packages, you can use <literal>lib.fakeSha256</literal>,
|
||||
<literal>lib.fakeSha512</literal> or any other fake hash.
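As a minimal sketch (the URL is a placeholder), a new package might start out
with:
<programlisting>
src = fetchurl {
  url = "https://example.org/foo-1.0.tar.gz";
  sha256 = lib.fakeSha256;
};
</programlisting>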
|
||||
</para>
|
||||
<para>
|
||||
This is a last-resort method, for when reconstructing the source URL is
non-trivial and <literal>nix-prefetch-url -A</literal> isn't applicable (for
example,
<link xlink:href="https://github.com/NixOS/nixpkgs/blob/d2ab091dd308b99e4912b805a5eb088dd536adb9/pkgs/applications/video/kodi/default.nix#L73">
one of <literal>kodi</literal>'s dependencies</link>). The easiest way then is
to replace the hash with a fake one and rebuild. The Nix build will fail and
the error message will contain the desired hash.
|
||||
</para>
|
||||
<warning><para>This method has security problems. Check below for details.</para></warning>
|
||||
</listitem>
|
||||
</orderedlist>
|
||||
|
||||
<section xml:id="sec-source-hashes-security">
|
||||
<title>Obtaining hashes securely</title>
|
||||
<para>
|
||||
Let's say a Man-in-the-Middle (MITM) attacker sits close to your network. Then
instead of fetching the source you can fetch malware, and instead of the source
hash you get the hash of the malware. Here are security considerations for this
scenario:
|
||||
</para>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>
|
||||
<literal>http://</literal> URLs are not secure to prefetch hash from;
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
hashes from upstream (in method 3) should be obtained via secure protocol;
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
<literal>https://</literal> URLs are secure in methods 1, 2, 3;
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
<literal>https://</literal> URLs are not secure in method 5. When obtaining
hashes with the fake-hash method, TLS checks are disabled, so refetch the
source hash from several different networks to exclude a MITM scenario.
Alternatively, use the fake-hash method to make Nix fail, but instead of
extracting the hash from the error, extract the <literal>https://</literal>
URL and prefetch it with method 1.
|
||||
</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
</section>
|
||||
</section>
|
||||
<section xml:id="sec-patches">
|
||||
<title>Patches</title>
|
||||
|
||||
|
@ -132,7 +132,7 @@
|
||||
</itemizedlist>
|
||||
|
||||
<para>
|
||||
The difference between an a package being unsupported on some system and
|
||||
The difference between a package being unsupported on some system and
|
||||
being broken is admittedly a bit fuzzy. If a program
|
||||
<emphasis>ought</emphasis> to work on a certain platform, but doesn't, the
|
||||
platform should be included in <literal>meta.platforms</literal>, but marked
|
||||
@ -175,11 +175,16 @@
|
||||
</programlisting>
|
||||
</para>
|
||||
<para>
|
||||
A more useful example, the following configuration allows only allows
|
||||
flash player and visual studio code:
|
||||
For a more useful example, try the following. This configuration
|
||||
only allows unfree packages named flash player and visual studio
|
||||
code:
|
||||
<programlisting>
|
||||
{
|
||||
allowUnfreePredicate = (pkg: elem (builtins.parseDrvName pkg.name).name [ "flashplayer" "vscode" ]);
|
||||
allowUnfreePredicate = (pkg: builtins.elem
|
||||
(builtins.parseDrvName pkg.name).name [
|
||||
"flashplayer"
|
||||
"vscode"
|
||||
]);
|
||||
}
|
||||
</programlisting>
|
||||
</para>
|
||||
@ -286,8 +291,8 @@
|
||||
|
||||
<para>
|
||||
You can define a function called <varname>packageOverrides</varname> in your
|
||||
local <filename>~/.config/nixpkgs/config.nix</filename> to override nix
|
||||
packages. It must be a function that takes pkgs as an argument and return
|
||||
local <filename>~/.config/nixpkgs/config.nix</filename> to override Nix
|
||||
packages. It must be a function that takes pkgs as an argument and returns a
|
||||
modified set of packages.
|
||||
<programlisting>
|
||||
{
|
||||
@ -321,7 +326,18 @@
|
||||
packageOverrides = pkgs: with pkgs; {
|
||||
myPackages = pkgs.buildEnv {
|
||||
name = "my-packages";
|
||||
paths = [ aspell bc coreutils gdb ffmpeg nixUnstable emscripten jq nox silver-searcher ];
|
||||
paths = [
|
||||
aspell
|
||||
bc
|
||||
coreutils
|
||||
gdb
|
||||
ffmpeg
|
||||
nixUnstable
|
||||
emscripten
|
||||
jq
|
||||
nox
|
||||
silver-searcher
|
||||
];
|
||||
};
|
||||
};
|
||||
}
|
||||
@ -342,7 +358,18 @@
|
||||
packageOverrides = pkgs: with pkgs; {
|
||||
myPackages = pkgs.buildEnv {
|
||||
name = "my-packages";
|
||||
paths = [ aspell bc coreutils gdb ffmpeg nixUnstable emscripten jq nox silver-searcher ];
|
||||
paths = [
|
||||
aspell
|
||||
bc
|
||||
coreutils
|
||||
gdb
|
||||
ffmpeg
|
||||
nixUnstable
|
||||
emscripten
|
||||
jq
|
||||
nox
|
||||
silver-searcher
|
||||
];
|
||||
pathsToLink = [ "/share" "/bin" ];
|
||||
};
|
||||
};
|
||||
@ -377,7 +404,17 @@
|
||||
packageOverrides = pkgs: with pkgs; {
|
||||
myPackages = pkgs.buildEnv {
|
||||
name = "my-packages";
|
||||
paths = [ aspell bc coreutils ffmpeg nixUnstable emscripten jq nox silver-searcher ];
|
||||
paths = [
|
||||
aspell
|
||||
bc
|
||||
coreutils
|
||||
ffmpeg
|
||||
nixUnstable
|
||||
emscripten
|
||||
jq
|
||||
nox
|
||||
silver-searcher
|
||||
];
|
||||
pathsToLink = [ "/share/man" "/share/doc" "/bin" ];
|
||||
extraOutputsToInstall = [ "man" "doc" ];
|
||||
};
|
||||
|
@ -6,17 +6,17 @@
|
||||
<title>Introduction</title>
|
||||
|
||||
<para>
|
||||
"Cross-compilation" means compiling a program on one machine for another
|
||||
type of machine. For example, a typical use of cross compilation is to
|
||||
compile programs for embedded devices. These devices often don't have the
|
||||
computing power and memory to compile their own programs. One might think
|
||||
that cross-compilation is a fairly niche concern, but there are advantages
|
||||
to being rigorous about distinguishing build-time vs run-time environments
|
||||
even when one is developing and deploying on the same machine. Nixpkgs is
|
||||
increasingly adopting the opinion that packages should be written with
|
||||
cross-compilation in mind, and nixpkgs should evaluate in a similar way (by
|
||||
minimizing cross-compilation-specific special cases) whether or not one is
|
||||
cross-compiling.
|
||||
"Cross-compilation" means compiling a program on one machine for another type
|
||||
of machine. For example, a typical use of cross-compilation is to compile
|
||||
programs for embedded devices. These devices often don't have the computing
|
||||
power and memory to compile their own programs. One might think that
|
||||
cross-compilation is a fairly niche concern. However, there are significant
|
||||
advantages to rigorously distinguishing between build-time and run-time
|
||||
environments! This applies even when one is developing and deploying on the
|
||||
same machine. Nixpkgs is increasingly adopting the opinion that packages
|
||||
should be written with cross-compilation in mind, and nixpkgs should evaluate
|
||||
in a similar way (by minimizing cross-compilation-specific special cases)
|
||||
whether or not one is cross-compiling.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
@ -34,15 +34,15 @@
|
||||
<title>Platform parameters</title>
|
||||
|
||||
<para>
|
||||
Nixpkgs follows the
|
||||
<link xlink:href="https://gcc.gnu.org/onlinedocs/gccint/Configure-Terms.html">common
|
||||
historical convention of GNU autoconf</link> of distinguishing between 3
|
||||
types of platform: <wordasword>build</wordasword>,
|
||||
<wordasword>host</wordasword>, and <wordasword>target</wordasword>. In
|
||||
summary, <wordasword>build</wordasword> is the platform on which a package
|
||||
is being built, <wordasword>host</wordasword> is the platform on which it
|
||||
is to run. The third attribute, <wordasword>target</wordasword>, is
|
||||
relevant only for certain specific compilers and build tools.
|
||||
Nixpkgs follows the <link
|
||||
xlink:href="https://gcc.gnu.org/onlinedocs/gccint/Configure-Terms.html">conventions
|
||||
of GNU autoconf</link>. We distinguish between 3 types of platforms when
|
||||
building a derivation: <wordasword>build</wordasword>,
|
||||
<wordasword>host</wordasword>, and <wordasword>target</wordasword>. In
|
||||
summary, <wordasword>build</wordasword> is the platform on which a package
|
||||
is being built, <wordasword>host</wordasword> is the platform on which it
|
||||
will run. The third attribute, <wordasword>target</wordasword>, is relevant
|
||||
only for certain specific compilers and build tools.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
@ -64,7 +64,7 @@
|
||||
<para>
|
||||
The "build platform" is the platform on which a package is built. Once
|
||||
someone has a built package, or pre-built binary package, the build
|
||||
platform should not matter and be safe to ignore.
|
||||
platform should not matter and can be ignored.
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
@ -94,11 +94,11 @@
|
||||
<para>
|
||||
The build process of certain compilers is written in such a way that the
|
||||
compiler resulting from a single build can itself only produce binaries
|
||||
for a single platform. The task specifying this single "target platform"
|
||||
is thus pushed to build time of the compiler. The root cause of this
|
||||
mistake is often that the compiler (which will be run on the host) and
|
||||
the the standard library/runtime (which will be run on the target) are
|
||||
built by a single build process.
|
||||
for a single platform. The task of specifying this single "target
|
||||
platform" is thus pushed to build time of the compiler. The root cause of
|
||||
this is that the compiler (which will be run on the host) and the standard
|
||||
library/runtime (which will be run on the target) are built by a single
|
||||
build process.
|
||||
</para>
|
||||
<para>
|
||||
There is no fundamental need to think about a single target ahead of
|
||||
@ -135,8 +135,10 @@
|
||||
<para>
|
||||
This is a two-component shorthand for the platform. Examples of this
|
||||
would be "x86_64-darwin" and "i686-linux"; see
|
||||
<literal>lib.systems.doubles</literal> for more. This format isn't very
|
||||
standard, but has built-in support in Nix, such as the
|
||||
<literal>lib.systems.doubles</literal> for more. The first component
|
||||
corresponds to the CPU architecture of the platform and the second to the
|
||||
operating system of the platform (<literal>[cpu]-[os]</literal>). This
|
||||
format has built-in support in Nix, such as the
|
||||
<varname>builtins.currentSystem</varname> impure string.
|
||||
</para>
|
||||
</listitem>
|
||||
@ -147,12 +149,13 @@
|
||||
</term>
|
||||
<listitem>
|
||||
<para>
|
||||
This is a 3- or 4- component shorthand for the platform. Examples of
|
||||
this would be "x86_64-unknown-linux-gnu" and "aarch64-apple-darwin14".
|
||||
This is a standard format called the "LLVM target triple", as they are
|
||||
pioneered by LLVM and traditionally just used for the
|
||||
<varname>targetPlatform</varname>. This format is strictly more
|
||||
informative than the "Nix host double", as the previous format could
|
||||
This is a 3- or 4- component shorthand for the platform. Examples of this
|
||||
would be <literal>x86_64-unknown-linux-gnu</literal> and
|
||||
<literal>aarch64-apple-darwin14</literal>. This is a standard format
|
||||
called the "LLVM target triple", as they are pioneered by LLVM. In the
|
||||
4-part form, this corresponds to
|
||||
<literal>[cpu]-[vendor]-[os]-[abi]</literal>. This format is strictly
|
||||
more informative than the "Nix host double", as the previous format could
|
||||
analogously be termed. This needs a better name than
|
||||
<varname>config</varname>!
|
||||
</para>
|
||||
@ -164,12 +167,11 @@
|
||||
</term>
|
||||
<listitem>
|
||||
<para>
|
||||
This is a nix representation of a parsed LLVM target triple with
|
||||
white-listed components. This can be specified directly, or actually
|
||||
parsed from the <varname>config</varname>. [Technically, only one need
|
||||
be specified and the others can be inferred, though the precision of
|
||||
inference may not be very good.] See
|
||||
<literal>lib.systems.parse</literal> for the exact representation.
|
||||
This is a Nix representation of a parsed LLVM target triple
|
||||
with white-listed components. This can be specified directly,
|
||||
or actually parsed from the <varname>config</varname>. See
|
||||
<literal>lib.systems.parse</literal> for the exact
|
||||
representation.
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
@ -193,7 +195,7 @@
|
||||
<listitem>
|
||||
<para>
|
||||
These predicates are defined in <literal>lib.systems.inspect</literal>,
|
||||
and slapped on every platform. They are superior to the ones in
|
||||
and slapped onto every platform. They are superior to the ones in
|
||||
<varname>stdenv</varname> as they force the user to be explicit about
|
||||
which platform they are inspecting. Please use these instead of those.
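For example, a package expression might guard a Linux-only dependency like
this (a sketch, not taken from the manual):
<programlisting>
buildInputs = lib.optional stdenv.hostPlatform.isLinux libcap;
</programlisting>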
|
||||
</para>
|
||||
@ -221,7 +223,7 @@
|
||||
|
||||
<para>
|
||||
In this section we explore the relationship between both runtime and
|
||||
buildtime dependencies and the 3 Autoconf platforms.
|
||||
build-time dependencies and the 3 Autoconf platforms.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
@ -249,17 +251,17 @@
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Some examples will probably make this clearer. If a package is being built
|
||||
with a <literal>(build, host, target)</literal> platform triple of
|
||||
<literal>(foo, bar, bar)</literal>, then its build-time dependencies would
|
||||
have a triple of <literal>(foo, foo, bar)</literal>, and <emphasis>those
|
||||
packages'</emphasis> build-time dependencies would have triple of
|
||||
<literal>(foo, foo, foo)</literal>. In other words, it should take two
|
||||
"rounds" of following build-time dependency edges before one reaches a
|
||||
fixed point where, by the sliding window principle, the platform triple no
|
||||
longer changes. Indeed, this happens with cross compilation, where only
|
||||
rounds of native dependencies starting with the second necessarily coincide
|
||||
with native packages.
|
||||
Some examples will make this clearer. If a package is being built with a
|
||||
<literal>(build, host, target)</literal> platform triple of <literal>(foo,
|
||||
bar, bar)</literal>, then its build-time dependencies would have a triple of
|
||||
<literal>(foo, foo, bar)</literal>, and <emphasis>those packages'</emphasis>
|
||||
build-time dependencies would have a triple of <literal>(foo, foo,
|
||||
foo)</literal>. In other words, it should take two "rounds" of following
|
||||
build-time dependency edges before one reaches a fixed point where, by the
|
||||
sliding window principle, the platform triple no longer changes. Indeed,
|
||||
this happens with cross-compilation, where only rounds of native
|
||||
dependencies starting with the second necessarily coincide with native
|
||||
packages.
|
||||
</para>
|
||||
|
||||
<note>
|
||||
@ -271,23 +273,23 @@
|
||||
</note>
|
||||
|
||||
<para>
|
||||
How does this work in practice? Nixpkgs is now structured so that
|
||||
build-time dependencies are taken from <varname>buildPackages</varname>,
|
||||
whereas run-time dependencies are taken from the top level attribute set.
|
||||
For example, <varname>buildPackages.gcc</varname> should be used at build
|
||||
time, while <varname>gcc</varname> should be used at run time. Now, for
|
||||
most of Nixpkgs's history, there was no <varname>buildPackages</varname>,
|
||||
and most packages have not been refactored to use it explicitly. Instead,
|
||||
one can use the six (<emphasis>gasp</emphasis>) attributes used for
|
||||
specifying dependencies as documented in
|
||||
<xref linkend="ssec-stdenv-dependencies"/>. We "splice" together the
|
||||
run-time and build-time package sets with <varname>callPackage</varname>,
|
||||
and then <varname>mkDerivation</varname> for each of four attributes pulls
|
||||
the right derivation out. This splicing can be skipped when not cross
|
||||
compiling as the package sets are the same, but is a bit slow for cross
|
||||
compiling. Because of this, a best-of-both-worlds solution is in the works
|
||||
with no splicing or explicit access of <varname>buildPackages</varname>
|
||||
needed. For now, feel free to use either method.
|
||||
How does this work in practice? Nixpkgs is now structured so that build-time
|
||||
dependencies are taken from <varname>buildPackages</varname>, whereas
|
||||
run-time dependencies are taken from the top level attribute set. For
|
||||
example, <varname>buildPackages.gcc</varname> should be used at build-time,
|
||||
while <varname>gcc</varname> should be used at run-time. Now, for most of
|
||||
Nixpkgs's history, there was no <varname>buildPackages</varname>, and most
|
||||
packages have not been refactored to use it explicitly. Instead, one can use
|
||||
the six (<emphasis>gasp</emphasis>) attributes used for specifying
|
||||
dependencies as documented in <xref linkend="ssec-stdenv-dependencies"/>. We
|
||||
"splice" together the run-time and build-time package sets with
|
||||
<varname>callPackage</varname>, and then <varname>mkDerivation</varname> for
|
||||
each of four attributes pulls the right derivation out. This splicing can be
|
||||
skipped when not cross-compiling as the package sets are the same, but is a
|
||||
bit slow for cross-compiling. Because of this, a best-of-both-worlds
|
||||
solution is in the works with no splicing or explicit access of
|
||||
<varname>buildPackages</varname> needed. For now, feel free to use either
|
||||
method.
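As a rough sketch of the two styles (the dependency names are illustrative):
<programlisting>
# Using the dependency attributes and letting splicing do the work:
nativeBuildInputs = [ perl ];

# Or reaching into the build-time package set explicitly:
nativeBuildInputs = [ buildPackages.perl ];
</programlisting>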
|
||||
</para>
|
||||
|
||||
<note>
|
||||
@ -305,11 +307,11 @@
|
||||
<title>Cross packaging cookbook</title>
|
||||
|
||||
<para>
|
||||
Some frequently problems when packaging for cross compilation are good to
|
||||
just spell and answer. Ideally the information above is exhaustive, so this
|
||||
section cannot provide any new information, but its ludicrous and cruel to
|
||||
expect everyone to spend effort working through the interaction of many
|
||||
features just to figure out the same answer to the same common problem.
|
||||
Some frequently encountered problems when packaging for cross-compilation
|
||||
should be answered here. Ideally, the information above is exhaustive, so
|
||||
this section cannot provide any new information, but it is ludicrous and
|
||||
cruel to expect everyone to spend effort working through the interaction of
|
||||
many features just to figure out the same answer to the same common problem.
|
||||
Feel free to add to this list!
|
||||
</para>
|
||||
|
||||
@ -364,17 +366,9 @@
|
||||
<section xml:id="sec-cross-usage">
|
||||
<title>Cross-building packages</title>
|
||||
|
||||
<note>
|
||||
<para>
|
||||
More information needs to moved from the old wiki, especially
|
||||
<link xlink:href="https://nixos.org/wiki/CrossCompiling" />, for this
|
||||
section.
|
||||
</para>
|
||||
</note>
|
||||
|
||||
<para>
|
||||
Nixpkgs can be instantiated with <varname>localSystem</varname> alone, in
|
||||
which case there is no cross compiling and everything is built by and for
|
||||
which case there is no cross-compiling and everything is built by and for
|
||||
that system, or also with <varname>crossSystem</varname>, in which case
|
||||
packages run on the latter, but all building happens on the former. Both
|
||||
parameters take the same schema as the 3 (build, host, and target) platforms
|
||||
@ -391,7 +385,7 @@ nix-build <nixpkgs> --arg crossSystem '(import <nixpkgs/lib>).system
|
||||
Eventually we would like to make these platform examples an unnecessary
|
||||
convenience so that
|
||||
<programlisting>
|
||||
nix-build <nixpkgs> --arg crossSystem.config '<arch>-<os>-<vendor>-<abi>' -A whatever</programlisting>
|
||||
nix-build <nixpkgs> --arg crossSystem '{ config = "<arch>-<os>-<vendor>-<abi>"; }' -A whatever</programlisting>
|
||||
works in the vast majority of cases. The problem today is dependencies on
|
||||
other sorts of configuration which aren't given proper defaults. We rely on
|
||||
the examples to crudely set those configuration parameters in some
|
||||
@ -440,15 +434,14 @@ nix-build <nixpkgs> --arg crossSystem.config '<arch>-<os>-<
|
||||
build plan or package set. A simple "build vs deploy" dichotomy is adequate:
|
||||
the sliding window principle described in the previous section shows how to
|
||||
interpolate between these two "end points" to get the 3 platform triple
|
||||
for each bootstrapping stage. That means for any package a given package
|
||||
set, even those not bound on the top level but only reachable via
|
||||
dependencies or <varname>buildPackages</varname>, the three platforms will
|
||||
be defined as one of <varname>localSystem</varname> or
|
||||
<varname>crossSystem</varname>, with the former replacing the latter as one
|
||||
traverses build-time dependencies. A last simple difference then is
|
||||
<varname>crossSystem</varname> should be null when one doesn't want to
|
||||
cross-compile, while the <varname>*Platform</varname>s are always non-null.
|
||||
<varname>localSystem</varname> is always non-null.
|
||||
for each bootstrapping stage. That means that, for any package in a given package set,
|
||||
even those not bound on the top level but only reachable via dependencies or
|
||||
<varname>buildPackages</varname>, the three platforms will be defined as one
|
||||
of <varname>localSystem</varname> or <varname>crossSystem</varname>, with the
|
||||
former replacing the latter as one traverses build-time dependencies. A last
|
||||
simple difference is that <varname>crossSystem</varname> should be null when
|
||||
one doesn't want to cross-compile, while the <varname>*Platform</varname>s
|
||||
are always non-null. <varname>localSystem</varname> is always non-null.
|
||||
</para>
|
||||
</section>
|
||||
<!--============================================================-->
|
||||
@ -461,14 +454,14 @@ nix-build <nixpkgs> --arg crossSystem.config '<arch>-<os>-<
|
||||
|
||||
<note>
|
||||
<para>
|
||||
If one explores nixpkgs, they will see derivations with names like
|
||||
<literal>gccCross</literal>. Such <literal>*Cross</literal> derivations is
|
||||
a holdover from before we properly distinguished between the host and
|
||||
target platforms —the derivation with "Cross" in the name covered the
|
||||
<literal>build = host != target</literal> case, while the other covered the
|
||||
<literal>host = target</literal>, with build platform the same or not based
|
||||
on whether one was using its <literal>.nativeDrv</literal> or
|
||||
<literal>.crossDrv</literal>. This ugliness will disappear soon.
|
||||
If one explores Nixpkgs, they will see derivations with names like
|
||||
<literal>gccCross</literal>. Such <literal>*Cross</literal> derivations are a
|
||||
holdover from before we properly distinguished between the host and target
|
||||
platforms—the derivation with "Cross" in the name covered the <literal>build
|
||||
= host != target</literal> case, while the other covered the <literal>host =
|
||||
target</literal>, with build platform the same or not based on whether one
|
||||
was using its <literal>.nativeDrv</literal> or <literal>.crossDrv</literal>.
|
||||
This ugliness will disappear soon.
|
||||
</para>
|
||||
</note>
|
||||
</section>
|
||||
|
@ -2,8 +2,8 @@
|
||||
let
|
||||
lib = pkgs.lib;
|
||||
locationsXml = import ./lib-function-locations.nix { inherit pkgs nixpkgs; };
|
||||
in
|
||||
pkgs.stdenv.mkDerivation {
|
||||
functionDocs = import ./lib-function-docs.nix { inherit locationsXml pkgs; };
|
||||
in pkgs.stdenv.mkDerivation {
|
||||
name = "nixpkgs-manual";
|
||||
|
||||
buildInputs = with pkgs; [ pandoc libxml2 libxslt zip jing xmlformat ];
|
||||
@ -32,6 +32,7 @@ pkgs.stdenv.mkDerivation {
|
||||
postPatch = ''
|
||||
rm -rf ./functions/library/locations.xml
|
||||
ln -s ${locationsXml} ./functions/library/locations.xml
|
||||
ln -s ${functionDocs} ./functions/library/generated
|
||||
echo ${lib.version} > .version
|
||||
'';
|
||||
|
||||
|
@ -14,4 +14,5 @@
|
||||
<xi:include href="functions/fhs-environments.xml" />
|
||||
<xi:include href="functions/shell.xml" />
|
||||
<xi:include href="functions/dockertools.xml" />
|
||||
<xi:include href="functions/prefer-remote-fetch.xml" />
|
||||
</chapter>
|
||||
|
@ -12,4 +12,13 @@
|
||||
<xi:include href="./library/asserts.xml" />
|
||||
|
||||
<xi:include href="./library/attrsets.xml" />
|
||||
|
||||
<!-- These docs are generated via nixdoc. To add another generated
|
||||
library function file to this list, the file
|
||||
`lib-function-docs.nix` must also be updated. -->
|
||||
<xi:include href="./library/generated/strings.xml" />
|
||||
<xi:include href="./library/generated/trivial.xml" />
|
||||
<xi:include href="./library/generated/lists.xml" />
|
||||
<xi:include href="./library/generated/debug.xml" />
|
||||
<xi:include href="./library/generated/options.xml" />
|
||||
</section>
|
||||
|
doc/functions/prefer-remote-fetch.xml (new file, 27 lines)
@ -0,0 +1,27 @@
|
||||
<section xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
xmlns:xi="http://www.w3.org/2001/xinclude"
|
||||
xml:id="sec-prefer-remote-fetch">
|
||||
<title>prefer-remote-fetch overlay</title>
|
||||
|
||||
<para>
|
||||
<function>prefer-remote-fetch</function> is an overlay that downloads sources
on a remote builder. This is useful when the evaluating machine has a slow
upload speed while the builder can fetch faster directly from the source.
To use it, add the following snippet as a new overlay:
|
||||
<programlisting>
|
||||
self: super:
|
||||
(super.prefer-remote-fetch self super)
|
||||
</programlisting>
|
||||
|
||||
A full configuration example that sets the overlay up for your own account
could look like this:
|
||||
|
||||
<programlisting>
|
||||
$ mkdir ~/.config/nixpkgs/overlays/
|
||||
$ cat > ~/.config/nixpkgs/overlays/prefer-remote-fetch.nix <<EOF
|
||||
self: super: super.prefer-remote-fetch self super
|
||||
EOF
|
||||
</programlisting>
|
||||
</para>
|
||||
</section>
|
doc/languages-frameworks/android.section.md (new file, 240 lines)
@ -0,0 +1,240 @@
|
||||
---
|
||||
title: Android
|
||||
author: Sander van der Burg
|
||||
date: 2018-11-18
|
||||
---
|
||||
# Android
|
||||
|
||||
The Android build environment provides three major features and a number of
|
||||
supporting features.
|
||||
|
||||
Deploying an Android SDK installation with plugins
|
||||
--------------------------------------------------
|
||||
The first use case is deploying the SDK with a desired set of plugins or subsets
|
||||
of an SDK.
|
||||
|
||||
```nix
|
||||
with import <nixpkgs> {};
|
||||
|
||||
let
|
||||
androidComposition = androidenv.composeAndroidPackages {
|
||||
toolsVersion = "25.2.5";
|
||||
platformToolsVersion = "27.0.1";
|
||||
buildToolsVersions = [ "27.0.3" ];
|
||||
includeEmulator = false;
|
||||
emulatorVersion = "27.2.0";
|
||||
platformVersions = [ "24" ];
|
||||
includeSources = false;
|
||||
includeDocs = false;
|
||||
includeSystemImages = false;
|
||||
systemImageTypes = [ "default" ];
|
||||
abiVersions = [ "armeabi-v7a" ];
|
||||
lldbVersions = [ "2.0.2558144" ];
|
||||
cmakeVersions = [ "3.6.4111459" ];
|
||||
includeNDK = false;
|
||||
ndkVersion = "16.1.4479499";
|
||||
useGoogleAPIs = false;
|
||||
useGoogleTVAddOns = false;
|
||||
includeExtras = [
|
||||
"extras;google;gcm"
|
||||
];
|
||||
};
|
||||
in
|
||||
androidComposition.androidsdk
|
||||
```
|
||||
|
||||
The above function invocation states that we want an Android SDK with the above
|
||||
specified plugin versions. By default, most plugins are disabled. Notable
|
||||
exceptions are the tools, platform-tools and build-tools sub packages.
|
||||
|
||||
The following parameters are supported:
|
||||
|
||||
* `toolsVersion` specifies the version of the tools package to use
* `platformToolsVersion` specifies the version of the `platform-tools` plugin
* `buildToolsVersions` specifies the versions of the `build-tools` plugins to
  use.
|
||||
* `includeEmulator` specifies whether to deploy the emulator package (`false`
|
||||
by default). When enabled, the version of the emulator to deploy can be
|
||||
specified by setting the `emulatorVersion` parameter.
|
||||
* `includeDocs` specifies whether the documentation catalog should be included.
|
||||
* `lldbVersions` specifies what LLDB versions should be deployed.
|
||||
* `cmakeVersions` specifies which CMake versions should be deployed.
|
||||
* `includeNDK` specifies that the Android NDK bundle should be included.
|
||||
Defaults to: `false`.
|
||||
* `ndkVersion` specifies the NDK version that we want to use.
|
||||
* `includeExtras` is an array of identifier strings referring to arbitrary
|
||||
add-on packages that should be installed.
|
||||
* `platformVersions` specifies which platform SDK versions should be included.
|
||||
|
||||
For each platform version that has been specified, we can apply the following
|
||||
options:
|
||||
|
||||
* `includeSystemImages` specifies whether a system image for each platform SDK
|
||||
should be included.
|
||||
* `includeSources` specifies whether the sources for each SDK version should be
|
||||
included.
|
||||
* `useGoogleAPIs` specifies that for each selected platform version the
|
||||
Google API should be included.
|
||||
* `useGoogleTVAddOns` specifies that for each selected platform version the
|
||||
Google TV add-on should be included.
|
||||
|
||||
For each requested system image we can specify the following options:
|
||||
|
||||
* `systemImageTypes` specifies what kind of system images should be included.
|
||||
Defaults to: `default`.
|
||||
* `abiVersions` specifies what kind of ABI version of each system image should
|
||||
be included. Defaults to: `armeabi-v7a`.
|
||||
|
||||
Most of the function arguments have reasonable default settings.
|
||||
|
||||
When building the above expression with:
|
||||
|
||||
```bash
|
||||
$ nix-build
|
||||
```
|
||||
|
||||
The Android SDK gets deployed with all desired plugin versions.
|
||||
|
||||
We can also deploy subsets of the Android SDK. For example, to deploy only the
`platform-tools` package, you can evaluate the following expression:
|
||||
|
||||
```nix
|
||||
with import <nixpkgs> {};
|
||||
|
||||
let
|
||||
androidComposition = androidenv.composeAndroidPackages {
|
||||
# ...
|
||||
};
|
||||
in
|
||||
androidComposition.platform-tools
|
||||
```
|
||||
|
||||
Using predefined Android package compositions
|
||||
--------------------------------------------
|
||||
In addition to composing an Android package set manually, it is also possible
|
||||
to use a predefined composition that contains all basic packages for a specific
|
||||
Android version, such as version 9.0 (API-level 28).
|
||||
|
||||
The following Nix expression can be used to deploy the entire SDK with all basic
|
||||
plugins:
|
||||
|
||||
```nix
|
||||
with import <nixpkgs> {};
|
||||
|
||||
androidenv.androidPkgs_9_0.androidsdk
|
||||
```
|
||||
|
||||
It is also possible to use one plugin only:
|
||||
|
||||
```nix
|
||||
with import <nixpkgs> {};
|
||||
|
||||
androidenv.androidPkgs_9_0.platform-tools
|
||||
```
|
||||
|
||||
Building an Android application
|
||||
-------------------------------
|
||||
In addition to the SDK, it is also possible to build an Ant-based Android
|
||||
project and automatically deploy all the Android plugins that a project
|
||||
requires.
|
||||
|
||||
```nix
|
||||
with import <nixpkgs> {};
|
||||
|
||||
androidenv.buildApp {
|
||||
name = "MyAndroidApp";
|
||||
src = ./myappsources;
|
||||
release = true;
|
||||
|
||||
# If release is set to true, you need to specify the following parameters
|
||||
keyStore = ./keystore;
|
||||
keyAlias = "myfirstapp";
|
||||
keyStorePassword = "mykeystore";
|
||||
keyAliasPassword = "myfirstapp";
|
||||
|
||||
# Any Android SDK parameters that install all the relevant plugins that a
|
||||
# build requires
|
||||
platformVersions = [ "24" ];
|
||||
|
||||
# When we include the NDK, then ndk-build is invoked before Ant gets invoked
|
||||
includeNDK = true;
|
||||
}
|
||||
```
|
||||
|
||||
Aside from the app-specific build parameters (`name`, `src`, `release` and
|
||||
keystore parameters), the `buildApp {}` function supports all the function
|
||||
parameters that the SDK composition function (the function shown in the
|
||||
previous section) supports.
|
||||
|
||||
This build function is particularly useful when it is desired to use
|
||||
[Hydra](http://nixos.org/hydra): the Nix-based continuous integration solution
|
||||
to build Android apps. An Android APK gets exposed as a build product and can be
|
||||
installed on any Android device with a web browser by navigating to the build
|
||||
result page.
|
||||
|
||||
Spawning emulator instances
|
||||
---------------------------
|
||||
For testing purposes, it can also be quite convenient to automatically generate
|
||||
scripts that spawn emulator instances with all desired configuration settings.
|
||||
|
||||
An emulator spawn script can be configured by invoking the `emulateApp {}`
|
||||
function:
|
||||
|
||||
```nix
|
||||
with import <nixpkgs> {};
|
||||
|
||||
androidenv.emulateApp {
|
||||
name = "emulate-MyAndroidApp";
|
||||
platformVersion = "24";
|
||||
abiVersion = "armeabi-v7a"; # mips, x86 or x86_64
|
||||
systemImageType = "default";
|
||||
useGoogleAPIs = false;
|
||||
}
|
||||
```
|
||||
|
||||
It is also possible to specify an APK to deploy inside the emulator
|
||||
and the package and activity names to launch it:
|
||||
|
||||
```nix
|
||||
with import <nixpkgs> {};
|
||||
|
||||
androidenv.emulateApp {
|
||||
name = "emulate-MyAndroidApp";
|
||||
platformVersion = "24";
|
||||
abiVersion = "armeabi-v7a"; # mips, x86 or x86_64
|
||||
systemImageType = "default";
|
||||
useGoogleAPIs = false;
|
||||
app = ./MyApp.apk;
|
||||
package = "MyApp";
|
||||
activity = "MainActivity";
|
||||
}
|
||||
```
|
||||
|
||||
In addition to prebuilt APKs, you can also bind the APK parameter to a
|
||||
`buildApp {}` function invocation shown in the previous example.
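For instance, a sketch combining the two functions (parameter values are
illustrative):

```nix
with import <nixpkgs> {};

androidenv.emulateApp {
  name = "emulate-MyAndroidApp";
  platformVersion = "24";
  abiVersion = "armeabi-v7a";
  systemImageType = "default";
  useGoogleAPIs = false;

  # Deploy an APK produced by buildApp {} instead of a prebuilt one
  app = androidenv.buildApp {
    name = "MyAndroidApp";
    src = ./myappsources;
    platformVersions = [ "24" ];
  };
  package = "MyApp";
  activity = "MainActivity";
}
```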
|
||||
|
||||
Querying the available versions of each plugin
|
||||
----------------------------------------------
|
||||
When using any of the previously shown functions, it may be a bit inconvenient
|
||||
to find out what options are supported, since the Android SDK provides many
|
||||
plugins.
|
||||
|
||||
A shell script in the `pkgs/development/mobile/androidenv/` subdirectory can be used to retrieve all
|
||||
possible options:
|
||||
|
||||
```bash
|
||||
sh ./querypackages.sh packages build-tools
|
||||
```
|
||||
|
||||
The above command-line instruction queries all build-tools versions in the
|
||||
generated `packages.nix` expression.
|
||||
|
||||
Updating the generated expressions
|
||||
----------------------------------
|
||||
Most of the Nix expressions are generated from XML files that the Android
|
||||
package manager uses. To update the expressions, run the `generate.sh` script
that is stored in the `pkgs/development/mobile/androidenv/` subdirectory:
|
||||
|
||||
```bash
|
||||
sh ./generate.sh
|
||||
```
|
@ -935,7 +935,7 @@ The implementation can be found in the
|
||||
[integer-gmp](http://hackage.haskell.org/package/integer-gmp) package.
|
||||
|
||||
A potential problem with this is that GMP is licensed under the
|
||||
[GNU Lesser General Public License (LGPL)](http://www.gnu.org/copyleft/lesser.html),
|
||||
[GNU Lesser General Public License (LGPL)](https://www.gnu.org/copyleft/lesser.html),
|
||||
a kind of "copyleft" license. According to the terms of the LGPL, paragraph 5,
|
||||
you may distribute a program that is designed to be compiled and dynamically
|
||||
linked with the library under the terms of your choice (i.e., commercially) but
|
||||
|
@ -1,39 +1,115 @@
|
||||
Idris packages
|
||||
==============
|
||||
# Idris packages
|
||||
|
||||
This directory contains build rules for idris packages. In addition,
|
||||
it contains several functions to build and compose those packages.
|
||||
Everything is exposed to the user via the `idrisPackages` attribute.
|
||||
## Installing Idris
|
||||
|
||||
callPackage
|
||||
------------
|
||||
The easiest way to get a working idris version is to install the `idris` attribute:
|
||||
|
||||
This is like the normal nixpkgs callPackage function, specialized to
|
||||
idris packages.
|
||||
```
|
||||
$ # On NixOS
|
||||
$ nix-env -i nixos.idris
|
||||
$ # On non-NixOS
|
||||
$ nix-env -i nixpkgs.idris
|
||||
```
|
||||
|
||||
builtins
|
||||
---------
|
||||
This however only provides the `prelude` and `base` libraries. To install additional libraries:
|
||||
|
||||
This is a list of all of the libraries that come packaged with Idris
|
||||
itself.
|
||||
```
|
||||
$ nix-env -iE 'pkgs: pkgs.idrisPackages.with-packages (with pkgs.idrisPackages; [ contrib pruviloj ])'
|
||||
```
|
||||
|
||||
build-idris-package
|
||||
--------------------
|
||||
To see all available Idris packages:
|
||||
```
|
||||
$ # On NixOS
|
||||
$ nix-env -qaPA nixos.idrisPackages
|
||||
$ # On non-NixOS
|
||||
$ nix-env -qaPA nixpkgs.idrisPackages
|
||||
```
|
||||
|
||||
A function to build an idris package. Its sole argument is a set like
|
||||
you might pass to `stdenv.mkDerivation`, except `build-idris-package`
|
||||
sets several attributes for you. See `build-idris-package.nix` for
|
||||
details.
|
||||
Similarly, entering a `nix-shell`:
|
||||
```
|
||||
$ nix-shell -p 'idrisPackages.with-packages (with idrisPackages; [ contrib pruviloj ])'
|
||||
```
|
||||
|
||||
build-builtin-package
|
||||
----------------------
|
||||
## Starting Idris with library support
|
||||
|
||||
A version of `build-idris-package` specialized to builtin libraries.
|
||||
Mostly for internal use.
|
||||
To have access to these libraries in idris, call it with an argument `-p <library name>` for each library:
|
||||
|
||||
with-packages
|
||||
-------------
|
||||
```
|
||||
$ nix-shell -p 'idrisPackages.with-packages (with idrisPackages; [ contrib pruviloj ])'
|
||||
[nix-shell:~]$ idris -p contrib -p pruviloj
|
||||
```
|
||||
|
||||
Bundle idris together with a list of packages. Because idris currently
|
||||
only supports a single directory in its library path, you must include
|
||||
all desired libraries here, including `prelude` and `base`.
|
||||
A listing of all available packages the Idris binary has access to is available via `--listlibs`:
|
||||
|
||||
```
|
||||
$ idris --listlibs
|
||||
00prelude-idx.ibc
|
||||
pruviloj
|
||||
base
|
||||
contrib
|
||||
prelude
|
||||
00pruviloj-idx.ibc
|
||||
00base-idx.ibc
|
||||
00contrib-idx.ibc
|
||||
```
|
||||
|
||||
## Building an Idris project with Nix
|
||||
|
||||
As an example of how a Nix expression for an Idris package can be created, here is the one for `idrisPackages.yaml`:
|
||||
|
||||
```nix
|
||||
{ build-idris-package
|
||||
, fetchFromGitHub
|
||||
, contrib
|
||||
, lightyear
|
||||
, lib
|
||||
}:
|
||||
build-idris-package {
|
||||
name = "yaml";
|
||||
version = "2018-01-25";
|
||||
|
||||
# This is the .ipkg file that should be built, defaults to the package name
|
||||
# In this case it should build `Yaml.ipkg` instead of `yaml.ipkg`
|
||||
# This is only necessary because the yaml packages ipkg file is
|
||||
# different from its package name here.
|
||||
ipkgName = "Yaml";
|
||||
# Idris dependencies to provide for the build
|
||||
idrisDeps = [ contrib lightyear ];
|
||||
|
||||
src = fetchFromGitHub {
|
||||
owner = "Heather";
|
||||
repo = "Idris.Yaml";
|
||||
rev = "5afa51ffc839844862b8316faba3bafa15656db4";
|
||||
sha256 = "1g4pi0swmg214kndj85hj50ccmckni7piprsxfdzdfhg87s0avw7";
|
||||
};
|
||||
|
||||
meta = {
|
||||
description = "Idris YAML lib";
|
||||
homepage = https://github.com/Heather/Idris.Yaml;
|
||||
license = lib.licenses.mit;
|
||||
maintainers = [ lib.maintainers.brainrape ];
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
Assuming this file is saved as `yaml.nix`, it's buildable using
|
||||
|
||||
```
|
||||
$ nix-build -E '(import <nixpkgs> {}).idrisPackages.callPackage ./yaml.nix {}'
|
||||
```
|
||||
|
||||
Or it's possible to use
|
||||
|
||||
```nix
|
||||
with import <nixpkgs> {};
|
||||
|
||||
{
|
||||
yaml = idrisPackages.callPackage ./yaml.nix {};
|
||||
}
|
||||
```
|
||||
|
||||
in another file (say `default.nix`) to be able to build it with
|
||||
|
||||
```
|
||||
$ nix-build -A yaml
|
||||
```
|
||||
|
@ -10,15 +10,18 @@
|
||||
Nixpkgs to easily build packages for other programming languages, such as
|
||||
Perl or Haskell. These are described in this chapter.
|
||||
</para>
|
||||
<xi:include href="android.section.xml" />
|
||||
<xi:include href="beam.xml" />
|
||||
<xi:include href="bower.xml" />
|
||||
<xi:include href="coq.xml" />
|
||||
<xi:include href="go.xml" />
|
||||
<xi:include href="haskell.section.xml" />
|
||||
<xi:include href="idris.section.xml" />
|
||||
<xi:include href="ios.section.xml" />
|
||||
<xi:include href="java.xml" />
|
||||
<xi:include href="lua.xml" />
|
||||
<xi:include href="node.section.xml" />
|
||||
<xi:include href="ocaml.xml" />
|
||||
<xi:include href="perl.xml" />
|
||||
<xi:include href="python.section.xml" />
|
||||
<xi:include href="qt.xml" />
|
||||
@ -26,6 +29,7 @@
|
||||
<xi:include href="ruby.xml" />
|
||||
<xi:include href="rust.section.xml" />
|
||||
<xi:include href="texlive.xml" />
|
||||
<xi:include href="titanium.section.xml" />
|
||||
<xi:include href="vim.section.xml" />
|
||||
<xi:include href="emscripten.section.xml" />
|
||||
</chapter>
|
||||
|
doc/languages-frameworks/ios.section.md (new file, 219 lines)
@ -0,0 +1,219 @@
|
||||
---
|
||||
title: iOS
|
||||
author: Sander van der Burg
|
||||
date: 2018-11-18
|
||||
---
|
||||
# iOS
|
||||
|
||||
This component is basically a wrapper/workaround that makes it possible to
|
||||
expose an Xcode installation as a Nix package by means of symlinking to the
|
||||
relevant executables on the host system.
|
||||
|
||||
Since Xcode can't be packaged with Nix, nor can we publish it as a Nix package
(because of its license), this is basically the only integration strategy
that makes it possible to do iOS application builds that integrate with other
components of the Nix ecosystem.
|
||||
|
||||
The primary objective of this project is to use the Nix expression language to
|
||||
specify how iOS apps can be built from source code, and to automatically spawn
|
||||
iOS simulator instances for testing.
|
||||
|
||||
This component also makes it possible to use [Hydra](http://nixos.org/hydra),
|
||||
the Nix-based continuous integration server to regularly build iOS apps and to
|
||||
do wireless ad-hoc installations of enterprise IPAs on iOS devices through
|
||||
Hydra.
|
||||
|
||||
The Xcode build environment implements a number of features.
|
||||
|
||||
Deploying a proxy component wrapper exposing Xcode
|
||||
--------------------------------------------------
|
||||
The first use case is deploying a Nix package that provides symlinks to the Xcode
|
||||
installation on the host system. This package can be used as a build input to
|
||||
any build function implemented in the Nix expression language that requires
|
||||
Xcode.
|
||||
|
||||
```nix
|
||||
let
|
||||
pkgs = import <nixpkgs> {};
|
||||
|
||||
xcodeenv = import ./xcodeenv {
|
||||
inherit (pkgs) stdenv;
|
||||
};
|
||||
in
|
||||
xcodeenv.composeXcodeWrapper {
|
||||
version = "9.2";
|
||||
xcodeBaseDir = "/Applications/Xcode.app";
|
||||
}
|
||||
```
|
||||
|
||||
By deploying the above expression with `nix-build` and inspecting its content
|
||||
you will notice that several Xcode-related executables are exposed as a Nix
|
||||
package:
|
||||
|
||||
```bash
|
||||
$ ls result/bin
|
||||
lrwxr-xr-x 1 sander staff 94 1 jan 1970 Simulator -> /Applications/Xcode.app/Contents/Developer/Applications/Simulator.app/Contents/MacOS/Simulator
|
||||
lrwxr-xr-x 1 sander staff 17 1 jan 1970 codesign -> /usr/bin/codesign
|
||||
lrwxr-xr-x 1 sander staff 17 1 jan 1970 security -> /usr/bin/security
|
||||
lrwxr-xr-x 1 sander staff 21 1 jan 1970 xcode-select -> /usr/bin/xcode-select
|
||||
lrwxr-xr-x 1 sander staff 61 1 jan 1970 xcodebuild -> /Applications/Xcode.app/Contents/Developer/usr/bin/xcodebuild
|
||||
lrwxr-xr-x 1 sander staff 14 1 jan 1970 xcrun -> /usr/bin/xcrun
|
||||
```
|
||||
|
||||
Building an iOS application
|
||||
---------------------------
|
||||
We can build an iOS app executable for the simulator, or an IPA/xcarchive file
|
||||
for release purposes, e.g. ad-hoc, enterprise or store installations, by
|
||||
executing the `xcodeenv.buildApp {}` function:
|
||||
|
||||
```nix
|
||||
let
|
||||
pkgs = import <nixpkgs> {};
|
||||
|
||||
xcodeenv = import ./xcodeenv {
|
||||
inherit (pkgs) stdenv;
|
||||
};
|
||||
in
|
||||
xcodeenv.buildApp {
|
||||
name = "MyApp";
|
||||
src = ./myappsources;
|
||||
sdkVersion = "11.2";
|
||||
|
||||
target = null; # Corresponds to the name of the app by default
|
||||
configuration = null; # Release for release builds, Debug for debug builds
|
||||
scheme = null; # -scheme will correspond to the app name by default
|
||||
sdk = null; # null will set it to `iphonesimulator` for simulator builds or `iphoneos` for release builds
|
||||
xcodeFlags = "";
|
||||
|
||||
release = true;
|
||||
certificateFile = ./mycertificate.p12;
|
||||
certificatePassword = "secret";
|
||||
provisioningProfile = ./myprovisioning.profile;
|
||||
signMethod = "ad-hoc"; # 'enterprise' or 'store'
|
||||
generateIPA = true;
|
||||
generateXCArchive = false;
|
||||
|
||||
enableWirelessDistribution = true;
|
||||
installURL = "/installipa.php";
|
||||
bundleId = "mycompany.myapp";
|
||||
appVersion = "1.0";
|
||||
|
||||
# Supports all xcodewrapper parameters as well
|
||||
xcodeBaseDir = "/Applications/Xcode.app";
|
||||
}
|
||||
```
|
||||
|
||||
The above function takes a variety of parameters:
|
||||
* The `name` and `src` parameters are mandatory and specify the name of the app
|
||||
and the location where the source code resides.
|
||||
* `sdkVersion` specifies which version of the iOS SDK to use.
|
||||
|
||||
It is also possible to adjust the `xcodebuild` parameters. This is only needed
in rare circumstances; in most cases, the default values should suffice:
|
||||
|
||||
* The `target` parameter specifies which `xcodebuild` target to build. By
default, it takes the target that has the same name as the app.
|
||||
* The `configuration` parameter can be overridden if desired. By default, it
|
||||
will do a debug build for the simulator and a release build for real devices.
|
||||
* The `scheme` parameter specifies which `-scheme` parameter to propagate to
|
||||
`xcodebuild`. By default, it corresponds to the app name.
|
||||
* The `sdk` parameter specifies which SDK to use. By default, it picks
|
||||
`iphonesimulator` for simulator builds and `iphoneos` for release builds.
|
||||
* The `xcodeFlags` parameter specifies arbitrary command line parameters that
|
||||
should be propagated to `xcodebuild`.
|
||||
|
||||
By default, builds are carried out for the iOS simulator. To do release builds
|
||||
(builds for real iOS devices), you must set the `release` parameter to `true`.
|
||||
In addition, you need to set the following parameters:
|
||||
|
||||
* `certificateFile` refers to a P12 certificate file.
|
||||
* `certificatePassword` specifies the password of the P12 certificate.
|
||||
* `provisioningProfile` refers to the provisioning profile needed to sign the app.
|
||||
* `signMethod` should refer to `ad-hoc` for signing the app with an ad-hoc
|
||||
certificate, `enterprise` for enterprise certificates and `app-store` for App
|
||||
store certificates.
|
||||
* `generateIPA` specifies that we want to produce an IPA file (this is probably
what you want).
* `generateXCArchive` specifies that we want to produce an xcarchive file.
|
||||
|
||||
When building IPA files on Hydra and when it is desired to allow iOS devices to
|
||||
install IPAs by browsing to the Hydra build products page, you can enable the
|
||||
`enableWirelessDistribution` parameter.
|
||||
|
||||
When enabled, you need to configure the following options:
|
||||
|
||||
* The `installURL` parameter refers to the URL of a PHP script that composes the
|
||||
`itms-services://` URL allowing iOS devices to install the IPA file.
|
||||
* `bundleId` refers to the bundle ID value of the app.
* `appVersion` refers to the app's version number.
|
||||
|
||||
To use wireless ad-hoc distributions, you must also install the corresponding
|
||||
PHP script on a web server (see section: 'Installing the PHP script for wireless
|
||||
ad hoc installations from Hydra' for more information).
|
||||
|
||||
In addition to the build parameters, you can also specify any parameters that
|
||||
the `xcodeenv.composeXcodeWrapper {}` function takes. For example, the
|
||||
`xcodeBaseDir` parameter can be overridden to refer to a different Xcode
|
||||
version.
|
||||
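For instance, a rough sketch of such an override might look like the following
(the version number and Xcode path below are just placeholders for whatever is
actually installed on the build machine):

```nix
xcodeenv.buildApp {
  name = "MyApp";
  src = ./myappsources;
  sdkVersion = "11.2";

  # Parameters accepted by xcodeenv.composeXcodeWrapper {}
  version = "9.2";
  xcodeBaseDir = "/Applications/Xcode.app";
}
```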
|
||||
Spawning simulator instances
|
||||
----------------------------
|
||||
In addition to building iOS apps, we can also automatically spawn simulator
|
||||
instances:
|
||||
|
||||
```nix
|
||||
let
|
||||
pkgs = import <nixpkgs> {};
|
||||
|
||||
xcodeenv = import ./xcodeenv {
|
||||
inherit (pkgs) stdenv;
|
||||
};
|
||||
in
|
||||
xcodeenv.simulateApp {
|
||||
name = "simulate";
|
||||
|
||||
# Supports all xcodewrapper parameters as well
|
||||
xcodeBaseDir = "/Applications/Xcode.app";
|
||||
}
|
||||
```
|
||||
|
||||
The above expression produces a script that starts the simulator from the
|
||||
provided Xcode installation. The script can be started as follows:
|
||||
|
||||
```bash
|
||||
./result/bin/run-test-simulator
|
||||
```
|
||||
|
||||
By default, the script will show an overview of the UDIDs of all available
simulator instances and ask you to pick one. You can also provide a UDID as a
command-line parameter to launch an instance automatically:
|
||||
|
||||
```bash
|
||||
./result/bin/run-test-simulator 5C93129D-CF39-4B1A-955F-15180C3BD4B8
|
||||
```
|
||||
|
||||
You can also extend the simulator script to automatically deploy and launch an
|
||||
app in the requested simulator instance:
|
||||
|
||||
```nix
|
||||
let
|
||||
pkgs = import <nixpkgs> {};
|
||||
|
||||
xcodeenv = import ./xcodeenv {
|
||||
inherit (pkgs) stdenv;
|
||||
};
|
||||
in
|
||||
xcodeenv.simulateApp {
|
||||
name = "simulate";
|
||||
bundleId = "mycompany.myapp";
|
||||
app = xcodeenv.buildApp {
|
||||
# ...
|
||||
};
|
||||
|
||||
# Supports all xcodewrapper parameters as well
|
||||
xcodeBaseDir = "/Applications/Xcode.app";
|
||||
}
|
||||
```
|
||||
|
||||
By providing the result of an `xcodeenv.buildApp {}` function and configuring
the app bundle id, the app is automatically deployed and started.
|
@ -14,7 +14,7 @@ project.
|
||||
|
||||
The package set also provides support for multiple Node.js versions. The policy
|
||||
is that a new package should be added to the collection for the latest stable LTS
|
||||
release (which is currently 8.x), unless there is an explicit reason to support
|
||||
release (which is currently 10.x), unless there is an explicit reason to support
|
||||
a different release.
|
||||
|
||||
If your package uses native addons, you need to examine what kind of native
|
||||
@ -26,7 +26,7 @@ build system it uses. Here are some examples:
|
||||
|
||||
After you have identified the correct system, you need to override your package
|
||||
expression, adding the build system as a build input. For example, `dat`
|
||||
requires `node-gyp-build`, so we override its expression in `default-v8.nix`:
|
||||
requires `node-gyp-build`, so we override its expression in `default-v10.nix`:
|
||||
|
||||
```nix
|
||||
dat = nodePackages.dat.override (oldAttrs: {
|
||||
@ -36,9 +36,9 @@ dat = nodePackages.dat.override (oldAttrs: {
|
||||
|
||||
To add a package from NPM to nixpkgs:
|
||||
|
||||
1. Modify `pkgs/development/node-packages/node-packages-v8.json` to add, update
|
||||
or remove package entries. (Or `pkgs/development/node-packages/node-packages-v10.json`
|
||||
for packages depending on Node.js 10.x)
|
||||
1. Modify `pkgs/development/node-packages/node-packages-v10.json` to add, update
|
||||
or remove package entries. (Or `pkgs/development/node-packages/node-packages-v8.json`
|
||||
for packages depending on Node.js 8.x)
|
||||
2. Run the script: `(cd pkgs/development/node-packages && ./generate.sh)`.
|
||||
3. Build your new package to test your changes:
|
||||
`cd /path/to/nixpkgs && nix-build -A nodePackages.<new-or-updated-package>`.
|
||||
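As a condensed example of the workflow above (the package name `left-pad` is
just an illustrative placeholder; substitute the package you actually want to
add or update):

```bash
$ cd /path/to/nixpkgs
# 1. add/update the entry for "left-pad" in
#    pkgs/development/node-packages/node-packages-v10.json
$ (cd pkgs/development/node-packages && ./generate.sh)
$ nix-build -A nodePackages.left-pad
```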
|
99
doc/languages-frameworks/ocaml.xml
Normal file
@ -0,0 +1,99 @@
|
||||
<section xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
xml:id="sec-language-ocaml">
|
||||
<title>OCaml</title>
|
||||
|
||||
<para>
|
||||
OCaml libraries should be installed in
|
||||
<literal>$(out)/lib/ocaml/${ocaml.version}/site-lib/</literal>. Such
|
||||
directories are automatically added to the <literal>$OCAMLPATH</literal>
|
||||
environment variable when building another package that depends on them
|
||||
or when opening a <literal>nix-shell</literal>.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Given that most of the OCaml ecosystem is now built with dune,
|
||||
nixpkgs includes a convenience build support function called
|
||||
<literal>buildDunePackage</literal> that will build an OCaml package
|
||||
using dune, OCaml and findlib and any additional dependencies provided
|
||||
as <literal>buildInputs</literal> or <literal>propagatedBuildInputs</literal>.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Here is a simple package example. It defines an (optional) attribute
|
||||
<literal>minimumOCamlVersion</literal> that will be used to throw a
|
||||
descriptive evaluation error if building with an older OCaml is attempted.
|
||||
It uses the <literal>fetchFromGitHub</literal> fetcher to get its source.
|
||||
It sets the <literal>doCheck</literal> (optional) attribute to
|
||||
<literal>true</literal> which means that tests will be run with
|
||||
<literal>dune runtest -p angstrom</literal> after the build
|
||||
(<literal>dune build -p angstrom</literal>) is complete.
|
||||
It uses <literal>alcotest</literal> as a build input (because it is needed
|
||||
to run the tests) and <literal>bigstringaf</literal> and
|
||||
<literal>result</literal> as propagated build inputs (thus they will also
|
||||
be available to libraries depending on this library).
|
||||
The library will be installed using the <literal>angstrom.install</literal>
|
||||
file that dune generates.
|
||||
</para>
|
||||
|
||||
<programlisting>
|
||||
{ stdenv, fetchFromGitHub, buildDunePackage, alcotest, result, bigstringaf }:
|
||||
|
||||
buildDunePackage rec {
|
||||
pname = "angstrom";
|
||||
version = "0.10.0";
|
||||
|
||||
minimumOCamlVersion = "4.03";
|
||||
|
||||
src = fetchFromGitHub {
|
||||
owner = "inhabitedtype";
|
||||
repo = pname;
|
||||
rev = version;
|
||||
sha256 = "0lh6024yf9ds0nh9i93r9m6p5psi8nvrqxl5x7jwl13zb0r9xfpw";
|
||||
};
|
||||
|
||||
buildInputs = [ alcotest ];
|
||||
propagatedBuildInputs = [ bigstringaf result ];
|
||||
doCheck = true;
|
||||
|
||||
meta = {
|
||||
homepage = https://github.com/inhabitedtype/angstrom;
|
||||
description = "OCaml parser combinators built for speed and memory efficiency";
|
||||
license = stdenv.lib.licenses.bsd3;
|
||||
maintainers = with stdenv.lib.maintainers; [ sternenseemann ];
|
||||
};
|
||||
}
|
||||
</programlisting>
|
||||
|
||||
<para>
|
||||
Here is a second example, this time using a source archive generated with
|
||||
<literal>dune-release</literal>. It is a good idea to use this archive when
|
||||
it is available as it will usually contain substituted variables such as a
|
||||
<literal>%%VERSION%%</literal> field. This library does not depend
|
||||
on any other OCaml library and no tests are run after building it.
|
||||
</para>
|
||||
|
||||
<programlisting>
|
||||
{ stdenv, fetchurl, buildDunePackage }:
|
||||
|
||||
buildDunePackage rec {
|
||||
pname = "wtf8";
|
||||
version = "1.0.1";
|
||||
|
||||
minimumOCamlVersion = "4.01";
|
||||
|
||||
src = fetchurl {
|
||||
url = "https://github.com/flowtype/ocaml-${pname}/releases/download/v${version}/${pname}-${version}.tbz";
|
||||
sha256 = "1msg3vycd3k8qqj61sc23qks541cxpb97vrnrvrhjnqxsqnh6ygq";
|
||||
};
|
||||
|
||||
meta = with stdenv.lib; {
|
||||
homepage = https://github.com/flowtype/ocaml-wtf8;
|
||||
description = "WTF-8 is a superset of UTF-8 that allows unpaired surrogates.";
|
||||
license = licenses.mit;
|
||||
maintainers = [ maintainers.eqyiel ];
|
||||
};
|
||||
}
|
||||
</programlisting>
|
||||
|
||||
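<para>
  As a rough usage sketch (assuming the expression has been wired into the
  OCaml package set, e.g. via
  <filename>pkgs/top-level/ocaml-packages.nix</filename>), such a package can
  then be built with:
<programlisting>
$ nix-build -A ocamlPackages.wtf8
</programlisting>
</para>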
</section>
|
@ -483,12 +483,14 @@ and in this case the `python35` interpreter is automatically used.
|
||||
|
||||
### Interpreters
|
||||
|
||||
Versions 2.7, 3.4, 3.5, 3.6 and 3.7 of the CPython interpreter are available as
|
||||
respectively `python27`, `python34`, `python35`, `python36` and `python37`. The PyPy interpreter
|
||||
is available as `pypy`. The aliases `python2` and `python3` correspond to respectively `python27` and
|
||||
`python37`. The default interpreter, `python`, maps to `python2`.
|
||||
The Nix expressions for the interpreters can be found in
|
||||
`pkgs/development/interpreters/python`.
|
||||
Versions 2.7, 3.5, 3.6 and 3.7 of the CPython interpreter are available as
|
||||
respectively `python27`, `python35`, `python36` and `python37`. The aliases
|
||||
`python2` and `python3` correspond to respectively `python27` and
|
||||
`python37`. The default interpreter, `python`, maps to `python2`. The PyPy
|
||||
interpreters compatible with Python 2.7 and 3 are available as `pypy27` and
|
||||
`pypy3`, with aliases `pypy2` mapping to `pypy27` and `pypy` mapping to
|
||||
`pypy2`. The Nix expressions for the interpreters can be
|
||||
found in `pkgs/development/interpreters/python`.
|
||||
|
||||
All packages depending on any Python interpreter get appended
|
||||
`out/{python.sitePackages}` to `$PYTHONPATH` if such directory
|
||||
@ -507,7 +509,7 @@ Each interpreter has the following attributes:
|
||||
- `buildEnv`. Function to build python interpreter environments with extra packages bundled together. See section *python.buildEnv function* for usage and documentation.
|
||||
- `withPackages`. Simpler interface to `buildEnv`. See section *python.withPackages function* for usage and documentation.
|
||||
- `sitePackages`. Alias for `lib/${libPrefix}/site-packages`.
|
||||
- `executable`. Name of the interpreter executable, e.g. `python3.4`.
|
||||
- `executable`. Name of the interpreter executable, e.g. `python3.7`.
|
||||
- `pkgs`. Set of Python packages for that specific interpreter. The package set can be modified by overriding the interpreter and passing `packageOverrides`.
|
||||
|
||||
### Building packages and applications
|
||||
@ -529,7 +531,6 @@ attribute set is created for each available Python interpreter. The available
|
||||
sets are
|
||||
|
||||
* `pkgs.python27Packages`
|
||||
* `pkgs.python34Packages`
|
||||
* `pkgs.python35Packages`
|
||||
* `pkgs.python36Packages`
|
||||
* `pkgs.python37Packages`
|
||||
@ -837,7 +838,7 @@ community to help save time. No tool is preferred at the moment.
|
||||
|
||||
### Deterministic builds
|
||||
|
||||
Python 2.7, 3.5 and 3.6 are now built deterministically and 3.4 mostly.
|
||||
The Python interpreters are now built deterministically.
|
||||
Minor modifications had to be made to the interpreters in order to generate
|
||||
deterministic bytecode. This has security implications and is relevant for
|
||||
those using Python in a `nix-shell`.
|
||||
@ -1103,7 +1104,7 @@ on `numpy` will be built with `mkl`.
|
||||
The following is an overlay that configures `numpy` to use `mkl`:
|
||||
```nix
|
||||
self: super: {
|
||||
python36 = super.python36.override {
|
||||
python37 = super.python37.override {
|
||||
packageOverrides = python-self: python-super: {
|
||||
numpy = python-super.numpy.override {
|
||||
blas = super.pkgs.mkl;
|
||||
@ -1113,6 +1114,15 @@ self: super: {
|
||||
}
|
||||
```
|
||||
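As a hedged usage sketch, such an overlay can then be passed in when importing
Nixpkgs (the file name `mkl-numpy-overlay.nix` is just an illustrative choice):

```nix
import <nixpkgs> {
  overlays = [ (import ./mkl-numpy-overlay.nix) ];
}
```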
|
||||
`mkl` requires an `openmp` implementation when running with multiple processors.
|
||||
By default, `mkl` will use Intel's `iomp` implementation if no other is
|
||||
specified, but this is a runtime-only dependency and binary compatible with the
|
||||
LLVM implementation. To use that one instead, Intel recommends users set it with
|
||||
`LD_PRELOAD`.
|
||||
|
||||
Note that `mkl` is only available on `x86_64-{linux,darwin}` platforms;
|
||||
moreover, Hydra is not building and distributing pre-compiled binaries using it.
|
||||
|
||||
## Contributing
|
||||
|
||||
### Contributing guidelines
|
||||
|
@ -50,6 +50,17 @@ bundlerEnv rec {
|
||||
future updates can be run easily.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Updating Ruby packages can then be done like this:
|
||||
</para>
|
||||
|
||||
<screen>
|
||||
<![CDATA[$ cd pkgs/servers/monitoring/sensu
|
||||
$ nix-shell -p bundler --run 'bundle lock --update'
|
||||
$ nix-shell -p bundix --run 'bundix'
|
||||
]]>
|
||||
</screen>
|
||||
|
||||
<para>
|
||||
For tools written in Ruby - i.e. where the desire is to install a package and
|
||||
then execute e.g. <command>rake</command> at the command line, there is an
|
||||
|
@ -303,11 +303,15 @@ with import <nixpkgs> {};
|
||||
|
||||
stdenv.mkDerivation {
|
||||
name = "rust-env";
|
||||
buildInputs = [
|
||||
nativeBuildInputs = [
|
||||
rustc cargo
|
||||
|
||||
# Example Additional Dependencies
|
||||
pkgconfig openssl
|
||||
# Example Build-time Additional Dependencies
|
||||
pkgconfig
|
||||
];
|
||||
buildInputs = [
|
||||
# Example Run-time Additional Dependencies
|
||||
openssl
|
||||
];
|
||||
|
||||
# Set Environment Variables
|
||||
|
@ -49,12 +49,12 @@ texlive.combine {
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
You can list packages e.g. by <command>nix-repl</command>.
|
||||
<programlisting>
|
||||
$ nix-repl
|
||||
nix-repl> :l <nixpkgs>
|
||||
nix-repl> texlive.collection-<TAB>
|
||||
</programlisting>
|
||||
You can list packages e.g. by <command>nix repl</command>.
|
||||
<programlisting><![CDATA[
|
||||
$ nix repl
|
||||
nix-repl> :l <nixpkgs>
|
||||
nix-repl> texlive.collection-<TAB>
|
||||
]]></programlisting>
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
|
115
doc/languages-frameworks/titanium.section.md
Normal file
@ -0,0 +1,115 @@
|
||||
---
|
||||
title: Titanium
|
||||
author: Sander van der Burg
|
||||
date: 2018-11-18
|
||||
---
|
||||
# Titanium
|
||||
|
||||
The Nixpkgs repository contains facilities to deploy a variety of versions of
the [Titanium SDK](https://www.appcelerator.com), a cross-platform mobile app
development framework that uses JavaScript as an implementation language, and
includes a function abstraction that makes it possible to build Titanium
applications for Android and iOS devices from source code.
|
||||
|
||||
Not all Titanium features are supported -- currently, it can only be used to
build Android and iOS apps.
|
||||
|
||||
Building a Titanium app
|
||||
-----------------------
|
||||
We can build a Titanium app from source for Android or iOS and for debugging or
|
||||
release purposes by invoking the `titaniumenv.buildApp {}` function:
|
||||
|
||||
```nix
|
||||
titaniumenv.buildApp {
|
||||
name = "myapp";
|
||||
src = ./myappsource;
|
||||
|
||||
preBuild = "";
|
||||
target = "android"; # or 'iphone'
|
||||
tiVersion = "7.1.0.GA";
|
||||
release = true;
|
||||
|
||||
androidsdkArgs = {
|
||||
platformVersions = [ "25" "26" ];
|
||||
};
|
||||
androidKeyStore = ./keystore;
|
||||
androidKeyAlias = "myfirstapp";
|
||||
androidKeyStorePassword = "secret";
|
||||
|
||||
xcodeBaseDir = "/Applications/Xcode.app";
|
||||
xcodewrapperArgs = {
|
||||
version = "9.3";
|
||||
};
|
||||
iosMobileProvisioningProfile = ./myprovisioning.profile;
|
||||
iosCertificateName = "My Company";
|
||||
iosCertificate = ./mycertificate.p12;
|
||||
iosCertificatePassword = "secret";
|
||||
iosVersion = "11.3";
|
||||
iosBuildStore = false;
|
||||
|
||||
enableWirelessDistribution = true;
|
||||
installURL = "/installipa.php";
|
||||
}
|
||||
```
|
||||
|
||||
The `titaniumenv.buildApp {}` function takes the following parameters:
|
||||
|
||||
* The `name` parameter refers to the name in the Nix store.
|
||||
* The `src` parameter refers to the source code location of the app that needs
|
||||
to be built.
|
||||
* `preRebuild` contains optional build instructions that are carried out before
|
||||
the build starts.
|
||||
* `target` indicates for which device the app must be built. Currently only
|
||||
'android' and 'iphone' (for iOS) are supported.
|
||||
* `tiVersion` can be used to optionally override the requested Titanium version
|
||||
in `tiapp.xml`. If not specified, it will use the version in `tiapp.xml`.
|
||||
* `release` should be set to true when building an app for submission to the
|
||||
Google Playstore or Apple Appstore. Otherwise, it should be false.
|
||||
|
||||
When the `target` has been set to `android`, we can configure the following
|
||||
parameters:
|
||||
|
||||
* The `androidSdkArgs` parameter refers to an attribute set that propagates all
|
||||
parameters to the `androidenv.composeAndroidPackages {}` function. This can
|
||||
be used to install all relevant Android plugins that may be needed to perform
|
||||
the Android build. If no parameters are given, it will deploy the platform
|
||||
SDKs for API-levels 25 and 26 by default.
|
||||
|
||||
When the `release` parameter has been set to true, you need to provide
|
||||
parameters to sign the app:
|
||||
|
||||
* `androidKeyStore` is the path to the keystore file
|
||||
* `androidKeyAlias` is the key alias
|
||||
* `androidKeyStorePassword` refers to the password to open the keystore file.
|
||||
|
||||
When the `target` has been set to `iphone`, we can configure the following
|
||||
parameters:
|
||||
|
||||
* The `xcodeBaseDir` parameter refers to the location where Xcode has been
|
||||
installed. When no value is given, the above value is the default.
|
||||
* The `xcodewrapperArgs` parameter passes arbitrary parameters to the
|
||||
`xcodeenv.composeXcodeWrapper {}` function. This can, for example, be used
|
||||
to adjust the default version of Xcode.
|
||||
|
||||
When `release` has been set to true, you also need to provide the following
|
||||
parameters:
|
||||
|
||||
* `iosMobileProvisioningProfile` refers to a mobile provisioning profile needed
|
||||
for signing.
|
||||
* `iosCertificateName` refers to the company name in the P12 certificate.
|
||||
* `iosCertificate` refers to the path to the P12 file.
|
||||
* `iosCertificatePassword` contains the password to open the P12 file.
|
||||
* `iosVersion` refers to the iOS SDK version to use. It defaults to the latest
|
||||
version.
|
||||
* `iosBuildStore` should be set to `true` when building for an Apple App Store
submission. For enterprise or ad-hoc builds, it should be set to `false`.
|
||||
|
||||
When `enableWirelessDistribution` has been enabled, you must also provide the
path of the PHP script (`installURL`) that is included with the iOS build
environment, to enable wireless ad-hoc installations.
|
||||
|
||||
Emulating or simulating the app
|
||||
-------------------------------
|
||||
It is also possible to simulate the corresponding iOS build by using
`xcodeenv.simulateApp {}` and to emulate an Android APK by using
`androidenv.emulateApp {}`.
|
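For example, a rough sketch of wiring the two together (the parameter names
below simply mirror the build example above; whether `androidenv.emulateApp {}`
accepts exactly these arguments should be verified against its definition):

```nix
androidenv.emulateApp {
  name = "emulate-myapp";
  app = titaniumenv.buildApp {
    name = "myapp";
    src = ./myappsource;
    target = "android";
  };
}
```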
@ -46,9 +46,24 @@ neovim.override {
|
||||
}
|
||||
```
|
||||
|
||||
If you want to use `neovim-qt` as a graphical editor, you can configure it by
overriding Neovim in an overlay or by passing it an overridden Neovim:
|
||||
|
||||
```
|
||||
neovim-qt.override {
|
||||
neovim = neovim.override {
|
||||
configure = {
|
||||
customRC = ''
|
||||
# your custom configuration
|
||||
'';
|
||||
};
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
## Managing plugins with Vim packages
|
||||
|
||||
To store you plugins in Vim packages the following example can be used:
|
||||
To store your plugins in Vim packages (the native Vim plugin manager, see `:help packages`), the following example can be used:
|
||||
|
||||
```
|
||||
vim_configurable.customize {
|
||||
@ -56,6 +71,8 @@ vim_configurable.customize {
|
||||
# loaded on launch
|
||||
start = [ youcompleteme fugitive ];
|
||||
# manually loadable by calling `:packadd $plugin-name`
|
||||
# however, if a vim plugin has a dependency that is not explicitly listed in
|
||||
# opt that dependency will always be added to start to avoid confusion.
|
||||
opt = [ phpCompletion elm-vim ];
|
||||
# To automatically load a plugin when opening a filetype, add vimrc lines like:
|
||||
# autocmd FileType php :packadd phpCompletion
|
||||
@ -63,6 +80,7 @@ vim_configurable.customize {
|
||||
}
|
||||
```
|
||||
|
||||
`myVimPackage` is an arbitrary name for the generated package. You can choose any name you like.
|
||||
For Neovim the syntax is:
|
||||
|
||||
```
|
||||
@ -74,6 +92,8 @@ neovim.override {
|
||||
packages.myVimPackage = with pkgs.vimPlugins; {
|
||||
# see examples below how to use custom packages
|
||||
start = [ ];
|
||||
# If a vim plugin has a dependency that is not explicitly listed in
|
||||
# opt that dependency will always be added to start to avoid confusion.
|
||||
opt = [ ];
|
||||
};
|
||||
};
|
||||
|
26
doc/lib-function-docs.nix
Normal file
@ -0,0 +1,26 @@
|
||||
# Generates the documentation for library functions via nixdoc. To add
|
||||
# another library function file to this list, the include list in the
|
||||
# file `doc/functions/library.xml` must also be updated.
|
||||
|
||||
{ pkgs ? import ./.. {}, locationsXml }:
|
||||
|
||||
with pkgs; stdenv.mkDerivation {
|
||||
name = "nixpkgs-lib-docs";
|
||||
src = ./../lib;
|
||||
|
||||
buildInputs = [ nixdoc ];
|
||||
installPhase = ''
|
||||
function docgen {
|
||||
nixdoc -c "$1" -d "$2" -f "../lib/$1.nix" > "$out/$1.xml"
|
||||
}
|
||||
|
||||
mkdir -p $out
|
||||
ln -s ${locationsXml} $out/locations.xml
|
||||
|
||||
docgen strings 'String manipulation functions'
|
||||
docgen trivial 'Miscellaneous functions'
|
||||
docgen lists 'List manipulation functions'
|
||||
docgen debug 'Debugging functions'
|
||||
docgen options 'NixOS / nixpkgs option handling'
|
||||
'';
|
||||
}
|
62
doc/meta.xml
@ -14,7 +14,7 @@ meta = with stdenv.lib; {
|
||||
GNU Hello is a program that prints "Hello, world!" when you run it.
|
||||
It is fully customizable.
|
||||
'';
|
||||
homepage = http://www.gnu.org/software/hello/manual/;
|
||||
homepage = https://www.gnu.org/software/hello/manual/;
|
||||
license = licenses.gpl3Plus;
|
||||
maintainers = [ maintainers.eelco ];
|
||||
platforms = platforms.all;
|
||||
@ -35,7 +35,7 @@ $ nix-env -qa hello --json
|
||||
"hello": {
|
||||
"meta": {
|
||||
"description": "A program that produces a familiar, friendly greeting",
|
||||
"homepage": "http://www.gnu.org/software/hello/manual/",
|
||||
"homepage": "https://www.gnu.org/software/hello/manual/",
|
||||
"license": {
|
||||
"fullName": "GNU General Public License version 3 or later",
|
||||
"shortName": "GPLv3+",
|
||||
@ -135,7 +135,7 @@ hello-2.3 A program that produces a familiar, friendly greeting
|
||||
<listitem>
|
||||
<para>
|
||||
The package’s homepage. Example:
|
||||
<literal>http://www.gnu.org/software/hello/manual/</literal>
|
||||
<literal>https://www.gnu.org/software/hello/manual/</literal>
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
@ -146,7 +146,7 @@ hello-2.3 A program that produces a familiar, friendly greeting
|
||||
<listitem>
|
||||
<para>
|
||||
The page where a link to the current version can be found. Example:
|
||||
<literal>http://ftp.gnu.org/gnu/hello/</literal>
|
||||
<literal>https://ftp.gnu.org/gnu/hello/</literal>
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
@ -250,6 +250,60 @@ meta.platforms = stdenv.lib.platforms.linux;
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>
|
||||
<varname>tests</varname>
|
||||
</term>
|
||||
<listitem>
|
||||
<warning>
|
||||
<para>
|
||||
This attribute is special in that it is not actually under the
|
||||
<literal>meta</literal> attribute set but rather under the
|
||||
<literal>passthru</literal> attribute set. This is due to a current
|
||||
limitation of Nix, and will change as soon as Nixpkgs is able to
|
||||
depend on a new enough version of Nix. See
|
||||
<link xlink:href="https://github.com/NixOS/nix/issues/2532">the relevant
|
||||
issue</link> for more details.
|
||||
</para>
|
||||
</warning>
|
||||
<para>
|
||||
An attribute set whose values are tests. A test is a derivation that
builds successfully when the test passes, and fails to build otherwise. A
derivation that is a test needs to have <literal>meta.timeout</literal>
defined.
|
||||
</para>
|
||||
<para>
|
||||
The NixOS tests are available as <literal>nixosTests</literal> in
|
||||
parameters of derivations. For instance, the OpenSMTPD derivation
|
||||
includes lines similar to:
|
||||
<programlisting>
|
||||
{ /* ... */, nixosTests }:
|
||||
{
|
||||
# ...
|
||||
passthru.tests = {
|
||||
basic-functionality-and-dovecot-integration = nixosTests.opensmtpd;
|
||||
};
|
||||
}
|
||||
</programlisting>
|
||||
</para>
|
||||
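<para>
  As a usage sketch (assuming the attribute layout shown above), such a test
  can then be built on its own with something like:
<screen>
$ nix-build -A opensmtpd.tests.basic-functionality-and-dovecot-integration
</screen>
</para>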
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>
|
||||
<varname>timeout</varname>
|
||||
</term>
|
||||
<listitem>
|
||||
<para>
|
||||
A timeout (in seconds) for building the derivation. If the derivation
takes longer than this to build, the build can fail for exceeding the
timeout. However, not all machines have the same computing power,
so some builders may decide to apply a multiplicative factor to this
|
||||
value. When filling this value in, try to keep it approximately
|
||||
consistent with other values already present in
|
||||
<literal>nixpkgs</literal>.
|
||||
</para>
|
||||
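<para>
  For example, a derivation whose build normally finishes well within a few
  minutes might declare something like
  <literal>meta.timeout = 600;</literal> (the value here is purely
  illustrative).
</para>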
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>
|
||||
<varname>hydraPlatforms</varname>
|
||||
|
@ -12,7 +12,7 @@
|
||||
<para>
|
||||
The Nix language allows a derivation to produce multiple outputs, which is
|
||||
similar to what is utilized by other Linux distribution packaging systems.
|
||||
The outputs reside in separate nix store paths, so they can be mostly
|
||||
The outputs reside in separate Nix store paths, so they can be mostly
|
||||
handled independently of each other, including passing to build inputs,
|
||||
garbage collection or binary substitution. The exception is that building
|
||||
from source always produces all the outputs.
|
||||
|
@ -3,9 +3,9 @@
|
||||
xml:id="chap-overlays">
|
||||
<title>Overlays</title>
|
||||
<para>
|
||||
This chapter describes how to extend and change Nixpkgs packages using
|
||||
overlays. Overlays are used to add layers in the fix-point used by Nixpkgs to
|
||||
compose the set of all packages.
|
||||
This chapter describes how to extend and change Nixpkgs using overlays.
|
||||
Overlays are used to add layers in the fixed-point used by Nixpkgs to compose
|
||||
the set of all packages.
|
||||
</para>
|
||||
<para>
|
||||
Nixpkgs can be configured with a list of overlays, which are applied in
|
||||
@ -60,7 +60,7 @@
|
||||
<para>
|
||||
First, if an
|
||||
<link xlink:href="#sec-overlays-argument"><varname>overlays</varname>
|
||||
argument</link> to the nixpkgs function itself is given, then that is
|
||||
argument</link> to the Nixpkgs function itself is given, then that is
|
||||
used and no path lookup will be performed.
|
||||
</para>
|
||||
</listitem>
|
||||
|
@ -205,7 +205,7 @@ $ cat $(PRINT_PATH=1 nix-prefetch-url $i | tail -n 1) \
|
||||
|
||||
<para>
|
||||
Nixpkgs provides a number of packages that will install Eclipse in its
|
||||
various forms, these range from the bare-bones Eclipse Platform to the more
|
||||
various forms. These range from the bare-bones Eclipse Platform to the more
|
||||
fully featured Eclipse SDK or Scala-IDE packages and multiple version are
|
||||
often available. It is possible to list available Eclipse packages by
|
||||
issuing the command:
|
||||
@ -307,23 +307,19 @@ packageOverrides = pkgs: {
|
||||
</screen>
|
||||
</para>
|
||||
</section>
|
||||
|
||||
<section xml:id="sec-elm">
|
||||
<title>Elm</title>
|
||||
|
||||
<para>
|
||||
The Nix expressions for Elm reside in
|
||||
<filename>pkgs/development/compilers/elm</filename>. They are generated
|
||||
automatically by <command>update-elm.rb</command> script. One should specify
|
||||
versions of Elm packages inside the script, clear the
|
||||
<filename>packages</filename> directory and run the script from inside it.
|
||||
<literal>elm-reactor</literal> is special because it also has Elm package
|
||||
dependencies. The process is not automated very much for now -- you should
|
||||
get the <literal>elm-reactor</literal> source tree (e.g. with
|
||||
<command>nix-shell</command>) and run <command>elm2nix.rb</command> inside
|
||||
it. Place the resulting <filename>package.nix</filename> file into
|
||||
<filename>packages/elm-reactor-elm.nix</filename>.
|
||||
To update Elm compiler, see <filename>nixpkgs/pkgs/development/compilers/elm/README.md</filename>.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
To package Elm applications, <link xlink:href="https://github.com/hercules-ci/elm2nix#elm2nix">read about elm2nix</link>.
|
||||
</para>
|
||||
</section>
|
||||
|
||||
<section xml:id="sec-shell-helpers">
|
||||
<title>Interactive shell helpers</title>
|
||||
|
||||
|
@ -6,13 +6,13 @@
|
||||
<title>Darwin (macOS)</title>
|
||||
|
||||
<para>
|
||||
Some common issues when packaging software for darwin:
|
||||
Some common issues when packaging software for Darwin:
|
||||
</para>
|
||||
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>
|
||||
The darwin <literal>stdenv</literal> uses clang instead of gcc. When
|
||||
The Darwin <literal>stdenv</literal> uses clang instead of gcc. When
|
||||
referring to the compiler <varname>$CC</varname> or <command>cc</command>
|
||||
will work in both cases. Some builds hardcode gcc/g++ in their build
|
||||
scripts, that can usually be fixed with using something like
|
||||
@ -31,7 +31,7 @@
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
On darwin libraries are linked using absolute paths, libraries are
|
||||
On Darwin, libraries are linked using absolute paths, libraries are
|
||||
resolved by their <literal>install_name</literal> at link time. Sometimes
|
||||
packages won't set this correctly causing the library lookups to fail at
|
||||
runtime. This can be fixed by adding extra linker flags or by running
|
||||
@ -96,8 +96,8 @@
|
||||
</programlisting>
|
||||
<para>
|
||||
The package <literal>xcbuild</literal> can be used to build projects that
|
||||
really depend on Xcode, however projects that build some kind of graphical
|
||||
interface won't work without using Xcode in an impure way.
|
||||
really depend on Xcode. However, this replacement is not 100%
|
||||
compatible with Xcode and can occasionally cause issues.
|
||||
</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
|
@ -147,8 +147,8 @@ $ git add pkgs/development/libraries/libfoo/default.nix</screen>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
You can use <command>nix-prefetch-url</command> (or similar
|
||||
nix-prefetch-git, etc) <replaceable>url</replaceable> to get the
|
||||
You can use <command>nix-prefetch-url</command>
|
||||
<replaceable>url</replaceable> to get the
|
||||
SHA-256 hash of source distributions. There are similar commands as
|
||||
<command>nix-prefetch-git</command> and
|
||||
<command>nix-prefetch-hg</command> available in
|
||||
|
@ -17,22 +17,20 @@
|
||||
</para>
|
||||
</warning>
|
||||
<para>
|
||||
The nixpkgs project receives a fairly high number of contributions via GitHub
|
||||
pull-requests. Reviewing and approving these is an important task and a way
|
||||
The Nixpkgs project receives a fairly high number of contributions via GitHub
|
||||
pull requests. Reviewing and approving these is an important task and a way
|
||||
to contribute to the project.
|
||||
</para>
|
||||
<para>
|
||||
The high change rate of nixpkgs makes any pull request that remains open for
|
||||
The high change rate of Nixpkgs makes any pull request that remains open for
|
||||
too long subject to conflicts that will require extra work from the submitter
|
||||
or the merger. Reviewing pull requests in a timely manner and being
|
||||
responsive to the comments is the key to avoid these. GitHub provides sort
|
||||
filters that can be used to see the
|
||||
<link
|
||||
xlink:href="https://github.com/NixOS/nixpkgs/pulls?q=is%3Apr+is%3Aopen+sort%3Aupdated-desc">most
|
||||
recently</link> and the
|
||||
<link
|
||||
xlink:href="https://github.com/NixOS/nixpkgs/pulls?q=is%3Apr+is%3Aopen+sort%3Aupdated-asc">least
|
||||
recently</link> updated pull-requests. We highly encourage looking at
|
||||
or the merger. Reviewing pull requests in a timely manner and being responsive
|
||||
to the comments is the key to avoid this issue. GitHub provides sort filters
|
||||
that can be used to see the <link
|
||||
xlink:href="https://github.com/NixOS/nixpkgs/pulls?q=is%3Apr+is%3Aopen+sort%3Aupdated-desc">most
|
||||
recently</link> and the <link
|
||||
xlink:href="https://github.com/NixOS/nixpkgs/pulls?q=is%3Apr+is%3Aopen+sort%3Aupdated-asc">least
|
||||
recently</link> updated pull requests. We highly encourage looking at
|
||||
<link xlink:href="https://github.com/NixOS/nixpkgs/pulls?q=is%3Apr+is%3Aopen+review%3Anone+status%3Asuccess+-label%3A%222.status%3A+work-in-progress%22+no%3Aproject+no%3Aassignee+no%3Amilestone">
|
||||
this list of ready to merge, unreviewed pull requests</link>.
|
||||
</para>
|
||||
@ -43,12 +41,12 @@
|
||||
</para>
|
||||
<para>
|
||||
GitHub provides reactions as a simple and quick way to provide feedback to
|
||||
pull-requests or any comments. The thumb-down reaction should be used with
|
||||
pull requests or any comments. The thumb-down reaction should be used with
|
||||
care and if possible accompanied with some explanation so the submitter has
|
||||
directions to improve their contribution.
|
||||
</para>
|
||||
<para>
|
||||
Pull-request reviews should include a list of what has been reviewed in a
|
||||
Pull request reviews should include a list of what has been reviewed in a
|
||||
comment, so other reviewers and mergers can know the state of the review.
|
||||
</para>
|
||||
<para>
|
||||
@ -60,8 +58,8 @@
|
||||
<title>Package updates</title>
|
||||
|
||||
<para>
|
||||
A package update is the most trivial and common type of pull-request. These
|
||||
pull-requests mainly consist of updating the version part of the package
|
||||
A package update is the most trivial and common type of pull request. These
|
||||
pull requests mainly consist of updating the version part of the package
|
||||
name and the source hash.
|
||||
</para>
|
||||
|
||||
@ -77,7 +75,7 @@
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>
|
||||
Add labels to the pull-request. (Requires commit rights)
|
||||
Add labels to the pull request. (Requires commit rights)
|
||||
</para>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
@ -144,8 +142,8 @@
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>
|
||||
Pull-requests are often targeted to the master or staging branch, and
|
||||
building the pull-request locally when it is submitted can trigger many
|
||||
Pull requests are often targeted to the master or staging branch, and
|
||||
building the pull request locally when it is submitted can trigger many
|
||||
source builds.
|
||||
</para>
|
||||
<para>
|
||||
@ -174,14 +172,14 @@ $ git rebase --onto nixos-unstable BASEBRANCH FETCH_HEAD <co
|
||||
</callout>
|
||||
<callout arearefs='reviewing-rebase-3'>
|
||||
<para>
|
||||
Fetching the pull-request changes, <varname>PRNUMBER</varname> is the
|
||||
number at the end of the pull-request title and
|
||||
<varname>BASEBRANCH</varname> the base branch of the pull-request.
|
||||
Fetching the pull request changes, <varname>PRNUMBER</varname> is the
|
||||
number at the end of the pull request title and
|
||||
<varname>BASEBRANCH</varname> the base branch of the pull request.
|
||||
</para>
|
||||
</callout>
|
||||
<callout arearefs='reviewing-rebase-4'>
|
||||
<para>
|
||||
Rebasing the pull-request changes to the nixos-unstable branch.
|
||||
Rebasing the pull request changes to the nixos-unstable branch.
|
||||
</para>
|
||||
</callout>
|
||||
</calloutlist>
|
||||
@ -190,10 +188,10 @@ $ git rebase --onto nixos-unstable BASEBRANCH FETCH_HEAD <co
|
||||
<listitem>
|
||||
<para>
|
||||
The <link xlink:href="https://github.com/madjar/nox">nox</link> tool can
|
||||
be used to review a pull-request content in a single command. It doesn't
|
||||
be used to review a pull request content in a single command. It doesn't
|
||||
rebase on a channel branch so it might trigger multiple source builds.
|
||||
<varname>PRNUMBER</varname> should be replaced by the number at the end
|
||||
of the pull-request title.
|
||||
of the pull request title.
|
||||
</para>
|
||||
<screen>
|
||||
$ nix-shell -p nox --run "nox-review -k pr PRNUMBER"
|
||||
@ -230,7 +228,7 @@ $ nix-shell -p nox --run "nox-review -k pr PRNUMBER"
|
||||
<title>New packages</title>
|
||||
|
||||
<para>
|
||||
New packages are a common type of pull-requests. These pull requests
|
||||
New packages are a common type of pull request. These pull requests
consist of adding a new Nix expression for a package.
|
||||
</para>
|
||||
|
||||
@ -241,7 +239,7 @@ $ nix-shell -p nox --run "nox-review -k pr PRNUMBER"
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>
|
||||
Add labels to the pull-request. (Requires commit rights)
|
||||
Add labels to the pull request. (Requires commit rights)
|
||||
</para>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
@ -279,7 +277,7 @@ $ nix-shell -p nox --run "nox-review -k pr PRNUMBER"
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
A maintainer must be set, this can be the package submitter or a
|
||||
A maintainer must be set. This can be the package submitter or a
|
||||
community member who agrees to take maintainership of the package.
|
||||
</para>
|
||||
</listitem>
|
||||
@ -361,7 +359,7 @@ $ nix-shell -p nox --run "nox-review -k pr PRNUMBER"
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>
|
||||
Add labels to the pull-request. (Requires commit rights)
|
||||
Add labels to the pull request. (Requires commit rights)
|
||||
</para>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
@ -474,7 +472,7 @@ $ nix-shell -p nox --run "nox-review -k pr PRNUMBER"
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>
|
||||
Add labels to the pull-request. (Requires commit rights)
|
||||
Add labels to the pull request. (Requires commit rights)
|
||||
</para>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
@ -576,7 +574,7 @@ $ nix-shell -p nox --run "nox-review -k pr PRNUMBER"
|
||||
like to be a long-term reviewer for related submissions, please contact the
|
||||
current reviewers for that topic. They will give you information about the
|
||||
reviewing process. The main reviewers for a topic can be hard to find as
|
||||
there is no list, but checking past pull-requests to see who reviewed or
|
||||
there is no list, but checking past pull requests to see who reviewed or
|
||||
git-blaming the code to see who committed to that topic can give some hints.
|
||||
</para>
|
||||
|
||||
@ -586,7 +584,7 @@ $ nix-shell -p nox --run "nox-review -k pr PRNUMBER"
|
||||
</para>
|
||||
</section>
|
||||
<section xml:id="reviewing-contributions--merging-pull-requests">
|
||||
<title>Merging pull-requests</title>
|
||||
<title>Merging pull requests</title>
|
||||
|
||||
<para>
|
||||
It is possible for community members that have enough knowledge and
|
||||
@ -607,11 +605,11 @@ policy.
|
||||
-->
|
||||
|
||||
<para>
|
||||
In a case a contributor leaves definitively the Nix community, he should
|
||||
In case a contributor definitively leaves the Nix community, they should
|
||||
create an issue or post on
|
||||
<link
|
||||
xlink:href="https://discourse.nixos.org">Discourse</link> with
|
||||
references of packages and modules he maintains so the maintainership can be
|
||||
references to the packages and modules they maintain so the maintainership can be
|
||||
taken over by other contributors.
|
||||
</para>
|
||||
</section>
|
||||
|
678
doc/stdenv.xml
@ -228,18 +228,19 @@ genericBuild
|
||||
</para>
|
||||
|
||||
<para>
|
||||
The extension of <envar>PATH</envar> with dependencies, alluded to above,
|
||||
proceeds according to the relative platforms alone. The process is carried
|
||||
out only for dependencies whose host platform matches the new derivation's
|
||||
build platform–i.e. which run on the platform where the new derivation
|
||||
will be built.
|
||||
The extension of <envar>PATH</envar> with dependencies, alluded to
|
||||
above, proceeds according to the relative platforms alone. The
|
||||
process is carried out only for dependencies whose host platform
|
||||
matches the new derivation's build platform, i.e. dependencies which
|
||||
run on the platform where the new derivation will be built.
|
||||
<footnote xml:id="footnote-stdenv-native-dependencies-in-path">
|
||||
<para>
|
||||
Currently, that means for native builds all dependencies are put on the
|
||||
<envar>PATH</envar>. But in the future that may not be the case for sake
|
||||
of matching cross: the platforms would be assumed to be unique for native
|
||||
and cross builds alike, so only the <varname>depsBuild*</varname> and
|
||||
<varname>nativeBuildDependencies</varname> dependencies would affect the
|
||||
Currently, this means for native builds all dependencies are put
|
||||
on the <envar>PATH</envar>. But in the future that may not be the
|
||||
case for sake of matching cross: the platforms would be assumed
|
||||
to be unique for native and cross builds alike, so only the
|
||||
<varname>depsBuild*</varname> and
|
||||
<varname>nativeBuildInputs</varname> would be added to the
|
||||
<envar>PATH</envar>.
|
||||
</para>
|
||||
</footnote>
|
||||
@ -251,28 +252,27 @@ genericBuild
|
||||
<para>
|
||||
The dependency is propagated when it forces some of its other-transitive
|
||||
(non-immediate) downstream dependencies to also take it on as an immediate
|
||||
dependency. Nix itself already takes a package's transitive dependencies
|
||||
into account, but this propagation ensures nixpkgs-specific infrastructure
|
||||
like setup hooks (mentioned above) also are run as if the propagated
|
||||
dependency.
|
||||
dependency. Nix itself already takes a package's transitive dependencies into
|
||||
account, but this propagation ensures nixpkgs-specific infrastructure like
|
||||
setup hooks (mentioned above) are also run as if the propagated dependency had been listed directly.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
It is important to note dependencies are not necessary propagated as the
|
||||
same sort of dependency that they were before, but rather as the
|
||||
It is important to note that dependencies are not necessarily propagated as
|
||||
the same sort of dependency that they were before, but rather as the
|
||||
corresponding sort so that the platform rules still line up. The exact rules
|
||||
for dependency propagation can be given by assigning each sort of dependency
|
||||
two integers based one how it's host and target platforms are offset from
|
||||
the depending derivation's platforms. Those offsets are given are given
|
||||
below in the descriptions of each dependency list attribute.
|
||||
Algorithmically, we traverse propagated inputs, accumulating every
|
||||
propagated dep's propagated deps and adjusting them to account for the
|
||||
"shift in perspective" described by the current dep's platform offsets. This
|
||||
results in sort a transitive closure of the dependency relation, with the
|
||||
offsets being approximately summed when two dependency links are combined.
|
||||
We also prune transitive deps whose combined offsets go out-of-bounds, which
|
||||
can be viewed as a filter over that transitive closure removing dependencies
|
||||
that are blatantly absurd.
|
||||
for dependency propagation can be given by assigning to each dependency two
|
||||
integers based on how its host and target platforms are offset from the
|
||||
depending derivation's platforms. Those offsets are given below in the
|
||||
descriptions of each dependency list attribute. Algorithmically, we traverse
|
||||
propagated inputs, accumulating every propagated dependency's propagated
|
||||
dependencies and adjusting them to account for the "shift in perspective"
|
||||
described by the current dependency's platform offsets. This results in sort
|
||||
a transitive closure of the dependency relation, with the offsets being
|
||||
approximately summed when two dependency links are combined. We also prune
|
||||
transitive dependencies whose combined offsets go out-of-bounds, which can be
|
||||
viewed as a filter over that transitive closure removing dependencies that
|
||||
are blatantly absurd.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
@ -287,8 +287,8 @@ genericBuild
|
||||
propagation logic.
|
||||
</para>
|
||||
</footnote>
|
||||
They're confusing in very different ways so...hopefully if something doesn't
|
||||
make sense in one presentation, it does in the other!
|
||||
They're confusing in very different ways so... hopefully if something doesn't
|
||||
make sense in one presentation, it will in the other!
|
||||
<programlisting>
|
||||
let mapOffset(h, t, i) = i + (if i <= 0 then h else t - 1)
|
||||
|
||||
@ -307,13 +307,13 @@ dep(h0, _, A, B)
|
||||
propagated-dep(h1, t1, B, C)
|
||||
h0 + h1 in {-1, 0, 1}
|
||||
h0 + t1 in {-1, 0, -1}
|
||||
-------------------------------------- Take immediate deps' propagated deps
|
||||
----------------------------- Take immediate dependencies' propagated dependencies
|
||||
propagated-dep(mapOffset(h0, t0, h1),
|
||||
mapOffset(h0, t0, t1),
|
||||
A, C)</programlisting>
|
||||
<programlisting>
|
||||
propagated-dep(h, t, A, B)
|
||||
-------------------------------------- Propagated deps count as deps
|
||||
----------------------------- Propagated dependencies count as dependencies
|
||||
dep(h, t, A, B)</programlisting>
|
||||
Some explanation of this monstrosity is in order. In the common case, the
|
||||
target offset of a dependency is the successor to the target offset:
|
||||
@ -324,31 +324,31 @@ let f(h, h + 1, i) = i + (if i <= 0 then h else (h + 1) - 1)
|
||||
let f(h, h + 1, i) = i + (if i <= 0 then h else h)
|
||||
let f(h, h + 1, i) = i + h
|
||||
</programlisting>
|
||||
This is where the "sum-like" comes from above: We can just sum all the host
|
||||
offset to get the host offset of the transitive dependency. The target
|
||||
offset is the transitive dep is simply the host offset + 1, just as it was
|
||||
with the dependencies composed to make this transitive one; it can be
|
||||
This is where "sum-like" comes in from above: We can just sum all of the host
|
||||
offsets to get the host offset of the transitive dependency. The target
|
||||
offset of the transitive dependency is simply the host offset + 1, just as it
|
||||
was with the dependencies composed to make this transitive one; it can be
|
||||
ignored as it doesn't add any new information.
|
||||
</para>
|
||||
|
||||
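<para>
  A small worked example (not part of the original rules above, but following
  directly from them): suppose package A lists B in
  <varname>nativeBuildInputs</varname> (offsets (-1, 0)) and B lists C in
  <varname>propagatedBuildInputs</varname> (offsets (0, 1)). The bounds checks
  pass (-1 + 0 and -1 + 1 are both within {-1, 0, 1}), so:
<programlisting>
mapOffset(-1, 0, 0) = 0 + (-1)    = -1
mapOffset(-1, 0, 1) = 1 + (0 - 1) =  0
--------------------------------------
propagated-dep(-1, 0, A, C)
</programlisting>
  That is, C ends up behaving exactly as if it had been listed in A's
  <varname>nativeBuildInputs</varname> directly.
</para>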
<para>
|
||||
Because of the bounds checks, the uncommon cases are <literal>h =
|
||||
t</literal> and <literal>h + 2 = t</literal>. In the former case, the
|
||||
motivation for <function>mapOffset</function> is that since its host and
|
||||
target platforms are the same, no transitive dep of it should be able to
|
||||
"discover" an offset greater than its reduced target offsets.
|
||||
Because of the bounds checks, the uncommon cases are <literal>h = t</literal>
|
||||
and <literal>h + 2 = t</literal>. In the former case, the motivation for
|
||||
<function>mapOffset</function> is that since its host and target platforms
|
||||
are the same, no transitive dependency of it should be able to "discover" an
|
||||
offset greater than its reduced target offsets.
|
||||
<function>mapOffset</function> effectively "squashes" all its transitive
|
||||
dependencies' offsets so that none will ever be greater than the target
|
||||
offset of the original <literal>h = t</literal> package. In the other case,
|
||||
<literal>h + 1</literal> is skipped over between the host and target
|
||||
offsets. Instead of squashing the offsets, we need to "rip" them apart so no
|
||||
<literal>h + 1</literal> is skipped over between the host and target offsets.
|
||||
Instead of squashing the offsets, we need to "rip" them apart so no
|
||||
transitive dependencies' offset is that one.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Overall, the unifying theme here is that propagation shouldn't be
|
||||
introducing transitive dependencies involving platforms the needing package
|
||||
is unaware of. The offset bounds checking and definition of
|
||||
Overall, the unifying theme here is that propagation shouldn't be introducing
|
||||
transitive dependencies involving platforms the depending package is unaware
|
||||
of. The offset bounds checking and definition of
|
||||
<function>mapOffset</function> together ensure that this is the case.
|
||||
Discovering a new offset is discovering a new platform, and since those
|
||||
platforms weren't in the derivation "spec" of the needing package, they
|
||||
@ -369,20 +369,20 @@ let f(h, h + 1, i) = i + h
|
||||
A list of dependencies whose host and target platforms are the new
|
||||
derivation's build platform. This means a <literal>-1</literal> host and
|
||||
<literal>-1</literal> target offset from the new derivation's platforms.
|
||||
They are programs/libraries used at build time that furthermore produce
|
||||
programs/libraries also used at build time. If the dependency doesn't
|
||||
care about the target platform (i.e. isn't a compiler or similar tool),
|
||||
put it in <varname>nativeBuildInputs</varname> instead. The most common
|
||||
use for this <literal>buildPackages.stdenv.cc</literal>, the default C
|
||||
compiler for this role. That example crops up more than one might think
|
||||
in old commonly used C libraries.
|
||||
These are programs and libraries used at build time that produce programs
|
||||
and libraries also used at build time. If the dependency doesn't care
|
||||
about the target platform (i.e. isn't a compiler or similar tool), put it
|
||||
in <varname>nativeBuildInputs</varname> instead. The most common use of this
is <literal>buildPackages.stdenv.cc</literal>, the default C compiler
|
||||
for this role. That example crops up more than one might think in old
|
||||
commonly used C libraries.
|
||||
</para>
|
||||
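<para>
  As an illustrative sketch (using the example dependency already named in the
  text above), a derivation needing a build-platform C compiler at build time
  might declare:
<programlisting>
depsBuildBuild = [ buildPackages.stdenv.cc ];
</programlisting>
</para>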
<para>
|
||||
Since these packages are able to be run at build time, that are always
|
||||
Since these packages are able to be run at build-time, they are always
|
||||
added to the <envar>PATH</envar>, as described above. But since these
|
||||
packages are only guaranteed to be able to run then, they shouldn't
|
||||
persist as run-time dependencies. This isn't currently enforced, but
|
||||
could be in the future.
|
||||
persist as run-time dependencies. This isn't currently enforced, but could
|
||||
be in the future.
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
@ -395,21 +395,20 @@ let f(h, h + 1, i) = i + h
|
||||
A list of dependencies whose host platform is the new derivation's build
|
||||
platform, and target platform is the new derivation's host platform. This
|
||||
means a <literal>-1</literal> host offset and <literal>0</literal> target
|
||||
offset from the new derivation's platforms. They are programs/libraries
|
||||
used at build time that, if they are a compiler or similar tool, produce
|
||||
code to run at run time—i.e. tools used to build the new derivation. If
|
||||
the dependency doesn't care about the target platform (i.e. isn't a
|
||||
compiler or similar tool), put it here, rather than in
|
||||
offset from the new derivation's platforms. These are programs and
|
||||
libraries used at build-time that, if they are a compiler or similar tool,
|
||||
produce code to run at run-time—i.e. tools used to build the new
|
||||
derivation. If the dependency doesn't care about the target platform (i.e.
|
||||
isn't a compiler or similar tool), put it here, rather than in
|
||||
<varname>depsBuildBuild</varname> or <varname>depsBuildTarget</varname>.
|
||||
This would be called <varname>depsBuildHost</varname> but for historical
|
||||
continuity.
|
||||
This could be called <varname>depsBuildHost</varname> but
|
||||
<varname>nativeBuildInputs</varname> is used for historical continuity.
|
||||
</para>
|
||||
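<para>
  For instance (a minimal sketch; the exact tools of course depend on the
  package being built), a package built with CMake and relying on pkg-config
  at build time might declare:
<programlisting>
nativeBuildInputs = [ cmake pkgconfig ];
</programlisting>
</para>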
<para>
|
||||
Since these packages are able to be run at build time, that are added to
|
||||
the <envar>PATH</envar>, as described above. But since these packages
|
||||
only are guaranteed to be able to run then, they shouldn't persist as
|
||||
run-time dependencies. This isn't currently enforced, but could be in the
|
||||
future.
|
||||
Since these packages are able to be run at build-time, they are added to
|
||||
the <envar>PATH</envar>, as described above. But since these packages are
|
||||
only guaranteed to be able to run then, they shouldn't persist as run-time
|
||||
dependencies. This isn't currently enforced, but could be in the future.
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
@ -422,34 +421,33 @@ let f(h, h + 1, i) = i + h
|
||||
A list of dependencies whose host platform is the new derivation's build
|
||||
platform, and target platform is the new derivation's target platform.
|
||||
This means a <literal>-1</literal> host offset and <literal>1</literal>
|
||||
target offset from the new derivation's platforms. They are programs used
|
||||
at build time that produce code to run at run with code produced by the
|
||||
depending package. Most commonly, these would tools used to build the
|
||||
runtime or standard library the currently-being-built compiler will
|
||||
inject into any code it compiles. In many cases, the currently-being
|
||||
built compiler is itself employed for that task, but when that compiler
|
||||
won't run (i.e. its build and host platform differ) this is not possible.
|
||||
Other times, the compiler relies on some other tool, like binutils, that
|
||||
is always built separately so the dependency is unconditional.
|
||||
target offset from the new derivation's platforms. These are programs used
|
||||
at build time that produce code to run with code produced by the depending
|
||||
package. Most commonly, these are tools used to build the runtime or
|
||||
standard library that the currently-being-built compiler will inject into
|
||||
any code it compiles. In many cases, the currently-being-built-compiler is
|
||||
itself employed for that task, but when that compiler won't run (i.e. its
|
||||
build and host platform differ) this is not possible. Other times, the
|
||||
compiler relies on some other tool, like binutils, that is always built
|
||||
separately so that the dependency is unconditional.
|
||||
</para>
|
||||
<para>
|
||||
This is a somewhat confusing dependency to wrap ones head around, and for
|
||||
good reason. As the only one where the platform offsets are not adjacent
|
||||
integers, it requires thinking of a bootstrapping stage
|
||||
<emphasis>two</emphasis> away from the current one. It and it's use-case
|
||||
go hand in hand and are both considered poor form: try not to need this
|
||||
sort dependency, and try not avoid building standard libraries / runtimes
|
||||
This is a somewhat confusing concept to wrap one’s head around, and for
|
||||
good reason. As the only dependency type where the platform offsets are
|
||||
not adjacent integers, it requires thinking of a bootstrapping stage
|
||||
<emphasis>two</emphasis> away from the current one. It and its use-case go
|
||||
hand in hand and are both considered poor form: try to not need this sort
|
||||
of dependency, and try to avoid building standard libraries and runtimes
|
||||
in the same derivation as the compiler produces code using them. Instead
|
||||
strive to build those like a normal library, using the newly-built
|
||||
compiler just as a normal library would. In short, do not use this
|
||||
attribute unless you are packaging a compiler and are sure it is needed.
|
||||
</para>
|
||||
<para>
|
||||
Since these packages are able to be run at build time, that are added to
|
||||
the <envar>PATH</envar>, as described above. But since these packages
|
||||
only are guaranteed to be able to run then, they shouldn't persist as
|
||||
run-time dependencies. This isn't currently enforced, but could be in the
|
||||
future.
|
||||
Since these packages are able to run at build time, they are added to the
|
||||
<envar>PATH</envar>, as described above. But since these packages are only
|
||||
guaranteed to be able to run then, they shouldn't persist as run-time
|
||||
dependencies. This isn't currently enforced, but could be in the future.
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
@ -460,15 +458,15 @@ let f(h, h + 1, i) = i + h
|
||||
<listitem>
|
||||
<para>
|
||||
A list of dependencies whose host and target platforms match the new
|
||||
derivation's host platform. This means a both <literal>0</literal> host
|
||||
offset and <literal>0</literal> target offset from the new derivation's
|
||||
host platform. These are packages used at run-time to generate code also
|
||||
used at run-time. In practice, that would usually be tools used by
|
||||
compilers for metaprogramming/macro systems, or libraries used by the
|
||||
macros/metaprogramming code itself. It's always preferable to use a
|
||||
<varname>depsBuildBuild</varname> dependency in the derivation being
|
||||
built than a <varname>depsHostHost</varname> on the tool doing the
|
||||
building for this purpose.
|
||||
derivation's host platform. This means a <literal>0</literal> host offset
|
||||
and <literal>0</literal> target offset from the new derivation's host
|
||||
platform. These are packages used at run-time to generate code also used
|
||||
at run-time. In practice, this would usually be tools used by compilers
|
||||
for macros or a metaprogramming system, or libraries used by the macros or
|
||||
metaprogramming code itself. It's always preferable to use a
|
||||
<varname>depsBuildBuild</varname> dependency in the derivation being built
|
||||
over a <varname>depsHostHost</varname> on the tool doing the building for
|
||||
this purpose.
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
@ -479,20 +477,20 @@ let f(h, h + 1, i) = i + h
|
||||
<listitem>
|
||||
<para>
|
||||
A list of dependencies whose host platform and target platform match the
|
||||
new derivation's. This means a <literal>0</literal> host offset and
|
||||
new derivation's. This means a <literal>0</literal> host offset and a
|
||||
<literal>1</literal> target offset from the new derivation's host
|
||||
platform. This would be called <varname>depsHostTarget</varname> but for
|
||||
historical continuity. If the dependency doesn't care about the target
|
||||
platform (i.e. isn't a compiler or similar tool), put it here, rather
|
||||
than in <varname>depsBuildBuild</varname>.
|
||||
platform (i.e. isn't a compiler or similar tool), put it here, rather than
|
||||
in <varname>depsBuildBuild</varname>.
|
||||
</para>
|
||||
<para>
|
||||
These often are programs/libraries used by the new derivation at
|
||||
These are often programs and libraries used by the new derivation at
|
||||
<emphasis>run</emphasis>-time, but that isn't always the case. For
|
||||
example, the machine code in a statically linked library is only used at
|
||||
run time, but the derivation containing the library is only needed at
|
||||
build time. Even in the dynamic case, the library may also be needed at
|
||||
build time to appease the linker.
|
||||
example, the machine code in a statically-linked library is only used at
|
||||
run-time, but the derivation containing the library is only needed at
|
||||
build-time. Even in the dynamic case, the library may also be needed at
|
||||
build-time to appease the linker.
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
@ -581,7 +579,7 @@ let f(h, h + 1, i) = i + h
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>
|
||||
<varname>depsTargetTarget</varname>
|
||||
<varname>depsTargetTargetPropagated</varname>
|
||||
</term>
|
||||
<listitem>
|
||||
<para>
|
||||
@ -604,10 +602,10 @@ let f(h, h + 1, i) = i + h
|
||||
<listitem>
|
||||
<para>
|
||||
A natural number indicating how much information to log. If set to 1 or
|
||||
higher, <literal>stdenv</literal> will print moderate debug information
|
||||
during the build. In particular, the <command>gcc</command> and
|
||||
<command>ld</command> wrapper scripts will print out the complete command
|
||||
line passed to the wrapped tools. If set to 6 or higher, the
|
||||
higher, <literal>stdenv</literal> will print moderate debugging
|
||||
information during the build. In particular, the <command>gcc</command>
|
||||
and <command>ld</command> wrapper scripts will print out the complete
|
||||
command line passed to the wrapped tools. If set to 6 or higher, the
|
||||
<literal>stdenv</literal> setup script will be run with <literal>set
|
||||
-x</literal> tracing. If set to 7 or higher, the <command>gcc</command>
|
||||
and <command>ld</command> wrapper scripts will also be run with
|
||||
@ -618,7 +616,7 @@ let f(h, h + 1, i) = i + h
|
||||
</variablelist>
|
||||
|
||||
<variablelist>
|
||||
<title>Variables affecting build properties</title>
|
||||
<title>Attributes affecting build properties</title>
|
||||
<varlistentry>
|
||||
<term>
|
||||
<varname>enableParallelBuilding</varname>
|
||||
@ -637,21 +635,6 @@ let f(h, h + 1, i) = i + h
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>
|
||||
<varname>preferLocalBuild</varname>
|
||||
</term>
|
||||
<listitem>
|
||||
<para>
|
||||
If set, specifies that the package is so lightweight in terms of build
|
||||
operations (e.g. write a text file from a Nix string to the store) that
|
||||
there's no need to look for it in binary caches -- it's faster to just
|
||||
build it locally. It also tells Hydra and other facilities that this
|
||||
package doesn't need to be exported in binary caches (noone would use it,
|
||||
after all).
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
</variablelist>
|
||||
|
||||
<variablelist>
|
||||
@ -681,11 +664,47 @@ passthru = {
|
||||
<literal>hello.baz.value1</literal>. We don't specify any usage or schema
|
||||
of <literal>passthru</literal> - it is meant for values that would be
|
||||
useful outside the derivation in other parts of a Nix expression (e.g. in
|
||||
other derivations). An example would be to convey some specific
|
||||
dependency of your derivation which contains a program with plugins
|
||||
support. Later, others who make derivations with plugins can use
|
||||
passed-through dependency to ensure that their plugin would be
|
||||
binary-compatible with built program.
|
||||
other derivations). An example would be to convey some specific dependency
of your derivation which contains a program with plugin support. Later,
others who make derivations with plugins can use the passed-through
dependency to ensure that their plugin will be binary-compatible with the
built program.
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>
|
||||
<varname>passthru.updateScript</varname>
|
||||
</term>
|
||||
<listitem>
|
||||
<para>
|
||||
A script to be run by <filename>maintainers/scripts/update.nix</filename> when
|
||||
the package is matched. It needs to be an executable file, either on the file
|
||||
system:
|
||||
<programlisting>
|
||||
passthru.updateScript = ./update.sh;
|
||||
</programlisting>
|
||||
or inside the expression itself:
|
||||
<programlisting>
|
||||
passthru.updateScript = writeScript "update-zoom-us" ''
|
||||
#!/usr/bin/env nix-shell
|
||||
#!nix-shell -i bash -p curl pcre common-updater-scripts
|
||||
|
||||
set -eu -o pipefail
|
||||
|
||||
version="$(curl -sI https://zoom.us/client/latest/zoom_x86_64.tar.xz | grep -Fi 'Location:' | pcregrep -o1 '/(([0-9]\.?)+)/')"
|
||||
update-source-version zoom-us "$version"
|
||||
'';
|
||||
</programlisting>
|
||||
The attribute can also contain a list, a script followed by arguments to be passed to it:
|
||||
<programlisting>
|
||||
passthru.updateScript = [ ../../update.sh pname "--requested-release=unstable" ];
|
||||
</programlisting>
|
||||
Note that the update scripts will be run in parallel by default; you should avoid running <command>git commit</command> or any other commands that cannot handle that.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
For information about how to run the updates, execute
|
||||
<cmdsynopsis><command>nix-shell</command> <arg>maintainers/scripts/update.nix</arg></cmdsynopsis>.
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
@ -851,7 +870,7 @@ passthru = {
|
||||
<para>
|
||||
Zip files are unpacked using <command>unzip</command>. However,
|
||||
<command>unzip</command> is not in the standard environment, so you
|
||||
should add it to <varname>buildInputs</varname> yourself.
|
||||
should add it to <varname>nativeBuildInputs</varname> yourself.
|
||||
</para>
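<para>
For example (a sketch; the URL and hash are placeholders), a package
whose source is a zip archive can pull <command>unzip</command> in as a
build-time tool:
<programlisting>
{ stdenv, fetchurl, unzip }:

stdenv.mkDerivation {
  name = "example-1.0";
  src = fetchurl {
    url = "https://example.org/example-1.0.zip";   # placeholder
    sha256 = "0000000000000000000000000000000000000000000000000000";
  };
  # unzip is only needed while unpacking, so it is a native build input.
  nativeBuildInputs = [ unzip ];
}
</programlisting>
</para>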
</listitem>
|
||||
</varlistentry>
|
||||
@ -1091,6 +1110,17 @@ passthru = {
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>
|
||||
<varname>prefixKey</varname>
|
||||
</term>
|
||||
<listitem>
|
||||
<para>
|
||||
The key to use when specifying the prefix. By default, this is set to
|
||||
<option>--prefix=</option> as that is used by the majority of packages.
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>
|
||||
<varname>dontAddDisableDepTrack</varname>
|
||||
@ -1148,12 +1178,11 @@ passthru = {
|
||||
By default, when cross compiling, the configure script has
|
||||
<option>--build=...</option> and <option>--host=...</option> passed.
|
||||
Packages can instead pass <literal>[ "build" "host" "target" ]</literal>
|
||||
or a subset to control exactly which platform flags are passed.
|
||||
Compilers and other tools should use this to also pass the target
|
||||
platform, for example.
|
||||
or a subset to control exactly which platform flags are passed. Compilers
|
||||
and other tools can use this to also pass the target platform.
|
||||
<footnote xml:id="footnote-stdenv-build-time-guessing-impurity">
|
||||
<para>
|
||||
Eventually these will be passed when in native builds too, to improve
|
||||
Eventually these will be passed when building natively as well, to improve
|
||||
determinism: build-time guessing, as is done today, is a risk of
|
||||
impurity.
|
||||
</para>
|
||||
@ -1218,17 +1247,6 @@ passthru = {
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>
|
||||
<varname>checkInputs</varname>
|
||||
</term>
|
||||
<listitem>
|
||||
<para>
|
||||
A list of dependencies used by the phase. This gets included in
|
||||
<varname>buildInputs</varname> when <varname>doCheck</varname> is set.
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>
|
||||
<varname>makeFlags</varname>
|
||||
@ -1378,6 +1396,18 @@ makeFlagsArray=(CFLAGS="-O0 -g" LDFLAGS="-lfoo -lbar")
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>
|
||||
<varname>checkInputs</varname>
|
||||
</term>
|
||||
<listitem>
|
||||
<para>
|
||||
A list of dependencies used by the phase. This gets included in
|
||||
<varname>nativeBuildInputs</varname> when <varname>doCheck</varname> is
|
||||
set.
|
||||
</para>
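<para>
A short sketch (the test framework named here is just an example):
<programlisting>
{ stdenv, dejagnu }:

stdenv.mkDerivation {
  name = "example-1.0";
  src = ./.;   # placeholder source
  doCheck = true;
  # Only added to the build (via nativeBuildInputs) when doCheck is set.
  checkInputs = [ dejagnu ];
}
</programlisting>
</para>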
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>
|
||||
<varname>preCheck</varname>
|
||||
@ -1632,6 +1662,18 @@ installTargets = "install-bin install-doc";</programlisting>
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>
|
||||
<varname>dontPruneLibtoolFiles</varname>
|
||||
</term>
|
||||
<listitem>
|
||||
<para>
|
||||
If set, libtool <literal>.la</literal> files associated with shared
|
||||
libraries won't have their <literal>dependency_libs</literal> field
|
||||
cleared.
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>
|
||||
<varname>forceShare</varname>
|
||||
@ -1650,13 +1692,11 @@ installTargets = "install-bin install-doc";</programlisting>
|
||||
</term>
|
||||
<listitem>
|
||||
<para>
|
||||
A package can export a <link
|
||||
linkend="ssec-setup-hooks">setup
|
||||
hook</link> by setting this variable. The setup hook, if defined, is
|
||||
copied to <filename>$out/nix-support/setup-hook</filename>. Environment
|
||||
variables are then substituted in it using
|
||||
<function
|
||||
linkend="fun-substituteAll">substituteAll</function>.
|
||||
A package can export a <link linkend="ssec-setup-hooks">setup hook</link>
|
||||
by setting this variable. The setup hook, if defined, is copied to
|
||||
<filename>$out/nix-support/setup-hook</filename>. Environment variables
|
||||
are then substituted in it using <function
|
||||
linkend="fun-substituteAll">substituteAll</function>.
|
||||
</para>
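<para>
A minimal sketch (the hook file name is arbitrary):
<programlisting>
stdenv.mkDerivation {
  name = "my-tool-1.0";
  src = ./.;   # placeholder source
  # Copied to $out/nix-support/setup-hook after variable substitution and
  # sourced by the builds of packages that depend on my-tool.
  setupHook = ./setup-hook.sh;
}
</programlisting>
</para>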
</listitem>
|
||||
</varlistentry>
|
||||
@ -1770,7 +1810,7 @@ set debug-file-directory ~/.nix-profile/lib/debug
|
||||
<listitem>
|
||||
<para>
|
||||
A list of dependencies used by the phase. This gets included in
|
||||
<varname>buildInputs</varname> when <varname>doInstallCheck</varname> is
|
||||
<varname>nativeBuildInputs</varname> when <varname>doInstallCheck</varname> is
|
||||
set.
|
||||
</para>
|
||||
</listitem>
|
||||
@ -2089,12 +2129,12 @@ someVar=$(stripHash $name)
|
||||
<title>Package setup hooks</title>
|
||||
|
||||
<para>
|
||||
Nix itself considers a build-time dependency merely something that should
|
||||
Nix itself considers a build-time dependency as merely something that should
|
||||
previously be built and accessible at build time—packages themselves are
|
||||
on their own to perform any additional setup. In most cases, that is fine,
|
||||
and the downstream derivation can deal with it's own dependencies. But for a
|
||||
and the downstream derivation can deal with its own dependencies. But for a
|
||||
few common tasks, that would result in almost every package doing the same
|
||||
sort of setup work---depending not on the package itself, but entirely on
|
||||
sort of setup work—depending not on the package itself, but entirely on
|
||||
which dependencies were used.
|
||||
</para>
|
||||
|
||||
@ -2109,20 +2149,19 @@ someVar=$(stripHash $name)
|
||||
</para>
|
||||
|
||||
<para>
|
||||
The Setup hook mechanism is a bit of a sledgehammer though: a powerful
|
||||
The setup hook mechanism is a bit of a sledgehammer though: a powerful
|
||||
feature with a broad and indiscriminate area of effect. The combination of
|
||||
its power and implicit use may be expedient, but isn't without costs. Nix
|
||||
itself is unchanged, but the spirit of adding dependencies being effect-free
|
||||
itself is unchanged, but the spirit of added dependencies being effect-free
|
||||
is violated even if the letter isn't. For example, if a derivation path is
|
||||
mentioned more than once, Nix itself doesn't care and simply makes sure the
|
||||
dependency derivation is already built just the same—depending is just
|
||||
needing something to exist, and needing is idempotent. However, a dependency
|
||||
specified twice will have its setup hook run twice, and that could easily
|
||||
change the build environment (though a well-written setup hook will
|
||||
therefore strive to be idempotent so this is in fact not observable). More
|
||||
broadly, setup hooks are anti-modular in that multiple dependencies, whether
|
||||
the same or different, should not interfere and yet their setup hooks may
|
||||
well do so.
|
||||
change the build environment (though a well-written setup hook will therefore
|
||||
strive to be idempotent so this is in fact not observable). More broadly,
|
||||
setup hooks are anti-modular in that multiple dependencies, whether the same
|
||||
or different, should not interfere and yet their setup hooks may well do so.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
@ -2141,15 +2180,14 @@ someVar=$(stripHash $name)
|
||||
<para>
|
||||
Packages adding a hook should not hard code a specific hook, but rather
|
||||
choose a variable <emphasis>relative</emphasis> to how they are included.
|
||||
Returning to the C compiler wrapper example, if it itself is an
|
||||
Returning to the C compiler wrapper example, if the wrapper itself is an
|
||||
<literal>n</literal> dependency, then it only wants to accumulate flags from
|
||||
<literal>n + 1</literal> dependencies, as only those ones match the
|
||||
compiler's target platform. The <envar>hostOffset</envar> variable is
|
||||
defined with the current dependency's host offset
|
||||
<envar>targetOffset</envar> with its target offset, before it's setup hook
|
||||
is sourced. Additionally, since most environment hooks don't care about the
|
||||
target platform, That means the setup hook can append to the right bash
|
||||
array by doing something like
|
||||
compiler's target platform. The <envar>hostOffset</envar> variable is defined
|
||||
with the current dependency's host offset and <envar>targetOffset</envar> with
|
||||
its target offset, before its setup hook is sourced. Additionally, since most
|
||||
environment hooks don't care about the target platform, that means the setup
|
||||
hook can append to the right bash array by doing something like
|
||||
<programlisting language="bash">
|
||||
addEnvHooks "$hostOffset" myBashFunction
|
||||
</programlisting>
|
||||
@ -2157,7 +2195,7 @@ addEnvHooks "$hostOffset" myBashFunction
|
||||
|
||||
<para>
|
||||
The <emphasis>existence</emphasis> of setups hooks has long been documented
|
||||
and packages inside Nixpkgs are free to use these mechanism. Other packages,
|
||||
and packages inside Nixpkgs are free to use this mechanism. Other packages,
|
||||
however, should not rely on these mechanisms not changing between Nixpkgs
|
||||
versions. Because of the existing issues with this system, there's little
|
||||
benefit from mandating it be stable for any period of time.
|
||||
@ -2174,19 +2212,19 @@ addEnvHooks "$hostOffset" myBashFunction
|
||||
</term>
|
||||
<listitem>
|
||||
<para>
|
||||
Bintools Wrapper wraps the binary utilities for a bunch of miscellaneous
|
||||
purposes. These are GNU Binutils when targetting Linux, and a mix of
|
||||
cctools and GNU binutils for Darwin. [The "Bintools" name is supposed to
|
||||
be a compromise between "Binutils" and "cctools" not denoting any
|
||||
specific implementation.] Specifically, the underlying bintools package,
|
||||
and a C standard library (glibc or Darwin's libSystem, just for the
|
||||
dynamic loader) are all fed in, and dependency finding, hardening (see
|
||||
below), and purity checks for each are handled by Bintools Wrapper.
|
||||
Packages typically depend on CC Wrapper, which in turn (at run time)
|
||||
depends on Bintools Wrapper.
|
||||
The Bintools Wrapper wraps the binary utilities for a bunch of
|
||||
miscellaneous purposes. These are GNU Binutils when targeting Linux, and
|
||||
a mix of cctools and GNU binutils for Darwin. [The "Bintools" name is
|
||||
supposed to be a compromise between "Binutils" and "cctools" not denoting
|
||||
any specific implementation.] Specifically, the underlying bintools
|
||||
package, and a C standard library (glibc or Darwin's libSystem, just for
|
||||
the dynamic loader) are all fed in, and dependency finding, hardening
|
||||
(see below), and purity checks for each are handled by the Bintools
|
||||
Wrapper. Packages typically depend on CC Wrapper, which in turn (at run
|
||||
time) depends on the Bintools Wrapper.
|
||||
</para>
|
||||
<para>
|
||||
Bintools Wrapper was only just recently split off from CC Wrapper, so
|
||||
The Bintools Wrapper was only just recently split off from CC Wrapper, so
|
||||
the division of labor is still being worked out. For example, it
|
||||
shouldn't care about the C standard library, but just take a
derivation with the dynamic loader (which happens to be the glibc on
|
||||
@ -2194,24 +2232,24 @@ addEnvHooks "$hostOffset" myBashFunction
|
||||
to need to share, and probably the most important to understand. It is
|
||||
currently accomplished by collecting directories of host-platform
|
||||
dependencies (i.e. <varname>buildInputs</varname> and
|
||||
<varname>nativeBuildInputs</varname>) in environment variables. Bintools
|
||||
Wrapper's setup hook causes any <filename>lib</filename> and
|
||||
<varname>nativeBuildInputs</varname>) in environment variables. The
|
||||
Bintools Wrapper's setup hook causes any <filename>lib</filename> and
|
||||
<filename>lib64</filename> subdirectories to be added to
|
||||
<envar>NIX_LDFLAGS</envar>. Since CC Wrapper and Bintools Wrapper use
|
||||
the same strategy, most of the Bintools Wrapper code is sparsely
|
||||
commented and refers to CC Wrapper. But CC Wrapper's code, by contrast,
|
||||
has quite lengthy comments. Bintools Wrapper merely cites those, rather
|
||||
than repeating them, to avoid falling out of sync.
|
||||
<envar>NIX_LDFLAGS</envar>. Since the CC Wrapper and the Bintools Wrapper
|
||||
use the same strategy, most of the Bintools Wrapper code is sparsely
|
||||
commented and refers to the CC Wrapper. But the CC Wrapper's code, by
|
||||
contrast, has quite lengthy comments. The Bintools Wrapper merely cites
|
||||
those, rather than repeating them, to avoid falling out of sync.
|
||||
</para>
|
||||
<para>
|
||||
A final task of the setup hook is defining a number of standard
|
||||
environment variables to tell build systems which executables full-fill
|
||||
environment variables to tell build systems which executables fulfill
|
||||
which purpose. They are defined to just be the base name of the tools,
|
||||
under the assumption that Bintools Wrapper's binaries will be on the
|
||||
under the assumption that the Bintools Wrapper's binaries will be on the
|
||||
path. Firstly, this helps poorly-written packages, e.g. ones that look
|
||||
for just <command>gcc</command> when <envar>CC</envar> isn't defined yet
|
||||
<command>clang</command> is to be used. Secondly, this helps packages
|
||||
not get confused when cross-compiling, in which case multiple Bintools
|
||||
<command>clang</command> is to be used. Secondly, this helps packages not
|
||||
get confused when cross-compiling, in which case multiple Bintools
|
||||
Wrappers may simultaneously be in use.
|
||||
<footnote xml:id="footnote-stdenv-per-platform-wrapper">
|
||||
<para>
|
||||
@ -2223,20 +2261,20 @@ addEnvHooks "$hostOffset" myBashFunction
|
||||
</para>
|
||||
</footnote>
|
||||
<envar>BUILD_</envar>- and <envar>TARGET_</envar>-prefixed versions of
|
||||
the normal environment variable are defined for the additional Bintools
|
||||
the normal environment variable are defined for additional Bintools
|
||||
Wrappers, properly disambiguating them.
|
||||
</para>
|
||||
<para>
|
||||
A problem with this final task is that Bintools Wrapper is honest and
|
||||
A problem with this final task is that the Bintools Wrapper is honest and
|
||||
defines <envar>LD</envar> as <command>ld</command>. Most packages,
|
||||
however, firstly use the C compiler for linking, secondly use
|
||||
<envar>LD</envar> anyways, defining it as the C compiler, and thirdly,
|
||||
only so define <envar>LD</envar> when it is undefined as a fallback.
|
||||
This triple-threat means Bintools Wrapper will break those packages, as
|
||||
LD is already defined as the actual linker which the package won't
|
||||
override yet doesn't want to use. The workaround is to define, just for
|
||||
the problematic package, <envar>LD</envar> as the C compiler. A good way
|
||||
to do this would be <command>preConfigure = "LD=$CC"</command>.
|
||||
only so define <envar>LD</envar> when it is undefined as a fallback. This
|
||||
triple-threat means Bintools Wrapper will break those packages, as LD is
|
||||
already defined as the actual linker which the package won't override yet
|
||||
doesn't want to use. The workaround is to define, just for the
|
||||
problematic package, <envar>LD</envar> as the C compiler. A good way to
|
||||
do this would be <command>preConfigure = "LD=$CC"</command>.
|
||||
</para>
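<para>
Concretely, such a workaround might look like the following sketch (the
package name is a placeholder):
<programlisting>
somePackage.overrideAttrs (oldAttrs: {
  # This package's build system expects LD to be the compiler driver used
  # for linking, so point LD at the C compiler just for this build.
  preConfigure = (oldAttrs.preConfigure or "") + ''
    export LD=$CC
  '';
})
</programlisting>
</para>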
</listitem>
|
||||
</varlistentry>
|
||||
@ -2246,30 +2284,31 @@ addEnvHooks "$hostOffset" myBashFunction
|
||||
</term>
|
||||
<listitem>
|
||||
<para>
|
||||
CC Wrapper wraps a C toolchain for a bunch of miscellaneous purposes.
|
||||
The CC Wrapper wraps a C toolchain for a bunch of miscellaneous purposes.
|
||||
Specifically, a C compiler (GCC or Clang), wrapped binary tools, and a C
|
||||
standard library (glibc or Darwin's libSystem, just for the dynamic
|
||||
loader) are all fed in, and dependency finding, hardening (see below),
|
||||
and purity checks for each are handled by CC Wrapper. Packages typically
|
||||
depend on CC Wrapper, which in turn (at run time) depends on Bintools
|
||||
Wrapper.
|
||||
and purity checks for each are handled by the CC Wrapper. Packages
|
||||
typically depend on the CC Wrapper, which in turn (at run-time) depends
|
||||
on the Bintools Wrapper.
|
||||
</para>
|
||||
<para>
|
||||
Dependency finding is undoubtedly the main task of CC Wrapper. This
|
||||
works just like Bintools Wrapper, except that any
|
||||
Dependency finding is undoubtedly the main task of the CC Wrapper. This
|
||||
works just like the Bintools Wrapper, except that any
|
||||
<filename>include</filename> subdirectory of any relevant dependency is
|
||||
added to <envar>NIX_CFLAGS_COMPILE</envar>. The setup hook itself
|
||||
contains some lengthy comments describing the exact convoluted mechanism
|
||||
by which this is accomplished.
|
||||
</para>
|
||||
<para>
|
||||
CC Wrapper also like Bintools Wrapper defines standard environment
|
||||
variables with the names of the tools it wraps, for the same reasons
|
||||
described above. Importantly, while it includes a <command>cc</command>
|
||||
symlink to the c compiler for portability, the <envar>CC</envar> will be
|
||||
defined using the compiler's "real name" (i.e. <command>gcc</command> or
|
||||
<command>clang</command>). This helps lousy build systems that inspect
|
||||
on the name of the compiler rather than run it.
|
||||
Similarly, the CC Wrapper follows the Bintools Wrapper in defining
|
||||
standard environment variables with the names of the tools it wraps, for
|
||||
the same reasons described above. Importantly, while it includes a
|
||||
<command>cc</command> symlink to the C compiler for portability, the
<envar>CC</envar> will be defined using the compiler's "real name" (i.e.
<command>gcc</command> or <command>clang</command>). This helps lousy
build systems that inspect the name of the compiler rather than running
it.
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
@ -2329,9 +2368,11 @@ addEnvHooks "$hostOffset" myBashFunction
|
||||
<listitem>
|
||||
<para>
|
||||
The <varname>autoreconfHook</varname> derivation adds
|
||||
<varname>autoreconfPhase</varname>, which runs autoreconf, libtoolize
|
||||
and automake, essentially preparing the configure script in
|
||||
autotools-based builds.
|
||||
<varname>autoreconfPhase</varname>, which runs autoreconf, libtoolize and
|
||||
automake, essentially preparing the configure script in autotools-based
|
||||
builds. Most autotools-based packages come with the configure script
|
||||
pre-generated, but this hook is necessary for a few packages and when you
|
||||
need to patch the package’s configure scripts.
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
@ -2375,9 +2416,9 @@ addEnvHooks "$hostOffset" myBashFunction
|
||||
</term>
|
||||
<listitem>
|
||||
<para>
|
||||
Exports <envar>GDK_PIXBUF_MODULE_FILE</envar> environment variable the
|
||||
the builder. Add librsvg package to <varname>buildInputs</varname> to
|
||||
get svg support.
|
||||
Exports <envar>GDK_PIXBUF_MODULE_FILE</envar> environment variable to the
|
||||
builder. Add librsvg package to <varname>buildInputs</varname> to get svg
|
||||
support.
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
@ -2404,30 +2445,6 @@ addEnvHooks "$hostOffset" myBashFunction
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>
|
||||
paxctl
|
||||
</term>
|
||||
<listitem>
|
||||
<para>
|
||||
Defines the <varname>paxmark</varname> helper for setting per-executable
|
||||
PaX flags on Linux (where it is available by default; on all other
|
||||
platforms, <varname>paxmark</varname> is a no-op). For example, to
|
||||
disable secure memory protections on the executable
|
||||
<replaceable>foo</replaceable>:
|
||||
<programlisting>
|
||||
postFixup = ''
|
||||
paxmark m $out/bin/<replaceable>foo</replaceable>
|
||||
'';
|
||||
</programlisting>
|
||||
The <literal>m</literal> flag is the most common flag and is typically
|
||||
required for applications that employ JIT compilation or otherwise need
|
||||
to execute code generated at run-time. Disabling PaX protections should
|
||||
be considered a last resort: if possible, problematic features should be
|
||||
disabled or patched to work with PaX.
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>
|
||||
autoPatchelfHook
|
||||
@ -2436,12 +2453,31 @@ addEnvHooks "$hostOffset" myBashFunction
|
||||
<para>
|
||||
This is a special setup hook which helps in packaging proprietary
|
||||
software in that it automatically tries to find missing shared library
|
||||
dependencies of ELF files. All packages within the
|
||||
<envar>runtimeDependencies</envar> environment variable are
|
||||
unconditionally added to executables, which is useful for programs that
|
||||
use <citerefentry>
|
||||
<refentrytitle>dlopen</refentrytitle>
|
||||
<manvolnum>3</manvolnum> </citerefentry> to load libraries at runtime.
|
||||
dependencies of ELF files based on the given
|
||||
<varname>buildInputs</varname> and <varname>nativeBuildInputs</varname>.
|
||||
</para>
|
||||
<para>
|
||||
You can also specify a <envar>runtimeDependencies</envar> environment
|
||||
variable which lists dependencies that are unconditionally added to all
|
||||
executables.
|
||||
</para>
|
||||
<para>
|
||||
This is useful for programs that use <citerefentry>
|
||||
<refentrytitle>dlopen</refentrytitle>
|
||||
<manvolnum>3</manvolnum>
|
||||
</citerefentry> to load libraries at runtime.
|
||||
</para>
|
||||
<para>
|
||||
In certain situations you may want to run the main command
|
||||
(<command>autoPatchelf</command>) of the setup hook on a file or a set
|
||||
of directories instead of unconditionally patching all outputs. This
|
||||
can be done by setting the <envar>dontAutoPatchelf</envar> environment
|
||||
variable to a non-empty value.
|
||||
</para>
|
||||
<para>
|
||||
The <command>autoPatchelf</command> command also recognizes a
|
||||
<parameter class="command">--no-recurse</parameter> command line flag,
|
||||
which prevents it from recursing into subdirectories.
|
||||
</para>
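<para>
A sketch of typical usage for a prebuilt binary (all names are
placeholders; the exact libraries depend on what the binary links
against):
<programlisting>
{ stdenv, autoPatchelfHook, zlib, libGL }:

stdenv.mkDerivation {
  name = "some-blob-1.0";
  src = ./some-blob-1.0.tar.gz;   # placeholder prebuilt tarball
  nativeBuildInputs = [ autoPatchelfHook ];
  # Libraries the ELF files are expected to link against.
  buildInputs = [ stdenv.cc.cc.lib zlib ];
  # Libraries loaded via dlopen(3) at run time, added unconditionally.
  runtimeDependencies = [ libGL ];
  installPhase = ''
    mkdir -p $out/bin
    cp -r bin/* $out/bin/
  '';
}
</programlisting>
</para>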
</listitem>
|
||||
</varlistentry>
|
||||
@ -2452,22 +2488,146 @@ addEnvHooks "$hostOffset" myBashFunction
|
||||
<listitem>
|
||||
<para>
|
||||
This hook will make a build pause instead of stopping when a failure
|
||||
happen. It prevents nix to cleanup the build environment immediatly and
|
||||
happens. It prevents nix from cleaning up the build environment immediately and
|
||||
allows the user to attach to a build environment using the
|
||||
<command>cntr</command> command. On build error it will print the
|
||||
instruction that are neccessary for <command>cntr</command>. Installing
|
||||
<command>cntr</command> command. Upon build error it will print
|
||||
instructions on how to use <command>cntr</command>. Installing
|
||||
cntr and running the command will provide shell access to the build
|
||||
sandbox of failed build. At <filename>/var/lib/cntr</filename> the
|
||||
sandbox filesystem is mounted. All commands and files of the system are
|
||||
sandboxed filesystem is mounted. All commands and files of the system are
|
||||
still accessible within the shell. To execute commands from the sandbox
|
||||
use the cntr exec subcommand. Note that <command>cntr</command> also
|
||||
needs to be executed on the machine that is doing the build, which might
|
||||
be not the case when remote builders are enabled.
|
||||
<command>cntr</command> is only supported on linux based platforms.
|
||||
not be the case when remote builders are enabled.
|
||||
<command>cntr</command> is only supported on Linux-based platforms. To
|
||||
use it, first add <literal>cntr</literal> to your
|
||||
<literal>environment.systemPackages</literal> on NixOS or alternatively to
|
||||
the root user on non-NixOS systems. Then in the package that is supposed
|
||||
to be inspected, add <literal>breakpointHook</literal> to
|
||||
<literal>nativeBuildInputs</literal>.
|
||||
<programlisting>
|
||||
nativeBuildInputs = [ breakpointHook ];
|
||||
</programlisting>
|
||||
When a build failure happens there will be an instruction printed that
|
||||
shows how to attach with <literal>cntr</literal> to the build sandbox.
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
</variablelist>
|
||||
<varlistentry>
|
||||
<term>
|
||||
libiconv, libintl
|
||||
</term>
|
||||
<listitem>
|
||||
<para>
|
||||
A few libraries automatically add themselves to
<literal>NIX_LDFLAGS</literal>, making their
symbols automatically available to the linker. This includes
libiconv and libintl (gettext). This is done to provide
compatibility between GNU/Linux, where libiconv and libintl
are bundled in, and other systems where that might not be the
case. Sometimes, this behavior is not desired. To disable
it, set <literal>dontAddExtraLibs</literal>.
|
||||
</para>
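<para>
Sketch of opting out (assuming libiconv is in scope and otherwise wanted
as a normal input):
<programlisting>
stdenv.mkDerivation {
  name = "example-1.0";
  src = ./.;   # placeholder source
  buildInputs = [ libiconv ];
  # Keep libiconv/libintl out of NIX_LDFLAGS; link them explicitly instead.
  dontAddExtraLibs = true;
}
</programlisting>
</para>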
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>
|
||||
cmake
|
||||
</term>
|
||||
<listitem>
|
||||
<para>
|
||||
Overrides the default configure phase to run the CMake command. By
|
||||
default, we use the Make generator of CMake. In
|
||||
addition, dependencies are added automatically to CMAKE_PREFIX_PATH so
|
||||
that packages are correctly detected by CMake. Some additional flags
|
||||
are passed in to give similar behavior to configure-based packages. You
|
||||
can disable this hook’s behavior by setting configurePhase to a custom
|
||||
value, or by setting dontUseCmakeConfigure. cmakeFlags controls flags
|
||||
passed only to CMake. By default, parallel building is enabled as CMake
|
||||
supports parallel building almost everywhere. When Ninja is also in
|
||||
use, CMake will detect that and use the ninja generator.
|
||||
</para>
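<para>
A brief sketch (the flag is an arbitrary example of a package-specific
CMake option):
<programlisting>
{ stdenv, cmake }:

stdenv.mkDerivation {
  name = "example-1.0";
  src = ./.;   # placeholder source
  nativeBuildInputs = [ cmake ];
  # Appended to the flags the hook already passes to cmake.
  cmakeFlags = [ "-DBUILD_TESTING=OFF" ];
}
</programlisting>
</para>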
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>
|
||||
xcbuildHook
|
||||
</term>
|
||||
<listitem>
|
||||
<para>
|
||||
Overrides the build and install phases to run the “xcbuild” command.
|
||||
This hook is needed when a project only comes with build files for the
|
||||
Xcode build system. You can disable this behavior by setting buildPhase
|
||||
and configurePhase to a custom value. xcbuildFlags controls flags
|
||||
passed only to xcbuild.
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>
|
||||
meson
|
||||
</term>
|
||||
<listitem>
|
||||
<para>
|
||||
Overrides the configure phase to run meson to generate Ninja files. You
|
||||
can disable this behavior by setting configurePhase to a custom value,
|
||||
or by setting dontUseMesonConfigure. To run these files, you should
|
||||
accompany meson with ninja. mesonFlags controls only the flags passed
|
||||
to meson. By default, parallel building is enabled as Meson supports
|
||||
parallel building almost everywhere.
|
||||
</para>
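<para>
A brief sketch (the meson option is hypothetical), pairing meson with
ninja as suggested above:
<programlisting>
{ stdenv, meson, ninja }:

stdenv.mkDerivation {
  name = "example-1.0";
  src = ./.;   # placeholder source
  nativeBuildInputs = [ meson ninja ];
  # Passed only to meson during the configure phase.
  mesonFlags = [ "-Dexample-feature=false" ];
}
</programlisting>
</para>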
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>
|
||||
ninja
|
||||
</term>
|
||||
<listitem>
|
||||
<para>
|
||||
Overrides the build, install, and check phase to run ninja instead of
|
||||
make. You can disable this behavior with the dontUseNinjaBuild,
|
||||
dontUseNinjaInstall, and dontUseNinjaCheck, respectively. Parallel
|
||||
building is enabled by default in Ninja.
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>
|
||||
unzip
|
||||
</term>
|
||||
<listitem>
|
||||
<para>
|
||||
This setup hook will allow you to unzip .zip files specified in $src.
|
||||
There are many similar packages like unrar, undmg, etc.
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>
|
||||
wafHook
|
||||
</term>
|
||||
<listitem>
|
||||
<para>
|
||||
Overrides the configure, build, and install phases. This will run the
|
||||
"waf" script used by many projects. If waf doesn’t exist, it will copy
|
||||
the version of waf available in Nixpkgs wafFlags can be used to pass
|
||||
flags to the waf script.
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>
|
||||
scons
|
||||
</term>
|
||||
<listitem>
|
||||
<para>
|
||||
Overrides the build, install, and check phases. This uses the scons
|
||||
build system as a replacement for make. scons does not provide a
|
||||
configure phase, so everything is managed at build and install time.
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
</variablelist>
|
||||
</para>
|
||||
</section>
|
||||
<section xml:id="sec-purity-in-nixpkgs">
|
||||
|
@ -9,6 +9,7 @@
|
||||
body
|
||||
{
|
||||
font-family: "Nimbus Sans L", sans-serif;
|
||||
font-size: 1em;
|
||||
background: white;
|
||||
margin: 2em 1em 2em 1em;
|
||||
}
|
||||
@ -28,6 +29,25 @@ h2 /* chapters, appendices, subtitle */
|
||||
font-size: 180%;
|
||||
}
|
||||
|
||||
div.book
|
||||
{
|
||||
text-align: center;
|
||||
}
|
||||
|
||||
div.book > div
|
||||
{
|
||||
/*
|
||||
* based on https://medium.com/@zkareemz/golden-ratio-62b3b6d4282a
|
||||
* we do 70 characters per line to fit code listings better
|
||||
* 70 * (font-size / 1.618)
|
||||
* expression for emacs:
|
||||
* (* 70 (/ 1 1.618))
|
||||
*/
|
||||
max-width: 43.2em;
|
||||
text-align: left;
|
||||
margin: auto;
|
||||
}
|
||||
|
||||
/* Extra space between chapters, appendices. */
|
||||
div.chapter > div.titlepage h2, div.appendix > div.titlepage h2
|
||||
{
|
||||
@ -102,8 +122,8 @@ pre.screen, pre.programlisting
|
||||
{
|
||||
border: 1px solid #b0b0b0;
|
||||
padding: 3px 3px;
|
||||
margin-left: 1.5em;
|
||||
margin-right: 1.5em;
|
||||
margin-left: 0.5em;
|
||||
margin-right: 0.5em;
|
||||
|
||||
background: #f4f4f8;
|
||||
font-family: monospace;
|
||||
|
@ -94,6 +94,15 @@ rec {
|
||||
attrValues = builtins.attrValues or (attrs: attrVals (attrNames attrs) attrs);
|
||||
|
||||
|
||||
/* Given a set of attribute names, return the set of the corresponding
|
||||
attributes from the given set.
|
||||
|
||||
Example:
|
||||
getAttrs [ "a" "b" ] { a = 1; b = 2; c = 3; }
|
||||
=> { a = 1; b = 2; }
|
||||
*/
|
||||
getAttrs = names: attrs: genAttrs names (name: attrs.${name});
|
||||
|
||||
/* Collect each attribute named `attr' from a list of attribute
|
||||
sets. Sets that don't contain the named attribute are ignored.
|
||||
|
||||
|
@ -1,113 +0,0 @@
|
||||
{lib, pkgs}:
|
||||
let inherit (lib) nvs; in
|
||||
{
|
||||
|
||||
# composableDerivation basically mixes these features:
|
||||
# - fix function
|
||||
# - mergeAttrBy
|
||||
# - provides shortcuts for "options" such as "--enable-foo" and adding
|
||||
# buildInputs, see php example
|
||||
#
|
||||
# It predates styles which are common today, such as
|
||||
# * the config attr
|
||||
# * mkDerivation.override feature
|
||||
# * overrideDerivation (lib/customization.nix)
|
||||
#
|
||||
# Some of the most more important usage examples (which could be rewritten if it was important):
|
||||
# * php
|
||||
# * postgis
|
||||
# * vim_configurable
|
||||
#
|
||||
# A minimal example illustrating most features would look like this:
|
||||
# let base = composableDerivation { (fixed: let inherit (fixed.fixed) name in {
|
||||
# src = fetchurl {
|
||||
# }
|
||||
# buildInputs = [A];
|
||||
# preConfigre = "echo ${name}";
|
||||
# # attention, "name" attr is missing, thus you cannot instantiate "base".
|
||||
# }
|
||||
# in {
|
||||
# # These all add name attribute, thus you can instantiate those:
|
||||
# v1 = base.merge ({ name = "foo-add-B"; buildInputs = [B]; }); // B gets merged into buildInputs
|
||||
# v2 = base.merge ({ name = "mix-in-pre-configure-lines" preConfigre = ""; });
|
||||
# v3 = base.replace ({ name = "foo-no-A-only-B;" buildInputs = [B]; });
|
||||
# }
|
||||
#
|
||||
# So yes, you can think about it being something like nixos modules, and
|
||||
# you'd be merging "features" in one at a time using .merge or .replace
|
||||
# Thanks Shea for telling me that I rethink the documentation ..
|
||||
#
|
||||
# issues:
|
||||
# * its complicated to understand
|
||||
# * some "features" such as exact merge behaviour are buried in mergeAttrBy
|
||||
# and defaultOverridableDelayableArgs assuming the default behaviour does
|
||||
# the right thing in the common case
|
||||
# * Eelco once said using such fix style functions are slow to evaluate
|
||||
# * Too quick & dirty. Hard to understand for others. The benefit was that
|
||||
# you were able to create a kernel builder like base derivation and replace
|
||||
# / add patches the way you want without having to declare function arguments
|
||||
#
|
||||
# nice features:
|
||||
# declaring "optional features" is modular. For instance:
|
||||
# flags.curl = {
|
||||
# configureFlags = ["--with-curl=${curl.dev}" "--with-curlwrappers"];
|
||||
# buildInputs = [curl openssl];
|
||||
# };
|
||||
# flags.other = { .. }
|
||||
# (Example taken from PHP)
|
||||
#
|
||||
# alternative styles / related features:
|
||||
# * Eg see function supporting building the kernel
|
||||
# * versionedDerivation (discussion about this is still going on - or ended)
|
||||
# * composedArgsAndFun
|
||||
# * mkDerivation.override
|
||||
# * overrideDerivation
|
||||
# * using { .., *Support ? false }: like configurable options.
|
||||
# To find those examples use grep
|
||||
#
|
||||
# To sum up: It exists for historical reasons - and for most commonly used
|
||||
# tasks the alternatives should be used
|
||||
#
|
||||
# If you have questions about this code ping Marc Weber.
|
||||
composableDerivation = {
|
||||
mkDerivation ? pkgs.stdenv.mkDerivation,
|
||||
|
||||
# list of functions to be applied before defaultOverridableDelayableArgs removes removeAttrs names
|
||||
# prepareDerivationArgs handles derivation configurations
|
||||
applyPreTidy ? [ lib.prepareDerivationArgs ],
|
||||
|
||||
# consider adding addtional elements by derivation.merge { removeAttrs = ["elem"]; };
|
||||
removeAttrs ? ["cfg" "flags"]
|
||||
|
||||
}: (lib.defaultOverridableDelayableArgs ( a: mkDerivation a)
|
||||
{
|
||||
inherit applyPreTidy removeAttrs;
|
||||
}).merge;
|
||||
|
||||
# some utility functions
|
||||
# use this function to generate flag attrs for prepareDerivationArgs
|
||||
# E nable D isable F eature
|
||||
edf = {name, feat ? name, enable ? {}, disable ? {} , value ? ""}:
|
||||
nvs name {
|
||||
set = {
|
||||
configureFlags = ["--enable-${feat}${if value == "" then "" else "="}${value}"];
|
||||
} // enable;
|
||||
unset = {
|
||||
configureFlags = ["--disable-${feat}"];
|
||||
} // disable;
|
||||
};
|
||||
|
||||
# same for --with and --without-
|
||||
# W ith or W ithout F eature
|
||||
wwf = {name, feat ? name, enable ? {}, disable ? {}, value ? ""}:
|
||||
nvs name {
|
||||
set = enable // {
|
||||
configureFlags = ["--with-${feat}${if value == "" then "" else "="}${value}"]
|
||||
++ lib.maybeAttr "configureFlags" [] enable;
|
||||
};
|
||||
unset = disable // {
|
||||
configureFlags = ["--without-${feat}"]
|
||||
++ lib.maybeAttr "configureFlags" [] disable;
|
||||
};
|
||||
};
|
||||
}
|
@ -61,10 +61,10 @@ let
|
||||
boolToString mergeAttrs flip mapNullable inNixShell min max
|
||||
importJSON warn info nixpkgsVersion version mod compare
|
||||
splitByAndCompare functionArgs setFunctionArgs isFunction;
|
||||
inherit (fixedPoints) fix fix' extends composeExtensions
|
||||
inherit (fixedPoints) fix fix' converge extends composeExtensions
|
||||
makeExtensible makeExtensibleWithCustomName;
|
||||
inherit (attrsets) attrByPath hasAttrByPath setAttrByPath
|
||||
getAttrFromPath attrVals attrValues catAttrs filterAttrs
|
||||
getAttrFromPath attrVals attrValues getAttrs catAttrs filterAttrs
|
||||
filterAttrsRecursive foldAttrs collect nameValuePair mapAttrs
|
||||
mapAttrs' mapAttrsToList mapAttrsRecursive mapAttrsRecursiveCond
|
||||
genAttrs isDerivation toDerivation optionalAttrs
|
||||
@ -80,7 +80,7 @@ let
|
||||
inherit (strings) concatStrings concatMapStrings concatImapStrings
|
||||
intersperse concatStringsSep concatMapStringsSep
|
||||
concatImapStringsSep makeSearchPath makeSearchPathOutput
|
||||
makeLibraryPath makeBinPath makePerlPath makeFullPerlPath optionalString
|
||||
makeLibraryPath makeBinPath optionalString
|
||||
hasPrefix hasSuffix stringToCharacters stringAsChars escape
|
||||
escapeShellArg escapeShellArgs replaceChars lowerChars
|
||||
upperChars toLower toUpper addContextFrom splitString
|
||||
@ -94,7 +94,7 @@ let
|
||||
callPackageWith callPackagesWith extendDerivation hydraJob
|
||||
makeScope;
|
||||
inherit (meta) addMetaAttrs dontDistribute setName updateName
|
||||
appendToName mapDerivationAttrset lowPrio lowPrioSet hiPrio
|
||||
appendToName mapDerivationAttrset setPrio lowPrio lowPrioSet hiPrio
|
||||
hiPrioSet;
|
||||
inherit (sources) pathType pathIsDirectory cleanSourceFilter
|
||||
cleanSource sourceByRegex sourceFilesBySuffices
|
||||
@ -109,7 +109,7 @@ let
|
||||
mkFixStrictness mkOrder mkBefore mkAfter mkAliasDefinitions
|
||||
mkAliasAndWrapDefinitions fixMergeModules mkRemovedOptionModule
|
||||
mkRenamedOptionModule mkMergedOptionModule mkChangedOptionModule
|
||||
mkAliasOptionModule doRename filterModules;
|
||||
mkAliasOptionModule mkAliasOptionModuleWithPriority doRename filterModules;
|
||||
inherit (options) isOption mkEnableOption mkSinkUndeclaredOptions
|
||||
mergeDefaultOption mergeOneOption mergeEqualOption getValues
|
||||
getFiles optionAttrSetToDocList optionAttrSetToDocList'
|
||||
@ -125,14 +125,14 @@ let
|
||||
traceShowValMarked showVal traceCall traceCall2 traceCall3
|
||||
traceValIfNot runTests testAllTrue traceCallXml attrNamesToStr;
|
||||
inherit (misc) maybeEnv defaultMergeArg defaultMerge foldArgs
|
||||
defaultOverridableDelayableArgs composedArgsAndFun
|
||||
maybeAttrNullable maybeAttr ifEnable checkFlag getValue
|
||||
checkReqs uniqList uniqListExt condConcat lazyGenericClosure
|
||||
innerModifySumArgs modifySumArgs innerClosePropagation
|
||||
closePropagation mapAttrsFlatten nvs setAttr setAttrMerge
|
||||
mergeAttrsWithFunc mergeAttrsConcatenateValues
|
||||
mergeAttrsNoOverride mergeAttrByFunc mergeAttrsByFuncDefaults
|
||||
mergeAttrsByFuncDefaultsClean mergeAttrBy prepareDerivationArgs
|
||||
nixType imap overridableDelayableArgs;
|
||||
mergeAttrsByFuncDefaultsClean mergeAttrBy
|
||||
fakeSha256 fakeSha512
|
||||
nixType imap;
|
||||
});
|
||||
in lib
|
||||
|
@ -35,74 +35,6 @@ rec {
|
||||
withStdOverrides;
|
||||
|
||||
|
||||
# predecessors: proposed replacement for applyAndFun (which has a bug cause it merges twice)
|
||||
# the naming "overridableDelayableArgs" tries to express that you can
|
||||
# - override attr values which have been supplied earlier
|
||||
# - use attr values before they have been supplied by accessing the fix point
|
||||
# name "fixed"
|
||||
# f: the (delayed overridden) arguments are applied to this
|
||||
#
|
||||
# initial: initial attrs arguments and settings. see defaultOverridableDelayableArgs
|
||||
#
|
||||
# returns: f applied to the arguments // special attributes attrs
|
||||
# a) merge: merge applied args with new args. Wether an argument is overridden depends on the merge settings
|
||||
# b) replace: this let's you replace and remove names no matter which merge function has been set
|
||||
#
|
||||
# examples: see test cases "res" below;
|
||||
overridableDelayableArgs =
|
||||
f: # the function applied to the arguments
|
||||
initial: # you pass attrs, the functions below are passing a function taking the fix argument
|
||||
let
|
||||
takeFixed = if lib.isFunction initial then initial else (fixed : initial); # transform initial to an expression always taking the fixed argument
|
||||
tidy = args:
|
||||
let # apply all functions given in "applyPreTidy" in sequence
|
||||
applyPreTidyFun = fold ( n: a: x: n ( a x ) ) lib.id (maybeAttr "applyPreTidy" [] args);
|
||||
in removeAttrs (applyPreTidyFun args) ( ["applyPreTidy"] ++ (maybeAttr "removeAttrs" [] args) ); # tidy up args before applying them
|
||||
fun = n: x:
|
||||
let newArgs = fixed:
|
||||
let args = takeFixed fixed;
|
||||
mergeFun = args.${n};
|
||||
in if isAttrs x then (mergeFun args x)
|
||||
else assert lib.isFunction x;
|
||||
mergeFun args (x ( args // { inherit fixed; }));
|
||||
in overridableDelayableArgs f newArgs;
|
||||
in
|
||||
(f (tidy (lib.fix takeFixed))) // {
|
||||
merge = fun "mergeFun";
|
||||
replace = fun "keepFun";
|
||||
};
|
||||
defaultOverridableDelayableArgs = f:
|
||||
let defaults = {
|
||||
mergeFun = mergeAttrByFunc; # default merge function. merge strategie (concatenate lists, strings) is given by mergeAttrBy
|
||||
keepFun = a: b: { inherit (a) removeAttrs mergeFun keepFun mergeAttrBy; } // b; # even when using replace preserve these values
|
||||
applyPreTidy = []; # list of functions applied to args before args are tidied up (usage case : prepareDerivationArgs)
|
||||
mergeAttrBy = mergeAttrBy // {
|
||||
applyPreTidy = a: b: a ++ b;
|
||||
removeAttrs = a: b: a ++ b;
|
||||
};
|
||||
removeAttrs = ["mergeFun" "keepFun" "mergeAttrBy" "removeAttrs" "fixed" ]; # before applying the arguments to the function make sure these names are gone
|
||||
};
|
||||
in (overridableDelayableArgs f defaults).merge;
|
||||
|
||||
|
||||
|
||||
# rec { # an example of how composedArgsAndFun can be used
|
||||
# a = composedArgsAndFun (x: x) { a = ["2"]; meta = { d = "bar";}; };
|
||||
# # meta.d will be lost ! It's your task to preserve it (eg using a merge function)
|
||||
# b = a.passthru.function { a = [ "3" ]; meta = { d2 = "bar2";}; };
|
||||
# # instead of passing/ overriding values you can use a merge function:
|
||||
# c = b.passthru.function ( x: { a = x.a ++ ["4"]; }); # consider using (maybeAttr "a" [] x)
|
||||
# }
|
||||
# result:
|
||||
# {
|
||||
# a = { a = ["2"]; meta = { d = "bar"; }; passthru = { function = .. }; };
|
||||
# b = { a = ["3"]; meta = { d2 = "bar2"; }; passthru = { function = .. }; };
|
||||
# c = { a = ["3" "4"]; meta = { d2 = "bar2"; }; passthru = { function = .. }; };
|
||||
# # c2 is equal to c
|
||||
# }
|
||||
composedArgsAndFun = f: foldArgs defaultMerge f {};
|
||||
|
||||
|
||||
# shortcut for attrByPath ["name"] default attrs
|
||||
maybeAttrNullable = maybeAttr;
|
||||
|
||||
@ -285,7 +217,7 @@ rec {
|
||||
# };
|
||||
# will result in
|
||||
# { mergeAttrsBy = [...]; buildInputs = [ a b c d ]; }
|
||||
# is used by prepareDerivationArgs, defaultOverridableDelayableArgs and can be used when composing using
|
||||
# is used by defaultOverridableDelayableArgs and can be used when composing using
|
||||
# foldArgs, composedArgsAndFun or applyAndFun. Example: composableDerivation in all-packages.nix
|
||||
mergeAttrByFunc = x: y:
|
||||
let
|
||||
@ -318,58 +250,6 @@ rec {
|
||||
// listToAttrs (map (n: nameValuePair n (a: b: "${a}\n${b}") ) [ "preConfigure" "postInstall" ])
|
||||
;
|
||||
|
||||
# prepareDerivationArgs tries to make writing configurable derivations easier
|
||||
# example:
|
||||
# prepareDerivationArgs {
|
||||
# mergeAttrBy = {
|
||||
# myScript = x: y: x ++ "\n" ++ y;
|
||||
# };
|
||||
# cfg = {
|
||||
# readlineSupport = true;
|
||||
# };
|
||||
# flags = {
|
||||
# readline = {
|
||||
# set = {
|
||||
# configureFlags = [ "--with-compiler=${compiler}" ];
|
||||
# buildInputs = [ compiler ];
|
||||
# pass = { inherit compiler; READLINE=1; };
|
||||
# assertion = compiler.dllSupport;
|
||||
# myScript = "foo";
|
||||
# };
|
||||
# unset = { configureFlags = ["--without-compiler"]; };
|
||||
# };
|
||||
# };
|
||||
# src = ...
|
||||
# buildPhase = '' ... '';
|
||||
# name = ...
|
||||
# myScript = "bar";
|
||||
# };
|
||||
# if you don't have need for unset you can omit the surrounding set = { .. } attr
|
||||
# all attrs except flags cfg and mergeAttrBy will be merged with the
|
||||
# additional data from flags depending on config settings
|
||||
# It's used in composableDerivation in all-packages.nix. It's also used
|
||||
# heavily in the new python and libs implementation
|
||||
#
|
||||
# should we check for misspelled cfg options?
|
||||
# TODO use args.mergeFun here as well?
|
||||
prepareDerivationArgs = args:
|
||||
let args2 = { cfg = {}; flags = {}; } // args;
|
||||
flagName = name: "${name}Support";
|
||||
cfgWithDefaults = (listToAttrs (map (n: nameValuePair (flagName n) false) (attrNames args2.flags)))
|
||||
// args2.cfg;
|
||||
opts = attrValues (mapAttrs (a: v:
|
||||
let v2 = if v ? set || v ? unset then v else { set = v; };
|
||||
n = if cfgWithDefaults.${flagName a} then "set" else "unset";
|
||||
attr = maybeAttr n {} v2; in
|
||||
if (maybeAttr "assertion" true attr)
|
||||
then attr
|
||||
else throw "assertion of flag ${a} of derivation ${args.name} failed"
|
||||
) args2.flags );
|
||||
in removeAttrs
|
||||
(mergeAttrsByFuncDefaults ([args] ++ opts ++ [{ passthru = cfgWithDefaults; }]))
|
||||
["flags" "cfg" "mergeAttrBy" ];
nixType = x:
|
||||
if isAttrs x then
|
||||
if x ? outPath then "derivation"
|
||||
@ -390,4 +270,8 @@ rec {
|
||||
starting at zero.
|
||||
*/
|
||||
imap = imap1;
|
||||
|
||||
# Fake hashes. Can be used as hash placeholders, when computing hash ahead isn't trivial
|
||||
fakeSha256 = "0000000000000000000000000000000000000000000000000000000000000000";
|
||||
fakeSha512 = "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000";
|
||||
}
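A minimal sketch of how the fake hashes are typically used while packaging (URL is hypothetical): start with the placeholder, build once, then paste the real hash that Nix reports.

src = fetchurl {
  url = "https://example.org/source-1.0.tar.gz";   # hypothetical
  sha256 = lib.fakeSha256;   # replace with the "got: ..." hash from the failed fetch
};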
@ -24,6 +24,16 @@ rec {
|
||||
# for a concrete example.
|
||||
fix' = f: let x = f x // { __unfix__ = f; }; in x;
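A minimal sketch of what fix' adds over a plain fixpoint (attribute names are illustrative): the result keeps the generating function in __unfix__ so it can be re-fixed later.

let s = lib.fix' (self: { a = 1; b = self.a + 1; });
in {
  b = s.b;                           # 2
  again = (lib.fix s.__unfix__).b;   # 2 -- s.__unfix__ is the original function
}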
# Return the fixpoint that `f` converges to when called recursively, starting
|
||||
# with the input `x`.
|
||||
#
|
||||
# nix-repl> converge (x: x / 2) 16
|
||||
# 0
|
||||
converge = f: x:
|
||||
if (f x) == x
|
||||
then x
|
||||
else converge f (f x);
|
||||
|
||||
# Modify the contents of an explicitly recursive attribute set in a way that
|
||||
# honors `self`-references. This is accomplished with a function
|
||||
#
|
||||
@ -41,6 +51,18 @@ rec {
|
||||
# think of it as an infix operator `g extends f` that mimics the syntax from
|
||||
# Java. It may seem counter-intuitive to have the "base class" as the second
|
||||
# argument, but it's nice this way if several uses of `extends` are cascaded.
|
||||
#
|
||||
# To get a better understanding of how `extends` turns a function with a fix
# point (the package set we start with) into a new function with a different fix
# point (the desired package set), let's just see how `extends g f`
|
||||
# unfolds with `g` and `f` defined above:
|
||||
#
|
||||
# extends g f = self: let super = f self; in super // g self super;
|
||||
# = self: let super = { foo = "foo"; bar = "bar"; foobar = self.foo + self.bar; }; in super // g self super
|
||||
# = self: { foo = "foo"; bar = "bar"; foobar = self.foo + self.bar; } // g self { foo = "foo"; bar = "bar"; foobar = self.foo + self.bar; }
|
||||
# = self: { foo = "foo"; bar = "bar"; foobar = self.foo + self.bar; } // { foo = "foo" + " + "; }
|
||||
# = self: { foo = "foo + "; bar = "bar"; foobar = self.foo + self.bar; }
|
||||
#
|
||||
extends = f: rattrs: self: let super = rattrs self; in super // f self super;
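A minimal sketch tying this together, assuming the `f` and `g` from the comment above and `lib.fix` as the fixpoint function:

let
  f = self: { foo = "foo"; bar = "bar"; foobar = self.foo + self.bar; };
  g = self: super: { foo = super.foo + " + "; };
in lib.fix (lib.extends g f)
# => { foo = "foo + "; bar = "bar"; foobar = "foo + bar"; }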
# Compose two extending functions of the type expected by 'extends'
|
||||
|
@ -13,6 +13,11 @@ lib.mapAttrs (n: v: v // { shortName = n; }) rec {
|
||||
* add it to this list. The URL mentioned above is a good source for inspiration.
|
||||
*/
|
||||
|
||||
abstyles = spdx {
|
||||
spdxId = "Abstyles";
|
||||
fullName = "Abstyles License";
|
||||
};
|
||||
|
||||
afl21 = spdx {
|
||||
spdxId = "AFL-2.1";
|
||||
fullName = "Academic Free License v2.1";
|
||||
@ -24,13 +29,13 @@ lib.mapAttrs (n: v: v // { shortName = n; }) rec {
|
||||
};
|
||||
|
||||
agpl3 = spdx {
|
||||
spdxId = "AGPL-3.0";
|
||||
fullName = "GNU Affero General Public License v3.0";
|
||||
spdxId = "AGPL-3.0-only";
|
||||
fullName = "GNU Affero General Public License v3.0 only";
|
||||
};
|
||||
|
||||
agpl3Plus = {
|
||||
agpl3Plus = spdx {
|
||||
spdxId = "AGPL-3.0-or-later";
|
||||
fullName = "GNU Affero General Public License v3.0 or later";
|
||||
inherit (agpl3) url;
|
||||
};
|
||||
|
||||
amazonsl = {
|
||||
@ -42,6 +47,7 @@ lib.mapAttrs (n: v: v // { shortName = n; }) rec {
|
||||
amd = {
|
||||
fullName = "AMD License Agreement";
|
||||
url = http://developer.amd.com/amd-license-agreement/;
|
||||
free = false;
|
||||
};
|
||||
|
||||
apsl20 = spdx {
|
||||
@ -99,14 +105,10 @@ lib.mapAttrs (n: v: v // { shortName = n; }) rec {
|
||||
fullName = ''BSD 4-clause "Original" or "Old" License'';
|
||||
};
|
||||
|
||||
bsl10 = {
|
||||
fullName = "Business Source License 1.0";
|
||||
url = https://mariadb.com/bsl10;
|
||||
};
|
||||
|
||||
bsl11 = {
|
||||
fullName = "Business Source License 1.1";
|
||||
url = https://mariadb.com/bsl11;
|
||||
free = false;
|
||||
};
|
||||
|
||||
clArtistic = spdx {
|
||||
@ -264,13 +266,23 @@ lib.mapAttrs (n: v: v // { shortName = n; }) rec {
|
||||
};
|
||||
|
||||
fdl12 = spdx {
|
||||
spdxId = "GFDL-1.2";
|
||||
fullName = "GNU Free Documentation License v1.2";
|
||||
spdxId = "GFDL-1.2-only";
|
||||
fullName = "GNU Free Documentation License v1.2 only";
|
||||
};
|
||||
|
||||
fdl12Plus = spdx {
|
||||
spdxId = "GFDL-1.2-or-later";
|
||||
fullName = "GNU Free Documentation License v1.2 or later";
|
||||
};
|
||||
|
||||
fdl13 = spdx {
|
||||
spdxId = "GFDL-1.3";
|
||||
fullName = "GNU Free Documentation License v1.3";
|
||||
spdxId = "GFDL-1.3-only";
|
||||
fullName = "GNU Free Documentation License v1.3 only";
|
||||
};
|
||||
|
||||
fdl13Plus = spdx {
|
||||
spdxId = "GFDL-1.3-or-later";
|
||||
fullName = "GNU Free Documentation License v1.3 or later";
|
||||
};
|
||||
|
||||
ffsl = {
|
||||
@ -295,24 +307,23 @@ lib.mapAttrs (n: v: v // { shortName = n; }) rec {
|
||||
};
|
||||
|
||||
gpl1 = spdx {
|
||||
spdxId = "GPL-1.0";
|
||||
spdxId = "GPL-1.0-only";
|
||||
fullName = "GNU General Public License v1.0 only";
|
||||
};
|
||||
|
||||
gpl1Plus = spdx {
|
||||
spdxId = "GPL-1.0+";
|
||||
spdxId = "GPL-1.0-or-later";
|
||||
fullName = "GNU General Public License v1.0 or later";
|
||||
};
|
||||
|
||||
gpl2 = spdx {
|
||||
spdxId = "GPL-2.0";
|
||||
spdxId = "GPL-2.0-only";
|
||||
fullName = "GNU General Public License v2.0 only";
|
||||
};
|
||||
|
||||
gpl2Classpath = {
|
||||
gpl2Classpath = spdx {
|
||||
spdxId = "GPL-2.0-with-classpath-exception";
|
||||
fullName = "GNU General Public License v2.0 only (with Classpath exception)";
|
||||
url = https://fedoraproject.org/wiki/Licensing/GPL_Classpath_Exception;
|
||||
};
|
||||
|
||||
gpl2ClasspathPlus = {
|
||||
@ -326,17 +337,17 @@ lib.mapAttrs (n: v: v // { shortName = n; }) rec {
|
||||
};
|
||||
|
||||
gpl2Plus = spdx {
|
||||
spdxId = "GPL-2.0+";
|
||||
spdxId = "GPL-2.0-or-later";
|
||||
fullName = "GNU General Public License v2.0 or later";
|
||||
};
|
||||
|
||||
gpl3 = spdx {
|
||||
spdxId = "GPL-3.0";
|
||||
spdxId = "GPL-3.0-only";
|
||||
fullName = "GNU General Public License v3.0 only";
|
||||
};
|
||||
|
||||
gpl3Plus = spdx {
|
||||
spdxId = "GPL-3.0+";
|
||||
spdxId = "GPL-3.0-or-later";
|
||||
fullName = "GNU General Public License v3.0 or later";
|
||||
};
|
||||
|
||||
@ -406,32 +417,32 @@ lib.mapAttrs (n: v: v // { shortName = n; }) rec {
|
||||
};
|
||||
|
||||
lgpl2 = spdx {
|
||||
spdxId = "LGPL-2.0";
|
||||
spdxId = "LGPL-2.0-only";
|
||||
fullName = "GNU Library General Public License v2 only";
|
||||
};
|
||||
|
||||
lgpl2Plus = spdx {
|
||||
spdxId = "LGPL-2.0+";
|
||||
spdxId = "LGPL-2.0-or-later";
|
||||
fullName = "GNU Library General Public License v2 or later";
|
||||
};
|
||||
|
||||
lgpl21 = spdx {
|
||||
spdxId = "LGPL-2.1";
|
||||
spdxId = "LGPL-2.1-only";
|
||||
fullName = "GNU Library General Public License v2.1 only";
|
||||
};
|
||||
|
||||
lgpl21Plus = spdx {
|
||||
spdxId = "LGPL-2.1+";
|
||||
spdxId = "LGPL-2.1-or-later";
|
||||
fullName = "GNU Library General Public License v2.1 or later";
|
||||
};
|
||||
|
||||
lgpl3 = spdx {
|
||||
spdxId = "LGPL-3.0";
|
||||
spdxId = "LGPL-3.0-only";
|
||||
fullName = "GNU Lesser General Public License v3.0 only";
|
||||
};
|
||||
|
||||
lgpl3Plus = spdx {
|
||||
spdxId = "LGPL-3.0+";
|
||||
spdxId = "LGPL-3.0-or-later";
|
||||
fullName = "GNU Lesser General Public License v3.0 or later";
|
||||
};
|
||||
|
||||
@ -440,6 +451,11 @@ lib.mapAttrs (n: v: v // { shortName = n; }) rec {
|
||||
fullName = "libpng License";
|
||||
};
|
||||
|
||||
libpng2 = {
|
||||
fullName = "libpng License v2"; # 1.6.36+
|
||||
url = "http://www.libpng.org/pub/png/src/libpng-LICENSE.txt";
|
||||
};
|
||||
|
||||
libtiff = spdx {
|
||||
spdxId = "libtiff";
|
||||
fullName = "libtiff License";
|
||||
@ -503,6 +519,12 @@ lib.mapAttrs (n: v: v // { shortName = n; }) rec {
|
||||
free = false;
|
||||
};
|
||||
|
||||
nasa13 = spdx {
|
||||
spdxId = "NASA-1.3";
|
||||
fullName = "NASA Open Source Agreement 1.3";
|
||||
free = false;
|
||||
};
|
||||
|
||||
ncsa = spdx {
|
||||
spdxId = "NCSA";
|
||||
fullName = "University of Illinois/NCSA Open Source License";
|
||||
@ -689,7 +711,7 @@ lib.mapAttrs (n: v: v // { shortName = n; }) rec {
|
||||
};
|
||||
|
||||
wxWindows = spdx {
|
||||
spdxId = "WXwindows";
|
||||
spdxId = "wxWindows";
|
||||
fullName = "wxWindows Library Licence, Version 3.1";
|
||||
};
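A minimal sketch of how these attributes are consumed in a package's meta block (package details are hypothetical); the rename only changes the SPDX id backing the attribute, not the attribute name itself:

meta = with lib; {
  description = "Hypothetical example package";
  license = licenses.gpl2Plus;   # now maps to SPDX "GPL-2.0-or-later"
  maintainers = with maintainers; [ ];
};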
lib/meta.nix
@ -41,16 +41,18 @@ rec {
|
||||
let x = builtins.parseDrvName name; in "${x.name}-${suffix}-${x.version}");
|
||||
|
||||
|
||||
/* Apply a function to each derivation and only to derivations in an attrset
|
||||
/* Apply a function to each derivation and only to derivations in an attrset.
|
||||
*/
|
||||
mapDerivationAttrset = f: set: lib.mapAttrs (name: pkg: if lib.isDerivation pkg then (f pkg) else pkg) set;
|
||||
|
||||
/* Set the nix-env priority of the package.
|
||||
*/
|
||||
setPrio = priority: addMetaAttrs { inherit priority; };
|
||||
|
||||
/* Decrease the nix-env priority of the package, i.e., other
|
||||
versions/variants of the package will be preferred.
|
||||
*/
|
||||
lowPrio = drv: addMetaAttrs { priority = 10; } drv;
|
||||
|
||||
lowPrio = setPrio 10;
|
||||
|
||||
/* Apply lowPrio to an attrset with derivations
|
||||
*/
|
||||
@ -60,8 +62,7 @@ rec {
|
||||
/* Increase the nix-env priority of the package, i.e., this
|
||||
version/variant of the package will be preferred.
|
||||
*/
|
||||
hiPrio = drv: addMetaAttrs { priority = -10; } drv;
|
||||
|
||||
hiPrio = setPrio (-10);
|
||||
|
||||
/* Apply hiPrio to an attrset with derivations
|
||||
*/
|
||||
|
@ -450,8 +450,7 @@ rec {
|
||||
|
||||
filterOverrides' = defs:
|
||||
let
|
||||
defaultPrio = 100;
|
||||
getPrio = def: if def.value._type or "" == "override" then def.value.priority else defaultPrio;
|
||||
getPrio = def: if def.value._type or "" == "override" then def.value.priority else defaultPriority;
|
||||
highestPrio = foldl' (prio: def: min (getPrio def) prio) 9999 defs;
|
||||
strip = def: if def.value._type or "" == "override" then def // { value = def.value.content; } else def;
|
||||
in {
|
||||
@ -534,6 +533,8 @@ rec {
|
||||
mkBefore = mkOrder 500;
|
||||
mkAfter = mkOrder 1500;
|
||||
|
||||
# The default priority for things that don't have a priority specified.
|
||||
defaultPriority = 100;
|
||||
|
||||
# Convenient property used to transfer all definitions and their
|
||||
# properties from one option to another. This property is useful for
|
||||
@ -556,8 +557,20 @@ rec {
|
||||
#
|
||||
mkAliasDefinitions = mkAliasAndWrapDefinitions id;
|
||||
mkAliasAndWrapDefinitions = wrap: option:
|
||||
mkIf (isOption option && option.isDefined) (wrap (mkMerge option.definitions));
|
||||
mkAliasIfDef option (wrap (mkMerge option.definitions));
|
||||
|
||||
# Similar to mkAliasAndWrapDefinitions but copies over the priority from the
|
||||
# option as well.
|
||||
#
|
||||
# If a priority is not set, it assumes a priority of defaultPriority.
|
||||
mkAliasAndWrapDefsWithPriority = wrap: option:
|
||||
let
|
||||
prio = option.highestPrio or defaultPriority;
|
||||
defsWithPrio = map (mkOverride prio) option.definitions;
|
||||
in mkAliasIfDef option (wrap (mkMerge defsWithPrio));
|
||||
|
||||
mkAliasIfDef = option:
|
||||
mkIf (isOption option && option.isDefined);
|
||||
|
||||
/* Compatibility. */
|
||||
fixMergeModules = modules: args: evalModules { inherit modules args; check = false; };
|
||||
@ -690,7 +703,16 @@ rec {
|
||||
use = id;
|
||||
};
|
||||
|
||||
doRename = { from, to, visible, warn, use }:
|
||||
/* Like ‘mkAliasOptionModule’, but copy over the priority of the option as well. */
|
||||
mkAliasOptionModuleWithPriority = from: to: doRename {
|
||||
inherit from to;
|
||||
visible = true;
|
||||
warn = false;
|
||||
use = id;
|
||||
withPriority = true;
|
||||
};
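A minimal sketch of using the new helper in a module's imports (option paths are hypothetical); definitions of the alias carry their mkForce/mkDefault priority over to the target option:

imports = [
  # "services.foo.enable" becomes an alias of "services.bar.enable"
  (lib.mkAliasOptionModuleWithPriority [ "services" "foo" "enable" ] [ "services" "bar" "enable" ])
];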
doRename = { from, to, visible, warn, use, withPriority ? false }:
|
||||
{ config, options, ... }:
|
||||
let
|
||||
fromOpt = getAttrFromPath from options;
|
||||
@ -708,7 +730,9 @@ rec {
|
||||
warnings = optional (warn && fromOpt.isDefined)
|
||||
"The option `${showOption from}' defined in ${showFiles fromOpt.files} has been renamed to `${showOption to}'.";
|
||||
}
|
||||
(mkAliasAndWrapDefinitions (setAttrByPath to) fromOpt)
|
||||
(if withPriority
|
||||
then mkAliasAndWrapDefsWithPriority (setAttrByPath to) fromOpt
|
||||
else mkAliasAndWrapDefinitions (setAttrByPath to) fromOpt)
|
||||
];
|
||||
};
|
||||
|
||||
|
@ -73,7 +73,7 @@ rec {
|
||||
# Get the commit id of a git repo
|
||||
# Example: commitIdFromGitRepo <nixpkgs/.git>
|
||||
commitIdFromGitRepo =
|
||||
let readCommitFromFile = path: file:
|
||||
let readCommitFromFile = file: path:
|
||||
with builtins;
|
||||
let fileName = toString path + "/" + file;
|
||||
packedRefsName = toString path + "/packed-refs";
|
||||
@ -85,7 +85,7 @@ rec {
|
||||
matchRef = match "^ref: (.*)$" fileContent;
|
||||
in if isNull matchRef
|
||||
then fileContent
|
||||
else readCommitFromFile path (lib.head matchRef)
|
||||
else readCommitFromFile (lib.head matchRef) path
|
||||
# Sometimes, the file isn't there at all and has been packed away in the
|
||||
# packed-refs file, so we have to grep through it:
|
||||
else if lib.pathExists packedRefsName
|
||||
@ -96,7 +96,7 @@ rec {
|
||||
then throw ("Could not find " + file + " in " + packedRefsName)
|
||||
else lib.head matchRef
|
||||
else throw ("Not a .git directory: " + path);
|
||||
in lib.flip readCommitFromFile "HEAD";
|
||||
in readCommitFromFile "HEAD";
|
||||
|
||||
pathHasContext = builtins.hasContext or (lib.hasPrefix builtins.storeDir);
|
||||
|
||||
|
@ -162,26 +162,6 @@ rec {
|
||||
*/
|
||||
makeBinPath = makeSearchPathOutput "bin" "bin";
|
||||
|
||||
|
||||
/* Construct a perl search path (such as $PERL5LIB)
|
||||
|
||||
Example:
|
||||
pkgs = import <nixpkgs> { }
|
||||
makePerlPath [ pkgs.perlPackages.libnet ]
|
||||
=> "/nix/store/n0m1fk9c960d8wlrs62sncnadygqqc6y-perl-Net-SMTP-1.25/lib/perl5/site_perl"
|
||||
*/
|
||||
# FIXME(zimbatm): this should be moved in perl-specific code
|
||||
makePerlPath = makeSearchPathOutput "lib" "lib/perl5/site_perl";
|
||||
|
||||
/* Construct a perl search path recursively including all dependencies (such as $PERL5LIB)
|
||||
|
||||
Example:
|
||||
pkgs = import <nixpkgs> { }
|
||||
makeFullPerlPath [ pkgs.perlPackages.CGI ]
|
||||
=> "/nix/store/fddivfrdc1xql02h9q500fpnqy12c74n-perl-CGI-4.38/lib/perl5/site_perl:/nix/store/8hsvdalmsxqkjg0c5ifigpf31vc4vsy2-perl-HTML-Parser-3.72/lib/perl5/site_perl:/nix/store/zhc7wh0xl8hz3y3f71nhlw1559iyvzld-perl-HTML-Tagset-3.20/lib/perl5/site_perl"
|
||||
*/
|
||||
makeFullPerlPath = deps: makePerlPath (lib.misc.closePropagation deps);
|
||||
|
||||
/* Depending on the boolean `cond', return either the given string
|
||||
or the empty string. Useful to concatenate against a bigger string.
|
||||
|
||||
@ -236,6 +216,26 @@ rec {
|
||||
in lenContent >= lenSuffix &&
|
||||
substring (lenContent - lenSuffix) lenContent content == suffix;
|
||||
|
||||
/* Determine whether a string contains the given infix
|
||||
|
||||
Type: hasInfix :: string -> string -> bool
|
||||
|
||||
Example:
|
||||
hasInfix "bc" "abcd"
|
||||
=> true
|
||||
hasInfix "ab" "abcd"
|
||||
=> true
|
||||
hasInfix "cd" "abcd"
|
||||
=> true
|
||||
hasInfix "foo" "abcd"
|
||||
=> false
|
||||
*/
|
||||
hasInfix = infix: content:
|
||||
let
|
||||
drop = x: substring 1 (stringLength x) x;
|
||||
in hasPrefix infix content
|
||||
|| content != "" && hasInfix infix (drop content);
|
||||
|
||||
/* Convert a string to a list of characters (i.e. singleton strings).
|
||||
This allows you to, e.g., map a function over each character. However,
|
||||
note that this will likely be horribly inefficient; Nix is not a
|
||||
|
@ -66,6 +66,46 @@ rec {
|
||||
# uname -r
|
||||
release = null;
|
||||
};
|
||||
|
||||
qemuArch =
|
||||
if final.isArm then "arm"
|
||||
else if final.isx86_64 then "x86_64"
|
||||
else if final.isx86 then "i386"
|
||||
else {
|
||||
"powerpc" = "ppc";
|
||||
"powerpc64" = "ppc64";
|
||||
"powerpc64le" = "ppc64";
|
||||
"mips64" = "mips";
|
||||
"mipsel64" = "mipsel";
|
||||
}.${final.parsed.cpu.name} or final.parsed.cpu.name;
|
||||
|
||||
emulator = pkgs: let
|
||||
qemu-user = pkgs.qemu.override {
|
||||
smartcardSupport = false;
|
||||
spiceSupport = false;
|
||||
openGLSupport = false;
|
||||
virglSupport = false;
|
||||
vncSupport = false;
|
||||
gtkSupport = false;
|
||||
sdlSupport = false;
|
||||
pulseSupport = false;
|
||||
smbdSupport = false;
|
||||
seccompSupport = false;
|
||||
hostCpuTargets = ["${final.qemuArch}-linux-user"];
|
||||
};
|
||||
wine-name = "wine${toString final.parsed.cpu.bits}";
|
||||
wine = (pkgs.winePackagesFor wine-name).minimal;
|
||||
in
|
||||
if final.parsed.kernel.name == pkgs.stdenv.hostPlatform.parsed.kernel.name &&
|
||||
(final.parsed.cpu.name == pkgs.stdenv.hostPlatform.parsed.cpu.name ||
|
||||
(final.isi686 && pkgs.stdenv.hostPlatform.isx86_64))
|
||||
then pkgs.runtimeShell
|
||||
else if final.isWindows
|
||||
then "${wine}/bin/${wine-name}"
|
||||
else if final.isLinux && pkgs.stdenv.hostPlatform.isLinux
|
||||
then "${qemu-user}/bin/qemu-${final.qemuArch}"
|
||||
else throw "Don't know how to run ${final.config} executables.";
|
||||
|
||||
} // mapAttrs (n: v: v final.parsed) inspect.predicates
|
||||
// args;
|
||||
in assert final.useAndroidPrebuilt -> final.isAndroid;
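A minimal usage sketch (assuming a cross package set such as pkgsCross.aarch64-multiplatform and a Linux build host): the platform's emulator can be used to run a cross-built binary.

pkgs.runCommand "emulated-hello" {} ''
  ${pkgsCross.aarch64-multiplatform.stdenv.hostPlatform.emulator pkgs} \
    ${pkgsCross.aarch64-multiplatform.hello}/bin/hello > $out
''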
|
||||
|
@ -2,7 +2,14 @@
|
||||
# `crossSystem`. They are put here for user convenience, but also used by cross
|
||||
# tests and linux cross stdenv building, so handle with care!
|
||||
{ lib }:
|
||||
let platforms = import ./platforms.nix { inherit lib; }; in
|
||||
let
|
||||
platforms = import ./platforms.nix { inherit lib; };
|
||||
|
||||
riscv = bits: {
|
||||
config = "riscv${bits}-unknown-linux-gnu";
|
||||
platform = platforms.riscv-multiplatform bits;
|
||||
};
|
||||
in
|
||||
|
||||
rec {
|
||||
#
|
||||
@ -40,7 +47,7 @@ rec {
|
||||
armv5te-android-prebuilt = rec {
|
||||
config = "armv5tel-unknown-linux-androideabi";
|
||||
sdkVer = "21";
|
||||
ndkVer = "10e";
|
||||
ndkVer = "18b";
|
||||
platform = platforms.armv5te-android;
|
||||
useAndroidPrebuilt = true;
|
||||
};
|
||||
@ -48,7 +55,7 @@ rec {
|
||||
armv7a-android-prebuilt = rec {
|
||||
config = "armv7a-unknown-linux-androideabi";
|
||||
sdkVer = "24";
|
||||
ndkVer = "17c";
|
||||
ndkVer = "18b";
|
||||
platform = platforms.armv7a-android;
|
||||
useAndroidPrebuilt = true;
|
||||
};
|
||||
@ -56,7 +63,7 @@ rec {
|
||||
aarch64-android-prebuilt = rec {
|
||||
config = "aarch64-unknown-linux-android";
|
||||
sdkVer = "24";
|
||||
ndkVer = "17c";
|
||||
ndkVer = "18b";
|
||||
platform = platforms.aarch64-multiplatform;
|
||||
useAndroidPrebuilt = true;
|
||||
};
|
||||
@ -92,10 +99,6 @@ rec {
|
||||
musl64 = { config = "x86_64-unknown-linux-musl"; };
|
||||
musl32 = { config = "i686-unknown-linux-musl"; };
|
||||
|
||||
riscv = bits: {
|
||||
config = "riscv${bits}-unknown-linux-gnu";
|
||||
platform = platforms.riscv-multiplatform bits;
|
||||
};
|
||||
riscv64 = riscv "64";
|
||||
riscv32 = riscv "32";
|
||||
|
||||
@ -107,16 +110,35 @@ rec {
|
||||
config = "arm-none-eabi";
|
||||
libc = "newlib";
|
||||
};
|
||||
armhf-embedded = {
|
||||
config = "arm-none-eabihf";
|
||||
libc = "newlib";
|
||||
};
|
||||
|
||||
aarch64-embedded = {
|
||||
config = "aarch64-none-elf";
|
||||
libc = "newlib";
|
||||
};
|
||||
|
||||
aarch64be-embedded = {
|
||||
config = "aarch64_be-none-elf";
|
||||
libc = "newlib";
|
||||
};
|
||||
|
||||
ppc-embedded = {
|
||||
config = "powerpc-none-eabi";
|
||||
libc = "newlib";
|
||||
};
|
||||
|
||||
ppcle-embedded = {
|
||||
config = "powerpcle-none-eabi";
|
||||
libc = "newlib";
|
||||
};
|
||||
|
||||
alpha-embedded = {
|
||||
config = "alpha-elf";
|
||||
libc = "newlib";
|
||||
};
|
||||
|
||||
i686-embedded = {
|
||||
config = "i686-elf";
|
||||
|
@ -9,7 +9,8 @@ let abis = lib.mapAttrs (_: abi: builtins.removeAttrs abi [ "assertions" ]) abis
|
||||
rec {
|
||||
patterns = rec {
|
||||
isi686 = { cpu = cpuTypes.i686; };
|
||||
isx86_64 = { cpu = cpuTypes.x86_64; };
|
||||
isx86_32 = { cpu = { family = "x86"; bits = 32; }; };
|
||||
isx86_64 = { cpu = { family = "x86"; bits = 64; }; };
|
||||
isPowerPC = { cpu = cpuTypes.powerpc; };
|
||||
isPower = { cpu = { family = "power"; }; };
|
||||
isx86 = { cpu = { family = "x86"; }; };
|
||||
|
@ -80,7 +80,11 @@ rec {
|
||||
armv8r = { bits = 32; significantByte = littleEndian; family = "arm"; version = "8"; };
|
||||
armv8m = { bits = 32; significantByte = littleEndian; family = "arm"; version = "8"; };
|
||||
aarch64 = { bits = 64; significantByte = littleEndian; family = "arm"; version = "8"; };
|
||||
aarch64_be = { bits = 64; significantByte = bigEndian; family = "arm"; version = "8"; };
|
||||
|
||||
i386 = { bits = 32; significantByte = littleEndian; family = "x86"; };
|
||||
i486 = { bits = 32; significantByte = littleEndian; family = "x86"; };
|
||||
i586 = { bits = 32; significantByte = littleEndian; family = "x86"; };
|
||||
i686 = { bits = 32; significantByte = littleEndian; family = "x86"; };
|
||||
x86_64 = { bits = 64; significantByte = littleEndian; family = "x86"; };
|
||||
|
||||
@ -92,6 +96,7 @@ rec {
|
||||
powerpc = { bits = 32; significantByte = bigEndian; family = "power"; };
|
||||
powerpc64 = { bits = 64; significantByte = bigEndian; family = "power"; };
|
||||
powerpc64le = { bits = 64; significantByte = littleEndian; family = "power"; };
|
||||
powerpcle = { bits = 32; significantByte = littleEndian; family = "power"; };
|
||||
|
||||
riscv32 = { bits = 32; significantByte = littleEndian; family = "riscv"; };
|
||||
riscv64 = { bits = 64; significantByte = littleEndian; family = "riscv"; };
|
||||
@ -101,6 +106,8 @@ rec {
|
||||
|
||||
wasm32 = { bits = 32; significantByte = littleEndian; family = "wasm"; };
|
||||
wasm64 = { bits = 64; significantByte = littleEndian; family = "wasm"; };
|
||||
|
||||
alpha = { bits = 64; significantByte = littleEndian; family = "alpha"; };
|
||||
|
||||
avr = { bits = 8; family = "avr"; };
|
||||
};
|
||||
@ -202,8 +209,15 @@ rec {
|
||||
abis = setTypes types.openAbi {
|
||||
cygnus = {};
|
||||
msvc = {};
|
||||
eabi = {};
|
||||
elf = {};
|
||||
|
||||
# Note: eabi is specific to ARM and PowerPC.
|
||||
# On PowerPC, this corresponds to PPCEABI.
|
||||
# On ARM, this corresponds to ARMEABI.
|
||||
eabi = { float = "soft"; };
|
||||
eabihf = { float = "hard"; };
|
||||
|
||||
# Other architectures should use ELF in embedded situations.
|
||||
elf = {};
|
||||
|
||||
androideabi = {};
|
||||
android = {
|
||||
@ -265,10 +279,14 @@ rec {
|
||||
"2" = # We only do 2-part hacks for things Nix already supports
|
||||
if elemAt l 1 == "cygwin"
|
||||
then { cpu = elemAt l 0; kernel = "windows"; abi = "cygnus"; }
|
||||
else if (elemAt l 1 == "eabi")
|
||||
then { cpu = elemAt l 0; vendor = "none"; kernel = "none"; abi = elemAt l 1; }
|
||||
else if (elemAt l 1 == "elf")
|
||||
then { cpu = elemAt l 0; vendor = "none"; kernel = "none"; abi = elemAt l 1; }
|
||||
# MSVC ought to be the default ABI so this case isn't needed. But then it
|
||||
# becomes difficult to handle the gnu* variants for Aarch32 correctly for
|
||||
# minGW. So it's easier to make gnu* the default for the MinGW, but
|
||||
# hack-in MSVC for the non-MinGW case right here.
|
||||
else if elemAt l 1 == "windows"
|
||||
then { cpu = elemAt l 0; kernel = "windows"; abi = "msvc"; }
|
||||
else if (elemAt l 1) == "elf"
|
||||
then { cpu = elemAt l 0; vendor = "unknown"; kernel = "none"; abi = elemAt l 1; }
|
||||
else { cpu = elemAt l 0; kernel = elemAt l 1; };
|
||||
"3" = # Awkwards hacks, beware!
|
||||
if elemAt l 1 == "apple"
|
||||
@ -276,13 +294,11 @@ rec {
|
||||
else if (elemAt l 1 == "linux") || (elemAt l 2 == "gnu")
|
||||
then { cpu = elemAt l 0; kernel = elemAt l 1; abi = elemAt l 2; }
|
||||
else if (elemAt l 2 == "mingw32") # autotools breaks on -gnu for window
|
||||
then { cpu = elemAt l 0; vendor = elemAt l 1; kernel = "windows"; abi = "gnu"; }
|
||||
then { cpu = elemAt l 0; vendor = elemAt l 1; kernel = "windows"; }
|
||||
else if hasPrefix "netbsd" (elemAt l 2)
|
||||
then { cpu = elemAt l 0; vendor = elemAt l 1; kernel = elemAt l 2; }
|
||||
else if (elemAt l 2 == "eabi")
|
||||
then { cpu = elemAt l 0; vendor = elemAt l 1; kernel = "none"; abi = elemAt l 2; }
|
||||
else if (elemAt l 2 == "elf")
|
||||
then { cpu = elemAt l 0; vendor = elemAt l 1; kernel = "none"; abi = elemAt l 2; }
|
||||
else if (elem (elemAt l 2) ["eabi" "eabihf" "elf"])
|
||||
then { cpu = elemAt l 0; vendor = "unknown"; kernel = elemAt l 1; abi = elemAt l 2; }
|
||||
else throw "Target specification with 3 components is ambiguous";
|
||||
"4" = { cpu = elemAt l 0; vendor = elemAt l 1; kernel = elemAt l 2; abi = elemAt l 3; };
|
||||
}.${toString (length l)}
|
||||
@ -314,13 +330,12 @@ rec {
|
||||
else getKernel args.kernel;
|
||||
abi =
|
||||
/**/ if args ? abi then getAbi args.abi
|
||||
else if isLinux parsed then
|
||||
else if isLinux parsed || isWindows parsed then
|
||||
if isAarch32 parsed then
|
||||
if lib.versionAtLeast (parsed.cpu.version or "0") "6"
|
||||
then abis.gnueabihf
|
||||
else abis.gnueabi
|
||||
else abis.gnu
|
||||
else if isWindows parsed then abis.gnu
|
||||
else abis.unknown;
|
||||
};
|
||||
|
||||
|
@ -467,6 +467,8 @@ rec {
|
||||
};
|
||||
|
||||
selectBySystem = system: {
|
||||
"i486-linux" = pc32;
|
||||
"i586-linux" = pc32;
|
||||
"i686-linux" = pc32;
|
||||
"x86_64-linux" = pc64;
|
||||
"armv5tel-linux" = sheevaplug;
|
||||
|
@ -401,42 +401,4 @@ runTests {
|
||||
expected = "«foo»";
|
||||
};
|
||||
|
||||
|
||||
# MISC
|
||||
|
||||
testOverridableDelayableArgsTest = {
|
||||
expr =
|
||||
let res1 = defaultOverridableDelayableArgs id {};
|
||||
res2 = defaultOverridableDelayableArgs id { a = 7; };
|
||||
res3 = let x = defaultOverridableDelayableArgs id { a = 7; };
|
||||
in (x.merge) { b = 10; };
|
||||
res4 = let x = defaultOverridableDelayableArgs id { a = 7; };
|
||||
in (x.merge) ( x: { b = 10; });
|
||||
res5 = let x = defaultOverridableDelayableArgs id { a = 7; };
|
||||
in (x.merge) ( x: { a = builtins.add x.a 3; });
|
||||
res6 = let x = defaultOverridableDelayableArgs id { a = 7; mergeAttrBy = { a = builtins.add; }; };
|
||||
y = x.merge {};
|
||||
in (y.merge) { a = 10; };
|
||||
|
||||
resRem7 = res6.replace (a: removeAttrs a ["a"]);
|
||||
|
||||
# fixed tests (delayed args): (when using them add some comments, please)
|
||||
resFixed1 =
|
||||
let x = defaultOverridableDelayableArgs id ( x: { a = 7; c = x.fixed.b; });
|
||||
y = x.merge (x: { name = "name-${builtins.toString x.fixed.c}"; });
|
||||
in (y.merge) { b = 10; };
|
||||
strip = attrs: removeAttrs attrs ["merge" "replace"];
|
||||
in all id
|
||||
[ ((strip res1) == { })
|
||||
((strip res2) == { a = 7; })
|
||||
((strip res3) == { a = 7; b = 10; })
|
||||
((strip res4) == { a = 7; b = 10; })
|
||||
((strip res5) == { a = 10; })
|
||||
((strip res6) == { a = 17; })
|
||||
((strip resRem7) == {})
|
||||
((strip resFixed1) == { a = 7; b = 10; c =10; name = "name-10"; })
|
||||
];
|
||||
expected = true;
|
||||
};
|
||||
|
||||
}
|
||||
|
@ -149,6 +149,12 @@ checkConfigOutput "1 2 3 4 5 6 7 8 9 10" config.result ./loaOf-with-long-list.ni
|
||||
# Check loaOf with many merges of lists.
|
||||
checkConfigOutput "1 2 3 4 5 6 7 8 9 10" config.result ./loaOf-with-many-list-merges.nix
|
||||
|
||||
# Check mkAliasOptionModuleWithPriority.
|
||||
checkConfigOutput "true" config.enable ./alias-with-priority.nix
|
||||
checkConfigOutput "true" config.enableAlias ./alias-with-priority.nix
|
||||
checkConfigOutput "false" config.enable ./alias-with-priority-can-override.nix
|
||||
checkConfigOutput "false" config.enableAlias ./alias-with-priority-can-override.nix
|
||||
|
||||
cat <<EOF
|
||||
====== module tests ======
|
||||
$pass Pass
|
||||
|
lib/tests/modules/alias-with-priority-can-override.nix (new file)
@ -0,0 +1,52 @@
|
||||
# This is a test to show that mkAliasOptionModule sets the priority correctly
|
||||
# for aliased options.
|
||||
|
||||
{ config, lib, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
{
|
||||
options = {
|
||||
# A simple boolean option that can be enabled or disabled.
|
||||
enable = lib.mkOption {
|
||||
type = types.nullOr types.bool;
|
||||
default = null;
|
||||
example = true;
|
||||
description = ''
|
||||
Some descriptive text
|
||||
'';
|
||||
};
|
||||
|
||||
# mkAliasOptionModule sets warnings, so this has to be defined.
|
||||
warnings = mkOption {
|
||||
internal = true;
|
||||
default = [];
|
||||
type = types.listOf types.str;
|
||||
example = [ "The `foo' service is deprecated and will go away soon!" ];
|
||||
description = ''
|
||||
This option allows modules to show warnings to users during
|
||||
the evaluation of the system configuration.
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
imports = [
|
||||
# Create an alias for the "enable" option.
|
||||
(mkAliasOptionModuleWithPriority [ "enableAlias" ] [ "enable" ])
|
||||
|
||||
# Disable the aliased option, but with a default (low) priority so it
|
||||
# should be able to be overridden by the next import.
|
||||
( { config, lib, ... }:
|
||||
{
|
||||
enableAlias = lib.mkForce false;
|
||||
}
|
||||
)
|
||||
|
||||
# Enable the normal (non-aliased) option.
|
||||
( { config, lib, ... }:
|
||||
{
|
||||
enable = true;
|
||||
}
|
||||
)
|
||||
];
|
||||
}
|
lib/tests/modules/alias-with-priority.nix (new file)
@ -0,0 +1,52 @@
|
||||
# This is a test to show that mkAliasOptionModule sets the priority correctly
|
||||
# for aliased options.
|
||||
|
||||
{ config, lib, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
{
|
||||
options = {
|
||||
# A simple boolean option that can be enabled or disabled.
|
||||
enable = lib.mkOption {
|
||||
type = types.nullOr types.bool;
|
||||
default = null;
|
||||
example = true;
|
||||
description = ''
|
||||
Some descriptive text
|
||||
'';
|
||||
};
|
||||
|
||||
# mkAliasOptionModule sets warnings, so this has to be defined.
|
||||
warnings = mkOption {
|
||||
internal = true;
|
||||
default = [];
|
||||
type = types.listOf types.str;
|
||||
example = [ "The `foo' service is deprecated and will go away soon!" ];
|
||||
description = ''
|
||||
This option allows modules to show warnings to users during
|
||||
the evaluation of the system configuration.
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
imports = [
|
||||
# Create an alias for the "enable" option.
|
||||
(mkAliasOptionModuleWithPriority [ "enableAlias" ] [ "enable" ])
|
||||
|
||||
# Disable the aliased option, but with a default (low) priority so it
|
||||
# should be able to be overridden by the next import.
|
||||
( { config, lib, ... }:
|
||||
{
|
||||
enableAlias = lib.mkDefault false;
|
||||
}
|
||||
)
|
||||
|
||||
# Enable the normal (non-aliased) option.
|
||||
( { config, lib, ... }:
|
||||
{
|
||||
enable = true;
|
||||
}
|
||||
)
|
||||
];
|
||||
}
|
@ -129,6 +129,13 @@ rec {
|
||||
/* Returns the current nixpkgs release number as string. */
|
||||
release = lib.strings.fileContents ../.version;
|
||||
|
||||
/* Returns the current nixpkgs release code name.
|
||||
|
||||
On each release the first letter is bumped and a new animal is chosen
|
||||
starting with that new letter.
|
||||
*/
|
||||
codeName = "Koi";
|
||||
|
||||
/* Returns the current nixpkgs version suffix as string. */
|
||||
versionSuffix =
|
||||
let suffixFile = ../.version-suffix;
|
||||
|
@ -169,6 +169,9 @@ rec {
|
||||
# s32 = sign 32 4294967296;
|
||||
};
|
||||
|
||||
# Alias of u16 for a port number
|
||||
port = ints.u16;
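A minimal sketch of using the new port type in a NixOS module (option path is hypothetical):

options.services.example.port = lib.mkOption {
  type = lib.types.port;    # u16: an integer between 0 and 65535
  default = 8080;
  description = "Port the hypothetical example service listens on.";
};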
|
||||
|
||||
float = mkOptionType rec {
|
||||
name = "float";
|
||||
description = "floating point number";
|
||||
|
@ -1,21 +1,41 @@
|
||||
/* List of NixOS maintainers.
|
||||
|
||||
handle = {
|
||||
name = "Real name";
|
||||
# Required
|
||||
name = "Your name";
|
||||
email = "address@example.org";
|
||||
|
||||
# Optional
|
||||
github = "GithubUsername";
|
||||
keys = [{
|
||||
longkeyid = "rsa2048/0x0123456789ABCDEF";
|
||||
fingerprint = "AAAA BBBB CCCC DDDD EEEE FFFF 0000 1111 2222 3333";
|
||||
}];
|
||||
};
|
||||
|
||||
where `name` is your real name, `email` is your maintainer email
|
||||
address and `github` is your GitHub handle (as it appears in the
|
||||
URL of your profile page, `https://github.com/<userhandle>`).
|
||||
address
|
||||
The only required fields are `name` and `email`.
|
||||
where
|
||||
|
||||
- `handle` is the handle you are going to use in nixpkgs expressions,
|
||||
- `name` is your, preferably real, name,
|
||||
- `email` is your maintainer email address, and
|
||||
- `github` is your GitHub handle (as it appears in the URL of your profile page, `https://github.com/<userhandle>`),
|
||||
- `keys` is a list of your PGP/GPG key IDs and fingerprints.
|
||||
|
||||
`handle == github` is strongly preferred whenever `github` is an acceptable attribute name and is short and convenient.
|
||||
|
||||
Add PGP/GPG keys only if you actually use them to sign commits and/or mail.
|
||||
|
||||
To get the required PGP/GPG values for a key run
|
||||
```shell
|
||||
gpg --keyid-format 0xlong --fingerprint <email> | head -n 2
|
||||
```
|
||||
|
||||
!!! Note that PGP/GPG values stored here are for informational purposes only; don't use this file as a source of truth.
|
||||
|
||||
More fields may be added in the future.
|
||||
|
||||
Please keep the list alphabetically sorted.
|
||||
See `../maintainers/scripts/check-maintainer-github-handles.sh`
|
||||
for an example on how to work with this data.
|
||||
See `./scripts/check-maintainer-github-handles.sh` for an example on how to work with this data.
|
||||
*/
|
||||
{
|
||||
"1000101" = {
|
||||
@ -73,6 +93,11 @@
|
||||
github = "acowley";
|
||||
name = "Anthony Cowley";
|
||||
};
|
||||
adamt = {
|
||||
email = "mail@adamtulinius.dk";
|
||||
github = "adamtulinius";
|
||||
name = "Adam Tulinius";
|
||||
};
|
||||
adelbertc = {
|
||||
email = "adelbertc@gmail.com";
|
||||
github = "adelbertc";
|
||||
@ -212,10 +237,15 @@
|
||||
name = "Nix Committers";
|
||||
};
|
||||
alunduil = {
|
||||
email = "alunduil@alunduil.com";
|
||||
email = "alunduil@gmail.com";
|
||||
github = "alunduil";
|
||||
name = "Alex Brandt";
|
||||
};
|
||||
amar1729 = {
|
||||
email = "amar.paul16@gmail.com";
|
||||
github = "amar1729";
|
||||
name = "Amar Paul";
|
||||
};
|
||||
ambrop72 = {
|
||||
email = "ambrop7@gmail.com";
|
||||
github = "ambrop72";
|
||||
@ -246,6 +276,11 @@
|
||||
github = "AndrewMorsillo";
|
||||
name = "Andrew Morsillo";
|
||||
};
|
||||
andersk = {
|
||||
email = "andersk@mit.edu";
|
||||
github = "andersk";
|
||||
name = "Anders Kaseorg";
|
||||
};
|
||||
AndersonTorres = {
|
||||
email = "torres.anderson.85@protonmail.com";
|
||||
github = "AndersonTorres";
|
||||
@ -326,6 +361,11 @@
|
||||
github = "apeyroux";
|
||||
name = "Alexandre Peyroux";
|
||||
};
|
||||
ar1a = {
|
||||
email = "aria@ar1as.space";
|
||||
github = "ar1a";
|
||||
name = "Aria Edmonds";
|
||||
};
|
||||
arcadio = {
|
||||
email = "arc@well.ox.ac.uk";
|
||||
github = "arcadio";
|
||||
@ -376,6 +416,11 @@
|
||||
github = "asppsa";
|
||||
name = "Alastair Pharo";
|
||||
};
|
||||
astro = {
|
||||
email = "astro@spaceboyz.net";
|
||||
github = "astro";
|
||||
name = "Astro";
|
||||
};
|
||||
astsmtl = {
|
||||
email = "astsmtl@yandex.ru";
|
||||
github = "astsmtl";
|
||||
@ -391,6 +436,11 @@
|
||||
github = "aszlig";
|
||||
name = "aszlig";
|
||||
};
|
||||
atnnn = {
|
||||
email = "etienne@atnnn.com";
|
||||
github = "atnnn";
|
||||
name = "Etienne Laurin";
|
||||
};
|
||||
auntie = {
|
||||
email = "auntieNeo@gmail.com";
|
||||
github = "auntie";
|
||||
@ -406,6 +456,11 @@
|
||||
github = "AveryLychee";
|
||||
name = "Avery Lychee";
|
||||
};
|
||||
averelld = {
|
||||
email = "averell+nixos@rxd4.com";
|
||||
github = "averelld";
|
||||
name = "averelld";
|
||||
};
|
||||
avnik = {
|
||||
email = "avn@avnik.info";
|
||||
github = "avnik";
|
||||
@ -445,6 +500,11 @@
|
||||
github = "bandresen";
|
||||
name = "Benjamin Andresen";
|
||||
};
|
||||
baracoder = {
|
||||
email = "baracoder@googlemail.com";
|
||||
github = "baracoder";
|
||||
name = "Herman Fries";
|
||||
};
|
||||
barrucadu = {
|
||||
email = "mike@barrucadu.co.uk";
|
||||
github = "barrucadu";
|
||||
@ -619,6 +679,11 @@
|
||||
github = "bramd";
|
||||
name = "Bram Duvigneau";
|
||||
};
|
||||
braydenjw = {
|
||||
email = "nixpkgs@willenborg.ca";
|
||||
github = "braydenjw";
|
||||
name = "Brayden Willenborg";
|
||||
};
|
||||
brian-dawn = {
|
||||
email = "brian.t.dawn@gmail.com";
|
||||
github = "brian-dawn";
|
||||
@ -634,6 +699,11 @@
|
||||
github = "bstrik";
|
||||
name = "Berno Strik";
|
||||
};
|
||||
buffet = {
|
||||
email = "niclas@countingsort.com";
|
||||
github = "buffet";
|
||||
name = "Niclas Meyer";
|
||||
};
|
||||
bugworm = {
|
||||
email = "bugworm@zoho.com";
|
||||
github = "bugworm";
|
||||
@ -673,6 +743,11 @@
|
||||
github = "campadrenalin";
|
||||
name = "Philip Horger";
|
||||
};
|
||||
candeira = {
|
||||
email = "javier@candeira.com";
|
||||
github = "candeira";
|
||||
name = "Javier Candeira";
|
||||
};
|
||||
canndrew = {
|
||||
email = "shum@canndrew.org";
|
||||
github = "canndrew";
|
||||
@ -747,6 +822,11 @@
|
||||
github = "ChengCat";
|
||||
name = "Yucheng Zhang";
|
||||
};
|
||||
chessai = {
|
||||
email = "chessai1996@gmail.com";
|
||||
github = "chessai";
|
||||
name = "Daniel Cartwright";
|
||||
};
|
||||
chiiruno = {
|
||||
email = "okinan@protonmail.com";
|
||||
github = "chiiruno";
|
||||
@ -882,11 +962,6 @@
|
||||
github = "couchemar";
|
||||
name = "Andrey Pavlov";
|
||||
};
|
||||
countingsort = {
|
||||
email = "niclas@countingsort.com";
|
||||
github = "countingsort";
|
||||
name = "Niclas Meyer";
|
||||
};
|
||||
cpages = {
|
||||
email = "page@ruiec.cat";
|
||||
github = "cpages";
|
||||
@ -957,6 +1032,11 @@
|
||||
github = "danielfullmer";
|
||||
name = "Daniel Fullmer";
|
||||
};
|
||||
das-g = {
|
||||
email = "nixpkgs@raphael.dasgupta.ch";
|
||||
github = "das-g";
|
||||
name = "Raphael Das Gupta";
|
||||
};
|
||||
das_j = {
|
||||
email = "janne@hess.ooo";
|
||||
github = "dasJ";
|
||||
@ -1284,6 +1364,16 @@
|
||||
github = "ellis";
|
||||
name = "Ellis Whitehead";
|
||||
};
|
||||
elohmeier = {
|
||||
email = "elo-nixos@nerdworks.de";
|
||||
github = "elohmeier";
|
||||
name = "Enno Lohmeier";
|
||||
};
|
||||
elseym = {
|
||||
email = "elseym@me.com";
|
||||
github = "elseym";
|
||||
name = "Simon Waibl";
|
||||
};
|
||||
elvishjerricco = {
|
||||
email = "elvishjerricco@gmail.com";
|
||||
github = "ElvishJerricco";
|
||||
@ -1423,7 +1513,7 @@
|
||||
name = "Felipe Espinoza";
|
||||
};
|
||||
fgaz = {
|
||||
email = "francygazz@gmail.com";
|
||||
email = "fgaz@fgaz.me";
|
||||
github = "fgaz";
|
||||
name = "Francesco Gazzetta";
|
||||
};
|
||||
@ -1512,15 +1602,30 @@
|
||||
github = "ftrvxmtrx";
|
||||
name = "Siarhei Zirukin";
|
||||
};
|
||||
fuerbringer = {
|
||||
email = "severin@fuerbringer.info";
|
||||
github = "fuerbringer";
|
||||
name = "Severin Fürbringer";
|
||||
};
|
||||
funfunctor = {
|
||||
email = "eocallaghan@alterapraxis.com";
|
||||
name = "Edward O'Callaghan";
|
||||
};
|
||||
fusion809 = {
|
||||
email = "brentonhorne77@gmail.com";
|
||||
github = "fusion809";
|
||||
name = "Brenton Horne";
|
||||
};
|
||||
fuuzetsu = {
|
||||
email = "fuuzetsu@fuuzetsu.co.uk";
|
||||
github = "fuuzetsu";
|
||||
name = "Mateusz Kowalczyk";
|
||||
};
|
||||
fuwa = {
|
||||
email = "echowss@gmail.com";
|
||||
github = "fuwa0529";
|
||||
name = "Haruka Akiyama";
|
||||
};
|
||||
fuzzy-id = {
|
||||
email = "hacking+nixos@babibo.de";
|
||||
name = "Thomas Bach";
|
||||
@ -1680,6 +1785,16 @@
|
||||
github = "hamhut1066";
|
||||
name = "Hamish Hutchings";
|
||||
};
|
||||
hansjoergschurr = {
|
||||
email = "commits@schurr.at";
|
||||
github = "hansjoergschurr";
|
||||
name = "Hans-Jörg Schurr";
|
||||
};
|
||||
HaoZeke = {
|
||||
email = "r95g10@gmail.com";
|
||||
github = "haozeke";
|
||||
name = "Rohit Goswami";
|
||||
};
|
||||
haslersn = {
|
||||
email = "haslersn@fius.informatik.uni-stuttgart.de";
|
||||
github = "haslersn";
|
||||
@ -1739,6 +1854,11 @@
|
||||
email = "t@larkery.com";
|
||||
name = "Tom Hinton";
|
||||
};
|
||||
hlolli = {
|
||||
email = "hlolli@gmail.com";
|
||||
github = "hlolli";
|
||||
name = "Hlodver Sigurdsson";
|
||||
};
|
||||
hodapp = {
|
||||
email = "hodapp87@gmail.com";
|
||||
github = "Hodapp87";
|
||||
@ -1829,15 +1949,34 @@
|
||||
github = "infinisil";
|
||||
name = "Silvan Mosberger";
|
||||
};
|
||||
ingenieroariel = {
|
||||
email = "ariel@nunez.co";
|
||||
github = "ingenieroariel";
|
||||
name = "Ariel Nunez";
|
||||
};
|
||||
ironpinguin = {
|
||||
email = "michele@catalano.de";
|
||||
github = "ironpinguin";
|
||||
name = "Michele Catalano";
|
||||
};
|
||||
ivan = {
|
||||
email = "ivan@ludios.org";
|
||||
github = "ivan";
|
||||
name = "Ivan Kozik";
|
||||
};
|
||||
ivan-tkatchev = {
|
||||
email = "tkatchev@gmail.com";
|
||||
name = "Ivan Tkatchev";
|
||||
};
|
||||
ivegotasthma = {
|
||||
email = "ivegotasthma@protonmail.com";
|
||||
github = "ivegotasthma";
|
||||
name = "John Doe";
|
||||
keys = [{
|
||||
longkeyid = "rsa4096/09AC52AEA87817A4";
|
||||
fingerprint = "4008 2A5B 56A4 79B9 83CB 95FD 09AC 52AE A878 17A4";
|
||||
}];
|
||||
};
|
||||
ixmatus = {
|
||||
email = "parnell@digitalmentat.com";
|
||||
github = "ixmatus";
|
||||
@ -1863,6 +2002,16 @@
|
||||
github = "jagajaga";
|
||||
name = "Arseniy Seroka";
|
||||
};
|
||||
jakelogemann = {
|
||||
email = "jake.logemann@gmail.com";
|
||||
github = "jakelogemann";
|
||||
name = "Jake Logemann";
|
||||
};
|
||||
jakewaksbaum = {
|
||||
email = "jake.waksbaum@gmail.com";
|
||||
github = "jbaum98";
|
||||
name = "Jake Waksbaum";
|
||||
};
|
||||
jammerful = {
|
||||
email = "jammerful@gmail.com";
|
||||
github = "jammerful";
|
||||
@ -1977,6 +2126,11 @@
|
||||
github = "jhhuh";
|
||||
name = "Ji-Haeng Huh";
|
||||
};
|
||||
jhillyerd = {
|
||||
email = "james+nixos@hillyerd.com";
|
||||
github = "jhillyerd";
|
||||
name = "James Hillyerd";
|
||||
};
|
||||
jirkamarsik = {
|
||||
email = "jiri.marsik89@gmail.com";
|
||||
github = "jirkamarsik";
|
||||
@ -1992,6 +2146,11 @@
|
||||
github = "jluttine";
|
||||
name = "Jaakko Luttinen";
|
||||
};
|
||||
jmagnusj = {
|
||||
email = "jmagnusj@gmail.com";
|
||||
github = "magnusjonsson";
|
||||
name = "Johan Magnus Jonsson";
|
||||
};
|
||||
jmettes = {
|
||||
email = "jonathan@jmettes.com";
|
||||
github = "jmettes";
|
||||
@ -2075,6 +2234,11 @@
|
||||
github = "joncojonathan";
|
||||
name = "Jonathan Haddock";
|
||||
};
|
||||
jorsn = {
|
||||
name = "Johannes Rosenberger";
|
||||
email = "johannes@jorsn.eu";
|
||||
github = "jorsn";
|
||||
};
|
||||
jpdoyle = {
|
||||
email = "joethedoyle@gmail.com";
|
||||
github = "jpdoyle";
|
||||
@ -2203,11 +2367,20 @@
|
||||
github = "kirelagin";
|
||||
name = "Kirill Elagin";
|
||||
};
|
||||
kisonecat = {
|
||||
email = "kisonecat@gmail.com";
|
||||
github = "kisonecat";
|
||||
name = "Jim Fowler";
|
||||
};
|
||||
kkallio = {
|
||||
email = "tierpluspluslists@gmail.com";
|
||||
name = "Karn Kallio";
|
||||
};
|
||||
|
||||
klntsky = {
|
||||
email = "klntsky@gmail.com";
|
||||
name = "Vladimir Kalnitsky";
|
||||
github = "8084";
|
||||
};
|
||||
kmeakin = {
|
||||
email = "karlwfmeakin@gmail.com";
|
||||
name = "Karl Meakin";
|
||||
@ -2219,6 +2392,11 @@
|
||||
github = "knedlsepp";
|
||||
name = "Josef Kemetmüller";
|
||||
};
|
||||
knl = {
|
||||
email = "nikola@knezevic.co";
|
||||
github = "knl";
|
||||
name = "Nikola Knežević";
|
||||
};
|
||||
konimex = {
|
||||
email = "herdiansyah@netc.eu";
|
||||
github = "konimex";
|
||||
@ -2394,6 +2572,11 @@
|
||||
github = "listx";
|
||||
name = "Linus Arver";
|
||||
};
|
||||
lionello = {
|
||||
email = "lio@lunesu.com";
|
||||
github = "lionello";
|
||||
name = "Lionello Lunesu";
|
||||
};
|
||||
lluchs = {
|
||||
email = "lukas.werling@gmail.com";
|
||||
github = "lluchs";
|
||||
@ -2666,6 +2849,11 @@
|
||||
github = "melsigl";
|
||||
name = "Melanie B. Sigl";
|
||||
};
|
||||
melkor333 = {
|
||||
email = "samuel@ton-kunst.ch";
|
||||
github = "melkor333";
|
||||
name = "Samuel Ruprecht";
|
||||
};
|
||||
metabar = {
|
||||
email = "softs@metabarcoding.org";
|
||||
name = "Celine Mercier";
|
||||
@ -2675,6 +2863,11 @@
|
||||
github = "mgdelacroix";
|
||||
name = "Miguel de la Cruz";
|
||||
};
|
||||
mgregoire = {
|
||||
email = "gregoire@martinache.net";
|
||||
github = "M-Gregoire";
|
||||
name = "Gregoire Martinache";
|
||||
};
|
||||
mgttlinger = {
|
||||
email = "megoettlinger@gmail.com";
|
||||
github = "mgttlinger";
|
||||
@ -2933,6 +3126,11 @@
|
||||
github = "nadrieril";
|
||||
name = "Nadrieril Feneanar";
|
||||
};
|
||||
nalbyuites = {
|
||||
email = "ashijit007@gmail.com";
|
||||
github = "nalbyuites";
|
||||
name = "Ashijit Pramanik";
|
||||
};
|
||||
namore = {
|
||||
email = "namor@hemio.de";
|
||||
github = "namore";
|
||||
@ -3091,6 +3289,11 @@
|
||||
github = "nyarly";
|
||||
name = "Judson Lester";
|
||||
};
|
||||
nzhang-zh = {
|
||||
email = "n.zhang.hp.au@gmail.com";
|
||||
github = "nzhang-zh";
|
||||
name = "Ning Zhang";
|
||||
};
|
||||
obadz = {
|
||||
email = "obadz-nixos@obadz.com";
|
||||
github = "obadz";
|
||||
@ -3169,6 +3372,10 @@
|
||||
email = "oxij@oxij.org";
|
||||
github = "oxij";
|
||||
name = "Jan Malakhovski";
|
||||
keys = [{
|
||||
longkeyid = "rsa2048/0x0E6CA66E5C557AA8";
|
||||
fingerprint = "514B B966 B46E 3565 0508 86E8 0E6C A66E 5C55 7AA8";
|
||||
}];
|
||||
};
|
||||
oyren = {
|
||||
email = "m.scheuren@oyra.eu";
|
||||
@ -3329,6 +3536,11 @@
|
||||
github = "pkmx";
|
||||
name = "Chih-Mao Chen";
|
||||
};
|
||||
plchldr = {
|
||||
email = "mail@oddco.de";
|
||||
github = "plchldr";
|
||||
name = "Jonas Beyer";
|
||||
};
|
||||
plcplc = {
|
||||
email = "plcplc@gmail.com";
|
||||
github = "plcplc";
|
||||
@ -3425,7 +3637,7 @@
|
||||
};
|
||||
psyanticy = {
|
||||
email = "iuns@outlook.fr";
|
||||
github = "Assassinkin";
|
||||
github = "PsyanticY";
|
||||
name = "Psyanticy";
|
||||
};
|
||||
puffnfresh = {
|
||||
@ -3776,11 +3988,30 @@
|
||||
github = "sauyon";
|
||||
name = "Sauyon Lee";
|
||||
};
|
||||
sb0 = {
|
||||
email = "sb@m-labs.hk";
|
||||
github = "sbourdeauducq";
|
||||
name = "Sébastien Bourdeauducq";
|
||||
};
|
||||
sboosali = {
|
||||
email = "SamBoosalis@gmail.com";
|
||||
github = "sboosali";
|
||||
name = "Sam Boosalis";
|
||||
};
|
||||
scalavision = {
|
||||
email = "scalavision@gmail.com";
|
||||
github = "scalavision";
|
||||
name = "Tom Sorlie";
|
||||
};
|
||||
schmitthenner = {
|
||||
email = "development@schmitthenner.eu";
|
||||
github = "fkz";
|
||||
name = "Fabian Schmitthenner";
|
||||
};
|
||||
schmittlauch = {
|
||||
email = "t.schmittlauch+nixos@orlives.de";
|
||||
github = "schmittlauch";
|
||||
};
|
||||
schneefux = {
|
||||
email = "schneefux+nixos_pkg@schneefux.xyz";
|
||||
github = "schneefux";
|
||||
@ -3800,6 +4031,11 @@
|
||||
github = "scolobb";
|
||||
name = "Sergiu Ivanov";
|
||||
};
|
||||
screendriver = {
|
||||
email = "nix@echooff.de";
|
||||
github = "screendriver";
|
||||
name = "Christian Rackerseder";
|
||||
};
|
||||
Scriptkiddi = {
|
||||
email = "nixos@scriptkiddi.de";
|
||||
github = "scriptkiddi";
|
||||
@ -3840,6 +4076,11 @@
|
||||
github = "seppeljordan";
|
||||
name = "Sebastian Jordan";
|
||||
};
|
||||
seqizz = {
|
||||
email = "seqizz@gmail.com";
|
||||
github = "seqizz";
|
||||
name = "Gurkan Gur";
|
||||
};
|
||||
sfrijters = {
|
||||
email = "sfrijters@gmail.com";
|
||||
github = "sfrijters";
|
||||
@ -3983,6 +4224,11 @@
|
||||
github = "spacefrogg";
|
||||
name = "Michael Raitza";
|
||||
};
|
||||
spacekookie = {
|
||||
email = "kookie@spacekookie.de";
|
||||
github = "spacekookie";
|
||||
name = "Katharina Fey";
|
||||
};
|
||||
spencerjanssen = {
|
||||
email = "spencerjanssen@gmail.com";
|
||||
github = "spencerjanssen";
|
||||
@ -4123,6 +4369,11 @@
|
||||
github = "t184256";
|
||||
name = "Alexander Sosedkin";
|
||||
};
|
||||
tadeokondrak = {
|
||||
email = "me@tadeo.ca";
|
||||
github = "tadeokondrak";
|
||||
name = "Tadeo Kondrak";
|
||||
};
|
||||
tadfisher = {
|
||||
email = "tadfisher@gmail.com";
|
||||
github = "tadfisher";
|
||||
@ -4168,6 +4419,11 @@
|
||||
github = "talyz";
|
||||
name = "Kim Lindberger";
|
||||
};
|
||||
taneb = {
|
||||
email = "nvd1234@gmail.com";
|
||||
github = "Taneb";
|
||||
name = "Nathan van Doorn";
|
||||
};
|
||||
tari = {
|
||||
email = "peter@taricorp.net";
|
||||
github = "tari";
|
||||
@ -4183,6 +4439,11 @@
|
||||
github = "tazjin";
|
||||
name = "Vincent Ambo";
|
||||
};
|
||||
tbenst = {
|
||||
email = "nix@tylerbenster.com";
|
||||
github = "tbenst";
|
||||
name = "Tyler Benster";
|
||||
};
|
||||
teh = {
|
||||
email = "tehunger@gmail.com";
|
||||
github = "teh";
|
||||
@ -4208,6 +4469,11 @@
|
||||
github = "tex";
|
||||
name = "Milan Svoboda";
|
||||
};
|
||||
tg-x = {
|
||||
email = "*@tg-x.net";
|
||||
github = "tg-x";
|
||||
name = "TG ⊗ Θ";
|
||||
};
|
||||
thall = {
|
||||
email = "niclas.thall@gmail.com";
|
||||
github = "thall";
|
||||
@ -4527,6 +4793,11 @@
|
||||
github = "vklquevs";
|
||||
name = "vklquevs";
|
||||
};
|
||||
vlaci = {
|
||||
email = "laszlo.vasko@outlook.com";
|
||||
github = "vlaci";
|
||||
name = "László Vaskó";
|
||||
};
|
||||
vlstill = {
|
||||
email = "xstill@fi.muni.cz";
|
||||
github = "vlstill";
|
||||
@ -4773,6 +5044,11 @@
|
||||
github = "umazalakain";
|
||||
name = "Unai Zalakain";
|
||||
};
|
||||
zaninime = {
|
||||
email = "francesco@zanini.me";
|
||||
github = "zaninime";
|
||||
name = "Francesco Zanini";
|
||||
};
|
||||
zarelit = {
|
||||
email = "david@zarel.net";
|
||||
github = "zarelit";
|
||||
@ -4827,4 +5103,9 @@
|
||||
github = "zzamboni";
|
||||
name = "Diego Zamboni";
|
||||
};
|
||||
mredaelli = {
|
||||
email = "massimo@typish.io";
|
||||
github = "mredaelli";
|
||||
name = "Massimo Redaelli";
|
||||
};
|
||||
}
|
||||
|
@ -4,7 +4,7 @@ stdenv.mkDerivation {
|
||||
name = "nix-generate-from-cpan-3";
|
||||
|
||||
buildInputs = with perlPackages; [
|
||||
makeWrapper perl CPANMeta GetoptLongDescriptive CPANPLUS Readonly LogLog4perl
|
||||
makeWrapper perl GetoptLongDescriptive CPANPLUS Readonly LogLog4perl
|
||||
];
|
||||
|
||||
phases = [ "installPhase" ];
|
||||
|
@ -1,361 +1,5 @@
|
||||
#! /usr/bin/env nix-shell
|
||||
#! nix-shell -i python3 -p "python3.withPackages(ps: with ps; [ packaging requests toolz ])" -p git
|
||||
#!/bin/sh
|
||||
build=`nix-build -E "with import (fetchTarball "channel:nixpkgs-unstable") {}; python3.withPackages(ps: with ps; [ packaging requests toolz ])"`
|
||||
python=${build}/bin/python
|
||||
exec ${python} pkgs/development/interpreters/python/update-python-libraries/update-python-libraries.py $@
|
||||
|
||||
"""
|
||||
Update a Python package expression by passing in the `.nix` file, or the directory containing it.
|
||||
You can pass in multiple files or paths.
|
||||
|
||||
You'll likely want to use
|
||||
``
|
||||
$ ./update-python-libraries ../../pkgs/development/python-modules/*
|
||||
``
|
||||
to update all libraries in that folder.
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
import requests
|
||||
import toolz
|
||||
from concurrent.futures import ThreadPoolExecutor as Pool
|
||||
from packaging.version import Version as _Version
|
||||
from packaging.version import InvalidVersion
|
||||
from packaging.specifiers import SpecifierSet
|
||||
import collections
|
||||
import subprocess
|
||||
|
||||
INDEX = "https://pypi.io/pypi"
|
||||
"""url of PyPI"""
|
||||
|
||||
EXTENSIONS = ['tar.gz', 'tar.bz2', 'tar', 'zip', '.whl']
|
||||
"""Permitted file extensions. These are evaluated from left to right and the first occurance is returned."""
|
||||
|
||||
PRERELEASES = False
|
||||
|
||||
import logging
|
||||
logging.basicConfig(level=logging.INFO)
|
||||
|
||||
|
||||
class Version(_Version, collections.abc.Sequence):
|
||||
|
||||
def __init__(self, version):
|
||||
super().__init__(version)
|
||||
# We cannot use `str(Version(0.04.21))` because that becomes `0.4.21`
|
||||
# https://github.com/avian2/unidecode/issues/13#issuecomment-354538882
|
||||
self.raw_version = version
|
||||
|
||||
def __getitem__(self, i):
|
||||
return self._version.release[i]
|
||||
|
||||
def __len__(self):
|
||||
return len(self._version.release)
|
||||
|
||||
def __iter__(self):
|
||||
yield from self._version.release
|
||||
|
||||
|
||||
def _get_values(attribute, text):
|
||||
"""Match attribute in text and return all matches.
|
||||
|
||||
:returns: List of matches.
|
||||
"""
|
||||
regex = '{}\s+=\s+"(.*)";'.format(attribute)
|
||||
regex = re.compile(regex)
|
||||
values = regex.findall(text)
|
||||
return values
|
||||
|
||||
def _get_unique_value(attribute, text):
|
||||
"""Match attribute in text and return unique match.
|
||||
|
||||
:returns: Single match.
|
||||
"""
|
||||
values = _get_values(attribute, text)
|
||||
n = len(values)
|
||||
if n > 1:
|
||||
raise ValueError("found too many values for {}".format(attribute))
|
||||
elif n == 1:
|
||||
return values[0]
|
||||
else:
|
||||
raise ValueError("no value found for {}".format(attribute))
|
||||
|
||||
def _get_line_and_value(attribute, text):
|
||||
"""Match attribute in text. Return the line and the value of the attribute."""
|
||||
regex = '({}\s+=\s+"(.*)";)'.format(attribute)
|
||||
regex = re.compile(regex)
|
||||
value = regex.findall(text)
|
||||
n = len(value)
|
||||
if n > 1:
|
||||
raise ValueError("found too many values for {}".format(attribute))
|
||||
elif n == 1:
|
||||
return value[0]
|
||||
else:
|
||||
raise ValueError("no value found for {}".format(attribute))
|
||||
|
||||
|
||||
def _replace_value(attribute, value, text):
|
||||
"""Search and replace value of attribute in text."""
|
||||
old_line, old_value = _get_line_and_value(attribute, text)
|
||||
new_line = old_line.replace(old_value, value)
|
||||
new_text = text.replace(old_line, new_line)
|
||||
return new_text
|
||||
|
||||
def _fetch_page(url):
|
||||
r = requests.get(url)
|
||||
if r.status_code == requests.codes.ok:
|
||||
return r.json()
|
||||
else:
|
||||
raise ValueError("request for {} failed".format(url))
|
||||
|
||||
|
||||
SEMVER = {
|
||||
'major' : 0,
|
||||
'minor' : 1,
|
||||
'patch' : 2,
|
||||
}
|
||||
|
||||
|
||||
def _determine_latest_version(current_version, target, versions):
|
||||
"""Determine latest version, given `target`.
|
||||
"""
|
||||
current_version = Version(current_version)
|
||||
|
||||
def _parse_versions(versions):
|
||||
for v in versions:
|
||||
try:
|
||||
yield Version(v)
|
||||
except InvalidVersion:
|
||||
pass
|
||||
|
||||
versions = _parse_versions(versions)
|
||||
|
||||
index = SEMVER[target]
|
||||
|
||||
ceiling = list(current_version[0:index])
|
||||
if len(ceiling) == 0:
|
||||
ceiling = None
|
||||
else:
|
||||
ceiling[-1]+=1
|
||||
ceiling = Version(".".join(map(str, ceiling)))
|
||||
|
||||
# We do not want prereleases
|
||||
versions = SpecifierSet(prereleases=PRERELEASES).filter(versions)
|
||||
|
||||
if ceiling is not None:
|
||||
versions = SpecifierSet(f"<{ceiling}").filter(versions)
|
||||
|
||||
return (max(sorted(versions))).raw_version
|
||||
|
||||
|
||||
def _get_latest_version_pypi(package, extension, current_version, target):
|
||||
"""Get latest version and hash from PyPI."""
|
||||
url = "{}/{}/json".format(INDEX, package)
|
||||
json = _fetch_page(url)
|
||||
|
||||
versions = json['releases'].keys()
|
||||
version = _determine_latest_version(current_version, target, versions)
|
||||
|
||||
try:
|
||||
releases = json['releases'][version]
|
||||
except KeyError as e:
|
||||
raise KeyError('Could not find version {} for {}'.format(version, package)) from e
|
||||
for release in releases:
|
||||
if release['filename'].endswith(extension):
|
||||
# TODO: In case of wheel we need to do further checks!
|
||||
sha256 = release['digests']['sha256']
|
||||
break
|
||||
else:
|
||||
sha256 = None
|
||||
return version, sha256
|
||||
|
||||
|
||||
def _get_latest_version_github(package, extension, current_version, target):
|
||||
raise ValueError("updating from GitHub is not yet supported.")
|
||||
|
||||
|
||||
FETCHERS = {
|
||||
'fetchFromGitHub' : _get_latest_version_github,
|
||||
'fetchPypi' : _get_latest_version_pypi,
|
||||
'fetchurl' : _get_latest_version_pypi,
|
||||
}
|
||||
|
||||
|
||||
DEFAULT_SETUPTOOLS_EXTENSION = 'tar.gz'
|
||||
|
||||
|
||||
FORMATS = {
|
||||
'setuptools' : DEFAULT_SETUPTOOLS_EXTENSION,
|
||||
'wheel' : 'whl'
|
||||
}
|
||||
|
||||
def _determine_fetcher(text):
|
||||
# Count occurrences of fetchers.
|
||||
nfetchers = sum(text.count('src = {}'.format(fetcher)) for fetcher in FETCHERS.keys())
|
||||
if nfetchers == 0:
|
||||
raise ValueError("no fetcher.")
|
||||
elif nfetchers > 1:
|
||||
raise ValueError("multiple fetchers.")
|
||||
else:
|
||||
# Then we check which fetcher to use.
|
||||
for fetcher in FETCHERS.keys():
|
||||
if 'src = {}'.format(fetcher) in text:
|
||||
return fetcher
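# Illustrative example (not part of the original script): an expression
# containing `src = fetchPypi { ... };` yields 'fetchPypi'; an expression with
# no recognised fetcher, or with more than one, raises ValueError instead of
# guessing.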
|
||||
|
||||
|
||||
def _determine_extension(text, fetcher):
|
||||
"""Determine what extension is used in the expression.
|
||||
|
||||
If we use:
|
||||
- fetchPypi, we check if format is specified.
|
||||
- fetchurl, we determine the extension from the url.
|
||||
- fetchFromGitHub we simply use `.tar.gz`.
|
||||
"""
|
||||
if fetcher == 'fetchPypi':
|
||||
try:
|
||||
src_format = _get_unique_value('format', text)
|
||||
except ValueError as e:
|
||||
src_format = None # format was not given
|
||||
|
||||
try:
|
||||
extension = _get_unique_value('extension', text)
|
||||
except ValueError as e:
|
||||
extension = None # extension was not given
|
||||
|
||||
if extension is None:
|
||||
if src_format is None:
|
||||
src_format = 'setuptools'
|
||||
elif src_format == 'flit':
|
||||
raise ValueError("Don't know how to update a Flit package.")
|
||||
extension = FORMATS[src_format]
|
||||
|
||||
elif fetcher == 'fetchurl':
|
||||
url = _get_unique_value('url', text)
|
||||
extension = os.path.splitext(url)[1]
|
||||
if 'pypi' not in url:
|
||||
raise ValueError('url does not point to PyPI.')
|
||||
|
||||
elif fetcher == 'fetchFromGitHub':
|
||||
raise ValueError('updating from GitHub is not yet implemented.')
|
||||
|
||||
return extension
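# Illustrative examples (not part of the original script):
#   - fetchPypi with `format = "wheel";`       -> 'whl'
#   - fetchPypi without format/extension       -> 'tar.gz' (setuptools sdist)
#   - fetchurl with a PyPI url ending in .zip  -> '.zip' (via os.path.splitext)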
|
||||
|
||||
|
||||
def _update_package(path, target):
|
||||
|
||||
# Read the expression
|
||||
with open(path, 'r') as f:
|
||||
text = f.read()
|
||||
|
||||
# Determine pname.
|
||||
pname = _get_unique_value('pname', text)
|
||||
|
||||
# Determine version.
|
||||
version = _get_unique_value('version', text)
|
||||
|
||||
# First we check how many fetchers are mentioned.
|
||||
fetcher = _determine_fetcher(text)
|
||||
|
||||
extension = _determine_extension(text, fetcher)
|
||||
|
||||
new_version, new_sha256 = FETCHERS[fetcher](pname, extension, version, target)
|
||||
|
||||
if new_version == version:
|
||||
logging.info("Path {}: no update available for {}.".format(path, pname))
|
||||
return False
|
||||
elif Version(new_version) <= Version(version):
|
||||
raise ValueError("downgrade for {}.".format(pname))
|
||||
if not new_sha256:
|
||||
raise ValueError("no file available for {}.".format(pname))
|
||||
|
||||
text = _replace_value('version', new_version, text)
|
||||
text = _replace_value('sha256', new_sha256, text)
|
||||
|
||||
with open(path, 'w') as f:
|
||||
f.write(text)
|
||||
|
||||
logging.info("Path {}: updated {} from {} to {}".format(path, pname, version, new_version))
|
||||
|
||||
result = {
|
||||
'path' : path,
|
||||
'target': target,
|
||||
'pname': pname,
|
||||
'old_version' : version,
|
||||
'new_version' : new_version,
|
||||
#'fetcher' : fetcher,
|
||||
}
|
||||
|
||||
return result
|
||||
|
||||
|
||||
def _update(path, target):
|
||||
|
||||
# We need to read and modify a Nix expression.
|
||||
if os.path.isdir(path):
|
||||
path = os.path.join(path, 'default.nix')
|
||||
|
||||
# If a default.nix does not exist, we quit.
|
||||
if not os.path.isfile(path):
|
||||
logging.info("Path {}: does not exist.".format(path))
|
||||
return False
|
||||
|
||||
# If file is not a Nix expression, we quit.
|
||||
if not path.endswith(".nix"):
|
||||
logging.info("Path {}: does not end with `.nix`.".format(path))
|
||||
return False
|
||||
|
||||
try:
|
||||
return _update_package(path, target)
|
||||
except ValueError as e:
|
||||
logging.warning("Path {}: {}".format(path, e))
|
||||
return False
|
||||
|
||||
|
||||
def _commit(path, pname, old_version, new_version, **kwargs):
|
||||
"""Commit result.
|
||||
"""
|
||||
|
||||
msg = f'python: {pname}: {old_version} -> {new_version}'
|
||||
|
||||
try:
|
||||
subprocess.check_call(['git', 'add', path])
|
||||
subprocess.check_call(['git', 'commit', '-m', msg])
|
||||
except subprocess.CalledProcessError as e:
|
||||
subprocess.check_call(['git', 'checkout', path])
|
||||
raise RuntimeError(f'Could not commit {path}') from e  # CalledProcessError cannot be constructed from a message alone
|
||||
|
||||
return True
|
||||
|
||||
|
||||
def main():
|
||||
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument('package', type=str, nargs='+')
|
||||
parser.add_argument('--target', type=str, choices=SEMVER.keys(), default='major')
|
||||
parser.add_argument('--commit', action='store_true', help='Create a commit for each package update')
|
||||
|
||||
args = parser.parse_args()
|
||||
target = args.target
|
||||
|
||||
packages = list(map(os.path.abspath, args.package))
|
||||
|
||||
logging.info("Updating packages...")
|
||||
|
||||
# Use threads to update packages concurrently
|
||||
with Pool() as p:
|
||||
results = list(p.map(lambda pkg: _update(pkg, target), packages))
|
||||
|
||||
logging.info("Finished updating packages.")
|
||||
|
||||
# Commits are created sequentially.
|
||||
if args.commit:
|
||||
logging.info("Committing updates...")
|
||||
list(map(lambda x: _commit(**x), filter(bool, results)))
|
||||
logging.info("Finished committing updates")
|
||||
|
||||
count = sum(map(bool, results))
|
||||
logging.info("{} package(s) updated".format(count))
|
||||
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
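# Illustrative invocation of this script (the package path is made up);
# --target limits how far the version may move and --commit creates one git
# commit per successful update:
#
#   python3 <path-to-this-script> --target minor --commit \
#       pkgs/development/python-modules/requests/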
|
||||
|
@ -1,6 +1,8 @@
|
||||
{ package ? null
|
||||
, maintainer ? null
|
||||
, path ? null
|
||||
, max-workers ? null
|
||||
, keep-going ? null
|
||||
}:
|
||||
|
||||
# TODO: add assert statements
|
||||
@ -105,26 +107,23 @@ let
|
||||
% nix-shell maintainers/scripts/update.nix --argstr path gnome3
|
||||
|
||||
to run the update script for all packages under an attribute path.
|
||||
|
||||
You can also add
|
||||
|
||||
--argstr max-workers 8
|
||||
|
||||
to increase the number of jobs in parallel, or
|
||||
|
||||
--argstr keep-going true
|
||||
|
||||
to continue running when a single update fails.
|
||||
'';
|
||||
|
||||
runUpdateScript = package: ''
|
||||
echo -ne " - ${package.name}: UPDATING ..."\\r
|
||||
${package.updateScript} &> ${(builtins.parseDrvName package.name).name}.log
|
||||
CODE=$?
|
||||
if [ "$CODE" != "0" ]; then
|
||||
echo " - ${package.name}: ERROR "
|
||||
echo ""
|
||||
echo "--- SHOWING ERROR LOG FOR ${package.name} ----------------------"
|
||||
echo ""
|
||||
cat ${(builtins.parseDrvName package.name).name}.log
|
||||
echo ""
|
||||
echo "--- SHOWING ERROR LOG FOR ${package.name} ----------------------"
|
||||
exit $CODE
|
||||
else
|
||||
rm ${(builtins.parseDrvName package.name).name}.log
|
||||
fi
|
||||
echo " - ${package.name}: DONE. "
|
||||
'';
|
||||
packageData = package: {
|
||||
name = package.name;
|
||||
pname = (builtins.parseDrvName package.name).name;
|
||||
updateScript = pkgs.lib.toList package.updateScript;
|
||||
};
|
||||
|
||||
in pkgs.stdenv.mkDerivation {
|
||||
name = "nixpkgs-update-script";
|
||||
@ -139,21 +138,7 @@ in pkgs.stdenv.mkDerivation {
|
||||
exit 1
|
||||
'';
|
||||
shellHook = ''
|
||||
echo ""
|
||||
echo "Going to be running update for following packages:"
|
||||
echo "${builtins.concatStringsSep "\n" (map (x: " - ${x.name}") packages)}"
|
||||
echo ""
|
||||
read -n1 -r -p "Press space to continue..." confirm
|
||||
if [ "$confirm" = "" ]; then
|
||||
echo ""
|
||||
echo "Running update for:"
|
||||
${builtins.concatStringsSep "\n" (map runUpdateScript packages)}
|
||||
echo ""
|
||||
echo "Packages updated!"
|
||||
exit 0
|
||||
else
|
||||
echo "Aborting!"
|
||||
exit 1
|
||||
fi
|
||||
unset shellHook # do not contaminate nested shells
|
||||
exec ${pkgs.python3.interpreter} ${./update.py} ${pkgs.writeText "packages.json" (builtins.toJSON (map packageData packages))}${pkgs.lib.optionalString (max-workers != null) " --max-workers=${max-workers}"}${pkgs.lib.optionalString (keep-going == "true") " --keep-going"}
|
||||
'';
|
||||
}
|
||||
|
79
maintainers/scripts/update.py
Normal file
@ -0,0 +1,79 @@
|
||||
import argparse
|
||||
import concurrent.futures
|
||||
import json
|
||||
import os
|
||||
import subprocess
|
||||
import sys
|
||||
|
||||
updates = {}
|
||||
|
||||
def eprint(*args, **kwargs):
|
||||
print(*args, file=sys.stderr, **kwargs)
|
||||
|
||||
def run_update_script(package):
|
||||
eprint(f" - {package['name']}: UPDATING ...")
|
||||
|
||||
subprocess.run(package['updateScript'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, check=True)
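# Illustrative shape of a single entry in the packages JSON file passed to this
# script (concrete values are made up; the field names match the `packageData`
# attribute set in maintainers/scripts/update.nix):
#
#   { "name": "hello-2.10",
#     "pname": "hello",
#     "updateScript": ["/nix/store/<hash>-hello-update-script"] }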
|
||||
|
||||
|
||||
def main(max_workers, keep_going, packages):
|
||||
with open(packages) as f:
|
||||
packages = json.load(f)
|
||||
|
||||
eprint()
|
||||
eprint('Going to be running update for following packages:')
|
||||
for package in packages:
|
||||
eprint(f" - {package['name']}")
|
||||
eprint()
|
||||
|
||||
confirm = input('Press Enter key to continue...')
|
||||
if confirm == '':
|
||||
eprint()
|
||||
eprint('Running update for:')
|
||||
|
||||
with concurrent.futures.ProcessPoolExecutor(max_workers=max_workers) as executor:
|
||||
for package in packages:
|
||||
updates[executor.submit(run_update_script, package)] = package
|
||||
|
||||
for future in concurrent.futures.as_completed(updates):
|
||||
package = updates[future]
|
||||
|
||||
try:
|
||||
future.result()
|
||||
eprint(f" - {package['name']}: DONE.")
|
||||
except subprocess.CalledProcessError as e:
|
||||
eprint(f" - {package['name']}: ERROR")
|
||||
eprint()
|
||||
eprint(f"--- SHOWING ERROR LOG FOR {package['name']} ----------------------")
|
||||
eprint()
|
||||
eprint(e.stdout.decode('utf-8'))
|
||||
with open(f"{package['pname']}.log", 'wb') as f:
|
||||
f.write(e.stdout)
|
||||
eprint()
|
||||
eprint(f"--- SHOWING ERROR LOG FOR {package['name']} ----------------------")
|
||||
|
||||
if not keep_going:
|
||||
sys.exit(1)
|
||||
|
||||
eprint()
|
||||
eprint('Packages updated!')
|
||||
sys.exit()
|
||||
else:
|
||||
eprint('Aborting!')
|
||||
sys.exit(130)
|
||||
|
||||
parser = argparse.ArgumentParser(description='Update packages')
|
||||
parser.add_argument('--max-workers', '-j', dest='max_workers', type=int, help='Number of updates to run concurrently', nargs='?', default=4)
|
||||
parser.add_argument('--keep-going', '-k', dest='keep_going', action='store_true', help='Do not stop after first failure')
|
||||
parser.add_argument('packages', help='JSON file containing the list of package names and their update scripts')
|
||||
|
||||
if __name__ == '__main__':
|
||||
args = parser.parse_args()
|
||||
|
||||
try:
|
||||
main(args.max_workers, args.keep_going, args.packages)
|
||||
except (KeyboardInterrupt, SystemExit) as e:
|
||||
for update in updates:
|
||||
update.cancel()
|
||||
|
||||
sys.exit(e.code if isinstance(e, SystemExit) else 130)
|
@ -31,7 +31,7 @@ $ cd nixpkgs
|
||||
<para>
|
||||
The second possibility is to add the package outside of the Nixpkgs tree. For
|
||||
instance, here is how you specify a build of the
|
||||
<link xlink:href="http://www.gnu.org/software/hello/">GNU Hello</link>
|
||||
<link xlink:href="https://www.gnu.org/software/hello/">GNU Hello</link>
|
||||
package directly in <filename>configuration.nix</filename>:
|
||||
<programlisting>
|
||||
<xref linkend="opt-environment.systemPackages"/> =
|
||||
|
@ -22,5 +22,6 @@
|
||||
<xi:include href="networking.xml" />
|
||||
<xi:include href="linux-kernel.xml" />
|
||||
<xi:include href="../generated/modules.xml" xpointer="xpointer(//section[@id='modules']/*)" />
|
||||
<xi:include href="profiles.xml" />
|
||||
<!-- Apache; libvirtd virtualisation -->
|
||||
</part>
|
||||
|
@ -113,12 +113,10 @@ $ nixos-option <xref linkend="opt-boot.kernelModules"/>
|
||||
[ "tun" "ipv6" "loop" <replaceable>...</replaceable> ]
|
||||
</screen>
|
||||
Interactive exploration of the configuration is possible using
|
||||
<command
|
||||
xlink:href="https://github.com/edolstra/nix-repl">nix-repl</command>,
|
||||
a read-eval-print loop for Nix expressions. It’s not installed by default;
|
||||
run <literal>nix-env -i nix-repl</literal> to get it. A typical use:
|
||||
<command>nix repl</command>, a read-eval-print loop for Nix expressions.
|
||||
A typical use:
|
||||
<screen>
|
||||
$ nix-repl '<nixpkgs/nixos>'
|
||||
$ nix repl '<nixpkgs/nixos>'
|
||||
|
||||
nix-repl> config.<xref linkend="opt-networking.hostName"/>
|
||||
"mandark"
|
||||
@ -127,4 +125,23 @@ nix-repl> map (x: x.hostName) config.<xref linkend="opt-services.httpd.virtualHo
|
||||
[ "example.org" "example.gov" ]
|
||||
</screen>
|
||||
</para>
|
||||
|
||||
<para>
|
||||
While abstracting your configuration, you may find it useful to generate
|
||||
modules using code, instead of writing files. The example
|
||||
below would have the same effect as importing a file which sets those
|
||||
options.
|
||||
<screen>
|
||||
{ config, pkgs, ... }:
|
||||
|
||||
let netConfig = { hostName }: {
|
||||
networking.hostName = hostName;
|
||||
networking.useDHCP = false;
|
||||
};
|
||||
|
||||
in
|
||||
|
||||
{ imports = [ (netConfig "nixos.localdomain") ]; }
|
||||
</screen>
|
||||
</para>
|
||||
</section>
|
||||
|
39
nixos/doc/manual/configuration/profiles.xml
Normal file
@ -0,0 +1,39 @@
|
||||
<chapter xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
version="5.0"
|
||||
xml:id="ch-profiles">
|
||||
<title>Profiles</title>
|
||||
<para>
|
||||
In some cases, it may be desirable to take advantage of commonly-used,
|
||||
predefined configurations provided by nixpkgs, different from the defaults.
This role is fulfilled by NixOS's profiles, which live as files in
<filename><nixpkgs/nixos/modules/profiles></filename>.
The expected usage is to add them to the imports list of your
<filename>/etc/nixos/configuration.nix</filename> like this:
|
||||
</para>
|
||||
<programlisting>
|
||||
imports = [
|
||||
<nixpkgs/nixos/modules/profiles/profile-name.nix>
|
||||
];
|
||||
</programlisting>
|
||||
<para>
|
||||
Even if some of these profiles seem only useful in the context of
|
||||
install media, many are actually intended to be used in real installs.
|
||||
</para>
|
||||
<para>
|
||||
What follows is a brief explanation of the purpose and use case for each
|
||||
profile. Detailing each option configured by each one is out of scope.
|
||||
</para>
|
||||
<xi:include href="profiles/all-hardware.xml" />
|
||||
<xi:include href="profiles/base.xml" />
|
||||
<xi:include href="profiles/clone-config.xml" />
|
||||
<xi:include href="profiles/demo.xml" />
|
||||
<xi:include href="profiles/docker-container.xml" />
|
||||
<xi:include href="profiles/graphical.xml" />
|
||||
<xi:include href="profiles/hardened.xml" />
|
||||
<xi:include href="profiles/headless.xml" />
|
||||
<xi:include href="profiles/installation-device.xml" />
|
||||
<xi:include href="profiles/minimal.xml" />
|
||||
<xi:include href="profiles/qemu-guest.xml" />
|
||||
</chapter>
|
20
nixos/doc/manual/configuration/profiles/all-hardware.xml
Normal file
@ -0,0 +1,20 @@
|
||||
|
||||
<section xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
version="5.0"
|
||||
xml:id="sec-profile-all-hardware">
|
||||
<title>All Hardware</title>
|
||||
<para>
|
||||
Enables all hardware supported by NixOS: i.e., all firmware is
|
||||
included, and all devices from which one may boot are enabled in the initrd.
|
||||
Its primary use is in the NixOS installation CDs.
|
||||
</para>
|
||||
<para>
|
||||
The enabled kernel modules include support for SATA and PATA, SCSI
|
||||
(partially), USB, Firewire (untested), Virtio (QEMU, KVM, etc.), VMware, and
|
||||
Hyper-V. Additionally, <xref linkend="opt-hardware.enableAllFirmware"/> is
|
||||
enabled, and the firmware for the ZyDAS ZD1211 chipset is specifically
|
||||
installed.
|
||||
</para>
|
||||
</section>
|
15
nixos/doc/manual/configuration/profiles/base.xml
Normal file
@ -0,0 +1,15 @@
|
||||
|
||||
<section xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
version="5.0"
|
||||
xml:id="sec-profile-base">
|
||||
<title>Base</title>
|
||||
<para>
|
||||
Defines the software packages included in the "minimal"
|
||||
installation CD. It installs several utilities useful in a simple recovery or
|
||||
install media, such as a text-mode web browser, and tools for manipulating
|
||||
block devices, networking, hardware diagnostics, and filesystems (with their
|
||||
respective kernel modules).
|
||||
</para>
|
||||
</section>
|
14
nixos/doc/manual/configuration/profiles/clone-config.xml
Normal file
@ -0,0 +1,14 @@
|
||||
|
||||
<section xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
version="5.0"
|
||||
xml:id="sec-profile-clone-config">
|
||||
<title>Clone Config</title>
|
||||
<para>
|
||||
This profile is used in installer images.
|
||||
It provides an editable configuration.nix that imports all the modules that
|
||||
were also used when creating the image in the first place.
|
||||
As a result, it allows users to edit and rebuild the live system.
|
||||
</para>
|
||||
</section>
|
13
nixos/doc/manual/configuration/profiles/demo.xml
Normal file
@ -0,0 +1,13 @@
|
||||
|
||||
<section xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
version="5.0"
|
||||
xml:id="sec-profile-demo">
|
||||
<title>Demo</title>
|
||||
<para>
|
||||
This profile just enables a <systemitem class="username">demo</systemitem> user, with password <literal>demo</literal>, uid <literal>1000</literal>, <systemitem class="groupname">wheel</systemitem>
|
||||
group and <link linkend="opt-services.xserver.displayManager.sddm.autoLogin">
|
||||
autologin in the SDDM display manager</link>.
|
||||
</para>
|
||||
</section>
|
15
nixos/doc/manual/configuration/profiles/docker-container.xml
Normal file
@ -0,0 +1,15 @@
|
||||
|
||||
<section xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
version="5.0"
|
||||
xml:id="sec-profile-docker-container">
|
||||
<title>Docker Container</title>
|
||||
<para>
|
||||
This is the profile from which the Docker images are generated. It prepares a
|
||||
working system by importing the <link linkend="sec-profile-minimal">Minimal</link> and
|
||||
<link linkend="sec-profile-clone-config">Clone Config</link> profiles, and setting appropriate
|
||||
configuration options that are useful inside a container context, like
|
||||
<xref linkend="opt-boot.isContainer"/>.
|
||||
</para>
|
||||
</section>
|
21
nixos/doc/manual/configuration/profiles/graphical.xml
Normal file
@ -0,0 +1,21 @@
|
||||
|
||||
<section xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
version="5.0"
|
||||
xml:id="sec-profile-graphical">
|
||||
<title>Graphical</title>
|
||||
<para>
|
||||
Defines a NixOS configuration with the Plasma 5 desktop. It's used by the
|
||||
graphical installation CD.
|
||||
</para>
|
||||
<para>
|
||||
It sets <xref linkend="opt-services.xserver.enable"/>,
|
||||
<xref linkend="opt-services.xserver.displayManager.sddm.enable"/>,
|
||||
<xref linkend="opt-services.xserver.desktopManager.plasma5.enable"/> (
|
||||
<link linkend="opt-services.xserver.desktopManager.plasma5.enableQt4Support">
|
||||
without Qt4 Support</link>), and
|
||||
<xref linkend="opt-services.xserver.libinput.enable"/> to true. It also
|
||||
includes glxinfo and firefox in the system packages list.
|
||||
</para>
|
||||
</section>
|
22
nixos/doc/manual/configuration/profiles/hardened.xml
Normal file
@ -0,0 +1,22 @@
|
||||
|
||||
<section xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
version="5.0"
|
||||
xml:id="sec-profile-hardened">
|
||||
<title>Hardened</title>
|
||||
<para>
|
||||
A profile with most (vanilla) hardening options enabled by default,
|
||||
potentially at the cost of features and performance.
|
||||
</para>
|
||||
<para>
|
||||
This includes a hardened kernel, and limiting the system information
|
||||
available to processes through the <filename>/sys</filename> and
|
||||
<filename>/proc</filename> filesystems. It also disables the User Namespaces
|
||||
feature of the kernel, which stops Nix from being able to build anything
|
||||
(this particular setting can be overridden via
|
||||
<xref linkend="opt-security.allowUserNamespaces"/>). See the <literal
|
||||
xlink:href="https://github.com/nixos/nixpkgs/tree/master/nixos/modules/profiles/hardened.nix">
|
||||
profile source</literal> for further detail on which settings are altered.
|
||||
</para>
|
||||
</section>
|
18
nixos/doc/manual/configuration/profiles/headless.xml
Normal file
@ -0,0 +1,18 @@
|
||||
|
||||
<section xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
version="5.0"
|
||||
xml:id="sec-profile-headless">
|
||||
<title>Headless</title>
|
||||
<para>
|
||||
Common configuration for headless machines (e.g., Amazon EC2 instances).
|
||||
</para>
|
||||
<para>
|
||||
Disables <link linkend="opt-sound.enable">sound</link>,
|
||||
<link linkend="opt-boot.vesa">vesa</link>, serial consoles,
|
||||
<link linkend="opt-systemd.enableEmergencyMode">emergency mode</link>,
|
||||
<link linkend="opt-boot.loader.grub.splashImage">grub splash images</link> and
|
||||
configures the kernel to reboot automatically on panic.
|
||||
</para>
|
||||
</section>
|
@ -0,0 +1,35 @@
|
||||
|
||||
<section xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
version="5.0"
|
||||
xml:id="sec-profile-installation-device">
|
||||
<title>Installation Device</title>
|
||||
<para>
|
||||
Provides a basic configuration for installation devices like CDs. This means
|
||||
enabling hardware scans, using the <link linkend="sec-profile-clone-config">
|
||||
Clone Config profile</link> to guarantee
|
||||
<filename>/etc/nixos/configuration.nix</filename> exists (for
|
||||
<command>nixos-rebuild</command> to work), and including a copy of the Nixpkgs
channel snapshot used to create the install media.
|
||||
</para>
|
||||
<para>
|
||||
Additionally, documentation for <link linkend="opt-documentation.enable">
|
||||
Nixpkgs</link> and <link linkend="opt-documentation.nixos.enable">NixOS
|
||||
</link> is forcibly enabled (to override the
<link linkend="sec-profile-minimal">Minimal profile</link> preference); the
NixOS manual is shown automatically on TTY 8, sudo and udisks are disabled,
and autologin as root is enabled.
|
||||
</para>
|
||||
<para>
|
||||
A message is shown to the user to start a display manager if needed, and SSH
with <xref linkend="opt-services.openssh.permitRootLogin"/> is enabled (but
does not autostart). WPA Supplicant is also enabled, without autostart.
|
||||
</para>
|
||||
<para>
|
||||
Finally, vim is installed, root is set to not have a password, the kernel is
|
||||
made more silent for remote public IP installs, and several settings are
|
||||
tweaked so that the installer has a better chance of succeeding under
|
||||
low-memory environments.
|
||||
</para>
|
||||
</section>
|
17
nixos/doc/manual/configuration/profiles/minimal.xml
Normal file
@ -0,0 +1,17 @@
|
||||
|
||||
<section xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
version="5.0"
|
||||
xml:id="sec-profile-minimal">
|
||||
<title>Minimal</title>
|
||||
<para>
|
||||
This profile defines a small NixOS configuration. It does not contain any
|
||||
graphical software. It is a very short file that enables
|
||||
<link linkend="opt-environment.noXlibs">noXlibs</link>, sets
|
||||
<link linkend="opt-i18n.supportedLocales">i18n.supportedLocales</link>
|
||||
to only support the user-selected locale,
|
||||
<link linkend="opt-documentation.enable">disables packages' documentation
|
||||
</link>, and <link linkend="opt-sound.enable">disables sound</link>.
|
||||
</para>
|
||||
</section>
|
16
nixos/doc/manual/configuration/profiles/qemu-guest.xml
Normal file
@ -0,0 +1,16 @@
|
||||
<section xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
version="5.0"
|
||||
xml:id="sec-profile-qemu-guest">
|
||||
<title>QEMU Guest</title>
|
||||
<para>
|
||||
This profile contains common configuration for virtual machines running under
|
||||
QEMU (using virtio).
|
||||
</para>
|
||||
<para>
|
||||
It makes virtio modules available on the initrd, sets the system time from
|
||||
the hardware clock to work around a bug in qemu-kvm, and
|
||||
<link linkend="opt-security.rngd.enable">enables rngd</link>.
|
||||
</para>
|
||||
</section>
|
@ -35,11 +35,11 @@
|
||||
</para>
|
||||
<para>
|
||||
NixOS’s default <emphasis>display manager</emphasis> (the program that
|
||||
provides a graphical login prompt and manages the X server) is SLiM. You can
|
||||
provides a graphical login prompt and manages the X server) is LightDM. You can
|
||||
select an alternative one by picking one of the following lines:
|
||||
<programlisting>
|
||||
<xref linkend="opt-services.xserver.displayManager.sddm.enable"/> = true;
|
||||
<xref linkend="opt-services.xserver.displayManager.lightdm.enable"/> = true;
|
||||
<xref linkend="opt-services.xserver.displayManager.slim.enable"/> = true;
|
||||
</programlisting>
|
||||
</para>
|
||||
<para>
|
||||
|
@ -1,37 +0,0 @@
|
||||
<section xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
version="5.0"
|
||||
xml:id="sec-debugging-nixos-tests">
|
||||
<title>Debugging NixOS tests</title>
|
||||
|
||||
<para>
|
||||
Tests may fail and infrastructure offers access to inspect machine state.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
To prevent test from stopping and cleaning up, insert a sleep command:
|
||||
</para>
|
||||
|
||||
<programlisting>
|
||||
$machine->succeed("sleep 84000");
|
||||
</programlisting>
|
||||
|
||||
<para>
|
||||
As soon as machine starts run as root:
|
||||
</para>
|
||||
|
||||
<programlisting>
|
||||
nix-shell -p socat --run "socat STDIO,raw,echo=0,escape=0x11 UNIX:/tmp/nix-build-vm-test-run-*.drv-0/vm-state-machine/backdoor"
|
||||
</programlisting>
|
||||
|
||||
<para>
|
||||
You may need to find the correct path, replacing <literal>/tmp</literal>,
|
||||
<literal>*</literal> or <literal>machine</literal>.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Press "enter" to open up console and login as "root". After you're done,
|
||||
press "ctrl-q" to exit the console.
|
||||
</para>
|
||||
</section>
|
@ -16,5 +16,4 @@ xlink:href="https://github.com/NixOS/nixpkgs/tree/master/nixos/tests">nixos/test
|
||||
<xi:include href="writing-nixos-tests.xml" />
|
||||
<xi:include href="running-nixos-tests.xml" />
|
||||
<xi:include href="running-nixos-tests-interactively.xml" />
|
||||
<xi:include href="debugging-nixos-tests.xml" />
|
||||
</chapter>
|
||||
|
@ -106,7 +106,7 @@
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<varlistentry xml:id='types.ints.ux'>
|
||||
<term>
|
||||
<varname>types.ints.{u8, u16, u32}</varname>
|
||||
</term>
|
||||
@ -131,6 +131,17 @@
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>
|
||||
<varname>types.port</varname>
|
||||
</term>
|
||||
<listitem>
|
||||
<para>
|
||||
A port number. This type is an alias to
|
||||
<link linkend='types.ints.ux'><varname>types.ints.u16</varname></link>.
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
</variablelist>
|
||||
|
||||
<para>
|
||||
|
@ -19,7 +19,7 @@ starting VDE switch for network 1
|
||||
> startAll
|
||||
> testScript
|
||||
> $machine->succeed("touch /tmp/foo")
|
||||
> print($machine->succeed("pwd"), "\n") # Show stdout of command
|
||||
> print($machine->succeed("pwd")) # Show stdout of command
|
||||
</screen>
|
||||
The function <command>testScript</command> executes the entire test script
|
||||
and drops you back into the test driver command line upon its completion.
|
||||
|
@ -108,7 +108,7 @@ xlink:href="https://github.com/NixOS/nixpkgs/blob/master/nixos/modules/virtualis
|
||||
<programlisting>
|
||||
$machine->start;
|
||||
$machine->waitForUnit("default.target");
|
||||
die unless $machine->succeed("uname") =~ /Linux/;
|
||||
$machine->succeed("uname") =~ /Linux/ or die;
|
||||
</programlisting>
|
||||
The first line is actually unnecessary; machines are implicitly started when
|
||||
you first execute an action on them (such as <literal>waitForUnit</literal>
|
||||
|
@ -23,7 +23,7 @@ $ diskutil list
|
||||
[..]
|
||||
$ diskutil unmountDisk diskN
|
||||
Unmount of all volumes on diskN was successful
|
||||
$ sudo dd bs=1m if=nix.iso of=/dev/rdiskN
|
||||
$ sudo dd bs=1000000 if=nix.iso of=/dev/rdiskN
|
||||
</programlisting>
|
||||
Using the 'raw' <command>rdiskN</command> device instead of
|
||||
<command>diskN</command> completes in minutes instead of hours. After
|
||||
|
@ -77,18 +77,22 @@
|
||||
Shared folders can be given a name and a path in the host system in the
|
||||
VirtualBox settings (Machine / Settings / Shared Folders, then click on the
|
||||
"Add" icon). Add the following to the
|
||||
<literal>/etc/nixos/configuration.nix</literal> to auto-mount them:
|
||||
<literal>/etc/nixos/configuration.nix</literal> to auto-mount them. If you
|
||||
do not add <literal>"nofail"</literal>, the system will no boot properly.
|
||||
The same goes for disabling <literal>rngd</literal> which is normally used
|
||||
to get randomness but this does not work in virtual machines.
|
||||
</para>
|
||||
|
||||
<programlisting>
|
||||
{ config, pkgs, ...} :
|
||||
{
|
||||
security.rngd.enable = false; # otherwise the VM will not boot
|
||||
...
|
||||
|
||||
fileSystems."/virtualboxshare" = {
|
||||
fsType = "vboxsf";
|
||||
device = "nameofthesharedfolder";
|
||||
options = [ "rw" ];
|
||||
options = [ "rw" "nofail" ];
|
||||
};
|
||||
}
|
||||
</programlisting>
|
||||
|
@ -13,18 +13,18 @@
|
||||
</refnamediv>
|
||||
<refsynopsisdiv>
|
||||
<cmdsynopsis>
|
||||
<command>nixos-generate-config</command>
|
||||
<command>nixos-generate-config</command>
|
||||
<arg>
|
||||
<option>--force</option>
|
||||
</arg>
|
||||
|
||||
|
||||
<arg>
|
||||
<arg choice='plain'>
|
||||
<option>--root</option>
|
||||
</arg>
|
||||
<replaceable>root</replaceable>
|
||||
</arg>
|
||||
|
||||
|
||||
<arg>
|
||||
<arg choice='plain'>
|
||||
<option>--dir</option>
|
||||
@ -167,7 +167,7 @@ $ nixos-generate-config --root /mnt
|
||||
|
||||
{
|
||||
imports =
|
||||
[ <nixos/modules/installer/scan/not-detected.nix>
|
||||
[ <nixos/modules/installer/scan/not-detected.nix>
|
||||
];
|
||||
|
||||
boot.initrd.availableKernelModules = [ "ehci_hcd" "ahci" ];
|
||||
|
@ -13,35 +13,35 @@
|
||||
</refnamediv>
|
||||
<refsynopsisdiv>
|
||||
<cmdsynopsis>
|
||||
<command>nixos-rebuild</command><group choice='req'>
|
||||
<command>nixos-rebuild</command><group choice='req'>
|
||||
<arg choice='plain'>
|
||||
<option>switch</option>
|
||||
</arg>
|
||||
|
||||
|
||||
<arg choice='plain'>
|
||||
<option>boot</option>
|
||||
</arg>
|
||||
|
||||
|
||||
<arg choice='plain'>
|
||||
<option>test</option>
|
||||
</arg>
|
||||
|
||||
|
||||
<arg choice='plain'>
|
||||
<option>build</option>
|
||||
</arg>
|
||||
|
||||
|
||||
<arg choice='plain'>
|
||||
<option>dry-build</option>
|
||||
</arg>
|
||||
|
||||
|
||||
<arg choice='plain'>
|
||||
<option>dry-activate</option>
|
||||
</arg>
|
||||
|
||||
|
||||
<arg choice='plain'>
|
||||
<option>build-vm</option>
|
||||
</arg>
|
||||
|
||||
|
||||
<arg choice='plain'>
|
||||
<option>build-vm-with-bootloader</option>
|
||||
</arg>
|
||||
@ -50,29 +50,33 @@
|
||||
<arg>
|
||||
<option>--upgrade</option>
|
||||
</arg>
|
||||
|
||||
|
||||
<arg>
|
||||
<option>--install-bootloader</option>
|
||||
</arg>
|
||||
|
||||
|
||||
<arg>
|
||||
<option>--no-build-nix</option>
|
||||
</arg>
|
||||
|
||||
|
||||
<arg>
|
||||
<option>--fast</option>
|
||||
</arg>
|
||||
|
||||
|
||||
<arg>
|
||||
<option>--rollback</option>
|
||||
</arg>
|
||||
<arg>
|
||||
<option>--builders</option>
|
||||
<replaceable>builder-spec</replaceable>
|
||||
</arg>
|
||||
<sbr />
|
||||
<arg>
|
||||
<group choice='req'>
|
||||
<group choice='req'>
|
||||
<arg choice='plain'>
|
||||
<option>--profile-name</option>
|
||||
</arg>
|
||||
|
||||
|
||||
<arg choice='plain'>
|
||||
<option>-p</option>
|
||||
</arg>
|
||||
@ -315,6 +319,27 @@ $ ./result/bin/run-*-vm
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>
|
||||
<option>--builders</option>
|
||||
<replaceable>builder-spec</replaceable>
|
||||
</term>
|
||||
<listitem>
|
||||
<para>
|
||||
Allow ad-hoc remote builders for building the new system.
|
||||
This requires the user executing <command>nixos-rebuild</command> (usually
|
||||
root) to be configured as a trusted user in the Nix daemon. This can be
|
||||
achieved by using the <literal>nix.trustedUsers</literal> NixOS option.
|
||||
Example values for that option are described in the
<literal>Remote builds chapter</literal> of the Nix manual
(e.g. <command>--builders "ssh://bigbrother x86_64-linux"</command>).
By specifying an empty string, the builders listed in
<filename>/etc/nix/machines</filename> can be ignored, for example when
they are not reachable due to network connectivity:
<command>--builders ""</command>.
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>
|
||||
<option>--profile-name</option>
|
||||
|
@ -17,7 +17,7 @@
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
<link xlink:href="http://thread.gmane.org/gmane.linux.distributions.nixos/15165">
|
||||
<link xlink:href="https://www.mail-archive.com/nix-dev@lists.science.uu.nl/msg13957.html">
|
||||
Nix has been updated to 1.8.</link>
|
||||
</para>
|
||||
</listitem>
|
||||
|
@ -21,7 +21,7 @@
|
||||
<listitem>
|
||||
<para>
|
||||
The default Python 3 interpreter is now CPython 3.7 instead of CPython 3.6.
|
||||
<para />
|
||||
</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
</section>
|
||||
@ -39,7 +39,18 @@
|
||||
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para />
|
||||
<para>
|
||||
<literal>./programs/nm-applet.nix</literal>
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
There is a new <varname>security.googleOsLogin</varname> module for using
|
||||
<link xlink:href="https://cloud.google.com/compute/docs/instances/managing-instance-access">OS Login</link>
|
||||
to manage SSH access to Google Compute Engine instances, which supersedes
|
||||
the imperative and broken <literal>google-accounts-daemon</literal> used
|
||||
in <literal>nixos/modules/virtualisation/google-compute-config.nix</literal>.
|
||||
</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
</section>
|
||||
@ -99,18 +110,28 @@
|
||||
start org.nixos.nix-daemon</command>.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
The Syncthing state and configuration data have been moved from
<varname>services.syncthing.dataDir</varname> to the newly defined
<varname>services.syncthing.configDir</varname>, which defaults to
<literal>/var/lib/syncthing/.config/syncthing</literal>.
This change makes it possible to share synced directories using ACLs
without Syncthing resetting the permissions on every start.
|
||||
</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
The Syncthing state and configuration data have been moved from
<varname>services.syncthing.dataDir</varname> to the newly defined
<varname>services.syncthing.configDir</varname>, which defaults to
<literal>/var/lib/syncthing/.config/syncthing</literal>.
This change makes it possible to share synced directories using ACLs
without Syncthing resetting the permissions on every start.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
The <literal>ntp</literal> module now has sane default restrictions.
|
||||
If you're relying on the previous defaults, which permitted all queries
|
||||
and commands from all firewall-permitted sources, you can set
|
||||
<varname>services.ntp.restrictDefault</varname> and
|
||||
<varname>services.ntp.restrictSource</varname> to
|
||||
<literal>[]</literal>.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
Package <varname>rabbitmq_server</varname> is renamed to
|
||||
@ -149,6 +170,14 @@
|
||||
make sure to update your configuration if you want to keep <literal>proglodyte-wasm</literal>.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
When the <literal>nixpkgs.pkgs</literal> option is set, NixOS will no
|
||||
longer ignore the <literal>nixpkgs.overlays</literal> option. The old
|
||||
behavior can be recovered by setting <literal>nixpkgs.overlays =
|
||||
lib.mkForce [];</literal>.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
OpenSMTPD has been upgraded to version 6.4.0p1. This release makes
|
||||
@ -164,6 +193,14 @@
|
||||
has been renamed to <varname>postgresql_9_6</varname>.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
Package <literal>consul-ui</literal> and passthrough <literal>consul.ui</literal> have been removed.
|
||||
The package <literal>consul</literal> now uses upstream releases that vendor the UI into the binary.
|
||||
See <link xlink:href="https://github.com/NixOS/nixpkgs/pull/48714#issuecomment-433454834">#48714</link>
|
||||
for details.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
Slurm introduces the new option
|
||||
@ -184,6 +221,135 @@
|
||||
options can occur more than once in the configuration.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
The <literal>solr</literal> package has been upgraded from 4.10.3 to 7.5.0 and has undergone
|
||||
some major changes. The <literal>services.solr</literal> module has been updated to reflect
|
||||
these changes. Please review http://lucene.apache.org/solr/ carefully before upgrading.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
Package <literal>ckb</literal> is renamed to <literal>ckb-next</literal>,
|
||||
and options <literal>hardware.ckb.*</literal> are renamed to
|
||||
<literal>hardware.ckb-next.*</literal>.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
The option <literal>services.xserver.displayManager.job.logToFile</literal> which was
|
||||
previously set to <literal>true</literal> when using the display managers
|
||||
<literal>lightdm</literal>, <literal>sddm</literal> or <literal>xpra</literal> has been
|
||||
reset to the default value (<literal>false</literal>).
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
NixOS firewall options that are not tied to a particular network interface
(<literal>networking.firewall.allow*</literal>) are now preserved when
interface-specific rules such as
<literal>networking.firewall.interfaces.en0.allow*</literal> are also set.
These rules continue to use the pseudo device "default"
(<literal>networking.firewall.interfaces.default.*</literal>), and assigning
to this pseudo device will override the
<literal>networking.firewall.allow*</literal> options.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
The <literal>nscd</literal> service now disables all caching of
|
||||
<literal>passwd</literal> and <literal>group</literal> databases by
|
||||
default. This was interfering with the correct functioning of the
|
||||
<literal>libnss_systemd.so</literal> module which is used by
|
||||
<literal>systemd</literal> to manage uids and usernames in the presence of
|
||||
<literal>DynamicUser=</literal> in systemd services. This was already the
|
||||
default behaviour in the presence of <literal>services.sssd.enable =
|
||||
true</literal> because nscd caching would interfere with
|
||||
<literal>sssd</literal> in unpredictable ways as well. Because we're
|
||||
using nscd not for caching, but for convincing glibc to find NSS modules
|
||||
in the nix store instead of an absolute path, we have decided to disable
|
||||
caching globally now, as it's usually not the behaviour the user wants and
|
||||
can lead to surprising behaviour. Furthermore, negative caching of host
|
||||
lookups is also disabled now by default. This should fix the issue of dns
|
||||
lookups failing in the presence of an unreliable network.
|
||||
</para>
|
||||
<para>
|
||||
If the old behaviour is desired, this can be restored by setting
|
||||
the <literal>services.nscd.config</literal> option
|
||||
with the desired caching parameters.
|
||||
<programlisting>
|
||||
services.nscd.config =
|
||||
''
|
||||
server-user nscd
|
||||
threads 1
|
||||
paranoia no
|
||||
debug-level 0
|
||||
|
||||
enable-cache passwd yes
|
||||
positive-time-to-live passwd 600
|
||||
negative-time-to-live passwd 20
|
||||
suggested-size passwd 211
|
||||
check-files passwd yes
|
||||
persistent passwd no
|
||||
shared passwd yes
|
||||
|
||||
enable-cache group yes
|
||||
positive-time-to-live group 3600
|
||||
negative-time-to-live group 60
|
||||
suggested-size group 211
|
||||
check-files group yes
|
||||
persistent group no
|
||||
shared group yes
|
||||
|
||||
enable-cache hosts yes
|
||||
positive-time-to-live hosts 600
|
||||
negative-time-to-live hosts 5
|
||||
suggested-size hosts 211
|
||||
check-files hosts yes
|
||||
persistent hosts no
|
||||
shared hosts yes
|
||||
'';
|
||||
</programlisting>
|
||||
See <link xlink:href="https://github.com/NixOS/nixpkgs/pull/50316">#50316</link>
|
||||
for details.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
GitLab Shell previously used the nix store paths for the
|
||||
<literal>gitlab-shell</literal> command in its
|
||||
<literal>authorized_keys</literal> file, which might stop working after
|
||||
garbage collection. To circumvent that, we regenerated that file on each
|
||||
startup. As <literal>gitlab-shell</literal> has now been changed to use
|
||||
<literal>/var/run/current-system/sw/bin/gitlab-shell</literal>, this is
|
||||
not necessary anymore, but there might be leftover lines with a nix store
|
||||
path. Regenerate the <literal>authorized_keys</literal> file via
|
||||
<command>sudo -u git -H gitlab-rake gitlab:shell:setup</command> in that
|
||||
case.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
The <literal>pam_unix</literal> account module is now loaded with its
|
||||
control field set to <literal>required</literal> instead of
|
||||
<literal>sufficient</literal>, so that later pam account modules that
|
||||
might do more extensive checks are being executed.
|
||||
Previously, the whole account module verification was exited prematurely
|
||||
in case a nss module provided the account name to
|
||||
<literal>pam_unix</literal>.
|
||||
The LDAP and SSSD NixOS modules already add their NSS modules when
|
||||
enabled. In case your setup breaks due to some later pam account module
|
||||
previously shadowed, or failing NSS lookups, please file a bug. You can
|
||||
get back the old behaviour by manually setting
|
||||
<literal><![CDATA[security.pam.services.<name?>.text]]></literal>.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
<literal>fish</literal> has been upgraded to 3.0.
|
||||
It comes with a number of improvements and backwards incompatible changes.
|
||||
See the <literal>fish</literal> <link xlink:href="https://github.com/fish-shell/fish-shell/releases/tag/3.0.0">release notes</link> for more information.
|
||||
</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
</section>
|
||||
|
||||
@ -202,6 +368,12 @@
|
||||
Matomo version.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
<literal>composableDerivation</literal> along with supporting library functions
|
||||
has been removed.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
The deprecated <literal>truecrypt</literal> package has been removed
|
||||
@ -211,6 +383,31 @@
|
||||
supports loading TrueCrypt volumes.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
The Kubernetes DNS addon, kube-dns, has been replaced with CoreDNS.
|
||||
This change is made in accordance with Kubernetes making CoreDNS the official default
|
||||
starting from
|
||||
<link xlink:href="https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.11.md#sig-cluster-lifecycle">Kubernetes v1.11</link>.
|
||||
Please beware that upgrading the DNS addon on existing clusters might induce
|
||||
minor downtime while the DNS-addon terminates and re-initializes.
|
||||
Also note that the DNS-service now runs with 2 pod replicas by default.
|
||||
The desired number of replicas can be configured using:
|
||||
<option>services.kubernetes.addons.dns.replicas</option>.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
The quassel-webserver package and module was removed from nixpkgs due to the lack
|
||||
of maintainers.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
The owncloud server packages and httpd subservice module were removed
|
||||
from nixpkgs due to the lack of maintainers.
|
||||
</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
</section>
|
||||
</section>
|
||||
|
@ -1,6 +1,13 @@
|
||||
{ system, minimal ? false, config ? {} }:
|
||||
|
||||
let pkgs = import ../.. { inherit system config; }; in
|
||||
{ system
|
||||
, # Use a minimal kernel?
|
||||
minimal ? false
|
||||
, # Ignored
|
||||
config ? null
|
||||
# Nixpkgs, for qemu, lib and more
|
||||
, pkgs
|
||||
, # NixOS configuration to add to the VMs
|
||||
extraConfigurations ? []
|
||||
}:
|
||||
|
||||
with pkgs.lib;
|
||||
with import ../lib/qemu-flags.nix { inherit pkgs; };
|
||||
@ -30,7 +37,8 @@ rec {
|
||||
../modules/testing/test-instrumentation.nix # !!! should only get added for automated test runs
|
||||
{ key = "no-manual"; documentation.nixos.enable = false; }
|
||||
{ key = "qemu"; system.build.qemu = qemu; }
|
||||
] ++ optional minimal ../modules/testing/minimal-kernel.nix;
|
||||
] ++ optional minimal ../modules/testing/minimal-kernel.nix
|
||||
++ extraConfigurations;
|
||||
extraArgs = { inherit nodes; };
|
||||
};
|
||||
|
||||
@ -75,6 +83,8 @@ rec {
|
||||
(m': let config = (getAttr m' nodes).config; in
|
||||
optionalString (config.networking.primaryIPAddress != "")
|
||||
("${config.networking.primaryIPAddress} " +
|
||||
optionalString (config.networking.domain != null)
|
||||
"${config.networking.hostName}.${config.networking.domain} " +
|
||||
"${config.networking.hostName}\n"));
|
||||
|
||||
virtualisation.qemu.options =
|
||||
|
@ -1,3 +1,7 @@
|
||||
/* Build a channel tarball. These contain, in addition to the nixpkgs
|
||||
* expressions themselves, files that indicate the version of nixpkgs
|
||||
* that they represent.
|
||||
*/
|
||||
{ pkgs, nixpkgs, version, versionSuffix }:
|
||||
|
||||
pkgs.releaseTools.makeSourceTarball {
|
||||
|
@ -84,7 +84,7 @@ let format' = format; in let
|
||||
# FIXME: merge with channel.nix / make-channel.nix.
|
||||
channelSources = pkgs.runCommand "nixos-${config.system.nixos.version}" {} ''
|
||||
mkdir -p $out
|
||||
cp -prd ${nixpkgs} $out/nixos
|
||||
cp -prd ${nixpkgs.outPath} $out/nixos
|
||||
chmod -R u+w $out/nixos
|
||||
if [ ! -e $out/nixos/nixpkgs ]; then
|
||||
ln -s . $out/nixos/nixpkgs
|
||||
|
@ -9,6 +9,7 @@
|
||||
, e2fsprogs
|
||||
, libfaketime
|
||||
, perl
|
||||
, lkl
|
||||
}:
|
||||
|
||||
let
|
||||
@ -18,16 +19,13 @@ in
|
||||
pkgs.stdenv.mkDerivation {
|
||||
name = "ext4-fs.img";
|
||||
|
||||
nativeBuildInputs = [e2fsprogs.bin libfaketime perl];
|
||||
nativeBuildInputs = [e2fsprogs.bin libfaketime perl lkl];
|
||||
|
||||
buildCommand =
|
||||
''
|
||||
# Add the closures of the top-level store objects.
|
||||
storePaths=$(cat ${sdClosureInfo}/store-paths)
|
||||
|
||||
# Also include a manifest of the closures in a format suitable for nix-store --load-db.
|
||||
cp ${sdClosureInfo}/registration nix-path-registration
|
||||
|
||||
# Make a crude approximation of the size of the target image.
|
||||
# If the script starts failing, increase the fudge factors here.
|
||||
numInodes=$(find $storePaths | wc -l)
|
||||
@ -38,55 +36,16 @@ pkgs.stdenv.mkDerivation {
|
||||
truncate -s $bytes $out
|
||||
faketime -f "1970-01-01 00:00:01" mkfs.ext4 -L ${volumeLabel} -U ${uuid} $out
|
||||
|
||||
# Populate the image contents by piping a bunch of commands to the `debugfs` tool from e2fsprogs.
|
||||
# For example, to copy /nix/store/abcd...efg-coreutils-8.23/bin/sleep:
|
||||
# cd /nix/store/abcd...efg-coreutils-8.23/bin
|
||||
# write /nix/store/abcd...efg-coreutils-8.23/bin/sleep sleep
|
||||
# sif sleep mode 040555
|
||||
# sif sleep gid 30000
|
||||
# In particular, debugfs doesn't handle absolute target paths; you have to 'cd' in the virtual
|
||||
# filesystem first. Likewise the intermediate directories must already exist (using `find`
|
||||
# handles that for us). And when setting the file's permissions, the inode type flags (__S_IFDIR,
|
||||
# __S_IFREG) need to be set as well.
|
||||
(
|
||||
echo write nix-path-registration nix-path-registration
|
||||
echo mkdir nix
|
||||
echo cd /nix
|
||||
echo mkdir store
|
||||
# Also include a manifest of the closures in a format suitable for nix-store --load-db.
|
||||
cp ${sdClosureInfo}/registration nix-path-registration
|
||||
cptofs -t ext4 -i $out nix-path-registration /
|
||||
|
||||
# XXX: This explodes in exciting ways if anything in /nix/store has a space in it.
|
||||
find $storePaths -printf '%y %f %h %m\n'| while read -r type file dir perms; do
|
||||
# echo "TYPE=$type DIR=$dir FILE=$file PERMS=$perms" >&2
|
||||
# Create nix/store before copying paths
|
||||
faketime -f "1970-01-01 00:00:01" mkdir -p nix/store
|
||||
cptofs -t ext4 -i $out nix /
|
||||
|
||||
echo "cd $dir"
|
||||
case $type in
|
||||
d)
|
||||
echo "mkdir $file"
|
||||
echo sif $file mode $((040000 | 0$perms)) # magic constant is __S_IFDIR
|
||||
;;
|
||||
f)
|
||||
echo "write $dir/$file $file"
|
||||
echo sif $file mode $((0100000 | 0$perms)) # magic constant is __S_IFREG
|
||||
;;
|
||||
l)
|
||||
echo "symlink $file $(readlink "$dir/$file")"
|
||||
;;
|
||||
*)
|
||||
echo "Unknown entry: $type $dir $file $perms" >&2
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
|
||||
echo sif $file gid 30000 # chgrp to nixbld
|
||||
done
|
||||
) | faketime -f "1970-01-01 00:00:01" debugfs -w $out -f /dev/stdin > errorlog 2>&1
|
||||
|
||||
# The debugfs tool doesn't terminate on error nor exit with a non-zero status. Check manually.
|
||||
if egrep -q 'Could not allocate|File not found' errorlog; then
|
||||
cat errorlog
|
||||
echo "--- Failed to create EXT4 image of $bytes bytes (numInodes=$numInodes, numDataBlocks=$numDataBlocks) ---"
|
||||
return 1
|
||||
fi
|
||||
echo "copying store paths to image..."
|
||||
cptofs -t ext4 -i $out $storePaths /nix/store/
|
||||
|
||||
# I have ended up with corrupted images sometimes, I suspect that happens when the build machine's disk gets full during the build.
|
||||
if ! fsck.ext4 -n -f $out; then
|
||||
@ -94,5 +53,24 @@ pkgs.stdenv.mkDerivation {
|
||||
cat errorlog
|
||||
return 1
|
||||
fi
|
||||
|
||||
(
|
||||
# Resizes **snugly** to its actual limits (or closer to)
|
||||
free=$(dumpe2fs $out | grep '^Free blocks:')
|
||||
blocksize=$(dumpe2fs $out | grep '^Block size:')
|
||||
blocks=$(dumpe2fs $out | grep '^Block count:')
|
||||
blocks=$((''${blocks##*:})) # format the number.
|
||||
blocksize=$((''${blocksize##*:})) # format the number.
|
||||
# System can't boot with 0 blocks free.
|
||||
# Add 16MiB of free space
|
||||
fudge=$(( 16 * 1024 * 1024 / blocksize ))
|
||||
size=$(( blocks - ''${free##*:} + fudge ))
|
||||
|
||||
echo "Resizing from $blocks blocks to $size blocks. (~ $((size*blocksize/1024/1024))MiB)"
|
||||
EXT2FS_NO_MTAB_OK=yes resize2fs $out -f $size
|
||||
)
|
||||
|
||||
# And a final fsck, because of the previous truncating.
|
||||
fsck.ext4 -n -f $out
|
||||
'';
|
||||
}
|
||||
|
@ -47,7 +47,8 @@ if test -n "$bootable"; then
|
||||
|
||||
isoBootFlags="-eltorito-boot ${bootImage}
|
||||
-eltorito-catalog .boot.cat
|
||||
-no-emul-boot -boot-load-size 4 -boot-info-table"
|
||||
-no-emul-boot -boot-load-size 4 -boot-info-table
|
||||
--sort-weight 1 /isolinux" # Make sure isolinux is near the beginning of the ISO
|
||||
fi
|
||||
|
||||
if test -n "$usbBootable"; then
|
||||
@ -112,7 +113,7 @@ xorriso="xorriso
|
||||
-r
|
||||
-path-list pathlist
|
||||
--sort-weight 0 /
|
||||
--sort-weight 1 /isolinux" # Make sure isolinux is near the beginning of the ISO
|
||||
"
|
||||
|
||||
$xorriso -output $out/iso/$isoName
|
||||
|
||||
|
@ -3,6 +3,9 @@
|
||||
, # The root directory of the squashfs filesystem is filled with the
|
||||
# closures of the Nix store paths listed here.
|
||||
storeContents ? []
|
||||
, # Compression parameters.
|
||||
# For zstd compression you can use "zstd -Xcompression-level 6".
|
||||
comp ? "xz -Xdict-size 100%"
|
||||
}:
|
||||
|
||||
stdenv.mkDerivation {
|
||||
@ -20,6 +23,6 @@ stdenv.mkDerivation {
|
||||
|
||||
# Generate the squashfs image.
|
||||
mksquashfs nix-path-registration $(cat $closureInfo/store-paths) $out \
|
||||
-keep-as-directory -all-root -b 1048576 -comp xz -Xdict-size 100%
|
||||
-keep-as-directory -all-root -b 1048576 -comp ${comp}
|
||||
'';
|
||||
}
|
||||
|
@ -1,4 +1,4 @@
{ stdenv, perl, pixz, pathsFromGraph
{ stdenv, closureInfo, pixz

, # The file name of the resulting tarball
fileName ? "nixos-system-${stdenv.hostPlatform.system}"
@ -29,24 +29,28 @@
, extraInputs ? [ pixz ]
}:

let
symlinks = map (x: x.symlink) storeContents;
objects = map (x: x.object) storeContents;
in

stdenv.mkDerivation {
name = "tarball";
builder = ./make-system-tarball.sh;
buildInputs = [ perl ] ++ extraInputs;
buildInputs = extraInputs;

inherit fileName pathsFromGraph extraArgs extraCommands compressCommand;
inherit fileName extraArgs extraCommands compressCommand;

# !!! should use XML.
sources = map (x: x.source) contents;
targets = map (x: x.target) contents;

# !!! should use XML.
objects = map (x: x.object) storeContents;
symlinks = map (x: x.symlink) storeContents;
inherit symlinks objects;

# For obtaining the closure of `storeContents'.
exportReferencesGraph =
map (x: [("closure-" + baseNameOf x.object) x.object]) storeContents;
closureInfo = closureInfo {
rootPaths = objects;
};

extension = compressionExtension;
}

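closureInfo replaces the old exportReferencesGraph/pathsFromGraph pair: it builds a small derivation whose store-paths file lists the full runtime closure of rootPaths and whose registration file feeds nix-store --load-db. A minimal sketch of consuming it from a builder (pkgs.hello stands in for the real roots):

  let
    pkgs = import <nixpkgs> { };
    closure = pkgs.closureInfo { rootPaths = [ pkgs.hello ]; };
  in
  pkgs.runCommand "closure-demo" { } ''
    # One store path per line, including transitive dependencies.
    wc -l < ${closure}/store-paths

    # Database dump that `nix-store --load-db` can import inside an image.
    cp ${closure}/registration $out
  ''
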
@ -3,7 +3,6 @@ source $stdenv/setup
sources_=($sources)
targets_=($targets)

echo $objects
objects=($objects)
symlinks=($symlinks)

@ -14,8 +13,6 @@ stripSlash() {
if test "${res:0:1}" = /; then res=${res:1}; fi
}

touch pathlist

# Add the individual files.
for ((i = 0; i < ${#targets_[@]}; i++)); do
stripSlash "${targets_[$i]}"
@ -25,9 +22,9 @@ done


# Add the closures of the top-level store objects.
chmod +w .
mkdir -p nix/store
storePaths=$(perl $pathsFromGraph closure-*)
for i in $storePaths; do
for i in $(< $closureInfo/store-paths); do
cp -a "$i" "${i:1}"
done

@ -35,7 +32,7 @@ done
# TODO tar ruxo
# Also include a manifest of the closures in a format suitable for
# nix-store --load-db.
printRegistration=1 perl $pathsFromGraph closure-* > nix-path-registration
cp $closureInfo/registration nix-path-registration

# Add symlinks to the top-level store objects.
for ((n = 0; n < ${#objects[*]}; n++)); do

@ -4,6 +4,7 @@ use strict;
use Thread::Queue;
use XML::Writer;
use Encode qw(decode encode);
use Time::HiRes qw(clock_gettime CLOCK_MONOTONIC);

sub new {
my ($class) = @_;
@ -46,10 +47,12 @@ sub nest {
print STDERR maybePrefix("$msg\n", $attrs);
$self->{log}->startTag("nest");
$self->{log}->dataElement("head", $msg, %{$attrs});
my $now = clock_gettime(CLOCK_MONOTONIC);
$self->drainLogQueue();
eval { &$coderef };
my $res = $@;
$self->drainLogQueue();
$self->log(sprintf("(%.2f seconds)", clock_gettime(CLOCK_MONOTONIC) - $now));
$self->{log}->endTag("nest");
die $@ if $@;
}

@ -10,6 +10,7 @@ use Cwd;
use File::Basename;
use File::Path qw(make_path);
use File::Slurp;
use Time::HiRes qw(clock_gettime CLOCK_MONOTONIC);


my $showGraphics = defined $ENV{'DISPLAY'};
@ -155,10 +156,8 @@ sub start {
$ENV{USE_TMPDIR} = 1;
$ENV{QEMU_OPTS} =
($self->{allowReboot} ? "" : "-no-reboot ") .
"-monitor unix:./monitor " .
"-chardev socket,id=shell,path=./shell -device virtio-serial -device virtconsole,chardev=shell " .
# socket backdoor, see "Debugging NixOS tests" section in NixOS manual
"-chardev socket,id=backdoor,path=./backdoor,server,nowait -device virtio-serial -device virtconsole,chardev=backdoor " .
"-monitor unix:./monitor -chardev socket,id=shell,path=./shell " .
"-device virtio-serial -device virtconsole,chardev=shell " .
"-device virtio-rng-pci " .
($showGraphics ? "-serial stdio" : "-nographic") . " " . ($ENV{QEMU_OPTS} || "");
chdir $self->{stateDir} or die;
@ -249,13 +248,15 @@ sub connect {

$self->start;

my $now = clock_gettime(CLOCK_MONOTONIC);
local $SIG{ALRM} = sub { die "timed out waiting for the VM to connect\n"; };
# 50 minutes -- increased as a test, see #49441
alarm 3000;
alarm 600;
readline $self->{socket} or die "the VM quit before connecting\n";
alarm 0;

$self->log("connected to guest root shell");
# We're interested in tracking how close we are to `alarm`.
$self->log(sprintf("(connecting took %.2f seconds)", clock_gettime(CLOCK_MONOTONIC) - $now));
$self->{connected} = 1;

});

@ -1,6 +1,13 @@
{ system, minimal ? false, config ? {} }:
{ system
, pkgs ? import ../.. { inherit system config; }
# Use a minimal kernel?
, minimal ? false
# Ignored
, config ? null
# Modules to add to each VM
, extraConfigurations ? [] }:

with import ./build-vms.nix { inherit system minimal config; };
with import ./build-vms.nix { inherit system pkgs minimal extraConfigurations; };
with pkgs;

let

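With the new signature, a caller can inject modules into every test VM through extraConfigurations instead of patching each node individually. A minimal sketch of a call site (the relative path and the module body are illustrative, not part of this commit):

  import ./testing.nix {
    system = "x86_64-linux";
    # Applied to every machine in every test, e.g. to open up the firewall for debugging.
    extraConfigurations = [
      { networking.firewall.enable = false; }
    ];
  }
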
@ -27,14 +34,14 @@ in rec {
cp ${./test-driver/test-driver.pl} $out/bin/nixos-test-driver
chmod u+x $out/bin/nixos-test-driver

libDir=$out/lib/perl5/site_perl
libDir=$out/${perl.libPrefix}
mkdir -p $libDir
cp ${./test-driver/Machine.pm} $libDir/Machine.pm
cp ${./test-driver/Logger.pm} $libDir/Logger.pm

wrapProgram $out/bin/nixos-test-driver \
--prefix PATH : "${lib.makeBinPath [ qemu_test vde2 netpbm coreutils ]}" \
--prefix PERL5LIB : "${with perlPackages; lib.makePerlPath [ TermReadLineGnu XMLWriter IOTty FileSlurp ]}:$out/lib/perl5/site_perl"
--prefix PERL5LIB : "${with perlPackages; makePerlPath [ TermReadLineGnu XMLWriter IOTty FileSlurp ]}:$out/${perl.libPrefix}"
'';
};

@ -69,7 +76,7 @@ in rec {
mkdir -p $out/coverage-data
mv $i $out/coverage-data/$(dirname $(dirname $i))
done
''; # */
'';
};


@ -109,7 +116,7 @@ in rec {

vms = map (m: m.config.system.build.vm) (lib.attrValues nodes);

ocrProg = tesseract_4.override { enableLanguages = [ "eng" ]; };
ocrProg = tesseract4.override { enableLanguages = [ "eng" ]; };

imagemagick_tiff = imagemagick_light.override { inherit libtiff; };

@ -149,9 +156,23 @@
test = passMeta (runTests driver);
report = passMeta (releaseTools.gcovReport { coverageRuns = [ test ]; });

in (if makeCoverageReport then report else test) // {
inherit nodes driver test;
};
nodeNames = builtins.attrNames nodes;
invalidNodeNames = lib.filter
(node: builtins.match "^[A-z_][A-z0-9_]+$" node == null) nodeNames;

in
if lib.length invalidNodeNames > 0 then
throw ''
Cannot create machines out of (${lib.concatStringsSep ", " invalidNodeNames})!
All machines are referenced as perl variables in the testing framework which will break the
script when special characters are used.

Please stick to alphanumeric chars and underscores as separation.
''
else
(if makeCoverageReport then report else test) // {
inherit nodes driver test;
};

runInMachine =
{ drv

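The check rejects any machine name that the regex does not match in full, since every node becomes a Perl variable in the generated test script. A quick sketch of how builtins.match behaves for two example names (the names are made up):

  # builtins.match returns null when the whole string does not match.
  builtins.match "^[A-z_][A-z0-9_]+$" "webserver1"   # => [ ]   (accepted node name)
  builtins.match "^[A-z_][A-z0-9_]+$" "web-server"   # => null  (rejected: "-" is not allowed)
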
@ -7,9 +7,8 @@ rec {
|| elem fs.mountPoint [ "/" "/nix" "/nix/store" "/var" "/var/log" "/var/lib" "/etc" ];

# Check whenever `b` depends on `a` as a fileSystem
# FIXME: it's incorrect to simply use hasPrefix here: "/dev/a" is not a parent of "/dev/ab"
fsBefore = a: b: ((any (x: elem x [ "bind" "move" ]) b.options) && (a.mountPoint == b.device))
|| (hasPrefix a.mountPoint b.mountPoint);
fsBefore = a: b: a.mountPoint == b.device
|| hasPrefix "${a.mountPoint}${optionalString (!(hasSuffix "/" a.mountPoint)) "/"}" b.mountPoint;

# Escape a path according to the systemd rules, e.g. /dev/xyzzy
# becomes dev-xyzzy. FIXME: slow.

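Appending a trailing slash before the hasPrefix test is what keeps sibling paths from being mistaken for parents. A small illustration using the same lib functions (the mount points are chosen for the example):

  with (import <nixpkgs> { }).lib;
  let
    # Normalise "/var" to "/var/" before the prefix test, as fsBefore does above.
    isParent = a: b: hasPrefix "${a}${optionalString (!(hasSuffix "/" a)) "/"}" b;
  in
  {
    ex1 = isParent "/var" "/var/log";  # true  -> /var has to be mounted first
    ex2 = isParent "/var" "/var2";     # false -> no ordering constraint
    ex3 = hasPrefix "/var" "/var2";    # true  -> the false positive the change removes
  }
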
Some files were not shown because too many files have changed in this diff.