# nixpkgs/pkgs/stdenv/generic/make-derivation.nix

{ lib, config }:
stdenv:
let
# Lib attributes are inherited into the lexical scope for performance reasons.
inherit (lib)
any
assertMsg
attrNames
boolToString
concatLists
concatMap
concatMapStrings
concatStringsSep
elem
elemAt
extendDerivation
filter
findFirst
getDev
head
imap1
isAttrs
isBool
isDerivation
isInt
isList
isString
mapAttrs
mapNullable
optional
optionalAttrs
optionalString
optionals
remove
splitString
subtractLists
unique
;
inherit (import ../../build-support/lib/cmake.nix { inherit lib stdenv; }) makeCMakeFlags;
inherit (import ../../build-support/lib/meson.nix { inherit lib stdenv; }) makeMesonFlags;
mkDerivation =
fnOrAttrs:
if builtins.isFunction fnOrAttrs
then makeDerivationExtensible fnOrAttrs
else makeDerivationExtensibleConst fnOrAttrs;
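# Illustrative usage (hypothetical package; the source path is made up). Both
# calling conventions are accepted:
#   stdenv.mkDerivation { pname = "hello"; version = "1.0"; src = ./hello.tar.gz; }
#   stdenv.mkDerivation (finalAttrs: {
#     pname = "hello";
#     version = "1.0";
#     src = ./hello.tar.gz;
#     passthru.tests.version = finalAttrs.version;
#   })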
checkMeta = import ./check-meta.nix {
inherit lib config;
# Nix itself uses the `system` field of a derivation to decide where
# to build it. This is a bit confusing for cross compilation.
inherit (stdenv) hostPlatform;
};
# Based off lib.makeExtensible, with modifications:
makeDerivationExtensible = rattrs:
let
# NOTE: The following is a hint that will be printed by the Nix cli when
# encountering an infinite recursion. It must not be formatted into
# separate lines, because Nix would only show the last line of the comment.
# An infinite recursion here can be caused by having the attribute names of expression `e` in `.overrideAttrs(finalAttrs: previousAttrs: e)` depend on `finalAttrs`. Only the attribute values of `e` can depend on `finalAttrs`.
args = rattrs (args // { inherit finalPackage overrideAttrs; });
# ^^^^
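# Illustrative sketch of that failure mode (hypothetical attribute names):
#   pkg.overrideAttrs (finalAttrs: prev: { "${finalAttrs.pname}-doc" = true; })   # infinite recursion: an attribute *name* depends on finalAttrs
#   pkg.overrideAttrs (finalAttrs: prev: { passthru.label = finalAttrs.pname; })  # fine: only an attribute *value* depends on finalAttrs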
overrideAttrs = f0:
let
f = self: super:
# Convert f0 to an overlay. Legacy is:
# overrideAttrs (super: {})
# We want to introduce self. We follow the convention of overlays:
# overrideAttrs (self: super: {})
# Which means the first parameter can be either self or super.
# This is surprising, but far better than the confusion that would
# arise from flipping an overlay's parameters in some cases.
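# Both spellings therefore work (illustrative; `pkg` is a hypothetical package):
#   pkg.overrideAttrs (previousAttrs: { doCheck = false; })
#   pkg.overrideAttrs (finalAttrs: previousAttrs: { doCheck = false; })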
let x = f0 super;
in
if builtins.isFunction x
then
# Can't reuse `x`, because `self` comes first.
# Looks inefficient, but `f0 super` was a cheap thunk.
f0 self super
else x;
in
makeDerivationExtensible
(self: let super = rattrs self; in super // (if builtins.isFunction f0 || f0?__functor then f self super else f0));
finalPackage =
mkDerivationSimple overrideAttrs args;
in finalPackage;
#makeDerivationExtensibleConst = attrs: makeDerivationExtensible (_: attrs);
# but pre-evaluated for a slight improvement in performance.
makeDerivationExtensibleConst = attrs:
mkDerivationSimple
(f0:
let
f = self: super:
let x = f0 super;
in
if builtins.isFunction x
then
f0 self super
else x;
in
makeDerivationExtensible (self: attrs // (if builtins.isFunction f0 || f0?__functor then f self attrs else f0)))
attrs;
makeDerivationArgument =
# `mkDerivation` wraps the builtin `derivation` function to
# produce derivations that use this stdenv and its shell.
#
# See also:
#
# * https://nixos.org/nixpkgs/manual/#sec-using-stdenv
# Details on how to use this mkDerivation function
#
# * https://nixos.org/manual/nix/stable/expressions/derivations.html#derivations
# Explanation about derivations in general
{
# These types of dependencies are all exhaustively documented in
# the "Specifying Dependencies" section of the "Standard
# Environment" chapter of the Nixpkgs manual.
# TODO(@Ericson2314): Stop using legacy dep attribute names
# host offset -> target offset
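# Rough illustration (hypothetical packages, not a prescription):
#   nativeBuildInputs = [ cmake pkg-config ];  # tools that run on the build machine
#   buildInputs       = [ openssl zlib ];      # libraries used by the host output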
depsBuildBuild ? [] # -1 -> -1
, depsBuildBuildPropagated ? [] # -1 -> -1
, nativeBuildInputs ? [] # -1 -> 0 N.B. Legacy name
, propagatedNativeBuildInputs ? [] # -1 -> 0 N.B. Legacy name
, depsBuildTarget ? [] # -1 -> 1
, depsBuildTargetPropagated ? [] # -1 -> 1
, depsHostHost ? [] # 0 -> 0
, depsHostHostPropagated ? [] # 0 -> 0
, buildInputs ? [] # 0 -> 1 N.B. Legacy name
, propagatedBuildInputs ? [] # 0 -> 1 N.B. Legacy name
, depsTargetTarget ? [] # 1 -> 1
, depsTargetTargetPropagated ? [] # 1 -> 1
, checkInputs ? []
, installCheckInputs ? []
, nativeCheckInputs ? []
, nativeInstallCheckInputs ? []
# Configure Phase
, configureFlags ? []
, # Target is not included by default because most programs don't care.
# Including it then would cause needless mass rebuilds.
#
# TODO(@Ericson2314): Make [ "build" "host" ] always the default / resolve #87909
configurePlatforms ? optionals
(stdenv.hostPlatform != stdenv.buildPlatform || config.configurePlatformsByDefault)
[ "build" "host" ]
# TODO(@Ericson2314): Make unconditional / resolve #33599
# Check phase
, doCheck ? config.doCheckByDefault or false
# TODO(@Ericson2314): Make unconditional / resolve #33599
# InstallCheck phase
, doInstallCheck ? config.doCheckByDefault or false
, # TODO(@Ericson2314): Make always true and remove / resolve #178468
strictDeps ? if config.strictDepsByDefault then true else stdenv.hostPlatform != stdenv.buildPlatform
, enableParallelBuilding ? config.enableParallelBuildingByDefault
, separateDebugInfo ? false
, outputs ? [ "out" ]
, __darwinAllowLocalNetworking ? false
, __impureHostDeps ? []
, __propagatedImpureHostDeps ? []
, sandboxProfile ? ""
, propagatedSandboxProfile ? ""
, hardeningEnable ? []
, hardeningDisable ? []
, patches ? []
, __contentAddressed ?
(! attrs ? outputHash) # Fixed-output derivations can't also be content-addressed
&& config.contentAddressedByDefault
# Experimental. For simple packages mostly just works,
# but for anything complex, be prepared to debug if enabling.
, __structuredAttrs ? config.structuredAttrsByDefault or false
, ... } @ attrs:
# Policy on acceptable hash types in nixpkgs
assert attrs ? outputHash -> (
let algo =
attrs.outputHashAlgo or (head (splitString "-" attrs.outputHash));
in
if algo == "md5" then
throw "Rejected insecure ${algo} hash '${attrs.outputHash}'"
else
true
);
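# For illustration (made-up hash values): `outputHash = "sha256-AAAA..."` with no
# `outputHashAlgo` yields algo "sha256" via `head (splitString "-" ...)` and is
# accepted, while `outputHashAlgo = "md5"` (or an "md5-..." hash) is rejected.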
let
# TODO(@oxij, @Ericson2314): This is here to keep the old semantics, remove when
# no package has `doCheck = true`.
doCheck' = doCheck && stdenv.buildPlatform.canExecute stdenv.hostPlatform;
doInstallCheck' = doInstallCheck && stdenv.buildPlatform.canExecute stdenv.hostPlatform;
separateDebugInfo' = separateDebugInfo && stdenv.hostPlatform.isLinux;
outputs' = outputs ++ optional separateDebugInfo' "debug";
# Turn a derivation into its outPath without a string context attached.
# See the comment at the usage site.
unsafeDerivationToUntrackedOutpath = drv:
if isDerivation drv
then builtins.unsafeDiscardStringContext drv.outPath
else drv;
noNonNativeDeps = builtins.length (depsBuildTarget ++ depsBuildTargetPropagated
++ depsHostHost ++ depsHostHostPropagated
++ buildInputs ++ propagatedBuildInputs
++ depsTargetTarget ++ depsTargetTargetPropagated) == 0;
dontAddHostSuffix = attrs ? outputHash && !noNonNativeDeps || !stdenv.hasCC;
hardeningDisable' = if any (x: x == "fortify") hardeningDisable
# disabling fortify implies fortify3 should also be disabled
then unique (hardeningDisable ++ [ "fortify3" ])
else hardeningDisable;
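# e.g. hardeningDisable = [ "fortify" ] effectively becomes [ "fortify" "fortify3" ] here.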
knownHardeningFlags = [
"bindnow"
"format"
"fortify"
"fortify3"
"pic"
"pie"
"relro"
"stackprotector"
"strictoverflow"
"trivialautovarinit"
"zerocallusedregs"
];
defaultHardeningFlags =
(if stdenv.hasCC then stdenv.cc else {}).defaultHardeningFlags or
# fallback safe-ish set of flags
(remove "pie" knownHardeningFlags);
enabledHardeningOptions =
if builtins.elem "all" hardeningDisable'
then []
else subtractLists hardeningDisable' (defaultHardeningFlags ++ hardeningEnable);
# hardeningDisable additionally supports "all".
erroneousHardeningFlags = subtractLists knownHardeningFlags (hardeningEnable ++ remove "all" hardeningDisable);
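# Worked example (hypothetical inputs): with hardeningEnable = [ "pie" ] and
# hardeningDisable = [ "fortify" ], enabledHardeningOptions is
# defaultHardeningFlags plus "pie" minus "fortify" and "fortify3";
# hardeningDisable = [ "all" ] empties it, and an unknown name such as "nx"
# ends up in erroneousHardeningFlags and aborts the evaluation below.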
checkDependencyList = checkDependencyList' [];
checkDependencyList' = positions: name: deps:
imap1
(index: dep:
if isDerivation dep || dep == null || builtins.isString dep || builtins.isPath dep then dep
else if isList dep then checkDependencyList' ([index] ++ positions) name dep
else throw "Dependency is not of a valid type: ${concatMapStrings (ix: "element ${toString ix} of ") ([index] ++ positions)}${name} for ${attrs.name or attrs.pname}")
deps;
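# Illustrative failure (hypothetical values): buildInputs = [ openssl { } ]
# throws "Dependency is not of a valid type: element 2 of buildInputs for <name>",
# while derivations, nulls, strings, paths and (nested) lists of them pass through.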
in if builtins.length erroneousHardeningFlags != 0
then abort ("mkDerivation was called with unsupported hardening flags: " + lib.generators.toPretty {} {
inherit erroneousHardeningFlags hardeningDisable hardeningEnable knownHardeningFlags;
})
else let
doCheck = doCheck';
doInstallCheck = doInstallCheck';
buildInputs' = buildInputs
++ optionals doCheck checkInputs
++ optionals doInstallCheck installCheckInputs;
nativeBuildInputs' = nativeBuildInputs
++ optional separateDebugInfo' ../../build-support/setup-hooks/separate-debug-info.sh
++ optional stdenv.hostPlatform.isWindows ../../build-support/setup-hooks/win-dll-link.sh
++ optionals doCheck nativeCheckInputs
++ optionals doInstallCheck nativeInstallCheckInputs;
outputs = outputs';
dependencies = [
[
(map (drv: getDev drv.__spliced.buildBuild or drv) (checkDependencyList "depsBuildBuild" depsBuildBuild))
(map (drv: getDev drv.__spliced.buildHost or drv) (checkDependencyList "nativeBuildInputs" nativeBuildInputs'))
(map (drv: getDev drv.__spliced.buildTarget or drv) (checkDependencyList "depsBuildTarget" depsBuildTarget))
]
[
(map (drv: getDev drv.__spliced.hostHost or drv) (checkDependencyList "depsHostHost" depsHostHost))
(map (drv: getDev drv.__spliced.hostTarget or drv) (checkDependencyList "buildInputs" buildInputs'))
]
[
(map (drv: getDev drv.__spliced.targetTarget or drv) (checkDependencyList "depsTargetTarget" depsTargetTarget))
]
];
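# The nested lists above are consumed positionally by the elemAt calls in
# derivationArg below, e.g. elemAt (elemAt dependencies 0) 1 is nativeBuildInputs.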
propagatedDependencies = [
[
(map (drv: getDev drv.__spliced.buildBuild or drv) (checkDependencyList "depsBuildBuildPropagated" depsBuildBuildPropagated))
(map (drv: getDev drv.__spliced.buildHost or drv) (checkDependencyList "propagatedNativeBuildInputs" propagatedNativeBuildInputs))
(map (drv: getDev drv.__spliced.buildTarget or drv) (checkDependencyList "depsBuildTargetPropagated" depsBuildTargetPropagated))
]
[
(map (drv: getDev drv.__spliced.hostHost or drv) (checkDependencyList "depsHostHostPropagated" depsHostHostPropagated))
(map (drv: getDev drv.__spliced.hostTarget or drv) (checkDependencyList "propagatedBuildInputs" propagatedBuildInputs))
]
[
(map (drv: getDev drv.__spliced.targetTarget or drv) (checkDependencyList "depsTargetTargetPropagated" depsTargetTargetPropagated))
]
];
derivationArg =
removeAttrs attrs [
"checkInputs" "installCheckInputs"
"nativeCheckInputs" "nativeInstallCheckInputs"
"__contentAddressed"
"__darwinAllowLocalNetworking"
"__impureHostDeps" "__propagatedImpureHostDeps"
"sandboxProfile" "propagatedSandboxProfile"]
// (optionalAttrs (attrs ? name || (attrs ? pname && attrs ? version)) {
name =
let
# Indicate the host platform of the derivation if cross compiling.
# Fixed-output derivations like source tarballs shouldn't get a host
# suffix. But we have some weird ones with run-time deps that are
# just used for their side-effects. Those might as well get one, since the
# hash can't be the same. See #32986.
hostSuffix = optionalString
(stdenv.hostPlatform != stdenv.buildPlatform && !dontAddHostSuffix)
"-${stdenv.hostPlatform.config}";
# Disambiguate statically built packages. This was originally
# introduced as a means to prevent nix-env from getting confused
# between nix and nixStatic. The same could be achieved by moving the
# hostSuffix before the version, so we could contemplate removing
# it again.
staticMarker = optionalString stdenv.hostPlatform.isStatic "-static";
in
lib.strings.sanitizeDerivationName (
if attrs ? name
then attrs.name + hostSuffix
else
# we cannot coerce null to a string below
assert assertMsg (attrs ? version && attrs.version != null) "The `version` attribute is required and cannot be null.";
"${attrs.pname}${staticMarker}${hostSuffix}-${attrs.version}"
);
}) // {
builder = attrs.realBuilder or stdenv.shell;
args = attrs.args or ["-e" (attrs.builder or ./default-builder.sh)];
inherit stdenv;
# The `system` attribute of a derivation has special meaning to Nix.
# Derivations set it to choose what sort of machine could be used to
# execute the build. The build platform entirely determines this,
# indeed more finely than Nix knows or cares about. The `system`
# attribute of `buildPlatform` matches Nix's degree of specificity
# exactly.
inherit (stdenv.buildPlatform) system;
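# For illustration (hypothetical platforms, not taken from this file): on a
# native x86_64-linux build, and equally when cross-compiling from x86_64-linux
# to aarch64-linux, this yields
#   system = "x86_64-linux";
# because only the build platform decides where the build may run.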
userHook = config.stdenv.userHook or null;
__ignoreNulls = true;
inherit __structuredAttrs strictDeps;
depsBuildBuild = elemAt (elemAt dependencies 0) 0;
nativeBuildInputs = elemAt (elemAt dependencies 0) 1;
depsBuildTarget = elemAt (elemAt dependencies 0) 2;
depsHostHost = elemAt (elemAt dependencies 1) 0;
buildInputs = elemAt (elemAt dependencies 1) 1;
depsTargetTarget = elemAt (elemAt dependencies 2) 0;
depsBuildBuildPropagated = elemAt (elemAt propagatedDependencies 0) 0;
propagatedNativeBuildInputs = elemAt (elemAt propagatedDependencies 0) 1;
depsBuildTargetPropagated = elemAt (elemAt propagatedDependencies 0) 2;
depsHostHostPropagated = elemAt (elemAt propagatedDependencies 1) 0;
propagatedBuildInputs = elemAt (elemAt propagatedDependencies 1) 1;
depsTargetTargetPropagated = elemAt (elemAt propagatedDependencies 2) 0;
# This parameter is sometimes a string, sometimes null, and sometimes a list, yuck
configureFlags =
configureFlags
++ optional (elem "build" configurePlatforms) "--build=${stdenv.buildPlatform.config}"
++ optional (elem "host" configurePlatforms) "--host=${stdenv.hostPlatform.config}"
++ optional (elem "target" configurePlatforms) "--target=${stdenv.targetPlatform.config}";
inherit patches;
inherit doCheck doInstallCheck;
inherit outputs;
} // optionalAttrs (__contentAddressed) {
inherit __contentAddressed;
# Provide default values for outputHashMode and outputHashAlgo because
# most people won't care about these anyway
outputHashAlgo = attrs.outputHashAlgo or "sha256";
outputHashMode = attrs.outputHashMode or "recursive";
} // optionalAttrs (enableParallelBuilding) {
inherit enableParallelBuilding;
enableParallelChecking = attrs.enableParallelChecking or true;
enableParallelInstalling = attrs.enableParallelInstalling or true;
} // optionalAttrs (hardeningDisable != [] || hardeningEnable != [] || stdenv.hostPlatform.isMusl) {
NIX_HARDENING_ENABLE = enabledHardeningOptions;
} // optionalAttrs (stdenv.hostPlatform.isx86_64 && stdenv.hostPlatform ? gcc.arch) {
requiredSystemFeatures = attrs.requiredSystemFeatures or [] ++ [ "gccarch-${stdenv.hostPlatform.gcc.arch}" ];
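# For example (hypothetical value): if stdenv.hostPlatform.gcc.arch is
# "skylake", the derivation additionally requires the system feature
# "gccarch-skylake", so it is only scheduled on builders that advertise
# that feature.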
} // optionalAttrs (stdenv.buildPlatform.isDarwin) (
let
computedSandboxProfile =
concatMap (input: input.__propagatedSandboxProfile or [])
(stdenv.extraNativeBuildInputs
++ stdenv.extraBuildInputs
++ concatLists dependencies);
computedPropagatedSandboxProfile =
concatMap (input: input.__propagatedSandboxProfile or [])
(concatLists propagatedDependencies);
computedImpureHostDeps =
unique (concatMap (input: input.__propagatedImpureHostDeps or [])
(stdenv.extraNativeBuildInputs
++ stdenv.extraBuildInputs
++ concatLists dependencies));
computedPropagatedImpureHostDeps =
unique (concatMap (input: input.__propagatedImpureHostDeps or [])
(concatLists propagatedDependencies));
in {
inherit __darwinAllowLocalNetworking;
# TODO: remove `unique` once nix has a list canonicalization primitive
__sandboxProfile =
let profiles = [ stdenv.extraSandboxProfile ] ++ computedSandboxProfile ++ computedPropagatedSandboxProfile ++ [ propagatedSandboxProfile sandboxProfile ];
final = concatStringsSep "\n" (filter (x: x != "") (unique profiles));
in final;
__propagatedSandboxProfile = unique (computedPropagatedSandboxProfile ++ [ propagatedSandboxProfile ]);
__impureHostDeps = computedImpureHostDeps ++ computedPropagatedImpureHostDeps ++ __propagatedImpureHostDeps ++ __impureHostDeps ++ stdenv.__extraImpureHostDeps ++ [
"/dev/zero"
"/dev/random"
"/dev/urandom"
"/bin/sh"
];
__propagatedImpureHostDeps = computedPropagatedImpureHostDeps ++ __propagatedImpureHostDeps;
}) //
# If we use derivations directly here, they end up as build-time dependencies.
# This is especially problematic in the case of disallowed*, since the disallowed
# derivations will be built by nix as build-time dependencies, while those
# derivations might take a very long time to build, or might not even build
# successfully on the platform used.
# We can improve on this situation by instead passing only the outPath,
# without an attached string context, to nix. The out path will be a placeholder
# which will be replaced by the actual out path if the derivation in question
# is part of the final closure (and thus needs to be built). If it is not
# part of the final closure, the placeholder is passed along unchanged, which
# is harmless precisely because the derivation is then not part of the closure.
# This means that passing the out path to nix does the right thing in either
# case, both for disallowed and allowed references/requisites, and we won't
# build the derivation if it wouldn't be part of the closure, saving time and resources.
# While the problem is less severe for allowed*, since we want the derivation
# to be built eventually, we would still like to get the error early and without
# having to wait while nix builds a derivation that might not be used.
# See also https://github.com/NixOS/nix/issues/4629
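# A minimal sketch of the idea (illustration only; the actual helper is
# defined earlier in this file):
#   unsafeDerivationToUntrackedOutpath = drv:
#     if isDerivation drv
#     then builtins.unsafeDiscardStringContext drv.outPath
#     else drv;
# Discarding the string context is what keeps the referenced derivation out
# of this derivation's build-time dependencies.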
optionalAttrs (attrs ? disallowedReferences) {
disallowedReferences =
map unsafeDerivationToUntrackedOutpath attrs.disallowedReferences;
} //
optionalAttrs (attrs ? disallowedRequisites) {
disallowedRequisites =
map unsafeDerivationToUntrackedOutpath attrs.disallowedRequisites;
} //
optionalAttrs (attrs ? allowedReferences) {
allowedReferences =
mapNullable unsafeDerivationToUntrackedOutpath attrs.allowedReferences;
} //
optionalAttrs (attrs ? allowedRequisites) {
allowedRequisites =
mapNullable unsafeDerivationToUntrackedOutpath attrs.allowedRequisites;
};
in
derivationArg;
mkDerivationSimple = overrideAttrs:
# `mkDerivation` wraps the builtin `derivation` function to
# produce derivations that use this stdenv and its shell.
#
# See also:
#
# * https://nixos.org/nixpkgs/manual/#sec-using-stdenv
# Details on how to use this mkDerivation function
#
# * https://nixos.org/manual/nix/stable/expressions/derivations.html#derivations
# Explanation about derivations in general
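#
# A minimal usage sketch (hypothetical package values, for orientation only):
#   stdenv.mkDerivation {
#     pname = "hello";
#     version = "2.12";
#     src = fetchurl {
#       url = "mirror://gnu/hello/hello-2.12.tar.gz";
#       hash = "sha256-...";
#     };
#   }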
{
# Configure Phase
cmakeFlags ? []
, mesonFlags ? []
, meta ? {}
, passthru ? {}
, pos ? # position used in error messages and for meta.position
(if attrs.meta.description or null != null
then builtins.unsafeGetAttrPos "description" attrs.meta
else if attrs.version or null != null
then builtins.unsafeGetAttrPos "version" attrs
else builtins.unsafeGetAttrPos "name" attrs)
# Experimental. For simple packages this mostly just works,
# but for anything complex, be prepared to debug if you enable it.
, __structuredAttrs ? config.structuredAttrsByDefault or false
, env ? { }
, ... } @ attrs:
# Policy on acceptable hash types in nixpkgs
assert attrs ? outputHash -> (
let algo =
attrs.outputHashAlgo or (head (splitString "-" attrs.outputHash));
in
if algo == "md5" then
throw "Rejected insecure ${algo} hash '${attrs.outputHash}'"
else
true
);
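# Concretely (illustrative hashes): a fixed-output derivation declaring
#   outputHash = "sha256-<base64 digest>";
# passes this check because the algorithm is read from the SRI prefix, whereas
#   outputHashAlgo = "md5";
# (or an "md5-..." SRI hash) is rejected with the error above.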
let
envIsExportable = isAttrs env && !isDerivation env;
derivationArg = makeDerivationArgument
(removeAttrs
attrs
(["meta" "passthru" "pos"]
++ optional (__structuredAttrs || envIsExportable) "env"
)
// optionalAttrs __structuredAttrs { env = checkedEnv; }
// {
cmakeFlags = makeCMakeFlags attrs;
mesonFlags = makeMesonFlags attrs;
});
meta = checkMeta.commonMeta {
inherit validity attrs pos;
references = attrs.nativeBuildInputs ++ attrs.buildInputs
++ attrs.propagatedNativeBuildInputs ++ attrs.propagatedBuildInputs;
};
validity = checkMeta.assertValidity { inherit meta attrs; };
checkedEnv =
let
overlappingNames = attrNames (builtins.intersectAttrs env derivationArg);
in
assert assertMsg envIsExportable
"When using structured attributes, `env` must be an attribute set of environment variables.";
assert assertMsg (overlappingNames == [ ])
"The env attribute set cannot contain any attributes passed to derivation. The following attributes are overlapping: ${concatStringsSep ", " overlappingNames}";
mapAttrs
(n: v: assert assertMsg (isString v || isBool v || isInt v || isDerivation v)
"The env attribute set can only contain derivation, string, boolean or integer attributes. The ${n} attribute is of type ${builtins.typeOf v}."; v)
env;
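# For illustration (hypothetical variables):
#   env = { LANG = "C.UTF-8"; NIX_DEBUG = 1; };
# is accepted, while env = { buildInputs = [ ]; } trips the overlap check
# above, and env = { foo = [ "a" ]; } fails the type check because lists are
# not allowed as env values.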
in
extendDerivation
validity.handled
({
# A derivation that always builds successfully and whose runtime
# dependencies are the original derivation's build-time dependencies.
# This allows easily building and distributing all derivations
# needed to enter a nix-shell with
# nix-build shell.nix -A inputDerivation
inputDerivation = derivation (derivationArg // {
# Add a name in case the original drv didn't have one
name = derivationArg.name or "inputDerivation";
# This always has only one output
outputs = [ "out" ];
# Propagate the original builder and arguments, since we override
# them and they might contain references to build inputs
_derivation_original_builder = derivationArg.builder;
_derivation_original_args = derivationArg.args;
builder = stdenv.shell;
# The bash builtin `export` dumps all current environment variables,
# which is where all build input references end up (e.g. $PATH for
# binaries). By writing this to $out, Nix can find and register
# them as runtime dependencies (since Nix greps for store paths
# through $out to find them)
args = [ "-c" ''
export > $out
for var in $passAsFile; do
pathVar="''${var}Path"
printf "%s" "$(< "''${!pathVar}")" >> $out
done
'' ];
# inputDerivation produces the inputs, not the outputs, so any
# restrictions on what used to be the outputs don't serve a purpose
# anymore.
allowedReferences = null;
allowedRequisites = null;
disallowedReferences = [ ];
disallowedRequisites = [ ];
});
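# Usage sketch (hypothetical host name): building this output and copying its
# closure, e.g.
#   nix-build shell.nix -A inputDerivation
#   nix-copy-closure --to builder.example.org "$(readlink ./result)"
# in effect ships the build-time dependencies of the original derivation to
# another machine.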
inherit passthru overrideAttrs;
inherit meta;
} //
# Pass through extra attributes that are not inputs, but
# should be made available to Nix expressions using the
# derivation (e.g., in assertions).
passthru)
(derivation (derivationArg // optionalAttrs envIsExportable checkedEnv));
in
{
inherit mkDerivation;
}