mirror of
https://github.com/NixOS/nixpkgs.git
synced 2024-11-21 22:43:01 +00:00
Merge master into haskell-updates
This commit is contained in:
commit
14ba90a8e8
5
.github/CODEOWNERS
vendored
5
.github/CODEOWNERS
vendored
@ -387,3 +387,8 @@ pkgs/by-name/lx/lxc* @adamcstephens
|
||||
/pkgs/os-specific/linux/checkpolicy @RossComputerGuy
|
||||
/pkgs/os-specific/linux/libselinux @RossComputerGuy
|
||||
/pkgs/os-specific/linux/libsepol @RossComputerGuy
|
||||
|
||||
# installShellFiles
|
||||
/pkgs/by-name/in/installShellFiles/* @Ericson2314
|
||||
/pkgs/test/install-shell-files/* @Ericson2314
|
||||
/doc/hooks/installShellFiles.section.md @Ericson2314
|
||||
|
@ -354,7 +354,7 @@ The following paragraphs about how to deal with unactive contributors is just a
|
||||
Please note that contributors with commit rights who have been inactive for more than three months will have their commit rights revoked.
|
||||
-->
|
||||
|
||||
Please see the discussion in [GitHub nixpkgs issue #50105](https://github.com/NixOS/nixpkgs/issues/50105) for information on how to proceed to be granted this level of access.
|
||||
Please see the discussion in [GitHub nixpkgs issue #321665](https://github.com/NixOS/nixpkgs/issues/321665) for information on how to proceed to be granted this level of access.
|
||||
|
||||
In case a contributor definitively leaves the Nix community, they should create an issue or post on [Discourse](https://discourse.nixos.org) with references to the packages and modules they maintain so the maintainership can be taken over by other contributors.
|
||||
|
||||
|
@ -157,6 +157,12 @@ Here are security considerations for this scenario:
|
||||
|
||||
In more concrete terms, if you use any other hash, the [`--insecure` flag](https://curl.se/docs/manpage.html#-k) will be passed to the underlying call to `curl` when downloading content.
|
||||
|
||||
## Proxy usage {#sec-pkgs-fetchers-proxy}
|
||||
|
||||
Nixpkgs fetchers can make use of a http(s) proxy. Each fetcher will automatically inherit proxy-related environment variables (`http_proxy`, `https_proxy`, etc) via [impureEnvVars](https://nixos.org/manual/nix/stable/language/advanced-attributes#adv-attr-impureEnvVars).
|
||||
|
||||
The environment variable `NIX_SSL_CERT_FILE` is also inherited in fetchers, and can be used to provide a custom certificate bundle to fetchers. This is usually required for a https proxy to work without certificate validation errors.
|
||||
|
||||
[]{#fetchurl}
|
||||
## `fetchurl` {#sec-pkgs-fetchers-fetchurl}
|
||||
|
||||
|
@ -1,16 +1,79 @@
|
||||
# `installShellFiles` {#installshellfiles}
|
||||
|
||||
This hook helps with installing manpages and shell completion files. It exposes 2 shell functions `installManPage` and `installShellCompletion` that can be used from your `postInstall` hook.
|
||||
This hook adds helpers that install artifacts like executable files, manpages
|
||||
and shell completions.
|
||||
|
||||
The `installManPage` function takes one or more paths to manpages to install. The manpages must have a section suffix, and may optionally be compressed (with `.gz` suffix). This function will place them into the correct `share/man/man<section>/` directory, in [`outputMan`](#outputman).
|
||||
It exposes the following functions that can be used from your `postInstall`
|
||||
hook:
|
||||
|
||||
The `installShellCompletion` function takes one or more paths to shell completion files. By default it will autodetect the shell type from the completion file extension, but you may also specify it by passing one of `--bash`, `--fish`, or `--zsh`. These flags apply to all paths listed after them (up until another shell flag is given). Each path may also have a custom installation name provided by providing a flag `--name NAME` before the path. If this flag is not provided, zsh completions will be renamed automatically such that `foobar.zsh` becomes `_foobar`. A root name may be provided for all paths using the flag `--cmd NAME`; this synthesizes the appropriate name depending on the shell (e.g. `--cmd foo` will synthesize the name `foo.bash` for bash and `_foo` for zsh).
|
||||
## `installBin` {#installshellfiles-installbin}
|
||||
|
||||
The `installBin` function takes one or more paths to files to install as
|
||||
executable files.
|
||||
|
||||
This function will place them into [`outputBin`](#outputbin).
|
||||
|
||||
### Example Usage {#installshellfiles-installbin-exampleusage}
|
||||
|
||||
```nix
|
||||
{
|
||||
nativeBuildInputs = [ installShellFiles ];
|
||||
|
||||
# Sometimes the file has an undesirable name. It should be renamed before
|
||||
# being installed via installBin
|
||||
postInstall = ''
|
||||
mv a.out delmar
|
||||
installBin foobar delmar
|
||||
'';
|
||||
}
|
||||
```
|
||||
|
||||
## `installManPage` {#installshellfiles-installmanpage}
|
||||
|
||||
The `installManPage` function takes one or more paths to manpages to install.
|
||||
|
||||
The manpages must have a section suffix, and may optionally be compressed (with
|
||||
`.gz` suffix). This function will place them into the correct
|
||||
`share/man/man<section>/` directory in [`outputMan`](#outputman).
|
||||
|
||||
### Example Usage {#installshellfiles-installmanpage-exampleusage}
|
||||
|
||||
```nix
|
||||
{
|
||||
nativeBuildInputs = [ installShellFiles ];
|
||||
|
||||
# Sometimes the manpage file has an undesirable name; e.g. it conflicts with
|
||||
# other software of the same name. It should be renamed before being
|
||||
# installed via installManPage
|
||||
postInstall = ''
|
||||
mv fromsea.3 delmar.3
|
||||
installManPage foobar.1 delmar.3
|
||||
'';
|
||||
}
|
||||
```
|
||||
|
||||
## `installShellCompletion` {#installshellfiles-installshellcompletion}
|
||||
|
||||
The `installShellCompletion` function takes one or more paths to shell
|
||||
completion files.
|
||||
|
||||
By default it will autodetect the shell type from the completion file extension,
|
||||
but you may also specify it by passing one of `--bash`, `--fish`, or
|
||||
`--zsh`. These flags apply to all paths listed after them (up until another
|
||||
shell flag is given). Each path may also have a custom installation name
|
||||
provided by providing a flag `--name NAME` before the path. If this flag is not
|
||||
provided, zsh completions will be renamed automatically such that `foobar.zsh`
|
||||
becomes `_foobar`. A root name may be provided for all paths using the flag
|
||||
`--cmd NAME`; this synthesizes the appropriate name depending on the shell
|
||||
(e.g. `--cmd foo` will synthesize the name `foo.bash` for bash and `_foo` for
|
||||
zsh).
|
||||
|
||||
### Example Usage {#installshellfiles-installshellcompletion-exampleusage}
|
||||
|
||||
```nix
|
||||
{
|
||||
nativeBuildInputs = [ installShellFiles ];
|
||||
postInstall = ''
|
||||
installManPage doc/foobar.1 doc/barfoo.3
|
||||
# explicit behavior
|
||||
installShellCompletion --bash --name foobar.bash share/completions.bash
|
||||
installShellCompletion --fish --name foobar.fish share/completions.fish
|
||||
@ -21,9 +84,17 @@ The `installShellCompletion` function takes one or more paths to shell completio
|
||||
}
|
||||
```
|
||||
|
||||
The path may also be a fifo or named fd (such as produced by `<(cmd)`), in which case the shell and name must be provided (see below).
|
||||
The path may also be a fifo or named fd (such as produced by `<(cmd)`), in which
|
||||
case the shell and name must be provided (see below).
|
||||
|
||||
If the destination shell completion file is not actually present or consists of zero bytes after calling `installShellCompletion` this is treated as a build failure. In particular, if completion files are not vendored but are generated by running an executable, this is likely to fail in cross compilation scenarios. The result will be a zero byte completion file and hence a build failure. To prevent this, guard the completion commands against this, e.g.
|
||||
If the destination shell completion file is not actually present or consists of
|
||||
zero bytes after calling `installShellCompletion` this is treated as a build
|
||||
failure. In particular, if completion files are not vendored but are generated
|
||||
by running an executable, this is likely to fail in cross compilation
|
||||
scenarios. The result will be a zero byte completion file and hence a build
|
||||
failure. To prevent this, guard the completion generation commands.
|
||||
|
||||
### Example Usage {#installshellfiles-installshellcompletion-exampleusage-guarded}
|
||||
|
||||
```nix
|
||||
{
|
||||
|
@ -154,7 +154,7 @@ let
|
||||
defaultGemConfig = pkgs.defaultGemConfig // {
|
||||
pg = attrs: {
|
||||
buildFlags =
|
||||
[ "--with-pg-config=${pkgs."postgresql_${pg_version}"}/bin/pg_config" ];
|
||||
[ "--with-pg-config=${lib.getDev pkgs."postgresql_${pg_version}"}/bin/pg_config" ];
|
||||
};
|
||||
};
|
||||
};
|
||||
@ -172,7 +172,7 @@ let
|
||||
gemConfig = pkgs.defaultGemConfig // {
|
||||
pg = attrs: {
|
||||
buildFlags =
|
||||
[ "--with-pg-config=${pkgs."postgresql_${pg_version}"}/bin/pg_config" ];
|
||||
[ "--with-pg-config=${lib.getDev pkgs."postgresql_${pg_version}"}/bin/pg_config" ];
|
||||
};
|
||||
};
|
||||
};
|
||||
@ -190,9 +190,7 @@ let
|
||||
defaultGemConfig = super.defaultGemConfig // {
|
||||
pg = attrs: {
|
||||
buildFlags = [
|
||||
"--with-pg-config=${
|
||||
pkgs."postgresql_${pg_version}"
|
||||
}/bin/pg_config"
|
||||
"--with-pg-config=${lib.getDev pkgs."postgresql_${pg_version}"}/bin/pg_config"
|
||||
];
|
||||
};
|
||||
};
|
||||
|
@ -9,6 +9,9 @@
|
||||
# by definition pure.
|
||||
"http_proxy" "https_proxy" "ftp_proxy" "all_proxy" "no_proxy"
|
||||
"HTTP_PROXY" "HTTPS_PROXY" "FTP_PROXY" "ALL_PROXY" "NO_PROXY"
|
||||
|
||||
# https proxies typically need to inject custom root CAs too
|
||||
"NIX_SSL_CERT_FILE"
|
||||
];
|
||||
|
||||
}
|
||||
|
@ -298,8 +298,11 @@ let
|
||||
};
|
||||
wine = (pkgs.winePackagesFor "wine${toString final.parsed.cpu.bits}").minimal;
|
||||
in
|
||||
# Note: we guarantee that the return value is either `null` or a path
|
||||
# to an emulator program. That is, if an emulator requires additional
|
||||
# arguments, a wrapper should be used.
|
||||
if pkgs.stdenv.hostPlatform.canExecute final
|
||||
then "${pkgs.runtimeShell} -c '\"$@\"' --"
|
||||
then "${pkgs.execline}/bin/exec"
|
||||
else if final.isWindows
|
||||
then "${wine}/bin/wine${optionalString (final.parsed.cpu.bits == 64) "64"}"
|
||||
else if final.isLinux && pkgs.stdenv.hostPlatform.isLinux && final.qemuArch != null
|
||||
|
@ -8604,6 +8604,12 @@
|
||||
githubId = 1592375;
|
||||
name = "Walter Huf";
|
||||
};
|
||||
hughmandalidis = {
|
||||
name = "Hugh Mandalidis";
|
||||
email = "mandalidis.hugh@gmail.com";
|
||||
github = "ThanePatrol";
|
||||
githubId = 23148089;
|
||||
};
|
||||
hughobrien = {
|
||||
email = "github@hughobrien.ie";
|
||||
github = "hughobrien";
|
||||
@ -18547,6 +18553,13 @@
|
||||
githubId = 695473;
|
||||
name = "Sascha Grunert";
|
||||
};
|
||||
saturn745 = {
|
||||
email = "git-commits.rk7uq@aleeas.com";
|
||||
github = "saturn745";
|
||||
githubId = 90934664;
|
||||
name = "Saturn745";
|
||||
matrix = "@galaxyyy:matrix.org";
|
||||
};
|
||||
saulecabrera = {
|
||||
name = "Saúl Cabrera";
|
||||
email = "saulecabrera@gmail.com";
|
||||
|
@ -46,6 +46,237 @@ have a predefined type and string generator already declared under
|
||||
`generate` to build a Java `.properties` file, taking
|
||||
care of the correct escaping, etc.
|
||||
|
||||
`pkgs.formats.hocon` { *`generator`* ? `<derivation>`, *`validator`* ? `<derivation>`, *`doCheck`* ? true }
|
||||
|
||||
: A function taking an attribute set with values
|
||||
|
||||
`generator`
|
||||
|
||||
: A derivation used for converting the JSON output
|
||||
from the nix settings into HOCON. This might be
|
||||
useful if your HOCON variant is slightly different
|
||||
from the java-based one, or for testing purposes.
|
||||
|
||||
`validator`
|
||||
|
||||
: A derivation used for verifying that the HOCON
|
||||
output is correct and parsable. This might be
|
||||
useful if your HOCON variant is slightly different
|
||||
from the java-based one, or for testing purposes.
|
||||
|
||||
`doCheck`
|
||||
|
||||
: Whether to enable/disable the validator check.
|
||||
|
||||
It returns an attrset with a `type`, `generate` function,
|
||||
and a `lib` attrset, as specified [below](#pkgs-formats-result).
|
||||
Some of the lib functions will be best understood if you have
|
||||
read the reference specification. You can find this
|
||||
specification here:
|
||||
|
||||
<https://github.com/lightbend/config/blob/main/HOCON.md>
|
||||
|
||||
Inside of `lib`, you will find these functions
|
||||
|
||||
`mkInclude`
|
||||
|
||||
: This is used together with a specially named
|
||||
attribute `includes`, to include other HOCON
|
||||
sources into the document.
|
||||
|
||||
The function has a shorthand variant where it
|
||||
is up to the HOCON parser to figure out what type
|
||||
of include is being used. The include will default
|
||||
to being non-required. If you want to be more
|
||||
explicit about the details of the include, you can
|
||||
provide an attrset with following arguments
|
||||
|
||||
`required`
|
||||
|
||||
: Whether the parser should fail upon failure
|
||||
to include the document
|
||||
|
||||
`type`
|
||||
|
||||
: Type of the source of the included document.
|
||||
Valid values are `file`, `url` and `classpath`.
|
||||
See upstream documentation for the semantics
|
||||
behind each value
|
||||
|
||||
`value`
|
||||
|
||||
: The URI/path/classpath pointing to the source of
|
||||
the document to be included.
|
||||
|
||||
`Example usage:`
|
||||
|
||||
```nix
|
||||
let
|
||||
format = pkgs.formats.hocon { };
|
||||
hocon_file = pkgs.writeText "to_include.hocon" ''
|
||||
a = 1;
|
||||
'';
|
||||
in {
|
||||
some.nested.hocon.attrset = {
|
||||
_includes = [
|
||||
(format.lib.mkInclude hocon_file)
|
||||
(format.lib.mkInclude "https://example.com/to_include.hocon")
|
||||
(format.lib.mkInclude {
|
||||
required = true;
|
||||
type = "file";
|
||||
value = hocon_file;
|
||||
})
|
||||
];
|
||||
...
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
`mkAppend`
|
||||
|
||||
: This is used to invoke the `+=` operator.
|
||||
This can be useful if you need to add something
|
||||
to a list that is included from outside of nix.
|
||||
See upstream documentation for the semantics
|
||||
behind the `+=` operation.
|
||||
|
||||
`Example usage:`
|
||||
|
||||
```nix
|
||||
let
|
||||
format = pkgs.formats.hocon { };
|
||||
hocon_file = pkgs.writeText "to_include.hocon" ''
|
||||
a = [ 1 ];
|
||||
b = [ 2 ];
|
||||
'';
|
||||
in {
|
||||
_includes = [
|
||||
(format.lib.mkInclude hocon_file)
|
||||
];
|
||||
|
||||
c = 3;
|
||||
a = format.lib.mkAppend 3;
|
||||
b = format.lib.mkAppend (format.lib.mkSubstitution "c");
|
||||
}
|
||||
```
|
||||
|
||||
`mkSubstitution`
|
||||
|
||||
: This is used to make HOCON substitutions.
|
||||
Similarly to `mkInclude`, this function has
|
||||
a shorthand variant where you just give it
|
||||
the string with the substitution value.
|
||||
The substitution is not optional by default.
|
||||
Alternatively, you can provide an attrset
|
||||
with more options
|
||||
|
||||
`optional`
|
||||
|
||||
: Whether the parser should fail upon
|
||||
failure to fetch the substitution value.
|
||||
|
||||
`value`
|
||||
|
||||
: The name of the variable to use for
|
||||
substitution.
|
||||
|
||||
See upstream documentation for semantics
|
||||
behind the substitution functionality.
|
||||
|
||||
`Example usage:`
|
||||
|
||||
```nix
|
||||
let
|
||||
format = pkgs.formats.hocon { };
|
||||
in {
|
||||
a = 1;
|
||||
b = format.lib.mkSubstitution "a";
|
||||
c = format.lib.mkSubstitution "SOME_ENVVAR";
|
||||
d = format.lib.mkSubstitution {
|
||||
value = "SOME_OPTIONAL_ENVVAR";
|
||||
optional = true;
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
`Implementation notes:`
|
||||
|
||||
- classpath includes are not implemented in pyhocon,
|
||||
which is used for validating the HOCON output. This
|
||||
means that if you are using classpath includes,
|
||||
you will want to either use an alternative validator
|
||||
or set `doCheck = false` in the format options.
|
||||
|
||||
`pkgs.formats.libconfig` { *`generator`* ? `<derivation>`, *`validator`* ? `<derivation>` }
|
||||
|
||||
: A function taking an attribute set with values
|
||||
|
||||
`generator`
|
||||
|
||||
: A derivation used for converting the JSON output
|
||||
from the nix settings into libconfig. This might be
|
||||
useful if your libconfig variant is slightly different
|
||||
from the original one, or for testing purposes.
|
||||
|
||||
`validator`
|
||||
|
||||
: A derivation used for verifying that the libconfig
|
||||
output is correct and parsable. This might be
|
||||
useful if your libconfig variant is slightly different
|
||||
from the original one, or for testing purposes.
|
||||
|
||||
It returns an attrset with a `type`, `generate` function,
|
||||
and a `lib` attrset, as specified [below](#pkgs-formats-result).
|
||||
Some of the lib functions will be best understood if you have
|
||||
read the reference specification. You can find this
|
||||
specification here:
|
||||
|
||||
<https://hyperrealm.github.io/libconfig/libconfig_manual.html#Configuration-Files>
|
||||
|
||||
Inside of `lib`, you will find these functions
|
||||
|
||||
`mkHex`, `mkOctal`, `mkFloat`
|
||||
|
||||
: Use these to specify numbers in other formats.
|
||||
|
||||
`Example usage:`
|
||||
|
||||
```nix
|
||||
let
|
||||
format = pkgs.formats.libconfig { };
|
||||
in {
|
||||
myHexValue = format.lib.mkHex "0x1FC3";
|
||||
myOctalValue = format.lib.mkOctal "0027";
|
||||
myFloatValue = format.lib.mkFloat "1.2E-3";
|
||||
}
|
||||
```
|
||||
|
||||
`mkArray`, `mkList`
|
||||
|
||||
: Use these to differentiate between whether
|
||||
a nix list should be considered as a libconfig
|
||||
array or a libconfig list. See the upstream
|
||||
documentation for the semantics behind these types.
|
||||
|
||||
`Example usage:`
|
||||
|
||||
```nix
|
||||
let
|
||||
format = pkgs.formats.libconfig { };
|
||||
in {
|
||||
myList = format.lib.mkList [ "foo" 1 true ];
|
||||
myArray = format.lib.mkArray [ 1 2 3 ];
|
||||
}
|
||||
```
|
||||
|
||||
`Implementation notes:`
|
||||
|
||||
- Since libconfig does not allow setting names to start with an underscore,
|
||||
this is used as a prefix for both special types and include directives.
|
||||
|
||||
- The difference between 32bit and 64bit values became optional in libconfig
|
||||
1.5, so we assume 64bit values for all numbers.
|
||||
|
||||
`pkgs.formats.json` { }
|
||||
|
||||
: A function taking an empty attribute set (for future extensibility)
|
||||
|
@ -124,6 +124,8 @@
|
||||
|
||||
- [foot](https://codeberg.org/dnkl/foot), a fast, lightweight and minimalistic Wayland terminal emulator. Available as [programs.foot](#opt-programs.foot.enable).
|
||||
|
||||
- [ToDesk](https://www.todesk.com/linux.html), a remote desktop application. Available as [services.todesk.enable](#opt-services.todesk.enable).
|
||||
|
||||
## Backward Incompatibilities {#sec-release-24.11-incompatibilities}
|
||||
|
||||
- `transmission` package has been aliased with a `trace` warning to `transmission_3`. Since [Transmission 4 has been released last year](https://github.com/transmission/transmission/releases/tag/4.0.0), and Transmission 3 will eventually go away, it was decided to perform this warning alias to make people aware of the new version. The `services.transmission.package` defaults to `transmission_3` as well because the upgrade can cause data loss in certain specific usage patterns (examples: [#5153](https://github.com/transmission/transmission/issues/5153), [#6796](https://github.com/transmission/transmission/issues/6796)). Please make sure to back up your data directory per your usage:
|
||||
@ -319,6 +321,8 @@
|
||||
|
||||
- PPD files for Utax printers got renamed (spaces replaced by underscores) in newest `foomatic-db` package; users of Utax printers might need to adapt their `hardware.printers.ensurePrinters.*.model` value.
|
||||
|
||||
- The `kvdo` kernel module package was removed, because it was upstreamed in kernel version 6.9, where it is called `dm-vdo`.
|
||||
|
||||
- `libe57format` has been updated to `>= 3.0.0`, which contains some backward-incompatible API changes. See the [release note](https://github.com/asmaloney/libE57Format/releases/tag/v3.0.0) for more details.
|
||||
|
||||
- `gitlab` deprecated support for *runner registration tokens* in GitLab 16.0, disabled their support in GitLab 17.0 and will
|
||||
@ -362,6 +366,8 @@
|
||||
|
||||
- Docker now defaults to 27.x, because version 24.x stopped receiving security updates and bug fixes after [February 1, 2024](https://github.com/moby/moby/pull/46772#discussion_r1686464084).
|
||||
|
||||
- `postgresql` was split into default and -dev outputs. To make this work without circular dependencies, the output of the `pg_config` system view has been removed. The `pg_config` binary is provided in the -dev output and still works as expected.
|
||||
|
||||
- `keycloak` was updated to version 25, which introduces new hostname related options.
|
||||
See [Upgrading Guide](https://www.keycloak.org/docs/25.0.1/upgrading/#migrating-to-25-0-0) for instructions.
|
||||
|
||||
@ -396,6 +402,12 @@
|
||||
|
||||
- The `services.trust-dns` module has been renamed to `services.hickory-dns`.
|
||||
|
||||
- The option `services.prometheus.exporters.pgbouncer.connectionStringFile` has been removed since
|
||||
it leaked the connection string (and thus potentially the DB password) into the cmdline
|
||||
of the process, making it effectively world-readable.
|
||||
|
||||
Use [`services.prometheus.exporters.pgbouncer.connectionEnvFile`](#opt-services.prometheus.exporters.pgbouncer.connectionEnvFile) instead.
|
||||
|
||||
- The `lsh` package and the `services.lshd` module have been removed as they had no maintainer in Nixpkgs and hadn’t seen an upstream release in over a decade. It is recommended to migrate to `openssh` and `services.openssh`.
|
||||
|
||||
- `opencv2` and `opencv3` have been removed, as they are obsolete and
|
||||
@ -484,6 +496,8 @@
|
||||
|
||||
- The `shadowstack` hardening flag has been added, though disabled by default.
|
||||
|
||||
- `xxd` is now provided by the `tinyxxd` package, rather than `vim.xxd`, to reduce closure size and vulnerability impact. Since it has the same options and semantics as Vim's `xxd` utility, there is no user impact. Vim's `xxd` remains available as the `vim.xxd` package.
|
||||
|
||||
- `prometheus-openldap-exporter` was removed since it was unmaintained upstream and had no nixpkgs maintainers.
|
||||
|
||||
- `restic` module now has an option for inhibiting system sleep while backups are running, defaulting to off (not inhibiting sleep), available as [`services.restic.backups.<name>.inhibitsSleep`](#opt-services.restic.backups._name_.inhibitsSleep).
|
||||
|
@ -1,50 +1,24 @@
|
||||
{
|
||||
config,
|
||||
lib,
|
||||
pkgs,
|
||||
...
|
||||
}:
|
||||
{ config, lib, pkgs, ... }:
|
||||
|
||||
let
|
||||
inherit (lib)
|
||||
mkOption
|
||||
optionalString
|
||||
types
|
||||
versionAtLeast
|
||||
;
|
||||
inherit (lib) mkOption optionalString types versionAtLeast;
|
||||
inherit (lib.options) literalExpression;
|
||||
cfg = config.amazonImage;
|
||||
amiBootMode = if config.ec2.efi then "uefi" else "legacy-bios";
|
||||
virtualisationOptions = import ../../../modules/virtualisation/virtualisation-options.nix;
|
||||
|
||||
in
|
||||
{
|
||||
imports = [
|
||||
../../../modules/virtualisation/amazon-image.nix
|
||||
virtualisationOptions.diskSize
|
||||
(lib.mkRenamedOptionModuleWith {
|
||||
sinceRelease = 2411;
|
||||
from = [
|
||||
"virtualisation"
|
||||
"amazonImage"
|
||||
"sizeMB"
|
||||
];
|
||||
to = [
|
||||
"virtualisation"
|
||||
"diskSize"
|
||||
];
|
||||
})
|
||||
];
|
||||
in {
|
||||
|
||||
imports = [ ../../../modules/virtualisation/amazon-image.nix ];
|
||||
|
||||
# Amazon recommends setting this to the highest possible value for a good EBS
|
||||
# experience, which prior to 4.15 was 255.
|
||||
# https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nvme-ebs-volumes.html#timeout-nvme-ebs-volumes
|
||||
config.boot.kernelParams =
|
||||
let
|
||||
timeout =
|
||||
if versionAtLeast config.boot.kernelPackages.kernel.version "4.15" then "4294967295" else "255";
|
||||
in
|
||||
[ "nvme_core.io_timeout=${timeout}" ];
|
||||
let timeout =
|
||||
if versionAtLeast config.boot.kernelPackages.kernel.version "4.15"
|
||||
then "4294967295"
|
||||
else "255";
|
||||
in [ "nvme_core.io_timeout=${timeout}" ];
|
||||
|
||||
options.amazonImage = {
|
||||
name = mkOption {
|
||||
@ -60,30 +34,30 @@ in
|
||||
}
|
||||
]
|
||||
'';
|
||||
default = [ ];
|
||||
default = [];
|
||||
description = ''
|
||||
This option lists files to be copied to fixed locations in the
|
||||
generated image. Glob patterns work.
|
||||
'';
|
||||
};
|
||||
|
||||
sizeMB = mkOption {
|
||||
type = with types; either (enum [ "auto" ]) int;
|
||||
default = 3072;
|
||||
example = 8192;
|
||||
description = "The size in MB of the image";
|
||||
};
|
||||
|
||||
format = mkOption {
|
||||
type = types.enum [
|
||||
"raw"
|
||||
"qcow2"
|
||||
"vpc"
|
||||
];
|
||||
type = types.enum [ "raw" "qcow2" "vpc" ];
|
||||
default = "vpc";
|
||||
description = "The image format to output";
|
||||
};
|
||||
};
|
||||
|
||||
config.virtualisation.diskSize = lib.mkDefault (3 * 1024);
|
||||
config.virtualisation.diskSizeAutoSupported = !config.ec2.zfs.enable;
|
||||
|
||||
config.system.build.amazonImage =
|
||||
let
|
||||
configFile = pkgs.writeText "configuration.nix" ''
|
||||
config.system.build.amazonImage = let
|
||||
configFile = pkgs.writeText "configuration.nix"
|
||||
''
|
||||
{ modulesPath, ... }: {
|
||||
imports = [ "''${modulesPath}/virtualisation/amazon-image.nix" ];
|
||||
${optionalString config.ec2.efi ''
|
||||
@ -96,102 +70,91 @@ in
|
||||
}
|
||||
'';
|
||||
|
||||
zfsBuilder = import ../../../lib/make-multi-disk-zfs-image.nix {
|
||||
inherit
|
||||
lib
|
||||
config
|
||||
configFile
|
||||
pkgs
|
||||
;
|
||||
inherit (cfg) contents format name;
|
||||
zfsBuilder = import ../../../lib/make-multi-disk-zfs-image.nix {
|
||||
inherit lib config configFile pkgs;
|
||||
inherit (cfg) contents format name;
|
||||
|
||||
includeChannel = true;
|
||||
includeChannel = true;
|
||||
|
||||
bootSize = 1000; # 1G is the minimum EBS volume
|
||||
bootSize = 1000; # 1G is the minimum EBS volume
|
||||
|
||||
rootSize = config.virtualisation.diskSize;
|
||||
rootPoolProperties = {
|
||||
ashift = 12;
|
||||
autoexpand = "on";
|
||||
};
|
||||
|
||||
datasets = config.ec2.zfs.datasets;
|
||||
|
||||
postVM = ''
|
||||
extension=''${rootDiskImage##*.}
|
||||
friendlyName=$out/${cfg.name}
|
||||
rootDisk="$friendlyName.root.$extension"
|
||||
bootDisk="$friendlyName.boot.$extension"
|
||||
mv "$rootDiskImage" "$rootDisk"
|
||||
mv "$bootDiskImage" "$bootDisk"
|
||||
|
||||
mkdir -p $out/nix-support
|
||||
echo "file ${cfg.format} $bootDisk" >> $out/nix-support/hydra-build-products
|
||||
echo "file ${cfg.format} $rootDisk" >> $out/nix-support/hydra-build-products
|
||||
|
||||
${pkgs.jq}/bin/jq -n \
|
||||
--arg system_label ${lib.escapeShellArg config.system.nixos.label} \
|
||||
--arg system ${lib.escapeShellArg pkgs.stdenv.hostPlatform.system} \
|
||||
--arg root_logical_bytes "$(${pkgs.qemu_kvm}/bin/qemu-img info --output json "$rootDisk" | ${pkgs.jq}/bin/jq '."virtual-size"')" \
|
||||
--arg boot_logical_bytes "$(${pkgs.qemu_kvm}/bin/qemu-img info --output json "$bootDisk" | ${pkgs.jq}/bin/jq '."virtual-size"')" \
|
||||
--arg boot_mode "${amiBootMode}" \
|
||||
--arg root "$rootDisk" \
|
||||
--arg boot "$bootDisk" \
|
||||
'{}
|
||||
| .label = $system_label
|
||||
| .boot_mode = $boot_mode
|
||||
| .system = $system
|
||||
| .disks.boot.logical_bytes = $boot_logical_bytes
|
||||
| .disks.boot.file = $boot
|
||||
| .disks.root.logical_bytes = $root_logical_bytes
|
||||
| .disks.root.file = $root
|
||||
' > $out/nix-support/image-info.json
|
||||
'';
|
||||
rootSize = cfg.sizeMB;
|
||||
rootPoolProperties = {
|
||||
ashift = 12;
|
||||
autoexpand = "on";
|
||||
};
|
||||
|
||||
extBuilder = import ../../../lib/make-disk-image.nix {
|
||||
inherit
|
||||
lib
|
||||
config
|
||||
configFile
|
||||
pkgs
|
||||
;
|
||||
datasets = config.ec2.zfs.datasets;
|
||||
|
||||
inherit (cfg) contents format name;
|
||||
postVM = ''
|
||||
extension=''${rootDiskImage##*.}
|
||||
friendlyName=$out/${cfg.name}
|
||||
rootDisk="$friendlyName.root.$extension"
|
||||
bootDisk="$friendlyName.boot.$extension"
|
||||
mv "$rootDiskImage" "$rootDisk"
|
||||
mv "$bootDiskImage" "$bootDisk"
|
||||
|
||||
fsType = "ext4";
|
||||
partitionTableType = if config.ec2.efi then "efi" else "legacy+gpt";
|
||||
mkdir -p $out/nix-support
|
||||
echo "file ${cfg.format} $bootDisk" >> $out/nix-support/hydra-build-products
|
||||
echo "file ${cfg.format} $rootDisk" >> $out/nix-support/hydra-build-products
|
||||
|
||||
inherit (config.virtualisation) diskSize;
|
||||
${pkgs.jq}/bin/jq -n \
|
||||
--arg system_label ${lib.escapeShellArg config.system.nixos.label} \
|
||||
--arg system ${lib.escapeShellArg pkgs.stdenv.hostPlatform.system} \
|
||||
--arg root_logical_bytes "$(${pkgs.qemu_kvm}/bin/qemu-img info --output json "$rootDisk" | ${pkgs.jq}/bin/jq '."virtual-size"')" \
|
||||
--arg boot_logical_bytes "$(${pkgs.qemu_kvm}/bin/qemu-img info --output json "$bootDisk" | ${pkgs.jq}/bin/jq '."virtual-size"')" \
|
||||
--arg boot_mode "${amiBootMode}" \
|
||||
--arg root "$rootDisk" \
|
||||
--arg boot "$bootDisk" \
|
||||
'{}
|
||||
| .label = $system_label
|
||||
| .boot_mode = $boot_mode
|
||||
| .system = $system
|
||||
| .disks.boot.logical_bytes = $boot_logical_bytes
|
||||
| .disks.boot.file = $boot
|
||||
| .disks.root.logical_bytes = $root_logical_bytes
|
||||
| .disks.root.file = $root
|
||||
' > $out/nix-support/image-info.json
|
||||
'';
|
||||
};
|
||||
|
||||
postVM = ''
|
||||
extension=''${diskImage##*.}
|
||||
friendlyName=$out/${cfg.name}.$extension
|
||||
mv "$diskImage" "$friendlyName"
|
||||
diskImage=$friendlyName
|
||||
extBuilder = import ../../../lib/make-disk-image.nix {
|
||||
inherit lib config configFile pkgs;
|
||||
|
||||
mkdir -p $out/nix-support
|
||||
echo "file ${cfg.format} $diskImage" >> $out/nix-support/hydra-build-products
|
||||
inherit (cfg) contents format name;
|
||||
|
||||
${pkgs.jq}/bin/jq -n \
|
||||
--arg system_label ${lib.escapeShellArg config.system.nixos.label} \
|
||||
--arg system ${lib.escapeShellArg pkgs.stdenv.hostPlatform.system} \
|
||||
--arg logical_bytes "$(${pkgs.qemu_kvm}/bin/qemu-img info --output json "$diskImage" | ${pkgs.jq}/bin/jq '."virtual-size"')" \
|
||||
--arg boot_mode "${amiBootMode}" \
|
||||
--arg file "$diskImage" \
|
||||
'{}
|
||||
| .label = $system_label
|
||||
| .boot_mode = $boot_mode
|
||||
| .system = $system
|
||||
| .logical_bytes = $logical_bytes
|
||||
| .file = $file
|
||||
| .disks.root.logical_bytes = $logical_bytes
|
||||
| .disks.root.file = $file
|
||||
' > $out/nix-support/image-info.json
|
||||
'';
|
||||
};
|
||||
in
|
||||
if config.ec2.zfs.enable then zfsBuilder else extBuilder;
|
||||
fsType = "ext4";
|
||||
partitionTableType = if config.ec2.efi then "efi" else "legacy+gpt";
|
||||
|
||||
diskSize = cfg.sizeMB;
|
||||
|
||||
postVM = ''
|
||||
extension=''${diskImage##*.}
|
||||
friendlyName=$out/${cfg.name}.$extension
|
||||
mv "$diskImage" "$friendlyName"
|
||||
diskImage=$friendlyName
|
||||
|
||||
mkdir -p $out/nix-support
|
||||
echo "file ${cfg.format} $diskImage" >> $out/nix-support/hydra-build-products
|
||||
|
||||
${pkgs.jq}/bin/jq -n \
|
||||
--arg system_label ${lib.escapeShellArg config.system.nixos.label} \
|
||||
--arg system ${lib.escapeShellArg pkgs.stdenv.hostPlatform.system} \
|
||||
--arg logical_bytes "$(${pkgs.qemu_kvm}/bin/qemu-img info --output json "$diskImage" | ${pkgs.jq}/bin/jq '."virtual-size"')" \
|
||||
--arg boot_mode "${amiBootMode}" \
|
||||
--arg file "$diskImage" \
|
||||
'{}
|
||||
| .label = $system_label
|
||||
| .boot_mode = $boot_mode
|
||||
| .system = $system
|
||||
| .logical_bytes = $logical_bytes
|
||||
| .file = $file
|
||||
| .disks.root.logical_bytes = $logical_bytes
|
||||
| .disks.root.file = $file
|
||||
' > $out/nix-support/image-info.json
|
||||
'';
|
||||
};
|
||||
in if config.ec2.zfs.enable then zfsBuilder else extBuilder;
|
||||
|
||||
meta.maintainers = with lib.maintainers; [ arianvp ];
|
||||
}
|
||||
|
@ -1,37 +1,18 @@
|
||||
# nix-build '<nixpkgs/nixos>' -A config.system.build.openstackImage --arg configuration "{ imports = [ ./nixos/maintainers/scripts/openstack/openstack-image.nix ]; }"
|
||||
|
||||
{
|
||||
config,
|
||||
lib,
|
||||
pkgs,
|
||||
...
|
||||
}:
|
||||
{ config, lib, pkgs, ... }:
|
||||
let
|
||||
inherit (lib) mkOption types;
|
||||
copyChannel = true;
|
||||
cfg = config.openstackImage;
|
||||
imageBootMode = if config.openstack.efi then "uefi" else "legacy-bios";
|
||||
virtualisationOptions = import ../../../modules/virtualisation/virtualisation-options.nix;
|
||||
in
|
||||
{
|
||||
imports = [
|
||||
../../../modules/virtualisation/openstack-config.nix
|
||||
virtualisationOptions.diskSize
|
||||
(lib.mkRenamedOptionModuleWith {
|
||||
sinceRelease = 2411;
|
||||
from = [
|
||||
"virtualisation"
|
||||
"openstackImage"
|
||||
"sizeMB"
|
||||
];
|
||||
to = [
|
||||
"virtualisation"
|
||||
"diskSize"
|
||||
];
|
||||
})
|
||||
|
||||
] ++ (lib.optional copyChannel ../../../modules/installer/cd-dvd/channel.nix);
|
||||
|
||||
|
||||
options.openstackImage = {
|
||||
name = mkOption {
|
||||
type = types.str;
|
||||
@ -41,15 +22,18 @@ in
|
||||
|
||||
ramMB = mkOption {
|
||||
type = types.int;
|
||||
default = (3 * 1024);
|
||||
default = 1024;
|
||||
description = "RAM allocation for build VM";
|
||||
};
|
||||
|
||||
sizeMB = mkOption {
|
||||
type = types.int;
|
||||
default = 8192;
|
||||
description = "The size in MB of the image";
|
||||
};
|
||||
|
||||
format = mkOption {
|
||||
type = types.enum [
|
||||
"raw"
|
||||
"qcow2"
|
||||
];
|
||||
type = types.enum [ "raw" "qcow2" ];
|
||||
default = "qcow2";
|
||||
description = "The image format to output";
|
||||
};
|
||||
@ -70,26 +54,24 @@ in
|
||||
};
|
||||
};
|
||||
|
||||
virtualisation.diskSize = lib.mkDefault (8 * 1024);
|
||||
virtualisation.diskSizeAutoSupported = false;
|
||||
|
||||
system.build.openstackImage = import ../../../lib/make-single-disk-zfs-image.nix {
|
||||
inherit lib config;
|
||||
inherit (cfg) contents format name;
|
||||
pkgs = import ../../../.. { inherit (pkgs) system; }; # ensure we use the regular qemu-kvm package
|
||||
|
||||
configFile = pkgs.writeText "configuration.nix" ''
|
||||
{ modulesPath, ... }: {
|
||||
imports = [ "''${modulesPath}/virtualisation/openstack-config.nix" ];
|
||||
openstack.zfs.enable = true;
|
||||
}
|
||||
'';
|
||||
configFile = pkgs.writeText "configuration.nix"
|
||||
''
|
||||
{ modulesPath, ... }: {
|
||||
imports = [ "''${modulesPath}/virtualisation/openstack-config.nix" ];
|
||||
openstack.zfs.enable = true;
|
||||
}
|
||||
'';
|
||||
|
||||
includeChannel = copyChannel;
|
||||
|
||||
bootSize = 1000;
|
||||
memSize = cfg.ramMB;
|
||||
rootSize = config.virtualisation.diskSize;
|
||||
rootSize = cfg.sizeMB;
|
||||
rootPoolProperties = {
|
||||
ashift = 12;
|
||||
autoexpand = "on";
|
||||
|
@ -177,14 +177,8 @@ let
|
||||
mkdir -p $dst
|
||||
|
||||
# fonts.conf
|
||||
cp ${pkg.out}/etc/fonts/fonts.conf \
|
||||
ln -s ${pkg.out}/etc/fonts/fonts.conf \
|
||||
$dst/../fonts.conf
|
||||
|
||||
# horrible sed hack to add the line that was accidentally removed
|
||||
# from the default config in #324516
|
||||
# FIXME: fix that, revert this
|
||||
sed '5i <include ignore_missing="yes">/etc/fonts/conf.d</include>' -i $dst/../fonts.conf
|
||||
|
||||
# TODO: remove this legacy symlink once people stop using packages built before #95358 was merged
|
||||
mkdir -p $out/etc/fonts/2.11
|
||||
ln -s /etc/fonts/fonts.conf \
|
||||
|
@ -929,6 +929,7 @@
|
||||
./services/monitoring/teamviewer.nix
|
||||
./services/monitoring/telegraf.nix
|
||||
./services/monitoring/thanos.nix
|
||||
./services/monitoring/todesk.nix
|
||||
./services/monitoring/tremor-rs.nix
|
||||
./services/monitoring/tuptime.nix
|
||||
./services/monitoring/unpoller.nix
|
||||
|
@ -1,9 +1,4 @@
|
||||
{
|
||||
config,
|
||||
lib,
|
||||
options,
|
||||
...
|
||||
}:
|
||||
{ config, lib, options, ... }:
|
||||
|
||||
let
|
||||
keysDirectory = "/var/keys";
|
||||
@ -20,19 +15,6 @@ in
|
||||
imports = [
|
||||
../virtualisation/qemu-vm.nix
|
||||
|
||||
(lib.mkRenamedOptionModuleWith {
|
||||
sinceRelease = 2411;
|
||||
from = [
|
||||
"virtualisation"
|
||||
"darwin-builder"
|
||||
"diskSize"
|
||||
];
|
||||
to = [
|
||||
"virtualisation"
|
||||
"diskSize"
|
||||
];
|
||||
})
|
||||
|
||||
# Avoid a dependency on stateVersion
|
||||
{
|
||||
disabledModules = [
|
||||
@ -41,16 +23,17 @@ in
|
||||
];
|
||||
# swraid's default depends on stateVersion
|
||||
config.boot.swraid.enable = false;
|
||||
options.boot.isContainer = lib.mkOption {
|
||||
default = false;
|
||||
internal = true;
|
||||
};
|
||||
options.boot.isContainer = lib.mkOption { default = false; internal = true; };
|
||||
}
|
||||
];
|
||||
|
||||
options.virtualisation.description = "The maximum disk space allocated to the runner in megabytes";
|
||||
|
||||
options.virtualisation.darwin-builder = with lib; {
|
||||
diskSize = mkOption {
|
||||
default = 20 * 1024;
|
||||
type = types.int;
|
||||
example = 30720;
|
||||
description = "The maximum disk space allocated to the runner in MB";
|
||||
};
|
||||
memorySize = mkOption {
|
||||
default = 3 * 1024;
|
||||
type = types.int;
|
||||
@ -76,13 +59,13 @@ in
|
||||
'';
|
||||
};
|
||||
workingDirectory = mkOption {
|
||||
default = ".";
|
||||
type = types.str;
|
||||
example = "/var/lib/darwin-builder";
|
||||
description = ''
|
||||
The working directory to use to run the script. When running
|
||||
as part of a flake will need to be set to a non read-only filesystem.
|
||||
'';
|
||||
default = ".";
|
||||
type = types.str;
|
||||
example = "/var/lib/darwin-builder";
|
||||
description = ''
|
||||
The working directory to use to run the script. When running
|
||||
as part of a flake will need to be set to a non read-only filesystem.
|
||||
'';
|
||||
};
|
||||
hostPort = mkOption {
|
||||
default = 31022;
|
||||
@ -175,34 +158,26 @@ in
|
||||
script = hostPkgs.writeShellScriptBin "create-builder" (
|
||||
''
|
||||
set -euo pipefail
|
||||
''
|
||||
+
|
||||
# When running as non-interactively as part of a DarwinConfiguration the working directory
|
||||
# must be set to a writeable directory.
|
||||
(
|
||||
if cfg.workingDirectory != "." then
|
||||
''
|
||||
${hostPkgs.coreutils}/bin/mkdir --parent "${cfg.workingDirectory}"
|
||||
cd "${cfg.workingDirectory}"
|
||||
''
|
||||
else
|
||||
""
|
||||
)
|
||||
+ ''
|
||||
KEYS="''${KEYS:-./keys}"
|
||||
${hostPkgs.coreutils}/bin/mkdir --parent "''${KEYS}"
|
||||
PRIVATE_KEY="''${KEYS}/${user}_${keyType}"
|
||||
PUBLIC_KEY="''${PRIVATE_KEY}.pub"
|
||||
if [ ! -e "''${PRIVATE_KEY}" ] || [ ! -e "''${PUBLIC_KEY}" ]; then
|
||||
${hostPkgs.coreutils}/bin/rm --force -- "''${PRIVATE_KEY}" "''${PUBLIC_KEY}"
|
||||
${hostPkgs.openssh}/bin/ssh-keygen -q -f "''${PRIVATE_KEY}" -t ${keyType} -N "" -C 'builder@localhost'
|
||||
fi
|
||||
if ! ${hostPkgs.diffutils}/bin/cmp "''${PUBLIC_KEY}" ${publicKey}; then
|
||||
(set -x; sudo --reset-timestamp ${installCredentials} "''${KEYS}")
|
||||
fi
|
||||
KEYS="$(${hostPkgs.nix}/bin/nix-store --add "$KEYS")" ${lib.getExe config.system.build.vm}
|
||||
''
|
||||
);
|
||||
'' +
|
||||
# When running as non-interactively as part of a DarwinConfiguration the working directory
|
||||
# must be set to a writeable directory.
|
||||
(if cfg.workingDirectory != "." then ''
|
||||
${hostPkgs.coreutils}/bin/mkdir --parent "${cfg.workingDirectory}"
|
||||
cd "${cfg.workingDirectory}"
|
||||
'' else "") + ''
|
||||
KEYS="''${KEYS:-./keys}"
|
||||
${hostPkgs.coreutils}/bin/mkdir --parent "''${KEYS}"
|
||||
PRIVATE_KEY="''${KEYS}/${user}_${keyType}"
|
||||
PUBLIC_KEY="''${PRIVATE_KEY}.pub"
|
||||
if [ ! -e "''${PRIVATE_KEY}" ] || [ ! -e "''${PUBLIC_KEY}" ]; then
|
||||
${hostPkgs.coreutils}/bin/rm --force -- "''${PRIVATE_KEY}" "''${PUBLIC_KEY}"
|
||||
${hostPkgs.openssh}/bin/ssh-keygen -q -f "''${PRIVATE_KEY}" -t ${keyType} -N "" -C 'builder@localhost'
|
||||
fi
|
||||
if ! ${hostPkgs.diffutils}/bin/cmp "''${PUBLIC_KEY}" ${publicKey}; then
|
||||
(set -x; sudo --reset-timestamp ${installCredentials} "''${KEYS}")
|
||||
fi
|
||||
KEYS="$(${hostPkgs.nix}/bin/nix-store --add "$KEYS")" ${lib.getExe config.system.build.vm}
|
||||
'');
|
||||
|
||||
in
|
||||
script.overrideAttrs (old: {
|
||||
@ -248,16 +223,12 @@ in
|
||||
'';
|
||||
|
||||
virtualisation = {
|
||||
diskSize = lib.mkDefault (20 * 1024);
|
||||
diskSize = cfg.diskSize;
|
||||
|
||||
memorySize = cfg.memorySize;
|
||||
|
||||
forwardPorts = [
|
||||
{
|
||||
from = "host";
|
||||
guest.port = 22;
|
||||
host.port = cfg.hostPort;
|
||||
}
|
||||
{ from = "host"; guest.port = 22; host.port = cfg.hostPort; }
|
||||
];
|
||||
|
||||
# Disable graphics for the builder since users will likely want to run it
|
||||
|
@ -1,7 +1,4 @@
|
||||
{ config, options, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
|
||||
name = "snapserver";
|
||||
@ -9,8 +6,8 @@ let
|
||||
cfg = config.services.snapserver;
|
||||
|
||||
# Using types.nullOr to inherit upstream defaults.
|
||||
sampleFormat = mkOption {
|
||||
type = with types; nullOr str;
|
||||
sampleFormat = lib.mkOption {
|
||||
type = with lib.types; nullOr str;
|
||||
default = null;
|
||||
description = ''
|
||||
Default sample format.
|
||||
@ -18,8 +15,8 @@ let
|
||||
example = "48000:16:2";
|
||||
};
|
||||
|
||||
codec = mkOption {
|
||||
type = with types; nullOr str;
|
||||
codec = lib.mkOption {
|
||||
type = with lib.types; nullOr str;
|
||||
default = null;
|
||||
description = ''
|
||||
Default audio compression method.
|
||||
@ -30,20 +27,20 @@ let
|
||||
streamToOption = name: opt:
|
||||
let
|
||||
os = val:
|
||||
optionalString (val != null) "${val}";
|
||||
lib.optionalString (val != null) "${val}";
|
||||
os' = prefix: val:
|
||||
optionalString (val != null) (prefix + "${val}");
|
||||
lib.optionalString (val != null) (prefix + "${val}");
|
||||
flatten = key: value:
|
||||
"&${key}=${value}";
|
||||
in
|
||||
"--stream.stream=\"${opt.type}://" + os opt.location + "?" + os' "name=" name
|
||||
+ os' "&sampleformat=" opt.sampleFormat + os' "&codec=" opt.codec
|
||||
+ concatStrings (mapAttrsToList flatten opt.query) + "\"";
|
||||
+ lib.concatStrings (lib.mapAttrsToList lib.flatten opt.query) + "\"";
|
||||
|
||||
optionalNull = val: ret:
|
||||
optional (val != null) ret;
|
||||
lib.optional (val != null) ret;
|
||||
|
||||
optionString = concatStringsSep " " (mapAttrsToList streamToOption cfg.streams
|
||||
optionString = lib.concatStringsSep " " (lib.mapAttrsToList streamToOption cfg.streams
|
||||
# global options
|
||||
++ [ "--stream.bind_to_address=${cfg.listenAddress}" ]
|
||||
++ [ "--stream.port=${toString cfg.port}" ]
|
||||
@ -51,22 +48,22 @@ let
|
||||
++ optionalNull cfg.codec "--stream.codec=${cfg.codec}"
|
||||
++ optionalNull cfg.streamBuffer "--stream.stream_buffer=${toString cfg.streamBuffer}"
|
||||
++ optionalNull cfg.buffer "--stream.buffer=${toString cfg.buffer}"
|
||||
++ optional cfg.sendToMuted "--stream.send_to_muted"
|
||||
++ lib.optional cfg.sendToMuted "--stream.send_to_muted"
|
||||
# tcp json rpc
|
||||
++ [ "--tcp.enabled=${toString cfg.tcp.enable}" ]
|
||||
++ optionals cfg.tcp.enable [
|
||||
++ lib.optionals cfg.tcp.enable [
|
||||
"--tcp.bind_to_address=${cfg.tcp.listenAddress}"
|
||||
"--tcp.port=${toString cfg.tcp.port}" ]
|
||||
# http json rpc
|
||||
++ [ "--http.enabled=${toString cfg.http.enable}" ]
|
||||
++ optionals cfg.http.enable [
|
||||
++ lib.optionals cfg.http.enable [
|
||||
"--http.bind_to_address=${cfg.http.listenAddress}"
|
||||
"--http.port=${toString cfg.http.port}"
|
||||
] ++ optional (cfg.http.docRoot != null) "--http.doc_root=\"${toString cfg.http.docRoot}\"");
|
||||
] ++ lib.optional (cfg.http.docRoot != null) "--http.doc_root=\"${toString cfg.http.docRoot}\"");
|
||||
|
||||
in {
|
||||
imports = [
|
||||
(mkRenamedOptionModule [ "services" "snapserver" "controlPort" ] [ "services" "snapserver" "tcp" "port" ])
|
||||
(lib.mkRenamedOptionModule [ "services" "snapserver" "controlPort" ] [ "services" "snapserver" "tcp" "port" ])
|
||||
];
|
||||
|
||||
###### interface
|
||||
@ -75,16 +72,16 @@ in {
|
||||
|
||||
services.snapserver = {
|
||||
|
||||
enable = mkOption {
|
||||
type = types.bool;
|
||||
enable = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = false;
|
||||
description = ''
|
||||
Whether to enable snapserver.
|
||||
'';
|
||||
};
|
||||
|
||||
listenAddress = mkOption {
|
||||
type = types.str;
|
||||
listenAddress = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "::";
|
||||
example = "0.0.0.0";
|
||||
description = ''
|
||||
@ -92,16 +89,16 @@ in {
|
||||
'';
|
||||
};
|
||||
|
||||
port = mkOption {
|
||||
type = types.port;
|
||||
port = lib.mkOption {
|
||||
type = lib.types.port;
|
||||
default = 1704;
|
||||
description = ''
|
||||
The port that snapclients can connect to.
|
||||
'';
|
||||
};
|
||||
|
||||
openFirewall = mkOption {
|
||||
type = types.bool;
|
||||
openFirewall = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = false;
|
||||
description = ''
|
||||
Whether to automatically open the specified ports in the firewall.
|
||||
@ -111,8 +108,8 @@ in {
|
||||
inherit sampleFormat;
|
||||
inherit codec;
|
||||
|
||||
streamBuffer = mkOption {
|
||||
type = with types; nullOr int;
|
||||
streamBuffer = lib.mkOption {
|
||||
type = with lib.types; nullOr int;
|
||||
default = null;
|
||||
description = ''
|
||||
Stream read (input) buffer in ms.
|
||||
@ -120,8 +117,8 @@ in {
|
||||
example = 20;
|
||||
};
|
||||
|
||||
buffer = mkOption {
|
||||
type = with types; nullOr int;
|
||||
buffer = lib.mkOption {
|
||||
type = with lib.types; nullOr int;
|
||||
default = null;
|
||||
description = ''
|
||||
Network buffer in ms.
|
||||
@ -129,24 +126,24 @@ in {
|
||||
example = 1000;
|
||||
};
|
||||
|
||||
sendToMuted = mkOption {
|
||||
type = types.bool;
|
||||
sendToMuted = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = false;
|
||||
description = ''
|
||||
Send audio to muted clients.
|
||||
'';
|
||||
};
|
||||
|
||||
tcp.enable = mkOption {
|
||||
type = types.bool;
|
||||
tcp.enable = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = true;
|
||||
description = ''
|
||||
Whether to enable the JSON-RPC via TCP.
|
||||
'';
|
||||
};
|
||||
|
||||
tcp.listenAddress = mkOption {
|
||||
type = types.str;
|
||||
tcp.listenAddress = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "::";
|
||||
example = "0.0.0.0";
|
||||
description = ''
|
||||
@ -154,24 +151,24 @@ in {
|
||||
'';
|
||||
};
|
||||
|
||||
tcp.port = mkOption {
|
||||
type = types.port;
|
||||
tcp.port = lib.mkOption {
|
||||
type = lib.types.port;
|
||||
default = 1705;
|
||||
description = ''
|
||||
The port where the TCP JSON-RPC listens on.
|
||||
'';
|
||||
};
|
||||
|
||||
http.enable = mkOption {
|
||||
type = types.bool;
|
||||
http.enable = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = true;
|
||||
description = ''
|
||||
Whether to enable the JSON-RPC via HTTP.
|
||||
'';
|
||||
};
|
||||
|
||||
http.listenAddress = mkOption {
|
||||
type = types.str;
|
||||
http.listenAddress = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "::";
|
||||
example = "0.0.0.0";
|
||||
description = ''
|
||||
@ -179,27 +176,27 @@ in {
|
||||
'';
|
||||
};
|
||||
|
||||
http.port = mkOption {
|
||||
type = types.port;
|
||||
http.port = lib.mkOption {
|
||||
type = lib.types.port;
|
||||
default = 1780;
|
||||
description = ''
|
||||
The port where the HTTP JSON-RPC listens on.
|
||||
'';
|
||||
};
|
||||
|
||||
http.docRoot = mkOption {
|
||||
type = with types; nullOr path;
|
||||
http.docRoot = lib.mkOption {
|
||||
type = with lib.types; nullOr path;
|
||||
default = null;
|
||||
description = ''
|
||||
Path to serve from the HTTP servers root.
|
||||
'';
|
||||
};
|
||||
|
||||
streams = mkOption {
|
||||
type = with types; attrsOf (submodule {
|
||||
streams = lib.mkOption {
|
||||
type = with lib.types; attrsOf (submodule {
|
||||
options = {
|
||||
location = mkOption {
|
||||
type = types.oneOf [ types.path types.str ];
|
||||
location = lib.mkOption {
|
||||
type = lib.types.oneOf [ lib.types.path lib.types.str ];
|
||||
description = ''
|
||||
For type `pipe` or `file`, the path to the pipe or file.
|
||||
For type `librespot`, `airplay` or `process`, the path to the corresponding binary.
|
||||
@ -207,27 +204,27 @@ in {
|
||||
For type `meta`, a list of stream names in the form `/one/two/...`. Don't forget the leading slash.
|
||||
For type `alsa`, use an empty string.
|
||||
'';
|
||||
example = literalExpression ''
|
||||
example = lib.literalExpression ''
|
||||
"/path/to/pipe"
|
||||
"/path/to/librespot"
|
||||
"192.168.1.2:4444"
|
||||
"/MyTCP/Spotify/MyPipe"
|
||||
'';
|
||||
};
|
||||
type = mkOption {
|
||||
type = types.enum [ "pipe" "librespot" "airplay" "file" "process" "tcp" "alsa" "spotify" "meta" ];
|
||||
type = lib.mkOption {
|
||||
type = lib.types.enum [ "pipe" "librespot" "airplay" "file" "process" "tcp" "alsa" "spotify" "meta" ];
|
||||
default = "pipe";
|
||||
description = ''
|
||||
The type of input stream.
|
||||
'';
|
||||
};
|
||||
query = mkOption {
|
||||
query = lib.mkOption {
|
||||
type = attrsOf str;
|
||||
default = {};
|
||||
description = ''
|
||||
Key-value pairs that convey additional parameters about a stream.
|
||||
'';
|
||||
example = literalExpression ''
|
||||
example = lib.literalExpression ''
|
||||
# for type == "pipe":
|
||||
{
|
||||
mode = "create";
|
||||
@ -255,7 +252,7 @@ in {
|
||||
description = ''
|
||||
The definition for an input source.
|
||||
'';
|
||||
example = literalExpression ''
|
||||
example = lib.literalExpression ''
|
||||
{
|
||||
mpd = {
|
||||
type = "pipe";
|
||||
@ -272,11 +269,11 @@ in {
|
||||
|
||||
###### implementation
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
config = lib.mkIf cfg.enable {
|
||||
|
||||
warnings =
|
||||
# https://github.com/badaix/snapcast/blob/98ac8b2fb7305084376607b59173ce4097c620d8/server/streamreader/stream_manager.cpp#L85
|
||||
filter (w: w != "") (mapAttrsToList (k: v: optionalString (v.type == "spotify") ''
|
||||
lib.filter (w: w != "") (lib.mapAttrsToList (k: v: lib.optionalString (v.type == "spotify") ''
|
||||
services.snapserver.streams.${k}.type = "spotify" is deprecated, use services.snapserver.streams.${k}.type = "librespot" instead.
|
||||
'') cfg.streams);
|
||||
|
||||
@ -305,13 +302,13 @@ in {
|
||||
};
|
||||
|
||||
networking.firewall.allowedTCPPorts =
|
||||
optionals cfg.openFirewall [ cfg.port ]
|
||||
++ optional (cfg.openFirewall && cfg.tcp.enable) cfg.tcp.port
|
||||
++ optional (cfg.openFirewall && cfg.http.enable) cfg.http.port;
|
||||
lib.optionals cfg.openFirewall [ cfg.port ]
|
||||
++ lib.optional (cfg.openFirewall && cfg.tcp.enable) cfg.tcp.port
|
||||
++ lib.optional (cfg.openFirewall && cfg.http.enable) cfg.http.port;
|
||||
};
|
||||
|
||||
meta = {
|
||||
maintainers = with maintainers; [ tobim ];
|
||||
maintainers = with lib.maintainers; [ tobim ];
|
||||
};
|
||||
|
||||
}
|
||||
|
@ -1,7 +1,4 @@
|
||||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
|
||||
isLocalPath = x:
|
||||
@ -11,17 +8,17 @@ let
|
||||
|
||||
mkExcludeFile = cfg:
|
||||
# Write each exclude pattern to a new line
|
||||
pkgs.writeText "excludefile" (concatMapStrings (s: s + "\n") cfg.exclude);
|
||||
pkgs.writeText "excludefile" (lib.concatMapStrings (s: s + "\n") cfg.exclude);
|
||||
|
||||
mkPatternsFile = cfg:
|
||||
# Write each pattern to a new line
|
||||
pkgs.writeText "patternsfile" (concatMapStrings (s: s + "\n") cfg.patterns);
|
||||
pkgs.writeText "patternsfile" (lib.concatMapStrings (s: s + "\n") cfg.patterns);
|
||||
|
||||
mkKeepArgs = cfg:
|
||||
# If cfg.prune.keep e.g. has a yearly attribute,
|
||||
# its content is passed on as --keep-yearly
|
||||
concatStringsSep " "
|
||||
(mapAttrsToList (x: y: "--keep-${x}=${toString y}") cfg.prune.keep);
|
||||
lib.concatStringsSep " "
|
||||
(lib.mapAttrsToList (x: y: "--keep-${x}=${toString y}") cfg.prune.keep);
|
||||
|
||||
mkBackupScript = name: cfg: pkgs.writeShellScript "${name}-script" (''
|
||||
set -e
|
||||
@ -44,10 +41,10 @@ let
|
||||
fi
|
||||
}
|
||||
|
||||
archiveName="${optionalString (cfg.archiveBaseName != null) (cfg.archiveBaseName + "-")}$(date ${cfg.dateFormat})"
|
||||
archiveSuffix="${optionalString cfg.appendFailedSuffix ".failed"}"
|
||||
archiveName="${lib.optionalString (cfg.archiveBaseName != null) (cfg.archiveBaseName + "-")}$(date ${cfg.dateFormat})"
|
||||
archiveSuffix="${lib.optionalString cfg.appendFailedSuffix ".failed"}"
|
||||
${cfg.preHook}
|
||||
'' + optionalString cfg.doInit ''
|
||||
'' + lib.optionalString cfg.doInit ''
|
||||
# Run borg init if the repo doesn't exist yet
|
||||
if ! borgWrapper list $extraArgs > /dev/null; then
|
||||
borgWrapper init $extraArgs \
|
||||
@ -58,24 +55,24 @@ let
|
||||
'' + ''
|
||||
(
|
||||
set -o pipefail
|
||||
${optionalString (cfg.dumpCommand != null) ''${escapeShellArg cfg.dumpCommand} | \''}
|
||||
${lib.optionalString (cfg.dumpCommand != null) ''${lib.escapeShellArg cfg.dumpCommand} | \''}
|
||||
borgWrapper create $extraArgs \
|
||||
--compression ${cfg.compression} \
|
||||
--exclude-from ${mkExcludeFile cfg} \
|
||||
--patterns-from ${mkPatternsFile cfg} \
|
||||
$extraCreateArgs \
|
||||
"::$archiveName$archiveSuffix" \
|
||||
${if cfg.paths == null then "-" else escapeShellArgs cfg.paths}
|
||||
${if cfg.paths == null then "-" else lib.escapeShellArgs cfg.paths}
|
||||
)
|
||||
'' + optionalString cfg.appendFailedSuffix ''
|
||||
'' + lib.optionalString cfg.appendFailedSuffix ''
|
||||
borgWrapper rename $extraArgs \
|
||||
"::$archiveName$archiveSuffix" "$archiveName"
|
||||
'' + ''
|
||||
${cfg.postCreate}
|
||||
'' + optionalString (cfg.prune.keep != { }) ''
|
||||
'' + lib.optionalString (cfg.prune.keep != { }) ''
|
||||
borgWrapper prune $extraArgs \
|
||||
${mkKeepArgs cfg} \
|
||||
${optionalString (cfg.prune.prefix != null) "--glob-archives ${escapeShellArg "${cfg.prune.prefix}*"}"} \
|
||||
${lib.optionalString (cfg.prune.prefix != null) "--glob-archives ${lib.escapeShellArg "${cfg.prune.prefix}*"}"} \
|
||||
$extraPruneArgs
|
||||
borgWrapper compact $extraArgs $extraCompactArgs
|
||||
${cfg.postPrune}
|
||||
@ -93,18 +90,18 @@ let
|
||||
userHome = config.users.users.${cfg.user}.home;
|
||||
backupJobName = "borgbackup-job-${name}";
|
||||
backupScript = mkBackupScript backupJobName cfg;
|
||||
in nameValuePair backupJobName {
|
||||
in lib.nameValuePair backupJobName {
|
||||
description = "BorgBackup job ${name}";
|
||||
path = [
|
||||
config.services.borgbackup.package pkgs.openssh
|
||||
];
|
||||
script = "exec " + optionalString cfg.inhibitsSleep ''\
|
||||
script = "exec " + lib.optionalString cfg.inhibitsSleep ''\
|
||||
${pkgs.systemd}/bin/systemd-inhibit \
|
||||
--who="borgbackup" \
|
||||
--what="sleep" \
|
||||
--why="Scheduled backup" \
|
||||
'' + backupScript;
|
||||
unitConfig = optionalAttrs (isLocalPath cfg.repo) {
|
||||
unitConfig = lib.optionalAttrs (isLocalPath cfg.repo) {
|
||||
RequiresMountsFor = [ cfg.repo ];
|
||||
};
|
||||
serviceConfig = {
|
||||
@ -118,7 +115,7 @@ let
|
||||
[ "${userHome}/.config/borg" "${userHome}/.cache/borg" ]
|
||||
++ cfg.readWritePaths
|
||||
# Borg needs write access to repo if it is not remote
|
||||
++ optional (isLocalPath cfg.repo) cfg.repo;
|
||||
++ lib.optional (isLocalPath cfg.repo) cfg.repo;
|
||||
PrivateTmp = cfg.privateTmp;
|
||||
};
|
||||
environment = {
|
||||
@ -128,7 +125,7 @@ let
|
||||
};
|
||||
|
||||
mkBackupTimers = name: cfg:
|
||||
nameValuePair "borgbackup-job-${name}" {
|
||||
lib.nameValuePair "borgbackup-job-${name}" {
|
||||
description = "BorgBackup job ${name} timer";
|
||||
wantedBy = [ "timers.target" ];
|
||||
timerConfig = {
|
||||
@ -136,8 +133,8 @@ let
|
||||
OnCalendar = cfg.startAt;
|
||||
};
|
||||
# if remote-backup wait for network
|
||||
after = optional (cfg.persistentTimer && !isLocalPath cfg.repo) "network-online.target";
|
||||
wants = optional (cfg.persistentTimer && !isLocalPath cfg.repo) "network-online.target";
|
||||
after = lib.optional (cfg.persistentTimer && !isLocalPath cfg.repo) "network-online.target";
|
||||
wants = lib.optional (cfg.persistentTimer && !isLocalPath cfg.repo) "network-online.target";
|
||||
};
|
||||
|
||||
# utility function around makeWrapper
|
||||
@ -148,11 +145,11 @@ let
|
||||
nativeBuildInputs = [ pkgs.makeWrapper ];
|
||||
} (with lib; ''
|
||||
makeWrapper "${original}" "$out/bin/${name}" \
|
||||
${concatStringsSep " \\\n " (mapAttrsToList (name: value: ''--set ${name} "${value}"'') set)}
|
||||
${lib.concatStringsSep " \\\n " (lib.mapAttrsToList (name: value: ''--set ${name} "${value}"'') set)}
|
||||
'');
|
||||
|
||||
mkBorgWrapper = name: cfg: mkWrapperDrv {
|
||||
original = getExe config.services.borgbackup.package;
|
||||
original = lib.getExe config.services.borgbackup.package;
|
||||
name = "borg-job-${name}";
|
||||
set = { BORG_REPO = cfg.repo; } // (mkPassEnv cfg) // cfg.environment;
|
||||
};
|
||||
@ -167,7 +164,7 @@ let
|
||||
"${config.users.users."${cfg.user}".home}/.cache".d = settings;
|
||||
"${config.users.users."${cfg.user}".home}/.config/borg".d = settings;
|
||||
"${config.users.users."${cfg.user}".home}/.cache/borg".d = settings;
|
||||
} // optionalAttrs (isLocalPath cfg.repo && !cfg.removableDevice) {
|
||||
} // lib.optionalAttrs (isLocalPath cfg.repo && !cfg.removableDevice) {
|
||||
"${cfg.repo}".d = settings;
|
||||
});
|
||||
|
||||
@ -180,11 +177,11 @@ let
|
||||
};
|
||||
|
||||
mkRepoService = name: cfg:
|
||||
nameValuePair "borgbackup-repo-${name}" {
|
||||
lib.nameValuePair "borgbackup-repo-${name}" {
|
||||
description = "Create BorgBackup repository ${name} directory";
|
||||
script = ''
|
||||
mkdir -p ${escapeShellArg cfg.path}
|
||||
chown ${cfg.user}:${cfg.group} ${escapeShellArg cfg.path}
|
||||
mkdir -p ${lib.escapeShellArg cfg.path}
|
||||
chown ${cfg.user}:${cfg.group} ${lib.escapeShellArg cfg.path}
|
||||
'';
|
||||
serviceConfig = {
|
||||
# The service's only task is to ensure that the specified path exists
|
||||
@ -196,10 +193,10 @@ let
|
||||
mkAuthorizedKey = cfg: appendOnly: key:
|
||||
let
|
||||
# Because of the following line, clients do not need to specify an absolute repo path
|
||||
cdCommand = "cd ${escapeShellArg cfg.path}";
|
||||
cdCommand = "cd ${lib.escapeShellArg cfg.path}";
|
||||
restrictedArg = "--restrict-to-${if cfg.allowSubRepos then "path" else "repository"} .";
|
||||
appendOnlyArg = optionalString appendOnly "--append-only";
|
||||
quotaArg = optionalString (cfg.quota != null) "--storage-quota ${cfg.quota}";
|
||||
appendOnlyArg = lib.optionalString appendOnly "--append-only";
|
||||
quotaArg = lib.optionalString (cfg.quota != null) "--storage-quota ${cfg.quota}";
|
||||
serveCommand = "borg serve ${restrictedArg} ${appendOnlyArg} ${quotaArg}";
|
||||
in
|
||||
''command="${cdCommand} && ${serveCommand}",restrict ${key}'';
|
||||
@ -224,7 +221,7 @@ let
|
||||
};
|
||||
|
||||
mkSourceAssertions = name: cfg: {
|
||||
assertion = count isNull [ cfg.dumpCommand cfg.paths ] == 1;
|
||||
assertion = lib.count isNull [ cfg.dumpCommand cfg.paths ] == 1;
|
||||
message = ''
|
||||
Exactly one of borgbackup.jobs.${name}.paths or borgbackup.jobs.${name}.dumpCommand
|
||||
must be set.
|
||||
@ -239,14 +236,14 @@ let
|
||||
};
|
||||
|
||||
in {
|
||||
meta.maintainers = with maintainers; [ dotlambda ];
|
||||
meta.maintainers = with lib.maintainers; [ dotlambda ];
|
||||
meta.doc = ./borgbackup.md;
|
||||
|
||||
###### interface
|
||||
|
||||
options.services.borgbackup.package = mkPackageOption pkgs "borgbackup" { };
|
||||
options.services.borgbackup.package = lib.mkPackageOption pkgs "borgbackup" { };
|
||||
|
||||
options.services.borgbackup.jobs = mkOption {
|
||||
options.services.borgbackup.jobs = lib.mkOption {
|
||||
description = ''
|
||||
Deduplicating backups using BorgBackup.
|
||||
Adding a job will cause a borg-job-NAME wrapper to be added
|
||||
@ -254,7 +251,7 @@ in {
|
||||
See also the chapter about BorgBackup in the NixOS manual.
|
||||
'';
|
||||
default = { };
|
||||
example = literalExpression ''
|
||||
example = lib.literalExpression ''
|
||||
{ # for a local backup
|
||||
rootBackup = {
|
||||
paths = "/";
|
||||
@ -286,12 +283,12 @@ in {
|
||||
startAt = "daily";
|
||||
};
|
||||
'';
|
||||
type = types.attrsOf (types.submodule (let globalConfig = config; in
|
||||
type = lib.types.attrsOf (lib.types.submodule (let globalConfig = config; in
|
||||
{ name, config, ... }: {
|
||||
options = {
|
||||
|
||||
paths = mkOption {
|
||||
type = with types; nullOr (coercedTo str lib.singleton (listOf str));
|
||||
paths = lib.mkOption {
|
||||
type = with lib.types; nullOr (coercedTo str lib.singleton (listOf str));
|
||||
default = null;
|
||||
description = ''
|
||||
Path(s) to back up.
|
||||
@ -300,8 +297,8 @@ in {
|
||||
example = "/home/user";
|
||||
};
|
||||
|
||||
dumpCommand = mkOption {
|
||||
type = with types; nullOr path;
|
||||
dumpCommand = lib.mkOption {
|
||||
type = with lib.types; nullOr path;
|
||||
default = null;
|
||||
description = ''
|
||||
Backup the stdout of this program instead of filesystem paths.
|
||||
@ -310,22 +307,22 @@ in {
|
||||
example = "/path/to/createZFSsend.sh";
|
||||
};
|
||||
|
||||
repo = mkOption {
|
||||
type = types.str;
|
||||
repo = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
description = "Remote or local repository to back up to.";
|
||||
example = "user@machine:/path/to/repo";
|
||||
};
|
||||
|
||||
removableDevice = mkOption {
|
||||
type = types.bool;
|
||||
removableDevice = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = false;
|
||||
description = "Whether the repo (which must be local) is a removable device.";
|
||||
};
|
||||
|
||||
archiveBaseName = mkOption {
|
||||
type = types.nullOr (types.strMatching "[^/{}]+");
|
||||
archiveBaseName = lib.mkOption {
|
||||
type = lib.types.nullOr (lib.types.strMatching "[^/{}]+");
|
||||
default = "${globalConfig.networking.hostName}-${name}";
|
||||
defaultText = literalExpression ''"''${config.networking.hostName}-<name>"'';
|
||||
defaultText = lib.literalExpression ''"''${config.networking.hostName}-<name>"'';
|
||||
description = ''
|
||||
How to name the created archives. A timestamp, whose format is
|
||||
determined by {option}`dateFormat`, will be appended. The full
|
||||
@ -335,8 +332,8 @@ in {
|
||||
'';
|
||||
};
|
||||
|
||||
dateFormat = mkOption {
|
||||
type = types.str;
|
||||
dateFormat = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
description = ''
|
||||
Arguments passed to {command}`date`
|
||||
to create a timestamp suffix for the archive name.
|
||||
@ -345,8 +342,8 @@ in {
|
||||
example = "-u +%s";
|
||||
};
|
||||
|
||||
startAt = mkOption {
|
||||
type = with types; either str (listOf str);
|
||||
startAt = lib.mkOption {
|
||||
type = with lib.types; either str (listOf str);
|
||||
default = "daily";
|
||||
description = ''
|
||||
When or how often the backup should run.
|
||||
@ -359,9 +356,9 @@ in {
|
||||
'';
|
||||
};
|
||||
|
||||
persistentTimer = mkOption {
|
||||
persistentTimer = lib.mkOption {
|
||||
default = false;
|
||||
type = types.bool;
|
||||
type = lib.types.bool;
|
||||
example = true;
|
||||
description = ''
|
||||
Set the `Persistent` option for the
|
||||
@ -371,17 +368,17 @@ in {
|
||||
'';
|
||||
};
|
||||
|
||||
inhibitsSleep = mkOption {
|
||||
inhibitsSleep = lib.mkOption {
|
||||
default = false;
|
||||
type = types.bool;
|
||||
type = lib.types.bool;
|
||||
example = true;
|
||||
description = ''
|
||||
Prevents the system from sleeping while backing up.
|
||||
'';
|
||||
};
|
||||
|
||||
user = mkOption {
|
||||
type = types.str;
|
||||
user = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
description = ''
|
||||
The user {command}`borg` is run as.
|
||||
User or group need read permission
|
||||
@ -390,8 +387,8 @@ in {
|
||||
default = "root";
|
||||
};
|
||||
|
||||
group = mkOption {
|
||||
type = types.str;
|
||||
group = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
description = ''
|
||||
The group borg is run as. User or group needs read permission
|
||||
for the specified {option}`paths`.
|
||||
@ -399,8 +396,8 @@ in {
|
||||
default = "root";
|
||||
};
|
||||
|
||||
encryption.mode = mkOption {
|
||||
type = types.enum [
|
||||
encryption.mode = lib.mkOption {
|
||||
type = lib.types.enum [
|
||||
"repokey" "keyfile"
|
||||
"repokey-blake2" "keyfile-blake2"
|
||||
"authenticated" "authenticated-blake2"
|
||||
@ -415,8 +412,8 @@ in {
|
||||
example = "repokey-blake2";
|
||||
};
|
||||
|
||||
encryption.passCommand = mkOption {
|
||||
type = with types; nullOr str;
|
||||
encryption.passCommand = lib.mkOption {
|
||||
type = with lib.types; nullOr str;
|
||||
description = ''
|
||||
A command which prints the passphrase to stdout.
|
||||
Mutually exclusive with {option}`passphrase`.
|
||||
@ -425,8 +422,8 @@ in {
|
||||
example = "cat /path/to/passphrase_file";
|
||||
};
|
||||
|
||||
encryption.passphrase = mkOption {
|
||||
type = with types; nullOr str;
|
||||
encryption.passphrase = lib.mkOption {
|
||||
type = with lib.types; nullOr str;
|
||||
description = ''
|
||||
The passphrase the backups are encrypted with.
|
||||
Mutually exclusive with {option}`passCommand`.
|
||||
@ -436,11 +433,11 @@ in {
|
||||
default = null;
|
||||
};
|
||||
|
||||
compression = mkOption {
|
||||
compression = lib.mkOption {
|
||||
# "auto" is optional,
|
||||
# compression mode must be given,
|
||||
# compression level is optional
|
||||
type = types.strMatching "none|(auto,)?(lz4|zstd|zlib|lzma)(,[[:digit:]]{1,2})?";
|
||||
type = lib.types.strMatching "none|(auto,)?(lz4|zstd|zlib|lzma)(,[[:digit:]]{1,2})?";
|
||||
description = ''
|
||||
Compression method to use. Refer to
|
||||
{command}`borg help compression`
|
||||
@ -450,8 +447,8 @@ in {
|
||||
example = "auto,lzma";
|
||||
};
|
||||
|
||||
exclude = mkOption {
|
||||
type = with types; listOf str;
|
||||
exclude = lib.mkOption {
|
||||
type = with lib.types; listOf str;
|
||||
description = ''
|
||||
Exclude paths matching any of the given patterns. See
|
||||
{command}`borg help patterns` for pattern syntax.
|
||||
@ -463,8 +460,8 @@ in {
|
||||
];
|
||||
};
|
||||
|
||||
patterns = mkOption {
|
||||
type = with types; listOf str;
|
||||
patterns = lib.mkOption {
|
||||
type = with lib.types; listOf str;
|
||||
description = ''
|
||||
Include/exclude paths matching the given patterns. The first
|
||||
matching patterns is used, so if an include pattern (prefix `+`)
|
||||
@ -478,8 +475,8 @@ in {
|
||||
];
|
||||
};
|
||||
|
||||
readWritePaths = mkOption {
|
||||
type = with types; listOf path;
|
||||
readWritePaths = lib.mkOption {
|
||||
type = with lib.types; listOf path;
|
||||
description = ''
|
||||
By default, borg cannot write anywhere on the system but
|
||||
`$HOME/.config/borg` and `$HOME/.cache/borg`.
|
||||
@ -492,8 +489,8 @@ in {
|
||||
];
|
||||
};
|
||||
|
||||
privateTmp = mkOption {
|
||||
type = types.bool;
|
||||
privateTmp = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
description = ''
|
||||
Set the `PrivateTmp` option for
|
||||
the systemd-service. Set to false if you need sockets
|
||||
@ -502,8 +499,8 @@ in {
|
||||
default = true;
|
||||
};
|
||||
|
||||
failOnWarnings = mkOption {
|
||||
type = types.bool;
|
||||
failOnWarnings = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
description = ''
|
||||
Fail the whole backup job if any borg command returns a warning
|
||||
(exit code 1), for example because a file changed during backup.
|
||||
@ -511,8 +508,8 @@ in {
|
||||
default = true;
|
||||
};
|
||||
|
||||
doInit = mkOption {
|
||||
type = types.bool;
|
||||
doInit = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
description = ''
|
||||
Run {command}`borg init` if the
|
||||
specified {option}`repo` does not exist.
|
||||
@ -523,8 +520,8 @@ in {
|
||||
default = true;
|
||||
};
|
||||
|
||||
appendFailedSuffix = mkOption {
|
||||
type = types.bool;
|
||||
appendFailedSuffix = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
description = ''
|
||||
Append a `.failed` suffix
|
||||
to the archive name, which is only removed if
|
||||
@ -533,18 +530,18 @@ in {
|
||||
default = true;
|
||||
};
|
||||
|
||||
prune.keep = mkOption {
|
||||
prune.keep = lib.mkOption {
|
||||
# Specifying e.g. `prune.keep.yearly = -1`
|
||||
# means there is no limit of yearly archives to keep
|
||||
# The regex is for use with e.g. --keep-within 1y
|
||||
type = with types; attrsOf (either int (strMatching "[[:digit:]]+[Hdwmy]"));
|
||||
type = with lib.types; attrsOf (either int (strMatching "[[:digit:]]+[Hdwmy]"));
|
||||
description = ''
|
||||
Prune a repository by deleting all archives not matching any of the
|
||||
specified retention options. See {command}`borg help prune`
|
||||
for the available options.
|
||||
'';
|
||||
default = { };
|
||||
example = literalExpression ''
|
||||
example = lib.literalExpression ''
|
||||
{
|
||||
within = "1d"; # Keep all archives from the last day
|
||||
daily = 7;
|
||||
@ -554,19 +551,19 @@ in {
|
||||
'';
|
||||
};
|
||||
|
||||
prune.prefix = mkOption {
|
||||
type = types.nullOr (types.str);
|
||||
prune.prefix = lib.mkOption {
|
||||
type = lib.types.nullOr (lib.types.str);
|
||||
description = ''
|
||||
Only consider archive names starting with this prefix for pruning.
|
||||
By default, only archives created by this job are considered.
|
||||
Use `""` or `null` to consider all archives.
|
||||
'';
|
||||
default = config.archiveBaseName;
|
||||
defaultText = literalExpression "archiveBaseName";
|
||||
defaultText = lib.literalExpression "archiveBaseName";
|
||||
};
|
||||
|
||||
environment = mkOption {
|
||||
type = with types; attrsOf str;
|
||||
environment = lib.mkOption {
|
||||
type = with lib.types; attrsOf str;
|
||||
description = ''
|
||||
Environment variables passed to the backup script.
|
||||
You can for example specify which SSH key to use.
|
||||
@ -575,8 +572,8 @@ in {
|
||||
example = { BORG_RSH = "ssh -i /path/to/key"; };
|
||||
};
|
||||
|
||||
preHook = mkOption {
|
||||
type = types.lines;
|
||||
preHook = lib.mkOption {
|
||||
type = lib.types.lines;
|
||||
description = ''
|
||||
Shell commands to run before the backup.
|
||||
This can for example be used to mount file systems.
|
||||
@ -588,16 +585,16 @@ in {
|
||||
'';
|
||||
};
|
||||
|
||||
postInit = mkOption {
|
||||
type = types.lines;
|
||||
postInit = lib.mkOption {
|
||||
type = lib.types.lines;
|
||||
description = ''
|
||||
Shell commands to run after {command}`borg init`.
|
||||
'';
|
||||
default = "";
|
||||
};
|
||||
|
||||
postCreate = mkOption {
|
||||
type = types.lines;
|
||||
postCreate = lib.mkOption {
|
||||
type = lib.types.lines;
|
||||
description = ''
|
||||
Shell commands to run after {command}`borg create`. The name
|
||||
of the created archive is stored in `$archiveName`.
|
||||
@ -605,16 +602,16 @@ in {
|
||||
default = "";
|
||||
};
|
||||
|
||||
postPrune = mkOption {
|
||||
type = types.lines;
|
||||
postPrune = lib.mkOption {
|
||||
type = lib.types.lines;
|
||||
description = ''
|
||||
Shell commands to run after {command}`borg prune`.
|
||||
'';
|
||||
default = "";
|
||||
};
|
||||
|
||||
postHook = mkOption {
|
||||
type = types.lines;
|
||||
postHook = lib.mkOption {
|
||||
type = lib.types.lines;
|
||||
description = ''
|
||||
Shell commands to run just before exit. They are executed
|
||||
even if a previous command exits with a non-zero exit code.
|
||||
@ -623,8 +620,8 @@ in {
|
||||
default = "";
|
||||
};
|
||||
|
||||
extraArgs = mkOption {
|
||||
type = with types; coercedTo (listOf str) escapeShellArgs str;
|
||||
extraArgs = lib.mkOption {
|
||||
type = with lib.types; coercedTo (listOf str) lib.escapeShellArgs str;
|
||||
description = ''
|
||||
Additional arguments for all {command}`borg` calls the
|
||||
service has. Handle with care.
|
||||
@ -633,8 +630,8 @@ in {
|
||||
example = [ "--remote-path=/path/to/borg" ];
|
||||
};
|
||||
|
||||
extraInitArgs = mkOption {
|
||||
type = with types; coercedTo (listOf str) escapeShellArgs str;
|
||||
extraInitArgs = lib.mkOption {
|
||||
type = with lib.types; coercedTo (listOf str) lib.escapeShellArgs str;
|
||||
description = ''
|
||||
Additional arguments for {command}`borg init`.
|
||||
Can also be set at runtime using `$extraInitArgs`.
|
||||
@ -643,8 +640,8 @@ in {
|
||||
example = [ "--append-only" ];
|
||||
};
|
||||
|
||||
extraCreateArgs = mkOption {
|
||||
type = with types; coercedTo (listOf str) escapeShellArgs str;
|
||||
extraCreateArgs = lib.mkOption {
|
||||
type = with lib.types; coercedTo (listOf str) lib.escapeShellArgs str;
|
||||
description = ''
|
||||
Additional arguments for {command}`borg create`.
|
||||
Can also be set at runtime using `$extraCreateArgs`.
|
||||
@ -656,8 +653,8 @@ in {
|
||||
];
|
||||
};
|
||||
|
||||
extraPruneArgs = mkOption {
|
||||
type = with types; coercedTo (listOf str) escapeShellArgs str;
|
||||
extraPruneArgs = lib.mkOption {
|
||||
type = with lib.types; coercedTo (listOf str) lib.escapeShellArgs str;
|
||||
description = ''
|
||||
Additional arguments for {command}`borg prune`.
|
||||
Can also be set at runtime using `$extraPruneArgs`.
|
||||
@ -666,8 +663,8 @@ in {
|
||||
example = [ "--save-space" ];
|
||||
};
|
||||
|
||||
extraCompactArgs = mkOption {
|
||||
type = with types; coercedTo (listOf str) escapeShellArgs str;
|
||||
extraCompactArgs = lib.mkOption {
|
||||
type = with lib.types; coercedTo (listOf str) lib.escapeShellArgs str;
|
||||
description = ''
|
||||
Additional arguments for {command}`borg compact`.
|
||||
Can also be set at runtime using `$extraCompactArgs`.
|
||||
@ -680,7 +677,7 @@ in {
|
||||
));
|
||||
};
|
||||
|
||||
options.services.borgbackup.repos = mkOption {
|
||||
options.services.borgbackup.repos = lib.mkOption {
|
||||
description = ''
|
||||
Serve BorgBackup repositories to given public SSH keys,
|
||||
restricting their access to the repository only.
|
||||
@ -689,11 +686,11 @@ in {
|
||||
i.e. `user@machine:.` is enough. (Note colon and dot.)
|
||||
'';
|
||||
default = { };
|
||||
type = types.attrsOf (types.submodule (
|
||||
type = lib.types.attrsOf (lib.types.submodule (
|
||||
{ ... }: {
|
||||
options = {
|
||||
path = mkOption {
|
||||
type = types.path;
|
||||
path = lib.mkOption {
|
||||
type = lib.types.path;
|
||||
description = ''
|
||||
Where to store the backups. Note that the directory
|
||||
is created automatically, with correct permissions.
|
||||
@ -701,8 +698,8 @@ in {
|
||||
default = "/var/lib/borgbackup";
|
||||
};
|
||||
|
||||
user = mkOption {
|
||||
type = types.str;
|
||||
user = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
description = ''
|
||||
The user {command}`borg serve` is run as.
|
||||
User or group needs write permission
|
||||
@ -711,8 +708,8 @@ in {
|
||||
default = "borg";
|
||||
};
|
||||
|
||||
group = mkOption {
|
||||
type = types.str;
|
||||
group = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
description = ''
|
||||
The group {command}`borg serve` is run as.
|
||||
User or group needs write permission
|
||||
@ -721,8 +718,8 @@ in {
|
||||
default = "borg";
|
||||
};
|
||||
|
||||
authorizedKeys = mkOption {
|
||||
type = with types; listOf str;
|
||||
authorizedKeys = lib.mkOption {
|
||||
type = with lib.types; listOf str;
|
||||
description = ''
|
||||
Public SSH keys that are given full write access to this repository.
|
||||
You should use a different SSH key for each repository you write to, because
|
||||
@ -732,8 +729,8 @@ in {
|
||||
default = [ ];
|
||||
};
|
||||
|
||||
authorizedKeysAppendOnly = mkOption {
|
||||
type = with types; listOf str;
|
||||
authorizedKeysAppendOnly = lib.mkOption {
|
||||
type = with lib.types; listOf str;
|
||||
description = ''
|
||||
Public SSH keys that can only be used to append new data (archives) to the repository.
|
||||
Note that archives can still be marked as deleted and are subsequently removed from disk
|
||||
@ -742,8 +739,8 @@ in {
|
||||
default = [ ];
|
||||
};
|
||||
|
||||
allowSubRepos = mkOption {
|
||||
type = types.bool;
|
||||
allowSubRepos = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
description = ''
|
||||
Allow clients to create repositories in subdirectories of the
|
||||
specified {option}`path`. These can be accessed using
|
||||
@ -755,9 +752,9 @@ in {
|
||||
default = false;
|
||||
};
|
||||
|
||||
quota = mkOption {
|
||||
quota = lib.mkOption {
|
||||
# See the definition of parse_file_size() in src/borg/helpers/parseformat.py
|
||||
type = with types; nullOr (strMatching "[[:digit:].]+[KMGTP]?");
|
||||
type = with lib.types; nullOr (strMatching "[[:digit:].]+[KMGTP]?");
|
||||
description = ''
|
||||
Storage quota for the repository. This quota is ensured for all
|
||||
sub-repositories if {option}`allowSubRepos` is enabled
|
||||
@ -774,29 +771,29 @@ in {
|
||||
|
||||
###### implementation
|
||||
|
||||
config = mkIf (with config.services.borgbackup; jobs != { } || repos != { })
|
||||
config = lib.mkIf (with config.services.borgbackup; jobs != { } || repos != { })
|
||||
(with config.services.borgbackup; {
|
||||
assertions =
|
||||
mapAttrsToList mkPassAssertion jobs
|
||||
++ mapAttrsToList mkKeysAssertion repos
|
||||
++ mapAttrsToList mkSourceAssertions jobs
|
||||
++ mapAttrsToList mkRemovableDeviceAssertions jobs;
|
||||
lib.mapAttrsToList mkPassAssertion jobs
|
||||
++ lib.mapAttrsToList mkKeysAssertion repos
|
||||
++ lib.mapAttrsToList mkSourceAssertions jobs
|
||||
++ lib.mapAttrsToList mkRemovableDeviceAssertions jobs;
|
||||
|
||||
systemd.tmpfiles.settings = mapAttrs' mkTmpfiles jobs;
|
||||
systemd.tmpfiles.settings = lib.mapAttrs' mkTmpfiles jobs;
|
||||
|
||||
systemd.services =
|
||||
# A job named "foo" is mapped to systemd.services.borgbackup-job-foo
|
||||
mapAttrs' mkBackupService jobs
|
||||
lib.mapAttrs' mkBackupService jobs
|
||||
# A repo named "foo" is mapped to systemd.services.borgbackup-repo-foo
|
||||
// mapAttrs' mkRepoService repos;
|
||||
// lib.mapAttrs' mkRepoService repos;
|
||||
|
||||
# A job named "foo" is mapped to systemd.timers.borgbackup-job-foo
|
||||
# only generate the timer if interval (startAt) is set
|
||||
systemd.timers = mapAttrs' mkBackupTimers (filterAttrs (_: cfg: cfg.startAt != []) jobs);
|
||||
systemd.timers = lib.mapAttrs' mkBackupTimers (lib.filterAttrs (_: cfg: cfg.startAt != []) jobs);
|
||||
|
||||
users = mkMerge (mapAttrsToList mkUsersConfig repos);
|
||||
users = lib.mkMerge (lib.mapAttrsToList mkUsersConfig repos);
|
||||
|
||||
environment.systemPackages =
|
||||
[ config.services.borgbackup.package ] ++ (mapAttrsToList mkBorgWrapper jobs);
|
||||
[ config.services.borgbackup.package ] ++ (lib.mapAttrsToList mkBorgWrapper jobs);
|
||||
});
|
||||
}
|
||||
|
@ -362,3 +362,7 @@ postgresql.withJIT.pname
|
||||
```
|
||||
|
||||
evaluates to `"foobar"`.
|
||||
|
||||
## Notable differences to upstream {#module-services-postgres-upstream-deviation}
|
||||
|
||||
- To avoid circular dependencies between default and -dev outputs, the output of the `pg_config` system view has been removed.
|
||||
|
@ -368,6 +368,33 @@ in
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
environmentFile = lib.mkOption {
|
||||
type = lib.types.nullOr lib.types.path;
|
||||
default = null;
|
||||
example = "/var/lib/teeworlds/teeworlds.env";
|
||||
description = ''
|
||||
Environment file as defined in {manpage}`systemd.exec(5)`.
|
||||
|
||||
Secrets may be passed to the service without adding them to the world-readable
|
||||
Nix store, by specifying placeholder variables as the option value in Nix and
|
||||
setting these variables accordingly in the environment file.
|
||||
|
||||
```
|
||||
# snippet of teeworlds-related config
|
||||
services.teeworlds.password = "$TEEWORLDS_PASSWORD";
|
||||
```
|
||||
|
||||
```
|
||||
# content of the environment file
|
||||
TEEWORLDS_PASSWORD=verysecretpassword
|
||||
```
|
||||
|
||||
Note that this file needs to be available on the host on which
|
||||
`teeworlds` is running.
|
||||
'';
|
||||
};
|
||||
|
||||
};
|
||||
};
|
||||
|
||||
@ -383,7 +410,15 @@ in
|
||||
|
||||
serviceConfig = {
|
||||
DynamicUser = true;
|
||||
ExecStart = "${cfg.package}/bin/teeworlds_srv -f ${teeworldsConf}";
|
||||
RuntimeDirectory = "teeworlds";
|
||||
RuntimeDirectoryMode = "0700";
|
||||
EnvironmentFile = lib.mkIf (cfg.environmentFile != null) [ cfg.environmentFile ];
|
||||
ExecStartPre = ''
|
||||
${pkgs.envsubst}/bin/envsubst \
|
||||
-i ${teeworldsConf} \
|
||||
-o /run/teeworlds/teeworlds.yaml
|
||||
'';
|
||||
ExecStart = "${lib.getExe cfg.package} -f /run/teeworlds/teeworlds.yaml";
|
||||
|
||||
# Hardening
|
||||
CapabilityBoundingSet = false;
|
||||
|
@ -1,23 +1,20 @@
|
||||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
cfg = config.services.portunus;
|
||||
|
||||
in
|
||||
{
|
||||
options.services.portunus = {
|
||||
enable = mkEnableOption "Portunus, a self-contained user/group management and authentication service for LDAP";
|
||||
enable = lib.mkEnableOption "Portunus, a self-contained user/group management and authentication service for LDAP";
|
||||
|
||||
domain = mkOption {
|
||||
type = types.str;
|
||||
domain = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
example = "sso.example.com";
|
||||
description = "Subdomain which gets reverse proxied to Portunus webserver.";
|
||||
};
|
||||
|
||||
port = mkOption {
|
||||
type = types.port;
|
||||
port = lib.mkOption {
|
||||
type = lib.types.port;
|
||||
default = 8080;
|
||||
description = ''
|
||||
Port where the Portunus webserver should listen on.
|
||||
@ -26,10 +23,10 @@ in
|
||||
'';
|
||||
};
|
||||
|
||||
package = mkPackageOption pkgs "portunus" { };
|
||||
package = lib.mkPackageOption pkgs "portunus" { };
|
||||
|
||||
seedPath = mkOption {
|
||||
type = types.nullOr types.path;
|
||||
seedPath = lib.mkOption {
|
||||
type = lib.types.nullOr lib.types.path;
|
||||
default = null;
|
||||
description = ''
|
||||
Path to a portunus seed file in json format.
|
||||
@ -46,26 +43,26 @@ in
|
||||
'';
|
||||
};
|
||||
|
||||
stateDir = mkOption {
|
||||
type = types.path;
|
||||
stateDir = lib.mkOption {
|
||||
type = lib.types.path;
|
||||
default = "/var/lib/portunus";
|
||||
description = "Path where Portunus stores its state.";
|
||||
};
|
||||
|
||||
user = mkOption {
|
||||
type = types.str;
|
||||
user = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "portunus";
|
||||
description = "User account under which Portunus runs its webserver.";
|
||||
};
|
||||
|
||||
group = mkOption {
|
||||
type = types.str;
|
||||
group = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "portunus";
|
||||
description = "Group account under which Portunus runs its webserver.";
|
||||
};
|
||||
|
||||
dex = {
|
||||
enable = mkEnableOption ''
|
||||
enable = lib.mkEnableOption ''
|
||||
Dex ldap connector.
|
||||
|
||||
To activate dex, first a search user must be created in the Portunus web ui
|
||||
@ -73,15 +70,15 @@ in
|
||||
in the [](#opt-services.dex.environmentFile) setting
|
||||
'';
|
||||
|
||||
oidcClients = mkOption {
|
||||
type = types.listOf (types.submodule {
|
||||
oidcClients = lib.mkOption {
|
||||
type = lib.types.listOf (lib.types.submodule {
|
||||
options = {
|
||||
callbackURL = mkOption {
|
||||
type = types.str;
|
||||
callbackURL = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
description = "URL where the OIDC client should redirect";
|
||||
};
|
||||
id = mkOption {
|
||||
type = types.str;
|
||||
id = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
description = "ID of the OIDC client";
|
||||
};
|
||||
};
|
||||
@ -105,23 +102,23 @@ in
|
||||
'';
|
||||
};
|
||||
|
||||
port = mkOption {
|
||||
type = types.port;
|
||||
port = lib.mkOption {
|
||||
type = lib.types.port;
|
||||
default = 5556;
|
||||
description = "Port where dex should listen on.";
|
||||
};
|
||||
};
|
||||
|
||||
ldap = {
|
||||
package = mkOption {
|
||||
type = types.package;
|
||||
package = lib.mkOption {
|
||||
type = lib.types.package;
|
||||
default = pkgs.openldap;
|
||||
defaultText = lib.literalExpression "pkgs.openldap.override { libxcrypt = pkgs.libxcrypt-legacy; }";
|
||||
description = "The OpenLDAP package to use.";
|
||||
};
|
||||
|
||||
searchUserName = mkOption {
|
||||
type = types.str;
|
||||
searchUserName = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "";
|
||||
example = "admin";
|
||||
description = ''
|
||||
@ -130,8 +127,8 @@ in
|
||||
'';
|
||||
};
|
||||
|
||||
suffix = mkOption {
|
||||
type = types.str;
|
||||
suffix = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
example = "dc=example,dc=org";
|
||||
description = ''
|
||||
The DN of the topmost entry in your LDAP directory.
|
||||
@ -139,8 +136,8 @@ in
|
||||
'';
|
||||
};
|
||||
|
||||
tls = mkOption {
|
||||
type = types.bool;
|
||||
tls = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = false;
|
||||
description = ''
|
||||
Whether to enable LDAPS protocol.
|
||||
@ -151,21 +148,21 @@ in
|
||||
'';
|
||||
};
|
||||
|
||||
user = mkOption {
|
||||
type = types.str;
|
||||
user = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "openldap";
|
||||
description = "User account under which Portunus runs its LDAP server.";
|
||||
};
|
||||
|
||||
group = mkOption {
|
||||
type = types.str;
|
||||
group = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "openldap";
|
||||
description = "Group account under which Portunus runs its LDAP server.";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
config = lib.mkIf cfg.enable {
|
||||
assertions = [
|
||||
{
|
||||
assertion = cfg.dex.enable -> cfg.ldap.searchUserName != "";
|
||||
@ -177,13 +174,13 @@ in
|
||||
environment.systemPackages = [ cfg.ldap.package ];
|
||||
|
||||
# allow connecting via ldaps /w certificate without opening ports
|
||||
networking.hosts = mkIf cfg.ldap.tls {
|
||||
networking.hosts = lib.mkIf cfg.ldap.tls {
|
||||
"::1" = [ cfg.domain ];
|
||||
"127.0.0.1" = [ cfg.domain ];
|
||||
};
|
||||
|
||||
services = {
|
||||
dex = mkIf cfg.dex.enable {
|
||||
dex = lib.mkIf cfg.dex.enable {
|
||||
enable = true;
|
||||
settings = {
|
||||
issuer = "https://${cfg.domain}/dex";
|
||||
@ -219,7 +216,7 @@ in
|
||||
};
|
||||
}];
|
||||
|
||||
staticClients = forEach cfg.dex.oidcClients (client: {
|
||||
staticClients = lib.forEach cfg.dex.oidcClients (client: {
|
||||
inherit (client) id;
|
||||
redirectURIs = [ client.callbackURL ];
|
||||
name = "OIDC for ${client.id}";
|
||||
@ -232,7 +229,7 @@ in
|
||||
};
|
||||
|
||||
systemd.services = {
|
||||
dex = mkIf cfg.dex.enable {
|
||||
dex = lib.mkIf cfg.dex.enable {
|
||||
serviceConfig = {
|
||||
# `dex.service` is super locked down out of the box, but we need some
|
||||
# place to write the SQLite database. This creates $STATE_DIRECTORY below
|
||||
@ -261,9 +258,9 @@ in
|
||||
PORTUNUS_SLAPD_GROUP = cfg.ldap.group;
|
||||
PORTUNUS_SLAPD_USER = cfg.ldap.user;
|
||||
PORTUNUS_SLAPD_SCHEMA_DIR = "${cfg.ldap.package}/etc/schema";
|
||||
} // (optionalAttrs (cfg.seedPath != null) ({
|
||||
} // (lib.optionalAttrs (cfg.seedPath != null) ({
|
||||
PORTUNUS_SEED_PATH = cfg.seedPath;
|
||||
})) // (optionalAttrs cfg.ldap.tls (
|
||||
})) // (lib.optionalAttrs cfg.ldap.tls (
|
||||
let
|
||||
acmeDirectory = config.security.acme.certs."${cfg.domain}".directory;
|
||||
in
|
||||
@ -277,14 +274,14 @@ in
|
||||
};
|
||||
};
|
||||
|
||||
users.users = mkMerge [
|
||||
(mkIf (cfg.ldap.user == "openldap") {
|
||||
users.users = lib.mkMerge [
|
||||
(lib.mkIf (cfg.ldap.user == "openldap") {
|
||||
openldap = {
|
||||
group = cfg.ldap.group;
|
||||
isSystemUser = true;
|
||||
};
|
||||
})
|
||||
(mkIf (cfg.user == "portunus") {
|
||||
(lib.mkIf (cfg.user == "portunus") {
|
||||
portunus = {
|
||||
group = cfg.group;
|
||||
isSystemUser = true;
|
||||
@ -292,15 +289,15 @@ in
|
||||
})
|
||||
];
|
||||
|
||||
users.groups = mkMerge [
|
||||
(mkIf (cfg.ldap.user == "openldap") {
|
||||
users.groups = lib.mkMerge [
|
||||
(lib.mkIf (cfg.ldap.user == "openldap") {
|
||||
openldap = { };
|
||||
})
|
||||
(mkIf (cfg.user == "portunus") {
|
||||
(lib.mkIf (cfg.user == "portunus") {
|
||||
portunus = { };
|
||||
})
|
||||
];
|
||||
};
|
||||
|
||||
meta.maintainers = [ maintainers.majewsky ] ++ teams.c3d2.members;
|
||||
meta.maintainers = [ lib.maintainers.majewsky ] ++ lib.teams.c3d2.members;
|
||||
}
|
||||
|
@ -1,5 +1,4 @@
|
||||
{ config, lib, pkgs, ... }:
|
||||
with lib;
|
||||
let
|
||||
cfg = config.services.radicle;
|
||||
|
||||
@ -14,18 +13,18 @@ let
|
||||
# Convenient wrapper to run `rad` in the namespaces of `radicle-node.service`
|
||||
rad-system = pkgs.writeShellScriptBin "rad-system" ''
|
||||
set -o allexport
|
||||
${toShellVars env}
|
||||
${lib.toShellVars env}
|
||||
# Note that --env is not used to preserve host's envvars like $TERM
|
||||
exec ${getExe' pkgs.util-linux "nsenter"} -a \
|
||||
-t "$(${getExe' config.systemd.package "systemctl"} show -P MainPID radicle-node.service)" \
|
||||
-S "$(${getExe' config.systemd.package "systemctl"} show -P UID radicle-node.service)" \
|
||||
-G "$(${getExe' config.systemd.package "systemctl"} show -P GID radicle-node.service)" \
|
||||
${getExe' cfg.package "rad"} "$@"
|
||||
exec ${lib.getExe' pkgs.util-linux "nsenter"} -a \
|
||||
-t "$(${lib.getExe' config.systemd.package "systemctl"} show -P MainPID radicle-node.service)" \
|
||||
-S "$(${lib.getExe' config.systemd.package "systemctl"} show -P UID radicle-node.service)" \
|
||||
-G "$(${lib.getExe' config.systemd.package "systemctl"} show -P GID radicle-node.service)" \
|
||||
${lib.getExe' cfg.package "rad"} "$@"
|
||||
'';
|
||||
|
||||
commonServiceConfig = serviceName: {
|
||||
environment = env // {
|
||||
RUST_LOG = mkDefault "info";
|
||||
RUST_LOG = lib.mkDefault "info";
|
||||
};
|
||||
path = [
|
||||
pkgs.gitMinimal
|
||||
@ -41,11 +40,11 @@ let
|
||||
"network-online.target"
|
||||
];
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
serviceConfig = mkMerge [
|
||||
serviceConfig = lib.mkMerge [
|
||||
{
|
||||
BindReadOnlyPaths = [
|
||||
"${cfg.configFile}:${env.RAD_HOME}/config.json"
|
||||
"${if types.path.check cfg.publicKey then cfg.publicKey else pkgs.writeText "radicle.pub" cfg.publicKey}:${env.RAD_HOME}/keys/radicle.pub"
|
||||
"${if lib.types.path.check cfg.publicKey then cfg.publicKey else pkgs.writeText "radicle.pub" cfg.publicKey}:${env.RAD_HOME}/keys/radicle.pub"
|
||||
];
|
||||
KillMode = "process";
|
||||
StateDirectory = [ "radicle" ];
|
||||
@ -107,7 +106,7 @@ let
|
||||
pkgs.gitMinimal
|
||||
cfg.package
|
||||
pkgs.iana-etc
|
||||
(getLib pkgs.nss)
|
||||
(lib.getLib pkgs.nss)
|
||||
pkgs.tzdata
|
||||
];
|
||||
};
|
||||
@ -116,11 +115,11 @@ in
|
||||
{
|
||||
options = {
|
||||
services.radicle = {
|
||||
enable = mkEnableOption "Radicle Seed Node";
|
||||
package = mkPackageOption pkgs "radicle-node" { };
|
||||
privateKeyFile = mkOption {
|
||||
enable = lib.mkEnableOption "Radicle Seed Node";
|
||||
package = lib.mkPackageOption pkgs "radicle-node" { };
|
||||
privateKeyFile = lib.mkOption {
|
||||
# Note that a key encrypted by systemd-creds is not a path but a str.
|
||||
type = with types; either path str;
|
||||
type = with lib.types; either path str;
|
||||
description = ''
|
||||
Absolute file path to an SSH private key,
|
||||
usually generated by `rad auth`.
|
||||
@ -130,44 +129,44 @@ in
|
||||
and the string after as a path encrypted with `systemd-creds`.
|
||||
'';
|
||||
};
|
||||
publicKey = mkOption {
|
||||
type = with types; either path str;
|
||||
publicKey = lib.mkOption {
|
||||
type = with lib.types; either path str;
|
||||
description = ''
|
||||
An SSH public key (as an absolute file path or directly as a string),
|
||||
usually generated by `rad auth`.
|
||||
'';
|
||||
};
|
||||
node = {
|
||||
listenAddress = mkOption {
|
||||
type = types.str;
|
||||
listenAddress = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "[::]";
|
||||
example = "127.0.0.1";
|
||||
description = "The IP address on which `radicle-node` listens.";
|
||||
};
|
||||
listenPort = mkOption {
|
||||
type = types.port;
|
||||
listenPort = lib.mkOption {
|
||||
type = lib.types.port;
|
||||
default = 8776;
|
||||
description = "The port on which `radicle-node` listens.";
|
||||
};
|
||||
openFirewall = mkEnableOption "opening the firewall for `radicle-node`";
|
||||
extraArgs = mkOption {
|
||||
type = with types; listOf str;
|
||||
openFirewall = lib.mkEnableOption "opening the firewall for `radicle-node`";
|
||||
extraArgs = lib.mkOption {
|
||||
type = with lib.types; listOf str;
|
||||
default = [ ];
|
||||
description = "Extra arguments for `radicle-node`";
|
||||
};
|
||||
};
|
||||
configFile = mkOption {
|
||||
type = types.package;
|
||||
configFile = lib.mkOption {
|
||||
type = lib.types.package;
|
||||
internal = true;
|
||||
default = (json.generate "config.json" cfg.settings).overrideAttrs (previousAttrs: {
|
||||
preferLocalBuild = true;
|
||||
# None of the usual phases are run here because runCommandWith uses buildCommand,
|
||||
# so just append to buildCommand what would usually be a checkPhase.
|
||||
buildCommand = previousAttrs.buildCommand + optionalString cfg.checkConfig ''
|
||||
buildCommand = previousAttrs.buildCommand + lib.optionalString cfg.checkConfig ''
|
||||
ln -s $out config.json
|
||||
install -D -m 644 /dev/stdin keys/radicle.pub <<<"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBgFMhajUng+Rjj/sCFXI9PzG8BQjru2n7JgUVF1Kbv5 snakeoil"
|
||||
export RAD_HOME=$PWD
|
||||
${getExe' pkgs.buildPackages.radicle-node "rad"} config >/dev/null || {
|
||||
${lib.getExe' pkgs.buildPackages.radicle-node "rad"} config >/dev/null || {
|
||||
cat -n config.json
|
||||
echo "Invalid config.json according to rad."
|
||||
echo "Please double-check your services.radicle.settings (producing the config.json above),"
|
||||
@ -177,13 +176,13 @@ in
|
||||
'';
|
||||
});
|
||||
};
|
||||
checkConfig = mkEnableOption "checking the {file}`config.json` file resulting from {option}`services.radicle.settings`" // { default = true; };
|
||||
settings = mkOption {
|
||||
checkConfig = lib.mkEnableOption "checking the {file}`config.json` file resulting from {option}`services.radicle.settings`" // { default = true; };
|
||||
settings = lib.mkOption {
|
||||
description = ''
|
||||
See https://app.radicle.xyz/nodes/seed.radicle.garden/rad:z3gqcJUoA1n9HaHKufZs5FCSGazv5/tree/radicle/src/node/config.rs#L275
|
||||
'';
|
||||
default = { };
|
||||
example = literalExpression ''
|
||||
example = lib.literalExpression ''
|
||||
{
|
||||
web.pinned.repositories = [
|
||||
"rad:z3gqcJUoA1n9HaHKufZs5FCSGazv5" # heartwood
|
||||
@ -191,27 +190,27 @@ in
|
||||
];
|
||||
}
|
||||
'';
|
||||
type = types.submodule {
|
||||
type = lib.types.submodule {
|
||||
freeformType = json.type;
|
||||
};
|
||||
};
|
||||
httpd = {
|
||||
enable = mkEnableOption "Radicle HTTP gateway to radicle-node";
|
||||
package = mkPackageOption pkgs "radicle-httpd" { };
|
||||
listenAddress = mkOption {
|
||||
type = types.str;
|
||||
enable = lib.mkEnableOption "Radicle HTTP gateway to radicle-node";
|
||||
package = lib.mkPackageOption pkgs "radicle-httpd" { };
|
||||
listenAddress = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "127.0.0.1";
|
||||
description = "The IP address on which `radicle-httpd` listens.";
|
||||
};
|
||||
listenPort = mkOption {
|
||||
type = types.port;
|
||||
listenPort = lib.mkOption {
|
||||
type = lib.types.port;
|
||||
default = 8080;
|
||||
description = "The port on which `radicle-httpd` listens.";
|
||||
};
|
||||
nginx = mkOption {
|
||||
nginx = lib.mkOption {
|
||||
# Type of a single virtual host, or null.
|
||||
type = types.nullOr (types.submodule (
|
||||
recursiveUpdate (import ../web-servers/nginx/vhost-options.nix { inherit config lib; }) {
|
||||
type = lib.types.nullOr (lib.types.submodule (
|
||||
lib.recursiveUpdate (import ../web-servers/nginx/vhost-options.nix { inherit config lib; }) {
|
||||
options.serverName = {
|
||||
default = "radicle-${config.networking.hostName}.${config.networking.domain}";
|
||||
defaultText = "radicle-\${config.networking.hostName}.\${config.networking.domain}";
|
||||
@ -219,7 +218,7 @@ in
|
||||
}
|
||||
));
|
||||
default = null;
|
||||
example = literalExpression ''
|
||||
example = lib.literalExpression ''
|
||||
{
|
||||
serverAliases = [
|
||||
"seed.''${config.networking.domain}"
|
||||
@ -237,8 +236,8 @@ in
|
||||
If this is set to null (the default), no nginx virtual host will be configured.
|
||||
'';
|
||||
};
|
||||
extraArgs = mkOption {
|
||||
type = with types; listOf str;
|
||||
extraArgs = lib.mkOption {
|
||||
type = with lib.types; listOf str;
|
||||
default = [ ];
|
||||
description = "Extra arguments for `radicle-httpd`";
|
||||
};
|
||||
@ -246,19 +245,19 @@ in
|
||||
};
|
||||
};
|
||||
|
||||
config = mkIf cfg.enable (mkMerge [
|
||||
config = lib.mkIf cfg.enable (lib.mkMerge [
|
||||
{
|
||||
systemd.services.radicle-node = mkMerge [
|
||||
systemd.services.radicle-node = lib.mkMerge [
|
||||
(commonServiceConfig "radicle-node")
|
||||
{
|
||||
description = "Radicle Node";
|
||||
documentation = [ "man:radicle-node(1)" ];
|
||||
serviceConfig = {
|
||||
ExecStart = "${getExe' cfg.package "radicle-node"} --force --listen ${cfg.node.listenAddress}:${toString cfg.node.listenPort} ${escapeShellArgs cfg.node.extraArgs}";
|
||||
Restart = mkDefault "on-failure";
|
||||
ExecStart = "${lib.getExe' cfg.package "radicle-node"} --force --listen ${cfg.node.listenAddress}:${toString cfg.node.listenPort} ${lib.escapeShellArgs cfg.node.extraArgs}";
|
||||
Restart = lib.mkDefault "on-failure";
|
||||
RestartSec = "30";
|
||||
SocketBindAllow = [ "tcp:${toString cfg.node.listenPort}" ];
|
||||
SystemCallFilter = mkAfter [
|
||||
SystemCallFilter = lib.mkAfter [
|
||||
# Needed by git upload-pack which calls alarm() and setitimer() when providing a rad clone
|
||||
"@timer"
|
||||
];
|
||||
@ -271,11 +270,11 @@ in
|
||||
{
|
||||
serviceConfig =
|
||||
let keyCred = builtins.split ":" "${cfg.privateKeyFile}"; in
|
||||
if length keyCred > 1
|
||||
if lib.length keyCred > 1
|
||||
then {
|
||||
LoadCredentialEncrypted = [ cfg.privateKeyFile ];
|
||||
# Note that neither %d nor ${CREDENTIALS_DIRECTORY} works in BindReadOnlyPaths=
|
||||
BindReadOnlyPaths = [ "/run/credentials/radicle-node.service/${head keyCred}:${env.RAD_HOME}/keys/radicle" ];
|
||||
BindReadOnlyPaths = [ "/run/credentials/radicle-node.service/${lib.head keyCred}:${env.RAD_HOME}/keys/radicle" ];
|
||||
}
|
||||
else {
|
||||
LoadCredential = [ "radicle:${cfg.privateKeyFile}" ];
|
||||
@ -288,7 +287,7 @@ in
|
||||
rad-system
|
||||
];
|
||||
|
||||
networking.firewall = mkIf cfg.node.openFirewall {
|
||||
networking.firewall = lib.mkIf cfg.node.openFirewall {
|
||||
allowedTCPPorts = [ cfg.node.listenPort ];
|
||||
};
|
||||
|
||||
@ -304,19 +303,19 @@ in
|
||||
};
|
||||
}
|
||||
|
||||
(mkIf cfg.httpd.enable (mkMerge [
|
||||
(lib.mkIf cfg.httpd.enable (lib.mkMerge [
|
||||
{
|
||||
systemd.services.radicle-httpd = mkMerge [
|
||||
systemd.services.radicle-httpd = lib.mkMerge [
|
||||
(commonServiceConfig "radicle-httpd")
|
||||
{
|
||||
description = "Radicle HTTP gateway to radicle-node";
|
||||
documentation = [ "man:radicle-httpd(1)" ];
|
||||
serviceConfig = {
|
||||
ExecStart = "${getExe' cfg.httpd.package "radicle-httpd"} --listen ${cfg.httpd.listenAddress}:${toString cfg.httpd.listenPort} ${escapeShellArgs cfg.httpd.extraArgs}";
|
||||
Restart = mkDefault "on-failure";
|
||||
ExecStart = "${lib.getExe' cfg.httpd.package "radicle-httpd"} --listen ${cfg.httpd.listenAddress}:${toString cfg.httpd.listenPort} ${lib.escapeShellArgs cfg.httpd.extraArgs}";
|
||||
Restart = lib.mkDefault "on-failure";
|
||||
RestartSec = "10";
|
||||
SocketBindAllow = [ "tcp:${toString cfg.httpd.listenPort}" ];
|
||||
SystemCallFilter = mkAfter [
|
||||
SystemCallFilter = lib.mkAfter [
|
||||
# Needed by git upload-pack which calls alarm() and setitimer() when providing a git clone
|
||||
"@timer"
|
||||
];
|
||||
@ -328,12 +327,12 @@ in
|
||||
];
|
||||
}
|
||||
|
||||
(mkIf (cfg.httpd.nginx != null) {
|
||||
(lib.mkIf (cfg.httpd.nginx != null) {
|
||||
services.nginx.virtualHosts.${cfg.httpd.nginx.serverName} = lib.mkMerge [
|
||||
cfg.httpd.nginx
|
||||
{
|
||||
forceSSL = mkDefault true;
|
||||
enableACME = mkDefault true;
|
||||
forceSSL = lib.mkDefault true;
|
||||
enableACME = lib.mkDefault true;
|
||||
locations."/" = {
|
||||
proxyPass = "http://${cfg.httpd.listenAddress}:${toString cfg.httpd.listenPort}";
|
||||
recommendedProxySettings = true;
|
||||
@ -342,8 +341,8 @@ in
|
||||
];
|
||||
|
||||
services.radicle.settings = {
|
||||
node.alias = mkDefault cfg.httpd.nginx.serverName;
|
||||
node.externalAddresses = mkDefault [
|
||||
node.alias = lib.mkDefault cfg.httpd.nginx.serverName;
|
||||
node.externalAddresses = lib.mkDefault [
|
||||
"${cfg.httpd.nginx.serverName}:${toString cfg.node.listenPort}"
|
||||
];
|
||||
};
|
||||
|
@ -1,7 +1,4 @@
|
||||
{ config, lib, options, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
cfg = config.services.rippled;
|
||||
opt = options.services.rippled;
|
||||
@ -11,28 +8,28 @@ let
|
||||
dbCfg = db: ''
|
||||
type=${db.type}
|
||||
path=${db.path}
|
||||
${optionalString (db.compression != null) ("compression=${b2i db.compression}") }
|
||||
${optionalString (db.onlineDelete != null) ("online_delete=${toString db.onlineDelete}")}
|
||||
${optionalString (db.advisoryDelete != null) ("advisory_delete=${b2i db.advisoryDelete}")}
|
||||
${lib.optionalString (db.compression != null) ("compression=${b2i db.compression}") }
|
||||
${lib.optionalString (db.onlineDelete != null) ("online_delete=${toString db.onlineDelete}")}
|
||||
${lib.optionalString (db.advisoryDelete != null) ("advisory_delete=${b2i db.advisoryDelete}")}
|
||||
${db.extraOpts}
|
||||
'';
|
||||
|
||||
rippledCfg = ''
|
||||
[server]
|
||||
${concatMapStringsSep "\n" (n: "port_${n}") (attrNames cfg.ports)}
|
||||
${lib.concatMapStringsSep "\n" (n: "port_${n}") (lib.attrNames cfg.ports)}
|
||||
|
||||
${concatMapStrings (p: ''
|
||||
${lib.concatMapStrings (p: ''
|
||||
[port_${p.name}]
|
||||
ip=${p.ip}
|
||||
port=${toString p.port}
|
||||
protocol=${concatStringsSep "," p.protocol}
|
||||
${optionalString (p.user != "") "user=${p.user}"}
|
||||
${optionalString (p.password != "") "user=${p.password}"}
|
||||
admin=${concatStringsSep "," p.admin}
|
||||
${optionalString (p.ssl.key != null) "ssl_key=${p.ssl.key}"}
|
||||
${optionalString (p.ssl.cert != null) "ssl_cert=${p.ssl.cert}"}
|
||||
${optionalString (p.ssl.chain != null) "ssl_chain=${p.ssl.chain}"}
|
||||
'') (attrValues cfg.ports)}
|
||||
protocol=${lib.concatStringsSep "," p.protocol}
|
||||
${lib.optionalString (p.user != "") "user=${p.user}"}
|
||||
${lib.optionalString (p.password != "") "user=${p.password}"}
|
||||
admin=${lib.concatStringsSep "," p.admin}
|
||||
${lib.optionalString (p.ssl.key != null) "ssl_key=${p.ssl.key}"}
|
||||
${lib.optionalString (p.ssl.cert != null) "ssl_cert=${p.ssl.cert}"}
|
||||
${lib.optionalString (p.ssl.chain != null) "ssl_chain=${p.ssl.chain}"}
|
||||
'') (lib.attrValues cfg.ports)}
|
||||
|
||||
[database_path]
|
||||
${cfg.databasePath}
|
||||
@ -40,22 +37,22 @@ let
|
||||
[node_db]
|
||||
${dbCfg cfg.nodeDb}
|
||||
|
||||
${optionalString (cfg.tempDb != null) ''
|
||||
${lib.optionalString (cfg.tempDb != null) ''
|
||||
[temp_db]
|
||||
${dbCfg cfg.tempDb}''}
|
||||
|
||||
${optionalString (cfg.importDb != null) ''
|
||||
${lib.optionalString (cfg.importDb != null) ''
|
||||
[import_db]
|
||||
${dbCfg cfg.importDb}''}
|
||||
|
||||
[ips]
|
||||
${concatStringsSep "\n" cfg.ips}
|
||||
${lib.concatStringsSep "\n" cfg.ips}
|
||||
|
||||
[ips_fixed]
|
||||
${concatStringsSep "\n" cfg.ipsFixed}
|
||||
${lib.concatStringsSep "\n" cfg.ipsFixed}
|
||||
|
||||
[validators]
|
||||
${concatStringsSep "\n" cfg.validators}
|
||||
${lib.concatStringsSep "\n" cfg.validators}
|
||||
|
||||
[node_size]
|
||||
${cfg.nodeSize}
|
||||
@ -70,9 +67,9 @@ let
|
||||
${toString cfg.validationQuorum}
|
||||
|
||||
[sntp_servers]
|
||||
${concatStringsSep "\n" cfg.sntpServers}
|
||||
${lib.concatStringsSep "\n" cfg.sntpServers}
|
||||
|
||||
${optionalString cfg.statsd.enable ''
|
||||
${lib.optionalString cfg.statsd.enable ''
|
||||
[insight]
|
||||
server=statsd
|
||||
address=${cfg.statsd.address}
|
||||
@ -85,70 +82,70 @@ let
|
||||
|
||||
portOptions = { name, ...}: {
|
||||
options = {
|
||||
name = mkOption {
|
||||
name = lib.mkOption {
|
||||
internal = true;
|
||||
default = name;
|
||||
};
|
||||
|
||||
ip = mkOption {
|
||||
ip = lib.mkOption {
|
||||
default = "127.0.0.1";
|
||||
description = "Ip where rippled listens.";
|
||||
type = types.str;
|
||||
type = lib.types.str;
|
||||
};
|
||||
|
||||
port = mkOption {
|
||||
port = lib.mkOption {
|
||||
description = "Port where rippled listens.";
|
||||
type = types.port;
|
||||
type = lib.types.port;
|
||||
};
|
||||
|
||||
protocol = mkOption {
|
||||
protocol = lib.mkOption {
|
||||
description = "Protocols expose by rippled.";
|
||||
type = types.listOf (types.enum ["http" "https" "ws" "wss" "peer"]);
|
||||
type = lib.types.listOf (lib.types.enum ["http" "https" "ws" "wss" "peer"]);
|
||||
};
|
||||
|
||||
user = mkOption {
|
||||
user = lib.mkOption {
|
||||
description = "When set, these credentials will be required on HTTP/S requests.";
|
||||
type = types.str;
|
||||
type = lib.types.str;
|
||||
default = "";
|
||||
};
|
||||
|
||||
password = mkOption {
|
||||
password = lib.mkOption {
|
||||
description = "When set, these credentials will be required on HTTP/S requests.";
|
||||
type = types.str;
|
||||
type = lib.types.str;
|
||||
default = "";
|
||||
};
|
||||
|
||||
admin = mkOption {
|
||||
admin = lib.mkOption {
|
||||
description = "A comma-separated list of admin IP addresses.";
|
||||
type = types.listOf types.str;
|
||||
type = lib.types.listOf lib.types.str;
|
||||
default = ["127.0.0.1"];
|
||||
};
|
||||
|
||||
ssl = {
|
||||
key = mkOption {
|
||||
key = lib.mkOption {
|
||||
description = ''
|
||||
Specifies the filename holding the SSL key in PEM format.
|
||||
'';
|
||||
default = null;
|
||||
type = types.nullOr types.path;
|
||||
type = lib.types.nullOr lib.types.path;
|
||||
};
|
||||
|
||||
cert = mkOption {
|
||||
cert = lib.mkOption {
|
||||
description = ''
|
||||
Specifies the path to the SSL certificate file in PEM format.
|
||||
This is not needed if the chain includes it.
|
||||
'';
|
||||
default = null;
|
||||
type = types.nullOr types.path;
|
||||
type = lib.types.nullOr lib.types.path;
|
||||
};
|
||||
|
||||
chain = mkOption {
|
||||
chain = lib.mkOption {
|
||||
description = ''
|
||||
If you need a certificate chain, specify the path to the
|
||||
certificate chain here. The chain may include the end certificate.
|
||||
'';
|
||||
default = null;
|
||||
type = types.nullOr types.path;
|
||||
type = lib.types.nullOr lib.types.path;
|
||||
};
|
||||
};
|
||||
};
|
||||
@ -156,44 +153,44 @@ let
|
||||
|
||||
dbOptions = {
|
||||
options = {
|
||||
type = mkOption {
|
||||
type = lib.mkOption {
|
||||
description = "Rippled database type.";
|
||||
type = types.enum ["rocksdb" "nudb"];
|
||||
type = lib.types.enum ["rocksdb" "nudb"];
|
||||
default = "rocksdb";
|
||||
};
|
||||
|
||||
path = mkOption {
|
||||
path = lib.mkOption {
|
||||
description = "Location to store the database.";
|
||||
type = types.path;
|
||||
type = lib.types.path;
|
||||
default = cfg.databasePath;
|
||||
defaultText = literalExpression "config.${opt.databasePath}";
|
||||
defaultText = lib.literalExpression "config.${opt.databasePath}";
|
||||
};
|
||||
|
||||
compression = mkOption {
|
||||
compression = lib.mkOption {
|
||||
description = "Whether to enable snappy compression.";
|
||||
type = types.nullOr types.bool;
|
||||
type = lib.types.nullOr lib.types.bool;
|
||||
default = null;
|
||||
};
|
||||
|
||||
onlineDelete = mkOption {
|
||||
onlineDelete = lib.mkOption {
|
||||
description = "Enable automatic purging of older ledger information.";
|
||||
type = types.nullOr (types.addCheck types.int (v: v > 256));
|
||||
type = lib.types.nullOr (lib.types.addCheck lib.types.int (v: v > 256));
|
||||
default = cfg.ledgerHistory;
|
||||
defaultText = literalExpression "config.${opt.ledgerHistory}";
|
||||
defaultText = lib.literalExpression "config.${opt.ledgerHistory}";
|
||||
};
|
||||
|
||||
advisoryDelete = mkOption {
|
||||
advisoryDelete = lib.mkOption {
|
||||
description = ''
|
||||
If set, then require administrative RPC call "can_delete"
|
||||
to enable online deletion of ledger records.
|
||||
'';
|
||||
type = types.nullOr types.bool;
|
||||
type = lib.types.nullOr lib.types.bool;
|
||||
default = null;
|
||||
};
|
||||
|
||||
extraOpts = mkOption {
|
||||
extraOpts = lib.mkOption {
|
||||
description = "Extra database options.";
|
||||
type = types.lines;
|
||||
type = lib.types.lines;
|
||||
default = "";
|
||||
};
|
||||
};
|
||||
@ -207,13 +204,13 @@ in
|
||||
|
||||
options = {
|
||||
services.rippled = {
|
||||
enable = mkEnableOption "rippled, a decentralized cryptocurrency blockchain daemon implementing the XRP Ledger protocol in C++";
|
||||
enable = lib.mkEnableOption "rippled, a decentralized cryptocurrency blockchain daemon implementing the XRP Ledger protocol in C++";
|
||||
|
||||
package = mkPackageOption pkgs "rippled" { };
|
||||
package = lib.mkPackageOption pkgs "rippled" { };
|
||||
|
||||
ports = mkOption {
|
||||
ports = lib.mkOption {
|
||||
description = "Ports exposed by rippled";
|
||||
type = with types; attrsOf (submodule portOptions);
|
||||
type = with lib.types; attrsOf (submodule portOptions);
|
||||
default = {
|
||||
rpc = {
|
||||
port = 5005;
|
||||
@ -235,9 +232,9 @@ in
|
||||
};
|
||||
};
|
||||
|
||||
nodeDb = mkOption {
|
||||
nodeDb = lib.mkOption {
|
||||
description = "Rippled main database options.";
|
||||
type = with types; nullOr (submodule dbOptions);
|
||||
type = with lib.types; nullOr (submodule dbOptions);
|
||||
default = {
|
||||
type = "rocksdb";
|
||||
extraOpts = ''
|
||||
@ -250,28 +247,28 @@ in
|
||||
};
|
||||
};
|
||||
|
||||
tempDb = mkOption {
|
||||
tempDb = lib.mkOption {
|
||||
description = "Rippled temporary database options.";
|
||||
type = with types; nullOr (submodule dbOptions);
|
||||
type = with lib.types; nullOr (submodule dbOptions);
|
||||
default = null;
|
||||
};
|
||||
|
||||
importDb = mkOption {
|
||||
importDb = lib.mkOption {
|
||||
description = "Settings for performing a one-time import.";
|
||||
type = with types; nullOr (submodule dbOptions);
|
||||
type = with lib.types; nullOr (submodule dbOptions);
|
||||
default = null;
|
||||
};
|
||||
|
||||
nodeSize = mkOption {
|
||||
nodeSize = lib.mkOption {
|
||||
description = ''
|
||||
Rippled size of the node you are running.
|
||||
"tiny", "small", "medium", "large", and "huge"
|
||||
'';
|
||||
type = types.enum ["tiny" "small" "medium" "large" "huge"];
|
||||
type = lib.types.enum ["tiny" "small" "medium" "large" "huge"];
|
||||
default = "small";
|
||||
};
|
||||
|
||||
ips = mkOption {
|
||||
ips = lib.mkOption {
|
||||
description = ''
|
||||
List of hostnames or ips where the Ripple protocol is served.
|
||||
For a starter list, you can either copy entries from:
|
||||
@ -282,11 +279,11 @@ in
|
||||
address. By convention, if known, IPs are listed in from most
|
||||
to least trusted.
|
||||
'';
|
||||
type = types.listOf types.str;
|
||||
type = lib.types.listOf lib.types.str;
|
||||
default = ["r.ripple.com 51235"];
|
||||
};
|
||||
|
||||
ipsFixed = mkOption {
|
||||
ipsFixed = lib.mkOption {
|
||||
description = ''
|
||||
List of IP addresses or hostnames to which rippled should always
|
||||
attempt to maintain peer connections with. This is useful for
|
||||
@ -296,16 +293,16 @@ in
|
||||
|
||||
A port may optionally be specified after adding a space to the address
|
||||
'';
|
||||
type = types.listOf types.str;
|
||||
type = lib.types.listOf lib.types.str;
|
||||
default = [];
|
||||
};
|
||||
|
||||
validators = mkOption {
|
||||
validators = lib.mkOption {
|
||||
description = ''
|
||||
List of nodes to always accept as validators. Nodes are specified by domain
|
||||
or public key.
|
||||
'';
|
||||
type = types.listOf types.str;
|
||||
type = lib.types.listOf lib.types.str;
|
||||
default = [
|
||||
"n949f75evCHwgyP4fPVgaHqNHxUVN15PsJEZ3B3HnXPcPjcZAoy7 RL1"
|
||||
"n9MD5h24qrQqiyBC8aeqqCWvpiBiYQ3jxSr91uiDvmrkyHRdYLUj RL2"
|
||||
@ -315,46 +312,46 @@ in
|
||||
];
|
||||
};
|
||||
|
||||
databasePath = mkOption {
|
||||
databasePath = lib.mkOption {
|
||||
description = ''
|
||||
Path to the ripple database.
|
||||
'';
|
||||
type = types.path;
|
||||
type = lib.types.path;
|
||||
default = "/var/lib/rippled";
|
||||
};
|
||||
|
||||
validationQuorum = mkOption {
|
||||
validationQuorum = lib.mkOption {
|
||||
description = ''
|
||||
The minimum number of trusted validations a ledger must have before
|
||||
the server considers it fully validated.
|
||||
'';
|
||||
type = types.int;
|
||||
type = lib.types.int;
|
||||
default = 3;
|
||||
};
|
||||
|
||||
ledgerHistory = mkOption {
|
||||
ledgerHistory = lib.mkOption {
|
||||
description = ''
|
||||
The number of past ledgers to acquire on server startup and the minimum
|
||||
to maintain while running.
|
||||
'';
|
||||
type = types.either types.int (types.enum ["full"]);
|
||||
type = lib.types.either lib.types.int (lib.types.enum ["full"]);
|
||||
default = 1296000; # 1 month
|
||||
};
|
||||
|
||||
fetchDepth = mkOption {
|
||||
fetchDepth = lib.mkOption {
|
||||
description = ''
|
||||
The number of past ledgers to serve to other peers that request historical
|
||||
ledger data (or "full" for no limit).
|
||||
'';
|
||||
type = types.either types.int (types.enum ["full"]);
|
||||
type = lib.types.either lib.types.int (lib.types.enum ["full"]);
|
||||
default = "full";
|
||||
};
|
||||
|
||||
sntpServers = mkOption {
|
||||
sntpServers = lib.mkOption {
|
||||
description = ''
|
||||
IP address or domain of NTP servers to use for time synchronization.;
|
||||
'';
|
||||
type = types.listOf types.str;
|
||||
type = lib.types.listOf lib.types.str;
|
||||
default = [
|
||||
"time.windows.com"
|
||||
"time.apple.com"
|
||||
@ -363,40 +360,40 @@ in
|
||||
];
|
||||
};
|
||||
|
||||
logLevel = mkOption {
|
||||
logLevel = lib.mkOption {
|
||||
description = "Logging verbosity.";
|
||||
type = types.enum ["debug" "error" "info"];
|
||||
type = lib.types.enum ["debug" "error" "info"];
|
||||
default = "error";
|
||||
};
|
||||
|
||||
statsd = {
|
||||
enable = mkEnableOption "statsd monitoring for rippled";
|
||||
enable = lib.mkEnableOption "statsd monitoring for rippled";
|
||||
|
||||
address = mkOption {
|
||||
address = lib.mkOption {
|
||||
description = "The UDP address and port of the listening StatsD server.";
|
||||
default = "127.0.0.1:8125";
|
||||
type = types.str;
|
||||
type = lib.types.str;
|
||||
};
|
||||
|
||||
prefix = mkOption {
|
||||
prefix = lib.mkOption {
|
||||
description = "A string prepended to each collected metric.";
|
||||
default = "";
|
||||
type = types.str;
|
||||
type = lib.types.str;
|
||||
};
|
||||
};
|
||||
|
||||
extraConfig = mkOption {
|
||||
extraConfig = lib.mkOption {
|
||||
default = "";
|
||||
type = types.lines;
|
||||
type = lib.types.lines;
|
||||
description = ''
|
||||
Extra lines to be added verbatim to the rippled.cfg configuration file.
|
||||
'';
|
||||
};
|
||||
|
||||
config = mkOption {
|
||||
config = lib.mkOption {
|
||||
internal = true;
|
||||
default = pkgs.writeText "rippled.conf" rippledCfg;
|
||||
defaultText = literalMD "generated config file";
|
||||
defaultText = lib.literalMD "generated config file";
|
||||
};
|
||||
};
|
||||
};
|
||||
@ -404,7 +401,7 @@ in
|
||||
|
||||
###### implementation
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
config = lib.mkIf cfg.enable {
|
||||
|
||||
users.users.rippled = {
|
||||
description = "Ripple server user";
|
||||
|
@ -1,14 +1,11 @@
|
||||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
cfg = config.services.taskserver;
|
||||
|
||||
taskd = "${pkgs.taskserver}/bin/taskd";
|
||||
|
||||
mkManualPkiOption = desc: mkOption {
|
||||
type = types.nullOr types.path;
|
||||
mkManualPkiOption = desc: lib.mkOption {
|
||||
type = lib.types.nullOr lib.types.path;
|
||||
default = null;
|
||||
description = ''
|
||||
${desc}
|
||||
@ -46,8 +43,8 @@ let
|
||||
:::
|
||||
'';
|
||||
|
||||
mkExpireOption = desc: mkOption {
|
||||
type = types.nullOr types.int;
|
||||
mkExpireOption = desc: lib.mkOption {
|
||||
type = lib.types.nullOr lib.types.int;
|
||||
default = null;
|
||||
example = 365;
|
||||
apply = val: if val == null then -1 else val;
|
||||
@ -58,8 +55,8 @@ let
|
||||
};
|
||||
|
||||
autoPkiOptions = {
|
||||
bits = mkOption {
|
||||
type = types.int;
|
||||
bits = lib.mkOption {
|
||||
type = lib.types.int;
|
||||
default = 4096;
|
||||
example = 2048;
|
||||
description = mkAutoDesc "The bit size for generated keys.";
|
||||
@ -75,20 +72,20 @@ let
|
||||
|
||||
needToCreateCA = let
|
||||
notFound = path: let
|
||||
dotted = concatStringsSep "." path;
|
||||
dotted = lib.concatStringsSep "." path;
|
||||
in throw "Can't find option definitions for path `${dotted}'.";
|
||||
findPkiDefinitions = path: attrs: let
|
||||
mkSublist = key: val: let
|
||||
newPath = path ++ singleton key;
|
||||
in if isOption val
|
||||
then attrByPath newPath (notFound newPath) cfg.pki.manual
|
||||
newPath = path ++ lib.singleton key;
|
||||
in if lib.isOption val
|
||||
then lib.attrByPath newPath (notFound newPath) cfg.pki.manual
|
||||
else findPkiDefinitions newPath val;
|
||||
in flatten (mapAttrsToList mkSublist attrs);
|
||||
in all (x: x == null) (findPkiDefinitions [] manualPkiOptions);
|
||||
in lib.flatten (lib.mapAttrsToList mkSublist attrs);
|
||||
in lib.all (x: x == null) (findPkiDefinitions [] manualPkiOptions);
|
||||
|
||||
orgOptions = { ... }: {
|
||||
options.users = mkOption {
|
||||
type = types.uniq (types.listOf types.str);
|
||||
options.users = lib.mkOption {
|
||||
type = lib.types.uniq (lib.types.listOf lib.types.str);
|
||||
default = [];
|
||||
example = [ "alice" "bob" ];
|
||||
description = ''
|
||||
@ -96,8 +93,8 @@ let
|
||||
'';
|
||||
};
|
||||
|
||||
options.groups = mkOption {
|
||||
type = types.listOf types.str;
|
||||
options.groups = lib.mkOption {
|
||||
type = lib.types.listOf lib.types.str;
|
||||
default = [];
|
||||
example = [ "workers" "slackers" ];
|
||||
description = ''
|
||||
@ -137,8 +134,8 @@ let
|
||||
in {
|
||||
options = {
|
||||
services.taskserver = {
|
||||
enable = mkOption {
|
||||
type = types.bool;
|
||||
enable = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = false;
|
||||
description = let
|
||||
url = "https://nixos.org/manual/nixos/stable/index.html#module-services-taskserver";
|
||||
@ -150,26 +147,26 @@ in {
|
||||
'';
|
||||
};
|
||||
|
||||
user = mkOption {
|
||||
type = types.str;
|
||||
user = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "taskd";
|
||||
description = "User for Taskserver.";
|
||||
};
|
||||
|
||||
group = mkOption {
|
||||
type = types.str;
|
||||
group = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "taskd";
|
||||
description = "Group for Taskserver.";
|
||||
};
|
||||
|
||||
dataDir = mkOption {
|
||||
type = types.path;
|
||||
dataDir = lib.mkOption {
|
||||
type = lib.types.path;
|
||||
default = "/var/lib/taskserver";
|
||||
description = "Data directory for Taskserver.";
|
||||
};
|
||||
|
||||
ciphers = mkOption {
|
||||
type = types.nullOr (types.separatedString ":");
|
||||
ciphers = lib.mkOption {
|
||||
type = lib.types.nullOr (lib.types.separatedString ":");
|
||||
default = null;
|
||||
example = "NORMAL:-VERS-SSL3.0";
|
||||
description = let
|
||||
@ -180,8 +177,8 @@ in {
|
||||
'';
|
||||
};
|
||||
|
||||
organisations = mkOption {
|
||||
type = types.attrsOf (types.submodule orgOptions);
|
||||
organisations = lib.mkOption {
|
||||
type = lib.types.attrsOf (lib.types.submodule orgOptions);
|
||||
default = {};
|
||||
example.myShinyOrganisation.users = [ "alice" "bob" ];
|
||||
example.myShinyOrganisation.groups = [ "staff" "outsiders" ];
|
||||
@ -193,24 +190,24 @@ in {
|
||||
'';
|
||||
};
|
||||
|
||||
confirmation = mkOption {
|
||||
type = types.bool;
|
||||
confirmation = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = true;
|
||||
description = ''
|
||||
Determines whether certain commands are confirmed.
|
||||
'';
|
||||
};
|
||||
|
||||
debug = mkOption {
|
||||
type = types.bool;
|
||||
debug = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = false;
|
||||
description = ''
|
||||
Logs debugging information.
|
||||
'';
|
||||
};
|
||||
|
||||
extensions = mkOption {
|
||||
type = types.nullOr types.path;
|
||||
extensions = lib.mkOption {
|
||||
type = lib.types.nullOr lib.types.path;
|
||||
default = null;
|
||||
description = ''
|
||||
Fully qualified path of the Taskserver extension scripts.
|
||||
@ -218,32 +215,32 @@ in {
|
||||
'';
|
||||
};
|
||||
|
||||
ipLog = mkOption {
|
||||
type = types.bool;
|
||||
ipLog = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = false;
|
||||
description = ''
|
||||
Logs the IP addresses of incoming requests.
|
||||
'';
|
||||
};
|
||||
|
||||
queueSize = mkOption {
|
||||
type = types.int;
|
||||
queueSize = lib.mkOption {
|
||||
type = lib.types.int;
|
||||
default = 10;
|
||||
description = ''
|
||||
Size of the connection backlog, see {manpage}`listen(2)`.
|
||||
'';
|
||||
};
|
||||
|
||||
requestLimit = mkOption {
|
||||
type = types.int;
|
||||
requestLimit = lib.mkOption {
|
||||
type = lib.types.int;
|
||||
default = 1048576;
|
||||
description = ''
|
||||
Size limit of incoming requests, in bytes.
|
||||
'';
|
||||
};
|
||||
|
||||
allowedClientIDs = mkOption {
|
||||
type = with types; either str (listOf str);
|
||||
allowedClientIDs = lib.mkOption {
|
||||
type = with lib.types; either str (listOf str);
|
||||
default = [];
|
||||
example = [ "[Tt]ask [2-9]+" ];
|
||||
description = ''
|
||||
@ -256,8 +253,8 @@ in {
|
||||
'';
|
||||
};
|
||||
|
||||
disallowedClientIDs = mkOption {
|
||||
type = with types; either str (listOf str);
|
||||
disallowedClientIDs = lib.mkOption {
|
||||
type = with lib.types; either str (listOf str);
|
||||
default = [];
|
||||
example = [ "[Tt]ask [2-9]+" ];
|
||||
description = ''
|
||||
@ -270,8 +267,8 @@ in {
|
||||
'';
|
||||
};
|
||||
|
||||
listenHost = mkOption {
|
||||
type = types.str;
|
||||
listenHost = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "localhost";
|
||||
example = "::";
|
||||
description = ''
|
||||
@ -279,24 +276,24 @@ in {
|
||||
'';
|
||||
};
|
||||
|
||||
listenPort = mkOption {
|
||||
type = types.int;
|
||||
listenPort = lib.mkOption {
|
||||
type = lib.types.int;
|
||||
default = 53589;
|
||||
description = ''
|
||||
Port number of the Taskserver.
|
||||
'';
|
||||
};
|
||||
|
||||
openFirewall = mkOption {
|
||||
type = types.bool;
|
||||
openFirewall = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = false;
|
||||
description = ''
|
||||
Whether to open the firewall for the specified Taskserver port.
|
||||
'';
|
||||
};
|
||||
|
||||
fqdn = mkOption {
|
||||
type = types.str;
|
||||
fqdn = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "localhost";
|
||||
description = ''
|
||||
The fully qualified domain name of this server, which is also used
|
||||
@ -304,8 +301,8 @@ in {
|
||||
'';
|
||||
};
|
||||
|
||||
trust = mkOption {
|
||||
type = types.enum [ "allow all" "strict" ];
|
||||
trust = lib.mkOption {
|
||||
type = lib.types.enum [ "allow all" "strict" ];
|
||||
default = "strict";
|
||||
description = ''
|
||||
Determines how client certificates are validated.
|
||||
@ -320,8 +317,8 @@ in {
|
||||
pki.manual = manualPkiOptions;
|
||||
pki.auto = autoPkiOptions;
|
||||
|
||||
config = mkOption {
|
||||
type = types.attrs;
|
||||
config = lib.mkOption {
|
||||
type = lib.types.attrs;
|
||||
example.client.cert = "/tmp/debugging.cert";
|
||||
description = ''
|
||||
Configuration options to pass to Taskserver.
|
||||
@ -340,23 +337,23 @@ in {
|
||||
'';
|
||||
apply = let
|
||||
mkKey = path: if path == ["server" "listen"] then "server"
|
||||
else concatStringsSep "." path;
|
||||
else lib.concatStringsSep "." path;
|
||||
recurse = path: attrs: let
|
||||
mapper = name: val: let
|
||||
newPath = path ++ [ name ];
|
||||
scalar = if val == true then "true"
|
||||
else if val == false then "false"
|
||||
else toString val;
|
||||
in if isAttrs val then recurse newPath val
|
||||
in if lib.isAttrs val then recurse newPath val
|
||||
else [ "${mkKey newPath}=${scalar}" ];
|
||||
in concatLists (mapAttrsToList mapper attrs);
|
||||
in lib.concatLists (lib.mapAttrsToList mapper attrs);
|
||||
in recurse [];
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
imports = [
|
||||
(mkRemovedOptionModule ["services" "taskserver" "extraConfig"] ''
|
||||
(lib.mkRemovedOptionModule ["services" "taskserver" "extraConfig"] ''
|
||||
This option was removed in favor of `services.taskserver.config` with
|
||||
different semantics (it's now a list of attributes instead of lines).
|
||||
|
||||
@ -366,11 +363,11 @@ in {
|
||||
'')
|
||||
];
|
||||
|
||||
config = mkMerge [
|
||||
(mkIf cfg.enable {
|
||||
config = lib.mkMerge [
|
||||
(lib.mkIf cfg.enable {
|
||||
environment.systemPackages = [ nixos-taskserver ];
|
||||
|
||||
users.users = optionalAttrs (cfg.user == "taskd") {
|
||||
users.users = lib.optionalAttrs (cfg.user == "taskd") {
|
||||
taskd = {
|
||||
uid = config.ids.uids.taskd;
|
||||
description = "Taskserver user";
|
||||
@ -378,7 +375,7 @@ in {
|
||||
};
|
||||
};
|
||||
|
||||
users.groups = optionalAttrs (cfg.group == "taskd") {
|
||||
users.groups = lib.optionalAttrs (cfg.group == "taskd") {
|
||||
taskd.gid = config.ids.gids.taskd;
|
||||
};
|
||||
|
||||
@ -413,7 +410,7 @@ in {
|
||||
} else {
|
||||
cert = "${cfg.pki.manual.server.cert}";
|
||||
key = "${cfg.pki.manual.server.key}";
|
||||
${mapNullable (_: "crl") cfg.pki.manual.server.crl} = "${cfg.pki.manual.server.crl}";
|
||||
${lib.mapNullable (_: "crl") cfg.pki.manual.server.crl} = "${cfg.pki.manual.server.crl}";
|
||||
});
|
||||
|
||||
ca.cert = if needToCreateCA then "${cfg.dataDir}/keys/ca.cert"
|
||||
@ -464,8 +461,8 @@ in {
|
||||
|
||||
serviceConfig = {
|
||||
ExecStart = let
|
||||
mkCfgFlag = flag: escapeShellArg "--${flag}";
|
||||
cfgFlags = concatMapStringsSep " " mkCfgFlag cfg.config;
|
||||
mkCfgFlag = flag: lib.escapeShellArg "--${flag}";
|
||||
cfgFlags = lib.concatMapStringsSep " " mkCfgFlag cfg.config;
|
||||
in "@${taskd} taskd server ${cfgFlags}";
|
||||
ExecReload = "${pkgs.coreutils}/bin/kill -USR1 $MAINPID";
|
||||
Restart = "on-failure";
|
||||
@ -477,7 +474,7 @@ in {
|
||||
};
|
||||
};
|
||||
})
|
||||
(mkIf (cfg.enable && needToCreateCA) {
|
||||
(lib.mkIf (cfg.enable && needToCreateCA) {
|
||||
systemd.services.taskserver-ca = {
|
||||
wantedBy = [ "taskserver.service" ];
|
||||
after = [ "taskserver-init.service" ];
|
||||
@ -561,7 +558,7 @@ in {
|
||||
'';
|
||||
};
|
||||
})
|
||||
(mkIf (cfg.enable && cfg.openFirewall) {
|
||||
(lib.mkIf (cfg.enable && cfg.openFirewall) {
|
||||
networking.firewall.allowedTCPPorts = [ cfg.listenPort ];
|
||||
})
|
||||
];
|
||||
|
@ -1,11 +1,8 @@
|
||||
{ config, lib, options, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
cfg = config.services.graphite;
|
||||
opt = options.services.graphite;
|
||||
writeTextOrNull = f: t: mapNullable (pkgs.writeTextDir f) t;
|
||||
writeTextOrNull = f: t: lib.mapNullable (pkgs.writeTextDir f) t;
|
||||
|
||||
dataDir = cfg.dataDir;
|
||||
staticDir = cfg.dataDir + "/static";
|
||||
@ -20,7 +17,7 @@ let
|
||||
|
||||
graphiteLocalSettings = pkgs.writeText "graphite_local_settings.py" (
|
||||
"STATIC_ROOT = '${staticDir}'\n" +
|
||||
optionalString (config.time.timeZone != null) "TIME_ZONE = '${config.time.timeZone}'\n"
|
||||
lib.optionalString (config.time.timeZone != null) "TIME_ZONE = '${config.time.timeZone}'\n"
|
||||
+ cfg.web.extraConfig
|
||||
);
|
||||
|
||||
@ -32,7 +29,7 @@ let
|
||||
|
||||
configDir = pkgs.buildEnv {
|
||||
name = "graphite-config";
|
||||
paths = lists.filter (el: el != null) [
|
||||
paths = lib.lists.filter (el: el != null) [
|
||||
(writeTextOrNull "carbon.conf" cfg.carbon.config)
|
||||
(writeTextOrNull "storage-aggregation.conf" cfg.carbon.storageAggregation)
|
||||
(writeTextOrNull "storage-schemas.conf" cfg.carbon.storageSchemas)
|
||||
@ -62,16 +59,16 @@ let
|
||||
in {
|
||||
|
||||
imports = [
|
||||
(mkRemovedOptionModule ["services" "graphite" "api"] "")
|
||||
(mkRemovedOptionModule ["services" "graphite" "beacon"] "")
|
||||
(mkRemovedOptionModule ["services" "graphite" "pager"] "")
|
||||
(lib.mkRemovedOptionModule ["services" "graphite" "api"] "")
|
||||
(lib.mkRemovedOptionModule ["services" "graphite" "beacon"] "")
|
||||
(lib.mkRemovedOptionModule ["services" "graphite" "pager"] "")
|
||||
];
|
||||
|
||||
###### interface
|
||||
|
||||
options.services.graphite = {
|
||||
dataDir = mkOption {
|
||||
type = types.path;
|
||||
dataDir = lib.mkOption {
|
||||
type = lib.types.path;
|
||||
default = "/var/db/graphite";
|
||||
description = ''
|
||||
Data directory for graphite.
|
||||
@ -79,26 +76,26 @@ in {
|
||||
};
|
||||
|
||||
web = {
|
||||
enable = mkOption {
|
||||
enable = lib.mkOption {
|
||||
description = "Whether to enable graphite web frontend.";
|
||||
default = false;
|
||||
type = types.bool;
|
||||
type = lib.types.bool;
|
||||
};
|
||||
|
||||
listenAddress = mkOption {
|
||||
listenAddress = lib.mkOption {
|
||||
description = "Graphite web frontend listen address.";
|
||||
default = "127.0.0.1";
|
||||
type = types.str;
|
||||
type = lib.types.str;
|
||||
};
|
||||
|
||||
port = mkOption {
|
||||
port = lib.mkOption {
|
||||
description = "Graphite web frontend port.";
|
||||
default = 8080;
|
||||
type = types.port;
|
||||
type = lib.types.port;
|
||||
};
|
||||
|
||||
extraConfig = mkOption {
|
||||
type = types.str;
|
||||
extraConfig = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "";
|
||||
description = ''
|
||||
Graphite webapp settings. See:
|
||||
@ -108,7 +105,7 @@ in {
|
||||
};
|
||||
|
||||
carbon = {
|
||||
config = mkOption {
|
||||
config = lib.mkOption {
|
||||
description = "Content of carbon configuration file.";
|
||||
default = ''
|
||||
[cache]
|
||||
@ -121,19 +118,19 @@ in {
|
||||
LOG_UPDATES = False
|
||||
LOG_CACHE_HITS = False
|
||||
'';
|
||||
type = types.str;
|
||||
type = lib.types.str;
|
||||
};
|
||||
|
||||
enableCache = mkOption {
|
||||
enableCache = lib.mkOption {
|
||||
description = "Whether to enable carbon cache, the graphite storage daemon.";
|
||||
default = false;
|
||||
type = types.bool;
|
||||
type = lib.types.bool;
|
||||
};
|
||||
|
||||
storageAggregation = mkOption {
|
||||
storageAggregation = lib.mkOption {
|
||||
description = "Defines how to aggregate data to lower-precision retentions.";
|
||||
default = null;
|
||||
type = types.nullOr types.str;
|
||||
type = lib.types.nullOr lib.types.str;
|
||||
example = ''
|
||||
[all_min]
|
||||
pattern = \.min$
|
||||
@ -142,10 +139,10 @@ in {
|
||||
'';
|
||||
};
|
||||
|
||||
storageSchemas = mkOption {
|
||||
storageSchemas = lib.mkOption {
|
||||
description = "Defines retention rates for storing metrics.";
|
||||
default = "";
|
||||
type = types.nullOr types.str;
|
||||
type = lib.types.nullOr lib.types.str;
|
||||
example = ''
|
||||
[apache_busyWorkers]
|
||||
pattern = ^servers\.www.*\.workers\.busyWorkers$
|
||||
@ -153,27 +150,27 @@ in {
|
||||
'';
|
||||
};
|
||||
|
||||
blacklist = mkOption {
|
||||
blacklist = lib.mkOption {
|
||||
description = "Any metrics received which match one of the expressions will be dropped.";
|
||||
default = null;
|
||||
type = types.nullOr types.str;
|
||||
type = lib.types.nullOr lib.types.str;
|
||||
example = "^some\\.noisy\\.metric\\.prefix\\..*";
|
||||
};
|
||||
|
||||
whitelist = mkOption {
|
||||
whitelist = lib.mkOption {
|
||||
description = "Only metrics received which match one of the expressions will be persisted.";
|
||||
default = null;
|
||||
type = types.nullOr types.str;
|
||||
type = lib.types.nullOr lib.types.str;
|
||||
example = ".*";
|
||||
};
|
||||
|
||||
rewriteRules = mkOption {
|
||||
rewriteRules = lib.mkOption {
|
||||
description = ''
|
||||
Regular expression patterns that can be used to rewrite metric names
|
||||
in a search and replace fashion.
|
||||
'';
|
||||
default = null;
|
||||
type = types.nullOr types.str;
|
||||
type = lib.types.nullOr lib.types.str;
|
||||
example = ''
|
||||
[post]
|
||||
_sum$ =
|
||||
@ -181,16 +178,16 @@ in {
|
||||
'';
|
||||
};
|
||||
|
||||
enableRelay = mkOption {
|
||||
enableRelay = lib.mkOption {
|
||||
description = "Whether to enable carbon relay, the carbon replication and sharding service.";
|
||||
default = false;
|
||||
type = types.bool;
|
||||
type = lib.types.bool;
|
||||
};
|
||||
|
||||
relayRules = mkOption {
|
||||
relayRules = lib.mkOption {
|
||||
description = "Relay rules are used to send certain metrics to a certain backend.";
|
||||
default = null;
|
||||
type = types.nullOr types.str;
|
||||
type = lib.types.nullOr lib.types.str;
|
||||
example = ''
|
||||
[example]
|
||||
pattern = ^mydata\.foo\..+
|
||||
@ -198,16 +195,16 @@ in {
|
||||
'';
|
||||
};
|
||||
|
||||
enableAggregator = mkOption {
|
||||
enableAggregator = lib.mkOption {
|
||||
description = "Whether to enable carbon aggregator, the carbon buffering service.";
|
||||
default = false;
|
||||
type = types.bool;
|
||||
type = lib.types.bool;
|
||||
};
|
||||
|
||||
aggregationRules = mkOption {
|
||||
aggregationRules = lib.mkOption {
|
||||
description = "Defines if and how received metrics will be aggregated.";
|
||||
default = null;
|
||||
type = types.nullOr types.str;
|
||||
type = lib.types.nullOr lib.types.str;
|
||||
example = ''
|
||||
<env>.applications.<app>.all.requests (60) = sum <env>.applications.<app>.*.requests
|
||||
<env>.applications.<app>.all.latency (60) = avg <env>.applications.<app>.*.latency
|
||||
@ -216,47 +213,47 @@ in {
|
||||
};
|
||||
|
||||
seyren = {
|
||||
enable = mkOption {
|
||||
enable = lib.mkOption {
|
||||
description = "Whether to enable seyren service.";
|
||||
default = false;
|
||||
type = types.bool;
|
||||
type = lib.types.bool;
|
||||
};
|
||||
|
||||
port = mkOption {
|
||||
port = lib.mkOption {
|
||||
description = "Seyren listening port.";
|
||||
default = 8081;
|
||||
type = types.port;
|
||||
type = lib.types.port;
|
||||
};
|
||||
|
||||
seyrenUrl = mkOption {
|
||||
seyrenUrl = lib.mkOption {
|
||||
default = "http://localhost:${toString cfg.seyren.port}/";
|
||||
defaultText = literalExpression ''"http://localhost:''${toString config.${opt.seyren.port}}/"'';
|
||||
defaultText = lib.literalExpression ''"http://localhost:''${toString config.${opt.seyren.port}}/"'';
|
||||
description = "Host where seyren is accessible.";
|
||||
type = types.str;
|
||||
type = lib.types.str;
|
||||
};
|
||||
|
||||
graphiteUrl = mkOption {
|
||||
graphiteUrl = lib.mkOption {
|
||||
default = "http://${cfg.web.listenAddress}:${toString cfg.web.port}";
|
||||
defaultText = literalExpression ''"http://''${config.${opt.web.listenAddress}}:''${toString config.${opt.web.port}}"'';
|
||||
defaultText = lib.literalExpression ''"http://''${config.${opt.web.listenAddress}}:''${toString config.${opt.web.port}}"'';
|
||||
description = "Host where graphite service runs.";
|
||||
type = types.str;
|
||||
type = lib.types.str;
|
||||
};
|
||||
|
||||
mongoUrl = mkOption {
|
||||
mongoUrl = lib.mkOption {
|
||||
default = "mongodb://${config.services.mongodb.bind_ip}:27017/seyren";
|
||||
defaultText = literalExpression ''"mongodb://''${config.services.mongodb.bind_ip}:27017/seyren"'';
|
||||
defaultText = lib.literalExpression ''"mongodb://''${config.services.mongodb.bind_ip}:27017/seyren"'';
|
||||
description = "Mongodb connection string.";
|
||||
type = types.str;
|
||||
type = lib.types.str;
|
||||
};
|
||||
|
||||
extraConfig = mkOption {
|
||||
extraConfig = lib.mkOption {
|
||||
default = {};
|
||||
description = ''
|
||||
Extra seyren configuration. See
|
||||
<https://github.com/scobal/seyren#config>
|
||||
'';
|
||||
type = types.attrsOf types.str;
|
||||
example = literalExpression ''
|
||||
type = lib.types.attrsOf lib.types.str;
|
||||
example = lib.literalExpression ''
|
||||
{
|
||||
GRAPHITE_USERNAME = "user";
|
||||
GRAPHITE_PASSWORD = "pass";
|
||||
@ -268,8 +265,8 @@ in {
|
||||
|
||||
###### implementation
|
||||
|
||||
config = mkMerge [
|
||||
(mkIf cfg.carbon.enableCache {
|
||||
config = lib.mkMerge [
|
||||
(lib.mkIf cfg.carbon.enableCache {
|
||||
systemd.services.carbonCache = let name = "carbon-cache"; in {
|
||||
description = "Graphite Data Storage Backend";
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
@ -290,7 +287,7 @@ in {
|
||||
};
|
||||
})
|
||||
|
||||
(mkIf cfg.carbon.enableAggregator {
|
||||
(lib.mkIf cfg.carbon.enableAggregator {
|
||||
systemd.services.carbonAggregator = let name = "carbon-aggregator"; in {
|
||||
enable = cfg.carbon.enableAggregator;
|
||||
description = "Carbon Data Aggregator";
|
||||
@ -307,7 +304,7 @@ in {
|
||||
};
|
||||
})
|
||||
|
||||
(mkIf cfg.carbon.enableRelay {
|
||||
(lib.mkIf cfg.carbon.enableRelay {
|
||||
systemd.services.carbonRelay = let name = "carbon-relay"; in {
|
||||
description = "Carbon Data Relay";
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
@ -323,13 +320,13 @@ in {
|
||||
};
|
||||
})
|
||||
|
||||
(mkIf (cfg.carbon.enableCache || cfg.carbon.enableAggregator || cfg.carbon.enableRelay) {
|
||||
(lib.mkIf (cfg.carbon.enableCache || cfg.carbon.enableAggregator || cfg.carbon.enableRelay) {
|
||||
environment.systemPackages = [
|
||||
pkgs.python3Packages.carbon
|
||||
];
|
||||
})
|
||||
|
||||
(mkIf cfg.web.enable ({
|
||||
(lib.mkIf cfg.web.enable ({
|
||||
systemd.services.graphiteWeb = {
|
||||
description = "Graphite Web Interface";
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
@ -343,7 +340,7 @@ in {
|
||||
];
|
||||
};
|
||||
penvPack = "${penv}/${pkgs.python3.sitePackages}";
|
||||
in concatStringsSep ":" [
|
||||
in lib.concatStringsSep ":" [
|
||||
"${graphiteLocalSettingsDir}"
|
||||
"${penvPack}"
|
||||
# explicitly adding pycairo in path because it cannot be imported via buildEnv
|
||||
@ -389,7 +386,7 @@ in {
|
||||
environment.systemPackages = [ pkgs.python3Packages.graphite-web ];
|
||||
}))
|
||||
|
||||
(mkIf cfg.seyren.enable {
|
||||
(lib.mkIf cfg.seyren.enable {
|
||||
systemd.services.seyren = {
|
||||
description = "Graphite Alerting Dashboard";
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
@ -409,10 +406,10 @@ in {
|
||||
'';
|
||||
};
|
||||
|
||||
services.mongodb.enable = mkDefault true;
|
||||
services.mongodb.enable = lib.mkDefault true;
|
||||
})
|
||||
|
||||
(mkIf (
|
||||
(lib.mkIf (
|
||||
cfg.carbon.enableCache || cfg.carbon.enableAggregator || cfg.carbon.enableRelay ||
|
||||
cfg.web.enable || cfg.seyren.enable
|
||||
) {
|
||||
|
@ -3,7 +3,7 @@
|
||||
let
|
||||
inherit (lib) concatStrings foldl foldl' genAttrs literalExpression maintainers
|
||||
mapAttrs mapAttrsToList mkDefault mkEnableOption mkIf mkMerge mkOption
|
||||
optional types mkOptionDefault flip attrNames;
|
||||
optional types mkOptionDefault flip attrNames xor;
|
||||
|
||||
cfg = config.services.prometheus.exporters;
|
||||
|
||||
@ -230,6 +230,7 @@ let
|
||||
in
|
||||
mkIf conf.enable {
|
||||
warnings = conf.warnings or [];
|
||||
assertions = conf.assertions or [];
|
||||
users.users."${name}-exporter" = (mkIf (conf.user == "${name}-exporter" && !enableDynamicUser) {
|
||||
description = "Prometheus ${name} exporter service user";
|
||||
isSystemUser = true;
|
||||
@ -359,13 +360,6 @@ in
|
||||
Please specify either 'services.prometheus.exporters.nextcloud.passwordFile' or
|
||||
'services.prometheus.exporters.nextcloud.tokenFile'
|
||||
'';
|
||||
} {
|
||||
assertion = cfg.pgbouncer.enable -> (
|
||||
(cfg.pgbouncer.connectionStringFile != null || cfg.pgbouncer.connectionString != "")
|
||||
);
|
||||
message = ''
|
||||
PgBouncer exporter needs either connectionStringFile or connectionString configured"
|
||||
'';
|
||||
} {
|
||||
assertion = cfg.sql.enable -> (
|
||||
(cfg.sql.configFile == null) != (cfg.sql.configuration == null)
|
||||
@ -405,7 +399,15 @@ in
|
||||
Please ensure you have either `services.prometheus.exporters.deluge.delugePassword'
|
||||
or `services.prometheus.exporters.deluge.delugePasswordFile' set!
|
||||
'';
|
||||
} ] ++ (flip map (attrNames exporterOpts) (exporter: {
|
||||
} {
|
||||
assertion = cfg.pgbouncer.enable -> (
|
||||
xor (cfg.pgbouncer.connectionEnvFile == null) (cfg.pgbouncer.connectionString == null)
|
||||
);
|
||||
message = ''
|
||||
Options `services.prometheus.exporters.pgbouncer.connectionEnvFile` and
|
||||
`services.prometheus.exporters.pgbouncer.connectionString` are mutually exclusive!
|
||||
'';
|
||||
}] ++ (flip map (attrNames exporterOpts) (exporter: {
|
||||
assertion = cfg.${exporter}.firewallFilter != null -> cfg.${exporter}.openFirewall;
|
||||
message = ''
|
||||
The `firewallFilter'-option of exporter ${exporter} doesn't have any effect unless
|
||||
@ -419,11 +421,6 @@ in
|
||||
Consider using `services.prometheus.exporters.idrac.configuration` instead.
|
||||
''
|
||||
)
|
||||
(mkIf
|
||||
(cfg.pgbouncer.enable && cfg.pgbouncer.connectionString != "") ''
|
||||
config.services.prometheus.exporters.pgbouncer.connectionString is insecure. Use connectionStringFile instead.
|
||||
''
|
||||
)
|
||||
] ++ config.services.prometheus.exporters.warnings;
|
||||
}] ++ [(mkIf config.services.prometheus.exporters.rtl_433.enable {
|
||||
hardware.rtl-sdr.enable = mkDefault true;
|
||||
|
@ -7,11 +7,8 @@ let
|
||||
mkPackageOption
|
||||
types
|
||||
optionals
|
||||
optionalString
|
||||
getExe
|
||||
getExe'
|
||||
escapeShellArg
|
||||
escapeShellArgs
|
||||
concatStringsSep
|
||||
;
|
||||
in
|
||||
@ -29,8 +26,8 @@ in
|
||||
};
|
||||
|
||||
connectionString = mkOption {
|
||||
type = types.str;
|
||||
default = "";
|
||||
type = types.nullOr types.str;
|
||||
default = null;
|
||||
example = "postgres://admin:@localhost:6432/pgbouncer?sslmode=require";
|
||||
description = ''
|
||||
Connection string for accessing pgBouncer.
|
||||
@ -43,26 +40,28 @@ in
|
||||
auth_file if auth_type other than "any" is used.
|
||||
|
||||
WARNING: this secret is stored in the world-readable Nix store!
|
||||
Use {option}`connectionStringFile` instead.
|
||||
Use [](#opt-services.prometheus.exporters.pgbouncer.connectionEnvFile) if the
|
||||
URL contains a secret.
|
||||
'';
|
||||
};
|
||||
|
||||
connectionStringFile = mkOption {
|
||||
type = types.nullOr types.path;
|
||||
connectionEnvFile = mkOption {
|
||||
type = types.nullOr types.str;
|
||||
default = null;
|
||||
example = "/run/keys/pgBouncer-connection-string";
|
||||
description = ''
|
||||
File that contains pgBouncer connection string in format:
|
||||
postgres://admin:@localhost:6432/pgbouncer?sslmode=require
|
||||
File that must contain the environment variable
|
||||
`PGBOUNCER_EXPORTER_CONNECTION_STRING` which is set to the connection
|
||||
string used by pgbouncer. I.e. the format is supposed to look like this:
|
||||
|
||||
NOTE: You MUST keep pgbouncer as database name (special internal db)!!!
|
||||
```
|
||||
PGBOUNCER_EXPORTER_CONNECTION_STRING="postgres://admin@localhost:6432/pgbouncer?sslmode=require"
|
||||
```
|
||||
|
||||
NOTE: ignore_startup_parameters MUST contain "extra_float_digits".
|
||||
NOTE: You MUST keep pgbouncer as database name (special internal db)!
|
||||
NOTE: `services.pgbouncer.settings.pgbouncer.ignore_startup_parameters`
|
||||
MUST contain "extra_float_digits".
|
||||
|
||||
NOTE: Admin user (with password or passwordless) MUST exist in the
|
||||
auth_file if auth_type other than "any" is used.
|
||||
|
||||
{option}`connectionStringFile` takes precedence over {option}`connectionString`
|
||||
Mutually exclusive with [](#opt-services.prometheus.exporters.pgbouncer.connectionString).
|
||||
'';
|
||||
};
|
||||
|
||||
@ -126,16 +125,11 @@ in
|
||||
|
||||
serviceOpts = {
|
||||
after = [ "pgbouncer.service" ];
|
||||
script = optionalString (cfg.connectionStringFile != null) ''
|
||||
connectionString=$(${escapeShellArgs [
|
||||
(getExe' pkgs.coreutils "cat") "--" cfg.connectionStringFile
|
||||
]})
|
||||
'' + concatStringsSep " " ([
|
||||
script = concatStringsSep " " ([
|
||||
"exec -- ${escapeShellArg (getExe cfg.package)}"
|
||||
"--web.listen-address ${cfg.listenAddress}:${toString cfg.port}"
|
||||
"--pgBouncer.connectionString ${if cfg.connectionStringFile != null
|
||||
then "\"$connectionString\""
|
||||
else "${escapeShellArg cfg.connectionString}"}"
|
||||
] ++ optionals (cfg.connectionString != null) [
|
||||
"--pgBouncer.connectionString ${escapeShellArg cfg.connectionString}"
|
||||
] ++ optionals (cfg.telemetryPath != null) [
|
||||
"--web.telemetry-path ${escapeShellArg cfg.telemetryPath}"
|
||||
] ++ optionals (cfg.pidFile != null) [
|
||||
@ -151,5 +145,21 @@ in
|
||||
] ++ cfg.extraFlags);
|
||||
|
||||
serviceConfig.RestrictAddressFamilies = [ "AF_INET" "AF_INET6" "AF_UNIX" ];
|
||||
serviceConfig.EnvironmentFile = lib.mkIf (cfg.connectionEnvFile != null) [
|
||||
cfg.connectionEnvFile
|
||||
];
|
||||
};
|
||||
|
||||
imports = [
|
||||
(lib.mkRemovedOptionModule [ "connectionStringFile" ] ''
|
||||
As replacement, the option `services.prometheus.exporters.pgbouncer.connectionEnvFile`
|
||||
has been added. In contrast to `connectionStringFile` it must be an environment file
|
||||
with the connection string being set to `PGBOUNCER_EXPORTER_CONNECTION_STRING`.
|
||||
|
||||
The change was necessary since the former option wrote the contents of the file
|
||||
into the cmdline of the exporter making the connection string effectively
|
||||
world-readable.
|
||||
'')
|
||||
({ options.warnings = options.warnings; options.assertions = options.assertions; })
|
||||
];
|
||||
}
|
||||
|
45
nixos/modules/services/monitoring/todesk.nix
Normal file
45
nixos/modules/services/monitoring/todesk.nix
Normal file
@ -0,0 +1,45 @@
|
||||
{
|
||||
config,
|
||||
lib,
|
||||
pkgs,
|
||||
...
|
||||
}:
|
||||
|
||||
let
|
||||
cfg = config.services.todesk;
|
||||
in
|
||||
{
|
||||
options = {
|
||||
services.todesk.enable = lib.mkEnableOption "ToDesk daemon";
|
||||
services.todesk.package = lib.mkPackageOption pkgs "todesk" { };
|
||||
};
|
||||
|
||||
config = lib.mkIf cfg.enable {
|
||||
|
||||
environment.systemPackages = [ cfg.package ];
|
||||
|
||||
systemd.services.todeskd = {
|
||||
description = "ToDesk Daemon Service";
|
||||
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
wants = [
|
||||
"network-online.target"
|
||||
"display-manager.service"
|
||||
"nss-lookup.target"
|
||||
];
|
||||
serviceConfig = {
|
||||
Type = "simple";
|
||||
ExecStart = "${cfg.package}/bin/todesk service";
|
||||
ExecReload = "${pkgs.coreutils}/bin/kill -SIGINT $MAINPID";
|
||||
Restart = "on-failure";
|
||||
WorkingDirectory = "/var/lib/todesk";
|
||||
PrivateTmp = true;
|
||||
StateDirectory = "todesk";
|
||||
StateDirectoryMode = "0777"; # Desktop application read and write /opt/todesk/config/config.ini. Such a pain!
|
||||
ProtectSystem = "strict";
|
||||
ProtectHome = "read-only";
|
||||
RemoveIPC = "yes";
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
@ -64,7 +64,7 @@ let
|
||||
|
||||
path = [ pkgs.iptables pkgs.iproute2 pkgs.nettools ];
|
||||
|
||||
serviceConfig.ExecStart = "@${openvpn}/sbin/openvpn openvpn --suppress-timestamps --config ${configFile}";
|
||||
serviceConfig.ExecStart = "@${openvpn}/sbin/openvpn openvpn --suppress-timestamps --config ${configFile} ${cfg.extraArgs}";
|
||||
serviceConfig.Restart = "always";
|
||||
serviceConfig.Type = "notify";
|
||||
};
|
||||
@ -181,6 +181,15 @@ in
|
||||
'';
|
||||
};
|
||||
|
||||
extraArgs = mkOption {
|
||||
default = null;
|
||||
type = listOf str;
|
||||
description = ''
|
||||
Additional command line arguments to pass to this OpenVPN instance.
|
||||
'';
|
||||
apply = lib.escapeShellArgs;
|
||||
};
|
||||
|
||||
authUserPass = mkOption {
|
||||
default = null;
|
||||
description = ''
|
||||
|
@ -48,6 +48,13 @@ let
|
||||
};
|
||||
};
|
||||
|
||||
# The original argument name `websocketPingFrequency` is a misnomer, as the frequency is the inverse of the interval.
|
||||
websocketPingInterval = lib.mkOption {
|
||||
description = "Frequency at which the client will send websocket ping to the server.";
|
||||
type = lib.types.nullOr lib.types.ints.unsigned;
|
||||
default = null;
|
||||
};
|
||||
|
||||
loggingLevel = lib.mkOption {
|
||||
description = ''
|
||||
Passed to --log-lvl
|
||||
@ -232,13 +239,6 @@ let
|
||||
default = true;
|
||||
};
|
||||
|
||||
# The original argument name `websocketPingFrequency` is a misnomer, as the frequency is the inverse of the interval.
|
||||
websocketPingInterval = lib.mkOption {
|
||||
description = "Frequency at which the client will send websocket ping to the server.";
|
||||
type = lib.types.nullOr lib.types.ints.unsigned;
|
||||
default = null;
|
||||
};
|
||||
|
||||
upgradeCredentials = lib.mkOption {
|
||||
description = ''
|
||||
Use these credentials to authenticate during the HTTP upgrade request
|
||||
@ -321,6 +321,7 @@ let
|
||||
tls-certificate =
|
||||
if useACMEHost != null then "${certConfig.directory}/fullchain.pem" else "${tlsCertificate}";
|
||||
tls-private-key = if useACMEHost != null then "${certConfig.directory}/key.pem" else "${tlsKey}";
|
||||
websocket-ping-frequency-sec = websocketPingInterval;
|
||||
} extraArgs
|
||||
)
|
||||
} \
|
||||
|
@ -17,6 +17,10 @@ let
|
||||
extraConfig = mkPhpIni cfg.phpOptions;
|
||||
};
|
||||
|
||||
# "you're escaped" -> "'you\'re escaped'"
|
||||
# https://www.php.net/manual/en/language.types.string.php#language.types.string.syntax.single
|
||||
toPhpString = s: "'${escape [ "'" "\\" ] s}'";
|
||||
|
||||
dokuwikiAclAuthConfig = hostName: cfg: let
|
||||
inherit (cfg) acl;
|
||||
acl_gen = concatMapStringsSep "\n" (l: "${l.page} \t ${l.actor} \t ${toString l.level}");
|
||||
@ -43,12 +47,12 @@ let
|
||||
mkPhpValue = v: let
|
||||
isHasAttr = s: isAttrs v && hasAttr s v;
|
||||
in
|
||||
if isString v then escapeShellArg v
|
||||
if isString v then toPhpString v
|
||||
# NOTE: If any value contains a , (comma) this will not get escaped
|
||||
else if isList v && any lib.strings.isCoercibleToString v then escapeShellArg (concatMapStringsSep "," toString v)
|
||||
else if isList v && any lib.strings.isCoercibleToString v then toPhpString (concatMapStringsSep "," toString v)
|
||||
else if isInt v then toString v
|
||||
else if isBool v then toString (if v then 1 else 0)
|
||||
else if isHasAttr "_file" then "trim(file_get_contents(${lib.escapeShellArg v._file}))"
|
||||
else if isHasAttr "_file" then "trim(file_get_contents(${toPhpString v._file}))"
|
||||
else if isHasAttr "_raw" then v._raw
|
||||
else abort "The dokuwiki localConf value ${lib.generators.toPretty {} v} can not be encoded."
|
||||
;
|
||||
@ -59,7 +63,7 @@ let
|
||||
[" = ${mkPhpValue v};"]
|
||||
else
|
||||
mkPhpAttrVals v;
|
||||
in map (e: "[${escapeShellArg k}]${e}") (flatten values);
|
||||
in map (e: "[${toPhpString k}]${e}") (flatten values);
|
||||
|
||||
dokuwikiLocalConfig = hostName: cfg: let
|
||||
conf_gen = c: map (v: "$conf${v}") (mkPhpAttrVals c);
|
||||
|
@ -54,24 +54,20 @@ in
|
||||
};
|
||||
baseurl = mkOption {
|
||||
type = types.str;
|
||||
default = "";
|
||||
example = "/gancio";
|
||||
description = "The URL path under which the server is reachable.";
|
||||
default = "http${
|
||||
lib.optionalString config.services.nginx.virtualHosts."${cfg.settings.hostname}".enableACME "s"
|
||||
}://${cfg.settings.hostname}";
|
||||
defaultText = lib.literalExpression ''"https://''${cfg.settings.hostname}"'';
|
||||
example = "https://demo.gancio.org/gancio";
|
||||
description = "The full URL under which the server is reachable.";
|
||||
};
|
||||
server = {
|
||||
host = mkOption {
|
||||
type = types.str;
|
||||
default = "localhost";
|
||||
example = "::";
|
||||
socket = mkOption {
|
||||
type = types.path;
|
||||
readOnly = true;
|
||||
default = "/run/gancio/socket";
|
||||
description = ''
|
||||
The address (IPv4, IPv6 or DNS) for the gancio server to listen on.
|
||||
'';
|
||||
};
|
||||
port = mkOption {
|
||||
type = types.port;
|
||||
default = 13120;
|
||||
description = ''
|
||||
Port number of the gancio server to listen on.
|
||||
The unix socket for the gancio server to listen on.
|
||||
'';
|
||||
};
|
||||
};
|
||||
@ -157,11 +153,18 @@ in
|
||||
};
|
||||
|
||||
nginx = mkOption {
|
||||
type = types.submodule (import ../web-servers/nginx/vhost-options.nix { inherit config lib; });
|
||||
type = types.submodule (
|
||||
lib.recursiveUpdate (import ../web-servers/nginx/vhost-options.nix { inherit config lib; }) {
|
||||
# enable encryption by default,
|
||||
# as sensitive login credentials should not be transmitted in clear text.
|
||||
options.forceSSL.default = true;
|
||||
options.enableACME.default = true;
|
||||
}
|
||||
);
|
||||
default = { };
|
||||
example = {
|
||||
enableACME = true;
|
||||
forceSSL = true;
|
||||
enableACME = false;
|
||||
forceSSL = false;
|
||||
};
|
||||
description = "Extra configuration for the nginx virtual host of gancio.";
|
||||
};
|
||||
@ -224,6 +227,10 @@ in
|
||||
|
||||
serviceConfig = {
|
||||
ExecStart = "${getExe cfg.package} start ${configFile}";
|
||||
# set umask so that nginx can write to the server socket
|
||||
# FIXME: upstream socket permission configuration in Nuxt
|
||||
UMask = "0002";
|
||||
RuntimeDirectory = "gancio";
|
||||
StateDirectory = "gancio";
|
||||
WorkingDirectory = "/var/lib/gancio";
|
||||
LogsDirectory = "gancio";
|
||||
@ -260,8 +267,6 @@ in
|
||||
virtualHosts."${cfg.settings.hostname}" = mkMerge [
|
||||
cfg.nginx
|
||||
{
|
||||
enableACME = mkDefault true;
|
||||
forceSSL = mkDefault true;
|
||||
locations = {
|
||||
"/" = {
|
||||
index = "index.html";
|
||||
@ -269,12 +274,14 @@ in
|
||||
};
|
||||
"@proxy" = {
|
||||
proxyWebsockets = true;
|
||||
proxyPass = "http://${cfg.settings.server.host}:${toString cfg.settings.server.port}";
|
||||
proxyPass = "http://unix:${cfg.settings.server.socket}";
|
||||
recommendedProxySettings = true;
|
||||
};
|
||||
};
|
||||
}
|
||||
];
|
||||
};
|
||||
# for nginx to access gancio socket
|
||||
users.users."${config.services.nginx.user}".extraGroups = [ config.users.users.${cfg.user}.group ];
|
||||
};
|
||||
}
|
||||
|
@ -170,7 +170,7 @@ in
|
||||
|
||||
apply = set: {
|
||||
script = ''
|
||||
unset PATH
|
||||
export PATH=
|
||||
for i in ${toString path}; do
|
||||
PATH=$PATH:$i/bin:$i/sbin
|
||||
done
|
||||
|
@ -87,9 +87,14 @@ in {
|
||||
environment.systemPackages = [ pkgs.thin-provisioning-tools ];
|
||||
})
|
||||
(mkIf cfg.boot.vdo.enable {
|
||||
assertions = [{
|
||||
assertion = lib.versionAtLeast config.boot.kernelPackages.kernel.version "6.9";
|
||||
message = "boot.vdo.enable requires at least kernel version 6.9";
|
||||
}];
|
||||
|
||||
boot = {
|
||||
initrd = {
|
||||
kernelModules = [ "kvdo" ];
|
||||
kernelModules = [ "dm-vdo" ];
|
||||
|
||||
systemd.initrdBin = lib.mkIf config.boot.initrd.services.lvm.enable [ pkgs.vdo ];
|
||||
|
||||
@ -98,16 +103,15 @@ in {
|
||||
copy_bin_and_libs ${pkgs.vdo}/bin/$BIN
|
||||
done
|
||||
substituteInPlace $out/bin/vdorecover --replace "${pkgs.bash}/bin/bash" "/bin/sh"
|
||||
substituteInPlace $out/bin/adaptLVMVDO.sh --replace "${pkgs.bash}/bin/bash" "/bin/sh"
|
||||
substituteInPlace $out/bin/adaptlvm --replace "${pkgs.bash}/bin/bash" "/bin/sh"
|
||||
'';
|
||||
|
||||
extraUtilsCommandsTest = mkIf (!config.boot.initrd.systemd.enable)''
|
||||
ls ${pkgs.vdo}/bin/ | grep -vE '(adaptLVMVDO|vdorecover)' | while read BIN; do
|
||||
ls ${pkgs.vdo}/bin/ | grep -vE '(adaptlvm|vdorecover)' | while read BIN; do
|
||||
$out/bin/$(basename $BIN) --help > /dev/null
|
||||
done
|
||||
'';
|
||||
};
|
||||
extraModulePackages = [ config.boot.kernelPackages.kvdo ];
|
||||
};
|
||||
|
||||
services.lvm.package = mkOverride 999 pkgs.lvm2_vdo; # this overrides mkDefault
|
||||
|
@ -1,34 +1,22 @@
|
||||
{
|
||||
config,
|
||||
lib,
|
||||
pkgs,
|
||||
...
|
||||
}:
|
||||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
let
|
||||
cfg = config.virtualisation.azureImage;
|
||||
virtualisationOptions = import ./virtualisation-options.nix;
|
||||
in
|
||||
{
|
||||
imports = [
|
||||
./azure-common.nix
|
||||
virtualisationOptions.diskSize
|
||||
(lib.mkRenamedOptionModuleWith {
|
||||
sinceRelease = 2411;
|
||||
from = [
|
||||
"virtualisation"
|
||||
"azureImage"
|
||||
"diskSize"
|
||||
];
|
||||
to = [
|
||||
"virtualisation"
|
||||
"diskSize"
|
||||
];
|
||||
})
|
||||
];
|
||||
imports = [ ./azure-common.nix ];
|
||||
|
||||
options.virtualisation.azureImage = {
|
||||
diskSize = mkOption {
|
||||
type = with types; either (enum [ "auto" ]) int;
|
||||
default = "auto";
|
||||
example = 2048;
|
||||
description = ''
|
||||
Size of disk image. Unit is MB.
|
||||
'';
|
||||
};
|
||||
|
||||
bootSize = mkOption {
|
||||
type = types.int;
|
||||
default = 256;
|
||||
@ -47,12 +35,7 @@ in
|
||||
};
|
||||
|
||||
vmGeneration = mkOption {
|
||||
type =
|
||||
with types;
|
||||
enum [
|
||||
"v1"
|
||||
"v2"
|
||||
];
|
||||
type = with types; enum [ "v1" "v2" ];
|
||||
default = "v1";
|
||||
description = ''
|
||||
VM Generation to use.
|
||||
@ -74,8 +57,7 @@ in
|
||||
bootSize = "${toString cfg.bootSize}M";
|
||||
partitionTableType = if cfg.vmGeneration == "v2" then "efi" else "legacy";
|
||||
|
||||
inherit (cfg) contents;
|
||||
inherit (config.virtualisation) diskSize;
|
||||
inherit (cfg) diskSize contents;
|
||||
inherit config lib pkgs;
|
||||
};
|
||||
};
|
||||
|
@ -1,35 +1,23 @@
|
||||
{
|
||||
config,
|
||||
lib,
|
||||
pkgs,
|
||||
...
|
||||
}:
|
||||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
let
|
||||
cfg = config.virtualisation.digitalOceanImage;
|
||||
virtualisationOptions = import ./virtualisation-options.nix;
|
||||
in
|
||||
{
|
||||
|
||||
imports = [
|
||||
./digital-ocean-config.nix
|
||||
virtualisationOptions.diskSize
|
||||
(lib.mkRenamedOptionModuleWith {
|
||||
sinceRelease = 2411;
|
||||
from = [
|
||||
"virtualisation"
|
||||
"digitialOceanImage"
|
||||
"diskSize"
|
||||
];
|
||||
to = [
|
||||
"virtualisation"
|
||||
"diskSize"
|
||||
];
|
||||
})
|
||||
];
|
||||
imports = [ ./digital-ocean-config.nix ];
|
||||
|
||||
options = {
|
||||
virtualisation.digitalOceanImage.diskSize = mkOption {
|
||||
type = with types; either (enum [ "auto" ]) int;
|
||||
default = "auto";
|
||||
example = 4096;
|
||||
description = ''
|
||||
Size of disk image. Unit is MB.
|
||||
'';
|
||||
};
|
||||
|
||||
virtualisation.digitalOceanImage.configFile = mkOption {
|
||||
type = with types; nullOr path;
|
||||
default = null;
|
||||
@ -43,10 +31,7 @@ in
|
||||
};
|
||||
|
||||
virtualisation.digitalOceanImage.compressionMethod = mkOption {
|
||||
type = types.enum [
|
||||
"gzip"
|
||||
"bzip2"
|
||||
];
|
||||
type = types.enum [ "gzip" "bzip2" ];
|
||||
default = "gzip";
|
||||
example = "bzip2";
|
||||
description = ''
|
||||
@ -59,35 +44,27 @@ in
|
||||
|
||||
#### implementation
|
||||
config = {
|
||||
|
||||
system.build.digitalOceanImage = import ../../lib/make-disk-image.nix {
|
||||
name = "digital-ocean-image";
|
||||
format = "qcow2";
|
||||
postVM =
|
||||
let
|
||||
compress =
|
||||
{
|
||||
"gzip" = "${pkgs.gzip}/bin/gzip";
|
||||
"bzip2" = "${pkgs.bzip2}/bin/bzip2";
|
||||
}
|
||||
.${cfg.compressionMethod};
|
||||
in
|
||||
''
|
||||
${compress} $diskImage
|
||||
'';
|
||||
configFile =
|
||||
if cfg.configFile == null then
|
||||
config.virtualisation.digitalOcean.defaultConfigFile
|
||||
else
|
||||
cfg.configFile;
|
||||
inherit (config.virtualisation) diskSize;
|
||||
postVM = let
|
||||
compress = {
|
||||
"gzip" = "${pkgs.gzip}/bin/gzip";
|
||||
"bzip2" = "${pkgs.bzip2}/bin/bzip2";
|
||||
}.${cfg.compressionMethod};
|
||||
in ''
|
||||
${compress} $diskImage
|
||||
'';
|
||||
configFile = if cfg.configFile == null
|
||||
then config.virtualisation.digitalOcean.defaultConfigFile
|
||||
else cfg.configFile;
|
||||
inherit (cfg) diskSize;
|
||||
inherit config lib pkgs;
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
meta.maintainers = with maintainers; [
|
||||
arianvp
|
||||
eamsden
|
||||
];
|
||||
meta.maintainers = with maintainers; [ arianvp eamsden ];
|
||||
|
||||
}
|
||||
|
@ -1,9 +1,4 @@
|
||||
{
|
||||
config,
|
||||
lib,
|
||||
pkgs,
|
||||
...
|
||||
}:
|
||||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
let
|
||||
@ -16,28 +11,21 @@ let
|
||||
];
|
||||
}
|
||||
'';
|
||||
virtualisationOptions = import ./virtualisation-options.nix;
|
||||
in
|
||||
{
|
||||
|
||||
imports = [
|
||||
./google-compute-config.nix
|
||||
virtualisationOptions.diskSize
|
||||
(lib.mkRenamedOptionModuleWith {
|
||||
sinceRelease = 2411;
|
||||
from = [
|
||||
"virtualisation"
|
||||
"googleComputeImage"
|
||||
"diskSize"
|
||||
];
|
||||
to = [
|
||||
"virtualisation"
|
||||
"diskSize"
|
||||
];
|
||||
})
|
||||
];
|
||||
imports = [ ./google-compute-config.nix ];
|
||||
|
||||
options = {
|
||||
virtualisation.googleComputeImage.diskSize = mkOption {
|
||||
type = with types; either (enum [ "auto" ]) int;
|
||||
default = "auto";
|
||||
example = 1536;
|
||||
description = ''
|
||||
Size of disk image. Unit is MB.
|
||||
'';
|
||||
};
|
||||
|
||||
virtualisation.googleComputeImage.configFile = mkOption {
|
||||
type = with types; nullOr str;
|
||||
default = null;
|
||||
@ -76,13 +64,7 @@ in
|
||||
system.build.googleComputeImage = import ../../lib/make-disk-image.nix {
|
||||
name = "google-compute-image";
|
||||
postVM = ''
|
||||
PATH=$PATH:${
|
||||
with pkgs;
|
||||
lib.makeBinPath [
|
||||
gnutar
|
||||
gzip
|
||||
]
|
||||
}
|
||||
PATH=$PATH:${with pkgs; lib.makeBinPath [ gnutar gzip ]}
|
||||
pushd $out
|
||||
mv $diskImage disk.raw
|
||||
tar -Sc disk.raw | gzip -${toString cfg.compressionLevel} > \
|
||||
@ -93,7 +75,7 @@ in
|
||||
format = "raw";
|
||||
configFile = if cfg.configFile == null then defaultConfigFile else cfg.configFile;
|
||||
partitionTableType = if cfg.efi then "efi" else "legacy";
|
||||
inherit (config.virtualisation) diskSize;
|
||||
inherit (cfg) diskSize;
|
||||
inherit config lib pkgs;
|
||||
};
|
||||
|
||||
|
@ -1,37 +1,21 @@
|
||||
{
|
||||
config,
|
||||
pkgs,
|
||||
lib,
|
||||
...
|
||||
}:
|
||||
{ config, pkgs, lib, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
cfg = config.hyperv;
|
||||
virtualisationOptions = import ./virtualisation-options.nix;
|
||||
|
||||
in
|
||||
{
|
||||
|
||||
imports = [
|
||||
virtualisationOptions.diskSize
|
||||
(lib.mkRenamedOptionModuleWith {
|
||||
sinceRelease = 2411;
|
||||
from = [
|
||||
"virtualisation"
|
||||
"hyperv"
|
||||
"baseImageSize"
|
||||
];
|
||||
to = [
|
||||
"virtualisation"
|
||||
"diskSize"
|
||||
];
|
||||
})
|
||||
];
|
||||
|
||||
in {
|
||||
options = {
|
||||
hyperv = {
|
||||
baseImageSize = mkOption {
|
||||
type = with types; either (enum [ "auto" ]) int;
|
||||
default = "auto";
|
||||
example = 2048;
|
||||
description = ''
|
||||
The size of the hyper-v base image in MiB.
|
||||
'';
|
||||
};
|
||||
vmDerivationName = mkOption {
|
||||
type = types.str;
|
||||
default = "nixos-hyperv-${config.system.nixos.label}-${pkgs.stdenv.hostPlatform.system}";
|
||||
@ -50,8 +34,6 @@ in
|
||||
};
|
||||
|
||||
config = {
|
||||
virtualisation.diskSize = lib.mkDefault (4 * 1024);
|
||||
|
||||
system.build.hypervImage = import ../../lib/make-disk-image.nix {
|
||||
name = cfg.vmDerivationName;
|
||||
postVM = ''
|
||||
@ -59,7 +41,7 @@ in
|
||||
rm $diskImage
|
||||
'';
|
||||
format = "raw";
|
||||
inherit (config.virtualisation) diskSize;
|
||||
diskSize = cfg.baseImageSize;
|
||||
partitionTableType = "efi";
|
||||
inherit config lib pkgs;
|
||||
};
|
||||
|
@ -1,9 +1,4 @@
|
||||
{
|
||||
config,
|
||||
lib,
|
||||
pkgs,
|
||||
...
|
||||
}:
|
||||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
let
|
||||
@ -15,27 +10,19 @@ let
|
||||
];
|
||||
}
|
||||
'';
|
||||
virtualisationOptions = import ./virtualisation-options.nix;
|
||||
in
|
||||
{
|
||||
imports = [
|
||||
./linode-config.nix
|
||||
virtualisationOptions.diskSize
|
||||
(lib.mkRenamedOptionModuleWith {
|
||||
sinceRelease = 2411;
|
||||
from = [
|
||||
"virtualisation"
|
||||
"linodeImage"
|
||||
"diskSize"
|
||||
];
|
||||
to = [
|
||||
"virtualisation"
|
||||
"diskSize"
|
||||
];
|
||||
})
|
||||
];
|
||||
imports = [ ./linode-config.nix ];
|
||||
|
||||
options = {
|
||||
virtualisation.linodeImage.diskSize = mkOption {
|
||||
type = with types; either (enum (singleton "auto")) ints.positive;
|
||||
default = "auto";
|
||||
example = 1536;
|
||||
description = ''
|
||||
Size of disk image in MB.
|
||||
'';
|
||||
};
|
||||
|
||||
virtualisation.linodeImage.configFile = mkOption {
|
||||
type = with types; nullOr str;
|
||||
@ -70,7 +57,7 @@ in
|
||||
format = "raw";
|
||||
partitionTableType = "none";
|
||||
configFile = if cfg.configFile == null then defaultConfigFile else cfg.configFile;
|
||||
inherit (config.virtualisation) diskSize;
|
||||
inherit (cfg) diskSize;
|
||||
inherit config lib pkgs;
|
||||
};
|
||||
};
|
||||
|
@ -1,9 +1,4 @@
|
||||
{
|
||||
config,
|
||||
lib,
|
||||
pkgs,
|
||||
...
|
||||
}:
|
||||
{ config, lib, pkgs, ... }:
|
||||
|
||||
let
|
||||
cfg = config.oci;
|
||||
@ -12,12 +7,9 @@ in
|
||||
imports = [ ./oci-common.nix ];
|
||||
|
||||
config = {
|
||||
virtualisation.diskSize = lib.mkDefault (8 * 1024);
|
||||
virtualisation.diskSizeAutoSupported = false;
|
||||
|
||||
system.build.OCIImage = import ../../lib/make-disk-image.nix {
|
||||
inherit config lib pkgs;
|
||||
inherit (config.virtualisation) diskSize;
|
||||
inherit (cfg) diskSize;
|
||||
name = "oci-image";
|
||||
configFile = ./oci-config-user.nix;
|
||||
format = "qcow2";
|
||||
@ -33,10 +25,7 @@ in
|
||||
after = [ "network-online.target" ];
|
||||
wants = [ "network-online.target" ];
|
||||
|
||||
path = [
|
||||
pkgs.coreutils
|
||||
pkgs.curl
|
||||
];
|
||||
path = [ pkgs.coreutils pkgs.curl ];
|
||||
script = ''
|
||||
mkdir -m 0700 -p /root/.ssh
|
||||
if [ -f /root/.ssh/authorized_keys ]; then
|
||||
|
@ -1,27 +1,5 @@
|
||||
{ config, lib, pkgs, ... }:
|
||||
{
|
||||
lib,
|
||||
...
|
||||
}:
|
||||
let
|
||||
virtualisationOptions = import ./virtualisation-options.nix;
|
||||
in
|
||||
{
|
||||
imports = [
|
||||
virtualisationOptions.diskSize
|
||||
(lib.mkRenamedOptionModuleWith {
|
||||
sinceRelease = 2411;
|
||||
from = [
|
||||
"virtualisation"
|
||||
"oci"
|
||||
"diskSize"
|
||||
];
|
||||
to = [
|
||||
"virtualisation"
|
||||
"diskSize"
|
||||
];
|
||||
})
|
||||
];
|
||||
|
||||
options = {
|
||||
oci = {
|
||||
efi = lib.mkOption {
|
||||
@ -31,6 +9,12 @@ in
|
||||
Whether the OCI instance is using EFI.
|
||||
'';
|
||||
};
|
||||
diskSize = lib.mkOption {
|
||||
type = lib.types.int;
|
||||
default = 8192;
|
||||
description = "Size of the disk image created in MB.";
|
||||
example = "diskSize = 12 * 1024; # 12GiB";
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
|
@ -21,6 +21,7 @@ let
|
||||
{
|
||||
outputs = [ "out" "man" ];
|
||||
inherit (podmanPackage) meta;
|
||||
preferLocalBuild = true;
|
||||
} ''
|
||||
mkdir -p $out/bin
|
||||
ln -s ${podmanPackage}/bin/podman $out/bin/docker
|
||||
@ -235,7 +236,10 @@ in
|
||||
systemd.tmpfiles.packages = [
|
||||
# The /run/podman rule interferes with our podman group, so we remove
|
||||
# it and let the systemd socket logic take care of it.
|
||||
(pkgs.runCommand "podman-tmpfiles-nixos" { package = cfg.package; } ''
|
||||
(pkgs.runCommand "podman-tmpfiles-nixos" {
|
||||
package = cfg.package;
|
||||
preferLocalBuild = true;
|
||||
} ''
|
||||
mkdir -p $out/lib/tmpfiles.d/
|
||||
grep -v 'D! /run/podman 0700 root root' \
|
||||
<$package/lib/tmpfiles.d/podman.conf \
|
||||
|
@ -1,31 +1,8 @@
|
||||
{
|
||||
config,
|
||||
pkgs,
|
||||
lib,
|
||||
...
|
||||
}:
|
||||
{ config, pkgs, lib, ... }:
|
||||
|
||||
with lib;
|
||||
let
|
||||
virtualisationOptions = import ./virtualisation-options.nix;
|
||||
in
|
||||
{
|
||||
imports = [
|
||||
virtualisationOptions.diskSize
|
||||
(lib.mkRenamedOptionModuleWith {
|
||||
sinceRelease = 2411;
|
||||
from = [
|
||||
"virtualisation"
|
||||
"proxmoxImage"
|
||||
"diskSize"
|
||||
];
|
||||
to = [
|
||||
"virtualisation"
|
||||
"diskSize"
|
||||
];
|
||||
})
|
||||
];
|
||||
|
||||
{
|
||||
options.proxmox = {
|
||||
qemuConf = {
|
||||
# essential configs
|
||||
@ -77,10 +54,7 @@ in
|
||||
'';
|
||||
};
|
||||
bios = mkOption {
|
||||
type = types.enum [
|
||||
"seabios"
|
||||
"ovmf"
|
||||
];
|
||||
type = types.enum [ "seabios" "ovmf" ];
|
||||
default = "seabios";
|
||||
description = ''
|
||||
Select BIOS implementation (seabios = Legacy BIOS, ovmf = UEFI).
|
||||
@ -113,6 +87,16 @@ in
|
||||
either "efi" or "hybrid".
|
||||
'';
|
||||
};
|
||||
diskSize = mkOption {
|
||||
type = types.str;
|
||||
default = "auto";
|
||||
example = "20480";
|
||||
description = ''
|
||||
The size of the disk, in megabytes.
|
||||
if "auto" size is calculated based on the contents copied to it and
|
||||
additionalSpace is taken into account.
|
||||
'';
|
||||
};
|
||||
net0 = mkOption {
|
||||
type = types.commas;
|
||||
default = "virtio=00:00:00:00:00:00,bridge=vmbr0,firewall=1";
|
||||
@ -140,13 +124,8 @@ in
|
||||
};
|
||||
};
|
||||
qemuExtraConf = mkOption {
|
||||
type =
|
||||
with types;
|
||||
attrsOf (oneOf [
|
||||
str
|
||||
int
|
||||
]);
|
||||
default = { };
|
||||
type = with types; attrsOf (oneOf [ str int ]);
|
||||
default = {};
|
||||
example = literalExpression ''
|
||||
{
|
||||
cpu = "host";
|
||||
@ -158,12 +137,7 @@ in
|
||||
'';
|
||||
};
|
||||
partitionTableType = mkOption {
|
||||
type = types.enum [
|
||||
"efi"
|
||||
"hybrid"
|
||||
"legacy"
|
||||
"legacy+gpt"
|
||||
];
|
||||
type = types.enum [ "efi" "hybrid" "legacy" "legacy+gpt" ];
|
||||
description = ''
|
||||
Partition table type to use. See make-disk-image.nix partitionTableType for details.
|
||||
Defaults to 'legacy' for 'proxmox.qemuConf.bios="seabios"' (default), other bios values defaults to 'efi'.
|
||||
@ -211,163 +185,142 @@ in
|
||||
};
|
||||
};
|
||||
|
||||
config =
|
||||
let
|
||||
cfg = config.proxmox;
|
||||
cfgLine = name: value: ''
|
||||
${name}: ${builtins.toString value}
|
||||
'';
|
||||
virtio0Storage = builtins.head (builtins.split ":" cfg.qemuConf.virtio0);
|
||||
cfgFile =
|
||||
fileName: properties:
|
||||
pkgs.writeTextDir fileName ''
|
||||
# generated by NixOS
|
||||
${lib.concatStrings (lib.mapAttrsToList cfgLine properties)}
|
||||
#qmdump#map:virtio0:drive-virtio0:${virtio0Storage}:raw:
|
||||
'';
|
||||
config = let
|
||||
cfg = config.proxmox;
|
||||
cfgLine = name: value: ''
|
||||
${name}: ${builtins.toString value}
|
||||
'';
|
||||
virtio0Storage = builtins.head (builtins.split ":" cfg.qemuConf.virtio0);
|
||||
cfgFile = fileName: properties: pkgs.writeTextDir fileName ''
|
||||
# generated by NixOS
|
||||
${lib.concatStrings (lib.mapAttrsToList cfgLine properties)}
|
||||
#qmdump#map:virtio0:drive-virtio0:${virtio0Storage}:raw:
|
||||
'';
|
||||
inherit (cfg) partitionTableType;
|
||||
supportEfi = partitionTableType == "efi" || partitionTableType == "hybrid";
|
||||
supportBios = partitionTableType == "legacy" || partitionTableType == "hybrid" || partitionTableType == "legacy+gpt";
|
||||
hasBootPartition = partitionTableType == "efi" || partitionTableType == "hybrid";
|
||||
hasNoFsPartition = partitionTableType == "hybrid" || partitionTableType == "legacy+gpt";
|
||||
in {
|
||||
assertions = [
|
||||
{
|
||||
assertion = config.boot.loader.systemd-boot.enable -> config.proxmox.qemuConf.bios == "ovmf";
|
||||
message = "systemd-boot requires 'ovmf' bios";
|
||||
}
|
||||
{
|
||||
assertion = partitionTableType == "efi" -> config.proxmox.qemuConf.bios == "ovmf";
|
||||
message = "'efi' disk partitioning requires 'ovmf' bios";
|
||||
}
|
||||
{
|
||||
assertion = partitionTableType == "legacy" -> config.proxmox.qemuConf.bios == "seabios";
|
||||
message = "'legacy' disk partitioning requires 'seabios' bios";
|
||||
}
|
||||
{
|
||||
assertion = partitionTableType == "legacy+gpt" -> config.proxmox.qemuConf.bios == "seabios";
|
||||
message = "'legacy+gpt' disk partitioning requires 'seabios' bios";
|
||||
}
|
||||
];
|
||||
system.build.VMA = import ../../lib/make-disk-image.nix {
|
||||
name = "proxmox-${cfg.filenameSuffix}";
|
||||
inherit (cfg) partitionTableType;
|
||||
supportEfi = partitionTableType == "efi" || partitionTableType == "hybrid";
|
||||
supportBios =
|
||||
partitionTableType == "legacy"
|
||||
|| partitionTableType == "hybrid"
|
||||
|| partitionTableType == "legacy+gpt";
|
||||
hasBootPartition = partitionTableType == "efi" || partitionTableType == "hybrid";
|
||||
hasNoFsPartition = partitionTableType == "hybrid" || partitionTableType == "legacy+gpt";
|
||||
in
|
||||
{
|
||||
assertions = [
|
||||
{
|
||||
assertion = config.boot.loader.systemd-boot.enable -> config.proxmox.qemuConf.bios == "ovmf";
|
||||
message = "systemd-boot requires 'ovmf' bios";
|
||||
}
|
||||
{
|
||||
assertion = partitionTableType == "efi" -> config.proxmox.qemuConf.bios == "ovmf";
|
||||
message = "'efi' disk partitioning requires 'ovmf' bios";
|
||||
}
|
||||
{
|
||||
assertion = partitionTableType == "legacy" -> config.proxmox.qemuConf.bios == "seabios";
|
||||
message = "'legacy' disk partitioning requires 'seabios' bios";
|
||||
}
|
||||
{
|
||||
assertion = partitionTableType == "legacy+gpt" -> config.proxmox.qemuConf.bios == "seabios";
|
||||
message = "'legacy+gpt' disk partitioning requires 'seabios' bios";
|
||||
}
|
||||
];
|
||||
system.build.VMA = import ../../lib/make-disk-image.nix {
|
||||
name = "proxmox-${cfg.filenameSuffix}";
|
||||
inherit (cfg) partitionTableType;
|
||||
postVM =
|
||||
let
|
||||
# Build qemu with PVE's patch that adds support for the VMA format
|
||||
vma =
|
||||
(pkgs.qemu_kvm.override {
|
||||
alsaSupport = false;
|
||||
pulseSupport = false;
|
||||
sdlSupport = false;
|
||||
jackSupport = false;
|
||||
gtkSupport = false;
|
||||
vncSupport = false;
|
||||
smartcardSupport = false;
|
||||
spiceSupport = false;
|
||||
ncursesSupport = false;
|
||||
libiscsiSupport = false;
|
||||
tpmSupport = false;
|
||||
numaSupport = false;
|
||||
seccompSupport = false;
|
||||
guestAgentSupport = false;
|
||||
}).overrideAttrs
|
||||
(super: rec {
|
||||
# Check https://github.com/proxmox/pve-qemu/tree/master for the version
|
||||
# of qemu and patch to use
|
||||
version = "9.0.0";
|
||||
src = pkgs.fetchurl {
|
||||
url = "https://download.qemu.org/qemu-${version}.tar.xz";
|
||||
hash = "sha256-MnCKxmww2MiSYz6paMdxwcdtWX1w3erSGg0izPOG2mk=";
|
||||
};
|
||||
patches = [
|
||||
# Proxmox' VMA tool is published as a particular patch upon QEMU
|
||||
"${
|
||||
pkgs.fetchFromGitHub {
|
||||
owner = "proxmox";
|
||||
repo = "pve-qemu";
|
||||
rev = "14afbdd55f04d250bd679ca1ad55d3f47cd9d4c8";
|
||||
hash = "sha256-lSJQA5SHIHfxJvMLIID2drv2H43crTPMNIlIT37w9Nc=";
|
||||
}
|
||||
}/debian/patches/pve/0027-PVE-Backup-add-vma-backup-format-code.patch"
|
||||
];
|
||||
postVM = let
|
||||
# Build qemu with PVE's patch that adds support for the VMA format
|
||||
vma = (pkgs.qemu_kvm.override {
|
||||
alsaSupport = false;
|
||||
pulseSupport = false;
|
||||
sdlSupport = false;
|
||||
jackSupport = false;
|
||||
gtkSupport = false;
|
||||
vncSupport = false;
|
||||
smartcardSupport = false;
|
||||
spiceSupport = false;
|
||||
ncursesSupport = false;
|
||||
libiscsiSupport = false;
|
||||
tpmSupport = false;
|
||||
numaSupport = false;
|
||||
seccompSupport = false;
|
||||
guestAgentSupport = false;
|
||||
}).overrideAttrs ( super: rec {
|
||||
# Check https://github.com/proxmox/pve-qemu/tree/master for the version
|
||||
# of qemu and patch to use
|
||||
version = "9.0.0";
|
||||
src = pkgs.fetchurl {
|
||||
url = "https://download.qemu.org/qemu-${version}.tar.xz";
|
||||
hash = "sha256-MnCKxmww2MiSYz6paMdxwcdtWX1w3erSGg0izPOG2mk=";
|
||||
};
|
||||
patches = [
|
||||
# Proxmox' VMA tool is published as a particular patch upon QEMU
|
||||
"${pkgs.fetchFromGitHub {
|
||||
owner = "proxmox";
|
||||
repo = "pve-qemu";
|
||||
rev = "14afbdd55f04d250bd679ca1ad55d3f47cd9d4c8";
|
||||
hash = "sha256-lSJQA5SHIHfxJvMLIID2drv2H43crTPMNIlIT37w9Nc=";
|
||||
}}/debian/patches/pve/0027-PVE-Backup-add-vma-backup-format-code.patch"
|
||||
];
|
||||
|
||||
buildInputs = super.buildInputs ++ [ pkgs.libuuid ];
|
||||
nativeBuildInputs = super.nativeBuildInputs ++ [ pkgs.perl ];
|
||||
buildInputs = super.buildInputs ++ [ pkgs.libuuid ];
|
||||
nativeBuildInputs = super.nativeBuildInputs ++ [ pkgs.perl ];
|
||||
|
||||
});
|
||||
in
|
||||
''
|
||||
${vma}/bin/vma create "vzdump-qemu-${cfg.filenameSuffix}.vma" \
|
||||
-c ${
|
||||
cfgFile "qemu-server.conf" (cfg.qemuConf // cfg.qemuExtraConf)
|
||||
}/qemu-server.conf drive-virtio0=$diskImage
|
||||
rm $diskImage
|
||||
${pkgs.zstd}/bin/zstd "vzdump-qemu-${cfg.filenameSuffix}.vma"
|
||||
mv "vzdump-qemu-${cfg.filenameSuffix}.vma.zst" $out/
|
||||
});
|
||||
in
|
||||
''
|
||||
${vma}/bin/vma create "vzdump-qemu-${cfg.filenameSuffix}.vma" \
|
||||
-c ${cfgFile "qemu-server.conf" (cfg.qemuConf // cfg.qemuExtraConf)}/qemu-server.conf drive-virtio0=$diskImage
|
||||
rm $diskImage
|
||||
${pkgs.zstd}/bin/zstd "vzdump-qemu-${cfg.filenameSuffix}.vma"
|
||||
mv "vzdump-qemu-${cfg.filenameSuffix}.vma.zst" $out/
|
||||
|
||||
mkdir -p $out/nix-support
|
||||
echo "file vma $out/vzdump-qemu-${cfg.filenameSuffix}.vma.zst" > $out/nix-support/hydra-build-products
|
||||
'';
|
||||
inherit (cfg.qemuConf) additionalSpace bootSize;
|
||||
inherit (config.virtualisation) diskSize;
|
||||
format = "raw";
|
||||
inherit config lib pkgs;
|
||||
};
|
||||
|
||||
boot = {
|
||||
growPartition = true;
|
||||
kernelParams = [ "console=ttyS0" ];
|
||||
loader.grub = {
|
||||
device = lib.mkDefault (
|
||||
if (hasNoFsPartition || supportBios) then
|
||||
# Even if there is a separate no-fs partition ("/dev/disk/by-partlabel/no-fs" i.e. "/dev/vda2"),
|
||||
# which will be used the bootloader, do not set it as loader.grub.device.
|
||||
# GRUB installation fails, unless the whole disk is selected.
|
||||
"/dev/vda"
|
||||
else
|
||||
"nodev"
|
||||
);
|
||||
efiSupport = lib.mkDefault supportEfi;
|
||||
efiInstallAsRemovable = lib.mkDefault supportEfi;
|
||||
};
|
||||
|
||||
loader.timeout = 0;
|
||||
initrd.availableKernelModules = [
|
||||
"uas"
|
||||
"virtio_blk"
|
||||
"virtio_pci"
|
||||
];
|
||||
};
|
||||
|
||||
fileSystems."/" = {
|
||||
device = "/dev/disk/by-label/nixos";
|
||||
autoResize = true;
|
||||
fsType = "ext4";
|
||||
};
|
||||
fileSystems."/boot" = lib.mkIf hasBootPartition {
|
||||
device = "/dev/disk/by-label/ESP";
|
||||
fsType = "vfat";
|
||||
};
|
||||
|
||||
networking = mkIf cfg.cloudInit.enable {
|
||||
hostName = mkForce "";
|
||||
useDHCP = false;
|
||||
};
|
||||
|
||||
services = {
|
||||
cloud-init = mkIf cfg.cloudInit.enable {
|
||||
enable = true;
|
||||
network.enable = true;
|
||||
};
|
||||
sshd.enable = mkDefault true;
|
||||
qemuGuest.enable = true;
|
||||
};
|
||||
|
||||
proxmox.qemuExtraConf.${cfg.cloudInit.device} = "${cfg.cloudInit.defaultStorage}:vm-9999-cloudinit,media=cdrom";
|
||||
mkdir -p $out/nix-support
|
||||
echo "file vma $out/vzdump-qemu-${cfg.filenameSuffix}.vma.zst" > $out/nix-support/hydra-build-products
|
||||
'';
|
||||
inherit (cfg.qemuConf) additionalSpace diskSize bootSize;
|
||||
format = "raw";
|
||||
inherit config lib pkgs;
|
||||
};
|
||||
|
||||
boot = {
|
||||
growPartition = true;
|
||||
kernelParams = [ "console=ttyS0" ];
|
||||
loader.grub = {
|
||||
device = lib.mkDefault (if (hasNoFsPartition || supportBios) then
|
||||
# Even if there is a separate no-fs partition ("/dev/disk/by-partlabel/no-fs" i.e. "/dev/vda2"),
|
||||
# which will be used the bootloader, do not set it as loader.grub.device.
|
||||
# GRUB installation fails, unless the whole disk is selected.
|
||||
"/dev/vda"
|
||||
else
|
||||
"nodev");
|
||||
efiSupport = lib.mkDefault supportEfi;
|
||||
efiInstallAsRemovable = lib.mkDefault supportEfi;
|
||||
};
|
||||
|
||||
loader.timeout = 0;
|
||||
initrd.availableKernelModules = [ "uas" "virtio_blk" "virtio_pci" ];
|
||||
};
|
||||
|
||||
fileSystems."/" = {
|
||||
device = "/dev/disk/by-label/nixos";
|
||||
autoResize = true;
|
||||
fsType = "ext4";
|
||||
};
|
||||
fileSystems."/boot" = lib.mkIf hasBootPartition {
|
||||
device = "/dev/disk/by-label/ESP";
|
||||
fsType = "vfat";
|
||||
};
|
||||
|
||||
networking = mkIf cfg.cloudInit.enable {
|
||||
hostName = mkForce "";
|
||||
useDHCP = false;
|
||||
};
|
||||
|
||||
services = {
|
||||
cloud-init = mkIf cfg.cloudInit.enable {
|
||||
enable = true;
|
||||
network.enable = true;
|
||||
};
|
||||
sshd.enable = mkDefault true;
|
||||
qemuGuest.enable = true;
|
||||
};
|
||||
|
||||
proxmox.qemuExtraConf.${cfg.cloudInit.device} = "${cfg.cloudInit.defaultStorage}:vm-9999-cloudinit,media=cdrom";
|
||||
};
|
||||
}
|
||||
|
File diff suppressed because it is too large
Load Diff
@ -1,37 +1,23 @@
|
||||
{
|
||||
config,
|
||||
lib,
|
||||
pkgs,
|
||||
...
|
||||
}:
|
||||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
|
||||
cfg = config.virtualbox;
|
||||
virtualisationOptions = import ./virtualisation-options.nix;
|
||||
|
||||
in
|
||||
{
|
||||
imports = [
|
||||
virtualisationOptions.diskSize
|
||||
(lib.mkRenamedOptionModuleWith {
|
||||
sinceRelease = 2411;
|
||||
from = [
|
||||
"virtualisation"
|
||||
"virtualbox"
|
||||
"baseImageSize"
|
||||
];
|
||||
to = [
|
||||
"virtualisation"
|
||||
"diskSize"
|
||||
];
|
||||
})
|
||||
];
|
||||
in {
|
||||
|
||||
options = {
|
||||
virtualbox = {
|
||||
baseImageSize = mkOption {
|
||||
type = with types; either (enum [ "auto" ]) int;
|
||||
default = "auto";
|
||||
example = 50 * 1024;
|
||||
description = ''
|
||||
The size of the VirtualBox base image in MiB.
|
||||
'';
|
||||
};
|
||||
baseImageFreeSpace = mkOption {
|
||||
type = with types; int;
|
||||
default = 30 * 1024;
|
||||
@ -68,14 +54,7 @@ in
|
||||
'';
|
||||
};
|
||||
params = mkOption {
|
||||
type =
|
||||
with types;
|
||||
attrsOf (oneOf [
|
||||
str
|
||||
int
|
||||
bool
|
||||
(listOf str)
|
||||
]);
|
||||
type = with types; attrsOf (oneOf [ str int bool (listOf str) ]);
|
||||
example = {
|
||||
audio = "alsa";
|
||||
rtcuseutc = "on";
|
||||
@ -88,21 +67,11 @@ in
|
||||
'';
|
||||
};
|
||||
exportParams = mkOption {
|
||||
type =
|
||||
with types;
|
||||
listOf (oneOf [
|
||||
str
|
||||
int
|
||||
bool
|
||||
(listOf str)
|
||||
]);
|
||||
type = with types; listOf (oneOf [ str int bool (listOf str) ]);
|
||||
example = [
|
||||
"--vsys"
|
||||
"0"
|
||||
"--vendor"
|
||||
"ACME Inc."
|
||||
"--vsys" "0" "--vendor" "ACME Inc."
|
||||
];
|
||||
default = [ ];
|
||||
default = [];
|
||||
description = ''
|
||||
Parameters passed to the Virtualbox export command.
|
||||
|
||||
@ -120,25 +89,23 @@ in
|
||||
mountPoint = "/home/demo/storage";
|
||||
size = 100 * 1024;
|
||||
};
|
||||
type = types.nullOr (
|
||||
types.submodule {
|
||||
options = {
|
||||
size = mkOption {
|
||||
type = types.int;
|
||||
description = "Size in MiB";
|
||||
};
|
||||
label = mkOption {
|
||||
type = types.str;
|
||||
default = "vm-extra-storage";
|
||||
description = "Label for the disk partition";
|
||||
};
|
||||
mountPoint = mkOption {
|
||||
type = types.str;
|
||||
description = "Path where to mount this disk.";
|
||||
};
|
||||
type = types.nullOr (types.submodule {
|
||||
options = {
|
||||
size = mkOption {
|
||||
type = types.int;
|
||||
description = "Size in MiB";
|
||||
};
|
||||
}
|
||||
);
|
||||
label = mkOption {
|
||||
type = types.str;
|
||||
default = "vm-extra-storage";
|
||||
description = "Label for the disk partition";
|
||||
};
|
||||
mountPoint = mkOption {
|
||||
type = types.str;
|
||||
description = "Path where to mount this disk.";
|
||||
};
|
||||
};
|
||||
});
|
||||
};
|
||||
postExportCommands = mkOption {
|
||||
type = types.lines;
|
||||
@ -158,14 +125,7 @@ in
|
||||
'';
|
||||
};
|
||||
storageController = mkOption {
|
||||
type =
|
||||
with types;
|
||||
attrsOf (oneOf [
|
||||
str
|
||||
int
|
||||
bool
|
||||
(listOf str)
|
||||
]);
|
||||
type = with types; attrsOf (oneOf [ str int bool (listOf str) ]);
|
||||
example = {
|
||||
name = "SCSI";
|
||||
add = "scsi";
|
||||
@ -192,8 +152,6 @@ in
|
||||
|
||||
config = {
|
||||
|
||||
virtualisation.diskSize = lib.mkDefault (50 * 1024);
|
||||
|
||||
virtualbox.params = mkMerge [
|
||||
(mapAttrs (name: mkDefault) {
|
||||
acpi = "on";
|
||||
@ -217,83 +175,80 @@ in
|
||||
|
||||
inherit pkgs lib config;
|
||||
partitionTableType = "legacy";
|
||||
inherit (config.virtualisation) diskSize;
|
||||
diskSize = cfg.baseImageSize;
|
||||
additionalSpace = "${toString cfg.baseImageFreeSpace}M";
|
||||
|
||||
postVM = ''
|
||||
export HOME=$PWD
|
||||
export PATH=${pkgs.virtualbox}/bin:$PATH
|
||||
postVM =
|
||||
''
|
||||
export HOME=$PWD
|
||||
export PATH=${pkgs.virtualbox}/bin:$PATH
|
||||
|
||||
echo "converting image to VirtualBox format..."
|
||||
VBoxManage convertfromraw $diskImage disk.vdi
|
||||
echo "converting image to VirtualBox format..."
|
||||
VBoxManage convertfromraw $diskImage disk.vdi
|
||||
|
||||
${optionalString (cfg.extraDisk != null) ''
|
||||
echo "creating extra disk: data-disk.raw"
|
||||
dataDiskImage=data-disk.raw
|
||||
truncate -s ${toString cfg.extraDisk.size}M $dataDiskImage
|
||||
${optionalString (cfg.extraDisk != null) ''
|
||||
echo "creating extra disk: data-disk.raw"
|
||||
dataDiskImage=data-disk.raw
|
||||
truncate -s ${toString cfg.extraDisk.size}M $dataDiskImage
|
||||
|
||||
parted --script $dataDiskImage -- \
|
||||
mklabel msdos \
|
||||
mkpart primary ext4 1MiB -1
|
||||
eval $(partx $dataDiskImage -o START,SECTORS --nr 1 --pairs)
|
||||
mkfs.ext4 -F -L ${cfg.extraDisk.label} $dataDiskImage -E offset=$(sectorsToBytes $START) $(sectorsToKilobytes $SECTORS)K
|
||||
echo "creating extra disk: data-disk.vdi"
|
||||
VBoxManage convertfromraw $dataDiskImage data-disk.vdi
|
||||
''}
|
||||
parted --script $dataDiskImage -- \
|
||||
mklabel msdos \
|
||||
mkpart primary ext4 1MiB -1
|
||||
eval $(partx $dataDiskImage -o START,SECTORS --nr 1 --pairs)
|
||||
mkfs.ext4 -F -L ${cfg.extraDisk.label} $dataDiskImage -E offset=$(sectorsToBytes $START) $(sectorsToKilobytes $SECTORS)K
|
||||
echo "creating extra disk: data-disk.vdi"
|
||||
VBoxManage convertfromraw $dataDiskImage data-disk.vdi
|
||||
''}
|
||||
|
||||
echo "creating VirtualBox VM..."
|
||||
vmName="${cfg.vmName}";
|
||||
VBoxManage createvm --name "$vmName" --register \
|
||||
--ostype ${if pkgs.stdenv.hostPlatform.system == "x86_64-linux" then "Linux26_64" else "Linux26"}
|
||||
VBoxManage modifyvm "$vmName" \
|
||||
--memory ${toString cfg.memorySize} \
|
||||
${lib.cli.toGNUCommandLineShell { } cfg.params}
|
||||
VBoxManage storagectl "$vmName" ${lib.cli.toGNUCommandLineShell { } cfg.storageController}
|
||||
VBoxManage storageattach "$vmName" --storagectl ${cfg.storageController.name} --port 0 --device 0 --type hdd \
|
||||
--medium disk.vdi
|
||||
${optionalString (cfg.extraDisk != null) ''
|
||||
VBoxManage storageattach "$vmName" --storagectl ${cfg.storageController.name} --port 1 --device 0 --type hdd \
|
||||
--medium data-disk.vdi
|
||||
''}
|
||||
echo "creating VirtualBox VM..."
|
||||
vmName="${cfg.vmName}";
|
||||
VBoxManage createvm --name "$vmName" --register \
|
||||
--ostype ${if pkgs.stdenv.hostPlatform.system == "x86_64-linux" then "Linux26_64" else "Linux26"}
|
||||
VBoxManage modifyvm "$vmName" \
|
||||
--memory ${toString cfg.memorySize} \
|
||||
${lib.cli.toGNUCommandLineShell { } cfg.params}
|
||||
VBoxManage storagectl "$vmName" ${lib.cli.toGNUCommandLineShell { } cfg.storageController}
|
||||
VBoxManage storageattach "$vmName" --storagectl ${cfg.storageController.name} --port 0 --device 0 --type hdd \
|
||||
--medium disk.vdi
|
||||
${optionalString (cfg.extraDisk != null) ''
|
||||
VBoxManage storageattach "$vmName" --storagectl ${cfg.storageController.name} --port 1 --device 0 --type hdd \
|
||||
--medium data-disk.vdi
|
||||
''}
|
||||
|
||||
echo "exporting VirtualBox VM..."
|
||||
mkdir -p $out
|
||||
fn="$out/${cfg.vmFileName}"
|
||||
VBoxManage export "$vmName" --output "$fn" --options manifest ${escapeShellArgs cfg.exportParams}
|
||||
${cfg.postExportCommands}
|
||||
echo "exporting VirtualBox VM..."
|
||||
mkdir -p $out
|
||||
fn="$out/${cfg.vmFileName}"
|
||||
VBoxManage export "$vmName" --output "$fn" --options manifest ${escapeShellArgs cfg.exportParams}
|
||||
${cfg.postExportCommands}
|
||||
|
||||
rm -v $diskImage
|
||||
rm -v $diskImage
|
||||
|
||||
mkdir -p $out/nix-support
|
||||
echo "file ova $fn" >> $out/nix-support/hydra-build-products
|
||||
'';
|
||||
mkdir -p $out/nix-support
|
||||
echo "file ova $fn" >> $out/nix-support/hydra-build-products
|
||||
'';
|
||||
};
|
||||
|
||||
fileSystems =
|
||||
{
|
||||
"/" = {
|
||||
device = "/dev/disk/by-label/nixos";
|
||||
autoResize = true;
|
||||
fsType = "ext4";
|
||||
};
|
||||
}
|
||||
// (lib.optionalAttrs (cfg.extraDisk != null) {
|
||||
${cfg.extraDisk.mountPoint} = {
|
||||
device = "/dev/disk/by-label/" + cfg.extraDisk.label;
|
||||
autoResize = true;
|
||||
fsType = "ext4";
|
||||
};
|
||||
});
|
||||
fileSystems = {
|
||||
"/" = {
|
||||
device = "/dev/disk/by-label/nixos";
|
||||
autoResize = true;
|
||||
fsType = "ext4";
|
||||
};
|
||||
} // (lib.optionalAttrs (cfg.extraDisk != null) {
|
||||
${cfg.extraDisk.mountPoint} = {
|
||||
device = "/dev/disk/by-label/" + cfg.extraDisk.label;
|
||||
autoResize = true;
|
||||
fsType = "ext4";
|
||||
};
|
||||
});
|
||||
|
||||
boot.growPartition = true;
|
||||
boot.loader.grub.device = "/dev/sda";
|
||||
|
||||
swapDevices = [
|
||||
{
|
||||
device = "/var/swap";
|
||||
size = 2048;
|
||||
}
|
||||
];
|
||||
swapDevices = [{
|
||||
device = "/var/swap";
|
||||
size = 2048;
|
||||
}];
|
||||
|
||||
virtualisation.virtualbox.guest.enable = true;
|
||||
|
||||
|
@ -1,60 +0,0 @@
|
||||
# This modules declares shared options for virtual machines,
|
||||
# containers and anything else in `virtualisation`.
|
||||
#
|
||||
# This is useful to declare e.g. defaults for
|
||||
# `virtualisation.diskSize` once, while building multiple
|
||||
# different image formats of a NixOS configuration.
|
||||
#
|
||||
# Additional options can be migrated over time from
|
||||
# `modules/virtualisation/qemu-vm.nix` and others.
|
||||
# Please keep defaults and descriptions here generic
|
||||
# and independent of i.e. hypervisor-specific notes
|
||||
# and defaults where.
|
||||
# Those can be added in the consuming modules where needed.
|
||||
# needed.
|
||||
let
|
||||
_file = ./virtualisation-options.nix;
|
||||
key = _file;
|
||||
in
|
||||
{
|
||||
diskSize =
|
||||
{ lib, config, ... }:
|
||||
let
|
||||
t = lib.types;
|
||||
in
|
||||
{
|
||||
inherit _file key;
|
||||
|
||||
options = {
|
||||
virtualisation.diskSizeAutoSupported = lib.mkOption {
|
||||
type = t.bool;
|
||||
default = true;
|
||||
description = ''
|
||||
Whether the current image builder or vm runner supports `virtualisation.diskSize = "auto".`
|
||||
'';
|
||||
internal = true;
|
||||
};
|
||||
|
||||
virtualisation.diskSize = lib.mkOption {
|
||||
type = t.either (t.enum [ "auto" ]) t.ints.positive;
|
||||
default = "auto";
|
||||
description = ''
|
||||
The disk size in megabytes of the virtual machine.
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
config =
|
||||
let
|
||||
inherit (config.virtualisation) diskSize diskSizeAutoSupported;
|
||||
in
|
||||
{
|
||||
assertions = [
|
||||
{
|
||||
assertion = diskSize != "auto" || diskSizeAutoSupported;
|
||||
message = "Setting virtualisation.diskSize to `auto` is not supported by the current image build or vm runner; use an explicit size.";
|
||||
}
|
||||
];
|
||||
};
|
||||
};
|
||||
}
|
@ -312,7 +312,7 @@ in rec {
|
||||
[ configuration
|
||||
versionModule
|
||||
./maintainers/scripts/ec2/amazon-image.nix
|
||||
({ ... }: { amazonImage.virtualisation.diskSize = "auto"; })
|
||||
({ ... }: { amazonImage.sizeMB = "auto"; })
|
||||
];
|
||||
}).config.system.build.amazonImage)
|
||||
|
||||
|
@ -71,7 +71,7 @@ import ./make-test-python.nix (
|
||||
server.wait_for_unit("postgresql")
|
||||
server.wait_for_unit("gancio")
|
||||
server.wait_for_unit("nginx")
|
||||
server.wait_for_open_port(13120)
|
||||
server.wait_for_file("/run/gancio/socket")
|
||||
server.wait_for_open_port(80)
|
||||
|
||||
# Check can create user via cli
|
||||
|
@ -10,7 +10,7 @@ let
|
||||
tests = let callTest = p: lib.flip (import p) { inherit system pkgs; }; in {
|
||||
thinpool = { test = callTest ./thinpool.nix; kernelFilter = lib.id; };
|
||||
# we would like to test all versions, but the kernel module currently does not compile against the other versions
|
||||
vdo = { test = callTest ./vdo.nix; kernelFilter = lib.filter (v: v == "6.1"); };
|
||||
vdo = { test = callTest ./vdo.nix; kernelFilter = lib.filter (v: v == "latest"); };
|
||||
|
||||
|
||||
# systemd in stage 1
|
||||
@ -26,7 +26,7 @@ let
|
||||
};
|
||||
vdo-sd-stage-1 = {
|
||||
test = callTest ./systemd-stage-1.nix;
|
||||
kernelFilter = lib.filter (v: v == "6.1");
|
||||
kernelFilter = lib.filter (v: v == "latest");
|
||||
flavour = "vdo";
|
||||
};
|
||||
};
|
||||
|
@ -21,6 +21,8 @@ import ./make-test-python.nix ({ pkgs, ... }: {
|
||||
pkgs.grub2
|
||||
];
|
||||
|
||||
system.switch.enable = true;
|
||||
|
||||
virtualisation = {
|
||||
cores = 2;
|
||||
memorySize = 4096;
|
||||
|
@ -482,7 +482,6 @@ let
|
||||
json = {
|
||||
exporterConfig = {
|
||||
enable = true;
|
||||
url = "http://localhost";
|
||||
configFile = pkgs.writeText "json-exporter-conf.json" (builtins.toJSON {
|
||||
modules = {
|
||||
default = {
|
||||
@ -932,7 +931,9 @@ let
|
||||
pgbouncer = {
|
||||
exporterConfig = {
|
||||
enable = true;
|
||||
connectionStringFile = pkgs.writeText "connection.conf" "postgres://admin:@localhost:6432/pgbouncer?sslmode=disable";
|
||||
connectionEnvFile = "${pkgs.writeText "connstr-env" ''
|
||||
PGBOUNCER_EXPORTER_CONNECTION_STRING=postgres://admin@localhost:6432/pgbouncer?sslmode=disable
|
||||
''}";
|
||||
};
|
||||
|
||||
metricProvider = {
|
||||
|
@ -21,14 +21,14 @@
|
||||
|
||||
stdenv.mkDerivation rec {
|
||||
pname = "grandorgue";
|
||||
version = "3.14.2-1";
|
||||
version = "3.15.1-1";
|
||||
|
||||
src = fetchFromGitHub {
|
||||
owner = "GrandOrgue";
|
||||
repo = pname;
|
||||
repo = "grandorgue";
|
||||
rev = version;
|
||||
fetchSubmodules = true;
|
||||
hash = "sha256-FHM8fFUga9poGhojKBTF4gsJ6L4XEksueVxfMbngvks=";
|
||||
hash = "sha256-5uAA878OBc04PkUgCwoRtc6lIASivq3YcfFffTae6uM=";
|
||||
};
|
||||
|
||||
postPatch = ''
|
||||
@ -57,7 +57,7 @@ stdenv.mkDerivation rec {
|
||||
postInstall = lib.optionalString stdenv.isDarwin ''
|
||||
mkdir -p $out/{Applications,bin,lib}
|
||||
mv $out/GrandOrgue.app $out/Applications/
|
||||
for lib in $out/Applications/GrandOrgue.app/Contents/MacOS/lib*; do
|
||||
for lib in $out/Applications/GrandOrgue.app/Contents/Frameworks/lib*; do
|
||||
ln -s $lib $out/lib/
|
||||
done
|
||||
makeWrapper $out/{Applications/GrandOrgue.app/Contents/MacOS,bin}/GrandOrgue
|
||||
|
@ -63,11 +63,10 @@ libBuildHelper.extendMkDerivation' genericBuild (finalAttrs:
|
||||
/*
|
||||
recipe: Optional MELPA recipe.
|
||||
Default: a minimally functional recipe
|
||||
This can be a path of a recipe file, a string of the recipe content or an empty string.
|
||||
The default value is used if it is an empty string.
|
||||
*/
|
||||
, recipe ? (writeText "${finalAttrs.pname}-recipe" ''
|
||||
(${finalAttrs.ename} :fetcher git :url ""
|
||||
${lib.optionalString (finalAttrs.files != null) ":files ${finalAttrs.files}"})
|
||||
'')
|
||||
, recipe ? ""
|
||||
, preUnpack ? ""
|
||||
, postUnpack ? ""
|
||||
, meta ? {}
|
||||
@ -98,9 +97,21 @@ libBuildHelper.extendMkDerivation' genericBuild (finalAttrs:
|
||||
|
||||
preUnpack = ''
|
||||
mkdir -p "$NIX_BUILD_TOP/recipes"
|
||||
if [ -n "$recipe" ]; then
|
||||
cp "$recipe" "$NIX_BUILD_TOP/recipes/$ename"
|
||||
recipeFile="$NIX_BUILD_TOP/recipes/$ename"
|
||||
if [ -r "$recipe" ]; then
|
||||
ln -s "$recipe" "$recipeFile"
|
||||
nixInfoLog "link recipe"
|
||||
elif [ -n "$recipe" ]; then
|
||||
printf "%s" "$recipe" > "$recipeFile"
|
||||
nixInfoLog "write recipe"
|
||||
else
|
||||
cat > "$recipeFile" <<'EOF'
|
||||
(${finalAttrs.ename} :fetcher git :url "" ${lib.optionalString (finalAttrs.files != null) ":files ${finalAttrs.files}"})
|
||||
EOF
|
||||
nixInfoLog "use default recipe"
|
||||
fi
|
||||
nixInfoLog "recipe content:" "$(< $recipeFile)"
|
||||
unset -v recipeFile
|
||||
|
||||
ln -s "$packageBuild" "$NIX_BUILD_TOP/package-build"
|
||||
|
||||
@ -115,6 +126,11 @@ libBuildHelper.extendMkDerivation' genericBuild (finalAttrs:
|
||||
buildPhase = args.buildPhase or ''
|
||||
runHook preBuild
|
||||
|
||||
# This is modified from stdenv buildPhase. foundMakefile is used in stdenv checkPhase.
|
||||
if [[ ! ( -z "''${makeFlags-}" && -z "''${makefile:-}" && ! ( -e Makefile || -e makefile || -e GNUmakefile ) ) ]]; then
|
||||
foundMakefile=1
|
||||
fi
|
||||
|
||||
pushd "$NIX_BUILD_TOP"
|
||||
|
||||
emacs --batch -Q \
|
||||
|
@ -14,6 +14,11 @@ args:
|
||||
buildPhase = args.buildPhase or ''
|
||||
runHook preBuild
|
||||
|
||||
# This is modified from stdenv buildPhase. foundMakefile is used in stdenv checkPhase.
|
||||
if [[ ! ( -z "''${makeFlags-}" && -z "''${makefile:-}" && ! ( -e Makefile || -e makefile || -e GNUmakefile ) ) ]]; then
|
||||
foundMakefile=1
|
||||
fi
|
||||
|
||||
emacs -L . --batch -f batch-byte-compile *.el
|
||||
|
||||
runHook postBuild
|
||||
|
@ -1,6 +1,6 @@
|
||||
{ lib, fetchFromGitHub }:
|
||||
rec {
|
||||
version = "9.1.0689";
|
||||
version = "9.1.0707";
|
||||
|
||||
outputs = [ "out" "xxd" ];
|
||||
|
||||
@ -8,7 +8,7 @@ rec {
|
||||
owner = "vim";
|
||||
repo = "vim";
|
||||
rev = "v${version}";
|
||||
hash = "sha256-87y/STnGB2Yf64TMwCd6VCFF2kvy+DmNyaXVKPIc86E=";
|
||||
hash = "sha256-iHOLABPk5B7Sh7EBYnM7wdxnK2Wv7q4WS3FEp780SV4=";
|
||||
};
|
||||
|
||||
enableParallelBuilding = true;
|
||||
|
@ -8,8 +8,8 @@ vscode-utils.buildVscodeMarketplaceExtension {
|
||||
mktplcRef = {
|
||||
name = "vscode-pylance";
|
||||
publisher = "MS-python";
|
||||
version = "2024.8.1";
|
||||
hash = "sha256-UfbhvEWZVAQ/3xg57JpNqURTx/+g6zsWZ0WOzRHtrOU=";
|
||||
version = "2024.8.2";
|
||||
hash = "sha256-EwxQjCBSmJ78L06EtKB8twIz5x51Jo/DHNlpD31pIKA=";
|
||||
};
|
||||
|
||||
buildInputs = [ pyright ];
|
||||
|
@ -19,12 +19,12 @@
|
||||
|
||||
stdenv.mkDerivation rec {
|
||||
pname = "visualboyadvance-m";
|
||||
version = "2.1.9";
|
||||
version = "2.1.10";
|
||||
src = fetchFromGitHub {
|
||||
owner = "visualboyadvance-m";
|
||||
repo = "visualboyadvance-m";
|
||||
rev = "v${version}";
|
||||
sha256 = "sha256-t5/CM5KXDG0OCByu7mUyuC5NkYmB3BFmEHHgnMY05nE=";
|
||||
sha256 = "sha256-ca+BKedHuOwHOCXgjLkkpR6Pd+59X2R66dbPWEg2O5A=";
|
||||
};
|
||||
|
||||
nativeBuildInputs = [ cmake pkg-config wrapGAppsHook3 ];
|
||||
|
@ -6,14 +6,14 @@
|
||||
, gitUpdater
|
||||
}: python3.pkgs.buildPythonApplication rec {
|
||||
pname = "KlipperScreen";
|
||||
version = "0.3.2";
|
||||
version = "0.4.3";
|
||||
format = "other";
|
||||
|
||||
src = fetchFromGitHub {
|
||||
owner = "jordanruthe";
|
||||
repo = "KlipperScreen";
|
||||
rev = "v${version}";
|
||||
hash = "sha256-LweO5EVWr3OxziHrjtQDdWyUBCVUJ17afkw7RCZWgcg=";
|
||||
hash = "sha256-6hMefH9VcNnWhKdQRs7cPVL86LEDRSLaxygmi15HdLI=";
|
||||
};
|
||||
|
||||
nativeBuildInputs = [
|
||||
@ -49,7 +49,7 @@
|
||||
description = "Touchscreen GUI for the Klipper 3D printer firmware";
|
||||
homepage = "https://github.com/jordanruthe/KlipperScreen";
|
||||
license = licenses.agpl3Only;
|
||||
maintainers = with maintainers; [ cab404 ];
|
||||
maintainers = with maintainers; [ cab404 saturn745 ];
|
||||
mainProgram = "KlipperScreen";
|
||||
};
|
||||
}
|
||||
|
@ -24,8 +24,8 @@ stdenv.mkDerivation rec {
|
||||
|
||||
nativeBuildInputs = [ pkg-config qmake wrapQtAppsHook ];
|
||||
qmakeFlags = [ "pgmodeler.pro" "CONFIG+=release" ] ++ lib.optionals stdenv.isDarwin [
|
||||
"PGSQL_INC=${postgresql}/include"
|
||||
"PGSQL_LIB=${postgresql.lib}/lib/libpq.dylib"
|
||||
"PGSQL_INC=${lib.getDev postgresql}/include"
|
||||
"PGSQL_LIB=${lib.getLib postgresql}/lib/libpq.dylib"
|
||||
"XML_INC=${libxml2.dev}/include/libxml2"
|
||||
"XML_LIB=${libxml2.out}/lib/libxml2.dylib"
|
||||
"PREFIX=${placeholder "out"}/Applications/pgModeler.app/Contents"
|
||||
|
@ -18,14 +18,14 @@
|
||||
|
||||
python3Packages.buildPythonApplication rec {
|
||||
pname = "polychromatic";
|
||||
version = "0.9.1";
|
||||
version = "0.9.2";
|
||||
format = "other";
|
||||
|
||||
src = fetchFromGitHub {
|
||||
owner = "polychromatic";
|
||||
repo = "polychromatic";
|
||||
rev = "v${version}";
|
||||
hash = "sha256-3Pt1Z8G0xDWlFD7LxJILPUifMBTN4OvPNHZv80umO1s=";
|
||||
rev = "refs/tags/v${version}";
|
||||
hash = "sha256-eSfyoEu4qQv+R17wgTfATOE1uHkksNxo17btR6swuZo=";
|
||||
};
|
||||
|
||||
postPatch = ''
|
||||
|
@ -15,13 +15,13 @@ let
|
||||
in
|
||||
stdenv.mkDerivation rec {
|
||||
pname = "xmrig";
|
||||
version = "6.21.3";
|
||||
version = "6.22.0";
|
||||
|
||||
src = fetchFromGitHub {
|
||||
owner = "xmrig";
|
||||
repo = "xmrig";
|
||||
rev = "v${version}";
|
||||
hash = "sha256-1lIrxJ1Y5YRoXbZn77Msah5lSVW71gDczYUlXQjf01s=";
|
||||
hash = "sha256-kFjUAOs92xExCV/ph81TFvgRXC6ZRi1m0G51c4JmeMA=";
|
||||
};
|
||||
|
||||
patches = [
|
||||
|
@ -13,13 +13,13 @@ let
|
||||
in
|
||||
stdenv.mkDerivation rec {
|
||||
pname = "xmrig-proxy";
|
||||
version = "6.21.1";
|
||||
version = "6.22.0";
|
||||
|
||||
src = fetchFromGitHub {
|
||||
owner = "xmrig";
|
||||
repo = "xmrig-proxy";
|
||||
rev = "v${version}";
|
||||
hash = "sha256-70SYdO3uyPINanAoARd2lDwyiuc2f/gg4QuoDgoXjjs=";
|
||||
hash = "sha256-qRn/FiYvogGFUIUj3CojtfO6fXRZghH+bgRP+ysI6mc=";
|
||||
};
|
||||
|
||||
postPatch = ''
|
||||
|
@ -58,13 +58,13 @@ let
|
||||
in
|
||||
stdenv.mkDerivation (finalAttrs: {
|
||||
pname = "ladybird";
|
||||
version = "0-unstable-2024-08-12";
|
||||
version = "0-unstable-2024-09-08";
|
||||
|
||||
src = fetchFromGitHub {
|
||||
owner = "LadybirdWebBrowser";
|
||||
repo = "ladybird";
|
||||
rev = "7e57cc7b090455e93261c847064f12a61d686ff3";
|
||||
hash = "sha256-8rkgxEfRH8ERuC7iplQKOzKb1EJ4+SNGDX5gTGpOmQo=";
|
||||
rev = "8d6f36f8d6c0aea0253df8c84746f8c99bf79b4d";
|
||||
hash = "sha256-EB26SAh9eckpq/HrO8O+PivMMmLpFtCdCNkOJcLQvZw=";
|
||||
};
|
||||
|
||||
postPatch = ''
|
||||
|
@ -24,7 +24,7 @@ let
|
||||
vivaldiName = if isSnapshot then "vivaldi-snapshot" else "vivaldi";
|
||||
in stdenv.mkDerivation rec {
|
||||
pname = "vivaldi";
|
||||
version = "6.9.3447.37";
|
||||
version = "6.9.3447.41";
|
||||
|
||||
suffix = {
|
||||
aarch64-linux = "arm64";
|
||||
@ -34,8 +34,8 @@ in stdenv.mkDerivation rec {
|
||||
src = fetchurl {
|
||||
url = "https://downloads.vivaldi.com/${branch}/vivaldi-${branch}_${version}-1_${suffix}.deb";
|
||||
hash = {
|
||||
aarch64-linux = "sha256-kYTnWad/jrJt9z+AhjXzHYxVSIwIIO3RKD7szuPEg2s=";
|
||||
x86_64-linux = "sha256-+h7SHci8gZ+epKFHD0PiXyME2xT+loD2KXpJGFCfIFg=";
|
||||
aarch64-linux = "sha256-Up2n7G3vatsQC9JKF1A1jAIBbdWm9UhL/75AXuxDCsg=";
|
||||
x86_64-linux = "sha256-Hcd8W8bDlRUT/zPYP+aiJnUmepS38KuK0wRFYB3uW1Y=";
|
||||
}.${stdenv.hostPlatform.system} or (throw "Unsupported system: ${stdenv.hostPlatform.system}");
|
||||
};
|
||||
|
||||
|
@ -6,13 +6,13 @@
|
||||
|
||||
buildGoModule rec {
|
||||
pname = "arkade";
|
||||
version = "0.11.22";
|
||||
version = "0.11.24";
|
||||
|
||||
src = fetchFromGitHub {
|
||||
owner = "alexellis";
|
||||
repo = "arkade";
|
||||
rev = version;
|
||||
hash = "sha256-Qc8cQLLRcCNYouWfs8NzF9nrKIPrM1+1VA0wbP2iupQ=";
|
||||
hash = "sha256-9g3SGfJLzn+WIkBGcCwgOaJSuSUSFSU8d/9NZlN0h8E=";
|
||||
};
|
||||
|
||||
CGO_ENABLED = 0;
|
||||
|
@ -2,16 +2,16 @@
|
||||
|
||||
buildGoModule rec {
|
||||
pname = "kubecm";
|
||||
version = "0.30.0";
|
||||
version = "0.31.0";
|
||||
|
||||
src = fetchFromGitHub {
|
||||
owner = "sunny0826";
|
||||
repo = "kubecm";
|
||||
rev = "v${version}";
|
||||
hash = "sha256-zyXxMp+59buSwm4fZY62b2xqAmq7XAzhET0qez8oWPs=";
|
||||
hash = "sha256-Go2lroa8lq1XjIzvdA5ZL/lOjTAyDKopBepqgWzsUII=";
|
||||
};
|
||||
|
||||
vendorHash = "sha256-6RrnsRbQ1+Cx7vnqauisBICgwmvTpJQT32DnIDVc6ts=";
|
||||
vendorHash = "sha256-BrSYfxftrnNOcPgG/rsTF3OukDd+VlOvE7OJcos7vW4=";
|
||||
ldflags = [ "-s" "-w" "-X github.com/sunny0826/kubecm/version.Version=${version}"];
|
||||
|
||||
doCheck = false;
|
||||
|
@ -15,17 +15,17 @@
|
||||
|
||||
buildGoModule rec {
|
||||
inherit pname;
|
||||
version = "2.8.2";
|
||||
version = "2.8.3";
|
||||
tags = lib.optionals enableGateway [ "gateway" ];
|
||||
|
||||
src = fetchFromGitHub {
|
||||
owner = "kumahq";
|
||||
repo = "kuma";
|
||||
rev = version;
|
||||
hash = "sha256-znjOMegh0lgFDonUXtRfs+1ZMN5Olzz01E2tX+tRcns=";
|
||||
hash = "sha256-wGEO7DJLWy/d6SYsTb8EZhF9c1ptYBXDL/Owter4nfo=";
|
||||
};
|
||||
|
||||
vendorHash = "sha256-FEdDOpz6C89OlzU3Pl4Uu6P0WgM4QsuccQ9vAHnb4xI=";
|
||||
vendorHash = "sha256-PAW2Byzz6Ky4I51QrJoNoyn1QH/i0SeU2dDHvj2BqXM=";
|
||||
|
||||
# no test files
|
||||
doCheck = false;
|
||||
|
@ -2,7 +2,7 @@
|
||||
|
||||
(callPackage ./generic.nix { }) {
|
||||
channel = "edge";
|
||||
version = "24.8.2";
|
||||
sha256 = "0jvyw002xy5zdb27q02r3bj88138zpc73an61sbgmls3jwp9w9iq";
|
||||
vendorHash = "sha256-16tdpREYDJDvwIZLpwCxGsZGERxMdSyPH7c6wbD2GCI=";
|
||||
version = "24.8.3";
|
||||
sha256 = "05ynk7p86pa81nyfj9vkfmvgss0nfz3zszrlm967cakhanc5083g";
|
||||
vendorHash = "sha256-Edn5w264IU3ez47jb2wqX5zXeKiLtewWs05LXYr5q50=";
|
||||
}
|
||||
|
@ -15,9 +15,9 @@
|
||||
|
||||
buildGoModule rec {
|
||||
pname = "minikube";
|
||||
version = "1.33.1";
|
||||
version = "1.34.0";
|
||||
|
||||
vendorHash = "sha256-VHl6CKPWqahX70GHbZE6SVa8XPfiC912DvsOteH2B0w=";
|
||||
vendorHash = "sha256-gw5Ol7Gp26KyIaiMvwik8FJpABpMT86vpFnZnAJ6hhs=";
|
||||
|
||||
doCheck = false;
|
||||
|
||||
@ -25,7 +25,7 @@ buildGoModule rec {
|
||||
owner = "kubernetes";
|
||||
repo = "minikube";
|
||||
rev = "v${version}";
|
||||
sha256 = "sha256-z0wNngEzddxpeeLyQVA2yRC5SfYvU5G66V95sVmW6bA=";
|
||||
sha256 = "sha256-Z7x3MOQUF3a19X4SSiIUfSJ3xl3482eKH700m/9pqcU=";
|
||||
};
|
||||
postPatch =
|
||||
(
|
||||
|
@ -1,6 +1,7 @@
|
||||
{ lib
|
||||
, stdenv
|
||||
, buildGoModule
|
||||
, buildGo123Module
|
||||
, fetchFromGitHub
|
||||
, fetchFromGitLab
|
||||
, callPackage
|
||||
@ -24,7 +25,7 @@ let
|
||||
, deleteVendor ? false
|
||||
, proxyVendor ? false
|
||||
, mkProviderFetcher ? fetchFromGitHub
|
||||
, mkProviderGoModule ? buildGoModule
|
||||
, mkProviderGoModule ? buildGo123Module
|
||||
# "https://registry.terraform.io/providers/vancluever/acme"
|
||||
, homepage ? ""
|
||||
# "registry.terraform.io/vancluever/acme"
|
||||
|
File diff suppressed because it is too large
Load Diff
@ -2,13 +2,13 @@
|
||||
|
||||
buildGoModule rec {
|
||||
pname = "deck";
|
||||
version = "1.39.5";
|
||||
version = "1.39.6";
|
||||
|
||||
src = fetchFromGitHub {
|
||||
owner = "Kong";
|
||||
repo = "deck";
|
||||
rev = "v${version}";
|
||||
hash = "sha256-kG7eT9g4akiQV2dpZuIi3uabW2lnCm2SF2uT/wFIUiA=";
|
||||
hash = "sha256-IiwS+NsjXW4kVAaJnsI8HEAl2pPRQr3K2ZpC7n/VjU4=";
|
||||
};
|
||||
|
||||
nativeBuildInputs = [ installShellFiles ];
|
||||
@ -21,7 +21,7 @@ buildGoModule rec {
|
||||
];
|
||||
|
||||
proxyVendor = true; # darwin/linux hash mismatch
|
||||
vendorHash = "sha256-3iUnNSelViAgmwsA9XZg50+JGbizamiM1Y64rZ7KeFo=";
|
||||
vendorHash = "sha256-wpTXuyeUIPg6WPzVyOIFadodlKHzr5DeDeHhDRKsYbY=";
|
||||
|
||||
postInstall = ''
|
||||
installShellCompletion --cmd deck \
|
||||
|
@ -63,14 +63,14 @@ let
|
||||
in
|
||||
stdenv.mkDerivation (finalAttrs: {
|
||||
pname = "telegram-desktop";
|
||||
version = "5.5.1";
|
||||
version = "5.5.2";
|
||||
|
||||
src = fetchFromGitHub {
|
||||
owner = "telegramdesktop";
|
||||
repo = "tdesktop";
|
||||
rev = "v${finalAttrs.version}";
|
||||
fetchSubmodules = true;
|
||||
hash = "sha256-PTa79SbSsOyWlZJ0ad4w6YIiChLzqaSZGRmzyq5qVK0=";
|
||||
hash = "sha256-5edk3+RusCq93A6mkzCoegxr6J2fRc9rtGk6IpGHBAY=";
|
||||
};
|
||||
|
||||
patches = [
|
||||
|
@ -36,14 +36,14 @@ let
|
||||
in
|
||||
assert lib.all (p: p.enabled -> ! (builtins.elem null p.buildInputs)) plugins;
|
||||
stdenv.mkDerivation rec {
|
||||
version = "4.4.1";
|
||||
version = "4.4.2";
|
||||
pname = "weechat";
|
||||
|
||||
hardeningEnable = [ "pie" ];
|
||||
|
||||
src = fetchurl {
|
||||
url = "https://weechat.org/files/src/weechat-${version}.tar.xz";
|
||||
hash = "sha256-5d4L0UwqV6UFgTqDw9NyZI0tlXPccoNoV78ocXMmk2w=";
|
||||
hash = "sha256-1N8ompxbygOm1PrgBuUgNwZO8Dutb76VnFOPMZdDTew=";
|
||||
};
|
||||
|
||||
# Why is this needed? https://github.com/weechat/weechat/issues/2031
|
||||
|
@ -2,13 +2,13 @@
|
||||
|
||||
stdenv.mkDerivation rec {
|
||||
pname = "notmuch-bower";
|
||||
version = "1.0";
|
||||
version = "1.1";
|
||||
|
||||
src = fetchFromGitHub {
|
||||
owner = "wangp";
|
||||
repo = "bower";
|
||||
rev = version;
|
||||
sha256 = "sha256-BNuJEVuzreI2AK/fqVMRHq8ZhPQjO33Y2FzkrWlfmm0=";
|
||||
sha256 = "sha256-CqA9JU/ujqIn/NvtbPtSWxKDYCv4oDdLCgbf2jj9Av4=";
|
||||
};
|
||||
|
||||
nativeBuildInputs = [ mercury pandoc ];
|
||||
|
@ -74,7 +74,7 @@ let
|
||||
];
|
||||
gemConfig = defaultGemConfig // {
|
||||
pg = attrs: {
|
||||
buildFlags = [ "--with-pg-config=${postgresql}/bin/pg_config" ];
|
||||
buildFlags = [ "--with-pg-config=${lib.getDev postgresql}/bin/pg_config" ];
|
||||
};
|
||||
rszr = attrs: {
|
||||
buildInputs = [ imlib2 imlib2.dev ];
|
||||
|
@ -33,14 +33,14 @@ let
|
||||
}.${system} or throwSystem;
|
||||
|
||||
hash = {
|
||||
x86_64-linux = "sha256-DoN6I1lk4WpOZ+jC+od7jum3lxBHFppea5QFTuqY5nk=";
|
||||
x86_64-linux = "sha256-25FFXrUE1NvIXlOFR9KZyjD3w8xuvPlpqz/KkUTt1TQ=";
|
||||
}.${system} or throwSystem;
|
||||
|
||||
displayname = "XPipe";
|
||||
|
||||
in stdenvNoCC.mkDerivation rec {
|
||||
pname = "xpipe";
|
||||
version = "11.1";
|
||||
version = "11.2";
|
||||
|
||||
src = fetchzip {
|
||||
url = "https://github.com/xpipe-io/xpipe/releases/download/${version}/xpipe-portable-linux-${arch}.tar.gz";
|
||||
|
@ -2,11 +2,11 @@
|
||||
|
||||
let
|
||||
pname = "jbrowse";
|
||||
version = "2.14.0";
|
||||
version = "2.15.0";
|
||||
|
||||
src = fetchurl {
|
||||
url = "https://github.com/GMOD/jbrowse-components/releases/download/v${version}/jbrowse-desktop-v${version}-linux.AppImage";
|
||||
sha256 = "sha256-fxXOUB+glJmg4WdL+mNfkp0O4iUsl8L1EuIYpBO1gRA=";
|
||||
sha256 = "sha256-WA0R1V83xlUFSDR4B95BX7VpzLUVF4U7f+t+x+lt30o=";
|
||||
};
|
||||
|
||||
appimageContents = appimageTools.extractType2 {
|
||||
|
@ -58,13 +58,13 @@ let
|
||||
in
|
||||
stdenv.mkDerivation rec {
|
||||
pname = "cp2k";
|
||||
version = "2024.2";
|
||||
version = "2024.3";
|
||||
|
||||
src = fetchFromGitHub {
|
||||
owner = "cp2k";
|
||||
repo = "cp2k";
|
||||
rev = "v${version}";
|
||||
hash = "sha256-KXxqzapdPZggFlxX1rkNcxEYb2+aQIPFclFspxII7aE=";
|
||||
hash = "sha256-TeVQ0wVUx6d4knwMi9z3LjQZ4ELE6s1TnvwfFz8jbYk=";
|
||||
fetchSubmodules = true;
|
||||
};
|
||||
|
||||
|
@ -57,24 +57,24 @@
|
||||
|
||||
let
|
||||
pname = "gitkraken";
|
||||
version = "10.2.0";
|
||||
version = "10.3.0";
|
||||
|
||||
throwSystem = throw "Unsupported system: ${stdenv.hostPlatform.system}";
|
||||
|
||||
srcs = {
|
||||
x86_64-linux = fetchzip {
|
||||
url = "https://release.axocdn.com/linux/GitKraken-v${version}.tar.gz";
|
||||
hash = "sha256-NY7IgUt6Q27Pz1K46xL4LYFBwTJdT+fvsFDM2OhoPWg=";
|
||||
hash = "sha256-5WICLLuv+NL++fgm7p/ScyEvzwVqcXNI6eXGr4e9k20=";
|
||||
};
|
||||
|
||||
x86_64-darwin = fetchzip {
|
||||
url = "https://release.axocdn.com/darwin/GitKraken-v${version}.zip";
|
||||
hash = "sha256-e7zIMJNMdFy7/8zsV3nH1OT76xNznoSPRUVhoLfR6QI=";
|
||||
hash = "sha256-zkQQR90pdYGIZf3OmAdKc1SlotgdSRGJaYsA1n74aZk=";
|
||||
};
|
||||
|
||||
aarch64-darwin = fetchzip {
|
||||
url = "https://release.axocdn.com/darwin-arm64/GitKraken-v${version}.zip";
|
||||
hash = "sha256-YjGH9tKbJaYc5qENBCCpJGWWFJIQTD8O2H3onhMwGrw=";
|
||||
hash = "sha256-WYBXupyunpAaoHmA4dHfd/oruP20rYsIq5mO4/dDsoM=";
|
||||
};
|
||||
};
|
||||
|
||||
|
@ -1,34 +0,0 @@
|
||||
{ lib, rustPlatform, fetchFromGitHub, stdenv, darwin, git }:
|
||||
|
||||
rustPlatform.buildRustPackage rec {
|
||||
pname = "srvc";
|
||||
version = "0.20.0";
|
||||
|
||||
src = fetchFromGitHub {
|
||||
owner = "insilica";
|
||||
repo = "rs-srvc";
|
||||
rev = "v${version}";
|
||||
hash = "sha256-pnlbMU/uoP9ZK8kzTRYTMY9+X9VIKJHwW2qMXXD8Udg=";
|
||||
};
|
||||
|
||||
cargoHash = "sha256-+m8WJMn1aq3FBDO5c/ZwbcK2G+UE5pSwHTgOl2s6pDw=";
|
||||
|
||||
buildInputs = lib.optionals stdenv.isDarwin [
|
||||
darwin.apple_sdk.frameworks.CoreServices
|
||||
darwin.apple_sdk.frameworks.Security
|
||||
];
|
||||
|
||||
nativeCheckInputs = [ git ];
|
||||
|
||||
# remove timeouts in tests to make them less flaky
|
||||
TEST_SRVC_DISABLE_TIMEOUT = 1;
|
||||
|
||||
meta = with lib; {
|
||||
description = "Sysrev version control";
|
||||
homepage = "https://github.com/insilica/rs-srvc";
|
||||
changelog = "https://github.com/insilica/rs-srvc/blob/v${version}/CHANGELOG.md";
|
||||
license = licenses.asl20;
|
||||
maintainers = with maintainers; [ john-shaffer ];
|
||||
mainProgram = "sr";
|
||||
};
|
||||
}
|
@ -1,7 +1,7 @@
|
||||
{ stdenv, lib, fetchFromGitHub
|
||||
, cmake, wrapQtAppsHook, perl
|
||||
, flatbuffers, protobuf, mbedtls
|
||||
, hidapi, libcec, libusb1
|
||||
, alsa-lib, hidapi, libcec, libusb1
|
||||
, libX11, libxcb, libXrandr, python3
|
||||
, qtbase, qtserialport, qtsvg, qtx11extras
|
||||
, withRPiDispmanx ? false, libraspberrypi
|
||||
@ -9,13 +9,13 @@
|
||||
|
||||
stdenv.mkDerivation rec {
|
||||
pname = "hyperion.ng";
|
||||
version = "2.0.14";
|
||||
version = "2.0.16";
|
||||
|
||||
src = fetchFromGitHub {
|
||||
owner = "hyperion-project";
|
||||
repo = pname;
|
||||
rev = version;
|
||||
hash = "sha256-Y1PZ+YyPMZEX4fBpMG6IVT1gtXR9ZHlavJMCQ4KAenc=";
|
||||
hash = "sha256-nQPtJw9DOKMPGI5trxZxpP+z2PYsbRKqOQEyaGzvmmA=";
|
||||
# needed for `dependencies/external/`:
|
||||
# * rpi_ws281x` - not possible to use as a "system" lib
|
||||
# * qmdnsengine - not in nixpkgs yet
|
||||
@ -23,6 +23,7 @@ stdenv.mkDerivation rec {
|
||||
};
|
||||
|
||||
buildInputs = [
|
||||
alsa-lib
|
||||
hidapi
|
||||
libusb1
|
||||
libX11
|
||||
|
@ -37,13 +37,13 @@ let
|
||||
in
|
||||
buildPerlModule rec {
|
||||
pname = "pipe-viewer";
|
||||
version = "0.5.2";
|
||||
version = "0.5.3";
|
||||
|
||||
src = fetchFromGitHub {
|
||||
owner = "trizen";
|
||||
repo = "pipe-viewer";
|
||||
rev = version;
|
||||
hash = "sha256-TCcAQjz0B3eWILMAoqHCnMLvu8zD0W5wOFg+UaMPmXg=";
|
||||
hash = "sha256-crYdbHIDcecfq1FKoRWX3u9x9wqdlaYrBgr7mGdEHeU=";
|
||||
};
|
||||
|
||||
nativeBuildInputs = [ makeWrapper ]
|
||||
|
@ -15,13 +15,13 @@
|
||||
|
||||
python3Packages.buildPythonApplication rec {
|
||||
pname = "tartube";
|
||||
version = "2.5.0";
|
||||
version = "2.5.040";
|
||||
|
||||
src = fetchFromGitHub {
|
||||
owner = "axcore";
|
||||
repo = "tartube";
|
||||
rev = "refs/tags/v${version}";
|
||||
sha256 = "sha256-IcJDh8Q9K6SROZWVi98R1N2kSdgwJczScLdJFKy2FIU=";
|
||||
sha256 = "sha256-yFsQbEXjWPxLYqFxsI6MjK1hE8Lk2Z0sPj3peLBs7r8=";
|
||||
};
|
||||
|
||||
nativeBuildInputs = [
|
||||
|
@ -142,7 +142,7 @@ stdenv.mkDerivation rec {
|
||||
''}
|
||||
'';
|
||||
|
||||
patchPhase = lib.optionalString enableMacOSGuests ''
|
||||
postPatch = lib.optionalString enableMacOSGuests ''
|
||||
cp -R "${unlockerSrc}" unlocker/
|
||||
|
||||
substituteInPlace unlocker/unlocker.py --replace \
|
||||
@ -153,6 +153,8 @@ stdenv.mkDerivation rec {
|
||||
'';
|
||||
|
||||
installPhase = ''
|
||||
runHook preInstall
|
||||
|
||||
mkdir -p \
|
||||
$out/bin \
|
||||
$out/etc/vmware \
|
||||
@ -324,7 +326,7 @@ stdenv.mkDerivation rec {
|
||||
sed -i -e "s,/sbin/modprobe,${kmod}/bin/modprobe," $out/bin/vmplayer
|
||||
sed -i -e "s,@@BINARY@@,$out/bin/vmplayer," $out/share/applications/vmware-player.desktop
|
||||
|
||||
## VMware OVF Tool compoment
|
||||
## VMware OVF Tool component
|
||||
echo "Installing VMware OVF Tool for Linux"
|
||||
unpacked="unpacked/vmware-ovftool"
|
||||
mkdir -p $out/lib/vmware-ovftool/
|
||||
@ -390,7 +392,7 @@ stdenv.mkDerivation rec {
|
||||
|
||||
chmod +x $out/bin/* $out/lib/vmware/bin/* $out/lib/vmware/setup/*
|
||||
|
||||
# Harcoded pkexec hack
|
||||
# Hardcoded pkexec hack
|
||||
for lib in "lib/vmware/lib/libvmware-mount.so/libvmware-mount.so" "lib/vmware/lib/libvmwareui.so/libvmwareui.so" "lib/vmware/lib/libvmware-fuseUI.so/libvmware-fuseUI.so"
|
||||
do
|
||||
sed -i -e "s,/usr/local/sbin,/run/vmware/bin," "$out/$lib"
|
||||
@ -405,6 +407,8 @@ stdenv.mkDerivation rec {
|
||||
wrapProgram $out/lib/vmware/bin/vmware-vmx
|
||||
rm $out/lib/vmware/bin/vmware-vmx
|
||||
ln -s /run/wrappers/bin/vmware-vmx $out/lib/vmware/bin/vmware-vmx
|
||||
|
||||
runHook postInstall
|
||||
'';
|
||||
|
||||
meta = with lib; {
|
||||
|
@ -9,13 +9,13 @@
|
||||
|
||||
stdenv.mkDerivation (finalAttrs: {
|
||||
pname = "dk";
|
||||
version = "2.2";
|
||||
version = "2.3";
|
||||
|
||||
src = fetchFromBitbucket {
|
||||
owner = "natemaia";
|
||||
repo = "dk";
|
||||
rev = "v${finalAttrs.version}";
|
||||
hash = "sha256-u1fZTcfGLwKFeRADU55MFYDvtSOaOg5qtWB90xYpVuY=";
|
||||
hash = "sha256-VkNF3F/NRQadBkbnbVmMZliIXRxFU0qqxOeQDX4UrJg=";
|
||||
};
|
||||
|
||||
buildInputs = [
|
||||
|
@ -688,7 +688,7 @@ stdenvNoCC.mkDerivation {
|
||||
''
|
||||
|
||||
+ optionalString targetPlatform.isAndroid ''
|
||||
echo "-D__ANDROID_API__=${targetPlatform.sdkVer}" >> $out/nix-support/cc-cflags
|
||||
echo "-D__ANDROID_API__=${targetPlatform.androidSdkVersion}" >> $out/nix-support/cc-cflags
|
||||
''
|
||||
|
||||
# There are a few tools (to name one libstdcxx5) which do not work
|
||||
|
@ -67,7 +67,7 @@ stdenvNoCC.mkDerivation {
|
||||
builder = ./builder.sh;
|
||||
fetcher = ./nix-prefetch-git;
|
||||
|
||||
nativeBuildInputs = [ git ]
|
||||
nativeBuildInputs = [ git cacert ]
|
||||
++ lib.optionals fetchLFS [ git-lfs ];
|
||||
|
||||
outputHashAlgo = if hash != "" then null else "sha256";
|
||||
@ -94,8 +94,6 @@ stdenvNoCC.mkDerivation {
|
||||
export HOME=$PWD
|
||||
'';
|
||||
|
||||
GIT_SSL_CAINFO = "${cacert}/etc/ssl/certs/ca-bundle.crt";
|
||||
|
||||
impureEnvVars = lib.fetchers.proxyImpureEnvVars ++ netrcImpureEnvVars ++ [
|
||||
"GIT_PROXY_COMMAND" "NIX_GIT_SSL_CAINFO" "SOCKS_SERVER"
|
||||
];
|
||||
|
@ -17,9 +17,9 @@ branchName=$NIX_PREFETCH_GIT_BRANCH_NAME
|
||||
out=${out:-}
|
||||
http_proxy=${http_proxy:-}
|
||||
|
||||
# allow overwriting cacert's ca-bundle.crt with a custom one
|
||||
# this can be done by setting NIX_GIT_SSL_CAINFO and NIX_SSL_CERT_FILE environment variables for the nix-daemon
|
||||
GIT_SSL_CAINFO=${NIX_GIT_SSL_CAINFO:-$GIT_SSL_CAINFO}
|
||||
# NOTE: use of NIX_GIT_SSL_CAINFO is for backwards compatibility; NIX_SSL_CERT_FILE is preferred
|
||||
# as of PR#303307
|
||||
GIT_SSL_CAINFO=${NIX_GIT_SSL_CAINFO:-$NIX_SSL_CERT_FILE}
|
||||
|
||||
# populated by clone_user_rev()
|
||||
fullRev=
|
||||
|
@ -136,6 +136,7 @@ in
|
||||
exit 10
|
||||
fi
|
||||
|
||||
export GIT_SSL_CAINFO=$NIX_SSL_CERT_FILE
|
||||
${if finalAttrs.proxyVendor then ''
|
||||
mkdir -p "''${GOPATH}/pkg/mod/cache/download"
|
||||
go mod download
|
||||
|
@ -1,12 +0,0 @@
|
||||
{ makeSetupHook, tests }:
|
||||
|
||||
# See the header comment in ../setup-hooks/install-shell-files.sh for example usage.
|
||||
let
|
||||
setupHook = makeSetupHook { name = "install-shell-files"; } ../setup-hooks/install-shell-files.sh;
|
||||
in
|
||||
|
||||
setupHook.overrideAttrs (oldAttrs: {
|
||||
passthru = (oldAttrs.passthru or {}) // {
|
||||
tests = tests.install-shell-files;
|
||||
};
|
||||
})
|
@ -37,9 +37,9 @@ in
|
||||
, extension ? _compressorMeta.extension or
|
||||
(throw "Unrecognised compressor ${_compressorName}, please specify filename extension")
|
||||
|
||||
# List of { object = path_or_derivation; symlink = "/path"; }
|
||||
# List of { source = path_or_derivation; target = "/path"; }
|
||||
# The paths are copied into the initramfs in their nix store path
|
||||
# form, then linked at the root according to `symlink`.
|
||||
# form, then linked at the root according to `target`.
|
||||
, contents
|
||||
|
||||
# List of uncompressed cpio files to prepend to the initramfs. This
|
||||
|
@ -141,10 +141,20 @@ const performParallel = tasks => {
|
||||
return Promise.all(workers)
|
||||
}
|
||||
|
||||
// This could be implemented using [`Map.groupBy`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Map/groupBy),
|
||||
// but that method is only supported starting with Node 21
|
||||
const uniqueBy = (arr, callback) => {
|
||||
const map = new Map()
|
||||
for (const elem of arr) {
|
||||
map.set(callback(elem), elem)
|
||||
}
|
||||
return [...map.values()]
|
||||
}
|
||||
|
||||
const prefetchYarnDeps = async (lockContents, verbose) => {
|
||||
const lockData = lockfile.parse(lockContents)
|
||||
await performParallel(
|
||||
Object.entries(lockData.object)
|
||||
uniqueBy(Object.entries(lockData.object), ([_, value]) => value.resolved)
|
||||
.map(([key, value]) => () => downloadPkg({ key, ...value }, verbose))
|
||||
)
|
||||
await fs.promises.writeFile('yarn.lock', lockContents)
|
||||
|
@ -4,8 +4,11 @@ autoreconfPhase() {
|
||||
runHook preAutoreconf
|
||||
|
||||
local flagsArray=()
|
||||
: "${autoreconfFlags:=--install --force --verbose}"
|
||||
concatTo flagsArray autoreconfFlags
|
||||
if [[ -v autoreconfFlags ]]; then
|
||||
concatTo flagsArray autoreconfFlags
|
||||
else
|
||||
flagsArray+=(--install --force --verbose)
|
||||
fi
|
||||
|
||||
autoreconf "${flagsArray[@]}"
|
||||
runHook postAutoreconf
|
||||
|
@ -90,8 +90,8 @@ patchShebangs() {
|
||||
if [[ $arg0 == "-S" ]]; then
|
||||
arg0=${args%% *}
|
||||
args=${args#* }
|
||||
newPath="$(PATH="${!pathName}" command -v "env" || true)"
|
||||
args="-S $(PATH="${!pathName}" command -v "$arg0" || true) $args"
|
||||
newPath="$(PATH="${!pathName}" type -P "env" || true)"
|
||||
args="-S $(PATH="${!pathName}" type -P "$arg0" || true) $args"
|
||||
|
||||
# Check for unsupported 'env' functionality:
|
||||
# - options: something starting with a '-' besides '-S'
|
||||
@ -100,7 +100,7 @@ patchShebangs() {
|
||||
echo "$f: unsupported interpreter directive \"$oldInterpreterLine\" (set dontPatchShebangs=1 and handle shebang patching yourself)" >&2
|
||||
exit 1
|
||||
else
|
||||
newPath="$(PATH="${!pathName}" command -v "$arg0" || true)"
|
||||
newPath="$(PATH="${!pathName}" type -P "$arg0" || true)"
|
||||
fi
|
||||
else
|
||||
if [[ -z $oldPath ]]; then
|
||||
@ -109,7 +109,7 @@ patchShebangs() {
|
||||
oldPath="/bin/sh"
|
||||
fi
|
||||
|
||||
newPath="$(PATH="${!pathName}" command -v "$(basename "$oldPath")" || true)"
|
||||
newPath="$(PATH="${!pathName}" type -P "$(basename "$oldPath")" || true)"
|
||||
|
||||
args="$arg0 $args"
|
||||
fi
|
||||
|
@ -12,7 +12,7 @@ let
|
||||
self = python3;
|
||||
packageOverrides = _: super: { tree-sitter = super.tree-sitter_0_21; };
|
||||
};
|
||||
version = "0.54.0";
|
||||
version = "0.56.0";
|
||||
in
|
||||
python3.pkgs.buildPythonApplication {
|
||||
pname = "aider-chat";
|
||||
@ -23,10 +23,12 @@ python3.pkgs.buildPythonApplication {
|
||||
owner = "paul-gauthier";
|
||||
repo = "aider";
|
||||
rev = "refs/tags/v${version}";
|
||||
hash = "sha256-ysNhfhFGSDhEQLQLP26Lv6qmZehmwtQTSlAqJVPD5O8=";
|
||||
hash = "sha256-e0Fqj67vYt41Zbr1FN2fuLp6cHRius8RtacBHLgB9dM=";
|
||||
};
|
||||
|
||||
build-system = with python3.pkgs; [ setuptools ];
|
||||
pythonRelaxDeps = true;
|
||||
|
||||
build-system = with python3.pkgs; [ setuptools-scm ];
|
||||
|
||||
dependencies =
|
||||
with python3.pkgs;
|
||||
@ -41,6 +43,7 @@ python3.pkgs.buildPythonApplication {
|
||||
gitpython
|
||||
grep-ast
|
||||
importlib-resources
|
||||
json5
|
||||
jsonschema
|
||||
jiter
|
||||
litellm
|
||||
@ -70,32 +73,29 @@ python3.pkgs.buildPythonApplication {
|
||||
|
||||
buildInputs = [ portaudio ];
|
||||
|
||||
pythonRelaxDeps = true;
|
||||
|
||||
nativeCheckInputs = (with python3.pkgs; [ pytestCheckHook ]) ++ [ gitMinimal ];
|
||||
|
||||
disabledTestPaths = [
|
||||
# requires network
|
||||
# Tests require network access
|
||||
"tests/scrape/test_scrape.py"
|
||||
|
||||
# Expected 'mock' to have been called once
|
||||
"tests/help/test_help.py"
|
||||
];
|
||||
|
||||
disabledTests =
|
||||
[
|
||||
# requires network
|
||||
# Tests require network
|
||||
"test_urls"
|
||||
"test_get_commit_message_with_custom_prompt"
|
||||
|
||||
# FileNotFoundError
|
||||
"test_get_commit_message"
|
||||
|
||||
# Expected 'launch_gui' to have been called once
|
||||
"test_browser_flag_imports_streamlit"
|
||||
# AttributeError
|
||||
"test_simple_send_with_retries"
|
||||
]
|
||||
++ lib.optionals stdenv.hostPlatform.isDarwin [
|
||||
# fails on darwin
|
||||
# Tests fails on darwin
|
||||
"test_dark_mode_sets_code_theme"
|
||||
"test_default_env_file_sets_automatic_variable"
|
||||
];
|
||||
@ -107,8 +107,9 @@ python3.pkgs.buildPythonApplication {
|
||||
meta = {
|
||||
description = "AI pair programming in your terminal";
|
||||
homepage = "https://github.com/paul-gauthier/aider";
|
||||
changelog = "https://github.com/paul-gauthier/aider/blob/v${version}/HISTORY.md";
|
||||
license = lib.licenses.asl20;
|
||||
mainProgram = "aider";
|
||||
maintainers = with lib.maintainers; [ taha-yassine ];
|
||||
mainProgram = "aider";
|
||||
};
|
||||
}
|
||||
|
@ -5,7 +5,7 @@
|
||||
}:
|
||||
buildGoModule rec {
|
||||
pname = "astartectl";
|
||||
version = "23.5.2";
|
||||
version = "24.5.0";
|
||||
|
||||
# Workaround for go vendor failing
|
||||
# https://github.com/astarte-platform/astartectl/pull/244
|
||||
@ -15,7 +15,7 @@ buildGoModule rec {
|
||||
owner = "astarte-platform";
|
||||
repo = "astartectl";
|
||||
rev = "v${version}";
|
||||
hash = "sha256-EIyta/10K6WQ1vzQZryz+c3K2AwMOUUQCw5f4Wkp6Yk=";
|
||||
hash = "sha256-4Iyd+1hLSatWyeV2J7RSqo2jVEc8dSp5JBObsn3RciI=";
|
||||
};
|
||||
|
||||
vendorHash = "sha256-NWPLHbUHrk/oJXCOJF8kKhQiZR8aqZChxuz73Acu1cM=";
|
||||
|
@ -11,13 +11,13 @@
|
||||
}:
|
||||
|
||||
let
|
||||
version = "1.17.2";
|
||||
version = "1.17.3";
|
||||
|
||||
src = fetchFromGitHub {
|
||||
owner = "detachhead";
|
||||
repo = "basedpyright";
|
||||
rev = "refs/tags/v${version}";
|
||||
hash = "sha256-6pksb2drjiZo1Hp6P/G06LAj3nW5WXJbVNGt5897jAA=";
|
||||
hash = "sha256-cnhtge0ueveo70cYDpb/+ss5osHbO1Yyv74NrYBYZOM=";
|
||||
};
|
||||
|
||||
patchedPackageJSON = runCommand "package.json" { } ''
|
||||
|
@ -14,9 +14,8 @@ bmakeBuildPhase() {
|
||||
local flagsArray=(
|
||||
${enableParallelBuilding:+-j${NIX_BUILD_CORES}}
|
||||
SHELL="$SHELL"
|
||||
$makeFlags ${makeFlagsArray+"${makeFlagsArray[@]}"}
|
||||
$buildFlags ${buildFlagsArray+"${buildFlagsArray[@]}"}
|
||||
)
|
||||
concatTo flagsArray makeFlags makeFlagsArray buildFlags buildFlagsArray
|
||||
|
||||
echoCmd 'build flags' "${flagsArray[@]}"
|
||||
bmake ${makefile:+-f $makefile} "${flagsArray[@]}"
|
||||
@ -42,11 +41,8 @@ bmakeCheckPhase() {
|
||||
local flagsArray=(
|
||||
${enableParallelChecking:+-j${NIX_BUILD_CORES}}
|
||||
SHELL="$SHELL"
|
||||
# Old bash empty array hack
|
||||
$makeFlags ${makeFlagsArray+"${makeFlagsArray[@]}"}
|
||||
${checkFlags:-VERBOSE=y} ${checkFlagsArray+"${checkFlagsArray[@]}"}
|
||||
${checkTarget}
|
||||
)
|
||||
concatTo flagsArray makeFlags makeFlagsArray checkFlags=VERBOSE=y checkFlagsArray checkTarget
|
||||
|
||||
echoCmd 'check flags' "${flagsArray[@]}"
|
||||
bmake ${makefile:+-f $makefile} "${flagsArray[@]}"
|
||||
@ -65,11 +61,8 @@ bmakeInstallPhase() {
|
||||
local flagsArray=(
|
||||
${enableParallelInstalling:+-j${NIX_BUILD_CORES}}
|
||||
SHELL="$SHELL"
|
||||
# Old bash empty array hack
|
||||
$makeFlags ${makeFlagsArray+"${makeFlagsArray[@]}"}
|
||||
$installFlags ${installFlagsArray+"${installFlagsArray[@]}"}
|
||||
${installTargets:-install}
|
||||
)
|
||||
concatTo flagsArray makeFlags makeFlagsArray installFlags installFlagsArray installTargets=install
|
||||
|
||||
echoCmd 'install flags' "${flagsArray[@]}"
|
||||
bmake ${makefile:+-f $makefile} "${flagsArray[@]}"
|
||||
@ -84,10 +77,8 @@ bmakeDistPhase() {
|
||||
mkdir -p "$prefix"
|
||||
fi
|
||||
|
||||
# Old bash empty array hack
|
||||
local flagsArray=(
|
||||
$distFlags ${distFlagsArray+"${distFlagsArray[@]}"} ${distTarget:-dist}
|
||||
)
|
||||
local flagsArray=()
|
||||
concatTo flagsArray distFlags distFlagsArray distTarget=dist
|
||||
|
||||
echo 'dist flags: %q' "${flagsArray[@]}"
|
||||
bmake ${makefile:+-f $makefile} "${flagsArray[@]}"
|
||||
|
@ -39,14 +39,14 @@ let
|
||||
in
|
||||
python3Packages.buildPythonApplication rec {
|
||||
pname = "cameractrls";
|
||||
version = "0.6.6";
|
||||
version = "0.6.7";
|
||||
pyproject = false;
|
||||
|
||||
src = fetchFromGitHub {
|
||||
owner = "soyersoyer";
|
||||
repo = "cameractrls";
|
||||
rev = "v${version}";
|
||||
hash = "sha256-QjjLd5L+8Slxc3ywurhsWp1pZ2E1Y7NOdnCV2ZYBlqU=";
|
||||
hash = "sha256-MM+Py8tHtqJWIGmGg3RaIhJa0E1zM3hXOnTOqXPuejw=";
|
||||
};
|
||||
|
||||
postPatch = ''
|
||||
@ -125,7 +125,7 @@ python3Packages.buildPythonApplication rec {
|
||||
presets at device connection).
|
||||
'';
|
||||
homepage = "https://github.com/soyersoyer/cameractrls";
|
||||
license = lib.licenses.gpl3Plus;
|
||||
license = lib.licenses.lgpl3Plus;
|
||||
mainProgram = mainExecutable;
|
||||
maintainers = with lib.maintainers; [ aleksana ];
|
||||
platforms = lib.platforms.linux;
|
||||
|
@ -27,6 +27,20 @@ let
|
||||
hash = "sha256-HyRTOqPj4SnV9gktqRegxOYz9c8mQHOX+IrdZlHhYpo=";
|
||||
};
|
||||
|
||||
meta-unwrapped = {
|
||||
description = "Clash GUI based on tauri";
|
||||
homepage = "https://github.com/clash-verge-rev/clash-verge-rev";
|
||||
license = lib.licenses.gpl3Only;
|
||||
maintainers = with lib.maintainers; [
|
||||
Guanran928
|
||||
bot-wxt1221
|
||||
];
|
||||
platforms = [
|
||||
"x86_64-linux"
|
||||
"aarch64-linux"
|
||||
];
|
||||
};
|
||||
|
||||
service-cargo-hash = "sha256-NBeHR6JvdCp06Ug/UEtLY2tu3iCmlsCU0x8umRbJXLU=";
|
||||
|
||||
service = callPackage ./service.nix {
|
||||
@ -36,6 +50,7 @@ let
|
||||
service-cargo-hash
|
||||
pname
|
||||
;
|
||||
meta = meta-unwrapped;
|
||||
};
|
||||
|
||||
webui = callPackage ./webui.nix {
|
||||
@ -44,6 +59,8 @@ let
|
||||
src
|
||||
pname
|
||||
;
|
||||
meta = meta-unwrapped;
|
||||
|
||||
};
|
||||
|
||||
sysproxy-hash = "sha256-TEC51s/viqXUoEH9rJev8LdC2uHqefInNcarxeogePk=";
|
||||
@ -55,8 +72,8 @@ let
|
||||
src
|
||||
sysproxy-hash
|
||||
webui
|
||||
meta
|
||||
;
|
||||
meta = meta-unwrapped;
|
||||
};
|
||||
|
||||
meta = {
|
||||
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user