Merge branch 'master' into staging

Vladimír Čunát 2017-09-26 22:31:59 +02:00
commit 41aa302727
No known key found for this signature in database
GPG Key ID: E747DF1F9575A3AA
519 changed files with 9218 additions and 4802 deletions

View File

@ -785,7 +785,20 @@ example of such a situation is when `py.test` is used.
#### Common issues
- Non-working tests can often be deselected. In the case of `py.test`: `py.test -k 'not function_name and not other_function'`.
- Non-working tests can often be deselected. By default `buildPythonPackage` runs `python setup.py test`.
Most Python modules follow the standard test protocol, so the pytest runner can be used instead.
`py.test` supports a `-k` parameter to ignore test methods or classes:
```nix
buildPythonPackage {
# ...
# assumes the tests are located in tests
checkInputs = [ pytest ];
checkPhase = ''
py.test -k 'not function_name and not other_function' tests
'';
}
```
- Unicode issues can typically be fixed by including `glibcLocales` in `buildInputs` and exporting `LC_ALL=en_US.utf-8`.
- Tests that attempt to access `$HOME` can be fixed by using the following work-around before running tests (e.g. `preCheck`): `export HOME=$(mktemp -d)` (see the combined sketch below).
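A minimal sketch combining the last two workarounds in a single `buildPythonPackage` call (the locale value and the use of `preCheck` here are illustrative, not mandated):

```nix
buildPythonPackage {
  # ...
  # glibcLocales provides the en_US.utf-8 locale exported below
  buildInputs = [ glibcLocales ];
  preCheck = ''
    export LC_ALL=en_US.utf-8
    export HOME=$(mktemp -d)
  '';
}
```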

View File

@ -107,6 +107,7 @@
choochootrain = "Hurshal Patel <hurshal@imap.cc>";
chris-martin = "Chris Martin <ch.martin@gmail.com>";
chrisjefferson = "Christopher Jefferson <chris@bubblescope.net>";
chrisrosset = "Christopher Rosset <chris@rosset.org.uk>";
christopherpoole = "Christopher Mark Poole <mail@christopherpoole.net>";
ciil = "Simon Lackerbauer <simon@lackerbauer.com>";
ckampka = "Christian Kampka <christian@kampka.net>";
@ -192,12 +193,14 @@
erikryb = "Erik Rybakken <erik.rybakken@math.ntnu.no>";
ertes = "Ertugrul Söylemez <esz@posteo.de>";
ethercrow = "Dmitry Ivanov <ethercrow@gmail.com>";
etu = "Elis Hirwing <elis@hirwing.se>";
exi = "Reno Reckling <nixos@reckling.org>";
exlevan = "Alexey Levan <exlevan@gmail.com>";
expipiplus1 = "Joe Hermaszewski <nix@monoid.al>";
fadenb = "Tristan Helmich <tristan.helmich+nixos@gmail.com>";
fare = "Francois-Rene Rideau <fahree@gmail.com>";
falsifian = "James Cook <james.cook@utoronto.ca>";
fare = "Francois-Rene Rideau <fahree@gmail.com>";
fgaz = "Francesco Gazzetta <francygazz@gmail.com>";
florianjacob = "Florian Jacob <projects+nixos@florianjacob.de>";
flosse = "Markus Kohlhase <mail@markus-kohlhase.de>";
fluffynukeit = "Daniel Austin <dan@fluffynukeit.com>";
@ -289,12 +292,12 @@
jonafato = "Jon Banafato <jon@jonafato.com>";
jpierre03 = "Jean-Pierre PRUNARET <nix@prunetwork.fr>";
jpotier = "Martin Potier <jpo.contributes.to.nixos@marvid.fr>";
jyp = "Jean-Philippe Bernardy <jeanphilippe.bernardy@gmail.com>";
jraygauthier = "Raymond Gauthier <jraygauthier@gmail.com>";
jtojnar = "Jan Tojnar <jtojnar@gmail.com>";
juliendehos = "Julien Dehos <dehos@lisic.univ-littoral.fr>";
jwiegley = "John Wiegley <johnw@newartisans.com>";
jwilberding = "Jordan Wilberding <jwilberding@afiniate.com>";
jyp = "Jean-Philippe Bernardy <jeanphilippe.bernardy@gmail.com>";
jzellner = "Jeff Zellner <jeffz@eml.cc>";
kaiha = "Kai Harries <kai.harries@gmail.com>";
kamilchm = "Kamil Chmielewski <kamil.chm@gmail.com>";
@ -334,6 +337,7 @@
lovek323 = "Jason O'Conal <jason@oconal.id.au>";
lowfatcomputing = "Andreas Wagner <andreas.wagner@lowfatcomputing.org>";
lsix = "Lancelot SIX <lsix@lancelotsix.com>";
ltavard = "Laure Tavard <laure.tavard@univ-grenoble-alpes.fr>";
lucas8 = "Luc Chabassier <luc.linux@mailoo.org>";
ludo = "Ludovic Courtès <ludo@gnu.org>";
lufia = "Kyohei Kadota <lufia@lufia.org>";
@ -496,6 +500,7 @@
renzo = "Renzo Carbonara <renzocarbonara@gmail.com>";
retrry = "Tadas Barzdžius <retrry@gmail.com>";
rht = "rht <rhtbot@protonmail.com>";
richardipsum = "Richard Ipsum <richardipsum@fastmail.co.uk>";
rick68 = "Wei-Ming Yang <rick68@gmail.com>";
rickynils = "Rickard Nilsson <rickynils@gmail.com>";
ris = "Robert Scott <code@humanleg.org.uk>";
@ -505,6 +510,7 @@
robberer = "Longrin Wischnewski <robberer@freakmail.de>";
robbinch = "Robbin C. <robbinch33@gmail.com>";
roberth = "Robert Hensing <nixpkgs@roberthensing.nl>";
robertodr = "Roberto Di Remigio <roberto.diremigio@gmail.com>";
robgssp = "Rob Glossop <robgssp@gmail.com>";
roblabla = "Robin Lambertz <robinlambertz+dev@gmail.com>";
roconnor = "Russell O'Connor <roconnor@theorem.ca>";
@ -578,10 +584,9 @@
taku0 = "Takuo Yonezawa <mxxouy6x3m_github@tatapa.org>";
tari = "Peter Marheine <peter@taricorp.net>";
tavyc = "Octavian Cerna <octavian.cerna@gmail.com>";
ltavard = "Laure Tavard <laure.tavard@univ-grenoble-alpes.fr>";
teh = "Tom Hunger <tehunger@gmail.com>";
teto = "Matthieu Coudron <mcoudron@hotmail.com>";
telotortium = "Robert Irelan <rirelan@gmail.com>";
teto = "Matthieu Coudron <mcoudron@hotmail.com>";
thall = "Niclas Thall <niclas.thall@gmail.com>";
thammers = "Tobias Hammerschmidt <jawr@gmx.de>";
the-kenny = "Moritz Ulrich <moritz@tarn-vedra.de>";
@ -610,6 +615,7 @@
#urkud = "Yury G. Kudryashov <urkud+nix@ya.ru>"; inactive since 2012
uwap = "uwap <me@uwap.name>";
vaibhavsagar = "Vaibhav Sagar <vaibhavsagar@gmail.com>";
valeriangalliat = "Valérian Galliat <val@codejam.info>";
vandenoever = "Jos van den Oever <jos@vandenoever.info>";
vanschelven = "Klaas van Schelven <klaas@vanschelven.com>";
vanzef = "Ivan Solyankin <vanzef@gmail.com>";
@ -626,7 +632,6 @@
vlstill = "Vladimír Štill <xstill@fi.muni.cz>";
vmandela = "Venkateswara Rao Mandela <venkat.mandela@gmail.com>";
vmchale = "Vanessa McHale <tmchale@wisc.edu>";
valeriangalliat = "Valérian Galliat <val@codejam.info>";
volhovm = "Mikhail Volkhov <volhovm.cs@gmail.com>";
volth = "Jaroslavas Pocepko <jaroslavas@volth.com>";
vozz = "Oliver Hunt <oliver.huntuk@gmail.com>";
@ -648,6 +653,7 @@
xvapx = "Marti Serra <marti.serra.coscollano@gmail.com>";
xwvvvvwx = "David Terry <davidterry@posteo.de>";
yarr = "Dmitry V. <savraz@gmail.com>";
yegortimoshenko = "Yegor Timoshenko <yegortimoshenko@gmail.com>";
yochai = "Yochai <yochai@titat.info>";
yorickvp = "Yorick van Pelt <yorickvanpelt@gmail.com>";
yuriaisaka = "Yuri Aisaka <yuri.aisaka+nix@gmail.com>";

View File

@ -9,17 +9,15 @@ GNOME_FTP=ftp.gnome.org/pub/GNOME/sources
NO_GNOME_MAJOR="ghex gtkhtml gdm"
usage() {
echo "Usage: $0 gnome_dir <show project>|<update project>|<update-all> [major.minor]" >&2
echo "gnome_dir is for example pkgs/desktops/gnome-3/3.18" >&2
echo "Usage: $0 <show project>|<update project>|<update-all> [major.minor]" >&2
exit 0
}
if [ "$#" -lt 2 ]; then
if [ "$#" -lt 1 ]; then
usage
fi
GNOME_TOP=$1
shift
GNOME_TOP=pkgs/desktops/gnome-3
action=$1

View File

@ -13,10 +13,8 @@ from pyquery import PyQuery as pq
maintainers_json = subprocess.check_output([
'nix-instantiate',
'lib/maintainers.nix',
'--eval',
'--json'])
'nix-instantiate', '-E', 'import ./lib/maintainers.nix {}', '--eval', '--json'
])
maintainers = json.loads(maintainers_json)
MAINTAINERS = {v: k for k, v in maintainers.iteritems()}

View File

@ -45,6 +45,33 @@ has the following highlights: </para>
even though <literal>HDMI-0</literal> is the first head in the list.
</para>
</listitem>
<listitem>
<para>
The handling of SSL in the nginx module has been cleaned up, renaming
the misnamed <literal>enableSSL</literal> to <literal>onlySSL</literal>,
which reflects its original intention. It is not to be used with the
already existing <literal>forceSSL</literal>, which creates a second
non-SSL virtual host redirecting to the SSL virtual host. Combining the
two had previously worked only by chance, due to specific implementation
details. If you had specified both, please remove the
<literal>enableSSL</literal> option to keep the previous behaviour.
</para>
<para>
Another <literal>addSSL</literal> option has been introduced to configure
both a non-SSL virtual host and an SSL virtual host.
</para>
<para>
Options to configure <literal>resolver</literal>s and
<literal>upstream</literal>s have been introduced. See their option
descriptions for further details.
</para>
<para>
The <literal>port</literal> option has been replaced by a more generic
<literal>listen</literal> option, which makes it possible to specify
multiple addresses, ports and SSL configurations, depending on the new
SSL handling mentioned above.
</para>
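<para>
As an illustrative sketch only, a virtual host using the new handling
might be declared as follows (assuming the <literal>enableACME</literal>
virtual host option for certificates):
<programlisting>
services.nginx.virtualHosts."example.org" = {
  addSSL = true;      # serve both a non-SSL and an SSL virtual host
  enableACME = true;  # obtain the certificate via ACME
};
</programlisting>
</para>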
</listitem>
</itemizedlist>
<para>The following new services were added since the last release:</para>
@ -62,10 +89,17 @@ following incompatible changes:</para>
<itemizedlist>
<listitem>
<para>
<literal>aiccu</literal> package was removed. This is due to SixXS
The <literal>aiccu</literal> package was removed. This is due to SixXS
<link xlink:href="https://www.sixxs.net/main/"> sunsetting</link> its IPv6 tunnel.
</para>
</listitem>
<listitem>
<para>
The <literal>fanctl</literal> package and <literal>fan</literal> module
have been removed due to the developers not upstreaming their iproute2
patches and lagging with compatibility to recent iproute2 versions.
</para>
</listitem>
<listitem>
<para>
Top-level <literal>idea</literal> package collection was renamed.
@ -202,6 +236,59 @@ rmdir /var/lib/ipfs/.ipfs
<command>gpgv</command>, etc.
</para>
</listitem>
<listitem>
<para>
<literal>services.mysql</literal> now has declarative
configuration of databases and users with the <literal>ensureDatabases</literal> and
<literal>ensureUsers</literal> options.
</para>
<para>
These options will never delete existing databases and users,
not even when the values of the options are changed.
</para>
<para>
The MySQL users will be identified using
<link xlink:href="https://mariadb.com/kb/en/library/authentication-plugin-unix-socket/">
Unix socket authentication</link>. This authenticates only the
Unix user of the same name, and does so without requiring a
password.
</para>
<para>
If you have previously created a MySQL <literal>root</literal>
user <emphasis>with a password</emphasis>, you will need to add a
<literal>root</literal> user for Unix socket authentication
before using the new options. This can be done by running the
following SQL script:
<programlisting language="sql">
CREATE USER 'root'@'%' IDENTIFIED BY '';
GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' WITH GRANT OPTION;
FLUSH PRIVILEGES;
-- Optionally, delete the password-authenticated user:
-- DROP USER 'root'@'localhost';
</programlisting>
</para>
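<para>
For illustration only, a declarative setup using the new options could
look roughly like this (database name, user name and the
<literal>ensurePermissions</literal> attribute layout are assumptions):
<programlisting>
services.mysql = {
  enable = true;
  package = pkgs.mariadb;
  ensureDatabases = [ "exampledb" ];
  ensureUsers = [
    { name = "exampleuser";
      ensurePermissions = { "exampledb.*" = "ALL PRIVILEGES"; };
    }
  ];
};
</programlisting>
</para>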
</listitem>
<listitem>
<para>
The <literal>sha256</literal> argument value of the
<literal>dockerTools.pullImage</literal> expression must be
updated, since the mechanism used to download the image has
changed. Skopeo is now used to pull the image instead of the
Docker daemon.
</para>
</listitem>
<listitem>
<para>
Templated systemd services, e.g. <literal>container@name</literal>, are
now handled correctly when switching to a new configuration, resulting
in them being reloaded.
</para>
</listitem>
</itemizedlist>
<para>Other notable improvements:</para>
@ -257,14 +344,48 @@ rmdir /var/lib/ipfs/.ipfs
</listitem>
<listitem>
<para>
<literal>sha256</literal> argument value of
<literal>dockerTools.pullImage</literal> expression must be
updated since the mechanism to download the image has been
changed. Skopeo is now used to pull the image instead of the
Docker daemon.
Definitions for <filename>/etc/hosts</filename> can now be specified
declaratively with <literal>networking.hosts</literal>.
</para>
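<para>
A minimal sketch of the new option (addresses and host names are
placeholders):
<programlisting>
networking.hosts = {
  "127.0.0.1" = [ "foo.local" ];
  "192.168.1.10" = [ "build.example.org" "build" ];
};
</programlisting>
</para>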
</listitem>
<listitem>
<para>
Two new options have been added to the installer loader, and the
default has changed: the kernel log verbosity has been lowered to the
upstream default for the default options, in order not to spam the
console when e.g. joining a network.
</para>
<para>
A new <literal>debug</literal> option has therefore been added, which
restores the previous verbose log level so that debugging remains
easily accessible.
</para>
<para>
Additionally a <literal>copytoram</literal> option has been added,
which makes it possible to remove the install medium after booting.
This allows tethering from your phone after booting from it.
</para>
<para>
<literal>services.gitlab-runner.configOptions</literal> has been added
to specify the configuration of gitlab-runners declaratively.
</para>
<para>
<literal>services.jenkins.plugins</literal> has been added
to install plugins easily; the plugin set can be generated with jenkinsPlugins2nix.
</para>
<para>
<literal>services.postfix.config</literal> has been added
to specify the main.cf with NixOS options. Additionally, other options
have been added to the postfix module, and it has been improved further.
</para>
<para>
The GitLab package and module have been updated to the latest 9.5 release.
</para>
<para>
The <literal>systemd-boot</literal> boot loader now lists the NixOS
version, kernel version and build date of all bootable generations.
</para>
</listitem>
</itemizedlist>
</section>

View File

@ -77,7 +77,6 @@ let
excludedOptions = [
"boot.systemd.services"
"systemd.services"
"environment.gnome3.packageSet"
"kde.extraPackages"
];
excludeOptions = list:

View File

@ -9,9 +9,7 @@ let
cfg = config.networking;
dnsmasqResolve = config.services.dnsmasq.enable &&
config.services.dnsmasq.resolveLocalQueries;
bindResolve = config.services.bind.enable &&
config.services.bind.resolveLocalQueries;
hasLocalResolver = bindResolve || dnsmasqResolve;
hasLocalResolver = config.services.bind.enable || dnsmasqResolve;
resolvconfOptions = cfg.resolvconfOptions
++ optional cfg.dnsSingleRequest "single-request"

View File

@ -46,17 +46,24 @@ let
# A variant to boot with 'nomodeset'
LABEL boot-nomodeset
MENU LABEL NixOS ${config.system.nixosVersion}${config.isoImage.appendToMenuLabel} (with nomodeset)
MENU LABEL NixOS ${config.system.nixosVersion}${config.isoImage.appendToMenuLabel} (nomodeset)
LINUX /boot/bzImage
APPEND init=${config.system.build.toplevel}/init ${toString config.boot.kernelParams} nomodeset
INITRD /boot/initrd
# A variant to boot with 'copytoram'
LABEL boot-copytoram
MENU LABEL NixOS ${config.system.nixosVersion}${config.isoImage.appendToMenuLabel} (with copytoram)
MENU LABEL NixOS ${config.system.nixosVersion}${config.isoImage.appendToMenuLabel} (copytoram)
LINUX /boot/bzImage
APPEND init=${config.system.build.toplevel}/init ${toString config.boot.kernelParams} copytoram
INITRD /boot/initrd
# A variant to boot with verbose logging to the console
LABEL boot-debug
MENU LABEL NixOS ${config.system.nixosVersion}${config.isoImage.appendToMenuLabel} (debug)
LINUX /boot/bzImage
APPEND init=${config.system.build.toplevel}/init ${toString config.boot.kernelParams} loglevel=7
INITRD /boot/initrd
'';
isolinuxMemtest86Entry = ''
@ -74,25 +81,43 @@ let
cp -v ${pkgs.systemd}/lib/systemd/boot/efi/systemd-boot${targetArch}.efi $out/EFI/boot/boot${targetArch}.efi
mkdir -p $out/loader/entries
echo "title NixOS Live CD" > $out/loader/entries/nixos-livecd.conf
echo "linux /boot/bzImage" >> $out/loader/entries/nixos-livecd.conf
echo "initrd /boot/initrd" >> $out/loader/entries/nixos-livecd.conf
echo "options init=${config.system.build.toplevel}/init ${toString config.boot.kernelParams}" >> $out/loader/entries/nixos-livecd.conf
cat << EOF > $out/loader/entries/nixos-iso.conf
title NixOS ${config.system.nixosVersion}${config.isoImage.appendToMenuLabel}
linux /boot/bzImage
initrd /boot/initrd
options init=${config.system.build.toplevel}/init ${toString config.boot.kernelParams}
EOF
# A variant to boot with 'nomodeset'
echo "title NixOS Live CD (with nomodeset)" > $out/loader/entries/nixos-livecd-nomodeset.conf
echo "linux /boot/bzImage" >> $out/loader/entries/nixos-livecd-nomodeset.conf
echo "initrd /boot/initrd" >> $out/loader/entries/nixos-livecd-nomodeset.conf
echo "options init=${config.system.build.toplevel}/init ${toString config.boot.kernelParams} nomodeset" >> $out/loader/entries/nixos-livecd-nomodeset.conf
cat << EOF > $out/loader/entries/nixos-iso-nomodeset.conf
title NixOS ${config.system.nixosVersion}${config.isoImage.appendToMenuLabel}
version nomodeset
linux /boot/bzImage
initrd /boot/initrd
options init=${config.system.build.toplevel}/init ${toString config.boot.kernelParams} nomodeset
EOF
# A variant to boot with 'copytoram'
echo "title NixOS Live CD (with copytoram)" > $out/loader/entries/nixos-livecd-copytoram.conf
echo "linux /boot/bzImage" >> $out/loader/entries/nixos-livecd-copytoram.conf
echo "initrd /boot/initrd" >> $out/loader/entries/nixos-livecd-copytoram.conf
echo "options init=${config.system.build.toplevel}/init ${toString config.boot.kernelParams} copytoram" >> $out/loader/entries/nixos-livecd-copytoram.conf
cat << EOF > $out/loader/entries/nixos-iso-copytoram.conf
title NixOS ${config.system.nixosVersion}${config.isoImage.appendToMenuLabel}
version copytoram
linux /boot/bzImage
initrd /boot/initrd
options init=${config.system.build.toplevel}/init ${toString config.boot.kernelParams} copytoram
EOF
echo "default nixos-livecd" > $out/loader/loader.conf
echo "timeout ${builtins.toString config.boot.loader.timeout}" >> $out/loader/loader.conf
# A variant to boot with verbose logging to the console
cat << EOF > $out/loader/entries/nixos-iso-debug.conf
title NixOS ${config.system.nixosVersion}${config.isoImage.appendToMenuLabel} (debug)
linux /boot/bzImage
initrd /boot/initrd
options init=${config.system.build.toplevel}/init ${toString config.boot.kernelParams} loglevel=7
EOF
cat << EOF > $out/loader/loader.conf
default nixos-iso
timeout ${builtins.toString config.boot.loader.timeout}
EOF
'';
efiImg = pkgs.runCommand "efi-image_eltorito" { buildInputs = [ pkgs.mtools pkgs.libfaketime ]; }

View File

@ -583,9 +583,15 @@ $bootLoaderConfig
# List packages installed in system profile. To search by name, run:
# \$ nix-env -qaP | grep wget
# environment.systemPackages = with pkgs; [
# wget
# wget vim
# ];
# Some programs need SUID wrappers, can be configured further or are
# started in user sessions.
# programs.bash.enableCompletion = true;
# programs.mtr.enable = true;
# programs.gnupg.agent = { enable = true; enableSSHSupport = true; };
# List services that you want to enable:
# Enable the OpenSSH daemon.

View File

@ -102,7 +102,7 @@ fi
extraBuildFlags+=(--option "build-users-group" "$buildUsersGroup")
# Inherit binary caches from the host
# TODO: will this still work with Nix 1.12 now that it has no perl? Probably not...
# TODO: will this still work with Nix 1.12 now that it has no perl? Probably not...
binary_caches="$(@perl@/bin/perl -I @nix@/lib/perl5/site_perl/*/* -e 'use Nix::Config; Nix::Config::readConfig; print $Nix::Config::config{"binary-caches"};')"
extraBuildFlags+=(--option "binary-caches" "$binary_caches")
@ -113,8 +113,33 @@ if [[ -z "$closure" ]]; then
fi
unset NIXOS_CONFIG
# TODO: do I need to set NIX_SUBSTITUTERS here or is the --option binary-caches above enough?
# These get created in nixos-prepare-root as well, but we want to make sure they're here in case we're
# running with --chroot. TODO: --chroot should just be split into a separate tool.
mkdir -m 0755 -p "$mountPoint/dev" "$mountPoint/proc" "$mountPoint/sys"
# Set up some bind mounts we'll want regardless of chroot or not
mount --rbind /dev "$mountPoint/dev"
mount --rbind /proc "$mountPoint/proc"
mount --rbind /sys "$mountPoint/sys"
# If we asked for a chroot, that means we're not actually installing anything (yeah I was confused too)
# and we just want to run a command in the context of a $mountPoint that we're assuming has already been
# set up by a previous nixos-install invocation. In that case we set up some remaining bind mounts and
# exec the requested command, skipping the rest of the installation procedure.
if [ -n "$runChroot" ]; then
mount -t tmpfs -o "mode=0755" none $mountPoint/run
rm -rf $mountPoint/var/run
ln -s /run $mountPoint/var/run
for f in /etc/resolv.conf /etc/hosts; do rm -f $mountPoint/$f; [ -f "$f" ] && cp -Lf $f $mountPoint/etc/; done
for f in /etc/passwd /etc/group; do touch $mountPoint/$f; [ -f "$f" ] && mount --rbind -o ro $f $mountPoint/$f; done
if ! [ -L $mountPoint/nix/var/nix/profiles/system ]; then
echo "$0: installation not finished; cannot chroot into installation directory"
exit 1
fi
ln -s /nix/var/nix/profiles/system $mountPoint/run/current-system
exec chroot $mountPoint "${chrootCommand[@]}"
fi
# A place to drop temporary closures
trap "rm -rf $tmpdir" EXIT
@ -153,9 +178,7 @@ nix-store --export $channel_root > $channel_closure
# nixos-prepare-root doesn't currently do anything with file ownership, so we set it up here instead
chown @root_uid@:@nixbld_gid@ $mountPoint/nix/store
mount --rbind /dev $mountPoint/dev
mount --rbind /proc $mountPoint/proc
mount --rbind /sys $mountPoint/sys
# Grub needs an mtab.
ln -sfn /proc/mounts $mountPoint/etc/mtab

View File

@ -426,7 +426,7 @@
teamspeak = 124;
influxdb = 125;
nsd = 126;
#gitolite = 127; # unused
gitolite = 127;
znc = 128;
polipo = 129;
mopidy = 130;

View File

@ -92,6 +92,7 @@
./programs/mosh.nix
./programs/mtr.nix
./programs/nano.nix
./programs/npm.nix
./programs/oblogout.nix
./programs/qt5ct.nix
./programs/screen.nix
@ -156,7 +157,9 @@
./services/backup/tarsnap.nix
./services/backup/znapzend.nix
./services/cluster/fleet.nix
./services/cluster/kubernetes.nix
./services/cluster/kubernetes/default.nix
./services/cluster/kubernetes/dns.nix
./services/cluster/kubernetes/dashboard.nix
./services/cluster/panamax.nix
./services/computing/boinc/client.nix
./services/computing/torque/server.nix
@ -352,6 +355,7 @@
./services/monitoring/collectd.nix
./services/monitoring/das_watchdog.nix
./services/monitoring/dd-agent/dd-agent.nix
./services/monitoring/fusion-inventory.nix
./services/monitoring/grafana.nix
./services/monitoring/graphite.nix
./services/monitoring/hdaps.nix
@ -423,12 +427,12 @@
./services/networking/ddclient.nix
./services/networking/dhcpcd.nix
./services/networking/dhcpd.nix
./services/networking/dnscache.nix
./services/networking/dnschain.nix
./services/networking/dnscrypt-proxy.nix
./services/networking/dnscrypt-wrapper.nix
./services/networking/dnsmasq.nix
./services/networking/ejabberd.nix
./services/networking/fan.nix
./services/networking/fakeroute.nix
./services/networking/ferm.nix
./services/networking/firefox/sync-server.nix
@ -524,6 +528,7 @@
./services/networking/tcpcrypt.nix
./services/networking/teamspeak3.nix
./services/networking/tinc.nix
./services/networking/tinydns.nix
./services/networking/tftpd.nix
./services/networking/tox-bootstrapd.nix
./services/networking/toxvpn.nix

View File

@ -77,7 +77,6 @@ with lib;
# Show all debug messages from the kernel but don't log refused packets
# because we have the firewall enabled. This makes installs from the
# console less cumbersome if the machine has a public IP.
boot.consoleLogLevel = mkDefault 7;
networking.firewall.logRefusedConnections = mkDefault false;
environment.systemPackages = [ pkgs.vim ];

View File

@ -0,0 +1,44 @@
{ config, lib, ... }:
with lib;
let
cfg = config.programs.npm;
in
{
###### interface
options = {
programs.npm = {
enable = mkEnableOption "<command>npm</command> global config";
npmrc = lib.mkOption {
type = lib.types.lines;
description = ''
The system-wide npm configuration.
See <link xlink:href="https://docs.npmjs.com/misc/config"/>.
'';
default = ''
prefix = ''${HOME}/.npm
'';
example = ''
prefix = ''${HOME}/.npm
https-proxy=proxy.example.com
init-license=MIT
init-author-url=http://npmjs.org
color=true
'';
};
};
};
###### implementation
config = lib.mkIf cfg.enable {
environment.etc."npmrc".text = cfg.npmrc;
environment.variables.NPM_CONFIG_GLOBALCONFIG = "/etc/npmrc";
};
}

View File

@ -105,7 +105,8 @@ in {
RABBITMQ_MNESIA_BASE = "${cfg.dataDir}/mnesia";
RABBITMQ_NODE_IP_ADDRESS = cfg.listenAddress;
RABBITMQ_NODE_PORT = toString cfg.port;
RABBITMQ_SERVER_START_ARGS = "-rabbit error_logger tty -rabbit sasl_error_logger false";
RABBITMQ_LOGS = "-";
RABBITMQ_SASL_LOGS = "-";
RABBITMQ_PID_FILE = "${cfg.dataDir}/pid";
SYS_PREFIX = "";
RABBITMQ_ENABLED_PLUGINS_FILE = pkgs.writeText "enabled_plugins" ''
@ -128,7 +129,7 @@ in {
preStart = ''
${optionalString (cfg.cookie != "") ''
echo -n ${cfg.cookie} > ${cfg.dataDir}/.erlang.cookie
chmod 400 ${cfg.dataDir}/.erlang.cookie
chmod 600 ${cfg.dataDir}/.erlang.cookie
''}
'';
};

View File

@ -0,0 +1,160 @@
{ config, pkgs, lib, ... }:
with lib;
let
cfg = config.services.kubernetes.addons.dashboard;
name = "gcr.io/google_containers/kubernetes-dashboard-amd64";
version = "v1.6.3";
image = pkgs.dockerTools.pullImage {
imageName = name;
imageTag = version;
sha256 = "0b5v7xa3s91yi9yfsw2b8wijiprnicbb02f5kqa579h4yndb3gfz";
};
in {
options.services.kubernetes.addons.dashboard = {
enable = mkEnableOption "kubernetes dashboard addon";
enableRBAC = mkOption {
description = "Whether to enable role based access control is enabled for kubernetes dashboard";
type = types.bool;
default = elem "RBAC" config.services.kubernetes.apiserver.authorizationMode;
};
};
config = mkIf cfg.enable {
services.kubernetes.kubelet.seedDockerImages = [image];
services.kubernetes.addonManager.addons = {
kubernetes-dashboard-deployment = {
kind = "Deployment";
apiVersion = "apps/v1beta1";
metadata = {
labels = {
k8s-addon = "kubernetes-dashboard.addons.k8s.io";
k8s-app = "kubernetes-dashboard";
version = version;
"kubernetes.io/cluster-service" = "true";
"addonmanager.kubernetes.io/mode" = "Reconcile";
};
name = "kubernetes-dashboard";
namespace = "kube-system";
};
spec = {
replicas = 1;
revisionHistoryLimit = 10;
selector.matchLabels."k8s-app" = "kubernetes-dashboard";
template = {
metadata = {
labels = {
k8s-addon = "kubernetes-dashboard.addons.k8s.io";
k8s-app = "kubernetes-dashboard";
version = version;
"kubernetes.io/cluster-service" = "true";
};
annotations = {
"scheduler.alpha.kubernetes.io/critical-pod" = "";
#"scheduler.alpha.kubernetes.io/tolerations" = ''[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'';
};
};
spec = {
containers = [{
name = "kubernetes-dashboard";
image = "${name}:${version}";
ports = [{
containerPort = 9090;
protocol = "TCP";
}];
resources = {
limits = {
cpu = "100m";
memory = "50Mi";
};
requests = {
cpu = "100m";
memory = "50Mi";
};
};
livenessProbe = {
httpGet = {
path = "/";
port = 9090;
};
initialDelaySeconds = 30;
timeoutSeconds = 30;
};
}];
serviceAccountName = "kubernetes-dashboard";
tolerations = [{
key = "node-role.kubernetes.io/master";
effect = "NoSchedule";
}];
};
};
};
};
kubernetes-dashboard-svc = {
apiVersion = "v1";
kind = "Service";
metadata = {
labels = {
k8s-addon = "kubernetes-dashboard.addons.k8s.io";
k8s-app = "kubernetes-dashboard";
"kubernetes.io/cluster-service" = "true";
"kubernetes.io/name" = "KubeDashboard";
"addonmanager.kubernetes.io/mode" = "Reconcile";
};
name = "kubernetes-dashboard";
namespace = "kube-system";
};
spec = {
ports = [{
port = 80;
targetPort = 9090;
}];
selector.k8s-app = "kubernetes-dashboard";
};
};
kubernetes-dashboard-sa = {
apiVersion = "v1";
kind = "ServiceAccount";
metadata = {
labels = {
k8s-app = "kubernetes-dashboard";
k8s-addon = "kubernetes-dashboard.addons.k8s.io";
"addonmanager.kubernetes.io/mode" = "Reconcile";
};
name = "kubernetes-dashboard";
namespace = "kube-system";
};
};
} // (optionalAttrs cfg.enableRBAC {
kubernetes-dashboard-crb = {
apiVersion = "rbac.authorization.k8s.io/v1beta1";
kind = "ClusterRoleBinding";
metadata = {
name = "kubernetes-dashboard";
labels = {
k8s-app = "kubernetes-dashboard";
k8s-addon = "kubernetes-dashboard.addons.k8s.io";
"addonmanager.kubernetes.io/mode" = "Reconcile";
};
};
roleRef = {
apiGroup = "rbac.authorization.k8s.io";
kind = "ClusterRole";
name = "cluster-admin";
};
subjects = [{
kind = "ServiceAccount";
name = "kubernetes-dashboard";
namespace = "kube-system";
}];
};
});
};
}

View File

@ -0,0 +1,311 @@
{ config, pkgs, lib, ... }:
with lib;
let
version = "1.14.4";
k8s-dns-kube-dns = pkgs.dockerTools.pullImage {
imageName = "gcr.io/google_containers/k8s-dns-kube-dns-amd64";
imageTag = version;
sha256 = "0g64jc2076ng28xl4w3w9svf7hc6s9h8rq9mhvvwpfy2p6lgj6gy";
};
k8s-dns-dnsmasq-nanny = pkgs.dockerTools.pullImage {
imageName = "gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64";
imageTag = version;
sha256 = "0sdpsbj1vismihy7ass1cn96nwmav6sf3r5h6i4k2dxha0y0jsh5";
};
k8s-dns-sidecar = pkgs.dockerTools.pullImage {
imageName = "gcr.io/google_containers/k8s-dns-sidecar-amd64";
imageTag = version;
sha256 = "01zpi189hpy2z62awl38fap908s8rrhc3v5gb6m90y2pycl4ad6q";
};
cfg = config.services.kubernetes.addons.dns;
in {
options.services.kubernetes.addons.dns = {
enable = mkEnableOption "kubernetes dns addon";
clusterIp = mkOption {
description = "Dns addon clusterIP";
# this default is also what kubernetes uses
default = (
concatStringsSep "." (
take 3 (splitString "." config.services.kubernetes.apiserver.serviceClusterIpRange
))
) + ".254";
type = types.str;
};
clusterDomain = mkOption {
description = "Dns cluster domain";
default = "cluster.local";
type = types.str;
};
};
config = mkIf cfg.enable {
services.kubernetes.kubelet.seedDockerImages = [
k8s-dns-kube-dns
k8s-dns-dnsmasq-nanny
k8s-dns-sidecar
];
services.kubernetes.addonManager.addons = {
kubedns-deployment = {
apiVersion = "apps/v1beta1";
kind = "Deployment";
metadata = {
labels = {
"addonmanager.kubernetes.io/mode" = "Reconcile";
"k8s-app" = "kube-dns";
"kubernetes.io/cluster-service" = "true";
};
name = "kube-dns";
namespace = "kube-system";
};
spec = {
selector.matchLabels."k8s-app" = "kube-dns";
strategy = {
rollingUpdate = {
maxSurge = "10%";
maxUnavailable = 0;
};
};
template = {
metadata = {
annotations."scheduler.alpha.kubernetes.io/critical-pod" = "";
labels.k8s-app = "kube-dns";
};
spec = {
containers = [
{
name = "kubedns";
args = [
"--domain=${cfg.clusterDomain}"
"--dns-port=10053"
"--config-dir=/kube-dns-config"
"--v=2"
];
env = [
{
name = "PROMETHEUS_PORT";
value = "10055";
}
];
image = "gcr.io/google_containers/k8s-dns-kube-dns-amd64:${version}";
livenessProbe = {
failureThreshold = 5;
httpGet = {
path = "/healthcheck/kubedns";
port = 10054;
scheme = "HTTP";
};
initialDelaySeconds = 60;
successThreshold = 1;
timeoutSeconds = 5;
};
ports = [
{
containerPort = 10053;
name = "dns-local";
protocol = "UDP";
}
{
containerPort = 10053;
name = "dns-tcp-local";
protocol = "TCP";
}
{
containerPort = 10055;
name = "metrics";
protocol = "TCP";
}
];
readinessProbe = {
httpGet = {
path = "/readiness";
port = 8081;
scheme = "HTTP";
};
initialDelaySeconds = 3;
timeoutSeconds = 5;
};
resources = {
limits.memory = "170Mi";
requests = {
cpu = "100m";
memory = "70Mi";
};
};
volumeMounts = [
{
mountPath = "/kube-dns-config";
name = "kube-dns-config";
}
];
}
{
args = [
"-v=2"
"-logtostderr"
"-configDir=/etc/k8s/dns/dnsmasq-nanny"
"-restartDnsmasq=true"
"--"
"-k"
"--cache-size=1000"
"--log-facility=-"
"--server=/${cfg.clusterDomain}/127.0.0.1#10053"
"--server=/in-addr.arpa/127.0.0.1#10053"
"--server=/ip6.arpa/127.0.0.1#10053"
];
image = "gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:${version}";
livenessProbe = {
failureThreshold = 5;
httpGet = {
path = "/healthcheck/dnsmasq";
port = 10054;
scheme = "HTTP";
};
initialDelaySeconds = 60;
successThreshold = 1;
timeoutSeconds = 5;
};
name = "dnsmasq";
ports = [
{
containerPort = 53;
name = "dns";
protocol = "UDP";
}
{
containerPort = 53;
name = "dns-tcp";
protocol = "TCP";
}
];
resources = {
requests = {
cpu = "150m";
memory = "20Mi";
};
};
volumeMounts = [
{
mountPath = "/etc/k8s/dns/dnsmasq-nanny";
name = "kube-dns-config";
}
];
}
{
name = "sidecar";
image = "gcr.io/google_containers/k8s-dns-sidecar-amd64:${version}";
args = [
"--v=2"
"--logtostderr"
"--probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.${cfg.clusterDomain},5,A"
"--probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.${cfg.clusterDomain},5,A"
];
livenessProbe = {
failureThreshold = 5;
httpGet = {
path = "/metrics";
port = 10054;
scheme = "HTTP";
};
initialDelaySeconds = 60;
successThreshold = 1;
timeoutSeconds = 5;
};
ports = [
{
containerPort = 10054;
name = "metrics";
protocol = "TCP";
}
];
resources = {
requests = {
cpu = "10m";
memory = "20Mi";
};
};
}
];
dnsPolicy = "Default";
serviceAccountName = "kube-dns";
tolerations = [
{
key = "CriticalAddonsOnly";
operator = "Exists";
}
];
volumes = [
{
configMap = {
name = "kube-dns";
optional = true;
};
name = "kube-dns-config";
}
];
};
};
};
};
kubedns-svc = {
apiVersion = "v1";
kind = "Service";
metadata = {
labels = {
"addonmanager.kubernetes.io/mode" = "Reconcile";
"k8s-app" = "kube-dns";
"kubernetes.io/cluster-service" = "true";
"kubernetes.io/name" = "KubeDNS";
};
name = "kube-dns";
namespace = "kube-system";
};
spec = {
clusterIP = cfg.clusterIp;
ports = [
{name = "dns"; port = 53; protocol = "UDP";}
{name = "dns-tcp"; port = 53; protocol = "TCP";}
];
selector.k8s-app = "kube-dns";
};
};
kubedns-sa = {
apiVersion = "v1";
kind = "ServiceAccount";
metadata = {
name = "kube-dns";
namespace = "kube-system";
labels = {
"kubernetes.io/cluster-service" = "true";
"addonmanager.kubernetes.io/mode" = "Reconcile";
};
};
};
kubedns-cm = {
apiVersion = "v1";
kind = "ConfigMap";
metadata = {
name = "kube-dns";
namespace = "kube-system";
labels = {
"addonmanager.kubernetes.io/mode" = "EnsureExists";
};
};
};
};
services.kubernetes.kubelet.clusterDns = mkDefault cfg.clusterIp;
};
}

View File

@ -170,11 +170,16 @@ in
mkdir -m 0770 -p ${cfg.dataDir}
if [ "$(id -u)" = 0 ]; then chown -R ${cfg.user}:${cfg.group} ${cfg.dataDir}; fi
'';
postStart = mkBefore ''
until ${pkgs.curl.bin}/bin/curl -s -o /dev/null ${if configOptions.http.https-enabled then "-k https" else "http"}://127.0.0.1${toString configOptions.http.bind-address}/ping; do
sleep 1;
done
'';
postStart =
let
scheme = if configOptions.http.https-enabled then "-k https" else "http";
bindAddr = (ba: if hasPrefix ":" ba then "127.0.0.1${ba}" else "${ba}")(toString configOptions.http.bind-address);
in
mkBefore ''
until ${pkgs.curl.bin}/bin/curl -s -o /dev/null ${scheme}://${bindAddr}/ping; do
sleep 1;
done
'';
};
users.extraUsers = optional (cfg.user == "influxdb") {

View File

@ -34,6 +34,8 @@ with lib;
services.dbus.packages = [ pkgs.at_spi2_core ];
systemd.packages = [ pkgs.at_spi2_core ];
};
}

View File

@ -4,9 +4,6 @@
with lib;
let
gnome3 = config.environment.gnome3.packageSet;
in
{
###### interface
@ -33,11 +30,11 @@ in
config = mkIf config.services.gnome3.evolution-data-server.enable {
environment.systemPackages = [ gnome3.evolution_data_server ];
environment.systemPackages = [ pkgs.gnome3.evolution_data_server ];
services.dbus.packages = [ gnome3.evolution_data_server ];
services.dbus.packages = [ pkgs.gnome3.evolution_data_server ];
systemd.packages = [ gnome3.evolution_data_server ];
systemd.packages = [ pkgs.gnome3.evolution_data_server ];
};

View File

@ -4,9 +4,6 @@
with lib;
let
gnome3 = config.environment.gnome3.packageSet;
in
{
###### interface
@ -33,9 +30,9 @@ in
config = mkIf config.services.gnome3.gnome-disks.enable {
environment.systemPackages = [ gnome3.gnome-disk-utility ];
environment.systemPackages = [ pkgs.gnome3.gnome-disk-utility ];
services.dbus.packages = [ gnome3.gnome-disk-utility ];
services.dbus.packages = [ pkgs.gnome3.gnome-disk-utility ];
};

View File

@ -4,9 +4,6 @@
with lib;
let
gnome3 = config.environment.gnome3.packageSet;
in
{
###### interface
@ -33,9 +30,9 @@ in
config = mkIf config.services.gnome3.gnome-documents.enable {
environment.systemPackages = [ gnome3.gnome-documents ];
environment.systemPackages = [ pkgs.gnome3.gnome-documents ];
services.dbus.packages = [ gnome3.gnome-documents ];
services.dbus.packages = [ pkgs.gnome3.gnome-documents ];
services.gnome3.gnome-online-accounts.enable = true;

View File

@ -4,9 +4,6 @@
with lib;
let
gnome3 = config.environment.gnome3.packageSet;
in
{
###### interface
@ -34,9 +31,9 @@ in
config = mkIf config.services.gnome3.gnome-keyring.enable {
environment.systemPackages = [ gnome3.gnome_keyring ];
environment.systemPackages = [ pkgs.gnome3.gnome_keyring ];
services.dbus.packages = [ gnome3.gnome_keyring gnome3.gcr ];
services.dbus.packages = [ pkgs.gnome3.gnome_keyring pkgs.gnome3.gcr ];
};

View File

@ -4,9 +4,6 @@
with lib;
let
gnome3 = config.environment.gnome3.packageSet;
in
{
###### interface
@ -33,9 +30,9 @@ in
config = mkIf config.services.gnome3.gnome-online-accounts.enable {
environment.systemPackages = [ gnome3.gnome_online_accounts ];
environment.systemPackages = [ pkgs.gnome3.gnome_online_accounts ];
services.dbus.packages = [ gnome3.gnome_online_accounts ];
services.dbus.packages = [ pkgs.gnome3.gnome_online_accounts ];
};

View File

@ -4,9 +4,6 @@
with lib;
let
gnome3 = config.environment.gnome3.packageSet;
in
{
###### interface
@ -33,9 +30,9 @@ in
config = mkIf config.services.gnome3.gnome-online-miners.enable {
environment.systemPackages = [ gnome3.gnome-online-miners ];
environment.systemPackages = [ pkgs.gnome3.gnome-online-miners ];
services.dbus.packages = [ gnome3.gnome-online-miners ];
services.dbus.packages = [ pkgs.gnome3.gnome-online-miners ];
};

View File

@ -4,9 +4,6 @@
with lib;
let
gnome3 = config.environment.gnome3.packageSet;
in
{
###### interface
@ -33,11 +30,11 @@ in
config = mkIf config.services.gnome3.gnome-terminal-server.enable {
environment.systemPackages = [ gnome3.gnome_terminal ];
environment.systemPackages = [ pkgs.gnome3.gnome_terminal ];
services.dbus.packages = [ gnome3.gnome_terminal ];
services.dbus.packages = [ pkgs.gnome3.gnome_terminal ];
systemd.packages = [ gnome3.gnome_terminal ];
systemd.packages = [ pkgs.gnome3.gnome_terminal ];
};

View File

@ -4,9 +4,6 @@
with lib;
let
gnome3 = config.environment.gnome3.packageSet;
in
{
###### interface
@ -33,9 +30,9 @@ in
config = mkIf config.services.gnome3.gnome-user-share.enable {
environment.systemPackages = [ gnome3.gnome-user-share ];
environment.systemPackages = [ pkgs.gnome3.gnome-user-share ];
services.xserver.displayManager.sessionCommands = with gnome3; ''
services.xserver.displayManager.sessionCommands = with pkgs.gnome3; ''
# Don't let gnome-control-center depend upon gnome-user-share
export XDG_DATA_DIRS=$XDG_DATA_DIRS''${XDG_DATA_DIRS:+:}${gnome-user-share}/share/gsettings-schemas/${gnome-user-share.name}
'';

View File

@ -1,11 +1,8 @@
# GPaste daemon.
{ config, lib, ... }:
{ config, lib, pkgs, ... }:
with lib;
let
gnome3 = config.environment.gnome3.packageSet;
in
{
###### interface
options = {
@ -22,9 +19,9 @@ in
###### implementation
config = mkIf config.services.gnome3.gpaste.enable {
environment.systemPackages = [ gnome3.gpaste ];
services.dbus.packages = [ gnome3.gpaste ];
services.xserver.desktopManager.gnome3.sessionPath = [ gnome3.gpaste ];
systemd.packages = [ gnome3.gpaste ];
environment.systemPackages = [ pkgs.gnome3.gpaste ];
services.dbus.packages = [ pkgs.gnome3.gpaste ];
services.xserver.desktopManager.gnome3.sessionPath = [ pkgs.gnome3.gpaste ];
systemd.packages = [ pkgs.gnome3.gpaste ];
};
}

View File

@ -4,9 +4,6 @@
with lib;
let
gnome3 = config.environment.gnome3.packageSet;
in
{
###### interface
@ -33,11 +30,11 @@ in
config = mkIf config.services.gnome3.gvfs.enable {
environment.systemPackages = [ gnome3.gvfs ];
environment.systemPackages = [ pkgs.gnome3.gvfs ];
services.dbus.packages = [ gnome3.gvfs ];
services.dbus.packages = [ pkgs.gnome3.gvfs ];
systemd.packages = [ gnome3.gvfs ];
systemd.packages = [ pkgs.gnome3.gvfs ];
services.udev.packages = [ pkgs.libmtp.bin ];

View File

@ -4,9 +4,6 @@
with lib;
let
gnome3 = config.environment.gnome3.packageSet;
in
{
###### interface
@ -32,9 +29,9 @@ in
config = mkIf config.services.gnome3.seahorse.enable {
environment.systemPackages = [ gnome3.seahorse ];
environment.systemPackages = [ pkgs.gnome3.seahorse ];
services.dbus.packages = [ gnome3.seahorse ];
services.dbus.packages = [ pkgs.gnome3.seahorse ];
};

View File

@ -4,9 +4,6 @@
with lib;
let
gnome3 = config.environment.gnome3.packageSet;
in
{
###### interface
@ -32,9 +29,9 @@ in
config = mkIf config.services.gnome3.sushi.enable {
environment.systemPackages = [ gnome3.sushi ];
environment.systemPackages = [ pkgs.gnome3.sushi ];
services.dbus.packages = [ gnome3.sushi ];
services.dbus.packages = [ pkgs.gnome3.sushi ];
};

View File

@ -4,9 +4,6 @@
with lib;
let
gnome3 = config.environment.gnome3.packageSet;
in
{
###### interface
@ -33,11 +30,11 @@ in
config = mkIf config.services.gnome3.tracker.enable {
environment.systemPackages = [ gnome3.tracker ];
environment.systemPackages = [ pkgs.gnome3.tracker ];
services.dbus.packages = [ gnome3.tracker ];
services.dbus.packages = [ pkgs.gnome3.tracker ];
systemd.packages = [ gnome3.tracker ];
systemd.packages = [ pkgs.gnome3.tracker ];
};

View File

@ -41,6 +41,15 @@ in
'';
};
enableGitAnnex = mkOption {
type = types.bool;
default = false;
description = ''
Enable git-annex support. Uses the <literal>extraGitoliteRc</literal> option
to apply the necessary configuration.
'';
};
commonHooks = mkOption {
type = types.listOf types.path;
default = [];
@ -75,6 +84,8 @@ in
will need to take any customizations you may have in
<literal>~/.gitolite.rc</literal>, convert them to appropriate Perl
statements, add them to this option, and remove the file.
See also the <literal>enableGitAnnex</literal> option.
'';
};
@ -85,6 +96,14 @@ in
Gitolite user account. This is the username of the gitolite endpoint.
'';
};
group = mkOption {
type = types.str;
default = "gitolite";
description = ''
Primary group of the Gitolite user account.
'';
};
};
};
@ -116,13 +135,20 @@ in
''} >>"$out/gitolite.rc"
'';
in {
services.gitolite.extraGitoliteRc = optionalString cfg.enableGitAnnex ''
# Enable git-annex support:
push( @{$RC{ENABLE}}, 'git-annex-shell ua');
'';
users.extraUsers.${cfg.user} = {
description = "Gitolite user";
home = cfg.dataDir;
createHome = true;
uid = config.ids.uids.gitolite;
group = cfg.group;
useDefaultShell = true;
};
users.extraGroups."${cfg.group}".gid = config.ids.gids.gitolite;
systemd.services."gitolite-init" = {
description = "Gitolite initialization";
@ -188,6 +214,7 @@ in
'';
};
environment.systemPackages = [ pkgs.gitolite pkgs.git ];
environment.systemPackages = [ pkgs.gitolite pkgs.git ]
++ optional cfg.enableGitAnnex pkgs.gitAndTools.git-annex;
});
}

View File

@ -0,0 +1,66 @@
# Fusion Inventory daemon.
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.fusionInventory;
configFile = pkgs.writeText "fusion_inventory.conf" ''
server = ${concatStringsSep ", " cfg.servers}
logger = stderr
${cfg.extraConfig}
'';
in {
###### interface
options = {
services.fusionInventory = {
enable = mkEnableOption "Fusion Inventory Agent";
servers = mkOption {
type = types.listOf types.str;
description = ''
The urls of the OCS/GLPI servers to connect to.
'';
};
extraConfig = mkOption {
default = "";
type = types.lines;
description = ''
Configuration that is injected verbatim into the configuration file.
'';
};
};
};
###### implementation
config = mkIf cfg.enable {
users.extraUsers = singleton {
name = "fusion-inventory";
description = "FusionInventory user";
};
systemd.services."fusion-inventory" = {
description = "Fusion Inventory Agent";
wantedBy = [ "multi-user.target" ];
environment = {
OPTIONS = "--no-category=software";
};
serviceConfig = {
ExecStart = "${pkgs.fusionInventory}/bin/fusioninventory-agent --conf-file=${configFile} --daemon --no-fork";
};
};
};
}

View File

@ -27,6 +27,14 @@ in
'';
};
extraNfsdConfig = mkOption {
type = types.str;
default = "";
description = ''
Extra configuration options for the [nfsd] section of /etc/nfs.conf.
'';
};
exports = mkOption {
type = types.lines;
default = "";
@ -107,6 +115,7 @@ in
[nfsd]
threads=${toString cfg.nproc}
${optionalString (cfg.hostName != null) "host=${cfg.hostName}"}
${cfg.extraNfsdConfig}
[mountd]
${optionalString (cfg.mountdPort != null) "port=${toString cfg.mountdPort}"}

View File

@ -151,15 +151,6 @@ in
";
};
resolveLocalQueries = mkOption {
type = types.bool;
default = true;
description = ''
Whether bind should resolve local queries (i.e. add 127.0.0.1 to
/etc/resolv.conf, overriding networking.nameserver).
'';
};
};
};

View File

@ -0,0 +1,86 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.dnscache;
dnscache-root = pkgs.runCommand "dnscache-root" {} ''
mkdir -p $out/{servers,ip}
${concatMapStrings (ip: ''
echo > "$out/ip/"${lib.escapeShellArg ip}
'') cfg.clientIps}
${concatStrings (mapAttrsToList (host: ips: ''
${concatMapStrings (ip: ''
echo ${lib.escapeShellArg ip} > "$out/servers/"${lib.escapeShellArg host}
'') ips}
'') cfg.domainServers)}
# djbdns contains an outdated list of root servers;
# if one was not provided in config, provide a current list
if [ ! -e servers/@ ]; then
awk '/^.?.ROOT-SERVERS.NET/ { print $4 }' ${pkgs.dns-root-data}/root.hints > $out/servers/@
fi
'';
in {
###### interface
options = {
services.dnscache = {
enable = mkOption {
default = false;
type = types.bool;
description = "Whether to run the dnscache caching dns server";
};
ip = mkOption {
default = "0.0.0.0";
type = types.str;
description = "IP address on which to listen for connections";
};
clientIps = mkOption {
default = [ "127.0.0.1" ];
type = types.listOf types.str;
description = "client IP addresses (or prefixes) from which to accept connections";
example = ["192.168" "172.23.75.82"];
};
domainServers = mkOption {
default = { };
type = types.attrsOf (types.listOf types.str);
description = "table of {hostname: server} pairs to use as authoritative servers for hosts (and subhosts)";
example = {
"example.com" = ["8.8.8.8" "8.8.4.4"];
};
};
};
};
###### implementation
config = mkIf config.services.dnscache.enable {
environment.systemPackages = [ pkgs.djbdns ];
users.extraUsers.dnscache = {};
systemd.services.dnscache = {
description = "djbdns dnscache server";
wantedBy = [ "multi-user.target" ];
path = with pkgs; [ bash daemontools djbdns ];
preStart = ''
rm -rf /var/lib/dnscache
dnscache-conf dnscache dnscache /var/lib/dnscache ${config.services.dnscache.ip}
rm -rf /var/lib/dnscache/root
ln -sf ${dnscache-root} /var/lib/dnscache/root
'';
script = ''
cd /var/lib/dnscache/
exec ./run
'';
};
};
}

View File

@ -42,7 +42,7 @@ in
default = true;
description = ''
Whether dnsmasq should resolve local queries (i.e. add 127.0.0.1 to
/etc/resolv.conf overriding networking.nameservers).
/etc/resolv.conf).
'';
};

View File

@ -1,60 +0,0 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.networking.fan;
modprobe = "${pkgs.kmod}/bin/modprobe";
in
{
###### interface
options = {
networking.fan = {
enable = mkEnableOption "FAN Networking";
};
};
###### implementation
config = mkIf cfg.enable {
environment.systemPackages = [ pkgs.fanctl ];
systemd.services.fan = {
description = "FAN Networking";
wantedBy = [ "multi-user.target" ];
after = [ "network-online.target" ];
before = [ "docker.service" ];
restartIfChanged = false;
preStart = ''
if [ ! -f /proc/sys/net/fan/version ]; then
${modprobe} ipip
if [ ! -f /proc/sys/net/fan/version ]; then
echo "The Fan Networking patches have not been applied to this kernel!" 1>&2
exit 1
fi
fi
mkdir -p /var/lib/fan-networking
'';
serviceConfig = {
Type = "oneshot";
RemainAfterExit = true;
ExecStart = "${pkgs.fanctl}/bin/fanctl up -a";
ExecStop = "${pkgs.fanctl}/bin/fanctl down -a";
};
};
};
}

View File

@ -9,7 +9,7 @@ let
confFile = pkgs.writeText "radicale.conf" cfg.config;
# This enables us to default to version 2 while still not breaking configurations of people with version 1
defaultPackage = if versionAtLeast "17.09" config.system.stateVersion then {
defaultPackage = if versionAtLeast config.system.stateVersion "17.09" then {
pkg = pkgs.radicale2;
text = "pkgs.radicale2";
} else {

View File

@ -0,0 +1,54 @@
{ config, lib, pkgs, ... }:
with lib;
{
###### interface
options = {
services.tinydns = {
enable = mkOption {
default = false;
type = types.bool;
description = "Whether to run the tinydns dns server";
};
data = mkOption {
type = types.lines;
default = "";
description = "The DNS data to serve, in the format described by tinydns-data(8)";
};
ip = mkOption {
default = "0.0.0.0";
type = types.str;
description = "IP address on which to listen for connections";
};
};
};
###### implementation
config = mkIf config.services.tinydns.enable {
environment.systemPackages = [ pkgs.djbdns ];
users.extraUsers.tinydns = {};
systemd.services.tinydns = {
description = "djbdns tinydns server";
wantedBy = [ "multi-user.target" ];
path = with pkgs; [ daemontools djbdns ];
preStart = ''
rm -rf /var/lib/tinydns
tinydns-conf tinydns tinydns /var/lib/tinydns ${config.services.tinydns.ip}
cd /var/lib/tinydns/root/
ln -sf ${pkgs.writeText "tinydns-data" config.services.tinydns.data} data
tinydns-data
'';
script = ''
cd /var/lib/tinydns
exec ./run
'';
};
};
}

View File

@ -95,6 +95,14 @@ let
type = with types; listOf (submodule peerOpts);
};
allowedIPsAsRoutes = mkOption {
example = false;
default = true;
type = types.bool;
description = ''
Determines whether to add allowed IPs as routes or not.
'';
};
};
};
@ -217,11 +225,11 @@ let
"${ipCommand} link set up dev ${name}"
(map (peer:
(optionals (values.allowedIPsAsRoutes != false) (map (peer:
(map (allowedIP:
"${ipCommand} route replace ${allowedIP} dev ${name} table ${values.table}"
) peer.allowedIPs)
) values.peers)
) values.peers))
values.postSetup
]);

View File

@ -83,11 +83,11 @@ let
# Unpack Mediawiki and put the config file in its root directory.
mediawikiRoot = pkgs.stdenv.mkDerivation rec {
name= "mediawiki-1.27.3";
name= "mediawiki-1.29.1";
src = pkgs.fetchurl {
url = "http://download.wikimedia.org/mediawiki/1.27/${name}.tar.gz";
sha256 = "08x8mvc0y1gwq8rg0zm98wc6hc5j8imb6dcpx6s7392j5dc71m0i";
url = "http://download.wikimedia.org/mediawiki/1.29/${name}.tar.gz";
sha256 = "03mpazbxvb011s2nmlw5p6dc43yjgl5yrsilmj1imyykm57bwb3m";
};
skins = config.skins;

View File

@ -4,7 +4,6 @@ with lib;
let
cfg = config.services.xserver.desktopManager.gnome3;
gnome3 = config.environment.gnome3.packageSet;
# Remove packages of ys from xs, based on their names
removePackagesByName = xs: ys:
@ -28,7 +27,7 @@ let
nixos-gsettings-desktop-schemas = pkgs.runCommand "nixos-gsettings-desktop-schemas" {}
''
mkdir -p $out/share/gsettings-schemas/nixos-gsettings-overrides/glib-2.0/schemas
cp -rf ${gnome3.gsettings_desktop_schemas}/share/gsettings-schemas/gsettings-desktop-schemas*/glib-2.0/schemas/*.xml $out/share/gsettings-schemas/nixos-gsettings-overrides/glib-2.0/schemas
cp -rf ${pkgs.gnome3.gsettings_desktop_schemas}/share/gsettings-schemas/gsettings-desktop-schemas*/glib-2.0/schemas/*.xml $out/share/gsettings-schemas/nixos-gsettings-overrides/glib-2.0/schemas
${concatMapStrings (pkg: "cp -rf ${pkg}/share/gsettings-schemas/*/glib-2.0/schemas/*.xml $out/share/gsettings-schemas/nixos-gsettings-overrides/glib-2.0/schemas\n") cfg.extraGSettingsOverridePackages}
@ -61,7 +60,7 @@ in {
example = literalExample "[ pkgs.gnome3.gpaste ]";
description = "Additional list of packages to be added to the session search path.
Useful for gnome shell extensions or gsettings-conditioned autostart.";
apply = list: list ++ [ gnome3.gnome_shell gnome3.gnome-shell-extensions ];
apply = list: list ++ [ pkgs.gnome3.gnome_shell pkgs.gnome3.gnome-shell-extensions ];
};
extraGSettingsOverrides = mkOption {
@ -79,13 +78,6 @@ in {
debug = mkEnableOption "gnome-session debug messages";
};
environment.gnome3.packageSet = mkOption {
default = null;
example = literalExample "pkgs.gnome3_22";
description = "Which GNOME 3 package set to use.";
apply = p: if p == null then pkgs.gnome3 else p;
};
environment.gnome3.excludePackages = mkOption {
default = [];
example = literalExample "[ pkgs.gnome3.totem ]";
@ -169,26 +161,26 @@ in {
# Update user dirs as described in http://freedesktop.org/wiki/Software/xdg-user-dirs/
${pkgs.xdg-user-dirs}/bin/xdg-user-dirs-update
${gnome3.gnome_session}/bin/gnome-session ${optionalString cfg.debug "--debug"} &
${pkgs.gnome3.gnome_session}/bin/gnome-session ${optionalString cfg.debug "--debug"} &
waitPID=$!
'';
};
services.xserver.updateDbusEnvironment = true;
environment.variables.GIO_EXTRA_MODULES = [ "${lib.getLib gnome3.dconf}/lib/gio/modules"
"${gnome3.glib_networking.out}/lib/gio/modules"
"${gnome3.gvfs}/lib/gio/modules" ];
environment.systemPackages = gnome3.corePackages ++ cfg.sessionPath
++ (removePackagesByName gnome3.optionalPackages config.environment.gnome3.excludePackages);
environment.variables.GIO_EXTRA_MODULES = [ "${lib.getLib pkgs.gnome3.dconf}/lib/gio/modules"
"${pkgs.gnome3.glib_networking.out}/lib/gio/modules"
"${pkgs.gnome3.gvfs}/lib/gio/modules" ];
environment.systemPackages = pkgs.gnome3.corePackages ++ cfg.sessionPath
++ (removePackagesByName pkgs.gnome3.optionalPackages config.environment.gnome3.excludePackages);
# Use the correct gnome3 packageSet
networking.networkmanager.basePackages =
{ inherit (pkgs) networkmanager modemmanager wpa_supplicant;
inherit (gnome3) networkmanager_openvpn networkmanager_vpnc
networkmanager_openconnect networkmanager_fortisslvpn
networkmanager_pptp networkmanager_iodine
networkmanager_l2tp; };
inherit (pkgs.gnome3) networkmanager_openvpn networkmanager_vpnc
networkmanager_openconnect networkmanager_fortisslvpn
networkmanager_pptp networkmanager_iodine
networkmanager_l2tp; };
# Needed for themes and backgrounds
environment.pathsToLink = [ "/share" ];

View File

@ -5,8 +5,7 @@ with lib;
let
cfg = config.services.xserver.displayManager;
gnome3 = config.environment.gnome3.packageSet;
gdm = gnome3.gdm;
gdm = pkgs.gnome3.gdm;
in
@ -103,7 +102,7 @@ in
(filter (arg: arg != "-terminate") cfg.xserverArgs);
GDM_SESSIONS_DIR = "${cfg.session.desktops}";
# Find the mouse
XCURSOR_PATH = "~/.icons:${gnome3.adwaita-icon-theme}/share/icons";
XCURSOR_PATH = "~/.icons:${pkgs.gnome3.adwaita-icon-theme}/share/icons";
};
execCmd = "exec ${gdm}/bin/gdm";
};
@ -127,7 +126,7 @@ in
StandardError = "inherit";
};
systemd.services.display-manager.path = [ gnome3.gnome_session ];
systemd.services.display-manager.path = [ pkgs.gnome3.gnome_session ];
services.dbus.packages = [ gdm ];
@ -186,7 +185,7 @@ in
auth required pam_env.so envfile=${config.system.build.pamEnvironment}
auth required pam_succeed_if.so uid >= 1000 quiet
auth optional ${gnome3.gnome_keyring}/lib/security/pam_gnome_keyring.so
auth optional ${pkgs.gnome3.gnome_keyring}/lib/security/pam_gnome_keyring.so
auth ${if config.security.pam.enableEcryptfs then "required" else "sufficient"} pam_unix.so nullok likeauth
${optionalString config.security.pam.enableEcryptfs
"auth required ${pkgs.ecryptfs}/lib/security/pam_ecryptfs.so unwrap"}
@ -206,7 +205,7 @@ in
"session optional ${pkgs.ecryptfs}/lib/security/pam_ecryptfs.so"}
session required pam_loginuid.so
session optional ${pkgs.systemd}/lib/security/pam_systemd.so
session optional ${gnome3.gnome_keyring}/lib/security/pam_gnome_keyring.so auto_start
session optional ${pkgs.gnome3.gnome_keyring}/lib/security/pam_gnome_keyring.so auto_start
'';
gdm-password.text = ''
@ -214,7 +213,7 @@ in
auth required pam_env.so envfile=${config.system.build.pamEnvironment}
auth required pam_succeed_if.so uid >= 1000 quiet
auth optional ${gnome3.gnome_keyring}/lib/security/pam_gnome_keyring.so
auth optional ${pkgs.gnome3.gnome_keyring}/lib/security/pam_gnome_keyring.so
auth ${if config.security.pam.enableEcryptfs then "required" else "sufficient"} pam_unix.so nullok likeauth
${optionalString config.security.pam.enableEcryptfs
"auth required ${pkgs.ecryptfs}/lib/security/pam_ecryptfs.so unwrap"}
@ -233,7 +232,7 @@ in
"session optional ${pkgs.ecryptfs}/lib/security/pam_ecryptfs.so"}
session required pam_loginuid.so
session optional ${pkgs.systemd}/lib/security/pam_systemd.so
session optional ${gnome3.gnome_keyring}/lib/security/pam_gnome_keyring.so auto_start
session optional ${pkgs.gnome3.gnome_keyring}/lib/security/pam_gnome_keyring.so auto_start
'';
gdm-autologin.text = ''

View File

@ -639,11 +639,7 @@ in
Rules for creating and cleaning up temporary files
automatically. See
<citerefentry><refentrytitle>tmpfiles.d</refentrytitle><manvolnum>5</manvolnum></citerefentry>
for the exact format. You should not use this option to create
files required by systemd services, since there is no
guarantee that <command>systemd-tmpfiles</command> runs when
the system is reconfigured using
<command>nixos-rebuild</command>.
for the exact format.
'';
};

View File

@ -56,6 +56,13 @@ in
};
config = mkIf anyEncrypted {
assertions = map (dev: {
assertion = dev.label != null;
message = ''
The filesystem for ${dev.mountPoint} has encrypted.enable set to true, but no encrypted.label set
'';
}) encDevs;
boot.initrd = {
luks = {
devices =

View File

@ -140,6 +140,17 @@ in
this once.
'';
};
requestEncryptionCredentials = mkOption {
type = types.bool;
default = config.boot.zfs.enableUnstable;
description = ''
Request encryption keys or passwords for all encrypted datasets on import.
Dataset encryption is only supported in zfsUnstable at the moment.
'';
};
};
services.zfs.autoSnapshot = {
@ -263,6 +274,10 @@ in
assertion = !cfgZfs.forceImportAll || cfgZfs.forceImportRoot;
message = "If you enable boot.zfs.forceImportAll, you must also enable boot.zfs.forceImportRoot";
}
{
assertion = cfgZfs.requestEncryptionCredentials -> cfgZfs.enableUnstable;
message = "This feature is only available for zfs unstable. Set the NixOS option boot.zfs.enableUnstable.";
}
];
boot = {
@ -306,6 +321,9 @@ in
done
echo
if [[ -n "$msg" ]]; then echo "$msg"; fi
${lib.optionalString cfgZfs.requestEncryptionCredentials ''
zfs load-key -a
''}
'') rootPools));
};

View File

@ -9,6 +9,12 @@ let
interfaces = attrValues cfg.interfaces;
hasVirtuals = any (i: i.virtual) interfaces;
slaves = concatMap (i: i.interfaces) (attrValues cfg.bonds)
++ concatMap (i: i.interfaces) (attrValues cfg.bridges)
++ concatMap (i: i.interfaces) (attrValues cfg.vswitches)
++ concatMap (i: [i.interface]) (attrValues cfg.macvlans)
++ concatMap (i: [i.interface]) (attrValues cfg.vlans);
# We must escape interfaces due to the systemd interpretation
subsystemDevice = interface:
"sys-subsystem-net-devices-${escapeSystemdPath interface}.device";
@ -105,7 +111,7 @@ let
''
# Set the static DNS configuration, if given.
${pkgs.openresolv}/sbin/resolvconf -m 1 -a static <<EOF
${optionalString (cfg.domain != null) ''
${optionalString (cfg.nameservers != [] && cfg.domain != null) ''
domain ${cfg.domain}
''}
${optionalString (cfg.search != []) ("search " + concatStringsSep " " cfg.search)}
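For context, a configuration sketch (addresses and names are invented) whose static values the script above hands to resolvconf; with this change the `domain` line is only emitted when name servers are actually configured:

```nix
{
  networking.nameservers = [ "192.0.2.53" ];
  networking.domain = "example.org";
  networking.search = [ "example.org" ];
}
```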
@ -152,7 +158,11 @@ let
in
nameValuePair "network-addresses-${i.name}"
{ description = "Address configuration of ${i.name}";
wantedBy = [ "network-setup.service" ];
wantedBy = [
"network-setup.service"
"network-link-${i.name}.service"
"network.target"
];
# propagate stop and reload from network-setup
partOf = [ "network-setup.service" ];
# order before network-setup because the routes that are configured
@ -206,7 +216,7 @@ let
after = [ "dev-net-tun.device" "network-pre.target" ];
wantedBy = [ "network-setup.service" (subsystemDevice i.name) ];
partOf = [ "network-setup.service" ];
before = [ "network-setup.service" (subsystemDevice i.name) ];
before = [ "network-setup.service" ];
path = [ pkgs.iproute ];
serviceConfig = {
Type = "oneshot";
@ -232,7 +242,7 @@ let
partOf = [ "network-setup.service" ] ++ optional v.rstp "mstpd.service";
after = [ "network-pre.target" ] ++ deps ++ optional v.rstp "mstpd.service"
++ concatMap (i: [ "network-addresses-${i}.service" "network-link-${i}.service" ]) v.interfaces;
before = [ "network-setup.service" (subsystemDevice n) ];
before = [ "network-setup.service" ];
serviceConfig.Type = "oneshot";
serviceConfig.RemainAfterExit = true;
path = [ pkgs.iproute ];
@ -331,7 +341,7 @@ let
partOf = [ "network-setup.service" ];
after = [ "network-pre.target" ] ++ deps
++ concatMap (i: [ "network-addresses-${i}.service" "network-link-${i}.service" ]) v.interfaces;
before = [ "network-setup.service" (subsystemDevice n) ];
before = [ "network-setup.service" ];
serviceConfig.Type = "oneshot";
serviceConfig.RemainAfterExit = true;
path = [ pkgs.iproute pkgs.gawk ];
@ -369,7 +379,7 @@ let
bindsTo = deps;
partOf = [ "network-setup.service" ];
after = [ "network-pre.target" ] ++ deps;
before = [ "network-setup.service" (subsystemDevice n) ];
before = [ "network-setup.service" ];
serviceConfig.Type = "oneshot";
serviceConfig.RemainAfterExit = true;
path = [ pkgs.iproute ];
@ -394,7 +404,7 @@ let
bindsTo = deps;
partOf = [ "network-setup.service" ];
after = [ "network-pre.target" ] ++ deps;
before = [ "network-setup.service" (subsystemDevice n) ];
before = [ "network-setup.service" ];
serviceConfig.Type = "oneshot";
serviceConfig.RemainAfterExit = true;
path = [ pkgs.iproute ];
@ -422,7 +432,7 @@ let
bindsTo = deps;
partOf = [ "network-setup.service" ];
after = [ "network-pre.target" ] ++ deps;
before = [ "network-setup.service" (subsystemDevice n) ];
before = [ "network-setup.service" ];
serviceConfig.Type = "oneshot";
serviceConfig.RemainAfterExit = true;
path = [ pkgs.iproute ];
@ -465,5 +475,8 @@ in
config = mkMerge [
bondWarnings
(mkIf (!cfg.useNetworkd) normalConfig)
{ # Ensure slave interfaces are brought up
networking.interfaces = genAttrs slaves (i: {});
}
];
}
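A sketch of what this enables, with invented interface names: interfaces that only appear as members of a bridge, bond, vswitch, macvlan or vlan now receive an (empty) networking.interfaces entry and are brought up automatically:

```nix
{
  # eth0 and eth1 are collected into `slaves` above; no explicit
  # networking.interfaces.eth0 / eth1 declarations are needed.
  networking.bridges.br0.interfaces = [ "eth0" "eth1" ];
}
```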

View File

@ -271,7 +271,7 @@ in rec {
tests.kernel-latest = callTest tests/kernel-latest.nix {};
tests.kernel-lts = callTest tests/kernel-lts.nix {};
tests.keystone = callTest tests/keystone.nix {};
tests.kubernetes = hydraJob (import tests/kubernetes.nix { system = "x86_64-linux"; });
tests.kubernetes = hydraJob (import tests/kubernetes/default.nix { system = "x86_64-linux"; });
tests.latestKernel.login = callTest tests/login.nix { latestKernel = true; };
tests.ldap = callTest tests/ldap.nix {};
#tests.lightdm = callTest tests/lightdm.nix {};

View File

@ -1,409 +0,0 @@
{ system ? builtins.currentSystem }:
with import ../lib/testing.nix { inherit system; };
with import ../lib/qemu-flags.nix;
with pkgs.lib;
let
redisPod = pkgs.writeText "redis-master-pod.json" (builtins.toJSON {
kind = "Pod";
apiVersion = "v1";
metadata.name = "redis";
metadata.labels.name = "redis";
spec.containers = [{
name = "redis";
image = "redis";
args = ["--bind" "0.0.0.0"];
imagePullPolicy = "Never";
ports = [{
name = "redis-server";
containerPort = 6379;
}];
}];
});
redisService = pkgs.writeText "redis-service.json" (builtins.toJSON {
kind = "Service";
apiVersion = "v1";
metadata.name = "redis";
spec = {
ports = [{port = 6379; targetPort = 6379;}];
selector = {name = "redis";};
};
});
redisImage = pkgs.dockerTools.buildImage {
name = "redis";
tag = "latest";
contents = pkgs.redis;
config.Entrypoint = "/bin/redis-server";
};
testSimplePod = ''
$kubernetes->execute("docker load < ${redisImage}");
$kubernetes->waitUntilSucceeds("kubectl create -f ${redisPod}");
$kubernetes->succeed("kubectl create -f ${redisService}");
$kubernetes->waitUntilSucceeds("kubectl get pod redis | grep Running");
$kubernetes->succeed("nc -z \$\(dig \@10.10.0.1 redis.default.svc.cluster.local +short\) 6379");
'';
in {
# This test runs kubernetes on a single node
trivial = makeTest {
name = "kubernetes-trivial";
nodes = {
kubernetes =
{ config, pkgs, lib, nodes, ... }:
{
virtualisation.memorySize = 768;
virtualisation.diskSize = 2048;
programs.bash.enableCompletion = true;
environment.systemPackages = with pkgs; [ netcat bind ];
services.kubernetes.roles = ["master" "node"];
virtualisation.docker.extraOptions = "--iptables=false --ip-masq=false -b cbr0";
networking.bridges.cbr0.interfaces = [];
networking.interfaces.cbr0 = {};
};
};
testScript = ''
startAll;
$kubernetes->waitUntilSucceeds("kubectl get nodes | grep kubernetes | grep Ready");
${testSimplePod}
'';
};
cluster = let
runWithOpenSSL = file: cmd: pkgs.runCommand file {
buildInputs = [ pkgs.openssl ];
} cmd;
ca_key = runWithOpenSSL "ca-key.pem" "openssl genrsa -out $out 2048";
ca_pem = runWithOpenSSL "ca.pem" ''
openssl req \
-x509 -new -nodes -key ${ca_key} \
-days 10000 -out $out -subj "/CN=etcd-ca"
'';
etcd_key = runWithOpenSSL "etcd-key.pem" "openssl genrsa -out $out 2048";
etcd_csr = runWithOpenSSL "etcd.csr" ''
openssl req \
-new -key ${etcd_key} \
-out $out -subj "/CN=etcd" \
-config ${openssl_cnf}
'';
etcd_cert = runWithOpenSSL "etcd.pem" ''
openssl x509 \
-req -in ${etcd_csr} \
-CA ${ca_pem} -CAkey ${ca_key} \
-CAcreateserial -out $out \
-days 365 -extensions v3_req \
-extfile ${openssl_cnf}
'';
etcd_client_key = runWithOpenSSL "etcd-client-key.pem"
"openssl genrsa -out $out 2048";
etcd_client_csr = runWithOpenSSL "etcd-client-key.pem" ''
openssl req \
-new -key ${etcd_client_key} \
-out $out -subj "/CN=etcd-client" \
-config ${client_openssl_cnf}
'';
etcd_client_cert = runWithOpenSSL "etcd-client.crt" ''
openssl x509 \
-req -in ${etcd_client_csr} \
-CA ${ca_pem} -CAkey ${ca_key} -CAcreateserial \
-out $out -days 365 -extensions v3_req \
-extfile ${client_openssl_cnf}
'';
apiserver_key = runWithOpenSSL "apiserver-key.pem" "openssl genrsa -out $out 2048";
apiserver_csr = runWithOpenSSL "apiserver.csr" ''
openssl req \
-new -key ${apiserver_key} \
-out $out -subj "/CN=kube-apiserver" \
-config ${apiserver_cnf}
'';
apiserver_cert = runWithOpenSSL "apiserver.pem" ''
openssl x509 \
-req -in ${apiserver_csr} \
-CA ${ca_pem} -CAkey ${ca_key} -CAcreateserial \
-out $out -days 365 -extensions v3_req \
-extfile ${apiserver_cnf}
'';
worker_key = runWithOpenSSL "worker-key.pem" "openssl genrsa -out $out 2048";
worker_csr = runWithOpenSSL "worker.csr" ''
openssl req \
-new -key ${worker_key} \
-out $out -subj "/CN=kube-worker" \
-config ${worker_cnf}
'';
worker_cert = runWithOpenSSL "worker.pem" ''
openssl x509 \
-req -in ${worker_csr} \
-CA ${ca_pem} -CAkey ${ca_key} -CAcreateserial \
-out $out -days 365 -extensions v3_req \
-extfile ${worker_cnf}
'';
openssl_cnf = pkgs.writeText "openssl.cnf" ''
[req]
req_extensions = v3_req
distinguished_name = req_distinguished_name
[req_distinguished_name]
[ v3_req ]
basicConstraints = CA:FALSE
keyUsage = digitalSignature, keyEncipherment
extendedKeyUsage = serverAuth
subjectAltName = @alt_names
[alt_names]
DNS.1 = etcd1
DNS.2 = etcd2
DNS.3 = etcd3
IP.1 = 127.0.0.1
'';
client_openssl_cnf = pkgs.writeText "client-openssl.cnf" ''
[req]
req_extensions = v3_req
distinguished_name = req_distinguished_name
[req_distinguished_name]
[ v3_req ]
basicConstraints = CA:FALSE
keyUsage = digitalSignature, keyEncipherment
extendedKeyUsage = clientAuth
'';
apiserver_cnf = pkgs.writeText "apiserver-openssl.cnf" ''
[req]
req_extensions = v3_req
distinguished_name = req_distinguished_name
[req_distinguished_name]
[ v3_req ]
basicConstraints = CA:FALSE
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
subjectAltName = @alt_names
[alt_names]
DNS.1 = kubernetes
DNS.2 = kubernetes.default
DNS.3 = kubernetes.default.svc
DNS.4 = kubernetes.default.svc.cluster.local
IP.1 = 10.10.10.1
'';
worker_cnf = pkgs.writeText "worker-openssl.cnf" ''
[req]
req_extensions = v3_req
distinguished_name = req_distinguished_name
[req_distinguished_name]
[ v3_req ]
basicConstraints = CA:FALSE
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
subjectAltName = @alt_names
[alt_names]
DNS.1 = kubeWorker1
DNS.2 = kubeWorker2
'';
etcdNodeConfig = {
virtualisation.memorySize = 128;
services = {
etcd = {
enable = true;
keyFile = etcd_key;
certFile = etcd_cert;
trustedCaFile = ca_pem;
peerClientCertAuth = true;
listenClientUrls = ["https://0.0.0.0:2379"];
listenPeerUrls = ["https://0.0.0.0:2380"];
};
};
environment.variables = {
ETCDCTL_CERT_FILE = "${etcd_client_cert}";
ETCDCTL_KEY_FILE = "${etcd_client_key}";
ETCDCTL_CA_FILE = "${ca_pem}";
ETCDCTL_PEERS = "https://127.0.0.1:2379";
};
networking.firewall.allowedTCPPorts = [ 2379 2380 ];
};
kubeConfig = {
virtualisation.diskSize = 2048;
programs.bash.enableCompletion = true;
services.flannel = {
enable = true;
network = "10.10.0.0/16";
iface = "eth1";
etcd = {
endpoints = ["https://etcd1:2379" "https://etcd2:2379" "https://etcd3:2379"];
keyFile = etcd_client_key;
certFile = etcd_client_cert;
caFile = ca_pem;
};
};
# vxlan
networking.firewall.allowedUDPPorts = [ 8472 ];
systemd.services.docker.after = ["flannel.service"];
systemd.services.docker.serviceConfig.EnvironmentFile = "/run/flannel/subnet.env";
virtualisation.docker.extraOptions = "--iptables=false --ip-masq=false --bip $FLANNEL_SUBNET";
services.kubernetes.verbose = true;
services.kubernetes.etcd = {
servers = ["https://etcd1:2379" "https://etcd2:2379" "https://etcd3:2379"];
keyFile = etcd_client_key;
certFile = etcd_client_cert;
caFile = ca_pem;
};
environment.systemPackages = [ pkgs.bind pkgs.tcpdump pkgs.utillinux ];
};
kubeMasterConfig = {pkgs, ...}: {
require = [kubeConfig];
# kube apiserver
networking.firewall.allowedTCPPorts = [ 443 ];
virtualisation.memorySize = 512;
services.kubernetes = {
roles = ["master"];
scheduler.leaderElect = true;
controllerManager.leaderElect = true;
apiserver = {
publicAddress = "0.0.0.0";
advertiseAddress = "192.168.1.8";
tlsKeyFile = apiserver_key;
tlsCertFile = apiserver_cert;
clientCaFile = ca_pem;
kubeletClientCaFile = ca_pem;
kubeletClientKeyFile = worker_key;
kubeletClientCertFile = worker_cert;
};
};
};
kubeWorkerConfig = { pkgs, ... }: {
require = [kubeConfig];
virtualisation.memorySize = 512;
# kubelet
networking.firewall.allowedTCPPorts = [ 10250 ];
services.kubernetes = {
roles = ["node"];
kubeconfig = {
server = "https://kubernetes:443";
caFile = ca_pem;
certFile = worker_cert;
keyFile = worker_key;
};
kubelet = {
tlsKeyFile = worker_key;
tlsCertFile = worker_cert;
};
};
};
in makeTest {
name = "kubernetes-cluster";
nodes = {
etcd1 = { config, pkgs, nodes, ... }: {
require = [etcdNodeConfig];
services.etcd = {
advertiseClientUrls = ["https://etcd1:2379"];
initialCluster = ["etcd1=https://etcd1:2380" "etcd2=https://etcd2:2380" "etcd3=https://etcd3:2380"];
initialAdvertisePeerUrls = ["https://etcd1:2380"];
};
};
etcd2 = { config, pkgs, ... }: {
require = [etcdNodeConfig];
services.etcd = {
advertiseClientUrls = ["https://etcd2:2379"];
initialCluster = ["etcd1=https://etcd1:2380" "etcd2=https://etcd2:2380" "etcd3=https://etcd3:2380"];
initialAdvertisePeerUrls = ["https://etcd2:2380"];
};
};
etcd3 = { config, pkgs, ... }: {
require = [etcdNodeConfig];
services.etcd = {
advertiseClientUrls = ["https://etcd3:2379"];
initialCluster = ["etcd1=https://etcd1:2380" "etcd2=https://etcd2:2380" "etcd3=https://etcd3:2380"];
initialAdvertisePeerUrls = ["https://etcd3:2380"];
};
};
kubeMaster1 = { config, pkgs, lib, nodes, ... }: {
require = [kubeMasterConfig];
};
kubeMaster2 = { config, pkgs, lib, nodes, ... }: {
require = [kubeMasterConfig];
};
# Kubernetes TCP load balancer
kubernetes = { config, pkgs, ... }: {
# kubernetes
networking.firewall.allowedTCPPorts = [ 443 ];
services.haproxy.enable = true;
services.haproxy.config = ''
global
log 127.0.0.1 local0 notice
user haproxy
group haproxy
defaults
log global
retries 2
timeout connect 3000
timeout server 5000
timeout client 5000
listen kubernetes
bind 0.0.0.0:443
mode tcp
option ssl-hello-chk
balance roundrobin
server kube-master-1 kubeMaster1:443 check
server kube-master-2 kubeMaster2:443 check
'';
};
kubeWorker1 = { config, pkgs, lib, nodes, ... }: {
require = [kubeWorkerConfig];
};
kubeWorker2 = { config, pkgs, lib, nodes, ... }: {
require = [kubeWorkerConfig];
};
};
testScript = ''
startAll;
${testSimplePod}
'';
};
}

View File

@ -0,0 +1,113 @@
{ system ? builtins.currentSystem }:
with import ../../lib/testing.nix { inherit system; };
with import ../../lib/qemu-flags.nix;
with pkgs.lib;
let
mkKubernetesBaseTest =
{ name, domain ? "my.zyx", test, machines
, pkgs ? import <nixpkgs> { inherit system; }
, certs ? import ./certs.nix { inherit pkgs; externalDomain = domain; }
, extraConfiguration ? null }:
let
masterName = head (filter (machineName: any (role: role == "master") machines.${machineName}.roles) (attrNames machines));
master = machines.${masterName};
extraHosts = ''
${master.ip} etcd.${domain}
${master.ip} api.${domain}
${concatMapStringsSep "\n" (machineName: "${machines.${machineName}.ip} ${machineName}.${domain}") (attrNames machines)}
'';
in makeTest {
inherit name;
nodes = mapAttrs (machineName: machine:
{ config, pkgs, lib, nodes, ... }:
mkMerge [
{
virtualisation.memorySize = mkDefault 768;
virtualisation.diskSize = mkDefault 4096;
networking = {
inherit domain extraHosts;
primaryIPAddress = mkForce machine.ip;
firewall = {
allowedTCPPorts = [
10250 # kubelet
];
trustedInterfaces = ["docker0"];
extraCommands = concatMapStrings (node: ''
iptables -A INPUT -s ${node.config.networking.primaryIPAddress} -j ACCEPT
'') (attrValues nodes);
};
};
programs.bash.enableCompletion = true;
environment.variables = {
ETCDCTL_CERT_FILE = "${certs.worker}/etcd-client.pem";
ETCDCTL_KEY_FILE = "${certs.worker}/etcd-client-key.pem";
ETCDCTL_CA_FILE = "${certs.worker}/ca.pem";
ETCDCTL_PEERS = "https://etcd.${domain}:2379";
};
services.flannel.iface = "eth1";
services.kubernetes.apiserver.advertiseAddress = master.ip;
}
(optionalAttrs (any (role: role == "master") machine.roles) {
networking.firewall.allowedTCPPorts = [
2379 2380 # etcd
443 # kubernetes apiserver
];
services.etcd = {
enable = true;
certFile = "${certs.master}/etcd.pem";
keyFile = "${certs.master}/etcd-key.pem";
trustedCaFile = "${certs.master}/ca.pem";
peerClientCertAuth = true;
listenClientUrls = ["https://0.0.0.0:2379"];
listenPeerUrls = ["https://0.0.0.0:2380"];
advertiseClientUrls = ["https://etcd.${config.networking.domain}:2379"];
initialCluster = ["${masterName}=https://etcd.${config.networking.domain}:2380"];
initialAdvertisePeerUrls = ["https://etcd.${config.networking.domain}:2380"];
};
})
(import ./kubernetes-common.nix { inherit (machine) roles; inherit pkgs config certs; })
(optionalAttrs (machine ? "extraConfiguration") (machine.extraConfiguration { inherit config pkgs lib nodes; }))
(optionalAttrs (extraConfiguration != null) (extraConfiguration { inherit config pkgs lib nodes; }))
]
) machines;
testScript = ''
startAll;
${test}
'';
};
mkKubernetesMultiNodeTest = attrs: mkKubernetesBaseTest ({
machines = {
machine1 = {
roles = ["master"];
ip = "192.168.1.1";
};
machine2 = {
roles = ["node"];
ip = "192.168.1.2";
};
};
} // attrs // {
name = "kubernetes-${attrs.name}-multinode";
});
mkKubernetesSingleNodeTest = attrs: mkKubernetesBaseTest ({
machines = {
machine1 = {
roles = ["master" "node"];
ip = "192.168.1.1";
};
};
} // attrs // {
name = "kubernetes-${attrs.name}-singlenode";
});
in {
inherit mkKubernetesBaseTest mkKubernetesSingleNodeTest mkKubernetesMultiNodeTest;
}

View File

@ -0,0 +1,185 @@
{
pkgs ? import <nixpkgs> {},
internalDomain ? "cloud.yourdomain.net",
externalDomain ? "myawesomecluster.cluster.yourdomain.net",
serviceClusterIp ? "10.0.0.1"
}:
let
runWithCFSSL = name: cmd:
builtins.fromJSON (builtins.readFile (
pkgs.runCommand "${name}-cfss.json" {
buildInputs = [ pkgs.cfssl ];
} "cfssl ${cmd} > $out"
));
writeCFSSL = content:
pkgs.runCommand content.name {
buildInputs = [ pkgs.cfssl ];
} ''
mkdir -p $out
cd $out
cat ${writeFile content} | cfssljson -bare ${content.name}
'';
noCSR = content: pkgs.lib.filterAttrs (n: v: n != "csr") content;
noKey = content: pkgs.lib.filterAttrs (n: v: n != "key") content;
writeFile = content: pkgs.writeText "content" (
if pkgs.lib.isAttrs content then builtins.toJSON content
else toString content
);
createServingCertKey = { ca, cn, hosts? [], size ? 2048, name ? cn }:
noCSR (
(runWithCFSSL name "gencert -ca=${writeFile ca.cert} -ca-key=${writeFile ca.key} -profile=server -config=${writeFile ca.config} ${writeFile {
CN = cn;
hosts = hosts;
key = { algo = "rsa"; inherit size; };
}}") // { inherit name; }
);
createClientCertKey = { ca, cn, groups ? [], size ? 2048, name ? cn }:
noCSR (
(runWithCFSSL name "gencert -ca=${writeFile ca.cert} -ca-key=${writeFile ca.key} -profile=client -config=${writeFile ca.config} ${writeFile {
CN = cn;
names = map (group: {O = group;}) groups;
hosts = [""];
key = { algo = "rsa"; inherit size; };
}}") // { inherit name; }
);
createSigningCertKey = { C ? "xx", ST ? "x", L ? "x", O ? "x", OU ? "x", CN ? "ca", emailAddress ? "x", expiry ? "43800h", size ? 2048, name ? CN }:
(noCSR (runWithCFSSL CN "genkey -initca ${writeFile {
key = { algo = "rsa"; inherit size; };
names = [{ inherit C ST L O OU CN emailAddress; }];
}}")) // {
inherit name;
config.signing = {
default.expiry = expiry;
profiles = {
server = {
inherit expiry;
usages = [
"signing"
"key encipherment"
"server auth"
];
};
client = {
inherit expiry;
usages = [
"signing"
"key encipherment"
"client auth"
];
};
peer = {
inherit expiry;
usages = [
"signing"
"key encipherment"
"server auth"
"client auth"
];
};
};
};
};
ca = createSigningCertKey {};
kube-apiserver = createServingCertKey {
inherit ca;
cn = "kube-apiserver";
hosts = ["kubernetes.default" "kubernetes.default.svc" "localhost" "api.${externalDomain}" serviceClusterIp];
};
kubelet = createServingCertKey {
inherit ca;
cn = "kubelet";
hosts = ["*.${externalDomain}"];
};
service-accounts = createServingCertKey {
inherit ca;
cn = "kube-service-accounts";
};
etcd = createServingCertKey {
inherit ca;
cn = "etcd";
hosts = ["etcd.${externalDomain}"];
};
etcd-client = createClientCertKey {
inherit ca;
cn = "etcd-client";
};
kubelet-client = createClientCertKey {
inherit ca;
cn = "kubelet-client";
groups = ["system:masters"];
};
apiserver-client = {
kubelet = createClientCertKey {
inherit ca;
cn = "apiserver-client-kubelet";
groups = ["system:nodes"];
};
kube-proxy = createClientCertKey {
inherit ca;
name = "apiserver-client-kube-proxy";
cn = "system:kube-proxy";
groups = ["system:kube-proxy" "system:nodes"];
};
kube-controller-manager = createClientCertKey {
inherit ca;
name = "apiserver-client-kube-controller-manager";
cn = "system:kube-controller-manager";
groups = ["system:masters"];
};
kube-scheduler = createClientCertKey {
inherit ca;
name = "apiserver-client-kube-scheduler";
cn = "system:kube-scheduler";
groups = ["system:kube-scheduler"];
};
admin = createClientCertKey {
inherit ca;
cn = "admin";
groups = ["system:masters"];
};
};
in {
master = pkgs.buildEnv {
name = "master-keys";
paths = [
(writeCFSSL (noKey ca))
(writeCFSSL kube-apiserver)
(writeCFSSL kubelet-client)
(writeCFSSL apiserver-client.kube-controller-manager)
(writeCFSSL apiserver-client.kube-scheduler)
(writeCFSSL service-accounts)
(writeCFSSL etcd)
];
};
worker = pkgs.buildEnv {
name = "worker-keys";
paths = [
(writeCFSSL (noKey ca))
(writeCFSSL kubelet)
(writeCFSSL apiserver-client.kubelet)
(writeCFSSL apiserver-client.kube-proxy)
(writeCFSSL etcd-client)
];
};
admin = writeCFSSL apiserver-client.admin;
}

View File

@ -0,0 +1,7 @@
{ system ? builtins.currentSystem }:
{
dns = import ./dns.nix { inherit system; };
# e2e = import ./e2e.nix { inherit system; }; # TODO: make it pass
# the following test(s) can be removed when e2e is working:
rbac = import ./rbac.nix { inherit system; };
}

View File

@ -0,0 +1,127 @@
{ system ? builtins.currentSystem, pkgs ? import <nixpkgs> { inherit system; } }:
with import ./base.nix { inherit system; };
let
domain = "my.zyx";
certs = import ./certs.nix { externalDomain = domain; };
redisPod = pkgs.writeText "redis-pod.json" (builtins.toJSON {
kind = "Pod";
apiVersion = "v1";
metadata.name = "redis";
metadata.labels.name = "redis";
spec.containers = [{
name = "redis";
image = "redis";
args = ["--bind" "0.0.0.0"];
imagePullPolicy = "Never";
ports = [{
name = "redis-server";
containerPort = 6379;
}];
}];
});
redisService = pkgs.writeText "redis-service.json" (builtins.toJSON {
kind = "Service";
apiVersion = "v1";
metadata.name = "redis";
spec = {
ports = [{port = 6379; targetPort = 6379;}];
selector = {name = "redis";};
};
});
redisImage = pkgs.dockerTools.buildImage {
name = "redis";
tag = "latest";
contents = [ pkgs.redis pkgs.bind.host ];
config.Entrypoint = "/bin/redis-server";
};
probePod = pkgs.writeText "probe-pod.json" (builtins.toJSON {
kind = "Pod";
apiVersion = "v1";
metadata.name = "probe";
metadata.labels.name = "probe";
spec.containers = [{
name = "probe";
image = "probe";
args = [ "-f" ];
tty = true;
imagePullPolicy = "Never";
}];
});
probeImage = pkgs.dockerTools.buildImage {
name = "probe";
tag = "latest";
contents = [ pkgs.bind.host pkgs.busybox ];
config.Entrypoint = "/bin/tail";
};
extraConfiguration = { config, pkgs, lib, nodes, ... }: {
environment.systemPackages = [ pkgs.bind.host ];
# virtualisation.docker.extraOptions = "--dns=${config.services.kubernetes.addons.dns.clusterIp}";
services.dnsmasq.enable = true;
services.dnsmasq.servers = [
"/cluster.local/${config.services.kubernetes.addons.dns.clusterIp}#53"
];
};
base = {
name = "dns";
inherit domain certs extraConfiguration;
};
singleNodeTest = {
test = ''
# prepare machine1 for test
$machine1->waitUntilSucceeds("kubectl get node machine1.${domain} | grep -w Ready");
$machine1->execute("docker load < ${redisImage}");
$machine1->waitUntilSucceeds("kubectl create -f ${redisPod}");
$machine1->waitUntilSucceeds("kubectl create -f ${redisService}");
$machine1->execute("docker load < ${probeImage}");
$machine1->waitUntilSucceeds("kubectl create -f ${probePod}");
# check if pods are running
$machine1->waitUntilSucceeds("kubectl get pod redis | grep Running");
$machine1->waitUntilSucceeds("kubectl get pod probe | grep Running");
$machine1->waitUntilSucceeds("kubectl get pods -n kube-system | grep 'kube-dns.*3/3'");
# check dns on host (dnsmasq)
$machine1->succeed("host redis.default.svc.cluster.local");
# check dns inside the container
$machine1->succeed("kubectl exec -ti probe -- /bin/host redis.default.svc.cluster.local");
'';
};
multiNodeTest = {
test = ''
# prepare machines for test
$machine1->waitUntilSucceeds("kubectl get node machine1.${domain} | grep -w Ready");
$machine1->waitUntilSucceeds("kubectl get node machine2.${domain} | grep -w Ready");
$machine2->execute("docker load < ${redisImage}");
$machine1->waitUntilSucceeds("kubectl create -f ${redisPod}");
$machine1->waitUntilSucceeds("kubectl create -f ${redisService}");
$machine2->execute("docker load < ${probeImage}");
$machine1->waitUntilSucceeds("kubectl create -f ${probePod}");
# check if pods are running
$machine1->waitUntilSucceeds("kubectl get pod redis | grep Running");
$machine1->waitUntilSucceeds("kubectl get pod probe | grep Running");
$machine1->waitUntilSucceeds("kubectl get pods -n kube-system | grep 'kube-dns.*3/3'");
# check dns on hosts (dnsmasq)
$machine1->succeed("host redis.default.svc.cluster.local");
$machine2->succeed("host redis.default.svc.cluster.local");
# check dns inside the container
$machine1->succeed("kubectl exec -ti probe -- /bin/host redis.default.svc.cluster.local");
'';
};
in {
singlenode = mkKubernetesSingleNodeTest (base // singleNodeTest);
multinode = mkKubernetesMultiNodeTest (base // multiNodeTest);
}

View File

@ -0,0 +1,40 @@
{ system ? builtins.currentSystem, pkgs ? import <nixpkgs> { inherit system; } }:
with import ./base.nix { inherit system; };
let
domain = "my.zyx";
certs = import ./certs.nix { externalDomain = domain; };
kubeconfig = pkgs.writeText "kubeconfig.json" (builtins.toJSON {
apiVersion = "v1";
kind = "Config";
clusters = [{
name = "local";
cluster.certificate-authority = "${certs.master}/ca.pem";
cluster.server = "https://api.${domain}";
}];
users = [{
name = "kubelet";
user = {
client-certificate = "${certs.admin}/admin.pem";
client-key = "${certs.admin}/admin-key.pem";
};
}];
contexts = [{
context = {
cluster = "local";
user = "kubelet";
};
current-context = "kubelet-context";
}];
});
base = {
name = "e2e";
inherit domain certs;
test = ''
$machine1->succeed("e2e.test -kubeconfig ${kubeconfig} -provider local -ginkgo.focus '\\[Conformance\\]' -ginkgo.skip '\\[Flaky\\]|\\[Serial\\]'");
'';
};
in {
singlenode = mkKubernetesSingleNodeTest base;
multinode = mkKubernetesMultiNodeTest base;
}

View File

@ -0,0 +1,59 @@
{ roles, config, pkgs, certs }:
with pkgs.lib;
let
base = {
inherit roles;
featureGates = ["AllAlpha"];
flannel.enable = true;
addons.dashboard.enable = true;
verbose = true;
caFile = "${certs.master}/ca.pem";
apiserver = {
tlsCertFile = "${certs.master}/kube-apiserver.pem";
tlsKeyFile = "${certs.master}/kube-apiserver-key.pem";
kubeletClientCertFile = "${certs.master}/kubelet-client.pem";
kubeletClientKeyFile = "${certs.master}/kubelet-client-key.pem";
serviceAccountKeyFile = "${certs.master}/kube-service-accounts.pem";
};
etcd = {
servers = ["https://etcd.${config.networking.domain}:2379"];
certFile = "${certs.worker}/etcd-client.pem";
keyFile = "${certs.worker}/etcd-client-key.pem";
};
kubeconfig = {
server = "https://api.${config.networking.domain}";
};
kubelet = {
tlsCertFile = "${certs.worker}/kubelet.pem";
tlsKeyFile = "${certs.worker}/kubelet-key.pem";
hostname = "${config.networking.hostName}.${config.networking.domain}";
kubeconfig = {
certFile = "${certs.worker}/apiserver-client-kubelet.pem";
keyFile = "${certs.worker}/apiserver-client-kubelet-key.pem";
};
};
controllerManager = {
serviceAccountKeyFile = "${certs.master}/kube-service-accounts-key.pem";
kubeconfig = {
certFile = "${certs.master}/apiserver-client-kube-controller-manager.pem";
keyFile = "${certs.master}/apiserver-client-kube-controller-manager-key.pem";
};
};
scheduler = {
kubeconfig = {
certFile = "${certs.master}/apiserver-client-kube-scheduler.pem";
keyFile = "${certs.master}/apiserver-client-kube-scheduler-key.pem";
};
};
proxy = {
kubeconfig = {
certFile = "${certs.worker}/apiserver-client-kube-proxy.pem";
keyFile = "${certs.worker}//apiserver-client-kube-proxy-key.pem";
};
};
};
in {
services.kubernetes = base;
}

View File

@ -0,0 +1,137 @@
{ system ? builtins.currentSystem, pkgs ? import <nixpkgs> { inherit system; } }:
with import ./base.nix { inherit system; };
let
roServiceAccount = pkgs.writeText "ro-service-account.json" (builtins.toJSON {
kind = "ServiceAccount";
apiVersion = "v1";
metadata = {
name = "read-only";
namespace = "default";
};
});
roRoleBinding = pkgs.writeText "ro-role-binding.json" (builtins.toJSON {
apiVersion = "rbac.authorization.k8s.io/v1beta1";
kind = "RoleBinding";
metadata = {
name = "read-pods";
namespace = "default";
};
roleRef = {
apiGroup = "rbac.authorization.k8s.io";
kind = "Role";
name = "pod-reader";
};
subjects = [{
kind = "ServiceAccount";
name = "read-only";
namespace = "default";
}];
});
roRole = pkgs.writeText "ro-role.json" (builtins.toJSON {
apiVersion = "rbac.authorization.k8s.io/v1beta1";
kind = "Role";
metadata = {
name = "pod-reader";
namespace = "default";
};
rules = [{
apiGroups = [""];
resources = ["pods"];
verbs = ["get" "list" "watch"];
}];
});
kubectlPod = pkgs.writeText "kubectl-pod.json" (builtins.toJSON {
kind = "Pod";
apiVersion = "v1";
metadata.name = "kubectl";
metadata.namespace = "default";
metadata.labels.name = "kubectl";
spec.serviceAccountName = "read-only";
spec.containers = [{
name = "kubectl";
image = "kubectl:latest";
command = ["/bin/tail" "-f"];
imagePullPolicy = "Never";
tty = true;
}];
});
kubectlPod2 = pkgs.writeTextDir "kubectl-pod-2.json" (builtins.toJSON {
kind = "Pod";
apiVersion = "v1";
metadata.name = "kubectl-2";
metadata.namespace = "default";
metadata.labels.name = "kubectl-2";
spec.serviceAccountName = "read-only";
spec.containers = [{
name = "kubectl-2";
image = "kubectl:latest";
command = ["/bin/tail" "-f"];
imagePullPolicy = "Never";
tty = true;
}];
});
kubectl = pkgs.runCommand "copy-kubectl" { buildInputs = [ pkgs.kubernetes ]; } ''
mkdir -p $out/bin
cp ${pkgs.kubernetes}/bin/kubectl $out/bin/kubectl
'';
kubectlImage = pkgs.dockerTools.buildImage {
name = "kubectl";
tag = "latest";
contents = [ kubectl pkgs.busybox kubectlPod2 ];
config.Entrypoint = "/bin/sh";
};
base = {
name = "rbac";
};
singlenode = base // {
test = ''
$machine1->waitUntilSucceeds("kubectl get node machine1.my.zyx | grep -w Ready");
$machine1->execute("docker load < ${kubectlImage}");
$machine1->waitUntilSucceeds("kubectl apply -f ${roServiceAccount}");
$machine1->waitUntilSucceeds("kubectl apply -f ${roRole}");
$machine1->waitUntilSucceeds("kubectl apply -f ${roRoleBinding}");
$machine1->waitUntilSucceeds("kubectl create -f ${kubectlPod}");
$machine1->waitUntilSucceeds("kubectl get pod kubectl | grep Running");
$machine1->succeed("kubectl exec -ti kubectl -- kubectl get pods");
$machine1->fail("kubectl exec -ti kubectl -- kubectl create -f /kubectl-pod-2.json");
$machine1->fail("kubectl exec -ti kubectl -- kubectl delete pods -l name=kubectl");
'';
};
multinode = base // {
test = ''
$machine1->waitUntilSucceeds("kubectl get node machine1.my.zyx | grep -w Ready");
$machine1->waitUntilSucceeds("kubectl get node machine2.my.zyx | grep -w Ready");
$machine2->execute("docker load < ${kubectlImage}");
$machine1->waitUntilSucceeds("kubectl apply -f ${roServiceAccount}");
$machine1->waitUntilSucceeds("kubectl apply -f ${roRole}");
$machine1->waitUntilSucceeds("kubectl apply -f ${roRoleBinding}");
$machine1->waitUntilSucceeds("kubectl create -f ${kubectlPod}");
$machine1->waitUntilSucceeds("kubectl get pod kubectl | grep Running");
$machine1->succeed("kubectl exec -ti kubectl -- kubectl get pods");
$machine1->fail("kubectl exec -ti kubectl -- kubectl create -f /kubectl-pod-2.json");
$machine1->fail("kubectl exec -ti kubectl -- kubectl delete pods -l name=kubectl");
'';
};
in {
singlenode = mkKubernetesSingleNodeTest singlenode;
multinode = mkKubernetesMultiNodeTest multinode;
}

View File

@ -43,6 +43,7 @@ in
});
})
];
system.stateVersion = "17.03";
};
radicale1_export = lib.recursiveUpdate radicale1 {
services.radicale.extraArgs = [

View File

@ -0,0 +1,37 @@
{ stdenv
, cmake
, extra-cmake-modules
, plasma-framework
, kwindowsystem
, fetchFromGitHub
}:
stdenv.mkDerivation rec {
name = "playbar2-${version}";
version = "2.5";
src = fetchFromGitHub {
owner = "audoban";
repo = "PlayBar2";
rev = "v${version}";
sha256 = "0iv2m4flgaz2r0k7f6l0ca8p6cw8j8j2gin1gci2pg3l5g5khbch";
};
nativeBuildInputs = [
cmake
extra-cmake-modules
];
buildInputs = [
plasma-framework
kwindowsystem
];
meta = with stdenv.lib; {
description = "Mpris2 Client for Plasma5";
homepage = https://github.com/audoban/PlayBar2;
license = licenses.gpl3;
platforms = platforms.linux;
maintainers = with maintainers; [ pjones ];
};
}

View File

@ -0,0 +1,15 @@
Dump temacs in an empty environment to prevent -dev paths from ending
up in the dumped image.
diff -ru -x '*~' emacs-25.3/src/Makefile.in emacs-25.3-new/src/Makefile.in
--- emacs-25.3/src/Makefile.in 2017-04-14 17:02:47.000000000 +0200
+++ emacs-25.3-new/src/Makefile.in 2017-09-25 19:03:02.173861038 +0200
@@ -532,7 +532,7 @@
ifeq ($(CANNOT_DUMP),yes)
ln -f temacs$(EXEEXT) $@
else
- LC_ALL=C $(RUN_TEMACS) -batch -l loadup dump
+ env -i LC_ALL=C $(RUN_TEMACS) -batch -l loadup dump
ifneq ($(PAXCTL_dumped),)
$(PAXCTL_dumped) $@
endif

View File

@ -34,7 +34,11 @@ stdenv.mkDerivation rec {
sha256 = "02y00y9q42g1iqgz5qhmsja75hwxd88yrn9zp14lanay0zkwafi5";
};
patches = (lib.optional stdenv.isDarwin ./at-fdcwd.patch);
enableParallelBuilding = true;
patches =
[ ./clean-env.patch ]
++ lib.optional stdenv.isDarwin ./at-fdcwd.patch;
nativeBuildInputs = [ pkgconfig ]
++ lib.optionals srcRepo [ autoconf automake texinfo ]

View File

@ -1,33 +1,30 @@
{ fetchurl, stdenv, ncurses, pkgconfig, libbsd }:
{ stdenv, fetchurl, pkgconfig, libbsd, ncurses }:
stdenv.mkDerivation rec {
name = "mg-${version}";
version = "20161005";
version = "20170828";
src = fetchurl {
url = "http://homepage.boetes.org/software/mg/${name}.tar.gz";
sha256 = "0qaydk2cy765n9clghmi5gdnpwn15y2v0fj6r0jcm0v7d89vbz5p";
sha256 = "139nc58l5ifj3d3478nhqls0lic52skmxfxggznzxaz9camqd20z";
};
NIX_CFLAGS_COMPILE = "-Wno-error";
preConfigure = ''
substituteInPlace GNUmakefile \
--replace /usr/bin/pkg-config ${pkgconfig}/bin/pkg-config
'';
enableParallelBuilding = true;
makeFlags = [ "PKG_CONFIG=${pkgconfig}/bin/pkg-config" ];
installPhase = ''
mkdir -p $out/bin
cp mg $out/bin
mkdir -p $out/share/man/man1
cp mg.1 $out/share/man/man1
install -m 555 -Dt $out/bin mg
install -m 444 -Dt $out/share/man/man1 mg.1
'';
nativeBuildInputs = [ pkgconfig ];
buildInputs = [ ncurses libbsd ];
buildInputs = [ libbsd ncurses ];
meta = with stdenv.lib; {
homepage = http://homepage.boetes.org/software/mg/;
description = "Micro GNU/emacs, a portable version of the mg maintained by the OpenBSD team";
homepage = "https://homepage.boetes.org/software/mg";
license = licenses.publicDomain;
platforms = platforms.all;
};

View File

@ -14,8 +14,8 @@ let
else throw "ImageMagick is not supported on this platform.";
cfg = {
version = "7.0.6-4";
sha256 = "0fvkx9lf8g0sa9bccd9s5qyhcy0g1mqnkbpqly55ryxyg1ywxqaz";
version = "7.0.7-4";
sha256 = "074w4jm5s98b8dxwjl8lljvdhmm3mbg1ikgjy1mw3c1sb08z3nc8";
patches = [];
};
in

View File

@ -14,8 +14,8 @@ let
else throw "ImageMagick is not supported on this platform.";
cfg = {
version = "6.9.9-7";
sha256 = "1lwsz9b8clygdppgawv2hsry4aykgmawjlwhg3fj70rndv4a8rw4";
version = "6.9.9-15";
sha256 = "0bxgdc1qiyvag6a2iiqcbwp4ak0m1mzi9qhs51fbrvv6syy12m6c";
patches = [];
}
# Freeze version on mingw so we don't need to port the patch too often.

View File

@ -14,4 +14,5 @@ mkDerivation {
propagatedBuildInputs = [
dolphin kdelibs4support ki18n kio kxmlgui
];
outputs = [ "out" "dev" ];
}

View File

@ -14,4 +14,5 @@ mkDerivation {
propagatedBuildInputs = [
kio kparts kxmlgui qtscript solid
];
outputs = [ "out" "dev" ];
}

View File

@ -19,4 +19,5 @@ mkDerivation {
kconfig kconfigwidgets kdbusaddons kiconthemes kcmutils knotifications
kwidgetsaddons kitemviews kio kwindowsystem plasma-framework qtdeclarative
];
outputs = [ "out" "dev" ];
}

View File

@ -41,8 +41,14 @@ stdenv.mkDerivation rec {
enableParallelBuilding = true;
postPatch = ''
substituteInPlace \
gr-fec/include/gnuradio/fec/polar_decoder_common.h \
--replace BOOST_CONSTEXPR_OR_CONST const
'';
preConfigure = ''
export NIX_CFLAGS_COMPILE="$NIX_CFLAGS_COMPILE -Wno-unused-variable"
export NIX_CFLAGS_COMPILE="$NIX_CFLAGS_COMPILE -Wno-unused-variable -std=c++11"
'';
# - Ensure we get an interactive backend for matplotlib. If not the gr_plot_*

View File

@ -0,0 +1,33 @@
{ stdenv, fetchurl, intltool, pkgconfig, gnome3, shared_mime_info, desktop_file_utils, wrapGAppsHook }:
stdenv.mkDerivation rec {
name = "gpx-viewer-${version}";
version = "0.4.0";
src = fetchurl {
url = "https://launchpad.net/gpx-viewer/trunk/${version}/+download/${name}.tar.gz";
sha256 = "956acfaf870ac436300cd9953dece630df7fd7dff8e4ae2577a6002884466f80";
};
patches = fetchurl {
url = "https://code.launchpad.net/~chkr/gpx-viewer/gtk3-bugfix/+merge/260766/+preview-diff/628965/+files/preview.diff";
sha256 = "1yl7jk7skkcx10nny5zdixswcymjd9s9c1zhm1i5y3aqhchvmfs7";
};
patchFlags = [ "-p0" ];
nativeBuildInputs = [
intltool pkgconfig
shared_mime_info # For update-mime-database
desktop_file_utils # For update-desktop-database
wrapGAppsHook # Fix error: GLib-GIO-ERROR **: No GSettings schemas are installed on the system
];
buildInputs = with gnome3; [ gdl libchamplain defaultIconTheme ];
meta = with stdenv.lib; {
homepage = https://blog.sarine.nl/tag/gpxviewer/;
description = "Simple tool to visualize tracks and waypoints stored in a gpx file";
platforms = with platforms; linux;
license = licenses.gpl2Plus;
maintainers = with maintainers; [ dotlambda ];
};
}

View File

@ -2,13 +2,13 @@
stdenv.mkDerivation rec {
name = "gpxsee-${version}";
version = "4.9";
version = "4.14";
src = fetchFromGitHub {
owner = "tumic0";
repo = "GPXSee";
rev = version;
sha256 = "0jk99yhrms1wzqpcnsjydcl2nysidv639s2j7l53yp60g0zz8174";
sha256 = "0yv3hcs5b8a88mp24h8r2sn69phwrahdff5pp74lz24270il3jgb";
};
nativeBuildInputs = [ qmake qttools ];

View File

@ -1,7 +1,7 @@
{ mkDerivation, lib, cmake, xorg, plasma-framework, fetchFromGitHub
, extra-cmake-modules, karchive, kwindowsystem, qtx11extras }:
, extra-cmake-modules, karchive, kwindowsystem, qtx11extras, kcrash }:
let version = "0.6.0"; in
let version = "0.7.1"; in
mkDerivation {
name = "latte-dock-${version}";
@ -10,13 +10,13 @@ mkDerivation {
owner = "psifidotos";
repo = "Latte-Dock";
rev = "v${version}";
sha256 = "1967hx4lavy96vvik8d5m2c6ycd2mlf9cmhrv40zr0784ni0ikyv";
sha256 = "0vdmsjj1qqlzz26mznb56znv5x7akbvw65ybbzakclp4q1xrsrm2";
};
buildInputs = [ plasma-framework xorg.libpthreadstubs xorg.libXdmcp ];
buildInputs = [ plasma-framework xorg.libpthreadstubs xorg.libXdmcp xorg.libSM ];
nativeBuildInputs = [ extra-cmake-modules cmake karchive kwindowsystem
qtx11extras ];
qtx11extras kcrash ];
meta = with lib; {
description = "Dock-style app launcher based on Plasma frameworks";

View File

@ -0,0 +1,36 @@
{ stdenv, fetchFromGitHub, pkgconfig, vte, gtk }:
stdenv.mkDerivation rec {
name = "stupidterm-2017-03-15";
nativeBuildInputs = [ pkgconfig ];
buildInputs = [ vte gtk ];
src = fetchFromGitHub {
owner = "esmil";
repo = "stupidterm";
rev = "752316a783f52317ffd9f05d32e208dbcafc5ba6";
sha256 = "1d8fyhr9sgpxgkwzkyiws0kvhmqfwwyycvcr1qf2wjldiax222lv";
};
makeFlags = "PKGCONFIG=${pkgconfig}/bin/pkg-config binary=stupidterm";
installPhase = ''
mkdir -p $out/bin $out/share/applications $out/share/stupidterm
cp stupidterm $out/bin
substituteAll ${./stupidterm.desktop} $out/share/applications/stupidterm.desktop
substituteAll stupidterm.ini $out/share/stupidterm/stupidterm.ini
'';
meta = with stdenv.lib; {
description = "Simple wrapper around the VTE terminal emulator widget for GTK+";
longDescription = ''
Simple wrapper around the VTE terminal emulator widget for GTK+
'';
homepage = https://github.com/esmil/stupidterm;
license = licenses.lgpl3Plus;
maintainers = [ maintainers.etu ];
platforms = platforms.linux;
};
}

View File

@ -0,0 +1,9 @@
[Desktop Entry]
Version=20170315
Name=stupidterm
Comment=VTE based terminal emulator
Exec=stupidterm
Icon=utilities-terminal
Terminal=false
Type=Application
Categories=System;TerminalEmulator;

View File

@ -1,8 +1,8 @@
{ stdenv, fetchgit, pkgconfig, vte, gtk3, ncurses, makeWrapper, symlinkJoin
{ stdenv, fetchgit, pkgconfig, vte, gtk3, ncurses, makeWrapper, wrapGAppsHook, symlinkJoin
, configFile ? null
}:
let
let
version = "12";
termite = stdenv.mkDerivation {
name = "termite-${version}";
@ -17,7 +17,9 @@ let
makeFlags = [ "VERSION=v${version}" "PREFIX=" "DESTDIR=$(out)" ];
buildInputs = [ pkgconfig vte gtk3 ncurses ];
buildInputs = [ vte gtk3 ncurses ];
nativeBuildInputs = [ wrapGAppsHook pkgconfig ];
outputs = [ "out" "terminfo" ];

View File

@ -0,0 +1,29 @@
{ stdenv, fetchurl
, autoreconfHook, pkgconfig, wrapGAppsHook
, glib, intltool, gtk3, gtksourceview }:
stdenv.mkDerivation rec {
name = "xpad-${version}";
version = "5.0.0";
src = fetchurl {
url = "https://launchpad.net/xpad/trunk/${version}/+download/xpad-${version}.tar.bz2";
sha256 = "02yikxg6z9bwla09ka001ppjlpbv5kbza3za9asazm5aiz376mkb";
};
nativeBuildInputs = [ autoreconfHook pkgconfig wrapGAppsHook ];
buildInputs = [ glib intltool gtk3 gtksourceview ];
autoreconfPhase = ''
./autogen.sh
'';
meta = with stdenv.lib; {
description = "A sticky note application for jotting down things to remember";
homepage = https://launchpad.net/xpad;
license = licenses.gpl3;
platforms = platforms.linux;
maintainers = with maintainers; [ michalrus ];
};
}

View File

@ -23,10 +23,11 @@
, pango
, audioSupport ? mediaSupport
, pulseaudioSupport ? audioSupport
, pulseaudioSupport ? false
, libpulseaudio
, apulse
# Media support (implies pulseaudio support)
# Media support (implies audio support)
, mediaSupport ? false
, gstreamer
, gst-plugins-base
@ -158,6 +159,11 @@ stdenv.mkDerivation rec {
# and torLibPath for accuracy, but this is more convenient ...
libPath=${libPath}:$TBB_IN_STORE:$TBB_IN_STORE/TorBrowser/Tor
# apulse uses a non-standard library path. For now special-case it.
${optionalString (audioSupport && !pulseaudioSupport) ''
libPath=${apulse}/lib/apulse:$libPath
''}
# Fixup paths to pluggable transports.
sed -i TorBrowser/Data/Tor/torrc-defaults \
-e "s,./TorBrowser,$TBB_IN_STORE/TorBrowser,g"
@ -218,6 +224,13 @@ stdenv.mkDerivation rec {
// toggling the pref takes effect.
lockPref("browser.tabs.remote.autostart.2", ${if disableContentSandbox then "false" else "true"});
// Allow sandbox access to sound devices if using ALSA directly
${if (audioSupport && !pulseaudioSupport) then ''
pref("security.sandbox.content.write_path_whitelist", "/dev/snd/");
'' else ''
clearPref("security.sandbox.content.write_path_whitelist");
''}
${optionalString (extraPrefs != "") ''
${extraPrefs}
''}
@ -336,6 +349,8 @@ stdenv.mkDerivation rec {
PULSE_SERVER="\''${PULSE_SERVER:-}" \
PULSE_COOKIE="\''${PULSE_COOKIE:-}" \
\
APULSE_PLAYBACK_DEVICE="\''${APULSE_PLAYBACK_DEVICE:-plug:dmix}" \
\
TOR_SKIP_LAUNCH="\''${TOR_SKIP_LAUNCH:-}" \
TOR_CONTROL_PORT="\''${TOR_CONTROL_PORT:-}" \
TOR_SOCKS_PORT="\''${TOR_SOCKS_PORT:-}" \

View File

@ -0,0 +1,209 @@
{ stdenv
, lib
, fetchurl
, fetchgit
, tor
, tor-browser-unwrapped
# Extensions, common
, zip
# HTTPS Everywhere
, git
, libxml2 # xmllint
, python27
, python27Packages
, rsync
}:
let
tor-browser-build_src = fetchgit {
url = "https://git.torproject.org/builders/tor-browser-build.git";
rev = "refs/tags/tbb-7.5a5-build5";
sha256 = "0j37mqldj33fnzghxifvy6v8vdwkcz0i4z81prww64md5s8qcsa9";
};
firefoxExtensions = {
https-everywhere = stdenv.mkDerivation rec {
name = "https-everywhere-${version}";
version = "5.2.21";
src = fetchgit {
url = "https://git.torproject.org/https-everywhere.git";
rev = "refs/tags/${version}";
sha256 = "0z9madihh4b4z4blvfmh6w1hsv8afyi0x7b243nciq9r4w55xgfa";
};
nativeBuildInputs = [
git
libxml2 # xmllint
python27
python27Packages.lxml
rsync
zip
];
buildCommand = ''
cp -dR --no-preserve=mode "$src" src
cd src
sed -i makexpi.sh -e '104d' # cp -a translations/* fails because the dir is empty ...
$shell ./makexpi.sh ${version} --no-recurse
install -m 444 -Dt $out pkg"/"*.xpi
'';
meta = {
homepage = https://gitweb.torproject.org/https-everywhere.git/;
};
};
noscript = fetchurl {
url = https://secure.informaction.com/download/releases/noscript-5.0.10.xpi;
sha256 = "18k5karbaj5mhd9cyjbqgik6044bw88rjalkh6anjanxbn503j6g";
};
torbutton = stdenv.mkDerivation rec {
name = "torbutton-${version}";
version = "1.9.8.1";
src = fetchgit {
url = "https://git.torproject.org/torbutton.git";
rev = "refs/tags/${version}";
sha256 = "1amp0c9ky0a7fsa0bcbi6n6ginw7s2g3an4rj7kvc1lxmrcsm65l";
};
nativeBuildInputs = [ zip ];
buildCommand = ''
cp -dR --no-preserve=mode "$src" src
cd src
$shell ./makexpi.sh
install -m 444 -Dt $out pkg"/"*.xpi
'';
};
tor-launcher = stdenv.mkDerivation rec {
name = "tor-launcher-${version}";
version = "0.2.12.3";
src = fetchgit {
url = "https://git.torproject.org/tor-launcher.git";
rev = "refs/tags/${version}";
sha256 = "0126x48pjiy2zm4l8jzhk70w24hviaz560ffp4lb9x0ar615bc9q";
};
nativeBuildInputs = [ zip ];
buildCommand = ''
cp -dR --no-preserve=mode "$src" src
cd src
make package
install -m 444 -Dt $out pkg"/"*.xpi
'';
};
};
in
stdenv.mkDerivation rec {
name = "tor-browser-bundle-${version}";
version = tor-browser-unwrapped.version;
buildInputs = [ tor-browser-unwrapped tor ];
unpackPhase = ":";
buildPhase = ":";
installPhase = ''
TBBUILD=${tor-browser-build_src}/projects/tor-browser
self=$out/lib/tor-browser
mkdir -p $self && cd $self
cp -dR ${tor-browser-unwrapped}/lib"/"*"/"* .
chmod -R +w .
# Prepare for autoconfig
cat >defaults/pref/autoconfig.js <<EOF
pref("general.config.filename", "mozilla.cfg");
pref("general.config.obscure_value", 0);
EOF
# Hardcoded configuration
cat >mozilla.cfg <<EOF
// First line must be a comment
// Always update via Nixpkgs
lockPref("app.update.auto", false);
lockPref("app.update.enabled", false);
lockPref("extensions.update.autoUpdateDefault", false);
lockPref("extensions.update.enabled", false);
lockPref("extensions.torbutton.versioncheck_enabled", false);
// Where to find the Nixpkgs tor executable & config
lockPref("extensions.torlauncher.tor_path", "${tor}/bin/tor");
lockPref("extensions.torlauncher.torrc-defaults_path", "$self/torrc-defaults");
// Captures store paths
clearPref("extensions.xpiState");
// Insist on using IPC for communicating with Tor
//
// Defaults to $XDG_RUNTIME_DIR/Tor/{socks,control}.socket
lockPref("extensions.torlauncher.control_port_use_ipc", true);
lockPref("extensions.torlauncher.socks_port_use_ipc", true);
EOF
# Preload extensions
install -m 444 -D \
${firefoxExtensions.tor-launcher}/tor-launcher-*.xpi \
browser/extensions/tor-launcher@torproject.org.xpi
install -m 444 -D \
${firefoxExtensions.torbutton}/torbutton-*.xpi \
browser/extensions/torbutton@torproject.org.xpi
install -m 444 -D \
${firefoxExtensions.https-everywhere}/https-everywhere-*-eff.xpi \
browser/extensions/https-everywhere-eff@eff.org.xpi
install -m 444 -D \
${firefoxExtensions.noscript} \
browser/extensions/{73a6fe31-595d-460b-a920-fcc0f8843232}.xpi
# Copy bundle data
cat \
$TBBUILD/Bundle-Data/linux/Data/Tor/torrc-defaults \
$TBBUILD/Bundle-Data/PTConfigs/linux/torrc-defaults-appendix \
>> torrc-defaults
cat \
$TBBUILD/Bundle-Data/linux/Data/Browser/profile.default/preferences/extension-overrides.js \
$TBBUILD/Bundle-Data/PTConfigs/bridge_prefs.js \
>> defaults/pref/extension-overrides.js
# Generate a suitable wrapper
mkdir -p $out/bin
cat >$out/bin/tor-browser <<EOF
#! ${stdenv.shell} -e
THE_HOME=\$HOME
TBB_HOME=\''${TBB_HOME:-\''${XDG_DATA_HOME:-$HOME/.local/share}/tor-browser}
mkdir -p "\$TBB_HOME"
HOME=\$TBB_HOME
cd "\$HOME"
exec $self/firefox -no-remote about:tor
EOF
chmod +x $out/bin/tor-browser
'';
meta = with stdenv.lib; {
description = "An unofficial version of the tor browser bundle, built from source";
homepage = https://torproject.org/;
license = licenses.unfreeRedistributable; # TODO: check this
platforms = [ "x86_64-linux" ];
hydraPlatforms = [ ];
maintainers = with maintainers; [ joachifm ];
};
}

View File

@ -2,13 +2,13 @@
stdenv.mkDerivation rec {
name = "cni-${version}";
version = "0.3.0";
version = "0.5.2";
src = fetchFromGitHub {
owner = "containernetworking";
repo = "cni";
rev = "v${version}";
sha256 = "1nvixvf5slnsdrfpfs2km64x680wf83jbyp7il12bcim37q2az7m";
sha256 = "0n2sc5xf1h0i54am80kj7imrvawddn0kxvgi65w0194dpmyrg5al";
};
buildInputs = [ go ];
@ -16,8 +16,8 @@ stdenv.mkDerivation rec {
outputs = ["out" "plugins"];
buildPhase = ''
patchShebangs build
./build
patchShebangs build.sh
./build.sh
'';
installPhase = ''

View File

@ -4,10 +4,10 @@ let
then "linux-amd64"
else "darwin-amd64";
checksum = if stdenv.isLinux
then "1hkr5s1c72sqf156lk6gsnbfs75jnpqs42f64a7mz046c06kv98f"
else "00xw0c66x58g915989fc72mwliysxi5glrkdafi3gcfmlhrnc68i";
then "1i22givr52kgr76dd2azcg9avgh70wiw5dcpmmyychms2ynxi42y"
else "0phhy3si86ilc6051zfgn8jnniy5lygf1r2gysjpcyfbrc5pw3hj";
pname = "helm";
version = "2.5.1";
version = "2.6.1";
in
stdenv.mkDerivation {
name = "${pname}-${version}";

View File

@ -8,9 +8,9 @@
"cmd/kube-controller-manager"
"cmd/kube-proxy"
"plugin/cmd/kube-scheduler"
"cmd/kube-dns"
"federation/cmd/federation-apiserver"
"federation/cmd/federation-controller-manager"
"test/e2e/e2e.test"
]
}:
@ -18,13 +18,13 @@ with lib;
stdenv.mkDerivation rec {
name = "kubernetes-${version}";
version = "1.5.6";
version = "1.7.1";
src = fetchFromGitHub {
owner = "kubernetes";
repo = "kubernetes";
rev = "v${version}";
sha256 = "0mkg4vgz9szgq1k5ignkdr5gmg703xlq8zsrr422a1qfqb8zp15w";
sha256 = "1frf2nxk45lsbkq73fj72gxgr76icqdrsdqh20f5gpwiqn23n7c3";
};
buildInputs = [ removeReferencesTo makeWrapper which go rsync go-bindata ];
@ -55,6 +55,10 @@ stdenv.mkDerivation rec {
cp build/pause/pause "$pause/bin/pause"
cp -R docs/man/man1 "$man/share/man"
cp cluster/addons/addon-manager/kube-addons.sh $out/bin/kube-addons
patchShebangs $out/bin/kube-addons
wrapProgram $out/bin/kube-addons --set "KUBECTL_BIN" "$out/bin/kubectl"
$out/bin/kubectl completion bash > $out/share/bash-completion/completions/kubectl
'';

View File

@ -1,13 +1,13 @@
{ stdenv, fetchurl, dbus, gnutls, wxGTK30, libidn, tinyxml, gettext
, pkgconfig, xdg_utils, gtk2, sqlite, pugixml, libfilezilla, nettle }:
let version = "3.27.0.1"; in
let version = "3.27.1"; in
stdenv.mkDerivation {
name = "filezilla-${version}";
src = fetchurl {
url = "mirror://sourceforge/project/filezilla/FileZilla_Client/${version}/FileZilla_${version}_src.tar.bz2";
sha256 = "1yis3lk23ymgqzvad7rhdcgipnh1nw98pk0kd7a01rlm7b9b6q90";
sha256 = "14lsplbp9fy7lk6cpwi3aj6jskz4j82h67x0fik82z1bns0zm2a3";
};
configureFlags = [

View File

@ -2,7 +2,7 @@
, vala, cmake, wrapGAppsHook, pkgconfig, gettext
, gobjectIntrospection, gnome3, glib, gdk_pixbuf, gtk3, glib_networking
, xorg, libXdmcp, libxkbcommon
, libnotify
, libnotify, libsoup
, libgcrypt
, epoxy
, at_spi2_core
@ -13,13 +13,13 @@
}:
stdenv.mkDerivation rec {
name = "dino-unstable-2017-06-21";
name = "dino-unstable-2017-09-26";
src = fetchFromGitHub {
owner = "dino";
repo = "dino";
rev = "3f0089db86e2057293a33453361678989919147f";
sha256 = "011wd6qi8nagig8418hibgnsmznd76dvp3p2dzzr4wyrb7d6cgcb";
rev = "9d8e1e88ec61403659a8cc410d5c4414e3bd3a96";
sha256 = "1p8sda99n8zsb49qd6wzwb8hddlgrzr2hp7il5v7yqxjjm2vgqfl";
fetchSubmodules = true;
};
@ -42,6 +42,7 @@ stdenv.mkDerivation rec {
libnotify
gpgme
libgcrypt
libsoup
pcre
xorg.libxcb
xorg.libpthreadstubs

View File

@ -2,11 +2,11 @@
stdenv.mkDerivation rec {
name= "riot-web-${version}";
version = "0.12.5";
version = "0.12.6";
src = fetchurl {
url = "https://github.com/vector-im/riot-web/releases/download/v${version}/riot-v${version}.tar.gz";
sha256 = "1g30gl4b5fk1h13r2v4rspcqic9jg99717lxplk5birg3wi3b2d3";
sha256 = "00hxjhnsm4622hv46xm7lc81kbnzi2iz77qppwma14cbh63jbilv";
};
installPhase = ''

View File

@ -7,20 +7,20 @@
mkDerivation rec {
name = "telegram-desktop-${version}";
version = "1.1.19";
version = "1.1.23";
# Submodules
src = fetchgit {
url = "git://github.com/telegramdesktop/tdesktop";
rev = "v${version}";
sha256 = "1zpl71k2lq861k89yp6nzkm4jm6szxrzigmmbxx63rh4v03di3b6";
sha256 = "0pdjrypjg015zvg8iydrja8kzvq0jsi1wz77r2cxvyyb4rkgyv7x";
fetchSubmodules = true;
};
tgaur = fetchgit {
url = "https://aur.archlinux.org/telegram-desktop-systemqt.git";
rev = "a4ba392309116003bc2b75c1c4c12dc733168d6f";
sha256 = "1n0yar8pm050770x36kjr4iap773xjigfbnrk289b51i5vijwhsv";
rev = "885d0594d8dfa0a17c14140579a3d27ef2b9bdd0";
sha256 = "0cdci8d8j3czhznp7gqn16w32j428njmzxr34pdsv40gggh0lbpn";
};
buildInputs = [
@ -93,7 +93,7 @@ mkDerivation rec {
installPhase = ''
install -Dm755 Telegram $out/bin/telegram-desktop
mkdir -p $out/share/applications $out/share/kde4/services
sed "s,/usr/bin,$out/bin,g" $tgaur/telegramdesktop.desktop > $out/share/applications/telegramdesktop.desktop
sed "s,/usr/bin,$out/bin,g" $tgaur/telegram-desktop.desktop > $out/share/applications/telegram-desktop.desktop
sed "s,/usr/bin,$out/bin,g" $tgaur/tg.protocol > $out/share/kde4/services/tg.protocol
for icon_size in 16 32 48 64 128 256 512; do
install -Dm644 "../../../Telegram/Resources/art/icon''${icon_size}.png" "$out/share/icons/hicolor/''${icon_size}x''${icon_size}/apps/telegram-desktop.png"

View File

@ -22,11 +22,11 @@ with stdenv.lib;
stdenv.mkDerivation rec {
name = "mutt-${version}";
version = "1.9.0";
version = "1.9.1";
src = fetchurl {
url = "http://ftp.mutt.org/pub/mutt/${name}.tar.gz";
sha256 = "1m72z5schbagd0a00fv8q0nrnkz9zrgvmdb5yplnmwm1sfapavgc";
sha256 = "1c8vv4anl555a03pbnwf8wnf0d8pcnd4p35y3q8f5ikkcflq76vl";
};
patchPhase = optionalString (openssl != null) ''

View File

@ -2,11 +2,11 @@
stdenv.mkDerivation rec {
name = "owncloud-client-${version}";
version = "2.3.2";
version = "2.3.3";
src = fetchurl {
url = "https://download.owncloud.com/desktop/stable/owncloudclient-${version}.tar.xz";
sha256 = "02az9wq0d1vsgcdipddipdjwj2faf7jag8hizwd0ha3sjlmrs6d1";
sha256 = "1r5ddln1wc9iyjizgqb104i0r6qhzsmm2wdnxfaif119cv0vphda";
};
nativeBuildInputs = [ pkgconfig cmake ];

View File

@ -1,6 +1,6 @@
{ stdenv, fetchurl, makeWrapper, glib
, fontconfig, patchelf, libXext, libX11
, freetype, libXrender, zlib
{ stdenv, fetchurl, makeWrapper, patchelf
, fontconfig, freetype, glib, libICE, libSM
, libX11, libXext, libXrender, zlib
}:
let
@ -12,15 +12,16 @@ let
else if stdenv.system == "i686-linux" then "ld-linux.so.2"
else throw "Spideroak client for: ${stdenv.system} not supported!";
sha256 = if stdenv.system == "x86_64-linux" then "88fd785647def79ee36621fa2a8a5bea73c513de03103f068dd10bc25f3cf356"
else if stdenv.system == "i686-linux" then "8c23271291f40aa144bbf38ceb3cc2a05bed00759c87a65bd798cf8bb289d07a"
sha256 = if stdenv.system == "x86_64-linux" then "0k87rn4aj0v79rz9jvwspnwzmh031ih0y74ra88nc8kl8j6b6gjm"
else if stdenv.system == "i686-linux" then "1wbxfikj8f7rx26asswqrfp9vpk8w5941s21y1pnaff2gcac8m3z"
else throw "Spideroak client for: ${stdenv.system} not supported!";
ldpath = stdenv.lib.makeLibraryPath [
glib fontconfig libXext libX11 freetype libXrender zlib
fontconfig freetype glib libICE libSM
libX11 libXext libXrender zlib
];
version = "6.0.1";
version = "6.1.9";
in stdenv.mkDerivation {
name = "spideroak-${version}";

View File

@ -0,0 +1,45 @@
{ stdenv, fetchFromGitHub, pkgs }:
stdenv.mkDerivation rec {
version = "2.9.5-1";
name = "testssl.sh-${version}";
src = fetchFromGitHub {
owner = "drwetter";
repo = "testssl.sh";
rev = "v${version}";
sha256 = "0hz6g685jwl0c0jrdca746425xpwiwc8lnlc2gigga5hkcq8qzl9";
};
nativeBuildInputs = with pkgs; [
makeWrapper
];
patches = [ ./testssl.patch ];
pwdBinPath = "${stdenv.lib.makeBinPath (with pkgs; [ coreutils ])}/pwd";
opensslBinPath = "${stdenv.lib.makeBinPath (with pkgs; [ openssl ])}/openssl";
postPatch = ''
sed -i -e "s|/bin/pwd|${pwdBinPath}|g" \
-e "s|TESTSSL_INSTALL_DIR:-\"\"|TESTSSL_INSTALL_DIR:-\"$out\"|g" \
-e "s|OPENSSL:-\"\"|OPENSSL:-\"${opensslBinPath}\"|g" \
testssl.sh
'';
installPhase = ''
mkdir -p $out/bin $out/etc
cp -r etc/ $out/
cp testssl.sh $out/bin/testssl.sh
'';
meta = with stdenv.lib; {
description = "CLI tool to check a server's TLS/SSL capabilities";
longDescription = ''
CLI tool which checks a server's service on any port for support of
TLS/SSL ciphers and protocols, as well as recent cryptographic flaws, and more.
'';
homepage = https://testssl.sh/;
license = licenses.gpl2;
maintainers = [ maintainers.etu ];
};
}

View File

@ -0,0 +1,10 @@
--- testssl/testssl.sh 2017-09-24 16:53:29.395263437 +0200
+++ testssl-new/testssl.sh 2017-09-24 16:53:41.221154492 +0200
@@ -165,6 +165,7 @@
# following variables make use of $ENV, e.g. OPENSSL=<myprivate_path_to_openssl> ./testssl.sh <host>
# 0 means (normally) true here. Some of the variables are also accessible with a command line switch, see --help
declare -x OPENSSL OPENSSL_TIMEOUT
+OPENSSL=${OPENSSL:-""}
FAST_SOCKET=${FAST_SOCKET:-false} # EXPERIMENTAL feature to accelerate sockets -- DO NOT USE it for production
COLOR=${COLOR:-2} # 2: Full color, 1: b/w+positioning, 0: no ESC at all
COLORBLIND=${COLORBLIND:-false} # if true, swap blue and green in the output

View File

@ -1,6 +1,6 @@
{ stdenv, lib, fetchgit, cmake
, opencv, gtest, openblas, liblapack
, cudaSupport ? false, cudatoolkit
, cudaSupport ? false, cudatoolkit, nvidia_x11
, cudnnSupport ? false, cudnn
}:
@ -20,11 +20,12 @@ stdenv.mkDerivation rec {
nativeBuildInputs = [ cmake ];
buildInputs = [ opencv gtest openblas liblapack ]
++ lib.optional cudaSupport cudatoolkit
++ lib.optionals cudaSupport [ cudatoolkit nvidia_x11 ]
++ lib.optional cudnnSupport cudnn;
cmakeFlags = lib.optional (!cudaSupport) "-DUSE_CUDA=OFF"
++ lib.optional (!cudnnSupport) "-DUSE_CUDNN=OFF";
cmakeFlags = [
(if cudaSupport then "-DCUDA_ARCH_NAME=All" else "-DUSE_CUDA=OFF")
] ++ lib.optional (!cudnnSupport) "-DUSE_CUDNN=OFF";
installPhase = ''
install -Dm755 libmxnet.so $out/lib/libmxnet.so
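
The `buildInputs` change above is why the hunk switches from `lib.optional` to `lib.optionals`: the former wraps a single value, the latter splices an existing list. A small illustration, with strings standing in for the real inputs:

```nix
with (import <nixpkgs> {}).lib;

let cuda = "cudatoolkit"; nvidia = "nvidia_x11"; in
{
  # optional wraps one value into a singleton list (or yields []):
  single = [ "opencv" ] ++ optional true cuda;
  # => [ "opencv" "cudatoolkit" ]

  # optionals splices a whole list (or yields []):
  several = [ "opencv" ] ++ optionals true [ cuda nvidia ];
  # => [ "opencv" "cudatoolkit" "nvidia_x11" ]

  # optional applied to a list would nest it one level too deep:
  nested = [ "opencv" ] ++ optional true [ cuda nvidia ];
  # => [ "opencv" [ "cudatoolkit" "nvidia_x11" ] ]
}
```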

View File

@ -15,8 +15,8 @@ stdenv.mkDerivation rec {
nativeBuildInputs = [ autoconf bison pkgconfig ];
preConfigure = ''
find . -exec sed -e 's@/bin/rm@${coreutils}&@g' -i '{}' ';'
find . -exec sed -e 's@/bin/uname@${coreutils}&@g' -i '{}' ';'
find . -type f -exec sed -e 's@/bin/rm@${coreutils}&@g' -i '{}' ';'
find . -type f -exec sed -e 's@/bin/uname@${coreutils}&@g' -i '{}' ';'
${stdenv.lib.optionalString asLibsingular ''NIX_CFLAGS_COMPILE="$NIX_CFLAGS_COMPILE -DLIBSINGULAR"''}
'';

View File

@ -13,7 +13,7 @@
}:
let
version = "2.14.1";
version = "2.14.2";
svn = subversionClient.override { perlBindings = true; };
in
@ -22,7 +22,7 @@ stdenv.mkDerivation {
src = fetchurl {
url = "https://www.kernel.org/pub/software/scm/git/git-${version}.tar.xz";
sha256 = "1iic3wiihxp3l3k6d4z886v3869c3dzgddjxnd5124wy1rnlqwkg";
sha256 = "18f70gfzwqd210806hmf94blcd7yv5h9ka6xqkpd2jhijqwp5sah";
};
hardeningDisable = [ "format" ];

View File

@ -1,18 +1,18 @@
{ stdenv, fetchurl, SDL, ftgl, pkgconfig, libpng, libjpeg, pcre
, SDL_image, freetype, glew, mesa, boost, glm
{ stdenv, fetchurl, SDL2, ftgl, pkgconfig, libpng, libjpeg, pcre
, SDL2_image, freetype, glew, mesa, boost, glm
}:
stdenv.mkDerivation rec {
version = "0.44";
version = "0.47";
name = "gource-${version}";
src = fetchurl {
url = "https://github.com/acaudwell/Gource/releases/download/${name}/${name}.tar.gz";
sha256 = "0z095zsf5pz8czh7nmlkdy29rm93w83sqyqspg2zsprh892cl116";
sha256 = "1llqwdnfa1pff8bxk27qsqff1fcg0a9kfdib0rn7p28vl21n1cgj";
};
buildInputs = [
glew SDL ftgl pkgconfig libpng libjpeg pcre SDL_image mesa
glew SDL2 ftgl pkgconfig libpng libjpeg pcre SDL2_image mesa
boost glm freetype
];

View File

@ -38,10 +38,11 @@ assert pulseSupport -> libpulseaudio != null;
assert rtmpSupport -> rtmpdump != null;
let
kodi_version = "17.4";
rel = "Krypton";
ffmpeg_3_1_6 = fetchurl {
url = "https://github.com/xbmc/FFmpeg/archive/3.1.6-${rel}.tar.gz";
sha256 = "14jicb26s20nr3qmfpazszpc892yjwjn81zbsb8szy3a5xs19y81";
ffmpeg_3_1_9 = fetchurl {
url = "https://github.com/xbmc/FFmpeg/archive/3.1.9-${rel}-${kodi_version}.tar.gz";
sha256 = "0rhjz505ljfg2jqbm3rd7qbcjq4vnp8h9a8vad8rjf84v3alglpa";
};
# Usage of kodi fork of libdvdnav and libdvdread is necessary for functional dvd playback:
libdvdnav_src = fetchurl {
@ -53,12 +54,12 @@ let
sha256 = "e7179b2054163652596a56301c9f025515cb08c6d6310b42b897c3ad11c0199b";
};
in stdenv.mkDerivation rec {
version = kodi_version;
name = "kodi-${version}";
version = "17.3";
src = fetchurl {
url = "https://github.com/xbmc/xbmc/archive/${version}-${rel}.tar.gz";
sha256 = "189isc1jagrnq549vwpvb0x1w6p0mkjwv7phm8dzvki96wx6bs0x";
sha256 = "1p1lxkapynjbd85ns7m4jybl4k35kxzv7105xkh03hlz8kkqc23b";
};
buildInputs = [
@ -101,7 +102,7 @@ in stdenv.mkDerivation rec {
--replace 'usr/share/zoneinfo' 'etc/zoneinfo'
substituteInPlace tools/depends/target/ffmpeg/autobuild.sh \
--replace "/bin/bash" "${bash}/bin/bash -ex"
cp ${ffmpeg_3_1_6} tools/depends/target/ffmpeg/ffmpeg-3.1.6-${rel}.tar.gz
cp ${ffmpeg_3_1_9} tools/depends/target/ffmpeg/ffmpeg-3.1.9-${rel}-${version}.tar.gz
ln -s ${libdvdcss.src} tools/depends/target/libdvdcss/libdvdcss-master.tar.gz
cp ${libdvdnav_src} tools/depends/target/libdvdnav/libdvdnav-master.tar.gz
cp ${libdvdread_src} tools/depends/target/libdvdread/libdvdread-master.tar.gz
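
The kodi hunk above pre-seeds tarballs (the xbmc ffmpeg fork, libdvdnav, libdvdread) that the vendored build scripts would otherwise try to download, by fetching them with `fetchurl` and copying them to the paths the build expects. A minimal sketch of the pattern, with an illustrative URL, path, and placeholder hash:

```nix
{ stdenv, fetchurl }:

let
  # Pre-fetch a tarball that the vendored build would otherwise download.
  bundledDep = fetchurl {
    url = "https://example.org/bundled-dep-1.0.tar.gz";                  # illustrative
    sha256 = "0000000000000000000000000000000000000000000000000000";    # placeholder
  };
in stdenv.mkDerivation {
  name = "some-app-1.0";   # illustrative
  src = ./.;               # illustrative
  postPatch = ''
    # Drop the prefetched tarball where the upstream download step looks for it,
    # so the sandboxed build never needs network access.
    cp ${bundledDep} tools/depends/bundled-dep-1.0.tar.gz
  '';
}
```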

View File

@ -6,13 +6,13 @@
stdenv.mkDerivation rec {
name = "open-vm-tools-${version}";
version = "10.1.0";
version = "10.1.10";
src = fetchFromGitHub {
owner = "vmware";
repo = "open-vm-tools";
rev = "stable-${version}";
sha256 = "1qzk4mvw618ca4j9agsfpqch9jgwghvdc4rpkvlyz8kirvh9iniz";
owner = "vmware";
repo = "open-vm-tools";
rev = "stable-${version}";
sha256 = "13ifpi53rc2463ka8xw9zx407d1fz119x8sb9k48g5mwxm6c85fm";
};
sourceRoot = "${src.name}/open-vm-tools";
@ -52,8 +52,8 @@ stdenv.mkDerivation rec {
homepage = https://github.com/vmware/open-vm-tools;
description = "Set of tools for VMWare guests to improve host-guest interaction";
longDescription = ''
A set of services and modules that enable several features in VMware products for
better management of, and seamless user interactions with, guests.
A set of services and modules that enable several features in VMware products for
better management of, and seamless user interactions with, guests.
'';
license = licenses.gpl2;
platforms = platforms.linux;

View File

@ -1,45 +1,64 @@
{ stdenv, fetchFromGitHub, pango, libinput
, makeWrapper, cmake, pkgconfig, asciidoc, libxslt, docbook_xsl, cairo
, wayland, wlc, libxkbcommon, pixman, fontconfig, pcre, json_c, dbus_libs, libcap
, xwayland, pam, gdk_pixbuf
{ stdenv, fetchFromGitHub
, makeWrapper, cmake, pkgconfig, asciidoc, libxslt, docbook_xsl
, wayland, wlc, libxkbcommon, pixman, fontconfig, pcre, json_c, dbus_libs
, pango, cairo, libinput, libcap, xwayland, pam, gdk_pixbuf, libpthreadstubs
, libXdmcp
}:
let
version = "0.13.0";
in
stdenv.mkDerivation rec {
name = "sway-${version}";
# Temporary workaround (0.14.0 segfaults)
wlc_009 = stdenv.lib.overrideDerivation wlc (oldAttrs: rec {
name = "wlc-${version}";
version = "0.0.9";
src = fetchFromGitHub {
owner = "Sircmpwn";
repo = "sway";
rev = "${version}";
sha256 = "1vgk4rl51nx66yzpwg4yhnbj7wc30k5q0hh5lf8y0i1nvpal0p3q";
owner = "Cloudef";
repo = "wlc";
rev = "v${version}";
fetchSubmodules = true;
sha256 = "1r6jf64gs7n9a8129wsc0mdwhcv44p8k87kg0714rhx3g2w22asg";
};
});
in stdenv.mkDerivation rec {
name = "sway-${version}";
nativeBuildInputs = [ makeWrapper cmake pkgconfig asciidoc libxslt docbook_xsl ];
src = fetchFromGitHub {
owner = "Sircmpwn";
repo = "sway";
rev = "${version}";
sha256 = "1vgk4rl51nx66yzpwg4yhnbj7wc30k5q0hh5lf8y0i1nvpal0p3q";
};
buildInputs = [ wayland wlc libxkbcommon pixman fontconfig pcre json_c dbus_libs pango cairo libinput libcap xwayland pam gdk_pixbuf ];
nativeBuildInputs = [
makeWrapper cmake pkgconfig
asciidoc libxslt docbook_xsl
];
buildInputs = [
wayland wlc_009 libxkbcommon pixman fontconfig pcre json_c dbus_libs
pango cairo libinput libcap xwayland pam gdk_pixbuf libpthreadstubs
libXdmcp
];
patchPhase = ''
sed -i s@/etc/sway@$out/etc/sway@g CMakeLists.txt;
'';
patchPhase = ''
sed -i s@/etc/sway@$out/etc/sway@g CMakeLists.txt;
'';
makeFlags = "PREFIX=$(out)";
cmakeFlags = "-DVERSION=${version}";
installPhase = "PREFIX=$out make install";
makeFlags = "PREFIX=$(out)";
cmakeFlags = "-DVERSION=${version}";
installPhase = "PREFIX=$out make install";
LD_LIBRARY_PATH = stdenv.lib.makeLibraryPath [ wlc dbus_libs ];
preFixup = ''
wrapProgram $out/bin/sway \
--prefix LD_LIBRARY_PATH : "${LD_LIBRARY_PATH}";
'';
LD_LIBRARY_PATH = stdenv.lib.makeLibraryPath [ wlc_009 dbus_libs ];
preFixup = ''
wrapProgram $out/bin/sway \
--prefix LD_LIBRARY_PATH : "${LD_LIBRARY_PATH}";
'';
meta = with stdenv.lib; {
description = "i3-compatible window manager for Wayland";
homepage = "http://swaywm.org";
license = licenses.mit;
platforms = platforms.linux;
maintainers = with maintainers; [ ];
};
}
meta = with stdenv.lib; {
description = "i3-compatible window manager for Wayland";
homepage = http://swaywm.org;
license = licenses.mit;
platforms = platforms.linux;
maintainers = with maintainers; [ primeos ]; # Trying to keep it up-to-date.
};
}
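
The `wlc_009` binding above pins wlc to 0.0.9 as a stop-gap for the 0.14.0 segfault, by overriding only the attributes that differ from the packaged wlc. A minimal sketch of the same `overrideDerivation` pattern, with illustrative names and a placeholder hash:

```nix
# Sketch: pin an existing package to an older upstream release.
somePackage_old = stdenv.lib.overrideDerivation somePackage (oldAttrs: rec {
  version = "0.0.9";
  name = "some-package-${version}";
  src = fetchFromGitHub {
    owner = "some-owner";    # illustrative
    repo = "some-package";   # illustrative
    rev = "v${version}";
    sha256 = "0000000000000000000000000000000000000000000000000000"; # placeholder
  };
});
```

`overrideAttrs` is usually preferable where available, but `overrideDerivation` is what the hunk above uses, so the sketch follows it.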

View File

@ -137,7 +137,7 @@ rec {
};
inherit fromImage fromImageName fromImageTag;
buildInputs = [ utillinux e2fsprogs jshon rsync ];
buildInputs = [ utillinux e2fsprogs jshon rsync jq ];
} ''
rm -rf $out
@ -146,44 +146,29 @@ rec {
mount /dev/${vmTools.hd} disk
cd disk
layers=""
if [[ -n "$fromImage" ]]; then
echo "Unpacking base image..."
mkdir image
tar -C image -xpf "$fromImage"
# If the image name isn't set, read it from the image repository json.
if [[ -z "$fromImageName" ]]; then
fromImageName=$(jshon -k < image/repositories | head -n 1)
echo "From-image name wasn't set. Read $fromImageName."
fi
# If the tag isn't set, use the name as an index into the json
# and read the first key found.
if [[ -z "$fromImageTag" ]]; then
fromImageTag=$(jshon -e $fromImageName -k < image/repositories \
| head -n1)
echo "From-image tag wasn't set. Read $fromImageTag."
fi
# Use the name and tag to get the parent ID field.
parentID=$(jshon -e $fromImageName -e $fromImageTag -u \
< image/repositories)
layers=$(jq -r '.[0].Layers | join(" ")' image/manifest.json)
fi
# Unpack all of the parent layers into the image.
# Unpack all of the layers into the image.
# Layer list is ordered starting from the base image
lowerdir=""
while [[ -n "$parentID" ]]; do
echo "Unpacking layer $parentID"
mkdir -p image/$parentID/layer
tar -C image/$parentID/layer -xpf image/$parentID/layer.tar
rm image/$parentID/layer.tar
for layer in $layers; do
echo "Unpacking layer $layer"
layerDir=image/$(echo $layer | cut -d':' -f2)"_unpacked"
mkdir -p $layerDir
tar -C $layerDir -xpf image/$layer
chmod a+w image/$layer
rm image/$layer
find image/$parentID/layer -name ".wh.*" -exec bash -c 'name="$(basename {}|sed "s/^.wh.//")"; mknod "$(dirname {})/$name" c 0 0; rm {}' \;
find $layerDir -name ".wh.*" -exec bash -c 'name="$(basename {}|sed "s/^.wh.//")"; mknod "$(dirname {})/$name" c 0 0; rm {}' \;
# Get the next lower directory and continue the loop.
lowerdir=$lowerdir''${lowerdir:+:}image/$parentID/layer
parentID=$(cat image/$parentID/json \
| (jshon -e parent -u 2>/dev/null || true))
lowerdir=$lowerdir''${lowerdir:+:}$layerDir
done
mkdir work
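
In the rewritten loop above, the layer list comes straight from the base image's manifest.json, whose first entry's `Layers` field (as produced by `docker save`) lists layer tarballs in base-to-top order. A sketch of that jq read, assuming `baseImage` is a path to such a tarball and `pkgs` is an imported nixpkgs:

```nix
# Sketch: extract the ordered layer list from a `docker save`-style tarball.
pkgs.runCommand "layer-list" { buildInputs = [ pkgs.jq ]; } ''
  mkdir image
  tar -C image -xpf ${baseImage}
  # manifest.json holds one entry per image; Layers is ordered base -> top.
  jq -r '.[0].Layers | join(" ")' image/manifest.json > $out
''
```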
@ -461,26 +446,17 @@ rec {
mkdir image
touch baseFiles
layers=""
if [[ -n "$fromImage" ]]; then
echo "Unpacking base image..."
tar -C image -xpf "$fromImage"
# Do not import the base image configuration and manifest
chmod a+w image image/*.json
rm -f image/*.json
if [[ -z "$fromImageName" ]]; then
fromImageName=$(jshon -k < image/repositories|head -n1)
fi
if [[ -z "$fromImageTag" ]]; then
fromImageTag=$(jshon -e $fromImageName -k \
< image/repositories|head -n1)
fi
parentID=$(jshon -e $fromImageName -e $fromImageTag -u \
< image/repositories)
for l in image/*/layer.tar; do
ls_tar $l >> baseFiles
config=$(jq -r '.[0].Config' image/manifest.json)
layers=$(jq -r '.[0].Layers | join(" ")' image/manifest.json)
for l in $layers; do
ls_tar image/$l >> baseFiles
done
chmod u+w image image/$config
rm image/$config
fi
chmod -R ug+rw image
@ -507,47 +483,28 @@ rec {
tar -rpf temp/layer.tar --mtime="@$SOURCE_DATE_EPOCH" \
--owner=0 --group=0 --no-recursion --files-from newFiles
echo "Adding meta..."
gzip temp/layer.tar
layerID="sha256:$(sha256sum temp/layer.tar.gz | cut -d ' ' -f 1)"
mv temp/layer.tar.gz image/$layerID
# If we have a parentID, add it to the json metadata.
if [[ -n "$parentID" ]]; then
cat temp/json | jshon -s "$parentID" -i parent > tmpjson
mv tmpjson temp/json
fi
# Take the sha256 sum of the generated json and use it as the layer ID.
# Compute the size and add it to the json under the 'Size' field.
layerID=$(sha256sum temp/json|cut -d ' ' -f 1)
size=$(stat --printf="%s" temp/layer.tar)
cat temp/json | jshon -s "$layerID" -i id -n $size -i Size > tmpjson
mv tmpjson temp/json
# Use the temp folder we've been working on to create a new image.
mv temp image/$layerID
# Create image json and image manifest
echo "Generating image configuration and manifest..."
imageJson=$(cat ${baseJson} | jq ". + {\"rootfs\": {\"diff_ids\": [], \"type\": \"layers\"}}")
manifestJson=$(jq -n "[{\"RepoTags\":[\"$imageName:$imageTag\"]}]")
currentID=$layerID
while [[ -n "$currentID" ]]; do
layerChecksum=$(sha256sum image/$currentID/layer.tar | cut -d ' ' -f1)
imageJson=$(echo "$imageJson" | jq ".history |= [{\"created\": \"${created}\"}] + .")
imageJson=$(echo "$imageJson" | jq ".rootfs.diff_ids |= [\"sha256:$layerChecksum\"] + .")
manifestJson=$(echo "$manifestJson" | jq ".[0].Layers |= [\"$currentID/layer.tar\"] + .")
currentID=$(cat image/$currentID/json | (jshon -e parent -u 2>/dev/null || true))
# The layer list is ordered starting from the base image
layers=$(echo $layers $layerID)
for i in $(echo $layers); do
imageJson=$(echo "$imageJson" | jq ".history |= [{\"created\": \"${created}\"}] + .")
diffId=$(gzip -dc image/$i | sha256sum | cut -d" " -f1)
imageJson=$(echo "$imageJson" | jq ".rootfs.diff_ids |= [\"sha256:$diffId\"] + .")
manifestJson=$(echo "$manifestJson" | jq ".[0].Layers |= [\"$i\"] + .")
done
imageJsonChecksum=$(echo "$imageJson" | sha256sum | cut -d ' ' -f1)
echo "$imageJson" > "image/$imageJsonChecksum.json"
manifestJson=$(echo "$manifestJson" | jq ".[0].Config = \"$imageJsonChecksum.json\"")
echo "$imageJson" > "image/sha256:$imageJsonChecksum"
manifestJson=$(echo "$manifestJson" | jq ".[0].Config = \"sha256:$imageJsonChecksum\"")
echo "$manifestJson" > image/manifest.json
# Store the json under the name image/repositories.
jshon -n object \
-n object -s "$layerID" -i "$imageTag" \
-i "$imageName" > image/repositories
# Make the image read-only.
chmod -R a-w image
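
The metadata step above hashes the *uncompressed* contents of each gzipped layer to obtain its diff_id, which is what the image configuration's `rootfs.diff_ids` field expects. A self-contained sketch of that computation on a throw-away layer (`pkgs` again stands for an imported nixpkgs):

```nix
# Sketch: how a layer's diff_id is derived from a gzip-compressed layer tarball.
pkgs.runCommand "diff-id-demo" { buildInputs = [ pkgs.jq ]; } ''
  # Build a tiny layer to stand in for image/sha256:<...>.
  mkdir layer
  echo hello > layer/hello.txt
  tar -C layer -cf layer.tar .
  gzip layer.tar

  # diff_id is the sha256 of the uncompressed tar, not of the .gz file.
  diffId=$(gzip -dc layer.tar.gz | sha256sum | cut -d ' ' -f1)
  jq -n "{rootfs: {type: \"layers\", diff_ids: [\"sha256:$diffId\"]}}" > $out
''
```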

View File

@ -107,11 +107,13 @@ rec {
nix = buildImageWithNixDb {
name = "nix";
contents = [
# nix-store -qR uses the 'more' program which is not included in
# the pkgs.nix dependencies. We then have to manually get it
# from the 'eject' package:/
pkgs.eject
# nix-store uses cat program to display results as specified by
# the image env variable NIX_PAGER.
pkgs.coreutils
pkgs.nix
];
config = {
Env = [ "NIX_PAGER=cat" ];
};
};
}
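
The example above swaps `pkgs.eject` for `pkgs.coreutils` because nix-store only needs a pager, and `NIX_PAGER=cat` in the image environment points it at `cat` from coreutils. The same ingredients also work with plain `buildImage`; a minimal sketch, with an illustrative image name:

```nix
pkgs.dockerTools.buildImage {
  name = "nix-with-cat-pager";   # illustrative name
  contents = [ pkgs.coreutils pkgs.nix ];
  # nix-store pipes query output through this instead of the missing `more`.
  config.Env = [ "NIX_PAGER=cat" ];
}
```

After `docker load`ing the result, `nix-store` queries inside a container from this image write their output through `cat` rather than failing for lack of a pager.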

Some files were not shown because too many files have changed in this diff.