mirror of
https://github.com/NixOS/nixpkgs.git
synced 2024-12-25 15:13:46 +00:00
4f0dadbf38
After final improvements to the official formatter implementation, this commit now performs the first treewide reformat of Nix files using it. This is part of the implementation of RFC 166. Only "inactive" files are reformatted, meaning only files that aren't being touched by any PR with activity in the past 2 months. This is to avoid conflicts for PRs that might soon be merged. Later we can do a full treewide reformat to get the rest, which should not cause as many conflicts. A CI check has already been running for some time to ensure that new and already-formatted files are formatted, so the files being reformatted here should also stay formatted. This commit was automatically created and can be verified using:

    nix-build https://github.com/infinisil/treewide-nixpkgs-reformat-script/archive/a08b3a4d19.tar.gz \
      --argstr baseRev b32a094368
    result/bin/apply-formatting $NIXPKGS_PATH
119 lines
3.8 KiB
Nix
119 lines
3.8 KiB
Nix
# NixOS VM test: a three-node corosync/pacemaker cluster.
#
# Each node gets a static address on eth1, joins the corosync cluster
# "zentralwerk-network" with a shared (test-only) authkey, and runs a
# trivial "ha-cat" systemd service that pacemaker manages as a resource.
# The test script registers the resource, crashes the node currently
# running it, and asserts that pacemaker migrates it to a surviving node.
import ./make-test-python.nix (
  { pkgs, lib, ... }:

  # `rec` so `testScript` and the corosync nodelist can refer to `nodes`.
  rec {
    name = "pacemaker";
    meta = with pkgs.lib.maintainers; {
      maintainers = [ astro ];
    };

    nodes =
      let
        # Shared configuration for cluster member `i` (used for node1..node3).
        node = i: {
          networking.interfaces.eth1.ipv4.addresses = [
            {
              address = "192.168.0.${toString i}";
              prefixLength = 24;
            }
          ];

          services.corosync = {
            enable = true;
            clusterName = "zentralwerk-network";
            # Build the nodelist from all defined test nodes, using each
            # node's eth1 address as its ring address.
            nodelist = lib.imap (i: name: {
              nodeid = i;
              inherit name;
              ring_addrs = [
                (builtins.head nodes.${name}.networking.interfaces.eth1.ipv4.addresses).address
              ];
            }) (builtins.attrNames nodes);
          };

          # World-unreadable shared cluster key; a fixed dummy value is fine
          # for an isolated VM test.
          environment.etc."corosync/authkey" = {
            source =
              builtins.toFile "authkey"
                # minimum length: 128 bytes
                "testtesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttest";
            mode = "0400";
          };

          services.pacemaker.enable = true;

          # used for pacemaker resource
          systemd.services.ha-cat = {
            description = "Highly available netcat";
            serviceConfig.ExecStart = "${pkgs.netcat}/bin/nc -l discard";
          };
        };
      in
      {
        node1 = node 1;
        node2 = node 2;
        node3 = node 3;
      };

    # sets up pacemaker with resources configuration, then crashes a
    # node and waits for service restart on another node
    testScript =
      let
        # CIB resource definition managed via `cibadmin --replace`.
        # NOTE(review): the op with id="stop-cat" has name="start" — this
        # looks like it was meant to be name="stop"; kept as-is to preserve
        # behavior, but worth confirming upstream.
        resources = builtins.toFile "cib-resources.xml" ''
          <resources>
            <primitive id="cat" class="systemd" type="ha-cat">
              <operations>
                <op id="stop-cat" name="start" interval="0" timeout="1s"/>
                <op id="start-cat" name="start" interval="0" timeout="1s"/>
                <op id="monitor-cat" name="monitor" interval="1s" timeout="1s"/>
              </operations>
            </primitive>
          </resources>
        '';
      in
      ''
        import re
        import time

        start_all()

        ${lib.concatMapStrings (node: ''
          ${node}.wait_until_succeeds("corosync-quorumtool")
          ${node}.wait_for_unit("pacemaker.service")
        '') (builtins.attrNames nodes)}

        # No STONITH device
        node1.succeed("crm_attribute -t crm_config -n stonith-enabled -v false")
        # Configure the cat resource
        node1.succeed("cibadmin --replace --scope resources --xml-file ${resources}")

        # wait until the service is started
        while True:
            output = node1.succeed("crm_resource -r cat --locate")
            match = re.search("is running on: (.+)", output)
            if match:
                for machine in machines:
                    if machine.name == match.group(1):
                        current_node = machine
                break
            time.sleep(1)

        current_node.log("Service running here!")
        current_node.crash()

        # pick another node that's still up
        for machine in machines:
            if machine.booted:
                check_node = machine

        # find where the service has been started next
        while True:
            output = check_node.succeed("crm_resource -r cat --locate")
            match = re.search("is running on: (.+)", output)
            # output will remain the old current_node until the crash is detected by pacemaker
            if match and match.group(1) != current_node.name:
                for machine in machines:
                    if machine.name == match.group(1):
                        next_node = machine
                break
            time.sleep(1)

        next_node.log("Service migrated here!")
      '';
  }
)
|