import ./make-test-python.nix ({pkgs, lib, ...}:

let
# Static identities for the single test cluster: one monitor and three
# OSDs, each with a pre-generated cephx key and UUID so the test is
# deterministic across runs.
cfg = {
  clusterId = "066ae264-2a5d-4729-8001-6ad265f50b03";
  monA = {
    name = "a";
    ip = "192.168.1.1";
  };
  osd0 = {
    name = "0";
    key = "AQBCEJNa3s8nHRAANvdsr93KqzBznuIWm2gOGg==";
    uuid = "55ba2294-3e24-478f-bee0-9dca4c231dd9";
  };
  osd1 = {
    name = "1";
    key = "AQBEEJNac00kExAAXEgy943BGyOpVH1LLlHafQ==";
    uuid = "5e97a838-85b6-43b0-8950-cb56d554d1e5";
  };
  osd2 = {
    name = "2";
    key = "AQAdyhZeIaUlARAAGRoidDAmS6Vkp546UFEf5w==";
    uuid = "ea999274-13d0-4dd5-9af9-ad25a324f72f";
  };
};
# Build a `services.ceph` module config: the cluster-wide global section
# (fsid and the initial monitor) merged with the per-daemon settings
# supplied by the caller.
generateCephConfig = { daemonConfig }: {
  enable = true;
  global = {
    fsid = cfg.clusterId;
    monHost = cfg.monA.ip;
    monInitialMembers = cfg.monA.name;
  };
} // daemonConfig;
# NixOS machine definition for a cluster node: three empty 20 GiB disks
# (one per OSD), a single test VLAN, the tooling the test script shells
# out to, and the ceph service configuration for this host.
generateHost = { pkgs, cephConfig, networkConfig, ... }: {
  virtualisation = {
    emptyDiskImages = [ 20480 20480 20480 ];
    vlans = [ 1 ];
  };

  networking = networkConfig;

  environment.systemPackages = with pkgs; [
    bash
    sudo
    ceph
    xfsprogs
  ];

  # The OSD backing filesystems are XFS; make sure the module is available.
  boot.kernelModules = [ "xfs" ];

  services.ceph = cephConfig;
};
# Static network config for the monitor node: no DHCP, fixed address on
# eth1 (mkOverride 0 wins over the test framework's default addressing).
networkMonA = {
  dhcpcd.enable = false;
  interfaces.eth1.ipv4.addresses = pkgs.lib.mkOverride 0 [
    { address = cfg.monA.ip; prefixLength = 24; }
  ];
};
# Single-node cluster: monA runs the monitor, the manager and all three
# OSD daemons.
cephConfigMonA = generateCephConfig { daemonConfig = {
  mon = {
    enable = true;
    daemons = [ cfg.monA.name ];
  };
  mgr = {
    enable = true;
    daemons = [ cfg.monA.name ];
  };
  osd = {
    enable = true;
    daemons = [ cfg.osd0.name cfg.osd1.name cfg.osd2.name ];
  };
}; };
# Following deployment is based on the manual deployment described here:
# https://docs.ceph.com/docs/master/install/manual-deployment/
# For other ways to deploy a ceph cluster, look at the documentation at
# https://docs.ceph.com/docs/master/
testscript = { ... }: ''
  start_all()

  monA.wait_for_unit("network.target")

  # Bootstrap ceph-mon daemon
  monA.succeed(
      "sudo -u ceph ceph-authtool --create-keyring /tmp/ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *'",
      "sudo -u ceph ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring --gen-key -n client.admin --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow *' --cap mgr 'allow *'",
      "sudo -u ceph ceph-authtool /tmp/ceph.mon.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring",
      "monmaptool --create --add ${cfg.monA.name} ${cfg.monA.ip} --fsid ${cfg.clusterId} /tmp/monmap",
      "sudo -u ceph ceph-mon --mkfs -i ${cfg.monA.name} --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring",
      "sudo -u ceph touch /var/lib/ceph/mon/ceph-${cfg.monA.name}/done",
      "systemctl start ceph-mon-${cfg.monA.name}",
  )
  monA.wait_for_unit("ceph-mon-${cfg.monA.name}")
  monA.succeed("ceph mon enable-msgr2")
  monA.succeed("ceph config set mon auth_allow_insecure_global_id_reclaim false")

  # Can't check ceph status until a mon is up
  monA.succeed("ceph -s | grep 'mon: 1 daemons'")

  # Start the ceph-mgr daemon, after copying in the keyring
  monA.succeed(
      "sudo -u ceph mkdir -p /var/lib/ceph/mgr/ceph-${cfg.monA.name}/",
      "ceph auth get-or-create mgr.${cfg.monA.name} mon 'allow profile mgr' osd 'allow *' mds 'allow *' > /var/lib/ceph/mgr/ceph-${cfg.monA.name}/keyring",
      "systemctl start ceph-mgr-${cfg.monA.name}",
  )
  monA.wait_for_unit("ceph-mgr-${cfg.monA.name}")
  monA.wait_until_succeeds("ceph -s | grep 'quorum ${cfg.monA.name}'")
  monA.wait_until_succeeds("ceph -s | grep 'mgr: ${cfg.monA.name}(active,'")

  # Bootstrap OSDs
  monA.succeed(
      "mkfs.xfs /dev/vdb",
      "mkfs.xfs /dev/vdc",
      "mkfs.xfs /dev/vdd",
      "mkdir -p /var/lib/ceph/osd/ceph-${cfg.osd0.name}",
      "mount /dev/vdb /var/lib/ceph/osd/ceph-${cfg.osd0.name}",
      "mkdir -p /var/lib/ceph/osd/ceph-${cfg.osd1.name}",
      "mount /dev/vdc /var/lib/ceph/osd/ceph-${cfg.osd1.name}",
      "mkdir -p /var/lib/ceph/osd/ceph-${cfg.osd2.name}",
      "mount /dev/vdd /var/lib/ceph/osd/ceph-${cfg.osd2.name}",
      "ceph-authtool --create-keyring /var/lib/ceph/osd/ceph-${cfg.osd0.name}/keyring --name osd.${cfg.osd0.name} --add-key ${cfg.osd0.key}",
      "ceph-authtool --create-keyring /var/lib/ceph/osd/ceph-${cfg.osd1.name}/keyring --name osd.${cfg.osd1.name} --add-key ${cfg.osd1.key}",
      "ceph-authtool --create-keyring /var/lib/ceph/osd/ceph-${cfg.osd2.name}/keyring --name osd.${cfg.osd2.name} --add-key ${cfg.osd2.key}",
      'echo \'{"cephx_secret": "${cfg.osd0.key}"}\' | ceph osd new ${cfg.osd0.uuid} -i -',
      'echo \'{"cephx_secret": "${cfg.osd1.key}"}\' | ceph osd new ${cfg.osd1.uuid} -i -',
      'echo \'{"cephx_secret": "${cfg.osd2.key}"}\' | ceph osd new ${cfg.osd2.uuid} -i -',
  )

  # Initialize the OSDs with regular filestore
  monA.succeed(
      "ceph-osd -i ${cfg.osd0.name} --mkfs --osd-uuid ${cfg.osd0.uuid}",
      "ceph-osd -i ${cfg.osd1.name} --mkfs --osd-uuid ${cfg.osd1.uuid}",
      "ceph-osd -i ${cfg.osd2.name} --mkfs --osd-uuid ${cfg.osd2.uuid}",
      "chown -R ceph:ceph /var/lib/ceph/osd",
      "systemctl start ceph-osd-${cfg.osd0.name}",
      "systemctl start ceph-osd-${cfg.osd1.name}",
      "systemctl start ceph-osd-${cfg.osd2.name}",
  )
  monA.wait_until_succeeds("ceph osd stat | grep -e '3 osds: 3 up[^,]*, 3 in'")
  monA.wait_until_succeeds("ceph -s | grep 'mgr: ${cfg.monA.name}(active,'")
  monA.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")

  # Basic pool operations: create, list, rename
  monA.succeed(
      "ceph osd pool create single-node-test 32 32",
      "ceph osd pool ls | grep 'single-node-test'",
      "ceph osd pool rename single-node-test single-node-other-test",
      "ceph osd pool ls | grep 'single-node-other-test'",
  )
  monA.wait_until_succeeds("ceph -s | grep '2 pools, 33 pgs'")

  # With a single host, the default CRUSH rule (replicate across hosts)
  # can never be satisfied; rewrite it to replicate across OSDs instead.
  monA.succeed(
      "ceph osd getcrushmap -o crush",
      "crushtool -d crush -o decrushed",
      "sed 's/step chooseleaf firstn 0 type host/step chooseleaf firstn 0 type osd/' decrushed > modcrush",
      "crushtool -c modcrush -o recrushed",
      "ceph osd setcrushmap -i recrushed",
      "ceph osd pool set single-node-other-test size 2",
  )
  monA.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
  monA.wait_until_succeeds("ceph -s | grep '33 active+clean'")
  monA.fail(
      "ceph osd pool ls | grep 'multi-node-test'",
      "ceph osd pool delete single-node-other-test single-node-other-test --yes-i-really-really-mean-it",
  )

  # Shut down ceph by stopping ceph.target.
  monA.succeed("systemctl stop ceph.target")

  # Start it up
  monA.succeed("systemctl start ceph.target")
  monA.wait_for_unit("ceph-mon-${cfg.monA.name}")
  monA.wait_for_unit("ceph-mgr-${cfg.monA.name}")
  monA.wait_for_unit("ceph-osd-${cfg.osd0.name}")
  monA.wait_for_unit("ceph-osd-${cfg.osd1.name}")
  monA.wait_for_unit("ceph-osd-${cfg.osd2.name}")

  # Ensure the cluster comes back up again
  monA.succeed("ceph -s | grep 'mon: 1 daemons'")
  monA.wait_until_succeeds("ceph -s | grep 'quorum ${cfg.monA.name}'")
  monA.wait_until_succeeds("ceph osd stat | grep -e '3 osds: 3 up[^,]*, 3 in'")
  monA.wait_until_succeeds("ceph -s | grep 'mgr: ${cfg.monA.name}(active,'")
  monA.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")

  # This test has been commented out due to the upstream issue with pyo3
  # that has broken this dashboard
  # Reference: https://www.spinics.net/lists/ceph-users/msg77812.html
  # Enable the dashboard and recheck health
  # monA.succeed(
  #     "ceph mgr module enable dashboard",
  #     "ceph config set mgr mgr/dashboard/ssl false",
  #     # default is 8080 but it's better to be explicit
  #     "ceph config set mgr mgr/dashboard/server_port 8080",
  # )
  # monA.wait_for_open_port(8080)
  # monA.wait_until_succeeds("curl -q --fail http://localhost:8080")
  # monA.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
'';
in {
  name = "basic-single-node-ceph-cluster";
  meta = with pkgs.lib.maintainers; {
    maintainers = [ lejonet johanot ];
  };

  nodes = {
    monA = generateHost { pkgs = pkgs; cephConfig = cephConfigMonA; networkConfig = networkMonA; };
  };

  testScript = testscript;
})