Merge pull request #56789 from mayflower/upstream-k8s-refactor
nixos/kubernetes: stabilize cluster deployment/startup across machines
This commit is contained in: commit 7dc6e77bc2
@@ -63,18 +63,49 @@ in
     };

     enable = mkEnableOption "Whether to enable Kubernetes addon manager.";

+    kubeconfig = top.lib.mkKubeConfigOptions "Kubernetes addon manager";
+    bootstrapAddonsKubeconfig = top.lib.mkKubeConfigOptions "Kubernetes addon manager bootstrap";
   };

   ###### implementation
-  config = mkIf cfg.enable {
+  config = let
+
+    addonManagerPaths = filter (a: a != null) [
+      cfg.kubeconfig.caFile
+      cfg.kubeconfig.certFile
+      cfg.kubeconfig.keyFile
+    ];
+    bootstrapAddonsPaths = filter (a: a != null) [
+      cfg.bootstrapAddonsKubeconfig.caFile
+      cfg.bootstrapAddonsKubeconfig.certFile
+      cfg.bootstrapAddonsKubeconfig.keyFile
+    ];
+
+  in mkIf cfg.enable {
     environment.etc."kubernetes/addons".source = "${addons}/";

+    #TODO: Get rid of kube-addon-manager in the future for the following reasons
+    # - it is basically just a shell script wrapped around kubectl
+    # - it assumes that it is clusterAdmin or can gain clusterAdmin rights through serviceAccount
+    # - it is designed to be used with k8s system components only
+    # - it would be better with a more Nix-oriented way of managing addons
     systemd.services.kube-addon-manager = {
       description = "Kubernetes addon manager";
       wantedBy = [ "kubernetes.target" ];
-      after = [ "kube-apiserver.service" ];
-      environment.ADDON_PATH = "/etc/kubernetes/addons/";
-      path = [ pkgs.gawk ];
+      after = [ "kube-node-online.target" ];
+      before = [ "kubernetes.target" ];
+      environment = {
+        ADDON_PATH = "/etc/kubernetes/addons/";
+        KUBECONFIG = top.lib.mkKubeConfig "kube-addon-manager" cfg.kubeconfig;
+      };
+      path = with pkgs; [ gawk kubectl ];
+      preStart = ''
+        until kubectl -n kube-system get serviceaccounts/default 2>/dev/null; do
+          echo kubectl -n kube-system get serviceaccounts/default: exit status $?
+          sleep 2
+        done
+      '';
       serviceConfig = {
         Slice = "kubernetes.slice";
         ExecStart = "${top.package}/bin/kube-addons";

@@ -84,8 +115,52 @@ in
         Restart = "on-failure";
         RestartSec = 10;
       };
+      unitConfig.ConditionPathExists = addonManagerPaths;
     };

+    systemd.paths.kube-addon-manager = {
+      wantedBy = [ "kube-addon-manager.service" ];
+      pathConfig = {
+        PathExists = addonManagerPaths;
+        PathChanged = addonManagerPaths;
+      };
+    };
+
+    services.kubernetes.addonManager.kubeconfig.server = mkDefault top.apiserverAddress;
+
+    systemd.services.kube-addon-manager-bootstrap = mkIf (top.apiserver.enable && top.addonManager.bootstrapAddons != {}) {
+      wantedBy = [ "kube-control-plane-online.target" ];
+      after = [ "kube-apiserver.service" ];
+      before = [ "kube-control-plane-online.target" ];
+      path = [ pkgs.kubectl ];
+      environment = {
+        KUBECONFIG = top.lib.mkKubeConfig "kube-addon-manager-bootstrap" cfg.bootstrapAddonsKubeconfig;
+      };
+      preStart = with pkgs; let
+        files = mapAttrsToList (n: v: writeText "${n}.json" (builtins.toJSON v))
+          cfg.bootstrapAddons;
+      in ''
+        until kubectl auth can-i '*' '*' -q 2>/dev/null; do
+          echo kubectl auth can-i '*' '*': exit status $?
+          sleep 2
+        done
+
+        kubectl apply -f ${concatStringsSep " \\\n -f " files}
+      '';
+      script = "echo Ok";
+      unitConfig.ConditionPathExists = bootstrapAddonsPaths;
+    };
+
+    systemd.paths.kube-addon-manager-bootstrap = {
+      wantedBy = [ "kube-addon-manager-bootstrap.service" ];
+      pathConfig = {
+        PathExists = bootstrapAddonsPaths;
+        PathChanged = bootstrapAddonsPaths;
+      };
+    };
+
+    services.kubernetes.addonManager.bootstrapAddonsKubeconfig.server = mkDefault top.apiserverAddress;
+
+    services.kubernetes.addonManager.bootstrapAddons = mkIf isRBACEnabled
+      (let
+        name = "system:kube-addon-manager";
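
The two hunks above establish the pattern this PR applies to every Kubernetes component: a unit declares ConditionPathExists on its certificate files so it refuses to start before the PKI has delivered them, and a companion systemd path unit activates it once those files appear or change. A minimal, self-contained sketch of that gating pattern, with a hypothetical `example` service and made-up certificate paths that are not taken from this commit:

    { pkgs, ... }:
    let
      # Hypothetical certificate locations; in the PR these are derived from cfg.kubeconfig.
      certPaths = [ "/var/lib/secrets/ca.pem" "/var/lib/secrets/client.pem" ];
    in {
      systemd.services.example = {
        wantedBy = [ "multi-user.target" ];
        # systemd skips (rather than fails) the unit while any listed file is missing.
        unitConfig.ConditionPathExists = certPaths;
        serviceConfig.ExecStart = "${pkgs.coreutils}/bin/true";
      };

      # The path unit watches the same files and starts the service once they
      # all exist, and again whenever one of them changes (e.g. on cert renewal).
      systemd.paths.example = {
        wantedBy = [ "multi-user.target" ];
        pathConfig = {
          PathExists = certPaths;
          PathChanged = certPaths;
        };
      };
    }
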

@@ -169,6 +169,23 @@ in {
       };
     };

+    kubernetes-dashboard-cm = {
+      apiVersion = "v1";
+      kind = "ConfigMap";
+      metadata = {
+        labels = {
+          k8s-app = "kubernetes-dashboard";
+          # Allows editing resource and makes sure it is created first.
+          "addonmanager.kubernetes.io/mode" = "EnsureExists";
+        };
+        name = "kubernetes-dashboard-settings";
+        namespace = "kube-system";
+      };
+    };
   };

-  services.kubernetes.addonManager.bootstrapAddons = {
+  services.kubernetes.addonManager.bootstrapAddons = mkMerge [{

     kubernetes-dashboard-sa = {
       apiVersion = "v1";
       kind = "ServiceAccount";

@@ -210,20 +227,9 @@ in {
       };
       type = "Opaque";
     };
-    kubernetes-dashboard-cm = {
-      apiVersion = "v1";
-      kind = "ConfigMap";
-      metadata = {
-        labels = {
-          k8s-app = "kubernetes-dashboard";
-          # Allows editing resource and makes sure it is created first.
-          "addonmanager.kubernetes.io/mode" = "EnsureExists";
-        };
-        name = "kubernetes-dashboard-settings";
-        namespace = "kube-system";
-      };
-    };
-  } // (optionalAttrs cfg.rbac.enable
+  }
+
+  (optionalAttrs cfg.rbac.enable
     (let
       subjects = [{
         kind = "ServiceAccount";

@@ -323,6 +329,6 @@ in {
       inherit subjects;
     };
   })
-  ));
+  ))];
 };
}

@@ -272,11 +272,32 @@ in
   ###### implementation
   config = mkMerge [

-    (mkIf cfg.enable {
+    (let
+
+      apiserverPaths = filter (a: a != null) [
+        cfg.clientCaFile
+        cfg.etcd.caFile
+        cfg.etcd.certFile
+        cfg.etcd.keyFile
+        cfg.kubeletClientCaFile
+        cfg.kubeletClientCertFile
+        cfg.kubeletClientKeyFile
+        cfg.serviceAccountKeyFile
+        cfg.tlsCertFile
+        cfg.tlsKeyFile
+      ];
+      etcdPaths = filter (a: a != null) [
+        config.services.etcd.trustedCaFile
+        config.services.etcd.certFile
+        config.services.etcd.keyFile
+      ];
+
+    in mkIf cfg.enable {
       systemd.services.kube-apiserver = {
         description = "Kubernetes APIServer Service";
-        wantedBy = [ "kubernetes.target" ];
-        after = [ "network.target" ];
+        wantedBy = [ "kube-control-plane-online.target" ];
+        after = [ "certmgr.service" ];
+        before = [ "kube-control-plane-online.target" ];
         serviceConfig = {
           Slice = "kubernetes.slice";
           ExecStart = ''${top.package}/bin/kube-apiserver \

@@ -341,6 +362,15 @@ in
         Restart = "on-failure";
         RestartSec = 5;
       };
+      unitConfig.ConditionPathExists = apiserverPaths;
     };

+    systemd.paths.kube-apiserver = mkIf top.apiserver.enable {
+      wantedBy = [ "kube-apiserver.service" ];
+      pathConfig = {
+        PathExists = apiserverPaths;
+        PathChanged = apiserverPaths;
+      };
+    };
+
     services.etcd = {

@@ -354,6 +384,18 @@ in
       initialAdvertisePeerUrls = mkDefault ["https://${top.masterAddress}:2380"];
     };

+    systemd.services.etcd = {
+      unitConfig.ConditionPathExists = etcdPaths;
+    };
+
+    systemd.paths.etcd = {
+      wantedBy = [ "etcd.service" ];
+      pathConfig = {
+        PathExists = etcdPaths;
+        PathChanged = etcdPaths;
+      };
+    };
+
     services.kubernetes.addonManager.bootstrapAddons = mkIf isRBACEnabled {

       apiserver-kubelet-api-admin-crb = {

@@ -104,11 +104,31 @@ in
   };

   ###### implementation
-  config = mkIf cfg.enable {
-    systemd.services.kube-controller-manager = {
+  config = let
+
+    controllerManagerPaths = filter (a: a != null) [
+      cfg.kubeconfig.caFile
+      cfg.kubeconfig.certFile
+      cfg.kubeconfig.keyFile
+      cfg.rootCaFile
+      cfg.serviceAccountKeyFile
+      cfg.tlsCertFile
+      cfg.tlsKeyFile
+    ];
+
+  in mkIf cfg.enable {
+    systemd.services.kube-controller-manager = rec {
       description = "Kubernetes Controller Manager Service";
-      wantedBy = [ "kubernetes.target" ];
+      wantedBy = [ "kube-control-plane-online.target" ];
       after = [ "kube-apiserver.service" ];
+      before = [ "kube-control-plane-online.target" ];
+      environment.KUBECONFIG = top.lib.mkKubeConfig "kube-controller-manager" cfg.kubeconfig;
+      preStart = ''
+        until kubectl auth can-i get /api -q 2>/dev/null; do
+          echo kubectl auth can-i get /api: exit status $?
+          sleep 2
+        done
+      '';
       serviceConfig = {
         RestartSec = "30s";
         Restart = "on-failure";

@@ -120,7 +140,7 @@ in
             "--cluster-cidr=${cfg.clusterCidr}"} \
           ${optionalString (cfg.featureGates != [])
             "--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.featureGates}"} \
-          --kubeconfig=${top.lib.mkKubeConfig "kube-controller-manager" cfg.kubeconfig} \
+          --kubeconfig=${environment.KUBECONFIG} \
           --leader-elect=${boolToString cfg.leaderElect} \
           ${optionalString (cfg.rootCaFile!=null)
             "--root-ca-file=${cfg.rootCaFile}"} \

@@ -141,7 +161,16 @@ in
         User = "kubernetes";
         Group = "kubernetes";
       };
-      path = top.path;
+      path = top.path ++ [ pkgs.kubectl ];
+      unitConfig.ConditionPathExists = controllerManagerPaths;
     };

+    systemd.paths.kube-controller-manager = {
+      wantedBy = [ "kube-controller-manager.service" ];
+      pathConfig = {
+        PathExists = controllerManagerPaths;
+        PathChanged = controllerManagerPaths;
+      };
+    };
+
     services.kubernetes.pki.certs = with top.lib; {

@@ -263,6 +263,30 @@ in {
       wantedBy = [ "multi-user.target" ];
     };

+    systemd.targets.kube-control-plane-online = {
+      wantedBy = [ "kubernetes.target" ];
+      before = [ "kubernetes.target" ];
+    };
+
+    systemd.services.kube-control-plane-online = rec {
+      description = "Kubernetes control plane is online";
+      wantedBy = [ "kube-control-plane-online.target" ];
+      after = [ "kube-scheduler.service" "kube-controller-manager.service" ];
+      before = [ "kube-control-plane-online.target" ];
+      environment.KUBECONFIG = cfg.lib.mkKubeConfig "default" cfg.kubeconfig;
+      path = [ pkgs.kubectl ];
+      preStart = ''
+        until kubectl get --raw=/healthz 2>/dev/null; do
+          echo kubectl get --raw=/healthz: exit status $?
+          sleep 3
+        done
+      '';
+      script = "echo Ok";
+      serviceConfig = {
+        TimeoutSec = "500";
+      };
+    };
+
     systemd.tmpfiles.rules = [
       "d /opt/cni/bin 0755 root root -"
       "d /run/kubernetes 0755 kubernetes kubernetes -"

@@ -286,6 +310,8 @@ in {
       services.kubernetes.apiserverAddress = mkDefault ("https://${if cfg.apiserver.advertiseAddress != null
                             then cfg.apiserver.advertiseAddress
                             else "${cfg.masterAddress}:${toString cfg.apiserver.securePort}"}");
+
+      services.kubernetes.kubeconfig.server = mkDefault cfg.apiserverAddress;
     })
   ];
}
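
kube-control-plane-online is a synchronization barrier rather than a daemon: preStart blocks until the apiserver answers /healthz, the main process is a no-op, and everything that needs a working control plane orders itself after the target. The same barrier idiom in isolation, with a hypothetical backend service and probe URL (assumptions for illustration, not from this commit):

    { pkgs, ... }: {
      systemd.targets.backend-online = {
        wantedBy = [ "multi-user.target" ];
      };

      systemd.services.wait-for-backend = {
        description = "Block until the backend is reachable";
        wantedBy = [ "backend-online.target" ];
        before = [ "backend-online.target" ];
        path = [ pkgs.curl ];
        # The unit only becomes active once the probe succeeds, which in turn
        # releases every unit ordered after backend-online.target.
        preStart = ''
          until curl -fs http://127.0.0.1:8080/healthz -o /dev/null; do
            sleep 2
          done
        '';
        script = "echo Ok";
        serviceConfig.TimeoutStartSec = "500";
      };

      # A consumer only has to order itself after the barrier target.
      systemd.services.consumer = {
        wantedBy = [ "multi-user.target" ];
        after = [ "backend-online.target" ];
        script = "echo backend is up";
      };
    }
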

@@ -24,16 +24,26 @@ in
   ###### interface
   options.services.kubernetes.flannel = {
     enable = mkEnableOption "enable flannel networking";
+    kubeconfig = top.lib.mkKubeConfigOptions "Kubernetes flannel";
   };

   ###### implementation
-  config = mkIf cfg.enable {
+  config = let
+
+    flannelPaths = filter (a: a != null) [
+      cfg.kubeconfig.caFile
+      cfg.kubeconfig.certFile
+      cfg.kubeconfig.keyFile
+    ];
+    kubeconfig = top.lib.mkKubeConfig "flannel" cfg.kubeconfig;
+
+  in mkIf cfg.enable {
     services.flannel = {

       enable = mkDefault true;
       network = mkDefault top.clusterCidr;
-      inherit storageBackend;
-      nodeName = config.services.kubernetes.kubelet.hostname;
+      inherit storageBackend kubeconfig;
+      nodeName = top.kubelet.hostname;
     };

     services.kubernetes.kubelet = {

@@ -48,24 +58,66 @@ in
       }];
     };

-    systemd.services."mk-docker-opts" = {
+    systemd.services.mk-docker-opts = {
       description = "Pre-Docker Actions";
+      wantedBy = [ "flannel.target" ];
+      before = [ "flannel.target" ];
       path = with pkgs; [ gawk gnugrep ];
       script = ''
         ${mkDockerOpts}/mk-docker-opts -d /run/flannel/docker
         systemctl restart docker
       '';
+      unitConfig.ConditionPathExists = [ "/run/flannel/subnet.env" ];
       serviceConfig.Type = "oneshot";
     };

-    systemd.paths."flannel-subnet-env" = {
-      wantedBy = [ "flannel.service" ];
+    systemd.paths.flannel-subnet-env = {
+      wantedBy = [ "mk-docker-opts.service" ];
       pathConfig = {
-        PathModified = "/run/flannel/subnet.env";
+        PathExists = [ "/run/flannel/subnet.env" ];
+        PathChanged = [ "/run/flannel/subnet.env" ];
         Unit = "mk-docker-opts.service";
       };
     };

+    systemd.targets.flannel = {
+      wantedBy = [ "kube-node-online.target" ];
+      before = [ "kube-node-online.target" ];
+    };
+
+    systemd.services.flannel = {
+      wantedBy = [ "flannel.target" ];
+      after = [ "kubelet.target" ];
+      before = [ "flannel.target" ];
+      path = with pkgs; [ iptables kubectl ];
+      environment.KUBECONFIG = kubeconfig;
+      preStart = let
+        args = [
+          "--selector=kubernetes.io/hostname=${top.kubelet.hostname}"
+          # flannel exits if node is not registered yet, before that there is no podCIDR
+          "--output=jsonpath={.items[0].spec.podCIDR}"
+          # if jsonpath cannot be resolved exit with status 1
+          "--allow-missing-template-keys=false"
+        ];
+      in ''
+        until kubectl get nodes ${concatStringsSep " " args} 2>/dev/null; do
+          echo Waiting for ${top.kubelet.hostname} to be RegisteredNode
+          sleep 1
+        done
+      '';
+      unitConfig.ConditionPathExists = flannelPaths;
+    };
+
+    systemd.paths.flannel = {
+      wantedBy = [ "flannel.service" ];
+      pathConfig = {
+        PathExists = flannelPaths;
+        PathChanged = flannelPaths;
+      };
+    };
+
+    services.kubernetes.flannel.kubeconfig.server = mkDefault top.apiserverAddress;
+
     systemd.services.docker = {
       environment.DOCKER_OPTS = "-b none";
       serviceConfig.EnvironmentFile = "-/run/flannel/docker";

@@ -92,7 +144,6 @@ in

     # give flannel some kubernetes rbac permissions if applicable
     services.kubernetes.addonManager.bootstrapAddons = mkIf ((storageBackend == "kubernetes") && (elem "RBAC" top.apiserver.authorizationMode)) {
-
       flannel-cr = {
         apiVersion = "rbac.authorization.k8s.io/v1beta1";
         kind = "ClusterRole";

@@ -128,7 +179,6 @@ in
         name = "flannel-client";
       }];
     };
-
     };
   };
}

@@ -241,21 +241,28 @@ in

   ###### implementation
   config = mkMerge [
-    (mkIf cfg.enable {
+    (let
+
+      kubeletPaths = filter (a: a != null) [
+        cfg.kubeconfig.caFile
+        cfg.kubeconfig.certFile
+        cfg.kubeconfig.keyFile
+        cfg.clientCaFile
+        cfg.tlsCertFile
+        cfg.tlsKeyFile
+      ];
+
+    in mkIf cfg.enable {
       services.kubernetes.kubelet.seedDockerImages = [infraContainer];

       systemd.services.kubelet = {
         description = "Kubernetes Kubelet Service";
-        wantedBy = [ "kubernetes.target" ];
-        after = [ "network.target" "docker.service" "kube-apiserver.service" ];
+        wantedBy = [ "kubelet.target" ];
+        after = [ "kube-control-plane-online.target" ];
+        before = [ "kubelet.target" ];
         path = with pkgs; [ gitMinimal openssh docker utillinux iproute ethtool thin-provisioning-tools iptables socat ] ++ top.path;
         preStart = ''
           ${concatMapStrings (img: ''
             echo "Seeding docker image: ${img}"
             docker load <${img}
           '') cfg.seedDockerImages}

-          rm /opt/cni/bin/* || true
+          rm -f /opt/cni/bin/* || true
           ${concatMapStrings (package: ''
             echo "Linking cni package: ${package}"
             ln -fs ${package}/bin/* /opt/cni/bin

@@ -308,6 +315,56 @@ in
         '';
         WorkingDirectory = top.dataDir;
       };
+      unitConfig.ConditionPathExists = kubeletPaths;
     };

+    systemd.paths.kubelet = {
+      wantedBy = [ "kubelet.service" ];
+      pathConfig = {
+        PathExists = kubeletPaths;
+        PathChanged = kubeletPaths;
+      };
+    };
+
+    systemd.services.docker.before = [ "kubelet.service" ];
+
+    systemd.services.docker-seed-images = {
+      wantedBy = [ "docker.service" ];
+      after = [ "docker.service" ];
+      before = [ "kubelet.service" ];
+      path = with pkgs; [ docker ];
+      preStart = ''
+        ${concatMapStrings (img: ''
+          echo "Seeding docker image: ${img}"
+          docker load <${img}
+        '') cfg.seedDockerImages}
+      '';
+      script = "echo Ok";
+      serviceConfig.Type = "oneshot";
+      serviceConfig.RemainAfterExit = true;
+      serviceConfig.Slice = "kubernetes.slice";
+    };
+
+    systemd.services.kubelet-online = {
+      wantedBy = [ "kube-node-online.target" ];
+      after = [ "flannel.target" "kubelet.target" ];
+      before = [ "kube-node-online.target" ];
+      # it is complicated. flannel needs kubelet to run the pause container before
+      # it discusses the node CIDR with apiserver and afterwards configures and restarts
+      # dockerd. Until then prevent creating any pods because they have to be recreated anyway
+      # because the network of docker0 has been changed by flannel.
+      script = let
+        docker-env = "/run/flannel/docker";
+        flannel-date = "stat --print=%Y ${docker-env}";
+        docker-date = "systemctl show --property=ActiveEnterTimestamp --value docker";
+      in ''
+        until test -f ${docker-env} ; do sleep 1 ; done
+        while test `${flannel-date}` -gt `date +%s --date="$(${docker-date})"` ; do
+          sleep 1
+        done
+      '';
+      serviceConfig.Type = "oneshot";
+      serviceConfig.Slice = "kubernetes.slice";
+    };
+
     # Always include cni plugins

@@ -354,5 +411,16 @@ in
       };
     })

+    {
+      systemd.targets.kubelet = {
+        wantedBy = [ "kube-node-online.target" ];
+        before = [ "kube-node-online.target" ];
+      };
+
+      systemd.targets.kube-node-online = {
+        wantedBy = [ "kubernetes.target" ];
+        before = [ "kubernetes.target" ];
+      };
+    }
   ];
}
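
Together with kube-control-plane-online.target, these targets form a chain: kubelet.target and flannel.target feed kube-node-online.target, which gates kubernetes.target, so starting (or waiting on) kubernetes.target only completes once both the control plane and the node stack are up. Reduced to its skeleton, with hypothetical target names standing in for the real ones:

    {
      # Reached once node-level services are up; feeds the top-level target.
      systemd.targets.node-ready = {
        wantedBy = [ "cluster-ready.target" ];
        before = [ "cluster-ready.target" ];
      };

      # The target that outside units and test scripts wait on.
      systemd.targets.cluster-ready = {
        wantedBy = [ "multi-user.target" ];
      };
    }
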

@@ -27,12 +27,11 @@ let
   certmgrAPITokenPath = "${top.secretsPath}/${cfsslAPITokenBaseName}";
   cfsslAPITokenLength = 32;

-  clusterAdminKubeconfig = with cfg.certs.clusterAdmin;
-    top.lib.mkKubeConfig "cluster-admin" {
-      server = top.apiserverAddress;
-      certFile = cert;
-      keyFile = key;
-    };
+  clusterAdminKubeconfig = with cfg.certs.clusterAdmin; {
+    server = top.apiserverAddress;
+    certFile = cert;
+    keyFile = key;
+  };

   remote = with config.services; "https://${kubernetes.masterAddress}:${toString cfssl.port}";
in

@@ -119,6 +118,12 @@ in
       cfsslCertPathPrefix = "${config.services.cfssl.dataDir}/cfssl";
       cfsslCert = "${cfsslCertPathPrefix}.pem";
       cfsslKey = "${cfsslCertPathPrefix}-key.pem";
+      cfsslPort = toString config.services.cfssl.port;
+
+      certmgrPaths = [
+        top.caFile
+        certmgrAPITokenPath
+      ];
    in
    {

@@ -168,13 +173,40 @@ in
       chown cfssl "${cfsslAPITokenPath}" && chmod 400 "${cfsslAPITokenPath}"
     '')]);

+    systemd.targets.cfssl-online = {
+      wantedBy = [ "network-online.target" ];
+      after = [ "cfssl.service" "network-online.target" "cfssl-online.service" ];
+    };
+
+    systemd.services.cfssl-online = {
+      description = "Wait for ${remote} to be reachable.";
+      wantedBy = [ "cfssl-online.target" ];
+      before = [ "cfssl-online.target" ];
+      path = [ pkgs.curl ];
+      preStart = ''
+        until curl --fail-early -fskd '{}' ${remote}/api/v1/cfssl/info -o /dev/null; do
+          echo curl ${remote}/api/v1/cfssl/info: exit status $?
+          sleep 2
+        done
+      '';
+      script = "echo Ok";
+      serviceConfig = {
+        TimeoutSec = "300";
+      };
+    };
+
     systemd.services.kube-certmgr-bootstrap = {
       description = "Kubernetes certmgr bootstrapper";
-      wantedBy = [ "certmgr.service" ];
-      after = [ "cfssl.target" ];
+      wantedBy = [ "cfssl-online.target" ];
+      after = [ "cfssl-online.target" ];
+      before = [ "certmgr.service" ];
       path = with pkgs; [ curl cfssl ];
       script = concatStringsSep "\n" [''
         set -e

         mkdir -p $(dirname ${certmgrAPITokenPath})
         mkdir -p $(dirname ${top.caFile})

         # If there's a cfssl (cert issuer) running locally, then don't rely on user to
         # manually paste it in place. Just symlink.
         # otherwise, create the target file, ready for users to insert the token

@@ -186,15 +218,18 @@ in
         fi
       ''
       (optionalString (cfg.pkiTrustOnBootstrap) ''
-        if [ ! -f "${top.caFile}" ] || [ $(cat "${top.caFile}" | wc -c) -lt 1 ]; then
-          ${pkgs.curl}/bin/curl --fail-early -f -kd '{}' ${remote}/api/v1/cfssl/info | \
-            ${pkgs.cfssl}/bin/cfssljson -stdout >${top.caFile}
+        if [ ! -s "${top.caFile}" ]; then
+          until test -s ${top.caFile}.json; do
+            sleep 2
+            curl --fail-early -fskd '{}' ${remote}/api/v1/cfssl/info -o ${top.caFile}.json
+          done
+          cfssljson -f ${top.caFile}.json -stdout >${top.caFile}
+          rm ${top.caFile}.json
         fi
       '')
       ];
       serviceConfig = {
         RestartSec = "10s";
         Restart = "on-failure";
+        TimeoutSec = "500";
       };
     };

@@ -230,35 +265,28 @@ in
       mapAttrs mkSpec cfg.certs;
     };

-    #TODO: Get rid of kube-addon-manager in the future for the following reasons
-    # - it is basically just a shell script wrapped around kubectl
-    # - it assumes that it is clusterAdmin or can gain clusterAdmin rights through serviceAccount
-    # - it is designed to be used with k8s system components only
-    # - it would be better with a more Nix-oriented way of managing addons
-    systemd.services.kube-addon-manager = mkIf top.addonManager.enable (mkMerge [{
-      environment.KUBECONFIG = with cfg.certs.addonManager;
-        top.lib.mkKubeConfig "addon-manager" {
-          server = top.apiserverAddress;
-          certFile = cert;
-          keyFile = key;
-        };
-      }
+    systemd.services.certmgr = {
+      wantedBy = [ "cfssl-online.target" ];
+      after = [ "cfssl-online.target" "kube-certmgr-bootstrap.service" ];
+      preStart = ''
+        while ! test -s ${certmgrAPITokenPath} ; do
+          sleep 1
+          echo Waiting for ${certmgrAPITokenPath}
+        done
+      '';
+      unitConfig.ConditionPathExists = certmgrPaths;
+    };

-      (optionalAttrs (top.addonManager.bootstrapAddons != {}) {
-        serviceConfig.PermissionsStartOnly = true;
-        preStart = with pkgs;
-          let
-            files = mapAttrsToList (n: v: writeText "${n}.json" (builtins.toJSON v))
-              top.addonManager.bootstrapAddons;
-          in
-          ''
-            export KUBECONFIG=${clusterAdminKubeconfig}
-            ${kubectl}/bin/kubectl apply -f ${concatStringsSep " \\\n -f " files}
-          '';
-      })]);
+    systemd.paths.certmgr = {
+      wantedBy = [ "certmgr.service" ];
+      pathConfig = {
+        PathExists = certmgrPaths;
+        PathChanged = certmgrPaths;
+      };
+    };

     environment.etc.${cfg.etcClusterAdminKubeconfig}.source = mkIf (!isNull cfg.etcClusterAdminKubeconfig)
-      clusterAdminKubeconfig;
+      (top.lib.mkKubeConfig "cluster-admin" clusterAdminKubeconfig);

     environment.systemPackages = mkIf (top.kubelet.enable || top.proxy.enable) [
       (pkgs.writeScriptBin "nixos-kubernetes-node-join" ''
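
The let-binding change from earlier in this file shows up here: clusterAdminKubeconfig is now a plain attrset of connection parameters, rendered into an actual kubeconfig file only at the point of use via top.lib.mkKubeConfig. Roughly, a simplified sketch of what such a helper produces — not the verbatim implementation, and assuming pkgs is in scope:

    # mkKubeConfig-style helper: turn { server, certFile, keyFile } into a
    # kubeconfig file in the Nix store (the real helper also handles caFile).
    mkKubeConfigSketch = name: conf:
      pkgs.writeText "${name}-kubeconfig" (builtins.toJSON {
        apiVersion = "v1";
        kind = "Config";
        clusters = [{
          name = "local";
          cluster.server = conf.server;
        }];
        users = [{
          inherit name;
          user = {
            client-certificate = conf.certFile;
            client-key = conf.keyFile;
          };
        }];
        contexts = [{
          name = "local";
          context = { cluster = "local"; user = name; };
        }];
        current-context = "local";
      });
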

@@ -284,38 +312,22 @@ in
       exit 1
     fi

+    do_restart=$(test -s ${certmgrAPITokenPath} && echo -n y || echo -n n)
+
     echo $token > ${certmgrAPITokenPath}
     chmod 600 ${certmgrAPITokenPath}

-    echo "Restarting certmgr..." >&1
-    systemctl restart certmgr
+    if [ y = $do_restart ]; then
+      echo "Restarting certmgr..." >&1
+      systemctl restart certmgr
+    fi

-    echo "Waiting for certs to appear..." >&1
-
-    ${optionalString top.kubelet.enable ''
-      while [ ! -f ${cfg.certs.kubelet.cert} ]; do sleep 1; done
-      echo "Restarting kubelet..." >&1
-      systemctl restart kubelet
-    ''}
-
-    ${optionalString top.proxy.enable ''
-      while [ ! -f ${cfg.certs.kubeProxyClient.cert} ]; do sleep 1; done
-      echo "Restarting kube-proxy..." >&1
-      systemctl restart kube-proxy
-    ''}
-
-    ${optionalString top.flannel.enable ''
-      while [ ! -f ${cfg.certs.flannelClient.cert} ]; do sleep 1; done
-      echo "Restarting flannel..." >&1
-      systemctl restart flannel
-    ''}
-
-    echo "Node joined successfully"
+    echo "Node joined successfully" >&1
   '')];

   # isolate etcd on loopback at the master node
   # easyCerts doesn't support multimaster clusters anyway atm.
-  services.etcd = with cfg.certs.etcd; {
+  services.etcd = mkIf top.apiserver.enable (with cfg.certs.etcd; {
     listenClientUrls = ["https://127.0.0.1:2379"];
     listenPeerUrls = ["https://127.0.0.1:2380"];
     advertiseClientUrls = ["https://etcd.local:2379"];

@@ -324,19 +336,11 @@ in
     certFile = mkDefault cert;
     keyFile = mkDefault key;
     trustedCaFile = mkDefault caCert;
-  };
+  });
   networking.extraHosts = mkIf (config.services.etcd.enable) ''
     127.0.0.1 etcd.${top.addons.dns.clusterDomain} etcd.local
   '';

-  services.flannel = with cfg.certs.flannelClient; {
-    kubeconfig = top.lib.mkKubeConfig "flannel" {
-      server = top.apiserverAddress;
-      certFile = cert;
-      keyFile = key;
-    };
-  };
-
   services.kubernetes = {

     apiserver = mkIf top.apiserver.enable (with cfg.certs.apiServer; {

@@ -354,6 +358,13 @@ in
       kubeletClientCertFile = mkDefault cfg.certs.apiserverKubeletClient.cert;
       kubeletClientKeyFile = mkDefault cfg.certs.apiserverKubeletClient.key;
     });
+    addonManager = mkIf top.addonManager.enable {
+      kubeconfig = with cfg.certs.addonManager; {
+        certFile = mkDefault cert;
+        keyFile = mkDefault key;
+      };
+      bootstrapAddonsKubeconfig = clusterAdminKubeconfig;
+    };
     controllerManager = mkIf top.controllerManager.enable {
       serviceAccountKeyFile = mkDefault cfg.certs.serviceAccount.key;
       rootCaFile = cfg.certs.controllerManagerClient.caCert;

@@ -362,6 +373,12 @@ in
         keyFile = mkDefault key;
       };
     };
+    flannel = mkIf top.flannel.enable {
+      kubeconfig = with cfg.certs.flannelClient; {
+        certFile = cert;
+        keyFile = key;
+      };
+    };
     scheduler = mkIf top.scheduler.enable {
       kubeconfig = with cfg.certs.schedulerClient; {
         certFile = mkDefault cert;
|
@ -45,12 +45,28 @@ in
|
||||
};
|
||||
|
||||
###### implementation
|
||||
config = mkIf cfg.enable {
|
||||
systemd.services.kube-proxy = {
|
||||
config = let
|
||||
|
||||
proxyPaths = filter (a: a != null) [
|
||||
cfg.kubeconfig.caFile
|
||||
cfg.kubeconfig.certFile
|
||||
cfg.kubeconfig.keyFile
|
||||
];
|
||||
|
||||
in mkIf cfg.enable {
|
||||
systemd.services.kube-proxy = rec {
|
||||
description = "Kubernetes Proxy Service";
|
||||
wantedBy = [ "kubernetes.target" ];
|
||||
after = [ "kube-apiserver.service" ];
|
||||
path = with pkgs; [ iptables conntrack_tools ];
|
||||
wantedBy = [ "kube-node-online.target" ];
|
||||
after = [ "kubelet-online.service" ];
|
||||
before = [ "kube-node-online.target" ];
|
||||
environment.KUBECONFIG = top.lib.mkKubeConfig "kube-proxy" cfg.kubeconfig;
|
||||
path = with pkgs; [ iptables conntrack_tools kubectl ];
|
||||
preStart = ''
|
||||
until kubectl auth can-i get nodes/${top.kubelet.hostname} -q 2>/dev/null; do
|
||||
echo kubectl auth can-i get nodes/${top.kubelet.hostname}: exit status $?
|
||||
sleep 2
|
||||
done
|
||||
'';
|
||||
serviceConfig = {
|
||||
Slice = "kubernetes.slice";
|
||||
ExecStart = ''${top.package}/bin/kube-proxy \
|
||||

@@ -59,7 +75,7 @@ in
           "--cluster-cidr=${top.clusterCidr}"} \
         ${optionalString (cfg.featureGates != [])
           "--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.featureGates}"} \
-        --kubeconfig=${top.lib.mkKubeConfig "kube-proxy" cfg.kubeconfig} \
+        --kubeconfig=${environment.KUBECONFIG} \
         ${optionalString (cfg.verbosity != null) "--v=${toString cfg.verbosity}"} \
         ${cfg.extraOpts}
       '';

@@ -67,6 +83,15 @@ in
         Restart = "on-failure";
         RestartSec = 5;
       };
+      unitConfig.ConditionPathExists = proxyPaths;
     };

+    systemd.paths.kube-proxy = {
+      wantedBy = [ "kube-proxy.service" ];
+      pathConfig = {
+        PathExists = proxyPaths;
+        PathChanged = proxyPaths;
+      };
+    };
+
     services.kubernetes.pki.certs = {

@@ -56,18 +56,35 @@ in
   };

   ###### implementation
-  config = mkIf cfg.enable {
-    systemd.services.kube-scheduler = {
+  config = let
+
+    schedulerPaths = filter (a: a != null) [
+      cfg.kubeconfig.caFile
+      cfg.kubeconfig.certFile
+      cfg.kubeconfig.keyFile
+    ];
+
+  in mkIf cfg.enable {
+    systemd.services.kube-scheduler = rec {
       description = "Kubernetes Scheduler Service";
-      wantedBy = [ "kubernetes.target" ];
+      wantedBy = [ "kube-control-plane-online.target" ];
       after = [ "kube-apiserver.service" ];
+      before = [ "kube-control-plane-online.target" ];
+      environment.KUBECONFIG = top.lib.mkKubeConfig "kube-scheduler" cfg.kubeconfig;
+      path = [ pkgs.kubectl ];
+      preStart = ''
+        until kubectl auth can-i get /api -q 2>/dev/null; do
+          echo kubectl auth can-i get /api: exit status $?
+          sleep 2
+        done
+      '';
       serviceConfig = {
         Slice = "kubernetes.slice";
         ExecStart = ''${top.package}/bin/kube-scheduler \
           --address=${cfg.address} \
           ${optionalString (cfg.featureGates != [])
             "--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.featureGates}"} \
-          --kubeconfig=${top.lib.mkKubeConfig "kube-scheduler" cfg.kubeconfig} \
+          --kubeconfig=${environment.KUBECONFIG} \
           --leader-elect=${boolToString cfg.leaderElect} \
           --port=${toString cfg.port} \
           ${optionalString (cfg.verbosity != null) "--v=${toString cfg.verbosity}"} \

@@ -79,6 +96,15 @@ in
         Restart = "on-failure";
         RestartSec = 5;
       };
+      unitConfig.ConditionPathExists = schedulerPaths;
     };

+    systemd.paths.kube-scheduler = {
+      wantedBy = [ "kube-scheduler.service" ];
+      pathConfig = {
+        PathExists = schedulerPaths;
+        PathChanged = schedulerPaths;
+      };
+    };
+
     services.kubernetes.pki.certs = {
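
As with kube-proxy and kube-controller-manager, the scheduler's service attrset is rec so ExecStart can reference environment.KUBECONFIG directly: the kubeconfig is rendered once and the --kubeconfig flag can never drift from the variable the preStart checks run under. The idiom in isolation, with a hypothetical service name and path:

    {
      systemd.services.example = rec {
        # `rec` makes sibling attributes referenceable, so the command line
        # reuses exactly the value assigned to the environment variable.
        environment.KUBECONFIG = "/etc/example/kubeconfig";
        script = "echo using --kubeconfig=${environment.KUBECONFIG}";
      };
    }
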

@@ -30,7 +30,10 @@ let
   { config, pkgs, lib, nodes, ... }:
     mkMerge [
       {
-        boot.postBootCommands = "rm -fr /var/lib/kubernetes/secrets /tmp/shared/*";
+        boot = {
+          postBootCommands = "rm -fr /var/lib/kubernetes/secrets /tmp/shared/*";
+          kernel.sysctl = { "fs.inotify.max_user_instances" = 256; };
+        };
         virtualisation.memorySize = mkDefault 1536;
         virtualisation.diskSize = mkDefault 4096;
         networking = {

@@ -77,6 +77,7 @@ let
   singleNodeTest = {
     test = ''
       # prepare machine1 for test
+      $machine1->waitForUnit("kubernetes.target");
       $machine1->waitUntilSucceeds("kubectl get node machine1.${domain} | grep -w Ready");
       $machine1->waitUntilSucceeds("docker load < ${redisImage}");
       $machine1->waitUntilSucceeds("kubectl create -f ${redisPod}");

@@ -102,6 +103,8 @@ let
       # Node token exchange
       $machine1->waitUntilSucceeds("cp -f /var/lib/cfssl/apitoken.secret /tmp/shared/apitoken.secret");
       $machine2->waitUntilSucceeds("cat /tmp/shared/apitoken.secret | nixos-kubernetes-node-join");
+      $machine1->waitForUnit("kubernetes.target");
+      $machine2->waitForUnit("kubernetes.target");

       # prepare machines for test
       $machine1->waitUntilSucceeds("kubectl get node machine2.${domain} | grep -w Ready");

@@ -94,6 +94,8 @@ let
   singlenode = base // {
     test = ''
+      $machine1->waitForUnit("kubernetes.target");
+
       $machine1->waitUntilSucceeds("kubectl get node machine1.my.zyx | grep -w Ready");

       $machine1->waitUntilSucceeds("docker load < ${kubectlImage}");

@@ -116,6 +118,8 @@ let
       # Node token exchange
       $machine1->waitUntilSucceeds("cp -f /var/lib/cfssl/apitoken.secret /tmp/shared/apitoken.secret");
       $machine2->waitUntilSucceeds("cat /tmp/shared/apitoken.secret | nixos-kubernetes-node-join");
+      $machine1->waitForUnit("kubernetes.target");
+      $machine2->waitForUnit("kubernetes.target");

       $machine1->waitUntilSucceeds("kubectl get node machine2.my.zyx | grep -w Ready");