---
systemd:
  units:
    - name: etcd-member.service
      enabled: true
      dropins:
        - name: 40-etcd-cluster.conf
          contents: |
            [Service]
            Environment="ETCD_IMAGE_TAG=v3.4.12"
            Environment="ETCD_IMAGE_URL=docker://quay.io/coreos/etcd"
            Environment="RKT_RUN_ARGS=--insecure-options=image"
            Environment="ETCD_NAME=${etcd_name}"
            Environment="ETCD_ADVERTISE_CLIENT_URLS=https://${domain_name}:2379"
            Environment="ETCD_INITIAL_ADVERTISE_PEER_URLS=https://${domain_name}:2380"
            Environment="ETCD_LISTEN_CLIENT_URLS=https://0.0.0.0:2379"
            Environment="ETCD_LISTEN_PEER_URLS=https://0.0.0.0:2380"
            Environment="ETCD_LISTEN_METRICS_URLS=http://0.0.0.0:2381"
            Environment="ETCD_INITIAL_CLUSTER=${etcd_initial_cluster}"
            Environment="ETCD_STRICT_RECONFIG_CHECK=true"
            Environment="ETCD_SSL_DIR=/etc/ssl/etcd"
            Environment="ETCD_TRUSTED_CA_FILE=/etc/ssl/certs/etcd/server-ca.crt"
            Environment="ETCD_CERT_FILE=/etc/ssl/certs/etcd/server.crt"
            Environment="ETCD_KEY_FILE=/etc/ssl/certs/etcd/server.key"
            Environment="ETCD_CLIENT_CERT_AUTH=true"
            Environment="ETCD_PEER_TRUSTED_CA_FILE=/etc/ssl/certs/etcd/peer-ca.crt"
            Environment="ETCD_PEER_CERT_FILE=/etc/ssl/certs/etcd/peer.crt"
            Environment="ETCD_PEER_KEY_FILE=/etc/ssl/certs/etcd/peer.key"
            Environment="ETCD_PEER_CLIENT_CERT_AUTH=true"
    - name: docker.service
      enabled: true
    - name: locksmithd.service
      mask: true
    - name: kubelet.path
      enabled: true
      contents: |
        [Unit]
        Description=Watch for kubeconfig
        [Path]
        PathExists=/etc/kubernetes/kubeconfig
        [Install]
        WantedBy=multi-user.target
    - name: wait-for-dns.service
      enabled: true
      contents: |
        [Unit]
        Description=Wait for DNS entries
        Wants=systemd-resolved.service
        Before=kubelet.service
        [Service]
        Type=oneshot
        RemainAfterExit=true
        ExecStart=/bin/sh -c 'while ! /usr/bin/grep '^[^#[:space:]]' /etc/resolv.conf > /dev/null; do sleep 1; done'
        [Install]
        RequiredBy=kubelet.service
        RequiredBy=etcd-member.service
    - name: kubelet.service
      contents: |
        [Unit]
        Description=Kubelet
        Requires=docker.service
        After=docker.service
        Wants=rpc-statd.service
        [Service]
        Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.19.3
        Environment=KUBELET_CGROUP_DRIVER=${cgroup_driver}
        ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
        ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
        ExecStartPre=/bin/mkdir -p /opt/cni/bin
        ExecStartPre=/bin/mkdir -p /var/lib/calico
        ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins
        ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
        ExecStartPre=/usr/bin/docker run -d \
          --name kubelet \
          --privileged \
          --pid host \
          --network host \
          -v /etc/kubernetes:/etc/kubernetes:ro \
          -v /etc/machine-id:/etc/machine-id:ro \
          -v /usr/lib/os-release:/etc/os-release:ro \
          -v /lib/modules:/lib/modules:ro \
          -v /run:/run \
          -v /sys/fs/cgroup:/sys/fs/cgroup:ro \
          -v /sys/fs/cgroup/systemd:/sys/fs/cgroup/systemd \
          -v /var/lib/calico:/var/lib/calico:ro \
          -v /var/lib/docker:/var/lib/docker \
          -v /var/lib/kubelet:/var/lib/kubelet:rshared \
          -v /var/log:/var/log \
          -v /opt/cni/bin:/opt/cni/bin \
          -v /etc/iscsi:/etc/iscsi \
          -v /usr/sbin/iscsiadm:/usr/sbin/iscsiadm \
          $${KUBELET_IMAGE} \
          --anonymous-auth=false \
          --authentication-token-webhook \
          --authorization-mode=Webhook \
          --bootstrap-kubeconfig=/etc/kubernetes/kubeconfig \
          --cgroup-driver=$${KUBELET_CGROUP_DRIVER} \
          --client-ca-file=/etc/kubernetes/ca.crt \
          --cluster_dns=${cluster_dns_service_ip} \
          --cluster_domain=${cluster_domain_suffix} \
          --cni-conf-dir=/etc/kubernetes/cni/net.d \
          --healthz-port=0 \
          --hostname-override=${domain_name} \
          --kubeconfig=/var/lib/kubelet/kubeconfig \
          --network-plugin=cni \
          --node-labels=node.kubernetes.io/controller="true" \
          --pod-manifest-path=/etc/kubernetes/manifests \
          --read-only-port=0 \
          --register-with-taints=node-role.kubernetes.io/controller=:NoSchedule \
          --rotate-certificates \
          --volume-plugin-dir=/var/lib/kubelet/volumeplugins
        ExecStart=docker logs -f kubelet
        ExecStop=docker stop kubelet
        ExecStopPost=docker rm kubelet
        Restart=always
        RestartSec=10
        [Install]
        WantedBy=multi-user.target
    - name: bootstrap.service
      contents: |
        [Unit]
        Description=Kubernetes control plane
        ConditionPathExists=!/opt/bootstrap/bootstrap.done
        [Service]
        Type=oneshot
        RemainAfterExit=true
        WorkingDirectory=/opt/bootstrap
        ExecStart=/usr/bin/rkt run \
            --trust-keys-from-https \
            --volume config,kind=host,source=/etc/kubernetes/bootstrap-secrets \
            --mount volume=config,target=/etc/kubernetes/secrets \
            --volume assets,kind=host,source=/opt/bootstrap/assets \
            --mount volume=assets,target=/assets \
            --volume script,kind=host,source=/opt/bootstrap/apply \
            --mount volume=script,target=/apply \
            --insecure-options=image \
            docker://quay.io/poseidon/kubelet:v1.19.3 \
            --net=host \
            --dns=host \
            --exec=/apply
        ExecStartPost=/bin/touch /opt/bootstrap/bootstrap.done
        [Install]
        WantedBy=multi-user.target
storage:
  directories:
    - path: /var/lib/etcd
      filesystem: root
      mode: 0700
      overwrite: true
    - path: /etc/kubernetes
      filesystem: root
      mode: 0755
  files:
    - path: /etc/hostname
      filesystem: root
      mode: 0644
      contents:
        inline:
          ${domain_name}
    - path: /opt/bootstrap/layout
      filesystem: root
      mode: 0544
      contents:
        inline: |
          #!/bin/bash -e
          mkdir -p -- auth tls/etcd tls/k8s static-manifests manifests/coredns manifests-networking
          awk '/#####/ {filename=$2; next} {print > filename}' assets
          mkdir -p /etc/ssl/etcd/etcd
          mkdir -p /etc/kubernetes/bootstrap-secrets
          mv tls/etcd/{peer*,server*} /etc/ssl/etcd/etcd/
          mv tls/etcd/etcd-client* /etc/kubernetes/bootstrap-secrets/
          chown -R etcd:etcd /etc/ssl/etcd
          chmod -R 500 /etc/ssl/etcd
          chmod -R 700 /var/lib/etcd
          mv auth/kubeconfig /etc/kubernetes/bootstrap-secrets/
          mv tls/k8s/* /etc/kubernetes/bootstrap-secrets/
          mkdir -p /etc/kubernetes/manifests
          mv static-manifests/* /etc/kubernetes/manifests/
          mkdir -p /opt/bootstrap/assets
          mv manifests /opt/bootstrap/assets/manifests
          mv manifests-networking/* /opt/bootstrap/assets/manifests/
          rm -rf assets auth static-manifests tls manifests-networking
    - path: /opt/bootstrap/apply
      filesystem: root
      mode: 0544
      contents:
        inline: |
          #!/bin/bash -e
          export KUBECONFIG=/etc/kubernetes/secrets/kubeconfig
          until kubectl version; do
            echo "Waiting for static pod control plane"
            sleep 5
          done
          until kubectl apply -f /assets/manifests -R; do
            echo "Retry applying manifests"
            sleep 5
          done
    - path: /etc/sysctl.d/max-user-watches.conf
      filesystem: root
      mode: 0644
      contents:
        inline: |
          fs.inotify.max_user_watches=16184
passwd:
  users:
    - name: core
      ssh_authorized_keys:
        - ${ssh_authorized_key}