mirror of https://github.com/puppetmaster/typhoon.git
synced 2024-12-26 19:59:34 +01:00
eda78db08e
* Use docker to run the `kubelet.service` container
* Update Kubelet mounts to match Fedora CoreOS
* Remove unused `/etc/ssl/certs` mount (see https://github.com/poseidon/typhoon/pull/810)
* Remove unused `/usr/share/ca-certificates` mount
* Remove the `/etc/resolv.conf` mount; the Docker default is fine
* Change `delete-node.service` to use docker instead of rkt and inline the ExecStart, as was done on Fedora CoreOS
* Fix a permission denied error on shutdown in `delete-node`, caused by the kubeconfig mount changing with the introduction of node TLS bootstrap

Background

* podman, rkt, and runc are daemonless container process runners that offer advantages over the docker daemon for system containers. Docker requires a workaround for use in systemd units: the ExecStart must tail logs so systemd has a foreground process to monitor for the daemonized container (see the sketch after this list). https://github.com/moby/moby/issues/6791
* Why switch then? On Flatcar Linux, podman isn't shipped. rkt works, but isn't being developed while container standards continue to move forward. Typhoon has used runc as the Kubelet runner before, on Fedora Atomic, but it's more low-level. So we're left with Docker, which is less than ideal, but is shipped in Flatcar
* Flatcar Linux appears to be shifting system components to use docker itself, which provides some limited guard against breakage (e.g. Flatcar cannot enable docker live restore)
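For reference, the workaround pattern reduces to a unit shaped like the sketch below. This is a minimal illustration only, not a unit from this commit: the `example` container name and the nginx:alpine image are placeholders, and the real `kubelet.service` in the file that follows adds the necessary mounts and flags.

[Unit]
Description=Example daemonized container under systemd
Requires=docker.service
After=docker.service

[Service]
# Start detached; the container process belongs to the docker daemon,
# so systemd has nothing to supervise yet.
ExecStartPre=/usr/bin/docker run -d --name example docker.io/library/nginx:alpine
# Tail logs in the foreground to give systemd a long-lived process to monitor.
ExecStart=/usr/bin/docker logs -f example
ExecStop=/usr/bin/docker stop example
ExecStopPost=/usr/bin/docker rm example
Restart=always
RestartSec=10

[Install]
WantedBy=multi-user.target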
193 lines
7.5 KiB
YAML
---
systemd:
  units:
    - name: etcd-member.service
      enabled: true
      dropins:
        - name: 40-etcd-cluster.conf
          contents: |
            [Service]
            Environment="ETCD_IMAGE_TAG=v3.4.12"
            Environment="ETCD_IMAGE_URL=docker://quay.io/coreos/etcd"
            Environment="RKT_RUN_ARGS=--insecure-options=image"
            Environment="ETCD_NAME=${etcd_name}"
            Environment="ETCD_ADVERTISE_CLIENT_URLS=https://${etcd_domain}:2379"
            Environment="ETCD_INITIAL_ADVERTISE_PEER_URLS=https://${etcd_domain}:2380"
            Environment="ETCD_LISTEN_CLIENT_URLS=https://0.0.0.0:2379"
            Environment="ETCD_LISTEN_PEER_URLS=https://0.0.0.0:2380"
            Environment="ETCD_LISTEN_METRICS_URLS=http://0.0.0.0:2381"
            Environment="ETCD_INITIAL_CLUSTER=${etcd_initial_cluster}"
            Environment="ETCD_STRICT_RECONFIG_CHECK=true"
            Environment="ETCD_SSL_DIR=/etc/ssl/etcd"
            Environment="ETCD_TRUSTED_CA_FILE=/etc/ssl/certs/etcd/server-ca.crt"
            Environment="ETCD_CERT_FILE=/etc/ssl/certs/etcd/server.crt"
            Environment="ETCD_KEY_FILE=/etc/ssl/certs/etcd/server.key"
            Environment="ETCD_CLIENT_CERT_AUTH=true"
            Environment="ETCD_PEER_TRUSTED_CA_FILE=/etc/ssl/certs/etcd/peer-ca.crt"
            Environment="ETCD_PEER_CERT_FILE=/etc/ssl/certs/etcd/peer.crt"
            Environment="ETCD_PEER_KEY_FILE=/etc/ssl/certs/etcd/peer.key"
            Environment="ETCD_PEER_CLIENT_CERT_AUTH=true"
    - name: docker.service
      enabled: true
    - name: locksmithd.service
      mask: true
    - name: wait-for-dns.service
      enabled: true
      contents: |
        [Unit]
        Description=Wait for DNS entries
        Wants=systemd-resolved.service
        Before=kubelet.service
        [Service]
        Type=oneshot
        RemainAfterExit=true
        ExecStart=/bin/sh -c 'while ! /usr/bin/grep '^[^#[:space:]]' /etc/resolv.conf > /dev/null; do sleep 1; done'
        [Install]
        RequiredBy=kubelet.service
        RequiredBy=etcd-member.service
    - name: kubelet.service
      enabled: true
      contents: |
        [Unit]
        Description=Kubelet
        Requires=docker.service
        After=docker.service
        Wants=rpc-statd.service
        [Service]
        Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.19.3
        ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
        ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
        ExecStartPre=/bin/mkdir -p /opt/cni/bin
        ExecStartPre=/bin/mkdir -p /var/lib/calico
        ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins
        ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
        ExecStartPre=/usr/bin/docker run -d \
          --name kubelet \
          --privileged \
          --pid host \
          --network host \
          -v /etc/kubernetes:/etc/kubernetes:ro \
          -v /etc/machine-id:/etc/machine-id:ro \
          -v /usr/lib/os-release:/etc/os-release:ro \
          -v /lib/modules:/lib/modules:ro \
          -v /run:/run \
          -v /sys/fs/cgroup:/sys/fs/cgroup:ro \
          -v /sys/fs/cgroup/systemd:/sys/fs/cgroup/systemd \
          -v /var/lib/calico:/var/lib/calico:ro \
          -v /var/lib/docker:/var/lib/docker \
          -v /var/lib/kubelet:/var/lib/kubelet:rshared \
          -v /var/log:/var/log \
          -v /opt/cni/bin:/opt/cni/bin \
          $${KUBELET_IMAGE} \
          --anonymous-auth=false \
          --authentication-token-webhook \
          --authorization-mode=Webhook \
          --bootstrap-kubeconfig=/etc/kubernetes/kubeconfig \
          --client-ca-file=/etc/kubernetes/ca.crt \
          --cluster_dns=${cluster_dns_service_ip} \
          --cluster_domain=${cluster_domain_suffix} \
          --cni-conf-dir=/etc/kubernetes/cni/net.d \
          --healthz-port=0 \
          --kubeconfig=/var/lib/kubelet/kubeconfig \
          --network-plugin=cni \
          --node-labels=node.kubernetes.io/controller="true" \
          --pod-manifest-path=/etc/kubernetes/manifests \
          --register-with-taints=node-role.kubernetes.io/controller=:NoSchedule \
          --read-only-port=0 \
          --rotate-certificates \
          --volume-plugin-dir=/var/lib/kubelet/volumeplugins
        ExecStart=docker logs -f kubelet
        ExecStop=docker stop kubelet
        ExecStopPost=docker rm kubelet
        Restart=always
        RestartSec=10
        [Install]
        WantedBy=multi-user.target
    - name: bootstrap.service
      contents: |
        [Unit]
        Description=Kubernetes control plane
        ConditionPathExists=!/opt/bootstrap/bootstrap.done
        [Service]
        Type=oneshot
        RemainAfterExit=true
        WorkingDirectory=/opt/bootstrap
        ExecStart=/usr/bin/rkt run \
          --trust-keys-from-https \
          --volume config,kind=host,source=/etc/kubernetes/bootstrap-secrets \
          --mount volume=config,target=/etc/kubernetes/secrets \
          --volume assets,kind=host,source=/opt/bootstrap/assets \
          --mount volume=assets,target=/assets \
          --volume script,kind=host,source=/opt/bootstrap/apply \
          --mount volume=script,target=/apply \
          --insecure-options=image \
          docker://quay.io/poseidon/kubelet:v1.19.3 \
          --net=host \
          --dns=host \
          --exec=/apply
        ExecStartPost=/bin/touch /opt/bootstrap/bootstrap.done
        [Install]
        WantedBy=multi-user.target
storage:
  directories:
    - path: /var/lib/etcd
      filesystem: root
      mode: 0700
      overwrite: true
  files:
    - path: /etc/kubernetes/kubeconfig
      filesystem: root
      mode: 0644
      contents:
        inline: |
          ${kubeconfig}
    - path: /opt/bootstrap/layout
      filesystem: root
      mode: 0544
      contents:
        inline: |
          #!/bin/bash -e
          mkdir -p -- auth tls/etcd tls/k8s static-manifests manifests/coredns manifests-networking
          awk '/#####/ {filename=$2; next} {print > filename}' assets
          mkdir -p /etc/ssl/etcd/etcd
          mkdir -p /etc/kubernetes/bootstrap-secrets
          mv tls/etcd/{peer*,server*} /etc/ssl/etcd/etcd/
          mv tls/etcd/etcd-client* /etc/kubernetes/bootstrap-secrets/
          chown -R etcd:etcd /etc/ssl/etcd
          chmod -R 500 /etc/ssl/etcd
          chmod -R 700 /var/lib/etcd
          mv auth/kubeconfig /etc/kubernetes/bootstrap-secrets/
          mv tls/k8s/* /etc/kubernetes/bootstrap-secrets/
          mkdir -p /etc/kubernetes/manifests
          mv static-manifests/* /etc/kubernetes/manifests/
          mkdir -p /opt/bootstrap/assets
          mv manifests /opt/bootstrap/assets/manifests
          mv manifests-networking/* /opt/bootstrap/assets/manifests/
          rm -rf assets auth static-manifests tls manifests-networking
    - path: /opt/bootstrap/apply
      filesystem: root
      mode: 0544
      contents:
        inline: |
          #!/bin/bash -e
          export KUBECONFIG=/etc/kubernetes/secrets/kubeconfig
          until kubectl version; do
            echo "Waiting for static pod control plane"
            sleep 5
          done
          until kubectl apply -f /assets/manifests -R; do
            echo "Retry applying manifests"
            sleep 5
          done
    - path: /etc/sysctl.d/max-user-watches.conf
      filesystem: root
      mode: 0644
      contents:
        inline: |
          fs.inotify.max_user_watches=16184
passwd:
  users:
    - name: core
      ssh_authorized_keys:
        - "${ssh_authorized_key}"