mirror of
https://github.com/puppetmaster/typhoon.git
synced 2024-12-26 19:59:34 +01:00
bf22222f7d
* In v1.18.0, kubectl apply would fail to apply manifests if any single manifest failed validation. For example, if a CRD and a CR were defined in the same directory, apply would fail because the CR could not validate while its CRD did not yet exist.
* Typhoon's temporary workaround was to separate the CNI CRD manifests and apply them explicitly first. This is no longer needed in v1.18.1+.
* Kubernetes v1.18.1 restored the prior behavior, where kubectl apply applies as many valid manifests as it can. In the example above, the CRD would be applied, and the CR would apply once kubectl apply was re-run (allowing for apply loops).
* Upstream fix: https://github.com/kubernetes/kubernetes/pull/89864
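The apply loop referenced above is just a retry around kubectl apply; a minimal sketch of the pattern (the same loop appears in the /opt/bootstrap/apply script in this file):

    # Re-run kubectl apply until every manifest applies; on the first pass the
    # CRD is created, and a later pass succeeds once the CR can validate.
    until kubectl apply -f /assets/manifests -R; do
      echo "Retry applying manifests"
      sleep 5
    done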
215 lines
9.3 KiB
YAML
---
systemd:
  units:
    - name: etcd-member.service
      enable: true
      dropins:
        - name: 40-etcd-cluster.conf
          contents: |
            [Service]
            Environment="ETCD_IMAGE_TAG=v3.4.7"
            Environment="ETCD_IMAGE_URL=docker://quay.io/coreos/etcd"
            Environment="RKT_RUN_ARGS=--insecure-options=image"
            Environment="ETCD_NAME=${etcd_name}"
            Environment="ETCD_ADVERTISE_CLIENT_URLS=https://${domain_name}:2379"
            Environment="ETCD_INITIAL_ADVERTISE_PEER_URLS=https://${domain_name}:2380"
            Environment="ETCD_LISTEN_CLIENT_URLS=https://0.0.0.0:2379"
            Environment="ETCD_LISTEN_PEER_URLS=https://0.0.0.0:2380"
            Environment="ETCD_LISTEN_METRICS_URLS=http://0.0.0.0:2381"
            Environment="ETCD_INITIAL_CLUSTER=${etcd_initial_cluster}"
            Environment="ETCD_STRICT_RECONFIG_CHECK=true"
            Environment="ETCD_SSL_DIR=/etc/ssl/etcd"
            Environment="ETCD_TRUSTED_CA_FILE=/etc/ssl/certs/etcd/server-ca.crt"
            Environment="ETCD_CERT_FILE=/etc/ssl/certs/etcd/server.crt"
            Environment="ETCD_KEY_FILE=/etc/ssl/certs/etcd/server.key"
            Environment="ETCD_CLIENT_CERT_AUTH=true"
            Environment="ETCD_PEER_TRUSTED_CA_FILE=/etc/ssl/certs/etcd/peer-ca.crt"
            Environment="ETCD_PEER_CERT_FILE=/etc/ssl/certs/etcd/peer.crt"
            Environment="ETCD_PEER_KEY_FILE=/etc/ssl/certs/etcd/peer.key"
            Environment="ETCD_PEER_CLIENT_CERT_AUTH=true"
    - name: docker.service
      enable: true
    - name: locksmithd.service
      mask: true
    - name: kubelet.path
      enable: true
      contents: |
        [Unit]
        Description=Watch for kubeconfig
        [Path]
        PathExists=/etc/kubernetes/kubeconfig
        [Install]
        WantedBy=multi-user.target
    - name: wait-for-dns.service
      enable: true
      contents: |
        [Unit]
        Description=Wait for DNS entries
        Wants=systemd-resolved.service
        Before=kubelet.service
        [Service]
        Type=oneshot
        RemainAfterExit=true
        ExecStart=/bin/sh -c 'while ! /usr/bin/grep '^[^#[:space:]]' /etc/resolv.conf > /dev/null; do sleep 1; done'
        [Install]
        RequiredBy=kubelet.service
        RequiredBy=etcd-member.service
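    # Kubelet runs via rkt's fly stage1, with host paths (CNI config, Calico,
    # Docker, kubelet state, logs) bind-mounted into the pod.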
    - name: kubelet.service
      contents: |
        [Unit]
        Description=Kubelet via Hyperkube
        Wants=rpc-statd.service
        [Service]
        Environment=KUBELET_CGROUP_DRIVER=${cgroup_driver}
        ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
        ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
        ExecStartPre=/bin/mkdir -p /opt/cni/bin
        ExecStartPre=/bin/mkdir -p /var/lib/calico
        ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins
        ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
        ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/cache/kubelet-pod.uuid
        ExecStart=/usr/bin/rkt run \
          --uuid-file-save=/var/cache/kubelet-pod.uuid \
          --stage1-from-dir=stage1-fly.aci \
          --hosts-entry host \
          --insecure-options=image \
          --volume etc-kubernetes,kind=host,source=/etc/kubernetes,readOnly=true \
          --mount volume=etc-kubernetes,target=/etc/kubernetes \
          --volume etc-machine-id,kind=host,source=/etc/machine-id,readOnly=true \
          --mount volume=etc-machine-id,target=/etc/machine-id \
          --volume etc-os-release,kind=host,source=/usr/lib/os-release,readOnly=true \
          --mount volume=etc-os-release,target=/etc/os-release \
          --volume=etc-resolv,kind=host,source=/etc/resolv.conf,readOnly=true \
          --mount volume=etc-resolv,target=/etc/resolv.conf \
          --volume etc-ssl-certs,kind=host,source=/etc/ssl/certs,readOnly=true \
          --mount volume=etc-ssl-certs,target=/etc/ssl/certs \
          --volume lib-modules,kind=host,source=/lib/modules,readOnly=true \
          --mount volume=lib-modules,target=/lib/modules \
          --volume run,kind=host,source=/run \
          --mount volume=run,target=/run \
          --volume usr-share-certs,kind=host,source=/usr/share/ca-certificates,readOnly=true \
          --mount volume=usr-share-certs,target=/usr/share/ca-certificates \
          --volume var-lib-calico,kind=host,source=/var/lib/calico,readOnly=true \
          --mount volume=var-lib-calico,target=/var/lib/calico \
          --volume var-lib-docker,kind=host,source=/var/lib/docker \
          --mount volume=var-lib-docker,target=/var/lib/docker \
          --volume var-lib-kubelet,kind=host,source=/var/lib/kubelet,recursive=true \
          --mount volume=var-lib-kubelet,target=/var/lib/kubelet \
          --volume var-log,kind=host,source=/var/log \
          --mount volume=var-log,target=/var/log \
          --volume opt-cni-bin,kind=host,source=/opt/cni/bin \
          --mount volume=opt-cni-bin,target=/opt/cni/bin \
          --volume etc-iscsi,kind=host,source=/etc/iscsi \
          --mount volume=etc-iscsi,target=/etc/iscsi \
          --volume usr-sbin-iscsiadm,kind=host,source=/usr/sbin/iscsiadm \
          --mount volume=usr-sbin-iscsiadm,target=/sbin/iscsiadm \
          docker://quay.io/poseidon/kubelet:v1.18.2 -- \
          --anonymous-auth=false \
          --authentication-token-webhook \
          --authorization-mode=Webhook \
          --cgroup-driver=$${KUBELET_CGROUP_DRIVER} \
          --client-ca-file=/etc/kubernetes/ca.crt \
          --cluster_dns=${cluster_dns_service_ip} \
          --cluster_domain=${cluster_domain_suffix} \
          --cni-conf-dir=/etc/kubernetes/cni/net.d \
          --exit-on-lock-contention \
          --healthz-port=0 \
          --hostname-override=${domain_name} \
          --kubeconfig=/etc/kubernetes/kubeconfig \
          --lock-file=/var/run/lock/kubelet.lock \
          --network-plugin=cni \
          --node-labels=node.kubernetes.io/master \
          --node-labels=node.kubernetes.io/controller="true" \
          --pod-manifest-path=/etc/kubernetes/manifests \
          --read-only-port=0 \
          --register-with-taints=node-role.kubernetes.io/master=:NoSchedule \
          --volume-plugin-dir=/var/lib/kubelet/volumeplugins
        ExecStop=-/usr/bin/rkt stop --uuid-file=/var/cache/kubelet-pod.uuid
        Restart=always
        RestartSec=10
        [Install]
        WantedBy=multi-user.target
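    # One-shot bootstrap: runs /opt/bootstrap/apply inside the kubelet image to
    # apply the control plane manifests, then marks completion via bootstrap.done.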
    - name: bootstrap.service
      contents: |
        [Unit]
        Description=Kubernetes control plane
        ConditionPathExists=!/opt/bootstrap/bootstrap.done
        [Service]
        Type=oneshot
        RemainAfterExit=true
        WorkingDirectory=/opt/bootstrap
        ExecStart=/usr/bin/rkt run \
          --trust-keys-from-https \
          --volume config,kind=host,source=/etc/kubernetes/bootstrap-secrets \
          --mount volume=config,target=/etc/kubernetes/secrets \
          --volume assets,kind=host,source=/opt/bootstrap/assets \
          --mount volume=assets,target=/assets \
          --volume script,kind=host,source=/opt/bootstrap/apply \
          --mount volume=script,target=/apply \
          --insecure-options=image \
          docker://quay.io/poseidon/kubelet:v1.18.2 \
          --net=host \
          --dns=host \
          --exec=/apply
        ExecStartPost=/bin/touch /opt/bootstrap/bootstrap.done
        [Install]
        WantedBy=multi-user.target
storage:
  directories:
    - path: /etc/kubernetes
      filesystem: root
  files:
    - path: /etc/hostname
      filesystem: root
      mode: 0644
      contents:
        inline:
          ${domain_name}
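    # Layout script: splits the rendered assets bundle on '#####' markers and
    # moves TLS material, the kubeconfig, and manifests into place.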
    - path: /opt/bootstrap/layout
      filesystem: root
      mode: 0544
      contents:
        inline: |
          #!/bin/bash -e
          mkdir -p -- auth tls/etcd tls/k8s static-manifests manifests/coredns manifests-networking
          awk '/#####/ {filename=$2; next} {print > filename}' assets
          mkdir -p /etc/ssl/etcd/etcd
          mkdir -p /etc/kubernetes/bootstrap-secrets
          mv tls/etcd/{peer*,server*} /etc/ssl/etcd/etcd/
          mv tls/etcd/etcd-client* /etc/kubernetes/bootstrap-secrets/
          chown -R etcd:etcd /etc/ssl/etcd
          chmod -R 500 /etc/ssl/etcd
          mv auth/kubeconfig /etc/kubernetes/bootstrap-secrets/
          mv tls/k8s/* /etc/kubernetes/bootstrap-secrets/
          sudo mkdir -p /etc/kubernetes/manifests
          sudo mv static-manifests/* /etc/kubernetes/manifests/
          sudo mkdir -p /opt/bootstrap/assets
          sudo mv manifests /opt/bootstrap/assets/manifests
          sudo mv manifests-networking/* /opt/bootstrap/assets/manifests/
          rm -rf assets auth static-manifests tls manifests-networking
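    # Apply script: waits for the static pod control plane, then retries
    # kubectl apply in a loop (see the commit note on v1.18.1 apply behavior).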
    - path: /opt/bootstrap/apply
      filesystem: root
      mode: 0544
      contents:
        inline: |
          #!/bin/bash -e
          export KUBECONFIG=/etc/kubernetes/secrets/kubeconfig
          until kubectl version; do
            echo "Waiting for static pod control plane"
            sleep 5
          done
          until kubectl apply -f /assets/manifests -R; do
            echo "Retry applying manifests"
            sleep 5
          done
    - path: /etc/sysctl.d/max-user-watches.conf
      filesystem: root
      contents:
        inline: |
          fs.inotify.max_user_watches=16184
passwd:
  users:
    - name: core
      ssh_authorized_keys:
        - ${ssh_authorized_key}