---
systemd:
  units:
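    # etcd-member: run etcd v3.4.2 on this controller with TLS client and peer
    # certificate auth; ${...} values are substituted when the config is templated.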
    - name: etcd-member.service
      enable: true
      dropins:
        - name: 40-etcd-cluster.conf
          contents: |
            [Service]
            Environment="ETCD_IMAGE_TAG=v3.4.2"
            Environment="ETCD_NAME=${etcd_name}"
            Environment="ETCD_ADVERTISE_CLIENT_URLS=https://${etcd_domain}:2379"
            Environment="ETCD_INITIAL_ADVERTISE_PEER_URLS=https://${etcd_domain}:2380"
            Environment="ETCD_LISTEN_CLIENT_URLS=https://0.0.0.0:2379"
            Environment="ETCD_LISTEN_PEER_URLS=https://0.0.0.0:2380"
            Environment="ETCD_LISTEN_METRICS_URLS=http://0.0.0.0:2381"
            Environment="ETCD_INITIAL_CLUSTER=${etcd_initial_cluster}"
            Environment="ETCD_STRICT_RECONFIG_CHECK=true"
            Environment="ETCD_SSL_DIR=/etc/ssl/etcd"
            Environment="ETCD_TRUSTED_CA_FILE=/etc/ssl/certs/etcd/server-ca.crt"
            Environment="ETCD_CERT_FILE=/etc/ssl/certs/etcd/server.crt"
            Environment="ETCD_KEY_FILE=/etc/ssl/certs/etcd/server.key"
            Environment="ETCD_CLIENT_CERT_AUTH=true"
            Environment="ETCD_PEER_TRUSTED_CA_FILE=/etc/ssl/certs/etcd/peer-ca.crt"
            Environment="ETCD_PEER_CERT_FILE=/etc/ssl/certs/etcd/peer.crt"
            Environment="ETCD_PEER_KEY_FILE=/etc/ssl/certs/etcd/peer.key"
            Environment="ETCD_PEER_CLIENT_CERT_AUTH=true"
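    # Enable Docker, mask locksmithd (no automatic reboot coordination), and
    # trigger kubelet.service only once /etc/kubernetes/kubeconfig exists.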
    - name: docker.service
      enable: true
    - name: locksmithd.service
      mask: true
    - name: kubelet.path
      enable: true
      contents: |
        [Unit]
        Description=Watch for kubeconfig
        [Path]
        PathExists=/etc/kubernetes/kubeconfig
        [Install]
        WantedBy=multi-user.target
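    # Hold kubelet.service and etcd-member.service until /etc/resolv.conf
    # contains at least one non-comment entry, so both start with working DNS.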
    - name: wait-for-dns.service
      enable: true
      contents: |
        [Unit]
        Description=Wait for DNS entries
        Wants=systemd-resolved.service
        Before=kubelet.service
        [Service]
        Type=oneshot
        RemainAfterExit=true
        ExecStart=/bin/sh -c 'while ! /usr/bin/grep '^[^#[:space:]]' /etc/resolv.conf > /dev/null; do sleep 1; done'
        [Install]
        RequiredBy=kubelet.service
        RequiredBy=etcd-member.service
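    # Run the kubelet via kubelet-wrapper (rkt), mounting host paths for CNI,
    # Calico, and logs; the node name is the DigitalOcean private IPv4 that
    # coreos-metadata.service writes to /run/metadata/coreos.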
    - name: kubelet.service
      contents: |
        [Unit]
        Description=Kubelet via Hyperkube
        Requires=coreos-metadata.service
        After=coreos-metadata.service
        Wants=rpc-statd.service
        [Service]
        EnvironmentFile=/etc/kubernetes/kubelet.env
        EnvironmentFile=/run/metadata/coreos
        Environment="RKT_RUN_ARGS=--uuid-file-save=/var/cache/kubelet-pod.uuid \
          --volume=resolv,kind=host,source=/etc/resolv.conf \
          --mount volume=resolv,target=/etc/resolv.conf \
          --volume var-lib-cni,kind=host,source=/var/lib/cni \
          --mount volume=var-lib-cni,target=/var/lib/cni \
          --volume var-lib-calico,kind=host,source=/var/lib/calico \
          --mount volume=var-lib-calico,target=/var/lib/calico \
          --volume opt-cni-bin,kind=host,source=/opt/cni/bin \
          --mount volume=opt-cni-bin,target=/opt/cni/bin \
          --volume var-log,kind=host,source=/var/log \
          --mount volume=var-log,target=/var/log \
          --insecure-options=image"
        ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
        ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
        ExecStartPre=/bin/mkdir -p /opt/cni/bin
        ExecStartPre=/bin/mkdir -p /var/lib/cni
        ExecStartPre=/bin/mkdir -p /var/lib/calico
        ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins
        ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
        ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/cache/kubelet-pod.uuid
        ExecStart=/usr/lib/coreos/kubelet-wrapper \
          --anonymous-auth=false \
          --authentication-token-webhook \
          --authorization-mode=Webhook \
          --client-ca-file=/etc/kubernetes/ca.crt \
          --cluster_dns=${cluster_dns_service_ip} \
          --cluster_domain=${cluster_domain_suffix} \
          --cni-conf-dir=/etc/kubernetes/cni/net.d \
          --exit-on-lock-contention \
          --hostname-override=$${COREOS_DIGITALOCEAN_IPV4_PRIVATE_0} \
          --kubeconfig=/etc/kubernetes/kubeconfig \
          --lock-file=/var/run/lock/kubelet.lock \
          --network-plugin=cni \
          --node-labels=node.kubernetes.io/master \
          --node-labels=node.kubernetes.io/controller="true" \
          --pod-manifest-path=/etc/kubernetes/manifests \
          --read-only-port=0 \
          --register-with-taints=node-role.kubernetes.io/master=:NoSchedule \
          --volume-plugin-dir=/var/lib/kubelet/volumeplugins
        ExecStop=-/usr/bin/rkt stop --uuid-file=/var/cache/kubelet-pod.uuid
        Restart=always
        RestartSec=10
        [Install]
        WantedBy=multi-user.target
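    # One-shot bootstrap: run /opt/bootstrap/apply inside the hyperkube image to
    # apply the assets in /opt/bootstrap/assets, then touch bootstrap.done so the
    # unit is skipped on later boots.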
    - name: bootstrap.service
      contents: |
        [Unit]
        Description=Kubernetes control plane
        ConditionPathExists=!/opt/bootstrap/bootstrap.done
        [Service]
        Type=oneshot
        RemainAfterExit=true
        WorkingDirectory=/opt/bootstrap
        ExecStartPre=-/usr/bin/bash -c 'set -x && [ -n "$(ls /opt/bootstrap/assets/manifests-*/* 2>/dev/null)" ] && mv /opt/bootstrap/assets/manifests-*/* /opt/bootstrap/assets/manifests && rm -rf /opt/bootstrap/assets/manifests-*'
        ExecStart=/usr/bin/rkt run \
          --trust-keys-from-https \
          --volume assets,kind=host,source=/opt/bootstrap/assets \
          --mount volume=assets,target=/assets \
          --volume script,kind=host,source=/opt/bootstrap/apply \
          --mount volume=script,target=/apply \
          --insecure-options=image \
          docker://k8s.gcr.io/hyperkube:v1.16.1 \
          --net=host \
          --dns=host \
          --exec=/apply
        ExecStartPost=/bin/touch /opt/bootstrap/bootstrap.done
        [Install]
        WantedBy=multi-user.target
storage:
  files:
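    # Kubelet image pin read by kubelet-wrapper (hyperkube v1.16.1).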
    - path: /etc/kubernetes/kubelet.env
      filesystem: root
      mode: 0644
      contents:
        inline: |
          KUBELET_IMAGE_URL=docker://k8s.gcr.io/hyperkube
          KUBELET_IMAGE_TAG=v1.16.1
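    # Bootstrap script: wait until the static pod control plane answers, then
    # apply the manifests, retrying each step every 5 seconds until it succeeds.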
    - path: /opt/bootstrap/apply
      filesystem: root
      mode: 0544
      contents:
        inline: |
          #!/bin/bash -e
          export KUBECONFIG=/assets/auth/kubeconfig
          until kubectl version; do
            echo "Waiting for static pod control plane"
            sleep 5
          done
          until kubectl apply -f /assets/manifests -R; do
            echo "Retry applying manifests"
            sleep 5
          done
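    # Raise the inotify watch limit for file-watch-heavy workloads.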
    - path: /etc/sysctl.d/max-user-watches.conf
      filesystem: root
      contents:
        inline: |
          fs.inotify.max_user_watches=16184