mirror of
https://github.com/puppetmaster/typhoon.git
synced 2024-12-26 07:29:32 +01:00
097dcdf47e
* Kubelets should register nodes via their private IPv4 address, as provided by the metadata service from Digital Ocean * By default, Kubelet execs `hostname` to determine the name it should use when registering with the apiserver. On Digital Ocean, the hostname is not routable by other instances. Digital Ocean does not run an internal DNS service. * Fixes issue where the apiserver can't reach the worker nodes. This prevented kubectl logs and exec commands from working
127 lines
4.5 KiB
Cheetah
127 lines
4.5 KiB
Cheetah
---
# Container Linux Config for a Typhoon worker node on Digital Ocean.
# Rendered by Terraform: ${...} are template variables; $${...} escapes to a
# literal ${...} so the shell/systemd sees the runtime environment variable.
systemd:
  units:
    - name: docker.service
      enable: true
    # Mask locksmithd so reboots are not coordinated by etcd locks.
    - name: locksmithd.service
      mask: true
    # Block kubelet startup until /etc/resolv.conf has at least one
    # non-comment entry, i.e. DNS is actually usable.
    - name: wait-for-dns.service
      enable: true
      contents: |
        [Unit]
        Description=Wait for DNS entries
        Wants=systemd-resolved.service
        Before=kubelet.service
        [Service]
        Type=oneshot
        RemainAfterExit=true
        ExecStart=/bin/sh -c 'while ! /usr/bin/grep '^[^#[:space:]]' /etc/resolv.conf > /dev/null; do sleep 1; done'
        [Install]
        RequiredBy=kubelet.service
    - name: kubelet.service
      enable: true
      contents: |
        [Unit]
        Description=Kubelet via Hyperkube ACI
        Requires=coreos-metadata.service
        After=coreos-metadata.service
        [Service]
        EnvironmentFile=/etc/kubernetes/kubelet.env
        # /run/metadata/coreos is written by coreos-metadata.service and
        # supplies COREOS_DIGITALOCEAN_IPV4_PRIVATE_0 used below.
        EnvironmentFile=/run/metadata/coreos
        Environment="RKT_RUN_ARGS=--uuid-file-save=/var/run/kubelet-pod.uuid \
          --volume=resolv,kind=host,source=/etc/resolv.conf \
          --mount volume=resolv,target=/etc/resolv.conf \
          --volume var-lib-cni,kind=host,source=/var/lib/cni \
          --mount volume=var-lib-cni,target=/var/lib/cni \
          --volume var-log,kind=host,source=/var/log \
          --mount volume=var-log,target=/var/log"
        ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
        ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
        ExecStartPre=/bin/mkdir -p /etc/kubernetes/checkpoint-secrets
        ExecStartPre=/bin/mkdir -p /etc/kubernetes/inactive-manifests
        ExecStartPre=/bin/mkdir -p /var/lib/cni
        ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
        ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/run/kubelet-pod.uuid
        ExecStart=/usr/lib/coreos/kubelet-wrapper \
          --kubeconfig=/etc/kubernetes/kubeconfig \
          --require-kubeconfig \
          --client-ca-file=/etc/kubernetes/ca.crt \
          --anonymous-auth=false \
          --cni-conf-dir=/etc/kubernetes/cni/net.d \
          --network-plugin=cni \
          --lock-file=/var/run/lock/kubelet.lock \
          --exit-on-lock-contention \
          --hostname-override=$${COREOS_DIGITALOCEAN_IPV4_PRIVATE_0} \
          --pod-manifest-path=/etc/kubernetes/manifests \
          --allow-privileged \
          --node-labels=node-role.kubernetes.io/node \
          --cluster_dns=${k8s_dns_service_ip} \
          --cluster_domain=cluster.local
        ExecStop=-/usr/bin/rkt stop --uuid-file=/var/run/kubelet-pod.uuid
        Restart=always
        RestartSec=5
        [Install]
        WantedBy=multi-user.target
    # Deregister this node from the apiserver when the machine shuts down.
    - name: delete-node.service
      enable: true
      contents: |
        [Unit]
        Description=Waiting to delete Kubernetes node on shutdown
        [Service]
        Type=oneshot
        RemainAfterExit=true
        ExecStart=/bin/true
        ExecStop=/etc/kubernetes/delete-node
        [Install]
        WantedBy=multi-user.target
storage:
  files:
    # Kubeconfig used by the kubelet; certificate material is injected by
    # Terraform template variables.
    - path: /etc/kubernetes/kubeconfig
      filesystem: root
      mode: 0644
      contents:
        inline: |
          apiVersion: v1
          kind: Config
          clusters:
          - name: local
            cluster:
              server: ${kubeconfig_server}
              certificate-authority-data: ${kubeconfig_ca_cert}
          users:
          - name: kubelet
            user:
              client-certificate-data: ${kubeconfig_kubelet_cert}
              client-key-data: ${kubeconfig_kubelet_key}
          contexts:
          - context:
              cluster: local
              user: kubelet
    # Environment consumed by kubelet-wrapper to pick the hyperkube image.
    - path: /etc/kubernetes/kubelet.env
      filesystem: root
      mode: 0644
      contents:
        inline: |
          KUBELET_IMAGE_URL=quay.io/coreos/hyperkube
          KUBELET_IMAGE_TAG=v1.7.1_coreos.0
    - path: /etc/sysctl.d/max-user-watches.conf
      filesystem: root
      contents:
        inline: |
          fs.inotify.max_user_watches=16184
    # Script invoked by delete-node.service at shutdown: runs kubectl inside
    # the hyperkube ACI to delete this node object from the apiserver.
    - path: /etc/kubernetes/delete-node
      filesystem: root
      mode: 0744
      contents:
        inline: |
          #!/bin/bash
          set -e
          exec /usr/bin/rkt run \
            --trust-keys-from-https \
            --volume config,kind=host,source=/etc/kubernetes \
            --mount volume=config,target=/etc/kubernetes \
            quay.io/coreos/hyperkube:v1.7.1_coreos.0 \
            --net=host \
            --dns=host \
            --exec=/kubectl -- --kubeconfig=/etc/kubernetes/kubeconfig delete node $(hostname)