mirror of
https://github.com/puppetmaster/typhoon.git
synced 2024-12-26 18:49:34 +01:00
fd044ee117
* Enable bootstrap token authentication on kube-apiserver
* Generate the bootstrap.kubernetes.io/token Secret that may be used as a bootstrap token
* Generate a bootstrap kubeconfig (with a bootstrap token) to be securely distributed to nodes. Each Kubelet will use the bootstrap kubeconfig to authenticate to kube-apiserver as `system:bootstrappers` and send a node-unique CSR for kube-controller-manager to automatically approve to issue a Kubelet certificate and kubeconfig (expires in 72 hours)
* Add ClusterRoleBinding for bootstrap token subjects (`system:bootstrappers`) to have the `system:node-bootstrapper` ClusterRole
* Add ClusterRoleBinding for bootstrap token subjects (`system:bootstrappers`) to have the csr nodeclient ClusterRole
* Add ClusterRoleBinding for bootstrap token subjects (`system:bootstrappers`) to have the csr selfnodeclient ClusterRole
* Enable NodeRestriction admission controller to limit the scope of Node or Pod objects a Kubelet can modify to those of the node itself
* Ability for a Kubelet to delete its Node object is retained as preemptible nodes or those in auto-scaling instance groups need to be able to remove themselves on shutdown. This need continues to have precedence over any risk of a node deleting itself maliciously

Security notes:

1. Issued Kubelet certificates authenticate as user `system:node:NAME` and group `system:nodes` and are limited in their authorization to perform API operations by Node authorization and NodeRestriction admission. Previously, a Kubelet's authorization was broader. This is the primary security motivation.
2. The bootstrap kubeconfig credential has the same sensitivity as the previous generated TLS client-certificate kubeconfig. It must be distributed securely to nodes. Its compromise still allows an attacker to obtain a Kubelet kubeconfig
3. Bootstrapping Kubelet kubeconfigs with a limited lifetime offers a slight security improvement.
   * An attacker who obtains the kubeconfig can likely obtain the bootstrap kubeconfig as well, to obtain the ability to renew their access
   * A compromised bootstrap kubeconfig could plausibly be handled by replacing the bootstrap token Secret, distributing the token to new nodes, and expiration. Whereas a compromised TLS-client certificate kubeconfig can't be revoked (no CRL). However, replacing a bootstrap token can be impractical in real cluster environments, so the limited lifetime is mostly a theoretical benefit.
   * Cluster CSR objects are visible via kubectl which is nice
4. Bootstrapping node-unique Kubelet kubeconfigs means Kubelet clients have more identity information, which can improve the utility of audits and future features

Rel: https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping/
Rel: https://github.com/poseidon/terraform-render-bootstrap/pull/185
217 lines
9.4 KiB
YAML
217 lines
9.4 KiB
YAML
---
# Container Linux Config (CLC) template for a Typhoon controller node.
# NOTE(review): this file is a Terraform template — `${...}` placeholders are
# interpolated by Terraform before the result is transpiled by ct; `$${...}`
# escapes to a literal `${...}` for systemd at runtime.
systemd:
  units:
    # etcd member, configured entirely via drop-in environment variables.
    - name: etcd-member.service
      enable: true
      dropins:
        - name: 40-etcd-cluster.conf
          contents: |
            [Service]
            Environment="ETCD_IMAGE_TAG=v3.4.7"
            Environment="ETCD_IMAGE_URL=docker://quay.io/coreos/etcd"
            Environment="RKT_RUN_ARGS=--insecure-options=image"
            Environment="ETCD_NAME=${etcd_name}"
            Environment="ETCD_ADVERTISE_CLIENT_URLS=https://${domain_name}:2379"
            Environment="ETCD_INITIAL_ADVERTISE_PEER_URLS=https://${domain_name}:2380"
            Environment="ETCD_LISTEN_CLIENT_URLS=https://0.0.0.0:2379"
            Environment="ETCD_LISTEN_PEER_URLS=https://0.0.0.0:2380"
            Environment="ETCD_LISTEN_METRICS_URLS=http://0.0.0.0:2381"
            Environment="ETCD_INITIAL_CLUSTER=${etcd_initial_cluster}"
            Environment="ETCD_STRICT_RECONFIG_CHECK=true"
            Environment="ETCD_SSL_DIR=/etc/ssl/etcd"
            Environment="ETCD_TRUSTED_CA_FILE=/etc/ssl/certs/etcd/server-ca.crt"
            Environment="ETCD_CERT_FILE=/etc/ssl/certs/etcd/server.crt"
            Environment="ETCD_KEY_FILE=/etc/ssl/certs/etcd/server.key"
            Environment="ETCD_CLIENT_CERT_AUTH=true"
            Environment="ETCD_PEER_TRUSTED_CA_FILE=/etc/ssl/certs/etcd/peer-ca.crt"
            Environment="ETCD_PEER_CERT_FILE=/etc/ssl/certs/etcd/peer.crt"
            Environment="ETCD_PEER_KEY_FILE=/etc/ssl/certs/etcd/peer.key"
            Environment="ETCD_PEER_CLIENT_CERT_AUTH=true"
    - name: docker.service
      enable: true
    # Disable locksmithd (reboot coordination) on controllers.
    - name: locksmithd.service
      mask: true
    # Start kubelet.service only once a kubeconfig has been placed on disk.
    - name: kubelet.path
      enable: true
      contents: |
        [Unit]
        Description=Watch for kubeconfig
        [Path]
        PathExists=/etc/kubernetes/kubeconfig
        [Install]
        WantedBy=multi-user.target
    # Block kubelet/etcd startup until /etc/resolv.conf has a non-comment entry.
    - name: wait-for-dns.service
      enable: true
      contents: |
        [Unit]
        Description=Wait for DNS entries
        Wants=systemd-resolved.service
        Before=kubelet.service
        [Service]
        Type=oneshot
        RemainAfterExit=true
        ExecStart=/bin/sh -c 'while ! /usr/bin/grep '^[^#[:space:]]' /etc/resolv.conf > /dev/null; do sleep 1; done'
        [Install]
        RequiredBy=kubelet.service
        RequiredBy=etcd-member.service
    # Kubelet run via rkt fly stage1; bootstraps its own client cert via
    # --bootstrap-kubeconfig (bootstrap token) then rotates certificates.
    - name: kubelet.service
      contents: |
        [Unit]
        Description=Kubelet
        Wants=rpc-statd.service
        [Service]
        Environment=KUBELET_CGROUP_DRIVER=${cgroup_driver}
        ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
        ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
        ExecStartPre=/bin/mkdir -p /opt/cni/bin
        ExecStartPre=/bin/mkdir -p /var/lib/calico
        ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins
        ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
        ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/cache/kubelet-pod.uuid
        ExecStart=/usr/bin/rkt run \
          --uuid-file-save=/var/cache/kubelet-pod.uuid \
          --stage1-from-dir=stage1-fly.aci \
          --hosts-entry host \
          --insecure-options=image \
          --volume etc-kubernetes,kind=host,source=/etc/kubernetes,readOnly=true \
          --mount volume=etc-kubernetes,target=/etc/kubernetes \
          --volume etc-machine-id,kind=host,source=/etc/machine-id,readOnly=true \
          --mount volume=etc-machine-id,target=/etc/machine-id \
          --volume etc-os-release,kind=host,source=/usr/lib/os-release,readOnly=true \
          --mount volume=etc-os-release,target=/etc/os-release \
          --volume=etc-resolv,kind=host,source=/etc/resolv.conf,readOnly=true \
          --mount volume=etc-resolv,target=/etc/resolv.conf \
          --volume etc-ssl-certs,kind=host,source=/etc/ssl/certs,readOnly=true \
          --mount volume=etc-ssl-certs,target=/etc/ssl/certs \
          --volume lib-modules,kind=host,source=/lib/modules,readOnly=true \
          --mount volume=lib-modules,target=/lib/modules \
          --volume run,kind=host,source=/run \
          --mount volume=run,target=/run \
          --volume usr-share-certs,kind=host,source=/usr/share/ca-certificates,readOnly=true \
          --mount volume=usr-share-certs,target=/usr/share/ca-certificates \
          --volume var-lib-calico,kind=host,source=/var/lib/calico,readOnly=true \
          --mount volume=var-lib-calico,target=/var/lib/calico \
          --volume var-lib-docker,kind=host,source=/var/lib/docker \
          --mount volume=var-lib-docker,target=/var/lib/docker \
          --volume var-lib-kubelet,kind=host,source=/var/lib/kubelet,recursive=true \
          --mount volume=var-lib-kubelet,target=/var/lib/kubelet \
          --volume var-log,kind=host,source=/var/log \
          --mount volume=var-log,target=/var/log \
          --volume opt-cni-bin,kind=host,source=/opt/cni/bin \
          --mount volume=opt-cni-bin,target=/opt/cni/bin \
          --volume etc-iscsi,kind=host,source=/etc/iscsi \
          --mount volume=etc-iscsi,target=/etc/iscsi \
          --volume usr-sbin-iscsiadm,kind=host,source=/usr/sbin/iscsiadm \
          --mount volume=usr-sbin-iscsiadm,target=/sbin/iscsiadm \
          docker://quay.io/poseidon/kubelet:v1.18.2 -- \
          --anonymous-auth=false \
          --authentication-token-webhook \
          --authorization-mode=Webhook \
          --bootstrap-kubeconfig=/etc/kubernetes/kubeconfig \
          --cgroup-driver=$${KUBELET_CGROUP_DRIVER} \
          --client-ca-file=/etc/kubernetes/ca.crt \
          --cluster_dns=${cluster_dns_service_ip} \
          --cluster_domain=${cluster_domain_suffix} \
          --cni-conf-dir=/etc/kubernetes/cni/net.d \
          --exit-on-lock-contention \
          --healthz-port=0 \
          --hostname-override=${domain_name} \
          --kubeconfig=/var/lib/kubelet/kubeconfig \
          --lock-file=/var/run/lock/kubelet.lock \
          --network-plugin=cni \
          --node-labels=node.kubernetes.io/master \
          --node-labels=node.kubernetes.io/controller="true" \
          --pod-manifest-path=/etc/kubernetes/manifests \
          --read-only-port=0 \
          --register-with-taints=node-role.kubernetes.io/master=:NoSchedule \
          --rotate-certificates \
          --volume-plugin-dir=/var/lib/kubelet/volumeplugins
        ExecStop=-/usr/bin/rkt stop --uuid-file=/var/cache/kubelet-pod.uuid
        Restart=always
        RestartSec=10
        [Install]
        WantedBy=multi-user.target
    # One-shot apply of control-plane manifests; guarded by a done-file so it
    # runs only on first boot.
    - name: bootstrap.service
      contents: |
        [Unit]
        Description=Kubernetes control plane
        ConditionPathExists=!/opt/bootstrap/bootstrap.done
        [Service]
        Type=oneshot
        RemainAfterExit=true
        WorkingDirectory=/opt/bootstrap
        ExecStart=/usr/bin/rkt run \
          --trust-keys-from-https \
          --volume config,kind=host,source=/etc/kubernetes/bootstrap-secrets \
          --mount volume=config,target=/etc/kubernetes/secrets \
          --volume assets,kind=host,source=/opt/bootstrap/assets \
          --mount volume=assets,target=/assets \
          --volume script,kind=host,source=/opt/bootstrap/apply \
          --mount volume=script,target=/apply \
          --insecure-options=image \
          docker://quay.io/poseidon/kubelet:v1.18.2 \
          --net=host \
          --dns=host \
          --exec=/apply
        ExecStartPost=/bin/touch /opt/bootstrap/bootstrap.done
        [Install]
        WantedBy=multi-user.target
storage:
  directories:
    - path: /etc/kubernetes
      filesystem: root
  files:
    - path: /etc/hostname
      filesystem: root
      # NOTE: mode values are integers interpreted as octal by the transpiler;
      # do not quote them.
      mode: 0644
      contents:
        inline:
          ${domain_name}
    # Splits the bundled assets file into directories and moves credentials
    # into place for etcd and the bootstrap control plane.
    - path: /opt/bootstrap/layout
      filesystem: root
      mode: 0544
      contents:
        inline: |
          #!/bin/bash -e
          mkdir -p -- auth tls/etcd tls/k8s static-manifests manifests/coredns manifests-networking
          awk '/#####/ {filename=$2; next} {print > filename}' assets
          mkdir -p /etc/ssl/etcd/etcd
          mkdir -p /etc/kubernetes/bootstrap-secrets
          mv tls/etcd/{peer*,server*} /etc/ssl/etcd/etcd/
          mv tls/etcd/etcd-client* /etc/kubernetes/bootstrap-secrets/
          chown -R etcd:etcd /etc/ssl/etcd
          chmod -R 500 /etc/ssl/etcd
          mv auth/kubeconfig /etc/kubernetes/bootstrap-secrets/
          mv tls/k8s/* /etc/kubernetes/bootstrap-secrets/
          mkdir -p /etc/kubernetes/manifests
          mv static-manifests/* /etc/kubernetes/manifests/
          mkdir -p /opt/bootstrap/assets
          mv manifests /opt/bootstrap/assets/manifests
          mv manifests-networking/* /opt/bootstrap/assets/manifests/
          rm -rf assets auth static-manifests tls manifests-networking
    # Waits for the static-pod control plane, then applies all manifests,
    # retrying until kube-apiserver accepts them.
    - path: /opt/bootstrap/apply
      filesystem: root
      mode: 0544
      contents:
        inline: |
          #!/bin/bash -e
          export KUBECONFIG=/etc/kubernetes/secrets/kubeconfig
          until kubectl version; do
            echo "Waiting for static pod control plane"
            sleep 5
          done
          until kubectl apply -f /assets/manifests -R; do
            echo "Retry applying manifests"
            sleep 5
          done
    - path: /etc/sysctl.d/max-user-watches.conf
      filesystem: root
      contents:
        inline: |
          fs.inotify.max_user_watches=16184
passwd:
  users:
    - name: core
      ssh_authorized_keys:
        - ${ssh_authorized_key}