---
# Container Linux Config (Terraform template) for a Kubernetes controller node.
# ${...} tokens are interpolated by Terraform templatefile; $${...} escapes a
# literal shell/systemd variable reference so Terraform leaves it intact.
systemd:
  units:
    # etcd member with TLS client/peer auth; image tag pinned via dropin.
    - name: etcd-member.service
      enable: true
      dropins:
        - name: 40-etcd-cluster.conf
          contents: |
            [Service]
            Environment="ETCD_IMAGE_TAG=v3.4.3"
            Environment="ETCD_NAME=${etcd_name}"
            Environment="ETCD_ADVERTISE_CLIENT_URLS=https://${domain_name}:2379"
            Environment="ETCD_INITIAL_ADVERTISE_PEER_URLS=https://${domain_name}:2380"
            Environment="ETCD_LISTEN_CLIENT_URLS=https://0.0.0.0:2379"
            Environment="ETCD_LISTEN_PEER_URLS=https://0.0.0.0:2380"
            Environment="ETCD_LISTEN_METRICS_URLS=http://0.0.0.0:2381"
            Environment="ETCD_INITIAL_CLUSTER=${etcd_initial_cluster}"
            Environment="ETCD_STRICT_RECONFIG_CHECK=true"
            Environment="ETCD_SSL_DIR=/etc/ssl/etcd"
            Environment="ETCD_TRUSTED_CA_FILE=/etc/ssl/certs/etcd/server-ca.crt"
            Environment="ETCD_CERT_FILE=/etc/ssl/certs/etcd/server.crt"
            Environment="ETCD_KEY_FILE=/etc/ssl/certs/etcd/server.key"
            Environment="ETCD_CLIENT_CERT_AUTH=true"
            Environment="ETCD_PEER_TRUSTED_CA_FILE=/etc/ssl/certs/etcd/peer-ca.crt"
            Environment="ETCD_PEER_CERT_FILE=/etc/ssl/certs/etcd/peer.crt"
            Environment="ETCD_PEER_KEY_FILE=/etc/ssl/certs/etcd/peer.key"
            Environment="ETCD_PEER_CLIENT_CERT_AUTH=true"
    - name: docker.service
      enable: true
    # Mask locksmithd so reboots are not auto-coordinated on this node.
    - name: locksmithd.service
      mask: true
    # Path unit: start kubelet only once a kubeconfig has been provisioned.
    - name: kubelet.path
      enable: true
      contents: |
        [Unit]
        Description=Watch for kubeconfig
        [Path]
        PathExists=/etc/kubernetes/kubeconfig
        [Install]
        WantedBy=multi-user.target
    # Block kubelet/etcd until /etc/resolv.conf has a non-comment entry.
    - name: wait-for-dns.service
      enable: true
      contents: |
        [Unit]
        Description=Wait for DNS entries
        Wants=systemd-resolved.service
        Before=kubelet.service
        [Service]
        Type=oneshot
        RemainAfterExit=true
        ExecStart=/bin/sh -c 'while ! /usr/bin/grep '^[^#[:space:]]' /etc/resolv.conf > /dev/null; do sleep 1; done'
        [Install]
        RequiredBy=kubelet.service
        RequiredBy=etcd-member.service
    # Kubelet runs via the CoreOS kubelet-wrapper (rkt); started by kubelet.path.
    - name: kubelet.service
      contents: |
        [Unit]
        Description=Kubelet via Hyperkube
        Wants=rpc-statd.service
        [Service]
        EnvironmentFile=/etc/kubernetes/kubelet.env
        Environment="RKT_RUN_ARGS=--uuid-file-save=/var/cache/kubelet-pod.uuid \
          --volume=resolv,kind=host,source=/etc/resolv.conf \
          --mount volume=resolv,target=/etc/resolv.conf \
          --volume var-lib-cni,kind=host,source=/var/lib/cni \
          --mount volume=var-lib-cni,target=/var/lib/cni \
          --volume var-lib-calico,kind=host,source=/var/lib/calico \
          --mount volume=var-lib-calico,target=/var/lib/calico \
          --volume opt-cni-bin,kind=host,source=/opt/cni/bin \
          --mount volume=opt-cni-bin,target=/opt/cni/bin \
          --volume var-log,kind=host,source=/var/log \
          --mount volume=var-log,target=/var/log \
          --volume iscsiconf,kind=host,source=/etc/iscsi/ \
          --mount volume=iscsiconf,target=/etc/iscsi/ \
          --volume iscsiadm,kind=host,source=/usr/sbin/iscsiadm \
          --mount volume=iscsiadm,target=/sbin/iscsiadm \
          --insecure-options=image"
        Environment=KUBELET_CGROUP_DRIVER=${cgroup_driver}
        ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
        ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
        ExecStartPre=/bin/mkdir -p /opt/cni/bin
        ExecStartPre=/bin/mkdir -p /var/lib/cni
        ExecStartPre=/bin/mkdir -p /var/lib/calico
        ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins
        ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
        ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/cache/kubelet-pod.uuid
        ExecStart=/usr/lib/coreos/kubelet-wrapper \
          --anonymous-auth=false \
          --authentication-token-webhook \
          --authorization-mode=Webhook \
          --cgroup-driver=$${KUBELET_CGROUP_DRIVER} \
          --client-ca-file=/etc/kubernetes/ca.crt \
          --cluster_dns=${cluster_dns_service_ip} \
          --cluster_domain=${cluster_domain_suffix} \
          --cni-conf-dir=/etc/kubernetes/cni/net.d \
          --exit-on-lock-contention \
          --hostname-override=${domain_name} \
          --kubeconfig=/etc/kubernetes/kubeconfig \
          --lock-file=/var/run/lock/kubelet.lock \
          --network-plugin=cni \
          --node-labels=node.kubernetes.io/master \
          --node-labels=node.kubernetes.io/controller="true" \
          --pod-manifest-path=/etc/kubernetes/manifests \
          --read-only-port=0 \
          --register-with-taints=node-role.kubernetes.io/master=:NoSchedule \
          --volume-plugin-dir=/var/lib/kubelet/volumeplugins
        ExecStop=-/usr/bin/rkt stop --uuid-file=/var/cache/kubelet-pod.uuid
        Restart=always
        RestartSec=10
        [Install]
        WantedBy=multi-user.target
    # One-shot bootstrap of the control plane; guarded by a done-file so it
    # only ever runs once per node.
    - name: bootstrap.service
      contents: |
        [Unit]
        Description=Kubernetes control plane
        ConditionPathExists=!/opt/bootstrap/bootstrap.done
        [Service]
        Type=oneshot
        RemainAfterExit=true
        WorkingDirectory=/opt/bootstrap
        ExecStartPre=-/usr/bin/bash -c 'set -x && [ -n "$(ls /opt/bootstrap/assets/manifests-*/* 2>/dev/null)" ] && mv /opt/bootstrap/assets/manifests-*/* /opt/bootstrap/assets/manifests && rm -rf /opt/bootstrap/assets/manifests-*'
        ExecStart=/usr/bin/rkt run \
          --trust-keys-from-https \
          --volume config,kind=host,source=/etc/kubernetes/bootstrap-secrets \
          --mount volume=config,target=/etc/kubernetes/secrets \
          --volume assets,kind=host,source=/opt/bootstrap/assets \
          --mount volume=assets,target=/assets \
          --volume script,kind=host,source=/opt/bootstrap/apply \
          --mount volume=script,target=/apply \
          --insecure-options=image \
          docker://k8s.gcr.io/hyperkube:v1.17.0 \
          --net=host \
          --dns=host \
          --exec=/apply
        ExecStartPost=/bin/touch /opt/bootstrap/bootstrap.done
        [Install]
        WantedBy=multi-user.target
storage:
  files:
    # Environment consumed by kubelet-wrapper (image URL/tag and exec args).
    - path: /etc/kubernetes/kubelet.env
      filesystem: root
      mode: 0644
      contents:
        inline: |
          KUBELET_IMAGE_URL=docker://k8s.gcr.io/hyperkube
          KUBELET_IMAGE_TAG=v1.17.0
          KUBELET_IMAGE_ARGS="--exec=/usr/local/bin/kubelet"
    - path: /etc/hostname
      filesystem: root
      mode: 0644
      contents:
        inline:
          ${domain_name}
    # Unpacks the single provisioned "assets" bundle (sections delimited by
    # "##### <filename>" markers) and lays files out for etcd and the
    # bootstrap control plane.
    - path: /opt/bootstrap/layout
      filesystem: root
      mode: 0544
      contents:
        inline: |
          #!/bin/bash -e
          mkdir -p -- auth tls/etcd tls/k8s static-manifests manifests/coredns manifests-networking
          awk '/#####/ {filename=$2; next} {print > filename}' assets
          mkdir -p /etc/ssl/etcd/etcd
          mkdir -p /etc/kubernetes/bootstrap-secrets
          mv tls/etcd/{peer*,server*} /etc/ssl/etcd/etcd/
          mv tls/etcd/etcd-client* /etc/kubernetes/bootstrap-secrets/
          chown -R etcd:etcd /etc/ssl/etcd
          chmod -R 500 /etc/ssl/etcd
          mv auth/kubeconfig /etc/kubernetes/bootstrap-secrets/
          mv tls/k8s/* /etc/kubernetes/bootstrap-secrets/
          sudo mkdir -p /etc/kubernetes/manifests
          sudo mv static-manifests/* /etc/kubernetes/manifests/
          sudo mkdir -p /opt/bootstrap/assets
          sudo mv manifests /opt/bootstrap/assets/manifests
          sudo mv manifests-networking /opt/bootstrap/assets/manifests-networking
          rm -rf assets auth static-manifests tls
    # Executed inside the bootstrap container: waits for the static pod
    # control plane, then applies cluster manifests until success.
    - path: /opt/bootstrap/apply
      filesystem: root
      mode: 0544
      contents:
        inline: |
          #!/bin/bash -e
          export KUBECONFIG=/etc/kubernetes/secrets/kubeconfig
          until kubectl version; do
            echo "Waiting for static pod control plane"
            sleep 5
          done
          until kubectl apply -f /assets/manifests -R; do
            echo "Retry applying manifests"
            sleep 5
          done
    - path: /etc/sysctl.d/max-user-watches.conf
      filesystem: root
      contents:
        inline: |
          fs.inotify.max_user_watches=16184
passwd:
  users:
    - name: core
      ssh_authorized_keys:
        - ${ssh_authorized_key}