---
systemd:
  units:
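    # etcd-member.service is the etcd wrapper shipped with Container Linux; this
    # dropin pins the etcd v3.4 image and configures the member through ETCD_*
    # environment variables, each of which maps to the corresponding etcd flag
    # (e.g. ETCD_LISTEN_CLIENT_URLS -> --listen-client-urls).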
    - name: etcd-member.service
      enable: true
      dropins:
        - name: 40-etcd-cluster.conf
          contents: |
            [Service]
            Environment="ETCD_IMAGE_TAG=v3.4.5"
            Environment="ETCD_IMAGE_URL=docker://quay.io/coreos/etcd"
            Environment="RKT_RUN_ARGS=--insecure-options=image"
            Environment="ETCD_NAME=${etcd_name}"
            Environment="ETCD_ADVERTISE_CLIENT_URLS=https://${etcd_domain}:2379"
            Environment="ETCD_INITIAL_ADVERTISE_PEER_URLS=https://${etcd_domain}:2380"
            Environment="ETCD_LISTEN_CLIENT_URLS=https://0.0.0.0:2379"
            Environment="ETCD_LISTEN_PEER_URLS=https://0.0.0.0:2380"
            Environment="ETCD_LISTEN_METRICS_URLS=http://0.0.0.0:2381"
            Environment="ETCD_INITIAL_CLUSTER=${etcd_initial_cluster}"
            Environment="ETCD_STRICT_RECONFIG_CHECK=true"
            Environment="ETCD_SSL_DIR=/etc/ssl/etcd"
            Environment="ETCD_TRUSTED_CA_FILE=/etc/ssl/certs/etcd/server-ca.crt"
            Environment="ETCD_CERT_FILE=/etc/ssl/certs/etcd/server.crt"
            Environment="ETCD_KEY_FILE=/etc/ssl/certs/etcd/server.key"
            Environment="ETCD_CLIENT_CERT_AUTH=true"
            Environment="ETCD_PEER_TRUSTED_CA_FILE=/etc/ssl/certs/etcd/peer-ca.crt"
            Environment="ETCD_PEER_CERT_FILE=/etc/ssl/certs/etcd/peer.crt"
            Environment="ETCD_PEER_KEY_FILE=/etc/ssl/certs/etcd/peer.key"
            Environment="ETCD_PEER_CLIENT_CERT_AUTH=true"
    - name: docker.service
      enable: true
    - name: locksmithd.service
      mask: true
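    # Oneshot gate: succeed only once /etc/resolv.conf contains a non-comment,
    # non-blank line. The RequiredBy entries make kubelet and etcd-member wait
    # for working DNS before starting.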
    - name: wait-for-dns.service
      enable: true
      contents: |
        [Unit]
        Description=Wait for DNS entries
        Wants=systemd-resolved.service
        Before=kubelet.service
        [Service]
        Type=oneshot
        RemainAfterExit=true
        ExecStart=/bin/sh -c 'while ! /usr/bin/grep '^[^#[:space:]]' /etc/resolv.conf > /dev/null; do sleep 1; done'
        [Install]
        RequiredBy=kubelet.service
        RequiredBy=etcd-member.service
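    # The Kubelet runs as a rkt fly container defined inline, replacing the
    # deprecated Container Linux kubelet-wrapper script so every flag and
    # volume mount is auditable in one place.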
    - name: kubelet.service
      enable: true
      contents: |
        [Unit]
        Description=Kubelet via Hyperkube
        Wants=rpc-statd.service
        [Service]
        Environment=KUBELET_CGROUP_DRIVER=${cgroup_driver}
        ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
        ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
        ExecStartPre=/bin/mkdir -p /opt/cni/bin
        ExecStartPre=/bin/mkdir -p /var/lib/calico
        ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins
        ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
        ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/cache/kubelet-pod.uuid
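        # Host paths are mounted read-only where possible; /var/lib/kubelet is
        # mounted recursively so pod volume mounts propagate into the container.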
        ExecStart=/usr/bin/rkt run \
          --uuid-file-save=/var/cache/kubelet-pod.uuid \
          --stage1-from-dir=stage1-fly.aci \
          --hosts-entry host \
          --insecure-options=image \
          --volume etc-kubernetes,kind=host,source=/etc/kubernetes,readOnly=true \
          --mount volume=etc-kubernetes,target=/etc/kubernetes \
          --volume etc-machine-id,kind=host,source=/etc/machine-id,readOnly=true \
          --mount volume=etc-machine-id,target=/etc/machine-id \
          --volume etc-os-release,kind=host,source=/usr/lib/os-release,readOnly=true \
          --mount volume=etc-os-release,target=/etc/os-release \
          --volume etc-resolv,kind=host,source=/etc/resolv.conf,readOnly=true \
          --mount volume=etc-resolv,target=/etc/resolv.conf \
          --volume etc-ssl-certs,kind=host,source=/etc/ssl/certs,readOnly=true \
          --mount volume=etc-ssl-certs,target=/etc/ssl/certs \
          --volume lib-modules,kind=host,source=/lib/modules,readOnly=true \
          --mount volume=lib-modules,target=/lib/modules \
          --volume run,kind=host,source=/run \
          --mount volume=run,target=/run \
          --volume usr-share-certs,kind=host,source=/usr/share/ca-certificates,readOnly=true \
          --mount volume=usr-share-certs,target=/usr/share/ca-certificates \
          --volume var-lib-calico,kind=host,source=/var/lib/calico,readOnly=true \
          --mount volume=var-lib-calico,target=/var/lib/calico \
          --volume var-lib-docker,kind=host,source=/var/lib/docker \
          --mount volume=var-lib-docker,target=/var/lib/docker \
          --volume var-lib-kubelet,kind=host,source=/var/lib/kubelet,recursive=true \
          --mount volume=var-lib-kubelet,target=/var/lib/kubelet \
          --volume var-log,kind=host,source=/var/log \
          --mount volume=var-log,target=/var/log \
          --volume opt-cni-bin,kind=host,source=/opt/cni/bin \
          --mount volume=opt-cni-bin,target=/opt/cni/bin \
          docker://k8s.gcr.io/hyperkube:v1.17.4 \
          --exec=/usr/local/bin/kubelet -- \
          --anonymous-auth=false \
          --authentication-token-webhook \
          --authorization-mode=Webhook \
          --cgroup-driver=$${KUBELET_CGROUP_DRIVER} \
          --client-ca-file=/etc/kubernetes/ca.crt \
          --cluster_dns=${cluster_dns_service_ip} \
          --cluster_domain=${cluster_domain_suffix} \
          --cni-conf-dir=/etc/kubernetes/cni/net.d \
          --exit-on-lock-contention \
          --healthz-port=0 \
          --kubeconfig=/etc/kubernetes/kubeconfig \
          --lock-file=/var/run/lock/kubelet.lock \
          --network-plugin=cni \
          --node-labels=node.kubernetes.io/master \
          --node-labels=node.kubernetes.io/controller="true" \
          --pod-manifest-path=/etc/kubernetes/manifests \
          --read-only-port=0 \
          --register-with-taints=node-role.kubernetes.io/master=:NoSchedule \
          --volume-plugin-dir=/var/lib/kubelet/volumeplugins
        ExecStop=-/usr/bin/rkt stop --uuid-file=/var/cache/kubelet-pod.uuid
        Restart=always
        RestartSec=10
        [Install]
        WantedBy=multi-user.target
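    # One-shot bootstrap of the Kubernetes control plane. The unit is deliberately
    # not enabled: it is started on demand during provisioning, and the
    # ConditionPathExists guard makes re-runs a no-op once bootstrap.done exists.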
    - name: bootstrap.service
      contents: |
        [Unit]
        Description=Kubernetes control plane
        ConditionPathExists=!/opt/bootstrap/bootstrap.done
        [Service]
        Type=oneshot
        RemainAfterExit=true
        WorkingDirectory=/opt/bootstrap
        ExecStartPre=-/usr/bin/bash -c 'set -x && [ -n "$(ls /opt/bootstrap/assets/manifests-*/* 2>/dev/null)" ] && mv /opt/bootstrap/assets/manifests-*/* /opt/bootstrap/assets/manifests && rm -rf /opt/bootstrap/assets/manifests-*'
        ExecStart=/usr/bin/rkt run \
          --trust-keys-from-https \
          --volume config,kind=host,source=/etc/kubernetes/bootstrap-secrets \
          --mount volume=config,target=/etc/kubernetes/secrets \
          --volume assets,kind=host,source=/opt/bootstrap/assets \
          --mount volume=assets,target=/assets \
          --volume script,kind=host,source=/opt/bootstrap/apply \
          --mount volume=script,target=/apply \
          --insecure-options=image \
          docker://k8s.gcr.io/hyperkube:v1.17.4 \
          --net=host \
          --dns=host \
          --exec=/apply
        ExecStartPost=/bin/touch /opt/bootstrap/bootstrap.done
        [Install]
        WantedBy=multi-user.target
storage:
  files:
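    # Files below are written by Ignition at first boot; the ${...} placeholders
    # (kubeconfig, cluster DNS IP, etc.) are filled in when Terraform renders
    # this template.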
    - path: /etc/kubernetes/kubeconfig
      filesystem: root
      mode: 0644
      contents:
        inline: |
          ${kubeconfig}
    - path: /opt/bootstrap/layout
      filesystem: root
      mode: 0544
      contents:
        inline: |
          #!/bin/bash -e
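          # Unpack the single assets bundle distributed by Terraform's file
          # provisioner: entries are delimited by "##### <filename>" header
          # lines, which the awk program below splits back into files.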
          mkdir -p -- auth tls/etcd tls/k8s static-manifests manifests/coredns manifests-networking
          awk '/#####/ {filename=$2; next} {print > filename}' assets
          mkdir -p /etc/ssl/etcd/etcd
          mkdir -p /etc/kubernetes/bootstrap-secrets
          mv tls/etcd/{peer*,server*} /etc/ssl/etcd/etcd/
          mv tls/etcd/etcd-client* /etc/kubernetes/bootstrap-secrets/
          chown -R etcd:etcd /etc/ssl/etcd
          chmod -R 500 /etc/ssl/etcd
          mv auth/kubeconfig /etc/kubernetes/bootstrap-secrets/
          mv tls/k8s/* /etc/kubernetes/bootstrap-secrets/
          sudo mkdir -p /etc/kubernetes/manifests
          sudo mv static-manifests/* /etc/kubernetes/manifests/
          sudo mkdir -p /opt/bootstrap/assets
          sudo mv manifests /opt/bootstrap/assets/manifests
          sudo mv manifests-networking /opt/bootstrap/assets/manifests-networking
          rm -rf assets auth static-manifests tls
    - path: /opt/bootstrap/apply
      filesystem: root
      mode: 0544
      contents:
        inline: |
          #!/bin/bash -e
          export KUBECONFIG=/etc/kubernetes/secrets/kubeconfig
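          # Wait for the static pod control plane to answer, then apply the
          # bundled manifests, retrying until the apiserver accepts them.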
          until kubectl version; do
            echo "Waiting for static pod control plane"
            sleep 5
          done
          until kubectl apply -f /assets/manifests -R; do
            echo "Retry applying manifests"
            sleep 5
          done
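    # Raise fs.inotify.max_user_watches above the kernel default of 8192 for
    # workloads that watch many files.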
    - path: /etc/sysctl.d/max-user-watches.conf
      filesystem: root
      contents:
        inline: |
          fs.inotify.max_user_watches=16184
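# core is the standard Container Linux login user; access is granted via the
# SSH public key supplied to the template.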
passwd:
  users:
    - name: core
      ssh_authorized_keys:
        - "${ssh_authorized_key}"