variant: flatcar
version: 1.0.0
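# Controller node config: systemd units run etcd, the Kubelet, and a one-time
# bootstrap step as Docker containers; supporting files are written under storage.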
systemd:
  units:
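    # etcd member runs as a Docker container; its settings come from the
    # /etc/etcd/etcd.env file defined under storage.files below.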
    - name: etcd-member.service
      enabled: true
      contents: |
        [Unit]
        Description=etcd (System Container)
        Documentation=https://github.com/etcd-io/etcd
        Requires=docker.service
        After=docker.service
        [Service]
        Environment=ETCD_IMAGE=quay.io/coreos/etcd:v3.5.4
        ExecStartPre=/usr/bin/docker run -d \
          --name etcd \
          --network host \
          --env-file /etc/etcd/etcd.env \
          --user 232:232 \
          --volume /etc/ssl/etcd:/etc/ssl/certs:ro \
          --volume /var/lib/etcd:/var/lib/etcd:rw \
          $${ETCD_IMAGE}
        ExecStart=docker logs -f etcd
        ExecStop=docker stop etcd
        ExecStopPost=docker rm etcd
        Restart=always
        RestartSec=10s
        TimeoutStartSec=0
        LimitNOFILE=40000
        [Install]
        WantedBy=multi-user.target
    - name: docker.service
      enabled: true
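    # locksmithd (Flatcar's reboot-on-update coordinator) is masked.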
    - name: locksmithd.service
      mask: true
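    # Holds kubelet.service and etcd-member.service until /etc/resolv.conf
    # contains at least one non-comment entry.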
    - name: wait-for-dns.service
      enabled: true
      contents: |
        [Unit]
        Description=Wait for DNS entries
        Wants=systemd-resolved.service
        Before=kubelet.service
        [Service]
        Type=oneshot
        RemainAfterExit=true
        ExecStart=/bin/sh -c 'while ! /usr/bin/grep '^[^#[:space:]]' /etc/resolv.conf > /dev/null; do sleep 1; done'
        [Install]
        RequiredBy=kubelet.service
        RequiredBy=etcd-member.service
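    # The Kubelet also runs under Docker. ExecStart tails the container's logs
    # so systemd has a foreground process to supervise.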
    - name: kubelet.service
      enabled: true
      contents: |
        [Unit]
        Description=Kubelet (System Container)
        Requires=docker.service
        After=docker.service
        Requires=coreos-metadata.service
        After=coreos-metadata.service
        Wants=rpc-statd.service
        [Service]
        Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.25.0
        EnvironmentFile=/run/metadata/coreos
        ExecStartPre=/bin/mkdir -p /etc/cni/net.d
        ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
        ExecStartPre=/bin/mkdir -p /opt/cni/bin
        ExecStartPre=/bin/mkdir -p /var/lib/calico
        ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins
        ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
        ExecStartPre=/usr/bin/docker run -d \
          --name kubelet \
          --privileged \
          --pid host \
          --network host \
          -v /etc/cni/net.d:/etc/cni/net.d:ro \
          -v /etc/kubernetes:/etc/kubernetes:ro \
          -v /etc/machine-id:/etc/machine-id:ro \
          -v /usr/lib/os-release:/etc/os-release:ro \
          -v /lib/modules:/lib/modules:ro \
          -v /run:/run \
          -v /sys/fs/cgroup:/sys/fs/cgroup \
          -v /var/lib/calico:/var/lib/calico:ro \
          -v /var/lib/containerd:/var/lib/containerd \
          -v /var/lib/kubelet:/var/lib/kubelet:rshared \
          -v /var/log:/var/log \
          -v /opt/cni/bin:/opt/cni/bin \
          $${KUBELET_IMAGE} \
          --bootstrap-kubeconfig=/etc/kubernetes/kubeconfig \
          --config=/etc/kubernetes/kubelet.yaml \
          --container-runtime-endpoint=unix:///run/containerd/containerd.sock \
          --kubeconfig=/var/lib/kubelet/kubeconfig \
          --node-labels=node.kubernetes.io/controller="true" \
          --provider-id=aws:///$${COREOS_EC2_AVAILABILITY_ZONE}/$${COREOS_EC2_INSTANCE_ID} \
          --register-with-taints=node-role.kubernetes.io/controller=:NoSchedule
        ExecStart=docker logs -f kubelet
        ExecStop=docker stop kubelet
        ExecStopPost=docker rm kubelet
        Restart=always
        RestartSec=10
        [Install]
        WantedBy=multi-user.target
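    # One-shot bootstrap: runs the kubelet image's /apply script to install the
    # control plane, then records completion in /opt/bootstrap/bootstrap.done.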
    - name: bootstrap.service
      contents: |
        [Unit]
        Description=Kubernetes control plane
        Wants=docker.service
        After=docker.service
        ConditionPathExists=!/opt/bootstrap/bootstrap.done
        [Service]
        Type=oneshot
        RemainAfterExit=true
        WorkingDirectory=/opt/bootstrap
        Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.25.0
        ExecStart=/usr/bin/docker run \
          -v /etc/kubernetes/pki:/etc/kubernetes/pki:ro \
          -v /opt/bootstrap/assets:/assets:ro \
          -v /opt/bootstrap/apply:/apply:ro \
          --entrypoint=/apply \
          $${KUBELET_IMAGE}
        ExecStartPost=/bin/touch /opt/bootstrap/bootstrap.done
        [Install]
        WantedBy=multi-user.target
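# Directories and files written to the node at provision time.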
storage:
  directories:
    - path: /var/lib/etcd
      mode: 0700
      overwrite: true
  files:
    - path: /etc/kubernetes/kubeconfig
      mode: 0644
      contents:
        inline: |
          ${kubeconfig}
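    # KubeletConfiguration passed to the Kubelet via --config above.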
    - path: /etc/kubernetes/kubelet.yaml
      mode: 0644
      contents:
        inline: |
          apiVersion: kubelet.config.k8s.io/v1beta1
          kind: KubeletConfiguration
          authentication:
            anonymous:
              enabled: false
            webhook:
              enabled: true
            x509:
              clientCAFile: /etc/kubernetes/ca.crt
          authorization:
            mode: Webhook
          cgroupDriver: systemd
          clusterDNS:
            - ${cluster_dns_service_ip}
          clusterDomain: ${cluster_domain_suffix}
          healthzPort: 0
          featureGates:
            LocalStorageCapacityIsolationFSQuotaMonitoring: false
          rotateCertificates: true
          shutdownGracePeriod: 45s
          shutdownGracePeriodCriticalPods: 30s
          staticPodPath: /etc/kubernetes/manifests
          readOnlyPort: 0
          resolvConf: /run/systemd/resolve/resolv.conf
          volumePluginDir: /var/lib/kubelet/volumeplugins
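    # Unpacks the single assets bundle (sections delimited by '#####' markers)
    # into TLS material, static pod manifests, and addon manifests.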
    - path: /opt/bootstrap/layout
      mode: 0544
      contents:
        inline: |
          #!/bin/bash -e
          mkdir -p -- auth tls/etcd tls/k8s static-manifests manifests/coredns manifests-networking
          awk '/#####/ {filename=$2; next} {print > filename}' assets
          mkdir -p /etc/ssl/etcd/etcd
          mkdir -p /etc/kubernetes/pki
          mv tls/etcd/{peer*,server*} /etc/ssl/etcd/etcd/
          mv tls/etcd/etcd-client* /etc/kubernetes/pki/
          chown -R etcd:etcd /etc/ssl/etcd
          chmod -R 500 /etc/ssl/etcd
          chmod -R 700 /var/lib/etcd
          mv auth/* /etc/kubernetes/pki/
          mv tls/k8s/* /etc/kubernetes/pki/
          mkdir -p /etc/kubernetes/manifests
          mv static-manifests/* /etc/kubernetes/manifests/
          mkdir -p /opt/bootstrap/assets
          mv manifests /opt/bootstrap/assets/manifests
          mv manifests-networking/* /opt/bootstrap/assets/manifests/
          rm -rf assets auth static-manifests tls manifests-networking
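    # Invoked by bootstrap.service: waits for the static pod control plane to
    # answer, then applies the addon manifests.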
    - path: /opt/bootstrap/apply
      mode: 0544
      contents:
        inline: |
          #!/bin/bash -e
          export KUBECONFIG=/etc/kubernetes/pki/admin.conf
          until kubectl version; do
            echo "Waiting for static pod control plane"
            sleep 5
          done
          until kubectl apply -f /assets/manifests -R; do
            echo "Retry applying manifests"
            sleep 5
          done
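    # Let shutdown inhibitors delay poweroff up to 45s, matching the Kubelet's
    # shutdownGracePeriod above.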
    - path: /etc/systemd/logind.conf.d/inhibitors.conf
      contents:
        inline: |
          [Login]
          InhibitDelayMaxSec=45s
    - path: /etc/sysctl.d/max-user-watches.conf
      mode: 0644
      contents:
        inline: |
          fs.inotify.max_user_watches=16184
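    # Environment consumed by etcd-member.service via --env-file.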
    - path: /etc/etcd/etcd.env
      mode: 0644
      contents:
        inline: |
          ETCD_NAME=${etcd_name}
          ETCD_DATA_DIR=/var/lib/etcd
          ETCD_ADVERTISE_CLIENT_URLS=https://${etcd_domain}:2379
          ETCD_INITIAL_ADVERTISE_PEER_URLS=https://${etcd_domain}:2380
          ETCD_LISTEN_CLIENT_URLS=https://0.0.0.0:2379
          ETCD_LISTEN_PEER_URLS=https://0.0.0.0:2380
          ETCD_LISTEN_METRICS_URLS=http://0.0.0.0:2381
          ETCD_INITIAL_CLUSTER=${etcd_initial_cluster}
          ETCD_STRICT_RECONFIG_CHECK=true
          ETCD_TRUSTED_CA_FILE=/etc/ssl/certs/etcd/server-ca.crt
          ETCD_CERT_FILE=/etc/ssl/certs/etcd/server.crt
          ETCD_KEY_FILE=/etc/ssl/certs/etcd/server.key
          ETCD_CLIENT_CERT_AUTH=true
          ETCD_PEER_TRUSTED_CA_FILE=/etc/ssl/certs/etcd/peer-ca.crt
          ETCD_PEER_CERT_FILE=/etc/ssl/certs/etcd/peer.crt
          ETCD_PEER_KEY_FILE=/etc/ssl/certs/etcd/peer.key
          ETCD_PEER_CLIENT_CERT_AUTH=true
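# Allow SSH access as the core user with the provided public key.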
passwd:
  users:
    - name: core
      ssh_authorized_keys:
        - "${ssh_authorized_key}"