mirror of https://github.com/puppetmaster/typhoon.git (synced 2025-07-24 23:01:35 +02:00)
Rename container-linux modules to flatcar-linux
* CoreOS Container Linux was deprecated in v1.18.3
* Continue transitioning docs and modules from supporting both CoreOS and Flatcar "variants" of Container Linux to supporting Flatcar Linux and equivalents

Action Required: Update the Flatcar Linux modules' `source` to replace `container-linux` with `flatcar-linux` (i.e. `s/container-linux/flatcar-linux/`). See the docs for examples.
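As a hedged illustration (the module name, cluster values, and release `ref` below are placeholders, not values from this commit), only the module `source` path changes:

```tf
module "mercury" {
  # before: the deprecated CoreOS Container Linux path
  # source = "git::https://github.com/poseidon/typhoon//bare-metal/container-linux/kubernetes?ref=<release>"

  # after: the renamed Flatcar Linux path
  source = "git::https://github.com/poseidon/typhoon//bare-metal/flatcar-linux/kubernetes?ref=<release>"

  # ...all other inputs are unchanged...
}
```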
bare-metal/flatcar-linux/kubernetes/LICENSE (new file, 23 lines added)
@@ -0,0 +1,23 @@
The MIT License (MIT)

Copyright (c) 2017 Typhoon Authors
Copyright (c) 2017 Dalton Hubble

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
bare-metal/flatcar-linux/kubernetes/README.md (new file, 23 lines added)
@@ -0,0 +1,23 @@
# Typhoon <img align="right" src="https://storage.googleapis.com/poseidon/typhoon-logo.png">

Typhoon is a minimal and free Kubernetes distribution.

* Minimal, stable base Kubernetes distribution
* Declarative infrastructure and configuration
* Free (freedom and cost) and privacy-respecting
* Practical for labs, datacenters, and clouds

Typhoon distributes upstream Kubernetes, architectural conventions, and cluster addons, much like a GNU/Linux distribution provides the Linux kernel and userspace components.

## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>

* Kubernetes v1.19.3 (upstream)
* Single or multi-master, [Calico](https://www.projectcalico.org/) or [Cilium](https://github.com/cilium/cilium) or [flannel](https://github.com/coreos/flannel) networking
* On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
* Advanced features like [snippets](https://typhoon.psdn.io/advanced/customization/#hosts) customization
* Ready for Ingress, Prometheus, Grafana, and other optional [addons](https://typhoon.psdn.io/addons/overview/)

## Docs

Please see the [official docs](https://typhoon.psdn.io) and the bare-metal [tutorial](https://typhoon.psdn.io/cl/bare-metal/).
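The README defers usage to the tutorial; as a minimal sketch only (cluster name, endpoints, MAC addresses, domains, OS version, and the `ref` are hypothetical placeholders assumed for illustration), an invocation of this module could look like:

```tf
module "mercury" {
  source = "git::https://github.com/poseidon/typhoon//bare-metal/flatcar-linux/kubernetes?ref=<release>"

  # bare-metal
  cluster_name           = "mercury"
  matchbox_http_endpoint = "http://matchbox.example.com:8080"
  os_channel             = "flatcar-stable"
  os_version             = "2605.6.0"

  # configuration
  k8s_domain_name    = "node1.example.com"
  ssh_authorized_key = "ssh-rsa AAAAB3Nz..."

  # machines
  controllers = [{
    name   = "node1"
    mac    = "52:54:00:a1:9c:ae"
    domain = "node1.example.com"
  }]
  workers = [{
    name   = "node2"
    mac    = "52:54:00:b2:2f:86"
    domain = "node2.example.com"
  }]
}
```

The input names match the declarations in variables.tf below; the values themselves are illustrative.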
bare-metal/flatcar-linux/kubernetes/bootstrap.tf (new file, 17 lines added)
@@ -0,0 +1,17 @@
# Kubernetes assets (kubeconfig, manifests)
module "bootstrap" {
  source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=9037d7311b949439b217cd9c657d4500eab3e16b"

  cluster_name                    = var.cluster_name
  api_servers                     = [var.k8s_domain_name]
  etcd_servers                    = var.controllers.*.domain
  networking                      = var.networking
  network_mtu                     = var.network_mtu
  network_ip_autodetection_method = var.network_ip_autodetection_method
  pod_cidr                        = var.pod_cidr
  service_cidr                    = var.service_cidr
  cluster_domain_suffix           = var.cluster_domain_suffix
  enable_reporting                = var.enable_reporting
  enable_aggregation              = var.enable_aggregation
}
bare-metal/flatcar-linux/kubernetes/cl/controller.yaml (new file, 208 lines added)
@@ -0,0 +1,208 @@
---
systemd:
  units:
    - name: etcd-member.service
      enabled: true
      dropins:
        - name: 40-etcd-cluster.conf
          contents: |
            [Service]
            Environment="ETCD_IMAGE_TAG=v3.4.12"
            Environment="ETCD_IMAGE_URL=docker://quay.io/coreos/etcd"
            Environment="RKT_RUN_ARGS=--insecure-options=image"
            Environment="ETCD_NAME=${etcd_name}"
            Environment="ETCD_ADVERTISE_CLIENT_URLS=https://${domain_name}:2379"
            Environment="ETCD_INITIAL_ADVERTISE_PEER_URLS=https://${domain_name}:2380"
            Environment="ETCD_LISTEN_CLIENT_URLS=https://0.0.0.0:2379"
            Environment="ETCD_LISTEN_PEER_URLS=https://0.0.0.0:2380"
            Environment="ETCD_LISTEN_METRICS_URLS=http://0.0.0.0:2381"
            Environment="ETCD_INITIAL_CLUSTER=${etcd_initial_cluster}"
            Environment="ETCD_STRICT_RECONFIG_CHECK=true"
            Environment="ETCD_SSL_DIR=/etc/ssl/etcd"
            Environment="ETCD_TRUSTED_CA_FILE=/etc/ssl/certs/etcd/server-ca.crt"
            Environment="ETCD_CERT_FILE=/etc/ssl/certs/etcd/server.crt"
            Environment="ETCD_KEY_FILE=/etc/ssl/certs/etcd/server.key"
            Environment="ETCD_CLIENT_CERT_AUTH=true"
            Environment="ETCD_PEER_TRUSTED_CA_FILE=/etc/ssl/certs/etcd/peer-ca.crt"
            Environment="ETCD_PEER_CERT_FILE=/etc/ssl/certs/etcd/peer.crt"
            Environment="ETCD_PEER_KEY_FILE=/etc/ssl/certs/etcd/peer.key"
            Environment="ETCD_PEER_CLIENT_CERT_AUTH=true"
    - name: docker.service
      enabled: true
    - name: locksmithd.service
      mask: true
    - name: kubelet.path
      enabled: true
      contents: |
        [Unit]
        Description=Watch for kubeconfig
        [Path]
        PathExists=/etc/kubernetes/kubeconfig
        [Install]
        WantedBy=multi-user.target
    - name: wait-for-dns.service
      enabled: true
      contents: |
        [Unit]
        Description=Wait for DNS entries
        Wants=systemd-resolved.service
        Before=kubelet.service
        [Service]
        Type=oneshot
        RemainAfterExit=true
        ExecStart=/bin/sh -c 'while ! /usr/bin/grep '^[^#[:space:]]' /etc/resolv.conf > /dev/null; do sleep 1; done'
        [Install]
        RequiredBy=kubelet.service
        RequiredBy=etcd-member.service
    - name: kubelet.service
      contents: |
        [Unit]
        Description=Kubelet
        Requires=docker.service
        After=docker.service
        Wants=rpc-statd.service
        [Service]
        Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.19.3
        Environment=KUBELET_CGROUP_DRIVER=${cgroup_driver}
        ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
        ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
        ExecStartPre=/bin/mkdir -p /opt/cni/bin
        ExecStartPre=/bin/mkdir -p /var/lib/calico
        ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins
        ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
        ExecStartPre=/usr/bin/docker run -d \
          --name kubelet \
          --privileged \
          --pid host \
          --network host \
          -v /etc/kubernetes:/etc/kubernetes:ro \
          -v /etc/machine-id:/etc/machine-id:ro \
          -v /usr/lib/os-release:/etc/os-release:ro \
          -v /lib/modules:/lib/modules:ro \
          -v /run:/run \
          -v /sys/fs/cgroup:/sys/fs/cgroup:ro \
          -v /sys/fs/cgroup/systemd:/sys/fs/cgroup/systemd \
          -v /var/lib/calico:/var/lib/calico:ro \
          -v /var/lib/docker:/var/lib/docker \
          -v /var/lib/kubelet:/var/lib/kubelet:rshared \
          -v /var/log:/var/log \
          -v /opt/cni/bin:/opt/cni/bin \
          -v /etc/iscsi:/etc/iscsi \
          -v /usr/sbin/iscsiadm:/usr/sbin/iscsiadm \
          $${KUBELET_IMAGE} \
          --anonymous-auth=false \
          --authentication-token-webhook \
          --authorization-mode=Webhook \
          --bootstrap-kubeconfig=/etc/kubernetes/kubeconfig \
          --cgroup-driver=$${KUBELET_CGROUP_DRIVER} \
          --client-ca-file=/etc/kubernetes/ca.crt \
          --cluster_dns=${cluster_dns_service_ip} \
          --cluster_domain=${cluster_domain_suffix} \
          --cni-conf-dir=/etc/kubernetes/cni/net.d \
          --healthz-port=0 \
          --hostname-override=${domain_name} \
          --kubeconfig=/var/lib/kubelet/kubeconfig \
          --network-plugin=cni \
          --node-labels=node.kubernetes.io/controller="true" \
          --pod-manifest-path=/etc/kubernetes/manifests \
          --read-only-port=0 \
          --register-with-taints=node-role.kubernetes.io/controller=:NoSchedule \
          --rotate-certificates \
          --volume-plugin-dir=/var/lib/kubelet/volumeplugins
        ExecStart=docker logs -f kubelet
        ExecStop=docker stop kubelet
        ExecStopPost=docker rm kubelet
        Restart=always
        RestartSec=10
        [Install]
        WantedBy=multi-user.target
    - name: bootstrap.service
      contents: |
        [Unit]
        Description=Kubernetes control plane
        ConditionPathExists=!/opt/bootstrap/bootstrap.done
        [Service]
        Type=oneshot
        RemainAfterExit=true
        WorkingDirectory=/opt/bootstrap
        ExecStart=/usr/bin/rkt run \
          --trust-keys-from-https \
          --volume config,kind=host,source=/etc/kubernetes/bootstrap-secrets \
          --mount volume=config,target=/etc/kubernetes/secrets \
          --volume assets,kind=host,source=/opt/bootstrap/assets \
          --mount volume=assets,target=/assets \
          --volume script,kind=host,source=/opt/bootstrap/apply \
          --mount volume=script,target=/apply \
          --insecure-options=image \
          docker://quay.io/poseidon/kubelet:v1.19.3 \
          --net=host \
          --dns=host \
          --exec=/apply
        ExecStartPost=/bin/touch /opt/bootstrap/bootstrap.done
        [Install]
        WantedBy=multi-user.target
storage:
  directories:
    - path: /var/lib/etcd
      filesystem: root
      mode: 0700
      overwrite: true
    - path: /etc/kubernetes
      filesystem: root
      mode: 0755
  files:
    - path: /etc/hostname
      filesystem: root
      mode: 0644
      contents:
        inline:
          ${domain_name}
    - path: /opt/bootstrap/layout
      filesystem: root
      mode: 0544
      contents:
        inline: |
          #!/bin/bash -e
          mkdir -p -- auth tls/etcd tls/k8s static-manifests manifests/coredns manifests-networking
          awk '/#####/ {filename=$2; next} {print > filename}' assets
          mkdir -p /etc/ssl/etcd/etcd
          mkdir -p /etc/kubernetes/bootstrap-secrets
          mv tls/etcd/{peer*,server*} /etc/ssl/etcd/etcd/
          mv tls/etcd/etcd-client* /etc/kubernetes/bootstrap-secrets/
          chown -R etcd:etcd /etc/ssl/etcd
          chmod -R 500 /etc/ssl/etcd
          chmod -R 700 /var/lib/etcd
          mv auth/kubeconfig /etc/kubernetes/bootstrap-secrets/
          mv tls/k8s/* /etc/kubernetes/bootstrap-secrets/
          mkdir -p /etc/kubernetes/manifests
          mv static-manifests/* /etc/kubernetes/manifests/
          mkdir -p /opt/bootstrap/assets
          mv manifests /opt/bootstrap/assets/manifests
          mv manifests-networking/* /opt/bootstrap/assets/manifests/
          rm -rf assets auth static-manifests tls manifests-networking
    - path: /opt/bootstrap/apply
      filesystem: root
      mode: 0544
      contents:
        inline: |
          #!/bin/bash -e
          export KUBECONFIG=/etc/kubernetes/secrets/kubeconfig
          until kubectl version; do
            echo "Waiting for static pod control plane"
            sleep 5
          done
          until kubectl apply -f /assets/manifests -R; do
            echo "Retry applying manifests"
            sleep 5
          done
    - path: /etc/sysctl.d/max-user-watches.conf
      filesystem: root
      mode: 0644
      contents:
        inline: |
          fs.inotify.max_user_watches=16184
passwd:
  users:
    - name: core
      ssh_authorized_keys:
        - ${ssh_authorized_key}
bare-metal/flatcar-linux/kubernetes/cl/install.yaml (new file, 46 lines added)
@@ -0,0 +1,46 @@
---
systemd:
  units:
    - name: installer.service
      enabled: true
      contents: |
        [Unit]
        Requires=network-online.target
        After=network-online.target
        [Service]
        Type=simple
        ExecStart=/opt/installer
        [Install]
        WantedBy=multi-user.target
    # Avoid using the standard SSH port so terraform apply cannot SSH until
    # post-install. But admins may SSH to debug disk install problems.
    # After install, sshd will use port 22 and users/terraform can connect.
    - name: sshd.socket
      dropins:
        - name: 10-sshd-port.conf
          contents: |
            [Socket]
            ListenStream=
            ListenStream=2222
storage:
  files:
    - path: /opt/installer
      filesystem: root
      mode: 0500
      contents:
        inline: |
          #!/bin/bash -ex
          curl --retry 10 "${ignition_endpoint}?{{.request.raw_query}}&os=installed" -o ignition.json
          ${os_flavor}-install \
            -d ${install_disk} \
            -C ${os_channel} \
            -V ${os_version} \
            ${baseurl_flag} \
            -i ignition.json
          udevadm settle
          systemctl reboot
passwd:
  users:
    - name: core
      ssh_authorized_keys:
        - "${ssh_authorized_key}"
bare-metal/flatcar-linux/kubernetes/cl/worker.yaml (new file, 124 lines added)
@@ -0,0 +1,124 @@
---
systemd:
  units:
    - name: docker.service
      enabled: true
    - name: locksmithd.service
      mask: true
    - name: kubelet.path
      enabled: true
      contents: |
        [Unit]
        Description=Watch for kubeconfig
        [Path]
        PathExists=/etc/kubernetes/kubeconfig
        [Install]
        WantedBy=multi-user.target
    - name: wait-for-dns.service
      enabled: true
      contents: |
        [Unit]
        Description=Wait for DNS entries
        Wants=systemd-resolved.service
        Before=kubelet.service
        [Service]
        Type=oneshot
        RemainAfterExit=true
        ExecStart=/bin/sh -c 'while ! /usr/bin/grep '^[^#[:space:]]' /etc/resolv.conf > /dev/null; do sleep 1; done'
        [Install]
        RequiredBy=kubelet.service
    - name: kubelet.service
      contents: |
        [Unit]
        Description=Kubelet
        Requires=docker.service
        After=docker.service
        Wants=rpc-statd.service
        [Service]
        Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.19.3
        Environment=KUBELET_CGROUP_DRIVER=${cgroup_driver}
        ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
        ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
        ExecStartPre=/bin/mkdir -p /opt/cni/bin
        ExecStartPre=/bin/mkdir -p /var/lib/calico
        ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins
        ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
        # Podman, rkt, or runc run container processes, whereas docker run
        # is a client to a daemon and requires workarounds to use within a
        # systemd unit. https://github.com/moby/moby/issues/6791
        ExecStartPre=/usr/bin/docker run -d \
          --name kubelet \
          --privileged \
          --pid host \
          --network host \
          -v /etc/kubernetes:/etc/kubernetes:ro \
          -v /etc/machine-id:/etc/machine-id:ro \
          -v /usr/lib/os-release:/etc/os-release:ro \
          -v /lib/modules:/lib/modules:ro \
          -v /run:/run \
          -v /sys/fs/cgroup:/sys/fs/cgroup:ro \
          -v /sys/fs/cgroup/systemd:/sys/fs/cgroup/systemd \
          -v /var/lib/calico:/var/lib/calico:ro \
          -v /var/lib/docker:/var/lib/docker \
          -v /var/lib/kubelet:/var/lib/kubelet:rshared \
          -v /var/log:/var/log \
          -v /opt/cni/bin:/opt/cni/bin \
          -v /etc/iscsi:/etc/iscsi \
          -v /usr/sbin/iscsiadm:/usr/sbin/iscsiadm \
          $${KUBELET_IMAGE} \
          --anonymous-auth=false \
          --authentication-token-webhook \
          --authorization-mode=Webhook \
          --bootstrap-kubeconfig=/etc/kubernetes/kubeconfig \
          --cgroup-driver=$${KUBELET_CGROUP_DRIVER} \
          --client-ca-file=/etc/kubernetes/ca.crt \
          --cluster_dns=${cluster_dns_service_ip} \
          --cluster_domain=${cluster_domain_suffix} \
          --cni-conf-dir=/etc/kubernetes/cni/net.d \
          --healthz-port=0 \
          --hostname-override=${domain_name} \
          --kubeconfig=/var/lib/kubelet/kubeconfig \
          --network-plugin=cni \
          --node-labels=node.kubernetes.io/node \
          %{~ for label in compact(split(",", node_labels)) ~}
          --node-labels=${label} \
          %{~ endfor ~}
          %{~ for taint in compact(split(",", node_taints)) ~}
          --register-with-taints=${taint} \
          %{~ endfor ~}
          --pod-manifest-path=/etc/kubernetes/manifests \
          --read-only-port=0 \
          --rotate-certificates \
          --volume-plugin-dir=/var/lib/kubelet/volumeplugins
        ExecStart=docker logs -f kubelet
        ExecStop=docker stop kubelet
        ExecStopPost=docker rm kubelet
        Restart=always
        RestartSec=5
        [Install]
        WantedBy=multi-user.target

storage:
  directories:
    - path: /etc/kubernetes
      filesystem: root
      mode: 0755
  files:
    - path: /etc/hostname
      filesystem: root
      mode: 0644
      contents:
        inline:
          ${domain_name}
    - path: /etc/sysctl.d/max-user-watches.conf
      filesystem: root
      mode: 0644
      contents:
        inline: |
          fs.inotify.max_user_watches=16184
passwd:
  users:
    - name: core
      ssh_authorized_keys:
        - ${ssh_authorized_key}
bare-metal/flatcar-linux/kubernetes/groups.tf (new file, 35 lines added)
@@ -0,0 +1,35 @@
resource "matchbox_group" "install" {
  count = length(var.controllers) + length(var.workers)

  name = format("install-%s", concat(var.controllers.*.name, var.workers.*.name)[count.index])

  # pick one of 4 Matchbox profiles (Container Linux or Flatcar, cached or non-cached)
  profile = local.flavor == "flatcar" ? var.cached_install ? matchbox_profile.cached-flatcar-linux-install.*.name[count.index] : matchbox_profile.flatcar-install.*.name[count.index] : var.cached_install ? matchbox_profile.cached-container-linux-install.*.name[count.index] : matchbox_profile.container-linux-install.*.name[count.index]

  selector = {
    mac = concat(var.controllers.*.mac, var.workers.*.mac)[count.index]
  }
}

resource "matchbox_group" "controller" {
  count   = length(var.controllers)
  name    = format("%s-%s", var.cluster_name, var.controllers[count.index].name)
  profile = matchbox_profile.controllers.*.name[count.index]

  selector = {
    mac = var.controllers[count.index].mac
    os  = "installed"
  }
}

resource "matchbox_group" "worker" {
  count   = length(var.workers)
  name    = format("%s-%s", var.cluster_name, var.workers[count.index].name)
  profile = matchbox_profile.workers.*.name[count.index]

  selector = {
    mac = var.workers[count.index].mac
    os  = "installed"
  }
}
bare-metal/flatcar-linux/kubernetes/outputs.tf (new file, 10 lines added)
@@ -0,0 +1,10 @@
output "kubeconfig-admin" {
  value = module.bootstrap.kubeconfig-admin
}

# Outputs for debug

output "assets_dist" {
  value = module.bootstrap.assets_dist
}
bare-metal/flatcar-linux/kubernetes/profiles.tf (new file, 194 lines added)
@@ -0,0 +1,194 @@
locals {
  # coreos-stable -> coreos flavor, stable channel
  # flatcar-stable -> flatcar flavor, stable channel
  flavor  = split("-", var.os_channel)[0]
  channel = split("-", var.os_channel)[1]
}

// Container Linux Install profile (from release.core-os.net)
resource "matchbox_profile" "container-linux-install" {
  count = length(var.controllers) + length(var.workers)
  name  = format("%s-container-linux-install-%s", var.cluster_name, concat(var.controllers.*.name, var.workers.*.name)[count.index])

  kernel = "${var.download_protocol}://${local.channel}.release.core-os.net/amd64-usr/${var.os_version}/coreos_production_pxe.vmlinuz"

  initrd = [
    "${var.download_protocol}://${local.channel}.release.core-os.net/amd64-usr/${var.os_version}/coreos_production_pxe_image.cpio.gz",
  ]

  args = flatten([
    "initrd=coreos_production_pxe_image.cpio.gz",
    "coreos.config.url=${var.matchbox_http_endpoint}/ignition?uuid=$${uuid}&mac=$${mac:hexhyp}",
    "coreos.first_boot=yes",
    "console=tty0",
    "console=ttyS0",
    var.kernel_args,
  ])

  container_linux_config = data.template_file.container-linux-install-configs.*.rendered[count.index]
}

data "template_file" "container-linux-install-configs" {
  count = length(var.controllers) + length(var.workers)

  template = file("${path.module}/cl/install.yaml")

  vars = {
    os_flavor          = local.flavor
    os_channel         = local.channel
    os_version         = var.os_version
    ignition_endpoint  = format("%s/ignition", var.matchbox_http_endpoint)
    install_disk       = var.install_disk
    ssh_authorized_key = var.ssh_authorized_key
    # only cached-container-linux profile adds -b baseurl
    baseurl_flag = ""
  }
}

// Container Linux Install profile (from matchbox /assets cache)
// Note: Admin must have downloaded os_version into matchbox assets/coreos.
resource "matchbox_profile" "cached-container-linux-install" {
  count = length(var.controllers) + length(var.workers)
  name  = format("%s-cached-container-linux-install-%s", var.cluster_name, concat(var.controllers.*.name, var.workers.*.name)[count.index])

  kernel = "/assets/coreos/${var.os_version}/coreos_production_pxe.vmlinuz"

  initrd = [
    "/assets/coreos/${var.os_version}/coreos_production_pxe_image.cpio.gz",
  ]

  args = flatten([
    "initrd=coreos_production_pxe_image.cpio.gz",
    "coreos.config.url=${var.matchbox_http_endpoint}/ignition?uuid=$${uuid}&mac=$${mac:hexhyp}",
    "coreos.first_boot=yes",
    "console=tty0",
    "console=ttyS0",
    var.kernel_args,
  ])

  container_linux_config = data.template_file.cached-container-linux-install-configs.*.rendered[count.index]
}

data "template_file" "cached-container-linux-install-configs" {
  count = length(var.controllers) + length(var.workers)

  template = file("${path.module}/cl/install.yaml")

  vars = {
    os_flavor          = local.flavor
    os_channel         = local.channel
    os_version         = var.os_version
    ignition_endpoint  = format("%s/ignition", var.matchbox_http_endpoint)
    install_disk       = var.install_disk
    ssh_authorized_key = var.ssh_authorized_key
    # profile uses -b baseurl to install from matchbox cache
    baseurl_flag = "-b ${var.matchbox_http_endpoint}/assets/${local.flavor}"
  }
}

// Flatcar Linux install profile (from release.flatcar-linux.net)
resource "matchbox_profile" "flatcar-install" {
  count = length(var.controllers) + length(var.workers)
  name  = format("%s-flatcar-install-%s", var.cluster_name, concat(var.controllers.*.name, var.workers.*.name)[count.index])

  kernel = "${var.download_protocol}://${local.channel}.release.flatcar-linux.net/amd64-usr/${var.os_version}/flatcar_production_pxe.vmlinuz"

  initrd = [
    "${var.download_protocol}://${local.channel}.release.flatcar-linux.net/amd64-usr/${var.os_version}/flatcar_production_pxe_image.cpio.gz",
  ]

  args = flatten([
    "initrd=flatcar_production_pxe_image.cpio.gz",
    "flatcar.config.url=${var.matchbox_http_endpoint}/ignition?uuid=$${uuid}&mac=$${mac:hexhyp}",
    "flatcar.first_boot=yes",
    "console=tty0",
    "console=ttyS0",
    var.kernel_args,
  ])

  container_linux_config = data.template_file.container-linux-install-configs.*.rendered[count.index]
}

// Flatcar Linux Install profile (from matchbox /assets cache)
// Note: Admin must have downloaded os_version into matchbox assets/flatcar.
resource "matchbox_profile" "cached-flatcar-linux-install" {
  count = length(var.controllers) + length(var.workers)
  name  = format("%s-cached-flatcar-linux-install-%s", var.cluster_name, concat(var.controllers.*.name, var.workers.*.name)[count.index])

  kernel = "/assets/flatcar/${var.os_version}/flatcar_production_pxe.vmlinuz"

  initrd = [
    "/assets/flatcar/${var.os_version}/flatcar_production_pxe_image.cpio.gz",
  ]

  args = flatten([
    "initrd=flatcar_production_pxe_image.cpio.gz",
    "flatcar.config.url=${var.matchbox_http_endpoint}/ignition?uuid=$${uuid}&mac=$${mac:hexhyp}",
    "flatcar.first_boot=yes",
    "console=tty0",
    "console=ttyS0",
    var.kernel_args,
  ])

  container_linux_config = data.template_file.cached-container-linux-install-configs.*.rendered[count.index]
}

// Kubernetes Controller profiles
resource "matchbox_profile" "controllers" {
  count        = length(var.controllers)
  name         = format("%s-controller-%s", var.cluster_name, var.controllers.*.name[count.index])
  raw_ignition = data.ct_config.controller-ignitions.*.rendered[count.index]
}

data "ct_config" "controller-ignitions" {
  count    = length(var.controllers)
  content  = data.template_file.controller-configs.*.rendered[count.index]
  strict   = true
  snippets = lookup(var.snippets, var.controllers.*.name[count.index], [])
}

data "template_file" "controller-configs" {
  count = length(var.controllers)

  template = file("${path.module}/cl/controller.yaml")

  vars = {
    domain_name            = var.controllers.*.domain[count.index]
    etcd_name              = var.controllers.*.name[count.index]
    etcd_initial_cluster   = join(",", formatlist("%s=https://%s:2380", var.controllers.*.name, var.controllers.*.domain))
    cgroup_driver          = var.os_channel == "flatcar-edge" ? "systemd" : "cgroupfs"
    cluster_dns_service_ip = module.bootstrap.cluster_dns_service_ip
    cluster_domain_suffix  = var.cluster_domain_suffix
    ssh_authorized_key     = var.ssh_authorized_key
  }
}

// Kubernetes Worker profiles
resource "matchbox_profile" "workers" {
  count        = length(var.workers)
  name         = format("%s-worker-%s", var.cluster_name, var.workers.*.name[count.index])
  raw_ignition = data.ct_config.worker-ignitions.*.rendered[count.index]
}

data "ct_config" "worker-ignitions" {
  count    = length(var.workers)
  content  = data.template_file.worker-configs.*.rendered[count.index]
  strict   = true
  snippets = lookup(var.snippets, var.workers.*.name[count.index], [])
}

data "template_file" "worker-configs" {
  count = length(var.workers)

  template = file("${path.module}/cl/worker.yaml")

  vars = {
    domain_name            = var.workers.*.domain[count.index]
    cgroup_driver          = var.os_channel == "flatcar-edge" ? "systemd" : "cgroupfs"
    cluster_dns_service_ip = module.bootstrap.cluster_dns_service_ip
    cluster_domain_suffix  = var.cluster_domain_suffix
    ssh_authorized_key     = var.ssh_authorized_key
    node_labels            = join(",", lookup(var.worker_node_labels, var.workers.*.name[count.index], []))
    node_taints            = join(",", lookup(var.worker_node_taints, var.workers.*.name[count.index], []))
  }
}
bare-metal/flatcar-linux/kubernetes/ssh.tf (new file, 102 lines added)
@@ -0,0 +1,102 @@
locals {
  # format assets for distribution
  assets_bundle = [
    # header with the unpack location
    for key, value in module.bootstrap.assets_dist :
    format("##### %s\n%s", key, value)
  ]
}

# Secure copy assets to controllers. Activates kubelet.service
resource "null_resource" "copy-controller-secrets" {
  count = length(var.controllers)

  # Without depends_on, remote-exec could start and wait for machines before
  # matchbox groups are written, causing a deadlock.
  depends_on = [
    matchbox_group.install,
    matchbox_group.controller,
    matchbox_group.worker,
    module.bootstrap,
  ]

  connection {
    type    = "ssh"
    host    = var.controllers.*.domain[count.index]
    user    = "core"
    timeout = "60m"
  }

  provisioner "file" {
    content     = module.bootstrap.kubeconfig-kubelet
    destination = "$HOME/kubeconfig"
  }

  provisioner "file" {
    content     = join("\n", local.assets_bundle)
    destination = "$HOME/assets"
  }

  provisioner "remote-exec" {
    inline = [
      "sudo mv $HOME/kubeconfig /etc/kubernetes/kubeconfig",
      "sudo /opt/bootstrap/layout",
    ]
  }
}

# Secure copy kubeconfig to all workers. Activates kubelet.service
resource "null_resource" "copy-worker-secrets" {
  count = length(var.workers)

  # Without depends_on, remote-exec could start and wait for machines before
  # matchbox groups are written, causing a deadlock.
  depends_on = [
    matchbox_group.install,
    matchbox_group.controller,
    matchbox_group.worker,
  ]

  connection {
    type    = "ssh"
    host    = var.workers.*.domain[count.index]
    user    = "core"
    timeout = "60m"
  }

  provisioner "file" {
    content     = module.bootstrap.kubeconfig-kubelet
    destination = "$HOME/kubeconfig"
  }

  provisioner "remote-exec" {
    inline = [
      "sudo mv $HOME/kubeconfig /etc/kubernetes/kubeconfig",
    ]
  }
}

# Connect to a controller to perform one-time cluster bootstrap.
resource "null_resource" "bootstrap" {
  # Without depends_on, this remote-exec may start before the kubeconfig copy.
  # Terraform only does one task at a time, so it would try to bootstrap
  # while no Kubelets are running.
  depends_on = [
    null_resource.copy-controller-secrets,
    null_resource.copy-worker-secrets,
  ]

  connection {
    type    = "ssh"
    host    = var.controllers[0].domain
    user    = "core"
    timeout = "15m"
  }

  provisioner "remote-exec" {
    inline = [
      "sudo systemctl start bootstrap",
    ]
  }
}
bare-metal/flatcar-linux/kubernetes/variables.tf (new file, 160 lines added)
@@ -0,0 +1,160 @@
variable "cluster_name" {
  type        = string
  description = "Unique cluster name"
}

# bare-metal

variable "matchbox_http_endpoint" {
  type        = string
  description = "Matchbox HTTP read-only endpoint (e.g. http://matchbox.example.com:8080)"
}

variable "os_channel" {
  type        = string
  description = "Channel for a Flatcar Linux (flatcar-stable, flatcar-beta, flatcar-alpha, flatcar-edge)"
}

variable "os_version" {
  type        = string
  description = "Version for a Container Linux derivative to PXE and install (e.g. 2079.5.1)"
}

# machines

variable "controllers" {
  type = list(object({
    name   = string
    mac    = string
    domain = string
  }))
  description = <<EOD
List of controller machine details (unique name, identifying MAC address, FQDN)
[{ name = "node1", mac = "52:54:00:a1:9c:ae", domain = "node1.example.com"}]
EOD
}

variable "workers" {
  type = list(object({
    name   = string
    mac    = string
    domain = string
  }))
  description = <<EOD
List of worker machine details (unique name, identifying MAC address, FQDN)
[
  { name = "node2", mac = "52:54:00:b2:2f:86", domain = "node2.example.com"},
  { name = "node3", mac = "52:54:00:c3:61:77", domain = "node3.example.com"}
]
EOD
}

variable "snippets" {
  type        = map(list(string))
  description = "Map from machine names to lists of Container Linux Config snippets"
  default     = {}
}

variable "worker_node_labels" {
  type        = map(list(string))
  description = "Map from worker names to lists of initial node labels"
  default     = {}
}

variable "worker_node_taints" {
  type        = map(list(string))
  description = "Map from worker names to lists of initial node taints"
  default     = {}
}

# configuration

variable "k8s_domain_name" {
  type        = string
  description = "Controller DNS name which resolves to a controller instance. Workers and kubeconfig's will communicate with this endpoint (e.g. cluster.example.com)"
}

variable "ssh_authorized_key" {
  type        = string
  description = "SSH public key for user 'core'"
}

variable "networking" {
  type        = string
  description = "Choice of networking provider (flannel or calico)"
  default     = "calico"
}

variable "network_mtu" {
  type        = number
  description = "CNI interface MTU (applies to calico only)"
  default     = 1480
}

variable "network_ip_autodetection_method" {
  type        = string
  description = "Method to autodetect the host IPv4 address (applies to calico only)"
  default     = "first-found"
}

variable "pod_cidr" {
  type        = string
  description = "CIDR IPv4 range to assign Kubernetes pods"
  default     = "10.2.0.0/16"
}

variable "service_cidr" {
  type        = string
  description = <<EOD
CIDR IPv4 range to assign Kubernetes services.
The 1st IP will be reserved for kube_apiserver, the 10th IP will be reserved for coredns.
EOD
  default     = "10.3.0.0/16"
}

# optional

variable "download_protocol" {
  type        = string
  description = "Protocol iPXE should use to download the kernel and initrd. Defaults to https, which requires iPXE compiled with crypto support. Unused if cached_install is true."
  default     = "https"
}

variable "cached_install" {
  type        = bool
  description = "Whether Container Linux should PXE boot and install from matchbox /assets cache. Note that the admin must have downloaded the os_version into matchbox assets."
  default     = false
}

variable "install_disk" {
  type        = string
  default     = "/dev/sda"
  description = "Disk device to which the install profiles should install Container Linux (e.g. /dev/sda)"
}

variable "kernel_args" {
  type        = list(string)
  description = "Additional kernel arguments to provide at PXE boot."
  default     = []
}

variable "enable_reporting" {
  type        = bool
  description = "Enable usage or analytics reporting to upstreams (Calico)"
  default     = false
}

variable "enable_aggregation" {
  type        = bool
  description = "Enable the Kubernetes Aggregation Layer (defaults to false)"
  default     = false
}

# unofficial, undocumented, unsupported

variable "cluster_domain_suffix" {
  type        = string
  description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local)"
  default     = "cluster.local"
}
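The `snippets`, `worker_node_labels`, and `worker_node_taints` maps above are keyed by machine name. A minimal sketch, assuming a worker named `node2` and placeholder snippet paths, labels, and taints (none of these values come from this commit):

```tf
module "mercury" {
  # ...required inputs as in the usage sketch above...

  # extra Container Linux Config snippets merged into node2's config (hypothetical file)
  snippets = {
    node2 = [file("./snippets/node2-storage.yaml")]
  }

  # initial node labels and taints registered by node2's kubelet (placeholder values)
  worker_node_labels = {
    node2 = ["pool=storage"]
  }
  worker_node_taints = {
    node2 = ["storage=true:NoSchedule"]
  }
}
```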
bare-metal/flatcar-linux/kubernetes/versions.tf (new file, 20 lines added)
@@ -0,0 +1,20 @@
# Terraform version and plugin versions

terraform {
  required_version = ">= 0.12.26, < 0.14.0"
  required_providers {
    template = "~> 2.1"
    null     = "~> 2.1"

    ct = {
      source  = "poseidon/ct"
      version = "~> 0.6.1"
    }

    matchbox = {
      source  = "poseidon/matchbox"
      version = "~> 0.4.1"
    }
  }
}