mirror of
https://github.com/puppetmaster/typhoon.git
synced 2025-08-02 17:51:33 +02:00
Compare commits
38 Commits
Author | SHA1 | Date | |
---|---|---|---|
6cf40722de | |||
c230cdec46 | |||
cabf5b2c34 | |||
ba8a951863 | |||
9aa99f1996 | |||
fc38ba45b1 | |||
28a42238c4 | |||
de9b30a587 | |||
affb40d59b | |||
15ac49b34d | |||
6c70d06937 | |||
cf4beeba34 | |||
10b4ba14b6 | |||
e06ee042ee | |||
a527f73f5a | |||
c21a0479c0 | |||
f614c538cf | |||
3da8c1575c | |||
dedd17d085 | |||
e274a451ff | |||
b2e36947ab | |||
5af0a5c5b9 | |||
2265ab5375 | |||
08ea9776f3 | |||
2e8bc99164 | |||
b18b0a9f3d | |||
beb9f1477a | |||
f544a9c71f | |||
415b7fa19a | |||
d0c29099ba | |||
30e4070474 | |||
43f6a19060 | |||
50215e373b | |||
a9f9c59b91 | |||
6ed048eb65 | |||
ce7b2fa21f | |||
9e3807798f | |||
ef9c6aa423 |
74
CHANGES.md
74
CHANGES.md
@ -4,6 +4,80 @@ Notable changes between versions.
|
||||
|
||||
## Latest
|
||||
|
||||
## v1.23.4
|
||||
|
||||
* Kubernetes [v1.23.4](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.23.md#v1234)
|
||||
* Update etcd from v3.5.1 to [v3.5.2](https://github.com/etcd-io/etcd/releases/tag/v3.5.2)
|
||||
* Change default CNI `networking` provider from `calico` to `cilium` ([#1114](https://github.com/poseidon/typhoon/pull/1114))
|
||||
|
||||
### AWS
|
||||
|
||||
* Allow upgrading AWS Terraform Provider to v4.x
|
||||
|
||||
### Addons
|
||||
|
||||
* Align nginx-ingress `--controller-class` with `IngressClass`
|
||||
* Watch only `public` IngressClass objects, better [example](https://kubernetes.github.io/ingress-nginx/user-guide/multiple-ingress/)
|
||||
* Update Prometheus from v2.32.1 to [v2.33.3](https://github.com/prometheus/prometheus/releases/tag/v2.33.3)
|
||||
* Update Grafana from v8.3.6 to [v8.4.1](https://github.com/grafana/grafana/releases/tag/v8.4.1)
|
||||
|
||||
## V1.23.3
|
||||
|
||||
* Kubernetes [v1.23.3](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.23.md#v1233)
|
||||
|
||||
### Flatcar Linux
|
||||
|
||||
#### Google Cloud
|
||||
|
||||
* Switch to using official Kinvolk Flatcar Linux images
|
||||
* Promote Typhoon on Flatcar Linux / Google Cloud to stable
|
||||
* Change `os_image` to `flatcar-stable`, `flatcar-beta`, or `flatcar-alpha` (**action required**)
|
||||
|
||||
## v1.23.2
|
||||
|
||||
* Kubernetes [v1.23.2](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.23.md#v1232)
|
||||
* Remove Kubelet flag `--network-plugin`. Unused since `docker-shim` isn't used ([#1106](https://github.com/poseidon/typhoon/pull/1106))
|
||||
|
||||
### Fedora CoreOS
|
||||
|
||||
* Switch Kubernetes Container Runtime from `docker` to `containerd` ([#1101](https://github.com/poseidon/typhoon/pull/1101))
|
||||
* Mask `docker.service` to prevent it from being socket activated ([#1105](https://github.com/poseidon/typhoon/pull/1105))
|
||||
|
||||
### Flatcar Linux
|
||||
|
||||
#### AWS
|
||||
|
||||
* Add experimental Flatcar Linux ARM64 support ([docs](https://typhoon.psdn.io/advanced/arm64/), [#1102](https://github.com/poseidon/typhoon/pull/1102))
|
||||
* Add `arch` variable to AWS `kubernetes` and `workers` modules
|
||||
* Allow arm64 full-cluster or mixed/hybrid cluster with arm64 workers
|
||||
* Requires `flannel` or `cilium` CNI provider
|
||||
|
||||
### DigitalOcean
|
||||
|
||||
* Upgrade DigitalOcean Terraform provider to [v2.x](https://registry.terraform.io/providers/digitalocean/digitalocean/latest/docs) ([#1109](https://github.com/poseidon/typhoon/pull/1109))
|
||||
|
||||
### Addons
|
||||
|
||||
* Update nginx-ingress from v1.1.0 to [v1.1.1](https://github.com/kubernetes/ingress-nginx/releases/tag/controller-v1.1.1)
|
||||
* Update Grafana from v8.3.3 to [v8.3.4](https://github.com/grafana/grafana/releases/tag/v8.3.4)
|
||||
|
||||
## v1.23.1
|
||||
|
||||
* Kubernetes [v1.23.1](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.23.md#v1231)
|
||||
* Workaround Terraform v1.1 regression in `file` provisioner ([#1093](https://github.com/poseidon/typhoon/pull/1093))
|
||||
|
||||
### Flatcar Linux
|
||||
|
||||
* Switch Kubernetes Container Runtime from `docker` to `containerd` ([#1087](https://github.com/poseidon/typhoon/pull/1087))
|
||||
|
||||
### Addons
|
||||
|
||||
* Configure Prometheus to allow a custom scrape query parameter ([#1095](https://github.com/poseidon/typhoon/pull/1095))
|
||||
* Configure Prometheus to probe Kubernetes Ingress via `blackbox-exporter` ([#1096](https://github.com/poseidon/typhoon/pull/1096))
|
||||
* Fix Prometheus Service probes to use `blackbox-exporter`, not `blackbox` ([#1096](https://github.com/poseidon/typhoon/pull/1096))
|
||||
|
||||
## v1.23.0
|
||||
|
||||
* Kubernetes [v1.23.0](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.23.md#v1230)
|
||||
* Normalize CA cert mounts in static Pods and kube-proxy ([#1078](https://github.com/poseidon/typhoon/pull/1078))
|
||||
* Set Kubelet resolver config to `/run/systemd/resolve/resolv.conf` ([#1082](https://github.com/poseidon/typhoon/pull/1082))
|
||||
|
16
README.md
16
README.md
@ -11,7 +11,7 @@ Typhoon distributes upstream Kubernetes, architectural conventions, and cluster
|
||||
|
||||
## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>
|
||||
|
||||
* Kubernetes v1.23.0 (upstream)
|
||||
* Kubernetes v1.23.4 (upstream)
|
||||
* Single or multi-master, [Calico](https://www.projectcalico.org/) or [Cilium](https://github.com/cilium/cilium) or [flannel](https://github.com/coreos/flannel) networking
|
||||
* On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/), SELinux enforcing
|
||||
* Advanced features like [worker pools](https://typhoon.psdn.io/advanced/worker-pools/), [preemptible](https://typhoon.psdn.io/flatcar-linux/google-cloud/#preemption) workers, and [snippets](https://typhoon.psdn.io/advanced/customization/#hosts) customization
|
||||
@ -43,7 +43,11 @@ Typhoon is available for [Flatcar Linux](https://www.flatcar-linux.org/releases/
|
||||
| Azure | Flatcar Linux | [azure/flatcar-linux/kubernetes](azure/flatcar-linux/kubernetes) | alpha |
|
||||
| Bare-Metal | Flatcar Linux | [bare-metal/flatcar-linux/kubernetes](bare-metal/flatcar-linux/kubernetes) | stable |
|
||||
| DigitalOcean | Flatcar Linux | [digital-ocean/flatcar-linux/kubernetes](digital-ocean/flatcar-linux/kubernetes) | beta |
|
||||
| Google Cloud | Flatcar Linux | [google-cloud/flatcar-linux/kubernetes](google-cloud/flatcar-linux/kubernetes) | beta |
|
||||
| Google Cloud | Flatcar Linux | [google-cloud/flatcar-linux/kubernetes](google-cloud/flatcar-linux/kubernetes) | stable |
|
||||
|
||||
| Platform | Operating System | Terraform Module | Status |
|
||||
|---------------|------------------|------------------|--------|
|
||||
| AWS | Flatcar Linux (ARM64) | [aws/flatcar-linux/kubernetes](aws/flatcar-linux/kubernetes) | alpha |
|
||||
|
||||
## Documentation
|
||||
|
||||
@ -58,7 +62,7 @@ Define a Kubernetes cluster by using the Terraform module for your chosen platfo
|
||||
|
||||
```tf
|
||||
module "yavin" {
|
||||
source = "git::https://github.com/poseidon/typhoon//google-cloud/fedora-coreos/kubernetes?ref=v1.23.0"
|
||||
source = "git::https://github.com/poseidon/typhoon//google-cloud/fedora-coreos/kubernetes?ref=v1.23.4"
|
||||
|
||||
# Google Cloud
|
||||
cluster_name = "yavin"
|
||||
@ -97,9 +101,9 @@ In 4-8 minutes (varies by platform), the cluster will be ready. This Google Clou
|
||||
$ export KUBECONFIG=/home/user/.kube/configs/yavin-config
|
||||
$ kubectl get nodes
|
||||
NAME ROLES STATUS AGE VERSION
|
||||
yavin-controller-0.c.example-com.internal <none> Ready 6m v1.23.0
|
||||
yavin-worker-jrbf.c.example-com.internal <none> Ready 5m v1.23.0
|
||||
yavin-worker-mzdm.c.example-com.internal <none> Ready 5m v1.23.0
|
||||
yavin-controller-0.c.example-com.internal <none> Ready 6m v1.23.4
|
||||
yavin-worker-jrbf.c.example-com.internal <none> Ready 5m v1.23.4
|
||||
yavin-worker-mzdm.c.example-com.internal <none> Ready 5m v1.23.4
|
||||
```
|
||||
|
||||
List the pods.
|
||||
|
@ -24,7 +24,7 @@ spec:
|
||||
type: RuntimeDefault
|
||||
containers:
|
||||
- name: grafana
|
||||
image: docker.io/grafana/grafana:8.3.3
|
||||
image: docker.io/grafana/grafana:8.4.1
|
||||
env:
|
||||
- name: GF_PATHS_CONFIG
|
||||
value: "/etc/grafana/custom.ini"
|
||||
|
@ -3,4 +3,4 @@ kind: IngressClass
|
||||
metadata:
|
||||
name: public
|
||||
spec:
|
||||
controller: k8s.io/ingress-nginx
|
||||
controller: k8s.io/public
|
||||
|
@ -23,9 +23,10 @@ spec:
|
||||
type: RuntimeDefault
|
||||
containers:
|
||||
- name: nginx-ingress-controller
|
||||
image: k8s.gcr.io/ingress-nginx/controller:v1.1.0
|
||||
image: k8s.gcr.io/ingress-nginx/controller:v1.1.1
|
||||
args:
|
||||
- /nginx-ingress-controller
|
||||
- --controller-class=k8s.io/public
|
||||
- --ingress-class=public
|
||||
# use downward API
|
||||
env:
|
||||
|
@ -3,4 +3,4 @@ kind: IngressClass
|
||||
metadata:
|
||||
name: public
|
||||
spec:
|
||||
controller: k8s.io/ingress-nginx
|
||||
controller: k8s.io/public
|
||||
|
@ -23,9 +23,10 @@ spec:
|
||||
type: RuntimeDefault
|
||||
containers:
|
||||
- name: nginx-ingress-controller
|
||||
image: k8s.gcr.io/ingress-nginx/controller:v1.1.0
|
||||
image: k8s.gcr.io/ingress-nginx/controller:v1.1.1
|
||||
args:
|
||||
- /nginx-ingress-controller
|
||||
- --controller-class=k8s.io/public
|
||||
- --ingress-class=public
|
||||
# use downward API
|
||||
env:
|
||||
|
@ -3,4 +3,4 @@ kind: IngressClass
|
||||
metadata:
|
||||
name: public
|
||||
spec:
|
||||
controller: k8s.io/ingress-nginx
|
||||
controller: k8s.io/public
|
||||
|
@ -23,9 +23,10 @@ spec:
|
||||
type: RuntimeDefault
|
||||
containers:
|
||||
- name: nginx-ingress-controller
|
||||
image: k8s.gcr.io/ingress-nginx/controller:v1.1.0
|
||||
image: k8s.gcr.io/ingress-nginx/controller:v1.1.1
|
||||
args:
|
||||
- /nginx-ingress-controller
|
||||
- --controller-class=k8s.io/public
|
||||
- --ingress-class=public
|
||||
# use downward API
|
||||
env:
|
||||
|
@ -3,4 +3,4 @@ kind: IngressClass
|
||||
metadata:
|
||||
name: public
|
||||
spec:
|
||||
controller: k8s.io/ingress-nginx
|
||||
controller: k8s.io/public
|
||||
|
@ -23,9 +23,10 @@ spec:
|
||||
type: RuntimeDefault
|
||||
containers:
|
||||
- name: nginx-ingress-controller
|
||||
image: k8s.gcr.io/ingress-nginx/controller:v1.1.0
|
||||
image: k8s.gcr.io/ingress-nginx/controller:v1.1.1
|
||||
args:
|
||||
- /nginx-ingress-controller
|
||||
- --controller-class=k8s.io/public
|
||||
- --ingress-class=public
|
||||
# use downward API
|
||||
env:
|
||||
|
@ -3,4 +3,4 @@ kind: IngressClass
|
||||
metadata:
|
||||
name: public
|
||||
spec:
|
||||
controller: k8s.io/ingress-nginx
|
||||
controller: k8s.io/public
|
||||
|
@ -23,9 +23,10 @@ spec:
|
||||
type: RuntimeDefault
|
||||
containers:
|
||||
- name: nginx-ingress-controller
|
||||
image: k8s.gcr.io/ingress-nginx/controller:v1.1.0
|
||||
image: k8s.gcr.io/ingress-nginx/controller:v1.1.1
|
||||
args:
|
||||
- /nginx-ingress-controller
|
||||
- --controller-class=k8s.io/public
|
||||
- --ingress-class=public
|
||||
# use downward API
|
||||
env:
|
||||
|
@ -175,6 +175,7 @@ data:
|
||||
# * `prometheus.io/path`: If the metrics path is not `/metrics` override this.
|
||||
# * `prometheus.io/port`: If the metrics are exposed on a different port to the
|
||||
# service then set this appropriately.
|
||||
# * `prometheus.io/param`: Custom metrics query parameter, like "format=prometheus".
|
||||
- job_name: 'kubernetes-service-endpoints'
|
||||
kubernetes_sd_configs:
|
||||
- role: endpoints
|
||||
@ -197,6 +198,11 @@ data:
|
||||
target_label: __address__
|
||||
regex: ([^:]+)(?::\d+)?;(\d+)
|
||||
replacement: $1:$2
|
||||
- source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_param]
|
||||
action: replace
|
||||
target_label: __param_$1
|
||||
regex: ([^=]+)=(.*)
|
||||
replacement: $2
|
||||
- action: labelmap
|
||||
regex: __meta_kubernetes_service_label_(.+)
|
||||
- source_labels: [__meta_kubernetes_namespace]
|
||||
@ -214,38 +220,6 @@ data:
|
||||
action: drop
|
||||
regex: etcd_(debugging|disk|request|server).*
|
||||
|
||||
# Example scrape config for probing services via the Blackbox Exporter.
|
||||
#
|
||||
# The relabeling allows the actual service scrape endpoint to be configured
|
||||
# via the following annotations:
|
||||
#
|
||||
# * `prometheus.io/probe`: Only probe services that have a value of `true`
|
||||
- job_name: 'kubernetes-services'
|
||||
|
||||
metrics_path: /probe
|
||||
params:
|
||||
module: [http_2xx]
|
||||
|
||||
kubernetes_sd_configs:
|
||||
- role: service
|
||||
|
||||
relabel_configs:
|
||||
- source_labels: [__meta_kubernetes_service_annotation_prometheus_io_probe]
|
||||
action: keep
|
||||
regex: true
|
||||
- source_labels: [__address__]
|
||||
target_label: __param_target
|
||||
- target_label: __address__
|
||||
replacement: blackbox
|
||||
- source_labels: [__param_target]
|
||||
target_label: instance
|
||||
- action: labelmap
|
||||
regex: __meta_kubernetes_service_label_(.+)
|
||||
- source_labels: [__meta_kubernetes_namespace]
|
||||
target_label: namespace
|
||||
- source_labels: [__meta_kubernetes_service_name]
|
||||
target_label: job
|
||||
|
||||
# Example scrape config for pods
|
||||
#
|
||||
# The relabeling allows the actual pod scrape endpoint to be configured via the
|
||||
@ -282,6 +256,67 @@ data:
|
||||
action: replace
|
||||
target_label: kubernetes_pod_name
|
||||
|
||||
# Example scrape config for probing Services via the Blackbox Exporter.
|
||||
#
|
||||
# Relabeling allows service scraping to be configured via annotations:
|
||||
# * `prometheus.io/probe`: Only probe services that have a value of `true`
|
||||
- job_name: 'kubernetes-services'
|
||||
|
||||
metrics_path: /probe
|
||||
params:
|
||||
module: [http_2xx]
|
||||
|
||||
kubernetes_sd_configs:
|
||||
- role: service
|
||||
|
||||
relabel_configs:
|
||||
- source_labels: [__meta_kubernetes_service_annotation_prometheus_io_probe]
|
||||
action: keep
|
||||
regex: true
|
||||
- source_labels: [__address__]
|
||||
target_label: __param_target
|
||||
- target_label: __address__
|
||||
replacement: blackbox-exporter:8080
|
||||
- source_labels: [__param_target]
|
||||
target_label: instance
|
||||
- action: labelmap
|
||||
regex: __meta_kubernetes_service_label_(.+)
|
||||
- source_labels: [__meta_kubernetes_namespace]
|
||||
target_label: namespace
|
||||
- source_labels: [__meta_kubernetes_service_name]
|
||||
target_label: job
|
||||
|
||||
# Example scrape config for probing Ingresses via a Blackbox Exporter.
|
||||
#
|
||||
# Relabeling allows service scraping to be configured via annotations:
|
||||
# * `prometheus.io/probe`: Only probe ingresses that have a value of `true`
|
||||
- job_name: 'kubernetes-ingresses'
|
||||
metrics_path: /probe
|
||||
params:
|
||||
module: [http_2xx]
|
||||
|
||||
kubernetes_sd_configs:
|
||||
- role: ingress
|
||||
|
||||
relabel_configs:
|
||||
- source_labels: [__meta_kubernetes_ingress_annotation_prometheus_io_probe]
|
||||
action: keep
|
||||
regex: true
|
||||
- source_labels: [__meta_kubernetes_ingress_scheme, __address__, __meta_kubernetes_ingress_path]
|
||||
regex: (.+);(.+);(.+)
|
||||
replacement: ${1}://${2}${3}
|
||||
target_label: __param_target
|
||||
- target_label: __address__
|
||||
replacement: blackbox-exporter:8080
|
||||
- source_labels: [__param_target]
|
||||
target_label: instance
|
||||
- action: labelmap
|
||||
regex: __meta_kubernetes_ingress_label_(.+)
|
||||
- source_labels: [__meta_kubernetes_namespace]
|
||||
target_label: namespace
|
||||
- source_labels: [__meta_kubernetes_service_name]
|
||||
target_label: job
|
||||
|
||||
# Rule files
|
||||
rule_files:
|
||||
- "/etc/prometheus/rules/*.rules"
|
||||
|
@ -21,7 +21,7 @@ spec:
|
||||
serviceAccountName: prometheus
|
||||
containers:
|
||||
- name: prometheus
|
||||
image: quay.io/prometheus/prometheus:v2.32.0
|
||||
image: quay.io/prometheus/prometheus:v2.33.3
|
||||
args:
|
||||
- --web.listen-address=0.0.0.0:9090
|
||||
- --config.file=/etc/prometheus/prometheus.yaml
|
||||
|
@ -10,6 +10,17 @@ rules:
|
||||
- services
|
||||
- endpoints
|
||||
- pods
|
||||
verbs: ["get", "list", "watch"]
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- nonResourceURLs: ["/metrics"]
|
||||
verbs: ["get"]
|
||||
- apiGroups:
|
||||
- networking.k8s.io
|
||||
resources:
|
||||
- ingresses
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
|
@ -11,7 +11,7 @@ Typhoon distributes upstream Kubernetes, architectural conventions, and cluster
|
||||
|
||||
## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>
|
||||
|
||||
* Kubernetes v1.23.0 (upstream)
|
||||
* Kubernetes v1.23.4 (upstream)
|
||||
* Single or multi-master, [Calico](https://www.projectcalico.org/) or [Cilium](https://github.com/cilium/cilium) or [flannel](https://github.com/coreos/flannel) networking
|
||||
* On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/), SELinux enforcing
|
||||
* Advanced features like [worker pools](https://typhoon.psdn.io/advanced/worker-pools/), [spot](https://typhoon.psdn.io/fedora-coreos/aws/#spot) workers, and [snippets](https://typhoon.psdn.io/advanced/customization/#hosts) customization
|
||||
|
@ -1,6 +1,6 @@
|
||||
# Kubernetes assets (kubeconfig, manifests)
|
||||
module "bootstrap" {
|
||||
source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=37f45cb28be2188befb5304794ba312cd8048fab"
|
||||
source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=205e5f212b634b3f590c0a91f60f4540b145045d"
|
||||
|
||||
cluster_name = var.cluster_name
|
||||
api_servers = [format("%s.%s", var.cluster_name, var.dns_zone)]
|
||||
|
@ -12,7 +12,7 @@ systemd:
|
||||
Wants=network-online.target network.target
|
||||
After=network-online.target
|
||||
[Service]
|
||||
Environment=ETCD_IMAGE=quay.io/coreos/etcd:v3.5.1
|
||||
Environment=ETCD_IMAGE=quay.io/coreos/etcd:v3.5.2
|
||||
Type=exec
|
||||
ExecStartPre=/bin/mkdir -p /var/lib/etcd
|
||||
ExecStartPre=-/usr/bin/podman rm etcd
|
||||
@ -29,8 +29,10 @@ systemd:
|
||||
LimitNOFILE=40000
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
- name: docker.service
|
||||
- name: containerd.service
|
||||
enabled: true
|
||||
- name: docker.service
|
||||
mask: true
|
||||
- name: wait-for-dns.service
|
||||
enabled: true
|
||||
contents: |
|
||||
@ -54,7 +56,7 @@ systemd:
|
||||
After=afterburn.service
|
||||
Wants=rpc-statd.service
|
||||
[Service]
|
||||
Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.23.0
|
||||
Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.23.4
|
||||
EnvironmentFile=/run/metadata/afterburn
|
||||
ExecStartPre=/bin/mkdir -p /etc/cni/net.d
|
||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
|
||||
@ -74,7 +76,7 @@ systemd:
|
||||
--volume /run:/run \
|
||||
--volume /sys/fs/cgroup:/sys/fs/cgroup \
|
||||
--volume /var/lib/calico:/var/lib/calico:ro \
|
||||
--volume /var/lib/docker:/var/lib/docker \
|
||||
--volume /var/lib/containerd:/var/lib/containerd \
|
||||
--volume /var/lib/kubelet:/var/lib/kubelet:rshared,z \
|
||||
--volume /var/log:/var/log \
|
||||
--volume /var/run/lock:/var/run/lock:z \
|
||||
@ -86,13 +88,14 @@ systemd:
|
||||
--bootstrap-kubeconfig=/etc/kubernetes/kubeconfig \
|
||||
--cgroup-driver=systemd \
|
||||
--cgroups-per-qos=true \
|
||||
--container-runtime=remote \
|
||||
--container-runtime-endpoint=unix:///run/containerd/containerd.sock \
|
||||
--enforce-node-allocatable=pods \
|
||||
--client-ca-file=/etc/kubernetes/ca.crt \
|
||||
--cluster_dns=${cluster_dns_service_ip} \
|
||||
--cluster_domain=${cluster_domain_suffix} \
|
||||
--healthz-port=0 \
|
||||
--kubeconfig=/var/lib/kubelet/kubeconfig \
|
||||
--network-plugin=cni \
|
||||
--node-labels=node.kubernetes.io/controller="true" \
|
||||
--pod-manifest-path=/etc/kubernetes/manifests \
|
||||
--provider-id=aws:///$${AFTERBURN_AWS_AVAILABILITY_ZONE}/$${AFTERBURN_AWS_INSTANCE_ID} \
|
||||
@ -123,7 +126,7 @@ systemd:
|
||||
--volume /opt/bootstrap/assets:/assets:ro,Z \
|
||||
--volume /opt/bootstrap/apply:/apply:ro,Z \
|
||||
--entrypoint=/apply \
|
||||
quay.io/poseidon/kubelet:v1.23.0
|
||||
quay.io/poseidon/kubelet:v1.23.4
|
||||
ExecStartPost=/bin/touch /opt/bootstrap/bootstrap.done
|
||||
ExecStartPost=-/usr/bin/podman stop bootstrap
|
||||
storage:
|
||||
@ -218,8 +221,26 @@ storage:
|
||||
ETCD_PEER_CERT_FILE=/etc/ssl/certs/etcd/peer.crt
|
||||
ETCD_PEER_KEY_FILE=/etc/ssl/certs/etcd/peer.key
|
||||
ETCD_PEER_CLIENT_CERT_AUTH=true
|
||||
ETCD_UNSUPPORTED_ARCH=arm64
|
||||
- path: /etc/fedora-coreos/iptables-legacy.stamp
|
||||
- path: /etc/containerd/config.toml
|
||||
overwrite: true
|
||||
contents:
|
||||
inline: |
|
||||
version = 2
|
||||
root = "/var/lib/containerd"
|
||||
state = "/run/containerd"
|
||||
subreaper = true
|
||||
oom_score = -999
|
||||
[grpc]
|
||||
address = "/run/containerd/containerd.sock"
|
||||
uid = 0
|
||||
gid = 0
|
||||
[plugins."io.containerd.grpc.v1.cri"]
|
||||
enable_selinux = true
|
||||
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
|
||||
runtime_type = "io.containerd.runc.v2"
|
||||
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
|
||||
SystemdCgroup = true
|
||||
passwd:
|
||||
users:
|
||||
- name: core
|
||||
|
@ -24,7 +24,7 @@ resource "null_resource" "copy-controller-secrets" {
|
||||
|
||||
provisioner "file" {
|
||||
content = join("\n", local.assets_bundle)
|
||||
destination = "$HOME/assets"
|
||||
destination = "/home/core/assets"
|
||||
}
|
||||
|
||||
provisioner "remote-exec" {
|
||||
|
@ -103,8 +103,8 @@ variable "ssh_authorized_key" {
|
||||
|
||||
variable "networking" {
|
||||
type = string
|
||||
description = "Choice of networking provider (calico or flannel)"
|
||||
default = "calico"
|
||||
description = "Choice of networking provider (flannel, calico, or cilium)"
|
||||
default = "cilium"
|
||||
}
|
||||
|
||||
variable "network_mtu" {
|
||||
|
@ -3,7 +3,7 @@
|
||||
terraform {
|
||||
required_version = ">= 0.13.0, < 2.0.0"
|
||||
required_providers {
|
||||
aws = ">= 2.23, <= 4.0"
|
||||
aws = ">= 2.23, <= 5.0"
|
||||
template = "~> 2.2"
|
||||
null = ">= 2.1"
|
||||
|
||||
|
@ -3,8 +3,10 @@ variant: fcos
|
||||
version: 1.4.0
|
||||
systemd:
|
||||
units:
|
||||
- name: docker.service
|
||||
- name: containerd.service
|
||||
enabled: true
|
||||
- name: docker.service
|
||||
mask: true
|
||||
- name: wait-for-dns.service
|
||||
enabled: true
|
||||
contents: |
|
||||
@ -27,7 +29,7 @@ systemd:
|
||||
After=afterburn.service
|
||||
Wants=rpc-statd.service
|
||||
[Service]
|
||||
Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.23.0
|
||||
Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.23.4
|
||||
EnvironmentFile=/run/metadata/afterburn
|
||||
ExecStartPre=/bin/mkdir -p /etc/cni/net.d
|
||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
|
||||
@ -47,7 +49,7 @@ systemd:
|
||||
--volume /run:/run \
|
||||
--volume /sys/fs/cgroup:/sys/fs/cgroup \
|
||||
--volume /var/lib/calico:/var/lib/calico:ro \
|
||||
--volume /var/lib/docker:/var/lib/docker \
|
||||
--volume /var/lib/containerd:/var/lib/containerd \
|
||||
--volume /var/lib/kubelet:/var/lib/kubelet:rshared,z \
|
||||
--volume /var/log:/var/log \
|
||||
--volume /var/run/lock:/var/run/lock:z \
|
||||
@ -59,13 +61,14 @@ systemd:
|
||||
--bootstrap-kubeconfig=/etc/kubernetes/kubeconfig \
|
||||
--cgroup-driver=systemd \
|
||||
--cgroups-per-qos=true \
|
||||
--container-runtime=remote \
|
||||
--container-runtime-endpoint=unix:///run/containerd/containerd.sock \
|
||||
--enforce-node-allocatable=pods \
|
||||
--client-ca-file=/etc/kubernetes/ca.crt \
|
||||
--cluster_dns=${cluster_dns_service_ip} \
|
||||
--cluster_domain=${cluster_domain_suffix} \
|
||||
--healthz-port=0 \
|
||||
--kubeconfig=/var/lib/kubelet/kubeconfig \
|
||||
--network-plugin=cni \
|
||||
--node-labels=node.kubernetes.io/node \
|
||||
%{~ for label in split(",", node_labels) ~}
|
||||
--node-labels=${label} \
|
||||
@ -91,7 +94,7 @@ systemd:
|
||||
[Unit]
|
||||
Description=Delete Kubernetes node on shutdown
|
||||
[Service]
|
||||
Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.23.0
|
||||
Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.23.4
|
||||
Type=oneshot
|
||||
RemainAfterExit=true
|
||||
ExecStart=/bin/true
|
||||
@ -131,9 +134,27 @@ storage:
|
||||
DefaultMemoryAccounting=yes
|
||||
DefaultBlockIOAccounting=yes
|
||||
- path: /etc/fedora-coreos/iptables-legacy.stamp
|
||||
- path: /etc/containerd/config.toml
|
||||
overwrite: true
|
||||
contents:
|
||||
inline: |
|
||||
version = 2
|
||||
root = "/var/lib/containerd"
|
||||
state = "/run/containerd"
|
||||
subreaper = true
|
||||
oom_score = -999
|
||||
[grpc]
|
||||
address = "/run/containerd/containerd.sock"
|
||||
uid = 0
|
||||
gid = 0
|
||||
[plugins."io.containerd.grpc.v1.cri"]
|
||||
enable_selinux = true
|
||||
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
|
||||
runtime_type = "io.containerd.runc.v2"
|
||||
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
|
||||
SystemdCgroup = true
|
||||
passwd:
|
||||
users:
|
||||
- name: core
|
||||
ssh_authorized_keys:
|
||||
- ${ssh_authorized_key}
|
||||
|
||||
|
@ -3,7 +3,7 @@
|
||||
terraform {
|
||||
required_version = ">= 0.13.0, < 2.0.0"
|
||||
required_providers {
|
||||
aws = ">= 2.23, <= 4.0"
|
||||
aws = ">= 2.23, <= 5.0"
|
||||
template = "~> 2.2"
|
||||
|
||||
ct = {
|
||||
|
@ -33,13 +33,11 @@ resource "aws_autoscaling_group" "workers" {
|
||||
# used. Disable wait to avoid issues and align with other clouds.
|
||||
wait_for_capacity_timeout = "0"
|
||||
|
||||
tags = [
|
||||
{
|
||||
key = "Name"
|
||||
value = "${var.name}-worker"
|
||||
propagate_at_launch = true
|
||||
},
|
||||
]
|
||||
tag {
|
||||
key = "Name"
|
||||
value = "${var.name}-worker"
|
||||
propagate_at_launch = true
|
||||
}
|
||||
}
|
||||
|
||||
# Worker template
|
||||
|
@ -11,7 +11,7 @@ Typhoon distributes upstream Kubernetes, architectural conventions, and cluster
|
||||
|
||||
## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>
|
||||
|
||||
* Kubernetes v1.23.0 (upstream)
|
||||
* Kubernetes v1.23.4 (upstream)
|
||||
* Single or multi-master, [Calico](https://www.projectcalico.org/) or [Cilium](https://github.com/cilium/cilium) or [flannel](https://github.com/coreos/flannel) networking
|
||||
* On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
|
||||
* Advanced features like [worker pools](https://typhoon.psdn.io/advanced/worker-pools/), [spot](https://typhoon.psdn.io/flatcar-linux/aws/#spot) workers, and [snippets](https://typhoon.psdn.io/advanced/customization/#hosts) customization
|
||||
|
@ -1,7 +1,7 @@
|
||||
locals {
|
||||
# Pick a Flatcar Linux AMI
|
||||
# flatcar-stable -> Flatcar Linux AMI
|
||||
ami_id = data.aws_ami.flatcar.image_id
|
||||
ami_id = var.arch == "arm64" ? data.aws_ami.flatcar-arm64[0].image_id : data.aws_ami.flatcar.image_id
|
||||
channel = split("-", var.os_image)[1]
|
||||
}
|
||||
|
||||
@ -25,3 +25,25 @@ data "aws_ami" "flatcar" {
|
||||
}
|
||||
}
|
||||
|
||||
data "aws_ami" "flatcar-arm64" {
|
||||
count = var.arch == "arm64" ? 1 : 0
|
||||
|
||||
most_recent = true
|
||||
owners = ["075585003325"]
|
||||
|
||||
filter {
|
||||
name = "architecture"
|
||||
values = ["arm64"]
|
||||
}
|
||||
|
||||
filter {
|
||||
name = "virtualization-type"
|
||||
values = ["hvm"]
|
||||
}
|
||||
|
||||
filter {
|
||||
name = "name"
|
||||
values = ["Flatcar-${local.channel}-*"]
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1,6 +1,6 @@
|
||||
# Kubernetes assets (kubeconfig, manifests)
|
||||
module "bootstrap" {
|
||||
source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=37f45cb28be2188befb5304794ba312cd8048fab"
|
||||
source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=205e5f212b634b3f590c0a91f60f4540b145045d"
|
||||
|
||||
cluster_name = var.cluster_name
|
||||
api_servers = [format("%s.%s", var.cluster_name, var.dns_zone)]
|
||||
|
@ -10,7 +10,7 @@ systemd:
|
||||
Requires=docker.service
|
||||
After=docker.service
|
||||
[Service]
|
||||
Environment=ETCD_IMAGE=quay.io/coreos/etcd:v3.5.1
|
||||
Environment=ETCD_IMAGE=quay.io/coreos/etcd:v3.5.2
|
||||
ExecStartPre=/usr/bin/docker run -d \
|
||||
--name etcd \
|
||||
--network host \
|
||||
@ -57,7 +57,7 @@ systemd:
|
||||
After=coreos-metadata.service
|
||||
Wants=rpc-statd.service
|
||||
[Service]
|
||||
Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.23.0
|
||||
Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.23.4
|
||||
EnvironmentFile=/run/metadata/coreos
|
||||
ExecStartPre=/bin/mkdir -p /etc/cni/net.d
|
||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
|
||||
@ -78,7 +78,7 @@ systemd:
|
||||
-v /run:/run \
|
||||
-v /sys/fs/cgroup:/sys/fs/cgroup \
|
||||
-v /var/lib/calico:/var/lib/calico:ro \
|
||||
-v /var/lib/docker:/var/lib/docker \
|
||||
-v /var/lib/containerd:/var/lib/containerd \
|
||||
-v /var/lib/kubelet:/var/lib/kubelet:rshared \
|
||||
-v /var/log:/var/log \
|
||||
-v /opt/cni/bin:/opt/cni/bin \
|
||||
@ -88,12 +88,13 @@ systemd:
|
||||
--authorization-mode=Webhook \
|
||||
--bootstrap-kubeconfig=/etc/kubernetes/kubeconfig \
|
||||
--cgroup-driver=systemd \
|
||||
--container-runtime=remote \
|
||||
--container-runtime-endpoint=unix:///run/containerd/containerd.sock \
|
||||
--client-ca-file=/etc/kubernetes/ca.crt \
|
||||
--cluster_dns=${cluster_dns_service_ip} \
|
||||
--cluster_domain=${cluster_domain_suffix} \
|
||||
--healthz-port=0 \
|
||||
--kubeconfig=/var/lib/kubelet/kubeconfig \
|
||||
--network-plugin=cni \
|
||||
--node-labels=node.kubernetes.io/controller="true" \
|
||||
--pod-manifest-path=/etc/kubernetes/manifests \
|
||||
--provider-id=aws:///$${COREOS_EC2_AVAILABILITY_ZONE}/$${COREOS_EC2_INSTANCE_ID} \
|
||||
@ -120,7 +121,7 @@ systemd:
|
||||
Type=oneshot
|
||||
RemainAfterExit=true
|
||||
WorkingDirectory=/opt/bootstrap
|
||||
Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.23.0
|
||||
Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.23.4
|
||||
ExecStart=/usr/bin/docker run \
|
||||
-v /etc/kubernetes/pki:/etc/kubernetes/pki:ro \
|
||||
-v /opt/bootstrap/assets:/assets:ro \
|
||||
|
@ -24,7 +24,7 @@ resource "null_resource" "copy-controller-secrets" {
|
||||
|
||||
provisioner "file" {
|
||||
content = join("\n", local.assets_bundle)
|
||||
destination = "$HOME/assets"
|
||||
destination = "/home/core/assets"
|
||||
}
|
||||
|
||||
provisioner "remote-exec" {
|
||||
|
@ -103,8 +103,8 @@ variable "ssh_authorized_key" {
|
||||
|
||||
variable "networking" {
|
||||
type = string
|
||||
description = "Choice of networking provider (calico or flannel)"
|
||||
default = "calico"
|
||||
description = "Choice of networking provider (flannel, calico, or cilium)"
|
||||
default = "cilium"
|
||||
}
|
||||
|
||||
variable "network_mtu" {
|
||||
@ -160,6 +160,17 @@ variable "cluster_domain_suffix" {
|
||||
default = "cluster.local"
|
||||
}
|
||||
|
||||
variable "arch" {
|
||||
type = string
|
||||
description = "Container architecture (amd64 or arm64)"
|
||||
default = "amd64"
|
||||
|
||||
validation {
|
||||
condition = var.arch == "amd64" || var.arch == "arm64"
|
||||
error_message = "The arch must be amd64 or arm64."
|
||||
}
|
||||
}
|
||||
|
||||
variable "daemonset_tolerations" {
|
||||
type = list(string)
|
||||
description = "List of additional taint keys kube-system DaemonSets should tolerate (e.g. ['custom-role', 'gpu-role'])"
|
||||
|
@ -3,7 +3,7 @@
|
||||
terraform {
|
||||
required_version = ">= 0.13.0, < 2.0.0"
|
||||
required_providers {
|
||||
aws = ">= 2.23, <= 4.0"
|
||||
aws = ">= 2.23, <= 5.0"
|
||||
template = "~> 2.2"
|
||||
null = ">= 2.1"
|
||||
|
||||
|
@ -9,6 +9,7 @@ module "workers" {
|
||||
worker_count = var.worker_count
|
||||
instance_type = var.worker_type
|
||||
os_image = var.os_image
|
||||
arch = var.arch
|
||||
disk_size = var.disk_size
|
||||
spot_price = var.worker_price
|
||||
target_groups = var.worker_target_groups
|
||||
|
@ -1,7 +1,7 @@
|
||||
locals {
|
||||
# Pick a Flatcar Linux AMI
|
||||
# flatcar-stable -> Flatcar Linux AMI
|
||||
ami_id = data.aws_ami.flatcar.image_id
|
||||
ami_id = var.arch == "arm64" ? data.aws_ami.flatcar-arm64[0].image_id : data.aws_ami.flatcar.image_id
|
||||
channel = split("-", var.os_image)[1]
|
||||
}
|
||||
|
||||
@ -25,3 +25,24 @@ data "aws_ami" "flatcar" {
|
||||
}
|
||||
}
|
||||
|
||||
data "aws_ami" "flatcar-arm64" {
|
||||
count = var.arch == "arm64" ? 1 : 0
|
||||
|
||||
most_recent = true
|
||||
owners = ["075585003325"]
|
||||
|
||||
filter {
|
||||
name = "architecture"
|
||||
values = ["arm64"]
|
||||
}
|
||||
|
||||
filter {
|
||||
name = "virtualization-type"
|
||||
values = ["hvm"]
|
||||
}
|
||||
|
||||
filter {
|
||||
name = "name"
|
||||
values = ["Flatcar-${local.channel}-*"]
|
||||
}
|
||||
}
|
||||
|
@ -29,7 +29,7 @@ systemd:
|
||||
After=coreos-metadata.service
|
||||
Wants=rpc-statd.service
|
||||
[Service]
|
||||
Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.23.0
|
||||
Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.23.4
|
||||
EnvironmentFile=/run/metadata/coreos
|
||||
ExecStartPre=/bin/mkdir -p /etc/cni/net.d
|
||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
|
||||
@ -53,7 +53,7 @@ systemd:
|
||||
-v /run:/run \
|
||||
-v /sys/fs/cgroup:/sys/fs/cgroup \
|
||||
-v /var/lib/calico:/var/lib/calico:ro \
|
||||
-v /var/lib/docker:/var/lib/docker \
|
||||
-v /var/lib/containerd:/var/lib/containerd \
|
||||
-v /var/lib/kubelet:/var/lib/kubelet:rshared \
|
||||
-v /var/log:/var/log \
|
||||
-v /opt/cni/bin:/opt/cni/bin \
|
||||
@ -63,12 +63,13 @@ systemd:
|
||||
--authorization-mode=Webhook \
|
||||
--bootstrap-kubeconfig=/etc/kubernetes/kubeconfig \
|
||||
--cgroup-driver=systemd \
|
||||
--container-runtime=remote \
|
||||
--container-runtime-endpoint=unix:///run/containerd/containerd.sock \
|
||||
--client-ca-file=/etc/kubernetes/ca.crt \
|
||||
--cluster_dns=${cluster_dns_service_ip} \
|
||||
--cluster_domain=${cluster_domain_suffix} \
|
||||
--healthz-port=0 \
|
||||
--kubeconfig=/var/lib/kubelet/kubeconfig \
|
||||
--network-plugin=cni \
|
||||
--node-labels=node.kubernetes.io/node \
|
||||
%{~ for label in split(",", node_labels) ~}
|
||||
--node-labels=${label} \
|
||||
@ -95,7 +96,7 @@ systemd:
|
||||
[Unit]
|
||||
Description=Delete Kubernetes node on shutdown
|
||||
[Service]
|
||||
Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.23.0
|
||||
Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.23.4
|
||||
Type=oneshot
|
||||
RemainAfterExit=true
|
||||
ExecStart=/bin/true
|
||||
|
@ -119,3 +119,16 @@ variable "node_taints" {
|
||||
description = "List of initial node taints"
|
||||
default = []
|
||||
}
|
||||
|
||||
# unofficial, undocumented, unsupported
|
||||
|
||||
variable "arch" {
|
||||
type = string
|
||||
description = "Container architecture (amd64 or arm64)"
|
||||
default = "amd64"
|
||||
|
||||
validation {
|
||||
condition = var.arch == "amd64" || var.arch == "arm64"
|
||||
error_message = "The arch must be amd64 or arm64."
|
||||
}
|
||||
}
|
||||
|
@ -3,7 +3,7 @@
|
||||
terraform {
|
||||
required_version = ">= 0.13.0, < 2.0.0"
|
||||
required_providers {
|
||||
aws = ">= 2.23, <= 4.0"
|
||||
aws = ">= 2.23, <= 5.0"
|
||||
template = "~> 2.2"
|
||||
|
||||
ct = {
|
||||
|
@ -33,13 +33,11 @@ resource "aws_autoscaling_group" "workers" {
|
||||
# used. Disable wait to avoid issues and align with other clouds.
|
||||
wait_for_capacity_timeout = "0"
|
||||
|
||||
tags = [
|
||||
{
|
||||
key = "Name"
|
||||
value = "${var.name}-worker"
|
||||
propagate_at_launch = true
|
||||
},
|
||||
]
|
||||
tag {
|
||||
key = "Name"
|
||||
value = "${var.name}-worker"
|
||||
propagate_at_launch = true
|
||||
}
|
||||
}
|
||||
|
||||
# Worker template
|
||||
|
@ -11,7 +11,7 @@ Typhoon distributes upstream Kubernetes, architectural conventions, and cluster
|
||||
|
||||
## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>
|
||||
|
||||
* Kubernetes v1.23.0 (upstream)
|
||||
* Kubernetes v1.23.4 (upstream)
|
||||
* Single or multi-master, [Calico](https://www.projectcalico.org/) or [Cilium](https://github.com/cilium/cilium) or [flannel](https://github.com/coreos/flannel) networking
|
||||
* On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/), SELinux enforcing
|
||||
* Advanced features like [worker pools](https://typhoon.psdn.io/advanced/worker-pools/), [spot priority](https://typhoon.psdn.io/fedora-coreos/azure/#low-priority) workers, and [snippets](https://typhoon.psdn.io/advanced/customization/#hosts) customization
|
||||
|
@ -1,6 +1,6 @@
|
||||
# Kubernetes assets (kubeconfig, manifests)
|
||||
module "bootstrap" {
|
||||
source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=37f45cb28be2188befb5304794ba312cd8048fab"
|
||||
source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=205e5f212b634b3f590c0a91f60f4540b145045d"
|
||||
|
||||
cluster_name = var.cluster_name
|
||||
api_servers = [format("%s.%s", var.cluster_name, var.dns_zone)]
|
||||
|
@ -12,7 +12,7 @@ systemd:
|
||||
Wants=network-online.target network.target
|
||||
After=network-online.target
|
||||
[Service]
|
||||
Environment=ETCD_IMAGE=quay.io/coreos/etcd:v3.5.1
|
||||
Environment=ETCD_IMAGE=quay.io/coreos/etcd:v3.5.2
|
||||
Type=exec
|
||||
ExecStartPre=/bin/mkdir -p /var/lib/etcd
|
||||
ExecStartPre=-/usr/bin/podman rm etcd
|
||||
@ -29,8 +29,10 @@ systemd:
|
||||
LimitNOFILE=40000
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
- name: docker.service
|
||||
- name: containerd.service
|
||||
enabled: true
|
||||
- name: docker.service
|
||||
mask: true
|
||||
- name: wait-for-dns.service
|
||||
enabled: true
|
||||
contents: |
|
||||
@ -51,7 +53,7 @@ systemd:
|
||||
Description=Kubelet (System Container)
|
||||
Wants=rpc-statd.service
|
||||
[Service]
|
||||
Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.23.0
|
||||
Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.23.4
|
||||
ExecStartPre=/bin/mkdir -p /etc/cni/net.d
|
||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
|
||||
ExecStartPre=/bin/mkdir -p /opt/cni/bin
|
||||
@ -70,7 +72,7 @@ systemd:
|
||||
--volume /run:/run \
|
||||
--volume /sys/fs/cgroup:/sys/fs/cgroup \
|
||||
--volume /var/lib/calico:/var/lib/calico:ro \
|
||||
--volume /var/lib/docker:/var/lib/docker \
|
||||
--volume /var/lib/containerd:/var/lib/containerd \
|
||||
--volume /var/lib/kubelet:/var/lib/kubelet:rshared,z \
|
||||
--volume /var/log:/var/log \
|
||||
--volume /var/run/lock:/var/run/lock:z \
|
||||
@ -82,13 +84,14 @@ systemd:
|
||||
--bootstrap-kubeconfig=/etc/kubernetes/kubeconfig \
|
||||
--cgroup-driver=systemd \
|
||||
--cgroups-per-qos=true \
|
||||
--container-runtime=remote \
|
||||
--container-runtime-endpoint=unix:///run/containerd/containerd.sock \
|
||||
--enforce-node-allocatable=pods \
|
||||
--client-ca-file=/etc/kubernetes/ca.crt \
|
||||
--cluster_dns=${cluster_dns_service_ip} \
|
||||
--cluster_domain=${cluster_domain_suffix} \
|
||||
--healthz-port=0 \
|
||||
--kubeconfig=/var/lib/kubelet/kubeconfig \
|
||||
--network-plugin=cni \
|
||||
--node-labels=node.kubernetes.io/controller="true" \
|
||||
--pod-manifest-path=/etc/kubernetes/manifests \
|
||||
--read-only-port=0 \
|
||||
@ -118,7 +121,7 @@ systemd:
|
||||
--volume /opt/bootstrap/assets:/assets:ro,Z \
|
||||
--volume /opt/bootstrap/apply:/apply:ro,Z \
|
||||
--entrypoint=/apply \
|
||||
quay.io/poseidon/kubelet:v1.23.0
|
||||
quay.io/poseidon/kubelet:v1.23.4
|
||||
ExecStartPost=/bin/touch /opt/bootstrap/bootstrap.done
|
||||
ExecStartPost=-/usr/bin/podman stop bootstrap
|
||||
storage:
|
||||
@ -214,6 +217,25 @@ storage:
|
||||
ETCD_PEER_KEY_FILE=/etc/ssl/certs/etcd/peer.key
|
||||
ETCD_PEER_CLIENT_CERT_AUTH=true
|
||||
- path: /etc/fedora-coreos/iptables-legacy.stamp
|
||||
- path: /etc/containerd/config.toml
|
||||
overwrite: true
|
||||
contents:
|
||||
inline: |
|
||||
version = 2
|
||||
root = "/var/lib/containerd"
|
||||
state = "/run/containerd"
|
||||
subreaper = true
|
||||
oom_score = -999
|
||||
[grpc]
|
||||
address = "/run/containerd/containerd.sock"
|
||||
uid = 0
|
||||
gid = 0
|
||||
[plugins."io.containerd.grpc.v1.cri"]
|
||||
enable_selinux = true
|
||||
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
|
||||
runtime_type = "io.containerd.runc.v2"
|
||||
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
|
||||
SystemdCgroup = true
|
||||
passwd:
|
||||
users:
|
||||
- name: core
|
||||
|
@ -25,7 +25,7 @@ resource "null_resource" "copy-controller-secrets" {
|
||||
|
||||
provisioner "file" {
|
||||
content = join("\n", local.assets_bundle)
|
||||
destination = "$HOME/assets"
|
||||
destination = "/home/core/assets"
|
||||
}
|
||||
|
||||
provisioner "remote-exec" {
|
||||
|
@ -84,8 +84,8 @@ variable "ssh_authorized_key" {
|
||||
|
||||
variable "networking" {
|
||||
type = string
|
||||
description = "Choice of networking provider (flannel or calico)"
|
||||
default = "calico"
|
||||
description = "Choice of networking provider (flannel, calico, or cilium)"
|
||||
default = "cilium"
|
||||
}
|
||||
|
||||
variable "host_cidr" {
|
||||
|
@ -3,8 +3,10 @@ variant: fcos
|
||||
version: 1.4.0
|
||||
systemd:
|
||||
units:
|
||||
- name: docker.service
|
||||
- name: containerd.service
|
||||
enabled: true
|
||||
- name: docker.service
|
||||
mask: true
|
||||
- name: wait-for-dns.service
|
||||
enabled: true
|
||||
contents: |
|
||||
@ -24,7 +26,7 @@ systemd:
|
||||
Description=Kubelet (System Container)
|
||||
Wants=rpc-statd.service
|
||||
[Service]
|
||||
Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.23.0
|
||||
Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.23.4
|
||||
ExecStartPre=/bin/mkdir -p /etc/cni/net.d
|
||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
|
||||
ExecStartPre=/bin/mkdir -p /opt/cni/bin
|
||||
@ -43,7 +45,7 @@ systemd:
|
||||
--volume /run:/run \
|
||||
--volume /sys/fs/cgroup:/sys/fs/cgroup \
|
||||
--volume /var/lib/calico:/var/lib/calico:ro \
|
||||
--volume /var/lib/docker:/var/lib/docker \
|
||||
--volume /var/lib/containerd:/var/lib/containerd \
|
||||
--volume /var/lib/kubelet:/var/lib/kubelet:rshared,z \
|
||||
--volume /var/log:/var/log \
|
||||
--volume /var/run/lock:/var/run/lock:z \
|
||||
@ -55,13 +57,14 @@ systemd:
|
||||
--bootstrap-kubeconfig=/etc/kubernetes/kubeconfig \
|
||||
--cgroup-driver=systemd \
|
||||
--cgroups-per-qos=true \
|
||||
--container-runtime=remote \
|
||||
--container-runtime-endpoint=unix:///run/containerd/containerd.sock \
|
||||
--enforce-node-allocatable=pods \
|
||||
--client-ca-file=/etc/kubernetes/ca.crt \
|
||||
--cluster_dns=${cluster_dns_service_ip} \
|
||||
--cluster_domain=${cluster_domain_suffix} \
|
||||
--healthz-port=0 \
|
||||
--kubeconfig=/var/lib/kubelet/kubeconfig \
|
||||
--network-plugin=cni \
|
||||
--node-labels=node.kubernetes.io/node \
|
||||
%{~ for label in split(",", node_labels) ~}
|
||||
--node-labels=${label} \
|
||||
@ -86,7 +89,7 @@ systemd:
|
||||
[Unit]
|
||||
Description=Delete Kubernetes node on shutdown
|
||||
[Service]
|
||||
Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.23.0
|
||||
Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.23.4
|
||||
Type=oneshot
|
||||
RemainAfterExit=true
|
||||
ExecStart=/bin/true
|
||||
@ -126,10 +129,28 @@ storage:
|
||||
DefaultMemoryAccounting=yes
|
||||
DefaultBlockIOAccounting=yes
|
||||
- path: /etc/fedora-coreos/iptables-legacy.stamp
|
||||
- path: /etc/containerd/config.toml
|
||||
overwrite: true
|
||||
contents:
|
||||
inline: |
|
||||
version = 2
|
||||
root = "/var/lib/containerd"
|
||||
state = "/run/containerd"
|
||||
subreaper = true
|
||||
oom_score = -999
|
||||
[grpc]
|
||||
address = "/run/containerd/containerd.sock"
|
||||
uid = 0
|
||||
gid = 0
|
||||
[plugins."io.containerd.grpc.v1.cri"]
|
||||
enable_selinux = true
|
||||
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
|
||||
runtime_type = "io.containerd.runc.v2"
|
||||
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
|
||||
SystemdCgroup = true
|
||||
passwd:
|
||||
users:
|
||||
- name: core
|
||||
ssh_authorized_keys:
|
||||
- ${ssh_authorized_key}
|
||||
|
||||
|
||||
|
@ -11,7 +11,7 @@ Typhoon distributes upstream Kubernetes, architectural conventions, and cluster
|
||||
|
||||
## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>
|
||||
|
||||
* Kubernetes v1.23.0 (upstream)
|
||||
* Kubernetes v1.23.4 (upstream)
|
||||
* Single or multi-master, [Calico](https://www.projectcalico.org/) or [Cilium](https://github.com/cilium/cilium) or [flannel](https://github.com/coreos/flannel) networking
|
||||
* On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
|
||||
* Advanced features like [worker pools](https://typhoon.psdn.io/advanced/worker-pools/), [low-priority](https://typhoon.psdn.io/flatcar-linux/azure/#low-priority) workers, and [snippets](https://typhoon.psdn.io/advanced/customization/#hosts) customization
|
||||
|
@ -1,6 +1,6 @@
|
||||
# Kubernetes assets (kubeconfig, manifests)
|
||||
module "bootstrap" {
|
||||
source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=37f45cb28be2188befb5304794ba312cd8048fab"
|
||||
source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=205e5f212b634b3f590c0a91f60f4540b145045d"
|
||||
|
||||
cluster_name = var.cluster_name
|
||||
api_servers = [format("%s.%s", var.cluster_name, var.dns_zone)]
|
||||
|
@ -10,7 +10,7 @@ systemd:
|
||||
Requires=docker.service
|
||||
After=docker.service
|
||||
[Service]
|
||||
Environment=ETCD_IMAGE=quay.io/coreos/etcd:v3.5.1
|
||||
Environment=ETCD_IMAGE=quay.io/coreos/etcd:v3.5.2
|
||||
ExecStartPre=/usr/bin/docker run -d \
|
||||
--name etcd \
|
||||
--network host \
|
||||
@ -55,7 +55,7 @@ systemd:
|
||||
After=docker.service
|
||||
Wants=rpc-statd.service
|
||||
[Service]
|
||||
Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.23.0
|
||||
Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.23.4
|
||||
ExecStartPre=/bin/mkdir -p /etc/cni/net.d
|
||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
|
||||
ExecStartPre=/bin/mkdir -p /opt/cni/bin
|
||||
@ -75,7 +75,7 @@ systemd:
|
||||
-v /run:/run \
|
||||
-v /sys/fs/cgroup:/sys/fs/cgroup \
|
||||
-v /var/lib/calico:/var/lib/calico:ro \
|
||||
-v /var/lib/docker:/var/lib/docker \
|
||||
-v /var/lib/containerd:/var/lib/containerd \
|
||||
-v /var/lib/kubelet:/var/lib/kubelet:rshared \
|
||||
-v /var/log:/var/log \
|
||||
-v /opt/cni/bin:/opt/cni/bin \
|
||||
@ -85,12 +85,13 @@ systemd:
|
||||
--authorization-mode=Webhook \
|
||||
--bootstrap-kubeconfig=/etc/kubernetes/kubeconfig \
|
||||
--cgroup-driver=systemd \
|
||||
--container-runtime=remote \
|
||||
--container-runtime-endpoint=unix:///run/containerd/containerd.sock \
|
||||
--client-ca-file=/etc/kubernetes/ca.crt \
|
||||
--cluster_dns=${cluster_dns_service_ip} \
|
||||
--cluster_domain=${cluster_domain_suffix} \
|
||||
--healthz-port=0 \
|
||||
--kubeconfig=/var/lib/kubelet/kubeconfig \
|
||||
--network-plugin=cni \
|
||||
--node-labels=node.kubernetes.io/controller="true" \
|
||||
--pod-manifest-path=/etc/kubernetes/manifests \
|
||||
--read-only-port=0 \
|
||||
@ -116,7 +117,7 @@ systemd:
|
||||
Type=oneshot
|
||||
RemainAfterExit=true
|
||||
WorkingDirectory=/opt/bootstrap
|
||||
Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.23.0
|
||||
Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.23.4
|
||||
ExecStart=/usr/bin/docker run \
|
||||
-v /etc/kubernetes/pki:/etc/kubernetes/pki:ro \
|
||||
-v /opt/bootstrap/assets:/assets:ro \
|
||||
|
@ -25,7 +25,7 @@ resource "null_resource" "copy-controller-secrets" {
|
||||
|
||||
provisioner "file" {
|
||||
content = join("\n", local.assets_bundle)
|
||||
destination = "$HOME/assets"
|
||||
destination = "/home/core/assets"
|
||||
}
|
||||
|
||||
provisioner "remote-exec" {
|
||||
|
@ -90,8 +90,8 @@ variable "ssh_authorized_key" {
|
||||
|
||||
variable "networking" {
|
||||
type = string
|
||||
description = "Choice of networking provider (flannel or calico)"
|
||||
default = "calico"
|
||||
description = "Choice of networking provider (flannel, calico, or cilium)"
|
||||
default = "cilium"
|
||||
}
|
||||
|
||||
variable "host_cidr" {
|
||||
|
@ -27,7 +27,7 @@ systemd:
|
||||
After=docker.service
|
||||
Wants=rpc-statd.service
|
||||
[Service]
|
||||
Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.23.0
|
||||
Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.23.4
|
||||
ExecStartPre=/bin/mkdir -p /etc/cni/net.d
|
||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
|
||||
ExecStartPre=/bin/mkdir -p /opt/cni/bin
|
||||
@ -50,7 +50,7 @@ systemd:
|
||||
-v /run:/run \
|
||||
-v /sys/fs/cgroup:/sys/fs/cgroup \
|
||||
-v /var/lib/calico:/var/lib/calico:ro \
|
||||
-v /var/lib/docker:/var/lib/docker \
|
||||
-v /var/lib/containerd:/var/lib/containerd \
|
||||
-v /var/lib/kubelet:/var/lib/kubelet:rshared \
|
||||
-v /var/log:/var/log \
|
||||
-v /opt/cni/bin:/opt/cni/bin \
|
||||
@ -60,12 +60,13 @@ systemd:
|
||||
--authorization-mode=Webhook \
|
||||
--bootstrap-kubeconfig=/etc/kubernetes/kubeconfig \
|
||||
--cgroup-driver=systemd \
|
||||
--container-runtime=remote \
|
||||
--container-runtime-endpoint=unix:///run/containerd/containerd.sock \
|
||||
--client-ca-file=/etc/kubernetes/ca.crt \
|
||||
--cluster_dns=${cluster_dns_service_ip} \
|
||||
--cluster_domain=${cluster_domain_suffix} \
|
||||
--healthz-port=0 \
|
||||
--kubeconfig=/var/lib/kubelet/kubeconfig \
|
||||
--network-plugin=cni \
|
||||
--node-labels=node.kubernetes.io/node \
|
||||
%{~ for label in split(",", node_labels) ~}
|
||||
--node-labels=${label} \
|
||||
@ -91,7 +92,7 @@ systemd:
|
||||
[Unit]
|
||||
Description=Delete Kubernetes node on shutdown
|
||||
[Service]
|
||||
Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.23.0
|
||||
Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.23.4
|
||||
Type=oneshot
|
||||
RemainAfterExit=true
|
||||
ExecStart=/bin/true
|
||||
|
@ -11,7 +11,7 @@ Typhoon distributes upstream Kubernetes, architectural conventions, and cluster
|
||||
|
||||
## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>
|
||||
|
||||
* Kubernetes v1.23.0 (upstream)
|
||||
* Kubernetes v1.23.4 (upstream)
|
||||
* Single or multi-master, [Calico](https://www.projectcalico.org/) or [Cilium](https://github.com/cilium/cilium) or [flannel](https://github.com/coreos/flannel) networking
|
||||
* On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/), SELinux enforcing
|
||||
* Advanced features like [snippets](https://typhoon.psdn.io/advanced/customization/#hosts) customization
|
||||
|
@ -1,6 +1,6 @@
|
||||
# Kubernetes assets (kubeconfig, manifests)
|
||||
module "bootstrap" {
|
||||
source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=37f45cb28be2188befb5304794ba312cd8048fab"
|
||||
source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=205e5f212b634b3f590c0a91f60f4540b145045d"
|
||||
|
||||
cluster_name = var.cluster_name
|
||||
api_servers = [var.k8s_domain_name]
|
||||
|
@ -12,7 +12,7 @@ systemd:
|
||||
Wants=network-online.target network.target
|
||||
After=network-online.target
|
||||
[Service]
|
||||
Environment=ETCD_IMAGE=quay.io/coreos/etcd:v3.5.1
|
||||
Environment=ETCD_IMAGE=quay.io/coreos/etcd:v3.5.2
|
||||
Type=exec
|
||||
ExecStartPre=/bin/mkdir -p /var/lib/etcd
|
||||
ExecStartPre=-/usr/bin/podman rm etcd
|
||||
@ -29,8 +29,10 @@ systemd:
|
||||
LimitNOFILE=40000
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
- name: docker.service
|
||||
- name: containerd.service
|
||||
enabled: true
|
||||
- name: docker.service
|
||||
mask: true
|
||||
- name: wait-for-dns.service
|
||||
enabled: true
|
||||
contents: |
|
||||
@ -50,7 +52,7 @@ systemd:
|
||||
Description=Kubelet (System Container)
|
||||
Wants=rpc-statd.service
|
||||
[Service]
|
||||
Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.23.0
|
||||
Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.23.4
|
||||
ExecStartPre=/bin/mkdir -p /etc/cni/net.d
|
||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
|
||||
ExecStartPre=/bin/mkdir -p /opt/cni/bin
|
||||
@ -69,7 +71,7 @@ systemd:
|
||||
--volume /run:/run \
|
||||
--volume /sys/fs/cgroup:/sys/fs/cgroup \
|
||||
--volume /var/lib/calico:/var/lib/calico:ro \
|
||||
--volume /var/lib/docker:/var/lib/docker \
|
||||
--volume /var/lib/containerd:/var/lib/containerd \
|
||||
--volume /var/lib/kubelet:/var/lib/kubelet:rshared,z \
|
||||
--volume /var/log:/var/log \
|
||||
--volume /var/run/lock:/var/run/lock:z \
|
||||
@ -81,6 +83,8 @@ systemd:
|
||||
--bootstrap-kubeconfig=/etc/kubernetes/kubeconfig \
|
||||
--cgroup-driver=systemd \
|
||||
--cgroups-per-qos=true \
|
||||
--container-runtime=remote \
|
||||
--container-runtime-endpoint=unix:///run/containerd/containerd.sock \
|
||||
--enforce-node-allocatable=pods \
|
||||
--client-ca-file=/etc/kubernetes/ca.crt \
|
||||
--cluster_dns=${cluster_dns_service_ip} \
|
||||
@ -88,7 +92,6 @@ systemd:
|
||||
--healthz-port=0 \
|
||||
--hostname-override=${domain_name} \
|
||||
--kubeconfig=/var/lib/kubelet/kubeconfig \
|
||||
--network-plugin=cni \
|
||||
--node-labels=node.kubernetes.io/controller="true" \
|
||||
--pod-manifest-path=/etc/kubernetes/manifests \
|
||||
--read-only-port=0 \
|
||||
@ -120,7 +123,7 @@ systemd:
|
||||
Type=oneshot
|
||||
RemainAfterExit=true
|
||||
WorkingDirectory=/opt/bootstrap
|
||||
Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.23.0
|
||||
Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.23.4
|
||||
ExecStartPre=-/usr/bin/podman rm bootstrap
|
||||
ExecStart=/usr/bin/podman run --name bootstrap \
|
||||
--network host \
|
||||
@ -224,6 +227,25 @@ storage:
|
||||
ETCD_PEER_KEY_FILE=/etc/ssl/certs/etcd/peer.key
|
||||
ETCD_PEER_CLIENT_CERT_AUTH=true
|
||||
- path: /etc/fedora-coreos/iptables-legacy.stamp
|
||||
- path: /etc/containerd/config.toml
|
||||
overwrite: true
|
||||
contents:
|
||||
inline: |
|
||||
version = 2
|
||||
root = "/var/lib/containerd"
|
||||
state = "/run/containerd"
|
||||
subreaper = true
|
||||
oom_score = -999
|
||||
[grpc]
|
||||
address = "/run/containerd/containerd.sock"
|
||||
uid = 0
|
||||
gid = 0
|
||||
[plugins."io.containerd.grpc.v1.cri"]
|
||||
enable_selinux = true
|
||||
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
|
||||
runtime_type = "io.containerd.runc.v2"
|
||||
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
|
||||
SystemdCgroup = true
|
||||
passwd:
|
||||
users:
|
||||
- name: core
|
||||
|
@@ -3,8 +3,10 @@ variant: fcos
version: 1.4.0
systemd:
units:
- name: docker.service
- name: containerd.service
enabled: true
- name: docker.service
mask: true
- name: wait-for-dns.service
enabled: true
contents: |
@@ -23,7 +25,7 @@ systemd:
Description=Kubelet (System Container)
Wants=rpc-statd.service
[Service]
Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.23.0
Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.23.4
ExecStartPre=/bin/mkdir -p /etc/cni/net.d
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
ExecStartPre=/bin/mkdir -p /opt/cni/bin
@@ -42,7 +44,7 @@ systemd:
--volume /run:/run \
--volume /sys/fs/cgroup:/sys/fs/cgroup \
--volume /var/lib/calico:/var/lib/calico:ro \
--volume /var/lib/docker:/var/lib/docker \
--volume /var/lib/containerd:/var/lib/containerd \
--volume /var/lib/kubelet:/var/lib/kubelet:rshared,z \
--volume /var/log:/var/log \
--volume /var/run/lock:/var/run/lock:z \
@@ -54,6 +56,8 @@ systemd:
--bootstrap-kubeconfig=/etc/kubernetes/kubeconfig \
--cgroup-driver=systemd \
--cgroups-per-qos=true \
--container-runtime=remote \
--container-runtime-endpoint=unix:///run/containerd/containerd.sock \
--enforce-node-allocatable=pods \
--client-ca-file=/etc/kubernetes/ca.crt \
--cluster_dns=${cluster_dns_service_ip} \
@@ -61,7 +65,6 @@ systemd:
--healthz-port=0 \
--hostname-override=${domain_name} \
--kubeconfig=/var/lib/kubelet/kubeconfig \
--network-plugin=cni \
--node-labels=node.kubernetes.io/node \
%{~ for label in compact(split(",", node_labels)) ~}
--node-labels=${label} \
@@ -122,6 +125,25 @@ storage:
DefaultMemoryAccounting=yes
DefaultBlockIOAccounting=yes
- path: /etc/fedora-coreos/iptables-legacy.stamp
- path: /etc/containerd/config.toml
overwrite: true
contents:
inline: |
version = 2
root = "/var/lib/containerd"
state = "/run/containerd"
subreaper = true
oom_score = -999
[grpc]
address = "/run/containerd/containerd.sock"
uid = 0
gid = 0
[plugins."io.containerd.grpc.v1.cri"]
enable_selinux = true
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
runtime_type = "io.containerd.runc.v2"
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
SystemdCgroup = true
passwd:
users:
- name: core
@@ -28,17 +28,17 @@ resource "null_resource" "copy-controller-secrets" {

provisioner "file" {
content = module.bootstrap.kubeconfig-kubelet
destination = "$HOME/kubeconfig"
destination = "/home/core/kubeconfig"
}

provisioner "file" {
content = join("\n", local.assets_bundle)
destination = "$HOME/assets"
destination = "/home/core/assets"
}

provisioner "remote-exec" {
inline = [
"sudo mv $HOME/kubeconfig /etc/kubernetes/kubeconfig",
"sudo mv /home/core/kubeconfig /etc/kubernetes/kubeconfig",
"sudo touch /etc/kubernetes",
"sudo /opt/bootstrap/layout",
]
@@ -65,12 +65,12 @@ resource "null_resource" "copy-worker-secrets" {

provisioner "file" {
content = module.bootstrap.kubeconfig-kubelet
destination = "$HOME/kubeconfig"
destination = "/home/core/kubeconfig"
}

provisioner "remote-exec" {
inline = [
"sudo mv $HOME/kubeconfig /etc/kubernetes/kubeconfig",
"sudo mv /home/core/kubeconfig /etc/kubernetes/kubeconfig",
"sudo touch /etc/kubernetes",
]
}
@@ -87,8 +87,8 @@ variable "ssh_authorized_key" {

variable "networking" {
type = string
description = "Choice of networking provider (flannel or calico)"
default = "calico"
description = "Choice of networking provider (flannel, calico, or cilium)"
default = "cilium"
}

variable "network_mtu" {
@@ -11,7 +11,7 @@ Typhoon distributes upstream Kubernetes, architectural conventions, and cluster

## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>

* Kubernetes v1.23.0 (upstream)
* Kubernetes v1.23.4 (upstream)
* Single or multi-master, [Calico](https://www.projectcalico.org/) or [Cilium](https://github.com/cilium/cilium) or [flannel](https://github.com/coreos/flannel) networking
* On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
* Advanced features like [snippets](https://typhoon.psdn.io/advanced/customization/#hosts) customization

@@ -1,6 +1,6 @@
# Kubernetes assets (kubeconfig, manifests)
module "bootstrap" {
source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=37f45cb28be2188befb5304794ba312cd8048fab"
source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=205e5f212b634b3f590c0a91f60f4540b145045d"

cluster_name = var.cluster_name
api_servers = [var.k8s_domain_name]
@ -10,7 +10,7 @@ systemd:
|
||||
Requires=docker.service
|
||||
After=docker.service
|
||||
[Service]
|
||||
Environment=ETCD_IMAGE=quay.io/coreos/etcd:v3.5.1
|
||||
Environment=ETCD_IMAGE=quay.io/coreos/etcd:v3.5.2
|
||||
ExecStartPre=/usr/bin/docker run -d \
|
||||
--name etcd \
|
||||
--network host \
|
||||
@ -63,7 +63,7 @@ systemd:
|
||||
After=docker.service
|
||||
Wants=rpc-statd.service
|
||||
[Service]
|
||||
Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.23.0
|
||||
Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.23.4
|
||||
ExecStartPre=/bin/mkdir -p /etc/cni/net.d
|
||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
|
||||
ExecStartPre=/bin/mkdir -p /opt/cni/bin
|
||||
@ -83,7 +83,7 @@ systemd:
|
||||
-v /run:/run \
|
||||
-v /sys/fs/cgroup:/sys/fs/cgroup \
|
||||
-v /var/lib/calico:/var/lib/calico:ro \
|
||||
-v /var/lib/docker:/var/lib/docker \
|
||||
-v /var/lib/containerd:/var/lib/containerd \
|
||||
-v /var/lib/kubelet:/var/lib/kubelet:rshared \
|
||||
-v /var/log:/var/log \
|
||||
-v /opt/cni/bin:/opt/cni/bin \
|
||||
@ -93,13 +93,14 @@ systemd:
|
||||
--authorization-mode=Webhook \
|
||||
--bootstrap-kubeconfig=/etc/kubernetes/kubeconfig \
|
||||
--cgroup-driver=systemd \
|
||||
--container-runtime=remote \
|
||||
--container-runtime-endpoint=unix:///run/containerd/containerd.sock \
|
||||
--client-ca-file=/etc/kubernetes/ca.crt \
|
||||
--cluster_dns=${cluster_dns_service_ip} \
|
||||
--cluster_domain=${cluster_domain_suffix} \
|
||||
--healthz-port=0 \
|
||||
--hostname-override=${domain_name} \
|
||||
--kubeconfig=/var/lib/kubelet/kubeconfig \
|
||||
--network-plugin=cni \
|
||||
--node-labels=node.kubernetes.io/controller="true" \
|
||||
--pod-manifest-path=/etc/kubernetes/manifests \
|
||||
--read-only-port=0 \
|
||||
@ -125,7 +126,7 @@ systemd:
|
||||
Type=oneshot
|
||||
RemainAfterExit=true
|
||||
WorkingDirectory=/opt/bootstrap
|
||||
Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.23.0
|
||||
Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.23.4
|
||||
ExecStart=/usr/bin/docker run \
|
||||
-v /etc/kubernetes/pki:/etc/kubernetes/pki:ro \
|
||||
-v /opt/bootstrap/assets:/assets:ro \
|
||||
|
@ -35,7 +35,7 @@ systemd:
|
||||
After=docker.service
|
||||
Wants=rpc-statd.service
|
||||
[Service]
|
||||
Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.23.0
|
||||
Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.23.4
|
||||
ExecStartPre=/bin/mkdir -p /etc/cni/net.d
|
||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
|
||||
ExecStartPre=/bin/mkdir -p /opt/cni/bin
|
||||
@ -58,7 +58,7 @@ systemd:
|
||||
-v /run:/run \
|
||||
-v /sys/fs/cgroup:/sys/fs/cgroup \
|
||||
-v /var/lib/calico:/var/lib/calico:ro \
|
||||
-v /var/lib/docker:/var/lib/docker \
|
||||
-v /var/lib/containerd:/var/lib/containerd \
|
||||
-v /var/lib/kubelet:/var/lib/kubelet:rshared \
|
||||
-v /var/log:/var/log \
|
||||
-v /opt/cni/bin:/opt/cni/bin \
|
||||
@ -68,13 +68,14 @@ systemd:
|
||||
--authorization-mode=Webhook \
|
||||
--bootstrap-kubeconfig=/etc/kubernetes/kubeconfig \
|
||||
--cgroup-driver=systemd \
|
||||
--container-runtime=remote \
|
||||
--container-runtime-endpoint=unix:///run/containerd/containerd.sock \
|
||||
--client-ca-file=/etc/kubernetes/ca.crt \
|
||||
--cluster_dns=${cluster_dns_service_ip} \
|
||||
--cluster_domain=${cluster_domain_suffix} \
|
||||
--healthz-port=0 \
|
||||
--hostname-override=${domain_name} \
|
||||
--kubeconfig=/var/lib/kubelet/kubeconfig \
|
||||
--network-plugin=cni \
|
||||
--node-labels=node.kubernetes.io/node \
|
||||
%{~ for label in compact(split(",", node_labels)) ~}
|
||||
--node-labels=${label} \
|
||||
|
@ -29,17 +29,17 @@ resource "null_resource" "copy-controller-secrets" {
|
||||
|
||||
provisioner "file" {
|
||||
content = module.bootstrap.kubeconfig-kubelet
|
||||
destination = "$HOME/kubeconfig"
|
||||
destination = "/home/core/kubeconfig"
|
||||
}
|
||||
|
||||
provisioner "file" {
|
||||
content = join("\n", local.assets_bundle)
|
||||
destination = "$HOME/assets"
|
||||
destination = "/home/core/assets"
|
||||
}
|
||||
|
||||
provisioner "remote-exec" {
|
||||
inline = [
|
||||
"sudo mv $HOME/kubeconfig /etc/kubernetes/kubeconfig",
|
||||
"sudo mv /home/core/kubeconfig /etc/kubernetes/kubeconfig",
|
||||
"sudo /opt/bootstrap/layout",
|
||||
]
|
||||
}
|
||||
@ -66,12 +66,12 @@ resource "null_resource" "copy-worker-secrets" {
|
||||
|
||||
provisioner "file" {
|
||||
content = module.bootstrap.kubeconfig-kubelet
|
||||
destination = "$HOME/kubeconfig"
|
||||
destination = "/home/core/kubeconfig"
|
||||
}
|
||||
|
||||
provisioner "remote-exec" {
|
||||
inline = [
|
||||
"sudo mv $HOME/kubeconfig /etc/kubernetes/kubeconfig",
|
||||
"sudo mv /home/core/kubeconfig /etc/kubernetes/kubeconfig",
|
||||
]
|
||||
}
|
||||
}
|
||||
|
@ -86,8 +86,8 @@ variable "ssh_authorized_key" {
|
||||
|
||||
variable "networking" {
|
||||
type = string
|
||||
description = "Choice of networking provider (flannel or calico)"
|
||||
default = "calico"
|
||||
description = "Choice of networking provider (flannel, calico, or cilium)"
|
||||
default = "cilium"
|
||||
}
|
||||
|
||||
variable "network_mtu" {
|
||||
|
@ -11,7 +11,7 @@ Typhoon distributes upstream Kubernetes, architectural conventions, and cluster
|
||||
|
||||
## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>
|
||||
|
||||
* Kubernetes v1.23.0 (upstream)
|
||||
* Kubernetes v1.23.4 (upstream)
|
||||
* Single or multi-master, [Calico](https://www.projectcalico.org/) or [flannel](https://github.com/coreos/flannel) networking
|
||||
* On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/), SELinux enforcing
|
||||
* Advanced features like [snippets](https://typhoon.psdn.io/advanced/customization/#hosts) customization
|
||||
|
@ -1,6 +1,6 @@
|
||||
# Kubernetes assets (kubeconfig, manifests)
|
||||
module "bootstrap" {
|
||||
source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=37f45cb28be2188befb5304794ba312cd8048fab"
|
||||
source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=205e5f212b634b3f590c0a91f60f4540b145045d"
|
||||
|
||||
cluster_name = var.cluster_name
|
||||
api_servers = [format("%s.%s", var.cluster_name, var.dns_zone)]
|
||||
|
@ -41,7 +41,6 @@ resource "digitalocean_droplet" "controllers" {
|
||||
size = var.controller_type
|
||||
|
||||
# network
|
||||
private_networking = true
|
||||
vpc_uuid = digitalocean_vpc.network.id
|
||||
# TODO: Only official DigitalOcean images support IPv6
|
||||
ipv6 = false
|
||||
|
@ -12,7 +12,7 @@ systemd:
|
||||
Wants=network-online.target network.target
|
||||
After=network-online.target
|
||||
[Service]
|
||||
Environment=ETCD_IMAGE=quay.io/coreos/etcd:v3.5.1
|
||||
Environment=ETCD_IMAGE=quay.io/coreos/etcd:v3.5.2
|
||||
Type=exec
|
||||
ExecStartPre=/bin/mkdir -p /var/lib/etcd
|
||||
ExecStartPre=-/usr/bin/podman rm etcd
|
||||
@ -29,8 +29,10 @@ systemd:
|
||||
LimitNOFILE=40000
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
- name: docker.service
|
||||
- name: containerd.service
|
||||
enabled: true
|
||||
- name: docker.service
|
||||
mask: true
|
||||
- name: wait-for-dns.service
|
||||
enabled: true
|
||||
contents: |
|
||||
@ -52,7 +54,7 @@ systemd:
|
||||
After=afterburn.service
|
||||
Wants=rpc-statd.service
|
||||
[Service]
|
||||
Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.23.0
|
||||
Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.23.4
|
||||
EnvironmentFile=/run/metadata/afterburn
|
||||
ExecStartPre=/bin/mkdir -p /etc/cni/net.d
|
||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
|
||||
@ -72,7 +74,7 @@ systemd:
|
||||
--volume /run:/run \
|
||||
--volume /sys/fs/cgroup:/sys/fs/cgroup \
|
||||
--volume /var/lib/calico:/var/lib/calico:ro \
|
||||
--volume /var/lib/docker:/var/lib/docker \
|
||||
--volume /var/lib/containerd:/var/lib/containerd \
|
||||
--volume /var/lib/kubelet:/var/lib/kubelet:rshared,z \
|
||||
--volume /var/log:/var/log \
|
||||
--volume /var/run/lock:/var/run/lock:z \
|
||||
@ -84,6 +86,8 @@ systemd:
|
||||
--bootstrap-kubeconfig=/etc/kubernetes/kubeconfig \
|
||||
--cgroup-driver=systemd \
|
||||
--cgroups-per-qos=true \
|
||||
--container-runtime=remote \
|
||||
--container-runtime-endpoint=unix:///run/containerd/containerd.sock \
|
||||
--enforce-node-allocatable=pods \
|
||||
--client-ca-file=/etc/kubernetes/ca.crt \
|
||||
--cluster_dns=${cluster_dns_service_ip} \
|
||||
@ -91,7 +95,6 @@ systemd:
|
||||
--healthz-port=0 \
|
||||
--hostname-override=$${AFTERBURN_DIGITALOCEAN_IPV4_PRIVATE_0} \
|
||||
--kubeconfig=/var/lib/kubelet/kubeconfig \
|
||||
--network-plugin=cni \
|
||||
--node-labels=node.kubernetes.io/controller="true" \
|
||||
--pod-manifest-path=/etc/kubernetes/manifests \
|
||||
--read-only-port=0 \
|
||||
@ -130,7 +133,7 @@ systemd:
|
||||
--volume /opt/bootstrap/assets:/assets:ro,Z \
|
||||
--volume /opt/bootstrap/apply:/apply:ro,Z \
|
||||
--entrypoint=/apply \
|
||||
quay.io/poseidon/kubelet:v1.23.0
|
||||
quay.io/poseidon/kubelet:v1.23.4
|
||||
ExecStartPost=/bin/touch /opt/bootstrap/bootstrap.done
|
||||
ExecStartPost=-/usr/bin/podman stop bootstrap
|
||||
storage:
|
||||
@ -221,4 +224,23 @@ storage:
|
||||
ETCD_PEER_KEY_FILE=/etc/ssl/certs/etcd/peer.key
|
||||
ETCD_PEER_CLIENT_CERT_AUTH=true
|
||||
- path: /etc/fedora-coreos/iptables-legacy.stamp
|
||||
- path: /etc/containerd/config.toml
|
||||
overwrite: true
|
||||
contents:
|
||||
inline: |
|
||||
version = 2
|
||||
root = "/var/lib/containerd"
|
||||
state = "/run/containerd"
|
||||
subreaper = true
|
||||
oom_score = -999
|
||||
[grpc]
|
||||
address = "/run/containerd/containerd.sock"
|
||||
uid = 0
|
||||
gid = 0
|
||||
[plugins."io.containerd.grpc.v1.cri"]
|
||||
enable_selinux = true
|
||||
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
|
||||
runtime_type = "io.containerd.runc.v2"
|
||||
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
|
||||
SystemdCgroup = true
|
||||
|
||||
|
@ -3,8 +3,10 @@ variant: fcos
|
||||
version: 1.4.0
|
||||
systemd:
|
||||
units:
|
||||
- name: docker.service
|
||||
- name: containerd.service
|
||||
enabled: true
|
||||
- name: docker.service
|
||||
mask: true
|
||||
- name: wait-for-dns.service
|
||||
enabled: true
|
||||
contents: |
|
||||
@ -26,7 +28,7 @@ systemd:
|
||||
After=afterburn.service
|
||||
Wants=rpc-statd.service
|
||||
[Service]
|
||||
Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.23.0
|
||||
Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.23.4
|
||||
EnvironmentFile=/run/metadata/afterburn
|
||||
ExecStartPre=/bin/mkdir -p /etc/cni/net.d
|
||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
|
||||
@ -46,7 +48,7 @@ systemd:
|
||||
--volume /run:/run \
|
||||
--volume /sys/fs/cgroup:/sys/fs/cgroup \
|
||||
--volume /var/lib/calico:/var/lib/calico:ro \
|
||||
--volume /var/lib/docker:/var/lib/docker \
|
||||
--volume /var/lib/containerd:/var/lib/containerd \
|
||||
--volume /var/lib/kubelet:/var/lib/kubelet:rshared,z \
|
||||
--volume /var/log:/var/log \
|
||||
--volume /var/run/lock:/var/run/lock:z \
|
||||
@ -58,6 +60,8 @@ systemd:
|
||||
--bootstrap-kubeconfig=/etc/kubernetes/kubeconfig \
|
||||
--cgroup-driver=systemd \
|
||||
--cgroups-per-qos=true \
|
||||
--container-runtime=remote \
|
||||
--container-runtime-endpoint=unix:///run/containerd/containerd.sock \
|
||||
--enforce-node-allocatable=pods \
|
||||
--client-ca-file=/etc/kubernetes/ca.crt \
|
||||
--cluster_dns=${cluster_dns_service_ip} \
|
||||
@ -65,7 +69,6 @@ systemd:
|
||||
--healthz-port=0 \
|
||||
--hostname-override=$${AFTERBURN_DIGITALOCEAN_IPV4_PRIVATE_0} \
|
||||
--kubeconfig=/var/lib/kubelet/kubeconfig \
|
||||
--network-plugin=cni \
|
||||
--node-labels=node.kubernetes.io/node \
|
||||
--pod-manifest-path=/etc/kubernetes/manifests \
|
||||
--read-only-port=0 \
|
||||
@ -93,7 +96,7 @@ systemd:
|
||||
[Unit]
|
||||
Description=Delete Kubernetes node on shutdown
|
||||
[Service]
|
||||
Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.23.0
|
||||
Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.23.4
|
||||
Type=oneshot
|
||||
RemainAfterExit=true
|
||||
ExecStart=/bin/true
|
||||
@ -128,3 +131,22 @@ storage:
|
||||
DefaultMemoryAccounting=yes
|
||||
DefaultBlockIOAccounting=yes
|
||||
- path: /etc/fedora-coreos/iptables-legacy.stamp
|
||||
- path: /etc/containerd/config.toml
|
||||
overwrite: true
|
||||
contents:
|
||||
inline: |
|
||||
version = 2
|
||||
root = "/var/lib/containerd"
|
||||
state = "/run/containerd"
|
||||
subreaper = true
|
||||
oom_score = -999
|
||||
[grpc]
|
||||
address = "/run/containerd/containerd.sock"
|
||||
uid = 0
|
||||
gid = 0
|
||||
[plugins."io.containerd.grpc.v1.cri"]
|
||||
enable_selinux = true
|
||||
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
|
||||
runtime_type = "io.containerd.runc.v2"
|
||||
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
|
||||
SystemdCgroup = true
|
||||
|
@ -25,17 +25,17 @@ resource "null_resource" "copy-controller-secrets" {
|
||||
|
||||
provisioner "file" {
|
||||
content = module.bootstrap.kubeconfig-kubelet
|
||||
destination = "$HOME/kubeconfig"
|
||||
destination = "/home/core/kubeconfig"
|
||||
}
|
||||
|
||||
provisioner "file" {
|
||||
content = join("\n", local.assets_bundle)
|
||||
destination = "$HOME/assets"
|
||||
destination = "/home/core/assets"
|
||||
}
|
||||
|
||||
provisioner "remote-exec" {
|
||||
inline = [
|
||||
"sudo mv $HOME/kubeconfig /etc/kubernetes/kubeconfig",
|
||||
"sudo mv /home/core/kubeconfig /etc/kubernetes/kubeconfig",
|
||||
"sudo touch /etc/kubernetes",
|
||||
"sudo /opt/bootstrap/layout",
|
||||
]
|
||||
@ -55,12 +55,12 @@ resource "null_resource" "copy-worker-secrets" {
|
||||
|
||||
provisioner "file" {
|
||||
content = module.bootstrap.kubeconfig-kubelet
|
||||
destination = "$HOME/kubeconfig"
|
||||
destination = "/home/core/kubeconfig"
|
||||
}
|
||||
|
||||
provisioner "remote-exec" {
|
||||
inline = [
|
||||
"sudo mv $HOME/kubeconfig /etc/kubernetes/kubeconfig",
|
||||
"sudo mv /home/core/kubeconfig /etc/kubernetes/kubeconfig",
|
||||
"sudo touch /etc/kubernetes",
|
||||
]
|
||||
}
|
||||
|
@ -67,8 +67,8 @@ variable "ssh_fingerprints" {
|
||||
|
||||
variable "networking" {
|
||||
type = string
|
||||
description = "Choice of networking provider (flannel or calico)"
|
||||
default = "calico"
|
||||
description = "Choice of networking provider (flannel, calico, or cilium)"
|
||||
default = "cilium"
|
||||
}
|
||||
|
||||
variable "pod_cidr" {
|
||||
|
@ -13,7 +13,7 @@ terraform {
|
||||
|
||||
digitalocean = {
|
||||
source = "digitalocean/digitalocean"
|
||||
version = "~> 1.20"
|
||||
version = ">= 2.12, < 3.0"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -37,7 +37,6 @@ resource "digitalocean_droplet" "workers" {
|
||||
size = var.worker_type
|
||||
|
||||
# network
|
||||
private_networking = true
|
||||
vpc_uuid = digitalocean_vpc.network.id
|
||||
# TODO: Only official DigitalOcean images support IPv6
|
||||
ipv6 = false
|
||||
|
@ -11,7 +11,7 @@ Typhoon distributes upstream Kubernetes, architectural conventions, and cluster
|
||||
|
||||
## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>
|
||||
|
||||
* Kubernetes v1.23.0 (upstream)
|
||||
* Kubernetes v1.23.4 (upstream)
|
||||
* Single or multi-master, [Calico](https://www.projectcalico.org/) or [Cilium](https://github.com/cilium/cilium) or [flannel](https://github.com/coreos/flannel) networking
|
||||
* On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
|
||||
* Advanced features like [snippets](https://typhoon.psdn.io/advanced/customization/#hosts) customization
|
||||
|
@ -1,6 +1,6 @@
|
||||
# Kubernetes assets (kubeconfig, manifests)
|
||||
module "bootstrap" {
|
||||
source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=37f45cb28be2188befb5304794ba312cd8048fab"
|
||||
source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=205e5f212b634b3f590c0a91f60f4540b145045d"
|
||||
|
||||
cluster_name = var.cluster_name
|
||||
api_servers = [format("%s.%s", var.cluster_name, var.dns_zone)]
|
||||
|
@ -10,7 +10,7 @@ systemd:
|
||||
Requires=docker.service
|
||||
After=docker.service
|
||||
[Service]
|
||||
Environment=ETCD_IMAGE=quay.io/coreos/etcd:v3.5.1
|
||||
Environment=ETCD_IMAGE=quay.io/coreos/etcd:v3.5.2
|
||||
ExecStartPre=/usr/bin/docker run -d \
|
||||
--name etcd \
|
||||
--network host \
|
||||
@ -65,7 +65,7 @@ systemd:
|
||||
After=coreos-metadata.service
|
||||
Wants=rpc-statd.service
|
||||
[Service]
|
||||
Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.23.0
|
||||
Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.23.4
|
||||
EnvironmentFile=/run/metadata/coreos
|
||||
ExecStartPre=/bin/mkdir -p /etc/cni/net.d
|
||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
|
||||
@ -86,7 +86,7 @@ systemd:
|
||||
-v /run:/run \
|
||||
-v /sys/fs/cgroup:/sys/fs/cgroup \
|
||||
-v /var/lib/calico:/var/lib/calico:ro \
|
||||
-v /var/lib/docker:/var/lib/docker \
|
||||
-v /var/lib/containerd:/var/lib/containerd \
|
||||
-v /var/lib/kubelet:/var/lib/kubelet:rshared \
|
||||
-v /var/log:/var/log \
|
||||
-v /opt/cni/bin:/opt/cni/bin \
|
||||
@ -96,13 +96,14 @@ systemd:
|
||||
--authorization-mode=Webhook \
|
||||
--bootstrap-kubeconfig=/etc/kubernetes/kubeconfig \
|
||||
--cgroup-driver=systemd \
|
||||
--container-runtime=remote \
|
||||
--container-runtime-endpoint=unix:///run/containerd/containerd.sock \
|
||||
--client-ca-file=/etc/kubernetes/ca.crt \
|
||||
--cluster_dns=${cluster_dns_service_ip} \
|
||||
--cluster_domain=${cluster_domain_suffix} \
|
||||
--healthz-port=0 \
|
||||
--hostname-override=$${COREOS_DIGITALOCEAN_IPV4_PRIVATE_0} \
|
||||
--kubeconfig=/var/lib/kubelet/kubeconfig \
|
||||
--network-plugin=cni \
|
||||
--node-labels=node.kubernetes.io/controller="true" \
|
||||
--pod-manifest-path=/etc/kubernetes/manifests \
|
||||
--read-only-port=0 \
|
||||
@ -128,7 +129,7 @@ systemd:
|
||||
Type=oneshot
|
||||
RemainAfterExit=true
|
||||
WorkingDirectory=/opt/bootstrap
|
||||
Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.23.0
|
||||
Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.23.4
|
||||
ExecStart=/usr/bin/docker run \
|
||||
-v /etc/kubernetes/pki:/etc/kubernetes/pki:ro \
|
||||
-v /opt/bootstrap/assets:/assets:ro \
|
||||
|
@ -37,7 +37,7 @@ systemd:
|
||||
After=coreos-metadata.service
|
||||
Wants=rpc-statd.service
|
||||
[Service]
|
||||
Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.23.0
|
||||
Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.23.4
|
||||
EnvironmentFile=/run/metadata/coreos
|
||||
ExecStartPre=/bin/mkdir -p /etc/cni/net.d
|
||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
|
||||
@ -61,7 +61,7 @@ systemd:
|
||||
-v /run:/run \
|
||||
-v /sys/fs/cgroup:/sys/fs/cgroup \
|
||||
-v /var/lib/calico:/var/lib/calico:ro \
|
||||
-v /var/lib/docker:/var/lib/docker \
|
||||
-v /var/lib/containerd:/var/lib/containerd \
|
||||
-v /var/lib/kubelet:/var/lib/kubelet:rshared \
|
||||
-v /var/log:/var/log \
|
||||
-v /opt/cni/bin:/opt/cni/bin \
|
||||
@ -71,13 +71,14 @@ systemd:
|
||||
--authorization-mode=Webhook \
|
||||
--bootstrap-kubeconfig=/etc/kubernetes/kubeconfig \
|
||||
--cgroup-driver=systemd \
|
||||
--container-runtime=remote \
|
||||
--container-runtime-endpoint=unix:///run/containerd/containerd.sock \
|
||||
--client-ca-file=/etc/kubernetes/ca.crt \
|
||||
--cluster_dns=${cluster_dns_service_ip} \
|
||||
--cluster_domain=${cluster_domain_suffix} \
|
||||
--healthz-port=0 \
|
||||
--hostname-override=$${COREOS_DIGITALOCEAN_IPV4_PRIVATE_0} \
|
||||
--kubeconfig=/var/lib/kubelet/kubeconfig \
|
||||
--network-plugin=cni \
|
||||
--node-labels=node.kubernetes.io/node \
|
||||
--pod-manifest-path=/etc/kubernetes/manifests \
|
||||
--read-only-port=0 \
|
||||
@ -97,7 +98,7 @@ systemd:
|
||||
[Unit]
|
||||
Description=Delete Kubernetes node on shutdown
|
||||
[Service]
|
||||
Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.23.0
|
||||
Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.23.4
|
||||
Type=oneshot
|
||||
RemainAfterExit=true
|
||||
ExecStart=/bin/true
|
||||
|
@ -46,7 +46,6 @@ resource "digitalocean_droplet" "controllers" {
|
||||
size = var.controller_type
|
||||
|
||||
# network
|
||||
private_networking = true
|
||||
vpc_uuid = digitalocean_vpc.network.id
|
||||
# TODO: Only official DigitalOcean images support IPv6
|
||||
ipv6 = false
|
||||
|
@ -25,17 +25,17 @@ resource "null_resource" "copy-controller-secrets" {
|
||||
|
||||
provisioner "file" {
|
||||
content = module.bootstrap.kubeconfig-kubelet
|
||||
destination = "$HOME/kubeconfig"
|
||||
destination = "/home/core/kubeconfig"
|
||||
}
|
||||
|
||||
provisioner "file" {
|
||||
content = join("\n", local.assets_bundle)
|
||||
destination = "$HOME/assets"
|
||||
destination = "/home/core/assets"
|
||||
}
|
||||
|
||||
provisioner "remote-exec" {
|
||||
inline = [
|
||||
"sudo mv $HOME/kubeconfig /etc/kubernetes/kubeconfig",
|
||||
"sudo mv /home/core/kubeconfig /etc/kubernetes/kubeconfig",
|
||||
"sudo /opt/bootstrap/layout",
|
||||
]
|
||||
}
|
||||
@ -54,12 +54,12 @@ resource "null_resource" "copy-worker-secrets" {
|
||||
|
||||
provisioner "file" {
|
||||
content = module.bootstrap.kubeconfig-kubelet
|
||||
destination = "$HOME/kubeconfig"
|
||||
destination = "/home/core/kubeconfig"
|
||||
}
|
||||
|
||||
provisioner "remote-exec" {
|
||||
inline = [
|
||||
"sudo mv $HOME/kubeconfig /etc/kubernetes/kubeconfig",
|
||||
"sudo mv /home/core/kubeconfig /etc/kubernetes/kubeconfig",
|
||||
]
|
||||
}
|
||||
}
|
||||
|
@ -67,8 +67,8 @@ variable "ssh_fingerprints" {
|
||||
|
||||
variable "networking" {
|
||||
type = string
|
||||
description = "Choice of networking provider (flannel or calico)"
|
||||
default = "calico"
|
||||
description = "Choice of networking provider (flannel, calico, or cilium)"
|
||||
default = "cilium"
|
||||
}
|
||||
|
||||
variable "pod_cidr" {
|
||||
|
@ -13,7 +13,7 @@ terraform {
|
||||
|
||||
digitalocean = {
|
||||
source = "digitalocean/digitalocean"
|
||||
version = "~> 1.20"
|
||||
version = ">= 2.12, < 3.0"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -35,7 +35,6 @@ resource "digitalocean_droplet" "workers" {
|
||||
size = var.worker_type
|
||||
|
||||
# network
|
||||
private_networking = true
|
||||
vpc_uuid = digitalocean_vpc.network.id
|
||||
# only official DigitalOcean images support IPv6
|
||||
ipv6 = local.is_official_image
|
||||
|
@@ -1,59 +1,83 @@
# ARM64

!!! warning
ARM64 support is experimental

Typhoon has experimental support for ARM64 with Fedora CoreOS on AWS. Full clusters can be created with ARM64 controller and worker nodes. Or worker pools of ARM64 nodes can be attached to an AMD64 cluster to create a hybrid/mixed architecture cluster.
Typhoon has experimental support for ARM64 on AWS, with Fedora CoreOS or Flatcar Linux. Clusters can be created with ARM64 controller and worker nodes. Or worker pools of ARM64 nodes can be attached to an AMD64 cluster to create a hybrid/mixed architecture cluster.

!!! note
Currently, CNI networking must be set to flannel or Cilium.
Currently, CNI networking must be set to `flannel` or `cilium`.

## Cluster

Create a cluster with ARM64 controller and worker nodes. Container workloads must be `arm64` compatible and use `arm64` container images.

```tf
module "gravitas" {
source = "git::https://github.com/poseidon/typhoon//aws/fedora-coreos/kubernetes?ref=v1.23.0"
=== "Fedora CoreOS Cluster (arm64)"

# AWS
cluster_name = "gravitas"
dns_zone = "aws.example.com"
dns_zone_id = "Z3PAABBCFAKEC0"
```tf
module "gravitas" {
source = "git::https://github.com/poseidon/typhoon//aws/fedora-coreos/kubernetes?ref=v1.23.4"

# configuration
ssh_authorized_key = "ssh-ed25519 AAAAB3Nz..."
# AWS
cluster_name = "gravitas"
dns_zone = "aws.example.com"
dns_zone_id = "Z3PAABBCFAKEC0"

# optional
arch = "arm64"
networking = "cilium"
worker_count = 2
worker_price = "0.0168"
# configuration
ssh_authorized_key = "ssh-ed25519 AAAAB3Nz..."

controller_type = "t4g.small"
worker_type = "t4g.small"
}
```
# optional
arch = "arm64"
networking = "cilium"
worker_count = 2
worker_price = "0.0168"

Verify the cluster has only arm64 (`aarch64`) nodes.
controller_type = "t4g.small"
worker_type = "t4g.small"
}
```

=== "Flatcar Linux Cluster (arm64)"

```tf
module "gravitas" {
source = "git::https://github.com/poseidon/typhoon//aws/flatcar-linux/kubernetes?ref=v1.23.4"

# AWS
cluster_name = "gravitas"
dns_zone = "aws.example.com"
dns_zone_id = "Z3PAABBCFAKEC0"

# configuration
ssh_authorized_key = "ssh-ed25519 AAAAB3Nz..."

# optional
arch = "arm64"
networking = "cilium"
worker_count = 2
worker_price = "0.0168"

controller_type = "t4g.small"
worker_type = "t4g.small"
}
```

Verify the cluster has only arm64 (`aarch64`) nodes. For Flatcar Linux, describe nodes.

```
$ kubectl get nodes -o wide
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
ip-10-0-12-178 Ready <none> 101s v1.23.0 10.0.12.178 <none> Fedora CoreOS 32.20201104.dev.0 5.8.17-200.fc32.aarch64 docker://19.3.11
ip-10-0-18-93 Ready <none> 102s v1.23.0 10.0.18.93 <none> Fedora CoreOS 32.20201104.dev.0 5.8.17-200.fc32.aarch64 docker://19.3.11
ip-10-0-90-10 Ready <none> 104s v1.23.0 10.0.90.10 <none> Fedora CoreOS 32.20201104.dev.0 5.8.17-200.fc32.aarch64 docker://19.3.11
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
ip-10-0-21-119 Ready <none> 77s v1.23.4 10.0.21.119 <none> Fedora CoreOS 35.20211215.3.0 5.15.7-200.fc35.aarch64 containerd://1.5.8
ip-10-0-32-166 Ready <none> 80s v1.23.4 10.0.32.166 <none> Fedora CoreOS 35.20211215.3.0 5.15.7-200.fc35.aarch64 containerd://1.5.8
ip-10-0-5-79 Ready <none> 77s v1.23.4 10.0.5.79 <none> Fedora CoreOS 35.20211215.3.0 5.15.7-200.fc35.aarch64 containerd://1.5.8
```
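Architecture can also be confirmed from the standard `kubernetes.io/arch` node label; a quick sketch (the label and the `-L` column flag are generic kubectl features, not specific to Typhoon):

```
# show an ARCH column sourced from the kubernetes.io/arch label; every node should report arm64
$ kubectl get nodes -L kubernetes.io/arch
```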

## Hybrid

Create a hybrid/mixed arch cluster by defining an AWS cluster. Then define a [worker pool](worker-pools.md#aws) with ARM64 workers. Optional taints are added to aid in scheduling.

=== "Cluster (amd64)"
=== "FCOS Cluster"

```tf
module "gravitas" {
source = "git::https://github.com/poseidon/typhoon//aws/fedora-coreos/kubernetes?ref=v1.23.0"
source = "git::https://github.com/poseidon/typhoon//aws/fedora-coreos/kubernetes?ref=v1.23.4"

# AWS
cluster_name = "gravitas"
@@ -72,11 +96,58 @@ Create a hybrid/mixed arch cluster by defining an AWS cluster. Then define a [wo
}
```

=== "Worker Pool (arm64)"
=== "Flatcar Cluster"

```tf
module "gravitas" {
source = "git::https://github.com/poseidon/typhoon//aws/flatcar-linux/kubernetes?ref=v1.23.4"

# AWS
cluster_name = "gravitas"
dns_zone = "aws.example.com"
dns_zone_id = "Z3PAABBCFAKEC0"

# configuration
ssh_authorized_key = "ssh-ed25519 AAAAB3Nz..."

# optional
networking = "cilium"
worker_count = 2
worker_price = "0.021"

daemonset_tolerations = ["arch"] # important
}
```

=== "FCOS ARM64 Workers"

```tf
module "gravitas-arm64" {
source = "git::https://github.com/poseidon/typhoon//aws/fedora-coreos/kubernetes/workers?ref=v1.23.0"
source = "git::https://github.com/poseidon/typhoon//aws/fedora-coreos/kubernetes/workers?ref=v1.23.4"

# AWS
vpc_id = module.gravitas.vpc_id
subnet_ids = module.gravitas.subnet_ids
security_groups = module.gravitas.worker_security_groups

# configuration
name = "gravitas-arm64"
kubeconfig = module.gravitas.kubeconfig
ssh_authorized_key = var.ssh_authorized_key

# optional
arch = "arm64"
instance_type = "t4g.small"
spot_price = "0.0168"
node_taints = ["arch=arm64:NoSchedule"]
}
```

=== "Flatcar ARM64 Workers"

```tf
module "gravitas-arm64" {
source = "git::https://github.com/poseidon/typhoon//aws/flatcar-linux/kubernetes/workers?ref=v1.23.4"

# AWS
vpc_id = module.gravitas.vpc_id
@@ -100,10 +171,10 @@ Verify amd64 (x86_64) and arm64 (aarch64) nodes are present.

```
$ kubectl get nodes -o wide
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
ip-10-0-1-81 Ready <none> 4m28s v1.23.0 10.0.1.81 <none> Fedora CoreOS 34.20210427.3.0 5.11.15-300.fc34.x86_64 docker://20.10.6
ip-10-0-17-86 Ready <none> 4m28s v1.23.0 10.0.17.86 <none> Fedora CoreOS 33.20210413.dev.0 5.10.19-200.fc33.aarch64 docker://19.3.13
ip-10-0-21-45 Ready <none> 4m28s v1.23.0 10.0.21.45 <none> Fedora CoreOS 34.20210427.3.0 5.11.15-300.fc34.x86_64 docker://20.10.6
ip-10-0-40-36 Ready <none> 4m22s v1.23.0 10.0.40.36 <none> Fedora CoreOS 34.20210427.3.0 5.11.15-300.fc34.x86_64 docker://20.10.6
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
ip-10-0-1-73 Ready <none> 111m v1.23.4 10.0.1.73 <none> Fedora CoreOS 35.20211215.3.0 5.15.7-200.fc35.x86_64 containerd://1.5.8
ip-10-0-22-79... Ready <none> 111m v1.23.4 10.0.22.79 <none> Flatcar Container Linux by Kinvolk 3033.2.0 (Oklo) 5.10.84-flatcar containerd://1.5.8
ip-10-0-24-130 Ready <none> 111m v1.23.4 10.0.24.130 <none> Fedora CoreOS 35.20211215.3.0 5.15.7-200.fc35.x86_64 containerd://1.5.8
ip-10-0-39-19 Ready <none> 111m v1.23.4 10.0.39.19 <none> Fedora CoreOS 35.20211215.3.0 5.15.7-200.fc35.x86_64 containerd://1.5.8
```
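Because the arm64 pool above sets `node_taints = ["arch=arm64:NoSchedule"]`, only workloads that tolerate that taint (and, usually, select `kubernetes.io/arch: arm64`) land on the ARM64 workers. A rough way to review taints and architectures across the mixed cluster:

```
# list node architectures, then show each node's name and taints
$ kubectl get nodes -L kubernetes.io/arch
$ kubectl describe nodes | grep -E '^(Name|Taints):'
```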
|
||||
|
||||
|
@ -36,7 +36,7 @@ Add custom initial worker node labels to default workers or worker pool nodes to
|
||||
|
||||
```tf
|
||||
module "yavin" {
|
||||
source = "git::https://github.com/poseidon/typhoon//google-cloud/fedora-coreos/kubernetes?ref=v1.23.0"
|
||||
source = "git::https://github.com/poseidon/typhoon//google-cloud/fedora-coreos/kubernetes?ref=v1.23.4"
|
||||
|
||||
# Google Cloud
|
||||
cluster_name = "yavin"
|
||||
@ -57,7 +57,7 @@ Add custom initial worker node labels to default workers or worker pool nodes to
|
||||
|
||||
```tf
|
||||
module "yavin-pool" {
|
||||
source = "git::https://github.com/poseidon/typhoon//google-cloud/fedora-coreos/kubernetes/workers?ref=v1.23.0"
|
||||
source = "git::https://github.com/poseidon/typhoon//google-cloud/fedora-coreos/kubernetes/workers?ref=v1.23.4"
|
||||
|
||||
# Google Cloud
|
||||
cluster_name = "yavin"
|
||||
@ -89,7 +89,7 @@ Add custom initial taints on worker pool nodes to indicate a node is unique and
|
||||
|
||||
```tf
|
||||
module "yavin" {
|
||||
source = "git::https://github.com/poseidon/typhoon//google-cloud/fedora-coreos/kubernetes?ref=v1.23.0"
|
||||
source = "git::https://github.com/poseidon/typhoon//google-cloud/fedora-coreos/kubernetes?ref=v1.23.4"
|
||||
|
||||
# Google Cloud
|
||||
cluster_name = "yavin"
|
||||
@ -110,7 +110,7 @@ Add custom initial taints on worker pool nodes to indicate a node is unique and
|
||||
|
||||
```tf
|
||||
module "yavin-pool" {
|
||||
source = "git::https://github.com/poseidon/typhoon//google-cloud/fedora-coreos/kubernetes/workers?ref=v1.23.0"
|
||||
source = "git::https://github.com/poseidon/typhoon//google-cloud/fedora-coreos/kubernetes/workers?ref=v1.23.4"
|
||||
|
||||
# Google Cloud
|
||||
cluster_name = "yavin"
|
||||
|
@ -4,4 +4,5 @@ Typhoon clusters offer several advanced features for skilled users.
|
||||
|
||||
* [ARM64](arm64.md)
|
||||
* [Customization](customization.md)
|
||||
* [Nodes](nodes.md)
|
||||
* [Worker Pools](worker-pools.md)
|
||||
|
@ -19,7 +19,7 @@ Create a cluster following the AWS [tutorial](../flatcar-linux/aws.md#cluster).
|
||||
|
||||
```tf
|
||||
module "tempest-worker-pool" {
|
||||
source = "git::https://github.com/poseidon/typhoon//aws/fedora-coreos/kubernetes/workers?ref=v1.23.0"
|
||||
source = "git::https://github.com/poseidon/typhoon//aws/fedora-coreos/kubernetes/workers?ref=v1.23.4"
|
||||
|
||||
# AWS
|
||||
vpc_id = module.tempest.vpc_id
|
||||
@ -42,7 +42,7 @@ Create a cluster following the AWS [tutorial](../flatcar-linux/aws.md#cluster).
|
||||
|
||||
```tf
|
||||
module "tempest-worker-pool" {
|
||||
source = "git::https://github.com/poseidon/typhoon//aws/flatcar-linux/kubernetes/workers?ref=v1.23.0"
|
||||
source = "git::https://github.com/poseidon/typhoon//aws/flatcar-linux/kubernetes/workers?ref=v1.23.4"
|
||||
|
||||
# AWS
|
||||
vpc_id = module.tempest.vpc_id
|
||||
@ -111,7 +111,7 @@ Create a cluster following the Azure [tutorial](../flatcar-linux/azure.md#cluste
|
||||
|
||||
```tf
|
||||
module "ramius-worker-pool" {
|
||||
source = "git::https://github.com/poseidon/typhoon//azure/fedora-coreos/kubernetes/workers?ref=v1.23.0"
|
||||
source = "git::https://github.com/poseidon/typhoon//azure/fedora-coreos/kubernetes/workers?ref=v1.23.4"
|
||||
|
||||
# Azure
|
||||
region = module.ramius.region
|
||||
@ -137,7 +137,7 @@ Create a cluster following the Azure [tutorial](../flatcar-linux/azure.md#cluste
|
||||
|
||||
```tf
|
||||
module "ramius-worker-pool" {
|
||||
source = "git::https://github.com/poseidon/typhoon//azure/flatcar-linux/kubernetes/workers?ref=v1.23.0"
|
||||
source = "git::https://github.com/poseidon/typhoon//azure/flatcar-linux/kubernetes/workers?ref=v1.23.4"
|
||||
|
||||
# Azure
|
||||
region = module.ramius.region
|
||||
@ -207,7 +207,7 @@ Create a cluster following the Google Cloud [tutorial](../flatcar-linux/google-c
|
||||
|
||||
```tf
|
||||
module "yavin-worker-pool" {
|
||||
source = "git::https://github.com/poseidon/typhoon//google-cloud/fedora-coreos/kubernetes/workers?ref=v1.23.0"
|
||||
source = "git::https://github.com/poseidon/typhoon//google-cloud/fedora-coreos/kubernetes/workers?ref=v1.23.4"
|
||||
|
||||
# Google Cloud
|
||||
region = "europe-west2"
|
||||
@ -231,7 +231,7 @@ Create a cluster following the Google Cloud [tutorial](../flatcar-linux/google-c
|
||||
|
||||
```tf
|
||||
module "yavin-worker-pool" {
|
||||
source = "git::https://github.com/poseidon/typhoon//google-cloud/flatcar-linux/kubernetes/workers?ref=v1.23.0"
|
||||
source = "git::https://github.com/poseidon/typhoon//google-cloud/flatcar-linux/kubernetes/workers?ref=v1.23.4"
|
||||
|
||||
# Google Cloud
|
||||
region = "europe-west2"
|
||||
@ -246,7 +246,7 @@ Create a cluster following the Google Cloud [tutorial](../flatcar-linux/google-c
|
||||
# optional
|
||||
worker_count = 2
|
||||
machine_type = "n1-standard-16"
|
||||
os_image = "flatcar-linux-2303-4-0" # custom
|
||||
os_image = "flatcar-stable"
|
||||
preemptible = true
|
||||
}
|
||||
```
|
||||
@@ -262,11 +262,11 @@ Verify a managed instance group of workers joins the cluster within a few minute
```
$ kubectl get nodes
NAME STATUS AGE VERSION
yavin-controller-0.c.example-com.internal Ready 6m v1.23.0
yavin-worker-jrbf.c.example-com.internal Ready 5m v1.23.0
yavin-worker-mzdm.c.example-com.internal Ready 5m v1.23.0
yavin-16x-worker-jrbf.c.example-com.internal Ready 3m v1.23.0
yavin-16x-worker-mzdm.c.example-com.internal Ready 3m v1.23.0
yavin-controller-0.c.example-com.internal Ready 6m v1.23.4
yavin-worker-jrbf.c.example-com.internal Ready 5m v1.23.4
yavin-worker-mzdm.c.example-com.internal Ready 5m v1.23.4
yavin-16x-worker-jrbf.c.example-com.internal Ready 3m v1.23.4
yavin-16x-worker-mzdm.c.example-com.internal Ready 3m v1.23.4
```
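If the pool was created with `worker_node_labels` (say `worker-pool=yavin-16x`, an assumed label used purely for illustration), the new nodes can also be selected directly:

```
# -l filters nodes by label selector; substitute whatever worker_node_labels you actually set
$ kubectl get nodes -l worker-pool=yavin-16x
```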
|
||||
|
||||
### Variables
|
||||
|
@@ -34,7 +34,7 @@ Together, they diversify Typhoon to support a range of container technologies.
| single-master | all platforms | all platforms |
| multi-master | all platforms | all platforms |
| control plane | static pods | static pods |
| Container Runtime | docker 20.10 | docker 20.10 |
| Container Runtime | containerd 1.5.8 | containerd 1.5.8 |
| kubelet image | kubelet [image](https://github.com/poseidon/kubelet) with upstream binary | kubelet [image](https://github.com/poseidon/kubelet) with upstream binary |
| control plane images | upstream images | upstream images |
| on-host etcd | docker | podman |
|
@@ -1,6 +1,6 @@
# AWS

In this tutorial, we'll create a Kubernetes v1.23.0 cluster on AWS with Fedora CoreOS.
In this tutorial, we'll create a Kubernetes v1.23.4 cluster on AWS with Fedora CoreOS.

We'll declare a Kubernetes cluster using the Typhoon Terraform module. Then apply the changes to create a VPC, gateway, subnets, security groups, controller instances, worker auto-scaling group, network load balancer, and TLS assets.

@@ -51,11 +51,11 @@ terraform {
required_providers {
ct = {
source = "poseidon/ct"
version = "0.9.1"
version = "0.10.0"
}
aws = {
source = "hashicorp/aws"
version = "3.67.0"
version = "4.2.0"
}
}
}
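Raising the `ct` and `aws` provider constraints only takes effect after the working directory is re-initialized; roughly, from the directory containing these definitions:

```
# fetch providers matching the new constraints, then review the planned changes
$ terraform init -upgrade
$ terraform plan
```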
|
||||
@ -72,7 +72,7 @@ Define a Kubernetes cluster using the module `aws/fedora-coreos/kubernetes`.
|
||||
|
||||
```tf
|
||||
module "tempest" {
|
||||
source = "git::https://github.com/poseidon/typhoon//aws/fedora-coreos/kubernetes?ref=v1.23.0"
|
||||
source = "git::https://github.com/poseidon/typhoon//aws/fedora-coreos/kubernetes?ref=v1.23.4"
|
||||
|
||||
# AWS
|
||||
cluster_name = "tempest"
|
||||
@ -145,9 +145,9 @@ List nodes in the cluster.
|
||||
$ export KUBECONFIG=/home/user/.kube/configs/tempest-config
|
||||
$ kubectl get nodes
|
||||
NAME STATUS ROLES AGE VERSION
|
||||
ip-10-0-3-155 Ready <none> 10m v1.23.0
|
||||
ip-10-0-26-65 Ready <none> 10m v1.23.0
|
||||
ip-10-0-41-21 Ready <none> 10m v1.23.0
|
||||
ip-10-0-3-155 Ready <none> 10m v1.23.4
|
||||
ip-10-0-26-65 Ready <none> 10m v1.23.4
|
||||
ip-10-0-41-21 Ready <none> 10m v1.23.4
|
||||
```
|
||||
|
||||
List the pods.
|
||||
@ -218,7 +218,7 @@ Reference the DNS zone id with `aws_route53_zone.zone-for-clusters.zone_id`.
|
||||
| worker_price | Spot price in USD for worker instances or 0 to use on-demand instances | 0 | 0.10 |
|
||||
| controller_snippets | Controller Butane snippets | [] | [examples](/advanced/customization/) |
|
||||
| worker_snippets | Worker Butane snippets | [] | [examples](/advanced/customization/) |
|
||||
| networking | Choice of networking provider | "calico" | "calico" or "cilium" or "flannel" |
|
||||
| networking | Choice of networking provider | "cilium" | "calico" or "cilium" or "flannel" |
|
||||
| network_mtu | CNI interface MTU (calico only) | 1480 | 8981 |
|
||||
| host_cidr | CIDR IPv4 range to assign to EC2 instances | "10.0.0.0/16" | "10.1.0.0/16" |
|
||||
| pod_cidr | CIDR IPv4 range to assign to Kubernetes pods | "10.2.0.0/16" | "10.22.0.0/16" |
|
||||
|
@ -1,6 +1,6 @@
|
||||
# Azure
|
||||
|
||||
In this tutorial, we'll create a Kubernetes v1.23.0 cluster on Azure with Fedora CoreOS.
|
||||
In this tutorial, we'll create a Kubernetes v1.23.4 cluster on Azure with Fedora CoreOS.
|
||||
|
||||
We'll declare a Kubernetes cluster using the Typhoon Terraform module. Then apply the changes to create a resource group, virtual network, subnets, security groups, controller availability set, worker scale set, load balancer, and TLS assets.
|
||||
|
||||
@ -48,11 +48,11 @@ terraform {
|
||||
required_providers {
|
||||
ct = {
|
||||
source = "poseidon/ct"
|
||||
version = "0.9.1"
|
||||
version = "0.10.0"
|
||||
}
|
||||
azurerm = {
|
||||
source = "hashicorp/azurerm"
|
||||
version = "2.88.1"
|
||||
version = "2.97.0"
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -86,7 +86,7 @@ Define a Kubernetes cluster using the module `azure/fedora-coreos/kubernetes`.
|
||||
|
||||
```tf
|
||||
module "ramius" {
|
||||
source = "git::https://github.com/poseidon/typhoon//azure/fedora-coreos/kubernetes?ref=v1.23.0"
|
||||
source = "git::https://github.com/poseidon/typhoon//azure/fedora-coreos/kubernetes?ref=v1.23.4"
|
||||
|
||||
# Azure
|
||||
cluster_name = "ramius"
|
||||
@ -161,9 +161,9 @@ List nodes in the cluster.
|
||||
$ export KUBECONFIG=/home/user/.kube/configs/ramius-config
|
||||
$ kubectl get nodes
|
||||
NAME STATUS ROLES AGE VERSION
|
||||
ramius-controller-0 Ready <none> 24m v1.23.0
|
||||
ramius-worker-000001 Ready <none> 25m v1.23.0
|
||||
ramius-worker-000002 Ready <none> 24m v1.23.0
|
||||
ramius-controller-0 Ready <none> 24m v1.23.4
|
||||
ramius-worker-000001 Ready <none> 25m v1.23.4
|
||||
ramius-worker-000002 Ready <none> 24m v1.23.4
|
||||
```
|
||||
|
||||
List the pods.
|
||||
@ -245,7 +245,7 @@ Reference the DNS zone with `azurerm_dns_zone.clusters.name` and its resource gr
|
||||
| worker_priority | Set priority to Spot to use reduced cost surplus capacity, with the tradeoff that instances can be deallocated at any time | Regular | Spot |
|
||||
| controller_snippets | Controller Butane snippets | [] | [example](/advanced/customization/#usage) |
|
||||
| worker_snippets | Worker Butane snippets | [] | [example](/advanced/customization/#usage) |
|
||||
| networking | Choice of networking provider | "calico" | "calico" or "cilium" or "flannel" |
|
||||
| networking | Choice of networking provider | "cilium" | "calico" or "cilium" or "flannel" |
|
||||
| host_cidr | CIDR IPv4 range to assign to instances | "10.0.0.0/16" | "10.0.0.0/20" |
|
||||
| pod_cidr | CIDR IPv4 range to assign to Kubernetes pods | "10.2.0.0/16" | "10.22.0.0/16" |
|
||||
| service_cidr | CIDR IPv4 range to assign to Kubernetes services | "10.3.0.0/16" | "10.3.0.0/24" |
|
||||
|
@ -1,6 +1,6 @@
|
||||
# Bare-Metal
|
||||
|
||||
In this tutorial, we'll network boot and provision a Kubernetes v1.23.0 cluster on bare-metal with Fedora CoreOS.
|
||||
In this tutorial, we'll network boot and provision a Kubernetes v1.23.4 cluster on bare-metal with Fedora CoreOS.
|
||||
|
||||
First, we'll deploy a [Matchbox](https://github.com/poseidon/matchbox) service and setup a network boot environment. Then, we'll declare a Kubernetes cluster using the Typhoon Terraform module and power on machines. On PXE boot, machines will install Fedora CoreOS to disk, reboot into the disk install, and provision themselves as Kubernetes controllers or workers via Ignition.
|
||||
|
||||
@ -138,7 +138,7 @@ terraform {
|
||||
required_providers {
|
||||
ct = {
|
||||
source = "poseidon/ct"
|
||||
version = "0.9.1"
|
||||
version = "0.10.0"
|
||||
}
|
||||
matchbox = {
|
||||
source = "poseidon/matchbox"
|
||||
@ -154,7 +154,7 @@ Define a Kubernetes cluster using the module `bare-metal/fedora-coreos/kubernete
|
||||
|
||||
```tf
|
||||
module "mercury" {
|
||||
source = "git::https://github.com/poseidon/typhoon//bare-metal/fedora-coreos/kubernetes?ref=v1.23.0"
|
||||
source = "git::https://github.com/poseidon/typhoon//bare-metal/fedora-coreos/kubernetes?ref=v1.23.4"
|
||||
|
||||
# bare-metal
|
||||
cluster_name = "mercury"
|
||||
@ -283,9 +283,9 @@ List nodes in the cluster.
|
||||
$ export KUBECONFIG=/home/user/.kube/configs/mercury-config
|
||||
$ kubectl get nodes
|
||||
NAME STATUS ROLES AGE VERSION
|
||||
node1.example.com Ready <none> 10m v1.23.0
|
||||
node2.example.com Ready <none> 10m v1.23.0
|
||||
node3.example.com Ready <none> 10m v1.23.0
|
||||
node1.example.com Ready <none> 10m v1.23.4
|
||||
node2.example.com Ready <none> 10m v1.23.4
|
||||
node3.example.com Ready <none> 10m v1.23.4
|
||||
```
|
||||
|
||||
List the pods.
|
||||
@ -333,7 +333,7 @@ Check the [variables.tf](https://github.com/poseidon/typhoon/blob/master/bare-me
|
||||
|:-----|:------------|:--------|:--------|
|
||||
| cached_install | PXE boot and install from the Matchbox `/assets` cache. Admin MUST have downloaded Fedora CoreOS images into the cache | false | true |
|
||||
| install_disk | Disk device where Fedora CoreOS should be installed | "sda" (not "/dev/sda" like Container Linux) | "sdb" |
|
||||
| networking | Choice of networking provider | "calico" | "calico" or "cilium" or "flannel" |
|
||||
| networking | Choice of networking provider | "cilium" | "calico" or "cilium" or "flannel" |
|
||||
| network_mtu | CNI interface MTU (calico-only) | 1480 | - |
|
||||
| snippets | Map from machine names to lists of Butane snippets | {} | [examples](/advanced/customization/) |
|
||||
| network_ip_autodetection_method | Method to detect host IPv4 address (calico-only) | "first-found" | "can-reach=10.0.0.1" |
|
||||
|
@ -1,6 +1,6 @@
|
||||
# DigitalOcean
|
||||
|
||||
In this tutorial, we'll create a Kubernetes v1.23.0 cluster on DigitalOcean with Fedora CoreOS.
|
||||
In this tutorial, we'll create a Kubernetes v1.23.4 cluster on DigitalOcean with Fedora CoreOS.
|
||||
|
||||
We'll declare a Kubernetes cluster using the Typhoon Terraform module. Then apply the changes to create controller droplets, worker droplets, DNS records, tags, and TLS assets.
|
||||
|
||||
@ -51,11 +51,11 @@ terraform {
|
||||
required_providers {
|
||||
ct = {
|
||||
source = "poseidon/ct"
|
||||
version = "0.9.1"
|
||||
version = "0.10.0"
|
||||
}
|
||||
digitalocean = {
|
||||
source = "digitalocean/digitalocean"
|
||||
version = "1.22.1"
|
||||
version = "2.17.1"
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -81,7 +81,7 @@ Define a Kubernetes cluster using the module `digital-ocean/fedora-coreos/kubern
|
||||
|
||||
```tf
|
||||
module "nemo" {
|
||||
source = "git::https://github.com/poseidon/typhoon//digital-ocean/fedora-coreos/kubernetes?ref=v1.23.0"
|
||||
source = "git::https://github.com/poseidon/typhoon//digital-ocean/fedora-coreos/kubernetes?ref=v1.23.4"
|
||||
|
||||
# Digital Ocean
|
||||
cluster_name = "nemo"
|
||||
@ -155,9 +155,9 @@ List nodes in the cluster.
|
||||
$ export KUBECONFIG=/home/user/.kube/configs/nemo-config
|
||||
$ kubectl get nodes
|
||||
NAME STATUS ROLES AGE VERSION
|
||||
10.132.110.130 Ready <none> 10m v1.23.0
|
||||
10.132.115.81 Ready <none> 10m v1.23.0
|
||||
10.132.124.107 Ready <none> 10m v1.23.0
|
||||
10.132.110.130 Ready <none> 10m v1.23.4
|
||||
10.132.115.81 Ready <none> 10m v1.23.4
|
||||
10.132.124.107 Ready <none> 10m v1.23.4
|
||||
```
|
||||
|
||||
List the pods.
|
||||
@ -240,7 +240,7 @@ Digital Ocean requires the SSH public key be uploaded to your account, so you ma
|
||||
| worker_type | Droplet type for workers | "s-1vcpu-2gb" | s-1vcpu-2gb, s-2vcpu-2gb, ... |
|
||||
| controller_snippets | Controller Butane snippets | [] | [example](/advanced/customization/) |
|
||||
| worker_snippets | Worker Butane snippets | [] | [example](/advanced/customization/) |
|
||||
| networking | Choice of networking provider | "calico" | "calico" or "cilium" or "flannel" |
|
||||
| networking | Choice of networking provider | "cilium" | "calico" or "cilium" or "flannel" |
|
||||
| pod_cidr | CIDR IPv4 range to assign to Kubernetes pods | "10.2.0.0/16" | "10.22.0.0/16" |
|
||||
| service_cidr | CIDR IPv4 range to assign to Kubernetes services | "10.3.0.0/16" | "10.3.0.0/24" |
|
||||
|
||||
|
@ -1,6 +1,6 @@
|
||||
# Google Cloud
|
||||
|
||||
In this tutorial, we'll create a Kubernetes v1.23.0 cluster on Google Compute Engine with Fedora CoreOS.
|
||||
In this tutorial, we'll create a Kubernetes v1.23.4 cluster on Google Compute Engine with Fedora CoreOS.
|
||||
|
||||
We'll declare a Kubernetes cluster using the Typhoon Terraform module. Then apply the changes to create a network, firewall rules, health checks, controller instances, worker managed instance group, load balancers, and TLS assets.
|
||||
|
||||
@ -52,11 +52,11 @@ terraform {
|
||||
required_providers {
|
||||
ct = {
|
||||
source = "poseidon/ct"
|
||||
version = "0.9.1"
|
||||
version = "0.10.0"
|
||||
}
|
||||
google = {
|
||||
source = "hashicorp/google"
|
||||
version = "4.3.0"
|
||||
version = "4.11.0"
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -147,9 +147,9 @@ List nodes in the cluster.
|
||||
$ export KUBECONFIG=/home/user/.kube/configs/yavin-config
|
||||
$ kubectl get nodes
|
||||
NAME ROLES STATUS AGE VERSION
|
||||
yavin-controller-0.c.example-com.internal <none> Ready 6m v1.23.0
|
||||
yavin-worker-jrbf.c.example-com.internal <none> Ready 5m v1.23.0
|
||||
yavin-worker-mzdm.c.example-com.internal <none> Ready 5m v1.23.0
|
||||
yavin-controller-0.c.example-com.internal <none> Ready 6m v1.23.4
|
||||
yavin-worker-jrbf.c.example-com.internal <none> Ready 5m v1.23.4
|
||||
yavin-worker-mzdm.c.example-com.internal <none> Ready 5m v1.23.4
|
||||
```
|
||||
|
||||
List the pods.
|
||||
@ -220,7 +220,7 @@ resource "google_dns_managed_zone" "zone-for-clusters" {
|
||||
| worker_preemptible | If enabled, Compute Engine will terminate workers randomly within 24 hours | false | true |
|
||||
| controller_snippets | Controller Butane snippets | [] | [examples](/advanced/customization/) |
|
||||
| worker_snippets | Worker Butane snippets | [] | [examples](/advanced/customization/) |
|
||||
| networking | Choice of networking provider | "calico" | "calico" or "cilium" or "flannel" |
|
||||
| networking | Choice of networking provider | "cilium" | "calico" or "cilium" or "flannel" |
|
||||
| pod_cidr | CIDR IPv4 range to assign to Kubernetes pods | "10.2.0.0/16" | "10.22.0.0/16" |
|
||||
| service_cidr | CIDR IPv4 range to assign to Kubernetes services | "10.3.0.0/16" | "10.3.0.0/24" |
|
||||
| worker_node_labels | List of initial worker node labels | [] | ["worker-pool=default"] |
|
||||
|
@ -1,6 +1,6 @@
|
||||
# AWS
|
||||
|
||||
In this tutorial, we'll create a Kubernetes v1.23.0 cluster on AWS with Flatcar Linux.
|
||||
In this tutorial, we'll create a Kubernetes v1.23.4 cluster on AWS with Flatcar Linux.
|
||||
|
||||
We'll declare a Kubernetes cluster using the Typhoon Terraform module. Then apply the changes to create a VPC, gateway, subnets, security groups, controller instances, worker auto-scaling group, network load balancer, and TLS assets.

@@ -51,11 +51,11 @@ terraform {
required_providers {
ct = {
source = "poseidon/ct"
version = "0.9.1"
version = "0.10.0"
}
aws = {
source = "hashicorp/aws"
version = "3.67.0"
version = "4.2.0"
}
}
}
@@ -72,7 +72,7 @@ Define a Kubernetes cluster using the module `aws/flatcar-linux/kubernetes`.

```tf
module "tempest" {
source = "git::https://github.com/poseidon/typhoon//aws/flatcar-linux/kubernetes?ref=v1.23.0"
source = "git::https://github.com/poseidon/typhoon//aws/flatcar-linux/kubernetes?ref=v1.23.4"

# AWS
cluster_name = "tempest"
@@ -145,9 +145,9 @@ List nodes in the cluster.
$ export KUBECONFIG=/home/user/.kube/configs/tempest-config
$ kubectl get nodes
NAME STATUS ROLES AGE VERSION
ip-10-0-3-155 Ready <none> 10m v1.23.0
ip-10-0-26-65 Ready <none> 10m v1.23.0
ip-10-0-41-21 Ready <none> 10m v1.23.0
ip-10-0-3-155 Ready <none> 10m v1.23.4
ip-10-0-26-65 Ready <none> 10m v1.23.4
ip-10-0-41-21 Ready <none> 10m v1.23.4
```

List the pods.
@@ -218,7 +218,7 @@ Reference the DNS zone id with `aws_route53_zone.zone-for-clusters.zone_id`.
| worker_price | Spot price in USD for worker instances or 0 to use on-demand instances | 0/null | 0.10 |
| controller_snippets | Controller Container Linux Config snippets | [] | [example](/advanced/customization/) |
| worker_snippets | Worker Container Linux Config snippets | [] | [example](/advanced/customization/) |
| networking | Choice of networking provider | "calico" | "calico" or "cilium" or "flannel" |
| networking | Choice of networking provider | "cilium" | "calico" or "cilium" or "flannel" |
| network_mtu | CNI interface MTU (calico only) | 1480 | 8981 |
| host_cidr | CIDR IPv4 range to assign to EC2 instances | "10.0.0.0/16" | "10.1.0.0/16" |
| pod_cidr | CIDR IPv4 range to assign to Kubernetes pods | "10.2.0.0/16" | "10.22.0.0/16" |

@@ -1,6 +1,6 @@
# Azure

In this tutorial, we'll create a Kubernetes v1.23.0 cluster on Azure with Flatcar Linux.
In this tutorial, we'll create a Kubernetes v1.23.4 cluster on Azure with Flatcar Linux.

We'll declare a Kubernetes cluster using the Typhoon Terraform module. Then apply the changes to create a resource group, virtual network, subnets, security groups, controller availability set, worker scale set, load balancer, and TLS assets.
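For orientation (not part of this diff), the `azurerm` and `ct` providers are configured before the cluster is defined. A minimal sketch; the empty `features` block is required by azurerm 2.x, and authentication is assumed to come from the Azure CLI or environment:

```tf
# Hypothetical provider setup; auth is assumed from `az login` or environment variables.
provider "azurerm" {
  features {}
}

provider "ct" {}
```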

@@ -48,11 +48,11 @@ terraform {
required_providers {
ct = {
source = "poseidon/ct"
version = "0.9.1"
version = "0.10.0"
}
azurerm = {
source = "hashicorp/azurerm"
version = "2.88.1"
version = "2.97.0"
}
}
}
@@ -75,7 +75,7 @@ Define a Kubernetes cluster using the module `azure/flatcar-linux/kubernetes`.

```tf
module "ramius" {
source = "git::https://github.com/poseidon/typhoon//azure/flatcar-linux/kubernetes?ref=v1.23.0"
source = "git::https://github.com/poseidon/typhoon//azure/flatcar-linux/kubernetes?ref=v1.23.4"

# Azure
cluster_name = "ramius"
@@ -149,9 +149,9 @@ List nodes in the cluster.
$ export KUBECONFIG=/home/user/.kube/configs/ramius-config
$ kubectl get nodes
NAME STATUS ROLES AGE VERSION
ramius-controller-0 Ready <none> 24m v1.23.0
ramius-worker-000001 Ready <none> 25m v1.23.0
ramius-worker-000002 Ready <none> 24m v1.23.0
ramius-controller-0 Ready <none> 24m v1.23.4
ramius-worker-000001 Ready <none> 25m v1.23.4
ramius-worker-000002 Ready <none> 24m v1.23.4
```

List the pods.
@@ -233,7 +233,7 @@ Reference the DNS zone with `azurerm_dns_zone.clusters.name` and its resource gr
| worker_priority | Set priority to Spot to use reduced cost surplus capacity, with the tradeoff that instances can be deallocated at any time | Regular | Spot |
| controller_snippets | Controller Container Linux Config snippets | [] | [example](/advanced/customization/#usage) |
| worker_snippets | Worker Container Linux Config snippets | [] | [example](/advanced/customization/#usage) |
| networking | Choice of networking provider | "calico" | "calico" or "cilium" or "flannel" |
| networking | Choice of networking provider | "cilium" | "calico" or "cilium" or "flannel" |
| host_cidr | CIDR IPv4 range to assign to instances | "10.0.0.0/16" | "10.0.0.0/20" |
| pod_cidr | CIDR IPv4 range to assign to Kubernetes pods | "10.2.0.0/16" | "10.22.0.0/16" |
| service_cidr | CIDR IPv4 range to assign to Kubernetes services | "10.3.0.0/16" | "10.3.0.0/24" |

@@ -1,6 +1,6 @@
# Bare-Metal

In this tutorial, we'll network boot and provision a Kubernetes v1.23.0 cluster on bare-metal with Flatcar Linux.
In this tutorial, we'll network boot and provision a Kubernetes v1.23.4 cluster on bare-metal with Flatcar Linux.

First, we'll deploy a [Matchbox](https://github.com/poseidon/matchbox) service and setup a network boot environment. Then, we'll declare a Kubernetes cluster using the Typhoon Terraform module and power on machines. On PXE boot, machines will install Container Linux to disk, reboot into the disk install, and provision themselves as Kubernetes controllers or workers via Ignition.
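For orientation (not part of this diff), the `matchbox` provider is configured with the service endpoint and client TLS credentials before the cluster is defined. A minimal sketch with placeholder endpoint and certificate paths:

```tf
# Hypothetical provider setup; endpoint and certificate paths are placeholders.
provider "matchbox" {
  endpoint    = "matchbox.example.com:8081"
  client_cert = file("~/.config/matchbox/client.crt")
  client_key  = file("~/.config/matchbox/client.key")
  ca          = file("~/.config/matchbox/ca.crt")
}

provider "ct" {}
```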

@@ -138,7 +138,7 @@ terraform {
required_providers {
ct = {
source = "poseidon/ct"
version = "0.9.1"
version = "0.10.0"
}
matchbox = {
source = "poseidon/matchbox"
@@ -154,7 +154,7 @@ Define a Kubernetes cluster using the module `bare-metal/flatcar-linux/kubernete

```tf
module "mercury" {
source = "git::https://github.com/poseidon/typhoon//bare-metal/flatcar-linux/kubernetes?ref=v1.23.0"
source = "git::https://github.com/poseidon/typhoon//bare-metal/flatcar-linux/kubernetes?ref=v1.23.4"

# bare-metal
cluster_name = "mercury"
@@ -293,9 +293,9 @@ List nodes in the cluster.
$ export KUBECONFIG=/home/user/.kube/configs/mercury-config
$ kubectl get nodes
NAME STATUS ROLES AGE VERSION
node1.example.com Ready <none> 10m v1.23.0
node2.example.com Ready <none> 10m v1.23.0
node3.example.com Ready <none> 10m v1.23.0
node1.example.com Ready <none> 10m v1.23.4
node2.example.com Ready <none> 10m v1.23.4
node3.example.com Ready <none> 10m v1.23.4
```

List the pods.
@@ -344,7 +344,7 @@ Check the [variables.tf](https://github.com/poseidon/typhoon/blob/master/bare-me
| download_protocol | Protocol iPXE uses to download the kernel and initrd. iPXE must be compiled with [crypto](https://ipxe.org/crypto) support for https. Unused if cached_install is true | "https" | "http" |
| cached_install | PXE boot and install from the Matchbox `/assets` cache. Admin MUST have downloaded Container Linux or Flatcar images into the cache | false | true |
| install_disk | Disk device where Container Linux should be installed | "/dev/sda" | "/dev/sdb" |
| networking | Choice of networking provider | "calico" | "calico" or "cilium" or "flannel" |
| networking | Choice of networking provider | "cilium" | "calico" or "cilium" or "flannel" |
| network_mtu | CNI interface MTU (calico-only) | 1480 | - |
| snippets | Map from machine names to lists of Container Linux Config snippets | {} | [examples](/advanced/customization/) |
| network_ip_autodetection_method | Method to detect host IPv4 address (calico-only) | "first-found" | "can-reach=10.0.0.1" |

@@ -1,6 +1,6 @@
# DigitalOcean

In this tutorial, we'll create a Kubernetes v1.23.0 cluster on DigitalOcean with Flatcar Linux.
In this tutorial, we'll create a Kubernetes v1.23.4 cluster on DigitalOcean with Flatcar Linux.

We'll declare a Kubernetes cluster using the Typhoon Terraform module. Then apply the changes to create controller droplets, worker droplets, DNS records, tags, and TLS assets.
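For orientation (not part of this diff), the `digitalocean` and `ct` providers are configured before the cluster is defined. A minimal sketch; `var.digitalocean_token` is an assumed variable holding an API token:

```tf
# Hypothetical provider setup; the token variable is a placeholder.
provider "digitalocean" {
  token = var.digitalocean_token
}

provider "ct" {}
```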

@@ -51,11 +51,11 @@ terraform {
required_providers {
ct = {
source = "poseidon/ct"
version = "0.9.1"
version = "0.10.0"
}
digitalocean = {
source = "digitalocean/digitalocean"
version = "1.22.1"
version = "2.17.1"
}
}
}
@@ -81,7 +81,7 @@ Define a Kubernetes cluster using the module `digital-ocean/flatcar-linux/kubern

```tf
module "nemo" {
source = "git::https://github.com/poseidon/typhoon//digital-ocean/flatcar-linux/kubernetes?ref=v1.23.0"
source = "git::https://github.com/poseidon/typhoon//digital-ocean/flatcar-linux/kubernetes?ref=v1.23.4"

# Digital Ocean
cluster_name = "nemo"
@@ -155,9 +155,9 @@ List nodes in the cluster.
$ export KUBECONFIG=/home/user/.kube/configs/nemo-config
$ kubectl get nodes
NAME STATUS ROLES AGE VERSION
10.132.110.130 Ready <none> 10m v1.23.0
10.132.115.81 Ready <none> 10m v1.23.0
10.132.124.107 Ready <none> 10m v1.23.0
10.132.110.130 Ready <none> 10m v1.23.4
10.132.115.81 Ready <none> 10m v1.23.4
10.132.124.107 Ready <none> 10m v1.23.4
```

List the pods.
@@ -240,7 +240,7 @@ Digital Ocean requires the SSH public key be uploaded to your account, so you ma
| worker_type | Droplet type for workers | "s-1vcpu-2gb" | s-1vcpu-2gb, s-2vcpu-2gb, ... |
| controller_snippets | Controller Container Linux Config snippets | [] | [example](/advanced/customization/) |
| worker_snippets | Worker Container Linux Config snippets | [] | [example](/advanced/customization/) |
| networking | Choice of networking provider | "calico" | "calico" or "cilium" or "flannel" |
| networking | Choice of networking provider | "cilium" | "calico" or "cilium" or "flannel" |
| pod_cidr | CIDR IPv4 range to assign to Kubernetes pods | "10.2.0.0/16" | "10.22.0.0/16" |
| service_cidr | CIDR IPv4 range to assign to Kubernetes services | "10.3.0.0/16" | "10.3.0.0/24" |

@@ -1,6 +1,6 @@
# Google Cloud

In this tutorial, we'll create a Kubernetes v1.23.0 cluster on Google Compute Engine with Flatcar Linux.
In this tutorial, we'll create a Kubernetes v1.23.4 cluster on Google Compute Engine with Flatcar Linux.

We'll declare a Kubernetes cluster using the Typhoon Terraform module. Then apply the changes to create a network, firewall rules, health checks, controller instances, worker managed instance group, load balancers, and TLS assets.

@@ -52,11 +52,11 @@ terraform {
required_providers {
ct = {
source = "poseidon/ct"
version = "0.9.1"
version = "0.10.0"
}
google = {
source = "hashicorp/google"
version = "4.3.0"
version = "4.11.0"
}
}
}
@@ -67,32 +67,13 @@ Additional configuration options are described in the `google` provider [docs](h
!!! tip
Regions are listed in [docs](https://cloud.google.com/compute/docs/regions-zones/regions-zones) or with `gcloud compute regions list`. A project may contain multiple clusters across different regions.

### Flatcar Linux Images

Flatcar Linux publishes Google Cloud images, but does not yet upload them. Google Cloud allows [custom boot images](https://cloud.google.com/compute/docs/images/import-existing-image) to be uploaded to a bucket and imported into your project.

[Download](https://www.flatcar-linux.org/releases/) the Flatcar Linux GCE gzipped tarball and upload it to a Google Cloud storage bucket.

```
gsutil list
gsutil cp flatcar_production_gce.tar.gz gs://BUCKET
```

Create a Compute Engine image from the file.

```
gcloud compute images create flatcar-linux-2303-4-0 --source-uri gs://BUCKET_NAME/flatcar_production_gce.tar.gz
```

Set the [os_image](#variables) in the next step.

## Cluster

Define a Kubernetes cluster using the module `google-cloud/flatcar-linux/kubernetes`.

```tf
module "yavin" {
source = "git::https://github.com/poseidon/typhoon//google-cloud/flatcar-linux/kubernetes?ref=v1.23.0"
source = "git::https://github.com/poseidon/typhoon//google-cloud/flatcar-linux/kubernetes?ref=v1.23.4"

# Google Cloud
cluster_name = "yavin"
@@ -101,7 +82,6 @@ module "yavin" {
dns_zone_name = "example-zone"

# configuration
os_image = "flatcar-linux-2303-4-0"
ssh_authorized_key = "ssh-rsa AAAAB3Nz..."

# optional
@@ -167,9 +147,9 @@ List nodes in the cluster.
$ export KUBECONFIG=/home/user/.kube/configs/yavin-config
$ kubectl get nodes
NAME ROLES STATUS AGE VERSION
yavin-controller-0.c.example-com.internal <none> Ready 6m v1.23.0
yavin-worker-jrbf.c.example-com.internal <none> Ready 5m v1.23.0
yavin-worker-mzdm.c.example-com.internal <none> Ready 5m v1.23.0
yavin-controller-0.c.example-com.internal <none> Ready 6m v1.23.4
yavin-worker-jrbf.c.example-com.internal <none> Ready 5m v1.23.4
yavin-worker-mzdm.c.example-com.internal <none> Ready 5m v1.23.4
```

List the pods.
@@ -206,7 +186,6 @@ Check the [variables.tf](https://github.com/poseidon/typhoon/blob/master/google-
| region | Google Cloud region | "us-central1" |
| dns_zone | Google Cloud DNS zone | "google-cloud.example.com" |
| dns_zone_name | Google Cloud DNS zone name | "example-zone" |
| os_image | Container Linux image for compute instances | "flatcar-linux-2303-4-0" |
| ssh_authorized_key | SSH public key for user 'core' | "ssh-rsa AAAAB3NZ..." |

Check the list of valid [regions](https://cloud.google.com/compute/docs/regions-zones/regions-zones) and list Container Linux [images](https://cloud.google.com/compute/docs/images) with `gcloud compute images list | grep coreos`.
@@ -236,11 +215,12 @@ resource "google_dns_managed_zone" "zone-for-clusters" {
| worker_count | Number of workers | 1 | 3 |
| controller_type | Machine type for controllers | "n1-standard-1" | See below |
| worker_type | Machine type for workers | "n1-standard-1" | See below |
| os_image | Flatcar Linux image for compute instances | "flatcar-stable" | flatcar-stable, flatcar-beta, flatcar-alpha |
| disk_size | Size of the disk in GB | 30 | 100 |
| worker_preemptible | If enabled, Compute Engine will terminate workers randomly within 24 hours | false | true |
| controller_snippets | Controller Container Linux Config snippets | [] | [example](/advanced/customization/) |
| worker_snippets | Worker Container Linux Config snippets | [] | [example](/advanced/customization/) |
| networking | Choice of networking provider | "calico" | "calico" or "cilium" or "flannel" |
| networking | Choice of networking provider | "cilium" | "calico" or "cilium" or "flannel" |
| pod_cidr | CIDR IPv4 range to assign to Kubernetes pods | "10.2.0.0/16" | "10.22.0.0/16" |
| service_cidr | CIDR IPv4 range to assign to Kubernetes services | "10.3.0.0/16" | "10.3.0.0/24" |
| worker_node_labels | List of initial worker node labels | [] | ["worker-pool=default"] |

@@ -11,7 +11,7 @@ Typhoon distributes upstream Kubernetes, architectural conventions, and cluster

## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>

* Kubernetes v1.23.0 (upstream)
* Kubernetes v1.23.4 (upstream)
* Single or multi-master, [Calico](https://www.projectcalico.org/) or [Cilium](https://github.com/cilium/cilium) or [flannel](https://github.com/coreos/flannel) networking
* On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/), SELinux enforcing
* Advanced features like [worker pools](advanced/worker-pools/), [preemptible](fedora-coreos/google-cloud/#preemption) workers, and [snippets](advanced/customization/#hosts) customization
@@ -43,7 +43,11 @@ Typhoon is available for [Flatcar Linux](https://www.flatcar-linux.org/releases/
| Azure | Flatcar Linux | [azure/flatcar-linux/kubernetes](flatcar-linux/azure.md) | alpha |
| Bare-Metal | Flatcar Linux | [bare-metal/flatcar-linux/kubernetes](flatcar-linux/bare-metal.md) | stable |
| DigitalOcean | Flatcar Linux | [digital-ocean/flatcar-linux/kubernetes](flatcar-linux/digitalocean.md) | beta |
| Google Cloud | Flatcar Linux | [google-cloud/flatcar-linux/kubernetes](flatcar-linux/google-cloud.md) | beta |
| Google Cloud | Flatcar Linux | [google-cloud/flatcar-linux/kubernetes](flatcar-linux/google-cloud.md) | stable |

| Platform | Operating System | Terraform Module | Status |
|---------------|------------------|------------------|--------|
| AWS | Flatcar Linux (ARM64) | [aws/flatcar-linux/kubernetes](advanced/arm64.md) | alpha |

## Documentation

@@ -57,7 +61,7 @@ Define a Kubernetes cluster by using the Terraform module for your chosen platfo

```tf
module "yavin" {
source = "git::https://github.com/poseidon/typhoon//google-cloud/fedora-coreos/kubernetes?ref=v1.23.0"
source = "git::https://github.com/poseidon/typhoon//google-cloud/fedora-coreos/kubernetes?ref=v1.23.4"

# Google Cloud
cluster_name = "yavin"
@@ -95,9 +99,9 @@ In 4-8 minutes (varies by platform), the cluster will be ready. This Google Clou
$ export KUBECONFIG=/home/user/.kube/configs/yavin-config
$ kubectl get nodes
NAME ROLES STATUS AGE VERSION
yavin-controller-0.c.example-com.internal <none> Ready 6m v1.23.0
yavin-worker-jrbf.c.example-com.internal <none> Ready 5m v1.23.0
yavin-worker-mzdm.c.example-com.internal <none> Ready 5m v1.23.0
yavin-controller-0.c.example-com.internal <none> Ready 6m v1.23.4
yavin-worker-jrbf.c.example-com.internal <none> Ready 5m v1.23.4
yavin-worker-mzdm.c.example-com.internal <none> Ready 5m v1.23.4
```

List the pods.

@@ -13,12 +13,12 @@ Typhoon provides tagged releases to allow clusters to be versioned using ordinar

```
module "yavin" {
source = "git::https://github.com/poseidon/typhoon//google-cloud/fedora-coreos/kubernetes?ref=v1.23.0"
source = "git::https://github.com/poseidon/typhoon//google-cloud/fedora-coreos/kubernetes?ref=v1.23.4"
...
}

module "mercury" {
source = "git::https://github.com/poseidon/typhoon//bare-metal/flatcar-linux/kubernetes?ref=v1.23.0"
source = "git::https://github.com/poseidon/typhoon//bare-metal/flatcar-linux/kubernetes?ref=v1.23.4"
...
}
```

@@ -11,7 +11,7 @@ Typhoon distributes upstream Kubernetes, architectural conventions, and cluster

## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>

* Kubernetes v1.23.0 (upstream)
* Kubernetes v1.23.4 (upstream)
* Single or multi-master, [Calico](https://www.projectcalico.org/) or [Cilium](https://github.com/cilium/cilium) or [flannel](https://github.com/coreos/flannel) networking
* On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/), SELinux enforcing
* Advanced features like [worker pools](https://typhoon.psdn.io/advanced/worker-pools/), [preemptible](https://typhoon.psdn.io/fedora-coreos/google-cloud/#preemption) workers, and [snippets](https://typhoon.psdn.io/advanced/customization/#hosts) customization

@@ -1,6 +1,6 @@
# Kubernetes assets (kubeconfig, manifests)
module "bootstrap" {
source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=37f45cb28be2188befb5304794ba312cd8048fab"
source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=205e5f212b634b3f590c0a91f60f4540b145045d"

cluster_name = var.cluster_name
api_servers = [format("%s.%s", var.cluster_name, var.dns_zone)]

@@ -12,7 +12,7 @@ systemd:
Wants=network-online.target network.target
After=network-online.target
[Service]
Environment=ETCD_IMAGE=quay.io/coreos/etcd:v3.5.1
Environment=ETCD_IMAGE=quay.io/coreos/etcd:v3.5.2
Type=exec
ExecStartPre=/bin/mkdir -p /var/lib/etcd
ExecStartPre=-/usr/bin/podman rm etcd
@@ -29,8 +29,10 @@ systemd:
LimitNOFILE=40000
[Install]
WantedBy=multi-user.target
- name: docker.service
- name: containerd.service
enabled: true
- name: docker.service
mask: true
- name: wait-for-dns.service
enabled: true
contents: |
@@ -51,7 +53,7 @@ systemd:
Description=Kubelet (System Container)
Wants=rpc-statd.service
[Service]
Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.23.0
Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.23.4
ExecStartPre=/bin/mkdir -p /etc/cni/net.d
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
ExecStartPre=/bin/mkdir -p /opt/cni/bin
@@ -70,7 +72,7 @@ systemd:
--volume /run:/run \
--volume /sys/fs/cgroup:/sys/fs/cgroup \
--volume /var/lib/calico:/var/lib/calico:ro \
--volume /var/lib/docker:/var/lib/docker \
--volume /var/lib/containerd:/var/lib/containerd \
--volume /var/lib/kubelet:/var/lib/kubelet:rshared,z \
--volume /var/log:/var/log \
--volume /var/run/lock:/var/run/lock:z \
@@ -82,13 +84,14 @@ systemd:
--bootstrap-kubeconfig=/etc/kubernetes/kubeconfig \
--cgroup-driver=systemd \
--cgroups-per-qos=true \
--container-runtime=remote \
--container-runtime-endpoint=unix:///run/containerd/containerd.sock \
--enforce-node-allocatable=pods \
--client-ca-file=/etc/kubernetes/ca.crt \
--cluster_dns=${cluster_dns_service_ip} \
--cluster_domain=${cluster_domain_suffix} \
--healthz-port=0 \
--kubeconfig=/var/lib/kubelet/kubeconfig \
--network-plugin=cni \
--node-labels=node.kubernetes.io/controller="true" \
--pod-manifest-path=/etc/kubernetes/manifests \
--read-only-port=0 \
@@ -118,7 +121,7 @@ systemd:
--volume /opt/bootstrap/assets:/assets:ro,Z \
--volume /opt/bootstrap/apply:/apply:ro,Z \
--entrypoint=/apply \
quay.io/poseidon/kubelet:v1.23.0
quay.io/poseidon/kubelet:v1.23.4
ExecStartPost=/bin/touch /opt/bootstrap/bootstrap.done
ExecStartPost=-/usr/bin/podman stop bootstrap
storage:
@@ -214,6 +217,25 @@ storage:
ETCD_PEER_KEY_FILE=/etc/ssl/certs/etcd/peer.key
ETCD_PEER_CLIENT_CERT_AUTH=true
- path: /etc/fedora-coreos/iptables-legacy.stamp
- path: /etc/containerd/config.toml
overwrite: true
contents:
inline: |
version = 2
root = "/var/lib/containerd"
state = "/run/containerd"
subreaper = true
oom_score = -999
[grpc]
address = "/run/containerd/containerd.sock"
uid = 0
gid = 0
[plugins."io.containerd.grpc.v1.cri"]
enable_selinux = true
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
runtime_type = "io.containerd.runc.v2"
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
SystemdCgroup = true
passwd:
users:
- name: core

Some files were not shown because too many files have changed in this diff.