Mirror of https://github.com/puppetmaster/typhoon.git (synced 2025-08-02 16:41:34 +02:00)

Compare commits (16 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | 257a49ce37 |  |
|  | df3f40bcce |  |
|  | 32886cfba1 |  |
|  | 0ba2c1a4da |  |
|  | 430d139a5b |  |
|  | 7c6ab21b94 |  |
|  | 21178868db |  |
|  | 9dcf35e393 |  |
|  | 81b6f54169 |  |
|  | 7bce15975c |  |
|  | 1f83ae7dbb |  |
|  | a10a1cee9f |  |
|  | a79ad34ba3 |  |
|  | 99a11442c7 |  |
|  | d27f367004 |  |
|  | e9c8520359 |  |
CHANGES.md (33 changes)
@@ -4,6 +4,37 @@ Notable changes between versions.

 ## Latest

+* Kubernetes [v1.18.5](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.18.md#v1185)
+* Add Cilium v1.8.0 as an (experimental) CNI provider option ([#760](https://github.com/poseidon/typhoon/pull/760))
+  * Set `networking` to "cilium" to enable (see the sketch after this diff)
+* Update Calico from v3.14.1 to [v3.15.0](https://docs.projectcalico.org/v3.15/release-notes/)
+
+#### DigitalOcean
+
+* Isolate each cluster in an independent DigitalOcean VPC ([#776](https://github.com/poseidon/typhoon/pull/776))
+  * Create droplets in a VPC per cluster (matches Typhoon AWS, Azure, and GCP)
+  * Require `terraform-provider-digitalocean` v1.16.0+ (action required)
+  * Output `vpc_id` for use with an attached DigitalOcean [loadbalancer](https://github.com/poseidon/typhoon/blob/v1.18.5/docs/architecture/digitalocean.md#custom-load-balancer)
+
+### Fedora CoreOS
+
+#### Google Cloud
+
+* Promote Fedora CoreOS to stable
+* Remove `os_image` variable deprecated in v1.18.3 ([#777](https://github.com/poseidon/typhoon/pull/777))
+  * Use `os_stream` to select a Fedora CoreOS image stream
+
+### Flatcar Linux
+
+#### Azure
+
+* Allow using Flatcar Linux Edge by setting `os_image` to "flatcar-edge" ([#778](https://github.com/poseidon/typhoon/pull/778))
+
+#### Addons
+
+* Update Prometheus from v2.19.0 to [v2.19.1](https://github.com/prometheus/prometheus/releases/tag/v2.19.1)
+* Update Grafana from v7.0.3 to [v7.0.4](https://github.com/grafana/grafana/releases/tag/v7.0.4)
+
+## v1.18.4

 * Kubernetes [v1.18.4](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.18.md#v1184)

@@ -88,7 +119,7 @@ Notable changes between versions.

 #### Google

-* Support Fedora CoreOS [image streams](https://docs.fedoraproject.org/en-US/fedora-coreos/update-streams/) ([#723](https://github.com/poseidon/typhoon/pull/722))
+* Support Fedora CoreOS [image streams](https://docs.fedoraproject.org/en-US/fedora-coreos/update-streams/) ([#723](https://github.com/poseidon/typhoon/pull/723))
 * Add `os_stream` variable to set the stream to `stable` (default), `testing`, or `next`
 * Deprecate `os_image` variable. Manual image uploads are no longer needed
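For reference, enabling the new options is one variable each. A minimal sketch against the Typhoon Google Cloud Fedora CoreOS module shown later in this compare (values illustrative; other required variables such as region, DNS zone, and SSH keys omitted):

```tf
module "yavin" {
  source = "git::https://github.com/poseidon/typhoon//google-cloud/fedora-coreos/kubernetes?ref=v1.18.5"

  cluster_name = "yavin"

  # experimental: use Cilium instead of the default CNI provider
  networking = "cilium"

  # Fedora CoreOS image stream: "stable" (default), "testing", or "next"
  os_stream = "stable"
}
```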
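The DigitalOcean change is the one marked "action required": the per-cluster VPC resource comes from newer provider releases, so existing configs must pin `terraform-provider-digitalocean` at v1.16.0 or above before applying. A sketch, assuming Terraform 0.12-style version constraints and a hypothetical cluster module named `nemo`:

```tf
terraform {
  required_providers {
    digitalocean = "~> 1.16" # v1.16.0+ adds the VPC resources Typhoon now uses
  }
}

# expose the new per-cluster VPC id, e.g. for attaching a custom load balancer
output "nemo_vpc_id" {
  value = module.nemo.vpc_id
}
```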
README.md (12 changes)
@@ -11,7 +11,7 @@ Typhoon distributes upstream Kubernetes, architectural conventions, and cluster

 ## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>

-* Kubernetes v1.18.4 (upstream)
+* Kubernetes v1.18.5 (upstream)
 * Single or multi-master, [Calico](https://www.projectcalico.org/) or [flannel](https://github.com/coreos/flannel) networking
 * On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/), SELinux enforcing
 * Advanced features like [worker pools](https://typhoon.psdn.io/advanced/worker-pools/), [preemptible](https://typhoon.psdn.io/cl/google-cloud/#preemption) workers, and [snippets](https://typhoon.psdn.io/advanced/customization/#container-linux) customization
@@ -29,7 +29,7 @@ Typhoon is available for [Fedora CoreOS](https://getfedora.org/coreos/).

 | Azure | Fedora CoreOS | [azure/fedora-coreos/kubernetes](azure/fedora-coreos/kubernetes) | alpha |
 | Bare-Metal | Fedora CoreOS | [bare-metal/fedora-coreos/kubernetes](bare-metal/fedora-coreos/kubernetes) | beta |
 | DigitalOcean | Fedora CoreOS | [digital-ocean/fedora-coreos/kubernetes](digital-ocean/fedora-coreos/kubernetes) | beta |
-| Google Cloud | Fedora CoreOS | [google-cloud/fedora-coreos/kubernetes](google-cloud/fedora-coreos/kubernetes) | beta |
+| Google Cloud | Fedora CoreOS | [google-cloud/fedora-coreos/kubernetes](google-cloud/fedora-coreos/kubernetes) | stable |

 Typhoon is available for [Flatcar Linux](https://www.flatcar-linux.org/releases/).
@@ -54,7 +54,7 @@ Define a Kubernetes cluster by using the Terraform module for your chosen platform

 ```tf
 module "yavin" {
-  source = "git::https://github.com/poseidon/typhoon//google-cloud/fedora-coreos/kubernetes?ref=v1.18.4"
+  source = "git::https://github.com/poseidon/typhoon//google-cloud/fedora-coreos/kubernetes?ref=v1.18.5"

   # Google Cloud
   cluster_name = "yavin"
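A patch upgrade like this one is applied the same way a cluster is defined: edit the module `ref` from `v1.18.4` to `v1.18.5`, then run `terraform plan` and `terraform apply` to roll the cluster forward.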
@@ -93,9 +93,9 @@ In 4-8 minutes (varies by platform), the cluster will be ready. This Google Cloud

 $ export KUBECONFIG=/home/user/.kube/configs/yavin-config
 $ kubectl get nodes
 NAME                                       ROLES   STATUS  AGE  VERSION
-yavin-controller-0.c.example-com.internal  <none>  Ready   6m   v1.18.4
-yavin-worker-jrbf.c.example-com.internal   <none>  Ready   5m   v1.18.4
-yavin-worker-mzdm.c.example-com.internal   <none>  Ready   5m   v1.18.4
+yavin-controller-0.c.example-com.internal  <none>  Ready   6m   v1.18.5
+yavin-worker-jrbf.c.example-com.internal   <none>  Ready   5m   v1.18.5
+yavin-worker-mzdm.c.example-com.internal   <none>  Ready   5m   v1.18.5
 ```

 List the pods.
@@ -23,7 +23,7 @@ spec:

     spec:
       containers:
         - name: grafana
-          image: docker.io/grafana/grafana:7.0.3
+          image: docker.io/grafana/grafana:7.0.4
           env:
             - name: GF_PATHS_CONFIG
               value: "/etc/grafana/custom.ini"
@@ -20,7 +20,7 @@ spec:

       serviceAccountName: prometheus
       containers:
         - name: prometheus
-          image: quay.io/prometheus/prometheus:v2.19.0
+          image: quay.io/prometheus/prometheus:v2.19.1
           args:
             - --web.listen-address=0.0.0.0:9090
             - --config.file=/etc/prometheus/prometheus.yaml
@@ -11,7 +11,7 @@ Typhoon distributes upstream Kubernetes, architectural conventions, and cluster

 ## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>

-* Kubernetes v1.18.4 (upstream)
+* Kubernetes v1.18.5 (upstream)
 * Single or multi-master, [Calico](https://www.projectcalico.org/) or [flannel](https://github.com/coreos/flannel) networking
 * On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
 * Advanced features like [worker pools](https://typhoon.psdn.io/advanced/worker-pools/), [spot](https://typhoon.psdn.io/cl/aws/#spot) workers, and [snippets](https://typhoon.psdn.io/advanced/customization/#container-linux) customization
@@ -1,6 +1,6 @@
 # Kubernetes assets (kubeconfig, manifests)
 module "bootstrap" {
-  source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=e75697ce35d7773705f0b9b28ce1ffbe99f9493c"
+  source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=5a7c963caf59740891df2aeae4b1561ccb3b9db6"

   cluster_name = var.cluster_name
   api_servers  = [format("%s.%s", var.cluster_name, var.dns_zone)]
@@ -52,7 +52,7 @@ systemd:
       Description=Kubelet
       Wants=rpc-statd.service
       [Service]
-      Environment=KUBELET_IMAGE=docker://quay.io/poseidon/kubelet:v1.18.4
+      Environment=KUBELET_IMAGE=docker://quay.io/poseidon/kubelet:v1.18.5
       Environment=KUBELET_CGROUP_DRIVER=${cgroup_driver}
      ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
       ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
@@ -134,7 +134,7 @@ systemd:
       --volume script,kind=host,source=/opt/bootstrap/apply \
       --mount volume=script,target=/apply \
       --insecure-options=image \
-      docker://quay.io/poseidon/kubelet:v1.18.4 \
+      docker://quay.io/poseidon/kubelet:v1.18.5 \
       --net=host \
       --dns=host \
       --exec=/apply
@@ -13,6 +13,30 @@ resource "aws_security_group" "controller" {
   }
 }

+resource "aws_security_group_rule" "controller-icmp" {
+  count = var.networking == "cilium" ? 1 : 0
+
+  security_group_id = aws_security_group.controller.id
+
+  type                     = "ingress"
+  protocol                 = "icmp"
+  from_port                = 8
+  to_port                  = 0
+  source_security_group_id = aws_security_group.worker.id
+}
+
+resource "aws_security_group_rule" "controller-icmp-self" {
+  count = var.networking == "cilium" ? 1 : 0
+
+  security_group_id = aws_security_group.controller.id
+
+  type      = "ingress"
+  protocol  = "icmp"
+  from_port = 8
+  to_port   = 0
+  self      = true
+}
+
 resource "aws_security_group_rule" "controller-ssh" {
   security_group_id = aws_security_group.controller.id
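One detail worth calling out in the ICMP rules above: with `protocol = "icmp"`, AWS repurposes `from_port` as the ICMP type and `to_port` as the ICMP code, so `8` / `0` admits only echo requests (ping), which cilium-health uses for connectivity probes. A standalone sketch of the same pattern (the group name `example` is hypothetical):

```tf
resource "aws_security_group_rule" "allow-ping" {
  security_group_id = aws_security_group.example.id # hypothetical group

  type      = "ingress"
  protocol  = "icmp"
  from_port = 8 # ICMP type 8 (echo request), not a port
  to_port   = 0 # ICMP code 0, not a port
  self      = true
}
```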
@@ -44,39 +68,31 @@ resource "aws_security_group_rule" "controller-etcd-metrics" {
   source_security_group_id = aws_security_group.worker.id
 }

-# Allow Prometheus to scrape kube-proxy
-resource "aws_security_group_rule" "kube-proxy-metrics" {
+resource "aws_security_group_rule" "controller-cilium-health" {
+  count = var.networking == "cilium" ? 1 : 0
+
   security_group_id = aws_security_group.controller.id

   type                     = "ingress"
   protocol                 = "tcp"
-  from_port                = 10249
-  to_port                  = 10249
+  from_port                = 4240
+  to_port                  = 4240
   source_security_group_id = aws_security_group.worker.id
 }

-# Allow Prometheus to scrape kube-scheduler
-resource "aws_security_group_rule" "controller-scheduler-metrics" {
+resource "aws_security_group_rule" "controller-cilium-health-self" {
+  count = var.networking == "cilium" ? 1 : 0
+
   security_group_id = aws_security_group.controller.id

-  type                     = "ingress"
-  protocol                 = "tcp"
-  from_port                = 10251
-  to_port                  = 10251
-  source_security_group_id = aws_security_group.worker.id
-}
-
-# Allow Prometheus to scrape kube-controller-manager
-resource "aws_security_group_rule" "controller-manager-metrics" {
-  security_group_id = aws_security_group.controller.id
-
-  type                     = "ingress"
-  protocol                 = "tcp"
-  from_port                = 10252
-  to_port                  = 10252
-  source_security_group_id = aws_security_group.worker.id
+  type      = "ingress"
+  protocol  = "tcp"
+  from_port = 4240
+  to_port   = 4240
+  self      = true
 }

 # IANA VXLAN default
 resource "aws_security_group_rule" "controller-vxlan" {
   count = var.networking == "flannel" ? 1 : 0
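Note that the three Prometheus scrape rules removed in this hunk (`kube-proxy-metrics`, `controller-scheduler-metrics`, `controller-manager-metrics`) are not dropped; they reappear verbatim in the `@@ -122,6 +163,17 @@` and `@@ -143,6 +195,28 @@` hunks below. The net change is reordering plus the new Cilium health-check rules on TCP 4240, each gated by `count = var.networking == "cilium" ? 1 : 0` so non-Cilium clusters render zero copies of the resource.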
@@ -111,6 +127,31 @@ resource "aws_security_group_rule" "controller-apiserver" {
   cidr_blocks = ["0.0.0.0/0"]
 }

+# Linux VXLAN default
+resource "aws_security_group_rule" "controller-linux-vxlan" {
+  count = var.networking == "cilium" ? 1 : 0
+
+  security_group_id = aws_security_group.controller.id
+
+  type                     = "ingress"
+  protocol                 = "udp"
+  from_port                = 8472
+  to_port                  = 8472
+  source_security_group_id = aws_security_group.worker.id
+}
+
+resource "aws_security_group_rule" "controller-linux-vxlan-self" {
+  count = var.networking == "cilium" ? 1 : 0
+
+  security_group_id = aws_security_group.controller.id
+
+  type      = "ingress"
+  protocol  = "udp"
+  from_port = 8472
+  to_port   = 8472
+  self      = true
+}
+
 # Allow Prometheus to scrape node-exporter daemonset
 resource "aws_security_group_rule" "controller-node-exporter" {
   security_group_id = aws_security_group.controller.id
@@ -122,6 +163,17 @@ resource "aws_security_group_rule" "controller-node-exporter" {
   source_security_group_id = aws_security_group.worker.id
 }

+# Allow Prometheus to scrape kube-proxy
+resource "aws_security_group_rule" "kube-proxy-metrics" {
+  security_group_id = aws_security_group.controller.id
+
+  type                     = "ingress"
+  protocol                 = "tcp"
+  from_port                = 10249
+  to_port                  = 10249
+  source_security_group_id = aws_security_group.worker.id
+}
+
 # Allow apiserver to access kubelets for exec, log, port-forward
 resource "aws_security_group_rule" "controller-kubelet" {
   security_group_id = aws_security_group.controller.id
@@ -143,6 +195,28 @@ resource "aws_security_group_rule" "controller-kubelet-self" {
   self = true
 }

+# Allow Prometheus to scrape kube-scheduler
+resource "aws_security_group_rule" "controller-scheduler-metrics" {
+  security_group_id = aws_security_group.controller.id
+
+  type                     = "ingress"
+  protocol                 = "tcp"
+  from_port                = 10251
+  to_port                  = 10251
+  source_security_group_id = aws_security_group.worker.id
+}
+
+# Allow Prometheus to scrape kube-controller-manager
+resource "aws_security_group_rule" "controller-manager-metrics" {
+  security_group_id = aws_security_group.controller.id
+
+  type                     = "ingress"
+  protocol                 = "tcp"
+  from_port                = 10252
+  to_port                  = 10252
+  source_security_group_id = aws_security_group.worker.id
+}
+
 resource "aws_security_group_rule" "controller-bgp" {
   security_group_id = aws_security_group.controller.id
@@ -227,6 +301,30 @@ resource "aws_security_group" "worker" {
   }
 }

+resource "aws_security_group_rule" "worker-icmp" {
+  count = var.networking == "cilium" ? 1 : 0
+
+  security_group_id = aws_security_group.worker.id
+
+  type                     = "ingress"
+  protocol                 = "icmp"
+  from_port                = 8
+  to_port                  = 0
+  source_security_group_id = aws_security_group.controller.id
+}
+
+resource "aws_security_group_rule" "worker-icmp-self" {
+  count = var.networking == "cilium" ? 1 : 0
+
+  security_group_id = aws_security_group.worker.id
+
+  type      = "ingress"
+  protocol  = "icmp"
+  from_port = 8
+  to_port   = 0
+  self      = true
+}
+
 resource "aws_security_group_rule" "worker-ssh" {
   security_group_id = aws_security_group.worker.id
@@ -257,6 +355,31 @@ resource "aws_security_group_rule" "worker-https" {
   cidr_blocks = ["0.0.0.0/0"]
 }

+resource "aws_security_group_rule" "worker-cilium-health" {
+  count = var.networking == "cilium" ? 1 : 0
+
+  security_group_id = aws_security_group.worker.id
+
+  type                     = "ingress"
+  protocol                 = "tcp"
+  from_port                = 4240
+  to_port                  = 4240
+  source_security_group_id = aws_security_group.controller.id
+}
+
+resource "aws_security_group_rule" "worker-cilium-health-self" {
+  count = var.networking == "cilium" ? 1 : 0
+
+  security_group_id = aws_security_group.worker.id
+
+  type      = "ingress"
+  protocol  = "tcp"
+  from_port = 4240
+  to_port   = 4240
+  self      = true
+}
+
 # IANA VXLAN default
 resource "aws_security_group_rule" "worker-vxlan" {
   count = var.networking == "flannel" ? 1 : 0
@@ -281,6 +404,31 @@ resource "aws_security_group_rule" "worker-vxlan-self" {
   self = true
 }

+# Linux VXLAN default
+resource "aws_security_group_rule" "worker-linux-vxlan" {
+  count = var.networking == "cilium" ? 1 : 0
+
+  security_group_id = aws_security_group.worker.id
+
+  type                     = "ingress"
+  protocol                 = "udp"
+  from_port                = 8472
+  to_port                  = 8472
+  source_security_group_id = aws_security_group.controller.id
+}
+
+resource "aws_security_group_rule" "worker-linux-vxlan-self" {
+  count = var.networking == "cilium" ? 1 : 0
+
+  security_group_id = aws_security_group.worker.id
+
+  type      = "ingress"
+  protocol  = "udp"
+  from_port = 8472
+  to_port   = 8472
+  self      = true
+}
+
 # Allow Prometheus to scrape node-exporter daemonset
 resource "aws_security_group_rule" "worker-node-exporter" {
   security_group_id = aws_security_group.worker.id
@@ -25,7 +25,7 @@ systemd:
       Description=Kubelet
       Wants=rpc-statd.service
       [Service]
-      Environment=KUBELET_IMAGE=docker://quay.io/poseidon/kubelet:v1.18.4
+      Environment=KUBELET_IMAGE=docker://quay.io/poseidon/kubelet:v1.18.5
       Environment=KUBELET_CGROUP_DRIVER=${cgroup_driver}
       ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
       ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests

@@ -129,7 +129,7 @@ storage:
       --volume config,kind=host,source=/etc/kubernetes \
       --mount volume=config,target=/etc/kubernetes \
       --insecure-options=image \
-      docker://quay.io/poseidon/kubelet:v1.18.4 \
+      docker://quay.io/poseidon/kubelet:v1.18.5 \
       --net=host \
       --dns=host \
       --exec=/usr/local/bin/kubectl -- --kubeconfig=/etc/kubernetes/kubeconfig delete node $(hostname)
@@ -11,7 +11,7 @@ Typhoon distributes upstream Kubernetes, architectural conventions, and cluster

 ## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>

-* Kubernetes v1.18.4 (upstream)
+* Kubernetes v1.18.5 (upstream)
 * Single or multi-master, [Calico](https://www.projectcalico.org/) or [flannel](https://github.com/coreos/flannel) networking
 * On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/), SELinux enforcing
 * Advanced features like [worker pools](https://typhoon.psdn.io/advanced/worker-pools/), [spot](https://typhoon.psdn.io/cl/aws/#spot) workers, and [snippets](https://typhoon.psdn.io/advanced/customization/#container-linux) customization
@@ -1,6 +1,6 @@
 # Kubernetes assets (kubeconfig, manifests)
 module "bootstrap" {
-  source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=e75697ce35d7773705f0b9b28ce1ffbe99f9493c"
+  source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=5a7c963caf59740891df2aeae4b1561ccb3b9db6"

   cluster_name = var.cluster_name
   api_servers  = [format("%s.%s", var.cluster_name, var.dns_zone)]
@@ -55,7 +55,7 @@ systemd:
       Description=Kubelet (System Container)
       Wants=rpc-statd.service
       [Service]
-      Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.18.4
+      Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.18.5
       ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
       ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
       ExecStartPre=/bin/mkdir -p /opt/cni/bin

@@ -124,7 +124,7 @@ systemd:
       --volume /opt/bootstrap/assets:/assets:ro,Z \
       --volume /opt/bootstrap/apply:/apply:ro,Z \
       --entrypoint=/apply \
-      quay.io/poseidon/kubelet:v1.18.4
+      quay.io/poseidon/kubelet:v1.18.5
       ExecStartPost=/bin/touch /opt/bootstrap/bootstrap.done
       ExecStartPost=-/usr/bin/podman stop bootstrap
 storage:
@@ -176,6 +176,11 @@ storage:
       contents:
         inline: |
           fs.inotify.max_user_watches=16184
+    - path: /etc/sysctl.d/reverse-path-filter.conf
+      contents:
+        inline: |
+          net.ipv4.conf.default.rp_filter=0
+          net.ipv4.conf.*.rp_filter=0
     - path: /etc/systemd/system.conf.d/accounting.conf
       contents:
         inline: |
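The new `/etc/sysctl.d/reverse-path-filter.conf` entry accompanies the Cilium option: strict reverse-path filtering can drop the asymmetrically routed packets a VXLAN/eBPF datapath may produce, so `rp_filter` is relaxed for the default template and, via the glob pattern that systemd-sysctl accepts (`net.ipv4.conf.*`), for every interface.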
@@ -13,6 +13,30 @@ resource "aws_security_group" "controller" {
   }
 }

+resource "aws_security_group_rule" "controller-icmp" {
+  count = var.networking == "cilium" ? 1 : 0
+
+  security_group_id = aws_security_group.controller.id
+
+  type                     = "ingress"
+  protocol                 = "icmp"
+  from_port                = 8
+  to_port                  = 0
+  source_security_group_id = aws_security_group.worker.id
+}
+
+resource "aws_security_group_rule" "controller-icmp-self" {
+  count = var.networking == "cilium" ? 1 : 0
+
+  security_group_id = aws_security_group.controller.id
+
+  type      = "ingress"
+  protocol  = "icmp"
+  from_port = 8
+  to_port   = 0
+  self      = true
+}
+
 resource "aws_security_group_rule" "controller-ssh" {
   security_group_id = aws_security_group.controller.id
@@ -44,39 +68,31 @@ resource "aws_security_group_rule" "controller-etcd-metrics" {
   source_security_group_id = aws_security_group.worker.id
 }

-# Allow Prometheus to scrape kube-proxy
-resource "aws_security_group_rule" "kube-proxy-metrics" {
+resource "aws_security_group_rule" "controller-cilium-health" {
+  count = var.networking == "cilium" ? 1 : 0
+
   security_group_id = aws_security_group.controller.id

   type                     = "ingress"
   protocol                 = "tcp"
-  from_port                = 10249
-  to_port                  = 10249
+  from_port                = 4240
+  to_port                  = 4240
   source_security_group_id = aws_security_group.worker.id
 }

-# Allow Prometheus to scrape kube-scheduler
-resource "aws_security_group_rule" "controller-scheduler-metrics" {
+resource "aws_security_group_rule" "controller-cilium-health-self" {
+  count = var.networking == "cilium" ? 1 : 0
+
   security_group_id = aws_security_group.controller.id

-  type                     = "ingress"
-  protocol                 = "tcp"
-  from_port                = 10251
-  to_port                  = 10251
-  source_security_group_id = aws_security_group.worker.id
-}
-
-# Allow Prometheus to scrape kube-controller-manager
-resource "aws_security_group_rule" "controller-manager-metrics" {
-  security_group_id = aws_security_group.controller.id
-
-  type                     = "ingress"
-  protocol                 = "tcp"
-  from_port                = 10252
-  to_port                  = 10252
-  source_security_group_id = aws_security_group.worker.id
+  type      = "ingress"
+  protocol  = "tcp"
+  from_port = 4240
+  to_port   = 4240
+  self      = true
 }

 # IANA VXLAN default
 resource "aws_security_group_rule" "controller-vxlan" {
   count = var.networking == "flannel" ? 1 : 0
@@ -111,6 +127,31 @@ resource "aws_security_group_rule" "controller-apiserver" {
   cidr_blocks = ["0.0.0.0/0"]
 }

+# Linux VXLAN default
+resource "aws_security_group_rule" "controller-linux-vxlan" {
+  count = var.networking == "cilium" ? 1 : 0
+
+  security_group_id = aws_security_group.controller.id
+
+  type                     = "ingress"
+  protocol                 = "udp"
+  from_port                = 8472
+  to_port                  = 8472
+  source_security_group_id = aws_security_group.worker.id
+}
+
+resource "aws_security_group_rule" "controller-linux-vxlan-self" {
+  count = var.networking == "cilium" ? 1 : 0
+
+  security_group_id = aws_security_group.controller.id
+
+  type      = "ingress"
+  protocol  = "udp"
+  from_port = 8472
+  to_port   = 8472
+  self      = true
+}
+
 # Allow Prometheus to scrape node-exporter daemonset
 resource "aws_security_group_rule" "controller-node-exporter" {
   security_group_id = aws_security_group.controller.id
@@ -122,6 +163,17 @@ resource "aws_security_group_rule" "controller-node-exporter" {
   source_security_group_id = aws_security_group.worker.id
 }

+# Allow Prometheus to scrape kube-proxy
+resource "aws_security_group_rule" "kube-proxy-metrics" {
+  security_group_id = aws_security_group.controller.id
+
+  type                     = "ingress"
+  protocol                 = "tcp"
+  from_port                = 10249
+  to_port                  = 10249
+  source_security_group_id = aws_security_group.worker.id
+}
+
 # Allow apiserver to access kubelets for exec, log, port-forward
 resource "aws_security_group_rule" "controller-kubelet" {
   security_group_id = aws_security_group.controller.id
@@ -143,6 +195,28 @@ resource "aws_security_group_rule" "controller-kubelet-self" {
   self = true
 }

+# Allow Prometheus to scrape kube-scheduler
+resource "aws_security_group_rule" "controller-scheduler-metrics" {
+  security_group_id = aws_security_group.controller.id
+
+  type                     = "ingress"
+  protocol                 = "tcp"
+  from_port                = 10251
+  to_port                  = 10251
+  source_security_group_id = aws_security_group.worker.id
+}
+
+# Allow Prometheus to scrape kube-controller-manager
+resource "aws_security_group_rule" "controller-manager-metrics" {
+  security_group_id = aws_security_group.controller.id
+
+  type                     = "ingress"
+  protocol                 = "tcp"
+  from_port                = 10252
+  to_port                  = 10252
+  source_security_group_id = aws_security_group.worker.id
+}
+
 resource "aws_security_group_rule" "controller-bgp" {
   security_group_id = aws_security_group.controller.id
@@ -227,6 +301,30 @@ resource "aws_security_group" "worker" {
   }
 }

+resource "aws_security_group_rule" "worker-icmp" {
+  count = var.networking == "cilium" ? 1 : 0
+
+  security_group_id = aws_security_group.worker.id
+
+  type                     = "ingress"
+  protocol                 = "icmp"
+  from_port                = 8
+  to_port                  = 0
+  source_security_group_id = aws_security_group.controller.id
+}
+
+resource "aws_security_group_rule" "worker-icmp-self" {
+  count = var.networking == "cilium" ? 1 : 0
+
+  security_group_id = aws_security_group.worker.id
+
+  type      = "ingress"
+  protocol  = "icmp"
+  from_port = 8
+  to_port   = 0
+  self      = true
+}
+
 resource "aws_security_group_rule" "worker-ssh" {
   security_group_id = aws_security_group.worker.id
@@ -257,6 +355,31 @@ resource "aws_security_group_rule" "worker-https" {
   cidr_blocks = ["0.0.0.0/0"]
 }

+resource "aws_security_group_rule" "worker-cilium-health" {
+  count = var.networking == "cilium" ? 1 : 0
+
+  security_group_id = aws_security_group.worker.id
+
+  type                     = "ingress"
+  protocol                 = "tcp"
+  from_port                = 4240
+  to_port                  = 4240
+  source_security_group_id = aws_security_group.controller.id
+}
+
+resource "aws_security_group_rule" "worker-cilium-health-self" {
+  count = var.networking == "cilium" ? 1 : 0
+
+  security_group_id = aws_security_group.worker.id
+
+  type      = "ingress"
+  protocol  = "tcp"
+  from_port = 4240
+  to_port   = 4240
+  self      = true
+}
+
 # IANA VXLAN default
 resource "aws_security_group_rule" "worker-vxlan" {
   count = var.networking == "flannel" ? 1 : 0
@@ -281,6 +404,31 @@ resource "aws_security_group_rule" "worker-vxlan-self" {
   self = true
 }

+# Linux VXLAN default
+resource "aws_security_group_rule" "worker-linux-vxlan" {
+  count = var.networking == "cilium" ? 1 : 0
+
+  security_group_id = aws_security_group.worker.id
+
+  type                     = "ingress"
+  protocol                 = "udp"
+  from_port                = 8472
+  to_port                  = 8472
+  source_security_group_id = aws_security_group.controller.id
+}
+
+resource "aws_security_group_rule" "worker-linux-vxlan-self" {
+  count = var.networking == "cilium" ? 1 : 0
+
+  security_group_id = aws_security_group.worker.id
+
+  type      = "ingress"
+  protocol  = "udp"
+  from_port = 8472
+  to_port   = 8472
+  self      = true
+}
+
 # Allow Prometheus to scrape node-exporter daemonset
 resource "aws_security_group_rule" "worker-node-exporter" {
   security_group_id = aws_security_group.worker.id
@@ -25,7 +25,7 @@ systemd:
       Description=Kubelet (System Container)
       Wants=rpc-statd.service
       [Service]
-      Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.18.4
+      Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.18.5
       ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
       ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
       ExecStartPre=/bin/mkdir -p /opt/cni/bin

@@ -89,7 +89,7 @@ systemd:
       Type=oneshot
       RemainAfterExit=true
      ExecStart=/bin/true
-      ExecStop=/bin/bash -c '/usr/bin/podman run --volume /etc/kubernetes:/etc/kubernetes:ro,z --entrypoint /usr/local/bin/kubectl quay.io/poseidon/kubelet:v1.18.4 --kubeconfig=/etc/kubernetes/kubeconfig delete node $HOSTNAME'
+      ExecStop=/bin/bash -c '/usr/bin/podman run --volume /etc/kubernetes:/etc/kubernetes:ro,z --entrypoint /usr/local/bin/kubectl quay.io/poseidon/kubelet:v1.18.5 --kubeconfig=/etc/kubernetes/kubeconfig delete node $HOSTNAME'
       [Install]
       WantedBy=multi-user.target
 storage:

@@ -104,6 +104,11 @@ storage:
       contents:
         inline: |
           fs.inotify.max_user_watches=16184
+    - path: /etc/sysctl.d/reverse-path-filter.conf
+      contents:
+        inline: |
+          net.ipv4.conf.default.rp_filter=0
+          net.ipv4.conf.*.rp_filter=0
     - path: /etc/systemd/system.conf.d/accounting.conf
       contents:
         inline: |
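The worker's node-deregistration unit visible above relies on a common systemd idiom: `Type=oneshot` with `RemainAfterExit=true` and `ExecStart=/bin/true` makes the unit an immediate no-op at boot that still counts as active, so its `ExecStop` (the `podman run ... kubectl delete node $HOSTNAME` command) fires at shutdown and removes the Node object before the machine disappears.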
@@ -11,7 +11,7 @@ Typhoon distributes upstream Kubernetes, architectural conventions, and cluster

 ## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>

-* Kubernetes v1.18.4 (upstream)
+* Kubernetes v1.18.5 (upstream)
 * Single or multi-master, [Calico](https://www.projectcalico.org/) or [flannel](https://github.com/coreos/flannel) networking
 * On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
 * Advanced features like [worker pools](https://typhoon.psdn.io/advanced/worker-pools/), [low-priority](https://typhoon.psdn.io/cl/azure/#low-priority) workers, and [snippets](https://typhoon.psdn.io/advanced/customization/#container-linux) customization
@@ -1,6 +1,6 @@
 # Kubernetes assets (kubeconfig, manifests)
 module "bootstrap" {
-  source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=e75697ce35d7773705f0b9b28ce1ffbe99f9493c"
+  source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=5a7c963caf59740891df2aeae4b1561ccb3b9db6"

   cluster_name = var.cluster_name
   api_servers  = [format("%s.%s", var.cluster_name, var.dns_zone)]
@@ -52,7 +52,8 @@ systemd:
       Description=Kubelet
       Wants=rpc-statd.service
       [Service]
-      Environment=KUBELET_IMAGE=docker://quay.io/poseidon/kubelet:v1.18.4
+      Environment=KUBELET_IMAGE=docker://quay.io/poseidon/kubelet:v1.18.5
+      Environment=KUBELET_CGROUP_DRIVER=${cgroup_driver}
       ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
       ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
       ExecStartPre=/bin/mkdir -p /opt/cni/bin

@@ -96,6 +97,7 @@ systemd:
         --authentication-token-webhook \
         --authorization-mode=Webhook \
         --bootstrap-kubeconfig=/etc/kubernetes/kubeconfig \
+        --cgroup-driver=$${KUBELET_CGROUP_DRIVER} \
         --client-ca-file=/etc/kubernetes/ca.crt \
         --cluster_dns=${cluster_dns_service_ip} \
         --cluster_domain=${cluster_domain_suffix} \

@@ -132,7 +134,7 @@ systemd:
       --volume script,kind=host,source=/opt/bootstrap/apply \
       --mount volume=script,target=/apply \
       --insecure-options=image \
-      docker://quay.io/poseidon/kubelet:v1.18.4 \
+      docker://quay.io/poseidon/kubelet:v1.18.5 \
       --net=host \
       --dns=host \
       --exec=/apply
@@ -157,6 +157,7 @@ data "template_file" "controller-configs" {
     etcd_domain = "${var.cluster_name}-etcd${count.index}.${var.dns_zone}"
     # etcd0=https://cluster-etcd0.example.com,etcd1=https://cluster-etcd1.example.com,...
     etcd_initial_cluster = join(",", data.template_file.etcds.*.rendered)
+    cgroup_driver = local.flavor == "flatcar" && local.channel == "edge" ? "systemd" : "cgroupfs"
     kubeconfig = indent(10, module.bootstrap.kubeconfig-kubelet)
     ssh_authorized_key = var.ssh_authorized_key
     cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
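The new `cgroup_driver` template variable feeds the `--cgroup-driver=$${KUBELET_CGROUP_DRIVER}` kubelet flag added in the hunks above. A kubelet's cgroup driver must match the one its container runtime uses, and the conditional encodes that only Flatcar Linux on the `edge` channel runs the runtime under the `systemd` driver; every other flavor and channel keeps the `cgroupfs` default.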
@@ -7,6 +7,21 @@ resource "azurerm_network_security_group" "controller" {
   location = azurerm_resource_group.cluster.location
 }

+resource "azurerm_network_security_rule" "controller-icmp" {
+  resource_group_name = azurerm_resource_group.cluster.name
+
+  name                        = "allow-icmp"
+  network_security_group_name = azurerm_network_security_group.controller.name
+  priority                    = "1995"
+  access                      = "Allow"
+  direction                   = "Inbound"
+  protocol                    = "Icmp"
+  source_port_range           = "*"
+  destination_port_range      = "*"
+  source_address_prefixes     = [azurerm_subnet.controller.address_prefix, azurerm_subnet.worker.address_prefix]
+  destination_address_prefix  = azurerm_subnet.controller.address_prefix
+}
+
 resource "azurerm_network_security_rule" "controller-ssh" {
   resource_group_name = azurerm_resource_group.cluster.name

@@ -100,6 +115,22 @@ resource "azurerm_network_security_rule" "controller-apiserver" {
   destination_address_prefix = azurerm_subnet.controller.address_prefix
 }

+resource "azurerm_network_security_rule" "controller-cilium-health" {
+  resource_group_name = azurerm_resource_group.cluster.name
+  count               = var.networking == "cilium" ? 1 : 0
+
+  name                        = "allow-cilium-health"
+  network_security_group_name = azurerm_network_security_group.controller.name
+  priority                    = "2019"
+  access                      = "Allow"
+  direction                   = "Inbound"
+  protocol                    = "Tcp"
+  source_port_range           = "*"
+  destination_port_range      = "4240"
+  source_address_prefixes     = [azurerm_subnet.controller.address_prefix, azurerm_subnet.worker.address_prefix]
+  destination_address_prefix  = azurerm_subnet.controller.address_prefix
+}
+
 resource "azurerm_network_security_rule" "controller-vxlan" {
   resource_group_name = azurerm_resource_group.cluster.name

@@ -115,6 +146,21 @@ resource "azurerm_network_security_rule" "controller-vxlan" {
   destination_address_prefix = azurerm_subnet.controller.address_prefix
 }

+resource "azurerm_network_security_rule" "controller-linux-vxlan" {
+  resource_group_name = azurerm_resource_group.cluster.name
+
+  name                        = "allow-linux-vxlan"
+  network_security_group_name = azurerm_network_security_group.controller.name
+  priority                    = "2021"
+  access                      = "Allow"
+  direction                   = "Inbound"
+  protocol                    = "Udp"
+  source_port_range           = "*"
+  destination_port_range      = "8472"
+  source_address_prefixes     = [azurerm_subnet.controller.address_prefix, azurerm_subnet.worker.address_prefix]
+  destination_address_prefix  = azurerm_subnet.controller.address_prefix
+}
+
 # Allow Prometheus to scrape node-exporter daemonset
 resource "azurerm_network_security_rule" "controller-node-exporter" {
   resource_group_name = azurerm_resource_group.cluster.name

@@ -191,6 +237,21 @@ resource "azurerm_network_security_group" "worker" {
   location = azurerm_resource_group.cluster.location
 }

+resource "azurerm_network_security_rule" "worker-icmp" {
+  resource_group_name = azurerm_resource_group.cluster.name
+
+  name                        = "allow-icmp"
+  network_security_group_name = azurerm_network_security_group.worker.name
+  priority                    = "1995"
+  access                      = "Allow"
+  direction                   = "Inbound"
+  protocol                    = "Icmp"
+  source_port_range           = "*"
+  destination_port_range      = "*"
+  source_address_prefixes     = [azurerm_subnet.controller.address_prefix, azurerm_subnet.worker.address_prefix]
+  destination_address_prefix  = azurerm_subnet.worker.address_prefix
+}
+
 resource "azurerm_network_security_rule" "worker-ssh" {
   resource_group_name = azurerm_resource_group.cluster.name

@@ -236,6 +297,22 @@ resource "azurerm_network_security_rule" "worker-https" {
   destination_address_prefix = azurerm_subnet.worker.address_prefix
 }

+resource "azurerm_network_security_rule" "worker-cilium-health" {
+  resource_group_name = azurerm_resource_group.cluster.name
+  count               = var.networking == "cilium" ? 1 : 0
+
+  name                        = "allow-cilium-health"
+  network_security_group_name = azurerm_network_security_group.worker.name
+  priority                    = "2014"
+  access                      = "Allow"
+  direction                   = "Inbound"
+  protocol                    = "Tcp"
+  source_port_range           = "*"
+  destination_port_range      = "4240"
+  source_address_prefixes     = [azurerm_subnet.controller.address_prefix, azurerm_subnet.worker.address_prefix]
+  destination_address_prefix  = azurerm_subnet.worker.address_prefix
+}
+
 resource "azurerm_network_security_rule" "worker-vxlan" {
   resource_group_name = azurerm_resource_group.cluster.name

@@ -251,6 +328,21 @@ resource "azurerm_network_security_rule" "worker-vxlan" {
   destination_address_prefix = azurerm_subnet.worker.address_prefix
 }

+resource "azurerm_network_security_rule" "worker-linux-vxlan" {
+  resource_group_name = azurerm_resource_group.cluster.name
+
+  name                        = "allow-linux-vxlan"
+  network_security_group_name = azurerm_network_security_group.worker.name
+  priority                    = "2016"
+  access                      = "Allow"
+  direction                   = "Inbound"
+  protocol                    = "Udp"
+  source_port_range           = "*"
+  destination_port_range      = "8472"
+  source_address_prefixes     = [azurerm_subnet.controller.address_prefix, azurerm_subnet.worker.address_prefix]
+  destination_address_prefix  = azurerm_subnet.worker.address_prefix
+}
+
 # Allow Prometheus to scrape node-exporter daemonset
 resource "azurerm_network_security_rule" "worker-node-exporter" {
   resource_group_name = azurerm_resource_group.cluster.name
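For readers less familiar with Azure: NSG rules are evaluated strictly by `priority` (100-4096, lower wins), and each rule within a group needs a unique value, which is why the additions pick previously unused slots: 1995 for the ICMP rules and 2014, 2016, 2019, and 2021 for the Cilium health and VXLAN rules.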
@@ -25,7 +25,8 @@ systemd:
       Description=Kubelet
       Wants=rpc-statd.service
       [Service]
-      Environment=KUBELET_IMAGE=docker://quay.io/poseidon/kubelet:v1.18.4
+      Environment=KUBELET_IMAGE=docker://quay.io/poseidon/kubelet:v1.18.5
+      Environment=KUBELET_CGROUP_DRIVER=${cgroup_driver}
       ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
       ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
       ExecStartPre=/bin/mkdir -p /opt/cni/bin

@@ -69,6 +70,7 @@ systemd:
         --authentication-token-webhook \
         --authorization-mode=Webhook \
         --bootstrap-kubeconfig=/etc/kubernetes/kubeconfig \
+        --cgroup-driver=$${KUBELET_CGROUP_DRIVER} \
         --client-ca-file=/etc/kubernetes/ca.crt \
         --cluster_dns=${cluster_dns_service_ip} \
         --cluster_domain=${cluster_domain_suffix} \

@@ -127,7 +129,7 @@ storage:
       --volume config,kind=host,source=/etc/kubernetes \
       --mount volume=config,target=/etc/kubernetes \
       --insecure-options=image \
-      docker://quay.io/poseidon/kubelet:v1.18.4 \
+      docker://quay.io/poseidon/kubelet:v1.18.5 \
       --net=host \
       --dns=host \
       --exec=/usr/local/bin/kubectl -- --kubeconfig=/etc/kubernetes/kubeconfig delete node $(hostname | tr '[:upper:]' '[:lower:]')
@@ -111,6 +111,7 @@ data "template_file" "worker-config" {
     ssh_authorized_key = var.ssh_authorized_key
     cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
     cluster_domain_suffix = var.cluster_domain_suffix
+    cgroup_driver = local.flavor == "flatcar" && local.channel == "edge" ? "systemd" : "cgroupfs"
     node_labels = join(",", var.node_labels)
   }
 }
@@ -11,7 +11,7 @@ Typhoon distributes upstream Kubernetes, architectural conventions, and cluster

 ## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>

-* Kubernetes v1.18.4 (upstream)
+* Kubernetes v1.18.5 (upstream)
 * Single or multi-master, [Calico](https://www.projectcalico.org/) or [flannel](https://github.com/coreos/flannel) networking
 * On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/), SELinux enforcing
 * Advanced features like [worker pools](https://typhoon.psdn.io/advanced/worker-pools/), [spot priority](https://typhoon.psdn.io/fedora-coreos/azure/#low-priority) workers, and [snippets](https://typhoon.psdn.io/advanced/customization/) customization
@@ -1,6 +1,6 @@
 # Kubernetes assets (kubeconfig, manifests)
 module "bootstrap" {
-  source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=e75697ce35d7773705f0b9b28ce1ffbe99f9493c"
+  source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=5a7c963caf59740891df2aeae4b1561ccb3b9db6"

   cluster_name = var.cluster_name
   api_servers  = [format("%s.%s", var.cluster_name, var.dns_zone)]
@@ -54,7 +54,7 @@ systemd:
       Description=Kubelet (System Container)
       Wants=rpc-statd.service
       [Service]
-      Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.18.4
+      Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.18.5
       ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
       ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
       ExecStartPre=/bin/mkdir -p /opt/cni/bin

@@ -123,7 +123,7 @@ systemd:
       --volume /opt/bootstrap/assets:/assets:ro,Z \
       --volume /opt/bootstrap/apply:/apply:ro,Z \
       --entrypoint=/apply \
-      quay.io/poseidon/kubelet:v1.18.4
+      quay.io/poseidon/kubelet:v1.18.5
       ExecStartPost=/bin/touch /opt/bootstrap/bootstrap.done
       ExecStartPost=-/usr/bin/podman stop bootstrap
 storage:

@@ -175,6 +175,11 @@ storage:
       contents:
         inline: |
           fs.inotify.max_user_watches=16184
+    - path: /etc/sysctl.d/reverse-path-filter.conf
+      contents:
+        inline: |
+          net.ipv4.conf.default.rp_filter=0
+          net.ipv4.conf.*.rp_filter=0
     - path: /etc/systemd/system.conf.d/accounting.conf
       contents:
         inline: |
@@ -7,6 +7,21 @@ resource "azurerm_network_security_group" "controller" {
   location = azurerm_resource_group.cluster.location
 }

+resource "azurerm_network_security_rule" "controller-icmp" {
+  resource_group_name = azurerm_resource_group.cluster.name
+
+  name                        = "allow-icmp"
+  network_security_group_name = azurerm_network_security_group.controller.name
+  priority                    = "1995"
+  access                      = "Allow"
+  direction                   = "Inbound"
+  protocol                    = "Icmp"
+  source_port_range           = "*"
+  destination_port_range      = "*"
+  source_address_prefixes     = [azurerm_subnet.controller.address_prefix, azurerm_subnet.worker.address_prefix]
+  destination_address_prefix  = azurerm_subnet.controller.address_prefix
+}
+
 resource "azurerm_network_security_rule" "controller-ssh" {
   resource_group_name = azurerm_resource_group.cluster.name

@@ -100,6 +115,22 @@ resource "azurerm_network_security_rule" "controller-apiserver" {
   destination_address_prefix = azurerm_subnet.controller.address_prefix
 }

+resource "azurerm_network_security_rule" "controller-cilium-health" {
+  resource_group_name = azurerm_resource_group.cluster.name
+  count               = var.networking == "cilium" ? 1 : 0
+
+  name                        = "allow-cilium-health"
+  network_security_group_name = azurerm_network_security_group.controller.name
+  priority                    = "2019"
+  access                      = "Allow"
+  direction                   = "Inbound"
+  protocol                    = "Tcp"
+  source_port_range           = "*"
+  destination_port_range      = "4240"
+  source_address_prefixes     = [azurerm_subnet.controller.address_prefix, azurerm_subnet.worker.address_prefix]
+  destination_address_prefix  = azurerm_subnet.controller.address_prefix
+}
+
 resource "azurerm_network_security_rule" "controller-vxlan" {
   resource_group_name = azurerm_resource_group.cluster.name

@@ -115,6 +146,21 @@ resource "azurerm_network_security_rule" "controller-vxlan" {
   destination_address_prefix = azurerm_subnet.controller.address_prefix
 }

+resource "azurerm_network_security_rule" "controller-linux-vxlan" {
+  resource_group_name = azurerm_resource_group.cluster.name
+
+  name                        = "allow-linux-vxlan"
+  network_security_group_name = azurerm_network_security_group.controller.name
+  priority                    = "2021"
+  access                      = "Allow"
+  direction                   = "Inbound"
+  protocol                    = "Udp"
+  source_port_range           = "*"
+  destination_port_range      = "8472"
+  source_address_prefixes     = [azurerm_subnet.controller.address_prefix, azurerm_subnet.worker.address_prefix]
+  destination_address_prefix  = azurerm_subnet.controller.address_prefix
+}
+
 # Allow Prometheus to scrape node-exporter daemonset
 resource "azurerm_network_security_rule" "controller-node-exporter" {
   resource_group_name = azurerm_resource_group.cluster.name

@@ -191,6 +237,21 @@ resource "azurerm_network_security_group" "worker" {
   location = azurerm_resource_group.cluster.location
 }

+resource "azurerm_network_security_rule" "worker-icmp" {
+  resource_group_name = azurerm_resource_group.cluster.name
+
+  name                        = "allow-icmp"
+  network_security_group_name = azurerm_network_security_group.worker.name
+  priority                    = "1995"
+  access                      = "Allow"
+  direction                   = "Inbound"
+  protocol                    = "Icmp"
+  source_port_range           = "*"
+  destination_port_range      = "*"
+  source_address_prefixes     = [azurerm_subnet.controller.address_prefix, azurerm_subnet.worker.address_prefix]
+  destination_address_prefix  = azurerm_subnet.worker.address_prefix
+}
+
 resource "azurerm_network_security_rule" "worker-ssh" {
   resource_group_name = azurerm_resource_group.cluster.name

@@ -236,6 +297,22 @@ resource "azurerm_network_security_rule" "worker-https" {
   destination_address_prefix = azurerm_subnet.worker.address_prefix
 }

+resource "azurerm_network_security_rule" "worker-cilium-health" {
+  resource_group_name = azurerm_resource_group.cluster.name
+  count               = var.networking == "cilium" ? 1 : 0
+
+  name                        = "allow-cilium-health"
+  network_security_group_name = azurerm_network_security_group.worker.name
+  priority                    = "2014"
+  access                      = "Allow"
+  direction                   = "Inbound"
+  protocol                    = "Tcp"
+  source_port_range           = "*"
+  destination_port_range      = "4240"
+  source_address_prefixes     = [azurerm_subnet.controller.address_prefix, azurerm_subnet.worker.address_prefix]
+  destination_address_prefix  = azurerm_subnet.worker.address_prefix
+}
+
 resource "azurerm_network_security_rule" "worker-vxlan" {
   resource_group_name = azurerm_resource_group.cluster.name

@@ -251,6 +328,21 @@ resource "azurerm_network_security_rule" "worker-vxlan" {
   destination_address_prefix = azurerm_subnet.worker.address_prefix
 }

+resource "azurerm_network_security_rule" "worker-linux-vxlan" {
+  resource_group_name = azurerm_resource_group.cluster.name
+
+  name                        = "allow-linux-vxlan"
+  network_security_group_name = azurerm_network_security_group.worker.name
+  priority                    = "2016"
+  access                      = "Allow"
+  direction                   = "Inbound"
+  protocol                    = "Udp"
+  source_port_range           = "*"
+  destination_port_range      = "8472"
+  source_address_prefixes     = [azurerm_subnet.controller.address_prefix, azurerm_subnet.worker.address_prefix]
+  destination_address_prefix  = azurerm_subnet.worker.address_prefix
+}
+
 # Allow Prometheus to scrape node-exporter daemonset
 resource "azurerm_network_security_rule" "worker-node-exporter" {
   resource_group_name = azurerm_resource_group.cluster.name
@@ -24,7 +24,7 @@ systemd:
       Description=Kubelet (System Container)
       Wants=rpc-statd.service
       [Service]
-      Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.18.4
+      Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.18.5
       ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
       ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
       ExecStartPre=/bin/mkdir -p /opt/cni/bin

@@ -88,7 +88,7 @@ systemd:
       Type=oneshot
       RemainAfterExit=true
       ExecStart=/bin/true
-      ExecStop=/bin/bash -c '/usr/bin/podman run --volume /etc/kubernetes:/etc/kubernetes:ro,z --entrypoint /usr/local/bin/kubectl quay.io/poseidon/kubelet:v1.18.4 --kubeconfig=/etc/kubernetes/kubeconfig delete node $HOSTNAME'
+      ExecStop=/bin/bash -c '/usr/bin/podman run --volume /etc/kubernetes:/etc/kubernetes:ro,z --entrypoint /usr/local/bin/kubectl quay.io/poseidon/kubelet:v1.18.5 --kubeconfig=/etc/kubernetes/kubeconfig delete node $HOSTNAME'
       [Install]
       WantedBy=multi-user.target
 storage:

@@ -104,6 +104,11 @@ storage:
       contents:
         inline: |
           fs.inotify.max_user_watches=16184
+    - path: /etc/sysctl.d/reverse-path-filter.conf
+      contents:
+        inline: |
+          net.ipv4.conf.default.rp_filter=0
+          net.ipv4.conf.*.rp_filter=0
     - path: /etc/systemd/system.conf.d/accounting.conf
       contents:
         inline: |
@@ -11,7 +11,7 @@ Typhoon distributes upstream Kubernetes, architectural conventions, and cluster

 ## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>

-* Kubernetes v1.18.4 (upstream)
+* Kubernetes v1.18.5 (upstream)
 * Single or multi-master, [Calico](https://www.projectcalico.org/) or [flannel](https://github.com/coreos/flannel) networking
 * On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
 * Advanced features like [snippets](https://typhoon.psdn.io/advanced/customization/#container-linux) customization
@@ -1,6 +1,6 @@
 # Kubernetes assets (kubeconfig, manifests)
 module "bootstrap" {
-  source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=e75697ce35d7773705f0b9b28ce1ffbe99f9493c"
+  source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=5a7c963caf59740891df2aeae4b1561ccb3b9db6"

   cluster_name = var.cluster_name
   api_servers  = [var.k8s_domain_name]
@@ -60,7 +60,7 @@ systemd:
       Description=Kubelet
       Wants=rpc-statd.service
       [Service]
-      Environment=KUBELET_IMAGE=docker://quay.io/poseidon/kubelet:v1.18.4
+      Environment=KUBELET_IMAGE=docker://quay.io/poseidon/kubelet:v1.18.5
       Environment=KUBELET_CGROUP_DRIVER=${cgroup_driver}
       ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
       ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests

@@ -147,7 +147,7 @@ systemd:
       --volume script,kind=host,source=/opt/bootstrap/apply \
       --mount volume=script,target=/apply \
       --insecure-options=image \
-      docker://quay.io/poseidon/kubelet:v1.18.4 \
+      docker://quay.io/poseidon/kubelet:v1.18.5 \
       --net=host \
       --dns=host \
       --exec=/apply
@@ -33,7 +33,7 @@ systemd:
       Description=Kubelet
       Wants=rpc-statd.service
       [Service]
-      Environment=KUBELET_IMAGE=docker://quay.io/poseidon/kubelet:v1.18.4
+      Environment=KUBELET_IMAGE=docker://quay.io/poseidon/kubelet:v1.18.5
       Environment=KUBELET_CGROUP_DRIVER=${cgroup_driver}
       ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
       ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
@@ -11,7 +11,7 @@ Typhoon distributes upstream Kubernetes, architectural conventions, and cluster

 ## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>

-* Kubernetes v1.18.4 (upstream)
+* Kubernetes v1.18.5 (upstream)
 * Single or multi-master, [Calico](https://www.projectcalico.org/) or [flannel](https://github.com/coreos/flannel) networking
 * On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/), SELinux enforcing
 * Advanced features like [snippets](https://typhoon.psdn.io/advanced/customization/#container-linux) customization
@@ -1,6 +1,6 @@
 # Kubernetes assets (kubeconfig, manifests)
 module "bootstrap" {
-  source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=e75697ce35d7773705f0b9b28ce1ffbe99f9493c"
+  source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=5a7c963caf59740891df2aeae4b1561ccb3b9db6"

   cluster_name = var.cluster_name
   api_servers  = [var.k8s_domain_name]
@@ -53,7 +53,7 @@ systemd:
       Description=Kubelet (System Container)
       Wants=rpc-statd.service
       [Service]
-      Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.18.4
+      Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.18.5
       ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
       ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
       ExecStartPre=/bin/mkdir -p /opt/cni/bin

@@ -134,7 +134,7 @@ systemd:
       --volume /opt/bootstrap/assets:/assets:ro,Z \
       --volume /opt/bootstrap/apply:/apply:ro,Z \
       --entrypoint=/apply \
-      quay.io/poseidon/kubelet:v1.18.4
+      quay.io/poseidon/kubelet:v1.18.5
       ExecStartPost=/bin/touch /opt/bootstrap/bootstrap.done
       ExecStartPost=-/usr/bin/podman stop bootstrap
 storage:

@@ -186,6 +186,11 @@ storage:
       contents:
         inline: |
           fs.inotify.max_user_watches=16184
+    - path: /etc/sysctl.d/reverse-path-filter.conf
+      contents:
+        inline: |
+          net.ipv4.conf.default.rp_filter=0
+          net.ipv4.conf.*.rp_filter=0
     - path: /etc/systemd/system.conf.d/accounting.conf
       contents:
         inline: |
@@ -23,7 +23,7 @@ systemd:
       Description=Kubelet (System Container)
       Wants=rpc-statd.service
       [Service]
-      Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.18.4
+      Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.18.5
       ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
       ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
       ExecStartPre=/bin/mkdir -p /opt/cni/bin

@@ -106,6 +106,11 @@ storage:
       contents:
         inline: |
           fs.inotify.max_user_watches=16184
+    - path: /etc/sysctl.d/reverse-path-filter.conf
+      contents:
+        inline: |
+          net.ipv4.conf.default.rp_filter=0
+          net.ipv4.conf.*.rp_filter=0
     - path: /etc/systemd/system.conf.d/accounting.conf
       contents:
         inline: |
@@ -11,7 +11,7 @@ Typhoon distributes upstream Kubernetes, architectural conventions, and cluster

 ## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>

-* Kubernetes v1.18.4 (upstream)
+* Kubernetes v1.18.5 (upstream)
 * Single or multi-master, [Calico](https://www.projectcalico.org/) or [flannel](https://github.com/coreos/flannel) networking
 * On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
 * Advanced features like [snippets](https://typhoon.psdn.io/advanced/customization/#container-linux) customization
@ -1,6 +1,6 @@
|
||||
# Kubernetes assets (kubeconfig, manifests)
|
||||
module "bootstrap" {
|
||||
source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=e75697ce35d7773705f0b9b28ce1ffbe99f9493c"
|
||||
source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=5a7c963caf59740891df2aeae4b1561ccb3b9db6"
|
||||
|
||||
cluster_name = var.cluster_name
|
||||
api_servers = [format("%s.%s", var.cluster_name, var.dns_zone)]
|
||||
|
@ -62,7 +62,7 @@ systemd:
After=coreos-metadata.service
Wants=rpc-statd.service
[Service]
Environment=KUBELET_IMAGE=docker://quay.io/poseidon/kubelet:v1.18.4
Environment=KUBELET_IMAGE=docker://quay.io/poseidon/kubelet:v1.18.5
EnvironmentFile=/run/metadata/coreos
ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests

@ -144,7 +144,7 @@ systemd:
--volume script,kind=host,source=/opt/bootstrap/apply \
--mount volume=script,target=/apply \
--insecure-options=image \
docker://quay.io/poseidon/kubelet:v1.18.4 \
docker://quay.io/poseidon/kubelet:v1.18.5 \
--net=host \
--dns=host \
--exec=/apply

@ -35,7 +35,7 @@ systemd:
After=coreos-metadata.service
Wants=rpc-statd.service
[Service]
Environment=KUBELET_IMAGE=docker://quay.io/poseidon/kubelet:v1.18.4
Environment=KUBELET_IMAGE=docker://quay.io/poseidon/kubelet:v1.18.5
EnvironmentFile=/run/metadata/coreos
ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests

@ -134,7 +134,7 @@ storage:
--volume config,kind=host,source=/etc/kubernetes \
--mount volume=config,target=/etc/kubernetes \
--insecure-options=image \
docker://quay.io/poseidon/kubelet:v1.18.4 \
docker://quay.io/poseidon/kubelet:v1.18.5 \
--net=host \
--dns=host \
--exec=/usr/local/bin/kubectl -- --kubeconfig=/etc/kubernetes/kubeconfig delete node $(hostname)

@ -46,9 +46,10 @@ resource "digitalocean_droplet" "controllers" {
size = var.controller_type

# network
# only official DigitalOcean images support IPv6
ipv6 = local.is_official_image
private_networking = true
vpc_uuid = digitalocean_vpc.network.id
# TODO: Only official DigitalOcean images support IPv6
ipv6 = false

user_data = data.ct_config.controller-ignitions.*.rendered[count.index]
ssh_keys = var.ssh_fingerprints

@ -1,3 +1,10 @@
# Network VPC
resource "digitalocean_vpc" "network" {
name = var.cluster_name
region = var.region
description = "Network for ${var.cluster_name} cluster"
}

resource "digitalocean_firewall" "rules" {
name = var.cluster_name

@ -6,6 +13,11 @@ resource "digitalocean_firewall" "rules" {
digitalocean_tag.workers.name
]

inbound_rule {
protocol = "icmp"
source_tags = [digitalocean_tag.controllers.name, digitalocean_tag.workers.name]
}

# allow ssh, internal flannel, internal node-exporter, internal kubelet
inbound_rule {
protocol = "tcp"

@ -13,12 +25,27 @@ resource "digitalocean_firewall" "rules" {
source_addresses = ["0.0.0.0/0", "::/0"]
}

# Cilium health
inbound_rule {
protocol = "tcp"
port_range = "4240"
source_tags = [digitalocean_tag.controllers.name, digitalocean_tag.workers.name]
}

# IANA vxlan (flannel, calico)
inbound_rule {
protocol = "udp"
port_range = "4789"
source_tags = [digitalocean_tag.controllers.name, digitalocean_tag.workers.name]
}

# Linux vxlan (Cilium)
inbound_rule {
protocol = "udp"
port_range = "8472"
source_tags = [digitalocean_tag.controllers.name, digitalocean_tag.workers.name]
}

# Allow Prometheus to scrape node-exporter
inbound_rule {
protocol = "tcp"

@ -33,6 +60,7 @@ resource "digitalocean_firewall" "rules" {
source_tags = [digitalocean_tag.workers.name]
}

# Kubelet
inbound_rule {
protocol = "tcp"
port_range = "10250"

@ -2,6 +2,8 @@ output "kubeconfig-admin" {
value = module.bootstrap.kubeconfig-admin
}

# Outputs for Kubernetes Ingress

output "controllers_dns" {
value = digitalocean_record.controllers[0].fqdn
}

@ -45,3 +47,10 @@ output "worker_tag" {
value = digitalocean_tag.workers.name
}

# Outputs for custom load balancing

output "vpc_id" {
description = "ID of the cluster VPC"
value = digitalocean_vpc.network.id
}

@ -3,7 +3,7 @@
terraform {
required_version = "~> 0.12.6"
required_providers {
digitalocean = "~> 1.3"
digitalocean = "~> 1.16"
ct = "~> 0.4"
template = "~> 2.1"
null = "~> 2.1"

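The constraint bump above means existing cluster configs must allow the newer plugin before `terraform plan` will succeed. A minimal sketch of the consumer-side provider block; the token path is an assumption, not part of this diff:

```tf
provider "digitalocean" {
  # v1.16.0+ provides the digitalocean_vpc resource and the droplet vpc_uuid argument
  version = "1.16.0"
  token   = chomp(file("~/.config/digital-ocean/token")) # assumed token location
}
```

Run `terraform init -upgrade` so Terraform fetches the newer provider plugin.
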
@ -35,9 +35,10 @@ resource "digitalocean_droplet" "workers" {
size = var.worker_type

# network
# only official DigitalOcean images support IPv6
ipv6 = local.is_official_image
private_networking = true
vpc_uuid = digitalocean_vpc.network.id
# only official DigitalOcean images support IPv6
ipv6 = local.is_official_image

user_data = data.ct_config.worker-ignition.rendered
ssh_keys = var.ssh_fingerprints

@ -11,7 +11,7 @@ Typhoon distributes upstream Kubernetes, architectural conventions, and cluster

## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>

* Kubernetes v1.18.4 (upstream)
* Kubernetes v1.18.5 (upstream)
* Single or multi-master, [Calico](https://www.projectcalico.org/) or [flannel](https://github.com/coreos/flannel) networking
* On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/), SELinux enforcing
* Advanced features like [snippets](https://typhoon.psdn.io/advanced/customization/) customization

@ -1,6 +1,6 @@
# Kubernetes assets (kubeconfig, manifests)
module "bootstrap" {
source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=e75697ce35d7773705f0b9b28ce1ffbe99f9493c"
source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=5a7c963caf59740891df2aeae4b1561ccb3b9db6"

cluster_name = var.cluster_name
api_servers = [format("%s.%s", var.cluster_name, var.dns_zone)]

@ -41,9 +41,10 @@ resource "digitalocean_droplet" "controllers" {
size = var.controller_type

# network
# TODO: Only official DigitalOcean images support IPv6
ipv6 = false
private_networking = true
vpc_uuid = digitalocean_vpc.network.id
# TODO: Only official DigitalOcean images support IPv6
ipv6 = false

user_data = data.ct_config.controller-ignitions.*.rendered[count.index]
ssh_keys = var.ssh_fingerprints

@ -55,7 +55,7 @@ systemd:
After=afterburn.service
Wants=rpc-statd.service
[Service]
Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.18.4
Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.18.5
EnvironmentFile=/run/metadata/afterburn
ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests

@ -135,7 +135,7 @@ systemd:
--volume /opt/bootstrap/assets:/assets:ro,Z \
--volume /opt/bootstrap/apply:/apply:ro,Z \
--entrypoint=/apply \
quay.io/poseidon/kubelet:v1.18.4
quay.io/poseidon/kubelet:v1.18.5
ExecStartPost=/bin/touch /opt/bootstrap/bootstrap.done
ExecStartPost=-/usr/bin/podman stop bootstrap
storage:

@ -182,6 +182,11 @@ storage:
contents:
inline: |
fs.inotify.max_user_watches=16184
- path: /etc/sysctl.d/reverse-path-filter.conf
contents:
inline: |
net.ipv4.conf.default.rp_filter=0
net.ipv4.conf.*.rp_filter=0
- path: /etc/systemd/system.conf.d/accounting.conf
contents:
inline: |

@ -26,7 +26,7 @@ systemd:
After=afterburn.service
Wants=rpc-statd.service
[Service]
Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.18.4
Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.18.5
EnvironmentFile=/run/metadata/afterburn
ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests

@ -98,7 +98,7 @@ systemd:
Type=oneshot
RemainAfterExit=true
ExecStart=/bin/true
ExecStop=/bin/bash -c '/usr/bin/podman run --volume /etc/kubernetes:/etc/kubernetes:ro,z --entrypoint /usr/local/bin/kubectl quay.io/poseidon/kubelet:v1.18.4 --kubeconfig=/etc/kubernetes/kubeconfig delete node $HOSTNAME'
ExecStop=/bin/bash -c '/usr/bin/podman run --volume /etc/kubernetes:/etc/kubernetes:ro,z --entrypoint /usr/local/bin/kubectl quay.io/poseidon/kubelet:v1.18.5 --kubeconfig=/etc/kubernetes/kubeconfig delete node $HOSTNAME'
[Install]
WantedBy=multi-user.target
storage:

@ -109,6 +109,11 @@ storage:
contents:
inline: |
fs.inotify.max_user_watches=16184
- path: /etc/sysctl.d/reverse-path-filter.conf
contents:
inline: |
net.ipv4.conf.default.rp_filter=0
net.ipv4.conf.*.rp_filter=0
- path: /etc/systemd/system.conf.d/accounting.conf
contents:
inline: |

@ -1,3 +1,10 @@
# Network VPC
resource "digitalocean_vpc" "network" {
name = var.cluster_name
region = var.region
description = "Network for ${var.cluster_name} cluster"
}

resource "digitalocean_firewall" "rules" {
name = var.cluster_name

@ -6,6 +13,11 @@ resource "digitalocean_firewall" "rules" {
digitalocean_tag.workers.name
]

inbound_rule {
protocol = "icmp"
source_tags = [digitalocean_tag.controllers.name, digitalocean_tag.workers.name]
}

# allow ssh, internal flannel, internal node-exporter, internal kubelet
inbound_rule {
protocol = "tcp"

@ -13,12 +25,27 @@ resource "digitalocean_firewall" "rules" {
source_addresses = ["0.0.0.0/0", "::/0"]
}

# Cilium health
inbound_rule {
protocol = "tcp"
port_range = "4240"
source_tags = [digitalocean_tag.controllers.name, digitalocean_tag.workers.name]
}

# IANA vxlan (flannel, calico)
inbound_rule {
protocol = "udp"
port_range = "4789"
source_tags = [digitalocean_tag.controllers.name, digitalocean_tag.workers.name]
}

# Linux vxlan (Cilium)
inbound_rule {
protocol = "udp"
port_range = "8472"
source_tags = [digitalocean_tag.controllers.name, digitalocean_tag.workers.name]
}

# Allow Prometheus to scrape node-exporter
inbound_rule {
protocol = "tcp"

@ -33,6 +60,7 @@ resource "digitalocean_firewall" "rules" {
source_tags = [digitalocean_tag.workers.name]
}

# Kubelet
inbound_rule {
protocol = "tcp"
port_range = "10250"

@ -2,6 +2,8 @@ output "kubeconfig-admin" {
value = module.bootstrap.kubeconfig-admin
}

# Outputs for Kubernetes Ingress

output "controllers_dns" {
value = digitalocean_record.controllers[0].fqdn
}

@ -45,3 +47,9 @@ output "worker_tag" {
value = digitalocean_tag.workers.name
}

# Outputs for custom load balancing

output "vpc_id" {
description = "ID of the cluster VPC"
value = digitalocean_vpc.network.id
}

@ -3,7 +3,7 @@
terraform {
required_version = "~> 0.12.6"
required_providers {
digitalocean = "~> 1.3"
digitalocean = "~> 1.16"
ct = "~> 0.4"
template = "~> 2.1"
null = "~> 2.1"

@ -37,9 +37,10 @@ resource "digitalocean_droplet" "workers" {
size = var.worker_type

# network
# TODO: Only official DigitalOcean images support IPv6
ipv6 = false
private_networking = true
vpc_uuid = digitalocean_vpc.network.id
# TODO: Only official DigitalOcean images support IPv6
ipv6 = false

user_data = data.ct_config.worker-ignition.rendered
ssh_keys = var.ssh_fingerprints

@ -65,7 +65,8 @@ The AWS internal `workers` module supports a number of [variables](https://githu
|:-----|:------------|:--------|:--------|
| worker_count | Number of instances | 1 | 3 |
| instance_type | EC2 instance type | "t3.small" | "t3.medium" |
| os_image | AMI channel for a Container Linux derivative | "flatcar-stable" | flatcar-stable, flatcar-beta, flatcar-alpha, coreos-stable, coreos-beta, coreos-alpha |
| os_image | AMI channel for a Container Linux derivative | "flatcar-stable" | flatcar-stable, flatcar-beta, flatcar-alpha, flatcar-edge |
| os_stream | Fedora CoreOS stream for compute instances | "stable" | "testing", "next" |
| disk_size | Size of the EBS volume in GB | 40 | 100 |
| disk_type | Type of the EBS volume | "gp2" | standard, gp2, io1 |
| disk_iops | IOPS of the EBS volume | 0 (i.e. auto) | 400 |

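For comparison with the Azure and Google Cloud pool examples below, a minimal sketch of attaching an AWS worker pool using the variables above; the cluster outputs referenced here (`vpc_id`, `subnet_ids`, `worker_security_groups`, `kubeconfig`) follow the pattern of the other platforms and are assumptions, not taken from this diff:

```tf
module "tempest-worker-pool" {
  source = "git::https://github.com/poseidon/typhoon//aws/container-linux/kubernetes/workers?ref=v1.18.5"

  # AWS (assumed outputs from a cluster module named "tempest")
  vpc_id          = module.tempest.vpc_id
  subnet_ids      = module.tempest.subnet_ids
  security_groups = module.tempest.worker_security_groups

  # configuration
  name               = "tempest-pool"
  kubeconfig         = module.tempest.kubeconfig
  ssh_authorized_key = var.ssh_authorized_key

  # optional
  worker_count  = 2
  instance_type = "t3.medium"
  os_image      = "flatcar-beta"
}
```
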
@ -82,7 +83,7 @@ Create a cluster following the Azure [tutorial](../flatcar-linux/azure.md#cluste

```tf
module "ramius-worker-pool" {
source = "git::https://github.com/poseidon/typhoon//azure/container-linux/kubernetes/workers?ref=v1.18.4"
source = "git::https://github.com/poseidon/typhoon//azure/container-linux/kubernetes/workers?ref=v1.18.5"

# Azure
region = module.ramius.region

@ -134,7 +135,7 @@ The Azure internal `workers` module supports a number of [variables](https://git
|:-----|:------------|:--------|:--------|
| worker_count | Number of instances | 1 | 3 |
| vm_type | Machine type for instances | "Standard_DS1_v2" | See below |
| os_image | Channel for a Container Linux derivative | "flatcar-stable" | flatcar-stable, flatcar-beta, flatcar-alpha, flatcar-edge, coreos-stable, coreos-beta, coreos-alpha |
| os_image | Channel for a Container Linux derivative | "flatcar-stable" | flatcar-stable, flatcar-beta, flatcar-alpha, flatcar-edge |
| priority | Set priority to Spot to use reduced cost surplus capacity, with the tradeoff that instances can be deallocated at any time | "Regular" | "Spot" |
| snippets | Container Linux Config snippets | [] | [examples](/advanced/customization/) |
| service_cidr | CIDR IPv4 range to assign to Kubernetes services | "10.3.0.0/16" | "10.3.0.0/24" |

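On the `priority` row: Spot capacity is cheaper but revocable, so it suits stateless worker pools. A one-line sketch of opting the pool above into Spot, with all other arguments unchanged:

```tf
module "ramius-worker-pool" {
  # ...same arguments as the ramius-worker-pool example above...
  priority     = "Spot" # instances may be deallocated at any time
  worker_count = 2
}
```
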
@ -148,7 +149,7 @@ Create a cluster following the Google Cloud [tutorial](../flatcar-linux/google-c

```tf
module "yavin-worker-pool" {
source = "git::https://github.com/poseidon/typhoon//google-cloud/container-linux/kubernetes/workers?ref=v1.18.4"
source = "git::https://github.com/poseidon/typhoon//google-cloud/container-linux/kubernetes/workers?ref=v1.18.5"

# Google Cloud
region = "europe-west2"

@ -179,11 +180,11 @@ Verify a managed instance group of workers joins the cluster within a few minute
```
$ kubectl get nodes
NAME STATUS AGE VERSION
yavin-controller-0.c.example-com.internal Ready 6m v1.18.4
yavin-worker-jrbf.c.example-com.internal Ready 5m v1.18.4
yavin-worker-mzdm.c.example-com.internal Ready 5m v1.18.4
yavin-16x-worker-jrbf.c.example-com.internal Ready 3m v1.18.4
yavin-16x-worker-mzdm.c.example-com.internal Ready 3m v1.18.4
yavin-controller-0.c.example-com.internal Ready 6m v1.18.5
yavin-worker-jrbf.c.example-com.internal Ready 5m v1.18.5
yavin-worker-mzdm.c.example-com.internal Ready 5m v1.18.5
yavin-16x-worker-jrbf.c.example-com.internal Ready 3m v1.18.5
yavin-16x-worker-mzdm.c.example-com.internal Ready 3m v1.18.5
```

### Variables

@ -199,7 +200,7 @@ The Google Cloud internal `workers` module supports a number of [variables](http
| region | Region for the worker pool instances. May differ from the cluster's region | "europe-west2" |
| network | Must be set to `network_name` output by cluster | module.cluster.network_name |
| kubeconfig | Must be set to `kubeconfig` output by cluster | module.cluster.kubeconfig |
| os_image | Container Linux image for compute instances | "fedora-coreos-or-flatcar-image", coreos-stable, coreos-beta, coreos-alpha |
| os_image | Container Linux image for compute instances | "uploaded-flatcar-image" |
| ssh_authorized_key | SSH public key for user 'core' | "ssh-rsa AAAAB3NZ..." |

Check the list of regions [docs](https://cloud.google.com/compute/docs/regions-zones/regions-zones) or with `gcloud compute regions list`.

@ -30,6 +30,7 @@ Add a DigitalOcean load balancer to distribute IPv4 TCP traffic (HTTP/HTTPS Ingr
resource "digitalocean_loadbalancer" "ingress" {
name = "ingress"
region = "fra1"
vpc_uuid = module.nemo.vpc_id
droplet_tag = module.nemo.worker_tag

healthcheck {

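The hunk above shows only the changed line; a fuller sketch of the load balancer with `forwarding_rule` and `healthcheck` blocks filled in as assumptions for plain HTTP Ingress (ports and health endpoint are illustrative, not from this diff):

```tf
resource "digitalocean_loadbalancer" "ingress" {
  name        = "ingress"
  region      = "fra1"
  vpc_uuid    = module.nemo.vpc_id      # keep the LB inside the cluster VPC
  droplet_tag = module.nemo.worker_tag  # balance across the cluster's workers

  # assumed rule: forward external HTTP to the Ingress controller host port
  forwarding_rule {
    entry_port      = 80
    entry_protocol  = "tcp"
    target_port     = 80
    target_protocol = "tcp"
  }

  healthcheck {
    port     = 10254 # assumed nginx-ingress health port
    protocol = "tcp"
  }
}
```
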
@ -1,6 +1,6 @@
# AWS

In this tutorial, we'll create a Kubernetes v1.18.4 cluster on AWS with Fedora CoreOS.
In this tutorial, we'll create a Kubernetes v1.18.5 cluster on AWS with Fedora CoreOS.

We'll declare a Kubernetes cluster using the Typhoon Terraform module. Then apply the changes to create a VPC, gateway, subnets, security groups, controller instances, worker auto-scaling group, network load balancer, and TLS assets.

@ -49,7 +49,7 @@ Configure the AWS provider to use your access key credentials in a `providers.tf

```tf
provider "aws" {
version = "2.66.0"
version = "2.68.0"
region = "eu-central-1"
shared_credentials_file = "/home/user/.config/aws/credentials"
}

@ -70,7 +70,7 @@ Define a Kubernetes cluster using the module `aws/fedora-coreos/kubernetes`.

```tf
module "tempest" {
source = "git::https://github.com/poseidon/typhoon//aws/fedora-coreos/kubernetes?ref=v1.18.4"
source = "git::https://github.com/poseidon/typhoon//aws/fedora-coreos/kubernetes?ref=v1.18.5"

# AWS
cluster_name = "tempest"

@ -143,9 +143,9 @@ List nodes in the cluster.
$ export KUBECONFIG=/home/user/.kube/configs/tempest-config
$ kubectl get nodes
NAME STATUS ROLES AGE VERSION
ip-10-0-3-155 Ready <none> 10m v1.18.4
ip-10-0-26-65 Ready <none> 10m v1.18.4
ip-10-0-41-21 Ready <none> 10m v1.18.4
ip-10-0-3-155 Ready <none> 10m v1.18.5
ip-10-0-26-65 Ready <none> 10m v1.18.5
ip-10-0-41-21 Ready <none> 10m v1.18.5
```

List the pods.

@ -1,6 +1,6 @@
# Azure

In this tutorial, we'll create a Kubernetes v1.18.4 cluster on Azure with Fedora CoreOS.
In this tutorial, we'll create a Kubernetes v1.18.5 cluster on Azure with Fedora CoreOS.

We'll declare a Kubernetes cluster using the Typhoon Terraform module. Then apply the changes to create a resource group, virtual network, subnets, security groups, controller availability set, worker scale set, load balancer, and TLS assets.

@ -47,7 +47,7 @@ Configure the Azure provider in a `providers.tf` file.

```tf
provider "azurerm" {
version = "2.14.0"
version = "2.16.0"
}

provider "ct" {

@ -83,7 +83,7 @@ Define a Kubernetes cluster using the module `azure/fedora-coreos/kubernetes`.

```tf
module "ramius" {
source = "git::https://github.com/poseidon/typhoon//azure/fedora-coreos/kubernetes?ref=v1.18.4"
source = "git::https://github.com/poseidon/typhoon//azure/fedora-coreos/kubernetes?ref=v1.18.5"

# Azure
cluster_name = "ramius"

@ -158,9 +158,9 @@ List nodes in the cluster.
$ export KUBECONFIG=/home/user/.kube/configs/ramius-config
$ kubectl get nodes
NAME STATUS ROLES AGE VERSION
ramius-controller-0 Ready <none> 24m v1.18.4
ramius-worker-000001 Ready <none> 25m v1.18.4
ramius-worker-000002 Ready <none> 24m v1.18.4
ramius-controller-0 Ready <none> 24m v1.18.5
ramius-worker-000001 Ready <none> 25m v1.18.5
ramius-worker-000002 Ready <none> 24m v1.18.5
```

List the pods.

@ -1,6 +1,6 @@
# Bare-Metal

In this tutorial, we'll network boot and provision a Kubernetes v1.18.4 cluster on bare-metal with Fedora CoreOS.
In this tutorial, we'll network boot and provision a Kubernetes v1.18.5 cluster on bare-metal with Fedora CoreOS.

First, we'll deploy a [Matchbox](https://github.com/poseidon/matchbox) service and setup a network boot environment. Then, we'll declare a Kubernetes cluster using the Typhoon Terraform module and power on machines. On PXE boot, machines will install Fedora CoreOS to disk, reboot into the disk install, and provision themselves as Kubernetes controllers or workers via Ignition.

@ -160,7 +160,7 @@ Define a Kubernetes cluster using the module `bare-metal/fedora-coreos/kubernete

```tf
module "mercury" {
source = "git::https://github.com/poseidon/typhoon//bare-metal/fedora-coreos/kubernetes?ref=v1.18.4"
source = "git::https://github.com/poseidon/typhoon//bare-metal/fedora-coreos/kubernetes?ref=v1.18.5"

# bare-metal
cluster_name = "mercury"

@ -289,9 +289,9 @@ List nodes in the cluster.
$ export KUBECONFIG=/home/user/.kube/configs/mercury-config
$ kubectl get nodes
NAME STATUS ROLES AGE VERSION
node1.example.com Ready <none> 10m v1.18.4
node2.example.com Ready <none> 10m v1.18.4
node3.example.com Ready <none> 10m v1.18.4
node1.example.com Ready <none> 10m v1.18.5
node2.example.com Ready <none> 10m v1.18.5
node3.example.com Ready <none> 10m v1.18.5
```

List the pods.

@ -1,6 +1,6 @@
# DigitalOcean

In this tutorial, we'll create a Kubernetes v1.18.4 cluster on DigitalOcean with Fedora CoreOS.
In this tutorial, we'll create a Kubernetes v1.18.5 cluster on DigitalOcean with Fedora CoreOS.

We'll declare a Kubernetes cluster using the Typhoon Terraform module. Then apply the changes to create controller droplets, worker droplets, DNS records, tags, and TLS assets.

@ -79,7 +79,7 @@ Define a Kubernetes cluster using the module `digital-ocean/fedora-coreos/kubern

```tf
module "nemo" {
source = "git::https://github.com/poseidon/typhoon//digital-ocean/fedora-coreos/kubernetes?ref=v1.18.4"
source = "git::https://github.com/poseidon/typhoon//digital-ocean/fedora-coreos/kubernetes?ref=v1.18.5"

# Digital Ocean
cluster_name = "nemo"

@ -153,9 +153,9 @@ List nodes in the cluster.
$ export KUBECONFIG=/home/user/.kube/configs/nemo-config
$ kubectl get nodes
NAME STATUS ROLES AGE VERSION
10.132.110.130 Ready <none> 10m v1.18.4
10.132.115.81 Ready <none> 10m v1.18.4
10.132.124.107 Ready <none> 10m v1.18.4
10.132.110.130 Ready <none> 10m v1.18.5
10.132.115.81 Ready <none> 10m v1.18.5
10.132.124.107 Ready <none> 10m v1.18.5
```

List the pods.

@ -1,6 +1,6 @@
# Google Cloud

In this tutorial, we'll create a Kubernetes v1.18.4 cluster on Google Compute Engine with Fedora CoreOS.
In this tutorial, we'll create a Kubernetes v1.18.5 cluster on Google Compute Engine with Fedora CoreOS.

We'll declare a Kubernetes cluster using the Typhoon Terraform module. Then apply the changes to create a network, firewall rules, health checks, controller instances, worker managed instance group, load balancers, and TLS assets.

@ -49,7 +49,7 @@ Configure the Google Cloud provider to use your service account key, project-id,

```tf
provider "google" {
version = "3.26.0"
version = "3.27.0"
project = "project-id"
region = "us-central1"
credentials = file("~/.config/google-cloud/terraform.json")

@ -145,9 +145,9 @@ List nodes in the cluster.
$ export KUBECONFIG=/home/user/.kube/configs/yavin-config
$ kubectl get nodes
NAME ROLES STATUS AGE VERSION
yavin-controller-0.c.example-com.internal <none> Ready 6m v1.18.4
yavin-worker-jrbf.c.example-com.internal <none> Ready 5m v1.18.4
yavin-worker-mzdm.c.example-com.internal <none> Ready 5m v1.18.4
yavin-controller-0.c.example-com.internal <none> Ready 6m v1.18.5
yavin-worker-jrbf.c.example-com.internal <none> Ready 5m v1.18.5
yavin-worker-mzdm.c.example-com.internal <none> Ready 5m v1.18.5
```

List the pods.

@ -213,7 +213,7 @@ resource "google_dns_managed_zone" "zone-for-clusters" {
| worker_count | Number of workers | 1 | 3 |
| controller_type | Machine type for controllers | "n1-standard-1" | See below |
| worker_type | Machine type for workers | "n1-standard-1" | See below |
| os_stream | Fedora CoreOS stream for compute instances | "stable" | "testing", "next" |
| os_stream | Fedora CoreOS stream for compute instances | "stable" | "stable", "testing", "next" |
| disk_size | Size of the disk in GB | 40 | 100 |
| worker_preemptible | If enabled, Compute Engine will terminate workers randomly within 24 hours | false | true |
| controller_snippets | Controller Fedora CoreOS Config snippets | [] | [examples](/advanced/customization/) |

@ -1,6 +1,6 @@
# AWS

In this tutorial, we'll create a Kubernetes v1.18.4 cluster on AWS with CoreOS Container Linux or Flatcar Linux.
In this tutorial, we'll create a Kubernetes v1.18.5 cluster on AWS with CoreOS Container Linux or Flatcar Linux.

We'll declare a Kubernetes cluster using the Typhoon Terraform module. Then apply the changes to create a VPC, gateway, subnets, security groups, controller instances, worker auto-scaling group, network load balancer, and TLS assets.

@ -49,7 +49,7 @@ Configure the AWS provider to use your access key credentials in a `providers.tf

```tf
provider "aws" {
version = "2.66.0"
version = "2.68.0"
region = "eu-central-1"
shared_credentials_file = "/home/user/.config/aws/credentials"
}

@ -70,7 +70,7 @@ Define a Kubernetes cluster using the module `aws/container-linux/kubernetes`.

```tf
module "tempest" {
source = "git::https://github.com/poseidon/typhoon//aws/container-linux/kubernetes?ref=v1.18.4"
source = "git::https://github.com/poseidon/typhoon//aws/container-linux/kubernetes?ref=v1.18.5"

# AWS
cluster_name = "tempest"

@ -143,9 +143,9 @@ List nodes in the cluster.
$ export KUBECONFIG=/home/user/.kube/configs/tempest-config
$ kubectl get nodes
NAME STATUS ROLES AGE VERSION
ip-10-0-3-155 Ready <none> 10m v1.18.4
ip-10-0-26-65 Ready <none> 10m v1.18.4
ip-10-0-41-21 Ready <none> 10m v1.18.4
ip-10-0-3-155 Ready <none> 10m v1.18.5
ip-10-0-26-65 Ready <none> 10m v1.18.5
ip-10-0-41-21 Ready <none> 10m v1.18.5
```

List the pods.

@ -208,7 +208,7 @@ Reference the DNS zone id with `aws_route53_zone.zone-for-clusters.zone_id`.
| worker_count | Number of workers | 1 | 3 |
| controller_type | EC2 instance type for controllers | "t3.small" | See below |
| worker_type | EC2 instance type for workers | "t3.small" | See below |
| os_image | AMI channel for a Container Linux derivative | "flatcar-stable" | coreos-stable, coreos-beta, coreos-alpha, flatcar-stable, flatcar-beta, flatcar-alpha, flatcar-edge |
| os_image | AMI channel for a Container Linux derivative | "flatcar-stable" | flatcar-stable, flatcar-beta, flatcar-alpha, flatcar-edge |
| disk_size | Size of the EBS volume in GB | 40 | 100 |
| disk_type | Type of the EBS volume | "gp2" | standard, gp2, io1 |
| disk_iops | IOPS of the EBS volume | 0 (i.e. auto) | 400 |

@ -1,6 +1,6 @@
# Azure

In this tutorial, we'll create a Kubernetes v1.18.4 cluster on Azure with CoreOS Container Linux or Flatcar Linux.
In this tutorial, we'll create a Kubernetes v1.18.5 cluster on Azure with CoreOS Container Linux or Flatcar Linux.

We'll declare a Kubernetes cluster using the Typhoon Terraform module. Then apply the changes to create a resource group, virtual network, subnets, security groups, controller availability set, worker scale set, load balancer, and TLS assets.

@ -47,7 +47,7 @@ Configure the Azure provider in a `providers.tf` file.

```tf
provider "azurerm" {
version = "2.14.0"
version = "2.16.0"
}

provider "ct" {

@ -72,7 +72,7 @@ Define a Kubernetes cluster using the module `azure/container-linux/kubernetes`.

```tf
module "ramius" {
source = "git::https://github.com/poseidon/typhoon//azure/container-linux/kubernetes?ref=v1.18.4"
source = "git::https://github.com/poseidon/typhoon//azure/container-linux/kubernetes?ref=v1.18.5"

# Azure
cluster_name = "ramius"

@ -146,9 +146,9 @@ List nodes in the cluster.
$ export KUBECONFIG=/home/user/.kube/configs/ramius-config
$ kubectl get nodes
NAME STATUS ROLES AGE VERSION
ramius-controller-0 Ready <none> 24m v1.18.4
ramius-worker-000001 Ready <none> 25m v1.18.4
ramius-worker-000002 Ready <none> 24m v1.18.4
ramius-controller-0 Ready <none> 24m v1.18.5
ramius-worker-000001 Ready <none> 25m v1.18.5
ramius-worker-000002 Ready <none> 24m v1.18.5
```

List the pods.

@ -225,7 +225,7 @@ Reference the DNS zone with `azurerm_dns_zone.clusters.name` and its resource gr
| worker_count | Number of workers | 1 | 3 |
| controller_type | Machine type for controllers | "Standard_B2s" | See below |
| worker_type | Machine type for workers | "Standard_DS1_v2" | See below |
| os_image | Channel for a Container Linux derivative | "flatcar-stable" | flatcar-stable, flatcar-beta, flatcar-alpha, flatcar-edge, coreos-stable, coreos-beta, coreos-alpha |
| os_image | Channel for a Container Linux derivative | "flatcar-stable" | flatcar-stable, flatcar-beta, flatcar-alpha, flatcar-edge |
| disk_size | Size of the disk in GB | 40 | 100 |
| worker_priority | Set priority to Spot to use reduced cost surplus capacity, with the tradeoff that instances can be deallocated at any time | Regular | Spot |
| controller_snippets | Controller Container Linux Config snippets | [] | [example](/advanced/customization/#usage) |

@ -1,6 +1,6 @@
# Bare-Metal

In this tutorial, we'll network boot and provision a Kubernetes v1.18.4 cluster on bare-metal with CoreOS Container Linux or Flatcar Linux.
In this tutorial, we'll network boot and provision a Kubernetes v1.18.5 cluster on bare-metal with CoreOS Container Linux or Flatcar Linux.

First, we'll deploy a [Matchbox](https://github.com/poseidon/matchbox) service and setup a network boot environment. Then, we'll declare a Kubernetes cluster using the Typhoon Terraform module and power on machines. On PXE boot, machines will install Container Linux to disk, reboot into the disk install, and provision themselves as Kubernetes controllers or workers via Ignition.

@ -160,7 +160,7 @@ Define a Kubernetes cluster using the module `bare-metal/container-linux/kuberne

```tf
module "mercury" {
source = "git::https://github.com/poseidon/typhoon//bare-metal/container-linux/kubernetes?ref=v1.18.4"
source = "git::https://github.com/poseidon/typhoon//bare-metal/container-linux/kubernetes?ref=v1.18.5"

# bare-metal
cluster_name = "mercury"

@ -299,9 +299,9 @@ List nodes in the cluster.
$ export KUBECONFIG=/home/user/.kube/configs/mercury-config
$ kubectl get nodes
NAME STATUS ROLES AGE VERSION
node1.example.com Ready <none> 10m v1.18.4
node2.example.com Ready <none> 10m v1.18.4
node3.example.com Ready <none> 10m v1.18.4
node1.example.com Ready <none> 10m v1.18.5
node2.example.com Ready <none> 10m v1.18.5
node3.example.com Ready <none> 10m v1.18.5
```

List the pods.

@ -336,7 +336,7 @@ Check the [variables.tf](https://github.com/poseidon/typhoon/blob/master/bare-me
|:-----|:------------|:--------|
| cluster_name | Unique cluster name | "mercury" |
| matchbox_http_endpoint | Matchbox HTTP read-only endpoint | "http://matchbox.example.com:port" |
| os_channel | Channel for a Container Linux derivative | coreos-stable, coreos-beta, coreos-alpha, flatcar-stable, flatcar-beta, flatcar-alpha, flatcar-edge |
| os_channel | Channel for a Container Linux derivative | flatcar-stable, flatcar-beta, flatcar-alpha, flatcar-edge |
| os_version | Version for a Container Linux derivative to PXE and install | "2345.3.1" |
| k8s_domain_name | FQDN resolving to the controller(s) nodes. Workers and kubectl will communicate with this endpoint | "myk8s.example.com" |
| ssh_authorized_key | SSH public key for user 'core' | "ssh-rsa AAAAB3Nz..." |

@ -1,6 +1,6 @@
# DigitalOcean

In this tutorial, we'll create a Kubernetes v1.18.4 cluster on DigitalOcean with CoreOS Container Linux or Flatcar Linux.
In this tutorial, we'll create a Kubernetes v1.18.5 cluster on DigitalOcean with CoreOS Container Linux or Flatcar Linux.

We'll declare a Kubernetes cluster using the Typhoon Terraform module. Then apply the changes to create controller droplets, worker droplets, DNS records, tags, and TLS assets.

@ -79,7 +79,7 @@ Define a Kubernetes cluster using the module `digital-ocean/container-linux/kube

```tf
module "nemo" {
source = "git::https://github.com/poseidon/typhoon//digital-ocean/container-linux/kubernetes?ref=v1.18.4"
source = "git::https://github.com/poseidon/typhoon//digital-ocean/container-linux/kubernetes?ref=v1.18.5"

# Digital Ocean
cluster_name = "nemo"

@ -153,9 +153,9 @@ List nodes in the cluster.
$ export KUBECONFIG=/home/user/.kube/configs/nemo-config
$ kubectl get nodes
NAME STATUS ROLES AGE VERSION
10.132.110.130 Ready <none> 10m v1.18.4
10.132.115.81 Ready <none> 10m v1.18.4
10.132.124.107 Ready <none> 10m v1.18.4
10.132.110.130 Ready <none> 10m v1.18.5
10.132.115.81 Ready <none> 10m v1.18.5
10.132.124.107 Ready <none> 10m v1.18.5
```

List the pods.

@ -190,7 +190,7 @@ Check the [variables.tf](https://github.com/poseidon/typhoon/blob/master/digital
| cluster_name | Unique cluster name (prepended to dns_zone) | "nemo" |
| region | Digital Ocean region | "nyc1", "sfo2", "fra1", "tor1" |
| dns_zone | Digital Ocean domain (i.e. DNS zone) | "do.example.com" |
| os_image | Container Linux image for instances | "custom-image-id", coreos-stable, coreos-beta, coreos-alpha |
| os_image | Container Linux image for instances | "uploaded-flatcar-image-id" |
| ssh_fingerprints | SSH public key fingerprints | ["d7:9d..."] |

#### DNS Zone

@ -1,6 +1,6 @@
# Google Cloud

In this tutorial, we'll create a Kubernetes v1.18.4 cluster on Google Compute Engine with CoreOS Container Linux or Flatcar Linux.
In this tutorial, we'll create a Kubernetes v1.18.5 cluster on Google Compute Engine with CoreOS Container Linux or Flatcar Linux.

We'll declare a Kubernetes cluster using the Typhoon Terraform module. Then apply the changes to create a network, firewall rules, health checks, controller instances, worker managed instance group, load balancers, and TLS assets.

@ -49,7 +49,7 @@ Configure the Google Cloud provider to use your service account key, project-id,

```tf
provider "google" {
version = "3.26.0"
version = "3.27.0"
project = "project-id"
region = "us-central1"
credentials = file("~/.config/google-cloud/terraform.json")

@ -90,7 +90,7 @@ Define a Kubernetes cluster using the module `google-cloud/container-linux/kuber

```tf
module "yavin" {
source = "git::https://github.com/poseidon/typhoon//google-cloud/container-linux/kubernetes?ref=v1.18.4"
source = "git::https://github.com/poseidon/typhoon//google-cloud/container-linux/kubernetes?ref=v1.18.5"

# Google Cloud
cluster_name = "yavin"

@ -165,9 +165,9 @@ List nodes in the cluster.
$ export KUBECONFIG=/home/user/.kube/configs/yavin-config
$ kubectl get nodes
NAME ROLES STATUS AGE VERSION
yavin-controller-0.c.example-com.internal <none> Ready 6m v1.18.4
yavin-worker-jrbf.c.example-com.internal <none> Ready 5m v1.18.4
yavin-worker-mzdm.c.example-com.internal <none> Ready 5m v1.18.4
yavin-controller-0.c.example-com.internal <none> Ready 6m v1.18.5
yavin-worker-jrbf.c.example-com.internal <none> Ready 5m v1.18.5
yavin-worker-mzdm.c.example-com.internal <none> Ready 5m v1.18.5
```

List the pods.

@ -204,7 +204,7 @@ Check the [variables.tf](https://github.com/poseidon/typhoon/blob/master/google-
| region | Google Cloud region | "us-central1" |
| dns_zone | Google Cloud DNS zone | "google-cloud.example.com" |
| dns_zone_name | Google Cloud DNS zone name | "example-zone" |
| os_image | Container Linux image for compute instances | "flatcar-linux-2303-4-0", coreos-stable, coreos-beta, coreos-alpha |
| os_image | Container Linux image for compute instances | "flatcar-linux-2303-4-0" |
| ssh_authorized_key | SSH public key for user 'core' | "ssh-rsa AAAAB3NZ..." |

Check the list of valid [regions](https://cloud.google.com/compute/docs/regions-zones/regions-zones) and list Container Linux [images](https://cloud.google.com/compute/docs/images) with `gcloud compute images list | grep coreos`.

@ -11,7 +11,7 @@ Typhoon distributes upstream Kubernetes, architectural conventions, and cluster

## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>

* Kubernetes v1.18.4 (upstream)
* Kubernetes v1.18.5 (upstream)
* Single or multi-master, [Calico](https://www.projectcalico.org/) or [flannel](https://github.com/coreos/flannel) networking
* On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/), SELinux enforcing
* Advanced features like [worker pools](advanced/worker-pools/), [preemptible](fedora-coreos/google-cloud/#preemption) workers, and [snippets](advanced/customization/#container-linux) customization

@ -29,7 +29,7 @@ Typhoon is available for [Fedora CoreOS](https://getfedora.org/coreos/).
| Azure | Fedora CoreOS | [azure/fedora-coreos/kubernetes](fedora-coreos/azure.md) | alpha |
| Bare-Metal | Fedora CoreOS | [bare-metal/fedora-coreos/kubernetes](fedora-coreos/bare-metal.md) | beta |
| DigitalOcean | Fedora CoreOS | [digital-ocean/fedora-coreos/kubernetes](fedora-coreos/digitalocean.md) | beta |
| Google Cloud | Fedora CoreOS | [google-cloud/fedora-coreos/kubernetes](fedora-coreos/google-cloud.md) | beta |
| Google Cloud | Fedora CoreOS | [google-cloud/fedora-coreos/kubernetes](fedora-coreos/google-cloud.md) | stable |

Typhoon is available for [Flatcar Linux](https://www.flatcar-linux.org/releases/).

@ -53,7 +53,7 @@ Define a Kubernetes cluster by using the Terraform module for your chosen platfo

```tf
module "yavin" {
source = "git::https://github.com/poseidon/typhoon//google-cloud/fedora-coreos/kubernetes?ref=v1.18.4"
source = "git::https://github.com/poseidon/typhoon//google-cloud/fedora-coreos/kubernetes?ref=v1.18.5"

# Google Cloud
cluster_name = "yavin"

@ -91,9 +91,9 @@ In 4-8 minutes (varies by platform), the cluster will be ready. This Google Clou
$ export KUBECONFIG=/home/user/.kube/configs/yavin-config
$ kubectl get nodes
NAME ROLES STATUS AGE VERSION
yavin-controller-0.c.example-com.internal <none> Ready 6m v1.18.4
yavin-worker-jrbf.c.example-com.internal <none> Ready 5m v1.18.4
yavin-worker-mzdm.c.example-com.internal <none> Ready 5m v1.18.4
yavin-controller-0.c.example-com.internal <none> Ready 6m v1.18.5
yavin-worker-jrbf.c.example-com.internal <none> Ready 5m v1.18.5
yavin-worker-mzdm.c.example-com.internal <none> Ready 5m v1.18.5
```

List the pods.

@ -13,12 +13,12 @@ Typhoon provides tagged releases to allow clusters to be versioned using ordinar

```
module "yavin" {
source = "git::https://github.com/poseidon/typhoon//google-cloud/fedora-coreos/kubernetes?ref=v1.18.4"
source = "git::https://github.com/poseidon/typhoon//google-cloud/fedora-coreos/kubernetes?ref=v1.18.5"
...
}

module "mercury" {
source = "git::https://github.com/poseidon/typhoon//bare-metal/container-linux/kubernetes?ref=v1.18.4"
source = "git::https://github.com/poseidon/typhoon//bare-metal/container-linux/kubernetes?ref=v1.18.5"
...
}
```

@ -11,7 +11,7 @@ Typhoon distributes upstream Kubernetes, architectural conventions, and cluster

## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>

* Kubernetes v1.18.4 (upstream)
* Kubernetes v1.18.5 (upstream)
* Single or multi-master, [Calico](https://www.projectcalico.org/) or [flannel](https://github.com/coreos/flannel) networking
* On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
* Advanced features like [worker pools](https://typhoon.psdn.io/advanced/worker-pools/), [preemptible](https://typhoon.psdn.io/cl/google-cloud/#preemption) workers, and [snippets](https://typhoon.psdn.io/advanced/customization/#container-linux) customization

@ -1,6 +1,6 @@
# Kubernetes assets (kubeconfig, manifests)
module "bootstrap" {
source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=e75697ce35d7773705f0b9b28ce1ffbe99f9493c"
source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=5a7c963caf59740891df2aeae4b1561ccb3b9db6"

cluster_name = var.cluster_name
api_servers = [format("%s.%s", var.cluster_name, var.dns_zone)]

@ -52,7 +52,7 @@ systemd:
Description=Kubelet
Wants=rpc-statd.service
[Service]
Environment=KUBELET_IMAGE=docker://quay.io/poseidon/kubelet:v1.18.4
Environment=KUBELET_IMAGE=docker://quay.io/poseidon/kubelet:v1.18.5
ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
ExecStartPre=/bin/mkdir -p /opt/cni/bin

@ -132,7 +132,7 @@ systemd:
--volume script,kind=host,source=/opt/bootstrap/apply \
--mount volume=script,target=/apply \
--insecure-options=image \
docker://quay.io/poseidon/kubelet:v1.18.4 \
docker://quay.io/poseidon/kubelet:v1.18.5 \
--net=host \
--dns=host \
--exec=/apply

@ -112,6 +112,32 @@ resource "google_compute_firewall" "internal-vxlan" {
target_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
}

# Cilium VXLAN
resource "google_compute_firewall" "internal-linux-vxlan" {
count = var.networking == "cilium" ? 1 : 0

name = "${var.cluster_name}-linux-vxlan"
network = google_compute_network.network.name

allow {
protocol = "udp"
ports = [8472]
}

# Cilium health
allow {
protocol = "icmp"
}

allow {
protocol = "tcp"
ports = [4240]
}

source_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
target_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
}

# Allow Prometheus to scrape node-exporter daemonset
resource "google_compute_firewall" "internal-node-exporter" {
name = "${var.cluster_name}-internal-node-exporter"

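These rules only materialize when a cluster opts into the new CNI provider, via the `count` conditional above. A minimal sketch of selecting it on a cluster module; required arguments are elided and the "calico" default is inferred from the variable, not this hunk:

```tf
module "yavin" {
  source = "git::https://github.com/poseidon/typhoon//google-cloud/container-linux/kubernetes?ref=v1.18.5"

  # ...region, dns_zone, ssh_authorized_key, etc. as in the tutorials...
  networking = "cilium" # default is "calico"
}
```
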
@ -25,7 +25,7 @@ systemd:
Description=Kubelet
Wants=rpc-statd.service
[Service]
Environment=KUBELET_IMAGE=docker://quay.io/poseidon/kubelet:v1.18.4
Environment=KUBELET_IMAGE=docker://quay.io/poseidon/kubelet:v1.18.5
ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
ExecStartPre=/bin/mkdir -p /opt/cni/bin

@ -127,7 +127,7 @@ storage:
--volume config,kind=host,source=/etc/kubernetes \
--mount volume=config,target=/etc/kubernetes \
--insecure-options=image \
docker://quay.io/poseidon/kubelet:v1.18.4 \
docker://quay.io/poseidon/kubelet:v1.18.5 \
--net=host \
--dns=host \
--exec=/usr/local/bin/kubectl -- --kubeconfig=/etc/kubernetes/kubeconfig delete node $(hostname)

@ -11,7 +11,7 @@ Typhoon distributes upstream Kubernetes, architectural conventions, and cluster

## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>

* Kubernetes v1.18.4 (upstream)
* Kubernetes v1.18.5 (upstream)
* Single or multi-master, [Calico](https://www.projectcalico.org/) or [flannel](https://github.com/coreos/flannel) networking
* On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/), SELinux enforcing
* Advanced features like [worker pools](https://typhoon.psdn.io/advanced/worker-pools/), [preemptible](https://typhoon.psdn.io/fedora-coreos/google-cloud/#preemption) workers, and [snippets](https://typhoon.psdn.io/advanced/customization/) customization

@ -1,6 +1,6 @@
# Kubernetes assets (kubeconfig, manifests)
module "bootstrap" {
source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=e75697ce35d7773705f0b9b28ce1ffbe99f9493c"
source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=5a7c963caf59740891df2aeae4b1561ccb3b9db6"

cluster_name = var.cluster_name
api_servers = [format("%s.%s", var.cluster_name, var.dns_zone)]

@ -42,7 +42,7 @@ resource "google_compute_instance" "controllers" {
auto_delete = true

initialize_params {
image = var.os_image == "" ? data.google_compute_image.fedora-coreos.self_link : var.os_image
image = data.google_compute_image.fedora-coreos.self_link
size = var.disk_size
}
}

@ -54,7 +54,7 @@ systemd:
Description=Kubelet (System Container)
Wants=rpc-statd.service
[Service]
Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.18.4
Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.18.5
ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
ExecStartPre=/bin/mkdir -p /opt/cni/bin

@ -123,7 +123,7 @@ systemd:
--volume /opt/bootstrap/assets:/assets:ro,Z \
--volume /opt/bootstrap/apply:/apply:ro,Z \
--entrypoint=/apply \
quay.io/poseidon/kubelet:v1.18.4
quay.io/poseidon/kubelet:v1.18.5
ExecStartPost=/bin/touch /opt/bootstrap/bootstrap.done
ExecStartPost=-/usr/bin/podman stop bootstrap
storage:

@ -175,6 +175,11 @@ storage:
contents:
inline: |
fs.inotify.max_user_watches=16184
- path: /etc/sysctl.d/reverse-path-filter.conf
contents:
inline: |
net.ipv4.conf.default.rp_filter=0
net.ipv4.conf.*.rp_filter=0
- path: /etc/systemd/system.conf.d/accounting.conf
contents:
inline: |

@ -112,6 +112,32 @@ resource "google_compute_firewall" "internal-vxlan" {
target_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
}

# Cilium VXLAN
resource "google_compute_firewall" "internal-linux-vxlan" {
count = var.networking == "cilium" ? 1 : 0

name = "${var.cluster_name}-linux-vxlan"
network = google_compute_network.network.name

allow {
protocol = "udp"
ports = [8472]
}

# Cilium health
allow {
protocol = "icmp"
}

allow {
protocol = "tcp"
ports = [4240]
}

source_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
target_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
}

# Allow Prometheus to scrape node-exporter daemonset
resource "google_compute_firewall" "internal-node-exporter" {
name = "${var.cluster_name}-internal-node-exporter"

@ -52,13 +52,6 @@ variable "os_stream" {
default = "stable"
}

# Deprecated
variable "os_image" {
type = string
description = "Fedora CoreOS image for compute instances (e.g. fedora-coreos)"
default = ""
}

variable "disk_size" {
type = number
description = "Size of the disk in GB"

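With the deprecated `os_image` variable removed, the compute image is resolved purely from `os_stream`. A sketch of the lookup this implies, assuming streams map to image families published by the upstream `fedora-coreos-cloud` project (the module's actual data source may differ):

```tf
# assumed mapping: os_stream ("stable", "testing", "next") -> image family
data "google_compute_image" "fedora-coreos" {
  project = "fedora-coreos-cloud"
  family  = "fedora-coreos-${var.os_stream}"
}
```
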
@ -9,7 +9,6 @@ module "workers" {
worker_count = var.worker_count
machine_type = var.worker_type
os_stream = var.os_stream
os_image = var.os_image
disk_size = var.disk_size
preemptible = var.worker_preemptible

@ -24,7 +24,7 @@ systemd:
Description=Kubelet (System Container)
Wants=rpc-statd.service
[Service]
Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.18.4
Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.18.5
ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
ExecStartPre=/bin/mkdir -p /opt/cni/bin

@ -88,7 +88,7 @@ systemd:
Type=oneshot
RemainAfterExit=true
ExecStart=/bin/true
ExecStop=/bin/bash -c '/usr/bin/podman run --volume /etc/kubernetes:/etc/kubernetes:ro,z --entrypoint /usr/local/bin/kubectl quay.io/poseidon/kubelet:v1.18.4 --kubeconfig=/etc/kubernetes/kubeconfig delete node $HOSTNAME'
ExecStop=/bin/bash -c '/usr/bin/podman run --volume /etc/kubernetes:/etc/kubernetes:ro,z --entrypoint /usr/local/bin/kubectl quay.io/poseidon/kubelet:v1.18.5 --kubeconfig=/etc/kubernetes/kubeconfig delete node $HOSTNAME'
[Install]
WantedBy=multi-user.target
storage:

@ -104,6 +104,11 @@ storage:
contents:
inline: |
fs.inotify.max_user_watches=16184
- path: /etc/sysctl.d/reverse-path-filter.conf
contents:
inline: |
net.ipv4.conf.default.rp_filter=0
net.ipv4.conf.*.rp_filter=0
- path: /etc/systemd/system.conf.d/accounting.conf
contents:
inline: |

@ -40,13 +40,6 @@ variable "os_stream" {
default = "stable"
}

# Deprecated
variable "os_image" {
type = string
description = "Fedora CoreOS image for compute instances (e.g. fedora-coreos)"
default = ""
}

variable "disk_size" {
type = number
description = "Size of the disk in GB"

@ -43,7 +43,7 @@ resource "google_compute_instance_template" "worker" {
disk {
auto_delete = true
boot = true
source_image = var.os_image == "" ? data.google_compute_image.fedora-coreos.self_link : var.os_image
source_image = data.google_compute_image.fedora-coreos.self_link
disk_size_gb = var.disk_size
}

@ -1,4 +1,4 @@
mkdocs==1.1.2
mkdocs-material==5.3.0
mkdocs-material==5.3.3
pygments==2.6.1
pymdown-extensions==7.1.0