Mirror of https://github.com/puppetmaster/typhoon.git (synced 2025-04-21 23:31:11 +02:00)

Compare commits

No commits in common. "main" and "v1.30.1" have entirely different histories.

CHANGES.md (141 changed lines)

@@ -4,147 +4,6 @@ Notable changes between versions.

## Latest

## v1.31.3

* Kubernetes [v1.31.3](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.31.md#v1313)
* Update CoreDNS from v1.11.3 to v1.11.4
* Update Cilium from v1.16.3 to [v1.16.4](https://github.com/cilium/cilium/releases/tag/v1.16.4)

### Deprecations

* Plan to drop support for using Calico CNI; recommend everyone use the Cilium default

## v1.31.2

* Kubernetes [v1.31.2](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.31.md#v1312)
* Update Cilium from v1.16.1 to [v1.16.3](https://github.com/cilium/cilium/releases/tag/v1.16.3)
* Update flannel from v0.25.6 to [v0.26.0](https://github.com/flannel-io/flannel/releases/tag/v0.26.0)

## v1.31.1

* Kubernetes [v1.31.1](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.31.md#v1311)
* Update flannel from v0.25.5 to [v0.25.6](https://github.com/flannel-io/flannel/releases/tag/v0.25.6)

### Google

* Add `controller_disk_type` and `worker_disk_type` variables ([#1513](https://github.com/poseidon/typhoon/pull/1513)); see the sketch below
* Add explicit `region` field to regional worker instance templates ([#1524](https://github.com/poseidon/typhoon/pull/1524))
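
A minimal sketch of adopting the new Google disk-type variables; the `pd-ssd` and `pd-balanced` values are illustrative choices, not defaults from this release:

```diff
module "cluster" {
  ...
+ controller_disk_type = "pd-ssd"
+ worker_disk_type     = "pd-balanced"
}
```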

## v1.31.0

* Kubernetes [v1.31.0](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.31.md#v1310)
* Use Cilium kube-proxy replacement mode when `cilium` networking is chosen ([#1501](https://github.com/poseidon/typhoon/pull/1501)); the relevant config keys are shown below
* Fix invalid flannel-cni container image for those using `flannel` networking ([#1497](https://github.com/poseidon/typhoon/pull/1497))
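
With `cilium` networking, the rendered Cilium ConfigMap now turns replacement on; the relevant keys, as they appear in the config map diff later in this comparison:

```
kube-proxy-replacement                      = "true"
kube-proxy-replacement-healthz-bind-address = ":10256"
```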

### AWS

* Use EC2 resource-based hostnames instead of IP-based hostnames ([#1499](https://github.com/poseidon/typhoon/pull/1499)); see the excerpt below
  * The Amazon DNS server can resolve A and AAAA queries to IPv4 and IPv6 node addresses
* Tag controller node EBS volumes with a name based on the controller node name
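
The hostname change corresponds to subnet settings visible in the `aws_subnet` diff later in this comparison:

```tf
# Excerpt: EC2 resource-name hostnames with A and AAAA records
private_dns_hostname_type_on_launch            = "resource-name"
enable_resource_name_dns_a_record_on_launch    = true
enable_resource_name_dns_aaaa_record_on_launch = true
```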

### Google

* Use `google_compute_region_instance_template` instead of `google_compute_instance_template`
  * Google's regional instance template metadata is kept in the associated region for greater resiliency. The "global" instance templates were kept in a single region

## v1.30.4

* Kubernetes [v1.30.4](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.30.md#v1304)
* Update Cilium from v1.15.7 to [v1.16.1](https://github.com/cilium/cilium/releases/tag/v1.16.1)
* Update CoreDNS from v1.11.1 to v1.11.3
* Remove `enable_aggregation` variable for Kubernetes Aggregation Layer, always set to true
* Remove `cluster_domain_suffix` variable, always use "cluster.local"
* Remove `enable_reporting` variable for analytics, always set to false (migration sketch below)
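
For clusters that set any of these variables, migrating is just deleting them; a sketch using the old default values:

```diff
module "cluster" {
  ...
- enable_aggregation    = true
- cluster_domain_suffix = "cluster.local"
- enable_reporting      = false
}
```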

## v1.30.3

* Kubernetes [v1.30.3](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.30.md#v1303)
* Update Cilium from v1.15.6 to [v1.15.7](https://github.com/cilium/cilium/releases/tag/v1.15.7)
* Update flannel from v0.25.4 to [v0.25.5](https://github.com/flannel-io/flannel/releases/tag/v0.25.5)

### AWS

* Configure controller and worker disks ([#1482](https://github.com/poseidon/typhoon/pull/1482))
  * Add `controller_disk_type`, `controller_disk_size`, and `controller_disk_iops` variables
  * Add `worker_disk_type`, `worker_disk_size`, and `worker_disk_iops` variables
  * Remove `disk_type`, `disk_size`, and `disk_iops` variables
  * Fix propagating settings to worker disks, previously ignored
* Configure CPU pricing model for burstable instance types ([#1482](https://github.com/poseidon/typhoon/pull/1482))
  * Add `controller_cpu_credits` and `worker_cpu_credits` variables (`standard` or `unlimited`)
* Configure controller or worker instance architecture ([#1485](https://github.com/poseidon/typhoon/pull/1485))
  * Add `controller_arch` and `worker_arch` variables (`amd64` or `arm64`)
  * Remove `arch` variable

```diff
module "cluster" {
  ...
- arch      = "amd64"
- disk_type = "gp3"
- disk_size = 30
- disk_iops = 3000

+ controller_arch        = "amd64"
+ controller_disk_size   = 15
+ controller_cpu_credits = "standard"
+ worker_arch            = "amd64"
+ worker_disk_size       = 22
+ worker_cpu_credits     = "unlimited"
}
```

### Azure

* Configure the virtual network and subnets with IPv6 private address space
  * Change `host_cidr` variable (string) to a `network_cidr` object with `ipv4` and `ipv6` fields that list CIDR strings. Leave the variable unset to use the defaults. (**breaking**)
* Add support for dual-stack Kubernetes Ingress Load Balancing
  * Add a public IPv6 frontend, 80/443 rules, and a worker-ipv6 backend pool
  * Change the `controller_address_prefixes` output from a list of strings to an object with `ipv4` and `ipv6` fields. Most Azure resources can't accept a mix, so these are split out (**breaking**)
  * Change the `worker_address_prefixes` output from a list of strings to an object with `ipv4` and `ipv6` fields. Most Azure resources can't accept a mix, so these are split out (**breaking**)
  * Change the `backend_address_pool_id` output (and worker module input) from a string to an object with `ipv4` and `ipv6` fields that list ids (**breaking**; a usage sketch follows the example below)
* Configure nodes to have outbound IPv6 internet connectivity (analogous to IPv4 SNAT)
  * Configure controller nodes to have a public IPv6 address
  * Configure worker nodes to use outbound rules and the load balancer for SNAT
* Extend network security rules to allow IPv6 traffic, analogous to IPv4
* Rename `region` variable to `location` to align with Azure platform conventions ([#1469](https://github.com/poseidon/typhoon/pull/1469))
* Change worker pools from uniform to flexible orchestration mode ([#1473](https://github.com/poseidon/typhoon/pull/1473))
* Add options to allow worker nodes to use ephemeral local disks ([#1473](https://github.com/poseidon/typhoon/pull/1473))
  * Add `controller_disk_type` and `controller_disk_size` variables
  * Add `worker_disk_type`, `worker_disk_size`, and `worker_ephemeral_disk` variables
* Reduce the number of public IPv4 addresses needed for the Azure load balancer ([#1470](https://github.com/poseidon/typhoon/pull/1470))
* Configure controller or worker instance architecture for Flatcar Linux ([#1485](https://github.com/poseidon/typhoon/pull/1485))
  * Add `controller_arch` and `worker_arch` variables (`amd64` or `arm64`)
  * Remove `arch` variable

```diff
module "cluster" {
  ...
- region   = "centralus"
+ location = "centralus"

  # optional
- host_cidr = "10.0.0.0/16"
+ network_cidr = {
+   ipv4 = ["10.0.0.0/16"]
+ }

  # instances
+ controller_disk_type  = "StandardSSD_LRS"
+ worker_ephemeral_disk = true
}
```
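
Downstream Terraform that consumed the old list-valued outputs must now select a field; a sketch, assuming a module instance named `cluster`:

```tf
# Hypothetical usage: pick the IPv4 side of the new object-typed outputs
locals {
  controller_ipv4_prefixes = module.cluster.controller_address_prefixes.ipv4
  worker_ipv4_prefixes     = module.cluster.worker_address_prefixes.ipv4
  backend_pool_ipv4_ids    = module.cluster.backend_address_pool_id.ipv4
}
```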

### Google Cloud

* Allow configuring controller and worker disks ([#1486](https://github.com/poseidon/typhoon/pull/1486))
  * Add `controller_disk_size` and `worker_disk_size` variables
  * Remove `disk_size` variable (see the sketch below)
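
A sketch of the corresponding rename (sizes illustrative):

```diff
module "cluster" {
  ...
- disk_size            = 30
+ controller_disk_size = 30
+ worker_disk_size     = 100
}
```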

## v1.30.2

* Kubernetes [v1.30.2](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.30.md#v1302)
* Update CoreDNS from v1.9.4 to v1.11.1
* Update Cilium from v1.15.5 to [v1.15.6](https://github.com/cilium/cilium/releases/tag/v1.15.6)
* Update flannel from v0.25.1 to [v0.25.4](https://github.com/flannel-io/flannel/releases/tag/v0.25.4)

## v1.30.1

* Kubernetes [v1.30.1](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.30.md#v1301)
README.md (22 changed lines)

@@ -18,7 +18,7 @@ Typhoon distributes upstream Kubernetes, architectural conventions, and cluster

 ## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>

-* Kubernetes v1.31.3 (upstream)
+* Kubernetes v1.30.1 (upstream)
 * Single or multi-master, [Calico](https://www.projectcalico.org/) or [Cilium](https://github.com/cilium/cilium) or [flannel](https://github.com/coreos/flannel) networking
 * On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/), SELinux enforcing
 * Advanced features like [worker pools](https://typhoon.psdn.io/advanced/worker-pools/), [preemptible](https://typhoon.psdn.io/flatcar-linux/google-cloud/#preemption) workers, and [snippets](https://typhoon.psdn.io/advanced/customization/#hosts) customization

@@ -78,7 +78,7 @@ Define a Kubernetes cluster by using the Terraform module for your chosen platfo

 ```tf
 module "yavin" {
-  source = "git::https://github.com/poseidon/typhoon//google-cloud/fedora-coreos/kubernetes?ref=v1.31.3"
+  source = "git::https://github.com/poseidon/typhoon//google-cloud/fedora-coreos/kubernetes?ref=v1.30.1"

   # Google Cloud
   cluster_name = "yavin"

@@ -96,9 +96,8 @@ module "yavin" {

 # Obtain cluster kubeconfig
 resource "local_file" "kubeconfig-yavin" {
   content         = module.yavin.kubeconfig-admin
   filename        = "/home/user/.kube/configs/yavin-config"
-  file_permission = "0600"
 }
 ```

@@ -118,9 +117,9 @@ In 4-8 minutes (varies by platform), the cluster will be ready. This Google Clou

 $ export KUBECONFIG=/home/user/.kube/configs/yavin-config
 $ kubectl get nodes
 NAME                                       ROLES   STATUS  AGE  VERSION
-yavin-controller-0.c.example-com.internal  <none>  Ready   6m   v1.31.3
-yavin-worker-jrbf.c.example-com.internal   <none>  Ready   5m   v1.31.3
-yavin-worker-mzdm.c.example-com.internal   <none>  Ready   5m   v1.31.3
+yavin-controller-0.c.example-com.internal  <none>  Ready   6m   v1.30.1
+yavin-worker-jrbf.c.example-com.internal   <none>  Ready   5m   v1.30.1
+yavin-worker-mzdm.c.example-com.internal   <none>  Ready   5m   v1.30.1
 ```

 List the pods.

@@ -128,10 +127,9 @@ List the pods.

 ```
 $ kubectl get pods --all-namespaces
 NAMESPACE     NAME                              READY  STATUS   RESTARTS  AGE
-kube-system   cilium-1cs8z                      1/1    Running  0         6m
-kube-system   cilium-d1l5b                      1/1    Running  0         6m
-kube-system   cilium-sp9ps                      1/1    Running  0         6m
-kube-system   cilium-operator-68d778b448-g744f  1/1    Running  0         6m
+kube-system   calico-node-1cs8z                 2/2    Running  0         6m
+kube-system   calico-node-d1l5b                 2/2    Running  0         6m
+kube-system   calico-node-sp9ps                 2/2    Running  0         6m
 kube-system   coredns-1187388186-zj5dl          1/1    Running  0         6m
 kube-system   coredns-1187388186-dkh3o          1/1    Running  0         6m
 kube-system   kube-apiserver-controller-0       1/1    Running  0         6m
@@ -128,8 +128,8 @@ resource "kubernetes_config_map" "cilium" {
     enable-bpf-masquerade = "true"

     # kube-proxy
-    kube-proxy-replacement                      = "true"
-    kube-proxy-replacement-healthz-bind-address = ":10256"
+    kube-proxy-replacement                      = "false"
+    kube-proxy-replacement-healthz-bind-address = ""
     enable-session-affinity = "true"

     # ClusterIPs from host namespace
@@ -61,7 +61,7 @@ resource "kubernetes_daemonset" "cilium" {
         # https://github.com/cilium/cilium/pull/24075
         init_container {
           name    = "install-cni"
-          image   = "quay.io/cilium/cilium:v1.16.4"
+          image   = "quay.io/cilium/cilium:v1.15.5"
           command = ["/install-plugin.sh"]
           security_context {
             allow_privilege_escalation = true

@@ -80,7 +80,7 @@ resource "kubernetes_daemonset" "cilium" {
         # We use nsenter command with host's cgroup and mount namespaces enabled.
         init_container {
           name  = "mount-cgroup"
-          image = "quay.io/cilium/cilium:v1.16.4"
+          image = "quay.io/cilium/cilium:v1.15.5"
           command = [
             "sh",
             "-ec",

@@ -115,7 +115,7 @@ resource "kubernetes_daemonset" "cilium" {

         init_container {
           name    = "clean-cilium-state"
-          image   = "quay.io/cilium/cilium:v1.16.4"
+          image   = "quay.io/cilium/cilium:v1.15.5"
           command = ["/init-container.sh"]
           security_context {
             allow_privilege_escalation = true

@@ -139,7 +139,7 @@ resource "kubernetes_daemonset" "cilium" {

         container {
           name    = "cilium-agent"
-          image   = "quay.io/cilium/cilium:v1.16.4"
+          image   = "quay.io/cilium/cilium:v1.15.5"
           command = ["cilium-agent"]
           args = [
             "--config-dir=/tmp/cilium/config-map"

@@ -58,7 +58,7 @@ resource "kubernetes_deployment" "operator" {
         enable_service_links = false
         container {
           name    = "cilium-operator"
-          image   = "quay.io/cilium/operator-generic:v1.16.4"
+          image   = "quay.io/cilium/operator-generic:v1.15.5"
           command = ["cilium-operator-generic"]
           args = [
             "--config-dir=/tmp/cilium/config-map",

@@ -77,7 +77,7 @@ resource "kubernetes_deployment" "coredns" {
         }
         container {
           name  = "coredns"
-          image = "registry.k8s.io/coredns/coredns:v1.12.0"
+          image = "registry.k8s.io/coredns/coredns:v1.11.1"
           args  = ["-conf", "/etc/coredns/Corefile"]
           port {
             name = "dns"

@@ -73,7 +73,7 @@ resource "kubernetes_daemonset" "flannel" {

         container {
           name  = "flannel"
-          image = "docker.io/flannel/flannel:v0.26.1"
+          image = "docker.io/flannel/flannel:v0.25.1"
           command = [
             "/opt/bin/flanneld",
             "--ip-masq",
@@ -29,7 +29,7 @@ rules:
       - list
       - watch
   - apiGroups:
       - ""
     resources:
       - events
     verbs:

@@ -59,11 +59,4 @@ rules:
       - get
       - list
       - watch
-  - apiGroups:
-      - discovery.k8s.io
-    resources:
-      - "endpointslices"
-    verbs:
-      - get
-      - list
-      - watch

@@ -29,7 +29,7 @@ rules:
       - list
       - watch
   - apiGroups:
       - ""
     resources:
       - events
     verbs:

@@ -59,11 +59,4 @@ rules:
       - get
       - list
       - watch
-  - apiGroups:
-      - discovery.k8s.io
-    resources:
-      - "endpointslices"
-    verbs:
-      - get
-      - list
-      - watch

@@ -29,7 +29,7 @@ rules:
       - list
       - watch
   - apiGroups:
       - ""
     resources:
       - events
     verbs:

@@ -59,11 +59,4 @@ rules:
      - get
       - list
       - watch
-  - apiGroups:
-      - discovery.k8s.io
-    resources:
-      - "endpointslices"
-    verbs:
-      - get
-      - list
-      - watch
@@ -1,7 +1,7 @@
 apiVersion: v1
 kind: Service
 metadata:
-  name: nginx-ingress-controller
+  name: ingress-controller-public
   namespace: ingress
   annotations:
     prometheus.io/scrape: 'true'

@@ -10,7 +10,7 @@ spec:
   type: ClusterIP
   clusterIP: 10.3.0.12
   selector:
-    name: nginx-ingress-controller
+    name: ingress-controller-public
     phase: prod
   ports:
   - name: http
@@ -29,7 +29,7 @@ rules:
       - list
       - watch
   - apiGroups:
       - ""
     resources:
       - events
     verbs:

@@ -59,11 +59,4 @@ rules:
       - get
       - list
       - watch
-  - apiGroups:
-      - discovery.k8s.io
-    resources:
-      - "endpointslices"
-    verbs:
-      - get
-      - list
-      - watch

@@ -29,7 +29,7 @@ rules:
       - list
       - watch
   - apiGroups:
       - ""
     resources:
       - events
     verbs:

@@ -59,11 +59,11 @@ rules:
       - get
       - list
       - watch
   - apiGroups:
       - discovery.k8s.io
     resources:
       - "endpointslices"
     verbs:
       - get
       - list
       - watch
@@ -11,7 +11,7 @@ Typhoon distributes upstream Kubernetes, architectural conventions, and cluster

 ## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>

-* Kubernetes v1.31.3 (upstream)
+* Kubernetes v1.30.1 (upstream)
 * Single or multi-master, [Calico](https://www.projectcalico.org/) or [Cilium](https://github.com/cilium/cilium) or [flannel](https://github.com/coreos/flannel) networking
 * On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/), SELinux enforcing
 * Advanced features like [worker pools](https://typhoon.psdn.io/advanced/worker-pools/), [spot](https://typhoon.psdn.io/fedora-coreos/aws/#spot) workers, and [snippets](https://typhoon.psdn.io/advanced/customization/#hosts) customization
@@ -19,7 +19,7 @@ data "aws_ami" "fedora-coreos" {
 }

 data "aws_ami" "fedora-coreos-arm" {
-  count = var.controller_arch == "arm64" ? 1 : 0
+  count = var.arch == "arm64" ? 1 : 0

   most_recent = true
   owners      = ["125523088429"]
@@ -1,6 +1,6 @@
 # Kubernetes assets (kubeconfig, manifests)
 module "bootstrap" {
-  source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=e6a1c7bccfc45ab299b5f8149bc3840f99b30b2b"
+  source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=e1b1e0c75e77e042cf369f463f0e656297a201a8"

   cluster_name = var.cluster_name
   api_servers  = [format("%s.%s", var.cluster_name, var.dns_zone)]

@@ -9,6 +9,9 @@ module "bootstrap" {
   network_mtu  = var.network_mtu
   pod_cidr     = var.pod_cidr
   service_cidr = var.service_cidr
+  cluster_domain_suffix = var.cluster_domain_suffix
+  enable_reporting      = var.enable_reporting
+  enable_aggregation    = var.enable_aggregation
   daemonset_tolerations = var.daemonset_tolerations
   components            = var.components
 }
@@ -57,7 +57,7 @@ systemd:
       After=afterburn.service
       Wants=rpc-statd.service
       [Service]
-      Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.31.3
+      Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.30.1
       EnvironmentFile=/run/metadata/afterburn
       ExecStartPre=/bin/mkdir -p /etc/cni/net.d
       ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
@@ -116,7 +116,7 @@ systemd:
           --volume /opt/bootstrap/assets:/assets:ro,Z \
           --volume /opt/bootstrap/apply:/apply:ro,Z \
           --entrypoint=/apply \
-          quay.io/poseidon/kubelet:v1.31.3
+          quay.io/poseidon/kubelet:v1.30.1
       ExecStartPost=/bin/touch /opt/bootstrap/bootstrap.done
       ExecStartPost=-/usr/bin/podman stop bootstrap
 storage:
@@ -149,7 +149,7 @@ storage:
           cgroupDriver: systemd
           clusterDNS:
             - ${cluster_dns_service_ip}
-          clusterDomain: cluster.local
+          clusterDomain: ${cluster_domain_suffix}
          healthzPort: 0
           rotateCertificates: true
           shutdownGracePeriod: 45s
@@ -20,18 +20,18 @@ resource "aws_instance" "controllers" {
   tags = {
     Name = "${var.cluster_name}-controller-${count.index}"
   }

   instance_type = var.controller_type
-  ami           = var.controller_arch == "arm64" ? data.aws_ami.fedora-coreos-arm[0].image_id : data.aws_ami.fedora-coreos.image_id
+  ami           = var.arch == "arm64" ? data.aws_ami.fedora-coreos-arm[0].image_id : data.aws_ami.fedora-coreos.image_id
+  user_data     = data.ct_config.controllers.*.rendered[count.index]

   # storage
   root_block_device {
-    volume_type = var.controller_disk_type
-    volume_size = var.controller_disk_size
-    iops        = var.controller_disk_iops
+    volume_type = var.disk_type
+    volume_size = var.disk_size
+    iops        = var.disk_iops
     encrypted   = true
-    tags = {
-      Name = "${var.cluster_name}-controller-${count.index}"
-    }
+    tags        = {}
   }

   # network
@@ -39,14 +39,6 @@ resource "aws_instance" "controllers" {
   subnet_id              = element(aws_subnet.public.*.id, count.index)
   vpc_security_group_ids = [aws_security_group.controller.id]

-  # boot
-  user_data = data.ct_config.controllers.*.rendered[count.index]
-
-  # cost
-  credit_specification {
-    cpu_credits = var.controller_cpu_credits
-  }

   lifecycle {
     ignore_changes = [
       ami,

@@ -69,6 +61,7 @@ data "ct_config" "controllers" {
     kubeconfig             = indent(10, module.bootstrap.kubeconfig-kubelet)
     ssh_authorized_key     = var.ssh_authorized_key
     cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
+    cluster_domain_suffix  = var.cluster_domain_suffix
   })
   strict   = true
   snippets = var.controller_snippets
@@ -47,25 +47,17 @@ resource "aws_route" "egress-ipv6" {
 resource "aws_subnet" "public" {
   count = length(data.aws_availability_zones.all.names)

-  tags = {
-    "Name" = "${var.cluster_name}-public-${count.index}"
-  }
   vpc_id            = aws_vpc.network.id
   availability_zone = data.aws_availability_zones.all.names[count.index]

-  # IPv4 and IPv6 CIDR blocks
   cidr_block      = cidrsubnet(var.host_cidr, 4, count.index)
   ipv6_cidr_block = cidrsubnet(aws_vpc.network.ipv6_cidr_block, 8, count.index)

-  # Assign IPv4 and IPv6 addresses to instances
   map_public_ip_on_launch         = true
   assign_ipv6_address_on_creation = true

-  # Hostnames assigned to instances
-  # resource-name: <ec2-instance-id>.region.compute.internal
-  private_dns_hostname_type_on_launch            = "resource-name"
-  enable_resource_name_dns_a_record_on_launch    = true
-  enable_resource_name_dns_aaaa_record_on_launch = true
+  tags = {
+    "Name" = "${var.cluster_name}-public-${count.index}"
+  }
 }

 resource "aws_route_table_association" "public" {
@@ -17,6 +17,30 @@ variable "dns_zone_id" {

 # instances

+variable "controller_count" {
+  type        = number
+  description = "Number of controllers (i.e. masters)"
+  default     = 1
+}
+
+variable "worker_count" {
+  type        = number
+  description = "Number of workers"
+  default     = 1
+}
+
+variable "controller_type" {
+  type        = string
+  description = "EC2 instance type for controllers"
+  default     = "t3.small"
+}
+
+variable "worker_type" {
+  type        = string
+  description = "EC2 instance type for workers"
+  default     = "t3.small"
+}
+
 variable "os_stream" {
   type        = string
   description = "Fedora CoreOS image stream for instances (e.g. stable, testing, next)"
@@ -28,78 +52,24 @@ variable "os_stream" {
   }
 }

-variable "controller_count" {
-  type        = number
-  description = "Number of controllers (i.e. masters)"
-  default     = 1
-}
-
-variable "controller_type" {
-  type        = string
-  description = "EC2 instance type for controllers"
-  default     = "t3.small"
-}
-
-variable "controller_disk_size" {
+variable "disk_size" {
   type        = number
   description = "Size of the EBS volume in GB"
   default     = 30
 }

-variable "controller_disk_type" {
+variable "disk_type" {
   type        = string
   description = "Type of the EBS volume (e.g. standard, gp2, gp3, io1)"
   default     = "gp3"
 }

-variable "controller_disk_iops" {
+variable "disk_iops" {
   type        = number
   description = "IOPS of the EBS volume (e.g. 3000)"
   default     = 3000
 }

-variable "controller_cpu_credits" {
-  type        = string
-  description = "CPU credits mode (if using a burstable instance type)"
-  default     = null
-}
-
-variable "worker_count" {
-  type        = number
-  description = "Number of workers"
-  default     = 1
-}
-
-variable "worker_type" {
-  type        = string
-  description = "EC2 instance type for workers"
-  default     = "t3.small"
-}
-
-variable "worker_disk_size" {
-  type        = number
-  description = "Size of the EBS volume in GB"
-  default     = 30
-}
-
-variable "worker_disk_type" {
-  type        = string
-  description = "Type of the EBS volume (e.g. standard, gp2, gp3, io1)"
-  default     = "gp3"
-}
-
-variable "worker_disk_iops" {
-  type        = number
-  description = "IOPS of the EBS volume (e.g. 3000)"
-  default     = 3000
-}
-
-variable "worker_cpu_credits" {
-  type        = string
-  description = "CPU credits mode (if using a burstable instance type)"
-  default     = null
-}
-
 variable "worker_price" {
   type        = number
   description = "Spot price in USD for worker instances or 0 to use on-demand instances"
@@ -164,31 +134,40 @@ EOD
   default     = "10.3.0.0/16"
 }

+variable "enable_reporting" {
+  type        = bool
+  description = "Enable usage or analytics reporting to upstreams (Calico)"
+  default     = false
+}
+
+variable "enable_aggregation" {
+  type        = bool
+  description = "Enable the Kubernetes Aggregation Layer"
+  default     = true
+}
+
 variable "worker_node_labels" {
   type        = list(string)
   description = "List of initial worker node labels"
   default     = []
 }

-# advanced
+# unofficial, undocumented, unsupported

-variable "controller_arch" {
+variable "cluster_domain_suffix" {
   type        = string
-  description = "Controller node(s) architecture (amd64 or arm64)"
-  default     = "amd64"
-  validation {
-    condition     = contains(["amd64", "arm64"], var.controller_arch)
-    error_message = "The controller_arch must be amd64 or arm64."
-  }
+  description = "Queries for domains with the suffix will be answered by CoreDNS. Default is cluster.local (e.g. foo.default.svc.cluster.local)"
+  default     = "cluster.local"
 }

-variable "worker_arch" {
+variable "arch" {
   type        = string
-  description = "Worker node(s) architecture (amd64 or arm64)"
+  description = "Container architecture (amd64 or arm64)"
   default     = "amd64"

   validation {
-    condition     = contains(["amd64", "arm64"], var.worker_arch)
-    error_message = "The worker_arch must be amd64 or arm64."
+    condition     = var.arch == "amd64" || var.arch == "arm64"
+    error_message = "The arch must be amd64 or arm64."
   }
 }
@@ -6,24 +6,20 @@ module "workers" {
   vpc_id          = aws_vpc.network.id
   subnet_ids      = aws_subnet.public.*.id
   security_groups = [aws_security_group.worker.id]
+  worker_count    = var.worker_count
+  instance_type   = var.worker_type

-  # instances
   os_stream     = var.os_stream
-  worker_count  = var.worker_count
-  instance_type = var.worker_type
-  arch          = var.worker_arch
-  disk_type     = var.worker_disk_type
-  disk_size     = var.worker_disk_size
-  disk_iops     = var.worker_disk_iops
-  cpu_credits   = var.worker_cpu_credits
+  arch          = var.arch
+  disk_size     = var.disk_size
   spot_price    = var.worker_price
   target_groups = var.worker_target_groups

   # configuration
   kubeconfig            = module.bootstrap.kubeconfig-kubelet
   ssh_authorized_key    = var.ssh_authorized_key
   service_cidr          = var.service_cidr
+  cluster_domain_suffix = var.cluster_domain_suffix
   snippets              = var.worker_snippets
   node_labels           = var.worker_node_labels
 }
@@ -29,7 +29,7 @@ systemd:
       After=afterburn.service
       Wants=rpc-statd.service
       [Service]
-      Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.31.3
+      Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.30.1
       EnvironmentFile=/run/metadata/afterburn
       ExecStartPre=/bin/mkdir -p /etc/cni/net.d
       ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests

@@ -104,7 +104,7 @@ storage:
           cgroupDriver: systemd
           clusterDNS:
             - ${cluster_dns_service_ip}
-          clusterDomain: cluster.local
+          clusterDomain: ${cluster_domain_suffix}
           healthzPort: 0
           rotateCertificates: true
           shutdownGracePeriod: 45s
@@ -69,12 +69,6 @@ variable "spot_price" {
   default = 0
 }

-variable "cpu_credits" {
-  type        = string
-  description = "CPU burst credits mode (if applicable)"
-  default     = null
-}
-
 variable "target_groups" {
   type        = list(string)
   description = "Additional target group ARNs to which instances should be added"

@@ -108,6 +102,12 @@ EOD
   default = "10.3.0.0/16"
 }

+variable "cluster_domain_suffix" {
+  type        = string
+  description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local)"
+  default     = "cluster.local"
+}
+
 variable "node_labels" {
   type        = list(string)
   description = "List of initial node labels"
@@ -120,14 +120,15 @@ variable "node_taints" {
   default = []
 }

-# advanced
+# unofficial, undocumented, unsupported

 variable "arch" {
   type        = string
   description = "Container architecture (amd64 or arm64)"
   default     = "amd64"

   validation {
-    condition     = contains(["amd64", "arm64"], var.arch)
+    condition     = var.arch == "amd64" || var.arch == "arm64"
     error_message = "The arch must be amd64 or arm64."
   }
 }
@@ -3,14 +3,16 @@ resource "aws_autoscaling_group" "workers" {
   name = "${var.name}-worker"

   # count
   desired_capacity = var.worker_count
   min_size         = var.worker_count
   max_size         = var.worker_count + 2
+  default_cooldown          = 30
+  health_check_grace_period = 30

   # network
   vpc_zone_identifier = var.subnet_ids

-  # instance template
+  # template
   launch_template {
     id      = aws_launch_template.worker.id
     version = aws_launch_template.worker.latest_version

@@ -30,11 +32,6 @@ resource "aws_autoscaling_group" "workers" {
       min_healthy_percentage = 90
     }
   }

-  # Grace period before checking new instance's health
-  health_check_grace_period = 30
-  # Cooldown period between scaling activities
-  default_cooldown = 30

   lifecycle {
     # override the default destroy and replace update behavior
@@ -59,6 +56,11 @@ resource "aws_launch_template" "worker" {
   name_prefix   = "${var.name}-worker"
   image_id      = local.ami_id
   instance_type = var.instance_type
+  monitoring {
+    enabled = false
+  }
+
+  user_data = sensitive(base64encode(data.ct_config.worker.rendered))

   # storage
   ebs_optimized = true

@@ -74,26 +76,14 @@ resource "aws_launch_template" "worker" {
   }

   # network
-  network_interfaces {
-    associate_public_ip_address = true
-    security_groups             = var.security_groups
-  }
-
-  # boot
-  user_data = sensitive(base64encode(data.ct_config.worker.rendered))
+  vpc_security_group_ids = var.security_groups

   # metadata
   metadata_options {
     http_tokens = "optional"
   }
-  monitoring {
-    enabled = false
-  }

-  # cost
-  credit_specification {
-    cpu_credits = var.cpu_credits
-  }
+  # spot
   dynamic "instance_market_options" {
     for_each = var.spot_price > 0 ? [1] : []
     content {
@@ -117,6 +107,7 @@ data "ct_config" "worker" {
     kubeconfig             = indent(10, var.kubeconfig)
     ssh_authorized_key     = var.ssh_authorized_key
     cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
+    cluster_domain_suffix  = var.cluster_domain_suffix
     node_labels            = join(",", var.node_labels)
     node_taints            = join(",", var.node_taints)
   })
@@ -11,7 +11,7 @@ Typhoon distributes upstream Kubernetes, architectural conventions, and cluster

 ## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>

-* Kubernetes v1.31.3 (upstream)
+* Kubernetes v1.30.1 (upstream)
 * Single or multi-master, [Calico](https://www.projectcalico.org/) or [Cilium](https://github.com/cilium/cilium) or [flannel](https://github.com/coreos/flannel) networking
 * On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
 * Advanced features like [worker pools](https://typhoon.psdn.io/advanced/worker-pools/), [spot](https://typhoon.psdn.io/flatcar-linux/aws/#spot) workers, and [snippets](https://typhoon.psdn.io/advanced/customization/#hosts) customization
@@ -1,7 +1,7 @@
 locals {
   # Pick a Flatcar Linux AMI
   # flatcar-stable -> Flatcar Linux AMI
-  ami_id  = var.controller_arch == "arm64" ? data.aws_ami.flatcar-arm64[0].image_id : data.aws_ami.flatcar.image_id
+  ami_id  = var.arch == "arm64" ? data.aws_ami.flatcar-arm64[0].image_id : data.aws_ami.flatcar.image_id
   channel = split("-", var.os_image)[1]
 }

@@ -26,7 +26,7 @@ data "aws_ami" "flatcar" {
 }

 data "aws_ami" "flatcar-arm64" {
-  count = var.controller_arch == "arm64" ? 1 : 0
+  count = var.arch == "arm64" ? 1 : 0

   most_recent = true
   owners      = ["075585003325"]
@@ -1,6 +1,6 @@
 # Kubernetes assets (kubeconfig, manifests)
 module "bootstrap" {
-  source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=e6a1c7bccfc45ab299b5f8149bc3840f99b30b2b"
+  source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=e1b1e0c75e77e042cf369f463f0e656297a201a8"

   cluster_name = var.cluster_name
   api_servers  = [format("%s.%s", var.cluster_name, var.dns_zone)]

@@ -9,6 +9,9 @@ module "bootstrap" {
   network_mtu  = var.network_mtu
   pod_cidr     = var.pod_cidr
   service_cidr = var.service_cidr
+  cluster_domain_suffix = var.cluster_domain_suffix
+  enable_reporting      = var.enable_reporting
+  enable_aggregation    = var.enable_aggregation
   daemonset_tolerations = var.daemonset_tolerations
   components            = var.components
 }
@@ -58,7 +58,7 @@ systemd:
       After=coreos-metadata.service
       Wants=rpc-statd.service
       [Service]
-      Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.31.3
+      Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.30.1
       EnvironmentFile=/run/metadata/coreos
       ExecStartPre=/bin/mkdir -p /etc/cni/net.d
       ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests

@@ -109,7 +109,7 @@ systemd:
       Type=oneshot
       RemainAfterExit=true
       WorkingDirectory=/opt/bootstrap
-      Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.31.3
+      Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.30.1
       ExecStart=/usr/bin/docker run \
         -v /etc/kubernetes/pki:/etc/kubernetes/pki:ro \
         -v /opt/bootstrap/assets:/assets:ro \

@@ -148,7 +148,7 @@ storage:
           cgroupDriver: systemd
           clusterDNS:
             - ${cluster_dns_service_ip}
-          clusterDomain: cluster.local
+          clusterDomain: ${cluster_domain_suffix}
           healthzPort: 0
           rotateCertificates: true
           shutdownGracePeriod: 45s
@@ -20,18 +20,19 @@ resource "aws_instance" "controllers" {
   tags = {
     Name = "${var.cluster_name}-controller-${count.index}"
   }

   instance_type = var.controller_type
   ami           = local.ami_id
+  user_data     = data.ct_config.controllers.*.rendered[count.index]

   # storage
   root_block_device {
-    volume_type = var.controller_disk_type
-    volume_size = var.controller_disk_size
-    iops        = var.controller_disk_iops
+    volume_type = var.disk_type
+    volume_size = var.disk_size
+    iops        = var.disk_iops
     encrypted   = true
-    tags = {
-      Name = "${var.cluster_name}-controller-${count.index}"
-    }
+    tags        = {}
   }

   # network
@@ -39,14 +40,6 @@ resource "aws_instance" "controllers" {
   subnet_id              = element(aws_subnet.public.*.id, count.index)
   vpc_security_group_ids = [aws_security_group.controller.id]

-  # boot
-  user_data = data.ct_config.controllers.*.rendered[count.index]
-
-  # cost
-  credit_specification {
-    cpu_credits = var.controller_cpu_credits
-  }

   lifecycle {
     ignore_changes = [
       ami,

@@ -69,6 +62,7 @@ data "ct_config" "controllers" {
     kubeconfig             = indent(10, module.bootstrap.kubeconfig-kubelet)
     ssh_authorized_key     = var.ssh_authorized_key
     cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
+    cluster_domain_suffix  = var.cluster_domain_suffix
   })
   strict   = true
   snippets = var.controller_snippets
@@ -47,25 +47,17 @@ resource "aws_route" "egress-ipv6" {
 resource "aws_subnet" "public" {
   count = length(data.aws_availability_zones.all.names)

-  tags = {
-    "Name" = "${var.cluster_name}-public-${count.index}"
-  }
   vpc_id            = aws_vpc.network.id
   availability_zone = data.aws_availability_zones.all.names[count.index]

-  # IPv4 and IPv6 CIDR blocks
   cidr_block      = cidrsubnet(var.host_cidr, 4, count.index)
   ipv6_cidr_block = cidrsubnet(aws_vpc.network.ipv6_cidr_block, 8, count.index)

-  # Assign IPv4 and IPv6 addresses to instances
   map_public_ip_on_launch         = true
   assign_ipv6_address_on_creation = true

-  # Hostnames assigned to instances
-  # resource-name: <ec2-instance-id>.region.compute.internal
-  private_dns_hostname_type_on_launch            = "resource-name"
-  enable_resource_name_dns_a_record_on_launch    = true
-  enable_resource_name_dns_aaaa_record_on_launch = true
+  tags = {
+    "Name" = "${var.cluster_name}-public-${count.index}"
+  }
 }

 resource "aws_route_table_association" "public" {
@@ -17,6 +17,30 @@ variable "dns_zone_id" {

 # instances

+variable "controller_count" {
+  type        = number
+  description = "Number of controllers (i.e. masters)"
+  default     = 1
+}
+
+variable "worker_count" {
+  type        = number
+  description = "Number of workers"
+  default     = 1
+}
+
+variable "controller_type" {
+  type        = string
+  description = "EC2 instance type for controllers"
+  default     = "t3.small"
+}
+
+variable "worker_type" {
+  type        = string
+  description = "EC2 instance type for workers"
+  default     = "t3.small"
+}
+
 variable "os_image" {
   type        = string
   description = "AMI channel for a Container Linux derivative (flatcar-stable, flatcar-beta, flatcar-alpha)"
@@ -28,78 +52,24 @@ variable "os_image" {
   }
 }
 
-variable "controller_count" {
-  type        = number
-  description = "Number of controllers (i.e. masters)"
-  default     = 1
-}
-
-variable "controller_type" {
-  type        = string
-  description = "EC2 instance type for controllers"
-  default     = "t3.small"
-}
-
-variable "controller_disk_size" {
+variable "disk_size" {
   type        = number
   description = "Size of the EBS volume in GB"
   default     = 30
 }
 
-variable "controller_disk_type" {
+variable "disk_type" {
   type        = string
   description = "Type of the EBS volume (e.g. standard, gp2, gp3, io1)"
   default     = "gp3"
 }
 
-variable "controller_disk_iops" {
+variable "disk_iops" {
   type        = number
   description = "IOPS of the EBS volume (e.g. 3000)"
   default     = 3000
 }
 
-variable "controller_cpu_credits" {
-  type        = string
-  description = "CPU credits mode (if using a burstable instance type)"
-  default     = null
-}
-
-variable "worker_count" {
-  type        = number
-  description = "Number of workers"
-  default     = 1
-}
-
-variable "worker_type" {
-  type        = string
-  description = "EC2 instance type for workers"
-  default     = "t3.small"
-}
-
-variable "worker_disk_size" {
-  type        = number
-  description = "Size of the EBS volume in GB"
-  default     = 30
-}
-
-variable "worker_disk_type" {
-  type        = string
-  description = "Type of the EBS volume (e.g. standard, gp2, gp3, io1)"
-  default     = "gp3"
-}
-
-variable "worker_disk_iops" {
-  type        = number
-  description = "IOPS of the EBS volume (e.g. 3000)"
-  default     = 3000
-}
-
-variable "worker_cpu_credits" {
-  type        = string
-  description = "CPU credits mode (if using a burstable instance type)"
-  default     = null
-}
-
 variable "worker_price" {
   type        = number
   description = "Spot price in USD for worker instances or 0 to use on-demand instances"
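
Note: main replaces the single shared disk_size/disk_type/disk_iops trio with per-role controller_* and worker_* variables, plus optional CPU credit modes for burstable types. A minimal caller-side sketch of the newer interface; the module source pin and concrete values are illustrative assumptions, not taken from this diff:

  module "cluster" {
    # hypothetical source; substitute a real Typhoon AWS module path and release ref
    source = "git::https://github.com/poseidon/typhoon//aws/flatcar-linux/kubernetes?ref=<release>"

    # ...required arguments (cluster_name, dns_zone, etc.) elided...

    # per-role sizing replaces the shared disk_size/disk_type/disk_iops
    controller_disk_size = 30
    controller_disk_type = "gp3"
    controller_disk_iops = 3000
    worker_disk_size     = 100
    worker_disk_type     = "gp3"
    worker_disk_iops     = 3000

    # only meaningful for burstable instance types (e.g. t3.small)
    controller_cpu_credits = "unlimited"
    worker_cpu_credits     = "standard"
  }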
@@ -164,31 +134,40 @@ EOD
   default     = "10.3.0.0/16"
 }
 
+variable "enable_reporting" {
+  type        = bool
+  description = "Enable usage or analytics reporting to upstreams (Calico)"
+  default     = false
+}
+
+variable "enable_aggregation" {
+  type        = bool
+  description = "Enable the Kubernetes Aggregation Layer"
+  default     = true
+}
+
 variable "worker_node_labels" {
   type        = list(string)
   description = "List of initial worker node labels"
   default     = []
 }
 
-# advanced
+# unofficial, undocumented, unsupported
 
-variable "controller_arch" {
+variable "cluster_domain_suffix" {
   type        = string
-  description = "Controller node(s) architecture (amd64 or arm64)"
-  default     = "amd64"
-  validation {
-    condition     = contains(["amd64", "arm64"], var.controller_arch)
-    error_message = "The controller_arch must be amd64 or arm64."
-  }
+  description = "Queries for domains with the suffix will be answered by CoreDNS. Default is cluster.local (e.g. foo.default.svc.cluster.local)"
+  default     = "cluster.local"
 }
 
-variable "worker_arch" {
+variable "arch" {
   type        = string
-  description = "Worker node(s) architecture (amd64 or arm64)"
+  description = "Container architecture (amd64 or arm64)"
   default     = "amd64"
 
   validation {
-    condition     = contains(["amd64", "arm64"], var.worker_arch)
-    error_message = "The worker_arch must be amd64 or arm64."
+    condition     = var.arch == "amd64" || var.arch == "arm64"
+    error_message = "The arch must be amd64 or arm64."
   }
 }
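
Note: the same per-role split applies to architecture: the single arch toggle becomes controller_arch and worker_arch, each validated with contains(["amd64", "arm64"], ...). A hedged sketch of mixed-architecture usage under the newer interface (values are examples only):

  module "cluster" {
    # ...required cluster arguments elided...

    # e.g. amd64 control plane with arm64 workers
    controller_arch = "amd64"
    worker_arch     = "arm64"
  }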
@@ -6,23 +6,20 @@ module "workers" {
   vpc_id          = aws_vpc.network.id
   subnet_ids      = aws_subnet.public.*.id
   security_groups = [aws_security_group.worker.id]
 
-  # instances
-  os_image      = var.os_image
   worker_count  = var.worker_count
   instance_type = var.worker_type
-  arch          = var.worker_arch
-  disk_type     = var.worker_disk_type
-  disk_size     = var.worker_disk_size
-  disk_iops     = var.worker_disk_iops
+  os_image      = var.os_image
+  arch          = var.arch
+  disk_size     = var.disk_size
   spot_price    = var.worker_price
   target_groups = var.worker_target_groups
 
   # configuration
   kubeconfig            = module.bootstrap.kubeconfig-kubelet
   ssh_authorized_key    = var.ssh_authorized_key
   service_cidr          = var.service_cidr
+  cluster_domain_suffix = var.cluster_domain_suffix
   snippets              = var.worker_snippets
   node_labels           = var.worker_node_labels
 }
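
Note: worker_price doubles as the on-demand/spot switch; the launch template only adds instance_market_options when it is greater than 0 (see the dynamic block further down). An illustrative caller sketch, with assumed values:

  module "cluster" {
    # ...required cluster arguments elided...

    worker_count         = 2
    worker_type          = "t3.small"
    worker_price         = 0.03 # max spot price in USD/hour; 0 keeps on-demand instances
    worker_target_groups = []   # additional target group ARNs, if any
  }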
@@ -30,7 +30,7 @@ systemd:
       After=coreos-metadata.service
       Wants=rpc-statd.service
       [Service]
-      Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.31.3
+      Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.30.1
       EnvironmentFile=/run/metadata/coreos
       ExecStartPre=/bin/mkdir -p /etc/cni/net.d
       ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
@@ -103,7 +103,7 @@ storage:
        cgroupDriver: systemd
        clusterDNS:
          - ${cluster_dns_service_ip}
-        clusterDomain: cluster.local
+        clusterDomain: ${cluster_domain_suffix}
        healthzPort: 0
        rotateCertificates: true
        shutdownGracePeriod: 45s
|
@ -69,12 +69,6 @@ variable "spot_price" {
|
|||||||
default = 0
|
default = 0
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "cpu_credits" {
|
|
||||||
type = string
|
|
||||||
description = "CPU burst credits mode (if applicable)"
|
|
||||||
default = null
|
|
||||||
}
|
|
||||||
|
|
||||||
variable "target_groups" {
|
variable "target_groups" {
|
||||||
type = list(string)
|
type = list(string)
|
||||||
description = "Additional target group ARNs to which instances should be added"
|
description = "Additional target group ARNs to which instances should be added"
|
||||||
@@ -108,6 +102,12 @@ EOD
   default     = "10.3.0.0/16"
 }
 
+variable "cluster_domain_suffix" {
+  type        = string
+  description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local)"
+  default     = "cluster.local"
+}
+
 variable "node_labels" {
   type        = list(string)
   description = "List of initial node labels"
@@ -128,7 +128,7 @@ variable "arch" {
   default     = "amd64"
 
   validation {
-    condition     = contains(["amd64", "arm64"], var.arch)
+    condition     = var.arch == "amd64" || var.arch == "arm64"
     error_message = "The arch must be amd64 or arm64."
   }
 }
@@ -3,14 +3,16 @@ resource "aws_autoscaling_group" "workers" {
   name = "${var.name}-worker"
 
   # count
   desired_capacity          = var.worker_count
   min_size                  = var.worker_count
   max_size                  = var.worker_count + 2
+  default_cooldown          = 30
+  health_check_grace_period = 30
 
   # network
   vpc_zone_identifier = var.subnet_ids
 
-  # instance template
+  # template
   launch_template {
     id      = aws_launch_template.worker.id
     version = aws_launch_template.worker.latest_version

@@ -30,10 +32,6 @@ resource "aws_autoscaling_group" "workers" {
       min_healthy_percentage = 90
     }
   }
-  # Grace period before checking new instance's health
-  health_check_grace_period = 30
-  # Cooldown period between scaling activities
-  default_cooldown = 30
 
   lifecycle {
     # override the default destroy and replace update behavior
@@ -58,6 +56,11 @@ resource "aws_launch_template" "worker" {
   name_prefix   = "${var.name}-worker"
   image_id      = local.ami_id
   instance_type = var.instance_type
+  monitoring {
+    enabled = false
+  }
+
+  user_data = sensitive(base64encode(data.ct_config.worker.rendered))
 
   # storage
   ebs_optimized = true
@@ -73,26 +76,14 @@ resource "aws_launch_template" "worker" {
   }
 
   # network
-  network_interfaces {
-    associate_public_ip_address = true
-    security_groups             = var.security_groups
-  }
-
-  # boot
-  user_data = sensitive(base64encode(data.ct_config.worker.rendered))
+  vpc_security_group_ids = var.security_groups
 
   # metadata
   metadata_options {
     http_tokens = "optional"
   }
-  monitoring {
-    enabled = false
-  }
 
-  # cost
-  credit_specification {
-    cpu_credits = var.cpu_credits
-  }
+  # spot
   dynamic "instance_market_options" {
     for_each = var.spot_price > 0 ? [1] : []
     content {
@@ -116,6 +107,7 @@ data "ct_config" "worker" {
     kubeconfig             = indent(10, var.kubeconfig)
     ssh_authorized_key     = var.ssh_authorized_key
     cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
+    cluster_domain_suffix  = var.cluster_domain_suffix
     node_labels            = join(",", var.node_labels)
     node_taints            = join(",", var.node_taints)
   })
@@ -11,7 +11,7 @@ Typhoon distributes upstream Kubernetes, architectural conventions, and cluster
 
 ## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>
 
-* Kubernetes v1.31.3 (upstream)
+* Kubernetes v1.30.1 (upstream)
 * Single or multi-master, [Calico](https://www.projectcalico.org/) or [Cilium](https://github.com/cilium/cilium) or [flannel](https://github.com/coreos/flannel) networking
 * On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/), SELinux enforcing
 * Advanced features like [worker pools](https://typhoon.psdn.io/advanced/worker-pools/), [spot priority](https://typhoon.psdn.io/fedora-coreos/azure/#low-priority) workers, and [snippets](https://typhoon.psdn.io/advanced/customization/#hosts) customization
@@ -1,6 +1,6 @@
 # Kubernetes assets (kubeconfig, manifests)
 module "bootstrap" {
-  source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=e6a1c7bccfc45ab299b5f8149bc3840f99b30b2b"
+  source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=e1b1e0c75e77e042cf369f463f0e656297a201a8"
 
   cluster_name = var.cluster_name
   api_servers  = [format("%s.%s", var.cluster_name, var.dns_zone)]
@@ -14,6 +14,9 @@ module "bootstrap" {
 
   pod_cidr              = var.pod_cidr
   service_cidr          = var.service_cidr
+  cluster_domain_suffix = var.cluster_domain_suffix
+  enable_reporting      = var.enable_reporting
+  enable_aggregation    = var.enable_aggregation
   daemonset_tolerations = var.daemonset_tolerations
   components            = var.components
 }
@@ -54,7 +54,7 @@ systemd:
       Description=Kubelet (System Container)
       Wants=rpc-statd.service
       [Service]
-      Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.31.3
+      Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.30.1
       ExecStartPre=/bin/mkdir -p /etc/cni/net.d
       ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
       ExecStartPre=/bin/mkdir -p /opt/cni/bin
@@ -111,7 +111,7 @@ systemd:
          --volume /opt/bootstrap/assets:/assets:ro,Z \
          --volume /opt/bootstrap/apply:/apply:ro,Z \
          --entrypoint=/apply \
-          quay.io/poseidon/kubelet:v1.31.3
+          quay.io/poseidon/kubelet:v1.30.1
        ExecStartPost=/bin/touch /opt/bootstrap/bootstrap.done
        ExecStartPost=-/usr/bin/podman stop bootstrap
 storage:
@@ -144,7 +144,7 @@ storage:
        cgroupDriver: systemd
        clusterDNS:
          - ${cluster_dns_service_ip}
-        clusterDomain: cluster.local
+        clusterDomain: ${cluster_domain_suffix}
        healthzPort: 0
        rotateCertificates: true
        shutdownGracePeriod: 45s
@@ -8,23 +8,26 @@ locals {
 
 # Discrete DNS records for each controller's private IPv4 for etcd usage
 resource "azurerm_dns_a_record" "etcds" {
   count = var.controller_count
+  resource_group_name = var.dns_zone_group
 
   # DNS Zone name where record should be created
   zone_name = var.dns_zone
-  resource_group_name = var.dns_zone_group
 
   # DNS record
   name = format("%s-etcd%d", var.cluster_name, count.index)
   ttl  = 300
 
   # private IPv4 address for etcd
-  records = [azurerm_network_interface.controllers[count.index].private_ip_address]
+  records = [azurerm_network_interface.controllers.*.private_ip_address[count.index]]
 }
 
 # Controller availability set to spread controllers
 resource "azurerm_availability_set" "controllers" {
+  resource_group_name = azurerm_resource_group.cluster.name
+
   name = "${var.cluster_name}-controllers"
-  resource_group_name = azurerm_resource_group.cluster.name
-  location = var.location
+  location = var.region
   platform_fault_domain_count  = 2
   platform_update_domain_count = 4
   managed = true
@@ -32,35 +35,31 @@ resource "azurerm_availability_set" "controllers" {
 
 # Controller instances
 resource "azurerm_linux_virtual_machine" "controllers" {
   count = var.controller_count
+  resource_group_name = azurerm_resource_group.cluster.name
 
   name = "${var.cluster_name}-controller-${count.index}"
-  resource_group_name = azurerm_resource_group.cluster.name
-  location            = var.location
+  location            = var.region
   availability_set_id = azurerm_availability_set.controllers.id
-  size                = var.controller_type
+
+  size        = var.controller_type
+  custom_data = base64encode(data.ct_config.controllers.*.rendered[count.index])
 
   # storage
   source_image_id = var.os_image
   os_disk {
     name    = "${var.cluster_name}-controller-${count.index}"
-    storage_account_type = var.controller_disk_type
-    disk_size_gb         = var.controller_disk_size
     caching = "None"
+    disk_size_gb         = var.disk_size
+    storage_account_type = "Premium_LRS"
   }
 
   # network
   network_interface_ids = [
-    azurerm_network_interface.controllers[count.index].id
+    azurerm_network_interface.controllers.*.id[count.index]
   ]
 
-  # boot
-  custom_data = base64encode(data.ct_config.controllers[count.index].rendered)
-  boot_diagnostics {
-    # defaults to a managed storage account
-  }
-
-  # Azure requires an RSA admin_ssh_key
+  # Azure requires setting admin_ssh_key, though Ignition custom_data handles it too
   admin_username = "core"
   admin_ssh_key {
     username = "core"
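
Note: on main the Azure os_disk type and size come from controller_disk_type/controller_disk_size instead of a hardcoded Premium_LRS. A minimal caller sketch; the module path and values are illustrative assumptions, not taken from this diff:

  module "cluster" {
    # hypothetical source; substitute a real Typhoon Azure module path and release ref
    source = "git::https://github.com/poseidon/typhoon//azure/fedora-coreos/kubernetes?ref=<release>"

    # ...required cluster arguments elided...
    controller_type      = "Standard_B2s"
    controller_disk_size = 30
    controller_disk_type = "StandardSSD_LRS" # rather than the old fixed Premium_LRS
  }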
@@ -75,52 +74,31 @@ resource "azurerm_linux_virtual_machine" "controllers" {
   }
 }
 
-# Controller node public IPv4 addresses
-resource "azurerm_public_ip" "controllers-ipv4" {
+# Controller public IPv4 addresses
+resource "azurerm_public_ip" "controllers" {
   count = var.controller_count
 
-  name                = "${var.cluster_name}-controller-${count.index}-ipv4"
   resource_group_name = azurerm_resource_group.cluster.name
-  location            = azurerm_resource_group.cluster.location
-  ip_version          = "IPv4"
+  name                = "${var.cluster_name}-controller-${count.index}"
+  location            = azurerm_resource_group.cluster.location
   sku                 = "Standard"
   allocation_method   = "Static"
 }
 
-# Controller node public IPv6 addresses
-resource "azurerm_public_ip" "controllers-ipv6" {
-  count = var.controller_count
-
-  name                = "${var.cluster_name}-controller-${count.index}-ipv6"
-  resource_group_name = azurerm_resource_group.cluster.name
-  location            = azurerm_resource_group.cluster.location
-  ip_version          = "IPv6"
-  sku                 = "Standard"
-  allocation_method   = "Static"
-}
-
-# Controllers' network interfaces
+# Controller NICs with public and private IPv4
 resource "azurerm_network_interface" "controllers" {
   count = var.controller_count
 
-  name                = "${var.cluster_name}-controller-${count.index}"
   resource_group_name = azurerm_resource_group.cluster.name
+  name                = "${var.cluster_name}-controller-${count.index}"
   location            = azurerm_resource_group.cluster.location
 
   ip_configuration {
-    name                          = "ipv4"
-    primary                       = true
+    name                          = "ip0"
     subnet_id                     = azurerm_subnet.controller.id
     private_ip_address_allocation = "Dynamic"
-    private_ip_address_version    = "IPv4"
-    public_ip_address_id          = azurerm_public_ip.controllers-ipv4[count.index].id
-  }
-  ip_configuration {
-    name                          = "ipv6"
-    subnet_id                     = azurerm_subnet.controller.id
-    private_ip_address_allocation = "Dynamic"
-    private_ip_address_version    = "IPv6"
-    public_ip_address_id          = azurerm_public_ip.controllers-ipv6[count.index].id
+    # instance public IPv4
+    public_ip_address_id          = azurerm_public_ip.controllers.*.id[count.index]
   }
 }
@@ -133,20 +111,12 @@ resource "azurerm_network_interface_security_group_association" "controllers" {
 }
 
 # Associate controller network interface with controller backend address pool
-resource "azurerm_network_interface_backend_address_pool_association" "controllers-ipv4" {
+resource "azurerm_network_interface_backend_address_pool_association" "controllers" {
   count = var.controller_count
 
   network_interface_id    = azurerm_network_interface.controllers[count.index].id
-  ip_configuration_name   = "ipv4"
-  backend_address_pool_id = azurerm_lb_backend_address_pool.controller-ipv4.id
-}
-
-resource "azurerm_network_interface_backend_address_pool_association" "controllers-ipv6" {
-  count = var.controller_count
-
-  network_interface_id    = azurerm_network_interface.controllers[count.index].id
-  ip_configuration_name   = "ipv6"
-  backend_address_pool_id = azurerm_lb_backend_address_pool.controller-ipv6.id
+  ip_configuration_name   = "ip0"
+  backend_address_pool_id = azurerm_lb_backend_address_pool.controller.id
 }
 
 # Fedora CoreOS controllers
@@ -163,6 +133,7 @@ data "ct_config" "controllers" {
     kubeconfig             = indent(10, module.bootstrap.kubeconfig-kubelet)
     ssh_authorized_key     = var.ssh_authorized_key
     cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
+    cluster_domain_suffix  = var.cluster_domain_suffix
   })
   strict   = true
   snippets = var.controller_snippets
@@ -1,164 +1,116 @@
-# DNS A record for the apiserver load balancer
+# DNS record for the apiserver load balancer
 resource "azurerm_dns_a_record" "apiserver" {
+  resource_group_name = var.dns_zone_group
+
   # DNS Zone name where record should be created
   zone_name = var.dns_zone
-  resource_group_name = var.dns_zone_group
 
   # DNS record
   name = var.cluster_name
   ttl  = 300
 
   # IPv4 address of apiserver load balancer
-  records = [azurerm_public_ip.frontend-ipv4.ip_address]
+  records = [azurerm_public_ip.apiserver-ipv4.ip_address]
 }
 
-# DNS AAAA record for the apiserver load balancer
-resource "azurerm_dns_aaaa_record" "apiserver" {
-  # DNS Zone name where record should be created
-  zone_name           = var.dns_zone
-  resource_group_name = var.dns_zone_group
-  # DNS record
-  name = var.cluster_name
-  ttl  = 300
-  # IPv6 address of apiserver load balancer
-  records = [azurerm_public_ip.frontend-ipv6.ip_address]
-}
-
-# Static IPv4 address for the load balancer
-resource "azurerm_public_ip" "frontend-ipv4" {
-  name                = "${var.cluster_name}-frontend-ipv4"
+# Static IPv4 address for the apiserver frontend
+resource "azurerm_public_ip" "apiserver-ipv4" {
   resource_group_name = azurerm_resource_group.cluster.name
-  location            = var.location
-  ip_version          = "IPv4"
+  name                = "${var.cluster_name}-apiserver-ipv4"
+  location            = var.region
   sku                 = "Standard"
   allocation_method   = "Static"
 }
 
-# Static IPv6 address for the load balancer
-resource "azurerm_public_ip" "frontend-ipv6" {
-  name                = "${var.cluster_name}-frontend-ipv6"
+# Static IPv4 address for the ingress frontend
+resource "azurerm_public_ip" "ingress-ipv4" {
   resource_group_name = azurerm_resource_group.cluster.name
-  location            = var.location
-  ip_version          = "IPv6"
+  name                = "${var.cluster_name}-ingress-ipv4"
+  location            = var.region
   sku                 = "Standard"
   allocation_method   = "Static"
 }
 
 # Network Load Balancer for apiservers and ingress
 resource "azurerm_lb" "cluster" {
-  name                = var.cluster_name
   resource_group_name = azurerm_resource_group.cluster.name
-  location            = var.location
+  name                = var.cluster_name
+  location            = var.region
   sku                 = "Standard"
 
   frontend_ip_configuration {
-    name                 = "frontend-ipv4"
-    public_ip_address_id = azurerm_public_ip.frontend-ipv4.id
+    name                 = "apiserver"
+    public_ip_address_id = azurerm_public_ip.apiserver-ipv4.id
   }
 
   frontend_ip_configuration {
-    name                 = "frontend-ipv6"
-    public_ip_address_id = azurerm_public_ip.frontend-ipv6.id
+    name                 = "ingress"
+    public_ip_address_id = azurerm_public_ip.ingress-ipv4.id
   }
 }
 
-resource "azurerm_lb_rule" "apiserver-ipv4" {
-  name                           = "apiserver-ipv4"
+resource "azurerm_lb_rule" "apiserver" {
+  name                           = "apiserver"
   loadbalancer_id                = azurerm_lb.cluster.id
-  frontend_ip_configuration_name = "frontend-ipv4"
-  disable_outbound_snat          = true
+  frontend_ip_configuration_name = "apiserver"
 
   protocol                 = "Tcp"
   frontend_port            = 6443
   backend_port             = 6443
-  backend_address_pool_ids = [azurerm_lb_backend_address_pool.controller-ipv4.id]
+  backend_address_pool_ids = [azurerm_lb_backend_address_pool.controller.id]
   probe_id                 = azurerm_lb_probe.apiserver.id
 }
 
-resource "azurerm_lb_rule" "apiserver-ipv6" {
-  name                           = "apiserver-ipv6"
-  loadbalancer_id                = azurerm_lb.cluster.id
-  frontend_ip_configuration_name = "frontend-ipv6"
-  disable_outbound_snat          = true
-
-  protocol                 = "Tcp"
-  frontend_port            = 6443
-  backend_port             = 6443
-  backend_address_pool_ids = [azurerm_lb_backend_address_pool.controller-ipv6.id]
-  probe_id                 = azurerm_lb_probe.apiserver.id
-}
-
-resource "azurerm_lb_rule" "ingress-http-ipv4" {
-  name                           = "ingress-http-ipv4"
+resource "azurerm_lb_rule" "ingress-http" {
+  name                           = "ingress-http"
   loadbalancer_id                = azurerm_lb.cluster.id
-  frontend_ip_configuration_name = "frontend-ipv4"
+  frontend_ip_configuration_name = "ingress"
   disable_outbound_snat          = true
 
   protocol                 = "Tcp"
   frontend_port            = 80
   backend_port             = 80
-  backend_address_pool_ids = [azurerm_lb_backend_address_pool.worker-ipv4.id]
+  backend_address_pool_ids = [azurerm_lb_backend_address_pool.worker.id]
   probe_id                 = azurerm_lb_probe.ingress.id
 }
 
-resource "azurerm_lb_rule" "ingress-https-ipv4" {
-  name                           = "ingress-https-ipv4"
+resource "azurerm_lb_rule" "ingress-https" {
+  name                           = "ingress-https"
  loadbalancer_id                = azurerm_lb.cluster.id
-  frontend_ip_configuration_name = "frontend-ipv4"
+  frontend_ip_configuration_name = "ingress"
   disable_outbound_snat          = true
 
   protocol                 = "Tcp"
   frontend_port            = 443
   backend_port             = 443
-  backend_address_pool_ids = [azurerm_lb_backend_address_pool.worker-ipv4.id]
+  backend_address_pool_ids = [azurerm_lb_backend_address_pool.worker.id]
   probe_id                 = azurerm_lb_probe.ingress.id
 }
 
-resource "azurerm_lb_rule" "ingress-http-ipv6" {
-  name                           = "ingress-http-ipv6"
-  loadbalancer_id                = azurerm_lb.cluster.id
-  frontend_ip_configuration_name = "frontend-ipv6"
-  disable_outbound_snat          = true
-
-  protocol                 = "Tcp"
-  frontend_port            = 80
-  backend_port             = 80
-  backend_address_pool_ids = [azurerm_lb_backend_address_pool.worker-ipv6.id]
-  probe_id                 = azurerm_lb_probe.ingress.id
-}
-
-resource "azurerm_lb_rule" "ingress-https-ipv6" {
-  name                           = "ingress-https-ipv6"
-  loadbalancer_id                = azurerm_lb.cluster.id
-  frontend_ip_configuration_name = "frontend-ipv6"
-  disable_outbound_snat          = true
-
-  protocol                 = "Tcp"
-  frontend_port            = 443
-  backend_port             = 443
-  backend_address_pool_ids = [azurerm_lb_backend_address_pool.worker-ipv6.id]
-  probe_id                 = azurerm_lb_probe.ingress.id
-}
-
-# Backend Address Pools
-
-# Address pool of controllers
-resource "azurerm_lb_backend_address_pool" "controller-ipv4" {
-  name            = "controller-ipv4"
-  loadbalancer_id = azurerm_lb.cluster.id
-}
-
-resource "azurerm_lb_backend_address_pool" "controller-ipv6" {
-  name            = "controller-ipv6"
+# Worker outbound TCP/UDP SNAT
+resource "azurerm_lb_outbound_rule" "worker-outbound" {
+  name            = "worker"
+  loadbalancer_id = azurerm_lb.cluster.id
+  frontend_ip_configuration {
+    name = "ingress"
+  }
+
+  protocol                = "All"
+  backend_address_pool_id = azurerm_lb_backend_address_pool.worker.id
+}
+
+# Address pool of controllers
+resource "azurerm_lb_backend_address_pool" "controller" {
+  name            = "controller"
   loadbalancer_id = azurerm_lb.cluster.id
 }
 
 # Address pool of workers
-resource "azurerm_lb_backend_address_pool" "worker-ipv4" {
-  name            = "worker-ipv4"
-  loadbalancer_id = azurerm_lb.cluster.id
-}
-
-resource "azurerm_lb_backend_address_pool" "worker-ipv6" {
-  name            = "worker-ipv6"
+resource "azurerm_lb_backend_address_pool" "worker" {
+  name            = "worker"
   loadbalancer_id = azurerm_lb.cluster.id
 }
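
Note: with both address families terminating on one load balancer, rules differ only in frontend name and backend pool. A hedged sketch of an extra TCP rule a user could define against the newer naming (the rule name and port are invented for illustration):

  resource "azurerm_lb_rule" "example-tcp-ipv4" {
    name                           = "example-tcp-ipv4"
    loadbalancer_id                = azurerm_lb.cluster.id
    frontend_ip_configuration_name = "frontend-ipv4"
    disable_outbound_snat          = true

    protocol                 = "Tcp"
    frontend_port            = 3333
    backend_port             = 3333
    backend_address_pool_ids = [azurerm_lb_backend_address_pool.worker-ipv4.id]
    probe_id                 = azurerm_lb_probe.ingress.id
  }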
@@ -170,8 +122,10 @@ resource "azurerm_lb_probe" "apiserver" {
   loadbalancer_id = azurerm_lb.cluster.id
   protocol        = "Tcp"
   port            = 6443
 
   # unhealthy threshold
   number_of_probes = 3
 
   interval_in_seconds = 5
 }
@@ -182,29 +136,10 @@ resource "azurerm_lb_probe" "ingress" {
   protocol     = "Http"
   port         = 10254
   request_path = "/healthz"
 
   # unhealthy threshold
   number_of_probes = 3
 
   interval_in_seconds = 5
 }
 
-# Outbound SNAT
-
-resource "azurerm_lb_outbound_rule" "outbound-ipv4" {
-  name                    = "outbound-ipv4"
-  protocol                = "All"
-  loadbalancer_id         = azurerm_lb.cluster.id
-  backend_address_pool_id = azurerm_lb_backend_address_pool.worker-ipv4.id
-  frontend_ip_configuration {
-    name = "frontend-ipv4"
-  }
-}
-
-resource "azurerm_lb_outbound_rule" "outbound-ipv6" {
-  name                    = "outbound-ipv6"
-  protocol                = "All"
-  loadbalancer_id         = azurerm_lb.cluster.id
-  backend_address_pool_id = azurerm_lb_backend_address_pool.worker-ipv6.id
-  frontend_ip_configuration {
-    name = "frontend-ipv6"
-  }
-}
@@ -1,6 +0,0 @@
-locals {
-  backend_address_pool_ids = {
-    ipv4 = [azurerm_lb_backend_address_pool.worker-ipv4.id]
-    ipv6 = [azurerm_lb_backend_address_pool.worker-ipv6.id]
-  }
-}
@@ -1,64 +1,27 @@
-# Choose an IPv6 ULA subnet at random
-# https://datatracker.ietf.org/doc/html/rfc4193
-resource "random_id" "ula-netnum" {
-  byte_length = 5 # 40 bits
-}
-
-locals {
-  # fd00::/8 -> shift 40 -> 2^40 possible /48 subnets
-  ula-range = cidrsubnet("fd00::/8", 40, random_id.ula-netnum.dec)
-  network_cidr = {
-    ipv4 = var.network_cidr.ipv4
-    ipv6 = length(var.network_cidr.ipv6) > 0 ? var.network_cidr.ipv6 : [local.ula-range]
-  }
-
-  # Subdivide the virtual network into subnets
-  # - controllers use netnum 0
-  # - workers use netnum 1
-  controller_subnets = {
-    ipv4 = [for i, cidr in local.network_cidr.ipv4 : cidrsubnet(cidr, 1, 0)]
-    ipv6 = [for i, cidr in local.network_cidr.ipv6 : cidrsubnet(cidr, 16, 0)]
-  }
-  worker_subnets = {
-    ipv4 = [for i, cidr in local.network_cidr.ipv4 : cidrsubnet(cidr, 1, 1)]
-    ipv6 = [for i, cidr in local.network_cidr.ipv6 : cidrsubnet(cidr, 16, 1)]
-  }
-  cluster_subnets = {
-    ipv4 = concat(local.controller_subnets.ipv4, local.worker_subnets.ipv4)
-    ipv6 = concat(local.controller_subnets.ipv6, local.worker_subnets.ipv6)
-  }
-}
-
 # Organize cluster into a resource group
 resource "azurerm_resource_group" "cluster" {
   name     = var.cluster_name
-  location = var.location
+  location = var.region
 }
 
 resource "azurerm_virtual_network" "network" {
-  name                = var.cluster_name
   resource_group_name = azurerm_resource_group.cluster.name
-  location            = azurerm_resource_group.cluster.location
-  address_space = concat(
-    local.network_cidr.ipv4,
-    local.network_cidr.ipv6
-  )
+
+  name          = var.cluster_name
+  location      = azurerm_resource_group.cluster.location
+  address_space = [var.host_cidr]
 }
 
-# Subnets - separate subnets for controllers and workers because Azure
-# network security groups are oriented around address prefixes rather
-# than instance tags (GCP) or security group membership (AWS)
+# Subnets - separate subnets for controller and workers because Azure
+# network security groups are based on IPv4 CIDR rather than instance
+# tags like GCP or security group membership like AWS
 
 resource "azurerm_subnet" "controller" {
-  name                 = "controller"
   resource_group_name  = azurerm_resource_group.cluster.name
+
+  name                 = "controller"
   virtual_network_name = azurerm_virtual_network.network.name
-  address_prefixes = concat(
-    local.controller_subnets.ipv4,
-    local.controller_subnets.ipv6,
-  )
-  default_outbound_access_enabled = false
+  address_prefixes     = [cidrsubnet(var.host_cidr, 1, 0)]
 }
 
 resource "azurerm_subnet_network_security_group_association" "controller" {
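
Note: the removed locals are plain cidrsubnet arithmetic. A small sketch of what each call evaluates to, with example inputs standing in for var.network_cidr and the random ULA netnum (here fixed at 1 for illustration):

  locals {
    # IPv4: halve the host range; controllers get the first /17, workers the second
    controller_ipv4_example = cidrsubnet("10.0.0.0/16", 1, 0) # "10.0.0.0/17"
    worker_ipv4_example     = cidrsubnet("10.0.0.0/16", 1, 1) # "10.0.128.0/17"

    # IPv6: fd00::/8 plus 40 netnum bits yields a /48 ULA range;
    # 16 further bits subdivide that /48 into /64 subnets
    ula_example             = cidrsubnet("fd00::/8", 40, 1)      # "fd00:0:1::/48"
    controller_ipv6_example = cidrsubnet("fd00:0:1::/48", 16, 0) # "fd00:0:1::/64"
    worker_ipv6_example     = cidrsubnet("fd00:0:1::/48", 16, 1) # "fd00:0:1:1::/64"
  }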
@@ -67,14 +30,11 @@ resource "azurerm_subnet_network_security_group_association" "controller" {
 }
 
 resource "azurerm_subnet" "worker" {
-  name                 = "worker"
   resource_group_name  = azurerm_resource_group.cluster.name
+
+  name                 = "worker"
   virtual_network_name = azurerm_virtual_network.network.name
-  address_prefixes = concat(
-    local.worker_subnets.ipv4,
-    local.worker_subnets.ipv6,
-  )
-  default_outbound_access_enabled = false
+  address_prefixes     = [cidrsubnet(var.host_cidr, 1, 1)]
 }
 
 resource "azurerm_subnet_network_security_group_association" "worker" {
|
@ -6,18 +6,13 @@ output "kubeconfig-admin" {
|
|||||||
# Outputs for Kubernetes Ingress
|
# Outputs for Kubernetes Ingress
|
||||||
|
|
||||||
output "ingress_static_ipv4" {
|
output "ingress_static_ipv4" {
|
||||||
value = azurerm_public_ip.frontend-ipv4.ip_address
|
value = azurerm_public_ip.ingress-ipv4.ip_address
|
||||||
description = "IPv4 address of the load balancer for distributing traffic to Ingress controllers"
|
description = "IPv4 address of the load balancer for distributing traffic to Ingress controllers"
|
||||||
}
|
}
|
||||||
|
|
||||||
output "ingress_static_ipv6" {
|
|
||||||
value = azurerm_public_ip.frontend-ipv6.ip_address
|
|
||||||
description = "IPv6 address of the load balancer for distributing traffic to Ingress controllers"
|
|
||||||
}
|
|
||||||
|
|
||||||
# Outputs for worker pools
|
# Outputs for worker pools
|
||||||
|
|
||||||
output "location" {
|
output "region" {
|
||||||
value = azurerm_resource_group.cluster.location
|
value = azurerm_resource_group.cluster.location
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -56,12 +51,12 @@ output "worker_security_group_name" {
 
 output "controller_address_prefixes" {
   description = "Controller network subnet CIDR addresses (for source/destination)"
-  value       = local.controller_subnets
+  value       = azurerm_subnet.controller.address_prefixes
 }
 
 output "worker_address_prefixes" {
   description = "Worker network subnet CIDR addresses (for source/destination)"
-  value       = local.worker_subnets
+  value       = azurerm_subnet.worker.address_prefixes
 }
 
 # Outputs for custom load balancing
@@ -71,12 +66,9 @@ output "loadbalancer_id" {
   value = azurerm_lb.cluster.id
 }
 
-output "backend_address_pool_ids" {
-  description = "IDs of the worker backend address pools"
-  value = {
-    ipv4 = [azurerm_lb_backend_address_pool.worker-ipv4.id]
-    ipv6 = [azurerm_lb_backend_address_pool.worker-ipv6.id]
-  }
+output "backend_address_pool_id" {
+  description = "ID of the worker backend address pool"
+  value       = azurerm_lb_backend_address_pool.worker.id
 }
 
 # Outputs for debug
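
Note: worker pools consume these outputs; on main, backend_address_pool_ids is an object of ipv4/ipv6 pool-ID lists rather than a single ID. A hedged wiring sketch — the workers module path and any remaining arguments are assumptions, not taken from this diff:

  module "extra-workers" {
    # hypothetical source; substitute a real Typhoon Azure workers module path and ref
    source = "git::https://github.com/poseidon/typhoon//azure/fedora-coreos/kubernetes/workers?ref=<release>"

    # outputs from the cluster module
    location                 = module.cluster.location
    backend_address_pool_ids = module.cluster.backend_address_pool_ids

    # ...remaining worker pool arguments elided...
  }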
|
@ -1,223 +1,214 @@
|
|||||||
# Controller security group
|
# Controller security group
|
||||||
|
|
||||||
resource "azurerm_network_security_group" "controller" {
|
resource "azurerm_network_security_group" "controller" {
|
||||||
name = "${var.cluster_name}-controller"
|
|
||||||
resource_group_name = azurerm_resource_group.cluster.name
|
resource_group_name = azurerm_resource_group.cluster.name
|
||||||
location = azurerm_resource_group.cluster.location
|
|
||||||
|
name = "${var.cluster_name}-controller"
|
||||||
|
location = azurerm_resource_group.cluster.location
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "azurerm_network_security_rule" "controller-icmp" {
|
resource "azurerm_network_security_rule" "controller-icmp" {
|
||||||
for_each = local.controller_subnets
|
resource_group_name = azurerm_resource_group.cluster.name
|
||||||
|
|
||||||
name = "allow-icmp-${each.key}"
|
name = "allow-icmp"
|
||||||
resource_group_name = azurerm_resource_group.cluster.name
|
|
||||||
network_security_group_name = azurerm_network_security_group.controller.name
|
network_security_group_name = azurerm_network_security_group.controller.name
|
||||||
priority = 1995 + (each.key == "ipv4" ? 0 : 1)
|
priority = "1995"
|
||||||
access = "Allow"
|
access = "Allow"
|
||||||
direction = "Inbound"
|
direction = "Inbound"
|
||||||
protocol = "Icmp"
|
protocol = "Icmp"
|
||||||
source_port_range = "*"
|
source_port_range = "*"
|
||||||
destination_port_range = "*"
|
destination_port_range = "*"
|
||||||
source_address_prefixes = local.cluster_subnets[each.key]
|
source_address_prefixes = concat(azurerm_subnet.controller.address_prefixes, azurerm_subnet.worker.address_prefixes)
|
||||||
destination_address_prefixes = local.controller_subnets[each.key]
|
destination_address_prefixes = azurerm_subnet.controller.address_prefixes
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "azurerm_network_security_rule" "controller-ssh" {
|
resource "azurerm_network_security_rule" "controller-ssh" {
|
||||||
for_each = local.controller_subnets
|
resource_group_name = azurerm_resource_group.cluster.name
|
||||||
|
|
||||||
name = "allow-ssh-${each.key}"
|
name = "allow-ssh"
|
||||||
resource_group_name = azurerm_resource_group.cluster.name
|
|
||||||
network_security_group_name = azurerm_network_security_group.controller.name
|
network_security_group_name = azurerm_network_security_group.controller.name
|
||||||
priority = 2000 + (each.key == "ipv4" ? 0 : 1)
|
priority = "2000"
|
||||||
access = "Allow"
|
access = "Allow"
|
||||||
direction = "Inbound"
|
direction = "Inbound"
|
||||||
protocol = "Tcp"
|
protocol = "Tcp"
|
||||||
source_port_range = "*"
|
source_port_range = "*"
|
||||||
destination_port_range = "22"
|
destination_port_range = "22"
|
||||||
source_address_prefix = "*"
|
source_address_prefix = "*"
|
||||||
destination_address_prefixes = local.controller_subnets[each.key]
|
destination_address_prefixes = azurerm_subnet.controller.address_prefixes
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "azurerm_network_security_rule" "controller-etcd" {
|
resource "azurerm_network_security_rule" "controller-etcd" {
|
||||||
for_each = local.controller_subnets
|
resource_group_name = azurerm_resource_group.cluster.name
|
||||||
|
|
||||||
name = "allow-etcd-${each.key}"
|
name = "allow-etcd"
|
||||||
resource_group_name = azurerm_resource_group.cluster.name
|
|
||||||
network_security_group_name = azurerm_network_security_group.controller.name
|
network_security_group_name = azurerm_network_security_group.controller.name
|
||||||
priority = 2005 + (each.key == "ipv4" ? 0 : 1)
|
priority = "2005"
|
||||||
access = "Allow"
|
access = "Allow"
|
||||||
direction = "Inbound"
|
direction = "Inbound"
|
||||||
protocol = "Tcp"
|
protocol = "Tcp"
|
||||||
source_port_range = "*"
|
source_port_range = "*"
|
||||||
destination_port_range = "2379-2380"
|
destination_port_range = "2379-2380"
|
||||||
source_address_prefixes = local.controller_subnets[each.key]
|
source_address_prefixes = azurerm_subnet.controller.address_prefixes
|
||||||
destination_address_prefixes = local.controller_subnets[each.key]
|
destination_address_prefixes = azurerm_subnet.controller.address_prefixes
|
||||||
}
|
}
|
||||||
|
|
||||||
# Allow Prometheus to scrape etcd metrics
|
# Allow Prometheus to scrape etcd metrics
|
||||||
resource "azurerm_network_security_rule" "controller-etcd-metrics" {
|
resource "azurerm_network_security_rule" "controller-etcd-metrics" {
|
||||||
for_each = local.controller_subnets
|
resource_group_name = azurerm_resource_group.cluster.name
|
||||||
|
|
||||||
name = "allow-etcd-metrics-${each.key}"
|
name = "allow-etcd-metrics"
|
||||||
resource_group_name = azurerm_resource_group.cluster.name
|
|
||||||
network_security_group_name = azurerm_network_security_group.controller.name
|
network_security_group_name = azurerm_network_security_group.controller.name
|
||||||
priority = 2010 + (each.key == "ipv4" ? 0 : 1)
|
priority = "2010"
|
||||||
access = "Allow"
|
access = "Allow"
|
||||||
direction = "Inbound"
|
direction = "Inbound"
|
||||||
protocol = "Tcp"
|
protocol = "Tcp"
|
||||||
source_port_range = "*"
|
source_port_range = "*"
|
||||||
destination_port_range = "2381"
|
destination_port_range = "2381"
|
||||||
source_address_prefixes = local.worker_subnets[each.key]
|
source_address_prefixes = azurerm_subnet.worker.address_prefixes
|
||||||
destination_address_prefixes = local.controller_subnets[each.key]
|
destination_address_prefixes = azurerm_subnet.controller.address_prefixes
|
||||||
}
|
}
|
||||||
|
|
||||||
# Allow Prometheus to scrape kube-proxy metrics
|
# Allow Prometheus to scrape kube-proxy metrics
|
||||||
resource "azurerm_network_security_rule" "controller-kube-proxy" {
|
resource "azurerm_network_security_rule" "controller-kube-proxy" {
|
||||||
for_each = local.controller_subnets
|
resource_group_name = azurerm_resource_group.cluster.name
|
||||||
|
|
||||||
name = "allow-kube-proxy-metrics-${each.key}"
|
name = "allow-kube-proxy-metrics"
|
||||||
resource_group_name = azurerm_resource_group.cluster.name
|
|
||||||
network_security_group_name = azurerm_network_security_group.controller.name
|
network_security_group_name = azurerm_network_security_group.controller.name
|
||||||
priority = 2012 + (each.key == "ipv4" ? 0 : 1)
|
priority = "2011"
|
||||||
access = "Allow"
|
access = "Allow"
|
||||||
direction = "Inbound"
|
direction = "Inbound"
|
||||||
protocol = "Tcp"
|
protocol = "Tcp"
|
||||||
source_port_range = "*"
|
source_port_range = "*"
|
||||||
destination_port_range = "10249"
|
destination_port_range = "10249"
|
||||||
source_address_prefixes = local.worker_subnets[each.key]
|
source_address_prefixes = azurerm_subnet.worker.address_prefixes
|
||||||
destination_address_prefixes = local.controller_subnets[each.key]
|
destination_address_prefixes = azurerm_subnet.controller.address_prefixes
|
||||||
}
|
}
|
||||||
|
|
||||||
# Allow Prometheus to scrape kube-scheduler and kube-controller-manager metrics
|
# Allow Prometheus to scrape kube-scheduler and kube-controller-manager metrics
|
||||||
resource "azurerm_network_security_rule" "controller-kube-metrics" {
|
resource "azurerm_network_security_rule" "controller-kube-metrics" {
|
||||||
for_each = local.controller_subnets
|
resource_group_name = azurerm_resource_group.cluster.name
|
||||||
|
|
||||||
name = "allow-kube-metrics-${each.key}"
|
name = "allow-kube-metrics"
|
||||||
resource_group_name = azurerm_resource_group.cluster.name
|
|
||||||
network_security_group_name = azurerm_network_security_group.controller.name
|
network_security_group_name = azurerm_network_security_group.controller.name
|
||||||
priority = 2014 + (each.key == "ipv4" ? 0 : 1)
|
priority = "2012"
|
||||||
access = "Allow"
|
access = "Allow"
|
||||||
direction = "Inbound"
|
direction = "Inbound"
|
||||||
protocol = "Tcp"
|
protocol = "Tcp"
|
||||||
source_port_range = "*"
|
source_port_range = "*"
|
||||||
destination_port_range = "10257-10259"
|
destination_port_range = "10257-10259"
|
||||||
source_address_prefixes = local.worker_subnets[each.key]
|
source_address_prefixes = azurerm_subnet.worker.address_prefixes
|
||||||
destination_address_prefixes = local.controller_subnets[each.key]
|
destination_address_prefixes = azurerm_subnet.controller.address_prefixes
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "azurerm_network_security_rule" "controller-apiserver" {
|
resource "azurerm_network_security_rule" "controller-apiserver" {
|
||||||
for_each = local.controller_subnets
|
resource_group_name = azurerm_resource_group.cluster.name
|
||||||
|
|
||||||
name = "allow-apiserver-${each.key}"
|
name = "allow-apiserver"
|
||||||
resource_group_name = azurerm_resource_group.cluster.name
|
|
||||||
network_security_group_name = azurerm_network_security_group.controller.name
|
network_security_group_name = azurerm_network_security_group.controller.name
|
||||||
priority = 2016 + (each.key == "ipv4" ? 0 : 1)
|
priority = "2015"
|
||||||
access = "Allow"
|
access = "Allow"
|
||||||
direction = "Inbound"
|
direction = "Inbound"
|
||||||
protocol = "Tcp"
|
protocol = "Tcp"
|
||||||
source_port_range = "*"
|
source_port_range = "*"
|
||||||
destination_port_range = "6443"
|
destination_port_range = "6443"
|
||||||
source_address_prefix = "*"
|
source_address_prefix = "*"
|
||||||
destination_address_prefixes = local.controller_subnets[each.key]
|
destination_address_prefixes = azurerm_subnet.controller.address_prefixes
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "azurerm_network_security_rule" "controller-cilium-health" {
|
resource "azurerm_network_security_rule" "controller-cilium-health" {
|
||||||
for_each = var.networking == "cilium" ? local.controller_subnets : {}
|
resource_group_name = azurerm_resource_group.cluster.name
|
||||||
|
count = var.networking == "cilium" ? 1 : 0
|
||||||
|
|
||||||
name = "allow-cilium-health-${each.key}"
|
name = "allow-cilium-health"
|
||||||
resource_group_name = azurerm_resource_group.cluster.name
|
|
||||||
network_security_group_name = azurerm_network_security_group.controller.name
|
network_security_group_name = azurerm_network_security_group.controller.name
|
||||||
priority = 2018 + (each.key == "ipv4" ? 0 : 1)
|
priority = "2018"
|
||||||
access = "Allow"
|
access = "Allow"
|
||||||
direction = "Inbound"
|
direction = "Inbound"
|
||||||
protocol = "Tcp"
|
protocol = "Tcp"
|
||||||
source_port_range = "*"
|
source_port_range = "*"
|
||||||
destination_port_range = "4240"
|
destination_port_range = "4240"
|
||||||
source_address_prefixes = local.cluster_subnets[each.key]
|
source_address_prefixes = concat(azurerm_subnet.controller.address_prefixes, azurerm_subnet.worker.address_prefixes)
|
||||||
destination_address_prefixes = local.controller_subnets[each.key]
|
destination_address_prefixes = azurerm_subnet.controller.address_prefixes
|
||||||
}
|
}
|
||||||

 resource "azurerm_network_security_rule" "controller-cilium-metrics" {
-  for_each = var.networking == "cilium" ? local.controller_subnets : {}
+  resource_group_name = azurerm_resource_group.cluster.name
+
+  count = var.networking == "cilium" ? 1 : 0

-  name                        = "allow-cilium-metrics-${each.key}"
+  name                        = "allow-cilium-metrics"
-  resource_group_name         = azurerm_resource_group.cluster.name
   network_security_group_name = azurerm_network_security_group.controller.name
-  priority                    = 2035 + (each.key == "ipv4" ? 0 : 1)
+  priority                    = "2019"
   access                      = "Allow"
   direction                   = "Inbound"
   protocol                    = "Tcp"
   source_port_range           = "*"
   destination_port_range      = "9962-9965"
-  source_address_prefixes      = local.cluster_subnets[each.key]
+  source_address_prefixes      = concat(azurerm_subnet.controller.address_prefixes, azurerm_subnet.worker.address_prefixes)
-  destination_address_prefixes = local.controller_subnets[each.key]
+  destination_address_prefixes = azurerm_subnet.controller.address_prefixes
 }

 resource "azurerm_network_security_rule" "controller-vxlan" {
-  for_each = local.controller_subnets
+  resource_group_name = azurerm_resource_group.cluster.name

-  name                        = "allow-vxlan-${each.key}"
+  name                        = "allow-vxlan"
-  resource_group_name         = azurerm_resource_group.cluster.name
   network_security_group_name = azurerm_network_security_group.controller.name
-  priority                    = 2020 + (each.key == "ipv4" ? 0 : 1)
+  priority                    = "2020"
   access                      = "Allow"
   direction                   = "Inbound"
   protocol                    = "Udp"
   source_port_range           = "*"
   destination_port_range      = "4789"
-  source_address_prefixes      = local.cluster_subnets[each.key]
+  source_address_prefixes      = concat(azurerm_subnet.controller.address_prefixes, azurerm_subnet.worker.address_prefixes)
-  destination_address_prefixes = local.controller_subnets[each.key]
+  destination_address_prefixes = azurerm_subnet.controller.address_prefixes
 }

 resource "azurerm_network_security_rule" "controller-linux-vxlan" {
-  for_each = local.controller_subnets
+  resource_group_name = azurerm_resource_group.cluster.name

-  name                        = "allow-linux-vxlan-${each.key}"
+  name                        = "allow-linux-vxlan"
-  resource_group_name         = azurerm_resource_group.cluster.name
   network_security_group_name = azurerm_network_security_group.controller.name
-  priority                    = 2022 + (each.key == "ipv4" ? 0 : 1)
+  priority                    = "2021"
   access                      = "Allow"
   direction                   = "Inbound"
   protocol                    = "Udp"
   source_port_range           = "*"
   destination_port_range      = "8472"
-  source_address_prefixes      = local.cluster_subnets[each.key]
+  source_address_prefixes      = concat(azurerm_subnet.controller.address_prefixes, azurerm_subnet.worker.address_prefixes)
-  destination_address_prefixes = local.controller_subnets[each.key]
+  destination_address_prefixes = azurerm_subnet.controller.address_prefixes
 }

 # Allow Prometheus to scrape node-exporter daemonset
 resource "azurerm_network_security_rule" "controller-node-exporter" {
-  for_each = local.controller_subnets
+  resource_group_name = azurerm_resource_group.cluster.name

-  name                        = "allow-node-exporter-${each.key}"
+  name                        = "allow-node-exporter"
-  resource_group_name         = azurerm_resource_group.cluster.name
   network_security_group_name = azurerm_network_security_group.controller.name
-  priority                    = 2025 + (each.key == "ipv4" ? 0 : 1)
+  priority                    = "2025"
   access                      = "Allow"
   direction                   = "Inbound"
   protocol                    = "Tcp"
   source_port_range           = "*"
   destination_port_range      = "9100"
-  source_address_prefixes      = local.worker_subnets[each.key]
+  source_address_prefixes      = azurerm_subnet.worker.address_prefixes
-  destination_address_prefixes = local.controller_subnets[each.key]
+  destination_address_prefixes = azurerm_subnet.controller.address_prefixes
 }

 # Allow apiserver to access kubelet's for exec, log, port-forward
 resource "azurerm_network_security_rule" "controller-kubelet" {
-  for_each = local.controller_subnets
+  resource_group_name = azurerm_resource_group.cluster.name

-  name                        = "allow-kubelet-${each.key}"
+  name                        = "allow-kubelet"
-  resource_group_name         = azurerm_resource_group.cluster.name
   network_security_group_name = azurerm_network_security_group.controller.name
-  priority                    = 2030 + (each.key == "ipv4" ? 0 : 1)
+  priority                    = "2030"
   access                      = "Allow"
   direction                   = "Inbound"
   protocol                    = "Tcp"
   source_port_range           = "*"
   destination_port_range      = "10250"

   # allow Prometheus to scrape kubelet metrics too
-  source_address_prefixes      = local.cluster_subnets[each.key]
+  source_address_prefixes      = concat(azurerm_subnet.controller.address_prefixes, azurerm_subnet.worker.address_prefixes)
-  destination_address_prefixes = local.controller_subnets[each.key]
+  destination_address_prefixes = azurerm_subnet.controller.address_prefixes
 }

 # Override Azure AllowVNetInBound and AllowAzureLoadBalancerInBound
@@ -256,189 +247,182 @@ resource "azurerm_network_security_rule" "controller-deny-all" {
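The hunk context above references a `controller-deny-all` rule that overrides Azure's permissive defaults; its body falls outside this diff. A minimal sketch of such a catch-all override, assuming a low-precedence priority (3000 here is illustrative):

```hcl
resource "azurerm_network_security_rule" "controller-deny-all" {
  resource_group_name         = azurerm_resource_group.cluster.name
  name                        = "deny-all"
  network_security_group_name = azurerm_network_security_group.controller.name
  priority                    = 3000 # illustrative; must sort after all the allow rules above
  access                      = "Deny"
  direction                   = "Inbound"
  protocol                    = "*"
  source_port_range           = "*"
  destination_port_range      = "*"
  source_address_prefix       = "*"
  destination_address_prefix  = "*"
}
```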
 # Worker security group

 resource "azurerm_network_security_group" "worker" {
-  name                = "${var.cluster_name}-worker"
   resource_group_name = azurerm_resource_group.cluster.name
-  location            = azurerm_resource_group.cluster.location
+
+  name     = "${var.cluster_name}-worker"
+  location = azurerm_resource_group.cluster.location
 }

 resource "azurerm_network_security_rule" "worker-icmp" {
-  for_each = local.worker_subnets
+  resource_group_name = azurerm_resource_group.cluster.name

-  name                        = "allow-icmp-${each.key}"
+  name                        = "allow-icmp"
-  resource_group_name         = azurerm_resource_group.cluster.name
   network_security_group_name = azurerm_network_security_group.worker.name
-  priority                    = 1995 + (each.key == "ipv4" ? 0 : 1)
+  priority                    = "1995"
   access                      = "Allow"
   direction                   = "Inbound"
   protocol                    = "Icmp"
   source_port_range           = "*"
   destination_port_range      = "*"
-  source_address_prefixes      = local.cluster_subnets[each.key]
+  source_address_prefixes      = concat(azurerm_subnet.controller.address_prefixes, azurerm_subnet.worker.address_prefixes)
-  destination_address_prefixes = local.worker_subnets[each.key]
+  destination_address_prefixes = azurerm_subnet.worker.address_prefixes
 }

 resource "azurerm_network_security_rule" "worker-ssh" {
-  for_each = local.worker_subnets
+  resource_group_name = azurerm_resource_group.cluster.name

-  name                        = "allow-ssh-${each.key}"
+  name                        = "allow-ssh"
-  resource_group_name         = azurerm_resource_group.cluster.name
   network_security_group_name = azurerm_network_security_group.worker.name
-  priority                    = 2000 + (each.key == "ipv4" ? 0 : 1)
+  priority                    = "2000"
   access                      = "Allow"
   direction                   = "Inbound"
   protocol                    = "Tcp"
   source_port_range           = "*"
   destination_port_range      = "22"
-  source_address_prefixes      = local.controller_subnets[each.key]
+  source_address_prefixes      = azurerm_subnet.controller.address_prefixes
-  destination_address_prefixes = local.worker_subnets[each.key]
+  destination_address_prefixes = azurerm_subnet.worker.address_prefixes
 }

 resource "azurerm_network_security_rule" "worker-http" {
-  for_each = local.worker_subnets
+  resource_group_name = azurerm_resource_group.cluster.name

-  name                        = "allow-http-${each.key}"
+  name                        = "allow-http"
-  resource_group_name         = azurerm_resource_group.cluster.name
   network_security_group_name = azurerm_network_security_group.worker.name
-  priority                    = 2005 + (each.key == "ipv4" ? 0 : 1)
+  priority                    = "2005"
   access                      = "Allow"
   direction                   = "Inbound"
   protocol                    = "Tcp"
   source_port_range           = "*"
   destination_port_range      = "80"
   source_address_prefix       = "*"
-  destination_address_prefixes = local.worker_subnets[each.key]
+  destination_address_prefixes = azurerm_subnet.worker.address_prefixes
 }

 resource "azurerm_network_security_rule" "worker-https" {
-  for_each = local.worker_subnets
+  resource_group_name = azurerm_resource_group.cluster.name

-  name                        = "allow-https-${each.key}"
+  name                        = "allow-https"
-  resource_group_name         = azurerm_resource_group.cluster.name
   network_security_group_name = azurerm_network_security_group.worker.name
-  priority                    = 2010 + (each.key == "ipv4" ? 0 : 1)
+  priority                    = "2010"
   access                      = "Allow"
   direction                   = "Inbound"
   protocol                    = "Tcp"
   source_port_range           = "*"
   destination_port_range      = "443"
   source_address_prefix       = "*"
-  destination_address_prefixes = local.worker_subnets[each.key]
+  destination_address_prefixes = azurerm_subnet.worker.address_prefixes
 }

 resource "azurerm_network_security_rule" "worker-cilium-health" {
-  for_each = var.networking == "cilium" ? local.worker_subnets : {}
+  resource_group_name = azurerm_resource_group.cluster.name
+
+  count = var.networking == "cilium" ? 1 : 0

-  name                        = "allow-cilium-health-${each.key}"
+  name                        = "allow-cilium-health"
-  resource_group_name         = azurerm_resource_group.cluster.name
   network_security_group_name = azurerm_network_security_group.worker.name
-  priority                    = 2012 + (each.key == "ipv4" ? 0 : 1)
+  priority                    = "2013"
   access                      = "Allow"
   direction                   = "Inbound"
   protocol                    = "Tcp"
   source_port_range           = "*"
   destination_port_range      = "4240"
-  source_address_prefixes      = local.cluster_subnets[each.key]
+  source_address_prefixes      = concat(azurerm_subnet.controller.address_prefixes, azurerm_subnet.worker.address_prefixes)
-  destination_address_prefixes = local.worker_subnets[each.key]
+  destination_address_prefixes = azurerm_subnet.worker.address_prefixes
 }

 resource "azurerm_network_security_rule" "worker-cilium-metrics" {
-  for_each = var.networking == "cilium" ? local.worker_subnets : {}
+  resource_group_name = azurerm_resource_group.cluster.name
+
+  count = var.networking == "cilium" ? 1 : 0

-  name                        = "allow-cilium-metrics-${each.key}"
+  name                        = "allow-cilium-metrics"
-  resource_group_name         = azurerm_resource_group.cluster.name
   network_security_group_name = azurerm_network_security_group.worker.name
-  priority                    = 2014 + (each.key == "ipv4" ? 0 : 1)
+  priority                    = "2014"
   access                      = "Allow"
   direction                   = "Inbound"
   protocol                    = "Tcp"
   source_port_range           = "*"
   destination_port_range      = "9962-9965"
-  source_address_prefixes      = local.cluster_subnets[each.key]
+  source_address_prefixes      = concat(azurerm_subnet.controller.address_prefixes, azurerm_subnet.worker.address_prefixes)
-  destination_address_prefixes = local.worker_subnets[each.key]
+  destination_address_prefixes = azurerm_subnet.worker.address_prefixes
 }

 resource "azurerm_network_security_rule" "worker-vxlan" {
-  for_each = local.worker_subnets
+  resource_group_name = azurerm_resource_group.cluster.name

-  name                        = "allow-vxlan-${each.key}"
+  name                        = "allow-vxlan"
-  resource_group_name         = azurerm_resource_group.cluster.name
   network_security_group_name = azurerm_network_security_group.worker.name
-  priority                    = 2016 + (each.key == "ipv4" ? 0 : 1)
+  priority                    = "2015"
   access                      = "Allow"
   direction                   = "Inbound"
   protocol                    = "Udp"
   source_port_range           = "*"
   destination_port_range      = "4789"
-  source_address_prefixes      = local.cluster_subnets[each.key]
+  source_address_prefixes      = concat(azurerm_subnet.controller.address_prefixes, azurerm_subnet.worker.address_prefixes)
-  destination_address_prefixes = local.worker_subnets[each.key]
+  destination_address_prefixes = azurerm_subnet.worker.address_prefixes
 }

 resource "azurerm_network_security_rule" "worker-linux-vxlan" {
-  for_each = local.worker_subnets
+  resource_group_name = azurerm_resource_group.cluster.name

-  name                        = "allow-linux-vxlan-${each.key}"
+  name                        = "allow-linux-vxlan"
-  resource_group_name         = azurerm_resource_group.cluster.name
   network_security_group_name = azurerm_network_security_group.worker.name
-  priority                    = 2018 + (each.key == "ipv4" ? 0 : 1)
+  priority                    = "2016"
   access                      = "Allow"
   direction                   = "Inbound"
   protocol                    = "Udp"
   source_port_range           = "*"
   destination_port_range      = "8472"
-  source_address_prefixes      = local.cluster_subnets[each.key]
+  source_address_prefixes      = concat(azurerm_subnet.controller.address_prefixes, azurerm_subnet.worker.address_prefixes)
-  destination_address_prefixes = local.worker_subnets[each.key]
+  destination_address_prefixes = azurerm_subnet.worker.address_prefixes
 }

 # Allow Prometheus to scrape node-exporter daemonset
 resource "azurerm_network_security_rule" "worker-node-exporter" {
-  for_each = local.worker_subnets
+  resource_group_name = azurerm_resource_group.cluster.name

-  name                        = "allow-node-exporter-${each.key}"
+  name                        = "allow-node-exporter"
-  resource_group_name         = azurerm_resource_group.cluster.name
   network_security_group_name = azurerm_network_security_group.worker.name
-  priority                    = 2020 + (each.key == "ipv4" ? 0 : 1)
+  priority                    = "2020"
   access                      = "Allow"
   direction                   = "Inbound"
   protocol                    = "Tcp"
   source_port_range           = "*"
   destination_port_range      = "9100"
-  source_address_prefixes      = local.worker_subnets[each.key]
+  source_address_prefixes      = azurerm_subnet.worker.address_prefixes
-  destination_address_prefixes = local.worker_subnets[each.key]
+  destination_address_prefixes = azurerm_subnet.worker.address_prefixes
 }

 # Allow Prometheus to scrape kube-proxy
 resource "azurerm_network_security_rule" "worker-kube-proxy" {
-  for_each = local.worker_subnets
+  resource_group_name = azurerm_resource_group.cluster.name

-  name                        = "allow-kube-proxy-${each.key}"
+  name                        = "allow-kube-proxy"
-  resource_group_name         = azurerm_resource_group.cluster.name
   network_security_group_name = azurerm_network_security_group.worker.name
-  priority                    = 2024 + (each.key == "ipv4" ? 0 : 1)
+  priority                    = "2024"
   access                      = "Allow"
   direction                   = "Inbound"
   protocol                    = "Tcp"
   source_port_range           = "*"
   destination_port_range      = "10249"
-  source_address_prefixes      = local.worker_subnets[each.key]
+  source_address_prefixes      = azurerm_subnet.worker.address_prefixes
-  destination_address_prefixes = local.worker_subnets[each.key]
+  destination_address_prefixes = azurerm_subnet.worker.address_prefixes
 }

 # Allow apiserver to access kubelet's for exec, log, port-forward
 resource "azurerm_network_security_rule" "worker-kubelet" {
-  for_each = local.worker_subnets
+  resource_group_name = azurerm_resource_group.cluster.name

-  name                        = "allow-kubelet-${each.key}"
+  name                        = "allow-kubelet"
-  resource_group_name         = azurerm_resource_group.cluster.name
   network_security_group_name = azurerm_network_security_group.worker.name
-  priority                    = 2026 + (each.key == "ipv4" ? 0 : 1)
+  priority                    = "2025"
   access                      = "Allow"
   direction                   = "Inbound"
   protocol                    = "Tcp"
   source_port_range           = "*"
   destination_port_range      = "10250"

   # allow Prometheus to scrape kubelet metrics too
-  source_address_prefixes      = local.cluster_subnets[each.key]
+  source_address_prefixes      = concat(azurerm_subnet.controller.address_prefixes, azurerm_subnet.worker.address_prefixes)
-  destination_address_prefixes = local.worker_subnets[each.key]
+  destination_address_prefixes = azurerm_subnet.worker.address_prefixes
 }

 # Override Azure AllowVNetInBound and AllowAzureLoadBalancerInBound

@@ -18,7 +18,7 @@ resource "null_resource" "copy-controller-secrets" {
   connection {
     type    = "ssh"
-    host    = azurerm_public_ip.controllers-ipv4[count.index].ip_address
+    host    = azurerm_public_ip.controllers.*.ip_address[count.index]
     user    = "core"
     timeout = "15m"
   }
@@ -45,7 +45,7 @@ resource "null_resource" "bootstrap" {
   connection {
     type    = "ssh"
-    host    = azurerm_public_ip.controllers-ipv4[0].ip_address
+    host    = azurerm_public_ip.controllers.*.ip_address[0]
     user    = "core"
     timeout = "15m"
   }
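Both `host` expressions above resolve to the same controller address for count-based resources; main indexes the per-family resource directly, while v1.30.1 used the legacy splat-then-index form. A hedged sketch exposing that address as an output (resource name as on the main side of this diff):

```hcl
output "controller0_public_ipv4" {
  # Equivalent references for count-based resources:
  #   azurerm_public_ip.controllers-ipv4[0].ip_address   (direct index, main)
  #   azurerm_public_ip.controllers.*.ip_address[0]      (splat form, v1.30.1)
  value = azurerm_public_ip.controllers-ipv4[0].ip_address
}
```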
@@ -5,9 +5,9 @@ variable "cluster_name" {
 # Azure

-variable "location" {
+variable "region" {
   type        = string
-  description = "Azure location (e.g. centralus , see `az account list-locations --output table`)"
+  description = "Azure Region (e.g. centralus , see `az account list-locations --output table`)"
 }

 variable "dns_zone" {
@@ -22,65 +22,41 @@ variable "dns_zone_group" {

 # instances

-variable "os_image" {
-  type        = string
-  description = "Fedora CoreOS image for instances"
-}

 variable "controller_count" {
   type        = number
   description = "Number of controllers (i.e. masters)"
   default     = 1
 }

+variable "worker_count" {
+  type        = number
+  description = "Number of workers"
+  default     = 1
+}

 variable "controller_type" {
   type        = string
   description = "Machine type for controllers (see `az vm list-skus --location centralus`)"
   default     = "Standard_B2s"
 }

-variable "controller_disk_type" {
-  type        = string
-  description = "Type of managed disk for controller node(s)"
-  default     = "Premium_LRS"
-}

-variable "controller_disk_size" {
-  type        = number
-  description = "Size of the managed disk in GB for controller node(s)"
-  default     = 30
-}

-variable "worker_count" {
-  type        = number
-  description = "Number of workers"
-  default     = 1
-}

 variable "worker_type" {
   type        = string
   description = "Machine type for workers (see `az vm list-skus --location centralus`)"
   default     = "Standard_D2as_v5"
 }

-variable "worker_disk_type" {
+variable "os_image" {
   type        = string
-  description = "Type of managed disk for worker nodes"
+  description = "Fedora CoreOS image for instances"
-  default     = "Standard_LRS"
 }

-variable "worker_disk_size" {
+variable "disk_size" {
   type        = number
-  description = "Size of the managed disk in GB for worker nodes"
+  description = "Size of the disk in GB"
   default     = 30
 }

-variable "worker_ephemeral_disk" {
-  type        = bool
-  description = "Use ephemeral local disk instead of managed disk (requires vm_type with local storage)"
-  default     = false
-}
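A hedged sketch of how a cluster definition might set the worker disk variables that exist only on the main side of this diff (values illustrative; `Standard_LRS` and 30 GB are the declared defaults):

```hcl
module "cluster" {
  # ...other required inputs elided...
  worker_disk_type      = "Standard_LRS"
  worker_disk_size      = 30
  worker_ephemeral_disk = true # requires a worker_type whose SKU has a local temp disk
}
```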

 variable "worker_priority" {
   type        = string
   description = "Set worker priority to Spot to use reduced cost surplus capacity, with the tradeoff that instances can be deallocated at any time."
@@ -118,15 +94,10 @@ variable "networking" {
   default     = "cilium"
 }

-variable "network_cidr" {
+variable "host_cidr" {
-  type = object({
+  type        = string
-    ipv4 = list(string)
+  description = "CIDR IPv4 range to assign to instances"
-    ipv6 = optional(list(string), [])
+  default     = "10.0.0.0/16"
-  })
-  description = "Virtual network CIDR ranges"
-  default = {
-    ipv4 = ["10.0.0.0/16"]
-  }
 }
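The same kind of cluster definition illustrates the interface change above: main takes an object of per-family CIDR lists, while v1.30.1 took a single IPv4 string. A sketch (addresses illustrative):

```hcl
# main: dual-stack capable virtual network ranges
network_cidr = {
  ipv4 = ["10.0.0.0/16"]
  ipv6 = ["2001:db8::/56"] # optional; defaults to []
}

# v1.30.1: single IPv4 range
host_cidr = "10.0.0.0/16"
```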

 variable "pod_cidr" {
@@ -144,13 +115,31 @@ EOD
   default     = "10.3.0.0/16"
 }

+variable "enable_reporting" {
+  type        = bool
+  description = "Enable usage or analytics reporting to upstreams (Calico)"
+  default     = false
+}

+variable "enable_aggregation" {
+  type        = bool
+  description = "Enable the Kubernetes Aggregation Layer"
+  default     = true
+}

 variable "worker_node_labels" {
   type        = list(string)
   description = "List of initial worker node labels"
   default     = []
 }

-# advanced
+# unofficial, undocumented, unsupported

+variable "cluster_domain_suffix" {
+  type        = string
+  description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
+  default     = "cluster.local"
+}

 variable "daemonset_tolerations" {
   type        = list(string)
@@ -3,7 +3,7 @@
 terraform {
   required_version = ">= 0.13.0, < 2.0.0"
   required_providers {
-    azurerm = ">= 2.8"
+    azurerm = ">= 2.8, < 4.0"
     null    = ">= 2.1"
     ct = {
       source = "poseidon/ct"
@@ -3,26 +3,23 @@ module "workers" {
   name = var.cluster_name

   # Azure
   resource_group_name = azurerm_resource_group.cluster.name
-  location            = azurerm_resource_group.cluster.location
+  region              = azurerm_resource_group.cluster.location
   subnet_id           = azurerm_subnet.worker.id
   security_group_id   = azurerm_network_security_group.worker.id
-  backend_address_pool_ids = local.backend_address_pool_ids
+  backend_address_pool_id  = azurerm_lb_backend_address_pool.worker.id

-  # instances
+  worker_count = var.worker_count
-  os_image     = var.os_image
+  vm_type      = var.worker_type
-  worker_count = var.worker_count
+  os_image     = var.os_image
-  vm_type      = var.worker_type
+  priority     = var.worker_priority
-  disk_type      = var.worker_disk_type
-  disk_size      = var.worker_disk_size
-  ephemeral_disk = var.worker_ephemeral_disk
-  priority       = var.worker_priority

   # configuration
   kubeconfig           = module.bootstrap.kubeconfig-kubelet
   ssh_authorized_key   = var.ssh_authorized_key
   azure_authorized_key = var.azure_authorized_key
   service_cidr         = var.service_cidr
-  snippets             = var.worker_snippets
+  cluster_domain_suffix = var.cluster_domain_suffix
-  node_labels          = var.worker_node_labels
+  snippets              = var.worker_snippets
+  node_labels           = var.worker_node_labels
 }
@@ -26,7 +26,7 @@ systemd:
 Description=Kubelet (System Container)
 Wants=rpc-statd.service
 [Service]
-Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.31.3
+Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.30.1
 ExecStartPre=/bin/mkdir -p /etc/cni/net.d
 ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
 ExecStartPre=/bin/mkdir -p /opt/cni/bin
@@ -99,7 +99,7 @@ storage:
 cgroupDriver: systemd
 clusterDNS:
 - ${cluster_dns_service_ip}
-clusterDomain: cluster.local
+clusterDomain: ${cluster_domain_suffix}
 healthzPort: 0
 rotateCertificates: true
 shutdownGracePeriod: 45s
@@ -5,9 +5,9 @@ variable "name" {
 # Azure

-variable "location" {
+variable "region" {
   type        = string
-  description = "Must be set to the Azure location of cluster"
+  description = "Must be set to the Azure Region of cluster"
 }

 variable "resource_group_name" {
@@ -25,12 +25,9 @@ variable "security_group_id" {
   description = "Must be set to the `worker_security_group_id` output by cluster"
 }

-variable "backend_address_pool_ids" {
+variable "backend_address_pool_id" {
-  type = object({
+  type        = string
-    ipv4 = list(string)
+  description = "Must be set to the `worker_backend_address_pool_id` output by cluster"
-    ipv6 = list(string)
-  })
-  description = "Must be set to the `backend_address_pool_ids` output by cluster"
 }
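A sketch (assumed, since the cluster-side locals are not part of this diff) of how the cluster module can satisfy the new object type; `worker-ipv4` matches the backend pool name used in the load balancer rules later in this diff, while `worker-ipv6` is an assumed companion:

```hcl
locals {
  backend_address_pool_ids = {
    ipv4 = [azurerm_lb_backend_address_pool.worker-ipv4.id]
    ipv6 = [azurerm_lb_backend_address_pool.worker-ipv6.id] # assumed name
  }
}
```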

 # instances
@@ -52,24 +49,6 @@ variable "os_image" {
   description = "Fedora CoreOS image for instances"
 }

-variable "disk_type" {
-  type        = string
-  description = "Type of managed disk"
-  default     = "Standard_LRS"
-}

-variable "disk_size" {
-  type        = number
-  description = "Size of the managed disk in GB"
-  default     = 30
-}

-variable "ephemeral_disk" {
-  type        = bool
-  description = "Use ephemeral local disk instead of managed disk (requires vm_type with local storage)"
-  default     = false
-}

 variable "priority" {
   type        = string
   description = "Set priority to Spot to use reduced cost surplus capacity, with the tradeoff that instances can be evicted at any time."
@@ -120,3 +99,12 @@ variable "node_taints" {
   description = "List of initial node taints"
   default     = []
 }

+# unofficial, undocumented, unsupported

+variable "cluster_domain_suffix" {
+  description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
+  type        = string
+  default     = "cluster.local"
+}

@@ -3,7 +3,7 @@
 terraform {
   required_version = ">= 0.13.0, < 2.0.0"
   required_providers {
-    azurerm = ">= 2.8"
+    azurerm = ">= 2.8, < 4.0"
     ct = {
       source  = "poseidon/ct"
       version = "~> 0.13"
@@ -3,29 +3,30 @@ locals {
 }

 # Workers scale set
-resource "azurerm_orchestrated_virtual_machine_scale_set" "workers" {
+resource "azurerm_linux_virtual_machine_scale_set" "workers" {
-  name                = "${var.name}-worker"
+  resource_group_name = var.resource_group_name
-  resource_group_name = var.resource_group_name
-  location                    = var.location
+  name                = "${var.name}-worker"
-  platform_fault_domain_count = 1
+  location            = var.region
-  sku_name  = var.vm_type
+  sku       = var.vm_type
   instances = var.worker_count
+  # instance name prefix for instances in the set
+  computer_name_prefix   = "${var.name}-worker"
+  single_placement_group = false
+  custom_data            = base64encode(data.ct_config.worker.rendered)

   # storage
-  encryption_at_host_enabled = true
+  source_image_id = var.os_image
-  source_image_id            = var.os_image
   os_disk {
-    storage_account_type = var.disk_type
+    storage_account_type = "Standard_LRS"
-    disk_size_gb         = var.disk_size
+    caching              = "ReadWrite"
-    caching              = "ReadOnly"
+  }
-    # Optionally, use the ephemeral disk of the instance type (support varies)
-    dynamic "diff_disk_settings" {
+  # Azure requires setting admin_ssh_key, though Ignition custom_data handles it too
-      for_each = var.ephemeral_disk ? [1] : []
+  admin_username = "core"
-      content {
+  admin_ssh_key {
-        option    = "Local"
+    username   = "core"
-        placement = "ResourceDisk"
+    public_key = var.azure_authorized_key
-      }
-    }
   }
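When `var.ephemeral_disk` is true, the `dynamic "diff_disk_settings"` block on the main side renders exactly one nested block, so the OS disk lives on the VM's local resource disk. The roughly equivalent static form:

```hcl
os_disk {
  storage_account_type = var.disk_type
  disk_size_gb         = var.disk_size
  caching              = "ReadOnly" # ephemeral OS disks require read-only caching

  diff_disk_settings {
    option    = "Local"
    placement = "ResourceDisk"
  }
}
```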
# network
|
# network
|
||||||
@ -35,46 +36,41 @@ resource "azurerm_orchestrated_virtual_machine_scale_set" "workers" {
|
|||||||
network_security_group_id = var.security_group_id
|
network_security_group_id = var.security_group_id
|
||||||
|
|
||||||
ip_configuration {
|
ip_configuration {
|
||||||
name = "ipv4"
|
name = "ip0"
|
||||||
version = "IPv4"
|
|
||||||
primary = true
|
primary = true
|
||||||
subnet_id = var.subnet_id
|
subnet_id = var.subnet_id
|
||||||
# backend address pool to which the NIC should be added
|
|
||||||
load_balancer_backend_address_pool_ids = var.backend_address_pool_ids.ipv4
|
|
||||||
}
|
|
||||||
ip_configuration {
|
|
||||||
name = "ipv6"
|
|
||||||
version = "IPv6"
|
|
||||||
subnet_id = var.subnet_id
|
|
||||||
# backend address pool to which the NIC should be added
|
|
||||||
load_balancer_backend_address_pool_ids = var.backend_address_pool_ids.ipv6
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
# boot
|
# backend address pool to which the NIC should be added
|
||||||
user_data_base64 = base64encode(data.ct_config.worker.rendered)
|
load_balancer_backend_address_pool_ids = [var.backend_address_pool_id]
|
||||||
boot_diagnostics {
|
|
||||||
# defaults to a managed storage account
|
|
||||||
}
|
|
||||||
|
|
||||||
# Azure requires an RSA admin_ssh_key
|
|
||||||
os_profile {
|
|
||||||
linux_configuration {
|
|
||||||
admin_username = "core"
|
|
||||||
admin_ssh_key {
|
|
||||||
username = "core"
|
|
||||||
public_key = local.azure_authorized_key
|
|
||||||
}
|
|
||||||
computer_name_prefix = "${var.name}-worker"
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
# lifecycle
|
# lifecycle
|
||||||
|
upgrade_mode = "Manual"
|
||||||
# eviction policy may only be set when priority is Spot
|
# eviction policy may only be set when priority is Spot
|
||||||
priority = var.priority
|
priority = var.priority
|
||||||
eviction_policy = var.priority == "Spot" ? "Delete" : null
|
eviction_policy = var.priority == "Spot" ? "Delete" : null
|
||||||
termination_notification {
|
}
|
||||||
enabled = true
|
|
||||||
|
# Scale up or down to maintain desired number, tolerating deallocations.
|
||||||
|
resource "azurerm_monitor_autoscale_setting" "workers" {
|
||||||
|
resource_group_name = var.resource_group_name
|
||||||
|
|
||||||
|
name = "${var.name}-maintain-desired"
|
||||||
|
location = var.region
|
||||||
|
|
||||||
|
# autoscale
|
||||||
|
enabled = true
|
||||||
|
target_resource_id = azurerm_linux_virtual_machine_scale_set.workers.id
|
||||||
|
|
||||||
|
profile {
|
||||||
|
name = "default"
|
||||||
|
|
||||||
|
capacity {
|
||||||
|
minimum = var.worker_count
|
||||||
|
default = var.worker_count
|
||||||
|
maximum = var.worker_count
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -84,6 +80,7 @@ data "ct_config" "worker" {
|
|||||||
kubeconfig = indent(10, var.kubeconfig)
|
kubeconfig = indent(10, var.kubeconfig)
|
||||||
ssh_authorized_key = var.ssh_authorized_key
|
ssh_authorized_key = var.ssh_authorized_key
|
||||||
cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
|
cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
|
||||||
|
cluster_domain_suffix = var.cluster_domain_suffix
|
||||||
node_labels = join(",", var.node_labels)
|
node_labels = join(",", var.node_labels)
|
||||||
node_taints = join(",", var.node_taints)
|
node_taints = join(",", var.node_taints)
|
||||||
})
|
})
|
||||||
|
@ -11,7 +11,7 @@ Typhoon distributes upstream Kubernetes, architectural conventions, and cluster
|
|||||||
|
|
||||||
## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>
|
## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>
|
||||||
|
|
||||||
* Kubernetes v1.31.3 (upstream)
|
* Kubernetes v1.30.1 (upstream)
|
||||||
* Single or multi-master, [Calico](https://www.projectcalico.org/) or [Cilium](https://github.com/cilium/cilium) or [flannel](https://github.com/coreos/flannel) networking
|
* Single or multi-master, [Calico](https://www.projectcalico.org/) or [Cilium](https://github.com/cilium/cilium) or [flannel](https://github.com/coreos/flannel) networking
|
||||||
* On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
|
* On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
|
||||||
* Advanced features like [worker pools](https://typhoon.psdn.io/advanced/worker-pools/), [low-priority](https://typhoon.psdn.io/flatcar-linux/azure/#low-priority) workers, and [snippets](https://typhoon.psdn.io/advanced/customization/#hosts) customization
|
* Advanced features like [worker pools](https://typhoon.psdn.io/advanced/worker-pools/), [low-priority](https://typhoon.psdn.io/flatcar-linux/azure/#low-priority) workers, and [snippets](https://typhoon.psdn.io/advanced/customization/#hosts) customization
|
||||||
|
@ -1,6 +1,6 @@
|
|||||||
# Kubernetes assets (kubeconfig, manifests)
|
# Kubernetes assets (kubeconfig, manifests)
|
||||||
module "bootstrap" {
|
module "bootstrap" {
|
||||||
source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=e6a1c7bccfc45ab299b5f8149bc3840f99b30b2b"
|
source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=e1b1e0c75e77e042cf369f463f0e656297a201a8"
|
||||||
|
|
||||||
cluster_name = var.cluster_name
|
cluster_name = var.cluster_name
|
||||||
api_servers = [format("%s.%s", var.cluster_name, var.dns_zone)]
|
api_servers = [format("%s.%s", var.cluster_name, var.dns_zone)]
|
||||||
@ -14,6 +14,9 @@ module "bootstrap" {
|
|||||||
|
|
||||||
pod_cidr = var.pod_cidr
|
pod_cidr = var.pod_cidr
|
||||||
service_cidr = var.service_cidr
|
service_cidr = var.service_cidr
|
||||||
|
cluster_domain_suffix = var.cluster_domain_suffix
|
||||||
|
enable_reporting = var.enable_reporting
|
||||||
|
enable_aggregation = var.enable_aggregation
|
||||||
daemonset_tolerations = var.daemonset_tolerations
|
daemonset_tolerations = var.daemonset_tolerations
|
||||||
components = var.components
|
components = var.components
|
||||||
}
|
}
|
||||||
|
@ -56,7 +56,7 @@ systemd:
|
|||||||
After=docker.service
|
After=docker.service
|
||||||
Wants=rpc-statd.service
|
Wants=rpc-statd.service
|
||||||
[Service]
|
[Service]
|
||||||
Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.31.3
|
Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.30.1
|
||||||
ExecStartPre=/bin/mkdir -p /etc/cni/net.d
|
ExecStartPre=/bin/mkdir -p /etc/cni/net.d
|
||||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
|
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
|
||||||
ExecStartPre=/bin/mkdir -p /opt/cni/bin
|
ExecStartPre=/bin/mkdir -p /opt/cni/bin
|
||||||
@ -105,7 +105,7 @@ systemd:
|
|||||||
Type=oneshot
|
Type=oneshot
|
||||||
RemainAfterExit=true
|
RemainAfterExit=true
|
||||||
WorkingDirectory=/opt/bootstrap
|
WorkingDirectory=/opt/bootstrap
|
||||||
Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.31.3
|
Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.30.1
|
||||||
ExecStart=/usr/bin/docker run \
|
ExecStart=/usr/bin/docker run \
|
||||||
-v /etc/kubernetes/pki:/etc/kubernetes/pki:ro \
|
-v /etc/kubernetes/pki:/etc/kubernetes/pki:ro \
|
||||||
-v /opt/bootstrap/assets:/assets:ro \
|
-v /opt/bootstrap/assets:/assets:ro \
|
||||||
@ -144,7 +144,7 @@ storage:
|
|||||||
cgroupDriver: systemd
|
cgroupDriver: systemd
|
||||||
clusterDNS:
|
clusterDNS:
|
||||||
- ${cluster_dns_service_ip}
|
- ${cluster_dns_service_ip}
|
||||||
clusterDomain: cluster.local
|
clusterDomain: ${cluster_domain_suffix}
|
||||||
healthzPort: 0
|
healthzPort: 0
|
||||||
rotateCertificates: true
|
rotateCertificates: true
|
||||||
shutdownGracePeriod: 45s
|
shutdownGracePeriod: 45s
|
||||||
|
@ -1,9 +1,25 @@
|
|||||||
|
# Discrete DNS records for each controller's private IPv4 for etcd usage
|
||||||
|
resource "azurerm_dns_a_record" "etcds" {
|
||||||
|
count = var.controller_count
|
||||||
|
resource_group_name = var.dns_zone_group
|
||||||
|
|
||||||
|
# DNS Zone name where record should be created
|
||||||
|
zone_name = var.dns_zone
|
||||||
|
|
||||||
|
# DNS record
|
||||||
|
name = format("%s-etcd%d", var.cluster_name, count.index)
|
||||||
|
ttl = 300
|
||||||
|
|
||||||
|
# private IPv4 address for etcd
|
||||||
|
records = [azurerm_network_interface.controllers.*.private_ip_address[count.index]]
|
||||||
|
}
|
||||||
|
|
||||||
locals {
|
locals {
|
||||||
# Container Linux derivative
|
# Container Linux derivative
|
||||||
# flatcar-stable -> Flatcar Linux Stable
|
# flatcar-stable -> Flatcar Linux Stable
|
||||||
channel = split("-", var.os_image)[1]
|
channel = split("-", var.os_image)[1]
|
||||||
offer_suffix = var.controller_arch == "arm64" ? "corevm" : "free"
|
offer_suffix = var.arch == "arm64" ? "corevm" : "free"
|
||||||
urn = var.controller_arch == "arm64" ? local.channel : "${local.channel}-gen2"
|
urn = var.arch == "arm64" ? local.channel : "${local.channel}-gen2"
|
||||||
|
|
||||||
# Typhoon ssh_authorized_key supports RSA or a newer formats (e.g. ed25519).
|
# Typhoon ssh_authorized_key supports RSA or a newer formats (e.g. ed25519).
|
||||||
# However, Azure requires an older RSA key to pass validations. To use a
|
# However, Azure requires an older RSA key to pass validations. To use a
|
||||||
@ -12,25 +28,12 @@ locals {
|
|||||||
azure_authorized_key = var.azure_authorized_key == "" ? var.ssh_authorized_key : var.azure_authorized_key
|
azure_authorized_key = var.azure_authorized_key == "" ? var.ssh_authorized_key : var.azure_authorized_key
|
||||||
}
|
}
|
||||||
|
|
||||||
# Discrete DNS records for each controller's private IPv4 for etcd usage
|
|
||||||
resource "azurerm_dns_a_record" "etcds" {
|
|
||||||
count = var.controller_count
|
|
||||||
|
|
||||||
# DNS Zone name where record should be created
|
|
||||||
zone_name = var.dns_zone
|
|
||||||
resource_group_name = var.dns_zone_group
|
|
||||||
# DNS record
|
|
||||||
name = format("%s-etcd%d", var.cluster_name, count.index)
|
|
||||||
ttl = 300
|
|
||||||
# private IPv4 address for etcd
|
|
||||||
records = [azurerm_network_interface.controllers[count.index].private_ip_address]
|
|
||||||
}
|
|
||||||
|
|
||||||
# Controller availability set to spread controllers
|
# Controller availability set to spread controllers
|
||||||
resource "azurerm_availability_set" "controllers" {
|
resource "azurerm_availability_set" "controllers" {
|
||||||
|
resource_group_name = azurerm_resource_group.cluster.name
|
||||||
|
|
||||||
name = "${var.cluster_name}-controllers"
|
name = "${var.cluster_name}-controllers"
|
||||||
resource_group_name = azurerm_resource_group.cluster.name
|
location = var.region
|
||||||
location = var.location
|
|
||||||
platform_fault_domain_count = 2
|
platform_fault_domain_count = 2
|
||||||
platform_update_domain_count = 4
|
platform_update_domain_count = 4
|
||||||
managed = true
|
managed = true
|
||||||
@ -38,20 +41,25 @@ resource "azurerm_availability_set" "controllers" {
|
|||||||
|
|
||||||
# Controller instances
|
# Controller instances
|
||||||
resource "azurerm_linux_virtual_machine" "controllers" {
|
resource "azurerm_linux_virtual_machine" "controllers" {
|
||||||
count = var.controller_count
|
count = var.controller_count
|
||||||
|
resource_group_name = azurerm_resource_group.cluster.name
|
||||||
|
|
||||||
name = "${var.cluster_name}-controller-${count.index}"
|
name = "${var.cluster_name}-controller-${count.index}"
|
||||||
resource_group_name = azurerm_resource_group.cluster.name
|
location = var.region
|
||||||
location = var.location
|
|
||||||
availability_set_id = azurerm_availability_set.controllers.id
|
availability_set_id = azurerm_availability_set.controllers.id
|
||||||
size = var.controller_type
|
|
||||||
|
size = var.controller_type
|
||||||
|
custom_data = base64encode(data.ct_config.controllers.*.rendered[count.index])
|
||||||
|
boot_diagnostics {
|
||||||
|
# defaults to a managed storage account
|
||||||
|
}
|
||||||
|
|
||||||
# storage
|
# storage
|
||||||
os_disk {
|
os_disk {
|
||||||
name = "${var.cluster_name}-controller-${count.index}"
|
name = "${var.cluster_name}-controller-${count.index}"
|
||||||
storage_account_type = var.controller_disk_type
|
|
||||||
disk_size_gb = var.controller_disk_size
|
|
||||||
caching = "None"
|
caching = "None"
|
||||||
|
disk_size_gb = var.disk_size
|
||||||
|
storage_account_type = "Premium_LRS"
|
||||||
}
|
}
|
||||||
|
|
||||||
# Flatcar Container Linux
|
# Flatcar Container Linux
|
||||||
@ -63,7 +71,7 @@ resource "azurerm_linux_virtual_machine" "controllers" {
|
|||||||
}
|
}
|
||||||
|
|
||||||
dynamic "plan" {
|
dynamic "plan" {
|
||||||
for_each = var.controller_arch == "arm64" ? [] : [1]
|
for_each = var.arch == "arm64" ? [] : [1]
|
||||||
content {
|
content {
|
||||||
publisher = "kinvolk"
|
publisher = "kinvolk"
|
||||||
product = "flatcar-container-linux-${local.offer_suffix}"
|
product = "flatcar-container-linux-${local.offer_suffix}"
|
||||||
@ -76,13 +84,7 @@ resource "azurerm_linux_virtual_machine" "controllers" {
|
|||||||
azurerm_network_interface.controllers[count.index].id
|
azurerm_network_interface.controllers[count.index].id
|
||||||
]
|
]
|
||||||
|
|
||||||
# boot
|
# Azure requires setting admin_ssh_key, though Ignition custom_data handles it too
|
||||||
custom_data = base64encode(data.ct_config.controllers[count.index].rendered)
|
|
||||||
boot_diagnostics {
|
|
||||||
# defaults to a managed storage account
|
|
||||||
}
|
|
||||||
|
|
||||||
# Azure requires an RSA admin_ssh_key
|
|
||||||
admin_username = "core"
|
admin_username = "core"
|
||||||
admin_ssh_key {
|
admin_ssh_key {
|
||||||
username = "core"
|
username = "core"
|
||||||
@ -97,52 +99,31 @@ resource "azurerm_linux_virtual_machine" "controllers" {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
# Controller node public IPv4 addresses
|
# Controller public IPv4 addresses
|
||||||
resource "azurerm_public_ip" "controllers-ipv4" {
|
resource "azurerm_public_ip" "controllers" {
|
||||||
count = var.controller_count
|
count = var.controller_count
|
||||||
|
|
||||||
name = "${var.cluster_name}-controller-${count.index}-ipv4"
|
|
||||||
resource_group_name = azurerm_resource_group.cluster.name
|
resource_group_name = azurerm_resource_group.cluster.name
|
||||||
location = azurerm_resource_group.cluster.location
|
|
||||||
ip_version = "IPv4"
|
name = "${var.cluster_name}-controller-${count.index}"
|
||||||
sku = "Standard"
|
location = azurerm_resource_group.cluster.location
|
||||||
allocation_method = "Static"
|
sku = "Standard"
|
||||||
|
allocation_method = "Static"
|
||||||
}
|
}
|
||||||
|
|
||||||
# Controller node public IPv6 addresses
|
# Controller NICs with public and private IPv4
|
||||||
resource "azurerm_public_ip" "controllers-ipv6" {
|
|
||||||
count = var.controller_count
|
|
||||||
|
|
||||||
name = "${var.cluster_name}-controller-${count.index}-ipv6"
|
|
||||||
resource_group_name = azurerm_resource_group.cluster.name
|
|
||||||
location = azurerm_resource_group.cluster.location
|
|
||||||
ip_version = "IPv6"
|
|
||||||
sku = "Standard"
|
|
||||||
allocation_method = "Static"
|
|
||||||
}
|
|
||||||
|
|
||||||
# Controllers' network interfaces
|
|
||||||
resource "azurerm_network_interface" "controllers" {
|
resource "azurerm_network_interface" "controllers" {
|
||||||
count = var.controller_count
|
count = var.controller_count
|
||||||
|
|
||||||
name = "${var.cluster_name}-controller-${count.index}"
|
|
||||||
resource_group_name = azurerm_resource_group.cluster.name
|
resource_group_name = azurerm_resource_group.cluster.name
|
||||||
location = azurerm_resource_group.cluster.location
|
|
||||||
|
name = "${var.cluster_name}-controller-${count.index}"
|
||||||
|
location = azurerm_resource_group.cluster.location
|
||||||
|
|
||||||
ip_configuration {
|
ip_configuration {
|
||||||
name = "ipv4"
|
name = "ip0"
|
||||||
primary = true
|
|
||||||
subnet_id = azurerm_subnet.controller.id
|
subnet_id = azurerm_subnet.controller.id
|
||||||
private_ip_address_allocation = "Dynamic"
|
private_ip_address_allocation = "Dynamic"
|
||||||
private_ip_address_version = "IPv4"
|
# instance public IPv4
|
||||||
public_ip_address_id = azurerm_public_ip.controllers-ipv4[count.index].id
|
public_ip_address_id = azurerm_public_ip.controllers.*.id[count.index]
|
||||||
}
|
|
||||||
ip_configuration {
|
|
||||||
name = "ipv6"
|
|
||||||
subnet_id = azurerm_subnet.controller.id
|
|
||||||
private_ip_address_allocation = "Dynamic"
|
|
||||||
private_ip_address_version = "IPv6"
|
|
||||||
public_ip_address_id = azurerm_public_ip.controllers-ipv6[count.index].id
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -154,21 +135,13 @@ resource "azurerm_network_interface_security_group_association" "controllers" {
|
|||||||
network_security_group_id = azurerm_network_security_group.controller.id
|
network_security_group_id = azurerm_network_security_group.controller.id
|
||||||
}
|
}
|
||||||
|
|
||||||
# Associate controller network interface with controller backend address pools
|
# Associate controller network interface with controller backend address pool
|
||||||
resource "azurerm_network_interface_backend_address_pool_association" "controllers-ipv4" {
|
resource "azurerm_network_interface_backend_address_pool_association" "controllers" {
|
||||||
count = var.controller_count
|
count = var.controller_count
|
||||||
|
|
||||||
network_interface_id = azurerm_network_interface.controllers[count.index].id
|
network_interface_id = azurerm_network_interface.controllers[count.index].id
|
||||||
ip_configuration_name = "ipv4"
|
ip_configuration_name = "ip0"
|
||||||
backend_address_pool_id = azurerm_lb_backend_address_pool.controller-ipv4.id
|
backend_address_pool_id = azurerm_lb_backend_address_pool.controller.id
|
||||||
}
|
|
||||||
|
|
||||||
resource "azurerm_network_interface_backend_address_pool_association" "controllers-ipv6" {
|
|
||||||
count = var.controller_count
|
|
||||||
|
|
||||||
network_interface_id = azurerm_network_interface.controllers[count.index].id
|
|
||||||
ip_configuration_name = "ipv6"
|
|
||||||
backend_address_pool_id = azurerm_lb_backend_address_pool.controller-ipv6.id
|
|
||||||
}
|
}
|
||||||
|
|
||||||
# Flatcar Linux controllers
|
# Flatcar Linux controllers
|
||||||
@ -185,6 +158,7 @@ data "ct_config" "controllers" {
|
|||||||
kubeconfig = indent(10, module.bootstrap.kubeconfig-kubelet)
|
kubeconfig = indent(10, module.bootstrap.kubeconfig-kubelet)
|
||||||
ssh_authorized_key = var.ssh_authorized_key
|
ssh_authorized_key = var.ssh_authorized_key
|
||||||
cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
|
cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
|
||||||
|
cluster_domain_suffix = var.cluster_domain_suffix
|
||||||
})
|
})
|
||||||
strict = true
|
strict = true
|
||||||
snippets = var.controller_snippets
|
snippets = var.controller_snippets
|
||||||
|
@@ -1,164 +1,116 @@
-# DNS A record for the apiserver load balancer
+# DNS record for the apiserver load balancer
 resource "azurerm_dns_a_record" "apiserver" {
-  # DNS Zone name where record should be created
-  zone_name = var.dns_zone
   resource_group_name = var.dns_zone_group
 
+  # DNS Zone name where record should be created
+  zone_name = var.dns_zone
+
   # DNS record
   name = var.cluster_name
   ttl = 300
 
   # IPv4 address of apiserver load balancer
-  records = [azurerm_public_ip.frontend-ipv4.ip_address]
+  records = [azurerm_public_ip.apiserver-ipv4.ip_address]
 }
 
-# DNS AAAA record for the apiserver load balancer
-resource "azurerm_dns_aaaa_record" "apiserver" {
-  # DNS Zone name where record should be created
-  zone_name = var.dns_zone
-  resource_group_name = var.dns_zone_group
-
-  # DNS record
-  name = var.cluster_name
-  ttl = 300
-
-  # IPv6 address of apiserver load balancer
-  records = [azurerm_public_ip.frontend-ipv6.ip_address]
-}
-
-# Static IPv4 address for the load balancer
-resource "azurerm_public_ip" "frontend-ipv4" {
-  name = "${var.cluster_name}-frontend-ipv4"
+# Static IPv4 address for the apiserver frontend
+resource "azurerm_public_ip" "apiserver-ipv4" {
   resource_group_name = azurerm_resource_group.cluster.name
-  location = var.location
-  ip_version = "IPv4"
-  sku = "Standard"
-  allocation_method = "Static"
+
+  name = "${var.cluster_name}-apiserver-ipv4"
+  location = var.region
+  sku = "Standard"
+  allocation_method = "Static"
 }
 
-# Static IPv6 address for the load balancer
-resource "azurerm_public_ip" "frontend-ipv6" {
-  name = "${var.cluster_name}-frontend-ipv6"
+# Static IPv4 address for the ingress frontend
+resource "azurerm_public_ip" "ingress-ipv4" {
   resource_group_name = azurerm_resource_group.cluster.name
-  location = var.location
-  ip_version = "IPv6"
-  sku = "Standard"
-  allocation_method = "Static"
+
+  name = "${var.cluster_name}-ingress-ipv4"
+  location = var.region
+  sku = "Standard"
+  allocation_method = "Static"
 }
 
 # Network Load Balancer for apiservers and ingress
 resource "azurerm_lb" "cluster" {
-  name = var.cluster_name
   resource_group_name = azurerm_resource_group.cluster.name
-  location = var.location
-  sku = "Standard"
+
+  name = var.cluster_name
+  location = var.region
+  sku = "Standard"
 
   frontend_ip_configuration {
-    name = "frontend-ipv4"
-    public_ip_address_id = azurerm_public_ip.frontend-ipv4.id
+    name = "apiserver"
+    public_ip_address_id = azurerm_public_ip.apiserver-ipv4.id
   }
 
   frontend_ip_configuration {
-    name = "frontend-ipv6"
-    public_ip_address_id = azurerm_public_ip.frontend-ipv6.id
+    name = "ingress"
+    public_ip_address_id = azurerm_public_ip.ingress-ipv4.id
   }
 }
 
-resource "azurerm_lb_rule" "apiserver-ipv4" {
-  name = "apiserver-ipv4"
+resource "azurerm_lb_rule" "apiserver" {
+  name = "apiserver"
   loadbalancer_id = azurerm_lb.cluster.id
-  frontend_ip_configuration_name = "frontend-ipv4"
-  disable_outbound_snat = true
+  frontend_ip_configuration_name = "apiserver"
 
   protocol = "Tcp"
   frontend_port = 6443
   backend_port = 6443
-  backend_address_pool_ids = [azurerm_lb_backend_address_pool.controller-ipv4.id]
+  backend_address_pool_ids = [azurerm_lb_backend_address_pool.controller.id]
   probe_id = azurerm_lb_probe.apiserver.id
 }
 
-resource "azurerm_lb_rule" "apiserver-ipv6" {
-  name = "apiserver-ipv6"
-  loadbalancer_id = azurerm_lb.cluster.id
-  frontend_ip_configuration_name = "frontend-ipv6"
-  disable_outbound_snat = true
-
-  protocol = "Tcp"
-  frontend_port = 6443
-  backend_port = 6443
-  backend_address_pool_ids = [azurerm_lb_backend_address_pool.controller-ipv6.id]
-  probe_id = azurerm_lb_probe.apiserver.id
-}
-
-resource "azurerm_lb_rule" "ingress-http-ipv4" {
-  name = "ingress-http-ipv4"
+resource "azurerm_lb_rule" "ingress-http" {
+  name = "ingress-http"
   loadbalancer_id = azurerm_lb.cluster.id
-  frontend_ip_configuration_name = "frontend-ipv4"
+  frontend_ip_configuration_name = "ingress"
   disable_outbound_snat = true
 
   protocol = "Tcp"
   frontend_port = 80
   backend_port = 80
-  backend_address_pool_ids = [azurerm_lb_backend_address_pool.worker-ipv4.id]
+  backend_address_pool_ids = [azurerm_lb_backend_address_pool.worker.id]
   probe_id = azurerm_lb_probe.ingress.id
 }
 
-resource "azurerm_lb_rule" "ingress-https-ipv4" {
-  name = "ingress-https-ipv4"
+resource "azurerm_lb_rule" "ingress-https" {
+  name = "ingress-https"
   loadbalancer_id = azurerm_lb.cluster.id
-  frontend_ip_configuration_name = "frontend-ipv4"
+  frontend_ip_configuration_name = "ingress"
   disable_outbound_snat = true
 
   protocol = "Tcp"
   frontend_port = 443
   backend_port = 443
-  backend_address_pool_ids = [azurerm_lb_backend_address_pool.worker-ipv4.id]
+  backend_address_pool_ids = [azurerm_lb_backend_address_pool.worker.id]
   probe_id = azurerm_lb_probe.ingress.id
 }
 
-resource "azurerm_lb_rule" "ingress-http-ipv6" {
-  name = "ingress-http-ipv6"
-  loadbalancer_id = azurerm_lb.cluster.id
-  frontend_ip_configuration_name = "frontend-ipv6"
-  disable_outbound_snat = true
-
-  protocol = "Tcp"
-  frontend_port = 80
-  backend_port = 80
-  backend_address_pool_ids = [azurerm_lb_backend_address_pool.worker-ipv6.id]
-  probe_id = azurerm_lb_probe.ingress.id
-}
-
-resource "azurerm_lb_rule" "ingress-https-ipv6" {
-  name = "ingress-https-ipv6"
-  loadbalancer_id = azurerm_lb.cluster.id
-  frontend_ip_configuration_name = "frontend-ipv6"
-  disable_outbound_snat = true
-
-  protocol = "Tcp"
-  frontend_port = 443
-  backend_port = 443
-  backend_address_pool_ids = [azurerm_lb_backend_address_pool.worker-ipv6.id]
-  probe_id = azurerm_lb_probe.ingress.id
-}
-
-# Backend Address Pools
-
-# Address pool of controllers
-resource "azurerm_lb_backend_address_pool" "controller-ipv4" {
-  name = "controller-ipv4"
-  loadbalancer_id = azurerm_lb.cluster.id
-}
-
-resource "azurerm_lb_backend_address_pool" "controller-ipv6" {
-  name = "controller-ipv6"
-  loadbalancer_id = azurerm_lb.cluster.id
-}
-
-# Address pools for workers
-resource "azurerm_lb_backend_address_pool" "worker-ipv4" {
-  name = "worker-ipv4"
-  loadbalancer_id = azurerm_lb.cluster.id
-}
-
-resource "azurerm_lb_backend_address_pool" "worker-ipv6" {
-  name = "worker-ipv6"
+# Worker outbound TCP/UDP SNAT
+resource "azurerm_lb_outbound_rule" "worker-outbound" {
+  name = "worker"
+  loadbalancer_id = azurerm_lb.cluster.id
+  frontend_ip_configuration {
+    name = "ingress"
+  }
+
+  protocol = "All"
+  backend_address_pool_id = azurerm_lb_backend_address_pool.worker.id
+}
+
+# Address pool of controllers
+resource "azurerm_lb_backend_address_pool" "controller" {
+  name = "controller"
+  loadbalancer_id = azurerm_lb.cluster.id
+}
+
+# Address pool of workers
+resource "azurerm_lb_backend_address_pool" "worker" {
+  name = "worker"
   loadbalancer_id = azurerm_lb.cluster.id
 }
@@ -170,8 +122,10 @@ resource "azurerm_lb_probe" "apiserver" {
   loadbalancer_id = azurerm_lb.cluster.id
   protocol = "Tcp"
   port = 6443
 
   # unhealthy threshold
   number_of_probes = 3
 
   interval_in_seconds = 5
 }
 
@@ -182,29 +136,10 @@ resource "azurerm_lb_probe" "ingress" {
   protocol = "Http"
   port = 10254
   request_path = "/healthz"
 
   # unhealthy threshold
   number_of_probes = 3
 
   interval_in_seconds = 5
 }
 
-# Outbound SNAT
-
-resource "azurerm_lb_outbound_rule" "outbound-ipv4" {
-  name = "outbound-ipv4"
-  protocol = "All"
-  loadbalancer_id = azurerm_lb.cluster.id
-  backend_address_pool_id = azurerm_lb_backend_address_pool.worker-ipv4.id
-  frontend_ip_configuration {
-    name = "frontend-ipv4"
-  }
-}
-
-resource "azurerm_lb_outbound_rule" "outbound-ipv6" {
-  name = "outbound-ipv6"
-  protocol = "All"
-  loadbalancer_id = azurerm_lb.cluster.id
-  backend_address_pool_id = azurerm_lb_backend_address_pool.worker-ipv6.id
-  frontend_ip_configuration {
-    name = "frontend-ipv6"
-  }
-}
@@ -1,6 +0,0 @@
-locals {
-  backend_address_pool_ids = {
-    ipv4 = [azurerm_lb_backend_address_pool.worker-ipv4.id]
-    ipv6 = [azurerm_lb_backend_address_pool.worker-ipv6.id]
-  }
-}
@@ -1,63 +1,27 @@
-# Choose an IPv6 ULA subnet at random
-# https://datatracker.ietf.org/doc/html/rfc4193
-resource "random_id" "ula-netnum" {
-  byte_length = 5 # 40 bits
-}
-
-locals {
-  # fd00::/8 -> shift 40 -> 2^40 possible /48 subnets
-  ula-range = cidrsubnet("fd00::/8", 40, random_id.ula-netnum.dec)
-  network_cidr = {
-    ipv4 = var.network_cidr.ipv4
-    ipv6 = length(var.network_cidr.ipv6) > 0 ? var.network_cidr.ipv6 : [local.ula-range]
-  }
-
-  # Subdivide the virtual network into subnets
-  # - controllers use netnum 0
-  # - workers use netnum 1
-  controller_subnets = {
-    ipv4 = [for i, cidr in local.network_cidr.ipv4 : cidrsubnet(cidr, 1, 0)]
-    ipv6 = [for i, cidr in local.network_cidr.ipv6 : cidrsubnet(cidr, 16, 0)]
-  }
-  worker_subnets = {
-    ipv4 = [for i, cidr in local.network_cidr.ipv4 : cidrsubnet(cidr, 1, 1)]
-    ipv6 = [for i, cidr in local.network_cidr.ipv6 : cidrsubnet(cidr, 16, 1)]
-  }
-  cluster_subnets = {
-    ipv4 = concat(local.controller_subnets.ipv4, local.worker_subnets.ipv4)
-    ipv6 = concat(local.controller_subnets.ipv6, local.worker_subnets.ipv6)
-  }
-}
-
 # Organize cluster into a resource group
 resource "azurerm_resource_group" "cluster" {
   name = var.cluster_name
-  location = var.location
+  location = var.region
 }
 
 resource "azurerm_virtual_network" "network" {
-  name = var.cluster_name
   resource_group_name = azurerm_resource_group.cluster.name
-  location = azurerm_resource_group.cluster.location
-  address_space = concat(
-    local.network_cidr.ipv4,
-    local.network_cidr.ipv6
-  )
+
+  name = var.cluster_name
+  location = azurerm_resource_group.cluster.location
+  address_space = [var.host_cidr]
 }
 
-# Subnets - separate subnets for controllers and workers because Azure
-# network security groups are oriented around address prefixes rather
-# than instance tags (GCP) or security group membership (AWS)
+# Subnets - separate subnets for controller and workers because Azure
+# network security groups are based on IPv4 CIDR rather than instance
+# tags like GCP or security group membership like AWS
 
 resource "azurerm_subnet" "controller" {
+  resource_group_name = azurerm_resource_group.cluster.name
+
   name = "controller"
-  resource_group_name = azurerm_resource_group.cluster.name
   virtual_network_name = azurerm_virtual_network.network.name
-  address_prefixes = concat(
-    local.controller_subnets.ipv4,
-    local.controller_subnets.ipv6,
-  )
-  default_outbound_access_enabled = false
+  address_prefixes = [cidrsubnet(var.host_cidr, 1, 0)]
 }
 
 resource "azurerm_subnet_network_security_group_association" "controller" {
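The locals deleted above are how the main branch auto-selects an IPv6 ULA network when `network_cidr.ipv6` is left empty: 40 random bits index one of the 2^40 possible /48 networks under fd00::/8 (RFC 4193). A standalone sketch of that mechanism, extracted for illustration (not a file in either branch):

```hcl
terraform {
  required_providers {
    random = { source = "hashicorp/random" }
  }
}

# 40 random bits choose one /48 out of the 2^40 under fd00::/8
resource "random_id" "ula_netnum" {
  byte_length = 5 # 40 bits
}

locals {
  # /8 prefix + 40 new bits = a /48 ULA range, e.g. "fd5e:xxxx:xxxx::/48"
  ula_range = cidrsubnet("fd00::/8", 40, random_id.ula_netnum.dec)
}

output "ula_range" {
  value = local.ula_range
}
```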
@@ -66,14 +30,11 @@ resource "azurerm_subnet_network_security_group_association" "controller" {
 }
 
 resource "azurerm_subnet" "worker" {
+  resource_group_name = azurerm_resource_group.cluster.name
+
   name = "worker"
-  resource_group_name = azurerm_resource_group.cluster.name
   virtual_network_name = azurerm_virtual_network.network.name
-  address_prefixes = concat(
-    local.worker_subnets.ipv4,
-    local.worker_subnets.ipv6,
-  )
-  default_outbound_access_enabled = false
+  address_prefixes = [cidrsubnet(var.host_cidr, 1, 1)]
 }
 
 resource "azurerm_subnet_network_security_group_association" "worker" {
@@ -6,18 +6,13 @@ output "kubeconfig-admin" {
 # Outputs for Kubernetes Ingress
 
 output "ingress_static_ipv4" {
-  value = azurerm_public_ip.frontend-ipv4.ip_address
+  value = azurerm_public_ip.ingress-ipv4.ip_address
   description = "IPv4 address of the load balancer for distributing traffic to Ingress controllers"
 }
 
-output "ingress_static_ipv6" {
-  value = azurerm_public_ip.frontend-ipv6.ip_address
-  description = "IPv6 address of the load balancer for distributing traffic to Ingress controllers"
-}
-
 # Outputs for worker pools
 
-output "location" {
+output "region" {
   value = azurerm_resource_group.cluster.location
 }
 
@@ -56,12 +51,12 @@ output "worker_security_group_name" {
 
 output "controller_address_prefixes" {
   description = "Controller network subnet CIDR addresses (for source/destination)"
-  value = local.controller_subnets
+  value = azurerm_subnet.controller.address_prefixes
 }
 
 output "worker_address_prefixes" {
   description = "Worker network subnet CIDR addresses (for source/destination)"
-  value = local.worker_subnets
+  value = azurerm_subnet.worker.address_prefixes
 }
 
 # Outputs for custom load balancing
@@ -71,12 +66,9 @@ output "loadbalancer_id" {
   value = azurerm_lb.cluster.id
 }
 
-output "backend_address_pool_ids" {
-  description = "IDs of the worker backend address pools"
-  value = {
-    ipv4 = [azurerm_lb_backend_address_pool.worker-ipv4.id]
-    ipv6 = [azurerm_lb_backend_address_pool.worker-ipv6.id]
-  }
+output "backend_address_pool_id" {
+  description = "ID of the worker backend address pool"
+  value = azurerm_lb_backend_address_pool.worker.id
 }
 
 # Outputs for debug
@@ -1,223 +1,214 @@
 # Controller security group
 
 resource "azurerm_network_security_group" "controller" {
-  name = "${var.cluster_name}-controller"
   resource_group_name = azurerm_resource_group.cluster.name
-  location = azurerm_resource_group.cluster.location
+
+  name = "${var.cluster_name}-controller"
+  location = azurerm_resource_group.cluster.location
 }
 
 resource "azurerm_network_security_rule" "controller-icmp" {
-  for_each = local.controller_subnets
+  resource_group_name = azurerm_resource_group.cluster.name
 
-  name = "allow-icmp-${each.key}"
-  resource_group_name = azurerm_resource_group.cluster.name
+  name = "allow-icmp"
   network_security_group_name = azurerm_network_security_group.controller.name
-  priority = 1995 + (each.key == "ipv4" ? 0 : 1)
+  priority = "1995"
   access = "Allow"
   direction = "Inbound"
   protocol = "Icmp"
   source_port_range = "*"
   destination_port_range = "*"
-  source_address_prefixes = local.cluster_subnets[each.key]
-  destination_address_prefixes = local.controller_subnets[each.key]
+  source_address_prefixes = concat(azurerm_subnet.controller.address_prefixes, azurerm_subnet.worker.address_prefixes)
+  destination_address_prefixes = azurerm_subnet.controller.address_prefixes
 }
 
 resource "azurerm_network_security_rule" "controller-ssh" {
-  for_each = local.controller_subnets
+  resource_group_name = azurerm_resource_group.cluster.name
 
-  name = "allow-ssh-${each.key}"
-  resource_group_name = azurerm_resource_group.cluster.name
+  name = "allow-ssh"
   network_security_group_name = azurerm_network_security_group.controller.name
-  priority = 2000 + (each.key == "ipv4" ? 0 : 1)
+  priority = "2000"
   access = "Allow"
   direction = "Inbound"
   protocol = "Tcp"
   source_port_range = "*"
   destination_port_range = "22"
   source_address_prefix = "*"
-  destination_address_prefixes = local.controller_subnets[each.key]
+  destination_address_prefixes = azurerm_subnet.controller.address_prefixes
 }
 
 resource "azurerm_network_security_rule" "controller-etcd" {
-  for_each = local.controller_subnets
+  resource_group_name = azurerm_resource_group.cluster.name
 
-  name = "allow-etcd-${each.key}"
-  resource_group_name = azurerm_resource_group.cluster.name
+  name = "allow-etcd"
   network_security_group_name = azurerm_network_security_group.controller.name
-  priority = 2005 + (each.key == "ipv4" ? 0 : 1)
+  priority = "2005"
   access = "Allow"
   direction = "Inbound"
   protocol = "Tcp"
   source_port_range = "*"
   destination_port_range = "2379-2380"
-  source_address_prefixes = local.controller_subnets[each.key]
-  destination_address_prefixes = local.controller_subnets[each.key]
+  source_address_prefixes = azurerm_subnet.controller.address_prefixes
+  destination_address_prefixes = azurerm_subnet.controller.address_prefixes
 }
 
 # Allow Prometheus to scrape etcd metrics
 resource "azurerm_network_security_rule" "controller-etcd-metrics" {
-  for_each = local.controller_subnets
+  resource_group_name = azurerm_resource_group.cluster.name
 
-  name = "allow-etcd-metrics-${each.key}"
-  resource_group_name = azurerm_resource_group.cluster.name
+  name = "allow-etcd-metrics"
   network_security_group_name = azurerm_network_security_group.controller.name
-  priority = 2010 + (each.key == "ipv4" ? 0 : 1)
+  priority = "2010"
   access = "Allow"
   direction = "Inbound"
   protocol = "Tcp"
   source_port_range = "*"
   destination_port_range = "2381"
-  source_address_prefixes = local.worker_subnets[each.key]
-  destination_address_prefixes = local.controller_subnets[each.key]
+  source_address_prefixes = azurerm_subnet.worker.address_prefixes
+  destination_address_prefixes = azurerm_subnet.controller.address_prefixes
 }
 
 # Allow Prometheus to scrape kube-proxy metrics
 resource "azurerm_network_security_rule" "controller-kube-proxy" {
-  for_each = local.controller_subnets
+  resource_group_name = azurerm_resource_group.cluster.name
 
-  name = "allow-kube-proxy-metrics-${each.key}"
-  resource_group_name = azurerm_resource_group.cluster.name
+  name = "allow-kube-proxy-metrics"
   network_security_group_name = azurerm_network_security_group.controller.name
-  priority = 2012 + (each.key == "ipv4" ? 0 : 1)
+  priority = "2011"
   access = "Allow"
   direction = "Inbound"
   protocol = "Tcp"
   source_port_range = "*"
   destination_port_range = "10249"
-  source_address_prefixes = local.worker_subnets[each.key]
-  destination_address_prefixes = local.controller_subnets[each.key]
+  source_address_prefixes = azurerm_subnet.worker.address_prefixes
+  destination_address_prefixes = azurerm_subnet.controller.address_prefixes
 }
 
 # Allow Prometheus to scrape kube-scheduler and kube-controller-manager metrics
 resource "azurerm_network_security_rule" "controller-kube-metrics" {
-  for_each = local.controller_subnets
+  resource_group_name = azurerm_resource_group.cluster.name
 
-  name = "allow-kube-metrics-${each.key}"
-  resource_group_name = azurerm_resource_group.cluster.name
+  name = "allow-kube-metrics"
   network_security_group_name = azurerm_network_security_group.controller.name
-  priority = 2014 + (each.key == "ipv4" ? 0 : 1)
+  priority = "2012"
   access = "Allow"
   direction = "Inbound"
   protocol = "Tcp"
   source_port_range = "*"
   destination_port_range = "10257-10259"
-  source_address_prefixes = local.worker_subnets[each.key]
-  destination_address_prefixes = local.controller_subnets[each.key]
+  source_address_prefixes = azurerm_subnet.worker.address_prefixes
+  destination_address_prefixes = azurerm_subnet.controller.address_prefixes
 }
 
 resource "azurerm_network_security_rule" "controller-apiserver" {
-  for_each = local.controller_subnets
+  resource_group_name = azurerm_resource_group.cluster.name
 
-  name = "allow-apiserver-${each.key}"
-  resource_group_name = azurerm_resource_group.cluster.name
+  name = "allow-apiserver"
   network_security_group_name = azurerm_network_security_group.controller.name
-  priority = 2016 + (each.key == "ipv4" ? 0 : 1)
+  priority = "2015"
   access = "Allow"
   direction = "Inbound"
   protocol = "Tcp"
   source_port_range = "*"
   destination_port_range = "6443"
   source_address_prefix = "*"
-  destination_address_prefixes = local.controller_subnets[each.key]
+  destination_address_prefixes = azurerm_subnet.controller.address_prefixes
 }
 
 resource "azurerm_network_security_rule" "controller-cilium-health" {
-  for_each = var.networking == "cilium" ? local.controller_subnets : {}
+  resource_group_name = azurerm_resource_group.cluster.name
+  count = var.networking == "cilium" ? 1 : 0
 
-  name = "allow-cilium-health-${each.key}"
-  resource_group_name = azurerm_resource_group.cluster.name
+  name = "allow-cilium-health"
   network_security_group_name = azurerm_network_security_group.controller.name
-  priority = 2018 + (each.key == "ipv4" ? 0 : 1)
+  priority = "2018"
   access = "Allow"
   direction = "Inbound"
   protocol = "Tcp"
   source_port_range = "*"
   destination_port_range = "4240"
-  source_address_prefixes = local.cluster_subnets[each.key]
-  destination_address_prefixes = local.controller_subnets[each.key]
+  source_address_prefixes = concat(azurerm_subnet.controller.address_prefixes, azurerm_subnet.worker.address_prefixes)
+  destination_address_prefixes = azurerm_subnet.controller.address_prefixes
 }
 
 resource "azurerm_network_security_rule" "controller-cilium-metrics" {
-  for_each = var.networking == "cilium" ? local.controller_subnets : {}
+  resource_group_name = azurerm_resource_group.cluster.name
+  count = var.networking == "cilium" ? 1 : 0
 
-  name = "allow-cilium-metrics-${each.key}"
-  resource_group_name = azurerm_resource_group.cluster.name
+  name = "allow-cilium-metrics"
   network_security_group_name = azurerm_network_security_group.controller.name
-  priority = 2035 + (each.key == "ipv4" ? 0 : 1)
+  priority = "2019"
   access = "Allow"
   direction = "Inbound"
   protocol = "Tcp"
   source_port_range = "*"
   destination_port_range = "9962-9965"
-  source_address_prefixes = local.cluster_subnets[each.key]
-  destination_address_prefixes = local.controller_subnets[each.key]
+  source_address_prefixes = concat(azurerm_subnet.controller.address_prefixes, azurerm_subnet.worker.address_prefixes)
+  destination_address_prefixes = azurerm_subnet.controller.address_prefixes
 }
 
 resource "azurerm_network_security_rule" "controller-vxlan" {
-  for_each = local.controller_subnets
+  resource_group_name = azurerm_resource_group.cluster.name
 
-  name = "allow-vxlan-${each.key}"
-  resource_group_name = azurerm_resource_group.cluster.name
+  name = "allow-vxlan"
   network_security_group_name = azurerm_network_security_group.controller.name
-  priority = 2020 + (each.key == "ipv4" ? 0 : 1)
+  priority = "2020"
   access = "Allow"
   direction = "Inbound"
   protocol = "Udp"
   source_port_range = "*"
   destination_port_range = "4789"
-  source_address_prefixes = local.cluster_subnets[each.key]
-  destination_address_prefixes = local.controller_subnets[each.key]
+  source_address_prefixes = concat(azurerm_subnet.controller.address_prefixes, azurerm_subnet.worker.address_prefixes)
+  destination_address_prefixes = azurerm_subnet.controller.address_prefixes
 }
 
 resource "azurerm_network_security_rule" "controller-linux-vxlan" {
-  for_each = local.controller_subnets
+  resource_group_name = azurerm_resource_group.cluster.name
 
-  name = "allow-linux-vxlan-${each.key}"
-  resource_group_name = azurerm_resource_group.cluster.name
+  name = "allow-linux-vxlan"
   network_security_group_name = azurerm_network_security_group.controller.name
-  priority = 2022 + (each.key == "ipv4" ? 0 : 1)
+  priority = "2021"
   access = "Allow"
   direction = "Inbound"
   protocol = "Udp"
   source_port_range = "*"
   destination_port_range = "8472"
-  source_address_prefixes = local.cluster_subnets[each.key]
-  destination_address_prefixes = local.controller_subnets[each.key]
+  source_address_prefixes = concat(azurerm_subnet.controller.address_prefixes, azurerm_subnet.worker.address_prefixes)
+  destination_address_prefixes = azurerm_subnet.controller.address_prefixes
 }
 
 # Allow Prometheus to scrape node-exporter daemonset
 resource "azurerm_network_security_rule" "controller-node-exporter" {
-  for_each = local.controller_subnets
+  resource_group_name = azurerm_resource_group.cluster.name
 
-  name = "allow-node-exporter-${each.key}"
-  resource_group_name = azurerm_resource_group.cluster.name
+  name = "allow-node-exporter"
   network_security_group_name = azurerm_network_security_group.controller.name
-  priority = 2025 + (each.key == "ipv4" ? 0 : 1)
+  priority = "2025"
   access = "Allow"
   direction = "Inbound"
   protocol = "Tcp"
   source_port_range = "*"
   destination_port_range = "9100"
-  source_address_prefixes = local.worker_subnets[each.key]
-  destination_address_prefixes = local.controller_subnets[each.key]
+  source_address_prefixes = azurerm_subnet.worker.address_prefixes
+  destination_address_prefixes = azurerm_subnet.controller.address_prefixes
 }
 
 # Allow apiserver to access kubelet's for exec, log, port-forward
 resource "azurerm_network_security_rule" "controller-kubelet" {
-  for_each = local.controller_subnets
+  resource_group_name = azurerm_resource_group.cluster.name
 
-  name = "allow-kubelet-${each.key}"
-  resource_group_name = azurerm_resource_group.cluster.name
+  name = "allow-kubelet"
   network_security_group_name = azurerm_network_security_group.controller.name
-  priority = 2030 + (each.key == "ipv4" ? 0 : 1)
+  priority = "2030"
   access = "Allow"
   direction = "Inbound"
   protocol = "Tcp"
   source_port_range = "*"
   destination_port_range = "10250"
 
   # allow Prometheus to scrape kubelet metrics too
-  source_address_prefixes = local.cluster_subnets[each.key]
-  destination_address_prefixes = local.controller_subnets[each.key]
+  source_address_prefixes = concat(azurerm_subnet.controller.address_prefixes, azurerm_subnet.worker.address_prefixes)
+  destination_address_prefixes = azurerm_subnet.controller.address_prefixes
 }
 
 # Override Azure AllowVNetInBound and AllowAzureLoadBalancerInBound
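Both branches declare the same allow-list; main just stamps each rule out once per IP family via `for_each` over the subnet map, with the `(each.key == "ipv4" ? 0 : 1)` term keeping NSG priorities unique across the two copies. A minimal sketch of that pattern with placeholder names (the resource group and NSG names here are assumptions, not from the module):

```hcl
locals {
  controller_subnets = {
    ipv4 = ["10.0.0.0/17"]
    ipv6 = ["fd00:1234:5678::/64"] # example ULA-derived subnet
  }
}

resource "azurerm_network_security_rule" "ssh" {
  for_each = local.controller_subnets # keys: "ipv4", "ipv6"

  name                         = "allow-ssh-${each.key}"
  resource_group_name          = "example-cluster"        # assumed
  network_security_group_name  = "example-controller-nsg" # assumed
  priority                     = 2000 + (each.key == "ipv4" ? 0 : 1)
  access                       = "Allow"
  direction                    = "Inbound"
  protocol                     = "Tcp"
  source_port_range            = "*"
  destination_port_range       = "22"
  source_address_prefix        = "*"
  destination_address_prefixes = each.value
}
```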
@@ -256,189 +247,182 @@ resource "azurerm_network_security_rule" "controller-deny-all" {
 # Worker security group
 
 resource "azurerm_network_security_group" "worker" {
-  name = "${var.cluster_name}-worker"
   resource_group_name = azurerm_resource_group.cluster.name
-  location = azurerm_resource_group.cluster.location
+
+  name = "${var.cluster_name}-worker"
+  location = azurerm_resource_group.cluster.location
 }
 
 resource "azurerm_network_security_rule" "worker-icmp" {
-  for_each = local.worker_subnets
+  resource_group_name = azurerm_resource_group.cluster.name
 
-  name = "allow-icmp-${each.key}"
-  resource_group_name = azurerm_resource_group.cluster.name
+  name = "allow-icmp"
   network_security_group_name = azurerm_network_security_group.worker.name
-  priority = 1995 + (each.key == "ipv4" ? 0 : 1)
+  priority = "1995"
   access = "Allow"
   direction = "Inbound"
   protocol = "Icmp"
   source_port_range = "*"
   destination_port_range = "*"
-  source_address_prefixes = local.cluster_subnets[each.key]
-  destination_address_prefixes = local.worker_subnets[each.key]
+  source_address_prefixes = concat(azurerm_subnet.controller.address_prefixes, azurerm_subnet.worker.address_prefixes)
+  destination_address_prefixes = azurerm_subnet.worker.address_prefixes
 }
 
 resource "azurerm_network_security_rule" "worker-ssh" {
-  for_each = local.worker_subnets
+  resource_group_name = azurerm_resource_group.cluster.name
 
-  name = "allow-ssh-${each.key}"
-  resource_group_name = azurerm_resource_group.cluster.name
+  name = "allow-ssh"
   network_security_group_name = azurerm_network_security_group.worker.name
-  priority = 2000 + (each.key == "ipv4" ? 0 : 1)
+  priority = "2000"
   access = "Allow"
   direction = "Inbound"
   protocol = "Tcp"
   source_port_range = "*"
   destination_port_range = "22"
-  source_address_prefixes = local.controller_subnets[each.key]
-  destination_address_prefixes = local.worker_subnets[each.key]
+  source_address_prefixes = azurerm_subnet.controller.address_prefixes
+  destination_address_prefixes = azurerm_subnet.worker.address_prefixes
 }
 
 resource "azurerm_network_security_rule" "worker-http" {
-  for_each = local.worker_subnets
+  resource_group_name = azurerm_resource_group.cluster.name
 
-  name = "allow-http-${each.key}"
-  resource_group_name = azurerm_resource_group.cluster.name
+  name = "allow-http"
   network_security_group_name = azurerm_network_security_group.worker.name
-  priority = 2005 + (each.key == "ipv4" ? 0 : 1)
+  priority = "2005"
   access = "Allow"
   direction = "Inbound"
   protocol = "Tcp"
   source_port_range = "*"
   destination_port_range = "80"
   source_address_prefix = "*"
-  destination_address_prefixes = local.worker_subnets[each.key]
+  destination_address_prefixes = azurerm_subnet.worker.address_prefixes
 }
 
 resource "azurerm_network_security_rule" "worker-https" {
-  for_each = local.worker_subnets
+  resource_group_name = azurerm_resource_group.cluster.name
 
-  name = "allow-https-${each.key}"
-  resource_group_name = azurerm_resource_group.cluster.name
+  name = "allow-https"
   network_security_group_name = azurerm_network_security_group.worker.name
-  priority = 2010 + (each.key == "ipv4" ? 0 : 1)
+  priority = "2010"
   access = "Allow"
   direction = "Inbound"
   protocol = "Tcp"
   source_port_range = "*"
   destination_port_range = "443"
   source_address_prefix = "*"
-  destination_address_prefixes = local.worker_subnets[each.key]
+  destination_address_prefixes = azurerm_subnet.worker.address_prefixes
 }
 
 resource "azurerm_network_security_rule" "worker-cilium-health" {
-  for_each = var.networking == "cilium" ? local.worker_subnets : {}
+  resource_group_name = azurerm_resource_group.cluster.name
+  count = var.networking == "cilium" ? 1 : 0
 
-  name = "allow-cilium-health-${each.key}"
-  resource_group_name = azurerm_resource_group.cluster.name
+  name = "allow-cilium-health"
   network_security_group_name = azurerm_network_security_group.worker.name
-  priority = 2012 + (each.key == "ipv4" ? 0 : 1)
+  priority = "2013"
   access = "Allow"
   direction = "Inbound"
   protocol = "Tcp"
   source_port_range = "*"
   destination_port_range = "4240"
-  source_address_prefixes = local.cluster_subnets[each.key]
-  destination_address_prefixes = local.worker_subnets[each.key]
+  source_address_prefixes = concat(azurerm_subnet.controller.address_prefixes, azurerm_subnet.worker.address_prefixes)
+  destination_address_prefixes = azurerm_subnet.worker.address_prefixes
 }
 
 resource "azurerm_network_security_rule" "worker-cilium-metrics" {
-  for_each = var.networking == "cilium" ? local.worker_subnets : {}
+  resource_group_name = azurerm_resource_group.cluster.name
+  count = var.networking == "cilium" ? 1 : 0
 
-  name = "allow-cilium-metrics-${each.key}"
-  resource_group_name = azurerm_resource_group.cluster.name
+  name = "allow-cilium-metrics"
   network_security_group_name = azurerm_network_security_group.worker.name
-  priority = 2014 + (each.key == "ipv4" ? 0 : 1)
+  priority = "2014"
   access = "Allow"
   direction = "Inbound"
   protocol = "Tcp"
   source_port_range = "*"
   destination_port_range = "9962-9965"
-  source_address_prefixes = local.cluster_subnets[each.key]
-  destination_address_prefixes = local.worker_subnets[each.key]
+  source_address_prefixes = concat(azurerm_subnet.controller.address_prefixes, azurerm_subnet.worker.address_prefixes)
+  destination_address_prefixes = azurerm_subnet.worker.address_prefixes
 }
 
 resource "azurerm_network_security_rule" "worker-vxlan" {
-  for_each = local.worker_subnets
+  resource_group_name = azurerm_resource_group.cluster.name
 
-  name = "allow-vxlan-${each.key}"
-  resource_group_name = azurerm_resource_group.cluster.name
+  name = "allow-vxlan"
   network_security_group_name = azurerm_network_security_group.worker.name
-  priority = 2016 + (each.key == "ipv4" ? 0 : 1)
+  priority = "2015"
   access = "Allow"
   direction = "Inbound"
   protocol = "Udp"
   source_port_range = "*"
   destination_port_range = "4789"
-  source_address_prefixes = local.cluster_subnets[each.key]
-  destination_address_prefixes = local.worker_subnets[each.key]
+  source_address_prefixes = concat(azurerm_subnet.controller.address_prefixes, azurerm_subnet.worker.address_prefixes)
+  destination_address_prefixes = azurerm_subnet.worker.address_prefixes
 }
 
 resource "azurerm_network_security_rule" "worker-linux-vxlan" {
-  for_each = local.worker_subnets
+  resource_group_name = azurerm_resource_group.cluster.name
 
-  name = "allow-linux-vxlan-${each.key}"
-  resource_group_name = azurerm_resource_group.cluster.name
+  name = "allow-linux-vxlan"
   network_security_group_name = azurerm_network_security_group.worker.name
-  priority = 2018 + (each.key == "ipv4" ? 0 : 1)
+  priority = "2016"
   access = "Allow"
   direction = "Inbound"
   protocol = "Udp"
   source_port_range = "*"
   destination_port_range = "8472"
-  source_address_prefixes = local.cluster_subnets[each.key]
-  destination_address_prefixes = local.worker_subnets[each.key]
+  source_address_prefixes = concat(azurerm_subnet.controller.address_prefixes, azurerm_subnet.worker.address_prefixes)
+  destination_address_prefixes = azurerm_subnet.worker.address_prefixes
 }
 
 # Allow Prometheus to scrape node-exporter daemonset
 resource "azurerm_network_security_rule" "worker-node-exporter" {
-  for_each = local.worker_subnets
+  resource_group_name = azurerm_resource_group.cluster.name
 
-  name = "allow-node-exporter-${each.key}"
-  resource_group_name = azurerm_resource_group.cluster.name
+  name = "allow-node-exporter"
   network_security_group_name = azurerm_network_security_group.worker.name
-  priority = 2020 + (each.key == "ipv4" ? 0 : 1)
+  priority = "2020"
   access = "Allow"
   direction = "Inbound"
   protocol = "Tcp"
   source_port_range = "*"
   destination_port_range = "9100"
-  source_address_prefixes = local.worker_subnets[each.key]
-  destination_address_prefixes = local.worker_subnets[each.key]
+  source_address_prefixes = azurerm_subnet.worker.address_prefixes
+  destination_address_prefixes = azurerm_subnet.worker.address_prefixes
 }
 
 # Allow Prometheus to scrape kube-proxy
 resource "azurerm_network_security_rule" "worker-kube-proxy" {
-  for_each = local.worker_subnets
+  resource_group_name = azurerm_resource_group.cluster.name
 
-  name = "allow-kube-proxy-${each.key}"
-  resource_group_name = azurerm_resource_group.cluster.name
+  name = "allow-kube-proxy"
   network_security_group_name = azurerm_network_security_group.worker.name
-  priority = 2024 + (each.key == "ipv4" ? 0 : 1)
+  priority = "2024"
   access = "Allow"
   direction = "Inbound"
   protocol = "Tcp"
   source_port_range = "*"
   destination_port_range = "10249"
-  source_address_prefixes = local.worker_subnets[each.key]
-  destination_address_prefixes = local.worker_subnets[each.key]
+  source_address_prefixes = azurerm_subnet.worker.address_prefixes
+  destination_address_prefixes = azurerm_subnet.worker.address_prefixes
 }
 
 # Allow apiserver to access kubelet's for exec, log, port-forward
 resource "azurerm_network_security_rule" "worker-kubelet" {
-  for_each = local.worker_subnets
+  resource_group_name = azurerm_resource_group.cluster.name
 
-  name = "allow-kubelet-${each.key}"
-  resource_group_name = azurerm_resource_group.cluster.name
+  name = "allow-kubelet"
   network_security_group_name = azurerm_network_security_group.worker.name
-  priority = 2026 + (each.key == "ipv4" ? 0 : 1)
+  priority = "2025"
   access = "Allow"
   direction = "Inbound"
   protocol = "Tcp"
   source_port_range = "*"
   destination_port_range = "10250"
 
   # allow Prometheus to scrape kubelet metrics too
-  source_address_prefixes = local.cluster_subnets[each.key]
-  destination_address_prefixes = local.worker_subnets[each.key]
+  source_address_prefixes = concat(azurerm_subnet.controller.address_prefixes, azurerm_subnet.worker.address_prefixes)
+  destination_address_prefixes = azurerm_subnet.worker.address_prefixes
 }
 
 # Override Azure AllowVNetInBound and AllowAzureLoadBalancerInBound
@@ -18,7 +18,7 @@ resource "null_resource" "copy-controller-secrets" {
 
   connection {
     type = "ssh"
-    host = azurerm_public_ip.controllers-ipv4[count.index].ip_address
+    host = azurerm_public_ip.controllers.*.ip_address[count.index]
     user = "core"
     timeout = "15m"
   }
@@ -45,7 +45,7 @@ resource "null_resource" "bootstrap" {
 
   connection {
     type = "ssh"
-    host = azurerm_public_ip.controllers-ipv4[0].ip_address
+    host = azurerm_public_ip.controllers.*.ip_address[0]
     user = "core"
     timeout = "15m"
   }
|
@ -5,9 +5,9 @@ variable "cluster_name" {
|
|||||||
|
|
||||||
# Azure
|
# Azure
|
||||||
|
|
||||||
variable "location" {
|
variable "region" {
|
||||||
type = string
|
type = string
|
||||||
description = "Azure location (e.g. centralus , see `az account list-locations --output table`)"
|
description = "Azure Region (e.g. centralus , see `az account list-locations --output table`)"
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "dns_zone" {
|
variable "dns_zone" {
|
||||||
@@ -22,6 +22,30 @@ variable "dns_zone_group" {
 
 # instances
 
+variable "controller_count" {
+  type = number
+  description = "Number of controllers (i.e. masters)"
+  default = 1
+}
+
+variable "worker_count" {
+  type = number
+  description = "Number of workers"
+  default = 1
+}
+
+variable "controller_type" {
+  type = string
+  description = "Machine type for controllers (see `az vm list-skus --location centralus`)"
+  default = "Standard_B2s"
+}
+
+variable "worker_type" {
+  type = string
+  description = "Machine type for workers (see `az vm list-skus --location centralus`)"
+  default = "Standard_D2as_v5"
+}
+
 variable "os_image" {
   type = string
   description = "Channel for a Container Linux derivative (flatcar-stable, flatcar-beta, flatcar-alpha)"
@@ -33,60 +57,12 @@ variable "os_image" {
   }
 }
 
-variable "controller_count" {
+variable "disk_size" {
   type = number
-  description = "Number of controllers (i.e. masters)"
-  default = 1
-}
-
-variable "controller_type" {
-  type = string
-  description = "Machine type for controllers (see `az vm list-skus --location centralus`)"
-  default = "Standard_B2s"
-}
-
-variable "controller_disk_type" {
-  type = string
-  description = "Type of managed disk for controller node(s)"
-  default = "Premium_LRS"
-}
-
-variable "controller_disk_size" {
-  type = number
-  description = "Size of the managed disk in GB for controller node(s)"
+  description = "Size of the disk in GB"
   default = 30
 }
 
-variable "worker_count" {
-  type = number
-  description = "Number of workers"
-  default = 1
-}
-
-variable "worker_type" {
-  type = string
-  description = "Machine type for workers (see `az vm list-skus --location centralus`)"
-  default = "Standard_D2as_v5"
-}
-
-variable "worker_disk_type" {
-  type = string
-  description = "Type of managed disk for worker nodes"
-  default = "Standard_LRS"
-}
-
-variable "worker_disk_size" {
-  type = number
-  description = "Size of the managed disk in GB for worker nodes"
-  default = 30
-}
-
-variable "worker_ephemeral_disk" {
-  type = bool
-  description = "Use ephemeral local disk instead of managed disk (requires vm_type with local storage)"
-  default = false
-}
-
 variable "worker_priority" {
   type = string
   description = "Set worker priority to Spot to use reduced cost surplus capacity, with the tradeoff that instances can be deallocated at any time."
@@ -124,15 +100,10 @@ variable "networking" {
   default = "cilium"
 }
 
-variable "network_cidr" {
-  type = object({
-    ipv4 = list(string)
-    ipv6 = optional(list(string), [])
-  })
-  description = "Virtual network CIDR ranges"
-  default = {
-    ipv4 = ["10.0.0.0/16"]
-  }
+variable "host_cidr" {
+  type = string
+  description = "CIDR IPv4 range to assign to instances"
+  default = "10.0.0.0/16"
 }
 
 variable "pod_cidr" {
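From a caller's perspective, this hunk swaps v1.30.1's single `host_cidr` string for main's `network_cidr` object. A hedged tfvars-style sketch of the two shapes:

```hcl
# main: IPv4 required, IPv6 optional (an empty list falls back to a random
# fd00::/8 ULA range, per the network.tf deletion earlier in this diff)
network_cidr = {
  ipv4 = ["10.0.0.0/16"]
  ipv6 = []
}

# v1.30.1: one IPv4 range
host_cidr = "10.0.0.0/16"
```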
@@ -150,31 +121,32 @@ EOD
   default = "10.3.0.0/16"
 }
 
+variable "enable_reporting" {
+  type = bool
+  description = "Enable usage or analytics reporting to upstreams (Calico)"
+  default = false
+}
+
+variable "enable_aggregation" {
+  type = bool
+  description = "Enable the Kubernetes Aggregation Layer"
+  default = true
+}
+
 variable "worker_node_labels" {
   type = list(string)
   description = "List of initial worker node labels"
   default = []
 }
 
-# advanced
-
-variable "controller_arch" {
+variable "arch" {
   type = string
-  description = "Controller node(s) architecture (amd64 or arm64)"
+  description = "Container architecture (amd64 or arm64)"
   default = "amd64"
-  validation {
-    condition = contains(["amd64", "arm64"], var.controller_arch)
-    error_message = "The controller_arch must be amd64 or arm64."
-  }
-}
-
-variable "worker_arch" {
-  type = string
-  description = "Worker node(s) architecture (amd64 or arm64)"
-  default = "amd64"
   validation {
-    condition = contains(["amd64", "arm64"], var.worker_arch)
-    error_message = "The worker_arch must be amd64 or arm64."
+    condition = var.arch == "amd64" || var.arch == "arm64"
+    error_message = "The arch must be amd64 or arm64."
   }
 }
 
@@ -184,6 +156,14 @@ variable "daemonset_tolerations" {
   default = []
 }
 
+# unofficial, undocumented, unsupported
+
+variable "cluster_domain_suffix" {
+  type = string
+  description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
+  default = "cluster.local"
+}
+
 variable "components" {
   description = "Configure pre-installed cluster components"
   # Component configs are passed through to terraform-render-bootstrap,
@@ -3,7 +3,7 @@
 terraform {
   required_version = ">= 0.13.0, < 2.0.0"
   required_providers {
-    azurerm = ">= 2.8"
+    azurerm = ">= 2.8, < 4.0"
     null = ">= 2.1"
     ct = {
       source = "poseidon/ct"
@@ -3,26 +3,24 @@ module "workers" {
   name = var.cluster_name
 
   # Azure
   resource_group_name = azurerm_resource_group.cluster.name
-  location = azurerm_resource_group.cluster.location
+  region = azurerm_resource_group.cluster.location
   subnet_id = azurerm_subnet.worker.id
   security_group_id = azurerm_network_security_group.worker.id
-  backend_address_pool_ids = local.backend_address_pool_ids
+  backend_address_pool_id = azurerm_lb_backend_address_pool.worker.id
 
   worker_count = var.worker_count
   vm_type = var.worker_type
   os_image = var.os_image
-  disk_type = var.worker_disk_type
-  disk_size = var.worker_disk_size
-  ephemeral_disk = var.worker_ephemeral_disk
   priority = var.worker_priority
 
   # configuration
   kubeconfig = module.bootstrap.kubeconfig-kubelet
   ssh_authorized_key = var.ssh_authorized_key
   azure_authorized_key = var.azure_authorized_key
   service_cidr = var.service_cidr
+  cluster_domain_suffix = var.cluster_domain_suffix
   snippets = var.worker_snippets
   node_labels = var.worker_node_labels
-  arch = var.worker_arch
+  arch = var.arch
 }
@ -28,7 +28,7 @@ systemd:
|
|||||||
After=docker.service
|
After=docker.service
|
||||||
Wants=rpc-statd.service
|
Wants=rpc-statd.service
|
||||||
[Service]
|
[Service]
|
||||||
Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.31.3
|
Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.30.1
|
||||||
ExecStartPre=/bin/mkdir -p /etc/cni/net.d
|
ExecStartPre=/bin/mkdir -p /etc/cni/net.d
|
||||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
|
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
|
||||||
ExecStartPre=/bin/mkdir -p /opt/cni/bin
|
ExecStartPre=/bin/mkdir -p /opt/cni/bin
|
||||||
@ -99,7 +99,7 @@ storage:
|
|||||||
cgroupDriver: systemd
|
cgroupDriver: systemd
|
||||||
clusterDNS:
|
clusterDNS:
|
||||||
- ${cluster_dns_service_ip}
|
- ${cluster_dns_service_ip}
|
||||||
clusterDomain: cluster.local
|
clusterDomain: ${cluster_domain_suffix}
|
||||||
healthzPort: 0
|
healthzPort: 0
|
||||||
rotateCertificates: true
|
rotateCertificates: true
|
||||||
shutdownGracePeriod: 45s
|
shutdownGracePeriod: 45s
|
||||||
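The `${cluster_dns_service_ip}` and `${cluster_domain_suffix}` placeholders above are filled in by `templatefile` when the Butane config is rendered. A stripped-down sketch of what the kubelet config fragment reduces to, assuming the module defaults (`service_cidr = "10.3.0.0/16"`, suffix `cluster.local`):

```hcl
locals {
  service_cidr           = "10.3.0.0/16"
  cluster_dns_service_ip = cidrhost(local.service_cidr, 10) # "10.3.0.10"

  # Rendered equivalent of the kubelet config fragment above.
  kubelet_config_fragment = <<-EOT
    clusterDNS:
      - ${local.cluster_dns_service_ip}
    clusterDomain: cluster.local
  EOT
}
```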
@@ -5,9 +5,9 @@ variable "name" {
 
 # Azure
 
-variable "location" {
+variable "region" {
   type        = string
-  description = "Must be set to the Azure location of cluster"
+  description = "Must be set to the Azure Region of cluster"
 }
 
 variable "resource_group_name" {

@@ -25,12 +25,9 @@ variable "security_group_id" {
   description = "Must be set to the `worker_security_group_id` output by cluster"
 }
 
-variable "backend_address_pool_ids" {
-  type = object({
-    ipv4 = list(string)
-    ipv6 = list(string)
-  })
-  description = "Must be set to the `backend_address_pool_ids` output by cluster"
+variable "backend_address_pool_id" {
+  type        = string
+  description = "Must be set to the `worker_backend_address_pool_id` output by cluster"
 }
 
 # instances
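The `backend_address_pool_ids` object on the main side groups one list of pool IDs per IP family, so a single variable carries both IPv4 and IPv6 pools. A sketch of how a caller might populate it (the resource names here are hypothetical):

```hcl
variable "backend_address_pool_ids" {
  type = object({
    ipv4 = list(string)
    ipv6 = list(string)
  })
}

# Hypothetical caller wiring one load balancer pool per IP family.
module "workers" {
  source = "./workers"
  # ... other arguments ...
  backend_address_pool_ids = {
    ipv4 = [azurerm_lb_backend_address_pool.worker_ipv4.id]
    ipv6 = [azurerm_lb_backend_address_pool.worker_ipv6.id]
  }
}
```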
@ -58,24 +55,6 @@ variable "os_image" {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "disk_type" {
|
|
||||||
type = string
|
|
||||||
description = "Type of managed disk"
|
|
||||||
default = "Standard_LRS"
|
|
||||||
}
|
|
||||||
|
|
||||||
variable "disk_size" {
|
|
||||||
type = number
|
|
||||||
description = "Size of the managed disk in GB"
|
|
||||||
default = 30
|
|
||||||
}
|
|
||||||
|
|
||||||
variable "ephemeral_disk" {
|
|
||||||
type = bool
|
|
||||||
description = "Use ephemeral local disk instead of managed disk (requires vm_type with local storage)"
|
|
||||||
default = false
|
|
||||||
}
|
|
||||||
|
|
||||||
variable "priority" {
|
variable "priority" {
|
||||||
type = string
|
type = string
|
||||||
description = "Set priority to Spot to use reduced cost surplus capacity, with the tradeoff that instances can be evicted at any time."
|
description = "Set priority to Spot to use reduced cost surplus capacity, with the tradeoff that instances can be evicted at any time."
|
||||||
@ -137,3 +116,12 @@ variable "arch" {
|
|||||||
error_message = "The arch must be amd64 or arm64."
|
error_message = "The arch must be amd64 or arm64."
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
# unofficial, undocumented, unsupported
|
||||||
|
|
||||||
|
variable "cluster_domain_suffix" {
|
||||||
|
description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
|
||||||
|
type = string
|
||||||
|
default = "cluster.local"
|
||||||
|
}
|
||||||
|
|
||||||
|
@ -3,7 +3,7 @@
|
|||||||
terraform {
|
terraform {
|
||||||
required_version = ">= 0.13.0, < 2.0.0"
|
required_version = ">= 0.13.0, < 2.0.0"
|
||||||
required_providers {
|
required_providers {
|
||||||
azurerm = ">= 2.8"
|
azurerm = ">= 2.8, < 4.0"
|
||||||
ct = {
|
ct = {
|
||||||
source = "poseidon/ct"
|
source = "poseidon/ct"
|
||||||
version = "~> 0.13"
|
version = "~> 0.13"
|
||||||
|
@ -8,28 +8,25 @@ locals {
|
|||||||
}
|
}
|
||||||
|
|
||||||
# Workers scale set
|
# Workers scale set
|
||||||
resource "azurerm_orchestrated_virtual_machine_scale_set" "workers" {
|
resource "azurerm_linux_virtual_machine_scale_set" "workers" {
|
||||||
name = "${var.name}-worker"
|
resource_group_name = var.resource_group_name
|
||||||
resource_group_name = var.resource_group_name
|
|
||||||
location = var.location
|
name = "${var.name}-worker"
|
||||||
platform_fault_domain_count = 1
|
location = var.region
|
||||||
sku_name = var.vm_type
|
sku = var.vm_type
|
||||||
instances = var.worker_count
|
instances = var.worker_count
|
||||||
|
# instance name prefix for instances in the set
|
||||||
|
computer_name_prefix = "${var.name}-worker"
|
||||||
|
single_placement_group = false
|
||||||
|
custom_data = base64encode(data.ct_config.worker.rendered)
|
||||||
|
boot_diagnostics {
|
||||||
|
# defaults to a managed storage account
|
||||||
|
}
|
||||||
|
|
||||||
# storage
|
# storage
|
||||||
encryption_at_host_enabled = true
|
|
||||||
os_disk {
|
os_disk {
|
||||||
storage_account_type = var.disk_type
|
storage_account_type = "Standard_LRS"
|
||||||
disk_size_gb = var.disk_size
|
caching = "ReadWrite"
|
||||||
caching = "ReadOnly"
|
|
||||||
# Optionally, use the ephemeral disk of the instance type (support varies)
|
|
||||||
dynamic "diff_disk_settings" {
|
|
||||||
for_each = var.ephemeral_disk ? [1] : []
|
|
||||||
content {
|
|
||||||
option = "Local"
|
|
||||||
placement = "ResourceDisk"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
# Flatcar Container Linux
|
# Flatcar Container Linux
|
||||||
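The removed `dynamic "diff_disk_settings"` block is the standard Terraform pattern for a nested block that must be present or absent depending on a flag: `for_each` over a one-element list emits the block once, over an empty list not at all. Reassembled from the deleted lines above (it belongs inside the scale set resource, not standalone):

```hcl
os_disk {
  storage_account_type = var.disk_type
  disk_size_gb         = var.disk_size
  caching              = "ReadOnly"

  # Emit diff_disk_settings only when the ephemeral disk is requested;
  # Azure rejects the block on VM sizes without local storage.
  dynamic "diff_disk_settings" {
    for_each = var.ephemeral_disk ? [1] : []
    content {
      option    = "Local"
      placement = "ResourceDisk"
    }
  }
}
```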
@ -49,6 +46,13 @@ resource "azurerm_orchestrated_virtual_machine_scale_set" "workers" {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
# Azure requires setting admin_ssh_key, though Ignition custom_data handles it too
|
||||||
|
admin_username = "core"
|
||||||
|
admin_ssh_key {
|
||||||
|
username = "core"
|
||||||
|
public_key = local.azure_authorized_key
|
||||||
|
}
|
||||||
|
|
||||||
# network
|
# network
|
||||||
network_interface {
|
network_interface {
|
||||||
name = "nic0"
|
name = "nic0"
|
||||||
@ -56,41 +60,17 @@ resource "azurerm_orchestrated_virtual_machine_scale_set" "workers" {
|
|||||||
network_security_group_id = var.security_group_id
|
network_security_group_id = var.security_group_id
|
||||||
|
|
||||||
ip_configuration {
|
ip_configuration {
|
||||||
name = "ipv4"
|
name = "ip0"
|
||||||
version = "IPv4"
|
|
||||||
primary = true
|
primary = true
|
||||||
subnet_id = var.subnet_id
|
subnet_id = var.subnet_id
|
||||||
# backend address pool to which the NIC should be added
|
|
||||||
load_balancer_backend_address_pool_ids = var.backend_address_pool_ids.ipv4
|
|
||||||
}
|
|
||||||
ip_configuration {
|
|
||||||
name = "ipv6"
|
|
||||||
version = "IPv6"
|
|
||||||
subnet_id = var.subnet_id
|
|
||||||
# backend address pool to which the NIC should be added
|
|
||||||
load_balancer_backend_address_pool_ids = var.backend_address_pool_ids.ipv6
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
# boot
|
# backend address pool to which the NIC should be added
|
||||||
user_data_base64 = base64encode(data.ct_config.worker.rendered)
|
load_balancer_backend_address_pool_ids = [var.backend_address_pool_id]
|
||||||
boot_diagnostics {
|
|
||||||
# defaults to a managed storage account
|
|
||||||
}
|
|
||||||
|
|
||||||
# Azure requires an RSA admin_ssh_key
|
|
||||||
os_profile {
|
|
||||||
linux_configuration {
|
|
||||||
admin_username = "core"
|
|
||||||
admin_ssh_key {
|
|
||||||
username = "core"
|
|
||||||
public_key = local.azure_authorized_key
|
|
||||||
}
|
|
||||||
computer_name_prefix = "${var.name}-worker"
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
# lifecycle
|
# lifecycle
|
||||||
|
upgrade_mode = "Manual"
|
||||||
# eviction policy may only be set when priority is Spot
|
# eviction policy may only be set when priority is Spot
|
||||||
priority = var.priority
|
priority = var.priority
|
||||||
eviction_policy = var.priority == "Spot" ? "Delete" : null
|
eviction_policy = var.priority == "Spot" ? "Delete" : null
|
||||||
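`eviction_policy` relies on a conditional expression because Azure only accepts the attribute when `priority` is `"Spot"`; passing `null` leaves it unset. A self-contained sketch of the expression:

```hcl
locals {
  priority = "Spot" # or "Regular"

  # null when not Spot, so Terraform treats the attribute as unset.
  eviction_policy = local.priority == "Spot" ? "Delete" : null
}
```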
@ -99,12 +79,35 @@ resource "azurerm_orchestrated_virtual_machine_scale_set" "workers" {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
# Scale up or down to maintain desired number, tolerating deallocations.
|
||||||
|
resource "azurerm_monitor_autoscale_setting" "workers" {
|
||||||
|
resource_group_name = var.resource_group_name
|
||||||
|
|
||||||
|
name = "${var.name}-maintain-desired"
|
||||||
|
location = var.region
|
||||||
|
|
||||||
|
# autoscale
|
||||||
|
enabled = true
|
||||||
|
target_resource_id = azurerm_linux_virtual_machine_scale_set.workers.id
|
||||||
|
|
||||||
|
profile {
|
||||||
|
name = "default"
|
||||||
|
|
||||||
|
capacity {
|
||||||
|
minimum = var.worker_count
|
||||||
|
default = var.worker_count
|
||||||
|
maximum = var.worker_count
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
# Flatcar Linux worker
|
# Flatcar Linux worker
|
||||||
data "ct_config" "worker" {
|
data "ct_config" "worker" {
|
||||||
content = templatefile("${path.module}/butane/worker.yaml", {
|
content = templatefile("${path.module}/butane/worker.yaml", {
|
||||||
kubeconfig = indent(10, var.kubeconfig)
|
kubeconfig = indent(10, var.kubeconfig)
|
||||||
ssh_authorized_key = var.ssh_authorized_key
|
ssh_authorized_key = var.ssh_authorized_key
|
||||||
cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
|
cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
|
||||||
|
cluster_domain_suffix = var.cluster_domain_suffix
|
||||||
node_labels = join(",", var.node_labels)
|
node_labels = join(",", var.node_labels)
|
||||||
node_taints = join(",", var.node_taints)
|
node_taints = join(",", var.node_taints)
|
||||||
})
|
})
|
||||||
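The `indent(10, var.kubeconfig)` call exists because the rendered kubeconfig must line up under a key nested 10 spaces deep in the Butane template; `indent()` prefixes every line except the first. A small sketch:

```hcl
output "indent_demo" {
  # First line unchanged, subsequent lines indented 10 spaces.
  value = indent(10, "apiVersion: v1\nkind: Config")
}
```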
@@ -11,7 +11,7 @@ Typhoon distributes upstream Kubernetes, architectural conventions, and cluster
 
 ## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>
 
-* Kubernetes v1.31.3 (upstream)
+* Kubernetes v1.30.1 (upstream)
 * Single or multi-master, [Calico](https://www.projectcalico.org/) or [Cilium](https://github.com/cilium/cilium) or [flannel](https://github.com/coreos/flannel) networking
 * On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/), SELinux enforcing
 * Advanced features like [snippets](https://typhoon.psdn.io/advanced/customization/#hosts) customization

@@ -1,6 +1,6 @@
 # Kubernetes assets (kubeconfig, manifests)
 module "bootstrap" {
-  source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=e6a1c7bccfc45ab299b5f8149bc3840f99b30b2b"
+  source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=e1b1e0c75e77e042cf369f463f0e656297a201a8"
 
   cluster_name = var.cluster_name
   api_servers  = [var.k8s_domain_name]

@@ -10,6 +10,9 @@ module "bootstrap" {
   network_ip_autodetection_method = var.network_ip_autodetection_method
   pod_cidr                        = var.pod_cidr
   service_cidr                    = var.service_cidr
+  cluster_domain_suffix           = var.cluster_domain_suffix
+  enable_reporting                = var.enable_reporting
+  enable_aggregation              = var.enable_aggregation
   components                      = var.components
 }
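Both sides pin `terraform-render-bootstrap` to an exact commit via the `?ref=` query string, so moving between these branches swaps the entire rendered-manifest bundle atomically. A sketch of the pattern (argument list abridged; only `cluster_name` is shown):

```hcl
module "bootstrap" {
  # ref can be a branch, tag, or commit SHA; a full SHA gives an immutable pin.
  source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=e1b1e0c75e77e042cf369f463f0e656297a201a8"

  # ... arguments as shown in the hunk above ...
  cluster_name = var.cluster_name
}
```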
@@ -53,7 +53,7 @@ systemd:
         Description=Kubelet (System Container)
         Wants=rpc-statd.service
         [Service]
-        Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.31.3
+        Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.30.1
         ExecStartPre=/bin/mkdir -p /etc/cni/net.d
         ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
         ExecStartPre=/bin/mkdir -p /opt/cni/bin

@@ -113,7 +113,7 @@ systemd:
         Type=oneshot
         RemainAfterExit=true
         WorkingDirectory=/opt/bootstrap
-        Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.31.3
+        Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.30.1
         ExecStartPre=-/usr/bin/podman rm bootstrap
         ExecStart=/usr/bin/podman run --name bootstrap \
          --network host \

@@ -154,7 +154,7 @@ storage:
           cgroupDriver: systemd
           clusterDNS:
             - ${cluster_dns_service_ip}
-          clusterDomain: cluster.local
+          clusterDomain: ${cluster_domain_suffix}
           healthzPort: 0
           rotateCertificates: true
           shutdownGracePeriod: 45s

@@ -59,6 +59,7 @@ data "ct_config" "controllers" {
     etcd_name              = var.controllers.*.name[count.index]
     etcd_initial_cluster   = join(",", formatlist("%s=https://%s:2380", var.controllers.*.name, var.controllers.*.domain))
     cluster_dns_service_ip = module.bootstrap.cluster_dns_service_ip
+    cluster_domain_suffix  = var.cluster_domain_suffix
     ssh_authorized_key     = var.ssh_authorized_key
   })
   strict = true
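`etcd_initial_cluster` zips the controller names and domains with `formatlist` and joins the result into the comma-separated form etcd expects. A sketch with hypothetical controller names and domains showing what the expression evaluates to:

```hcl
locals {
  names   = ["node1", "node2"]
  domains = ["node1.example.com", "node2.example.com"]

  # "node1=https://node1.example.com:2380,node2=https://node2.example.com:2380"
  etcd_initial_cluster = join(",", formatlist("%s=https://%s:2380", local.names, local.domains))
}
```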
@@ -139,7 +139,25 @@ variable "kernel_args" {
   default = []
 }
 
-# advanced
+variable "enable_reporting" {
+  type        = bool
+  description = "Enable usage or analytics reporting to upstreams (Calico)"
+  default     = false
+}
+
+variable "enable_aggregation" {
+  type        = bool
+  description = "Enable the Kubernetes Aggregation Layer"
+  default     = true
+}
+
+# unofficial, undocumented, unsupported
+
+variable "cluster_domain_suffix" {
+  description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
+  type        = string
+  default     = "cluster.local"
+}
 
 variable "components" {
   description = "Configure pre-installed cluster components"

@@ -25,7 +25,7 @@ systemd:
         Description=Kubelet (System Container)
         Wants=rpc-statd.service
         [Service]
-        Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.31.3
+        Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.30.1
         ExecStartPre=/bin/mkdir -p /etc/cni/net.d
         ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
         ExecStartPre=/bin/mkdir -p /opt/cni/bin

@@ -108,7 +108,7 @@ storage:
           cgroupDriver: systemd
           clusterDNS:
             - ${cluster_dns_service_ip}
-          clusterDomain: cluster.local
+          clusterDomain: ${cluster_domain_suffix}
           healthzPort: 0
           rotateCertificates: true
           shutdownGracePeriod: 45s

@@ -53,6 +53,7 @@ data "ct_config" "worker" {
     domain_name            = var.domain
     ssh_authorized_key     = var.ssh_authorized_key
     cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
+    cluster_domain_suffix  = var.cluster_domain_suffix
     node_labels            = join(",", var.node_labels)
     node_taints            = join(",", var.node_taints)
   })

@@ -103,3 +103,9 @@ The 1st IP will be reserved for kube_apiserver, the 10th IP will be reserved for
 EOD
   default = "10.3.0.0/16"
 }
+
+variable "cluster_domain_suffix" {
+  description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
+  type        = string
+  default     = "cluster.local"
+}

@@ -15,12 +15,13 @@ module "workers" {
   domain = var.workers[count.index].domain
 
   # configuration
   kubeconfig            = module.bootstrap.kubeconfig-kubelet
   ssh_authorized_key    = var.ssh_authorized_key
   service_cidr          = var.service_cidr
+  cluster_domain_suffix = var.cluster_domain_suffix
   node_labels           = lookup(var.worker_node_labels, var.workers[count.index].name, [])
   node_taints           = lookup(var.worker_node_taints, var.workers[count.index].name, [])
   snippets              = lookup(var.snippets, var.workers[count.index].name, [])
 
   # optional
   cached_install = var.cached_install
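Each worker's labels, taints, and snippets come from `lookup(map, key, default)`, so machines without an entry in the map simply fall back to an empty list. A sketch with hypothetical worker names:

```hcl
locals {
  worker_node_labels = {
    "worker-a" = ["node.example.com/tier=frontend"]
  }

  labels_a = lookup(local.worker_node_labels, "worker-a", []) # the list above
  labels_b = lookup(local.worker_node_labels, "worker-b", []) # [] (no entry)
}
```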
@@ -11,7 +11,7 @@ Typhoon distributes upstream Kubernetes, architectural conventions, and cluster
 
 ## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>
 
-* Kubernetes v1.31.3 (upstream)
+* Kubernetes v1.30.1 (upstream)
 * Single or multi-master, [Calico](https://www.projectcalico.org/) or [Cilium](https://github.com/cilium/cilium) or [flannel](https://github.com/coreos/flannel) networking
 * On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
 * Advanced features like [snippets](https://typhoon.psdn.io/advanced/customization/#hosts) customization

@@ -1,6 +1,6 @@
 # Kubernetes assets (kubeconfig, manifests)
 module "bootstrap" {
-  source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=e6a1c7bccfc45ab299b5f8149bc3840f99b30b2b"
+  source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=e1b1e0c75e77e042cf369f463f0e656297a201a8"
 
   cluster_name = var.cluster_name
   api_servers  = [var.k8s_domain_name]

@@ -10,6 +10,9 @@ module "bootstrap" {
   network_ip_autodetection_method = var.network_ip_autodetection_method
   pod_cidr                        = var.pod_cidr
   service_cidr                    = var.service_cidr
+  cluster_domain_suffix           = var.cluster_domain_suffix
+  enable_reporting                = var.enable_reporting
+  enable_aggregation              = var.enable_aggregation
   components                      = var.components
 }

@@ -64,7 +64,7 @@ systemd:
         After=docker.service
         Wants=rpc-statd.service
         [Service]
-        Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.31.3
+        Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.30.1
         ExecStartPre=/bin/mkdir -p /etc/cni/net.d
         ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
         ExecStartPre=/bin/mkdir -p /opt/cni/bin

@@ -114,7 +114,7 @@ systemd:
         Type=oneshot
         RemainAfterExit=true
         WorkingDirectory=/opt/bootstrap
-        Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.31.3
+        Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.30.1
         ExecStart=/usr/bin/docker run \
          -v /etc/kubernetes/pki:/etc/kubernetes/pki:ro \
          -v /opt/bootstrap/assets:/assets:ro \

@@ -155,7 +155,7 @@ storage:
           cgroupDriver: systemd
           clusterDNS:
             - ${cluster_dns_service_ip}
-          clusterDomain: cluster.local
+          clusterDomain: ${cluster_domain_suffix}
           healthzPort: 0
           rotateCertificates: true
           shutdownGracePeriod: 45s

@@ -60,7 +60,6 @@ data "ct_config" "install" {
     baseurl_flag = var.cached_install ? "-b ${var.matchbox_http_endpoint}/assets/flatcar" : ""
   })
   strict = true
-  snippets = lookup(var.install_snippets, var.controllers.*.name[count.index], [])
 }
 
 # Match each controller by MAC

@@ -89,6 +88,7 @@ data "ct_config" "controllers" {
     etcd_name              = var.controllers.*.name[count.index]
     etcd_initial_cluster   = join(",", formatlist("%s=https://%s:2380", var.controllers.*.name, var.controllers.*.domain))
     cluster_dns_service_ip = module.bootstrap.cluster_dns_service_ip
+    cluster_domain_suffix  = var.cluster_domain_suffix
     ssh_authorized_key     = var.ssh_authorized_key
   })
   strict = true

@@ -61,12 +61,6 @@ variable "snippets" {
   default = {}
 }
 
-variable "install_snippets" {
-  type        = map(list(string))
-  description = "Map from machine names to lists of Container Linux Config snippets to run during install phase"
-  default     = {}
-}
-
 variable "worker_node_labels" {
   type        = map(list(string))
   description = "Map from worker names to lists of initial node labels"

@@ -150,6 +144,18 @@ variable "kernel_args" {
   default = []
 }
 
+variable "enable_reporting" {
+  type        = bool
+  description = "Enable usage or analytics reporting to upstreams (Calico)"
+  default     = false
+}
+
+variable "enable_aggregation" {
+  type        = bool
+  description = "Enable the Kubernetes Aggregation Layer"
+  default     = true
+}
+
 variable "oem_type" {
   type        = string
   description = <<EOD

@@ -161,7 +167,13 @@ EOD
   default = ""
 }
 
-# advanced
+# unofficial, undocumented, unsupported
+
+variable "cluster_domain_suffix" {
+  type        = string
+  description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
+  default     = "cluster.local"
+}
 
 variable "components" {
   description = "Configure pre-installed cluster components"

@@ -36,7 +36,7 @@ systemd:
         After=docker.service
         Wants=rpc-statd.service
         [Service]
-        Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.31.3
+        Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.30.1
         ExecStartPre=/bin/mkdir -p /etc/cni/net.d
         ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
         ExecStartPre=/bin/mkdir -p /opt/cni/bin

@@ -113,7 +113,7 @@ storage:
           cgroupDriver: systemd
           clusterDNS:
             - ${cluster_dns_service_ip}
-          clusterDomain: cluster.local
+          clusterDomain: ${cluster_domain_suffix}
           healthzPort: 0
           rotateCertificates: true
           shutdownGracePeriod: 45s

@@ -55,7 +55,6 @@ data "ct_config" "install" {
     baseurl_flag = var.cached_install ? "-b ${var.matchbox_http_endpoint}/assets/flatcar" : ""
   })
   strict = true
-  snippets = var.install_snippets
 }
 
 # Match a worker to a profile by MAC

@@ -80,6 +79,7 @@ data "ct_config" "worker" {
     domain_name            = var.domain
     ssh_authorized_key     = var.ssh_authorized_key
     cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
+    cluster_domain_suffix  = var.cluster_domain_suffix
     node_labels            = join(",", var.node_labels)
     node_taints            = join(",", var.node_taints)
   })

@@ -60,12 +60,6 @@ variable "snippets" {
   default = []
 }
 
-variable "install_snippets" {
-  type        = list(string)
-  description = "List of Butane snippets to run with the install command"
-  default     = []
-}
-
 variable "node_labels" {
   type        = list(string)
   description = "List of initial node labels"

@@ -120,3 +114,13 @@ The 1st IP will be reserved for kube_apiserver, the 10th IP will be reserved for
 EOD
   default = "10.3.0.0/16"
 }
+
+
+variable "cluster_domain_suffix" {
+  type        = string
+  description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
+  default     = "cluster.local"
+}

@@ -15,13 +15,13 @@ module "workers" {
   domain = var.workers[count.index].domain
 
   # configuration
   kubeconfig            = module.bootstrap.kubeconfig-kubelet
   ssh_authorized_key    = var.ssh_authorized_key
   service_cidr          = var.service_cidr
+  cluster_domain_suffix = var.cluster_domain_suffix
   node_labels           = lookup(var.worker_node_labels, var.workers[count.index].name, [])
   node_taints           = lookup(var.worker_node_taints, var.workers[count.index].name, [])
   snippets              = lookup(var.snippets, var.workers[count.index].name, [])
-  install_snippets      = lookup(var.install_snippets, var.workers[count.index].name, [])
 
   # optional
   download_protocol = var.download_protocol

@@ -11,7 +11,7 @@ Typhoon distributes upstream Kubernetes, architectural conventions, and cluster
 
 ## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>
 
-* Kubernetes v1.31.3 (upstream)
+* Kubernetes v1.30.1 (upstream)
 * Single or multi-master, [Calico](https://www.projectcalico.org/) or [flannel](https://github.com/coreos/flannel) networking
 * On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/), SELinux enforcing
 * Advanced features like [snippets](https://typhoon.psdn.io/advanced/customization/#hosts) customization

@@ -1,6 +1,6 @@
 # Kubernetes assets (kubeconfig, manifests)
 module "bootstrap" {
-  source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=e6a1c7bccfc45ab299b5f8149bc3840f99b30b2b"
+  source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=e1b1e0c75e77e042cf369f463f0e656297a201a8"
 
   cluster_name = var.cluster_name
   api_servers  = [format("%s.%s", var.cluster_name, var.dns_zone)]

@@ -11,8 +11,11 @@ module "bootstrap" {
   network_encapsulation = "vxlan"
   network_mtu           = "1450"
 
   pod_cidr     = var.pod_cidr
   service_cidr = var.service_cidr
-  components   = var.components
+  cluster_domain_suffix = var.cluster_domain_suffix
+  enable_reporting      = var.enable_reporting
+  enable_aggregation    = var.enable_aggregation
+  components            = var.components
 }

@@ -55,7 +55,7 @@ systemd:
         After=afterburn.service
         Wants=rpc-statd.service
         [Service]
-        Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.31.3
+        Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.30.1
         EnvironmentFile=/run/metadata/afterburn
         ExecStartPre=/bin/mkdir -p /etc/cni/net.d
         ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests

@@ -123,7 +123,7 @@ systemd:
          --volume /opt/bootstrap/assets:/assets:ro,Z \
          --volume /opt/bootstrap/apply:/apply:ro,Z \
          --entrypoint=/apply \
-         quay.io/poseidon/kubelet:v1.31.3
+         quay.io/poseidon/kubelet:v1.30.1
         ExecStartPost=/bin/touch /opt/bootstrap/bootstrap.done
         ExecStartPost=-/usr/bin/podman stop bootstrap
 storage:

@@ -151,7 +151,7 @@ storage:
           cgroupDriver: systemd
           clusterDNS:
             - ${cluster_dns_service_ip}
-          clusterDomain: cluster.local
+          clusterDomain: ${cluster_domain_suffix}
           healthzPort: 0
           rotateCertificates: true
           shutdownGracePeriod: 45s

@@ -28,7 +28,7 @@ systemd:
         After=afterburn.service
         Wants=rpc-statd.service
         [Service]
-        Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.31.3
+        Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.30.1
         EnvironmentFile=/run/metadata/afterburn
         ExecStartPre=/bin/mkdir -p /etc/cni/net.d
         ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests

@@ -104,7 +104,7 @@ storage:
           cgroupDriver: systemd
           clusterDNS:
             - ${cluster_dns_service_ip}
-          clusterDomain: cluster.local
+          clusterDomain: ${cluster_domain_suffix}
           healthzPort: 0
           rotateCertificates: true
           shutdownGracePeriod: 45s

@@ -74,6 +74,7 @@ data "ct_config" "controllers" {
       for i in range(var.controller_count) : "etcd${i}=https://${var.cluster_name}-etcd${i}.${var.dns_zone}:2380"
     ])
     cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
+    cluster_domain_suffix  = var.cluster_domain_suffix
   })
   strict = true
   snippets = var.controller_snippets
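Here the etcd peer list is built with a `for` expression over `range(var.controller_count)` instead of `formatlist`. A sketch with hypothetical values showing the evaluated result:

```hcl
locals {
  cluster_name     = "nemo"
  dns_zone         = "do.example.com"
  controller_count = 2

  # "etcd0=https://nemo-etcd0.do.example.com:2380,etcd1=https://nemo-etcd1.do.example.com:2380"
  etcd_initial_cluster = join(",", [
    for i in range(local.controller_count) :
    "etcd${i}=https://${local.cluster_name}-etcd${i}.${local.dns_zone}:2380"
  ])
}
```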
@@ -86,7 +86,25 @@ EOD
   default = "10.3.0.0/16"
 }
 
-# advanced
+variable "enable_reporting" {
+  type        = bool
+  description = "Enable usage or analytics reporting to upstreams (Calico)"
+  default     = false
+}
+
+variable "enable_aggregation" {
+  type        = bool
+  description = "Enable the Kubernetes Aggregation Layer"
+  default     = true
+}
+
+# unofficial, undocumented, unsupported
+
+variable "cluster_domain_suffix" {
+  type        = string
+  description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
+  default     = "cluster.local"
+}
 
 variable "components" {
   description = "Configure pre-installed cluster components"

@@ -62,6 +62,7 @@ resource "digitalocean_tag" "workers" {
 data "ct_config" "worker" {
   content = templatefile("${path.module}/butane/worker.yaml", {
     cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
+    cluster_domain_suffix  = var.cluster_domain_suffix
   })
   strict = true
   snippets = var.worker_snippets

@@ -11,7 +11,7 @@ Typhoon distributes upstream Kubernetes, architectural conventions, and cluster
 
 ## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>
 
-* Kubernetes v1.31.3 (upstream)
+* Kubernetes v1.30.1 (upstream)
 * Single or multi-master, [Calico](https://www.projectcalico.org/) or [Cilium](https://github.com/cilium/cilium) or [flannel](https://github.com/coreos/flannel) networking
 * On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
 * Advanced features like [snippets](https://typhoon.psdn.io/advanced/customization/#hosts) customization

@@ -1,6 +1,6 @@
 # Kubernetes assets (kubeconfig, manifests)
 module "bootstrap" {
-  source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=e6a1c7bccfc45ab299b5f8149bc3840f99b30b2b"
+  source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=e1b1e0c75e77e042cf369f463f0e656297a201a8"
 
   cluster_name = var.cluster_name
   api_servers  = [format("%s.%s", var.cluster_name, var.dns_zone)]

@@ -11,8 +11,11 @@ module "bootstrap" {
   network_encapsulation = "vxlan"
   network_mtu           = "1450"
 
   pod_cidr     = var.pod_cidr
   service_cidr = var.service_cidr
-  components   = var.components
+  cluster_domain_suffix = var.cluster_domain_suffix
+  enable_reporting      = var.enable_reporting
+  enable_aggregation    = var.enable_aggregation
+  components            = var.components
 }

@@ -66,7 +66,7 @@ systemd:
         After=coreos-metadata.service
         Wants=rpc-statd.service
         [Service]
-        Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.31.3
+        Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.30.1
         EnvironmentFile=/run/metadata/coreos
         ExecStartPre=/bin/mkdir -p /etc/cni/net.d
         ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests

@@ -117,7 +117,7 @@ systemd:
         Type=oneshot
         RemainAfterExit=true
         WorkingDirectory=/opt/bootstrap
-        Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.31.3
+        Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.30.1
         ExecStart=/usr/bin/docker run \
          -v /etc/kubernetes/pki:/etc/kubernetes/pki:ro \
          -v /opt/bootstrap/assets:/assets:ro \

@@ -153,7 +153,7 @@ storage:
           cgroupDriver: systemd
           clusterDNS:
             - ${cluster_dns_service_ip}
-          clusterDomain: cluster.local
+          clusterDomain: ${cluster_domain_suffix}
           healthzPort: 0
           rotateCertificates: true
           shutdownGracePeriod: 45s

@@ -38,7 +38,7 @@ systemd:
         After=coreos-metadata.service
         Wants=rpc-statd.service
         [Service]
-        Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.31.3
+        Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.30.1
         EnvironmentFile=/run/metadata/coreos
         ExecStartPre=/bin/mkdir -p /etc/cni/net.d
         ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests

@@ -103,7 +103,7 @@ storage:
           cgroupDriver: systemd
           clusterDNS:
             - ${cluster_dns_service_ip}
-          clusterDomain: cluster.local
+          clusterDomain: ${cluster_domain_suffix}
           healthzPort: 0
           rotateCertificates: true
           shutdownGracePeriod: 45s

@@ -79,6 +79,7 @@ data "ct_config" "controllers" {
      for i in range(var.controller_count) : "etcd${i}=https://${var.cluster_name}-etcd${i}.${var.dns_zone}:2380"
     ])
     cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
+    cluster_domain_suffix  = var.cluster_domain_suffix
   })
   strict = true
   snippets = var.controller_snippets

@@ -86,7 +86,25 @@ EOD
   default = "10.3.0.0/16"
 }
 
-# advanced
+variable "enable_reporting" {
+  type        = bool
+  description = "Enable usage or analytics reporting to upstreams (Calico)"
+  default     = false
+}
+
+variable "enable_aggregation" {
+  type        = bool
+  description = "Enable the Kubernetes Aggregation Layer"
+  default     = true
+}
+
+# unofficial, undocumented, unsupported
+
+variable "cluster_domain_suffix" {
+  type        = string
+  description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
+  default     = "cluster.local"
+}
 
 variable "components" {
   description = "Configure pre-installed cluster components"
Some files were not shown because too many files have changed in this diff.