Introduce the component system for managing pre-installed addons

* Previously: Typhoon provisions clusters with kube-system components
like CoreDNS, kube-proxy, and a chosen CNI provider (among flannel,
Calico, or Cilium) pre-installed. This is convenient since clusters
come with "batteries included", but it also means upgrading these
components is generally done in lock-step, by upgrading to a new
Typhoon / Kubernetes release.
* It can be valuable to manage these components with a separate
plan/apply process or through automations and deploy systems. For
example, this allows managing CoreDNS separately from the cluster's
lifecycle.
* These "components" will continue to be pre-installed by default,
but a new `components` variable allows them to be disabled and
managed as "addons", components you apply after cluster creation
and manage on a rolling basis. For some of these, we may provide
Terraform modules to aide in managing these components.

```
module "cluster" {
  # defaults
  components = {
    enable = true
    coredns = {
      enable = true
    }
    kube_proxy = {
      enable = true
    }
    # Only the CNI set in var.networking will be installed
    flannel = {
      enable = true
    }
    calico = {
      enable = true
    }
    cilium = {
      enable = true
    }
  }
}
```

An earlier variable, `install_container_networking = true/false`, has
been removed, since the same effect can now be achieved with this more
extensible and general components mechanism by setting the chosen
networking provider's `enable` field to false.
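
For example, a minimal sketch (assuming Cilium is the chosen CNI via
`var.networking`) that keeps the other defaults but self-manages the
CNI provider:

```
module "cluster" {
  # ...

  networking = "cilium"
  components = {
    enable = true
    # skip installing Cilium during bootstrap; apply and manage it separately
    cilium = {
      enable = false
    }
  }
}
```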
Dalton Hubble 2024-05-18 15:05:33 -07:00
parent 563feacd29
commit b3c384fbc0
32 changed files with 185 additions and 92 deletions


@ -8,6 +8,7 @@ Notable changes between versions.
* Kubernetes [v1.30.1](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.30.md#v1301)
* Add firewall rules and security group rules for Cilium and Hubble metrics ([#1449](https://github.com/poseidon/typhoon/pull/1449))
* Introduce `components` variable to enable/disable/configure pre-installed components ([#1453](https://github.com/poseidon/typhoon/pull/1453))
* Update Cilium from v1.15.3 to [v1.15.4](https://github.com/cilium/cilium/releases/tag/v1.15.4)
* Update flannel from v0.24.4 to [v0.25.1](https://github.com/flannel-io/flannel/releases/tag/v0.25.1)


@ -1,11 +1,11 @@
# Kubernetes assets (kubeconfig, manifests)
module "bootstrap" {
source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=452bcf379d94f5d479c1dee281fd479872271415"
source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=a54fe54d9895f5dd51332b79533143f52792090f"
cluster_name = var.cluster_name
api_servers = [format("%s.%s", var.cluster_name, var.dns_zone)]
etcd_servers = aws_route53_record.etcds.*.fqdn
networking = var.install_container_networking ? var.networking : "none"
networking = var.networking
network_mtu = var.network_mtu
pod_cidr = var.pod_cidr
service_cidr = var.service_cidr


@ -107,12 +107,6 @@ variable "networking" {
default = "cilium"
}
variable "install_container_networking" {
type = bool
description = "Install the chosen networking provider during cluster bootstrap (use false to self-manage)"
default = true
}
variable "network_mtu" {
type = number
description = "CNI interface MTU (applies to calico only). Use 8981 if using instances types with Jumbo frames."
@ -192,6 +186,9 @@ variable "components" {
enable = optional(bool)
coredns = optional(map(any))
kube_proxy = optional(map(any))
flannel = optional(map(any))
calico = optional(map(any))
cilium = optional(map(any))
})
default = null
}


@ -1,11 +1,11 @@
# Kubernetes assets (kubeconfig, manifests)
module "bootstrap" {
source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=452bcf379d94f5d479c1dee281fd479872271415"
source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=a54fe54d9895f5dd51332b79533143f52792090f"
cluster_name = var.cluster_name
api_servers = [format("%s.%s", var.cluster_name, var.dns_zone)]
etcd_servers = aws_route53_record.etcds.*.fqdn
networking = var.install_container_networking ? var.networking : "none"
networking = var.networking
network_mtu = var.network_mtu
pod_cidr = var.pod_cidr
service_cidr = var.service_cidr


@ -107,12 +107,6 @@ variable "networking" {
default = "cilium"
}
variable "install_container_networking" {
type = bool
description = "Install the chosen networking provider during cluster bootstrap (use false to self-manage)"
default = true
}
variable "network_mtu" {
type = number
description = "CNI interface MTU (applies to calico only). Use 8981 if using instances types with Jumbo frames."
@ -192,6 +186,9 @@ variable "components" {
enable = optional(bool)
coredns = optional(map(any))
kube_proxy = optional(map(any))
flannel = optional(map(any))
calico = optional(map(any))
cilium = optional(map(any))
})
default = null
}


@ -1,12 +1,12 @@
# Kubernetes assets (kubeconfig, manifests)
module "bootstrap" {
source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=452bcf379d94f5d479c1dee281fd479872271415"
source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=a54fe54d9895f5dd51332b79533143f52792090f"
cluster_name = var.cluster_name
api_servers = [format("%s.%s", var.cluster_name, var.dns_zone)]
etcd_servers = formatlist("%s.%s", azurerm_dns_a_record.etcds.*.name, var.dns_zone)
networking = var.install_container_networking ? var.networking : "none"
networking = var.networking
# only effective with Calico networking
# we should be able to use 1450 MTU, but in practice, 1410 was needed
network_encapsulation = "vxlan"


@ -94,12 +94,6 @@ variable "networking" {
default = "cilium"
}
variable "install_container_networking" {
type = bool
description = "Install the chosen networking provider during cluster bootstrap (use false to self-manage)"
default = true
}
variable "host_cidr" {
type = string
description = "CIDR IPv4 range to assign to instances"
@ -162,6 +156,9 @@ variable "components" {
enable = optional(bool)
coredns = optional(map(any))
kube_proxy = optional(map(any))
flannel = optional(map(any))
calico = optional(map(any))
cilium = optional(map(any))
})
default = null
}


@ -1,12 +1,12 @@
# Kubernetes assets (kubeconfig, manifests)
module "bootstrap" {
source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=452bcf379d94f5d479c1dee281fd479872271415"
source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=a54fe54d9895f5dd51332b79533143f52792090f"
cluster_name = var.cluster_name
api_servers = [format("%s.%s", var.cluster_name, var.dns_zone)]
etcd_servers = formatlist("%s.%s", azurerm_dns_a_record.etcds.*.name, var.dns_zone)
networking = var.install_container_networking ? var.networking : "none"
networking = var.networking
# only effective with Calico networking
# we should be able to use 1450 MTU, but in practice, 1410 was needed
network_encapsulation = "vxlan"


@ -100,12 +100,6 @@ variable "networking" {
default = "cilium"
}
variable "install_container_networking" {
type = bool
description = "Install the chosen networking provider during cluster bootstrap (use false to self-manage)"
default = true
}
variable "host_cidr" {
type = string
description = "CIDR IPv4 range to assign to instances"
@ -179,6 +173,9 @@ variable "components" {
enable = optional(bool)
coredns = optional(map(any))
kube_proxy = optional(map(any))
flannel = optional(map(any))
calico = optional(map(any))
cilium = optional(map(any))
})
default = null
}


@ -1,11 +1,11 @@
# Kubernetes assets (kubeconfig, manifests)
module "bootstrap" {
source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=452bcf379d94f5d479c1dee281fd479872271415"
source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=a54fe54d9895f5dd51332b79533143f52792090f"
cluster_name = var.cluster_name
api_servers = [var.k8s_domain_name]
etcd_servers = var.controllers.*.domain
networking = var.install_container_networking ? var.networking : "none"
networking = var.networking
network_mtu = var.network_mtu
network_ip_autodetection_method = var.network_ip_autodetection_method
pod_cidr = var.pod_cidr


@ -92,12 +92,6 @@ variable "networking" {
default = "cilium"
}
variable "install_container_networking" {
type = bool
description = "Install the chosen networking provider during cluster bootstrap (use false to self-manage)"
default = true
}
variable "network_mtu" {
type = number
description = "CNI interface MTU (applies to calico only)"
@ -174,6 +168,9 @@ variable "components" {
enable = optional(bool)
coredns = optional(map(any))
kube_proxy = optional(map(any))
flannel = optional(map(any))
calico = optional(map(any))
cilium = optional(map(any))
})
default = null
}


@ -1,11 +1,11 @@
# Kubernetes assets (kubeconfig, manifests)
module "bootstrap" {
source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=452bcf379d94f5d479c1dee281fd479872271415"
source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=a54fe54d9895f5dd51332b79533143f52792090f"
cluster_name = var.cluster_name
api_servers = [var.k8s_domain_name]
etcd_servers = var.controllers.*.domain
networking = var.install_container_networking ? var.networking : "none"
networking = var.networking
network_mtu = var.network_mtu
network_ip_autodetection_method = var.network_ip_autodetection_method
pod_cidr = var.pod_cidr


@ -91,12 +91,6 @@ variable "networking" {
default = "cilium"
}
variable "install_container_networking" {
type = bool
description = "Install the chosen networking provider during cluster bootstrap (use false to self-manage)"
default = true
}
variable "network_mtu" {
type = number
description = "CNI interface MTU (applies to calico only)"
@ -190,6 +184,9 @@ variable "components" {
enable = optional(bool)
coredns = optional(map(any))
kube_proxy = optional(map(any))
flannel = optional(map(any))
calico = optional(map(any))
cilium = optional(map(any))
})
default = null
}


@ -1,12 +1,12 @@
# Kubernetes assets (kubeconfig, manifests)
module "bootstrap" {
source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=452bcf379d94f5d479c1dee281fd479872271415"
source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=a54fe54d9895f5dd51332b79533143f52792090f"
cluster_name = var.cluster_name
api_servers = [format("%s.%s", var.cluster_name, var.dns_zone)]
etcd_servers = digitalocean_record.etcds.*.fqdn
networking = var.install_container_networking ? var.networking : "none"
networking = var.networking
# only effective with Calico networking
network_encapsulation = "vxlan"
network_mtu = "1450"


@ -71,12 +71,6 @@ variable "networking" {
default = "cilium"
}
variable "install_container_networking" {
type = bool
description = "Install the chosen networking provider during cluster bootstrap (use false to self-manage)"
default = true
}
variable "pod_cidr" {
type = string
description = "CIDR IPv4 range to assign Kubernetes pods"
@ -121,6 +115,9 @@ variable "components" {
enable = optional(bool)
coredns = optional(map(any))
kube_proxy = optional(map(any))
flannel = optional(map(any))
calico = optional(map(any))
cilium = optional(map(any))
})
default = null
}


@ -1,12 +1,12 @@
# Kubernetes assets (kubeconfig, manifests)
module "bootstrap" {
source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=452bcf379d94f5d479c1dee281fd479872271415"
source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=a54fe54d9895f5dd51332b79533143f52792090f"
cluster_name = var.cluster_name
api_servers = [format("%s.%s", var.cluster_name, var.dns_zone)]
etcd_servers = digitalocean_record.etcds.*.fqdn
networking = var.install_container_networking ? var.networking : "none"
networking = var.networking
# only effective with Calico networking
network_encapsulation = "vxlan"
network_mtu = "1450"


@ -71,12 +71,6 @@ variable "networking" {
default = "cilium"
}
variable "install_container_networking" {
type = bool
description = "Install the chosen networking provider during cluster bootstrap (use false to self-manage)"
default = true
}
variable "pod_cidr" {
type = string
description = "CIDR IPv4 range to assign Kubernetes pods"
@ -121,6 +115,9 @@ variable "components" {
enable = optional(bool)
coredns = optional(map(any))
kube_proxy = optional(map(any))
flannel = optional(map(any))
calico = optional(map(any))
cilium = optional(map(any))
})
default = null
}


@ -1,9 +1,131 @@
# Addons
# Components
Typhoon clusters are verified to work well with several post-install addons.
Typhoon's component model allows managing cluster components independently of the cluster's lifecycle, upgrading them in a rolling or automated fashion, or customizing components in advanced ways.
Typhoon clusters install core components like `CoreDNS`, `kube-proxy`, and a chosen CNI provider (`flannel`, `calico`, or `cilium`) by default. Since v1.30.1, pre-installed components are optional. Other "addon" components like Nginx Ingress, Prometheus, or Grafana may optionally be applied through the component model (after cluster creation).
## Components
Pre-installed by default:
* CoreDNS
* kube-proxy
* CNI provider (set via `var.networking`)
    * flannel
    * Calico
    * Cilium
Addons:
* Nginx [Ingress Controller](ingress.md)
* [Prometheus](prometheus.md)
* [Grafana](grafana.md)
* [fleetlock](fleetlock.md)
## Pre-installed Components
By default, Typhoon clusters install `CoreDNS`, `kube-proxy`, and a chosen CNI provider (`flannel`, `calico`, or `cilium`). Disable any or all of these components using the `components` system.
```tf
module "yavin" {
source = "git::https://github.com/poseidon/typhoon//google-cloud/fedora-coreos/kubernetes?ref=v1.30.1"
# Google Cloud
cluster_name = "yavin"
region = "us-central1"
dns_zone = "example.com"
dns_zone_name = "example-zone"
# configuration
ssh_authorized_key = "ssh-ed25519 AAAAB3Nz..."
# pre-installed components (defaults shown)
components = {
enable = true
coredns = {
enable = true
}
kube_proxy = {
enable = true
}
# Only the CNI set in var.networking will be installed
flannel = {
enable = true
}
calico = {
enable = true
}
cilium = {
enable = true
}
}
}
```
!!! warning
    Disabling pre-installed components is for advanced users who intend to manage these components separately. Without a CNI provider, cluster nodes will be NotReady and wait for the CNI provider to be applied.
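For example, to hand CoreDNS over to a separate plan/apply process (the pattern shown in the next section), one might disable just that component. A sketch reusing the `components` structure shown above:
```tf
components = {
  enable = true
  # manage CoreDNS separately (see Managing Components below)
  coredns = {
    enable = false
  }
}
```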
## Managing Components
If you choose to manage components yourself, a recommended pattern is to use a separate Terraform workspace per component, like you would any application.
```
mkdir -p infra/components/{coredns,cilium}
tree infra/components/coredns
infra/components/coredns/
├── backend.tf
├── manifests.tf
└── providers.tf
```
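The tree above includes a `backend.tf` for the component's Terraform state. A minimal sketch, assuming a hypothetical S3 bucket for remote state (any Terraform backend works):
```tf
# backend.tf
terraform {
  backend "s3" {
    bucket = "example-terraform-state" # hypothetical bucket name
    key    = "components/coredns.tfstate"
    region = "us-east-1"
  }
}
```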
Let's consider managing CoreDNS resources. Configure the `kubernetes` provider to use the kubeconfig credentials of your Typhoon cluster(s) in a `providers.tf` file. Here we show provider blocks for interacting with Typhoon clusters on AWS, Azure, or Google Cloud, assuming each cluster's `kubeconfig-admin` output was written to a local file.
```tf
provider "kubernetes" {
alias = "aws"
config_path = "~/.kube/configs/aws-config"
}
provider "kubernetes" {
alias = "google"
config_path = "~/.kube/configs/google-config"
}
...
```
Typhoon maintains Terraform modules for most addon components. You can reference `main`, a tagged release, a SHA revision, or a custom module of your own. Define the CoreDNS manifests using the `addons/coredns` module in a `manifests.tf` file.
```tf
# CoreDNS manifests for the aws cluster
module "aws" {
  source = "git::https://github.com/poseidon/typhoon//addons/coredns?ref=v1.30.1"
  providers = {
    kubernetes = kubernetes.aws
  }
}

# CoreDNS manifests for the google cloud cluster
module "google" {
  source = "git::https://github.com/poseidon/typhoon//addons/coredns?ref=v1.30.1"
  providers = {
    kubernetes = kubernetes.google
  }
}
...
```
Plan and apply the CoreDNS Kubernetes resources to cluster(s).
```
terraform plan
terraform apply
...
module.aws.kubernetes_service_account.coredns: Refreshing state... [id=kube-system/coredns]
module.aws.kubernetes_config_map.coredns: Refreshing state... [id=kube-system/coredns]
module.aws.kubernetes_cluster_role.coredns: Refreshing state... [id=system:coredns]
module.aws.kubernetes_cluster_role_binding.coredns: Refreshing state... [id=system:coredns]
module.aws.kubernetes_service.coredns: Refreshing state... [id=kube-system/coredns]
...
```


@ -4,7 +4,7 @@ In this tutorial, we'll create a Kubernetes v1.30.1 cluster on AWS with Fedora C
We'll declare a Kubernetes cluster using the Typhoon Terraform module. Then apply the changes to create a VPC, gateway, subnets, security groups, controller instances, worker auto-scaling group, network load balancer, and TLS assets.
Controller hosts are provisioned to run an `etcd-member` peer and a `kubelet` service. Worker hosts run a `kubelet` service. Controller nodes run `kube-apiserver`, `kube-scheduler`, `kube-controller-manager`, and `coredns`, while `kube-proxy` and `calico` (or `flannel`) run on every node. A generated `kubeconfig` provides `kubectl` access to the cluster.
Controller hosts are provisioned to run an `etcd-member` peer and a `kubelet` service. Worker hosts run a `kubelet` service. Controller nodes run `kube-apiserver`, `kube-scheduler`, `kube-controller-manager`, and `coredns`, while `kube-proxy` and (`flannel`, `calico`, or `cilium`) run on every node. A generated `kubeconfig` provides `kubectl` access to the cluster.
## Requirements


@ -4,7 +4,7 @@ In this tutorial, we'll create a Kubernetes v1.30.1 cluster on Azure with Fedora
We'll declare a Kubernetes cluster using the Typhoon Terraform module. Then apply the changes to create a resource group, virtual network, subnets, security groups, controller availability set, worker scale set, load balancer, and TLS assets.
Controller hosts are provisioned to run an `etcd-member` peer and a `kubelet` service. Worker hosts run a `kubelet` service. Controller nodes run `kube-apiserver`, `kube-scheduler`, `kube-controller-manager`, and `coredns`, while `kube-proxy` and `calico` (or `flannel`) run on every node. A generated `kubeconfig` provides `kubectl` access to the cluster.
Controller hosts are provisioned to run an `etcd-member` peer and a `kubelet` service. Worker hosts run a `kubelet` service. Controller nodes run `kube-apiserver`, `kube-scheduler`, `kube-controller-manager`, and `coredns`, while `kube-proxy` and (`flannel`, `calico`, or `cilium`) run on every node. A generated `kubeconfig` provides `kubectl` access to the cluster.
## Requirements


@ -4,7 +4,7 @@ In this tutorial, we'll network boot and provision a Kubernetes v1.30.1 cluster
First, we'll deploy a [Matchbox](https://github.com/poseidon/matchbox) service and setup a network boot environment. Then, we'll declare a Kubernetes cluster using the Typhoon Terraform module and power on machines. On PXE boot, machines will install Fedora CoreOS to disk, reboot into the disk install, and provision themselves as Kubernetes controllers or workers via Ignition.
Controller hosts are provisioned to run an `etcd-member` peer and a `kubelet` service. Worker hosts run a `kubelet` service. Controller nodes run `kube-apiserver`, `kube-scheduler`, `kube-controller-manager`, and `coredns`, while `kube-proxy` and `calico` (or `flannel`) run on every node. A generated `kubeconfig` provides `kubectl` access to the cluster.
Controller hosts are provisioned to run an `etcd-member` peer and a `kubelet` service. Worker hosts run a `kubelet` service. Controller nodes run `kube-apiserver`, `kube-scheduler`, `kube-controller-manager`, and `coredns`, while `kube-proxy` and (`flannel`, `calico`, or `cilium`) run on every node. A generated `kubeconfig` provides `kubectl` access to the cluster.
## Requirements


@ -4,7 +4,7 @@ In this tutorial, we'll create a Kubernetes v1.30.1 cluster on DigitalOcean with
We'll declare a Kubernetes cluster using the Typhoon Terraform module. Then apply the changes to create controller droplets, worker droplets, DNS records, tags, and TLS assets.
Controller hosts are provisioned to run an `etcd-member` peer and a `kubelet` service. Worker hosts run a `kubelet` service. Controller nodes run `kube-apiserver`, `kube-scheduler`, `kube-controller-manager`, and `coredns`, while `kube-proxy` and `calico` (or `flannel`) run on every node. A generated `kubeconfig` provides `kubectl` access to the cluster.
Controller hosts are provisioned to run an `etcd-member` peer and a `kubelet` service. Worker hosts run a `kubelet` service. Controller nodes run `kube-apiserver`, `kube-scheduler`, `kube-controller-manager`, and `coredns`, while `kube-proxy` and (`flannel`, `calico`, or `cilium`) run on every node. A generated `kubeconfig` provides `kubectl` access to the cluster.
## Requirements


@ -4,7 +4,7 @@ In this tutorial, we'll create a Kubernetes v1.30.1 cluster on Google Compute En
We'll declare a Kubernetes cluster using the Typhoon Terraform module. Then apply the changes to create a network, firewall rules, health checks, controller instances, worker managed instance group, load balancers, and TLS assets.
Controller hosts are provisioned to run an `etcd-member` peer and a `kubelet` service. Worker hosts run a `kubelet` service. Controller nodes run `kube-apiserver`, `kube-scheduler`, `kube-controller-manager`, and `coredns`, while `kube-proxy` and `calico` (or `flannel`) run on every node. A generated `kubeconfig` provides `kubectl` access to the cluster.
Controller hosts are provisioned to run an `etcd-member` peer and a `kubelet` service. Worker hosts run a `kubelet` service. Controller nodes run `kube-apiserver`, `kube-scheduler`, `kube-controller-manager`, and `coredns`, while `kube-proxy` and (`flannel`, `calico`, or `cilium`) run on every node. A generated `kubeconfig` provides `kubectl` access to the cluster.
## Requirements


@ -4,7 +4,7 @@ In this tutorial, we'll create a Kubernetes v1.30.1 cluster on AWS with Flatcar
We'll declare a Kubernetes cluster using the Typhoon Terraform module. Then apply the changes to create a VPC, gateway, subnets, security groups, controller instances, worker auto-scaling group, network load balancer, and TLS assets.
Controller hosts are provisioned to run an `etcd-member` peer and a `kubelet` service. Worker hosts run a `kubelet` service. Controller nodes run `kube-apiserver`, `kube-scheduler`, `kube-controller-manager`, and `coredns`, while `kube-proxy` and `calico` (or `flannel`) run on every node. A generated `kubeconfig` provides `kubectl` access to the cluster.
Controller hosts are provisioned to run an `etcd-member` peer and a `kubelet` service. Worker hosts run a `kubelet` service. Controller nodes run `kube-apiserver`, `kube-scheduler`, `kube-controller-manager`, and `coredns`, while `kube-proxy` and (`flannel`, `calico`, or `cilium`) run on every node. A generated `kubeconfig` provides `kubectl` access to the cluster.
## Requirements


@ -4,7 +4,7 @@ In this tutorial, we'll create a Kubernetes v1.30.1 cluster on Azure with Flatca
We'll declare a Kubernetes cluster using the Typhoon Terraform module. Then apply the changes to create a resource group, virtual network, subnets, security groups, controller availability set, worker scale set, load balancer, and TLS assets.
Controller hosts are provisioned to run an `etcd-member` peer and a `kubelet` service. Worker hosts run a `kubelet` service. Controller nodes run `kube-apiserver`, `kube-scheduler`, `kube-controller-manager`, and `coredns`, while `kube-proxy` and `calico` (or `flannel`) run on every node. A generated `kubeconfig` provides `kubectl` access to the cluster.
Controller hosts are provisioned to run an `etcd-member` peer and a `kubelet` service. Worker hosts run a `kubelet` service. Controller nodes run `kube-apiserver`, `kube-scheduler`, `kube-controller-manager`, and `coredns`, while `kube-proxy` and (`flannel`, `calico`, or `cilium`) run on every node. A generated `kubeconfig` provides `kubectl` access to the cluster.
## Requirements


@ -4,7 +4,7 @@ In this tutorial, we'll network boot and provision a Kubernetes v1.30.1 cluster
First, we'll deploy a [Matchbox](https://github.com/poseidon/matchbox) service and setup a network boot environment. Then, we'll declare a Kubernetes cluster using the Typhoon Terraform module and power on machines. On PXE boot, machines will install Container Linux to disk, reboot into the disk install, and provision themselves as Kubernetes controllers or workers via Ignition.
Controller hosts are provisioned to run an `etcd-member` peer and a `kubelet` service. Worker hosts run a `kubelet` service. Controller nodes run `kube-apiserver`, `kube-scheduler`, `kube-controller-manager`, and `coredns` while `kube-proxy` and `calico` (or `flannel`) run on every node. A generated `kubeconfig` provides `kubectl` access to the cluster.
Controller hosts are provisioned to run an `etcd-member` peer and a `kubelet` service. Worker hosts run a `kubelet` service. Controller nodes run `kube-apiserver`, `kube-scheduler`, `kube-controller-manager`, and `coredns` while `kube-proxy` and (`flannel`, `calico`, or `cilium`) run on every node. A generated `kubeconfig` provides `kubectl` access to the cluster.
## Requirements


@ -4,7 +4,7 @@ In this tutorial, we'll create a Kubernetes v1.30.1 cluster on DigitalOcean with
We'll declare a Kubernetes cluster using the Typhoon Terraform module. Then apply the changes to create controller droplets, worker droplets, DNS records, tags, and TLS assets.
Controller hosts are provisioned to run an `etcd-member` peer and a `kubelet` service. Worker hosts run a `kubelet` service. Controller nodes run `kube-apiserver`, `kube-scheduler`, `kube-controller-manager`, and `coredns`, while `kube-proxy` and `calico` (or `flannel`) run on every node. A generated `kubeconfig` provides `kubectl` access to the cluster.
Controller hosts are provisioned to run an `etcd-member` peer and a `kubelet` service. Worker hosts run a `kubelet` service. Controller nodes run `kube-apiserver`, `kube-scheduler`, `kube-controller-manager`, and `coredns`, while `kube-proxy` and (`flannel`, `calico`, or `cilium`) run on every node. A generated `kubeconfig` provides `kubectl` access to the cluster.
## Requirements


@ -4,7 +4,7 @@ In this tutorial, we'll create a Kubernetes v1.30.1 cluster on Google Compute En
We'll declare a Kubernetes cluster using the Typhoon Terraform module. Then apply the changes to create a network, firewall rules, health checks, controller instances, worker managed instance group, load balancers, and TLS assets.
Controller hosts are provisioned to run an `etcd-member` peer and a `kubelet` service. Worker hosts run a `kubelet` service. Controller nodes run `kube-apiserver`, `kube-scheduler`, `kube-controller-manager`, and `coredns`, while `kube-proxy` and `calico` (or `flannel`) run on every node. A generated `kubeconfig` provides `kubectl` access to the cluster.
Controller hosts are provisioned to run an `etcd-member` peer and a `kubelet` service. Worker hosts run a `kubelet` service. Controller nodes run `kube-apiserver`, `kube-scheduler`, `kube-controller-manager`, and `coredns`, while `kube-proxy` and (`flannel`, `calico`, or `cilium`) run on every node. A generated `kubeconfig` provides `kubectl` access to the cluster.
## Requirements


@ -1,11 +1,11 @@
# Kubernetes assets (kubeconfig, manifests)
module "bootstrap" {
source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=452bcf379d94f5d479c1dee281fd479872271415"
source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=a54fe54d9895f5dd51332b79533143f52792090f"
cluster_name = var.cluster_name
api_servers = [format("%s.%s", var.cluster_name, var.dns_zone)]
etcd_servers = [for fqdn in google_dns_record_set.etcds.*.name : trimsuffix(fqdn, ".")]
networking = var.install_container_networking ? var.networking : "none"
networking = var.networking
network_mtu = 1440
pod_cidr = var.pod_cidr
service_cidr = var.service_cidr


@ -94,12 +94,6 @@ variable "networking" {
default = "cilium"
}
variable "install_container_networking" {
type = bool
description = "Install the chosen networking provider during cluster bootstrap (use false to self-manage)"
default = true
}
variable "pod_cidr" {
type = string
description = "CIDR IPv4 range to assign Kubernetes pods"
@ -157,6 +151,9 @@ variable "components" {
enable = optional(bool)
coredns = optional(map(any))
kube_proxy = optional(map(any))
flannel = optional(map(any))
calico = optional(map(any))
cilium = optional(map(any))
})
default = null
}


@ -1,11 +1,11 @@
# Kubernetes assets (kubeconfig, manifests)
module "bootstrap" {
source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=452bcf379d94f5d479c1dee281fd479872271415"
source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=a54fe54d9895f5dd51332b79533143f52792090f"
cluster_name = var.cluster_name
api_servers = [format("%s.%s", var.cluster_name, var.dns_zone)]
etcd_servers = [for fqdn in google_dns_record_set.etcds.*.name : trimsuffix(fqdn, ".")]
networking = var.install_container_networking ? var.networking : "none"
networking = var.networking
network_mtu = 1440
pod_cidr = var.pod_cidr
service_cidr = var.service_cidr


@ -94,12 +94,6 @@ variable "networking" {
default = "cilium"
}
variable "install_container_networking" {
type = bool
description = "Install the chosen networking provider during cluster bootstrap (use false to self-manage)"
default = true
}
variable "pod_cidr" {
type = string
description = "CIDR IPv4 range to assign Kubernetes pods"
@ -157,6 +151,9 @@ variable "components" {
enable = optional(bool)
coredns = optional(map(any))
kube_proxy = optional(map(any))
flannel = optional(map(any))
calico = optional(map(any))
cilium = optional(map(any))
})
default = null
}