Merge remote-tracking branch 'upstream/main'
@@ -1,10 +1,10 @@
 # AWS
 
-In this tutorial, we'll create a Kubernetes v1.28.3 cluster on AWS with Flatcar Linux.
+In this tutorial, we'll create a Kubernetes v1.31.3 cluster on AWS with Flatcar Linux.
 
 We'll declare a Kubernetes cluster using the Typhoon Terraform module. Then apply the changes to create a VPC, gateway, subnets, security groups, controller instances, worker auto-scaling group, network load balancer, and TLS assets.
 
-Controller hosts are provisioned to run an `etcd-member` peer and a `kubelet` service. Worker hosts run a `kubelet` service. Controller nodes run `kube-apiserver`, `kube-scheduler`, `kube-controller-manager`, and `coredns`, while `kube-proxy` and `calico` (or `flannel`) run on every node. A generated `kubeconfig` provides `kubectl` access to the cluster.
+Controller hosts are provisioned to run an `etcd-member` peer and a `kubelet` service. Worker hosts run a `kubelet` service. Controller nodes run `kube-apiserver`, `kube-scheduler`, `kube-controller-manager`, and `coredns`, while `kube-proxy` and (`flannel`, `calico`, or `cilium`) run on every node. A generated `kubeconfig` provides `kubectl` access to the cluster.
 
 ## Requirements
 
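
The Terraform and provider setup between this hunk and the next is untouched by the merge. For orientation, it follows roughly this shape; the region, credentials path, and provider versions here are illustrative assumptions, not values from the diff:

```tf
provider "aws" {
  region                   = "eu-central-1"
  shared_credentials_files = ["/home/user/.config/aws/credentials"]
}

# "ct" renders Container Linux Configs into Ignition for instance user-data
provider "ct" {}

terraform {
  required_providers {
    ct = {
      source  = "poseidon/ct"
      version = "0.13.0"
    }
    aws = {
      source  = "hashicorp/aws"
      version = "5.31.0"
    }
  }
}
```
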
@@ -72,19 +72,19 @@ Define a Kubernetes cluster using the module `aws/flatcar-linux/kubernetes`.
 
 ```tf
 module "tempest" {
-  source = "git::https://github.com/poseidon/typhoon//aws/flatcar-linux/kubernetes?ref=v1.28.3"
+  source = "git::https://github.com/poseidon/typhoon//aws/flatcar-linux/kubernetes?ref=v1.31.3"
 
   # AWS
   cluster_name = "tempest"
   dns_zone     = "aws.example.com"
   dns_zone_id  = "Z3PAABBCFAKEC0"
 
-  # configuration
-  ssh_authorized_key = "ssh-rsa AAAAB3Nz..."
-
-  # optional
+  # instances
   worker_count = 2
   worker_type  = "t3.small"
+
+  # configuration
+  ssh_authorized_key = "ssh-rsa AAAAB3Nz..."
 }
 ```
 
@@ -134,8 +134,9 @@ In 4-8 minutes, the Kubernetes cluster will be ready.
 
 ```
 resource "local_file" "kubeconfig-tempest" {
-  content  = module.tempest.kubeconfig-admin
-  filename = "/home/user/.kube/configs/tempest-config"
+  content         = module.tempest.kubeconfig-admin
+  filename        = "/home/user/.kube/configs/tempest-config"
+  file_permission = "0600"
 }
 ```
 
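
The added `file_permission = "0600"` matters because the admin kubeconfig is a credential; written with default permissions it could be world-readable. An alternative sketch (not part of this commit) avoids the local file and exposes the kubeconfig as a sensitive Terraform output instead:

```tf
output "kubeconfig-admin" {
  value     = module.tempest.kubeconfig-admin
  sensitive = true
}
```

It can then be retrieved on demand with `terraform output -raw kubeconfig-admin`.
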
@@ -145,9 +146,9 @@ List nodes in the cluster.
 $ export KUBECONFIG=/home/user/.kube/configs/tempest-config
 $ kubectl get nodes
 NAME           STATUS  ROLES   AGE  VERSION
-ip-10-0-3-155  Ready   <none>  10m  v1.28.3
-ip-10-0-26-65  Ready   <none>  10m  v1.28.3
-ip-10-0-41-21  Ready   <none>  10m  v1.28.3
+ip-10-0-3-155  Ready   <none>  10m  v1.31.3
+ip-10-0-26-65  Ready   <none>  10m  v1.31.3
+ip-10-0-41-21  Ready   <none>  10m  v1.31.3
 ```
 
 List the pods.
@@ -155,9 +156,9 @@ List the pods.
 ```
 $ kubectl get pods --all-namespaces
 NAMESPACE     NAME                           READY  STATUS   RESTARTS  AGE
-kube-system   calico-node-1m5bf              2/2    Running  0         34m
-kube-system   calico-node-7jmr1              2/2    Running  0         34m
-kube-system   calico-node-bknc8              2/2    Running  0         34m
+kube-system   cilium-1m5bf                   1/1    Running  0         34m
+kube-system   cilium-7jmr1                   1/1    Running  0         34m
+kube-system   cilium-bknc8                   1/1    Running  0         34m
 kube-system   coredns-1187388186-wx1lg       1/1    Running  0         34m
 kube-system   coredns-1187388186-qjnvp       1/1    Running  0         34m
 kube-system   kube-apiserver-ip-10-0-3-155   1/1    Running  0         34m
@@ -206,16 +207,19 @@ Reference the DNS zone id with `aws_route53_zone.zone-for-clusters.zone_id`.
 
 | Name | Description | Default | Example |
 |:-----|:------------|:--------|:--------|
-| controller_count | Number of controllers (i.e. masters) | 1 | 1 |
-| worker_count | Number of workers | 1 | 3 |
-| controller_type | EC2 instance type for controllers | "t3.small" | See below |
-| worker_type | EC2 instance type for workers | "t3.small" | See below |
-| os_image | AMI channel for a Container Linux derivative | "flatcar-stable" | flatcar-stable, flatcar-beta, flatcar-alpha |
-| disk_size | Size of the EBS volume in GB | 30 | 100 |
-| disk_type | Type of the EBS volume | "gp3" | standard, gp2, gp3, io1 |
-| disk_iops | IOPS of the EBS volume | 0 (i.e. auto) | 400 |
-| worker_target_groups | Target group ARNs to which worker instances should be added | [] | [aws_lb_target_group.app.id] |
+| controller_count | Number of controllers (i.e. masters) | 1 | 1 |
+| controller_type | EC2 instance type for controllers | "t3.small" | See below |
+| controller_disk_size | Size of EBS volume in GB | 30 | 100 |
+| controller_disk_type | Type of EBS volume | gp3 | io1 |
+| controller_disk_iops | IOPS of EBS volume | 3000 | 4000 |
+| controller_cpu_credits | Burstable CPU pricing model | null (i.e. auto) | standard, unlimited |
+| worker_disk_size | Size of EBS volume in GB | 30 | 100 |
+| worker_disk_type | Type of EBS volume | gp3 | io1 |
+| worker_disk_iops | IOPS of EBS volume | 3000 | 4000 |
+| worker_cpu_credits | Burstable CPU pricing model | null (i.e. auto) | standard, unlimited |
+| worker_price | Spot price in USD for worker instances or 0 to use on-demand instances | 0/null | 0.10 |
+| worker_target_groups | Target group ARNs to which worker instances should be added | [] | [aws_lb_target_group.app.id] |
 | controller_snippets | Controller Container Linux Config snippets | [] | [example](/advanced/customization/) |
 | worker_snippets | Worker Container Linux Config snippets | [] | [example](/advanced/customization/) |
 | networking | Choice of networking provider | "cilium" | "calico" or "cilium" or "flannel" |
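
For reference, the Route53 zone named in the hunk header above is defined elsewhere in the doc as roughly the following sketch; the domain is the doc's example, not a new requirement:

```tf
# DNS zone that cluster records (e.g. tempest.aws.example.com) land in
resource "aws_route53_zone" "zone-for-clusters" {
  name = "aws.example.com."
}
```
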
@@ -228,7 +232,7 @@ Reference the DNS zone id with `aws_route53_zone.zone-for-clusters.zone_id`.
 Check the list of valid [instance types](https://aws.amazon.com/ec2/instance-types/).
 
 !!! warning
-    Do not choose a `controller_type` smaller than `t2.small`. Smaller instances are not sufficient for running a controller.
+    Do not choose a `controller_type` smaller than `t3.small`. Smaller instances are not sufficient for running a controller.
 
 !!! tip "MTU"
     If your EC2 instance type supports [Jumbo frames](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/network_mtu.html#jumbo_frame_instances) (most do), we recommend you change the `network_mtu` to 8981! You will get better pod-to-pod bandwidth.
@@ -236,4 +240,3 @@ Check the list of valid [instance types](https://aws.amazon.com/ec2/instance-types/).
 
 #### Spot
 
 Add `worker_price = "0.10"` to use spot instance workers (instead of "on-demand") and set a maximum spot price in USD. Clusters can tolerate spot market interruptions fairly well (reschedules pods, but cannot drain) to save money, with the tradeoff that requests for workers may go unfulfilled.
-
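
Putting the two optional knobs above together, the jumbo-frame MTU from the tip and a spot bid for workers, the `tempest` example becomes the following sketch; all values are the doc's own examples:

```tf
module "tempest" {
  source = "git::https://github.com/poseidon/typhoon//aws/flatcar-linux/kubernetes?ref=v1.31.3"

  # AWS
  cluster_name = "tempest"
  dns_zone     = "aws.example.com"
  dns_zone_id  = "Z3PAABBCFAKEC0"

  # instances: spot workers with a $0.10/hr ceiling; requests may go unfulfilled
  worker_count = 2
  worker_type  = "t3.small"
  worker_price = "0.10"

  # network: jumbo frames for better pod-to-pod bandwidth (supported instance types)
  network_mtu = 8981

  # configuration
  ssh_authorized_key = "ssh-rsa AAAAB3Nz..."
}
```
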
@@ -1,10 +1,10 @@
 # Azure
 
-In this tutorial, we'll create a Kubernetes v1.28.3 cluster on Azure with Flatcar Linux.
+In this tutorial, we'll create a Kubernetes v1.31.3 cluster on Azure with Flatcar Linux.
 
 We'll declare a Kubernetes cluster using the Typhoon Terraform module. Then apply the changes to create a resource group, virtual network, subnets, security groups, controller availability set, worker scale set, load balancer, and TLS assets.
 
-Controller hosts are provisioned to run an `etcd-member` peer and a `kubelet` service. Worker hosts run a `kubelet` service. Controller nodes run `kube-apiserver`, `kube-scheduler`, `kube-controller-manager`, and `coredns`, while `kube-proxy` and `calico` (or `flannel`) run on every node. A generated `kubeconfig` provides `kubectl` access to the cluster.
+Controller hosts are provisioned to run an `etcd-member` peer and a `kubelet` service. Worker hosts run a `kubelet` service. Controller nodes run `kube-apiserver`, `kube-scheduler`, `kube-controller-manager`, and `coredns`, while `kube-proxy` and (`flannel`, `calico`, or `cilium`) run on every node. A generated `kubeconfig` provides `kubectl` access to the cluster.
 
 ## Requirements
 
@@ -75,20 +75,22 @@ Define a Kubernetes cluster using the module `azure/flatcar-linux/kubernetes`.
 
 ```tf
 module "ramius" {
-  source = "git::https://github.com/poseidon/typhoon//azure/flatcar-linux/kubernetes?ref=v1.28.3"
+  source = "git::https://github.com/poseidon/typhoon//azure/flatcar-linux/kubernetes?ref=v1.31.3"
 
   # Azure
   cluster_name   = "ramius"
-  region         = "centralus"
+  location       = "centralus"
   dns_zone       = "azure.example.com"
   dns_zone_group = "example-group"
+  network_cidr = {
+    ipv4 = ["10.0.0.0/20"]
+  }
+
+  # instances
+  worker_count = 2
 
   # configuration
   ssh_authorized_key = "ssh-rsa AAAAB3Nz..."
-
-  # optional
-  worker_count = 2
-  host_cidr    = "10.0.0.0/20"
 }
 ```
 
@@ -138,8 +140,9 @@ In 4-8 minutes, the Kubernetes cluster will be ready.
 
 ```
 resource "local_file" "kubeconfig-ramius" {
-  content  = module.ramius.kubeconfig-admin
-  filename = "/home/user/.kube/configs/ramius-config"
+  content         = module.ramius.kubeconfig-admin
+  filename        = "/home/user/.kube/configs/ramius-config"
+  file_permission = "0600"
 }
 ```
 
@@ -149,9 +152,9 @@ List nodes in the cluster.
 $ export KUBECONFIG=/home/user/.kube/configs/ramius-config
 $ kubectl get nodes
 NAME                  STATUS  ROLES   AGE  VERSION
-ramius-controller-0   Ready   <none>  24m  v1.28.3
-ramius-worker-000001  Ready   <none>  25m  v1.28.3
-ramius-worker-000002  Ready   <none>  24m  v1.28.3
+ramius-controller-0   Ready   <none>  24m  v1.31.3
+ramius-worker-000001  Ready   <none>  25m  v1.31.3
+ramius-worker-000002  Ready   <none>  24m  v1.31.3
 ```
 
 List the pods.
@@ -161,9 +164,9 @@ $ kubectl get pods --all-namespaces
 NAMESPACE     NAME                                          READY  STATUS   RESTARTS  AGE
 kube-system   coredns-7c6fbb4f4b-b6qzx                      1/1    Running  0         26m
 kube-system   coredns-7c6fbb4f4b-j2k3d                      1/1    Running  0         26m
-kube-system   calico-node-1m5bf                             2/2    Running  0         26m
-kube-system   calico-node-7jmr1                             2/2    Running  0         26m
-kube-system   calico-node-bknc8                             2/2    Running  0         26m
+kube-system   cilium-1m5bf                                  1/1    Running  0         26m
+kube-system   cilium-7jmr1                                  1/1    Running  0         26m
+kube-system   cilium-bknc8                                  1/1    Running  0         26m
 kube-system   kube-apiserver-ramius-controller-0            1/1    Running  0         26m
 kube-system   kube-controller-manager-ramius-controller-0   1/1    Running  0         26m
 kube-system   kube-proxy-j4vpq                              1/1    Running  0         26m
@@ -185,13 +188,13 @@ Check the [variables.tf](https://github.com/poseidon/typhoon/blob/master/azure/flatcar-linux/kubernetes/variables.tf) source.
 
 | Name | Description | Example |
 |:-----|:------------|:--------|
 | cluster_name | Unique cluster name (prepended to dns_zone) | "ramius" |
-| region | Azure region | "centralus" |
+| location | Azure location | "centralus" |
 | dns_zone | Azure DNS zone | "azure.example.com" |
 | dns_zone_group | Resource group where the Azure DNS zone resides | "global" |
 | ssh_authorized_key | SSH public key for user 'core' | "ssh-rsa AAAAB3NZ..." |
 
 !!! tip
-    Regions are shown in [docs](https://azure.microsoft.com/en-us/global-infrastructure/regions/) or with `az account list-locations --output table`.
+    Locations are shown in [docs](https://azure.microsoft.com/en-us/global-infrastructure/regions/) or with `az account list-locations --output table`.
 
 #### DNS Zone
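
The `#### DNS Zone` section quoted by the next hunk's header defines the zone and its resource group. A rough sketch, using the doc's example names (unchanged by this commit, shown for orientation only):

```tf
resource "azurerm_resource_group" "global" {
  name     = "global"
  location = "centralus"
}

resource "azurerm_dns_zone" "clusters" {
  resource_group_name = azurerm_resource_group.global.name
  name                = "azure.example.com"
}
```
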
@@ -224,26 +227,27 @@ Reference the DNS zone with `azurerm_dns_zone.clusters.name` and its resource group with `azurerm_dns_zone.clusters.resource_group_name`.
 
 | Name | Description | Default | Example |
 |:-----|:------------|:--------|:--------|
-| controller_count | Number of controllers (i.e. masters) | 1 | 1 |
-| worker_count | Number of workers | 1 | 3 |
-| controller_type | Machine type for controllers | "Standard_B2s" | See below |
-| worker_type | Machine type for workers | "Standard_D2as_v5" | See below |
-| os_image | Channel for a Container Linux derivative | "flatcar-stable" | flatcar-stable, flatcar-beta, flatcar-alpha |
-| disk_size | Size of the disk in GB | 30 | 100 |
+| controller_count | Number of controllers (i.e. masters) | 1 | 1 |
+| controller_type | Machine type for controllers | "Standard_B2s" | See below |
+| controller_disk_type | Managed disk for controllers | Premium_LRS | Standard_LRS |
+| controller_disk_size | Managed disk size in GB | 30 | 50 |
+| worker_count | Number of workers | 1 | 3 |
+| worker_type | Machine type for workers | "Standard_D2as_v5" | See below |
+| worker_disk_type | Managed disk for workers | Standard_LRS | Premium_LRS |
+| worker_disk_size | Size of the disk in GB | 30 | 100 |
+| worker_ephemeral_disk | Use ephemeral local disk instead of managed disk | false | true |
 | worker_priority | Set priority to Spot to use reduced cost surplus capacity, with the tradeoff that instances can be deallocated at any time | Regular | Spot |
 | controller_snippets | Controller Container Linux Config snippets | [] | [example](/advanced/customization/#usage) |
 | worker_snippets | Worker Container Linux Config snippets | [] | [example](/advanced/customization/#usage) |
 | networking | Choice of networking provider | "cilium" | "calico" or "cilium" or "flannel" |
-| host_cidr | CIDR IPv4 range to assign to instances | "10.0.0.0/16" | "10.0.0.0/20" |
+| network_cidr | Virtual network CIDR ranges | { ipv4 = ["10.0.0.0/16"], ipv6 = [ULA, ...] } | { ipv4 = ["10.0.0.0/20"] } |
 | pod_cidr | CIDR IPv4 range to assign to Kubernetes pods | "10.2.0.0/16" | "10.22.0.0/16" |
 | service_cidr | CIDR IPv4 range to assign to Kubernetes services | "10.3.0.0/16" | "10.3.0.0/24" |
 | worker_node_labels | List of initial worker node labels | [] | ["worker-pool=default"] |
 
 Check the list of valid [machine types](https://azure.microsoft.com/en-us/pricing/details/virtual-machines/linux/) and their [specs](https://docs.microsoft.com/en-us/azure/virtual-machines/linux/sizes-general). Use `az vm list-skus` to get the identifier.
 
 !!! warning
     Unlike AWS and GCP, Azure requires its *virtual* networks to have non-overlapping IPv4 CIDRs (yeah, go figure). Instead of each cluster just using `10.0.0.0/16` for instances, each Azure cluster's `host_cidr` must be non-overlapping (e.g. 10.0.0.0/20 for the 1st cluster, 10.0.16.0/20 for the 2nd cluster, etc).
 
 !!! warning
     Do not choose a `controller_type` smaller than `Standard_B2s`. Smaller instances are not sufficient for running a controller.
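
Per the first warning above, each Azure cluster needs its own non-overlapping range. A small sketch of how that might be organized, where `ramius2` is a hypothetical second cluster and the values come from the warning's example:

```tf
# Non-overlapping virtual network CIDRs, one entry per cluster
locals {
  network_cidrs = {
    ramius  = { ipv4 = ["10.0.0.0/20"] }  # 1st cluster
    ramius2 = { ipv4 = ["10.0.16.0/20"] } # 2nd cluster
  }
}
```

Each cluster module then receives its own entry, e.g. `network_cidr = local.network_cidrs.ramius`.
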
@@ -1,10 +1,10 @@
 # Bare-Metal
 
-In this tutorial, we'll network boot and provision a Kubernetes v1.28.3 cluster on bare-metal with Flatcar Linux.
+In this tutorial, we'll network boot and provision a Kubernetes v1.31.3 cluster on bare-metal with Flatcar Linux.
 
 First, we'll deploy a [Matchbox](https://github.com/poseidon/matchbox) service and set up a network boot environment. Then, we'll declare a Kubernetes cluster using the Typhoon Terraform module and power on machines. On PXE boot, machines will install Container Linux to disk, reboot into the disk install, and provision themselves as Kubernetes controllers or workers via Ignition.
 
-Controller hosts are provisioned to run an `etcd-member` peer and a `kubelet` service. Worker hosts run a `kubelet` service. Controller nodes run `kube-apiserver`, `kube-scheduler`, `kube-controller-manager`, and `coredns` while `kube-proxy` and `calico` (or `flannel`) run on every node. A generated `kubeconfig` provides `kubectl` access to the cluster.
+Controller hosts are provisioned to run an `etcd-member` peer and a `kubelet` service. Worker hosts run a `kubelet` service. Controller nodes run `kube-apiserver`, `kube-scheduler`, `kube-controller-manager`, and `coredns` while `kube-proxy` and (`flannel`, `calico`, or `cilium`) run on every node. A generated `kubeconfig` provides `kubectl` access to the cluster.
 
 ## Requirements
 
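
The Matchbox service is wired into Terraform as a provider (unchanged by this commit). A minimal sketch of that wiring, with the endpoint and certificate paths as assumptions and the version shown only as an illustration:

```tf
# Matchbox's gRPC API requires TLS client authentication
provider "matchbox" {
  endpoint    = "matchbox.example.com:8081"
  client_cert = file(pathexpand("~/.config/matchbox/client.crt"))
  client_key  = file(pathexpand("~/.config/matchbox/client.key"))
  ca          = file(pathexpand("~/.config/matchbox/ca.crt"))
}

terraform {
  required_providers {
    matchbox = {
      source  = "poseidon/matchbox"
      version = "0.5.4"
    }
  }
}
```
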
@@ -154,7 +154,7 @@ Define a Kubernetes cluster using the module `bare-metal/flatcar-linux/kubernetes`.
 
 ```tf
 module "mercury" {
-  source = "git::https://github.com/poseidon/typhoon//bare-metal/flatcar-linux/kubernetes?ref=v1.28.3"
+  source = "git::https://github.com/poseidon/typhoon//bare-metal/flatcar-linux/kubernetes?ref=v1.31.3"
 
   # bare-metal
   cluster_name = "mercury"
@@ -194,7 +194,7 @@ Workers with similar features can be defined inline using the `workers` field as
 
 ```tf
 module "mercury-node1" {
-  source = "git::https://github.com/poseidon/typhoon//bare-metal/fedora-coreos/kubernetes/worker?ref=v1.28.3"
+  source = "git::https://github.com/poseidon/typhoon//bare-metal/fedora-coreos/kubernetes/worker?ref=v1.31.3"
 
   # bare-metal
   cluster_name = "mercury"
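
Alternatively, per the hunk header above, workers with similar features can be listed inline via the cluster module's `workers` field. An abridged sketch; the names, MACs, and domains are placeholders, and the module's other required settings (Matchbox endpoint, DNS, OS channel) are elided as in the hunks above:

```tf
module "mercury" {
  source = "git::https://github.com/poseidon/typhoon//bare-metal/flatcar-linux/kubernetes?ref=v1.31.3"

  # bare-metal
  cluster_name = "mercury"
  # ... matchbox, DNS, and OS settings as in the full example ...

  # inline workers with similar features
  workers = [
    {
      name   = "node2"
      mac    = "52:54:00:b2:2f:86"
      domain = "node2.example.com"
    },
    {
      name   = "node3"
      mac    = "52:54:00:c3:61:77"
      domain = "node3.example.com"
    },
  ]
}
```
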
@@ -312,8 +312,9 @@ systemd[1]: Started Kubernetes control plane.
 
 ```
 resource "local_file" "kubeconfig-mercury" {
-  content  = module.mercury.kubeconfig-admin
-  filename = "/home/user/.kube/configs/mercury-config"
+  content         = module.mercury.kubeconfig-admin
+  filename        = "/home/user/.kube/configs/mercury-config"
+  file_permission = "0600"
 }
 ```
 
@@ -323,9 +324,9 @@ List nodes in the cluster.
 $ export KUBECONFIG=/home/user/.kube/configs/mercury-config
 $ kubectl get nodes
 NAME               STATUS  ROLES   AGE  VERSION
-node1.example.com  Ready   <none>  10m  v1.28.3
-node2.example.com  Ready   <none>  10m  v1.28.3
-node3.example.com  Ready   <none>  10m  v1.28.3
+node1.example.com  Ready   <none>  10m  v1.31.3
+node2.example.com  Ready   <none>  10m  v1.31.3
+node3.example.com  Ready   <none>  10m  v1.31.3
 ```
 
 List the pods.
@@ -333,9 +334,10 @@ List the pods.
 ```
 $ kubectl get pods --all-namespaces
 NAMESPACE     NAME                               READY  STATUS   RESTARTS  AGE
-kube-system   calico-node-6qp7f                  2/2    Running  1         11m
-kube-system   calico-node-gnjrm                  2/2    Running  0         11m
-kube-system   calico-node-llbgt                  2/2    Running  0         11m
+kube-system   cilium-6qp7f                       1/1    Running  1         11m
+kube-system   cilium-gnjrm                       1/1    Running  0         11m
+kube-system   cilium-llbgt                       1/1    Running  0         11m
+kube-system   cilium-operator-68d778b448-g744f   1/1    Running  0         11m
 kube-system   coredns-1187388186-dj3pd           1/1    Running  0         11m
 kube-system   coredns-1187388186-mx9rt           1/1    Running  0         11m
 kube-system   kube-apiserver-node1.example.com   1/1    Running  0         11m
@@ -1,10 +1,10 @@
 # DigitalOcean
 
-In this tutorial, we'll create a Kubernetes v1.28.3 cluster on DigitalOcean with Flatcar Linux.
+In this tutorial, we'll create a Kubernetes v1.31.3 cluster on DigitalOcean with Flatcar Linux.
 
 We'll declare a Kubernetes cluster using the Typhoon Terraform module. Then apply the changes to create controller droplets, worker droplets, DNS records, tags, and TLS assets.
 
-Controller hosts are provisioned to run an `etcd-member` peer and a `kubelet` service. Worker hosts run a `kubelet` service. Controller nodes run `kube-apiserver`, `kube-scheduler`, `kube-controller-manager`, and `coredns`, while `kube-proxy` and `calico` (or `flannel`) run on every node. A generated `kubeconfig` provides `kubectl` access to the cluster.
+Controller hosts are provisioned to run an `etcd-member` peer and a `kubelet` service. Worker hosts run a `kubelet` service. Controller nodes run `kube-apiserver`, `kube-scheduler`, `kube-controller-manager`, and `coredns`, while `kube-proxy` and (`flannel`, `calico`, or `cilium`) run on every node. A generated `kubeconfig` provides `kubectl` access to the cluster.
 
 ## Requirements
 
@@ -81,19 +81,19 @@ Define a Kubernetes cluster using the module `digital-ocean/flatcar-linux/kubernetes`.
 
 ```tf
 module "nemo" {
-  source = "git::https://github.com/poseidon/typhoon//digital-ocean/flatcar-linux/kubernetes?ref=v1.28.3"
+  source = "git::https://github.com/poseidon/typhoon//digital-ocean/flatcar-linux/kubernetes?ref=v1.31.3"
 
   # Digital Ocean
   cluster_name = "nemo"
   region       = "nyc3"
   dns_zone     = "digital-ocean.example.com"
 
-  # configuration
-  os_image         = data.digitalocean_image.flatcar-stable-2303-4-0.id
-  ssh_fingerprints = ["d7:9d:79:ae:56:32:73:79:95:88:e3:a2:ab:5d:45:e7"]
-
-  # optional
+  # instances
+  os_image     = data.digitalocean_image.flatcar-stable-2303-4-0.id
   worker_count = 2
+
+  # configuration
+  ssh_fingerprints = ["d7:9d:79:ae:56:32:73:79:95:88:e3:a2:ab:5d:45:e7"]
 }
 ```
 
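
`os_image` above points at a Flatcar image uploaded to DigitalOcean as a custom image (that step is unchanged by this commit). The data source it references might be declared like this sketch, where the image name is a placeholder for whatever the uploaded image is called in your account:

```tf
# Looks up the custom Flatcar image by its name in your DigitalOcean account
data "digitalocean_image" "flatcar-stable-2303-4-0" {
  name = "flatcar-stable-2303.4.0.bin.bz2"
}
```
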
@@ -144,8 +144,9 @@ In 3-6 minutes, the Kubernetes cluster will be ready.
 
 ```
 resource "local_file" "kubeconfig-nemo" {
-  content  = module.nemo.kubeconfig-admin
-  filename = "/home/user/.kube/configs/nemo-config"
+  content         = module.nemo.kubeconfig-admin
+  filename        = "/home/user/.kube/configs/nemo-config"
+  file_permission = "0600"
 }
 ```
 
@@ -155,9 +156,9 @@ List nodes in the cluster.
 $ export KUBECONFIG=/home/user/.kube/configs/nemo-config
 $ kubectl get nodes
 NAME             STATUS  ROLES   AGE  VERSION
-10.132.110.130   Ready   <none>  10m  v1.28.3
-10.132.115.81    Ready   <none>  10m  v1.28.3
-10.132.124.107   Ready   <none>  10m  v1.28.3
+10.132.110.130   Ready   <none>  10m  v1.31.3
+10.132.115.81    Ready   <none>  10m  v1.31.3
+10.132.124.107   Ready   <none>  10m  v1.31.3
 ```
 
 List the pods.
@@ -166,9 +167,9 @@ List the pods.
 NAMESPACE     NAME                                        READY  STATUS   RESTARTS  AGE
 kube-system   coredns-1187388186-ld1j7                    1/1    Running  0         11m
 kube-system   coredns-1187388186-rdhf7                    1/1    Running  0         11m
-kube-system   calico-node-1m5bf                           2/2    Running  0         11m
-kube-system   calico-node-7jmr1                           2/2    Running  0         11m
-kube-system   calico-node-bknc8                           2/2    Running  0         11m
+kube-system   cilium-1m5bf                                1/1    Running  0         11m
+kube-system   cilium-7jmr1                                1/1    Running  0         11m
+kube-system   cilium-bknc8                                1/1    Running  0         11m
 kube-system   kube-apiserver-ip-10.132.115.81             1/1    Running  0         11m
 kube-system   kube-controller-manager-ip-10.132.115.81    1/1    Running  0         11m
 kube-system   kube-proxy-6kxjf                            1/1    Running  0         11m
@@ -1,10 +1,10 @@
 # Google Cloud
 
-In this tutorial, we'll create a Kubernetes v1.28.3 cluster on Google Compute Engine with Flatcar Linux.
+In this tutorial, we'll create a Kubernetes v1.31.3 cluster on Google Compute Engine with Flatcar Linux.
 
 We'll declare a Kubernetes cluster using the Typhoon Terraform module. Then apply the changes to create a network, firewall rules, health checks, controller instances, worker managed instance group, load balancers, and TLS assets.
 
-Controller hosts are provisioned to run an `etcd-member` peer and a `kubelet` service. Worker hosts run a `kubelet` service. Controller nodes run `kube-apiserver`, `kube-scheduler`, `kube-controller-manager`, and `coredns`, while `kube-proxy` and `calico` (or `flannel`) run on every node. A generated `kubeconfig` provides `kubectl` access to the cluster.
+Controller hosts are provisioned to run an `etcd-member` peer and a `kubelet` service. Worker hosts run a `kubelet` service. Controller nodes run `kube-apiserver`, `kube-scheduler`, `kube-controller-manager`, and `coredns`, while `kube-proxy` and (`flannel`, `calico`, or `cilium`) run on every node. A generated `kubeconfig` provides `kubectl` access to the cluster.
 
 ## Requirements
 
@@ -73,7 +73,7 @@ Define a Kubernetes cluster using the module `google-cloud/flatcar-linux/kubernetes`.
 
 ```tf
 module "yavin" {
-  source = "git::https://github.com/poseidon/typhoon//google-cloud/flatcar-linux/kubernetes?ref=v1.28.3"
+  source = "git::https://github.com/poseidon/typhoon//google-cloud/flatcar-linux/kubernetes?ref=v1.31.3"
 
   # Google Cloud
   cluster_name = "yavin"
@@ -81,11 +81,11 @@ module "yavin" {
   dns_zone      = "example.com"
   dns_zone_name = "example-zone"
 
+  # instances
+  worker_count = 2
+
   # configuration
   ssh_authorized_key = "ssh-rsa AAAAB3Nz..."
-
-  # optional
-  worker_count = 2
 }
 ```
 
@@ -136,8 +136,9 @@ In 4-8 minutes, the Kubernetes cluster will be ready.
 
 ```
 resource "local_file" "kubeconfig-yavin" {
-  content  = module.yavin.kubeconfig-admin
-  filename = "/home/user/.kube/configs/yavin-config"
+  content         = module.yavin.kubeconfig-admin
+  filename        = "/home/user/.kube/configs/yavin-config"
+  file_permission = "0600"
 }
 ```
 
@@ -147,9 +148,9 @@ List nodes in the cluster.
 $ export KUBECONFIG=/home/user/.kube/configs/yavin-config
 $ kubectl get nodes
 NAME                                        ROLES   STATUS  AGE  VERSION
-yavin-controller-0.c.example-com.internal   <none>  Ready   6m   v1.28.3
-yavin-worker-jrbf.c.example-com.internal    <none>  Ready   5m   v1.28.3
-yavin-worker-mzdm.c.example-com.internal    <none>  Ready   5m   v1.28.3
+yavin-controller-0.c.example-com.internal   <none>  Ready   6m   v1.31.3
+yavin-worker-jrbf.c.example-com.internal    <none>  Ready   5m   v1.31.3
+yavin-worker-mzdm.c.example-com.internal    <none>  Ready   5m   v1.31.3
 ```
 
 List the pods.
@@ -157,9 +158,9 @@ List the pods.
 ```
 $ kubectl get pods --all-namespaces
 NAMESPACE     NAME                          READY  STATUS   RESTARTS  AGE
-kube-system   calico-node-1cs8z             2/2    Running  0         6m
-kube-system   calico-node-d1l5b             2/2    Running  0         6m
-kube-system   calico-node-sp9ps             2/2    Running  0         6m
+kube-system   cilium-1cs8z                  1/1    Running  0         6m
+kube-system   cilium-d1l5b                  1/1    Running  0         6m
+kube-system   cilium-sp9ps                  1/1    Running  0         6m
 kube-system   coredns-1187388186-dkh3o      1/1    Running  0         6m
 kube-system   coredns-1187388186-zj5dl      1/1    Running  0         6m
 kube-system   kube-apiserver-controller-0   1/1    Running  0         6m
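
The next hunk's header quotes the start of the `google_dns_managed_zone` the doc defines for cluster DNS. For orientation, filled out with the example zone values used by `yavin` above it would look roughly like this (the description is an assumption):

```tf
resource "google_dns_managed_zone" "zone-for-clusters" {
  dns_name    = "example.com."
  name        = "example-zone"
  description = "DNS zone for Kubernetes clusters"
}
```
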
@@ -209,25 +210,25 @@ resource "google_dns_managed_zone" "zone-for-clusters" {
 
 ### Optional
 
-| Name | Description | Default | Example |
-|:-----|:------------|:--------|:--------|
-| controller_count | Number of controllers (i.e. masters) | 1 | 3 |
-| worker_count | Number of workers | 1 | 3 |
-| controller_type | Machine type for controllers | "n1-standard-1" | See below |
-| worker_type | Machine type for workers | "n1-standard-1" | See below |
-| os_image | Flatcar Linux image for compute instances | "flatcar-stable" | flatcar-stable, flatcar-beta, flatcar-alpha |
-| disk_size | Size of the disk in GB | 30 | 100 |
-| worker_preemptible | If enabled, Compute Engine will terminate workers randomly within 24 hours | false | true |
-| controller_snippets | Controller Container Linux Config snippets | [] | [example](/advanced/customization/) |
-| worker_snippets | Worker Container Linux Config snippets | [] | [example](/advanced/customization/) |
-| networking | Choice of networking provider | "cilium" | "calico" or "cilium" or "flannel" |
-| pod_cidr | CIDR IPv4 range to assign to Kubernetes pods | "10.2.0.0/16" | "10.22.0.0/16" |
-| service_cidr | CIDR IPv4 range to assign to Kubernetes services | "10.3.0.0/16" | "10.3.0.0/24" |
-| worker_node_labels | List of initial worker node labels | [] | ["worker-pool=default"] |
+| Name | Description | Default | Example |
+|:---------------------|:---------------------------------------------------------------------------|:-----------------|:--------------------------------------------|
+| os_image | Flatcar Linux image for compute instances | "flatcar-stable" | flatcar-stable, flatcar-beta, flatcar-alpha |
+| controller_count | Number of controllers (i.e. masters) | 1 | 3 |
+| controller_type | Machine type for controllers | "n1-standard-1" | See below |
+| controller_disk_size | Controller disk size in GB | 30 | 20 |
+| worker_count | Number of workers | 1 | 3 |
+| worker_type | Machine type for workers | "n1-standard-1" | See below |
+| worker_disk_size | Worker disk size in GB | 30 | 100 |
+| worker_preemptible | If enabled, Compute Engine will terminate workers randomly within 24 hours | false | true |
+| controller_snippets | Controller Container Linux Config snippets | [] | [example](/advanced/customization/) |
+| worker_snippets | Worker Container Linux Config snippets | [] | [example](/advanced/customization/) |
+| networking | Choice of networking provider | "cilium" | "calico" or "cilium" or "flannel" |
+| pod_cidr | CIDR IPv4 range to assign to Kubernetes pods | "10.2.0.0/16" | "10.22.0.0/16" |
+| service_cidr | CIDR IPv4 range to assign to Kubernetes services | "10.3.0.0/16" | "10.3.0.0/24" |
+| worker_node_labels | List of initial worker node labels | [] | ["worker-pool=default"] |
 
 Check the list of valid [machine types](https://cloud.google.com/compute/docs/machine-types).
 
 #### Preemption
 
 Add `worker_preemptible = "true"` to allow worker nodes to be [preempted](https://cloud.google.com/compute/docs/instances/preemptible) at random, but pay [significantly](https://cloud.google.com/compute/pricing) less. Clusters tolerate stopping instances fairly well (reschedules pods, but cannot drain) and preemption provides a nice reward for running fault-tolerant cluster systems.
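
As a concrete sketch, preemptible workers are a one-line addition to the `yavin` example above; the region is assumed here since the hunks elide it:

```tf
module "yavin" {
  source = "git::https://github.com/poseidon/typhoon//google-cloud/flatcar-linux/kubernetes?ref=v1.31.3"

  # Google Cloud
  cluster_name  = "yavin"
  region        = "us-central1" # assumed; elided by the hunks above
  dns_zone      = "example.com"
  dns_zone_name = "example-zone"

  # instances: preemptible workers cost less but may be terminated within 24 hours
  worker_count       = 2
  worker_preemptible = true

  # configuration
  ssh_authorized_key = "ssh-rsa AAAAB3Nz..."
}
```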