google: Add controller_disk_type and worker_disk_type variables (#1513)

* Add controller_disk_type and worker_disk_type variables
* Properly pass disk_type to worker nodes
Jordan Pittier 2024-09-20 23:31:17 +02:00 committed by GitHub
parent b2fad7771f
commit 3f844e3c57
13 changed files with 104 additions and 38 deletions

@@ -4,6 +4,10 @@ Notable changes between versions.
## Latest
### Google
* Add `controller_disk_type` and `worker_disk_type` variables
## v1.31.0
* Kubernetes [v1.31.0](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.31.md#v1310)

@@ -209,26 +209,27 @@ resource "google_dns_managed_zone" "zone-for-clusters" {
### Optional
| Name | Description | Default | Example |
|:---------------------|:---------------------------------------------------------------------------|:----------------|:-------------------------------------|
| os_stream | Fedora CoreOS stream for compute instances | "stable" | "stable", "testing", "next" |
| controller_count | Number of controllers (i.e. masters) | 1 | 3 |
| controller_type | Machine type for controllers | "n1-standard-1" | See below |
| controller_disk_size | Controller disk size in GB | 30 | 20 |
| controller_disk_type | Controller disk type | "pd-standard" | "pd-ssd" |
| worker_count | Number of workers | 1 | 3 |
| worker_type | Machine type for workers | "n1-standard-1" | See below |
| worker_disk_size | Worker disk size in GB | 30 | 100 |
| worker_disk_type | Worker disk type | "pd-standard" | "pd-ssd" |
| worker_preemptible | If enabled, Compute Engine will terminate workers randomly within 24 hours | false | true |
| controller_snippets | Controller Butane snippets | [] | [examples](/advanced/customization/) |
| worker_snippets | Worker Butane snippets | [] | [examples](/advanced/customization/) |
| networking | Choice of networking provider | "cilium" | "calico" or "cilium" or "flannel" |
| pod_cidr | CIDR IPv4 range to assign to Kubernetes pods | "10.2.0.0/16" | "10.22.0.0/16" |
| service_cidr | CIDR IPv4 range to assign to Kubernetes services | "10.3.0.0/16" | "10.3.0.0/24" |
| worker_node_labels | List of initial worker node labels | [] | ["worker-pool=default"] |
Check the list of valid [machine types](https://cloud.google.com/compute/docs/machine-types).
#### Preemption
Add `worker_preemptible = "true"` to allow worker nodes to be [preempted](https://cloud.google.com/compute/docs/instances/preemptible) at random, but pay [significantly](https://cloud.google.com/compute/pricing) less. Clusters tolerate stopping instances fairly well (reschedules pods, but cannot drain) and preemption provides a nice reward for running fault-tolerant cluster systems.
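For reference, a minimal sketch of a cluster definition that sets the new disk type variables alongside preemptible workers; the module ref, cluster name, DNS zone, and SSH key are placeholders rather than values from this change:

```tf
# Hypothetical cluster using the new disk type variables (placeholder values).
module "yavin" {
  source = "git::https://github.com/poseidon/typhoon//google-cloud/fedora-coreos/kubernetes?ref=v1.31.0"

  # Google Cloud
  cluster_name  = "yavin"
  region        = "us-central1"
  dns_zone      = "example.com"
  dns_zone_name = "example-zone"

  # instances
  controller_disk_type = "pd-ssd"      # default "pd-standard"
  worker_disk_type     = "pd-balanced" # default "pd-standard"
  worker_preemptible   = true

  # configuration
  ssh_authorized_key = "ssh-ed25519 AAAA..."
}
```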

@@ -209,26 +209,25 @@ resource "google_dns_managed_zone" "zone-for-clusters" {
### Optional
| Name | Description | Default | Example |
|:---------------------|:---------------------------------------------------------------------------|:-----------------|:--------------------------------------------|
| os_image | Flatcar Linux image for compute instances | "flatcar-stable" | flatcar-stable, flatcar-beta, flatcar-alpha |
| controller_count | Number of controllers (i.e. masters) | 1 | 3 |
| controller_type | Machine type for controllers | "n1-standard-1" | See below |
| controller_disk_size | Controller disk size in GB | 30 | 20 |
| worker_count | Number of workers | 1 | 3 |
| worker_type | Machine type for workers | "n1-standard-1" | See below |
| worker_disk_size | Worker disk size in GB | 30 | 100 |
| worker_preemptible | If enabled, Compute Engine will terminate workers randomly within 24 hours | false | true |
| controller_snippets | Controller Container Linux Config snippets | [] | [example](/advanced/customization/) |
| worker_snippets | Worker Container Linux Config snippets | [] | [example](/advanced/customization/) |
| networking | Choice of networking provider | "cilium" | "calico" or "cilium" or "flannel" |
| pod_cidr | CIDR IPv4 range to assign to Kubernetes pods | "10.2.0.0/16" | "10.22.0.0/16" |
| service_cidr | CIDR IPv4 range to assign to Kubernetes services | "10.3.0.0/16" | "10.3.0.0/24" |
| worker_node_labels | List of initial worker node labels | [] | ["worker-pool=default"] |
Check the list of valid [machine types](https://cloud.google.com/compute/docs/machine-types).
#### Preemption
Add `worker_preemptible = "true"` to allow worker nodes to be [preempted](https://cloud.google.com/compute/docs/instances/preemptible) at random, but pay [significantly](https://cloud.google.com/compute/pricing) less. Clusters tolerate stopping instances fairly well (reschedules pods, but cannot drain) and preemption provides a nice reward for running fault-tolerant cluster systems.
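The Flatcar Linux module gains the same variables (see its variables.tf changes further below); a brief sketch, with the module ref as a placeholder and other required arguments elided:

```tf
# Same options on the Flatcar Linux module (other arguments elided).
module "yavin" {
  source = "git::https://github.com/poseidon/typhoon//google-cloud/flatcar-linux/kubernetes?ref=v1.31.0"
  # ...

  os_image             = "flatcar-stable"
  controller_disk_type = "pd-ssd"      # default "pd-standard"
  worker_disk_type     = "pd-balanced" # default "pd-standard"
}
```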

@@ -44,6 +44,7 @@ resource "google_compute_instance" "controllers" {
    initialize_params {
      image = data.google_compute_image.fedora-coreos.self_link
      size  = var.controller_disk_size
      type  = var.controller_disk_type
    }
  }
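For context, the added `type` sits inside the controller's `boot_disk` block; a sketch of the enclosing block, where `auto_delete` is an assumption not shown in this hunk:

```tf
# Enclosing block in google_compute_instance.controllers (sketch; auto_delete assumed).
boot_disk {
  auto_delete = true

  initialize_params {
    image = data.google_compute_image.fedora-coreos.self_link
    size  = var.controller_disk_size
    type  = var.controller_disk_type # pd-standard, pd-ssd, or pd-balanced
  }
}
```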

@@ -51,6 +51,16 @@ variable "controller_disk_size" {
  default     = 30
}

variable "controller_disk_type" {
  type        = string
  description = "Type of managed disk for controller node(s)"
  default     = "pd-standard"

  validation {
    condition     = contains(["pd-standard", "pd-ssd", "pd-balanced"], var.controller_disk_type)
    error_message = "The controller_disk_type must be pd-standard, pd-ssd or pd-balanced."
  }
}

variable "worker_count" {
  type        = number
  description = "Number of workers"
@@ -69,6 +79,16 @@ variable "worker_disk_size" {
  default     = 30
}

variable "worker_disk_type" {
  type        = string
  description = "Type of managed disk for worker nodes"
  default     = "pd-standard"

  validation {
    condition     = contains(["pd-standard", "pd-ssd", "pd-balanced"], var.worker_disk_type)
    error_message = "The worker_disk_type must be pd-standard, pd-ssd or pd-balanced."
  }
}

variable "worker_preemptible" {
  type        = bool
  description = "If enabled, Compute Engine will terminate workers randomly within 24 hours"

@@ -10,6 +10,7 @@ module "workers" {
  machine_type = var.worker_type
  os_stream    = var.os_stream
  disk_size    = var.worker_disk_size
  disk_type    = var.worker_disk_type
  preemptible  = var.worker_preemptible

  # configuration
@@ -19,4 +20,3 @@ module "workers" {
  snippets    = var.worker_snippets
  node_labels = var.worker_node_labels
}
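Since the `kubernetes` module forwards `worker_disk_type` to its internal `workers` module (which exposes it as `disk_type`, per the variables below), the option is also available when the `workers` module is instantiated directly for an additional worker pool. A rough sketch following the usual worker-pool pattern; the pool name, referenced outputs, and module ref are placeholders:

```tf
# Hypothetical extra worker pool with SSD boot disks (placeholder values).
module "yavin-ssd-pool" {
  source = "git::https://github.com/poseidon/typhoon//google-cloud/fedora-coreos/kubernetes/workers?ref=v1.31.0"

  # Google Cloud
  region       = "us-central1"
  network      = module.yavin.network_name
  cluster_name = "yavin"

  # configuration
  name               = "yavin-ssd"
  kubeconfig         = module.yavin.kubeconfig
  ssh_authorized_key = "ssh-ed25519 AAAA..."

  # optional
  worker_count = 2
  disk_size    = 100
  disk_type    = "pd-ssd" # note: the pool-level name is disk_type, not worker_disk_type
}
```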

@@ -51,6 +51,16 @@ variable "disk_size" {
  default     = 30
}

variable "disk_type" {
  type        = string
  description = "Type of managed disk"
  default     = "pd-standard"

  validation {
    condition     = contains(["pd-standard", "pd-ssd", "pd-balanced"], var.disk_type)
    error_message = "The disk_type must be pd-standard, pd-ssd or pd-balanced."
  }
}

variable "preemptible" {
  type        = bool
  description = "If enabled, Compute Engine will terminate instances randomly within 24 hours"
@@ -109,4 +119,3 @@ variable "accelerator_count" {
  default     = "0"
  description = "Number of compute engine accelerators"
}

@@ -80,6 +80,7 @@ resource "google_compute_region_instance_template" "worker" {
    boot         = true
    source_image = data.google_compute_image.fedora-coreos.self_link
    disk_size_gb = var.disk_size
    disk_type    = var.disk_type
  }

  network_interface {

@@ -45,6 +45,7 @@ resource "google_compute_instance" "controllers" {
    initialize_params {
      image = data.google_compute_image.flatcar-linux.self_link
      size  = var.controller_disk_size
      type  = var.controller_disk_type
    }
  }

@@ -51,6 +51,16 @@ variable "controller_disk_size" {
  default     = 30
}

variable "controller_disk_type" {
  type        = string
  description = "Type of managed disk for controller node(s)"
  default     = "pd-standard"

  validation {
    condition     = contains(["pd-standard", "pd-ssd", "pd-balanced"], var.controller_disk_type)
    error_message = "The controller_disk_type must be pd-standard, pd-ssd or pd-balanced."
  }
}

variable "worker_count" {
  type        = number
  description = "Number of workers"
@@ -69,6 +79,16 @@ variable "worker_disk_size" {
  default     = 30
}

variable "worker_disk_type" {
  type        = string
  description = "Type of managed disk for worker nodes"
  default     = "pd-standard"

  validation {
    condition     = contains(["pd-standard", "pd-ssd", "pd-balanced"], var.worker_disk_type)
    error_message = "The worker_disk_type must be pd-standard, pd-ssd or pd-balanced."
  }
}

variable "worker_preemptible" {
  type        = bool
  description = "If enabled, Compute Engine will terminate workers randomly within 24 hours"

@@ -10,6 +10,7 @@ module "workers" {
  machine_type = var.worker_type
  os_image     = var.os_image
  disk_size    = var.worker_disk_size
  disk_type    = var.worker_disk_type
  preemptible  = var.worker_preemptible

  # configuration
@@ -19,4 +20,3 @@ module "workers" {
  snippets    = var.worker_snippets
  node_labels = var.worker_node_labels
}

@@ -51,6 +51,16 @@ variable "disk_size" {
  default     = 30
}

variable "disk_type" {
  type        = string
  description = "Type of managed disk"
  default     = "pd-standard"

  validation {
    condition     = contains(["pd-standard", "pd-ssd", "pd-balanced"], var.disk_type)
    error_message = "The disk_type must be pd-standard, pd-ssd or pd-balanced."
  }
}

variable "preemptible" {
  type        = bool
  description = "If enabled, Compute Engine will terminate instances randomly within 24 hours"
@@ -109,4 +119,3 @@ variable "accelerator_count" {
  default     = "0"
  description = "Number of compute engine accelerators"
}

@@ -80,6 +80,7 @@ resource "google_compute_region_instance_template" "worker" {
    boot         = true
    source_image = data.google_compute_image.flatcar-linux.self_link
    disk_size_gb = var.disk_size
    disk_type    = var.disk_type
  }

  network_interface {