Remove the cluster_domain_suffix variable

* Drop support for `cluster_domain_suffix` customization and
always use `cluster.local`. Many components in the Kubernetes
ecosystem assume this default suffix, and it's very rare to
set a special value here these days.
* Clean up a few variables that are seldom used.
Dalton Hubble 2024-08-02 15:01:48 -07:00
parent af27661432
commit 0120b9f38d
GPG Key ID: BD34C2E3EF32B7A0
88 changed files with 261 additions and 409 deletions
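
The practical effect on rendered machine configs: the kubelet's `clusterDomain` is no longer templated. A sketch of the KubeletConfiguration fragment the Butane templates now produce, assuming the default `service_cidr` of `10.3.0.0/16` (so `cidrhost(var.service_cidr, 10)` resolves to `10.3.0.10`):

```
cgroupDriver: systemd
clusterDNS:
  - 10.3.0.10                   # cidrhost(var.service_cidr, 10)
clusterDomain: cluster.local    # fixed, no longer ${cluster_domain_suffix}
healthzPort: 0
rotateCertificates: true
shutdownGracePeriod: 45s
```

Module callers that previously set `cluster_domain_suffix` should simply remove the argument; Terraform rejects arguments a module no longer declares.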

View File

@ -4,11 +4,7 @@ Notable changes between versions.
## Latest
### Azure
* Allow controller and worker nodes to use different CPU architectures
* Add `controller_arch` and `worker_arch` variables
* Remove the `arch` variable
* Remove `cluster_domain_suffix` variable, always use "cluster.local"
## v1.30.3

View File

@ -127,9 +127,10 @@ List the pods.
```
$ kubectl get pods --all-namespaces
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system calico-node-1cs8z 2/2 Running 0 6m
kube-system calico-node-d1l5b 2/2 Running 0 6m
kube-system calico-node-sp9ps 2/2 Running 0 6m
kube-system cilium-1cs8z 1/1 Running 0 6m
kube-system cilium-d1l5b 1/1 Running 0 6m
kube-system cilium-sp9ps 1/1 Running 0 6m
kube-system cilium-operator-68d778b448-g744f 1/1 Running 0 6m
kube-system coredns-1187388186-zj5dl 1/1 Running 0 6m
kube-system coredns-1187388186-dkh3o 1/1 Running 0 6m
kube-system kube-apiserver-controller-0 1/1 Running 0 6m

View File

@ -9,7 +9,6 @@ module "bootstrap" {
network_mtu = var.network_mtu
pod_cidr = var.pod_cidr
service_cidr = var.service_cidr
cluster_domain_suffix = var.cluster_domain_suffix
enable_reporting = var.enable_reporting
enable_aggregation = var.enable_aggregation
daemonset_tolerations = var.daemonset_tolerations

View File

@ -149,7 +149,7 @@ storage:
cgroupDriver: systemd
clusterDNS:
- ${cluster_dns_service_ip}
clusterDomain: ${cluster_domain_suffix}
clusterDomain: cluster.local
healthzPort: 0
rotateCertificates: true
shutdownGracePeriod: 45s

View File

@ -65,7 +65,6 @@ data "ct_config" "controllers" {
kubeconfig = indent(10, module.bootstrap.kubeconfig-kubelet)
ssh_authorized_key = var.ssh_authorized_key
cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
cluster_domain_suffix = var.cluster_domain_suffix
})
strict = true
snippets = var.controller_snippets

View File

@ -164,6 +164,14 @@ EOD
default = "10.3.0.0/16"
}
variable "worker_node_labels" {
type = list(string)
description = "List of initial worker node labels"
default = []
}
# advanced
variable "enable_reporting" {
type = bool
description = "Enable usage or analytics reporting to upstreams (Calico)"
@ -176,22 +184,6 @@ variable "enable_aggregation" {
default = true
}
variable "worker_node_labels" {
type = list(string)
description = "List of initial worker node labels"
default = []
}
# unofficial, undocumented, unsupported
variable "cluster_domain_suffix" {
type = string
description = "Queries for domains with the suffix will be answered by CoreDNS. Default is cluster.local (e.g. foo.default.svc.cluster.local)"
default = "cluster.local"
}
# advanced
variable "controller_arch" {
type = string
description = "Controller node(s) architecture (amd64 or arm64)"

View File

@ -6,23 +6,24 @@ module "workers" {
vpc_id = aws_vpc.network.id
subnet_ids = aws_subnet.public.*.id
security_groups = [aws_security_group.worker.id]
worker_count = var.worker_count
instance_type = var.worker_type
os_stream = var.os_stream
arch = var.worker_arch
disk_type = var.worker_disk_type
disk_size = var.worker_disk_size
disk_iops = var.worker_disk_iops
cpu_credits = var.worker_cpu_credits
spot_price = var.worker_price
target_groups = var.worker_target_groups
# instances
os_stream = var.os_stream
worker_count = var.worker_count
instance_type = var.worker_type
arch = var.worker_arch
disk_type = var.worker_disk_type
disk_size = var.worker_disk_size
disk_iops = var.worker_disk_iops
cpu_credits = var.worker_cpu_credits
spot_price = var.worker_price
target_groups = var.worker_target_groups
# configuration
kubeconfig = module.bootstrap.kubeconfig-kubelet
ssh_authorized_key = var.ssh_authorized_key
service_cidr = var.service_cidr
cluster_domain_suffix = var.cluster_domain_suffix
snippets = var.worker_snippets
node_labels = var.worker_node_labels
kubeconfig = module.bootstrap.kubeconfig-kubelet
ssh_authorized_key = var.ssh_authorized_key
service_cidr = var.service_cidr
snippets = var.worker_snippets
node_labels = var.worker_node_labels
}

View File

@ -104,7 +104,7 @@ storage:
cgroupDriver: systemd
clusterDNS:
- ${cluster_dns_service_ip}
clusterDomain: ${cluster_domain_suffix}
clusterDomain: cluster.local
healthzPort: 0
rotateCertificates: true
shutdownGracePeriod: 45s

View File

@ -108,12 +108,6 @@ EOD
default = "10.3.0.0/16"
}
variable "cluster_domain_suffix" {
type = string
description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
default = "cluster.local"
}
variable "node_labels" {
type = list(string)
description = "List of initial node labels"
@ -126,15 +120,14 @@ variable "node_taints" {
default = []
}
# unofficial, undocumented, unsupported
# advanced
variable "arch" {
type = string
description = "Container architecture (amd64 or arm64)"
default = "amd64"
validation {
condition = var.arch == "amd64" || var.arch == "arm64"
condition = contains(["amd64", "arm64"], var.arch)
error_message = "The arch must be amd64 or arm64."
}
}

View File

@ -111,7 +111,6 @@ data "ct_config" "worker" {
kubeconfig = indent(10, var.kubeconfig)
ssh_authorized_key = var.ssh_authorized_key
cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
cluster_domain_suffix = var.cluster_domain_suffix
node_labels = join(",", var.node_labels)
node_taints = join(",", var.node_taints)
})

View File

@ -9,7 +9,6 @@ module "bootstrap" {
network_mtu = var.network_mtu
pod_cidr = var.pod_cidr
service_cidr = var.service_cidr
cluster_domain_suffix = var.cluster_domain_suffix
enable_reporting = var.enable_reporting
enable_aggregation = var.enable_aggregation
daemonset_tolerations = var.daemonset_tolerations

View File

@ -148,7 +148,7 @@ storage:
cgroupDriver: systemd
clusterDNS:
- ${cluster_dns_service_ip}
clusterDomain: ${cluster_domain_suffix}
clusterDomain: cluster.local
healthzPort: 0
rotateCertificates: true
shutdownGracePeriod: 45s

View File

@ -66,7 +66,6 @@ data "ct_config" "controllers" {
kubeconfig = indent(10, module.bootstrap.kubeconfig-kubelet)
ssh_authorized_key = var.ssh_authorized_key
cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
cluster_domain_suffix = var.cluster_domain_suffix
})
strict = true
snippets = var.controller_snippets

View File

@ -164,6 +164,14 @@ EOD
default = "10.3.0.0/16"
}
variable "worker_node_labels" {
type = list(string)
description = "List of initial worker node labels"
default = []
}
# advanced
variable "enable_reporting" {
type = bool
description = "Enable usage or analytics reporting to upstreams (Calico)"
@ -176,20 +184,6 @@ variable "enable_aggregation" {
default = true
}
variable "worker_node_labels" {
type = list(string)
description = "List of initial worker node labels"
default = []
}
# unofficial, undocumented, unsupported
variable "cluster_domain_suffix" {
type = string
description = "Queries for domains with the suffix will be answered by CoreDNS. Default is cluster.local (e.g. foo.default.svc.cluster.local)"
default = "cluster.local"
}
variable "controller_arch" {
type = string
description = "Controller node(s) architecture (amd64 or arm64)"
@ -210,7 +204,6 @@ variable "worker_arch" {
}
}
variable "daemonset_tolerations" {
type = list(string)
description = "List of additional taint keys kube-system DaemonSets should tolerate (e.g. ['custom-role', 'gpu-role'])"

View File

@ -6,22 +6,23 @@ module "workers" {
vpc_id = aws_vpc.network.id
subnet_ids = aws_subnet.public.*.id
security_groups = [aws_security_group.worker.id]
worker_count = var.worker_count
instance_type = var.worker_type
os_image = var.os_image
arch = var.worker_arch
disk_type = var.worker_disk_type
disk_size = var.worker_disk_size
disk_iops = var.worker_disk_iops
spot_price = var.worker_price
target_groups = var.worker_target_groups
# instances
os_image = var.os_image
worker_count = var.worker_count
instance_type = var.worker_type
arch = var.worker_arch
disk_type = var.worker_disk_type
disk_size = var.worker_disk_size
disk_iops = var.worker_disk_iops
spot_price = var.worker_price
target_groups = var.worker_target_groups
# configuration
kubeconfig = module.bootstrap.kubeconfig-kubelet
ssh_authorized_key = var.ssh_authorized_key
service_cidr = var.service_cidr
cluster_domain_suffix = var.cluster_domain_suffix
snippets = var.worker_snippets
node_labels = var.worker_node_labels
kubeconfig = module.bootstrap.kubeconfig-kubelet
ssh_authorized_key = var.ssh_authorized_key
service_cidr = var.service_cidr
snippets = var.worker_snippets
node_labels = var.worker_node_labels
}

View File

@ -103,7 +103,7 @@ storage:
cgroupDriver: systemd
clusterDNS:
- ${cluster_dns_service_ip}
clusterDomain: ${cluster_domain_suffix}
clusterDomain: cluster.local
healthzPort: 0
rotateCertificates: true
shutdownGracePeriod: 45s

View File

@ -108,12 +108,6 @@ EOD
default = "10.3.0.0/16"
}
variable "cluster_domain_suffix" {
type = string
description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
default = "cluster.local"
}
variable "node_labels" {
type = list(string)
description = "List of initial node labels"
@ -134,7 +128,7 @@ variable "arch" {
default = "amd64"
validation {
condition = var.arch == "amd64" || var.arch == "arm64"
condition = contains(["amd64", "arm64"], var.arch)
error_message = "The arch must be amd64 or arm64."
}
}

View File

@ -111,7 +111,6 @@ data "ct_config" "worker" {
kubeconfig = indent(10, var.kubeconfig)
ssh_authorized_key = var.ssh_authorized_key
cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
cluster_domain_suffix = var.cluster_domain_suffix
node_labels = join(",", var.node_labels)
node_taints = join(",", var.node_taints)
})

View File

@ -14,7 +14,6 @@ module "bootstrap" {
pod_cidr = var.pod_cidr
service_cidr = var.service_cidr
cluster_domain_suffix = var.cluster_domain_suffix
enable_reporting = var.enable_reporting
enable_aggregation = var.enable_aggregation
daemonset_tolerations = var.daemonset_tolerations

View File

@ -144,7 +144,7 @@ storage:
cgroupDriver: systemd
clusterDNS:
- ${cluster_dns_service_ip}
clusterDomain: ${cluster_domain_suffix}
clusterDomain: cluster.local
healthzPort: 0
rotateCertificates: true
shutdownGracePeriod: 45s

View File

@ -163,7 +163,6 @@ data "ct_config" "controllers" {
kubeconfig = indent(10, module.bootstrap.kubeconfig-kubelet)
ssh_authorized_key = var.ssh_authorized_key
cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
cluster_domain_suffix = var.cluster_domain_suffix
})
strict = true
snippets = var.controller_snippets

View File

@ -27,7 +27,6 @@ variable "os_image" {
description = "Fedora CoreOS image for instances"
}
variable "controller_count" {
type = number
description = "Number of controllers (i.e. masters)"
@ -145,6 +144,14 @@ EOD
default = "10.3.0.0/16"
}
variable "worker_node_labels" {
type = list(string)
description = "List of initial worker node labels"
default = []
}
# advanced
variable "enable_reporting" {
type = bool
description = "Enable usage or analytics reporting to upstreams (Calico)"
@ -157,20 +164,6 @@ variable "enable_aggregation" {
default = true
}
variable "worker_node_labels" {
type = list(string)
description = "List of initial worker node labels"
default = []
}
# unofficial, undocumented, unsupported
variable "cluster_domain_suffix" {
type = string
description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
default = "cluster.local"
}
variable "daemonset_tolerations" {
type = list(string)
description = "List of additional taint keys kube-system DaemonSets should tolerate (e.g. ['custom-role', 'gpu-role'])"

View File

@ -9,20 +9,20 @@ module "workers" {
security_group_id = azurerm_network_security_group.worker.id
backend_address_pool_ids = local.backend_address_pool_ids
# instances
os_image = var.os_image
worker_count = var.worker_count
vm_type = var.worker_type
os_image = var.os_image
disk_type = var.worker_disk_type
disk_size = var.worker_disk_size
ephemeral_disk = var.worker_ephemeral_disk
priority = var.worker_priority
# configuration
kubeconfig = module.bootstrap.kubeconfig-kubelet
ssh_authorized_key = var.ssh_authorized_key
azure_authorized_key = var.azure_authorized_key
service_cidr = var.service_cidr
cluster_domain_suffix = var.cluster_domain_suffix
snippets = var.worker_snippets
node_labels = var.worker_node_labels
kubeconfig = module.bootstrap.kubeconfig-kubelet
ssh_authorized_key = var.ssh_authorized_key
azure_authorized_key = var.azure_authorized_key
service_cidr = var.service_cidr
snippets = var.worker_snippets
node_labels = var.worker_node_labels
}

View File

@ -99,7 +99,7 @@ storage:
cgroupDriver: systemd
clusterDNS:
- ${cluster_dns_service_ip}
clusterDomain: ${cluster_domain_suffix}
clusterDomain: cluster.local
healthzPort: 0
rotateCertificates: true
shutdownGracePeriod: 45s

View File

@ -120,12 +120,3 @@ variable "node_taints" {
description = "List of initial node taints"
default = []
}
# unofficial, undocumented, unsupported
variable "cluster_domain_suffix" {
description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
type = string
default = "cluster.local"
}

View File

@ -84,7 +84,6 @@ data "ct_config" "worker" {
kubeconfig = indent(10, var.kubeconfig)
ssh_authorized_key = var.ssh_authorized_key
cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
cluster_domain_suffix = var.cluster_domain_suffix
node_labels = join(",", var.node_labels)
node_taints = join(",", var.node_taints)
})

View File

@ -14,7 +14,6 @@ module "bootstrap" {
pod_cidr = var.pod_cidr
service_cidr = var.service_cidr
cluster_domain_suffix = var.cluster_domain_suffix
enable_reporting = var.enable_reporting
enable_aggregation = var.enable_aggregation
daemonset_tolerations = var.daemonset_tolerations

View File

@ -144,7 +144,7 @@ storage:
cgroupDriver: systemd
clusterDNS:
- ${cluster_dns_service_ip}
clusterDomain: ${cluster_domain_suffix}
clusterDomain: cluster.local
healthzPort: 0
rotateCertificates: true
shutdownGracePeriod: 45s

View File

@ -185,7 +185,6 @@ data "ct_config" "controllers" {
kubeconfig = indent(10, module.bootstrap.kubeconfig-kubelet)
ssh_authorized_key = var.ssh_authorized_key
cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
cluster_domain_suffix = var.cluster_domain_suffix
})
strict = true
snippets = var.controller_snippets

View File

@ -196,14 +196,6 @@ variable "daemonset_tolerations" {
default = []
}
# unofficial, undocumented, unsupported
variable "cluster_domain_suffix" {
type = string
description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
default = "cluster.local"
}
variable "components" {
description = "Configure pre-installed cluster components"
# Component configs are passed through to terraform-render-bootstrap,

View File

@ -18,12 +18,11 @@ module "workers" {
priority = var.worker_priority
# configuration
kubeconfig = module.bootstrap.kubeconfig-kubelet
ssh_authorized_key = var.ssh_authorized_key
azure_authorized_key = var.azure_authorized_key
service_cidr = var.service_cidr
cluster_domain_suffix = var.cluster_domain_suffix
snippets = var.worker_snippets
node_labels = var.worker_node_labels
arch = var.worker_arch
kubeconfig = module.bootstrap.kubeconfig-kubelet
ssh_authorized_key = var.ssh_authorized_key
azure_authorized_key = var.azure_authorized_key
service_cidr = var.service_cidr
snippets = var.worker_snippets
node_labels = var.worker_node_labels
arch = var.worker_arch
}

View File

@ -99,7 +99,7 @@ storage:
cgroupDriver: systemd
clusterDNS:
- ${cluster_dns_service_ip}
clusterDomain: ${cluster_domain_suffix}
clusterDomain: cluster.local
healthzPort: 0
rotateCertificates: true
shutdownGracePeriod: 45s

View File

@ -137,12 +137,3 @@ variable "arch" {
error_message = "The arch must be amd64 or arm64."
}
}
# unofficial, undocumented, unsupported
variable "cluster_domain_suffix" {
description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
type = string
default = "cluster.local"
}

View File

@ -105,7 +105,6 @@ data "ct_config" "worker" {
kubeconfig = indent(10, var.kubeconfig)
ssh_authorized_key = var.ssh_authorized_key
cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
cluster_domain_suffix = var.cluster_domain_suffix
node_labels = join(",", var.node_labels)
node_taints = join(",", var.node_taints)
})

View File

@ -10,7 +10,6 @@ module "bootstrap" {
network_ip_autodetection_method = var.network_ip_autodetection_method
pod_cidr = var.pod_cidr
service_cidr = var.service_cidr
cluster_domain_suffix = var.cluster_domain_suffix
enable_reporting = var.enable_reporting
enable_aggregation = var.enable_aggregation
components = var.components

View File

@ -154,7 +154,7 @@ storage:
cgroupDriver: systemd
clusterDNS:
- ${cluster_dns_service_ip}
clusterDomain: ${cluster_domain_suffix}
clusterDomain: cluster.local
healthzPort: 0
rotateCertificates: true
shutdownGracePeriod: 45s

View File

@ -59,7 +59,6 @@ data "ct_config" "controllers" {
etcd_name = var.controllers.*.name[count.index]
etcd_initial_cluster = join(",", formatlist("%s=https://%s:2380", var.controllers.*.name, var.controllers.*.domain))
cluster_dns_service_ip = module.bootstrap.cluster_dns_service_ip
cluster_domain_suffix = var.cluster_domain_suffix
ssh_authorized_key = var.ssh_authorized_key
})
strict = true

View File

@ -151,13 +151,7 @@ variable "enable_aggregation" {
default = true
}
# unofficial, undocumented, unsupported
variable "cluster_domain_suffix" {
description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
type = string
default = "cluster.local"
}
# advanced
variable "components" {
description = "Configure pre-installed cluster components"

View File

@ -108,7 +108,7 @@ storage:
cgroupDriver: systemd
clusterDNS:
- ${cluster_dns_service_ip}
clusterDomain: ${cluster_domain_suffix}
clusterDomain: cluster.local
healthzPort: 0
rotateCertificates: true
shutdownGracePeriod: 45s

View File

@ -53,7 +53,6 @@ data "ct_config" "worker" {
domain_name = var.domain
ssh_authorized_key = var.ssh_authorized_key
cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
cluster_domain_suffix = var.cluster_domain_suffix
node_labels = join(",", var.node_labels)
node_taints = join(",", var.node_taints)
})

View File

@ -103,9 +103,3 @@ The 1st IP will be reserved for kube_apiserver, the 10th IP will be reserved for
EOD
default = "10.3.0.0/16"
}
variable "cluster_domain_suffix" {
description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
type = string
default = "cluster.local"
}

View File

@ -15,13 +15,12 @@ module "workers" {
domain = var.workers[count.index].domain
# configuration
kubeconfig = module.bootstrap.kubeconfig-kubelet
ssh_authorized_key = var.ssh_authorized_key
service_cidr = var.service_cidr
cluster_domain_suffix = var.cluster_domain_suffix
node_labels = lookup(var.worker_node_labels, var.workers[count.index].name, [])
node_taints = lookup(var.worker_node_taints, var.workers[count.index].name, [])
snippets = lookup(var.snippets, var.workers[count.index].name, [])
kubeconfig = module.bootstrap.kubeconfig-kubelet
ssh_authorized_key = var.ssh_authorized_key
service_cidr = var.service_cidr
node_labels = lookup(var.worker_node_labels, var.workers[count.index].name, [])
node_taints = lookup(var.worker_node_taints, var.workers[count.index].name, [])
snippets = lookup(var.snippets, var.workers[count.index].name, [])
# optional
cached_install = var.cached_install

View File

@ -10,7 +10,6 @@ module "bootstrap" {
network_ip_autodetection_method = var.network_ip_autodetection_method
pod_cidr = var.pod_cidr
service_cidr = var.service_cidr
cluster_domain_suffix = var.cluster_domain_suffix
enable_reporting = var.enable_reporting
enable_aggregation = var.enable_aggregation
components = var.components

View File

@ -155,7 +155,7 @@ storage:
cgroupDriver: systemd
clusterDNS:
- ${cluster_dns_service_ip}
clusterDomain: ${cluster_domain_suffix}
clusterDomain: cluster.local
healthzPort: 0
rotateCertificates: true
shutdownGracePeriod: 45s

View File

@ -88,7 +88,6 @@ data "ct_config" "controllers" {
etcd_name = var.controllers.*.name[count.index]
etcd_initial_cluster = join(",", formatlist("%s=https://%s:2380", var.controllers.*.name, var.controllers.*.domain))
cluster_dns_service_ip = module.bootstrap.cluster_dns_service_ip
cluster_domain_suffix = var.cluster_domain_suffix
ssh_authorized_key = var.ssh_authorized_key
})
strict = true

View File

@ -167,13 +167,7 @@ EOD
default = ""
}
# unofficial, undocumented, unsupported
variable "cluster_domain_suffix" {
type = string
description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
default = "cluster.local"
}
# advanced
variable "components" {
description = "Configure pre-installed cluster components"

View File

@ -113,7 +113,7 @@ storage:
cgroupDriver: systemd
clusterDNS:
- ${cluster_dns_service_ip}
clusterDomain: ${cluster_domain_suffix}
clusterDomain: cluster.local
healthzPort: 0
rotateCertificates: true
shutdownGracePeriod: 45s

View File

@ -79,7 +79,6 @@ data "ct_config" "worker" {
domain_name = var.domain
ssh_authorized_key = var.ssh_authorized_key
cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
cluster_domain_suffix = var.cluster_domain_suffix
node_labels = join(",", var.node_labels)
node_taints = join(",", var.node_taints)
})

View File

@ -114,13 +114,3 @@ The 1st IP will be reserved for kube_apiserver, the 10th IP will be reserved for
EOD
default = "10.3.0.0/16"
}
variable "cluster_domain_suffix" {
type = string
description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
default = "cluster.local"
}

View File

@ -15,13 +15,12 @@ module "workers" {
domain = var.workers[count.index].domain
# configuration
kubeconfig = module.bootstrap.kubeconfig-kubelet
ssh_authorized_key = var.ssh_authorized_key
service_cidr = var.service_cidr
cluster_domain_suffix = var.cluster_domain_suffix
node_labels = lookup(var.worker_node_labels, var.workers[count.index].name, [])
node_taints = lookup(var.worker_node_taints, var.workers[count.index].name, [])
snippets = lookup(var.snippets, var.workers[count.index].name, [])
kubeconfig = module.bootstrap.kubeconfig-kubelet
ssh_authorized_key = var.ssh_authorized_key
service_cidr = var.service_cidr
node_labels = lookup(var.worker_node_labels, var.workers[count.index].name, [])
node_taints = lookup(var.worker_node_taints, var.workers[count.index].name, [])
snippets = lookup(var.snippets, var.workers[count.index].name, [])
# optional
download_protocol = var.download_protocol

View File

@ -11,11 +11,10 @@ module "bootstrap" {
network_encapsulation = "vxlan"
network_mtu = "1450"
pod_cidr = var.pod_cidr
service_cidr = var.service_cidr
cluster_domain_suffix = var.cluster_domain_suffix
enable_reporting = var.enable_reporting
enable_aggregation = var.enable_aggregation
components = var.components
pod_cidr = var.pod_cidr
service_cidr = var.service_cidr
enable_reporting = var.enable_reporting
enable_aggregation = var.enable_aggregation
components = var.components
}

View File

@ -151,7 +151,7 @@ storage:
cgroupDriver: systemd
clusterDNS:
- ${cluster_dns_service_ip}
clusterDomain: ${cluster_domain_suffix}
clusterDomain: cluster.local
healthzPort: 0
rotateCertificates: true
shutdownGracePeriod: 45s

View File

@ -104,7 +104,7 @@ storage:
cgroupDriver: systemd
clusterDNS:
- ${cluster_dns_service_ip}
clusterDomain: ${cluster_domain_suffix}
clusterDomain: cluster.local
healthzPort: 0
rotateCertificates: true
shutdownGracePeriod: 45s

View File

@ -74,7 +74,6 @@ data "ct_config" "controllers" {
for i in range(var.controller_count) : "etcd${i}=https://${var.cluster_name}-etcd${i}.${var.dns_zone}:2380"
])
cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
cluster_domain_suffix = var.cluster_domain_suffix
})
strict = true
snippets = var.controller_snippets

View File

@ -98,13 +98,7 @@ variable "enable_aggregation" {
default = true
}
# unofficial, undocumented, unsupported
variable "cluster_domain_suffix" {
type = string
description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
default = "cluster.local"
}
# advanced
variable "components" {
description = "Configure pre-installed cluster components"

View File

@ -62,7 +62,6 @@ resource "digitalocean_tag" "workers" {
data "ct_config" "worker" {
content = templatefile("${path.module}/butane/worker.yaml", {
cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
cluster_domain_suffix = var.cluster_domain_suffix
})
strict = true
snippets = var.worker_snippets

View File

@ -11,11 +11,10 @@ module "bootstrap" {
network_encapsulation = "vxlan"
network_mtu = "1450"
pod_cidr = var.pod_cidr
service_cidr = var.service_cidr
cluster_domain_suffix = var.cluster_domain_suffix
enable_reporting = var.enable_reporting
enable_aggregation = var.enable_aggregation
components = var.components
pod_cidr = var.pod_cidr
service_cidr = var.service_cidr
enable_reporting = var.enable_reporting
enable_aggregation = var.enable_aggregation
components = var.components
}

View File

@ -153,7 +153,7 @@ storage:
cgroupDriver: systemd
clusterDNS:
- ${cluster_dns_service_ip}
clusterDomain: ${cluster_domain_suffix}
clusterDomain: cluster.local
healthzPort: 0
rotateCertificates: true
shutdownGracePeriod: 45s

View File

@ -103,7 +103,7 @@ storage:
cgroupDriver: systemd
clusterDNS:
- ${cluster_dns_service_ip}
clusterDomain: ${cluster_domain_suffix}
clusterDomain: cluster.local
healthzPort: 0
rotateCertificates: true
shutdownGracePeriod: 45s

View File

@ -79,7 +79,6 @@ data "ct_config" "controllers" {
for i in range(var.controller_count) : "etcd${i}=https://${var.cluster_name}-etcd${i}.${var.dns_zone}:2380"
])
cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
cluster_domain_suffix = var.cluster_domain_suffix
})
strict = true
snippets = var.controller_snippets

View File

@ -98,13 +98,7 @@ variable "enable_aggregation" {
default = true
}
# unofficial, undocumented, unsupported
variable "cluster_domain_suffix" {
type = string
description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
default = "cluster.local"
}
# advanced
variable "components" {
description = "Configure pre-installed cluster components"

View File

@ -60,7 +60,6 @@ resource "digitalocean_tag" "workers" {
data "ct_config" "worker" {
content = templatefile("${path.module}/butane/worker.yaml", {
cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
cluster_domain_suffix = var.cluster_domain_suffix
})
strict = true
snippets = var.worker_snippets

View File

@ -79,12 +79,12 @@ module "tempest" {
dns_zone = "aws.example.com"
dns_zone_id = "Z3PAABBCFAKEC0"
# configuration
ssh_authorized_key = "ssh-ed25519 AAAAB3Nz..."
# optional
# instances
worker_count = 2
worker_type = "t3.small"
# configuration
ssh_authorized_key = "ssh-ed25519 AAAAB3Nz..."
}
```
@ -155,9 +155,9 @@ List the pods.
```
$ kubectl get pods --all-namespaces
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system calico-node-1m5bf 2/2 Running 0 34m
kube-system calico-node-7jmr1 2/2 Running 0 34m
kube-system calico-node-bknc8 2/2 Running 0 34m
kube-system cilium-1m5bf 1/1 Running 0 34m
kube-system cilium-7jmr1 1/1 Running 0 34m
kube-system cilium-bknc8 1/1 Running 0 34m
kube-system coredns-1187388186-wx1lg 1/1 Running 0 34m
kube-system coredns-1187388186-qjnvp 1/1 Running 0 34m
kube-system kube-apiserver-ip-10-0-3-155 1/1 Running 0 34m
@ -206,16 +206,21 @@ Reference the DNS zone id with `aws_route53_zone.zone-for-clusters.zone_id`.
| Name | Description | Default | Example |
|:-----|:------------|:--------|:--------|
| os_stream | Fedora CoreOS stream for instances | "stable" | "testing", "next" |
| controller_count | Number of controllers (i.e. masters) | 1 | 1 |
| worker_count | Number of workers | 1 | 3 |
| controller_type | EC2 instance type for controllers | "t3.small" | See below |
| controller_disk_size | Size of EBS volume in GB | 30 | 100 |
| controller_disk_type | Type of EBS volume | gp3 | io1 |
| controller_disk_iops | IOPS of EBS volume | 3000 | 4000 |
| controller_cpu_credits | Burstable CPU pricing model | null (i.e. auto) | standard, unlimited |
| worker_count | Number of workers | 1 | 3 |
| worker_type | EC2 instance type for workers | "t3.small" | See below |
| os_stream | Fedora CoreOS stream for compute instances | "stable" | "testing", "next" |
| disk_size | Size of the EBS volume in GB | 30 | 100 |
| disk_type | Type of the EBS volume | "gp3" | standard, gp2, gp3, io1 |
| disk_iops | IOPS of the EBS volume | 0 (i.e. auto) | 400 |
| worker_target_groups | Target group ARNs to which worker instances should be added | [] | [aws_lb_target_group.app.id] |
| worker_disk_size | Size of EBS volume in GB | 30 | 100 |
| worker_disk_type | Type of EBS volume | gp3 | io1 |
| worker_disk_iops | IOPS of EBS volume | 3000 | 4000 |
| worker_cpu_credits | Burstable CPU pricing model | null (i.e. auto) | standard, unlimited |
| worker_price | Spot price in USD for worker instances or 0 to use on-demand instances | 0 | 0.10 |
| worker_target_groups | Target group ARNs to which worker instances should be added | [] | [aws_lb_target_group.app.id] |
| controller_snippets | Controller Butane snippets | [] | [examples](/advanced/customization/) |
| worker_snippets | Worker Butane snippets | [] | [examples](/advanced/customization/) |
| networking | Choice of networking provider | "cilium" | "calico" or "cilium" or "flannel" |
@ -228,7 +233,7 @@ Reference the DNS zone id with `aws_route53_zone.zone-for-clusters.zone_id`.
Check the list of valid [instance types](https://aws.amazon.com/ec2/instance-types/).
!!! warning
Do not choose a `controller_type` smaller than `t2.small`. Smaller instances are not sufficient for running a controller.
Do not choose a `controller_type` smaller than `t3.small`. Smaller instances are not sufficient for running a controller.
!!! tip "MTU"
If your EC2 instance type supports [Jumbo frames](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/network_mtu.html#jumbo_frame_instances) (most do), we recommend you change the `network_mtu` to 8981! You will get better pod-to-pod bandwidth.
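
For example, a minimal sketch reusing the `tempest` cluster module from above:

```
module "tempest" {
  # ...same arguments as above...
  network_mtu = 8981
}
```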

View File

@ -93,16 +93,16 @@ module "ramius" {
location = "centralus"
dns_zone = "azure.example.com"
dns_zone_group = "example-group"
# configuration
os_image = "/subscriptions/some/path/Microsoft.Compute/images/fedora-coreos-36.20220716.3.1"
ssh_authorized_key = "ssh-ed25519 AAAAB3Nz..."
# optional
worker_count = 2
network_cidr = {
network_cidr = {
ipv4 = ["10.0.0.0/20"]
}
# instances
os_image = "/subscriptions/some/path/Microsoft.Compute/images/fedora-coreos-36.20220716.3.1"
worker_count = 2
# configuration
ssh_authorized_key = "ssh-ed25519 AAAAB3Nz..."
}
```
@ -175,9 +175,9 @@ $ kubectl get pods --all-namespaces
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system coredns-7c6fbb4f4b-b6qzx 1/1 Running 0 26m
kube-system coredns-7c6fbb4f4b-j2k3d 1/1 Running 0 26m
kube-system calico-node-1m5bf 2/2 Running 0 26m
kube-system calico-node-7jmr1 2/2 Running 0 26m
kube-system calico-node-bknc8 2/2 Running 0 26m
kube-system cilium-1m5bf 1/1 Running 0 26m
kube-system cilium-7jmr1 1/1 Running 0 26m
kube-system cilium-bknc8 1/1 Running 0 26m
kube-system kube-apiserver-ramius-controller-0 1/1 Running 0 26m
kube-system kube-controller-manager-ramius-controller-0 1/1 Running 0 26m
kube-system kube-proxy-j4vpq 1/1 Running 0 26m
@ -240,10 +240,14 @@ Reference the DNS zone with `azurerm_dns_zone.clusters.name` and its resource gr
| Name | Description | Default | Example |
|:-----|:------------|:--------|:--------|
| controller_count | Number of controllers (i.e. masters) | 1 | 1 |
| worker_count | Number of workers | 1 | 3 |
| controller_type | Machine type for controllers | "Standard_B2s" | See below |
| controller_disk_type | Managed disk for controllers | Premium_LRS | Standard_LRS |
| controller_disk_size | Managed disk size in GB | 30 | 50 |
| worker_count | Number of workers | 1 | 3 |
| worker_type | Machine type for workers | "Standard_D2as_v5" | See below |
| disk_size | Size of the disk in GB | 30 | 100 |
| worker_disk_type | Managed disk for workers | Standard_LRS | Premium_LRS |
| worker_disk_size | Size of the disk in GB | 30 | 100 |
| worker_ephemeral_disk | Use ephemeral local disk instead of managed disk | false | true |
| worker_priority | Set priority to Spot to use reduced cost surplus capacity, with the tradeoff that instances can be deallocated at any time | Regular | Spot |
| controller_snippets | Controller Butane snippets | [] | [example](/advanced/customization/#usage) |
| worker_snippets | Worker Butane snippets | [] | [example](/advanced/customization/#usage) |
@ -255,9 +259,6 @@ Reference the DNS zone with `azurerm_dns_zone.clusters.name` and its resource gr
Check the list of valid [machine types](https://azure.microsoft.com/en-us/pricing/details/virtual-machines/linux/) and their [specs](https://docs.microsoft.com/en-us/azure/virtual-machines/linux/sizes-general). Use `az vm list-skus` to get the identifier.
!!! warning
Unlike AWS and GCP, Azure requires its *virtual* networks to have non-overlapping IPv4 CIDRs (yeah, go figure). Instead of each cluster just using `10.0.0.0/16` for instances, each Azure cluster's `host_cidr` must be non-overlapping (e.g. 10.0.0.0/20 for the 1st cluster, 10.0.16.0/20 for the 2nd cluster, etc).
!!! warning
Do not choose a `controller_type` smaller than `Standard_B2s`. Smaller instances are not sufficient for running a controller.

View File

@ -323,9 +323,10 @@ List the pods.
```
$ kubectl get pods --all-namespaces
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system calico-node-6qp7f 2/2 Running 1 11m
kube-system calico-node-gnjrm 2/2 Running 0 11m
kube-system calico-node-llbgt 2/2 Running 0 11m
kube-system cilium-6qp7f 1/1 Running 1 11m
kube-system cilium-gnjrm 1/1 Running 0 11m
kube-system cilium-llbgt 1/1 Running 0 11m
kube-system cilium-operator-68d778b448-g744f 1/1 Running 0 11m
kube-system coredns-1187388186-dj3pd 1/1 Running 0 11m
kube-system coredns-1187388186-mx9rt 1/1 Running 0 11m
kube-system kube-apiserver-node1.example.com 1/1 Running 0 11m

View File

@ -88,12 +88,12 @@ module "nemo" {
region = "nyc3"
dns_zone = "digital-ocean.example.com"
# configuration
os_image = data.digitalocean_image.fedora-coreos-31-20200323-3-2.id
ssh_fingerprints = ["d7:9d:79:ae:56:32:73:79:95:88:e3:a2:ab:5d:45:e7"]
# optional
# instances
os_image = data.digitalocean_image.fedora-coreos-31-20200323-3-2.id
worker_count = 2
# configuration
ssh_fingerprints = ["d7:9d:79:ae:56:32:73:79:95:88:e3:a2:ab:5d:45:e7"]
}
```
@ -166,9 +166,9 @@ List the pods.
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system coredns-1187388186-ld1j7 1/1 Running 0 11m
kube-system coredns-1187388186-rdhf7 1/1 Running 0 11m
kube-system calico-node-1m5bf 2/2 Running 0 11m
kube-system calico-node-7jmr1 2/2 Running 0 11m
kube-system calico-node-bknc8 2/2 Running 0 11m
kube-system cilium-1m5bf 1/1 Running 0 11m
kube-system cilium-7jmr1 1/1 Running 0 11m
kube-system cilium-bknc8 1/1 Running 0 11m
kube-system kube-apiserver-ip-10.132.115.81 1/1 Running 0 11m
kube-system kube-controller-manager-ip-10.132.115.81 1/1 Running 0 11m
kube-system kube-proxy-6kxjf 1/1 Running 0 11m

View File

@ -81,11 +81,11 @@ module "yavin" {
dns_zone = "example.com"
dns_zone_name = "example-zone"
# instances
worker_count = 2
# configuration
ssh_authorized_key = "ssh-ed25519 AAAAB3Nz..."
# optional
worker_count = 2
}
```
@ -157,9 +157,9 @@ List the pods.
```
$ kubectl get pods --all-namespaces
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system calico-node-1cs8z 2/2 Running 0 6m
kube-system calico-node-d1l5b 2/2 Running 0 6m
kube-system calico-node-sp9ps 2/2 Running 0 6m
kube-system cilium-1cs8z 1/1 Running 0 6m
kube-system cilium-d1l5b 1/1 Running 0 6m
kube-system cilium-sp9ps 1/1 Running 0 6m
kube-system coredns-1187388186-dkh3o 1/1 Running 0 6m
kube-system coredns-1187388186-zj5dl 1/1 Running 0 6m
kube-system kube-apiserver-controller-0 1/1 Running 0 6m
@ -211,12 +211,13 @@ resource "google_dns_managed_zone" "zone-for-clusters" {
| Name | Description | Default | Example |
|:-----|:------------|:--------|:--------|
| controller_count | Number of controllers (i.e. masters) | 1 | 3 |
| worker_count | Number of workers | 1 | 3 |
| controller_type | Machine type for controllers | "n1-standard-1" | See below |
| worker_type | Machine type for workers | "n1-standard-1" | See below |
| os_stream | Fedora CoreOS stream for compute instances | "stable" | "stable", "testing", "next" |
| disk_size | Size of the disk in GB | 30 | 100 |
| controller_count | Number of controllers (i.e. masters) | 1 | 3 |
| controller_type | Machine type for controllers | "n1-standard-1" | See below |
| controller_disk_type | Controller disk size in GB | 30 | 20 |
| worker_count | Number of workers | 1 | 3 |
| worker_type | Machine type for workers | "n1-standard-1" | See below |
| worker_disk_size | Worker disk size in GB | 30 | 100 |
| worker_preemptible | If enabled, Compute Engine will terminate workers randomly within 24 hours | false | true |
| controller_snippets | Controller Butane snippets | [] | [examples](/advanced/customization/) |
| worker_snippets | Worker Butane snippets | [] | [examples](/advanced/customization/) |

View File

@ -79,12 +79,12 @@ module "tempest" {
dns_zone = "aws.example.com"
dns_zone_id = "Z3PAABBCFAKEC0"
# configuration
ssh_authorized_key = "ssh-rsa AAAAB3Nz..."
# optional
# instances
worker_count = 2
worker_type = "t3.small"
# configuration
ssh_authorized_key = "ssh-rsa AAAAB3Nz..."
}
```
@ -155,9 +155,9 @@ List the pods.
```
$ kubectl get pods --all-namespaces
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system calico-node-1m5bf 2/2 Running 0 34m
kube-system calico-node-7jmr1 2/2 Running 0 34m
kube-system calico-node-bknc8 2/2 Running 0 34m
kube-system cilium-1m5bf 1/1 Running 0 34m
kube-system cilium-7jmr1 1/1 Running 0 34m
kube-system cilium-bknc8 1/1 Running 0 34m
kube-system coredns-1187388186-wx1lg 1/1 Running 0 34m
kube-system coredns-1187388186-qjnvp 1/1 Running 0 34m
kube-system kube-apiserver-ip-10-0-3-155 1/1 Running 0 34m
@ -206,16 +206,19 @@ Reference the DNS zone id with `aws_route53_zone.zone-for-clusters.zone_id`.
| Name | Description | Default | Example |
|:-----|:------------|:--------|:--------|
| controller_count | Number of controllers (i.e. masters) | 1 | 1 |
| worker_count | Number of workers | 1 | 3 |
| controller_type | EC2 instance type for controllers | "t3.small" | See below |
| worker_type | EC2 instance type for workers | "t3.small" | See below |
| os_image | AMI channel for a Container Linux derivative | "flatcar-stable" | flatcar-stable, flatcar-beta, flatcar-alpha |
| disk_size | Size of the EBS volume in GB | 30 | 100 |
| disk_type | Type of the EBS volume | "gp3" | standard, gp2, gp3, io1 |
| disk_iops | IOPS of the EBS volume | 0 (i.e. auto) | 400 |
| worker_target_groups | Target group ARNs to which worker instances should be added | [] | [aws_lb_target_group.app.id] |
| controller_count | Number of controllers (i.e. masters) | 1 | 1 |
| controller_type | EC2 instance type for controllers | "t3.small" | See below |
| controller_disk_size | Size of EBS volume in GB | 30 | 100 |
| controller_disk_type | Type of EBS volume | gp3 | io1 |
| controller_disk_iops | IOPS of EBS volume | 3000 | 4000 |
| controller_cpu_credits | Burstable CPU pricing model | null (i.e. auto) | standard, unlimited |
| worker_disk_size | Size of EBS volume in GB | 30 | 100 |
| worker_disk_type | Type of EBS volume | gp3 | io1 |
| worker_disk_iops | IOPS of EBS volume | 3000 | 4000 |
| worker_cpu_credits | Burstable CPU pricing model | null (i.e. auto) | standard, unlimited |
| worker_price | Spot price in USD for worker instances or 0 to use on-demand instances | 0/null | 0.10 |
| worker_target_groups | Target group ARNs to which worker instances should be added | [] | [aws_lb_target_group.app.id] |
| controller_snippets | Controller Container Linux Config snippets | [] | [example](/advanced/customization/) |
| worker_snippets | Worker Container Linux Config snippets | [] | [example](/advanced/customization/) |
| networking | Choice of networking provider | "cilium" | "calico" or "cilium" or "flannel" |
@ -228,7 +231,7 @@ Reference the DNS zone id with `aws_route53_zone.zone-for-clusters.zone_id`.
Check the list of valid [instance types](https://aws.amazon.com/ec2/instance-types/).
!!! warning
Do not choose a `controller_type` smaller than `t2.small`. Smaller instances are not sufficient for running a controller.
Do not choose a `controller_type` smaller than `t3.small`. Smaller instances are not sufficient for running a controller.
!!! tip "MTU"
If your EC2 instance type supports [Jumbo frames](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/network_mtu.html#jumbo_frame_instances) (most do), we recommend you change the `network_mtu` to 8981! You will get better pod-to-pod bandwidth.

View File

@ -82,15 +82,15 @@ module "ramius" {
location = "centralus"
dns_zone = "azure.example.com"
dns_zone_group = "example-group"
network_cidr = {
ipv4 = ["10.0.0.0/20"]
}
# instances
worker_count = 2
# configuration
ssh_authorized_key = "ssh-rsa AAAAB3Nz..."
# optional
worker_count = 2
network_cidr = {
ipv4 = ["10.0.0.0/20"]
}
}
```
@ -163,9 +163,9 @@ $ kubectl get pods --all-namespaces
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system coredns-7c6fbb4f4b-b6qzx 1/1 Running 0 26m
kube-system coredns-7c6fbb4f4b-j2k3d 1/1 Running 0 26m
kube-system calico-node-1m5bf 2/2 Running 0 26m
kube-system calico-node-7jmr1 2/2 Running 0 26m
kube-system calico-node-bknc8 2/2 Running 0 26m
kube-system cilium-1m5bf 1/1 Running 0 26m
kube-system cilium-7jmr1 1/1 Running 0 26m
kube-system cilium-bknc8 1/1 Running 0 26m
kube-system kube-apiserver-ramius-controller-0 1/1 Running 0 26m
kube-system kube-controller-manager-ramius-controller-0 1/1 Running 0 26m
kube-system kube-proxy-j4vpq 1/1 Running 0 26m
@ -226,12 +226,16 @@ Reference the DNS zone with `azurerm_dns_zone.clusters.name` and its resource gr
| Name | Description | Default | Example |
|:-----|:------------|:--------|:--------|
| controller_count | Number of controllers (i.e. masters) | 1 | 1 |
| worker_count | Number of workers | 1 | 3 |
| controller_type | Machine type for controllers | "Standard_B2s" | See below |
| worker_type | Machine type for workers | "Standard_D2as_v5" | See below |
| os_image | Channel for a Container Linux derivative | "flatcar-stable" | flatcar-stable, flatcar-beta, flatcar-alpha |
| disk_size | Size of the disk in GB | 30 | 100 |
| controller_count | Number of controllers (i.e. masters) | 1 | 1 |
| controller_type | Machine type for controllers | "Standard_B2s" | See below |
| controller_disk_type | Managed disk for controllers | Premium_LRS | Standard_LRS |
| controller_disk_size | Managed disk size in GB | 30 | 50 |
| worker_count | Number of workers | 1 | 3 |
| worker_type | Machine type for workers | "Standard_D2as_v5" | See below |
| worker_disk_type | Managed disk for workers | Standard_LRS | Premium_LRS |
| worker_disk_size | Size of the disk in GB | 30 | 100 |
| worker_ephemeral_disk | Use ephemeral local disk instead of managed disk | false | true |
| worker_priority | Set priority to Spot to use reduced cost surplus capacity, with the tradeoff that instances can be deallocated at any time | Regular | Spot |
| controller_snippets | Controller Container Linux Config snippets | [] | [example](/advanced/customization/#usage) |
| worker_snippets | Worker Container Linux Config snippets | [] | [example](/advanced/customization/#usage) |
@ -243,9 +247,6 @@ Reference the DNS zone with `azurerm_dns_zone.clusters.name` and its resource gr
Check the list of valid [machine types](https://azure.microsoft.com/en-us/pricing/details/virtual-machines/linux/) and their [specs](https://docs.microsoft.com/en-us/azure/virtual-machines/linux/sizes-general). Use `az vm list-skus` to get the identifier.
!!! warning
Unlike AWS and GCP, Azure requires its *virtual* networks to have non-overlapping IPv4 CIDRs (yeah, go figure). Instead of each cluster just using `10.0.0.0/16` for instances, each Azure cluster's `host_cidr` must be non-overlapping (e.g. 10.0.0.0/20 for the 1st cluster, 10.0.16.0/20 for the 2nd cluster, etc).
!!! warning
Do not choose a `controller_type` smaller than `Standard_B2s`. Smaller instances are not sufficient for running a controller.

View File

@ -333,9 +333,10 @@ List the pods.
```
$ kubectl get pods --all-namespaces
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system calico-node-6qp7f 2/2 Running 1 11m
kube-system calico-node-gnjrm 2/2 Running 0 11m
kube-system calico-node-llbgt 2/2 Running 0 11m
kube-system cilium-6qp7f 1/1 Running 1 11m
kube-system cilium-gnjrm 1/1 Running 0 11m
kube-system cilium-llbgt 1/1 Running 0 11m
kube-system cilium-operator-68d778b448-g744f 1/1 Running 0 11m
kube-system coredns-1187388186-dj3pd 1/1 Running 0 11m
kube-system coredns-1187388186-mx9rt 1/1 Running 0 11m
kube-system kube-apiserver-node1.example.com 1/1 Running 0 11m

View File

@ -88,12 +88,12 @@ module "nemo" {
region = "nyc3"
dns_zone = "digital-ocean.example.com"
# configuration
os_image = data.digitalocean_image.flatcar-stable-2303-4-0.id
ssh_fingerprints = ["d7:9d:79:ae:56:32:73:79:95:88:e3:a2:ab:5d:45:e7"]
# optional
# instances
os_image = data.digitalocean_image.flatcar-stable-2303-4-0.id
worker_count = 2
# configuration
ssh_fingerprints = ["d7:9d:79:ae:56:32:73:79:95:88:e3:a2:ab:5d:45:e7"]
}
```
@ -166,9 +166,9 @@ List the pods.
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system coredns-1187388186-ld1j7 1/1 Running 0 11m
kube-system coredns-1187388186-rdhf7 1/1 Running 0 11m
kube-system calico-node-1m5bf 2/2 Running 0 11m
kube-system calico-node-7jmr1 2/2 Running 0 11m
kube-system calico-node-bknc8 2/2 Running 0 11m
kube-system cilium-1m5bf 1/1 Running 0 11m
kube-system cilium-7jmr1 1/1 Running 0 11m
kube-system cilium-bknc8 1/1 Running 0 11m
kube-system kube-apiserver-ip-10.132.115.81 1/1 Running 0 11m
kube-system kube-controller-manager-ip-10.132.115.81 1/1 Running 0 11m
kube-system kube-proxy-6kxjf 1/1 Running 0 11m

View File

@ -81,11 +81,11 @@ module "yavin" {
dns_zone = "example.com"
dns_zone_name = "example-zone"
# instances
worker_count = 2
# configuration
ssh_authorized_key = "ssh-rsa AAAAB3Nz..."
# optional
worker_count = 2
}
```
@ -157,9 +157,9 @@ List the pods.
```
$ kubectl get pods --all-namespaces
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system calico-node-1cs8z 2/2 Running 0 6m
kube-system calico-node-d1l5b 2/2 Running 0 6m
kube-system calico-node-sp9ps 2/2 Running 0 6m
kube-system cilium-1cs8z 1/1 Running 0 6m
kube-system cilium-d1l5b 1/1 Running 0 6m
kube-system cilium-sp9ps 1/1 Running 0 6m
kube-system coredns-1187388186-dkh3o 1/1 Running 0 6m
kube-system coredns-1187388186-zj5dl 1/1 Running 0 6m
kube-system kube-apiserver-controller-0 1/1 Running 0 6m
@ -211,12 +211,13 @@ resource "google_dns_managed_zone" "zone-for-clusters" {
| Name | Description | Default | Example |
|:-----|:------------|:--------|:--------|
| controller_count | Number of controllers (i.e. masters) | 1 | 3 |
| worker_count | Number of workers | 1 | 3 |
| controller_type | Machine type for controllers | "n1-standard-1" | See below |
| worker_type | Machine type for workers | "n1-standard-1" | See below |
| os_image | Flatcar Linux image for compute instances | "flatcar-stable" | flatcar-stable, flatcar-beta, flatcar-alpha |
| disk_size | Size of the disk in GB | 30 | 100 |
| controller_count | Number of controllers (i.e. masters) | 1 | 3 |
| controller_type | Machine type for controllers | "n1-standard-1" | See below |
| controller_disk_type | Controller disk size in GB | 30 | 20 |
| worker_count | Number of workers | 1 | 3 |
| worker_type | Machine type for workers | "n1-standard-1" | See below |
| worker_disk_size | Worker disk size in GB | 30 | 100 |
| worker_preemptible | If enabled, Compute Engine will terminate workers randomly within 24 hours | false | true |
| controller_snippets | Controller Container Linux Config snippets | [] | [example](/advanced/customization/) |
| worker_snippets | Worker Container Linux Config snippets | [] | [example](/advanced/customization/) |

View File

@ -9,7 +9,6 @@ module "bootstrap" {
network_mtu = 1440
pod_cidr = var.pod_cidr
service_cidr = var.service_cidr
cluster_domain_suffix = var.cluster_domain_suffix
enable_reporting = var.enable_reporting
enable_aggregation = var.enable_aggregation
daemonset_tolerations = var.daemonset_tolerations

View File

@ -143,7 +143,7 @@ storage:
cgroupDriver: systemd
clusterDNS:
- ${cluster_dns_service_ip}
clusterDomain: ${cluster_domain_suffix}
clusterDomain: cluster.local
healthzPort: 0
rotateCertificates: true
shutdownGracePeriod: 45s

View File

@ -81,7 +81,6 @@ data "ct_config" "controllers" {
kubeconfig = indent(10, module.bootstrap.kubeconfig-kubelet)
ssh_authorized_key = var.ssh_authorized_key
cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
cluster_domain_suffix = var.cluster_domain_suffix
})
strict = true
snippets = var.controller_snippets

View File

@ -134,13 +134,7 @@ variable "worker_node_labels" {
default = []
}
# unofficial, undocumented, unsupported
variable "cluster_domain_suffix" {
type = string
description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
default = "cluster.local"
}
# advanced
variable "daemonset_tolerations" {
type = list(string)

View File

@ -13,11 +13,10 @@ module "workers" {
preemptible = var.worker_preemptible
# configuration
kubeconfig = module.bootstrap.kubeconfig-kubelet
ssh_authorized_key = var.ssh_authorized_key
service_cidr = var.service_cidr
cluster_domain_suffix = var.cluster_domain_suffix
snippets = var.worker_snippets
node_labels = var.worker_node_labels
kubeconfig = module.bootstrap.kubeconfig-kubelet
ssh_authorized_key = var.ssh_authorized_key
service_cidr = var.service_cidr
snippets = var.worker_snippets
node_labels = var.worker_node_labels
}

View File

@ -98,7 +98,7 @@ storage:
cgroupDriver: systemd
clusterDNS:
- ${cluster_dns_service_ip}
clusterDomain: ${cluster_domain_suffix}
clusterDomain: cluster.local
healthzPort: 0
rotateCertificates: true
shutdownGracePeriod: 45s

View File

@ -96,13 +96,7 @@ variable "node_taints" {
default = []
}
# unofficial, undocumented, unsupported, temporary
variable "cluster_domain_suffix" {
type = string
description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
default = "cluster.local"
}
# advanced
variable "accelerator_type" {
type = string

View File

@ -111,7 +111,6 @@ data "ct_config" "worker" {
kubeconfig = indent(10, var.kubeconfig)
ssh_authorized_key = var.ssh_authorized_key
cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
cluster_domain_suffix = var.cluster_domain_suffix
node_labels = join(",", var.node_labels)
node_taints = join(",", var.node_taints)
})

View File

@ -9,7 +9,6 @@ module "bootstrap" {
network_mtu = 1440
pod_cidr = var.pod_cidr
service_cidr = var.service_cidr
cluster_domain_suffix = var.cluster_domain_suffix
enable_reporting = var.enable_reporting
enable_aggregation = var.enable_aggregation
daemonset_tolerations = var.daemonset_tolerations

View File

@ -143,7 +143,7 @@ storage:
cgroupDriver: systemd
clusterDNS:
- ${cluster_dns_service_ip}
clusterDomain: ${cluster_domain_suffix}
clusterDomain: cluster.local
healthzPort: 0
rotateCertificates: true
shutdownGracePeriod: 45s

View File

@ -81,7 +81,6 @@ data "ct_config" "controllers" {
kubeconfig = indent(10, module.bootstrap.kubeconfig-kubelet)
ssh_authorized_key = var.ssh_authorized_key
cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
cluster_domain_suffix = var.cluster_domain_suffix
})
strict = true
snippets = var.controller_snippets

View File

@ -115,7 +115,6 @@ EOD
default = "10.3.0.0/16"
}
variable "enable_reporting" {
type = bool
description = "Enable usage or analytics reporting to upstreams (Calico)"
@ -134,13 +133,7 @@ variable "worker_node_labels" {
default = []
}
# unofficial, undocumented, unsupported
variable "cluster_domain_suffix" {
type = string
description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
default = "cluster.local"
}
# advanced
variable "daemonset_tolerations" {
type = list(string)

View File

@ -13,11 +13,10 @@ module "workers" {
preemptible = var.worker_preemptible
# configuration
kubeconfig = module.bootstrap.kubeconfig-kubelet
ssh_authorized_key = var.ssh_authorized_key
service_cidr = var.service_cidr
cluster_domain_suffix = var.cluster_domain_suffix
snippets = var.worker_snippets
node_labels = var.worker_node_labels
kubeconfig = module.bootstrap.kubeconfig-kubelet
ssh_authorized_key = var.ssh_authorized_key
service_cidr = var.service_cidr
snippets = var.worker_snippets
node_labels = var.worker_node_labels
}

View File

@ -98,7 +98,7 @@ storage:
cgroupDriver: systemd
clusterDNS:
- ${cluster_dns_service_ip}
clusterDomain: ${cluster_domain_suffix}
clusterDomain: cluster.local
healthzPort: 0
rotateCertificates: true
shutdownGracePeriod: 45s

View File

@ -96,13 +96,7 @@ variable "node_taints" {
default = []
}
# unofficial, undocumented, unsupported, temporary
variable "cluster_domain_suffix" {
type = string
description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
default = "cluster.local"
}
# advanced
variable "accelerator_type" {
type = string

View File

@ -111,7 +111,6 @@ data "ct_config" "worker" {
kubeconfig = indent(10, var.kubeconfig)
ssh_authorized_key = var.ssh_authorized_key
cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
cluster_domain_suffix = var.cluster_domain_suffix
node_labels = join(",", var.node_labels)
node_taints = join(",", var.node_taints)
})