Remove the cluster_domain_suffix variable
* Drop support for `cluster_domain_suffix` customization and always use `cluster.local`. Many components in the Kubernetes ecosystem assume this default suffix, and it's rare to set a custom value these days
* Clean up a few variables that are seldom used
parent af27661432
commit 0120b9f38d
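For module users, the upgrade is a deletion: a `cluster_domain_suffix` argument in a cluster definition is no longer accepted and should simply be removed. A minimal sketch, assuming a cluster module block like the ones in the docs hunks below (the module source here is a placeholder, not taken from this commit); clusters that already used the default `cluster.local` see no behavioral change:

```hcl
module "tempest" {
  source = "git::https://example.com/cluster-module?ref=SOME_TAG" # placeholder source

  # ...required arguments (dns_zone, ssh_authorized_key, etc.) elided...

  # Before: an optional, undocumented override
  # cluster_domain_suffix = "cluster.local"
  # After: delete the line; the kubelet clusterDomain is always "cluster.local"
}
```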
@@ -4,11 +4,7 @@ Notable changes between versions.
 
 ## Latest
 
-### Azure
-
-* Allow controller and worker nodes to use different CPU architectures
-  * Add `controller_arch` and `worker_arch` variables
-  * Remove the `arch` variable
+* Remove `cluster_domain_suffix` variable, always use "cluster.local"
 
 ## v1.30.3
 
@@ -127,9 +127,10 @@ List the pods.
 ```
 $ kubectl get pods --all-namespaces
 NAMESPACE NAME READY STATUS RESTARTS AGE
-kube-system calico-node-1cs8z 2/2 Running 0 6m
-kube-system calico-node-d1l5b 2/2 Running 0 6m
-kube-system calico-node-sp9ps 2/2 Running 0 6m
+kube-system cilium-1cs8z 1/1 Running 0 6m
+kube-system cilium-d1l5b 1/1 Running 0 6m
+kube-system cilium-sp9ps 1/1 Running 0 6m
+kube-system cilium-operator-68d778b448-g744f 1/1 Running 0 6m
 kube-system coredns-1187388186-zj5dl 1/1 Running 0 6m
 kube-system coredns-1187388186-dkh3o 1/1 Running 0 6m
 kube-system kube-apiserver-controller-0 1/1 Running 0 6m
@@ -9,7 +9,6 @@ module "bootstrap" {
 network_mtu = var.network_mtu
 pod_cidr = var.pod_cidr
 service_cidr = var.service_cidr
-cluster_domain_suffix = var.cluster_domain_suffix
 enable_reporting = var.enable_reporting
 enable_aggregation = var.enable_aggregation
 daemonset_tolerations = var.daemonset_tolerations
@@ -149,7 +149,7 @@ storage:
 cgroupDriver: systemd
 clusterDNS:
 - ${cluster_dns_service_ip}
-clusterDomain: ${cluster_domain_suffix}
+clusterDomain: cluster.local
 healthzPort: 0
 rotateCertificates: true
 shutdownGracePeriod: 45s
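As a worked example of the inputs above (not part of the diff): with the default `service_cidr` of `10.3.0.0/16` shown in the variables hunks below, the templated values resolve as sketched here, and the domain is now a literal rather than a template variable.

```hcl
# Illustrative only: how the kubelet settings above resolve with module defaults.
locals {
  service_cidr           = "10.3.0.0/16"                    # module default
  cluster_dns_service_ip = cidrhost(local.service_cidr, 10) # => "10.3.0.10"
}
# Rendered kubelet config:
#   clusterDNS: [10.3.0.10]
#   clusterDomain: cluster.local   (previously ${cluster_domain_suffix})
```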
@@ -65,7 +65,6 @@ data "ct_config" "controllers" {
 kubeconfig = indent(10, module.bootstrap.kubeconfig-kubelet)
 ssh_authorized_key = var.ssh_authorized_key
 cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
-cluster_domain_suffix = var.cluster_domain_suffix
 })
 strict = true
 snippets = var.controller_snippets
@@ -164,6 +164,14 @@ EOD
 default = "10.3.0.0/16"
 }
 
+variable "worker_node_labels" {
+type = list(string)
+description = "List of initial worker node labels"
+default = []
+}
+
+# advanced
+
 variable "enable_reporting" {
 type = bool
 description = "Enable usage or analytics reporting to upstreams (Calico)"
@@ -176,22 +184,6 @@ variable "enable_aggregation" {
 default = true
 }
 
-variable "worker_node_labels" {
-type = list(string)
-description = "List of initial worker node labels"
-default = []
-}
-
-# unofficial, undocumented, unsupported
-
-variable "cluster_domain_suffix" {
-type = string
-description = "Queries for domains with the suffix will be answered by CoreDNS. Default is cluster.local (e.g. foo.default.svc.cluster.local)"
-default = "cluster.local"
-}
-
-# advanced
-
 variable "controller_arch" {
 type = string
 description = "Controller node(s) architecture (amd64 or arm64)"
@@ -6,23 +6,24 @@ module "workers" {
 vpc_id = aws_vpc.network.id
 subnet_ids = aws_subnet.public.*.id
 security_groups = [aws_security_group.worker.id]
-worker_count = var.worker_count
-instance_type = var.worker_type
+
+# instances
 os_stream = var.os_stream
-arch = var.worker_arch
-disk_type = var.worker_disk_type
-disk_size = var.worker_disk_size
-disk_iops = var.worker_disk_iops
-cpu_credits = var.worker_cpu_credits
-spot_price = var.worker_price
-target_groups = var.worker_target_groups
+worker_count = var.worker_count
+instance_type = var.worker_type
+arch = var.worker_arch
+disk_type = var.worker_disk_type
+disk_size = var.worker_disk_size
+disk_iops = var.worker_disk_iops
+cpu_credits = var.worker_cpu_credits
+spot_price = var.worker_price
+target_groups = var.worker_target_groups
 
 # configuration
 kubeconfig = module.bootstrap.kubeconfig-kubelet
 ssh_authorized_key = var.ssh_authorized_key
 service_cidr = var.service_cidr
-cluster_domain_suffix = var.cluster_domain_suffix
 snippets = var.worker_snippets
 node_labels = var.worker_node_labels
 }
 
@@ -104,7 +104,7 @@ storage:
 cgroupDriver: systemd
 clusterDNS:
 - ${cluster_dns_service_ip}
-clusterDomain: ${cluster_domain_suffix}
+clusterDomain: cluster.local
 healthzPort: 0
 rotateCertificates: true
 shutdownGracePeriod: 45s
@@ -108,12 +108,6 @@ EOD
 default = "10.3.0.0/16"
 }
 
-variable "cluster_domain_suffix" {
-type = string
-description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
-default = "cluster.local"
-}
-
 variable "node_labels" {
 type = list(string)
 description = "List of initial node labels"
@@ -126,15 +120,14 @@ variable "node_taints" {
 default = []
 }
 
-# unofficial, undocumented, unsupported
+# advanced
 
 variable "arch" {
 type = string
 description = "Container architecture (amd64 or arm64)"
 default = "amd64"
 
 validation {
-condition = var.arch == "amd64" || var.arch == "arm64"
+condition = contains(["amd64", "arm64"], var.arch)
 error_message = "The arch must be amd64 or arm64."
 }
 }
@@ -111,7 +111,6 @@ data "ct_config" "worker" {
 kubeconfig = indent(10, var.kubeconfig)
 ssh_authorized_key = var.ssh_authorized_key
 cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
-cluster_domain_suffix = var.cluster_domain_suffix
 node_labels = join(",", var.node_labels)
 node_taints = join(",", var.node_taints)
 })
@@ -9,7 +9,6 @@ module "bootstrap" {
 network_mtu = var.network_mtu
 pod_cidr = var.pod_cidr
 service_cidr = var.service_cidr
-cluster_domain_suffix = var.cluster_domain_suffix
 enable_reporting = var.enable_reporting
 enable_aggregation = var.enable_aggregation
 daemonset_tolerations = var.daemonset_tolerations
@@ -148,7 +148,7 @@ storage:
 cgroupDriver: systemd
 clusterDNS:
 - ${cluster_dns_service_ip}
-clusterDomain: ${cluster_domain_suffix}
+clusterDomain: cluster.local
 healthzPort: 0
 rotateCertificates: true
 shutdownGracePeriod: 45s
@@ -66,7 +66,6 @@ data "ct_config" "controllers" {
 kubeconfig = indent(10, module.bootstrap.kubeconfig-kubelet)
 ssh_authorized_key = var.ssh_authorized_key
 cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
-cluster_domain_suffix = var.cluster_domain_suffix
 })
 strict = true
 snippets = var.controller_snippets
@@ -164,6 +164,14 @@ EOD
 default = "10.3.0.0/16"
 }
 
+variable "worker_node_labels" {
+type = list(string)
+description = "List of initial worker node labels"
+default = []
+}
+
+# advanced
+
 variable "enable_reporting" {
 type = bool
 description = "Enable usage or analytics reporting to upstreams (Calico)"
@@ -176,20 +184,6 @@ variable "enable_aggregation" {
 default = true
 }
 
-variable "worker_node_labels" {
-type = list(string)
-description = "List of initial worker node labels"
-default = []
-}
-
-# unofficial, undocumented, unsupported
-
-variable "cluster_domain_suffix" {
-type = string
-description = "Queries for domains with the suffix will be answered by CoreDNS. Default is cluster.local (e.g. foo.default.svc.cluster.local)"
-default = "cluster.local"
-}
-
 variable "controller_arch" {
 type = string
 description = "Controller node(s) architecture (amd64 or arm64)"
@@ -210,7 +204,6 @@ variable "worker_arch" {
 }
 }
 
-
 variable "daemonset_tolerations" {
 type = list(string)
 description = "List of additional taint keys kube-system DaemonSets should tolerate (e.g. ['custom-role', 'gpu-role'])"
@@ -6,22 +6,23 @@ module "workers" {
 vpc_id = aws_vpc.network.id
 subnet_ids = aws_subnet.public.*.id
 security_groups = [aws_security_group.worker.id]
-worker_count = var.worker_count
-instance_type = var.worker_type
+
+# instances
 os_image = var.os_image
-arch = var.worker_arch
-disk_type = var.worker_disk_type
-disk_size = var.worker_disk_size
-disk_iops = var.worker_disk_iops
-spot_price = var.worker_price
-target_groups = var.worker_target_groups
+worker_count = var.worker_count
+instance_type = var.worker_type
+arch = var.worker_arch
+disk_type = var.worker_disk_type
+disk_size = var.worker_disk_size
+disk_iops = var.worker_disk_iops
+spot_price = var.worker_price
+target_groups = var.worker_target_groups
 
 # configuration
 kubeconfig = module.bootstrap.kubeconfig-kubelet
 ssh_authorized_key = var.ssh_authorized_key
 service_cidr = var.service_cidr
-cluster_domain_suffix = var.cluster_domain_suffix
 snippets = var.worker_snippets
 node_labels = var.worker_node_labels
 }
 
@@ -103,7 +103,7 @@ storage:
 cgroupDriver: systemd
 clusterDNS:
 - ${cluster_dns_service_ip}
-clusterDomain: ${cluster_domain_suffix}
+clusterDomain: cluster.local
 healthzPort: 0
 rotateCertificates: true
 shutdownGracePeriod: 45s
@@ -108,12 +108,6 @@ EOD
 default = "10.3.0.0/16"
 }
 
-variable "cluster_domain_suffix" {
-type = string
-description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
-default = "cluster.local"
-}
-
 variable "node_labels" {
 type = list(string)
 description = "List of initial node labels"
@@ -134,7 +128,7 @@ variable "arch" {
 default = "amd64"
 
 validation {
-condition = var.arch == "amd64" || var.arch == "arm64"
+condition = contains(["amd64", "arm64"], var.arch)
 error_message = "The arch must be amd64 or arm64."
 }
 }
@@ -111,7 +111,6 @@ data "ct_config" "worker" {
 kubeconfig = indent(10, var.kubeconfig)
 ssh_authorized_key = var.ssh_authorized_key
 cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
-cluster_domain_suffix = var.cluster_domain_suffix
 node_labels = join(",", var.node_labels)
 node_taints = join(",", var.node_taints)
 })
@@ -14,7 +14,6 @@ module "bootstrap" {
 
 pod_cidr = var.pod_cidr
 service_cidr = var.service_cidr
-cluster_domain_suffix = var.cluster_domain_suffix
 enable_reporting = var.enable_reporting
 enable_aggregation = var.enable_aggregation
 daemonset_tolerations = var.daemonset_tolerations
@@ -144,7 +144,7 @@ storage:
 cgroupDriver: systemd
 clusterDNS:
 - ${cluster_dns_service_ip}
-clusterDomain: ${cluster_domain_suffix}
+clusterDomain: cluster.local
 healthzPort: 0
 rotateCertificates: true
 shutdownGracePeriod: 45s
@@ -163,7 +163,6 @@ data "ct_config" "controllers" {
 kubeconfig = indent(10, module.bootstrap.kubeconfig-kubelet)
 ssh_authorized_key = var.ssh_authorized_key
 cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
-cluster_domain_suffix = var.cluster_domain_suffix
 })
 strict = true
 snippets = var.controller_snippets
@@ -27,7 +27,6 @@ variable "os_image" {
 description = "Fedora CoreOS image for instances"
 }
 
-
 variable "controller_count" {
 type = number
 description = "Number of controllers (i.e. masters)"
@@ -145,6 +144,14 @@ EOD
 default = "10.3.0.0/16"
 }
 
+variable "worker_node_labels" {
+type = list(string)
+description = "List of initial worker node labels"
+default = []
+}
+
+# advanced
+
 variable "enable_reporting" {
 type = bool
 description = "Enable usage or analytics reporting to upstreams (Calico)"
@@ -157,20 +164,6 @@ variable "enable_aggregation" {
 default = true
 }
 
-variable "worker_node_labels" {
-type = list(string)
-description = "List of initial worker node labels"
-default = []
-}
-
-# unofficial, undocumented, unsupported
-
-variable "cluster_domain_suffix" {
-type = string
-description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
-default = "cluster.local"
-}
-
 variable "daemonset_tolerations" {
 type = list(string)
 description = "List of additional taint keys kube-system DaemonSets should tolerate (e.g. ['custom-role', 'gpu-role'])"
@@ -9,20 +9,20 @@ module "workers" {
 security_group_id = azurerm_network_security_group.worker.id
 backend_address_pool_ids = local.backend_address_pool_ids
 
+# instances
+os_image = var.os_image
 worker_count = var.worker_count
 vm_type = var.worker_type
-os_image = var.os_image
 disk_type = var.worker_disk_type
 disk_size = var.worker_disk_size
 ephemeral_disk = var.worker_ephemeral_disk
 priority = var.worker_priority
 
 # configuration
 kubeconfig = module.bootstrap.kubeconfig-kubelet
 ssh_authorized_key = var.ssh_authorized_key
 azure_authorized_key = var.azure_authorized_key
 service_cidr = var.service_cidr
-cluster_domain_suffix = var.cluster_domain_suffix
 snippets = var.worker_snippets
 node_labels = var.worker_node_labels
 }
@@ -99,7 +99,7 @@ storage:
 cgroupDriver: systemd
 clusterDNS:
 - ${cluster_dns_service_ip}
-clusterDomain: ${cluster_domain_suffix}
+clusterDomain: cluster.local
 healthzPort: 0
 rotateCertificates: true
 shutdownGracePeriod: 45s
@@ -120,12 +120,3 @@ variable "node_taints" {
 description = "List of initial node taints"
 default = []
 }
-
-# unofficial, undocumented, unsupported
-
-variable "cluster_domain_suffix" {
-description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
-type = string
-default = "cluster.local"
-}
-
@@ -84,7 +84,6 @@ data "ct_config" "worker" {
 kubeconfig = indent(10, var.kubeconfig)
 ssh_authorized_key = var.ssh_authorized_key
 cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
-cluster_domain_suffix = var.cluster_domain_suffix
 node_labels = join(",", var.node_labels)
 node_taints = join(",", var.node_taints)
 })
@@ -14,7 +14,6 @@ module "bootstrap" {
 
 pod_cidr = var.pod_cidr
 service_cidr = var.service_cidr
-cluster_domain_suffix = var.cluster_domain_suffix
 enable_reporting = var.enable_reporting
 enable_aggregation = var.enable_aggregation
 daemonset_tolerations = var.daemonset_tolerations
@@ -144,7 +144,7 @@ storage:
 cgroupDriver: systemd
 clusterDNS:
 - ${cluster_dns_service_ip}
-clusterDomain: ${cluster_domain_suffix}
+clusterDomain: cluster.local
 healthzPort: 0
 rotateCertificates: true
 shutdownGracePeriod: 45s
@@ -185,7 +185,6 @@ data "ct_config" "controllers" {
 kubeconfig = indent(10, module.bootstrap.kubeconfig-kubelet)
 ssh_authorized_key = var.ssh_authorized_key
 cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
-cluster_domain_suffix = var.cluster_domain_suffix
 })
 strict = true
 snippets = var.controller_snippets
@@ -196,14 +196,6 @@ variable "daemonset_tolerations" {
 default = []
 }
 
-# unofficial, undocumented, unsupported
-
-variable "cluster_domain_suffix" {
-type = string
-description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
-default = "cluster.local"
-}
-
 variable "components" {
 description = "Configure pre-installed cluster components"
 # Component configs are passed through to terraform-render-bootstrap,
@@ -18,12 +18,11 @@ module "workers" {
 priority = var.worker_priority
 
 # configuration
 kubeconfig = module.bootstrap.kubeconfig-kubelet
 ssh_authorized_key = var.ssh_authorized_key
 azure_authorized_key = var.azure_authorized_key
 service_cidr = var.service_cidr
-cluster_domain_suffix = var.cluster_domain_suffix
 snippets = var.worker_snippets
 node_labels = var.worker_node_labels
 arch = var.worker_arch
 }
@@ -99,7 +99,7 @@ storage:
 cgroupDriver: systemd
 clusterDNS:
 - ${cluster_dns_service_ip}
-clusterDomain: ${cluster_domain_suffix}
+clusterDomain: cluster.local
 healthzPort: 0
 rotateCertificates: true
 shutdownGracePeriod: 45s
@@ -137,12 +137,3 @@ variable "arch" {
 error_message = "The arch must be amd64 or arm64."
 }
 }
-
-# unofficial, undocumented, unsupported
-
-variable "cluster_domain_suffix" {
-description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
-type = string
-default = "cluster.local"
-}
-
@@ -105,7 +105,6 @@ data "ct_config" "worker" {
 kubeconfig = indent(10, var.kubeconfig)
 ssh_authorized_key = var.ssh_authorized_key
 cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
-cluster_domain_suffix = var.cluster_domain_suffix
 node_labels = join(",", var.node_labels)
 node_taints = join(",", var.node_taints)
 })
@@ -10,7 +10,6 @@ module "bootstrap" {
 network_ip_autodetection_method = var.network_ip_autodetection_method
 pod_cidr = var.pod_cidr
 service_cidr = var.service_cidr
-cluster_domain_suffix = var.cluster_domain_suffix
 enable_reporting = var.enable_reporting
 enable_aggregation = var.enable_aggregation
 components = var.components
@@ -154,7 +154,7 @@ storage:
 cgroupDriver: systemd
 clusterDNS:
 - ${cluster_dns_service_ip}
-clusterDomain: ${cluster_domain_suffix}
+clusterDomain: cluster.local
 healthzPort: 0
 rotateCertificates: true
 shutdownGracePeriod: 45s
@@ -59,7 +59,6 @@ data "ct_config" "controllers" {
 etcd_name = var.controllers.*.name[count.index]
 etcd_initial_cluster = join(",", formatlist("%s=https://%s:2380", var.controllers.*.name, var.controllers.*.domain))
 cluster_dns_service_ip = module.bootstrap.cluster_dns_service_ip
-cluster_domain_suffix = var.cluster_domain_suffix
 ssh_authorized_key = var.ssh_authorized_key
 })
 strict = true
@@ -151,13 +151,7 @@ variable "enable_aggregation" {
 default = true
 }
 
-# unofficial, undocumented, unsupported
+# advanced
 
-variable "cluster_domain_suffix" {
-description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
-type = string
-default = "cluster.local"
-}
-
 variable "components" {
 description = "Configure pre-installed cluster components"
@@ -108,7 +108,7 @@ storage:
 cgroupDriver: systemd
 clusterDNS:
 - ${cluster_dns_service_ip}
-clusterDomain: ${cluster_domain_suffix}
+clusterDomain: cluster.local
 healthzPort: 0
 rotateCertificates: true
 shutdownGracePeriod: 45s
@@ -53,7 +53,6 @@ data "ct_config" "worker" {
 domain_name = var.domain
 ssh_authorized_key = var.ssh_authorized_key
 cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
-cluster_domain_suffix = var.cluster_domain_suffix
 node_labels = join(",", var.node_labels)
 node_taints = join(",", var.node_taints)
 })
@@ -103,9 +103,3 @@ The 1st IP will be reserved for kube_apiserver, the 10th IP will be reserved for
 EOD
 default = "10.3.0.0/16"
 }
-
-variable "cluster_domain_suffix" {
-description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
-type = string
-default = "cluster.local"
-}
@@ -15,13 +15,12 @@ module "workers" {
 domain = var.workers[count.index].domain
 
 # configuration
 kubeconfig = module.bootstrap.kubeconfig-kubelet
 ssh_authorized_key = var.ssh_authorized_key
 service_cidr = var.service_cidr
-cluster_domain_suffix = var.cluster_domain_suffix
 node_labels = lookup(var.worker_node_labels, var.workers[count.index].name, [])
 node_taints = lookup(var.worker_node_taints, var.workers[count.index].name, [])
 snippets = lookup(var.snippets, var.workers[count.index].name, [])
 
 # optional
 cached_install = var.cached_install
@@ -10,7 +10,6 @@ module "bootstrap" {
 network_ip_autodetection_method = var.network_ip_autodetection_method
 pod_cidr = var.pod_cidr
 service_cidr = var.service_cidr
-cluster_domain_suffix = var.cluster_domain_suffix
 enable_reporting = var.enable_reporting
 enable_aggregation = var.enable_aggregation
 components = var.components
@@ -155,7 +155,7 @@ storage:
 cgroupDriver: systemd
 clusterDNS:
 - ${cluster_dns_service_ip}
-clusterDomain: ${cluster_domain_suffix}
+clusterDomain: cluster.local
 healthzPort: 0
 rotateCertificates: true
 shutdownGracePeriod: 45s
@@ -88,7 +88,6 @@ data "ct_config" "controllers" {
 etcd_name = var.controllers.*.name[count.index]
 etcd_initial_cluster = join(",", formatlist("%s=https://%s:2380", var.controllers.*.name, var.controllers.*.domain))
 cluster_dns_service_ip = module.bootstrap.cluster_dns_service_ip
-cluster_domain_suffix = var.cluster_domain_suffix
 ssh_authorized_key = var.ssh_authorized_key
 })
 strict = true
@@ -167,13 +167,7 @@ EOD
 default = ""
 }
 
-# unofficial, undocumented, unsupported
+# advanced
 
-variable "cluster_domain_suffix" {
-type = string
-description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
-default = "cluster.local"
-}
-
 variable "components" {
 description = "Configure pre-installed cluster components"
@@ -113,7 +113,7 @@ storage:
 cgroupDriver: systemd
 clusterDNS:
 - ${cluster_dns_service_ip}
-clusterDomain: ${cluster_domain_suffix}
+clusterDomain: cluster.local
 healthzPort: 0
 rotateCertificates: true
 shutdownGracePeriod: 45s
@@ -79,7 +79,6 @@ data "ct_config" "worker" {
 domain_name = var.domain
 ssh_authorized_key = var.ssh_authorized_key
 cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
-cluster_domain_suffix = var.cluster_domain_suffix
 node_labels = join(",", var.node_labels)
 node_taints = join(",", var.node_taints)
 })
@@ -114,13 +114,3 @@ The 1st IP will be reserved for kube_apiserver, the 10th IP will be reserved for
 EOD
 default = "10.3.0.0/16"
 }
-
-
-
-variable "cluster_domain_suffix" {
-type = string
-description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
-default = "cluster.local"
-}
-
-
@@ -15,13 +15,12 @@ module "workers" {
 domain = var.workers[count.index].domain
 
 # configuration
 kubeconfig = module.bootstrap.kubeconfig-kubelet
 ssh_authorized_key = var.ssh_authorized_key
 service_cidr = var.service_cidr
-cluster_domain_suffix = var.cluster_domain_suffix
 node_labels = lookup(var.worker_node_labels, var.workers[count.index].name, [])
 node_taints = lookup(var.worker_node_taints, var.workers[count.index].name, [])
 snippets = lookup(var.snippets, var.workers[count.index].name, [])
 
 # optional
 download_protocol = var.download_protocol
@@ -11,11 +11,10 @@ module "bootstrap" {
 network_encapsulation = "vxlan"
 network_mtu = "1450"
 
 pod_cidr = var.pod_cidr
 service_cidr = var.service_cidr
-cluster_domain_suffix = var.cluster_domain_suffix
 enable_reporting = var.enable_reporting
 enable_aggregation = var.enable_aggregation
 components = var.components
 }
 
@@ -151,7 +151,7 @@ storage:
 cgroupDriver: systemd
 clusterDNS:
 - ${cluster_dns_service_ip}
-clusterDomain: ${cluster_domain_suffix}
+clusterDomain: cluster.local
 healthzPort: 0
 rotateCertificates: true
 shutdownGracePeriod: 45s
@@ -104,7 +104,7 @@ storage:
 cgroupDriver: systemd
 clusterDNS:
 - ${cluster_dns_service_ip}
-clusterDomain: ${cluster_domain_suffix}
+clusterDomain: cluster.local
 healthzPort: 0
 rotateCertificates: true
 shutdownGracePeriod: 45s
@@ -74,7 +74,6 @@ data "ct_config" "controllers" {
 for i in range(var.controller_count) : "etcd${i}=https://${var.cluster_name}-etcd${i}.${var.dns_zone}:2380"
 ])
 cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
-cluster_domain_suffix = var.cluster_domain_suffix
 })
 strict = true
 snippets = var.controller_snippets
@@ -98,13 +98,7 @@ variable "enable_aggregation" {
 default = true
 }
 
-# unofficial, undocumented, unsupported
+# advanced
 
-variable "cluster_domain_suffix" {
-type = string
-description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
-default = "cluster.local"
-}
-
 variable "components" {
 description = "Configure pre-installed cluster components"
@@ -62,7 +62,6 @@ resource "digitalocean_tag" "workers" {
 data "ct_config" "worker" {
 content = templatefile("${path.module}/butane/worker.yaml", {
 cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
-cluster_domain_suffix = var.cluster_domain_suffix
 })
 strict = true
 snippets = var.worker_snippets
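Reassembling the DigitalOcean worker hunk above, the resulting data source is sketched below; only lines visible in the hunk are included (any other arguments are elided rather than guessed), and the alignment is cosmetic.

```hcl
data "ct_config" "worker" {
  content = templatefile("${path.module}/butane/worker.yaml", {
    # cluster_domain_suffix is no longer passed; the template hard-codes "cluster.local"
    cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
  })
  strict   = true
  snippets = var.worker_snippets
}
```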
@@ -11,11 +11,10 @@ module "bootstrap" {
 network_encapsulation = "vxlan"
 network_mtu = "1450"
 
 pod_cidr = var.pod_cidr
 service_cidr = var.service_cidr
-cluster_domain_suffix = var.cluster_domain_suffix
 enable_reporting = var.enable_reporting
 enable_aggregation = var.enable_aggregation
 components = var.components
 }
 
@@ -153,7 +153,7 @@ storage:
 cgroupDriver: systemd
 clusterDNS:
 - ${cluster_dns_service_ip}
-clusterDomain: ${cluster_domain_suffix}
+clusterDomain: cluster.local
 healthzPort: 0
 rotateCertificates: true
 shutdownGracePeriod: 45s
@@ -103,7 +103,7 @@ storage:
 cgroupDriver: systemd
 clusterDNS:
 - ${cluster_dns_service_ip}
-clusterDomain: ${cluster_domain_suffix}
+clusterDomain: cluster.local
 healthzPort: 0
 rotateCertificates: true
 shutdownGracePeriod: 45s
@@ -79,7 +79,6 @@ data "ct_config" "controllers" {
 for i in range(var.controller_count) : "etcd${i}=https://${var.cluster_name}-etcd${i}.${var.dns_zone}:2380"
 ])
 cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
-cluster_domain_suffix = var.cluster_domain_suffix
 })
 strict = true
 snippets = var.controller_snippets
@@ -98,13 +98,7 @@ variable "enable_aggregation" {
 default = true
 }
 
-# unofficial, undocumented, unsupported
+# advanced
 
-variable "cluster_domain_suffix" {
-type = string
-description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
-default = "cluster.local"
-}
-
 variable "components" {
 description = "Configure pre-installed cluster components"
@@ -60,7 +60,6 @@ resource "digitalocean_tag" "workers" {
 data "ct_config" "worker" {
 content = templatefile("${path.module}/butane/worker.yaml", {
 cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
-cluster_domain_suffix = var.cluster_domain_suffix
 })
 strict = true
 snippets = var.worker_snippets
@@ -79,12 +79,12 @@ module "tempest" {
 dns_zone = "aws.example.com"
 dns_zone_id = "Z3PAABBCFAKEC0"
 
-# configuration
-ssh_authorized_key = "ssh-ed25519 AAAAB3Nz..."
-
-# optional
+# instances
 worker_count = 2
 worker_type = "t3.small"
+
+# configuration
+ssh_authorized_key = "ssh-ed25519 AAAAB3Nz..."
 }
 ```
 
@@ -155,9 +155,9 @@ List the pods.
 ```
 $ kubectl get pods --all-namespaces
 NAMESPACE NAME READY STATUS RESTARTS AGE
-kube-system calico-node-1m5bf 2/2 Running 0 34m
-kube-system calico-node-7jmr1 2/2 Running 0 34m
-kube-system calico-node-bknc8 2/2 Running 0 34m
+kube-system cilium-1m5bf 1/1 Running 0 34m
+kube-system cilium-7jmr1 1/1 Running 0 34m
+kube-system cilium-bknc8 1/1 Running 0 34m
 kube-system coredns-1187388186-wx1lg 1/1 Running 0 34m
 kube-system coredns-1187388186-qjnvp 1/1 Running 0 34m
 kube-system kube-apiserver-ip-10-0-3-155 1/1 Running 0 34m
@@ -206,16 +206,21 @@ Reference the DNS zone id with `aws_route53_zone.zone-for-clusters.zone_id`.
 
 | Name | Description | Default | Example |
 |:-----|:------------|:--------|:--------|
+| os_stream | Fedora CoreOS stream for instances | "stable" | "testing", "next" |
 | controller_count | Number of controllers (i.e. masters) | 1 | 1 |
-| worker_count | Number of workers | 1 | 3 |
 | controller_type | EC2 instance type for controllers | "t3.small" | See below |
+| controller_disk_size | Size of EBS volume in GB | 30 | 100 |
+| controller_disk_type | Type of EBS volume | gp3 | io1 |
+| controller_disk_iops | IOPS of EBS volume | 3000 | 4000 |
+| controller_cpu_credits | Burstable CPU pricing model | null (i.e. auto) | standard, unlimited |
+| worker_count | Number of workers | 1 | 3 |
 | worker_type | EC2 instance type for workers | "t3.small" | See below |
-| os_stream | Fedora CoreOS stream for compute instances | "stable" | "testing", "next" |
-| disk_size | Size of the EBS volume in GB | 30 | 100 |
-| disk_type | Type of the EBS volume | "gp3" | standard, gp2, gp3, io1 |
-| disk_iops | IOPS of the EBS volume | 0 (i.e. auto) | 400 |
-| worker_target_groups | Target group ARNs to which worker instances should be added | [] | [aws_lb_target_group.app.id] |
+| worker_disk_size | Size of EBS volume in GB | 30 | 100 |
+| worker_disk_type | Type of EBS volume | gp3 | io1 |
+| worker_disk_iops | IOPS of EBS volume | 3000 | 4000 |
+| worker_cpu_credits | Burstable CPU pricing model | null (i.e. auto) | standard, unlimited |
 | worker_price | Spot price in USD for worker instances or 0 to use on-demand instances | 0 | 0.10 |
+| worker_target_groups | Target group ARNs to which worker instances should be added | [] | [aws_lb_target_group.app.id] |
 | controller_snippets | Controller Butane snippets | [] | [examples](/advanced/customization/) |
 | worker_snippets | Worker Butane snippets | [] | [examples](/advanced/customization/) |
 | networking | Choice of networking provider | "cilium" | "calico" or "cilium" or "flannel" |
@@ -228,7 +233,7 @@ Reference the DNS zone id with `aws_route53_zone.zone-for-clusters.zone_id`.
 Check the list of valid [instance types](https://aws.amazon.com/ec2/instance-types/).
 
 !!! warning
-Do not choose a `controller_type` smaller than `t2.small`. Smaller instances are not sufficient for running a controller.
+Do not choose a `controller_type` smaller than `t3.small`. Smaller instances are not sufficient for running a controller.
 
 !!! tip "MTU"
 If your EC2 instance type supports [Jumbo frames](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/network_mtu.html#jumbo_frame_instances) (most do), we recommend you change the `network_mtu` to 8981! You will get better pod-to-pod bandwidth.
@@ -93,16 +93,16 @@ module "ramius" {
 location = "centralus"
 dns_zone = "azure.example.com"
 dns_zone_group = "example-group"
-# configuration
-os_image = "/subscriptions/some/path/Microsoft.Compute/images/fedora-coreos-36.20220716.3.1"
-ssh_authorized_key = "ssh-ed25519 AAAAB3Nz..."
-
-# optional
-worker_count = 2
 network_cidr = {
 ipv4 = ["10.0.0.0/20"]
 }
 
+# instances
+os_image = "/subscriptions/some/path/Microsoft.Compute/images/fedora-coreos-36.20220716.3.1"
+worker_count = 2
+
+# configuration
+ssh_authorized_key = "ssh-ed25519 AAAAB3Nz..."
 }
 ```
 
|
@ -175,9 +175,9 @@ $ kubectl get pods --all-namespaces
|
||||||
NAMESPACE NAME READY STATUS RESTARTS AGE
|
NAMESPACE NAME READY STATUS RESTARTS AGE
|
||||||
kube-system coredns-7c6fbb4f4b-b6qzx 1/1 Running 0 26m
|
kube-system coredns-7c6fbb4f4b-b6qzx 1/1 Running 0 26m
|
||||||
kube-system coredns-7c6fbb4f4b-j2k3d 1/1 Running 0 26m
|
kube-system coredns-7c6fbb4f4b-j2k3d 1/1 Running 0 26m
|
||||||
kube-system calico-node-1m5bf 2/2 Running 0 26m
|
kube-system cilium-1m5bf 1/1 Running 0 26m
|
||||||
kube-system calico-node-7jmr1 2/2 Running 0 26m
|
kube-system cilium-7jmr1 1/1 Running 0 26m
|
||||||
kube-system calico-node-bknc8 2/2 Running 0 26m
|
kube-system cilium-bknc8 1/1 Running 0 26m
|
||||||
kube-system kube-apiserver-ramius-controller-0 1/1 Running 0 26m
|
kube-system kube-apiserver-ramius-controller-0 1/1 Running 0 26m
|
||||||
kube-system kube-controller-manager-ramius-controller-0 1/1 Running 0 26m
|
kube-system kube-controller-manager-ramius-controller-0 1/1 Running 0 26m
|
||||||
kube-system kube-proxy-j4vpq 1/1 Running 0 26m
|
kube-system kube-proxy-j4vpq 1/1 Running 0 26m
|
||||||
|
@@ -240,10 +240,14 @@ Reference the DNS zone with `azurerm_dns_zone.clusters.name` and its resource gr
 | Name | Description | Default | Example |
 |:-----|:------------|:--------|:--------|
 | controller_count | Number of controllers (i.e. masters) | 1 | 1 |
-| worker_count | Number of workers | 1 | 3 |
 | controller_type | Machine type for controllers | "Standard_B2s" | See below |
+| controller_disk_type | Managed disk for controllers | Premium_LRS | Standard_LRS |
+| controller_disk_size | Managed disk size in GB | 30 | 50 |
+| worker_count | Number of workers | 1 | 3 |
 | worker_type | Machine type for workers | "Standard_D2as_v5" | See below |
-| disk_size | Size of the disk in GB | 30 | 100 |
+| worker_disk_type | Managed disk for workers | Standard_LRS | Premium_LRS |
+| worker_disk_size | Size of the disk in GB | 30 | 100 |
+| worker_ephemeral_disk | Use ephemeral local disk instead of managed disk | false | true |
 | worker_priority | Set priority to Spot to use reduced cost surplus capacity, with the tradeoff that instances can be deallocated at any time | Regular | Spot |
 | controller_snippets | Controller Butane snippets | [] | [example](/advanced/customization/#usage) |
 | worker_snippets | Worker Butane snippets | [] | [example](/advanced/customization/#usage) |
@@ -255,9 +259,6 @@ Reference the DNS zone with `azurerm_dns_zone.clusters.name` and its resource gr
 
 Check the list of valid [machine types](https://azure.microsoft.com/en-us/pricing/details/virtual-machines/linux/) and their [specs](https://docs.microsoft.com/en-us/azure/virtual-machines/linux/sizes-general). Use `az vm list-skus` to get the identifier.
 
-!!! warning
-Unlike AWS and GCP, Azure requires its *virtual* networks to have non-overlapping IPv4 CIDRs (yeah, go figure). Instead of each cluster just using `10.0.0.0/16` for instances, each Azure cluster's `host_cidr` must be non-overlapping (e.g. 10.0.0.0/20 for the 1st cluster, 10.0.16.0/20 for the 2nd cluster, etc).
-
 !!! warning
 Do not choose a `controller_type` smaller than `Standard_B2s`. Smaller instances are not sufficient for running a controller.
 
@@ -323,9 +323,10 @@ List the pods.
 ```
 $ kubectl get pods --all-namespaces
 NAMESPACE NAME READY STATUS RESTARTS AGE
-kube-system calico-node-6qp7f 2/2 Running 1 11m
-kube-system calico-node-gnjrm 2/2 Running 0 11m
-kube-system calico-node-llbgt 2/2 Running 0 11m
+kube-system cilium-6qp7f 1/1 Running 1 11m
+kube-system cilium-gnjrm 1/1 Running 0 11m
+kube-system cilium-llbgt 1/1 Running 0 11m
+kube-system cilium-operator-68d778b448-g744f 1/1 Running 0 11m
 kube-system coredns-1187388186-dj3pd 1/1 Running 0 11m
 kube-system coredns-1187388186-mx9rt 1/1 Running 0 11m
 kube-system kube-apiserver-node1.example.com 1/1 Running 0 11m
@@ -88,12 +88,12 @@ module "nemo" {
   region   = "nyc3"
   dns_zone = "digital-ocean.example.com"
 
-  # configuration
+  # instances
   os_image = data.digitalocean_image.fedora-coreos-31-20200323-3-2.id
-  ssh_fingerprints = ["d7:9d:79:ae:56:32:73:79:95:88:e3:a2:ab:5d:45:e7"]
+  worker_count = 2
 
-  # optional
-  worker_count = 2
+  # configuration
+  ssh_fingerprints = ["d7:9d:79:ae:56:32:73:79:95:88:e3:a2:ab:5d:45:e7"]
 }
 ```
@@ -166,9 +166,9 @@ List the pods.
 NAMESPACE NAME READY STATUS RESTARTS AGE
 kube-system coredns-1187388186-ld1j7 1/1 Running 0 11m
 kube-system coredns-1187388186-rdhf7 1/1 Running 0 11m
-kube-system calico-node-1m5bf 2/2 Running 0 11m
-kube-system calico-node-7jmr1 2/2 Running 0 11m
-kube-system calico-node-bknc8 2/2 Running 0 11m
+kube-system cilium-1m5bf 1/1 Running 0 11m
+kube-system cilium-7jmr1 1/1 Running 0 11m
+kube-system cilium-bknc8 1/1 Running 0 11m
 kube-system kube-apiserver-ip-10.132.115.81 1/1 Running 0 11m
 kube-system kube-controller-manager-ip-10.132.115.81 1/1 Running 0 11m
 kube-system kube-proxy-6kxjf 1/1 Running 0 11m
@@ -81,11 +81,11 @@ module "yavin" {
   dns_zone      = "example.com"
   dns_zone_name = "example-zone"
 
+  # instances
+  worker_count = 2
+
   # configuration
   ssh_authorized_key = "ssh-ed25519 AAAAB3Nz..."
-
-  # optional
-  worker_count = 2
 }
 ```
@@ -157,9 +157,9 @@ List the pods.
 ```
 $ kubectl get pods --all-namespaces
 NAMESPACE NAME READY STATUS RESTARTS AGE
-kube-system calico-node-1cs8z 2/2 Running 0 6m
-kube-system calico-node-d1l5b 2/2 Running 0 6m
-kube-system calico-node-sp9ps 2/2 Running 0 6m
+kube-system cilium-1cs8z 1/1 Running 0 6m
+kube-system cilium-d1l5b 1/1 Running 0 6m
+kube-system cilium-sp9ps 1/1 Running 0 6m
 kube-system coredns-1187388186-dkh3o 1/1 Running 0 6m
 kube-system coredns-1187388186-zj5dl 1/1 Running 0 6m
 kube-system kube-apiserver-controller-0 1/1 Running 0 6m
@@ -211,12 +211,13 @@ resource "google_dns_managed_zone" "zone-for-clusters" {
 
 | Name | Description | Default | Example |
 |:-----|:------------|:--------|:--------|
-| controller_count | Number of controllers (i.e. masters) | 1 | 3 |
-| worker_count | Number of workers | 1 | 3 |
-| controller_type | Machine type for controllers | "n1-standard-1" | See below |
-| worker_type | Machine type for workers | "n1-standard-1" | See below |
 | os_stream | Fedora CoreOS stream for compute instances | "stable" | "stable", "testing", "next" |
-| disk_size | Size of the disk in GB | 30 | 100 |
+| controller_count | Number of controllers (i.e. masters) | 1 | 3 |
+| controller_type | Machine type for controllers | "n1-standard-1" | See below |
+| controller_disk_size | Controller disk size in GB | 30 | 20 |
+| worker_count | Number of workers | 1 | 3 |
+| worker_type | Machine type for workers | "n1-standard-1" | See below |
+| worker_disk_size | Worker disk size in GB | 30 | 100 |
 | worker_preemptible | If enabled, Compute Engine will terminate workers randomly within 24 hours | false | true |
 | controller_snippets | Controller Butane snippets | [] | [examples](/advanced/customization/) |
 | worker_snippets | Worker Butane snippets | [] | [examples](/advanced/customization/) |
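As a quick illustration of the reorganized Google Cloud table above, a hypothetical fragment follows; the module source/ref and values are illustrative, and required arguments (cluster name, DNS zone, SSH key) are elided.

```
module "yavin" {
  source = "git::https://github.com/poseidon/typhoon//google-cloud/fedora-coreos/kubernetes?ref=v1.30.3"
  # ... required cluster, DNS, and SSH settings elided ...

  # instances
  controller_type      = "n1-standard-1"
  controller_disk_size = 20  # GB, default 30
  worker_count         = 2
  worker_type          = "n1-standard-1"
  worker_disk_size     = 100 # GB, default 30
}
```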
@@ -79,12 +79,12 @@ module "tempest" {
   dns_zone    = "aws.example.com"
   dns_zone_id = "Z3PAABBCFAKEC0"
 
-  # configuration
-  ssh_authorized_key = "ssh-rsa AAAAB3Nz..."
+  # instances
+  worker_count = 2
+  worker_type  = "t3.small"
 
-  # optional
-  worker_count = 2
-  worker_type  = "t3.small"
+  # configuration
+  ssh_authorized_key = "ssh-rsa AAAAB3Nz..."
 }
 ```
@@ -155,9 +155,9 @@ List the pods.
 ```
 $ kubectl get pods --all-namespaces
 NAMESPACE NAME READY STATUS RESTARTS AGE
-kube-system calico-node-1m5bf 2/2 Running 0 34m
-kube-system calico-node-7jmr1 2/2 Running 0 34m
-kube-system calico-node-bknc8 2/2 Running 0 34m
+kube-system cilium-1m5bf 1/1 Running 0 34m
+kube-system cilium-7jmr1 1/1 Running 0 34m
+kube-system cilium-bknc8 1/1 Running 0 34m
 kube-system coredns-1187388186-wx1lg 1/1 Running 0 34m
 kube-system coredns-1187388186-qjnvp 1/1 Running 0 34m
 kube-system kube-apiserver-ip-10-0-3-155 1/1 Running 0 34m
@@ -206,16 +206,19 @@ Reference the DNS zone id with `aws_route53_zone.zone-for-clusters.zone_id`.
 
 | Name | Description | Default | Example |
 |:-----|:------------|:--------|:--------|
-| controller_count | Number of controllers (i.e. masters) | 1 | 1 |
-| worker_count | Number of workers | 1 | 3 |
-| controller_type | EC2 instance type for controllers | "t3.small" | See below |
-| worker_type | EC2 instance type for workers | "t3.small" | See below |
 | os_image | AMI channel for a Container Linux derivative | "flatcar-stable" | flatcar-stable, flatcar-beta, flatcar-alpha |
-| disk_size | Size of the EBS volume in GB | 30 | 100 |
-| disk_type | Type of the EBS volume | "gp3" | standard, gp2, gp3, io1 |
-| disk_iops | IOPS of the EBS volume | 0 (i.e. auto) | 400 |
-| worker_target_groups | Target group ARNs to which worker instances should be added | [] | [aws_lb_target_group.app.id] |
+| controller_count | Number of controllers (i.e. masters) | 1 | 1 |
+| controller_type | EC2 instance type for controllers | "t3.small" | See below |
+| controller_disk_size | Size of EBS volume in GB | 30 | 100 |
+| controller_disk_type | Type of EBS volume | gp3 | io1 |
+| controller_disk_iops | IOPS of EBS volume | 3000 | 4000 |
+| controller_cpu_credits | Burstable CPU pricing model | null (i.e. auto) | standard, unlimited |
+| worker_disk_size | Size of EBS volume in GB | 30 | 100 |
+| worker_disk_type | Type of EBS volume | gp3 | io1 |
+| worker_disk_iops | IOPS of EBS volume | 3000 | 4000 |
+| worker_cpu_credits | Burstable CPU pricing model | null (i.e. auto) | standard, unlimited |
 | worker_price | Spot price in USD for worker instances or 0 to use on-demand instances | 0/null | 0.10 |
+| worker_target_groups | Target group ARNs to which worker instances should be added | [] | [aws_lb_target_group.app.id] |
 | controller_snippets | Controller Container Linux Config snippets | [] | [example](/advanced/customization/) |
 | worker_snippets | Worker Container Linux Config snippets | [] | [example](/advanced/customization/) |
 | networking | Choice of networking provider | "cilium" | "calico" or "cilium" or "flannel" |
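For the AWS table above, a hypothetical fragment showing the per-role EBS and CPU-credit settings that replace the old shared `disk_*` variables; the module source/ref and values are illustrative, and required arguments are elided.

```
module "tempest" {
  source = "git::https://github.com/poseidon/typhoon//aws/flatcar-linux/kubernetes?ref=v1.30.3"
  # ... required cluster, DNS, and SSH settings elided ...

  # controller EBS volume
  controller_disk_size = 100   # GB, default 30
  controller_disk_type = "gp3" # default gp3
  controller_disk_iops = 3000  # default 3000

  # worker EBS volume and burstable CPU pricing
  worker_disk_size   = 100
  worker_disk_type   = "gp3"
  worker_disk_iops   = 3000
  worker_cpu_credits = "unlimited" # default null (i.e. auto)
}
```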
@@ -228,7 +231,7 @@ Reference the DNS zone id with `aws_route53_zone.zone-for-clusters.zone_id`.
 Check the list of valid [instance types](https://aws.amazon.com/ec2/instance-types/).
 
 !!! warning
-    Do not choose a `controller_type` smaller than `t2.small`. Smaller instances are not sufficient for running a controller.
+    Do not choose a `controller_type` smaller than `t3.small`. Smaller instances are not sufficient for running a controller.
 
 !!! tip "MTU"
     If your EC2 instance type supports [Jumbo frames](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/network_mtu.html#jumbo_frame_instances) (most do), we recommend you change the `network_mtu` to 8981! You will get better pod-to-pod bandwidth.
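A small, hypothetical fragment tying together the updated warning and the MTU tip; the source/ref is illustrative, required arguments are elided, and whether your instance type supports jumbo frames should be checked against the linked AWS docs.

```
module "tempest" {
  source = "git::https://github.com/poseidon/typhoon//aws/flatcar-linux/kubernetes?ref=v1.30.3"
  # ... required cluster, DNS, and SSH settings elided ...

  controller_type = "t3.small" # documented minimum size for a controller
  worker_type     = "t3.small"
  network_mtu     = 8981       # jumbo frames, per the tip above
}
```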
@@ -82,15 +82,15 @@ module "ramius" {
   location       = "centralus"
   dns_zone       = "azure.example.com"
   dns_zone_group = "example-group"
+  network_cidr = {
+    ipv4 = ["10.0.0.0/20"]
+  }
 
+  # instances
+  worker_count = 2
+
   # configuration
   ssh_authorized_key = "ssh-rsa AAAAB3Nz..."
-
-  # optional
-  worker_count = 2
-  network_cidr = {
-    ipv4 = ["10.0.0.0/20"]
-  }
 }
 ```
@@ -163,9 +163,9 @@ $ kubectl get pods --all-namespaces
 NAMESPACE NAME READY STATUS RESTARTS AGE
 kube-system coredns-7c6fbb4f4b-b6qzx 1/1 Running 0 26m
 kube-system coredns-7c6fbb4f4b-j2k3d 1/1 Running 0 26m
-kube-system calico-node-1m5bf 2/2 Running 0 26m
-kube-system calico-node-7jmr1 2/2 Running 0 26m
-kube-system calico-node-bknc8 2/2 Running 0 26m
+kube-system cilium-1m5bf 1/1 Running 0 26m
+kube-system cilium-7jmr1 1/1 Running 0 26m
+kube-system cilium-bknc8 1/1 Running 0 26m
 kube-system kube-apiserver-ramius-controller-0 1/1 Running 0 26m
 kube-system kube-controller-manager-ramius-controller-0 1/1 Running 0 26m
 kube-system kube-proxy-j4vpq 1/1 Running 0 26m
@@ -226,12 +226,16 @@ Reference the DNS zone with `azurerm_dns_zone.clusters.name` and its resource gr
 
 | Name | Description | Default | Example |
 |:-----|:------------|:--------|:--------|
-| controller_count | Number of controllers (i.e. masters) | 1 | 1 |
-| worker_count | Number of workers | 1 | 3 |
-| controller_type | Machine type for controllers | "Standard_B2s" | See below |
-| worker_type | Machine type for workers | "Standard_D2as_v5" | See below |
 | os_image | Channel for a Container Linux derivative | "flatcar-stable" | flatcar-stable, flatcar-beta, flatcar-alpha |
-| disk_size | Size of the disk in GB | 30 | 100 |
+| controller_count | Number of controllers (i.e. masters) | 1 | 1 |
+| controller_type | Machine type for controllers | "Standard_B2s" | See below |
+| controller_disk_type | Managed disk for controllers | Premium_LRS | Standard_LRS |
+| controller_disk_size | Managed disk size in GB | 30 | 50 |
+| worker_count | Number of workers | 1 | 3 |
+| worker_type | Machine type for workers | "Standard_D2as_v5" | See below |
+| worker_disk_type | Managed disk for workers | Standard_LRS | Premium_LRS |
+| worker_disk_size | Size of the disk in GB | 30 | 100 |
+| worker_ephemeral_disk | Use ephemeral local disk instead of managed disk | false | true |
 | worker_priority | Set priority to Spot to use reduced cost surplus capacity, with the tradeoff that instances can be deallocated at any time | Regular | Spot |
 | controller_snippets | Controller Container Linux Config snippets | [] | [example](/advanced/customization/#usage) |
 | worker_snippets | Worker Container Linux Config snippets | [] | [example](/advanced/customization/#usage) |
|
@ -243,9 +247,6 @@ Reference the DNS zone with `azurerm_dns_zone.clusters.name` and its resource gr
|
||||||
|
|
||||||
Check the list of valid [machine types](https://azure.microsoft.com/en-us/pricing/details/virtual-machines/linux/) and their [specs](https://docs.microsoft.com/en-us/azure/virtual-machines/linux/sizes-general). Use `az vm list-skus` to get the identifier.
|
Check the list of valid [machine types](https://azure.microsoft.com/en-us/pricing/details/virtual-machines/linux/) and their [specs](https://docs.microsoft.com/en-us/azure/virtual-machines/linux/sizes-general). Use `az vm list-skus` to get the identifier.
|
||||||
|
|
||||||
!!! warning
|
|
||||||
Unlike AWS and GCP, Azure requires its *virtual* networks to have non-overlapping IPv4 CIDRs (yeah, go figure). Instead of each cluster just using `10.0.0.0/16` for instances, each Azure cluster's `host_cidr` must be non-overlapping (e.g. 10.0.0.0/20 for the 1st cluster, 10.0.16.0/20 for the 2nd cluster, etc).
|
|
||||||
|
|
||||||
!!! warning
|
!!! warning
|
||||||
Do not choose a `controller_type` smaller than `Standard_B2s`. Smaller instances are not sufficient for running a controller.
|
Do not choose a `controller_type` smaller than `Standard_B2s`. Smaller instances are not sufficient for running a controller.
|
||||||
|
|
||||||
|
|
|
@@ -333,9 +333,10 @@ List the pods.
 ```
 $ kubectl get pods --all-namespaces
 NAMESPACE NAME READY STATUS RESTARTS AGE
-kube-system calico-node-6qp7f 2/2 Running 1 11m
-kube-system calico-node-gnjrm 2/2 Running 0 11m
-kube-system calico-node-llbgt 2/2 Running 0 11m
+kube-system cilium-6qp7f 1/1 Running 1 11m
+kube-system cilium-gnjrm 1/1 Running 0 11m
+kube-system cilium-llbgt 1/1 Running 0 11m
+kube-system cilium-operator-68d778b448-g744f 1/1 Running 0 11m
 kube-system coredns-1187388186-dj3pd 1/1 Running 0 11m
 kube-system coredns-1187388186-mx9rt 1/1 Running 0 11m
 kube-system kube-apiserver-node1.example.com 1/1 Running 0 11m
@@ -88,12 +88,12 @@ module "nemo" {
   region   = "nyc3"
   dns_zone = "digital-ocean.example.com"
 
-  # configuration
+  # instances
   os_image = data.digitalocean_image.flatcar-stable-2303-4-0.id
-  ssh_fingerprints = ["d7:9d:79:ae:56:32:73:79:95:88:e3:a2:ab:5d:45:e7"]
+  worker_count = 2
 
-  # optional
-  worker_count = 2
+  # configuration
+  ssh_fingerprints = ["d7:9d:79:ae:56:32:73:79:95:88:e3:a2:ab:5d:45:e7"]
 }
 ```
@@ -166,9 +166,9 @@ List the pods.
 NAMESPACE NAME READY STATUS RESTARTS AGE
 kube-system coredns-1187388186-ld1j7 1/1 Running 0 11m
 kube-system coredns-1187388186-rdhf7 1/1 Running 0 11m
-kube-system calico-node-1m5bf 2/2 Running 0 11m
-kube-system calico-node-7jmr1 2/2 Running 0 11m
-kube-system calico-node-bknc8 2/2 Running 0 11m
+kube-system cilium-1m5bf 1/1 Running 0 11m
+kube-system cilium-7jmr1 1/1 Running 0 11m
+kube-system cilium-bknc8 1/1 Running 0 11m
 kube-system kube-apiserver-ip-10.132.115.81 1/1 Running 0 11m
 kube-system kube-controller-manager-ip-10.132.115.81 1/1 Running 0 11m
 kube-system kube-proxy-6kxjf 1/1 Running 0 11m
@@ -81,11 +81,11 @@ module "yavin" {
   dns_zone      = "example.com"
   dns_zone_name = "example-zone"
 
+  # instances
+  worker_count = 2
+
   # configuration
   ssh_authorized_key = "ssh-rsa AAAAB3Nz..."
-
-  # optional
-  worker_count = 2
 }
 ```
@@ -157,9 +157,9 @@ List the pods.
 ```
 $ kubectl get pods --all-namespaces
 NAMESPACE NAME READY STATUS RESTARTS AGE
-kube-system calico-node-1cs8z 2/2 Running 0 6m
-kube-system calico-node-d1l5b 2/2 Running 0 6m
-kube-system calico-node-sp9ps 2/2 Running 0 6m
+kube-system cilium-1cs8z 1/1 Running 0 6m
+kube-system cilium-d1l5b 1/1 Running 0 6m
+kube-system cilium-sp9ps 1/1 Running 0 6m
 kube-system coredns-1187388186-dkh3o 1/1 Running 0 6m
 kube-system coredns-1187388186-zj5dl 1/1 Running 0 6m
 kube-system kube-apiserver-controller-0 1/1 Running 0 6m
@@ -211,12 +211,13 @@ resource "google_dns_managed_zone" "zone-for-clusters" {
 
 | Name | Description | Default | Example |
 |:-----|:------------|:--------|:--------|
-| controller_count | Number of controllers (i.e. masters) | 1 | 3 |
-| worker_count | Number of workers | 1 | 3 |
-| controller_type | Machine type for controllers | "n1-standard-1" | See below |
-| worker_type | Machine type for workers | "n1-standard-1" | See below |
 | os_image | Flatcar Linux image for compute instances | "flatcar-stable" | flatcar-stable, flatcar-beta, flatcar-alpha |
-| disk_size | Size of the disk in GB | 30 | 100 |
+| controller_count | Number of controllers (i.e. masters) | 1 | 3 |
+| controller_type | Machine type for controllers | "n1-standard-1" | See below |
+| controller_disk_size | Controller disk size in GB | 30 | 20 |
+| worker_count | Number of workers | 1 | 3 |
+| worker_type | Machine type for workers | "n1-standard-1" | See below |
+| worker_disk_size | Worker disk size in GB | 30 | 100 |
 | worker_preemptible | If enabled, Compute Engine will terminate workers randomly within 24 hours | false | true |
 | controller_snippets | Controller Container Linux Config snippets | [] | [example](/advanced/customization/) |
 | worker_snippets | Worker Container Linux Config snippets | [] | [example](/advanced/customization/) |
@@ -9,7 +9,6 @@ module "bootstrap" {
   network_mtu = 1440
   pod_cidr = var.pod_cidr
   service_cidr = var.service_cidr
-  cluster_domain_suffix = var.cluster_domain_suffix
   enable_reporting = var.enable_reporting
   enable_aggregation = var.enable_aggregation
   daemonset_tolerations = var.daemonset_tolerations

@@ -143,7 +143,7 @@ storage:
 cgroupDriver: systemd
 clusterDNS:
   - ${cluster_dns_service_ip}
-clusterDomain: ${cluster_domain_suffix}
+clusterDomain: cluster.local
 healthzPort: 0
 rotateCertificates: true
 shutdownGracePeriod: 45s

@@ -81,7 +81,6 @@ data "ct_config" "controllers" {
     kubeconfig = indent(10, module.bootstrap.kubeconfig-kubelet)
     ssh_authorized_key = var.ssh_authorized_key
     cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
-    cluster_domain_suffix = var.cluster_domain_suffix
   })
   strict = true
   snippets = var.controller_snippets
@@ -134,13 +134,7 @@ variable "worker_node_labels" {
   default = []
 }
 
-# unofficial, undocumented, unsupported
+# advanced
 
-variable "cluster_domain_suffix" {
-  type        = string
-  description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
-  default     = "cluster.local"
-}
-
 variable "daemonset_tolerations" {
   type = list(string)
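Because the variable block above is removed, a module call that still sets `cluster_domain_suffix` should now be rejected by Terraform as an unexpected argument; the fix is simply to drop the line. A hedged before/after sketch (module name and source are illustrative):

```
module "yavin" {
  source = "git::https://github.com/poseidon/typhoon//google-cloud/fedora-coreos/kubernetes?ref=v1.30.3"
  # ... other settings unchanged ...

  # Before: passing the default suffix explicitly was tolerated.
  # cluster_domain_suffix = "cluster.local"
  # After: delete the argument; kubelets and CoreDNS always use "cluster.local".
}
```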
@@ -13,11 +13,10 @@ module "workers" {
   preemptible = var.worker_preemptible
 
   # configuration
   kubeconfig = module.bootstrap.kubeconfig-kubelet
   ssh_authorized_key = var.ssh_authorized_key
   service_cidr = var.service_cidr
-  cluster_domain_suffix = var.cluster_domain_suffix
   snippets = var.worker_snippets
   node_labels = var.worker_node_labels
 }

@@ -98,7 +98,7 @@ storage:
 cgroupDriver: systemd
 clusterDNS:
   - ${cluster_dns_service_ip}
-clusterDomain: ${cluster_domain_suffix}
+clusterDomain: cluster.local
 healthzPort: 0
 rotateCertificates: true
 shutdownGracePeriod: 45s
@@ -96,13 +96,7 @@ variable "node_taints" {
   default = []
 }
 
-# unofficial, undocumented, unsupported, temporary
+# advanced
 
-variable "cluster_domain_suffix" {
-  type        = string
-  description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
-  default     = "cluster.local"
-}
-
 variable "accelerator_type" {
   type = string

@@ -111,7 +111,6 @@ data "ct_config" "worker" {
     kubeconfig = indent(10, var.kubeconfig)
     ssh_authorized_key = var.ssh_authorized_key
     cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
-    cluster_domain_suffix = var.cluster_domain_suffix
     node_labels = join(",", var.node_labels)
     node_taints = join(",", var.node_taints)
   })
@@ -9,7 +9,6 @@ module "bootstrap" {
   network_mtu = 1440
   pod_cidr = var.pod_cidr
   service_cidr = var.service_cidr
-  cluster_domain_suffix = var.cluster_domain_suffix
   enable_reporting = var.enable_reporting
   enable_aggregation = var.enable_aggregation
   daemonset_tolerations = var.daemonset_tolerations

@@ -143,7 +143,7 @@ storage:
 cgroupDriver: systemd
 clusterDNS:
   - ${cluster_dns_service_ip}
-clusterDomain: ${cluster_domain_suffix}
+clusterDomain: cluster.local
 healthzPort: 0
 rotateCertificates: true
 shutdownGracePeriod: 45s

@@ -81,7 +81,6 @@ data "ct_config" "controllers" {
     kubeconfig = indent(10, module.bootstrap.kubeconfig-kubelet)
     ssh_authorized_key = var.ssh_authorized_key
     cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
-    cluster_domain_suffix = var.cluster_domain_suffix
   })
   strict = true
   snippets = var.controller_snippets

@@ -115,7 +115,6 @@ EOD
   default = "10.3.0.0/16"
 }
 
-
 variable "enable_reporting" {
   type = bool
   description = "Enable usage or analytics reporting to upstreams (Calico)"
@@ -134,13 +133,7 @@ variable "worker_node_labels" {
   default = []
 }
 
-# unofficial, undocumented, unsupported
+# advanced
 
-variable "cluster_domain_suffix" {
-  type        = string
-  description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
-  default     = "cluster.local"
-}
-
 variable "daemonset_tolerations" {
   type = list(string)

@@ -13,11 +13,10 @@ module "workers" {
   preemptible = var.worker_preemptible
 
   # configuration
   kubeconfig = module.bootstrap.kubeconfig-kubelet
   ssh_authorized_key = var.ssh_authorized_key
   service_cidr = var.service_cidr
-  cluster_domain_suffix = var.cluster_domain_suffix
   snippets = var.worker_snippets
   node_labels = var.worker_node_labels
 }

@@ -98,7 +98,7 @@ storage:
 cgroupDriver: systemd
 clusterDNS:
   - ${cluster_dns_service_ip}
-clusterDomain: ${cluster_domain_suffix}
+clusterDomain: cluster.local
 healthzPort: 0
 rotateCertificates: true
 shutdownGracePeriod: 45s
@@ -96,13 +96,7 @@ variable "node_taints" {
   default = []
 }
 
-# unofficial, undocumented, unsupported, temporary
+# advanced
 
-variable "cluster_domain_suffix" {
-  type        = string
-  description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
-  default     = "cluster.local"
-}
-
 variable "accelerator_type" {
   type = string

@@ -111,7 +111,6 @@ data "ct_config" "worker" {
     kubeconfig = indent(10, var.kubeconfig)
     ssh_authorized_key = var.ssh_authorized_key
     cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
-    cluster_domain_suffix = var.cluster_domain_suffix
     node_labels = join(",", var.node_labels)
     node_taints = join(",", var.node_taints)
   })