Accept initial worker node labels and taints map on bare-metal
* Add `worker_node_labels`, a map from node name to a list of initial node label strings
* Add `worker_node_taints`, a map from node name to a list of initial node taint strings
* Unlike cloud platforms, bare-metal node labels and taints are defined via a map from node name to a list of labels/taints. Bare-metal clusters may have heterogeneous hardware, so per-node labels and taints are accepted
* Only worker node names are allowed. Workloads are not scheduled on controller nodes, so altering their labels/taints isn't suitable

```
module "mercury" {
  ...

  worker_node_labels = {
    "node2" = ["role=special"]
  }

  worker_node_taints = {
    "node2" = ["role=special:NoSchedule"]
  }
}
```

Related: https://github.com/poseidon/typhoon/issues/429
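As a further illustration of the worker-name keying, map keys are matched against the `name` of entries in the module's `workers` list; keys that don't name a defined worker have no effect. The `workers` entry below (MAC and domain included) is hypothetical:

```
module "mercury" {
  ...

  # Hypothetical worker inventory; label/taint map keys are matched against `name`.
  workers = [
    {
      name   = "node2"
      mac    = "52:54:00:a1:9c:ae"
      domain = "node2.example.com"
    },
  ]

  # Applies only to the worker named "node2"; keys without a matching worker are ignored.
  worker_node_labels = {
    "node2" = ["role=special"]
  }
  worker_node_taints = {
    "node2" = ["role=special:NoSchedule"]
  }
}
```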
commit ab7913a061
parent 7b0ea23cdc
```
@@ -17,11 +17,16 @@ Notable changes between versions.

#### Azure

* Upgrade to `terraform-provider-azurerm` [v2.0+](https://www.terraform.io/docs/providers/azurerm/guides/2.0-upgrade-guide.html) (action required)
* Change `worker_priority` from `Low` to `Spot` if used (action required)
* Switch to Azure's new Linux VM and Linux VM Scale Set resources
* If set, change `worker_priority` from `Low` to `Spot` (action required)
* Set controller's Azure disk caching to None
* Associate subnets (in addition to NICs) with security groups (aesthetic)

#### Bare-Metal

* Add `worker_node_labels` map variable for per-worker node labels ([#663](https://github.com/poseidon/typhoon/pull/663))
* Add `worker_node_taints` map variable for per-worker node taints ([#663](https://github.com/poseidon/typhoon/pull/663))

#### Google Cloud

* Fix `worker_node_labels` on Fedora CoreOS ([#651](https://github.com/poseidon/typhoon/pull/651))
```
```
@@ -93,6 +93,12 @@ systemd:
--lock-file=/var/run/lock/kubelet.lock \
--network-plugin=cni \
--node-labels=node.kubernetes.io/node \
%{~ for label in compact(split(",", node_labels)) ~}
--node-labels=${label} \
%{~ endfor ~}
%{~ for taint in compact(split(",", node_taints)) ~}
--register-with-taints=${taint} \
%{~ endfor ~}
--pod-manifest-path=/etc/kubernetes/manifests \
--read-only-port=0 \
--volume-plugin-dir=/var/lib/kubelet/volumeplugins
```
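For context on the template directives above: `split(",", node_labels)` turns the comma-joined string back into a list and `compact` drops empty strings, so a worker with no labels or taints renders no extra kubelet flags. A minimal Terraform sketch of the same expansion, with the literal inputs assumed for illustration:

```
locals {
  # Assumed example inputs; in the module these come from the rendered template vars.
  node_labels = "role=special,tier=gpu"
  node_taints = ""

  # Mirrors the %{ for } loops: split on commas, drop empty strings,
  # and emit one kubelet flag per remaining element.
  label_flags = [for l in compact(split(",", local.node_labels)) : "--node-labels=${l}"]
  taint_flags = [for t in compact(split(",", local.node_taints)) : "--register-with-taints=${t}"]
}

# label_flags => ["--node-labels=role=special", "--node-labels=tier=gpu"]
# taint_flags => []  (an empty node_taints string adds no flags)
```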
```
@@ -188,6 +188,8 @@ data "template_file" "worker-configs" {
    cluster_dns_service_ip = module.bootstrap.cluster_dns_service_ip
    cluster_domain_suffix  = var.cluster_domain_suffix
    ssh_authorized_key     = var.ssh_authorized_key
    node_labels            = join(",", lookup(var.worker_node_labels, var.workers.*.name[count.index], []))
    node_taints            = join(",", lookup(var.worker_node_taints, var.workers.*.name[count.index], []))
  }
}
```
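The `lookup(..., [])` default above means a worker with no entry in the map gets an empty string, so its kubelet gains no extra labels or taints. A small sketch with assumed values:

```
locals {
  # Assumed input; stands in for var.worker_node_labels.
  worker_node_labels = {
    "node2" = ["role=special"]
  }

  # node2 has an entry; node3 falls back to the [] default, so join returns "".
  node2_labels = join(",", lookup(local.worker_node_labels, "node2", []))  # "role=special"
  node3_labels = join(",", lookup(local.worker_node_labels, "node3", []))  # ""
}
```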
```
@@ -55,6 +55,18 @@ variable "clc_snippets" {
  default = {}
}

variable "worker_node_labels" {
  type        = map(list(string))
  description = "Map from worker names to lists of initial node labels"
  default     = {}
}

variable "worker_node_taints" {
  type        = map(list(string))
  description = "Map from worker names to lists of initial node taints"
  default     = {}
}

# configuration

variable "k8s_domain_name" {
```
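A note on the `map(list(string))` type: each value must be a list even when there is only one label or taint. Hypothetical values showing what the type constraint accepts and rejects:

```
# Accepted: every value is a list of strings.
worker_node_labels = {
  "node2" = ["role=special", "tier=gpu"]
  "node3" = ["role=special"]
}

# Rejected by Terraform type checking: a bare string cannot convert to list(string).
# worker_node_labels = {
#   "node2" = "role=special"
# }
```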
```
@@ -68,6 +68,12 @@ systemd:
--lock-file=/var/run/lock/kubelet.lock \
--network-plugin=cni \
--node-labels=node.kubernetes.io/node \
%{~ for label in compact(split(",", node_labels)) ~}
--node-labels=${label} \
%{~ endfor ~}
%{~ for taint in compact(split(",", node_taints)) ~}
--register-with-taints=${taint} \
%{~ endfor ~}
--pod-manifest-path=/etc/kubernetes/manifests \
--read-only-port=0 \
--volume-plugin-dir=/var/lib/kubelet/volumeplugins
```
```
@@ -96,6 +96,8 @@ data "template_file" "worker-configs" {
    cluster_dns_service_ip = module.bootstrap.cluster_dns_service_ip
    cluster_domain_suffix  = var.cluster_domain_suffix
    ssh_authorized_key     = var.ssh_authorized_key
    node_labels            = join(",", lookup(var.worker_node_labels, var.workers.*.name[count.index], []))
    node_taints            = join(",", lookup(var.worker_node_taints, var.workers.*.name[count.index], []))
  }
}
```
```
@@ -56,6 +56,18 @@ variable "snippets" {
  default = {}
}

variable "worker_node_labels" {
  type        = map(list(string))
  description = "Map from worker names to lists of initial node labels"
  default     = {}
}

variable "worker_node_taints" {
  type        = map(list(string))
  description = "Map from worker names to lists of initial node taints"
  default     = {}
}

# configuration

variable "k8s_domain_name" {
```
```
@@ -361,4 +361,6 @@ Check the [variables.tf](https://github.com/poseidon/typhoon/blob/master/bare-me
| pod_cidr | CIDR IPv4 range to assign to Kubernetes pods | "10.2.0.0/16" | "10.22.0.0/16" |
| service_cidr | CIDR IPv4 range to assign to Kubernetes services | "10.3.0.0/16" | "10.3.0.0/24" |
| kernel_args | Additional kernel args to provide at PXE boot | [] | ["kvm-intel.nested=1"] |
| worker_node_labels | Map from worker name to list of initial node labels | {} | {"node2" = ["role=special"]} |
| worker_node_taints | Map from worker name to list of initial node taints | {} | {"node2" = ["role=special:NoSchedule"]} |
```
```
@@ -347,4 +347,6 @@ Check the [variables.tf](https://github.com/poseidon/typhoon/blob/master/bare-me
| pod_cidr | CIDR IPv4 range to assign to Kubernetes pods | "10.2.0.0/16" | "10.22.0.0/16" |
| service_cidr | CIDR IPv4 range to assign to Kubernetes services | "10.3.0.0/16" | "10.3.0.0/24" |
| kernel_args | Additional kernel args to provide at PXE boot | [] | ["kvm-intel.nested=1"] |
| worker_node_labels | Map from worker name to list of initial node labels | {} | {"node2" = ["role=special"]} |
| worker_node_taints | Map from worker name to list of initial node taints | {} | {"node2" = ["role=special:NoSchedule"]} |
```