Restructure bare-metal module to use a worker submodule
* Add an internal `worker` module to the bare-metal module, to allow individual bare-metal machines to be defined and joined to an existing bare-metal cluster. This is similar to the "worker pools" modules for adding sets of nodes to cloud (AWS, GCP, Azure) clusters, but on metal, each piece of hardware is potentially unique.

New: Using the new `worker` module, a Kubernetes cluster can be defined without any `workers` (i.e. just a control-plane). Use the `worker` module to define each machine that should join the bare-metal cluster and customize it in detail. This style is quite flexible and suited for clusters with hardware that varies quite a bit.

```tf
module "mercury" {
  source = "git::https://github.com/poseidon/typhoon//bare-metal/flatcar-linux/kubernetes?ref=v1.26.2"

  # bare-metal
  cluster_name           = "mercury"
  matchbox_http_endpoint = "http://matchbox.example.com"
  os_channel             = "flatcar-stable"
  os_version             = "2345.3.1"

  # configuration
  k8s_domain_name    = "node1.example.com"
  ssh_authorized_key = "ssh-rsa AAAAB3Nz..."

  # machines
  controllers = [{
    name   = "node1"
    mac    = "52:54:00:a1:9c:ae"
    domain = "node1.example.com"
  }]
}
```

```tf
module "mercury-node1" {
  source = "git::https://github.com/poseidon/typhoon//bare-metal/flatcar-linux/kubernetes/worker?ref=v1.26.2"

  cluster_name = "mercury"

  # bare-metal
  matchbox_http_endpoint = "http://matchbox.example.com"
  os_channel             = "flatcar-stable"
  os_version             = "2345.3.1"

  # configuration
  name               = "node2"
  mac                = "52:54:00:b2:2f:86"
  domain             = "node2.example.com"
  kubeconfig         = module.mercury.kubeconfig
  ssh_authorized_key = "ssh-rsa AAAAB3Nz..."

  # optional
  snippets       = []
  node_labels    = []
  node_taints    = []
  install_disk   = "/dev/vda"
  cached_install = false
}
```

For clusters with fairly similar hardware, you may continue to define `workers` directly within the cluster definition. This reduces some repetition, but is not quite as flexible.

```tf
module "mercury" {
  source = "git::https://github.com/poseidon/typhoon//bare-metal/flatcar-linux/kubernetes?ref=v1.26.1"

  # bare-metal
  cluster_name           = "mercury"
  matchbox_http_endpoint = "http://matchbox.example.com"
  os_channel             = "flatcar-stable"
  os_version             = "2345.3.1"

  # configuration
  k8s_domain_name    = "node1.example.com"
  ssh_authorized_key = "ssh-rsa AAAAB3Nz..."

  # machines
  controllers = [{
    name   = "node1"
    mac    = "52:54:00:a1:9c:ae"
    domain = "node1.example.com"
  }]
  workers = [
    {
      name   = "node2",
      mac    = "52:54:00:b2:2f:86"
      domain = "node2.example.com"
    },
    {
      name   = "node3",
      mac    = "52:54:00:c3:61:77"
      domain = "node3.example.com"
    }
  ]
}
```

Optional variables `snippets`, `worker_node_labels`, and `worker_node_taints` are still defined as a map from machine name to a list of snippets, labels, or taints respectively, to allow some degree of per-machine customization. However, fields like `install_disk`, `kernel_args`, `cached_install`, and future options will not be designed this way. Instead, if your machines vary, it is recommended to use the new `worker` module to define each node, as sketched below.
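For illustration, a minimal sketch of that map-style customization applied to the cluster above; the label and taint values and the snippet file path are hypothetical, chosen only to show the shape of each map (machine name to list):

```tf
module "mercury" {
  # ... cluster definition as above ...

  workers = [
    { name = "node2", mac = "52:54:00:b2:2f:86", domain = "node2.example.com" },
    { name = "node3", mac = "52:54:00:c3:61:77", domain = "node3.example.com" }
  ]

  # per-machine customization: map from machine name to a list
  worker_node_labels = {
    "node2" = ["pool=gpu"]                # hypothetical label
  }
  worker_node_taints = {
    "node2" = ["gpu=true:NoSchedule"]     # hypothetical taint
  }
  snippets = {
    "node3" = [file("./snippets/node3.yaml")]  # hypothetical snippet file
  }
}
```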
parent d04d88023d
commit 1caea3388c
```diff
@@ -4,6 +4,10 @@ Notable changes between versions.
 
 ## Latest
 
+### Bare-Metal
+
+* Add a `worker` module to allow customizing individual worker nodes ([#1295](https://github.com/poseidon/typhoon/pull/1295))
+
 ## v1.26.1
 
 * Kubernetes [v1.26.1](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.26.md#v1261)
```
```diff
@@ -1,22 +0,0 @@
-# Match each controller or worker to a profile
-
-resource "matchbox_group" "controller" {
-  count   = length(var.controllers)
-  name    = format("%s-%s", var.cluster_name, var.controllers.*.name[count.index])
-  profile = matchbox_profile.controllers.*.name[count.index]
-
-  selector = {
-    mac = var.controllers.*.mac[count.index]
-  }
-}
-
-resource "matchbox_group" "worker" {
-  count   = length(var.workers)
-  name    = format("%s-%s", var.cluster_name, var.workers.*.name[count.index])
-  profile = matchbox_profile.workers.*.name[count.index]
-
-  selector = {
-    mac = var.workers.*.mac[count.index]
-  }
-}
-
```
```diff
@@ -3,6 +3,13 @@ output "kubeconfig-admin" {
   sensitive = true
 }
 
+# Outputs for workers
+
+output "kubeconfig" {
+  value     = module.bootstrap.kubeconfig-kubelet
+  sensitive = true
+}
+
 # Outputs for debug
 
 output "assets_dist" {
```
```diff
@@ -28,6 +28,16 @@ locals {
   args = var.cached_install ? local.cached_args : local.remote_args
 }
 
+# Match a controller to a profile by MAC
+resource "matchbox_group" "controller" {
+  count   = length(var.controllers)
+  name    = format("%s-%s", var.cluster_name, var.controllers.*.name[count.index])
+  profile = matchbox_profile.controllers.*.name[count.index]
+
+  selector = {
+    mac = var.controllers.*.mac[count.index]
+  }
+}
+
 // Fedora CoreOS controller profile
 resource "matchbox_profile" "controllers" {
@@ -55,30 +65,3 @@ data "ct_config" "controllers" {
   strict   = true
   snippets = lookup(var.snippets, var.controllers.*.name[count.index], [])
 }
-
-// Fedora CoreOS worker profile
-resource "matchbox_profile" "workers" {
-  count = length(var.workers)
-  name  = format("%s-worker-%s", var.cluster_name, var.workers.*.name[count.index])
-
-  kernel = local.kernel
-  initrd = local.initrd
-  args   = concat(local.args, var.kernel_args)
-
-  raw_ignition = data.ct_config.workers.*.rendered[count.index]
-}
-
-# Fedora CoreOS workers
-data "ct_config" "workers" {
-  count = length(var.workers)
-  content = templatefile("${path.module}/butane/worker.yaml", {
-    domain_name            = var.workers.*.domain[count.index]
-    cluster_dns_service_ip = module.bootstrap.cluster_dns_service_ip
-    cluster_domain_suffix  = var.cluster_domain_suffix
-    ssh_authorized_key     = var.ssh_authorized_key
-    node_labels            = join(",", lookup(var.worker_node_labels, var.workers.*.name[count.index], []))
-    node_taints            = join(",", lookup(var.worker_node_taints, var.workers.*.name[count.index], []))
-  })
-  strict   = true
-  snippets = lookup(var.snippets, var.workers.*.name[count.index], [])
-}
```
```diff
@@ -15,7 +15,6 @@ resource "null_resource" "copy-controller-secrets" {
   # matchbox groups are written, causing a deadlock.
   depends_on = [
     matchbox_group.controller,
-    matchbox_group.worker,
     module.bootstrap,
   ]
 
@@ -45,37 +44,6 @@ resource "null_resource" "copy-controller-secrets" {
   }
 }
-
-# Secure copy kubeconfig to all workers. Activates kubelet.service
-resource "null_resource" "copy-worker-secrets" {
-  count = length(var.workers)
-
-  # Without depends_on, remote-exec could start and wait for machines before
-  # matchbox groups are written, causing a deadlock.
-  depends_on = [
-    matchbox_group.controller,
-    matchbox_group.worker,
-  ]
-
-  connection {
-    type    = "ssh"
-    host    = var.workers.*.domain[count.index]
-    user    = "core"
-    timeout = "60m"
-  }
-
-  provisioner "file" {
-    content     = module.bootstrap.kubeconfig-kubelet
-    destination = "/home/core/kubeconfig"
-  }
-
-  provisioner "remote-exec" {
-    inline = [
-      "sudo mv /home/core/kubeconfig /etc/kubernetes/kubeconfig",
-      "sudo touch /etc/kubernetes",
-    ]
-  }
-}
 
 # Connect to a controller to perform one-time cluster bootstrap.
 resource "null_resource" "bootstrap" {
   # Without depends_on, this remote-exec may start before the kubeconfig copy.
@@ -83,7 +51,6 @@ resource "null_resource" "bootstrap" {
   # while no Kubelets are running.
   depends_on = [
     null_resource.copy-controller-secrets,
-    null_resource.copy-worker-secrets,
   ]
 
   connection {
```
```diff
@@ -0,0 +1,63 @@
+locals {
+  remote_kernel = "https://builds.coreos.fedoraproject.org/prod/streams/${var.os_stream}/builds/${var.os_version}/x86_64/fedora-coreos-${var.os_version}-live-kernel-x86_64"
+  remote_initrd = [
+    "--name main https://builds.coreos.fedoraproject.org/prod/streams/${var.os_stream}/builds/${var.os_version}/x86_64/fedora-coreos-${var.os_version}-live-initramfs.x86_64.img",
+  ]
+
+  remote_args = [
+    "initrd=main",
+    "coreos.live.rootfs_url=https://builds.coreos.fedoraproject.org/prod/streams/${var.os_stream}/builds/${var.os_version}/x86_64/fedora-coreos-${var.os_version}-live-rootfs.x86_64.img",
+    "coreos.inst.install_dev=${var.install_disk}",
+    "coreos.inst.ignition_url=${var.matchbox_http_endpoint}/ignition?uuid=$${uuid}&mac=$${mac:hexhyp}",
+  ]
+
+  cached_kernel = "/assets/fedora-coreos/fedora-coreos-${var.os_version}-live-kernel-x86_64"
+  cached_initrd = [
+    "/assets/fedora-coreos/fedora-coreos-${var.os_version}-live-initramfs.x86_64.img",
+  ]
+
+  cached_args = [
+    "initrd=main",
+    "coreos.live.rootfs_url=${var.matchbox_http_endpoint}/assets/fedora-coreos/fedora-coreos-${var.os_version}-live-rootfs.x86_64.img",
+    "coreos.inst.install_dev=${var.install_disk}",
+    "coreos.inst.ignition_url=${var.matchbox_http_endpoint}/ignition?uuid=$${uuid}&mac=$${mac:hexhyp}",
+  ]
+
+  kernel = var.cached_install ? local.cached_kernel : local.remote_kernel
+  initrd = var.cached_install ? local.cached_initrd : local.remote_initrd
+  args   = var.cached_install ? local.cached_args : local.remote_args
+}
+
+// Match a worker to a profile by MAC
+resource "matchbox_group" "worker" {
+  name    = format("%s-%s", var.cluster_name, var.name)
+  profile = matchbox_profile.worker.name
+  selector = {
+    mac = var.mac
+  }
+}
+
+// Fedora CoreOS worker profile
+resource "matchbox_profile" "worker" {
+  name   = format("%s-worker-%s", var.cluster_name, var.name)
+  kernel = local.kernel
+  initrd = local.initrd
+  args   = concat(local.args, var.kernel_args)
+
+  raw_ignition = data.ct_config.worker.rendered
+}
+
+# Fedora CoreOS workers
+data "ct_config" "worker" {
+  content = templatefile("${path.module}/butane/worker.yaml", {
+    domain_name            = var.domain
+    ssh_authorized_key     = var.ssh_authorized_key
+    cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
+    cluster_domain_suffix  = var.cluster_domain_suffix
+    node_labels            = join(",", var.node_labels)
+    node_taints            = join(",", var.node_taints)
+  })
+  strict   = true
+  snippets = var.snippets
+}
```
```diff
@@ -0,0 +1,27 @@
+# Secure copy kubeconfig to worker. Activates kubelet.service
+resource "null_resource" "copy-worker-secrets" {
+  # Without depends_on, remote-exec could start and wait for machines before
+  # matchbox groups are written, causing a deadlock.
+  depends_on = [
+    matchbox_group.worker,
+  ]
+
+  connection {
+    type    = "ssh"
+    host    = var.domain
+    user    = "core"
+    timeout = "60m"
+  }
+
+  provisioner "file" {
+    content     = var.kubeconfig
+    destination = "/home/core/kubeconfig"
+  }
+
+  provisioner "remote-exec" {
+    inline = [
+      "sudo mv /home/core/kubeconfig /etc/kubernetes/kubeconfig",
+      "sudo touch /etc/kubernetes",
+    ]
+  }
+}
```
```diff
@@ -0,0 +1,111 @@
+variable "cluster_name" {
+  type        = string
+  description = "Must be set to the `cluster_name` of cluster"
+}
+
+# bare-metal
+
+variable "matchbox_http_endpoint" {
+  type        = string
+  description = "Matchbox HTTP read-only endpoint (e.g. http://matchbox.example.com:8080)"
+}
+
+variable "os_stream" {
+  type        = string
+  description = "Fedora CoreOS release stream (e.g. stable, testing, next)"
+  default     = "stable"
+
+  validation {
+    condition     = contains(["stable", "testing", "next"], var.os_stream)
+    error_message = "The os_stream must be stable, testing, or next."
+  }
+}
+
+variable "os_version" {
+  type        = string
+  description = "Fedora CoreOS version to PXE and install (e.g. 31.20200310.3.0)"
+}
+
+# machine
+
+variable "name" {
+  type        = string
+  description = "Unique name for the machine (e.g. node1)"
+}
+
+variable "mac" {
+  type        = string
+  description = "MAC address (e.g. 52:54:00:a1:9c:ae)"
+}
+
+variable "domain" {
+  type        = string
+  description = "Fully qualified domain name (e.g. node1.example.com)"
+}
+
+# configuration
+
+variable "kubeconfig" {
+  type        = string
+  description = "Must be set to `kubeconfig` output by cluster"
+}
+
+variable "ssh_authorized_key" {
+  type        = string
+  description = "SSH public key for user 'core'"
+}
+
+variable "snippets" {
+  type        = list(string)
+  description = "List of Butane snippets"
+  default     = []
+}
+
+variable "node_labels" {
+  type        = list(string)
+  description = "List of initial node labels"
+  default     = []
+}
+
+variable "node_taints" {
+  type        = list(string)
+  description = "List of initial node taints"
+  default     = []
+}
+
+# optional
+
+variable "cached_install" {
+  type        = bool
+  description = "Whether Fedora CoreOS should PXE boot and install from matchbox /assets cache. Note that the admin must have downloaded the os_version into matchbox assets."
+  default     = false
+}
+
+variable "install_disk" {
+  type        = string
+  description = "Disk device to install Fedora CoreOS (e.g. sda)"
+  default     = "sda"
+}
+
+variable "kernel_args" {
+  type        = list(string)
+  description = "Additional kernel arguments to provide at PXE boot."
+  default     = []
+}
+
+# unofficial, undocumented, unsupported
+
+variable "service_cidr" {
+  type        = string
+  description = <<EOD
+CIDR IPv4 range to assign Kubernetes services.
+The 1st IP will be reserved for kube_apiserver, the 10th IP will be reserved for coredns.
+EOD
+  default     = "10.3.0.0/16"
+}
+
+variable "cluster_domain_suffix" {
+  description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
+  type        = string
+  default     = "cluster.local"
+}
```
```diff
@@ -0,0 +1,17 @@
+# Terraform version and plugin versions
+
+terraform {
+  required_version = ">= 0.13.0, < 2.0.0"
+  required_providers {
+    null = ">= 2.1"
+    ct = {
+      source  = "poseidon/ct"
+      version = "~> 0.9"
+    }
+    matchbox = {
+      source  = "poseidon/matchbox"
+      version = "~> 0.5.0"
+    }
+  }
+}
```
```diff
@@ -0,0 +1,30 @@
+module "workers" {
+  count  = length(var.workers)
+  source = "./worker"
+
+  cluster_name = var.cluster_name
+
+  # metal
+  matchbox_http_endpoint = var.matchbox_http_endpoint
+  os_stream              = var.os_stream
+  os_version             = var.os_version
+
+  # machine
+  name   = var.workers[count.index].name
+  mac    = var.workers[count.index].mac
+  domain = var.workers[count.index].domain
+
+  # configuration
+  kubeconfig            = module.bootstrap.kubeconfig-kubelet
+  ssh_authorized_key    = var.ssh_authorized_key
+  service_cidr          = var.service_cidr
+  cluster_domain_suffix = var.cluster_domain_suffix
+  node_labels           = lookup(var.worker_node_labels, var.workers[count.index].name, [])
+  node_taints           = lookup(var.worker_node_taints, var.workers[count.index].name, [])
+  snippets              = lookup(var.snippets, var.workers[count.index].name, [])
+
+  # optional
+  cached_install = var.cached_install
+  install_disk   = var.install_disk
+  kernel_args    = var.kernel_args
+}
```
```diff
@@ -1,35 +0,0 @@
-resource "matchbox_group" "install" {
-  count = length(var.controllers) + length(var.workers)
-
-  name = format("install-%s", concat(var.controllers.*.name, var.workers.*.name)[count.index])
-
-  # pick Matchbox profile (Flatcar upstream or Matchbox image cache)
-  profile = var.cached_install ? matchbox_profile.cached-flatcar-install.*.name[count.index] : matchbox_profile.flatcar-install.*.name[count.index]
-
-  selector = {
-    mac = concat(var.controllers.*.mac, var.workers.*.mac)[count.index]
-  }
-}
-
-resource "matchbox_group" "controller" {
-  count   = length(var.controllers)
-  name    = format("%s-%s", var.cluster_name, var.controllers[count.index].name)
-  profile = matchbox_profile.controllers.*.name[count.index]
-
-  selector = {
-    mac = var.controllers[count.index].mac
-    os  = "installed"
-  }
-}
-
-resource "matchbox_group" "worker" {
-  count   = length(var.workers)
-  name    = format("%s-%s", var.cluster_name, var.workers[count.index].name)
-  profile = matchbox_profile.workers.*.name[count.index]
-
-  selector = {
-    mac = var.workers[count.index].mac
-    os  = "installed"
-  }
-}
-
```
```diff
@@ -1,54 +1,53 @@
 locals {
   # flatcar-stable -> stable channel
   channel = split("-", var.os_channel)[1]
-}
 
-// Flatcar Linux install profile (from release.flatcar-linux.net)
-resource "matchbox_profile" "flatcar-install" {
-  count = length(var.controllers) + length(var.workers)
-  name  = format("%s-flatcar-install-%s", var.cluster_name, concat(var.controllers.*.name, var.workers.*.name)[count.index])
-
-  kernel = "${var.download_protocol}://${local.channel}.release.flatcar-linux.net/amd64-usr/${var.os_version}/flatcar_production_pxe.vmlinuz"
-
-  initrd = [
+  remote_kernel = "${var.download_protocol}://${local.channel}.release.flatcar-linux.net/amd64-usr/${var.os_version}/flatcar_production_pxe.vmlinuz"
+  remote_initrd = [
     "${var.download_protocol}://${local.channel}.release.flatcar-linux.net/amd64-usr/${var.os_version}/flatcar_production_pxe_image.cpio.gz",
   ]
-
-  args = flatten([
+  args = [
     "initrd=flatcar_production_pxe_image.cpio.gz",
     "flatcar.config.url=${var.matchbox_http_endpoint}/ignition?uuid=$${uuid}&mac=$${mac:hexhyp}",
     "flatcar.first_boot=yes",
-    var.kernel_args,
-  ])
-
-  raw_ignition = data.ct_config.install.*.rendered[count.index]
-}
+  ]
 
-// Flatcar Linux Install profile (from matchbox /assets cache)
-// Note: Admin must have downloaded os_version into matchbox assets/flatcar.
-resource "matchbox_profile" "cached-flatcar-install" {
-  count = length(var.controllers) + length(var.workers)
-  name  = format("%s-cached-flatcar-linux-install-%s", var.cluster_name, concat(var.controllers.*.name, var.workers.*.name)[count.index])
-
-  kernel = "/assets/flatcar/${var.os_version}/flatcar_production_pxe.vmlinuz"
-
-  initrd = [
+  cached_kernel = "/assets/flatcar/${var.os_version}/flatcar_production_pxe.vmlinuz"
+  cached_initrd = [
     "/assets/flatcar/${var.os_version}/flatcar_production_pxe_image.cpio.gz",
   ]
 
-  args = flatten([
-    "initrd=flatcar_production_pxe_image.cpio.gz",
-    "flatcar.config.url=${var.matchbox_http_endpoint}/ignition?uuid=$${uuid}&mac=$${mac:hexhyp}",
-    "flatcar.first_boot=yes",
-    var.kernel_args,
-  ])
-
-  raw_ignition = data.ct_config.cached-install.*.rendered[count.index]
+  kernel = var.cached_install ? local.cached_kernel : local.remote_kernel
+  initrd = var.cached_install ? local.cached_initrd : local.remote_initrd
 }
+
+# Match controllers to install profiles by MAC
+resource "matchbox_group" "install" {
+  count = length(var.controllers)
+
+  name    = format("install-%s", var.controllers[count.index].name)
+  profile = matchbox_profile.install[count.index].name
+  selector = {
+    mac = concat(var.controllers.*.mac, var.workers.*.mac)[count.index]
+  }
+}
+
+// Flatcar Linux install
+resource "matchbox_profile" "install" {
+  count = length(var.controllers)
+
+  name   = format("%s-install-%s", var.cluster_name, var.controllers.*.name[count.index])
+  kernel = local.kernel
+  initrd = local.initrd
+  args   = concat(local.args, var.kernel_args)
+
+  raw_ignition = data.ct_config.install[count.index].rendered
+}
 
 # Flatcar Linux install
 data "ct_config" "install" {
-  count = length(var.controllers) + length(var.workers)
+  count = length(var.controllers)
 
   content = templatefile("${path.module}/butane/install.yaml", {
     os_channel = local.channel
     os_version = var.os_version
@@ -57,25 +56,20 @@ data "ct_config" "install" {
     install_disk       = var.install_disk
     ssh_authorized_key = var.ssh_authorized_key
     # only cached profile adds -b baseurl
-    baseurl_flag = ""
+    baseurl_flag = var.cached_install ? "-b ${var.matchbox_http_endpoint}/assets/flatcar" : ""
   })
   strict = true
 }
 
-# Flatcar Linux cached install
-data "ct_config" "cached-install" {
-  count = length(var.controllers) + length(var.workers)
-  content = templatefile("${path.module}/butane/install.yaml", {
-    os_channel        = local.channel
-    os_version        = var.os_version
-    ignition_endpoint = format("%s/ignition", var.matchbox_http_endpoint)
-    mac               = concat(var.controllers.*.mac, var.workers.*.mac)[count.index]
-    install_disk      = var.install_disk
-    ssh_authorized_key = var.ssh_authorized_key
-    # profile uses -b baseurl to install from matchbox cache
-    baseurl_flag = "-b ${var.matchbox_http_endpoint}/assets/flatcar"
-  })
-  strict = true
-}
+# Match each controller by MAC
+resource "matchbox_group" "controller" {
+  count   = length(var.controllers)
+  name    = format("%s-%s", var.cluster_name, var.controllers[count.index].name)
+  profile = matchbox_profile.controllers[count.index].name
+  selector = {
+    mac = var.controllers[count.index].mac
+    os  = "installed"
+  }
+}
 
 // Kubernetes Controller profiles
@@ -99,25 +93,3 @@ data "ct_config" "controllers" {
   strict   = true
   snippets = lookup(var.snippets, var.controllers.*.name[count.index], [])
 }
-
-// Kubernetes Worker profiles
-resource "matchbox_profile" "workers" {
-  count        = length(var.workers)
-  name         = format("%s-worker-%s", var.cluster_name, var.workers.*.name[count.index])
-  raw_ignition = data.ct_config.workers.*.rendered[count.index]
-}
-
-# Flatcar Linux workers
-data "ct_config" "workers" {
-  count = length(var.workers)
-  content = templatefile("${path.module}/butane/worker.yaml", {
-    domain_name            = var.workers.*.domain[count.index]
-    cluster_dns_service_ip = module.bootstrap.cluster_dns_service_ip
-    cluster_domain_suffix  = var.cluster_domain_suffix
-    ssh_authorized_key     = var.ssh_authorized_key
-    node_labels            = join(",", lookup(var.worker_node_labels, var.workers.*.name[count.index], []))
-    node_taints            = join(",", lookup(var.worker_node_taints, var.workers.*.name[count.index], []))
-  })
-  strict   = true
-  snippets = lookup(var.snippets, var.workers.*.name[count.index], [])
-}
```
```diff
@@ -16,7 +16,6 @@ resource "null_resource" "copy-controller-secrets" {
   depends_on = [
     matchbox_group.install,
     matchbox_group.controller,
-    matchbox_group.worker,
     module.bootstrap,
   ]
 
@@ -45,37 +44,6 @@ resource "null_resource" "copy-controller-secrets" {
   }
 }
-
-# Secure copy kubeconfig to all workers. Activates kubelet.service
-resource "null_resource" "copy-worker-secrets" {
-  count = length(var.workers)
-
-  # Without depends_on, remote-exec could start and wait for machines before
-  # matchbox groups are written, causing a deadlock.
-  depends_on = [
-    matchbox_group.install,
-    matchbox_group.controller,
-    matchbox_group.worker,
-  ]
-
-  connection {
-    type    = "ssh"
-    host    = var.workers.*.domain[count.index]
-    user    = "core"
-    timeout = "60m"
-  }
-
-  provisioner "file" {
-    content     = module.bootstrap.kubeconfig-kubelet
-    destination = "/home/core/kubeconfig"
-  }
-
-  provisioner "remote-exec" {
-    inline = [
-      "sudo mv /home/core/kubeconfig /etc/kubernetes/kubeconfig",
-    ]
-  }
-}
 
 # Connect to a controller to perform one-time cluster bootstrap.
 resource "null_resource" "bootstrap" {
   # Without depends_on, this remote-exec may start before the kubeconfig copy.
@@ -83,7 +51,6 @@ resource "null_resource" "bootstrap" {
   # while no Kubelets are running.
   depends_on = [
     null_resource.copy-controller-secrets,
-    null_resource.copy-worker-secrets,
   ]
 
   connection {
@@ -99,4 +66,3 @@ resource "null_resource" "bootstrap" {
     ]
   }
 }
-
```
```diff
@@ -0,0 +1,46 @@
+variant: flatcar
+version: 1.0.0
+systemd:
+  units:
+    - name: installer.service
+      enabled: true
+      contents: |
+        [Unit]
+        Requires=network-online.target
+        After=network-online.target
+        [Service]
+        Type=simple
+        ExecStart=/opt/installer
+        [Install]
+        WantedBy=multi-user.target
+    # Avoid using the standard SSH port so terraform apply cannot SSH until
+    # post-install. But admins may SSH to debug disk install problems.
+    # After install, sshd will use port 22 and users/terraform can connect.
+    - name: sshd.socket
+      dropins:
+        - name: 10-sshd-port.conf
+          contents: |
+            [Socket]
+            ListenStream=
+            ListenStream=2222
+storage:
+  files:
+    - path: /opt/installer
+      mode: 0500
+      contents:
+        inline: |
+          #!/bin/bash -ex
+          curl --retry 10 "${ignition_endpoint}?mac=${mac}&os=installed" -o ignition.json
+          flatcar-install \
+            -d ${install_disk} \
+            -C ${os_channel} \
+            -V ${os_version} \
+            ${baseurl_flag} \
+            -i ignition.json
+          udevadm settle
+          systemctl reboot
+passwd:
+  users:
+    - name: core
+      ssh_authorized_keys:
+        - "${ssh_authorized_key}"
```
```diff
@@ -0,0 +1,87 @@
+locals {
+  # flatcar-stable -> stable channel
+  channel = split("-", var.os_channel)[1]
+
+  remote_kernel = "${var.download_protocol}://${local.channel}.release.flatcar-linux.net/amd64-usr/${var.os_version}/flatcar_production_pxe.vmlinuz"
+  remote_initrd = [
+    "${var.download_protocol}://${local.channel}.release.flatcar-linux.net/amd64-usr/${var.os_version}/flatcar_production_pxe_image.cpio.gz",
+  ]
+  args = flatten([
+    "initrd=flatcar_production_pxe_image.cpio.gz",
+    "flatcar.config.url=${var.matchbox_http_endpoint}/ignition?uuid=$${uuid}&mac=$${mac:hexhyp}",
+    "flatcar.first_boot=yes",
+    var.kernel_args,
+  ])
+
+  cached_kernel = "/assets/flatcar/${var.os_version}/flatcar_production_pxe.vmlinuz"
+  cached_initrd = [
+    "/assets/flatcar/${var.os_version}/flatcar_production_pxe_image.cpio.gz",
+  ]
+
+  kernel = var.cached_install ? local.cached_kernel : local.remote_kernel
+  initrd = var.cached_install ? local.cached_initrd : local.remote_initrd
+}
+
+# Match machine to an install profile by MAC
+resource "matchbox_group" "install" {
+  name    = format("install-%s", var.name)
+  profile = matchbox_profile.install.name
+  selector = {
+    mac = var.mac
+  }
+}
+
+// Flatcar Linux install profile (from release.flatcar-linux.net)
+resource "matchbox_profile" "install" {
+  name   = format("%s-install-%s", var.cluster_name, var.name)
+  kernel = local.kernel
+  initrd = local.initrd
+  args   = concat(local.args, var.kernel_args)
+
+  raw_ignition = data.ct_config.install.rendered
+}
+
+# Flatcar Linux install
+data "ct_config" "install" {
+  content = templatefile("${path.module}/butane/install.yaml", {
+    os_channel         = local.channel
+    os_version         = var.os_version
+    ignition_endpoint  = format("%s/ignition", var.matchbox_http_endpoint)
+    mac                = var.mac
+    install_disk       = var.install_disk
+    ssh_authorized_key = var.ssh_authorized_key
+    # only cached profile adds -b baseurl
+    baseurl_flag = var.cached_install ? "-b ${var.matchbox_http_endpoint}/assets/flatcar" : ""
+  })
+  strict = true
+}
+
+# Match a worker to a profile by MAC
+resource "matchbox_group" "worker" {
+  name    = format("%s-%s", var.cluster_name, var.name)
+  profile = matchbox_profile.worker.name
+  selector = {
+    mac = var.mac
+    os  = "installed"
+  }
+}
+
+// Flatcar Linux Worker profile
+resource "matchbox_profile" "worker" {
+  name         = format("%s-worker-%s", var.cluster_name, var.name)
+  raw_ignition = data.ct_config.worker.rendered
+}
+
+# Flatcar Linux workers
+data "ct_config" "worker" {
+  content = templatefile("${path.module}/butane/worker.yaml", {
+    domain_name            = var.domain
+    ssh_authorized_key     = var.ssh_authorized_key
+    cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
+    cluster_domain_suffix  = var.cluster_domain_suffix
+    node_labels            = join(",", var.node_labels)
+    node_taints            = join(",", var.node_taints)
+  })
+  strict   = true
+  snippets = var.snippets
+}
```
```diff
@@ -0,0 +1,27 @@
+# Secure copy kubeconfig to worker. Activates kubelet.service
+resource "null_resource" "copy-worker-secrets" {
+  # Without depends_on, remote-exec could start and wait for machines before
+  # matchbox groups are written, causing a deadlock.
+  depends_on = [
+    matchbox_group.install,
+    matchbox_group.worker,
+  ]
+
+  connection {
+    type    = "ssh"
+    host    = var.domain
+    user    = "core"
+    timeout = "60m"
+  }
+
+  provisioner "file" {
+    content     = var.kubeconfig
+    destination = "/home/core/kubeconfig"
+  }
+
+  provisioner "remote-exec" {
+    inline = [
+      "sudo mv /home/core/kubeconfig /etc/kubernetes/kubeconfig",
+    ]
+  }
+}
```
```diff
@@ -0,0 +1,120 @@
+variable "cluster_name" {
+  type        = string
+  description = "Must be set to the `cluster_name` of cluster"
+}
+
+# bare-metal
+
+variable "matchbox_http_endpoint" {
+  type        = string
+  description = "Matchbox HTTP read-only endpoint (e.g. http://matchbox.example.com:8080)"
+}
+
+variable "os_channel" {
+  type        = string
+  description = "Channel for a Flatcar Linux (flatcar-stable, flatcar-beta, flatcar-alpha)"
+
+  validation {
+    condition     = contains(["flatcar-stable", "flatcar-beta", "flatcar-alpha"], var.os_channel)
+    error_message = "The os_channel must be flatcar-stable, flatcar-beta, or flatcar-alpha."
+  }
+}
+
+variable "os_version" {
+  type        = string
+  description = "Version of Flatcar Linux to PXE and install (e.g. 2079.5.1)"
+}
+
+# machine
+
+variable "name" {
+  type        = string
+  description = "Unique name for the machine (e.g. node1)"
+}
+
+variable "mac" {
+  type        = string
+  description = "MAC address (e.g. 52:54:00:a1:9c:ae)"
+}
+
+variable "domain" {
+  type        = string
+  description = "Fully qualified domain name (e.g. node1.example.com)"
+}
+
+# configuration
+
+variable "kubeconfig" {
+  type        = string
+  description = "Must be set to `kubeconfig` output by cluster"
+}
+
+variable "ssh_authorized_key" {
+  type        = string
+  description = "SSH public key for user 'core'"
+}
+
+variable "snippets" {
+  type        = list(string)
+  description = "List of Butane snippets"
+  default     = []
+}
+
+variable "node_labels" {
+  type        = list(string)
+  description = "List of initial node labels"
+  default     = []
+}
+
+variable "node_taints" {
+  type        = list(string)
+  description = "List of initial node taints"
+  default     = []
+}
+
+# optional
+
+variable "download_protocol" {
+  type        = string
+  description = "Protocol iPXE should use to download the kernel and initrd. Defaults to https, which requires iPXE compiled with crypto support. Unused if cached_install is true."
+  default     = "https"
+}
+
+variable "cached_install" {
+  type        = bool
+  description = "Whether Flatcar Linux should PXE boot and install from matchbox /assets cache. Note that the admin must have downloaded the os_version into matchbox assets."
+  default     = false
+}
+
+variable "install_disk" {
+  type        = string
+  default     = "/dev/sda"
+  description = "Disk device to which the install profiles should install Flatcar Linux (e.g. /dev/sda)"
+}
+
+variable "kernel_args" {
+  type        = list(string)
+  description = "Additional kernel arguments to provide at PXE boot."
+  default     = []
+}
+
+# unofficial, undocumented, unsupported
+
+variable "service_cidr" {
+  type        = string
+  description = <<EOD
+CIDR IPv4 range to assign Kubernetes services.
+The 1st IP will be reserved for kube_apiserver, the 10th IP will be reserved for coredns.
+EOD
+  default     = "10.3.0.0/16"
+}
+
+variable "cluster_domain_suffix" {
+  type        = string
+  description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
+  default     = "cluster.local"
+}
```
```diff
@@ -0,0 +1,16 @@
+# Terraform version and plugin versions
+
+terraform {
+  required_version = ">= 0.13.0, < 2.0.0"
+  required_providers {
+    null = ">= 2.1"
+    ct = {
+      source  = "poseidon/ct"
+      version = "~> 0.9"
+    }
+    matchbox = {
+      source  = "poseidon/matchbox"
+      version = "~> 0.5.0"
+    }
+  }
+}
```
```diff
@@ -0,0 +1,32 @@
+module "workers" {
+  count  = length(var.workers)
+  source = "./worker"
+
+  cluster_name = var.cluster_name
+
+  # metal
+  matchbox_http_endpoint = var.matchbox_http_endpoint
+  os_channel             = var.os_channel
+  os_version             = var.os_version
+
+  # machine
+  name   = var.workers[count.index].name
+  mac    = var.workers[count.index].mac
+  domain = var.workers[count.index].domain
+
+  # configuration
+  kubeconfig            = module.bootstrap.kubeconfig-kubelet
+  ssh_authorized_key    = var.ssh_authorized_key
+  service_cidr          = var.service_cidr
+  cluster_domain_suffix = var.cluster_domain_suffix
+  node_labels           = lookup(var.worker_node_labels, var.workers[count.index].name, [])
+  node_taints           = lookup(var.worker_node_taints, var.workers[count.index].name, [])
+  snippets              = lookup(var.snippets, var.workers[count.index].name, [])
+
+  # optional
+  download_protocol = var.download_protocol
+  cached_install    = var.cached_install
+  install_disk      = var.install_disk
+  kernel_args       = var.kernel_args
+}
```