Rename CLC files and favor Terraform list index syntax
* Rename Container Linux Config (CLC) files to *.yaml to align with Fedora CoreOS Config (FCC) files and for syntax highlighting
* Replace common uses of Terraform `element` (which wraps around) with `list[index]` syntax to surface index errors
parent 11565ffa8a
commit 50db3d0231
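For context, a minimal sketch of the behavior difference (not part of this change; the `subnets` list and local names are made up): `element` wraps out-of-range indexes via modulo, so a count mismatch silently reuses an existing item, while the index operator makes `terraform plan` fail with an out-of-range error.

```hcl
locals {
  # hypothetical list, for illustration only
  subnets = ["subnet-a", "subnet-b", "subnet-c"]

  # element() wraps around: index 3 silently returns "subnet-a" again
  wrapped = element(local.subnets, 3)

  # index syntax surfaces the mistake: uncommenting this line makes
  # `terraform plan` fail with an invalid-index error
  # strict = local.subnets[3]

  # in-range indexes behave the same with either form
  first = local.subnets[0]
}
```

With `count`-based resources, wrapping can quietly attach the wrong subnet, IP, or Ignition config instead of failing the plan, which is why the hunks below switch to indexing.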
@@ -4,8 +4,8 @@ locals {
   # flatcar-stable -> Flatcar Linux AMI
   ami_id = local.flavor == "flatcar" ? data.aws_ami.flatcar.image_id : data.aws_ami.coreos.image_id
 
-  flavor = element(split("-", var.os_image), 0)
-  channel = element(split("-", var.os_image), 1)
+  flavor = split("-", var.os_image)[0]
+  channel = split("-", var.os_image)[1]
 }
 
 data "aws_ami" "coreos" {
@@ -10,7 +10,7 @@ resource "aws_route53_record" "etcds" {
   ttl = 300
 
   # private IPv4 address for etcd
-  records = [element(aws_instance.controllers.*.private_ip, count.index)]
+  records = [aws_instance.controllers.*.private_ip[count.index]]
 }
 
 # Controller instances
@@ -24,7 +24,7 @@ resource "aws_instance" "controllers" {
   instance_type = var.controller_type
 
   ami = local.ami_id
-  user_data = element(data.ct_config.controller-ignitions.*.rendered, count.index)
+  user_data = data.ct_config.controller-ignitions.*.rendered[count.index]
 
   # storage
   root_block_device {
@@ -36,7 +36,7 @@ resource "aws_instance" "controllers" {
 
   # network
   associate_public_ip_address = true
-  subnet_id = element(aws_subnet.public.*.id, count.index)
+  subnet_id = aws_subnet.public.*.id[count.index]
   vpc_security_group_ids = [aws_security_group.controller.id]
 
   lifecycle {
@@ -50,10 +50,7 @@ resource "aws_instance" "controllers" {
 # Controller Ignition configs
 data "ct_config" "controller-ignitions" {
   count = var.controller_count
-  content = element(
-    data.template_file.controller-configs.*.rendered,
-    count.index,
-  )
+  content = data.template_file.controller-configs.*.rendered[count.index]
   pretty_print = false
   snippets = var.controller_clc_snippets
 }
@@ -62,7 +59,7 @@ data "ct_config" "controller-ignitions" {
 data "template_file" "controller-configs" {
   count = var.controller_count
 
-  template = file("${path.module}/cl/controller.yaml.tmpl")
+  template = file("${path.module}/cl/controller.yaml")
 
   vars = {
     # Cannot use cyclic dependencies on controllers or their DNS records
@@ -62,6 +62,6 @@ resource "aws_route_table_association" "public" {
   count = length(data.aws_availability_zones.all.names)
 
   route_table_id = aws_route_table.default.id
-  subnet_id = element(aws_subnet.public.*.id, count.index)
+  subnet_id = aws_subnet.public.*.id[count.index]
 }
 
@@ -88,7 +88,7 @@ resource "aws_lb_target_group_attachment" "controllers" {
   count = var.controller_count
 
   target_group_arn = aws_lb_target_group.controllers.arn
-  target_id = element(aws_instance.controllers.*.id, count.index)
+  target_id = aws_instance.controllers.*.id[count.index]
   port = 6443
 }
 
@@ -4,8 +4,8 @@ locals {
   # flatcar-stable -> Flatcar Linux AMI
   ami_id = local.flavor == "flatcar" ? data.aws_ami.flatcar.image_id : data.aws_ami.coreos.image_id
 
-  flavor = element(split("-", var.os_image), 0)
-  channel = element(split("-", var.os_image), 1)
+  flavor = split("-", var.os_image)[0]
+  channel = split("-", var.os_image)[1]
 }
 
 data "aws_ami" "coreos" {
@@ -78,7 +78,7 @@ data "ct_config" "worker-ignition" {
 
 # Worker Container Linux config
 data "template_file" "worker-config" {
-  template = file("${path.module}/cl/worker.yaml.tmpl")
+  template = file("${path.module}/cl/worker.yaml")
 
   vars = {
     kubeconfig = indent(10, var.kubeconfig)
@@ -11,16 +11,13 @@ resource "azurerm_dns_a_record" "etcds" {
   ttl = 300
 
   # private IPv4 address for etcd
-  records = [element(
-    azurerm_network_interface.controllers.*.private_ip_address,
-    count.index,
-  )]
+  records = [azurerm_network_interface.controllers.*.private_ip_address[count.index]]
 }
 
 locals {
   # Channel for a Container Linux derivative
   # coreos-stable -> Container Linux Stable
-  channel = element(split("-", var.os_image), 1)
+  channel = split("-", var.os_image)[1]
 }
 
 # Controller availability set to spread controllers
@@ -63,12 +60,12 @@ resource "azurerm_virtual_machine" "controllers" {
   }
 
   # network
-  network_interface_ids = [element(azurerm_network_interface.controllers.*.id, count.index)]
+  network_interface_ids = [azurerm_network_interface.controllers.*.id[count.index]]
 
   os_profile {
     computer_name = "${var.cluster_name}-controller-${count.index}"
     admin_username = "core"
-    custom_data = element(data.ct_config.controller-ignitions.*.rendered, count.index)
+    custom_data = data.ct_config.controller-ignitions.*.rendered[count.index]
   }
 
   # Azure mandates setting an ssh_key, even though Ignition custom_data handles it too
@@ -108,7 +105,7 @@ resource "azurerm_network_interface" "controllers" {
     private_ip_address_allocation = "dynamic"
 
     # public IPv4
-    public_ip_address_id = element(azurerm_public_ip.controllers.*.id, count.index)
+    public_ip_address_id = azurerm_public_ip.controllers.*.id[count.index]
   }
 }
 
@@ -135,10 +132,7 @@ resource "azurerm_public_ip" "controllers" {
 # Controller Ignition configs
 data "ct_config" "controller-ignitions" {
   count = var.controller_count
-  content = element(
-    data.template_file.controller-configs.*.rendered,
-    count.index,
-  )
+  content = data.template_file.controller-configs.*.rendered[count.index]
   pretty_print = false
   snippets = var.controller_clc_snippets
 }
@@ -147,7 +141,7 @@ data "ct_config" "controller-ignitions" {
 data "template_file" "controller-configs" {
   count = var.controller_count
 
-  template = file("${path.module}/cl/controller.yaml.tmpl")
+  template = file("${path.module}/cl/controller.yaml")
 
   vars = {
     # Cannot use cyclic dependencies on controllers or their DNS records
@@ -45,7 +45,7 @@ resource "null_resource" "bootstrap" {
 
   connection {
     type = "ssh"
-    host = element(azurerm_public_ip.controllers.*.ip_address, 0)
+    host = azurerm_public_ip.controllers.*.ip_address[0]
    user = "core"
     timeout = "15m"
   }
@@ -1,7 +1,7 @@
 locals {
   # Channel for a Container Linux derivative
   # coreos-stable -> Container Linux Stable
-  channel = element(split("-", var.os_image), 1)
+  channel = split("-", var.os_image)[1]
 }
 
 # Workers scale set
@@ -104,7 +104,7 @@ data "ct_config" "worker-ignition" {
 
 # Worker Container Linux configs
 data "template_file" "worker-config" {
-  template = file("${path.module}/cl/worker.yaml.tmpl")
+  template = file("${path.module}/cl/worker.yaml")
 
   vars = {
     kubeconfig = indent(10, var.kubeconfig)
@@ -31,7 +31,7 @@ resource "matchbox_profile" "container-linux-install" {
 data "template_file" "container-linux-install-configs" {
   count = length(var.controllers) + length(var.workers)
 
-  template = file("${path.module}/cl/install.yaml.tmpl")
+  template = file("${path.module}/cl/install.yaml")
 
   vars = {
     os_flavor = local.flavor
@@ -72,7 +72,7 @@ resource "matchbox_profile" "cached-container-linux-install" {
 data "template_file" "cached-container-linux-install-configs" {
   count = length(var.controllers) + length(var.workers)
 
-  template = file("${path.module}/cl/install.yaml.tmpl")
+  template = file("${path.module}/cl/install.yaml")
 
   vars = {
     os_flavor = local.flavor
@@ -150,7 +150,7 @@ data "ct_config" "controller-ignitions" {
 data "template_file" "controller-configs" {
   count = length(var.controllers)
 
-  template = file("${path.module}/cl/controller.yaml.tmpl")
+  template = file("${path.module}/cl/controller.yaml")
 
   vars = {
     domain_name = var.controllers.*.domain[count.index]
@@ -180,7 +180,7 @@ data "ct_config" "worker-ignitions" {
 data "template_file" "worker-configs" {
   count = length(var.workers)
 
-  template = file("${path.module}/cl/worker.yaml.tmpl")
+  template = file("${path.module}/cl/worker.yaml")
 
   vars = {
     domain_name = var.workers.*.domain[count.index]
@@ -11,7 +11,7 @@ resource "digitalocean_record" "controllers" {
   ttl = 300
 
   # IPv4 addresses of controllers
-  value = element(digitalocean_droplet.controllers.*.ipv4_address, count.index)
+  value = digitalocean_droplet.controllers.*.ipv4_address[count.index]
 }
 
 # Discrete DNS records for each controller's private IPv4 for etcd usage
@@ -27,7 +27,7 @@ resource "digitalocean_record" "etcds" {
   ttl = 300
 
   # private IPv4 address for etcd
-  value = element(digitalocean_droplet.controllers.*.ipv4_address_private, count.index)
+  value = digitalocean_droplet.controllers.*.ipv4_address_private[count.index]
 }
 
 # Controller droplet instances
@@ -44,7 +44,7 @@ resource "digitalocean_droplet" "controllers" {
   ipv6 = true
   private_networking = true
 
-  user_data = element(data.ct_config.controller-ignitions.*.rendered, count.index)
+  user_data = data.ct_config.controller-ignitions.*.rendered[count.index]
   ssh_keys = var.ssh_fingerprints
 
   tags = [
@@ -64,7 +64,7 @@ resource "digitalocean_tag" "controllers" {
 # Controller Ignition configs
 data "ct_config" "controller-ignitions" {
   count = var.controller_count
-  content = element(data.template_file.controller-configs.*.rendered, count.index)
+  content = data.template_file.controller-configs.*.rendered[count.index]
   pretty_print = false
   snippets = var.controller_clc_snippets
 }
@@ -73,7 +73,7 @@ data "ct_config" "controller-ignitions" {
 data "template_file" "controller-configs" {
   count = var.controller_count
 
-  template = file("${path.module}/cl/controller.yaml.tmpl")
+  template = file("${path.module}/cl/controller.yaml")
 
   vars = {
     # Cannot use cyclic dependencies on controllers or their DNS records
@@ -8,7 +8,7 @@ resource "digitalocean_record" "workers-record-a" {
   name = "${var.cluster_name}-workers"
   type = "A"
   ttl = 300
-  value = element(digitalocean_droplet.workers.*.ipv4_address, count.index)
+  value = digitalocean_droplet.workers.*.ipv4_address[count.index]
 }
 
 resource "digitalocean_record" "workers-record-aaaa" {
@@ -20,7 +20,7 @@ resource "digitalocean_record" "workers-record-aaaa" {
   name = "${var.cluster_name}-workers"
   type = "AAAA"
   ttl = 300
-  value = element(digitalocean_droplet.workers.*.ipv6_address, count.index)
+  value = digitalocean_droplet.workers.*.ipv6_address[count.index]
 }
 
 # Worker droplet instances
@@ -63,7 +63,7 @@ data "ct_config" "worker-ignition" {
 
 # Worker Container Linux config
 data "template_file" "worker-config" {
-  template = file("${path.module}/cl/worker.yaml.tmpl")
+  template = file("${path.module}/cl/worker.yaml")
 
   vars = {
     cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
@@ -11,7 +11,7 @@ resource "google_dns_record_set" "etcds" {
   ttl = 300
 
   # private IPv4 address for etcd
-  rrdatas = [element(google_compute_instance.controllers.*.network_interface.0.network_ip, count.index)]
+  rrdatas = [google_compute_instance.controllers.*.network_interface.0.network_ip[count.index]]
 }
 
 # Zones in the region
@@ -30,11 +30,12 @@ resource "google_compute_instance" "controllers" {
   count = var.controller_count
 
   name = "${var.cluster_name}-controller-${count.index}"
+  # use a zone in the region and wrap around (e.g. controllers > zones)
   zone = element(local.zones, count.index)
   machine_type = var.controller_type
 
   metadata = {
-    user-data = element(data.ct_config.controller-ignitions.*.rendered, count.index)
+    user-data = data.ct_config.controller-ignitions.*.rendered[count.index]
   }
 
   boot_disk {
@@ -65,10 +66,7 @@ resource "google_compute_instance" "controllers" {
 # Controller Ignition configs
 data "ct_config" "controller-ignitions" {
   count = var.controller_count
-  content = element(
-    data.template_file.controller-configs.*.rendered,
-    count.index,
-  )
+  content = data.template_file.controller-configs.*.rendered[count.index]
   pretty_print = false
   snippets = var.controller_clc_snippets
 }
@@ -77,7 +75,7 @@ data "ct_config" "controller-ignitions" {
 data "template_file" "controller-configs" {
   count = var.controller_count
 
-  template = file("${path.module}/cl/controller.yaml.tmpl")
+  template = file("${path.module}/cl/controller.yaml")
 
   vars = {
     # Cannot use cyclic dependencies on controllers or their DNS records
@@ -78,7 +78,7 @@ data "ct_config" "worker-ignition" {
 
 # Worker Container Linux config
 data "template_file" "worker-config" {
-  template = file("${path.module}/cl/worker.yaml.tmpl")
+  template = file("${path.module}/cl/worker.yaml")
 
   vars = {
     kubeconfig = indent(10, var.kubeconfig)