diff --git a/bare-metal/container-linux/kubernetes/profiles.tf b/bare-metal/container-linux/kubernetes/profiles.tf
index 794f0adf..51e5977c 100644
--- a/bare-metal/container-linux/kubernetes/profiles.tf
+++ b/bare-metal/container-linux/kubernetes/profiles.tf
@@ -116,20 +116,20 @@ resource "matchbox_profile" "flatcar-install" {

 // Kubernetes Controller profiles
 resource "matchbox_profile" "controllers" {
-  count = "${length(var.controller_names)}"
-  name  = "${format("%s-controller-%s", var.cluster_name, element(var.controller_names, count.index))}"
+  count        = "${length(var.controller_names)}"
+  name         = "${format("%s-controller-%s", var.cluster_name, element(var.controller_names, count.index))}"
   raw_ignition = "${element(data.ct_config.controller-ignitions.*.rendered, count.index)}"
 }

 data "ct_config" "controller-ignitions" {
-  count   = "${length(var.controller_names)}"
-  content = "${element(data.template_file.controller-configs.*.rendered, count.index)}"
+  count        = "${length(var.controller_names)}"
+  content      = "${element(data.template_file.controller-configs.*.rendered, count.index)}"
   pretty_print = false

+  # Must use direct lookup. Cannot use lookup(map, key) since it only works for flat maps
   snippets = ["${local.clc_map[element(var.controller_names, count.index)]}"]
 }
-
 data "template_file" "controller-configs" {
   count = "${length(var.controller_names)}"

@@ -147,16 +147,16 @@ data "template_file" "controller-configs" {

 // Kubernetes Worker profiles
 resource "matchbox_profile" "workers" {
-  count = "${length(var.worker_names)}"
-  name  = "${format("%s-worker-%s", var.cluster_name, element(var.worker_names, count.index))}"
+  count        = "${length(var.worker_names)}"
+  name         = "${format("%s-worker-%s", var.cluster_name, element(var.worker_names, count.index))}"
   raw_ignition = "${element(data.ct_config.worker-ignitions.*.rendered, count.index)}"
 }
-
 data "ct_config" "worker-ignitions" {
-  count   = "${length(var.worker_names)}"
-  content = "${element(data.template_file.worker-configs.*.rendered, count.index)}"
+  count        = "${length(var.worker_names)}"
+  content      = "${element(data.template_file.worker-configs.*.rendered, count.index)}"
   pretty_print = false

+  # Must use direct lookup. Cannot use lookup(map, key) since it only works for flat maps
   snippets = ["${local.clc_map[element(var.worker_names, count.index)]}"]
 }

@@ -179,12 +179,13 @@ locals {
   # Default Container Linux config snippets map every node names to list("\n") so
   # all lookups succeed
   clc_defaults = "${zipmap(concat(var.controller_names, var.worker_names), chunklist(data.template_file.clc-default-snippets.*.rendered, 1))}"
+  # Union of the default and user specific snippets, later overrides prior.
   clc_map = "${merge(local.clc_defaults, var.clc_snippets)}"
 }

 // Horrible hack to generate a Terraform list of node count length
 data "template_file" "clc-default-snippets" {
-  count = "${length(var.controller_names) + length(var.worker_names)}"
+  count    = "${length(var.controller_names) + length(var.worker_names)}"
   template = "\n"
 }
diff --git a/bare-metal/container-linux/kubernetes/variables.tf b/bare-metal/container-linux/kubernetes/variables.tf
index 1f422f4a..c44b9066 100644
--- a/bare-metal/container-linux/kubernetes/variables.tf
+++ b/bare-metal/container-linux/kubernetes/variables.tf
@@ -24,39 +24,39 @@ variable "os_version" {

 # Terraform's crude "type system" does not properly support lists of maps so we do this.
 variable "controller_names" {
-  type = "list"
+  type        = "list"
   description = "Ordered list of controller names (e.g. [node1])"
 }

 variable "controller_macs" {
-  type = "list"
+  type        = "list"
   description = "Ordered list of controller identifying MAC addresses (e.g. [52:54:00:a1:9c:ae])"
 }

 variable "controller_domains" {
-  type = "list"
+  type        = "list"
   description = "Ordered list of controller FQDNs (e.g. [node1.example.com])"
 }

 variable "worker_names" {
-  type = "list"
+  type        = "list"
   description = "Ordered list of worker names (e.g. [node2, node3])"
 }

 variable "worker_macs" {
-  type = "list"
+  type        = "list"
   description = "Ordered list of worker identifying MAC addresses (e.g. [52:54:00:b2:2f:86, 52:54:00:c3:61:77])"
 }

 variable "worker_domains" {
-  type = "list"
+  type        = "list"
   description = "Ordered list of worker FQDNs (e.g. [node2.example.com, node3.example.com])"
 }

 variable "clc_snippets" {
-  type = "map"
+  type        = "map"
   description = "Map from machine names to lists of Container Linux Config snippets"
-  default = {}
+  default     = {}
 }

 # configuration
diff --git a/bare-metal/fedora-atomic/kubernetes/variables.tf b/bare-metal/fedora-atomic/kubernetes/variables.tf
index e57f9b56..ca375285 100644
--- a/bare-metal/fedora-atomic/kubernetes/variables.tf
+++ b/bare-metal/fedora-atomic/kubernetes/variables.tf
@@ -25,32 +25,32 @@ EOD

 # Terraform's crude "type system" does not properly support lists of maps so we do this.
 variable "controller_names" {
-  type = "list"
+  type        = "list"
   description = "Ordered list of controller names (e.g. [node1])"
 }

 variable "controller_macs" {
-  type = "list"
+  type        = "list"
   description = "Ordered list of controller identifying MAC addresses (e.g. [52:54:00:a1:9c:ae])"
 }

 variable "controller_domains" {
-  type = "list"
+  type        = "list"
   description = "Ordered list of controller FQDNs (e.g. [node1.example.com])"
 }

 variable "worker_names" {
-  type = "list"
+  type        = "list"
   description = "Ordered list of worker names (e.g. [node2, node3])"
 }

 variable "worker_macs" {
-  type = "list"
+  type        = "list"
   description = "Ordered list of worker identifying MAC addresses (e.g. [52:54:00:b2:2f:86, 52:54:00:c3:61:77])"
 }

 variable "worker_domains" {
-  type = "list"
+  type        = "list"
   description = "Ordered list of worker FQDNs (e.g. [node2.example.com, node3.example.com])"
 }
diff --git a/google-cloud/container-linux/kubernetes/ingress.tf b/google-cloud/container-linux/kubernetes/ingress.tf
index 218f0155..ba107cf0 100644
--- a/google-cloud/container-linux/kubernetes/ingress.tf
+++ b/google-cloud/container-linux/kubernetes/ingress.tf
@@ -25,21 +25,22 @@ resource "google_compute_global_forwarding_rule" "ingress-https" {

 # HTTP proxy load balancer for ingress controllers
 resource "google_compute_target_http_proxy" "ingress-http" {
-  name = "${var.cluster_name}-ingress-http"
+  name        = "${var.cluster_name}-ingress-http"
   description = "Distribute HTTP load across ${var.cluster_name} workers"
-  url_map = "${google_compute_url_map.ingress-http.self_link}"
+  url_map     = "${google_compute_url_map.ingress-http.self_link}"
 }

 # TCP proxy load balancer for ingress controllers
 resource "google_compute_target_tcp_proxy" "ingress-https" {
-  name = "${var.cluster_name}-ingress-https"
-  description = "Distribute HTTPS load across ${var.cluster_name} workers"
+  name            = "${var.cluster_name}-ingress-https"
+  description     = "Distribute HTTPS load across ${var.cluster_name} workers"
   backend_service = "${google_compute_backend_service.ingress-https.self_link}"
 }

 # HTTP URL Map (required)
 resource "google_compute_url_map" "ingress-http" {
   name = "${var.cluster_name}-ingress-http"
+  # Do not add host/path rules for applications here. Use Ingress resources.
   default_service = "${google_compute_backend_service.ingress-http.self_link}"
 }

@@ -90,7 +91,7 @@ resource "google_compute_health_check" "ingress" {
   unhealthy_threshold = 4

   http_health_check {
-    port = 10254
+    port         = 10254
     request_path = "/healthz"
   }
 }
diff --git a/google-cloud/container-linux/kubernetes/network.tf b/google-cloud/container-linux/kubernetes/network.tf
index 89cfd441..512f1056 100644
--- a/google-cloud/container-linux/kubernetes/network.tf
+++ b/google-cloud/container-linux/kubernetes/network.tf
@@ -57,7 +57,6 @@ resource "google_compute_firewall" "allow-apiserver" {
   target_tags = ["${var.cluster_name}-controller"]
 }

-
 # Calico BGP and IPIP
 # https://docs.projectcalico.org/v2.5/reference/public-cloud/gce
 resource "google_compute_firewall" "internal-calico" {
@@ -154,7 +153,7 @@ resource "google_compute_firewall" "allow-ingress" {
 }

 resource "google_compute_firewall" "google-health-checks" {
-  name = "${var.cluster_name}-google-health-checks"
+  name    = "${var.cluster_name}-google-health-checks"
   network = "${google_compute_network.network.name}"

   allow {
@@ -164,5 +163,5 @@ resource "google_compute_firewall" "google-health-checks" {

   # https://cloud.google.com/compute/docs/load-balancing/tcp-ssl/tcp-proxy#health-checking
   source_ranges = ["130.211.0.0/22", "35.191.0.0/16"]
-  target_tags = ["${var.cluster_name}-worker"]
+  target_tags   = ["${var.cluster_name}-worker"]
 }
diff --git a/google-cloud/container-linux/kubernetes/outputs.tf b/google-cloud/container-linux/kubernetes/outputs.tf
index e38fa645..e96c6b0a 100644
--- a/google-cloud/container-linux/kubernetes/outputs.tf
+++ b/google-cloud/container-linux/kubernetes/outputs.tf
@@ -2,7 +2,7 @@

 output "ingress_static_ipv4" {
   description = "Global IPv4 address for proxy load balancing to the nearest Ingress controller"
-  value = "${google_compute_global_address.ingress-ipv4.address}"
+  value       = "${google_compute_global_address.ingress-ipv4.address}"
 }

 # Outputs for worker pools
@@ -25,5 +25,5 @@ output "network_self_link" {

 output "worker_instance_group" {
   description = "Full URL of the worker managed instance group"
-  value = "${module.workers.instance_group}"
+  value       = "${module.workers.instance_group}"
 }
diff --git a/google-cloud/container-linux/kubernetes/workers/outputs.tf b/google-cloud/container-linux/kubernetes/workers/outputs.tf
index 7264a096..4521d792 100644
--- a/google-cloud/container-linux/kubernetes/workers/outputs.tf
+++ b/google-cloud/container-linux/kubernetes/workers/outputs.tf
@@ -1,4 +1,4 @@
 output "instance_group" {
   description = "Full URL of the worker managed instance group"
-  value = "${google_compute_region_instance_group_manager.workers.instance_group}"
+  value       = "${google_compute_region_instance_group_manager.workers.instance_group}"
 }
diff --git a/google-cloud/fedora-atomic/kubernetes/ingress.tf b/google-cloud/fedora-atomic/kubernetes/ingress.tf
index 218f0155..ba107cf0 100644
--- a/google-cloud/fedora-atomic/kubernetes/ingress.tf
+++ b/google-cloud/fedora-atomic/kubernetes/ingress.tf
@@ -25,21 +25,22 @@ resource "google_compute_global_forwarding_rule" "ingress-https" {

 # HTTP proxy load balancer for ingress controllers
 resource "google_compute_target_http_proxy" "ingress-http" {
-  name = "${var.cluster_name}-ingress-http"
+  name        = "${var.cluster_name}-ingress-http"
   description = "Distribute HTTP load across ${var.cluster_name} workers"
-  url_map = "${google_compute_url_map.ingress-http.self_link}"
+  url_map     = "${google_compute_url_map.ingress-http.self_link}"
 }

 # TCP proxy load balancer for ingress controllers
 resource "google_compute_target_tcp_proxy" "ingress-https" {
-  name = "${var.cluster_name}-ingress-https"
-  description = "Distribute HTTPS load across ${var.cluster_name} workers"
+  name            = "${var.cluster_name}-ingress-https"
+  description     = "Distribute HTTPS load across ${var.cluster_name} workers"
   backend_service = "${google_compute_backend_service.ingress-https.self_link}"
 }

 # HTTP URL Map (required)
 resource "google_compute_url_map" "ingress-http" {
   name = "${var.cluster_name}-ingress-http"
+  # Do not add host/path rules for applications here. Use Ingress resources.
   default_service = "${google_compute_backend_service.ingress-http.self_link}"
 }

@@ -90,7 +91,7 @@ resource "google_compute_health_check" "ingress" {
   unhealthy_threshold = 4

   http_health_check {
-    port = 10254
+    port         = 10254
     request_path = "/healthz"
   }
 }
diff --git a/google-cloud/fedora-atomic/kubernetes/network.tf b/google-cloud/fedora-atomic/kubernetes/network.tf
index 89cfd441..512f1056 100644
--- a/google-cloud/fedora-atomic/kubernetes/network.tf
+++ b/google-cloud/fedora-atomic/kubernetes/network.tf
@@ -57,7 +57,6 @@ resource "google_compute_firewall" "allow-apiserver" {
   target_tags = ["${var.cluster_name}-controller"]
 }

-
 # Calico BGP and IPIP
 # https://docs.projectcalico.org/v2.5/reference/public-cloud/gce
 resource "google_compute_firewall" "internal-calico" {
@@ -154,7 +153,7 @@ resource "google_compute_firewall" "allow-ingress" {
 }

 resource "google_compute_firewall" "google-health-checks" {
-  name = "${var.cluster_name}-google-health-checks"
+  name    = "${var.cluster_name}-google-health-checks"
   network = "${google_compute_network.network.name}"

   allow {
@@ -164,5 +163,5 @@ resource "google_compute_firewall" "google-health-checks" {

   # https://cloud.google.com/compute/docs/load-balancing/tcp-ssl/tcp-proxy#health-checking
   source_ranges = ["130.211.0.0/22", "35.191.0.0/16"]
-  target_tags = ["${var.cluster_name}-worker"]
+  target_tags   = ["${var.cluster_name}-worker"]
 }
diff --git a/google-cloud/fedora-atomic/kubernetes/outputs.tf b/google-cloud/fedora-atomic/kubernetes/outputs.tf
index e38fa645..e96c6b0a 100644
--- a/google-cloud/fedora-atomic/kubernetes/outputs.tf
+++ b/google-cloud/fedora-atomic/kubernetes/outputs.tf
@@ -2,7 +2,7 @@

 output "ingress_static_ipv4" {
   description = "Global IPv4 address for proxy load balancing to the nearest Ingress controller"
-  value = "${google_compute_global_address.ingress-ipv4.address}"
+  value       = "${google_compute_global_address.ingress-ipv4.address}"
 }

 # Outputs for worker pools
@@ -25,5 +25,5 @@ output "network_self_link" {

 output "worker_instance_group" {
   description = "Full URL of the worker managed instance group"
-  value = "${module.workers.instance_group}"
+  value       = "${module.workers.instance_group}"
 }
diff --git a/google-cloud/fedora-atomic/kubernetes/workers/outputs.tf b/google-cloud/fedora-atomic/kubernetes/workers/outputs.tf
index 7264a096..4521d792 100644
--- a/google-cloud/fedora-atomic/kubernetes/workers/outputs.tf
+++ b/google-cloud/fedora-atomic/kubernetes/workers/outputs.tf
@@ -1,4 +1,4 @@
 output "instance_group" {
   description = "Full URL of the worker managed instance group"
-  value = "${google_compute_region_instance_group_manager.workers.instance_group}"
+  value       = "${google_compute_region_instance_group_manager.workers.instance_group}"
 }
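Note (illustration only, not part of the patch): under the clc_map scheme above, per-node snippets reach local.clc_map through the clc_snippets variable, keyed by machine name. A minimal sketch of the relevant module arguments, assuming the node names from the variable examples and a hypothetical snippet file path:

  # Hypothetical values; only the snippet-related arguments are shown.
  controller_names = ["node1"]
  worker_names     = ["node2", "node3"]

  # Map from machine name to a list of Container Linux Config snippets.
  # node1 and node3 fall back to the "\n" entry from local.clc_defaults,
  # so the direct map index in the snippets attribute always succeeds.
  clc_snippets = {
    "node2" = ["${file("./snippets/node2.yaml")}"]
  }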