Fix terraform fmt
commit bdf1e6986e
parent 99e3721181
@@ -116,20 +116,20 @@ resource "matchbox_profile" "flatcar-install" {

// Kubernetes Controller profiles
resource "matchbox_profile" "controllers" {
  count        = "${length(var.controller_names)}"
  name         = "${format("%s-controller-%s", var.cluster_name, element(var.controller_names, count.index))}"
  raw_ignition = "${element(data.ct_config.controller-ignitions.*.rendered, count.index)}"
}

data "ct_config" "controller-ignitions" {
  count        = "${length(var.controller_names)}"
  content      = "${element(data.template_file.controller-configs.*.rendered, count.index)}"
  pretty_print = false

  # Must use direct lookup. Cannot use lookup(map, key) since it only works for flat maps
  snippets = ["${local.clc_map[element(var.controller_names, count.index)]}"]
}

data "template_file" "controller-configs" {
  count = "${length(var.controller_names)}"
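As an editorial aside on the hunk above: the element(list, count.index) pattern fans one resource block out into one instance per controller, and the snippets argument indexes local.clc_map directly by node name. A hedged sketch of how those expressions evaluate, using hypothetical cluster and node names that do not appear in this commit:

# Sketch only; "mercury", "node1", and "node2" are hypothetical values.
# With var.cluster_name = "mercury" and var.controller_names = ["node1", "node2"],
# instance 0 of matchbox_profile.controllers evaluates roughly to:
#
#   name         = "mercury-controller-node1"
#   raw_ignition = data.ct_config.controller-ignitions.0.rendered
#   snippets     = ["${local.clc_map["node1"]}"]   # direct key index into a map of lists
#
# lookup(local.clc_map, "node1") could not be used instead: Terraform 0.11's
# lookup() only supports flat maps of primitive values, which is what the
# "Must use direct lookup" comment refers to.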
@@ -147,16 +147,16 @@ data "template_file" "controller-configs" {

// Kubernetes Worker profiles
resource "matchbox_profile" "workers" {
  count        = "${length(var.worker_names)}"
  name         = "${format("%s-worker-%s", var.cluster_name, element(var.worker_names, count.index))}"
  raw_ignition = "${element(data.ct_config.worker-ignitions.*.rendered, count.index)}"
}

data "ct_config" "worker-ignitions" {
  count        = "${length(var.worker_names)}"
  content      = "${element(data.template_file.worker-configs.*.rendered, count.index)}"
  pretty_print = false

  # Must use direct lookup. Cannot use lookup(map, key) since it only works for flat maps
  snippets = ["${local.clc_map[element(var.worker_names, count.index)]}"]
}
@@ -179,12 +179,13 @@ locals {
  # Default Container Linux config snippets map every node name to list("\n") so
  # all lookups succeed
  clc_defaults = "${zipmap(concat(var.controller_names, var.worker_names), chunklist(data.template_file.clc-default-snippets.*.rendered, 1))}"

  # Union of the default and user specific snippets, later overrides prior.
  clc_map = "${merge(local.clc_defaults, var.clc_snippets)}"
}

// Horrible hack to generate a Terraform list of node count length
data "template_file" "clc-default-snippets" {
  count    = "${length(var.controller_names) + length(var.worker_names)}"
  template = "\n"
}
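To make the zipmap/chunklist trick above easier to follow, a hedged worked example of what clc_defaults and clc_map evaluate to; the node names and snippet value are hypothetical and not part of this commit:

# Assume var.controller_names = ["node1"], var.worker_names = ["node2", "node3"],
# and var.clc_snippets = { node2 = ["<some user snippet>"] }.
#
# data.template_file.clc-default-snippets then has count = 3 and every rendered
# value is "\n", so:
#
#   chunklist([...rendered...], 1)  # => [["\n"], ["\n"], ["\n"]]
#   clc_defaults                    # => { node1 = ["\n"], node2 = ["\n"], node3 = ["\n"] }
#   clc_map                         # => { node1 = ["\n"], node2 = ["<some user snippet>"], node3 = ["\n"] }
#
# Every node name has an entry, so the direct map indexing in the profiles never
# fails, and user-provided snippets simply replace the "\n" placeholder.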
@@ -24,39 +24,39 @@ variable "os_version" {
# Terraform's crude "type system" does not properly support lists of maps so we do this.

variable "controller_names" {
  type        = "list"
  description = "Ordered list of controller names (e.g. [node1])"
}

variable "controller_macs" {
  type        = "list"
  description = "Ordered list of controller identifying MAC addresses (e.g. [52:54:00:a1:9c:ae])"
}

variable "controller_domains" {
  type        = "list"
  description = "Ordered list of controller FQDNs (e.g. [node1.example.com])"
}

variable "worker_names" {
  type        = "list"
  description = "Ordered list of worker names (e.g. [node2, node3])"
}

variable "worker_macs" {
  type        = "list"
  description = "Ordered list of worker identifying MAC addresses (e.g. [52:54:00:b2:2f:86, 52:54:00:c3:61:77])"
}

variable "worker_domains" {
  type        = "list"
  description = "Ordered list of worker FQDNs (e.g. [node2.example.com, node3.example.com])"
}

variable "clc_snippets" {
  type        = "map"
  description = "Map from machine names to lists of Container Linux Config snippets"
  default     = {}
}

# configuration
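For context, a hedged example of how a caller might populate clc_snippets alongside the node lists; the module name, source path, and snippet file are illustrative and not taken from this commit:

# Hypothetical caller configuration (names and paths are placeholders).
module "bare-metal-mercury" {
  source = "./bare-metal/container-linux/kubernetes" # illustrative path

  # ...other required variables omitted...

  controller_names = ["node1"]
  worker_names     = ["node2", "node3"]

  # Per-node Container Linux Config snippets; nodes without an entry fall back
  # to the "\n" default generated by the clc_defaults local.
  clc_snippets = {
    "node2" = ["${file("./snippets/node2.yaml")}"]
  }
}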
@@ -25,32 +25,32 @@ EOD
# Terraform's crude "type system" does not properly support lists of maps so we do this.

variable "controller_names" {
  type        = "list"
  description = "Ordered list of controller names (e.g. [node1])"
}

variable "controller_macs" {
  type        = "list"
  description = "Ordered list of controller identifying MAC addresses (e.g. [52:54:00:a1:9c:ae])"
}

variable "controller_domains" {
  type        = "list"
  description = "Ordered list of controller FQDNs (e.g. [node1.example.com])"
}

variable "worker_names" {
  type        = "list"
  description = "Ordered list of worker names (e.g. [node2, node3])"
}

variable "worker_macs" {
  type        = "list"
  description = "Ordered list of worker identifying MAC addresses (e.g. [52:54:00:b2:2f:86, 52:54:00:c3:61:77])"
}

variable "worker_domains" {
  type        = "list"
  description = "Ordered list of worker FQDNs (e.g. [node2.example.com, node3.example.com])"
}
@@ -25,21 +25,22 @@ resource "google_compute_global_forwarding_rule" "ingress-https" {

# HTTP proxy load balancer for ingress controllers
resource "google_compute_target_http_proxy" "ingress-http" {
  name        = "${var.cluster_name}-ingress-http"
  description = "Distribute HTTP load across ${var.cluster_name} workers"
  url_map     = "${google_compute_url_map.ingress-http.self_link}"
}

# TCP proxy load balancer for ingress controllers
resource "google_compute_target_tcp_proxy" "ingress-https" {
  name            = "${var.cluster_name}-ingress-https"
  description     = "Distribute HTTPS load across ${var.cluster_name} workers"
  backend_service = "${google_compute_backend_service.ingress-https.self_link}"
}

# HTTP URL Map (required)
resource "google_compute_url_map" "ingress-http" {
  name = "${var.cluster_name}-ingress-http"

  # Do not add host/path rules for applications here. Use Ingress resources.
  default_service = "${google_compute_backend_service.ingress-http.self_link}"
}
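A brief orientation for this hunk, expressed as a comment sketch. The backend services referenced here are defined elsewhere in the same file; the request-flow summary is an editorial gloss, not part of the commit:

# Ingress load balancing chain assembled around these resources (sketch):
#
#   google_compute_global_forwarding_rule.ingress-http
#     -> google_compute_target_http_proxy.ingress-http
#          -> google_compute_url_map.ingress-http           (single default_service)
#               -> google_compute_backend_service.ingress-http
#
#   google_compute_global_forwarding_rule.ingress-https
#     -> google_compute_target_tcp_proxy.ingress-https      (TCP proxy, so TLS can
#          -> google_compute_backend_service.ingress-https   terminate at the workers)
#
# The url_map deliberately has no host/path rules; routing decisions belong to
# Kubernetes Ingress resources served by the workers behind the backend services.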
@@ -90,7 +91,7 @@ resource "google_compute_health_check" "ingress" {
  unhealthy_threshold = 4

  http_health_check {
    port         = 10254
    request_path = "/healthz"
  }
}
@@ -57,7 +57,6 @@ resource "google_compute_firewall" "allow-apiserver" {
  target_tags = ["${var.cluster_name}-controller"]
}

# Calico BGP and IPIP
# https://docs.projectcalico.org/v2.5/reference/public-cloud/gce
resource "google_compute_firewall" "internal-calico" {
@@ -154,7 +153,7 @@ resource "google_compute_firewall" "allow-ingress" {
}

resource "google_compute_firewall" "google-health-checks" {
  name    = "${var.cluster_name}-google-health-checks"
  network = "${google_compute_network.network.name}"

  allow {
@@ -164,5 +163,5 @@ resource "google_compute_firewall" "google-health-checks" {

  # https://cloud.google.com/compute/docs/load-balancing/tcp-ssl/tcp-proxy#health-checking
  source_ranges = ["130.211.0.0/22", "35.191.0.0/16"]
  target_tags   = ["${var.cluster_name}-worker"]
}
@@ -2,7 +2,7 @@

output "ingress_static_ipv4" {
  description = "Global IPv4 address for proxy load balancing to the nearest Ingress controller"
  value       = "${google_compute_global_address.ingress-ipv4.address}"
}

# Outputs for worker pools
@@ -25,5 +25,5 @@ output "network_self_link" {

output "worker_instance_group" {
  description = "Full URL of the worker managed instance group"
  value       = "${module.workers.instance_group}"
}
@@ -1,4 +1,4 @@
output "instance_group" {
  description = "Full URL of the worker managed instance group"
  value       = "${google_compute_region_instance_group_manager.workers.instance_group}"
}
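A hedged example of consuming the worker_instance_group output from a caller's configuration; the module name, resource names, and health check values are illustrative and not taken from this commit:

# Attach the exported worker instance group to a caller-managed backend service.
resource "google_compute_health_check" "custom" {
  name = "custom-ingress-health"

  http_health_check {
    port         = 10254
    request_path = "/healthz"
  }
}

resource "google_compute_backend_service" "custom" {
  name        = "custom-ingress"
  protocol    = "HTTP"
  port_name   = "http" # assumes the instance group defines a named port "http"
  timeout_sec = 60

  backend {
    group = "${module.mercury.worker_instance_group}"
  }

  health_checks = ["${google_compute_health_check.custom.self_link}"]
}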