Fix terraform fmt

Dalton Hubble 2018-08-21 21:59:55 -07:00
parent 99e3721181
commit bdf1e6986e
11 changed files with 48 additions and 47 deletions
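For context, terraform fmt rewrites configuration into Terraform's canonical style; its most visible effect is column-aligning the "=" signs of adjacent arguments within a block. A minimal before/after sketch of the kind of alignment change in this commit, using the controllers profile that appears in the diff below:

# before: a single space around "="
resource "matchbox_profile" "controllers" {
  count = "${length(var.controller_names)}"
  raw_ignition = "${element(data.ct_config.controller-ignitions.*.rendered, count.index)}"
}

# after terraform fmt: "=" aligned within the block
resource "matchbox_profile" "controllers" {
  count        = "${length(var.controller_names)}"
  raw_ignition = "${element(data.ct_config.controller-ignitions.*.rendered, count.index)}"
}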

View File

@@ -116,20 +116,20 @@ resource "matchbox_profile" "flatcar-install" {
// Kubernetes Controller profiles
resource "matchbox_profile" "controllers" {
count = "${length(var.controller_names)}"
name = "${format("%s-controller-%s", var.cluster_name, element(var.controller_names, count.index))}"
count = "${length(var.controller_names)}"
name = "${format("%s-controller-%s", var.cluster_name, element(var.controller_names, count.index))}"
raw_ignition = "${element(data.ct_config.controller-ignitions.*.rendered, count.index)}"
}
data "ct_config" "controller-ignitions" {
count = "${length(var.controller_names)}"
content = "${element(data.template_file.controller-configs.*.rendered, count.index)}"
count = "${length(var.controller_names)}"
content = "${element(data.template_file.controller-configs.*.rendered, count.index)}"
pretty_print = false
# Must use direct lookup. Cannot use lookup(map, key) since it only works for flat maps
snippets = ["${local.clc_map[element(var.controller_names, count.index)]}"]
}
data "template_file" "controller-configs" {
count = "${length(var.controller_names)}"
@@ -147,16 +147,16 @@ data "template_file" "controller-configs" {
// Kubernetes Worker profiles
resource "matchbox_profile" "workers" {
count = "${length(var.worker_names)}"
name = "${format("%s-worker-%s", var.cluster_name, element(var.worker_names, count.index))}"
count = "${length(var.worker_names)}"
name = "${format("%s-worker-%s", var.cluster_name, element(var.worker_names, count.index))}"
raw_ignition = "${element(data.ct_config.worker-ignitions.*.rendered, count.index)}"
}
data "ct_config" "worker-ignitions" {
count = "${length(var.worker_names)}"
content = "${element(data.template_file.worker-configs.*.rendered, count.index)}"
count = "${length(var.worker_names)}"
content = "${element(data.template_file.worker-configs.*.rendered, count.index)}"
pretty_print = false
# Must use direct lookup. Cannot use lookup(map, key) since it only works for flat maps
snippets = ["${local.clc_map[element(var.worker_names, count.index)]}"]
}
@@ -179,12 +179,13 @@ locals {
# Default Container Linux Config snippets map each node name to list("\n") so
# all lookups succeed
clc_defaults = "${zipmap(concat(var.controller_names, var.worker_names), chunklist(data.template_file.clc-default-snippets.*.rendered, 1))}"
# Union of the default and user-specific snippets; later entries override earlier ones.
clc_map = "${merge(local.clc_defaults, var.clc_snippets)}"
}
// Horrible hack to generate a Terraform list of node count length
data "template_file" "clc-default-snippets" {
count = "${length(var.controller_names) + length(var.worker_names)}"
count = "${length(var.controller_names) + length(var.worker_names)}"
template = "\n"
}
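To make the locals above concrete, here is a rough sketch of what the default-snippets trick evaluates to, assuming the example node names from this module's variable descriptions (node1 as a controller, node2 and node3 as workers); this is an illustration, not real configuration:

# controller_names = ["node1"], worker_names = ["node2", "node3"]
# data.template_file.clc-default-snippets.*.rendered   => ["\n", "\n", "\n"]
# chunklist(..., 1)                                     => [["\n"], ["\n"], ["\n"]]
# zipmap(concat(controllers, workers), chunks)          => { node1 = ["\n"], node2 = ["\n"], node3 = ["\n"] }
# merge(local.clc_defaults, var.clc_snippets)           => user-supplied snippets replace a node's default entry
#
# Each profile then reads its snippets with a direct index,
#   snippets = ["${local.clc_map[element(var.controller_names, count.index)]}"]
# because, as the comment above notes, lookup(map, key) only works for flat maps.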

View File

@@ -24,39 +24,39 @@ variable "os_version" {
# Terraform's crude "type system" does not properly support lists of maps so we do this.
variable "controller_names" {
type = "list"
type = "list"
description = "Ordered list of controller names (e.g. [node1])"
}
variable "controller_macs" {
type = "list"
type = "list"
description = "Ordered list of controller identifying MAC addresses (e.g. [52:54:00:a1:9c:ae])"
}
variable "controller_domains" {
type = "list"
type = "list"
description = "Ordered list of controller FQDNs (e.g. [node1.example.com])"
}
variable "worker_names" {
type = "list"
type = "list"
description = "Ordered list of worker names (e.g. [node2, node3])"
}
variable "worker_macs" {
type = "list"
type = "list"
description = "Ordered list of worker identifying MAC addresses (e.g. [52:54:00:b2:2f:86, 52:54:00:c3:61:77])"
}
variable "worker_domains" {
type = "list"
type = "list"
description = "Ordered list of worker FQDNs (e.g. [node2.example.com, node3.example.com])"
}
variable "clc_snippets" {
type = "map"
type = "map"
description = "Map from machine names to lists of Container Linux Config snippets"
default = {}
}
# configuration

View File

@@ -25,32 +25,32 @@ EOD
# Terraform's crude "type system" does not properly support lists of maps so we do this.
variable "controller_names" {
type = "list"
type = "list"
description = "Ordered list of controller names (e.g. [node1])"
}
variable "controller_macs" {
type = "list"
type = "list"
description = "Ordered list of controller identifying MAC addresses (e.g. [52:54:00:a1:9c:ae])"
}
variable "controller_domains" {
type = "list"
type = "list"
description = "Ordered list of controller FQDNs (e.g. [node1.example.com])"
}
variable "worker_names" {
type = "list"
type = "list"
description = "Ordered list of worker names (e.g. [node2, node3])"
}
variable "worker_macs" {
type = "list"
type = "list"
description = "Ordered list of worker identifying MAC addresses (e.g. [52:54:00:b2:2f:86, 52:54:00:c3:61:77])"
}
variable "worker_domains" {
type = "list"
type = "list"
description = "Ordered list of worker FQDNs (e.g. [node2.example.com, node3.example.com])"
}
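Because these modules cannot accept a single list of per-node maps, callers pass several ordered lists that must stay index-aligned: entry i of each list describes the same machine. A hypothetical terraform.tfvars-style sketch using only the example values from the descriptions above:

controller_names   = ["node1"]
controller_macs    = ["52:54:00:a1:9c:ae"]
controller_domains = ["node1.example.com"]

worker_names   = ["node2", "node3"]
worker_macs    = ["52:54:00:b2:2f:86", "52:54:00:c3:61:77"]
worker_domains = ["node2.example.com", "node3.example.com"]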

View File

@@ -25,21 +25,22 @@ resource "google_compute_global_forwarding_rule" "ingress-https" {
# HTTP proxy load balancer for ingress controllers
resource "google_compute_target_http_proxy" "ingress-http" {
name = "${var.cluster_name}-ingress-http"
name = "${var.cluster_name}-ingress-http"
description = "Distribute HTTP load across ${var.cluster_name} workers"
url_map = "${google_compute_url_map.ingress-http.self_link}"
}
# TCP proxy load balancer for ingress controllers
resource "google_compute_target_tcp_proxy" "ingress-https" {
name = "${var.cluster_name}-ingress-https"
description = "Distribute HTTPS load across ${var.cluster_name} workers"
name = "${var.cluster_name}-ingress-https"
description = "Distribute HTTPS load across ${var.cluster_name} workers"
backend_service = "${google_compute_backend_service.ingress-https.self_link}"
}
# HTTP URL Map (required)
resource "google_compute_url_map" "ingress-http" {
name = "${var.cluster_name}-ingress-http"
# Do not add host/path rules for applications here. Use Ingress resources.
default_service = "${google_compute_backend_service.ingress-http.self_link}"
}
@@ -90,7 +91,7 @@ resource "google_compute_health_check" "ingress" {
unhealthy_threshold = 4
http_health_check {
port = 10254
request_path = "/healthz"
}
}

View File

@@ -57,7 +57,6 @@ resource "google_compute_firewall" "allow-apiserver" {
target_tags = ["${var.cluster_name}-controller"]
}
# Calico BGP and IPIP
# https://docs.projectcalico.org/v2.5/reference/public-cloud/gce
resource "google_compute_firewall" "internal-calico" {
@@ -154,7 +153,7 @@ resource "google_compute_firewall" "allow-ingress" {
}
resource "google_compute_firewall" "google-health-checks" {
name = "${var.cluster_name}-google-health-checks"
name = "${var.cluster_name}-google-health-checks"
network = "${google_compute_network.network.name}"
allow {
@@ -164,5 +163,5 @@ resource "google_compute_firewall" "google-health-checks" {
# https://cloud.google.com/compute/docs/load-balancing/tcp-ssl/tcp-proxy#health-checking
source_ranges = ["130.211.0.0/22", "35.191.0.0/16"]
target_tags = ["${var.cluster_name}-worker"]
}

View File

@@ -2,7 +2,7 @@
output "ingress_static_ipv4" {
description = "Global IPv4 address for proxy load balancing to the nearest Ingress controller"
value = "${google_compute_global_address.ingress-ipv4.address}"
value = "${google_compute_global_address.ingress-ipv4.address}"
}
# Outputs for worker pools
@@ -25,5 +25,5 @@ output "network_self_link" {
output "worker_instance_group" {
description = "Full URL of the worker managed instance group"
value = "${module.workers.instance_group}"
value = "${module.workers.instance_group}"
}

View File

@@ -1,4 +1,4 @@
output "instance_group" {
description = "Full URL of the worker managed instance group"
value = "${google_compute_region_instance_group_manager.workers.instance_group}"
value = "${google_compute_region_instance_group_manager.workers.instance_group}"
}

View File

@@ -25,21 +25,22 @@ resource "google_compute_global_forwarding_rule" "ingress-https" {
# HTTP proxy load balancer for ingress controllers
resource "google_compute_target_http_proxy" "ingress-http" {
name = "${var.cluster_name}-ingress-http"
name = "${var.cluster_name}-ingress-http"
description = "Distribute HTTP load across ${var.cluster_name} workers"
url_map = "${google_compute_url_map.ingress-http.self_link}"
}
# TCP proxy load balancer for ingress controllers
resource "google_compute_target_tcp_proxy" "ingress-https" {
name = "${var.cluster_name}-ingress-https"
description = "Distribute HTTPS load across ${var.cluster_name} workers"
name = "${var.cluster_name}-ingress-https"
description = "Distribute HTTPS load across ${var.cluster_name} workers"
backend_service = "${google_compute_backend_service.ingress-https.self_link}"
}
# HTTP URL Map (required)
resource "google_compute_url_map" "ingress-http" {
name = "${var.cluster_name}-ingress-http"
# Do not add host/path rules for applications here. Use Ingress resources.
default_service = "${google_compute_backend_service.ingress-http.self_link}"
}
@@ -90,7 +91,7 @@ resource "google_compute_health_check" "ingress" {
unhealthy_threshold = 4
http_health_check {
port = 10254
request_path = "/healthz"
}
}

View File

@@ -57,7 +57,6 @@ resource "google_compute_firewall" "allow-apiserver" {
target_tags = ["${var.cluster_name}-controller"]
}
# Calico BGP and IPIP
# https://docs.projectcalico.org/v2.5/reference/public-cloud/gce
resource "google_compute_firewall" "internal-calico" {
@@ -154,7 +153,7 @@ resource "google_compute_firewall" "allow-ingress" {
}
resource "google_compute_firewall" "google-health-checks" {
name = "${var.cluster_name}-google-health-checks"
name = "${var.cluster_name}-google-health-checks"
network = "${google_compute_network.network.name}"
allow {
@@ -164,5 +163,5 @@ resource "google_compute_firewall" "google-health-checks" {
# https://cloud.google.com/compute/docs/load-balancing/tcp-ssl/tcp-proxy#health-checking
source_ranges = ["130.211.0.0/22", "35.191.0.0/16"]
target_tags = ["${var.cluster_name}-worker"]
}

View File

@@ -2,7 +2,7 @@
output "ingress_static_ipv4" {
description = "Global IPv4 address for proxy load balancing to the nearest Ingress controller"
value = "${google_compute_global_address.ingress-ipv4.address}"
value = "${google_compute_global_address.ingress-ipv4.address}"
}
# Outputs for worker pools
@@ -25,5 +25,5 @@ output "network_self_link" {
output "worker_instance_group" {
description = "Full URL of the worker managed instance group"
value = "${module.workers.instance_group}"
value = "${module.workers.instance_group}"
}

View File

@@ -1,4 +1,4 @@
output "instance_group" {
description = "Full URL of the worker managed instance group"
value = "${google_compute_region_instance_group_manager.workers.instance_group}"
value = "${google_compute_region_instance_group_manager.workers.instance_group}"
}