Add Google Cloud worker instances to a target pool

* Background: A managed instance group of workers is used in backend
services for global load balancing (HTTP/HTTPS Ingress) and is output
for custom global load balancing use cases
* Add worker instances to a target pool load balancing TCP/UDP
applications (NodePort or proxied). Output as `worker_target_pool`
* Health check for workers with a healthy Ingress controller. Forward
rules (regional) to target pools don't support different external and
internal ports so choosing nodes with Ingress allows proxying as a
workaround
* A target pool is a logical grouping only. It doesn't add costs to
clusters or worker pools
This commit is contained in:
Dalton Hubble 2019-03-30 15:12:55 -07:00
parent ae3a8a5770
commit aaa8e0261a
6 changed files with 60 additions and 9 deletions

View File

@ -8,6 +8,9 @@ Notable changes between versions.
#### Google Cloud
* Add ability to load balance TCP/UDP applications ([#442](https://github.com/poseidon/typhoon/pull/442))
* Add worker instances to a target pool, output as `worker_target_pool`
* Health check for workers with Ingress controllers. Forward rules don't support differing internal/external ports, but some Ingress controllers support TCP/UDP proxy as a workaround
* Remove Haswell minimum CPU platform requirement ([#439](https://github.com/poseidon/typhoon/pull/439))
* Google Cloud API implements `min_cpu_platform` to mean "use exactly this CPU". Revert [#405](https://github.com/poseidon/typhoon/pull/405) added in v1.13.4.
* Fix error creating clusters in new regions without Haswell (e.g. europe-west2) ([#438](https://github.com/poseidon/typhoon/issues/438))

View File

@ -138,8 +138,8 @@ resource "google_compute_firewall" "allow-ingress" {
target_tags = ["${var.cluster_name}-worker"] target_tags = ["${var.cluster_name}-worker"]
} }
resource "google_compute_firewall" "google-health-checks" { resource "google_compute_firewall" "google-ingress-health-checks" {
name = "${var.cluster_name}-google-health-checks" name = "${var.cluster_name}-ingress-health"
network = "${google_compute_network.network.name}" network = "${google_compute_network.network.name}"
allow { allow {
@ -147,7 +147,19 @@ resource "google_compute_firewall" "google-health-checks" {
ports = [10254] ports = [10254]
} }
# https://cloud.google.com/compute/docs/load-balancing/tcp-ssl/tcp-proxy#health-checking # https://cloud.google.com/load-balancing/docs/health-check-concepts#method
source_ranges = ["130.211.0.0/22", "35.191.0.0/16"] source_ranges = [
target_tags = ["${var.cluster_name}-worker"] # Global LB health checks
"35.191.0.0/16",
"130.211.0.0/22",
# Region LB health checks
"35.191.0.0/16",
"209.85.152.0/22",
"209.85.204.0/22",
]
target_tags = ["${var.cluster_name}-worker"]
} }

View File

@ -33,6 +33,11 @@ output "network_self_link" {
# Outputs for custom load balancing # Outputs for custom load balancing
output "worker_instance_group" { output "worker_instance_group" {
description = "Full URL of the worker managed instance group" description = "Worker managed instance group full URL"
value = "${module.workers.instance_group}" value = "${module.workers.instance_group}"
} }
# Self link of the workers' regional target pool, exposed so users can
# attach their own TCP/UDP forwarding rules.
output "worker_target_pool" {
  description = "Worker target pool self link"
  value       = "${module.workers.target_pool}"
}

View File

@ -1,4 +1,13 @@
# Outputs for global load balancing
output "instance_group" { output "instance_group" {
description = "Full URL of the worker managed instance group" description = "Worker managed instance group full URL"
value = "${google_compute_region_instance_group_manager.workers.instance_group}" value = "${google_compute_region_instance_group_manager.workers.instance_group}"
} }
# Outputs for regional load balancing

# Self link of the target pool containing the worker instances.
output "target_pool" {
  description = "Worker target pool self link"
  value       = "${google_compute_target_pool.workers.self_link}"
}

View File

@ -0,0 +1,21 @@
# Target pool for TCP/UDP load balancing
resource "google_compute_target_pool" "workers" {
  name             = "${var.name}-worker-pool"
  session_affinity = "NONE"

  # Membership is gated on the HTTP health check defined in this module.
  health_checks = ["${google_compute_http_health_check.workers.name}"]
}
# HTTP Health Check (for TCP/UDP load balancing)
# Forward rules (regional) to target pools cannot map an external port to a
# different internal port, so the pool health-checks nodes running an Ingress
# controller, which may proxy or otherwise answer the probe.
resource "google_compute_http_health_check" "workers" {
  name        = "${var.name}-target-pool-health"
  description = "Health check for the worker target pool"

  # NOTE(review): 10254 /healthz appears to target the Ingress controller's
  # health endpoint — confirm against the Ingress addon in use.
  port         = 10254
  request_path = "/healthz"
}

View File

@ -1,4 +1,4 @@
# Regional managed instance group of workers # Managed instance group of workers
resource "google_compute_region_instance_group_manager" "workers" { resource "google_compute_region_instance_group_manager" "workers" {
name = "${var.name}-worker-group" name = "${var.name}-worker-group"
description = "Compute instance group of ${var.name} workers" description = "Compute instance group of ${var.name} workers"
@ -8,7 +8,8 @@ resource "google_compute_region_instance_group_manager" "workers" {
instance_template = "${google_compute_instance_template.worker.self_link}" instance_template = "${google_compute_instance_template.worker.self_link}"
region = "${var.region}" region = "${var.region}"
target_size = "${var.count}" target_size = "${var.count}"
target_pools = ["${google_compute_target_pool.workers.self_link}"]
named_port { named_port {
name = "http" name = "http"