Mirror of https://github.com/puppetmaster/typhoon.git, synced 2024-12-26 07:29:32 +01:00
ad2e4311d1
Allow multi-controller clusters on Google Cloud

* GCP regional network load balancers have a long-open bug in which requests originating from a backend instance are routed to that instance itself, regardless of whether its health check passes. As a result, only the 0th controller node registers. We've recommended just using single-master GCP clusters for a while.
* https://issuetracker.google.com/issues/67366622
* Work around the issue by switching to a GCP TCP Proxy load balancer. A TCP proxy LB routes traffic to a (global) backend service of instance group backends. In our case, spread controllers across 3 zones (all regions have 3+ zones) and organize them into 3 zonal unmanaged instance groups that serve as backends. This allows multi-controller cluster creation.
* GCP network load balancers only allowed legacy HTTP health checks, so kubelet port 10255 was checked as an approximation of controller health. Replace this with TCP apiserver health checks to detect unhealthy or unresponsive apiservers.
* Drawbacks: GCP provision time increases, tailed logs now time out (a similar tradeoff exists on AWS), and controllers only span 3 zones instead of the exact number of zones in the region.
* The workaround in Typhoon has been known and posted for 5 months, but there still appears to be no better alternative. It's probably time to support multi-master and accept the downsides.
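The controller instances referenced by the zonal instance groups in the file below are defined elsewhere in the module, not in this file. Presumably they are assigned to zones round-robin so that each zonal instance group picks up its share of controllers; a minimal sketch of that pattern, assuming hypothetical inputs var.controller_count and var.controller_type alongside the module's local.zones list:

resource "google_compute_instance" "controllers" {
  count = "${var.controller_count}"                       # hypothetical input: number of controllers
  name  = "${var.cluster_name}-controller-${count.index}"
  zone  = "${element(local.zones, count.index)}"          # round-robin across the region's zones

  machine_type = "${var.controller_type}"                 # hypothetical input: machine type

  # boot_disk, network_interface, metadata, tags, etc. omitted; the module's
  # real controller definition lives in its own file
}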
96 lines
2.8 KiB
HCL
# TCP Proxy load balancer DNS record
resource "google_dns_record_set" "apiserver" {
  # DNS Zone name where record should be created
  managed_zone = "${var.dns_zone_name}"

  # DNS record
  name = "${format("%s.%s.", var.cluster_name, var.dns_zone)}"
  type = "A"
  ttl  = 300

  # IPv4 address of apiserver TCP Proxy load balancer
  rrdatas = ["${google_compute_global_address.apiserver-ipv4.address}"]
}
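
# Note: var.cluster_name, var.dns_zone and var.dns_zone_name are module inputs
# declared elsewhere (e.g. the module's variables file). A minimal sketch,
# assuming plain string variables:
variable "cluster_name" {
  type        = "string"
  description = "Unique cluster name"
}

variable "dns_zone" {
  type        = "string"
  description = "Google Cloud DNS zone domain, e.g. google-cloud.example.com"
}

variable "dns_zone_name" {
  type        = "string"
  description = "Google Cloud DNS zone name, e.g. example-zone"
}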

# Static IPv4 address for the TCP Proxy Load Balancer
resource "google_compute_global_address" "apiserver-ipv4" {
  name       = "${var.cluster_name}-apiserver-ip"
  ip_version = "IPV4"
}

# Forward IPv4 TCP traffic to the TCP proxy load balancer
resource "google_compute_global_forwarding_rule" "apiserver" {
  name        = "${var.cluster_name}-apiserver"
  ip_address  = "${google_compute_global_address.apiserver-ipv4.address}"
  ip_protocol = "TCP"
  port_range  = "443"
  target      = "${google_compute_target_tcp_proxy.apiserver.self_link}"
}

# Global TCP Proxy Load Balancer for apiservers
resource "google_compute_target_tcp_proxy" "apiserver" {
  name            = "${var.cluster_name}-apiserver"
  description     = "Distribute TCP load across ${var.cluster_name} controllers"
  backend_service = "${google_compute_backend_service.apiserver.self_link}"
}

# Global backend service backed by unmanaged instance groups
resource "google_compute_backend_service" "apiserver" {
  name        = "${var.cluster_name}-apiserver"
  description = "${var.cluster_name} apiserver service"

  protocol         = "TCP"
  port_name        = "apiserver"
  session_affinity = "NONE"
  timeout_sec      = "60"

  # controller(s) spread across zonal instance groups
  backend {
    group = "${google_compute_instance_group.controllers.0.self_link}"
  }

  backend {
    group = "${google_compute_instance_group.controllers.1.self_link}"
  }

  backend {
    group = "${google_compute_instance_group.controllers.2.self_link}"
  }

  health_checks = ["${google_compute_health_check.apiserver.self_link}"]
}
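
# Note: port_name = "apiserver" above resolves to the named_port declared on
# each zonal instance group below, and health_checks points at the TCP health
# check defined at the end of this file.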

# Instance group of heterogeneous (unmanaged) controller instances
resource "google_compute_instance_group" "controllers" {
  count = "${length(local.zones)}"

  name = "${format("%s-controllers-%s", var.cluster_name, element(local.zones, count.index))}"
  zone = "${element(local.zones, count.index)}"

  named_port {
    name = "apiserver"
    port = "443"
  }

  # add instances in the zone into the instance group
  instances = [
    "${matchkeys(google_compute_instance.controllers.*.self_link,
                 google_compute_instance.controllers.*.zone,
                 list(element(local.zones, count.index)))}"
  ]
}
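
# Note: local.zones is defined elsewhere in the module. Presumably it is
# derived from the zones available in the chosen region; a minimal sketch,
# assuming a var.region input (the backend service above only wires up the
# first three instance groups):
data "google_compute_zones" "all" {
  region = "${var.region}"   # assumed module input
}

locals {
  zones = "${data.google_compute_zones.all.names}"
}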

# TCP health check for apiserver
resource "google_compute_health_check" "apiserver" {
  name        = "${var.cluster_name}-apiserver-tcp-health"
  description = "TCP health check for kube-apiserver"

  timeout_sec        = 5
  check_interval_sec = 5

  healthy_threshold   = 1
  unhealthy_threshold = 3

  tcp_health_check {
    port = "443"
  }
}
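
# Note: Google TCP Proxy load balancing and health checks originate from
# Google's documented source ranges 130.211.0.0/22 and 35.191.0.0/16, so a
# firewall rule elsewhere in the module must admit them to the controllers on
# port 443. A minimal sketch, assuming a hypothetical network input and
# controller tag:
resource "google_compute_firewall" "allow-apiserver-proxy" {
  name    = "${var.cluster_name}-apiserver-proxy"
  network = "${var.network_name}"   # hypothetical input: cluster network name

  allow {
    protocol = "tcp"
    ports    = ["443"]
  }

  # documented source ranges for Google health checks and proxy load balancers
  source_ranges = ["130.211.0.0/22", "35.191.0.0/16"]
  target_tags   = ["${var.cluster_name}-controller"]   # assumed controller tag
}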