typhoon/google-cloud/container-linux/kubernetes/network.tf
Dalton Hubble aaa8e0261a Add Google Cloud worker instances to a target pool
* Background: A managed instance group of workers is used in backend
services for global load balancing (HTTP/HTTPS Ingress) and is output
for custom global load balancing use cases
* Add worker instances to a target pool for load balancing TCP/UDP
applications (NodePort or proxied). Output the pool as
`worker_target_pool` (sketched below)
* Health check workers by probing for a healthy Ingress controller.
Forwarding rules (regional) to target pools don't support different
external and internal ports, so choosing nodes with a healthy Ingress
controller allows proxying as a workaround
* A target pool is a logical grouping only. It adds no cost to
clusters or worker pools
2019-04-01 21:03:48 -07:00
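
For context, a minimal sketch of the target pool and output this commit
describes. These resources live outside network.tf; the names and
attributes below are assumptions, not the actual definitions.

# Sketch (assumed names/attributes): worker target pool and module output.
resource "google_compute_target_pool" "workers" {
  name   = "${var.cluster_name}-worker-pool"
  region = "${var.region}"

  health_checks = ["${google_compute_http_health_check.workers.name}"]
}

output "worker_target_pool" {
  value = "${google_compute_target_pool.workers.self_link}"
}

Worker instance groups would register with the pool (e.g. via the
instance group manager's target_pools argument), which is why the pool
itself is only a logical grouping with no standalone cost.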

resource "google_compute_network" "network" {
name = "${var.cluster_name}"
description = "Network for the ${var.cluster_name} cluster"
auto_create_subnetworks = true
}
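# Allow SSH from anywhere to controllers and workers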
resource "google_compute_firewall" "allow-ssh" {
name = "${var.cluster_name}-allow-ssh"
network = "${google_compute_network.network.name}"
allow {
protocol = "tcp"
ports = [22]
}
source_ranges = ["0.0.0.0/0"]
target_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
}
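# Allow etcd client traffic (2379) and peer traffic (2380) between controllers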
resource "google_compute_firewall" "internal-etcd" {
name = "${var.cluster_name}-internal-etcd"
network = "${google_compute_network.network.name}"
allow {
protocol = "tcp"
ports = [2379, 2380]
}
source_tags = ["${var.cluster_name}-controller"]
target_tags = ["${var.cluster_name}-controller"]
}
# Allow Prometheus to scrape etcd metrics
resource "google_compute_firewall" "internal-etcd-metrics" {
  name    = "${var.cluster_name}-internal-etcd-metrics"
  network = "${google_compute_network.network.name}"

  allow {
    protocol = "tcp"
    ports    = [2381]
  }

  source_tags = ["${var.cluster_name}-worker"]
  target_tags = ["${var.cluster_name}-controller"]
}

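# Allow TLS access to the kube-apiserver from anywhere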
resource "google_compute_firewall" "allow-apiserver" {
name = "${var.cluster_name}-allow-apiserver"
network = "${google_compute_network.network.name}"
allow {
protocol = "tcp"
ports = [443]
}
source_ranges = ["0.0.0.0/0"]
target_tags = ["${var.cluster_name}-controller"]
}
# BGP and IPIP
# https://docs.projectcalico.org/latest/reference/public-cloud/gce
resource "google_compute_firewall" "internal-bgp" {
  count = "${var.networking != "flannel" ? 1 : 0}"

  name    = "${var.cluster_name}-internal-bgp"
  network = "${google_compute_network.network.name}"

  allow {
    protocol = "tcp"
    ports    = ["179"]
  }

  allow {
    protocol = "ipip"
  }

  source_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
  target_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
}

# flannel VXLAN traffic between nodes (UDP 8472)
resource "google_compute_firewall" "internal-flannel" {
  count = "${var.networking == "flannel" ? 1 : 0}"

  name    = "${var.cluster_name}-internal-flannel"
  network = "${google_compute_network.network.name}"

  allow {
    protocol = "udp"
    ports    = [8472]
  }

  source_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
  target_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
}

# Allow Prometheus to scrape node-exporter daemonset
resource "google_compute_firewall" "internal-node-exporter" {
  name    = "${var.cluster_name}-internal-node-exporter"
  network = "${google_compute_network.network.name}"

  allow {
    protocol = "tcp"
    ports    = [9100]
  }

  source_tags = ["${var.cluster_name}-worker"]
  target_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
}

# Allow apiserver to access kubelets for exec, log, port-forward
resource "google_compute_firewall" "internal-kubelet" {
  name    = "${var.cluster_name}-internal-kubelet"
  network = "${google_compute_network.network.name}"

  allow {
    protocol = "tcp"
    ports    = [10250]
  }

  # allow Prometheus to scrape kubelet metrics too
  source_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
  target_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
}

# Workers
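# Allow HTTP/HTTPS Ingress traffic from anywhere to workers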
resource "google_compute_firewall" "allow-ingress" {
name = "${var.cluster_name}-allow-ingress"
network = "${google_compute_network.network.name}"
allow {
protocol = "tcp"
ports = [80, 443]
}
source_ranges = ["0.0.0.0/0"]
target_tags = ["${var.cluster_name}-worker"]
}
resource "google_compute_firewall" "google-ingress-health-checks" {
name = "${var.cluster_name}-ingress-health"
network = "${google_compute_network.network.name}"
allow {
protocol = "tcp"
ports = [10254]
}
# https://cloud.google.com/load-balancing/docs/health-check-concepts#method
source_ranges = [
# Global LB health checks
"35.191.0.0/16",
"130.211.0.0/22",
# Region LB health checks
"35.191.0.0/16",
"209.85.152.0/22",
"209.85.204.0/22",
]
target_tags = ["${var.cluster_name}-worker"]
}
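
The probe ranges admitted above serve the legacy HTTP health check that
the worker target pool references. A minimal sketch, assuming the nginx
Ingress controller's health endpoint on port 10254; the resource name,
description, and request path are assumptions.

# Sketch (assumed values): health check for workers with a healthy
# Ingress controller, matching the firewall rule above.
resource "google_compute_http_health_check" "workers" {
  name        = "${var.cluster_name}-worker-health"
  description = "Health check for worker nodes"

  port         = 10254
  request_path = "/healthz"
}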