Improve internal firewall rules on Google Cloud

* Whitelist internal traffic between controllers and workers
* Switch to tag-based firewall policies rather than source IP
This commit is contained in:
Dalton Hubble 2017-11-07 23:21:12 -08:00
parent b1b611b22c
commit 451fd86470
4 changed files with 94 additions and 22 deletions

View File

@ -14,6 +14,7 @@ Notable changes between versions.
* Change etcd to run on-host, across controllers (etcd-member.service) * Change etcd to run on-host, across controllers (etcd-member.service)
* Change controller instances to automatically span zones in the region * Change controller instances to automatically span zones in the region
* Change worker managed instance group to automatically span zones in the region * Change worker managed instance group to automatically span zones in the region
* Improve internal firewall rules and use tag-based firewall policies
* Remove support for self-hosted etcd * Remove support for self-hosted etcd
* Remove the `zone` required variable * Remove the `zone` required variable
* Remove the `controller_preemptible` optional variable * Remove the `controller_preemptible` optional variable

View File

@ -48,6 +48,7 @@ resource "google_compute_instance" "controllers" {
} }
can_ip_forward = true can_ip_forward = true
tags = ["${var.cluster_name}-controller"]
} }
# Controller Container Linux Config # Controller Container Linux Config

View File

@ -4,18 +4,6 @@ resource "google_compute_network" "network" {
auto_create_subnetworks = true auto_create_subnetworks = true
} }
# NOTE(review): removed in this commit — this open 0.0.0.0/0 HTTP/HTTPS rule had
# no target tags, so it applied to every instance in the network. It is re-added
# below scoped to worker nodes only via target_tags.
resource "google_compute_firewall" "allow-ingress" {
# Rule name is prefixed with the cluster name to avoid collisions between clusters.
name = "${var.cluster_name}-allow-ingress"
network = "${google_compute_network.network.name}"
allow {
protocol = "tcp"
ports = [80, 443]
}
# Any source address; combined with the missing target_tags this exposed 80/443 cluster-wide.
source_ranges = ["0.0.0.0/0"]
}
resource "google_compute_firewall" "allow-ssh" { resource "google_compute_firewall" "allow-ssh" {
name = "${var.cluster_name}-allow-ssh" name = "${var.cluster_name}-allow-ssh"
network = "${google_compute_network.network.name}" network = "${google_compute_network.network.name}"
@ -26,31 +14,55 @@ resource "google_compute_firewall" "allow-ssh" {
} }
source_ranges = ["0.0.0.0/0"] source_ranges = ["0.0.0.0/0"]
target_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
} }
resource "google_compute_firewall" "allow-internal" { resource "google_compute_firewall" "allow-apiserver" {
name = "${var.cluster_name}-allow-internal" name = "${var.cluster_name}-allow-apiserver"
network = "${google_compute_network.network.name}" network = "${google_compute_network.network.name}"
allow { allow {
protocol = "tcp" protocol = "tcp"
ports = ["1-65535"] ports = [443]
} }
source_ranges = ["0.0.0.0/0"]
target_tags = ["${var.cluster_name}-controller"]
}
resource "google_compute_firewall" "allow-ingress" {
name = "${var.cluster_name}-allow-ingress"
network = "${google_compute_network.network.name}"
allow { allow {
protocol = "udp" protocol = "tcp"
ports = ["1-65535"] ports = [80, 443]
} }
source_ranges = ["10.0.0.0/8"] source_ranges = ["0.0.0.0/0"]
target_tags = ["${var.cluster_name}-worker"]
}
resource "google_compute_firewall" "internal-etcd" {
name = "${var.cluster_name}-internal-etcd"
network = "${google_compute_network.network.name}"
allow {
protocol = "tcp"
ports = [2380]
}
source_tags = ["${var.cluster_name}-controller"]
target_tags = ["${var.cluster_name}-controller"]
} }
# Calico BGP and IPIP # Calico BGP and IPIP
# https://docs.projectcalico.org/v2.5/reference/public-cloud/gce # https://docs.projectcalico.org/v2.5/reference/public-cloud/gce
resource "google_compute_firewall" "allow-calico" { resource "google_compute_firewall" "internal-calico" {
count = "${var.networking == "calico" ? 1 : 0}" count = "${var.networking == "calico" ? 1 : 0}"
name = "${var.cluster_name}-allow-calico" name = "${var.cluster_name}-internal-calico"
network = "${google_compute_network.network.name}" network = "${google_compute_network.network.name}"
allow { allow {
@ -62,5 +74,63 @@ resource "google_compute_firewall" "allow-calico" {
protocol = "ipip" protocol = "ipip"
} }
source_ranges = ["10.0.0.0/8"] source_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
target_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
}
# flannel: allow the UDP overlay traffic (port 8472 — presumably flannel's
# VXLAN backend port; confirm against the flannel config) between all cluster
# nodes. Created only when the cluster uses flannel networking.
resource "google_compute_firewall" "internal-flannel" {
# Conditional resource: 1 instance when var.networking == "flannel", else none.
count = "${var.networking == "flannel" ? 1 : 0}"
name = "${var.cluster_name}-internal-flannel"
network = "${google_compute_network.network.name}"
allow {
protocol = "udp"
ports = [8472]
}
# Tag-based (not IP-based) policy: traffic is allowed only when BOTH endpoints
# carry a cluster controller or worker tag.
source_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
target_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
}
# Allow prometheus (workload) to scrape the node-exporter daemonset on TCP 9100.
# Scrapes originate from workers (where the Prometheus pod runs) and may target
# any node, so targets include both controllers and workers.
resource "google_compute_firewall" "internal-node-exporter" {
name = "${var.cluster_name}-internal-node-exporter"
network = "${google_compute_network.network.name}"
allow {
protocol = "tcp"
# 9100 — node-exporter's listen port; metrics endpoint scraped by Prometheus.
ports = [9100]
}
# Source restricted to workers only — narrower than the other internal rules.
source_tags = ["${var.cluster_name}-worker"]
target_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
}
# kubelet API (TCP 10250) to allow kubectl exec and log. Only controllers
# (where the apiserver runs and proxies exec/logs requests) may originate
# connections; every node's kubelet is a valid target.
resource "google_compute_firewall" "internal-kubelet" {
name = "${var.cluster_name}-internal-kubelet"
network = "${google_compute_network.network.name}"
allow {
protocol = "tcp"
# 10250 — the kubelet's authenticated read/write API port.
ports = [10250]
}
# Deliberately controller-only source: workers never need to call the kubelet API.
source_tags = ["${var.cluster_name}-controller"]
target_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
}
resource "google_compute_firewall" "internal-kubelet-readonly" {
name = "${var.cluster_name}-internal-kubelet-readonly"
network = "${google_compute_network.network.name}"
allow {
protocol = "tcp"
ports = [10255]
}
source_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
target_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
} }

View File

@ -67,7 +67,7 @@ resource "google_compute_instance_template" "worker" {
can_ip_forward = true can_ip_forward = true
tags = ["worker"] tags = ["worker", "${var.cluster_name}-worker"]
lifecycle { lifecycle {
# To update an Instance Template, Terraform should replace the existing resource # To update an Instance Template, Terraform should replace the existing resource