Improve internal firewall rules on Google Cloud

* Whitelist internal traffic between controllers and workers
* Switch to tag-based firewall policies rather than source IP ranges (see the sketch below)
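As a rough illustration of the tag-based pattern this commit moves toward (a sketch only, not part of the diff; the `example-*` names and port are placeholders), a rule matches traffic by instance network tags instead of a broad source CIDR, so it only applies to instances carrying those tags:

# Sketch: match traffic by network tags rather than a source IP range.
# The "example-*" tags are placeholders; the commit itself tags instances
# with "${var.cluster_name}-controller" and "${var.cluster_name}-worker".
resource "google_compute_firewall" "example-internal" {
  name    = "example-internal"
  network = "${google_compute_network.network.name}"

  allow {
    protocol = "tcp"
    ports    = [10250]
  }

  # rather than: source_ranges = ["10.0.0.0/8"]
  source_tags = ["example-controller"]
  target_tags = ["example-worker"]
}

For tag-based rules to match, the instances themselves must carry the tags, which is why the commit also adds cluster-prefixed tags to the controller instances and the worker instance template below.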
This commit is contained in:
parent b1b611b22c
commit 451fd86470

@@ -14,6 +14,7 @@ Notable changes between versions.
 * Change etcd to run on-host, across controllers (etcd-member.service)
 * Change controller instances to automatically span zones in the region
 * Change worker managed instance group to automatically span zones in the region
+* Improve internal firewall rules and use tag-based firewall policies
 * Remove support for self-hosted etcd
 * Remove the `zone` required variable
 * Remove the `controller_preemptible` optional variable

@@ -48,6 +48,7 @@ resource "google_compute_instance" "controllers" {
   }
 
   can_ip_forward = true
+  tags           = ["${var.cluster_name}-controller"]
 }
 
 # Controller Container Linux Config

@@ -4,18 +4,6 @@ resource "google_compute_network" "network" {
   auto_create_subnetworks = true
 }
 
-resource "google_compute_firewall" "allow-ingress" {
-  name    = "${var.cluster_name}-allow-ingress"
-  network = "${google_compute_network.network.name}"
-
-  allow {
-    protocol = "tcp"
-    ports    = [80, 443]
-  }
-
-  source_ranges = ["0.0.0.0/0"]
-}
-
 resource "google_compute_firewall" "allow-ssh" {
   name    = "${var.cluster_name}-allow-ssh"
   network = "${google_compute_network.network.name}"

@@ -26,31 +14,55 @@ resource "google_compute_firewall" "allow-ssh" {
   }
 
   source_ranges = ["0.0.0.0/0"]
+  target_tags   = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
 }
 
-resource "google_compute_firewall" "allow-internal" {
-  name    = "${var.cluster_name}-allow-internal"
+resource "google_compute_firewall" "allow-apiserver" {
+  name    = "${var.cluster_name}-allow-apiserver"
   network = "${google_compute_network.network.name}"
 
   allow {
     protocol = "tcp"
-    ports    = ["1-65535"]
+    ports    = [443]
   }
 
+  source_ranges = ["0.0.0.0/0"]
+  target_tags   = ["${var.cluster_name}-controller"]
+}
+
+
+resource "google_compute_firewall" "allow-ingress" {
+  name    = "${var.cluster_name}-allow-ingress"
+  network = "${google_compute_network.network.name}"
+
   allow {
-    protocol = "udp"
-    ports    = ["1-65535"]
+    protocol = "tcp"
+    ports    = [80, 443]
   }
 
-  source_ranges = ["10.0.0.0/8"]
+  source_ranges = ["0.0.0.0/0"]
+  target_tags   = ["${var.cluster_name}-worker"]
 }
 
+resource "google_compute_firewall" "internal-etcd" {
+  name    = "${var.cluster_name}-internal-etcd"
+  network = "${google_compute_network.network.name}"
+
+  allow {
+    protocol = "tcp"
+    ports    = [2380]
+  }
+
+  source_tags = ["${var.cluster_name}-controller"]
+  target_tags = ["${var.cluster_name}-controller"]
+}
+
 # Calico BGP and IPIP
 # https://docs.projectcalico.org/v2.5/reference/public-cloud/gce
-resource "google_compute_firewall" "allow-calico" {
+resource "google_compute_firewall" "internal-calico" {
   count = "${var.networking == "calico" ? 1 : 0}"
 
-  name    = "${var.cluster_name}-allow-calico"
+  name    = "${var.cluster_name}-internal-calico"
   network = "${google_compute_network.network.name}"
 
   allow {

@@ -62,5 +74,63 @@ resource "google_compute_firewall" "allow-calico" {
     protocol = "ipip"
   }
 
-  source_ranges = ["10.0.0.0/8"]
+  source_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
+  target_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
 }
+
+# flannel
+resource "google_compute_firewall" "internal-flannel" {
+  count = "${var.networking == "flannel" ? 1 : 0}"
+
+  name    = "${var.cluster_name}-internal-flannel"
+  network = "${google_compute_network.network.name}"
+
+  allow {
+    protocol = "udp"
+    ports    = [8472]
+  }
+
+  source_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
+  target_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
+}
+
+# Allow prometheus (workload) to scrape node-exporter daemonset
+resource "google_compute_firewall" "internal-node-exporter" {
+  name    = "${var.cluster_name}-internal-node-exporter"
+  network = "${google_compute_network.network.name}"
+
+  allow {
+    protocol = "tcp"
+    ports    = [9100]
+  }
+
+  source_tags = ["${var.cluster_name}-worker"]
+  target_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
+}
+
+# kubelet API to allow kubectl exec and log
+resource "google_compute_firewall" "internal-kubelet" {
+  name    = "${var.cluster_name}-internal-kubelet"
+  network = "${google_compute_network.network.name}"
+
+  allow {
+    protocol = "tcp"
+    ports    = [10250]
+  }
+
+  source_tags = ["${var.cluster_name}-controller"]
+  target_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
+}
+
+resource "google_compute_firewall" "internal-kubelet-readonly" {
+  name    = "${var.cluster_name}-internal-kubelet-readonly"
+  network = "${google_compute_network.network.name}"
+
+  allow {
+    protocol = "tcp"
+    ports    = [10255]
+  }
+
+  source_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
+  target_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
+}

@@ -67,7 +67,7 @@ resource "google_compute_instance_template" "worker" {
 
   can_ip_forward = true
 
-  tags = ["worker"]
+  tags = ["worker", "${var.cluster_name}-worker"]
 
   lifecycle {
     # To update an Instance Template, Terraform should replace the existing resource