Fix overly strict firewall for GCP "worker pools"

* Fix an issue where worker firewall rules didn't apply to
additional workers attached to a GCP cluster using the new
"worker pools" feature (unreleased, #148). This resolves host
connection timeouts and pods not being scheduled to attached
worker pools.
* Add a `name` field to the GCP internal worker module to
represent the unique name of the worker pool
* Use the `cluster_name` field of the GCP internal worker module
to pass the name of the cluster to which the workers should be
attached (see the usage sketch below)
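
With this split, an additional worker pool would be created by instantiating the
internal workers module alongside the cluster, giving the pool a unique `name` and
the `cluster_name` of the cluster it attaches to, so its instances carry both the
pool tag and the cluster tag (as in the workers.tf change below). A minimal sketch
only; the module source path and all example values (pool name, region, network,
kubeconfig, SSH key) are illustrative placeholders, not part of this commit:

module "yavin-worker-pool" {
  # path to the GCP internal workers module (placeholder)
  source = "workers"

  # unique pool name and the cluster the pool attaches to
  name         = "yavin-16x"
  cluster_name = "yavin"

  # GCE
  region       = "us-central1"
  network      = "yavin"            # name of the cluster's google_compute_network
  count        = 2
  machine_type = "n1-standard-16"
  os_image     = "coreos-stable"

  # configuration
  kubeconfig         = "..."        # Kubelet kubeconfig generated for the cluster
  ssh_authorized_key = "ssh-rsa AAAAB3Nz..."
}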
Dalton Hubble
2018-03-03 16:21:38 -08:00
parent da6aafe816
commit 45b556c08f
5 changed files with 43 additions and 31 deletions

View File

@@ -3,11 +3,11 @@ module "controllers" {
  cluster_name = "${var.cluster_name}"

  # GCE
- network       = "${google_compute_network.network.name}"
- count         = "${var.controller_count}"
  region        = "${var.region}"
+ network       = "${google_compute_network.network.name}"
  dns_zone      = "${var.dns_zone}"
  dns_zone_name = "${var.dns_zone_name}"
+ count         = "${var.controller_count}"
  machine_type  = "${var.machine_type}"
  os_image      = "${var.os_image}"
@@ -21,11 +21,12 @@ module "controllers" {
module "workers" {
  source = "workers"
+ name         = "${var.cluster_name}"
  cluster_name = "${var.cluster_name}"

  # GCE
- network      = "${google_compute_network.network.name}"
  region       = "${var.region}"
+ network      = "${google_compute_network.network.name}"
  count        = "${var.worker_count}"
  machine_type = "${var.machine_type}"
  os_image     = "${var.os_image}"

View File

@@ -1,18 +1,18 @@
# Static IPv4 address for the Network Load Balancer
resource "google_compute_address" "ingress-ip" {
- name = "${var.cluster_name}-ingress-ip"
+ name = "${var.name}-ingress-ip"
}

# Network Load Balancer (i.e. forwarding rules)
resource "google_compute_forwarding_rule" "worker-http-lb" {
- name       = "${var.cluster_name}-worker-http-rule"
+ name       = "${var.name}-worker-http-rule"
  ip_address = "${google_compute_address.ingress-ip.address}"
  port_range = "80"
  target     = "${google_compute_target_pool.workers.self_link}"
}

resource "google_compute_forwarding_rule" "worker-https-lb" {
- name       = "${var.cluster_name}-worker-https-rule"
+ name       = "${var.name}-worker-https-rule"
  ip_address = "${google_compute_address.ingress-ip.address}"
  port_range = "443"
  target     = "${google_compute_target_pool.workers.self_link}"
@@ -20,7 +20,7 @@ resource "google_compute_forwarding_rule" "worker-https-lb" {
# Network Load Balancer target pool of instances.
resource "google_compute_target_pool" "workers" {
- name = "${var.cluster_name}-worker-pool"
+ name = "${var.name}-worker-pool"

  health_checks = [
    "${google_compute_http_health_check.ingress.name}",
@@ -31,7 +31,7 @@ resource "google_compute_target_pool" "workers" {
# Ingress HTTP Health Check
resource "google_compute_http_health_check" "ingress" {
- name        = "${var.cluster_name}-ingress-health"
+ name        = "${var.name}-ingress-health"
  description = "Health check Ingress controller health host port"

  timeout_sec = 5

View File

@@ -1,11 +1,16 @@
- variable "cluster_name" {
+ variable "name" {
    type        = "string"
-   description = "Unique cluster name"
+   description = "Unique name"
  }

- variable "ssh_authorized_key" {
+ variable "cluster_name" {
    type        = "string"
-   description = "SSH public key for logging in as user 'core'"
+   description = "Cluster name"
  }

+ variable "region" {
+   type        = "string"
+   description = "Google Cloud region (e.g. us-central1, see `gcloud compute regions list`)."
+ }

variable "network" {
@@ -21,11 +26,6 @@ variable "count" {
  description = "Number of worker compute instances the instance group should manage"
}

- variable "region" {
-   type        = "string"
-   description = "Google Cloud region (e.g. us-central1, see `gcloud compute regions list`)."
- }

variable "machine_type" {
  type    = "string"
  default = "n1-standard-1"
@@ -52,6 +52,16 @@ variable "preemptible" {
# configuration
+ variable "kubeconfig" {
+   type        = "string"
+   description = "Generated Kubelet kubeconfig"
+ }

+ variable "ssh_authorized_key" {
+   type        = "string"
+   description = "SSH public key for logging in as user 'core'"
+ }

variable "service_cidr" {
  description = <<EOD
CIDR IP range to assign Kubernetes services.
@@ -67,8 +77,3 @@ variable "cluster_domain_suffix" {
  type    = "string"
  default = "cluster.local"
}

- variable "kubeconfig" {
-   type        = "string"
-   description = "Generated Kubelet kubeconfig"
- }

View File

@@ -1,11 +1,11 @@
# Regional managed instance group maintains a homogeneous set of workers that
# span the zones in the region.
resource "google_compute_region_instance_group_manager" "workers" {
- name        = "${var.cluster_name}-worker-group"
- description = "Compute instance group of ${var.cluster_name} workers"
+ name        = "${var.name}-worker-group"
+ description = "Compute instance group of ${var.name} workers"

  # instance name prefix for instances in the group
- base_instance_name = "${var.cluster_name}-worker"
+ base_instance_name = "${var.name}-worker"
  instance_template  = "${google_compute_instance_template.worker.self_link}"
  region             = "${var.region}"
@@ -35,7 +35,7 @@ data "ct_config" "worker_ign" {
}

resource "google_compute_instance_template" "worker" {
- name_prefix  = "${var.cluster_name}-worker-"
+ name_prefix  = "${var.name}-worker-"
  description  = "Worker Instance template"
  machine_type = "${var.machine_type}"
@@ -64,7 +64,7 @@ resource "google_compute_instance_template" "worker" {
  can_ip_forward = true

- tags           = ["worker", "${var.cluster_name}-worker"]
+ tags           = ["worker", "${var.cluster_name}-worker", "${var.name}-worker"]

  lifecycle {
    # To update an Instance Template, Terraform should replace the existing resource