typhoon/google-cloud/container-linux/kubernetes/workers/workers.tf
Dalton Hubble 45b556c08f Fix overly strict firewall for GCP "worker pools"
* Fix issue where worker firewall rules didn't apply to
additional workers attached to a GCP cluster using the new
"worker pools" feature (unreleased, #148). Solves host
connection timeouts and pods not being scheduled to attached
worker pools.
* Add `name` field to GCP internal worker module to represent
the unique name of the worker pool
* Use `cluster_name` field of GCP internal worker module for
passing the name of the cluster to which workers should be
attached
2018-03-03 17:40:17 -08:00
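
For context, an attached worker pool is declared by instantiating this internal workers module once per pool, passing a unique `name` plus the `cluster_name` of the cluster the workers should join. A rough usage sketch follows; the module source, pool and cluster names, and variable values are illustrative assumptions, not taken from this commit:

# Hypothetical usage sketch: the module source and all values below are
# placeholders for illustration, not copied from the repository.
module "yavin-worker-pool" {
  source = "git::https://github.com/poseidon/typhoon//google-cloud/container-linux/kubernetes/workers"

  # unique name of this worker pool
  name = "yavin-pool"

  # cluster to which these workers should attach (drives the shared firewall tag)
  cluster_name = "yavin"

  region             = "us-central1"
  network            = "yavin"
  count              = 2
  os_image           = "coreos-stable"
  kubeconfig         = "..."               # placeholder: cluster kubeconfig
  ssh_authorized_key = "ssh-rsa AAAA..."   # placeholder: your public key

  # remaining module variables omitted; see the var.* references in workers.tf
}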

# Regional managed instance group maintains a homogeneous set of workers that
# span the zones in the region.
resource "google_compute_region_instance_group_manager" "workers" {
  name        = "${var.name}-worker-group"
  description = "Compute instance group of ${var.name} workers"

  # instance name prefix for instances in the group
  base_instance_name = "${var.name}-worker"
  instance_template  = "${google_compute_instance_template.worker.self_link}"
  region             = "${var.region}"

  target_size = "${var.count}"

  # target pool to which instances in the group should be added
  target_pools = [
    "${google_compute_target_pool.workers.self_link}",
  ]
}

# Worker Container Linux Config
data "template_file" "worker_config" {
  template = "${file("${path.module}/cl/worker.yaml.tmpl")}"

  vars = {
    kubeconfig            = "${indent(10, var.kubeconfig)}"
    ssh_authorized_key    = "${var.ssh_authorized_key}"
    k8s_dns_service_ip    = "${cidrhost(var.service_cidr, 10)}"
    cluster_domain_suffix = "${var.cluster_domain_suffix}"
  }
}

data "ct_config" "worker_ign" {
  content      = "${data.template_file.worker_config.rendered}"
  pretty_print = false
}

resource "google_compute_instance_template" "worker" {
  name_prefix  = "${var.name}-worker-"
  description  = "Worker Instance template"
  machine_type = "${var.machine_type}"

  metadata {
    user-data = "${data.ct_config.worker_ign.rendered}"
  }

  scheduling {
    automatic_restart = "${var.preemptible ? false : true}"
    preemptible       = "${var.preemptible}"
  }

  disk {
    auto_delete  = true
    boot         = true
    source_image = "${var.os_image}"
    disk_size_gb = "${var.disk_size}"
  }

  network_interface {
    network = "${var.network}"

    # Ephemeral external IP
    access_config = {}
  }

  can_ip_forward = true
  tags           = ["worker", "${var.cluster_name}-worker", "${var.name}-worker"]

  lifecycle {
    # To update an Instance Template, Terraform should replace the existing resource
    create_before_destroy = true
  }
}
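
The fix rides on the `tags` line above: each pool instance is now tagged with both `${var.cluster_name}-worker` (shared across the whole cluster) and `${var.name}-worker` (unique to the pool), so cluster-level firewall rules that select targets by the cluster-wide tag also match attached worker pools. A minimal sketch of such a rule, which would live in the cluster module rather than this file; the rule name, ports, and source range are assumptions for illustration:

# Hypothetical sketch (not part of workers.tf): a cluster firewall rule that
# selects instances by the shared cluster worker tag. Name, ports, and source
# range are illustrative assumptions.
resource "google_compute_firewall" "allow-node-ports" {
  name    = "${var.cluster_name}-allow-node-ports"
  network = "${var.network}"

  allow {
    protocol = "tcp"
    ports    = ["30000-32767"]
  }

  source_ranges = ["0.0.0.0/0"]

  # Matches every instance tagged with the cluster-wide worker tag, including
  # instances created by attached worker pools.
  target_tags = ["${var.cluster_name}-worker"]
}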