Migrate Google Cloud module from Terraform v0.11 to v0.12

* Replace v0.11 bracket type hints with Terraform v0.12 list expressions
* Use expression syntax instead of interpolated strings, where suggested
* Update Google Cloud tutorial and worker pools documentation
* Define Terraform and plugin version requirements in versions.tf
  * Require google ~> 2.5 to support Terraform v0.12
  * Require ct ~> 0.3.2 to support Terraform v0.12
Dalton Hubble 2019-05-27 21:14:25 -07:00
parent 2ba0181dbe
commit d6d9e6c4b9
21 changed files with 249 additions and 291 deletions
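The mechanical change repeated throughout the hunks below: Terraform v0.11 required wrapping every reference in `"${...}"` interpolation, while v0.12 takes first-class expressions. A representative before/after pair (attribute names taken from this diff):

```tf
# Terraform v0.11: references are quoted interpolations, even inside lists
count         = "${var.controller_count}"
network       = "${google_compute_network.network.name}"
health_checks = ["${google_compute_health_check.apiserver.self_link}"]

# Terraform v0.12: bare expressions; quotes only for literal strings and templates
count         = var.controller_count
network       = google_compute_network.network.name
health_checks = [google_compute_health_check.apiserver.self_link]
```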

View File

@@ -39,8 +39,11 @@ Notable changes between versions.
#### Google Cloud
* Migrate from Terraform v0.11 to v0.12.x (**action required!**)
* Require `terraform-provider-google` v2.5+ to support Terraform v0.12
* Require `terraform-provider-ct` v0.3.2+ to support Terraform v0.12
* Rename `worker` pool module `count` variable to `worker_count` ([#485](https://github.com/poseidon/typhoon/pull/485)) (action required)
* `count` will become a reserved variable name in Terraform v0.12
* `count` is a reserved variable in Terraform v0.12
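For module users, the rename is a one-line change in cluster configs; a minimal sketch (module name illustrative, matching the worker pool example in the docs below):

```tf
module "yavin-worker-pool" {
  # ...
  # count        = 2   # accepted by Terraform v0.11 module versions
  worker_count   = 2   # Terraform v0.12: `count` is reserved
}
```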
#### Addons

View File

@@ -50,15 +50,7 @@ Define a Kubernetes cluster by using the Terraform module for your chosen platform
```tf
module "google-cloud-yavin" {
source = "git::https://github.com/poseidon/typhoon//google-cloud/container-linux/kubernetes?ref=v1.14.3"
providers = {
google = "google.default"
local = "local.default"
null = "null.default"
template = "template.default"
tls = "tls.default"
}
source = "git::https://github.com/poseidon/typhoon//google-cloud/container-linux/kubernetes?ref=v1.14.4"
# Google Cloud
cluster_name = "yavin"

View File

@@ -150,19 +150,15 @@ Create a cluster following the Google Cloud [tutorial](../cl/google-cloud.md#clu
module "yavin-worker-pool" {
source = "git::https://github.com/poseidon/typhoon//google-cloud/container-linux/kubernetes/workers?ref=v1.14.3"
providers = {
google = "google.default"
}
# Google Cloud
region = "europe-west2"
network = "${module.google-cloud-yavin.network_name}"
network = module.google-cloud-yavin.network_name
cluster_name = "yavin"
# configuration
name = "yavin-16x"
kubeconfig = "${module.google-cloud-yavin.kubeconfig}"
ssh_authorized_key = "${var.ssh_authorized_key}"
kubeconfig = module.google-cloud-yavin.kubeconfig
ssh_authorized_key = var.ssh_authorized_key
# optional
worker_count = 2
@@ -200,9 +196,9 @@ The Google Cloud internal `workers` module supports a number of [variables](http
| Name | Description | Example |
|:-----|:------------|:--------|
| name | Unique name (distinct from cluster name) | "yavin-16x" |
| region | Region for the worker pool instances. May differ from the cluster's region | "europe-west2" |
| network | Must be set to `network_name` output by cluster | "${module.cluster.network_name}" |
| network | Must be set to `network_name` output by cluster | module.cluster.network_name |
| cluster_name | Must be set to `cluster_name` of cluster | "yavin" |
| kubeconfig | Must be set to `kubeconfig` output by cluster | "${module.cluster.kubeconfig}" |
| kubeconfig | Must be set to `kubeconfig` output by cluster | module.cluster.kubeconfig |
| ssh_authorized_key | SSH public key for user 'core' | "ssh-rsa AAAAB3NZ..." |
Check the list of regions in the [docs](https://cloud.google.com/compute/docs/regions-zones/regions-zones) or with `gcloud compute regions list`.

View File

@@ -10,15 +10,15 @@ Controllers are provisioned to run an `etcd-member` peer and a `kubelet` service
* Google Cloud Account and Service Account
* Google Cloud DNS Zone (registered Domain Name or delegated subdomain)
* Terraform v0.11.x and [terraform-provider-ct](https://github.com/poseidon/terraform-provider-ct) installed locally
* Terraform v0.12.x and [terraform-provider-ct](https://github.com/poseidon/terraform-provider-ct) installed locally
## Terraform Setup
Install [Terraform](https://www.terraform.io/downloads.html) v0.11.x on your system.
Install [Terraform](https://www.terraform.io/downloads.html) v0.12.x on your system.
```sh
$ terraform version
Terraform v0.11.14
Terraform v0.12.0
```
Add the [terraform-provider-ct](https://github.com/poseidon/terraform-provider-ct) plugin binary for your system to `~/.terraform.d/plugins/`, noting the final name.
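For example, on Linux amd64 (the release filename and archive layout below are assumptions based on the plugin's GitHub releases; adjust for your platform — Terraform v0.12 discovers third-party plugins by the `terraform-provider-NAME_vX.Y.Z` naming convention):

```sh
wget https://github.com/poseidon/terraform-provider-ct/releases/download/v0.3.2/terraform-provider-ct-v0.3.2-linux-amd64.tar.gz
tar xzf terraform-provider-ct-v0.3.2-linux-amd64.tar.gz
# the version-suffixed final name is what Terraform v0.12 matches against
mv terraform-provider-ct-v0.3.2-linux-amd64/terraform-provider-ct \
  ~/.terraform.d/plugins/terraform-provider-ct_v0.3.2
```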
@@ -49,37 +49,15 @@ Configure the Google Cloud provider to use your service account key, project-id,
```tf
provider "google" {
version = "~> 2.7.0"
alias = "default"
credentials = "${file("~/.config/google-cloud/terraform.json")}"
version = "2.7.0"
project = "project-id"
region = "us-central1"
credentials = "${file("~/.config/google-cloud/terraform.json")}"
}
provider "ct" {
version = "0.3.2"
}
provider "local" {
version = "~> 1.0"
alias = "default"
}
provider "null" {
version = "~> 1.0"
alias = "default"
}
provider "template" {
version = "~> 1.0"
alias = "default"
}
provider "tls" {
version = "~> 1.0"
alias = "default"
}
```
Additional configuration options are described in the `google` provider [docs](https://www.terraform.io/docs/providers/google/index.html).
@@ -93,15 +71,7 @@ Define a Kubernetes cluster using the module `google-cloud/container-linux/kuber
```tf
module "google-cloud-yavin" {
source = "git::https://github.com/poseidon/typhoon//google-cloud/container-linux/kubernetes?ref=v1.14.3"
providers = {
google = "google.default"
local = "local.default"
null = "null.default"
template = "template.default"
tls = "tls.default"
}
source = "git::https://github.com/poseidon/typhoon//google-cloud/container-linux/kubernetes?ref=v1.14.4"
# Google Cloud
cluster_name = "yavin"

View File

@@ -49,15 +49,7 @@ Define a Kubernetes cluster by using the Terraform module for your chosen platform
```tf
module "google-cloud-yavin" {
source = "git::https://github.com/poseidon/typhoon//google-cloud/container-linux/kubernetes?ref=v1.14.3"
providers = {
google = "google.default"
local = "local.default"
null = "null.default"
template = "template.default"
tls = "tls.default"
}
source = "git::https://github.com/poseidon/typhoon//google-cloud/container-linux/kubernetes?ref=v1.14.4"
# Google Cloud
cluster_name = "yavin"

View File

@@ -1,15 +1,15 @@
# TCP Proxy load balancer DNS record
resource "google_dns_record_set" "apiserver" {
# DNS Zone name where record should be created
managed_zone = "${var.dns_zone_name}"
managed_zone = var.dns_zone_name
# DNS record
name = "${format("%s.%s.", var.cluster_name, var.dns_zone)}"
name = format("%s.%s.", var.cluster_name, var.dns_zone)
type = "A"
ttl = 300
# IPv4 address of apiserver TCP Proxy load balancer
rrdatas = ["${google_compute_global_address.apiserver-ipv4.address}"]
rrdatas = [google_compute_global_address.apiserver-ipv4.address]
}
# Static IPv4 address for the TCP Proxy Load Balancer
@@ -21,17 +21,17 @@ resource "google_compute_global_address" "apiserver-ipv4" {
# Forward IPv4 TCP traffic to the TCP proxy load balancer
resource "google_compute_global_forwarding_rule" "apiserver" {
name = "${var.cluster_name}-apiserver"
ip_address = "${google_compute_global_address.apiserver-ipv4.address}"
ip_address = google_compute_global_address.apiserver-ipv4.address
ip_protocol = "TCP"
port_range = "443"
target = "${google_compute_target_tcp_proxy.apiserver.self_link}"
target = google_compute_target_tcp_proxy.apiserver.self_link
}
# Global TCP Proxy Load Balancer for apiservers
resource "google_compute_target_tcp_proxy" "apiserver" {
name = "${var.cluster_name}-apiserver"
description = "Distribute TCP load across ${var.cluster_name} controllers"
backend_service = "${google_compute_backend_service.apiserver.self_link}"
backend_service = google_compute_backend_service.apiserver.self_link
}
# Global backend service backed by unmanaged instance groups
@@ -46,26 +46,30 @@ resource "google_compute_backend_service" "apiserver" {
# controller(s) spread across zonal instance groups
backend {
group = "${google_compute_instance_group.controllers.0.self_link}"
group = google_compute_instance_group.controllers[0].self_link
}
backend {
group = "${google_compute_instance_group.controllers.1.self_link}"
group = google_compute_instance_group.controllers[1].self_link
}
backend {
group = "${google_compute_instance_group.controllers.2.self_link}"
group = google_compute_instance_group.controllers[2].self_link
}
health_checks = ["${google_compute_health_check.apiserver.self_link}"]
health_checks = [google_compute_health_check.apiserver.self_link]
}
# Instance group of heterogeneous (unmanaged) controller instances
resource "google_compute_instance_group" "controllers" {
count = "${length(local.zones)}"
count = length(local.zones)
name = "${format("%s-controllers-%s", var.cluster_name, element(local.zones, count.index))}"
zone = "${element(local.zones, count.index)}"
name = format(
"%s-controllers-%s",
var.cluster_name,
element(local.zones, count.index),
)
zone = element(local.zones, count.index)
named_port {
name = "apiserver"
@@ -73,11 +77,11 @@ resource "google_compute_instance_group" "controllers" {
}
# add instances in the zone into the instance group
instances = [
"${matchkeys(google_compute_instance.controllers.*.self_link,
instances = matchkeys(
google_compute_instance.controllers.*.self_link,
google_compute_instance.controllers.*.zone,
list(element(local.zones, count.index)))}",
]
[element(local.zones, count.index)],
)
}
# TCP health check for apiserver
@@ -95,3 +99,4 @@ resource "google_compute_health_check" "apiserver" {
port = "443"
}
}

View File

@@ -1,19 +1,20 @@
# Self-hosted Kubernetes assets (kubeconfig, manifests)
module "bootkube" {
source = "git::https://github.com/poseidon/terraform-render-bootkube.git?ref=082921d67905417755609eebda7d39a7e26f7fdb"
source = "git::https://github.com/poseidon/terraform-render-bootkube.git?ref=0103bc06bb3f597455a765bf5d916f9b241cbea0"
cluster_name = "${var.cluster_name}"
api_servers = ["${format("%s.%s", var.cluster_name, var.dns_zone)}"]
etcd_servers = ["${google_dns_record_set.etcds.*.name}"]
asset_dir = "${var.asset_dir}"
networking = "${var.networking}"
cluster_name = var.cluster_name
api_servers = [format("%s.%s", var.cluster_name, var.dns_zone)]
etcd_servers = google_dns_record_set.etcds.*.name
asset_dir = var.asset_dir
networking = var.networking
network_mtu = 1440
pod_cidr = "${var.pod_cidr}"
service_cidr = "${var.service_cidr}"
cluster_domain_suffix = "${var.cluster_domain_suffix}"
enable_reporting = "${var.enable_reporting}"
enable_aggregation = "${var.enable_aggregation}"
pod_cidr = var.pod_cidr
service_cidr = var.service_cidr
cluster_domain_suffix = var.cluster_domain_suffix
enable_reporting = var.enable_reporting
enable_aggregation = var.enable_aggregation
// temporary
apiserver_port = 443
}

View File

@@ -1,106 +1,107 @@
# Discrete DNS records for each controller's private IPv4 for etcd usage
resource "google_dns_record_set" "etcds" {
count = "${var.controller_count}"
count = var.controller_count
# DNS Zone name where record should be created
managed_zone = "${var.dns_zone_name}"
managed_zone = var.dns_zone_name
# DNS record
name = "${format("%s-etcd%d.%s.", var.cluster_name, count.index, var.dns_zone)}"
name = format("%s-etcd%d.%s.", var.cluster_name, count.index, var.dns_zone)
type = "A"
ttl = 300
# private IPv4 address for etcd
rrdatas = ["${element(google_compute_instance.controllers.*.network_interface.0.network_ip, count.index)}"]
rrdatas = [element(google_compute_instance.controllers.*.network_interface.0.network_ip, count.index)]
}
# Zones in the region
data "google_compute_zones" "all" {
region = "${var.region}"
region = var.region
}
locals {
# TCP proxy load balancers require a fixed number of zonal backends. Spread
# controllers over up to 3 zones, since all GCP regions have at least 3.
zones = "${slice(data.google_compute_zones.all.names, 0, 3)}"
zones = slice(data.google_compute_zones.all.names, 0, 3)
controllers_ipv4_public = ["${google_compute_instance.controllers.*.network_interface.0.access_config.0.nat_ip}"]
controllers_ipv4_public = google_compute_instance.controllers.*.network_interface.0.access_config.0.nat_ip
}
# Controller instances
resource "google_compute_instance" "controllers" {
count = "${var.controller_count}"
count = var.controller_count
name = "${var.cluster_name}-controller-${count.index}"
zone = "${element(local.zones, count.index)}"
machine_type = "${var.controller_type}"
zone = element(local.zones, count.index)
machine_type = var.controller_type
metadata = {
user-data = "${element(data.ct_config.controller-ignitions.*.rendered, count.index)}"
user-data = element(data.ct_config.controller-ignitions.*.rendered, count.index)
}
boot_disk {
auto_delete = true
initialize_params {
image = "${var.os_image}"
size = "${var.disk_size}"
image = var.os_image
size = var.disk_size
}
}
network_interface {
network = "${google_compute_network.network.name}"
network = google_compute_network.network.name
# Ephemeral external IP
access_config = {}
access_config {
}
}
can_ip_forward = true
tags = ["${var.cluster_name}-controller"]
lifecycle {
ignore_changes = [
"metadata",
]
ignore_changes = [metadata]
}
}
# Controller Ignition configs
data "ct_config" "controller-ignitions" {
count = "${var.controller_count}"
content = "${element(data.template_file.controller-configs.*.rendered, count.index)}"
count = var.controller_count
content = element(
data.template_file.controller-configs.*.rendered,
count.index,
)
pretty_print = false
snippets = ["${var.controller_clc_snippets}"]
snippets = var.controller_clc_snippets
}
# Controller Container Linux configs
data "template_file" "controller-configs" {
count = "${var.controller_count}"
count = var.controller_count
template = "${file("${path.module}/cl/controller.yaml.tmpl")}"
template = file("${path.module}/cl/controller.yaml.tmpl")
vars = {
# Cannot use cyclic dependencies on controllers or their DNS records
etcd_name = "etcd${count.index}"
etcd_domain = "${var.cluster_name}-etcd${count.index}.${var.dns_zone}"
# etcd0=https://cluster-etcd0.example.com,etcd1=https://cluster-etcd1.example.com,...
etcd_initial_cluster = "${join(",", data.template_file.etcds.*.rendered)}"
kubeconfig = "${indent(10, module.bootkube.kubeconfig-kubelet)}"
ssh_authorized_key = "${var.ssh_authorized_key}"
cluster_dns_service_ip = "${cidrhost(var.service_cidr, 10)}"
cluster_domain_suffix = "${var.cluster_domain_suffix}"
etcd_initial_cluster = join(",", data.template_file.etcds.*.rendered)
kubeconfig = indent(10, module.bootkube.kubeconfig-kubelet)
ssh_authorized_key = var.ssh_authorized_key
cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
cluster_domain_suffix = var.cluster_domain_suffix
}
}
data "template_file" "etcds" {
count = "${var.controller_count}"
count = var.controller_count
template = "etcd$${index}=https://$${cluster_name}-etcd$${index}.$${dns_zone}:2380"
vars = {
index = "${count.index}"
cluster_name = "${var.cluster_name}"
dns_zone = "${var.dns_zone}"
index = count.index
cluster_name = var.cluster_name
dns_zone = var.dns_zone
}
}

View File

@@ -14,52 +14,52 @@ resource "google_compute_global_address" "ingress-ipv6" {
# Google Cloud does not allow TCP proxies for port 80. Must use HTTP proxy.
resource "google_compute_global_forwarding_rule" "ingress-http-ipv4" {
name = "${var.cluster_name}-ingress-http-ipv4"
ip_address = "${google_compute_global_address.ingress-ipv4.address}"
ip_address = google_compute_global_address.ingress-ipv4.address
ip_protocol = "TCP"
port_range = "80"
target = "${google_compute_target_http_proxy.ingress-http.self_link}"
target = google_compute_target_http_proxy.ingress-http.self_link
}
# Forward IPv4 TCP traffic to the TCP proxy load balancer
resource "google_compute_global_forwarding_rule" "ingress-https-ipv4" {
name = "${var.cluster_name}-ingress-https-ipv4"
ip_address = "${google_compute_global_address.ingress-ipv4.address}"
ip_address = google_compute_global_address.ingress-ipv4.address
ip_protocol = "TCP"
port_range = "443"
target = "${google_compute_target_tcp_proxy.ingress-https.self_link}"
target = google_compute_target_tcp_proxy.ingress-https.self_link
}
# Forward IPv6 TCP traffic to the HTTP proxy load balancer
# Google Cloud does not allow TCP proxies for port 80. Must use HTTP proxy.
resource "google_compute_global_forwarding_rule" "ingress-http-ipv6" {
name = "${var.cluster_name}-ingress-http-ipv6"
ip_address = "${google_compute_global_address.ingress-ipv6.address}"
ip_address = google_compute_global_address.ingress-ipv6.address
ip_protocol = "TCP"
port_range = "80"
target = "${google_compute_target_http_proxy.ingress-http.self_link}"
target = google_compute_target_http_proxy.ingress-http.self_link
}
# Forward IPv6 TCP traffic to the TCP proxy load balancer
resource "google_compute_global_forwarding_rule" "ingress-https-ipv6" {
name = "${var.cluster_name}-ingress-https-ipv6"
ip_address = "${google_compute_global_address.ingress-ipv6.address}"
ip_address = google_compute_global_address.ingress-ipv6.address
ip_protocol = "TCP"
port_range = "443"
target = "${google_compute_target_tcp_proxy.ingress-https.self_link}"
target = google_compute_target_tcp_proxy.ingress-https.self_link
}
# HTTP proxy load balancer for ingress controllers
resource "google_compute_target_http_proxy" "ingress-http" {
name = "${var.cluster_name}-ingress-http"
description = "Distribute HTTP load across ${var.cluster_name} workers"
url_map = "${google_compute_url_map.ingress-http.self_link}"
url_map = google_compute_url_map.ingress-http.self_link
}
# TCP proxy load balancer for ingress controllers
resource "google_compute_target_tcp_proxy" "ingress-https" {
name = "${var.cluster_name}-ingress-https"
description = "Distribute HTTPS load across ${var.cluster_name} workers"
backend_service = "${google_compute_backend_service.ingress-https.self_link}"
backend_service = google_compute_backend_service.ingress-https.self_link
}
# HTTP URL Map (required)
@@ -67,7 +67,7 @@ resource "google_compute_url_map" "ingress-http" {
name = "${var.cluster_name}-ingress-http"
# Do not add host/path rules for applications here. Use Ingress resources.
default_service = "${google_compute_backend_service.ingress-http.self_link}"
default_service = google_compute_backend_service.ingress-http.self_link
}
# Backend service backed by managed instance group of workers
@@ -81,10 +81,10 @@ resource "google_compute_backend_service" "ingress-http" {
timeout_sec = "60"
backend {
group = "${module.workers.instance_group}"
group = module.workers.instance_group
}
health_checks = ["${google_compute_health_check.ingress.self_link}"]
health_checks = [google_compute_health_check.ingress.self_link]
}
# Backend service backed by managed instance group of workers
@@ -98,10 +98,10 @@ resource "google_compute_backend_service" "ingress-https" {
timeout_sec = "60"
backend {
group = "${module.workers.instance_group}"
group = module.workers.instance_group
}
health_checks = ["${google_compute_health_check.ingress.self_link}"]
health_checks = [google_compute_health_check.ingress.self_link]
}
# Ingress HTTP Health Check
@@ -120,3 +120,4 @@ resource "google_compute_health_check" "ingress" {
request_path = "/healthz"
}
}

View File

@@ -1,12 +1,12 @@
resource "google_compute_network" "network" {
name = "${var.cluster_name}"
name = var.cluster_name
description = "Network for the ${var.cluster_name} cluster"
auto_create_subnetworks = true
}
resource "google_compute_firewall" "allow-ssh" {
name = "${var.cluster_name}-allow-ssh"
network = "${google_compute_network.network.name}"
network = google_compute_network.network.name
allow {
protocol = "tcp"
@@ -19,7 +19,7 @@ resource "google_compute_firewall" "allow-ssh" {
resource "google_compute_firewall" "internal-etcd" {
name = "${var.cluster_name}-internal-etcd"
network = "${google_compute_network.network.name}"
network = google_compute_network.network.name
allow {
protocol = "tcp"
@@ -33,7 +33,7 @@ resource "google_compute_firewall" "internal-etcd" {
# Allow Prometheus to scrape etcd metrics
resource "google_compute_firewall" "internal-etcd-metrics" {
name = "${var.cluster_name}-internal-etcd-metrics"
network = "${google_compute_network.network.name}"
network = google_compute_network.network.name
allow {
protocol = "tcp"
@@ -46,7 +46,7 @@ resource "google_compute_firewall" "internal-etcd-metrics" {
resource "google_compute_firewall" "allow-apiserver" {
name = "${var.cluster_name}-allow-apiserver"
network = "${google_compute_network.network.name}"
network = google_compute_network.network.name
allow {
protocol = "tcp"
@@ -60,10 +60,10 @@ resource "google_compute_firewall" "allow-apiserver" {
# BGP and IPIP
# https://docs.projectcalico.org/latest/reference/public-cloud/gce
resource "google_compute_firewall" "internal-bgp" {
count = "${var.networking != "flannel" ? 1 : 0}"
count = var.networking != "flannel" ? 1 : 0
name = "${var.cluster_name}-internal-bgp"
network = "${google_compute_network.network.name}"
network = google_compute_network.network.name
allow {
protocol = "tcp"
@@ -80,10 +80,10 @@ resource "google_compute_firewall" "internal-bgp" {
# flannel VXLAN
resource "google_compute_firewall" "internal-vxlan" {
count = "${var.networking == "flannel" ? 1 : 0}"
count = var.networking == "flannel" ? 1 : 0
name = "${var.cluster_name}-internal-vxlan"
network = "${google_compute_network.network.name}"
network = google_compute_network.network.name
allow {
protocol = "udp"
@@ -97,7 +97,7 @@ resource "google_compute_firewall" "internal-vxlan" {
# Allow Prometheus to scrape node-exporter daemonset
resource "google_compute_firewall" "internal-node-exporter" {
name = "${var.cluster_name}-internal-node-exporter"
network = "${google_compute_network.network.name}"
network = google_compute_network.network.name
allow {
protocol = "tcp"
@@ -111,7 +111,7 @@ resource "google_compute_firewall" "internal-node-exporter" {
# Allow apiserver to access kubelets for exec, log, port-forward
resource "google_compute_firewall" "internal-kubelet" {
name = "${var.cluster_name}-internal-kubelet"
network = "${google_compute_network.network.name}"
network = google_compute_network.network.name
allow {
protocol = "tcp"
@@ -127,7 +127,7 @@ resource "google_compute_firewall" "internal-kubelet" {
resource "google_compute_firewall" "allow-ingress" {
name = "${var.cluster_name}-allow-ingress"
network = "${google_compute_network.network.name}"
network = google_compute_network.network.name
allow {
protocol = "tcp"
@@ -140,7 +140,7 @@ resource "google_compute_firewall" "allow-ingress" {
resource "google_compute_firewall" "google-ingress-health-checks" {
name = "${var.cluster_name}-ingress-health"
network = "${google_compute_network.network.name}"
network = google_compute_network.network.name
allow {
protocol = "tcp"
@@ -149,17 +149,13 @@ resource "google_compute_firewall" "google-ingress-health-checks" {
# https://cloud.google.com/load-balancing/docs/health-check-concepts#method
source_ranges = [
# Global LB health checks
"35.191.0.0/16",
"130.211.0.0/22",
# Region LB health checks
"35.191.0.0/16",
"209.85.152.0/22",
"209.85.204.0/22",
]
target_tags = ["${var.cluster_name}-worker"]
}

View File

@@ -1,43 +1,44 @@
output "kubeconfig-admin" {
value = "${module.bootkube.kubeconfig-admin}"
value = module.bootkube.kubeconfig-admin
}
# Outputs for Kubernetes Ingress
output "ingress_static_ipv4" {
description = "Global IPv4 address for proxy load balancing to the nearest Ingress controller"
value = "${google_compute_global_address.ingress-ipv4.address}"
value = google_compute_global_address.ingress-ipv4.address
}
output "ingress_static_ipv6" {
description = "Global IPv6 address for proxy load balancing to the nearest Ingress controller"
value = "${google_compute_global_address.ingress-ipv6.address}"
value = google_compute_global_address.ingress-ipv6.address
}
# Outputs for worker pools
output "network_name" {
value = "${google_compute_network.network.name}"
value = google_compute_network.network.name
}
output "kubeconfig" {
value = "${module.bootkube.kubeconfig-kubelet}"
value = module.bootkube.kubeconfig-kubelet
}
# Outputs for custom firewalling
output "network_self_link" {
value = "${google_compute_network.network.self_link}"
value = google_compute_network.network.self_link
}
# Outputs for custom load balancing
output "worker_instance_group" {
description = "Worker managed instance group full URL"
value = "${module.workers.instance_group}"
value = module.workers.instance_group
}
output "worker_target_pool" {
description = "Worker target pool self link"
value = "${module.workers.target_pool}"
value = module.workers.target_pool
}

View File

@@ -1,25 +0,0 @@
# Terraform version and plugin versions
terraform {
required_version = ">= 0.11.0"
}
provider "google" {
version = ">= 1.19, < 3.0"
}
provider "local" {
version = "~> 1.0"
}
provider "null" {
version = "~> 1.0"
}
provider "template" {
version = "~> 1.0"
}
provider "tls" {
version = "~> 1.0"
}

View File

@@ -1,46 +1,46 @@
# Secure copy etcd TLS assets to controllers.
resource "null_resource" "copy-controller-secrets" {
count = "${var.controller_count}"
count = var.controller_count
connection {
type = "ssh"
host = "${element(local.controllers_ipv4_public, count.index)}"
host = element(local.controllers_ipv4_public, count.index)
user = "core"
timeout = "15m"
}
provisioner "file" {
content = "${module.bootkube.etcd_ca_cert}"
content = module.bootkube.etcd_ca_cert
destination = "$HOME/etcd-client-ca.crt"
}
provisioner "file" {
content = "${module.bootkube.etcd_client_cert}"
content = module.bootkube.etcd_client_cert
destination = "$HOME/etcd-client.crt"
}
provisioner "file" {
content = "${module.bootkube.etcd_client_key}"
content = module.bootkube.etcd_client_key
destination = "$HOME/etcd-client.key"
}
provisioner "file" {
content = "${module.bootkube.etcd_server_cert}"
content = module.bootkube.etcd_server_cert
destination = "$HOME/etcd-server.crt"
}
provisioner "file" {
content = "${module.bootkube.etcd_server_key}"
content = module.bootkube.etcd_server_key
destination = "$HOME/etcd-server.key"
}
provisioner "file" {
content = "${module.bootkube.etcd_peer_cert}"
content = module.bootkube.etcd_peer_cert
destination = "$HOME/etcd-peer.crt"
}
provisioner "file" {
content = "${module.bootkube.etcd_peer_key}"
content = module.bootkube.etcd_peer_key
destination = "$HOME/etcd-peer.key"
}
@@ -64,21 +64,21 @@ resource "null_resource" "copy-controller-secrets" {
# one-time self-hosted cluster bootstrapping.
resource "null_resource" "bootkube-start" {
depends_on = [
"module.bootkube",
"module.workers",
"google_dns_record_set.apiserver",
"null_resource.copy-controller-secrets",
module.bootkube,
module.workers,
google_dns_record_set.apiserver,
null_resource.copy-controller-secrets,
]
connection {
type = "ssh"
host = "${element(local.controllers_ipv4_public, 0)}"
host = element(local.controllers_ipv4_public, 0)
user = "core"
timeout = "15m"
}
provisioner "file" {
source = "${var.asset_dir}"
source = var.asset_dir
destination = "$HOME/assets"
}
@@ -89,3 +89,4 @@ resource "null_resource" "bootkube-start" {
]
}
}

View File

@@ -1,77 +1,77 @@
variable "cluster_name" {
type = "string"
type = string
description = "Unique cluster name (prepended to dns_zone)"
}
# Google Cloud
variable "region" {
type = "string"
type = string
description = "Google Cloud Region (e.g. us-central1, see `gcloud compute regions list`)"
}
variable "dns_zone" {
type = "string"
type = string
description = "Google Cloud DNS Zone (e.g. google-cloud.example.com)"
}
variable "dns_zone_name" {
type = "string"
type = string
description = "Google Cloud DNS Zone name (e.g. example-zone)"
}
# instances
variable "controller_count" {
type = "string"
type = string
default = "1"
description = "Number of controllers (i.e. masters)"
}
variable "worker_count" {
type = "string"
type = string
default = "1"
description = "Number of workers"
}
variable "controller_type" {
type = "string"
type = string
default = "n1-standard-1"
description = "Machine type for controllers (see `gcloud compute machine-types list`)"
}
variable "worker_type" {
type = "string"
type = string
default = "n1-standard-1"
description = "Machine type for controllers (see `gcloud compute machine-types list`)"
}
variable "os_image" {
type = "string"
type = string
default = "coreos-stable"
description = "Container Linux image for compute instances (e.g. coreos-stable)"
}
variable "disk_size" {
type = "string"
type = string
default = "40"
description = "Size of the disk in GB"
}
variable "worker_preemptible" {
type = "string"
type = string
default = "false"
description = "If enabled, Compute Engine will terminate workers randomly within 24 hours"
}
variable "controller_clc_snippets" {
type = "list"
type = list(string)
description = "Controller Container Linux Config snippets"
default = []
}
variable "worker_clc_snippets" {
type = "list"
type = list(string)
description = "Worker Container Linux Config snippets"
default = []
}
@@ -79,24 +79,24 @@ variable "worker_clc_snippets" {
# configuration
variable "ssh_authorized_key" {
type = "string"
type = string
description = "SSH public key for user 'core'"
}
variable "asset_dir" {
description = "Path to a directory where generated assets should be placed (contains secrets)"
type = "string"
type = string
}
variable "networking" {
description = "Choice of networking provider (flannel or calico)"
type = "string"
type = string
default = "calico"
}
variable "pod_cidr" {
description = "CIDR IPv4 range to assign Kubernetes pods"
type = "string"
type = string
default = "10.2.0.0/16"
}
@@ -106,24 +106,26 @@ CIDR IPv4 range to assign Kubernetes services.
The 1st IP will be reserved for kube_apiserver, the 10th IP will be reserved for coredns.
EOD
type = "string"
type = string
default = "10.3.0.0/16"
}
variable "cluster_domain_suffix" {
description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
type = "string"
type = string
default = "cluster.local"
}
variable "enable_reporting" {
type = "string"
type = string
description = "Enable usage or analytics reporting to upstreams (Calico)"
default = "false"
}
variable "enable_aggregation" {
description = "Enable the Kubernetes Aggregation Layer (defaults to false)"
type = "string"
type = string
default = "false"
}

View File

@@ -0,0 +1,11 @@
# Terraform version and plugin versions
terraform {
required_version = "~> 0.12.0"
required_providers {
google = "~> 2.5"
ct = "~> 0.3.2"
template = "~> 2.1"
null = "~> 2.1"
}
}
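With constraints declared, standard Terraform CLI commands (not part of this diff) verify the setup: `terraform init` downloads the registry plugins and locates the manually installed `ct` plugin, and `terraform providers` prints the resolved requirements:

```sh
terraform init       # fetches google/template/null plugins; finds terraform-provider-ct in ~/.terraform.d/plugins
terraform providers  # show the provider requirement tree for the configuration
```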

View File

@@ -1,21 +1,22 @@
module "workers" {
source = "./workers"
name = "${var.cluster_name}"
cluster_name = "${var.cluster_name}"
name = var.cluster_name
cluster_name = var.cluster_name
# GCE
region = "${var.region}"
network = "${google_compute_network.network.name}"
worker_count = "${var.worker_count}"
machine_type = "${var.worker_type}"
os_image = "${var.os_image}"
disk_size = "${var.disk_size}"
preemptible = "${var.worker_preemptible}"
region = var.region
network = google_compute_network.network.name
worker_count = var.worker_count
machine_type = var.worker_type
os_image = var.os_image
disk_size = var.disk_size
preemptible = var.worker_preemptible
# configuration
kubeconfig = "${module.bootkube.kubeconfig-kubelet}"
ssh_authorized_key = "${var.ssh_authorized_key}"
service_cidr = "${var.service_cidr}"
cluster_domain_suffix = "${var.cluster_domain_suffix}"
clc_snippets = "${var.worker_clc_snippets}"
kubeconfig = module.bootkube.kubeconfig-kubelet
ssh_authorized_key = var.ssh_authorized_key
service_cidr = var.service_cidr
cluster_domain_suffix = var.cluster_domain_suffix
clc_snippets = var.worker_clc_snippets
}

View File

@@ -2,12 +2,13 @@
output "instance_group" {
description = "Worker managed instance group full URL"
value = "${google_compute_region_instance_group_manager.workers.instance_group}"
value = google_compute_region_instance_group_manager.workers.instance_group
}
# Outputs for regional load balancing
output "target_pool" {
description = "Worker target pool self link"
value = "${google_compute_target_pool.workers.self_link}"
value = google_compute_target_pool.workers.self_link
}

View File

@@ -1,11 +1,11 @@
# Target pool for TCP/UDP load balancing
resource "google_compute_target_pool" "workers" {
name = "${var.name}-worker-pool"
region = "${var.region}"
region = var.region
session_affinity = "NONE"
health_checks = [
"${google_compute_http_health_check.workers.name}",
google_compute_http_health_check.workers.name,
]
}
@@ -20,3 +20,4 @@ resource "google_compute_http_health_check" "workers" {
port = 10254
request_path = "/healthz"
}

View File

@@ -1,53 +1,53 @@
variable "name" {
type = "string"
type = string
description = "Unique name for the worker pool"
}
variable "cluster_name" {
type = "string"
type = string
description = "Must be set to `cluster_name of cluster`"
}
# Google Cloud
variable "region" {
type = "string"
type = string
description = "Must be set to `region` of cluster"
}
variable "network" {
type = "string"
type = string
description = "Must be set to `network_name` output by cluster"
}
# instances
variable "worker_count" {
type = "string"
type = string
default = "1"
description = "Number of worker compute instances the instance group should manage"
}
variable "machine_type" {
type = "string"
type = string
default = "n1-standard-1"
description = "Machine type for compute instances (e.g. gcloud compute machine-types list)"
}
variable "os_image" {
type = "string"
type = string
default = "coreos-stable"
description = "Container Linux image for compute instanges (e.g. gcloud compute images list)"
}
variable "disk_size" {
type = "string"
type = string
default = "40"
description = "Size of the disk in GB"
}
variable "preemptible" {
type = "string"
type = string
default = "false"
description = "If enabled, Compute Engine will terminate instances randomly within 24 hours"
}
@@ -55,12 +55,12 @@ variable "preemptible" {
# configuration
variable "kubeconfig" {
type = "string"
type = string
description = "Must be set to `kubeconfig` output by cluster"
}
variable "ssh_authorized_key" {
type = "string"
type = string
description = "SSH public key for user 'core'"
}
@@ -70,18 +70,19 @@ CIDR IPv4 range to assign Kubernetes services.
The 1st IP will be reserved for kube_apiserver, the 10th IP will be reserved for coredns.
EOD
type = "string"
type = string
default = "10.3.0.0/16"
}
variable "cluster_domain_suffix" {
description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
type = "string"
type = string
default = "cluster.local"
}
variable "clc_snippets" {
type = "list"
type = list(string)
description = "Container Linux Config snippets"
default = []
}
@@ -89,13 +90,14 @@ variable "clc_snippets" {
# unofficial, undocumented, unsupported, temporary
variable "accelerator_type" {
type = "string"
type = string
default = ""
description = "Google Compute Engine accelerator type (e.g. nvidia-tesla-k80, see gcloud compute accelerator-types list)"
}
variable "accelerator_count" {
type = "string"
type = string
default = "0"
description = "Number of compute engine accelerators"
}

View File

@@ -0,0 +1,4 @@
terraform {
required_version = ">= 0.12"
}

View File

@@ -5,11 +5,11 @@ resource "google_compute_region_instance_group_manager" "workers" {
# instance name prefix for instances in the group
base_instance_name = "${var.name}-worker"
instance_template = "${google_compute_instance_template.worker.self_link}"
region = "${var.region}"
instance_template = google_compute_instance_template.worker.self_link
region = var.region
target_size = "${var.worker_count}"
target_pools = ["${google_compute_target_pool.workers.self_link}"]
target_size = var.worker_count
target_pools = [google_compute_target_pool.workers.self_link]
named_port {
name = "http"
@@ -26,37 +26,38 @@ resource "google_compute_region_instance_group_manager" "workers" {
resource "google_compute_instance_template" "worker" {
name_prefix = "${var.name}-worker-"
description = "Worker Instance template"
machine_type = "${var.machine_type}"
machine_type = var.machine_type
metadata = {
user-data = "${data.ct_config.worker-ignition.rendered}"
user-data = data.ct_config.worker-ignition.rendered
}
scheduling {
automatic_restart = "${var.preemptible ? false : true}"
preemptible = "${var.preemptible}"
automatic_restart = var.preemptible ? false : true
preemptible = var.preemptible
}
disk {
auto_delete = true
boot = true
source_image = "${var.os_image}"
disk_size_gb = "${var.disk_size}"
source_image = var.os_image
disk_size_gb = var.disk_size
}
network_interface {
network = "${var.network}"
network = var.network
# Ephemeral external IP
access_config = {}
access_config {
}
}
can_ip_forward = true
tags = ["worker", "${var.cluster_name}-worker", "${var.name}-worker"]
guest_accelerator {
count = "${var.accelerator_count}"
type = "${var.accelerator_type}"
count = var.accelerator_count
type = var.accelerator_type
}
lifecycle {
@@ -67,19 +68,20 @@ resource "google_compute_instance_template" "worker" {
# Worker Ignition config
data "ct_config" "worker-ignition" {
content = "${data.template_file.worker-config.rendered}"
content = data.template_file.worker-config.rendered
pretty_print = false
snippets = ["${var.clc_snippets}"]
snippets = var.clc_snippets
}
# Worker Container Linux config
data "template_file" "worker-config" {
template = "${file("${path.module}/cl/worker.yaml.tmpl")}"
template = file("${path.module}/cl/worker.yaml.tmpl")
vars = {
kubeconfig = "${indent(10, var.kubeconfig)}"
ssh_authorized_key = "${var.ssh_authorized_key}"
cluster_dns_service_ip = "${cidrhost(var.service_cidr, 10)}"
cluster_domain_suffix = "${var.cluster_domain_suffix}"
kubeconfig = indent(10, var.kubeconfig)
ssh_authorized_key = var.ssh_authorized_key
cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
cluster_domain_suffix = var.cluster_domain_suffix
}
}