Isolate each DigitalOcean cluster in its own VPC

* DigitalOcean introduced Virtual Private Cloud (VPC) support
to match other clouds and enhance the prior "private networking"
feature. Previously, droplets belonging to different clusters (but
residing in the same region) could reach one another (although
Typhoon firewall rules prohibit this). Now, droplets in a VPC
reside in their own isolated network
* https://www.digitalocean.com/docs/networking/vpc/
* Create droplet instances in a VPC per cluster. This matches the
design of Typhoon AWS, Azure, and GCP.
* Require `terraform-provider-digitalocean` v1.16.0+ (action required; see the provider pin sketch below)
* Output `vpc_id` for use with an attached DigitalOcean
loadbalancer
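
Since cluster modules now create a `digitalocean_vpc` resource, cluster configurations must use terraform-provider-digitalocean v1.16.0 or newer. A minimal sketch of the provider pin an admin would update (the file name and token variable are illustrative, not part of this commit):

```tf
# providers.tf (illustrative): pin the DigitalOcean provider to v1.16+
# so the digitalocean_vpc resource used by the cluster module is available
provider "digitalocean" {
  version = "1.16.0"
  token   = var.digitalocean_token # hypothetical variable holding a DO API token
}
```
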
Dalton Hubble 2020-06-28 23:12:54 -07:00
parent 21178868db
commit 7c6ab21b94
12 changed files with 53 additions and 10 deletions


@@ -9,6 +9,13 @@ Notable changes between versions.
 * Set `networking` to "cilium" to enable
 * Update Calico from v3.14.1 to [v3.15.0](https://docs.projectcalico.org/v3.15/release-notes/)
 
+#### DigitalOcean
+
+* Isolate each cluster in an independent DigitalOcean VPC ([#776](https://github.com/poseidon/typhoon/pull/776))
+* Create droplets in a VPC per cluster (matches Typhoon AWS, Azure, and GCP)
+* Require `terraform-provider-digitalocean` v1.16.0+ (action required)
+* Output `vpc_id` for use with an attached DigitalOcean [loadbalancer](https://github.com/poseidon/typhoon/blob/v1.18.5/docs/architecture/digitalocean.md#custom-load-balancer)
+
 #### Addons
 
 * Update Prometheus from v2.19.0 to [v2.19.1](https://github.com/prometheus/prometheus/releases/tag/v2.19.1)


@@ -46,9 +46,10 @@ resource "digitalocean_droplet" "controllers" {
   size = var.controller_type
 
   # network
-  # only official DigitalOcean images support IPv6
-  ipv6               = local.is_official_image
   private_networking = true
+  vpc_uuid           = digitalocean_vpc.network.id
+  # TODO: Only official DigitalOcean images support IPv6
+  ipv6 = false
 
   user_data = data.ct_config.controller-ignitions.*.rendered[count.index]
   ssh_keys  = var.ssh_fingerprints


@@ -1,3 +1,10 @@
+# Network VPC
+resource "digitalocean_vpc" "network" {
+  name        = var.cluster_name
+  region      = var.region
+  description = "Network for ${var.cluster_name} cluster"
+}
+
 resource "digitalocean_firewall" "rules" {
   name = var.cluster_name


@@ -2,6 +2,8 @@ output "kubeconfig-admin" {
   value = module.bootstrap.kubeconfig-admin
 }
 
+# Outputs for Kubernetes Ingress
+
 output "controllers_dns" {
   value = digitalocean_record.controllers[0].fqdn
 }
@@ -45,3 +47,10 @@ output "worker_tag" {
   value = digitalocean_tag.workers.name
 }
+
+# Outputs for custom load balancing
+
+output "vpc_id" {
+  description = "ID of the cluster VPC"
+  value       = digitalocean_vpc.network.id
+}


@@ -3,7 +3,7 @@
 terraform {
   required_version = "~> 0.12.6"
   required_providers {
-    digitalocean = "~> 1.3"
+    digitalocean = "~> 1.16"
     ct       = "~> 0.4"
     template = "~> 2.1"
     null     = "~> 2.1"


@@ -35,9 +35,10 @@ resource "digitalocean_droplet" "workers" {
   size = var.worker_type
 
   # network
+  private_networking = true
+  vpc_uuid           = digitalocean_vpc.network.id
   # only official DigitalOcean images support IPv6
   ipv6               = local.is_official_image
-  private_networking = true
 
   user_data = data.ct_config.worker-ignition.rendered
   ssh_keys  = var.ssh_fingerprints


@@ -41,9 +41,10 @@ resource "digitalocean_droplet" "controllers" {
   size = var.controller_type
 
   # network
+  private_networking = true
+  vpc_uuid           = digitalocean_vpc.network.id
   # TODO: Only official DigitalOcean images support IPv6
   ipv6               = false
-  private_networking = true
 
   user_data = data.ct_config.controller-ignitions.*.rendered[count.index]
   ssh_keys  = var.ssh_fingerprints


@@ -1,3 +1,10 @@
+# Network VPC
+resource "digitalocean_vpc" "network" {
+  name        = var.cluster_name
+  region      = var.region
+  description = "Network for ${var.cluster_name} cluster"
+}
+
 resource "digitalocean_firewall" "rules" {
   name = var.cluster_name


@@ -2,6 +2,8 @@ output "kubeconfig-admin" {
   value = module.bootstrap.kubeconfig-admin
 }
 
+# Outputs for Kubernetes Ingress
+
 output "controllers_dns" {
   value = digitalocean_record.controllers[0].fqdn
 }
@@ -45,3 +47,9 @@ output "worker_tag" {
   value = digitalocean_tag.workers.name
 }
+
+# Outputs for custom load balancing
+output "vpc_id" {
+  description = "ID of the cluster VPC"
+  value       = digitalocean_vpc.network.id
+}


@@ -3,7 +3,7 @@
 terraform {
   required_version = "~> 0.12.6"
   required_providers {
-    digitalocean = "~> 1.3"
+    digitalocean = "~> 1.16"
     ct       = "~> 0.4"
     template = "~> 2.1"
     null     = "~> 2.1"


@@ -37,9 +37,10 @@ resource "digitalocean_droplet" "workers" {
   size = var.worker_type
 
   # network
+  private_networking = true
+  vpc_uuid           = digitalocean_vpc.network.id
   # TODO: Only official DigitalOcean images support IPv6
   ipv6               = false
-  private_networking = true
 
   user_data = data.ct_config.worker-ignition.rendered
   ssh_keys  = var.ssh_fingerprints


@@ -30,6 +30,7 @@ Add a DigitalOcean load balancer to distribute IPv4 TCP traffic (HTTP/HTTPS Ingress)
 resource "digitalocean_loadbalancer" "ingress" {
   name        = "ingress"
   region      = "fra1"
+  vpc_uuid    = module.nemo.vpc_id
   droplet_tag = module.nemo.worker_tag
 
   healthcheck {
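
For reference, a fuller sketch of an attached load balancer that consumes the new `vpc_id` output. Only the `vpc_uuid` and `droplet_tag` wiring come from this commit; the ports, protocols, and health check values below are illustrative choices, not part of the diff:

```tf
# Illustrative DigitalOcean load balancer attached to the cluster's VPC
resource "digitalocean_loadbalancer" "ingress" {
  name        = "ingress"
  region      = "fra1"
  vpc_uuid    = module.nemo.vpc_id      # new vpc_id output from the cluster module
  droplet_tag = module.nemo.worker_tag  # target the cluster's worker droplets

  # Example health check against an ingress controller's health endpoint
  healthcheck {
    protocol = "http"
    port     = 10254
    path     = "/healthz"
  }

  # Example forwarding rules for HTTP and HTTPS traffic
  forwarding_rule {
    entry_protocol  = "tcp"
    entry_port      = 80
    target_protocol = "tcp"
    target_port     = 80
  }

  forwarding_rule {
    entry_protocol  = "tcp"
    entry_port      = 443
    target_protocol = "tcp"
    target_port     = 443
  }
}
```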