Migrate DigitalOcean module from Terraform v0.11 to v0.12
* Replace v0.11 bracket type hints with Terraform v0.12 list expressions
* Use expression syntax instead of interpolated strings, where suggested
* Update DigitalOcean tutorial documentation
* Define Terraform and plugin version requirements in versions.tf
* Require digitalocean ~> v1.3 to support Terraform v0.12
* Require ct ~> v0.3.2 to support Terraform v0.12
parent 0ccb2217b5
commit 1366ae404b
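For orientation, the flavor of the syntax changes applied throughout the module looks like this (an illustrative sketch using a hypothetical resource, not lines copied from the diff below):

```tf
# Terraform v0.11 style: quoted type hints and interpolated strings
variable "ssh_fingerprints" {
  type = "list"
}

resource "digitalocean_droplet" "example" {
  region   = "${var.region}"
  ssh_keys = ["${var.ssh_fingerprints}"]
}

# Terraform v0.12 style: first-class expressions and type constraints
variable "ssh_fingerprints" {
  type = list(string)
}

resource "digitalocean_droplet" "example" {
  region   = var.region
  ssh_keys = var.ssh_fingerprints
}
```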
@@ -2,6 +2,12 @@
Notable changes between versions.

+#### DigitalOcean
+
+* Migrate from Terraform v0.11 to v0.12.x (**action required!**)
+* Require `terraform-provider-digitalocean` v1.3+ to support Terraform v0.12
+* Require `terraform-provider-ct` ~> v0.3.2+ to support Terraform v0.12
+
## Latest

## v1.14.3
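Because the migration is flagged as action required, the operator-side flow presumably looks roughly like this (assumed steps, not part of this commit):

```sh
# assumed upgrade flow for an existing cluster configuration
terraform version   # should report v0.12.x after upgrading the CLI
terraform init      # re-initializes providers per the new version constraints
terraform plan      # verify the migrated configuration parses and shows no unexpected changes
```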
@@ -1,21 +1,22 @@
# Self-hosted Kubernetes assets (kubeconfig, manifests)
module "bootkube" {
-  source = "git::https://github.com/poseidon/terraform-render-bootkube.git?ref=082921d67905417755609eebda7d39a7e26f7fdb"
+  source = "git::https://github.com/poseidon/terraform-render-bootkube.git?ref=0103bc06bb3f597455a765bf5d916f9b241cbea0"

-  cluster_name = "${var.cluster_name}"
-  api_servers = ["${format("%s.%s", var.cluster_name, var.dns_zone)}"]
-  etcd_servers = "${digitalocean_record.etcds.*.fqdn}"
-  asset_dir = "${var.asset_dir}"
+  cluster_name = var.cluster_name
+  api_servers = [format("%s.%s", var.cluster_name, var.dns_zone)]
+  etcd_servers = digitalocean_record.etcds.*.fqdn
+  asset_dir = var.asset_dir

-  networking = "${var.networking}"
+  networking = var.networking

  # only effective with Calico networking
  network_encapsulation = "vxlan"
  network_mtu = "1450"

-  pod_cidr = "${var.pod_cidr}"
-  service_cidr = "${var.service_cidr}"
-  cluster_domain_suffix = "${var.cluster_domain_suffix}"
-  enable_reporting = "${var.enable_reporting}"
-  enable_aggregation = "${var.enable_aggregation}"
+  pod_cidr = var.pod_cidr
+  service_cidr = var.service_cidr
+  cluster_domain_suffix = var.cluster_domain_suffix
+  enable_reporting = var.enable_reporting
+  enable_aggregation = var.enable_aggregation
}
@@ -1,25 +1,25 @@
# Controller Instance DNS records
resource "digitalocean_record" "controllers" {
-  count = "${var.controller_count}"
+  count = var.controller_count

  # DNS zone where record should be created
-  domain = "${var.dns_zone}"
+  domain = var.dns_zone

  # DNS record (will be prepended to domain)
-  name = "${var.cluster_name}"
+  name = var.cluster_name
  type = "A"
  ttl = 300

  # IPv4 addresses of controllers
-  value = "${element(digitalocean_droplet.controllers.*.ipv4_address, count.index)}"
+  value = element(digitalocean_droplet.controllers.*.ipv4_address, count.index)
}

# Discrete DNS records for each controller's private IPv4 for etcd usage
resource "digitalocean_record" "etcds" {
-  count = "${var.controller_count}"
+  count = var.controller_count

  # DNS zone where record should be created
-  domain = "${var.dns_zone}"
+  domain = var.dns_zone

  # DNS record (will be prepended to domain)
  name = "${var.cluster_name}-etcd${count.index}"
@@ -27,34 +27,32 @@ resource "digitalocean_record" "etcds" {
  ttl = 300

  # private IPv4 address for etcd
-  value = "${element(digitalocean_droplet.controllers.*.ipv4_address_private, count.index)}"
+  value = element(digitalocean_droplet.controllers.*.ipv4_address_private, count.index)
}

# Controller droplet instances
resource "digitalocean_droplet" "controllers" {
-  count = "${var.controller_count}"
+  count = var.controller_count

  name = "${var.cluster_name}-controller-${count.index}"
-  region = "${var.region}"
+  region = var.region

-  image = "${var.image}"
-  size = "${var.controller_type}"
+  image = var.image
+  size = var.controller_type

  # network
  ipv6 = true
  private_networking = true

-  user_data = "${element(data.ct_config.controller-ignitions.*.rendered, count.index)}"
-  ssh_keys = ["${var.ssh_fingerprints}"]
+  user_data = element(data.ct_config.controller-ignitions.*.rendered, count.index)
+  ssh_keys = var.ssh_fingerprints

  tags = [
-    "${digitalocean_tag.controllers.id}",
+    digitalocean_tag.controllers.id,
  ]

  lifecycle {
-    ignore_changes = [
-      "user_data",
-    ]
+    ignore_changes = [user_data]
  }
}
@@ -65,37 +63,37 @@ resource "digitalocean_tag" "controllers" {

# Controller Ignition configs
data "ct_config" "controller-ignitions" {
-  count = "${var.controller_count}"
-  content = "${element(data.template_file.controller-configs.*.rendered, count.index)}"
+  count = var.controller_count
+  content = element(data.template_file.controller-configs.*.rendered, count.index)
  pretty_print = false
-  snippets = ["${var.controller_clc_snippets}"]
+  snippets = var.controller_clc_snippets
}

# Controller Container Linux configs
data "template_file" "controller-configs" {
-  count = "${var.controller_count}"
+  count = var.controller_count

-  template = "${file("${path.module}/cl/controller.yaml.tmpl")}"
+  template = file("${path.module}/cl/controller.yaml.tmpl")

  vars = {
    # Cannot use cyclic dependencies on controllers or their DNS records
    etcd_name = "etcd${count.index}"
    etcd_domain = "${var.cluster_name}-etcd${count.index}.${var.dns_zone}"

    # etcd0=https://cluster-etcd0.example.com,etcd1=https://cluster-etcd1.example.com,...
-    etcd_initial_cluster = "${join(",", data.template_file.etcds.*.rendered)}"
-    cluster_dns_service_ip = "${cidrhost(var.service_cidr, 10)}"
-    cluster_domain_suffix = "${var.cluster_domain_suffix}"
+    etcd_initial_cluster = join(",", data.template_file.etcds.*.rendered)
+    cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
+    cluster_domain_suffix = var.cluster_domain_suffix
  }
}

data "template_file" "etcds" {
-  count = "${var.controller_count}"
+  count = var.controller_count
  template = "etcd$${index}=https://$${cluster_name}-etcd$${index}.$${dns_zone}:2380"

  vars = {
-    index = "${count.index}"
-    cluster_name = "${var.cluster_name}"
-    dns_zone = "${var.dns_zone}"
+    index = count.index
+    cluster_name = var.cluster_name
+    dns_zone = var.dns_zone
  }
}
@@ -1,50 +1,51 @@
resource "digitalocean_firewall" "rules" {
-  name = "${var.cluster_name}"
+  name = var.cluster_name

  tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]

  # allow ssh, internal flannel, internal node-exporter, internal kubelet
-  inbound_rule = [
-    {
+  inbound_rule {
    protocol = "tcp"
    port_range = "22"
    source_addresses = ["0.0.0.0/0", "::/0"]
-    },
-    {
+  }
+
+  inbound_rule {
    protocol = "udp"
    port_range = "4789"
-    source_tags = ["${digitalocean_tag.controllers.name}", "${digitalocean_tag.workers.name}"]
+    source_tags = [digitalocean_tag.controllers.name, digitalocean_tag.workers.name]
-    },
-    {
+  }
+
+  inbound_rule {
    protocol = "tcp"
    port_range = "9100"
-    source_tags = ["${digitalocean_tag.workers.name}"]
+    source_tags = [digitalocean_tag.workers.name]
-    },
-    {
+  }
+
+  inbound_rule {
    protocol = "tcp"
    port_range = "10250"
-    source_tags = ["${digitalocean_tag.controllers.name}", "${digitalocean_tag.workers.name}"]
+    source_tags = [digitalocean_tag.controllers.name, digitalocean_tag.workers.name]
-    },
-  ]
+  }

  # allow all outbound traffic
-  outbound_rule = [
-    {
+  outbound_rule {
    protocol = "tcp"
    port_range = "1-65535"
    destination_addresses = ["0.0.0.0/0", "::/0"]
-    },
-    {
+  }
+
+  outbound_rule {
    protocol = "udp"
    port_range = "1-65535"
    destination_addresses = ["0.0.0.0/0", "::/0"]
-    },
-    {
+  }
+
+  outbound_rule {
    protocol = "icmp"
    port_range = "1-65535"
    destination_addresses = ["0.0.0.0/0", "::/0"]
-    },
-  ]
+  }
}

resource "digitalocean_firewall" "controllers" {
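The firewall rewrite above reflects Terraform v0.12's stricter separation of arguments and blocks: a list of rule maps assigned via `inbound_rule = [...]` becomes repeated `inbound_rule { ... }` blocks (likewise for `outbound_rule`). A minimal sketch of the pattern, using a hypothetical SSH rule:

```tf
# Before (v0.11 attribute syntax), shown as comments for comparison:
# inbound_rule = [
#   {
#     protocol         = "tcp"
#     port_range       = "22"
#     source_addresses = ["0.0.0.0/0", "::/0"]
#   },
# ]

# After (v0.12 block syntax): one block per rule
inbound_rule {
  protocol         = "tcp"
  port_range       = "22"
  source_addresses = ["0.0.0.0/0", "::/0"]
}
```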
@@ -53,23 +54,23 @@ resource "digitalocean_firewall" "controllers" {
  tags = ["${var.cluster_name}-controller"]

  # etcd, kube-apiserver, kubelet
-  inbound_rule = [
-    {
+  inbound_rule {
    protocol = "tcp"
    port_range = "2379-2380"
-    source_tags = ["${digitalocean_tag.controllers.name}"]
+    source_tags = [digitalocean_tag.controllers.name]
-    },
-    {
+  }
+
+  inbound_rule {
    protocol = "tcp"
    port_range = "2381"
-    source_tags = ["${digitalocean_tag.workers.name}"]
+    source_tags = [digitalocean_tag.workers.name]
-    },
-    {
+  }
+
+  inbound_rule {
    protocol = "tcp"
    port_range = "6443"
    source_addresses = ["0.0.0.0/0", "::/0"]
-    },
-  ]
+  }
}

resource "digitalocean_firewall" "workers" {
@@ -78,21 +79,22 @@ resource "digitalocean_firewall" "workers" {
  tags = ["${var.cluster_name}-worker"]

  # allow HTTP/HTTPS ingress
-  inbound_rule = [
-    {
+  inbound_rule {
    protocol = "tcp"
    port_range = "80"
    source_addresses = ["0.0.0.0/0", "::/0"]
-    },
-    {
+  }
+
+  inbound_rule {
    protocol = "tcp"
    port_range = "443"
    source_addresses = ["0.0.0.0/0", "::/0"]
-    },
-    {
+  }
+
+  inbound_rule {
    protocol = "tcp"
    port_range = "10254"
    source_addresses = ["0.0.0.0/0"]
-    },
-  ]
  }
+}
@@ -1,40 +1,41 @@
output "kubeconfig-admin" {
-  value = "${module.bootkube.kubeconfig-admin}"
+  value = module.bootkube.kubeconfig-admin
}

output "controllers_dns" {
-  value = "${digitalocean_record.controllers.0.fqdn}"
+  value = digitalocean_record.controllers[0].fqdn
}

output "workers_dns" {
  # Multiple A and AAAA records with the same FQDN
-  value = "${digitalocean_record.workers-record-a.0.fqdn}"
+  value = digitalocean_record.workers-record-a[0].fqdn
}

output "controllers_ipv4" {
-  value = ["${digitalocean_droplet.controllers.*.ipv4_address}"]
+  value = [digitalocean_droplet.controllers.*.ipv4_address]
}

output "controllers_ipv6" {
-  value = ["${digitalocean_droplet.controllers.*.ipv6_address}"]
+  value = [digitalocean_droplet.controllers.*.ipv6_address]
}

output "workers_ipv4" {
-  value = ["${digitalocean_droplet.workers.*.ipv4_address}"]
+  value = [digitalocean_droplet.workers.*.ipv4_address]
}

output "workers_ipv6" {
-  value = ["${digitalocean_droplet.workers.*.ipv6_address}"]
+  value = [digitalocean_droplet.workers.*.ipv6_address]
}

# Outputs for custom firewalls

output "controller_tag" {
  description = "Tag applied to controller droplets"
-  value = "${digitalocean_tag.controllers.name}"
+  value = digitalocean_tag.controllers.name
}

output "worker_tag" {
  description = "Tag applied to worker droplets"
-  value = "${digitalocean_tag.workers.name}"
+  value = digitalocean_tag.workers.name
}
@@ -1,25 +0,0 @@
-# Terraform version and plugin versions
-
-terraform {
-  required_version = ">= 0.11.0"
-}
-
-provider "digitalocean" {
-  version = "~> 1.0"
-}
-
-provider "local" {
-  version = "~> 1.0"
-}
-
-provider "null" {
-  version = "~> 1.0"
-}
-
-provider "template" {
-  version = "~> 1.0"
-}
-
-provider "tls" {
-  version = "~> 1.0"
-}
@@ -1,55 +1,55 @@
# Secure copy etcd TLS assets and kubeconfig to controllers. Activates kubelet.service
resource "null_resource" "copy-controller-secrets" {
-  count = "${var.controller_count}"
+  count = var.controller_count

  depends_on = [
-    "digitalocean_firewall.rules",
+    digitalocean_firewall.rules
  ]

  connection {
    type = "ssh"
-    host = "${element(concat(digitalocean_droplet.controllers.*.ipv4_address), count.index)}"
+    host = element(digitalocean_droplet.controllers.*.ipv4_address, count.index)
    user = "core"
    timeout = "15m"
  }

  provisioner "file" {
-    content = "${module.bootkube.kubeconfig-kubelet}"
+    content = module.bootkube.kubeconfig-kubelet
    destination = "$HOME/kubeconfig"
  }

  provisioner "file" {
-    content = "${module.bootkube.etcd_ca_cert}"
+    content = module.bootkube.etcd_ca_cert
    destination = "$HOME/etcd-client-ca.crt"
  }

  provisioner "file" {
-    content = "${module.bootkube.etcd_client_cert}"
+    content = module.bootkube.etcd_client_cert
    destination = "$HOME/etcd-client.crt"
  }

  provisioner "file" {
-    content = "${module.bootkube.etcd_client_key}"
+    content = module.bootkube.etcd_client_key
    destination = "$HOME/etcd-client.key"
  }

  provisioner "file" {
-    content = "${module.bootkube.etcd_server_cert}"
+    content = module.bootkube.etcd_server_cert
    destination = "$HOME/etcd-server.crt"
  }

  provisioner "file" {
-    content = "${module.bootkube.etcd_server_key}"
+    content = module.bootkube.etcd_server_key
    destination = "$HOME/etcd-server.key"
  }

  provisioner "file" {
-    content = "${module.bootkube.etcd_peer_cert}"
+    content = module.bootkube.etcd_peer_cert
    destination = "$HOME/etcd-peer.crt"
  }

  provisioner "file" {
-    content = "${module.bootkube.etcd_peer_key}"
+    content = module.bootkube.etcd_peer_key
    destination = "$HOME/etcd-peer.key"
  }
@@ -72,17 +72,17 @@ resource "null_resource" "copy-controller-secrets" {

# Secure copy kubeconfig to all workers. Activates kubelet.service.
resource "null_resource" "copy-worker-secrets" {
-  count = "${var.worker_count}"
+  count = var.worker_count

  connection {
    type = "ssh"
-    host = "${element(concat(digitalocean_droplet.workers.*.ipv4_address), count.index)}"
+    host = element(digitalocean_droplet.workers.*.ipv4_address, count.index)
    user = "core"
    timeout = "15m"
  }

  provisioner "file" {
-    content = "${module.bootkube.kubeconfig-kubelet}"
+    content = module.bootkube.kubeconfig-kubelet
    destination = "$HOME/kubeconfig"
  }
@@ -97,20 +97,20 @@ resource "null_resource" "copy-worker-secrets" {
# one-time self-hosted cluster bootstrapping.
resource "null_resource" "bootkube-start" {
  depends_on = [
-    "module.bootkube",
-    "null_resource.copy-controller-secrets",
-    "null_resource.copy-worker-secrets",
+    module.bootkube,
+    null_resource.copy-controller-secrets,
+    null_resource.copy-worker-secrets,
  ]

  connection {
    type = "ssh"
-    host = "${digitalocean_droplet.controllers.0.ipv4_address}"
+    host = digitalocean_droplet.controllers[0].ipv4_address
    user = "core"
    timeout = "15m"
  }

  provisioner "file" {
-    source = "${var.asset_dir}"
+    source = var.asset_dir
    destination = "$HOME/assets"
  }
@@ -121,3 +121,4 @@ resource "null_resource" "bootkube-start" {
    ]
  }
}
@@ -1,60 +1,60 @@
variable "cluster_name" {
-  type = "string"
+  type = string
  description = "Unique cluster name (prepended to dns_zone)"
}

# Digital Ocean

variable "region" {
-  type = "string"
+  type = string
  description = "Digital Ocean region (e.g. nyc1, sfo2, fra1, tor1)"
}

variable "dns_zone" {
-  type = "string"
+  type = string
  description = "Digital Ocean domain (i.e. DNS zone) (e.g. do.example.com)"
}

# instances

variable "controller_count" {
-  type = "string"
+  type = string
  default = "1"
  description = "Number of controllers (i.e. masters)"
}

variable "worker_count" {
-  type = "string"
+  type = string
  default = "1"
  description = "Number of workers"
}

variable "controller_type" {
-  type = "string"
+  type = string
  default = "s-2vcpu-2gb"
  description = "Droplet type for controllers (e.g. s-2vcpu-2gb, s-2vcpu-4gb, s-4vcpu-8gb)."
}

variable "worker_type" {
-  type = "string"
+  type = string
  default = "s-1vcpu-1gb"
  description = "Droplet type for workers (e.g. s-1vcpu-1gb, s-1vcpu-2gb, s-2vcpu-2gb)"
}

variable "image" {
-  type = "string"
+  type = string
  default = "coreos-stable"
  description = "Container Linux image for instances (e.g. coreos-stable)"
}

variable "controller_clc_snippets" {
-  type = "list"
+  type = list(string)
  description = "Controller Container Linux Config snippets"
  default = []
}

variable "worker_clc_snippets" {
-  type = "list"
+  type = list(string)
  description = "Worker Container Linux Config snippets"
  default = []
}
@@ -62,24 +62,24 @@ variable "worker_clc_snippets" {
# configuration

variable "ssh_fingerprints" {
-  type = "list"
+  type = list(string)
  description = "SSH public key fingerprints. (e.g. see `ssh-add -l -E md5`)"
}

variable "asset_dir" {
  description = "Path to a directory where generated assets should be placed (contains secrets)"
-  type = "string"
+  type = string
}

variable "networking" {
  description = "Choice of networking provider (flannel or calico)"
-  type = "string"
+  type = string
  default = "flannel"
}

variable "pod_cidr" {
  description = "CIDR IPv4 range to assign Kubernetes pods"
-  type = "string"
+  type = string
  default = "10.2.0.0/16"
}
@@ -89,24 +89,26 @@ CIDR IPv4 range to assign Kubernetes services.
The 1st IP will be reserved for kube_apiserver, the 10th IP will be reserved for coredns.
EOD

-  type = "string"
+  type = string
  default = "10.3.0.0/16"
}

variable "cluster_domain_suffix" {
  description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
-  type = "string"
+  type = string
  default = "cluster.local"
}

variable "enable_reporting" {
-  type = "string"
+  type = string
  description = "Enable usage or analytics reporting to upstreams (Calico)"
  default = "false"
}

variable "enable_aggregation" {
  description = "Enable the Kubernetes Aggregation Layer (defaults to false)"
-  type = "string"
+  type = string
  default = "false"
}
@@ -0,0 +1,12 @@
+# Terraform version and plugin versions
+
+terraform {
+  required_version = "~> 0.12.0"
+
+  required_providers {
+    digitalocean = "~> 1.3"
+    ct = "~> 0.3.2"
+    template = "~> 2.1"
+    null = "~> 2.1"
+  }
+}
@@ -1,47 +1,47 @@
# Worker DNS records
resource "digitalocean_record" "workers-record-a" {
-  count = "${var.worker_count}"
+  count = var.worker_count

  # DNS zone where record should be created
-  domain = "${var.dns_zone}"
+  domain = var.dns_zone

  name = "${var.cluster_name}-workers"
  type = "A"
  ttl = 300
-  value = "${element(digitalocean_droplet.workers.*.ipv4_address, count.index)}"
+  value = element(digitalocean_droplet.workers.*.ipv4_address, count.index)
}

resource "digitalocean_record" "workers-record-aaaa" {
-  count = "${var.worker_count}"
+  count = var.worker_count

  # DNS zone where record should be created
-  domain = "${var.dns_zone}"
+  domain = var.dns_zone

  name = "${var.cluster_name}-workers"
  type = "AAAA"
  ttl = 300
-  value = "${element(digitalocean_droplet.workers.*.ipv6_address, count.index)}"
+  value = element(digitalocean_droplet.workers.*.ipv6_address, count.index)
}

# Worker droplet instances
resource "digitalocean_droplet" "workers" {
-  count = "${var.worker_count}"
+  count = var.worker_count

  name = "${var.cluster_name}-worker-${count.index}"
-  region = "${var.region}"
+  region = var.region

-  image = "${var.image}"
-  size = "${var.worker_type}"
+  image = var.image
+  size = var.worker_type

  # network
  ipv6 = true
  private_networking = true

-  user_data = "${data.ct_config.worker-ignition.rendered}"
-  ssh_keys = ["${var.ssh_fingerprints}"]
+  user_data = data.ct_config.worker-ignition.rendered
+  ssh_keys = var.ssh_fingerprints

  tags = [
-    "${digitalocean_tag.workers.id}",
+    digitalocean_tag.workers.id,
  ]

  lifecycle {
@@ -56,17 +56,18 @@ resource "digitalocean_tag" "workers" {

# Worker Ignition config
data "ct_config" "worker-ignition" {
-  content = "${data.template_file.worker-config.rendered}"
+  content = data.template_file.worker-config.rendered
  pretty_print = false
-  snippets = ["${var.worker_clc_snippets}"]
+  snippets = var.worker_clc_snippets
}

# Worker Container Linux config
data "template_file" "worker-config" {
-  template = "${file("${path.module}/cl/worker.yaml.tmpl")}"
+  template = file("${path.module}/cl/worker.yaml.tmpl")

  vars = {
-    cluster_dns_service_ip = "${cidrhost(var.service_cidr, 10)}"
-    cluster_domain_suffix = "${var.cluster_domain_suffix}"
+    cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
+    cluster_domain_suffix = var.cluster_domain_suffix
  }
}
@@ -10,15 +10,15 @@ Controllers are provisioned to run an `etcd-member` peer and a `kubelet` service

* Digital Ocean Account and Token
* Digital Ocean Domain (registered Domain Name or delegated subdomain)
-* Terraform v0.11.x and [terraform-provider-ct](https://github.com/poseidon/terraform-provider-ct) installed locally
+* Terraform v0.12.x and [terraform-provider-ct](https://github.com/poseidon/terraform-provider-ct) installed locally

## Terraform Setup

-Install [Terraform](https://www.terraform.io/downloads.html) v0.11.x on your system.
+Install [Terraform](https://www.terraform.io/downloads.html) v0.12.x on your system.

```sh
$ terraform version
-Terraform v0.11.14
+Terraform v0.12.0
```

Add the [terraform-provider-ct](https://github.com/poseidon/terraform-provider-ct) plugin binary for your system to `~/.terraform.d/plugins/`, noting the final name.
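On Linux, placing the plugin typically looks something like the following; the release asset name and destination file name here are assumptions, so check the terraform-provider-ct releases page for the actual names:

```sh
# assumed asset and plugin names for terraform-provider-ct v0.3.2
mkdir -p ~/.terraform.d/plugins
mv terraform-provider-ct-v0.3.2-linux-amd64 ~/.terraform.d/plugins/terraform-provider-ct_v0.3.2
```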
@@ -50,34 +50,13 @@ Configure the DigitalOcean provider to use your token in a providers.tf file.

```tf
provider "digitalocean" {
-  version = "~> 1.3.0"
+  version = "1.3.0"
  token = "${chomp(file("~/.config/digital-ocean/token"))}"
-  alias = "default"
}

provider "ct" {
  version = "0.3.2"
}
-
-provider "local" {
-  version = "~> 1.0"
-  alias = "default"
-}
-
-provider "null" {
-  version = "~> 1.0"
-  alias = "default"
-}
-
-provider "template" {
-  version = "~> 1.0"
-  alias = "default"
-}
-
-provider "tls" {
-  version = "~> 1.0"
-  alias = "default"
-}
```

## Cluster
@@ -86,15 +65,7 @@ Define a Kubernetes cluster using the module digital-ocean/container-linux/kube

```tf
module "digital-ocean-nemo" {
-  source = "git::https://github.com/poseidon/typhoon//digital-ocean/container-linux/kubernetes?ref=v1.14.3"
+  source = "git::https://github.com/poseidon/typhoon//digital-ocean/container-linux/kubernetes?ref=v1.14.4"

-  providers = {
-    digitalocean = "digitalocean.default"
-    local = "local.default"
-    null = "null.default"
-    template = "template.default"
-    tls = "tls.default"
-  }
-
  # Digital Ocean
  cluster_name = "nemo"