From 189487ecaab4f40f1418e98d95d4aecf365f7d4d Mon Sep 17 00:00:00 2001 From: Dalton Hubble Date: Mon, 27 May 2019 21:43:08 -0700 Subject: [PATCH] Migrate Azure module Terraform v0.11 to v0.12 * Replace v0.11 bracket type hints with Terraform v0.12 list expressions * Use expression syntax instead of interpolated strings, where suggested * Update Azure tutorial and worker pools documentation * Define Terraform and plugin version requirements in versions.tf * Require azurerm ~> 1.27 to support Terraform v0.12 * Require ct ~> 0.3.2 to support Terraform v0.12 --- CHANGES.md | 29 +++-- azure/container-linux/kubernetes/bootkube.tf | 23 ++-- .../container-linux/kubernetes/controllers.tf | 99 +++++++------- azure/container-linux/kubernetes/lb.tf | 67 +++++----- azure/container-linux/kubernetes/network.tf | 25 ++-- azure/container-linux/kubernetes/outputs.tf | 23 ++-- azure/container-linux/kubernetes/require.tf | 25 ---- azure/container-linux/kubernetes/security.tf | 121 +++++++++--------- azure/container-linux/kubernetes/ssh.tf | 35 +++-- azure/container-linux/kubernetes/variables.tf | 52 ++++---- azure/container-linux/kubernetes/versions.tf | 12 ++ azure/container-linux/kubernetes/workers.tf | 31 ++--- .../kubernetes/workers/variables.tf | 34 ++--- .../kubernetes/workers/versions.tf | 4 + .../kubernetes/workers/workers.tf | 51 ++++---- docs/advanced/worker-pools.md | 30 ++--- docs/cl/azure.md | 39 +----- 17 files changed, 342 insertions(+), 358 deletions(-) delete mode 100644 azure/container-linux/kubernetes/require.tf create mode 100644 azure/container-linux/kubernetes/versions.tf create mode 100644 azure/container-linux/kubernetes/workers/versions.tf diff --git a/CHANGES.md b/CHANGES.md index b99410ff..24ae8114 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -2,13 +2,31 @@ Notable changes between versions. +#### AWS + +* Migrate from Terraform v0.11 to v0.12.x (**action required!**) + * Require `terraform-provider-aws` v2.7+ to support Terraform v0.12 + * Require `terraform-provider-ct` v0.3.2+ to support Terraform v0.12 + +#### Azure + +* Migrate from Terraform v0.11 to v0.12.x (**action required!**) + * Require `terraform-provider-azurerm` v1.27+ to support Terraform v0.12 + * Require `terraform-provider-ct` v0.3.2+ to support Terraform v0.12 + #### DigitalOcean * Migrate from Terraform v0.11 to v0.12.x (**action required!**) * Require `terraform-provider-digitalocean` v1.3+ to support Terraform v0.12 * Require `terraform-provider-ct` ~> v0.3.2+ to support Terraform v0.12 -## Latest +#### Google Cloud + +* Migrate from Terraform v0.11 to v0.12.x (**action required!**) + * Require `terraform-provider-google` v2.5+ to support Terraform v0.12 + * Require `terraform-provider-ct` v0.3.2+ to support Terraform v0.12 + +## v1.14.3 ## v1.14.3 @@ -18,18 +36,14 @@ Notable changes between versions. 
* Fix trailing slash in terraform-render-bootkube version ([#479](https://github.com/poseidon/typhoon/pull/479)) * Recommend updating `terraform-provider-ct` plugin from v0.3.1 to [v0.3.2](https://github.com/poseidon/terraform-provider-ct/releases/tag/v0.3.2) ([#487](https://github.com/poseidon/typhoon/pull/487)) -### AWS +#### AWS -* Migrate from Terraform v0.11 to v0.12.x (**action required!**) - * Require `terraform-provider-aws` v2.7+ to support Terraform v0.12 - * Require `terraform-provider-ct` v0.3.2+ to support Terraform v0.12 * Rename `worker` pool module `count` variable to `worker_count` ([#485](https://github.com/poseidon/typhoon/pull/485)) (action required) * `count` will become a reserved variable name in Terraform v0.12 #### Azure * Replace `azurerm_autoscale_setting` with `azurerm_monitor_autoscale_setting` ([#482](https://github.com/poseidon/typhoon/pull/482)) - * Require `terraform-provider-azurerm` v1.22+ (action required) * Rename `worker` pool module `count` variable to `worker_count` ([#485](https://github.com/poseidon/typhoon/pull/485)) (action required) * `count` will become a reserved variable name in Terraform v0.12 @@ -39,9 +53,6 @@ Notable changes between versions. #### Google Cloud -* Migrate from Terraform v0.11 to v0.12.x (**action required!**) - * Require `terraform-provider-google` v2.5+ to support Terraform v0.12 - * Require `terraform-provider-ct` v0.3.2+ to support Terraform v0.12 * Rename `worker` pool module `count` variable to `worker_count` ([#485](https://github.com/poseidon/typhoon/pull/485)) (action required) * `count` is a reserved variable in Terraform v0.12 diff --git a/azure/container-linux/kubernetes/bootkube.tf b/azure/container-linux/kubernetes/bootkube.tf index a2789634..958e0170 100644 --- a/azure/container-linux/kubernetes/bootkube.tf +++ b/azure/container-linux/kubernetes/bootkube.tf @@ -1,22 +1,23 @@ # Self-hosted Kubernetes assets (kubeconfig, manifests) module "bootkube" { - source = "git::https://github.com/poseidon/terraform-render-bootkube.git?ref=082921d67905417755609eebda7d39a7e26f7fdb" + source = "git::https://github.com/poseidon/terraform-render-bootkube.git?ref=0103bc06bb3f597455a765bf5d916f9b241cbea0" - cluster_name = "${var.cluster_name}" - api_servers = ["${format("%s.%s", var.cluster_name, var.dns_zone)}"] - etcd_servers = ["${formatlist("%s.%s", azurerm_dns_a_record.etcds.*.name, var.dns_zone)}"] - asset_dir = "${var.asset_dir}" + cluster_name = var.cluster_name + api_servers = [format("%s.%s", var.cluster_name, var.dns_zone)] + etcd_servers = formatlist("%s.%s", azurerm_dns_a_record.etcds.*.name, var.dns_zone) + asset_dir = var.asset_dir - networking = "${var.networking}" + networking = var.networking # only effective with Calico networking # we should be able to use 1450 MTU, but in practice, 1410 was needed network_encapsulation = "vxlan" network_mtu = "1410" - pod_cidr = "${var.pod_cidr}" - service_cidr = "${var.service_cidr}" - cluster_domain_suffix = "${var.cluster_domain_suffix}" - enable_reporting = "${var.enable_reporting}" - enable_aggregation = "${var.enable_aggregation}" + pod_cidr = var.pod_cidr + service_cidr = var.service_cidr + cluster_domain_suffix = var.cluster_domain_suffix + enable_reporting = var.enable_reporting + enable_aggregation = var.enable_aggregation } + diff --git a/azure/container-linux/kubernetes/controllers.tf b/azure/container-linux/kubernetes/controllers.tf index 63a8fb17..2739eb6e 100644 --- a/azure/container-linux/kubernetes/controllers.tf +++ 
b/azure/container-linux/kubernetes/controllers.tf @@ -1,31 +1,34 @@ # Discrete DNS records for each controller's private IPv4 for etcd usage resource "azurerm_dns_a_record" "etcds" { - count = "${var.controller_count}" - resource_group_name = "${var.dns_zone_group}" + count = var.controller_count + resource_group_name = var.dns_zone_group # DNS Zone name where record should be created - zone_name = "${var.dns_zone}" + zone_name = var.dns_zone # DNS record - name = "${format("%s-etcd%d", var.cluster_name, count.index)}" + name = format("%s-etcd%d", var.cluster_name, count.index) ttl = 300 # private IPv4 address for etcd - records = ["${element(azurerm_network_interface.controllers.*.private_ip_address, count.index)}"] + records = [element( + azurerm_network_interface.controllers.*.private_ip_address, + count.index, + )] } locals { # Channel for a Container Linux derivative # coreos-stable -> Container Linux Stable - channel = "${element(split("-", var.os_image), 1)}" + channel = element(split("-", var.os_image), 1) } # Controller availability set to spread controllers resource "azurerm_availability_set" "controllers" { - resource_group_name = "${azurerm_resource_group.cluster.name}" + resource_group_name = azurerm_resource_group.cluster.name name = "${var.cluster_name}-controllers" - location = "${var.region}" + location = var.region platform_fault_domain_count = 2 platform_update_domain_count = 4 managed = true @@ -33,19 +36,19 @@ resource "azurerm_availability_set" "controllers" { # Controller instances resource "azurerm_virtual_machine" "controllers" { - count = "${var.controller_count}" - resource_group_name = "${azurerm_resource_group.cluster.name}" + count = var.controller_count + resource_group_name = azurerm_resource_group.cluster.name name = "${var.cluster_name}-controller-${count.index}" - location = "${var.region}" - availability_set_id = "${azurerm_availability_set.controllers.id}" - vm_size = "${var.controller_type}" + location = var.region + availability_set_id = azurerm_availability_set.controllers.id + vm_size = var.controller_type # boot storage_image_reference { publisher = "CoreOS" offer = "CoreOS" - sku = "${local.channel}" + sku = local.channel version = "latest" } @@ -54,18 +57,18 @@ resource "azurerm_virtual_machine" "controllers" { name = "${var.cluster_name}-controller-${count.index}" create_option = "FromImage" caching = "ReadWrite" - disk_size_gb = "${var.disk_size}" + disk_size_gb = var.disk_size os_type = "Linux" managed_disk_type = "Premium_LRS" } # network - network_interface_ids = ["${element(azurerm_network_interface.controllers.*.id, count.index)}"] + network_interface_ids = [element(azurerm_network_interface.controllers.*.id, count.index)] os_profile { computer_name = "${var.cluster_name}-controller-${count.index}" admin_username = "core" - custom_data = "${element(data.ct_config.controller-ignitions.*.rendered, count.index)}" + custom_data = element(data.ct_config.controller-ignitions.*.rendered, count.index) } # Azure mandates setting an ssh_key, even though Ignition custom_data handles it too @@ -74,7 +77,7 @@ resource "azurerm_virtual_machine" "controllers" { ssh_keys { path = "/home/core/.ssh/authorized_keys" - key_data = "${var.ssh_authorized_key}" + key_data = var.ssh_authorized_key } } @@ -84,85 +87,87 @@ resource "azurerm_virtual_machine" "controllers" { lifecycle { ignore_changes = [ - "storage_os_disk", - "os_profile", + storage_os_disk, + os_profile, ] } } # Controller NICs with public and private IPv4 resource "azurerm_network_interface" 
"controllers" { - count = "${var.controller_count}" - resource_group_name = "${azurerm_resource_group.cluster.name}" + count = var.controller_count + resource_group_name = azurerm_resource_group.cluster.name name = "${var.cluster_name}-controller-${count.index}" - location = "${azurerm_resource_group.cluster.location}" - network_security_group_id = "${azurerm_network_security_group.controller.id}" + location = azurerm_resource_group.cluster.location + network_security_group_id = azurerm_network_security_group.controller.id ip_configuration { name = "ip0" - subnet_id = "${azurerm_subnet.controller.id}" + subnet_id = azurerm_subnet.controller.id private_ip_address_allocation = "dynamic" # public IPv4 - public_ip_address_id = "${element(azurerm_public_ip.controllers.*.id, count.index)}" + public_ip_address_id = element(azurerm_public_ip.controllers.*.id, count.index) } } # Add controller NICs to the controller backend address pool resource "azurerm_network_interface_backend_address_pool_association" "controllers" { - network_interface_id = "${azurerm_network_interface.controllers.id}" + network_interface_id = azurerm_network_interface.controllers[0].id ip_configuration_name = "ip0" - backend_address_pool_id = "${azurerm_lb_backend_address_pool.controller.id}" + backend_address_pool_id = azurerm_lb_backend_address_pool.controller.id } # Controller public IPv4 addresses resource "azurerm_public_ip" "controllers" { - count = "${var.controller_count}" - resource_group_name = "${azurerm_resource_group.cluster.name}" + count = var.controller_count + resource_group_name = azurerm_resource_group.cluster.name name = "${var.cluster_name}-controller-${count.index}" - location = "${azurerm_resource_group.cluster.location}" + location = azurerm_resource_group.cluster.location sku = "Standard" allocation_method = "Static" } # Controller Ignition configs data "ct_config" "controller-ignitions" { - count = "${var.controller_count}" - content = "${element(data.template_file.controller-configs.*.rendered, count.index)}" + count = var.controller_count + content = element( + data.template_file.controller-configs.*.rendered, + count.index, + ) pretty_print = false - snippets = ["${var.controller_clc_snippets}"] + snippets = var.controller_clc_snippets } # Controller Container Linux configs data "template_file" "controller-configs" { - count = "${var.controller_count}" + count = var.controller_count - template = "${file("${path.module}/cl/controller.yaml.tmpl")}" + template = file("${path.module}/cl/controller.yaml.tmpl") vars = { # Cannot use cyclic dependencies on controllers or their DNS records etcd_name = "etcd${count.index}" etcd_domain = "${var.cluster_name}-etcd${count.index}.${var.dns_zone}" - # etcd0=https://cluster-etcd0.example.com,etcd1=https://cluster-etcd1.example.com,... 
- etcd_initial_cluster = "${join(",", data.template_file.etcds.*.rendered)}" - - kubeconfig = "${indent(10, module.bootkube.kubeconfig-kubelet)}" - ssh_authorized_key = "${var.ssh_authorized_key}" - cluster_dns_service_ip = "${cidrhost(var.service_cidr, 10)}" - cluster_domain_suffix = "${var.cluster_domain_suffix}" + etcd_initial_cluster = join(",", data.template_file.etcds.*.rendered) + kubeconfig = indent(10, module.bootkube.kubeconfig-kubelet) + ssh_authorized_key = var.ssh_authorized_key + cluster_dns_service_ip = cidrhost(var.service_cidr, 10) + cluster_domain_suffix = var.cluster_domain_suffix } } data "template_file" "etcds" { - count = "${var.controller_count}" + count = var.controller_count template = "etcd$${index}=https://$${cluster_name}-etcd$${index}.$${dns_zone}:2380" vars = { - index = "${count.index}" - cluster_name = "${var.cluster_name}" - dns_zone = "${var.dns_zone}" + index = count.index + cluster_name = var.cluster_name + dns_zone = var.dns_zone } } + diff --git a/azure/container-linux/kubernetes/lb.tf b/azure/container-linux/kubernetes/lb.tf index d010937f..6de81b22 100644 --- a/azure/container-linux/kubernetes/lb.tf +++ b/azure/container-linux/kubernetes/lb.tf @@ -1,123 +1,123 @@ # DNS record for the apiserver load balancer resource "azurerm_dns_a_record" "apiserver" { - resource_group_name = "${var.dns_zone_group}" + resource_group_name = var.dns_zone_group # DNS Zone name where record should be created - zone_name = "${var.dns_zone}" + zone_name = var.dns_zone # DNS record - name = "${var.cluster_name}" + name = var.cluster_name ttl = 300 # IPv4 address of apiserver load balancer - records = ["${azurerm_public_ip.apiserver-ipv4.ip_address}"] + records = [azurerm_public_ip.apiserver-ipv4.ip_address] } # Static IPv4 address for the apiserver frontend resource "azurerm_public_ip" "apiserver-ipv4" { - resource_group_name = "${azurerm_resource_group.cluster.name}" + resource_group_name = azurerm_resource_group.cluster.name name = "${var.cluster_name}-apiserver-ipv4" - location = "${var.region}" + location = var.region sku = "Standard" allocation_method = "Static" } # Static IPv4 address for the ingress frontend resource "azurerm_public_ip" "ingress-ipv4" { - resource_group_name = "${azurerm_resource_group.cluster.name}" + resource_group_name = azurerm_resource_group.cluster.name name = "${var.cluster_name}-ingress-ipv4" - location = "${var.region}" + location = var.region sku = "Standard" allocation_method = "Static" } # Network Load Balancer for apiservers and ingress resource "azurerm_lb" "cluster" { - resource_group_name = "${azurerm_resource_group.cluster.name}" + resource_group_name = azurerm_resource_group.cluster.name - name = "${var.cluster_name}" - location = "${var.region}" + name = var.cluster_name + location = var.region sku = "Standard" frontend_ip_configuration { name = "apiserver" - public_ip_address_id = "${azurerm_public_ip.apiserver-ipv4.id}" + public_ip_address_id = azurerm_public_ip.apiserver-ipv4.id } frontend_ip_configuration { name = "ingress" - public_ip_address_id = "${azurerm_public_ip.ingress-ipv4.id}" + public_ip_address_id = azurerm_public_ip.ingress-ipv4.id } } resource "azurerm_lb_rule" "apiserver" { - resource_group_name = "${azurerm_resource_group.cluster.name}" + resource_group_name = azurerm_resource_group.cluster.name name = "apiserver" - loadbalancer_id = "${azurerm_lb.cluster.id}" + loadbalancer_id = azurerm_lb.cluster.id frontend_ip_configuration_name = "apiserver" protocol = "Tcp" frontend_port = 6443 backend_port = 6443 - 
backend_address_pool_id = "${azurerm_lb_backend_address_pool.controller.id}" - probe_id = "${azurerm_lb_probe.apiserver.id}" + backend_address_pool_id = azurerm_lb_backend_address_pool.controller.id + probe_id = azurerm_lb_probe.apiserver.id } resource "azurerm_lb_rule" "ingress-http" { - resource_group_name = "${azurerm_resource_group.cluster.name}" + resource_group_name = azurerm_resource_group.cluster.name name = "ingress-http" - loadbalancer_id = "${azurerm_lb.cluster.id}" + loadbalancer_id = azurerm_lb.cluster.id frontend_ip_configuration_name = "ingress" protocol = "Tcp" frontend_port = 80 backend_port = 80 - backend_address_pool_id = "${azurerm_lb_backend_address_pool.worker.id}" - probe_id = "${azurerm_lb_probe.ingress.id}" + backend_address_pool_id = azurerm_lb_backend_address_pool.worker.id + probe_id = azurerm_lb_probe.ingress.id } resource "azurerm_lb_rule" "ingress-https" { - resource_group_name = "${azurerm_resource_group.cluster.name}" + resource_group_name = azurerm_resource_group.cluster.name name = "ingress-https" - loadbalancer_id = "${azurerm_lb.cluster.id}" + loadbalancer_id = azurerm_lb.cluster.id frontend_ip_configuration_name = "ingress" protocol = "Tcp" frontend_port = 443 backend_port = 443 - backend_address_pool_id = "${azurerm_lb_backend_address_pool.worker.id}" - probe_id = "${azurerm_lb_probe.ingress.id}" + backend_address_pool_id = azurerm_lb_backend_address_pool.worker.id + probe_id = azurerm_lb_probe.ingress.id } # Address pool of controllers resource "azurerm_lb_backend_address_pool" "controller" { - resource_group_name = "${azurerm_resource_group.cluster.name}" + resource_group_name = azurerm_resource_group.cluster.name name = "controller" - loadbalancer_id = "${azurerm_lb.cluster.id}" + loadbalancer_id = azurerm_lb.cluster.id } # Address pool of workers resource "azurerm_lb_backend_address_pool" "worker" { - resource_group_name = "${azurerm_resource_group.cluster.name}" + resource_group_name = azurerm_resource_group.cluster.name name = "worker" - loadbalancer_id = "${azurerm_lb.cluster.id}" + loadbalancer_id = azurerm_lb.cluster.id } # Health checks / probes # TCP health check for apiserver resource "azurerm_lb_probe" "apiserver" { - resource_group_name = "${azurerm_resource_group.cluster.name}" + resource_group_name = azurerm_resource_group.cluster.name name = "apiserver" - loadbalancer_id = "${azurerm_lb.cluster.id}" + loadbalancer_id = azurerm_lb.cluster.id protocol = "Tcp" port = 6443 @@ -129,10 +129,10 @@ resource "azurerm_lb_probe" "apiserver" { # HTTP health check for ingress resource "azurerm_lb_probe" "ingress" { - resource_group_name = "${azurerm_resource_group.cluster.name}" + resource_group_name = azurerm_resource_group.cluster.name name = "ingress" - loadbalancer_id = "${azurerm_lb.cluster.id}" + loadbalancer_id = azurerm_lb.cluster.id protocol = "Http" port = 10254 request_path = "/healthz" @@ -142,3 +142,4 @@ resource "azurerm_lb_probe" "ingress" { interval_in_seconds = 5 } + diff --git a/azure/container-linux/kubernetes/network.tf b/azure/container-linux/kubernetes/network.tf index da67a3ed..aa9157b0 100644 --- a/azure/container-linux/kubernetes/network.tf +++ b/azure/container-linux/kubernetes/network.tf @@ -1,15 +1,15 @@ # Organize cluster into a resource group resource "azurerm_resource_group" "cluster" { - name = "${var.cluster_name}" - location = "${var.region}" + name = var.cluster_name + location = var.region } resource "azurerm_virtual_network" "network" { - resource_group_name = "${azurerm_resource_group.cluster.name}" + 
resource_group_name = azurerm_resource_group.cluster.name - name = "${var.cluster_name}" - location = "${azurerm_resource_group.cluster.location}" - address_space = ["${var.host_cidr}"] + name = var.cluster_name + location = azurerm_resource_group.cluster.location + address_space = [var.host_cidr] } # Subnets - separate subnets for controller and workers because Azure @@ -17,17 +17,18 @@ resource "azurerm_virtual_network" "network" { # tags like GCP or security group membership like AWS resource "azurerm_subnet" "controller" { - resource_group_name = "${azurerm_resource_group.cluster.name}" + resource_group_name = azurerm_resource_group.cluster.name name = "controller" - virtual_network_name = "${azurerm_virtual_network.network.name}" - address_prefix = "${cidrsubnet(var.host_cidr, 1, 0)}" + virtual_network_name = azurerm_virtual_network.network.name + address_prefix = cidrsubnet(var.host_cidr, 1, 0) } resource "azurerm_subnet" "worker" { - resource_group_name = "${azurerm_resource_group.cluster.name}" + resource_group_name = azurerm_resource_group.cluster.name name = "worker" - virtual_network_name = "${azurerm_virtual_network.network.name}" - address_prefix = "${cidrsubnet(var.host_cidr, 1, 1)}" + virtual_network_name = azurerm_virtual_network.network.name + address_prefix = cidrsubnet(var.host_cidr, 1, 1) } + diff --git a/azure/container-linux/kubernetes/outputs.tf b/azure/container-linux/kubernetes/outputs.tf index bfd2a143..79a936a8 100644 --- a/azure/container-linux/kubernetes/outputs.tf +++ b/azure/container-linux/kubernetes/outputs.tf @@ -1,55 +1,56 @@ output "kubeconfig-admin" { - value = "${module.bootkube.kubeconfig-admin}" + value = module.bootkube.kubeconfig-admin } # Outputs for Kubernetes Ingress output "ingress_static_ipv4" { - value = "${azurerm_public_ip.ingress-ipv4.ip_address}" + value = azurerm_public_ip.ingress-ipv4.ip_address description = "IPv4 address of the load balancer for distributing traffic to Ingress controllers" } # Outputs for worker pools output "region" { - value = "${azurerm_resource_group.cluster.location}" + value = azurerm_resource_group.cluster.location } output "resource_group_name" { - value = "${azurerm_resource_group.cluster.name}" + value = azurerm_resource_group.cluster.name } output "subnet_id" { - value = "${azurerm_subnet.worker.id}" + value = azurerm_subnet.worker.id } output "security_group_id" { - value = "${azurerm_network_security_group.worker.id}" + value = azurerm_network_security_group.worker.id } output "kubeconfig" { - value = "${module.bootkube.kubeconfig-kubelet}" + value = module.bootkube.kubeconfig-kubelet } # Outputs for custom firewalling output "worker_security_group_name" { - value = "${azurerm_network_security_group.worker.name}" + value = azurerm_network_security_group.worker.name } output "worker_address_prefix" { description = "Worker network subnet CIDR address (for source/destination)" - value = "${azurerm_subnet.worker.address_prefix}" + value = azurerm_subnet.worker.address_prefix } # Outputs for custom load balancing output "loadbalancer_id" { description = "ID of the cluster load balancer" - value = "${azurerm_lb.cluster.id}" + value = azurerm_lb.cluster.id } output "backend_address_pool_id" { description = "ID of the worker backend address pool" - value = "${azurerm_lb_backend_address_pool.worker.id}" + value = azurerm_lb_backend_address_pool.worker.id } + diff --git a/azure/container-linux/kubernetes/require.tf b/azure/container-linux/kubernetes/require.tf deleted file mode 100644 index 836a87d5..00000000 --- 
a/azure/container-linux/kubernetes/require.tf +++ /dev/null @@ -1,25 +0,0 @@ -# Terraform version and plugin versions - -terraform { - required_version = ">= 0.11.0" -} - -provider "azurerm" { - version = "~> 1.22" -} - -provider "local" { - version = "~> 1.0" -} - -provider "null" { - version = "~> 1.0" -} - -provider "template" { - version = "~> 1.0" -} - -provider "tls" { - version = "~> 1.0" -} diff --git a/azure/container-linux/kubernetes/security.tf b/azure/container-linux/kubernetes/security.tf index 13830709..b9fd1c65 100644 --- a/azure/container-linux/kubernetes/security.tf +++ b/azure/container-linux/kubernetes/security.tf @@ -1,17 +1,17 @@ # Controller security group resource "azurerm_network_security_group" "controller" { - resource_group_name = "${azurerm_resource_group.cluster.name}" + resource_group_name = azurerm_resource_group.cluster.name name = "${var.cluster_name}-controller" - location = "${azurerm_resource_group.cluster.location}" + location = azurerm_resource_group.cluster.location } resource "azurerm_network_security_rule" "controller-ssh" { - resource_group_name = "${azurerm_resource_group.cluster.name}" + resource_group_name = azurerm_resource_group.cluster.name name = "allow-ssh" - network_security_group_name = "${azurerm_network_security_group.controller.name}" + network_security_group_name = azurerm_network_security_group.controller.name priority = "2000" access = "Allow" direction = "Inbound" @@ -19,45 +19,45 @@ resource "azurerm_network_security_rule" "controller-ssh" { source_port_range = "*" destination_port_range = "22" source_address_prefix = "*" - destination_address_prefix = "${azurerm_subnet.controller.address_prefix}" + destination_address_prefix = azurerm_subnet.controller.address_prefix } resource "azurerm_network_security_rule" "controller-etcd" { - resource_group_name = "${azurerm_resource_group.cluster.name}" + resource_group_name = azurerm_resource_group.cluster.name name = "allow-etcd" - network_security_group_name = "${azurerm_network_security_group.controller.name}" + network_security_group_name = azurerm_network_security_group.controller.name priority = "2005" access = "Allow" direction = "Inbound" protocol = "Tcp" source_port_range = "*" destination_port_range = "2379-2380" - source_address_prefix = "${azurerm_subnet.controller.address_prefix}" - destination_address_prefix = "${azurerm_subnet.controller.address_prefix}" + source_address_prefix = azurerm_subnet.controller.address_prefix + destination_address_prefix = azurerm_subnet.controller.address_prefix } # Allow Prometheus to scrape etcd metrics resource "azurerm_network_security_rule" "controller-etcd-metrics" { - resource_group_name = "${azurerm_resource_group.cluster.name}" + resource_group_name = azurerm_resource_group.cluster.name name = "allow-etcd-metrics" - network_security_group_name = "${azurerm_network_security_group.controller.name}" + network_security_group_name = azurerm_network_security_group.controller.name priority = "2010" access = "Allow" direction = "Inbound" protocol = "Tcp" source_port_range = "*" destination_port_range = "2381" - source_address_prefix = "${azurerm_subnet.worker.address_prefix}" - destination_address_prefix = "${azurerm_subnet.controller.address_prefix}" + source_address_prefix = azurerm_subnet.worker.address_prefix + destination_address_prefix = azurerm_subnet.controller.address_prefix } resource "azurerm_network_security_rule" "controller-apiserver" { - resource_group_name = "${azurerm_resource_group.cluster.name}" + resource_group_name = 
azurerm_resource_group.cluster.name name = "allow-apiserver" - network_security_group_name = "${azurerm_network_security_group.controller.name}" + network_security_group_name = azurerm_network_security_group.controller.name priority = "2015" access = "Allow" direction = "Inbound" @@ -65,46 +65,46 @@ resource "azurerm_network_security_rule" "controller-apiserver" { source_port_range = "*" destination_port_range = "6443" source_address_prefix = "*" - destination_address_prefix = "${azurerm_subnet.controller.address_prefix}" + destination_address_prefix = azurerm_subnet.controller.address_prefix } resource "azurerm_network_security_rule" "controller-vxlan" { - resource_group_name = "${azurerm_resource_group.cluster.name}" + resource_group_name = azurerm_resource_group.cluster.name name = "allow-vxlan" - network_security_group_name = "${azurerm_network_security_group.controller.name}" + network_security_group_name = azurerm_network_security_group.controller.name priority = "2020" access = "Allow" direction = "Inbound" protocol = "Udp" source_port_range = "*" destination_port_range = "4789" - source_address_prefixes = ["${azurerm_subnet.controller.address_prefix}", "${azurerm_subnet.worker.address_prefix}"] - destination_address_prefix = "${azurerm_subnet.controller.address_prefix}" + source_address_prefixes = [azurerm_subnet.controller.address_prefix, azurerm_subnet.worker.address_prefix] + destination_address_prefix = azurerm_subnet.controller.address_prefix } # Allow Prometheus to scrape node-exporter daemonset resource "azurerm_network_security_rule" "controller-node-exporter" { - resource_group_name = "${azurerm_resource_group.cluster.name}" + resource_group_name = azurerm_resource_group.cluster.name name = "allow-node-exporter" - network_security_group_name = "${azurerm_network_security_group.controller.name}" + network_security_group_name = azurerm_network_security_group.controller.name priority = "2025" access = "Allow" direction = "Inbound" protocol = "Tcp" source_port_range = "*" destination_port_range = "9100" - source_address_prefix = "${azurerm_subnet.worker.address_prefix}" - destination_address_prefix = "${azurerm_subnet.controller.address_prefix}" + source_address_prefix = azurerm_subnet.worker.address_prefix + destination_address_prefix = azurerm_subnet.controller.address_prefix } # Allow apiserver to access kubelet's for exec, log, port-forward resource "azurerm_network_security_rule" "controller-kubelet" { - resource_group_name = "${azurerm_resource_group.cluster.name}" + resource_group_name = azurerm_resource_group.cluster.name name = "allow-kubelet" - network_security_group_name = "${azurerm_network_security_group.controller.name}" + network_security_group_name = azurerm_network_security_group.controller.name priority = "2030" access = "Allow" direction = "Inbound" @@ -113,18 +113,18 @@ resource "azurerm_network_security_rule" "controller-kubelet" { destination_port_range = "10250" # allow Prometheus to scrape kubelet metrics too - source_address_prefixes = ["${azurerm_subnet.controller.address_prefix}", "${azurerm_subnet.worker.address_prefix}"] - destination_address_prefix = "${azurerm_subnet.controller.address_prefix}" + source_address_prefixes = [azurerm_subnet.controller.address_prefix, azurerm_subnet.worker.address_prefix] + destination_address_prefix = azurerm_subnet.controller.address_prefix } # Override Azure AllowVNetInBound and AllowAzureLoadBalancerInBound # https://docs.microsoft.com/en-us/azure/virtual-network/security-overview#default-security-rules resource 
"azurerm_network_security_rule" "controller-allow-loadblancer" { - resource_group_name = "${azurerm_resource_group.cluster.name}" + resource_group_name = azurerm_resource_group.cluster.name name = "allow-loadbalancer" - network_security_group_name = "${azurerm_network_security_group.controller.name}" + network_security_group_name = azurerm_network_security_group.controller.name priority = "3000" access = "Allow" direction = "Inbound" @@ -136,10 +136,10 @@ resource "azurerm_network_security_rule" "controller-allow-loadblancer" { } resource "azurerm_network_security_rule" "controller-deny-all" { - resource_group_name = "${azurerm_resource_group.cluster.name}" + resource_group_name = azurerm_resource_group.cluster.name name = "deny-all" - network_security_group_name = "${azurerm_network_security_group.controller.name}" + network_security_group_name = azurerm_network_security_group.controller.name priority = "3005" access = "Deny" direction = "Inbound" @@ -153,32 +153,32 @@ resource "azurerm_network_security_rule" "controller-deny-all" { # Worker security group resource "azurerm_network_security_group" "worker" { - resource_group_name = "${azurerm_resource_group.cluster.name}" + resource_group_name = azurerm_resource_group.cluster.name name = "${var.cluster_name}-worker" - location = "${azurerm_resource_group.cluster.location}" + location = azurerm_resource_group.cluster.location } resource "azurerm_network_security_rule" "worker-ssh" { - resource_group_name = "${azurerm_resource_group.cluster.name}" + resource_group_name = azurerm_resource_group.cluster.name name = "allow-ssh" - network_security_group_name = "${azurerm_network_security_group.worker.name}" + network_security_group_name = azurerm_network_security_group.worker.name priority = "2000" access = "Allow" direction = "Inbound" protocol = "Tcp" source_port_range = "*" destination_port_range = "22" - source_address_prefix = "${azurerm_subnet.controller.address_prefix}" - destination_address_prefix = "${azurerm_subnet.worker.address_prefix}" + source_address_prefix = azurerm_subnet.controller.address_prefix + destination_address_prefix = azurerm_subnet.worker.address_prefix } resource "azurerm_network_security_rule" "worker-http" { - resource_group_name = "${azurerm_resource_group.cluster.name}" + resource_group_name = azurerm_resource_group.cluster.name name = "allow-http" - network_security_group_name = "${azurerm_network_security_group.worker.name}" + network_security_group_name = azurerm_network_security_group.worker.name priority = "2005" access = "Allow" direction = "Inbound" @@ -186,14 +186,14 @@ resource "azurerm_network_security_rule" "worker-http" { source_port_range = "*" destination_port_range = "80" source_address_prefix = "*" - destination_address_prefix = "${azurerm_subnet.worker.address_prefix}" + destination_address_prefix = azurerm_subnet.worker.address_prefix } resource "azurerm_network_security_rule" "worker-https" { - resource_group_name = "${azurerm_resource_group.cluster.name}" + resource_group_name = azurerm_resource_group.cluster.name name = "allow-https" - network_security_group_name = "${azurerm_network_security_group.worker.name}" + network_security_group_name = azurerm_network_security_group.worker.name priority = "2010" access = "Allow" direction = "Inbound" @@ -201,46 +201,46 @@ resource "azurerm_network_security_rule" "worker-https" { source_port_range = "*" destination_port_range = "443" source_address_prefix = "*" - destination_address_prefix = "${azurerm_subnet.worker.address_prefix}" + 
destination_address_prefix = azurerm_subnet.worker.address_prefix } resource "azurerm_network_security_rule" "worker-vxlan" { - resource_group_name = "${azurerm_resource_group.cluster.name}" + resource_group_name = azurerm_resource_group.cluster.name name = "allow-vxlan" - network_security_group_name = "${azurerm_network_security_group.worker.name}" + network_security_group_name = azurerm_network_security_group.worker.name priority = "2015" access = "Allow" direction = "Inbound" protocol = "Udp" source_port_range = "*" destination_port_range = "4789" - source_address_prefixes = ["${azurerm_subnet.controller.address_prefix}", "${azurerm_subnet.worker.address_prefix}"] - destination_address_prefix = "${azurerm_subnet.worker.address_prefix}" + source_address_prefixes = [azurerm_subnet.controller.address_prefix, azurerm_subnet.worker.address_prefix] + destination_address_prefix = azurerm_subnet.worker.address_prefix } # Allow Prometheus to scrape node-exporter daemonset resource "azurerm_network_security_rule" "worker-node-exporter" { - resource_group_name = "${azurerm_resource_group.cluster.name}" + resource_group_name = azurerm_resource_group.cluster.name name = "allow-node-exporter" - network_security_group_name = "${azurerm_network_security_group.worker.name}" + network_security_group_name = azurerm_network_security_group.worker.name priority = "2020" access = "Allow" direction = "Inbound" protocol = "Tcp" source_port_range = "*" destination_port_range = "9100" - source_address_prefix = "${azurerm_subnet.worker.address_prefix}" - destination_address_prefix = "${azurerm_subnet.worker.address_prefix}" + source_address_prefix = azurerm_subnet.worker.address_prefix + destination_address_prefix = azurerm_subnet.worker.address_prefix } # Allow apiserver to access kubelet's for exec, log, port-forward resource "azurerm_network_security_rule" "worker-kubelet" { - resource_group_name = "${azurerm_resource_group.cluster.name}" + resource_group_name = azurerm_resource_group.cluster.name name = "allow-kubelet" - network_security_group_name = "${azurerm_network_security_group.worker.name}" + network_security_group_name = azurerm_network_security_group.worker.name priority = "2025" access = "Allow" direction = "Inbound" @@ -249,18 +249,18 @@ resource "azurerm_network_security_rule" "worker-kubelet" { destination_port_range = "10250" # allow Prometheus to scrape kubelet metrics too - source_address_prefixes = ["${azurerm_subnet.controller.address_prefix}", "${azurerm_subnet.worker.address_prefix}"] - destination_address_prefix = "${azurerm_subnet.worker.address_prefix}" + source_address_prefixes = [azurerm_subnet.controller.address_prefix, azurerm_subnet.worker.address_prefix] + destination_address_prefix = azurerm_subnet.worker.address_prefix } # Override Azure AllowVNetInBound and AllowAzureLoadBalancerInBound # https://docs.microsoft.com/en-us/azure/virtual-network/security-overview#default-security-rules resource "azurerm_network_security_rule" "worker-allow-loadblancer" { - resource_group_name = "${azurerm_resource_group.cluster.name}" + resource_group_name = azurerm_resource_group.cluster.name name = "allow-loadbalancer" - network_security_group_name = "${azurerm_network_security_group.worker.name}" + network_security_group_name = azurerm_network_security_group.worker.name priority = "3000" access = "Allow" direction = "Inbound" @@ -272,10 +272,10 @@ resource "azurerm_network_security_rule" "worker-allow-loadblancer" { } resource "azurerm_network_security_rule" "worker-deny-all" { - 
resource_group_name = "${azurerm_resource_group.cluster.name}" + resource_group_name = azurerm_resource_group.cluster.name name = "deny-all" - network_security_group_name = "${azurerm_network_security_group.worker.name}" + network_security_group_name = azurerm_network_security_group.worker.name priority = "3005" access = "Deny" direction = "Inbound" @@ -285,3 +285,4 @@ resource "azurerm_network_security_rule" "worker-deny-all" { source_address_prefix = "*" destination_address_prefix = "*" } + diff --git a/azure/container-linux/kubernetes/ssh.tf b/azure/container-linux/kubernetes/ssh.tf index 5c8aef26..85a5d7b4 100644 --- a/azure/container-linux/kubernetes/ssh.tf +++ b/azure/container-linux/kubernetes/ssh.tf @@ -1,50 +1,48 @@ # Secure copy etcd TLS assets to controllers. resource "null_resource" "copy-controller-secrets" { - count = "${var.controller_count}" + count = var.controller_count - depends_on = [ - "azurerm_virtual_machine.controllers", - ] + depends_on = [azurerm_virtual_machine.controllers] connection { type = "ssh" - host = "${element(azurerm_public_ip.controllers.*.ip_address, count.index)}" + host = element(azurerm_public_ip.controllers.*.ip_address, count.index) user = "core" timeout = "15m" } provisioner "file" { - content = "${module.bootkube.etcd_ca_cert}" + content = module.bootkube.etcd_ca_cert destination = "$HOME/etcd-client-ca.crt" } provisioner "file" { - content = "${module.bootkube.etcd_client_cert}" + content = module.bootkube.etcd_client_cert destination = "$HOME/etcd-client.crt" } provisioner "file" { - content = "${module.bootkube.etcd_client_key}" + content = module.bootkube.etcd_client_key destination = "$HOME/etcd-client.key" } provisioner "file" { - content = "${module.bootkube.etcd_server_cert}" + content = module.bootkube.etcd_server_cert destination = "$HOME/etcd-server.crt" } provisioner "file" { - content = "${module.bootkube.etcd_server_key}" + content = module.bootkube.etcd_server_key destination = "$HOME/etcd-server.key" } provisioner "file" { - content = "${module.bootkube.etcd_peer_cert}" + content = module.bootkube.etcd_peer_cert destination = "$HOME/etcd-peer.crt" } provisioner "file" { - content = "${module.bootkube.etcd_peer_key}" + content = module.bootkube.etcd_peer_key destination = "$HOME/etcd-peer.key" } @@ -68,21 +66,21 @@ resource "null_resource" "copy-controller-secrets" { # one-time self-hosted cluster bootstrapping. resource "null_resource" "bootkube-start" { depends_on = [ - "module.bootkube", - "module.workers", - "azurerm_dns_a_record.apiserver", - "null_resource.copy-controller-secrets", + module.bootkube, + module.workers, + azurerm_dns_a_record.apiserver, + null_resource.copy-controller-secrets, ] connection { type = "ssh" - host = "${element(azurerm_public_ip.controllers.*.ip_address, 0)}" + host = element(azurerm_public_ip.controllers.*.ip_address, 0) user = "core" timeout = "15m" } provisioner "file" { - source = "${var.asset_dir}" + source = var.asset_dir destination = "$HOME/assets" } @@ -93,3 +91,4 @@ resource "null_resource" "bootkube-start" { ] } } + diff --git a/azure/container-linux/kubernetes/variables.tf b/azure/container-linux/kubernetes/variables.tf index 90a14574..5ff72471 100644 --- a/azure/container-linux/kubernetes/variables.tf +++ b/azure/container-linux/kubernetes/variables.tf @@ -1,77 +1,77 @@ variable "cluster_name" { - type = "string" + type = string description = "Unique cluster name (prepended to dns_zone)" } # Azure variable "region" { - type = "string" + type = string description = "Azure Region (e.g. 
centralus , see `az account list-locations --output table`)" } variable "dns_zone" { - type = "string" + type = string description = "Azure DNS Zone (e.g. azure.example.com)" } variable "dns_zone_group" { - type = "string" + type = string description = "Resource group where the Azure DNS Zone resides (e.g. global)" } # instances variable "controller_count" { - type = "string" + type = string default = "1" description = "Number of controllers (i.e. masters)" } variable "worker_count" { - type = "string" + type = string default = "1" description = "Number of workers" } variable "controller_type" { - type = "string" + type = string default = "Standard_DS1_v2" description = "Machine type for controllers (see `az vm list-skus --location centralus`)" } variable "worker_type" { - type = "string" + type = string default = "Standard_F1" description = "Machine type for workers (see `az vm list-skus --location centralus`)" } variable "os_image" { - type = "string" + type = string default = "coreos-stable" description = "Channel for a Container Linux derivative (coreos-stable, coreos-beta, coreos-alpha)" } variable "disk_size" { - type = "string" + type = string default = "40" description = "Size of the disk in GB" } variable "worker_priority" { - type = "string" + type = string default = "Regular" description = "Set worker priority to Low to use reduced cost surplus capacity, with the tradeoff that instances can be deallocated at any time." } variable "controller_clc_snippets" { - type = "list" + type = list(string) description = "Controller Container Linux Config snippets" default = [] } variable "worker_clc_snippets" { - type = "list" + type = list(string) description = "Worker Container Linux Config snippets" default = [] } @@ -79,30 +79,30 @@ variable "worker_clc_snippets" { # configuration variable "ssh_authorized_key" { - type = "string" + type = string description = "SSH public key for user 'core'" } variable "asset_dir" { description = "Path to a directory where generated assets should be placed (contains secrets)" - type = "string" + type = string } variable "networking" { description = "Choice of networking provider (flannel or calico)" - type = "string" + type = string default = "flannel" } variable "host_cidr" { description = "CIDR IPv4 range to assign to instances" - type = "string" + type = string default = "10.0.0.0/16" } variable "pod_cidr" { description = "CIDR IPv4 range to assign Kubernetes pods" - type = "string" + type = string default = "10.2.0.0/16" } @@ -112,24 +112,26 @@ CIDR IPv4 range to assign Kubernetes services. The 1st IP will be reserved for kube_apiserver, the 10th IP will be reserved for coredns. EOD - type = "string" + + type = string default = "10.3.0.0/16" } variable "cluster_domain_suffix" { description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. 
foo.default.svc.cluster.local) " - type = "string" - default = "cluster.local" + type = string + default = "cluster.local" } variable "enable_reporting" { - type = "string" + type = string description = "Enable usage or analytics reporting to upstreams (Calico)" - default = "false" + default = "false" } variable "enable_aggregation" { description = "Enable the Kubernetes Aggregation Layer (defaults to false)" - type = "string" - default = "false" + type = string + default = "false" } + diff --git a/azure/container-linux/kubernetes/versions.tf b/azure/container-linux/kubernetes/versions.tf new file mode 100644 index 00000000..e4e0a55b --- /dev/null +++ b/azure/container-linux/kubernetes/versions.tf @@ -0,0 +1,12 @@ +# Terraform version and plugin versions + +terraform { + required_version = "~> 0.12.0" + required_providers { + azurerm = "~> 1.27" + ct = "~> 0.3.2" + template = "~> 2.1" + null = "~> 2.1" + } +} + diff --git a/azure/container-linux/kubernetes/workers.tf b/azure/container-linux/kubernetes/workers.tf index c0d7c4f0..0f40da90 100644 --- a/azure/container-linux/kubernetes/workers.tf +++ b/azure/container-linux/kubernetes/workers.tf @@ -1,23 +1,24 @@ module "workers" { source = "./workers" - name = "${var.cluster_name}" + name = var.cluster_name # Azure - resource_group_name = "${azurerm_resource_group.cluster.name}" - region = "${azurerm_resource_group.cluster.location}" - subnet_id = "${azurerm_subnet.worker.id}" - security_group_id = "${azurerm_network_security_group.worker.id}" - backend_address_pool_id = "${azurerm_lb_backend_address_pool.worker.id}" + resource_group_name = azurerm_resource_group.cluster.name + region = azurerm_resource_group.cluster.location + subnet_id = azurerm_subnet.worker.id + security_group_id = azurerm_network_security_group.worker.id + backend_address_pool_id = azurerm_lb_backend_address_pool.worker.id - worker_count = "${var.worker_count}" - vm_type = "${var.worker_type}" - os_image = "${var.os_image}" - priority = "${var.worker_priority}" + worker_count = var.worker_count + vm_type = var.worker_type + os_image = var.os_image + priority = var.worker_priority # configuration - kubeconfig = "${module.bootkube.kubeconfig-kubelet}" - ssh_authorized_key = "${var.ssh_authorized_key}" - service_cidr = "${var.service_cidr}" - cluster_domain_suffix = "${var.cluster_domain_suffix}" - clc_snippets = "${var.worker_clc_snippets}" + kubeconfig = module.bootkube.kubeconfig-kubelet + ssh_authorized_key = var.ssh_authorized_key + service_cidr = var.service_cidr + cluster_domain_suffix = var.cluster_domain_suffix + clc_snippets = var.worker_clc_snippets } + diff --git a/azure/container-linux/kubernetes/workers/variables.tf b/azure/container-linux/kubernetes/workers/variables.tf index 6e77d1c6..a430a94a 100644 --- a/azure/container-linux/kubernetes/workers/variables.tf +++ b/azure/container-linux/kubernetes/workers/variables.tf @@ -1,63 +1,63 @@ variable "name" { - type = "string" + type = string description = "Unique name for the worker pool" } # Azure variable "region" { - type = "string" + type = string description = "Must be set to the Azure Region of cluster" } variable "resource_group_name" { - type = "string" + type = string description = "Must be set to the resource group name of cluster" } variable "subnet_id" { - type = "string" + type = string description = "Must be set to the `worker_subnet_id` output by cluster" } variable "security_group_id" { - type = "string" + type = string description = "Must be set to the `worker_security_group_id` output by 
cluster" } variable "backend_address_pool_id" { - type = "string" + type = string description = "Must be set to the `worker_backend_address_pool_id` output by cluster" } # instances variable "worker_count" { - type = "string" + type = string default = "1" description = "Number of instances" } variable "vm_type" { - type = "string" + type = string default = "Standard_F1" description = "Machine type for instances (see `az vm list-skus --location centralus`)" } variable "os_image" { - type = "string" + type = string default = "coreos-stable" description = "Channel for a Container Linux derivative (coreos-stable, coreos-beta, coreos-alpha)" } variable "priority" { - type = "string" + type = string default = "Regular" description = "Set priority to Low to use reduced cost surplus capacity, with the tradeoff that instances can be evicted at any time." } variable "clc_snippets" { - type = "list" + type = list(string) description = "Container Linux Config snippets" default = [] } @@ -65,12 +65,12 @@ variable "clc_snippets" { # configuration variable "kubeconfig" { - type = "string" + type = string description = "Must be set to `kubeconfig` output by cluster" } variable "ssh_authorized_key" { - type = "string" + type = string description = "SSH public key for user 'core'" } @@ -80,12 +80,14 @@ CIDR IPv4 range to assign Kubernetes services. The 1st IP will be reserved for kube_apiserver, the 10th IP will be reserved for coredns. EOD - type = "string" + + type = string default = "10.3.0.0/16" } variable "cluster_domain_suffix" { description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) " - type = "string" - default = "cluster.local" + type = string + default = "cluster.local" } + diff --git a/azure/container-linux/kubernetes/workers/versions.tf b/azure/container-linux/kubernetes/workers/versions.tf new file mode 100644 index 00000000..ac97c6ac --- /dev/null +++ b/azure/container-linux/kubernetes/workers/versions.tf @@ -0,0 +1,4 @@ + +terraform { + required_version = ">= 0.12" +} diff --git a/azure/container-linux/kubernetes/workers/workers.tf b/azure/container-linux/kubernetes/workers/workers.tf index 40ec8a1d..01e35f7f 100644 --- a/azure/container-linux/kubernetes/workers/workers.tf +++ b/azure/container-linux/kubernetes/workers/workers.tf @@ -1,28 +1,28 @@ locals { # Channel for a Container Linux derivative # coreos-stable -> Container Linux Stable - channel = "${element(split("-", var.os_image), 1)}" + channel = element(split("-", var.os_image), 1) } # Workers scale set resource "azurerm_virtual_machine_scale_set" "workers" { - resource_group_name = "${var.resource_group_name}" + resource_group_name = var.resource_group_name name = "${var.name}-workers" - location = "${var.region}" + location = var.region single_placement_group = false sku { - name = "${var.vm_type}" + name = var.vm_type tier = "standard" - capacity = "${var.worker_count}" + capacity = var.worker_count } # boot storage_profile_image_reference { publisher = "CoreOS" offer = "CoreOS" - sku = "${local.channel}" + sku = local.channel version = "latest" } @@ -37,7 +37,7 @@ resource "azurerm_virtual_machine_scale_set" "workers" { os_profile { computer_name_prefix = "${var.name}-worker-" admin_username = "core" - custom_data = "${data.ct_config.worker-ignition.rendered}" + custom_data = data.ct_config.worker-ignition.rendered } # Azure mandates setting an ssh_key, even though Ignition custom_data handles it too @@ -46,7 +46,7 @@ resource 
"azurerm_virtual_machine_scale_set" "workers" { ssh_keys { path = "/home/core/.ssh/authorized_keys" - key_data = "${var.ssh_authorized_key}" + key_data = var.ssh_authorized_key } } @@ -54,61 +54,62 @@ resource "azurerm_virtual_machine_scale_set" "workers" { network_profile { name = "nic0" primary = true - network_security_group_id = "${var.security_group_id}" + network_security_group_id = var.security_group_id ip_configuration { name = "ip0" primary = true - subnet_id = "${var.subnet_id}" + subnet_id = var.subnet_id # backend address pool to which the NIC should be added - load_balancer_backend_address_pool_ids = ["${var.backend_address_pool_id}"] + load_balancer_backend_address_pool_ids = [var.backend_address_pool_id] } } # lifecycle upgrade_policy_mode = "Manual" - priority = "${var.priority}" + priority = var.priority eviction_policy = "Delete" } # Scale up or down to maintain desired number, tolerating deallocations. resource "azurerm_monitor_autoscale_setting" "workers" { - resource_group_name = "${var.resource_group_name}" + resource_group_name = var.resource_group_name name = "${var.name}-maintain-desired" - location = "${var.region}" + location = var.region # autoscale enabled = true - target_resource_id = "${azurerm_virtual_machine_scale_set.workers.id}" + target_resource_id = azurerm_virtual_machine_scale_set.workers.id profile { name = "default" capacity { - minimum = "${var.worker_count}" - default = "${var.worker_count}" - maximum = "${var.worker_count}" + minimum = var.worker_count + default = var.worker_count + maximum = var.worker_count } } } # Worker Ignition configs data "ct_config" "worker-ignition" { - content = "${data.template_file.worker-config.rendered}" + content = data.template_file.worker-config.rendered pretty_print = false - snippets = ["${var.clc_snippets}"] + snippets = var.clc_snippets } # Worker Container Linux configs data "template_file" "worker-config" { - template = "${file("${path.module}/cl/worker.yaml.tmpl")}" + template = file("${path.module}/cl/worker.yaml.tmpl") vars = { - kubeconfig = "${indent(10, var.kubeconfig)}" - ssh_authorized_key = "${var.ssh_authorized_key}" - cluster_dns_service_ip = "${cidrhost(var.service_cidr, 10)}" - cluster_domain_suffix = "${var.cluster_domain_suffix}" + kubeconfig = indent(10, var.kubeconfig) + ssh_authorized_key = var.ssh_authorized_key + cluster_dns_service_ip = cidrhost(var.service_cidr, 10) + cluster_domain_suffix = var.cluster_domain_suffix } } + diff --git a/docs/advanced/worker-pools.md b/docs/advanced/worker-pools.md index 584af00a..719c7bb6 100644 --- a/docs/advanced/worker-pools.md +++ b/docs/advanced/worker-pools.md @@ -80,21 +80,17 @@ Create a cluster following the Azure [tutorial](../cl/azure.md#cluster). 
Define module "ramius-worker-pool" { source = "git::https://github.com/poseidon/typhoon//azure/container-linux/kubernetes/workers?ref=v1.14.3" - providers = { - azurerm = "azurerm.default" - } - # Azure - region = "${module.azure-ramius.region}" - resource_group_name = "${module.azure-ramius.resource_group_name}" - subnet_id = "${module.azure-ramius.subnet_id}" - security_group_id = "${module.azure-ramius.security_group_id}" - backend_address_pool_id = "${module.azure-ramius.backend_address_pool_id}" + region = module.azure-ramius.region + resource_group_name = module.azure-ramius.resource_group_name + subnet_id = module.azure-ramius.subnet_id + security_group_id = module.azure-ramius.security_group_id + backend_address_pool_id = module.azure-ramius.backend_address_pool_id # configuration name = "ramius-low-priority" - kubeconfig = "${module.azure-ramius.kubeconfig}" - ssh_authorized_key = "${var.ssh_authorized_key}" + kubeconfig = module.azure-ramius.kubeconfig + ssh_authorized_key = var.ssh_authorized_key # optional worker_count = 2 @@ -120,12 +116,12 @@ The Azure internal `workers` module supports a number of [variables](https://git | Name | Description | Example | |:-----|:------------|:--------| | name | Unique name (distinct from cluster name) | "ramius-f4" | -| region | Must be set to `region` output by cluster | "${module.cluster.region}" | -| resource_group_name | Must be set to `resource_group_name` output by cluster | "${module.cluster.resource_group_name}" | -| subnet_id | Must be set to `subnet_id` output by cluster | "${module.cluster.subnet_id}" | -| security_group_id | Must be set to `security_group_id` output by cluster | "${module.cluster.security_group_id}" | -| backend_address_pool_id | Must be set to `backend_address_pool_id` output by cluster | "${module.cluster.backend_address_pool_id}" | -| kubeconfig | Must be set to `kubeconfig` output by cluster | "${module.cluster.kubeconfig}" | +| region | Must be set to `region` output by cluster | module.cluster.region | +| resource_group_name | Must be set to `resource_group_name` output by cluster | module.cluster.resource_group_name | +| subnet_id | Must be set to `subnet_id` output by cluster | module.cluster.subnet_id | +| security_group_id | Must be set to `security_group_id` output by cluster | module.cluster.security_group_id | +| backend_address_pool_id | Must be set to `backend_address_pool_id` output by cluster | module.cluster.backend_address_pool_id | +| kubeconfig | Must be set to `kubeconfig` output by cluster | module.cluster.kubeconfig | | ssh_authorized_key | SSH public key for user 'core' | "ssh-rsa AAAAB3NZ..." | #### Optional diff --git a/docs/cl/azure.md b/docs/cl/azure.md index c5544215..b7f17a26 100644 --- a/docs/cl/azure.md +++ b/docs/cl/azure.md @@ -13,15 +13,15 @@ Controllers are provisioned to run an `etcd-member` peer and a `kubelet` service * Azure account * Azure DNS Zone (registered Domain Name or delegated subdomain) -* Terraform v0.11.x and [terraform-provider-ct](https://github.com/poseidon/terraform-provider-ct) installed locally +* Terraform v0.12.x and [terraform-provider-ct](https://github.com/poseidon/terraform-provider-ct) installed locally ## Terraform Setup -Install [Terraform](https://www.terraform.io/downloads.html) v0.11.x on your system. +Install [Terraform](https://www.terraform.io/downloads.html) v0.12.x on your system. 
```sh $ terraform version -Terraform v0.11.14 +Terraform v0.12.0 ``` Add the [terraform-provider-ct](https://github.com/poseidon/terraform-provider-ct) plugin binary for your system to `~/.terraform.d/plugins/`, noting the final name. @@ -50,33 +50,12 @@ Configure the Azure provider in a `providers.tf` file. ```tf provider "azurerm" { - version = "~> 1.29.0" - alias = "default" + version = "1.29.0" } provider "ct" { version = "0.3.2" } - -provider "local" { - version = "~> 1.0" - alias = "default" -} - -provider "null" { - version = "~> 1.0" - alias = "default" -} - -provider "template" { - version = "~> 1.0" - alias = "default" -} - -provider "tls" { - version = "~> 1.0" - alias = "default" -} ``` Additional configuration options are described in the `azurerm` provider [docs](https://www.terraform.io/docs/providers/azurerm/). @@ -87,15 +66,7 @@ Define a Kubernetes cluster using the module `azure/container-linux/kubernetes`. ```tf module "azure-ramius" { - source = "git::https://github.com/poseidon/typhoon//azure/container-linux/kubernetes?ref=v1.14.3" - - providers = { - azurerm = "azurerm.default" - local = "local.default" - null = "null.default" - template = "template.default" - tls = "tls.default" - } + source = "git::https://github.com/poseidon/typhoon//azure/container-linux/kubernetes?ref=v1.14.4" # Azure cluster_name = "ramius"
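  # Illustrative sketch (editorial, not part of the upstream example) of the Terraform
  # v0.11 -> v0.12 syntax changes this patch applies throughout the Azure module,
  # drawn from the module diffs above:
  #
  #   cluster_name = "${var.cluster_name}"            # v0.11 interpolated string
  #   cluster_name = var.cluster_name                  # v0.12 bare expression
  #
  #   type = "list"                                    # v0.11 type hint
  #   type = list(string)                              # v0.12 type expression
  #
  #   snippets = ["${var.controller_clc_snippets}"]    # v0.11 bracket-wrapped list
  #   snippets = var.controller_clc_snippets           # v0.12 list expression
  #
  # Provider and plugin version constraints now live in versions.tf
  # (required_version "~> 0.12.0", azurerm "~> 1.27", ct "~> 0.3.2") rather than
  # in per-module require.tf files.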