mirror of https://github.com/puppetmaster/typhoon.git
synced 2025-07-22 18:21:33 +02:00
Migrate Azure module Terraform v0.11 to v0.12
* Replace v0.11 bracket type hints with Terraform v0.12 list expressions
* Use expression syntax instead of interpolated strings, where suggested
* Update Azure tutorial and worker pools documentation
* Define Terraform and plugin version requirements in versions.tf
* Require azurerm ~> 1.27 to support Terraform v0.12
* Require ct ~> 0.3.2 to support Terraform v0.12
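The shape of the change, as an illustrative before/after sketch summarizing the patterns that recur throughout the diff below (not lines quoted verbatim from it):

# Terraform v0.11: quoted type hints and interpolation-only expressions
type     = "string"
type     = "list"
location = "${var.region}"
snippets = ["${var.clc_snippets}"]

# Terraform v0.12: first-class type constraints and bare expressions
type     = string
type     = list(string)
location = var.region
snippets = var.clc_snippets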
@@ -1,63 +1,63 @@
 variable "name" {
-  type        = "string"
+  type        = string
   description = "Unique name for the worker pool"
 }

 # Azure

 variable "region" {
-  type        = "string"
+  type        = string
   description = "Must be set to the Azure Region of cluster"
 }

 variable "resource_group_name" {
-  type        = "string"
+  type        = string
   description = "Must be set to the resource group name of cluster"
 }

 variable "subnet_id" {
-  type        = "string"
+  type        = string
   description = "Must be set to the `worker_subnet_id` output by cluster"
 }

 variable "security_group_id" {
-  type        = "string"
+  type        = string
   description = "Must be set to the `worker_security_group_id` output by cluster"
 }

 variable "backend_address_pool_id" {
-  type        = "string"
+  type        = string
   description = "Must be set to the `worker_backend_address_pool_id` output by cluster"
 }

 # instances

 variable "worker_count" {
-  type        = "string"
+  type        = string
   default     = "1"
   description = "Number of instances"
 }

 variable "vm_type" {
-  type        = "string"
+  type        = string
   default     = "Standard_F1"
   description = "Machine type for instances (see `az vm list-skus --location centralus`)"
 }

 variable "os_image" {
-  type        = "string"
+  type        = string
   default     = "coreos-stable"
   description = "Channel for a Container Linux derivative (coreos-stable, coreos-beta, coreos-alpha)"
 }

 variable "priority" {
-  type        = "string"
+  type        = string
   default     = "Regular"
   description = "Set priority to Low to use reduced cost surplus capacity, with the tradeoff that instances can be evicted at any time."
 }

 variable "clc_snippets" {
-  type        = "list"
+  type        = list(string)
   description = "Container Linux Config snippets"
   default     = []
 }
@@ -65,12 +65,12 @@ variable "clc_snippets" {
 # configuration

 variable "kubeconfig" {
-  type        = "string"
+  type        = string
   description = "Must be set to `kubeconfig` output by cluster"
 }

 variable "ssh_authorized_key" {
-  type        = "string"
+  type        = string
   description = "SSH public key for user 'core'"
 }
@@ -80,12 +80,14 @@ CIDR IPv4 range to assign Kubernetes services.
 The 1st IP will be reserved for kube_apiserver, the 10th IP will be reserved for coredns.
 EOD
-  type    = "string"
+
+  type    = string
   default = "10.3.0.0/16"
 }

 variable "cluster_domain_suffix" {
   description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
-  type        = "string"
-  default     = "cluster.local"
+  type        = string
+  default     = "cluster.local"
 }

azure/container-linux/kubernetes/workers/versions.tf (new file, +4)
@@ -0,0 +1,4 @@
+
+terraform {
+  required_version = ">= 0.12"
+}
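The workers versions.tf above pins only the Terraform core version. The azurerm ~> 1.27 and ct ~> 0.3.2 constraints called out in the commit message would be declared in a versions.tf elsewhere in the module; a minimal sketch of such a pin, assuming Terraform v0.12's required_providers map syntax:

terraform {
  required_version = ">= 0.12"

  required_providers {
    azurerm = "~> 1.27"
    ct      = "~> 0.3.2"
  }
}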
@@ -1,28 +1,28 @@
 locals {
   # Channel for a Container Linux derivative
   # coreos-stable -> Container Linux Stable
-  channel = "${element(split("-", var.os_image), 1)}"
+  channel = element(split("-", var.os_image), 1)
 }

 # Workers scale set
 resource "azurerm_virtual_machine_scale_set" "workers" {
-  resource_group_name = "${var.resource_group_name}"
+  resource_group_name = var.resource_group_name

   name                   = "${var.name}-workers"
-  location               = "${var.region}"
+  location               = var.region
   single_placement_group = false

   sku {
-    name     = "${var.vm_type}"
+    name     = var.vm_type
     tier     = "standard"
-    capacity = "${var.worker_count}"
+    capacity = var.worker_count
   }

   # boot
   storage_profile_image_reference {
     publisher = "CoreOS"
     offer     = "CoreOS"
-    sku       = "${local.channel}"
+    sku       = local.channel
     version   = "latest"
   }
@@ -37,7 +37,7 @@ resource "azurerm_virtual_machine_scale_set" "workers" {
   os_profile {
     computer_name_prefix = "${var.name}-worker-"
     admin_username       = "core"
-    custom_data          = "${data.ct_config.worker-ignition.rendered}"
+    custom_data          = data.ct_config.worker-ignition.rendered
   }

   # Azure mandates setting an ssh_key, even though Ignition custom_data handles it too
@@ -46,7 +46,7 @@ resource "azurerm_virtual_machine_scale_set" "workers" {

     ssh_keys {
       path     = "/home/core/.ssh/authorized_keys"
-      key_data = "${var.ssh_authorized_key}"
+      key_data = var.ssh_authorized_key
     }
   }

@@ -54,61 +54,62 @@ resource "azurerm_virtual_machine_scale_set" "workers" {
   network_profile {
     name                      = "nic0"
     primary                   = true
-    network_security_group_id = "${var.security_group_id}"
+    network_security_group_id = var.security_group_id

     ip_configuration {
       name      = "ip0"
       primary   = true
-      subnet_id = "${var.subnet_id}"
+      subnet_id = var.subnet_id

       # backend address pool to which the NIC should be added
-      load_balancer_backend_address_pool_ids = ["${var.backend_address_pool_id}"]
+      load_balancer_backend_address_pool_ids = [var.backend_address_pool_id]
     }
   }

   # lifecycle
   upgrade_policy_mode = "Manual"
-  priority            = "${var.priority}"
+  priority            = var.priority
   eviction_policy     = "Delete"
 }

 # Scale up or down to maintain desired number, tolerating deallocations.
 resource "azurerm_monitor_autoscale_setting" "workers" {
-  resource_group_name = "${var.resource_group_name}"
+  resource_group_name = var.resource_group_name

   name     = "${var.name}-maintain-desired"
-  location = "${var.region}"
+  location = var.region

   # autoscale
   enabled            = true
-  target_resource_id = "${azurerm_virtual_machine_scale_set.workers.id}"
+  target_resource_id = azurerm_virtual_machine_scale_set.workers.id

   profile {
     name = "default"

     capacity {
-      minimum = "${var.worker_count}"
-      default = "${var.worker_count}"
-      maximum = "${var.worker_count}"
+      minimum = var.worker_count
+      default = var.worker_count
+      maximum = var.worker_count
     }
   }
 }

 # Worker Ignition configs
 data "ct_config" "worker-ignition" {
-  content      = "${data.template_file.worker-config.rendered}"
+  content      = data.template_file.worker-config.rendered
   pretty_print = false
-  snippets     = ["${var.clc_snippets}"]
+  snippets     = var.clc_snippets
 }

 # Worker Container Linux configs
 data "template_file" "worker-config" {
-  template = "${file("${path.module}/cl/worker.yaml.tmpl")}"
+  template = file("${path.module}/cl/worker.yaml.tmpl")

   vars = {
-    kubeconfig             = "${indent(10, var.kubeconfig)}"
-    ssh_authorized_key     = "${var.ssh_authorized_key}"
-    cluster_dns_service_ip = "${cidrhost(var.service_cidr, 10)}"
-    cluster_domain_suffix  = "${var.cluster_domain_suffix}"
+    kubeconfig             = indent(10, var.kubeconfig)
+    ssh_authorized_key     = var.ssh_authorized_key
+    cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
+    cluster_domain_suffix  = var.cluster_domain_suffix
   }
 }

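For context, a hedged sketch of how a worker pool module exposing the variables above might be instantiated under Terraform v0.12. The module path follows the versions.tf location shown earlier; the `module.cluster` references and all values are illustrative placeholders, not part of this commit.

module "worker-pool" {
  source = "./azure/container-linux/kubernetes/workers"

  # outputs of a hypothetical cluster module, per the variable descriptions above
  resource_group_name     = module.cluster.resource_group_name
  region                  = "centralus"
  subnet_id               = module.cluster.worker_subnet_id
  security_group_id       = module.cluster.worker_security_group_id
  backend_address_pool_id = module.cluster.worker_backend_address_pool_id
  kubeconfig              = module.cluster.kubeconfig

  name               = "pool-1"
  worker_count       = "2"
  vm_type            = "Standard_F1"
  os_image           = "coreos-stable"
  ssh_authorized_key = "ssh-rsa AAAA..."
}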