48d4973957
* Define a dual-stack virtual network with both IPv4 and IPv6 private address space. Change the `host_cidr` variable (string) to a `network_cidr` variable (object) with "ipv4" and "ipv6" fields that list CIDR strings (a sketch of the variable shape follows below).
* Define dual-stack controller and worker subnets. Disable Azure default outbound access (a deprecated fallback mechanism).
* Enable dual-stack load balancing to Kubernetes Ingress by adding a public IPv6 frontend IP and LB rule to the load balancer.
* Enable worker outbound IPv6 connectivity through load balancer SNAT by adding an IPv6 frontend IP and outbound rule.
* Configure controller nodes with a public IPv6 address to provide direct outbound IPv6 connectivity.
* Add an IPv6 worker backend pool. Azure requires separate IPv4 and IPv6 backend pools, though the health probe can be shared (see the load balancer sketch below).
* Extend network security group rules for IPv6 sources/destinations.

Checklist:

Access to controller and worker nodes via IPv6 addresses:

* SSH access to controller nodes via public IPv6 address
* SSH access to worker nodes via (private) IPv6 address (via a controller)

Outbound IPv6 connectivity from controller and worker nodes:

```
nc -6 -zv ipv6.google.com 80
Ncat: Version 7.94 ( https://nmap.org/ncat )
Ncat: Connected to [2607:f8b0:4001:c16::66]:80.
Ncat: 0 bytes sent, 0 bytes received in 0.02 seconds.
```

Serving Ingress traffic over IPv4 or IPv6 just requires setting up A and AAAA records and running the ingress controller with `hostNetwork: true`, since hostPort only forwards IPv4 traffic (a DNS record sketch follows below).
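A minimal sketch of what the `network_cidr` object variable could look like; the field names follow the message above, while the description and default CIDR values are illustrative assumptions, not taken from this change:

```
variable "network_cidr" {
  description = "Virtual network CIDR ranges (dual-stack)"
  type = object({
    ipv4 = list(string)
    ipv6 = list(string)
  })
  # example defaults only; pick ranges appropriate for your environment
  default = {
    ipv4 = ["10.0.0.0/16"]
    ipv6 = ["fd00::/48"]
  }
}
```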
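On the load balancer side, the separate IPv4/IPv6 backend pools with a shared health probe correspond to something like the following sketch; the resource names, the `azurerm_lb.cluster` reference, and the probe port/path are assumptions for illustration:

```
# illustrative names; Azure load balancers need one backend pool per address family
resource "azurerm_lb_backend_address_pool" "worker-ipv4" {
  name            = "worker-ipv4"
  loadbalancer_id = azurerm_lb.cluster.id
}

resource "azurerm_lb_backend_address_pool" "worker-ipv6" {
  name            = "worker-ipv6"
  loadbalancer_id = azurerm_lb.cluster.id
}

# a single HTTP health probe can be referenced by both the IPv4 and IPv6 LB rules
resource "azurerm_lb_probe" "ingress" {
  name            = "ingress"
  loadbalancer_id = azurerm_lb.cluster.id
  protocol        = "Http"
  port            = 10254
  request_path    = "/healthz"
}
```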
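Publishing both record types for Ingress might look roughly like this; the zone variables and public IP references are assumptions, and the ingress controller itself would additionally need `hostNetwork: true`:

```
# hypothetical zone variables and public IP resources, shown only to pair the A and AAAA records
resource "azurerm_dns_a_record" "ingress" {
  resource_group_name = var.dns_zone_group
  zone_name           = var.dns_zone
  name                = "ingress"
  ttl                 = 300
  records             = [azurerm_public_ip.ingress-ipv4.ip_address]
}

resource "azurerm_dns_aaaa_record" "ingress" {
  resource_group_name = var.dns_zone_group
  zone_name           = var.dns_zone
  name                = "ingress"
  ttl                 = 300
  records             = [azurerm_public_ip.ingress-ipv6.ip_address]
}
```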
locals {
  # flatcar-stable -> Flatcar Linux Stable
  channel      = split("-", var.os_image)[1]
  offer_suffix = var.arch == "arm64" ? "corevm" : "free"
  urn          = var.arch == "arm64" ? local.channel : "${local.channel}-gen2"

  azure_authorized_key = var.azure_authorized_key == "" ? var.ssh_authorized_key : var.azure_authorized_key
}

# Workers scale set
resource "azurerm_linux_virtual_machine_scale_set" "workers" {
  name                = "${var.name}-worker"
  resource_group_name = var.resource_group_name
  location            = var.region
  sku                 = var.vm_type
  instances           = var.worker_count
  # instance name prefix for instances in the set
  computer_name_prefix   = "${var.name}-worker"
  single_placement_group = false

  # storage
  os_disk {
    storage_account_type = "Standard_LRS"
    caching              = "ReadWrite"
  }

  # Flatcar Container Linux
  source_image_reference {
    publisher = "kinvolk"
    offer     = "flatcar-container-linux-${local.offer_suffix}"
    sku       = local.urn
    version   = "latest"
  }

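  # Azure Marketplace plan (only set for amd64 images; the arm64 "corevm" offer omits it)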
dynamic "plan" {
|
|
for_each = var.arch == "arm64" ? [] : [1]
|
|
content {
|
|
publisher = "kinvolk"
|
|
product = "flatcar-container-linux-${local.offer_suffix}"
|
|
name = local.urn
|
|
}
|
|
}
|
|
|
|
# network
|
|
network_interface {
|
|
name = "nic0"
|
|
primary = true
|
|
network_security_group_id = var.security_group_id
|
|
|
|
ip_configuration {
|
|
name = "ipv4"
|
|
version = "IPv4"
|
|
primary = true
|
|
subnet_id = var.subnet_id
|
|
# backend address pool to which the NIC should be added
|
|
load_balancer_backend_address_pool_ids = var.backend_address_pool_ids.ipv4
|
|
}
|
|
ip_configuration {
|
|
name = "ipv6"
|
|
version = "IPv6"
|
|
subnet_id = var.subnet_id
|
|
# backend address pool to which the NIC should be added
|
|
load_balancer_backend_address_pool_ids = var.backend_address_pool_ids.ipv6
|
|
}
|
|
}
|
|
|
|
# boot
|
|
custom_data = base64encode(data.ct_config.worker.rendered)
|
|
boot_diagnostics {
|
|
# defaults to a managed storage account
|
|
}
|
|
|
|
# Azure requires an RSA admin_ssh_key
|
|
admin_username = "core"
|
|
admin_ssh_key {
|
|
username = "core"
|
|
public_key = local.azure_authorized_key
|
|
}
|
|
|
|
# lifecycle
|
|
upgrade_mode = "Manual"
|
|
# eviction policy may only be set when priority is Spot
|
|
priority = var.priority
|
|
eviction_policy = var.priority == "Spot" ? "Delete" : null
|
|
termination_notification {
|
|
enabled = true
|
|
}
|
|
}
|
|
|
|
# Scale up or down to maintain desired number, tolerating deallocations.
|
|
resource "azurerm_monitor_autoscale_setting" "workers" {
|
|
name = "${var.name}-maintain-desired"
|
|
resource_group_name = var.resource_group_name
|
|
location = var.region
|
|
# autoscale
|
|
enabled = true
|
|
target_resource_id = azurerm_linux_virtual_machine_scale_set.workers.id
|
|
|
|
profile {
|
|
name = "default"
|
|
capacity {
|
|
minimum = var.worker_count
|
|
default = var.worker_count
|
|
maximum = var.worker_count
|
|
}
|
|
}
|
|
}
|
|
|
|
# Flatcar Linux worker
|
|
data "ct_config" "worker" {
|
|
content = templatefile("${path.module}/butane/worker.yaml", {
|
|
kubeconfig = indent(10, var.kubeconfig)
|
|
ssh_authorized_key = var.ssh_authorized_key
|
|
cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
|
|
cluster_domain_suffix = var.cluster_domain_suffix
|
|
node_labels = join(",", var.node_labels)
|
|
node_taints = join(",", var.node_taints)
|
|
})
|
|
strict = true
|
|
snippets = var.snippets
|
|
}
|