Add IPv6 support for Typhoon Azure clusters
* Define a dual-stack virtual network with both IPv4 and IPv6 private address space. Change the `host_cidr` variable (string) to a `network_cidr` variable (object) with "ipv4" and "ipv6" fields that list CIDR strings.
* Define dual-stack controller and worker subnets. Disable Azure default outbound access (a deprecated fallback mechanism).
* Enable dual-stack load balancing to Kubernetes Ingress by adding a public IPv6 frontend IP and LB rule to the load balancer.
* Enable worker outbound IPv6 connectivity through load balancer SNAT by adding an IPv6 frontend IP and outbound rule.
* Configure controller nodes with a public IPv6 address to provide direct outbound IPv6 connectivity.
* Add an IPv6 worker backend pool. Azure requires separate IPv4 and IPv6 backend pools, though the health probe can be shared.
* Extend network security group rules to cover IPv6 sources and destinations.

Checklist:

Access to controller and worker nodes via IPv6 addresses:

* SSH access to controller nodes via public IPv6 address
* SSH access to worker nodes via (private) IPv6 address (via a controller)

Outbound IPv6 connectivity from controller and worker nodes:

```
nc -6 -zv ipv6.google.com 80
Ncat: Version 7.94 ( https://nmap.org/ncat )
Ncat: Connected to [2607:f8b0:4001:c16::66]:80.
Ncat: 0 bytes sent, 0 bytes received in 0.02 seconds.
```

Serving Ingress traffic via IPv4 or IPv6 just requires setting up A and AAAA records and running the ingress controller with `hostNetwork: true`, since hostPort only forwards IPv4 traffic.
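
As a sketch of that DNS setup (the zone, record name, and `module.cluster` reference are illustrative assumptions, not part of this change), both record types can point at the module's load balancer outputs:

```
resource "azurerm_dns_a_record" "ingress" {
  resource_group_name = "example-dns-zone-group"
  zone_name           = "example.com"
  name                = "app"
  ttl                 = 300
  records             = [module.cluster.ingress_static_ipv4]
}

resource "azurerm_dns_aaaa_record" "ingress" {
  resource_group_name = "example-dns-zone-group"
  zone_name           = "example.com"
  name                = "app"
  ttl                 = 300
  records             = [module.cluster.ingress_static_ipv6]
}
```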
@@ -1,19 +1,3 @@
# Discrete DNS records for each controller's private IPv4 for etcd usage
resource "azurerm_dns_a_record" "etcds" {
  count = var.controller_count
  resource_group_name = var.dns_zone_group

  # DNS Zone name where record should be created
  zone_name = var.dns_zone

  # DNS record
  name = format("%s-etcd%d", var.cluster_name, count.index)
  ttl = 300

  # private IPv4 address for etcd
  records = [azurerm_network_interface.controllers.*.private_ip_address[count.index]]
}

locals {
  # Container Linux derivative
  # flatcar-stable -> Flatcar Linux Stable
@@ -28,11 +12,26 @@ locals {
  azure_authorized_key = var.azure_authorized_key == "" ? var.ssh_authorized_key : var.azure_authorized_key
}

# Discrete DNS records for each controller's private IPv4 for etcd usage
resource "azurerm_dns_a_record" "etcds" {
  count = var.controller_count
  resource_group_name = var.dns_zone_group

  # DNS Zone name where record should be created
  zone_name = var.dns_zone

  # DNS record
  name = format("%s-etcd%d", var.cluster_name, count.index)
  ttl = 300

  # private IPv4 address for etcd
  records = [azurerm_network_interface.controllers[count.index].private_ip_address]
}

# Controller availability set to spread controllers
resource "azurerm_availability_set" "controllers" {
  resource_group_name = azurerm_resource_group.cluster.name

  name = "${var.cluster_name}-controllers"
  resource_group_name = azurerm_resource_group.cluster.name
  location = var.region
  platform_fault_domain_count = 2
  platform_update_domain_count = 4
@@ -41,18 +40,13 @@ resource "azurerm_availability_set" "controllers" {

# Controller instances
resource "azurerm_linux_virtual_machine" "controllers" {
  count = var.controller_count
  resource_group_name = azurerm_resource_group.cluster.name
  count = var.controller_count

  name = "${var.cluster_name}-controller-${count.index}"
  resource_group_name = azurerm_resource_group.cluster.name
  location = var.region
  availability_set_id = azurerm_availability_set.controllers.id

  size = var.controller_type
  custom_data = base64encode(data.ct_config.controllers.*.rendered[count.index])
  boot_diagnostics {
    # defaults to a managed storage account
  }
  size = var.controller_type

  # storage
  os_disk {
@@ -84,7 +78,13 @@ resource "azurerm_linux_virtual_machine" "controllers" {
    azurerm_network_interface.controllers[count.index].id
  ]

  # Azure requires setting admin_ssh_key, though Ignition custom_data handles it too
  # boot
  custom_data = base64encode(data.ct_config.controllers[count.index].rendered)
  boot_diagnostics {
    # defaults to a managed storage account
  }

  # Azure requires an RSA admin_ssh_key
  admin_username = "core"
  admin_ssh_key {
    username = "core"
@@ -99,31 +99,52 @@ resource "azurerm_linux_virtual_machine" "controllers" {
  }
}

# Controller public IPv4 addresses
resource "azurerm_public_ip" "controllers" {
  count = var.controller_count
  resource_group_name = azurerm_resource_group.cluster.name
# Controller node public IPv4 addresses
resource "azurerm_public_ip" "controllers-ipv4" {
  count = var.controller_count

  name = "${var.cluster_name}-controller-${count.index}"
  location = azurerm_resource_group.cluster.location
  sku = "Standard"
  allocation_method = "Static"
  name = "${var.cluster_name}-controller-${count.index}-ipv4"
  resource_group_name = azurerm_resource_group.cluster.name
  location = azurerm_resource_group.cluster.location
  ip_version = "IPv4"
  sku = "Standard"
  allocation_method = "Static"
}

# Controller NICs with public and private IPv4
resource "azurerm_network_interface" "controllers" {
  count = var.controller_count
  resource_group_name = azurerm_resource_group.cluster.name
# Controller node public IPv6 addresses
resource "azurerm_public_ip" "controllers-ipv6" {
  count = var.controller_count

  name = "${var.cluster_name}-controller-${count.index}"
  location = azurerm_resource_group.cluster.location
  name = "${var.cluster_name}-controller-${count.index}-ipv6"
  resource_group_name = azurerm_resource_group.cluster.name
  location = azurerm_resource_group.cluster.location
  ip_version = "IPv6"
  sku = "Standard"
  allocation_method = "Static"
}

# Controllers' network interfaces
resource "azurerm_network_interface" "controllers" {
  count = var.controller_count

  name = "${var.cluster_name}-controller-${count.index}"
  resource_group_name = azurerm_resource_group.cluster.name
  location = azurerm_resource_group.cluster.location

  ip_configuration {
    name = "ip0"
    name = "ipv4"
    primary = true
    subnet_id = azurerm_subnet.controller.id
    private_ip_address_allocation = "Dynamic"
    # instance public IPv4
    public_ip_address_id = azurerm_public_ip.controllers.*.id[count.index]
    private_ip_address_version = "IPv4"
    public_ip_address_id = azurerm_public_ip.controllers-ipv4[count.index].id
  }
  ip_configuration {
    name = "ipv6"
    subnet_id = azurerm_subnet.controller.id
    private_ip_address_allocation = "Dynamic"
    private_ip_address_version = "IPv6"
    public_ip_address_id = azurerm_public_ip.controllers-ipv6[count.index].id
  }
}

@@ -140,7 +161,7 @@ resource "azurerm_network_interface_backend_address_pool_association" "controlle
  count = var.controller_count

  network_interface_id = azurerm_network_interface.controllers[count.index].id
  ip_configuration_name = "ip0"
  ip_configuration_name = "ipv4"
  backend_address_pool_id = azurerm_lb_backend_address_pool.controller.id
}

@@ -15,31 +15,39 @@ resource "azurerm_dns_a_record" "apiserver" {

# Static IPv4 address for the apiserver frontend
resource "azurerm_public_ip" "apiserver-ipv4" {
  name = "${var.cluster_name}-apiserver-ipv4"
  resource_group_name = azurerm_resource_group.cluster.name

  name = "${var.cluster_name}-apiserver-ipv4"
  location = var.region
  sku = "Standard"
  allocation_method = "Static"
  location = var.region
  sku = "Standard"
  allocation_method = "Static"
}

# Static IPv4 address for the ingress frontend
resource "azurerm_public_ip" "ingress-ipv4" {
  name = "${var.cluster_name}-ingress-ipv4"
  resource_group_name = azurerm_resource_group.cluster.name
  location = var.region
  ip_version = "IPv4"
  sku = "Standard"
  allocation_method = "Static"
}

  name = "${var.cluster_name}-ingress-ipv4"
  location = var.region
  sku = "Standard"
  allocation_method = "Static"
# Static IPv6 address for the ingress frontend
resource "azurerm_public_ip" "ingress-ipv6" {
  name = "${var.cluster_name}-ingress-ipv6"
  resource_group_name = azurerm_resource_group.cluster.name
  location = var.region
  ip_version = "IPv6"
  sku = "Standard"
  allocation_method = "Static"
}

# Network Load Balancer for apiservers and ingress
resource "azurerm_lb" "cluster" {
  name = var.cluster_name
  resource_group_name = azurerm_resource_group.cluster.name

  name = var.cluster_name
  location = var.region
  sku = "Standard"
  location = var.region
  sku = "Standard"

  frontend_ip_configuration {
    name = "apiserver"
@@ -47,15 +55,21 @@ resource "azurerm_lb" "cluster" {
  }

  frontend_ip_configuration {
    name = "ingress"
    name = "ingress-ipv4"
    public_ip_address_id = azurerm_public_ip.ingress-ipv4.id
  }

  frontend_ip_configuration {
    name = "ingress-ipv6"
    public_ip_address_id = azurerm_public_ip.ingress-ipv6.id
  }
}

resource "azurerm_lb_rule" "apiserver" {
  name = "apiserver"
  loadbalancer_id = azurerm_lb.cluster.id
  frontend_ip_configuration_name = "apiserver"
  disable_outbound_snat = true

  protocol = "Tcp"
  frontend_port = 6443
@@ -64,53 +78,74 @@ resource "azurerm_lb_rule" "apiserver" {
  probe_id = azurerm_lb_probe.apiserver.id
}

resource "azurerm_lb_rule" "ingress-http" {
  name = "ingress-http"
resource "azurerm_lb_rule" "ingress-http-ipv4" {
  name = "ingress-http-ipv4"
  loadbalancer_id = azurerm_lb.cluster.id
  frontend_ip_configuration_name = "ingress"
  frontend_ip_configuration_name = "ingress-ipv4"
  disable_outbound_snat = true

  protocol = "Tcp"
  frontend_port = 80
  backend_port = 80
  backend_address_pool_ids = [azurerm_lb_backend_address_pool.worker.id]
  backend_address_pool_ids = [azurerm_lb_backend_address_pool.worker-ipv4.id]
  probe_id = azurerm_lb_probe.ingress.id
}

resource "azurerm_lb_rule" "ingress-https" {
  name = "ingress-https"
resource "azurerm_lb_rule" "ingress-https-ipv4" {
  name = "ingress-https-ipv4"
  loadbalancer_id = azurerm_lb.cluster.id
  frontend_ip_configuration_name = "ingress"
  frontend_ip_configuration_name = "ingress-ipv4"
  disable_outbound_snat = true

  protocol = "Tcp"
  frontend_port = 443
  backend_port = 443
  backend_address_pool_ids = [azurerm_lb_backend_address_pool.worker.id]
  backend_address_pool_ids = [azurerm_lb_backend_address_pool.worker-ipv4.id]
  probe_id = azurerm_lb_probe.ingress.id
}

# Worker outbound TCP/UDP SNAT
resource "azurerm_lb_outbound_rule" "worker-outbound" {
  name = "worker"
  loadbalancer_id = azurerm_lb.cluster.id
  frontend_ip_configuration {
    name = "ingress"
  }
resource "azurerm_lb_rule" "ingress-http-ipv6" {
  name = "ingress-http-ipv6"
  loadbalancer_id = azurerm_lb.cluster.id
  frontend_ip_configuration_name = "ingress-ipv6"
  disable_outbound_snat = true

  protocol = "All"
  backend_address_pool_id = azurerm_lb_backend_address_pool.worker.id
  protocol = "Tcp"
  frontend_port = 80
  backend_port = 80
  backend_address_pool_ids = [azurerm_lb_backend_address_pool.worker-ipv6.id]
  probe_id = azurerm_lb_probe.ingress.id
}

resource "azurerm_lb_rule" "ingress-https-ipv6" {
  name = "ingress-https-ipv6"
  loadbalancer_id = azurerm_lb.cluster.id
  frontend_ip_configuration_name = "ingress-ipv6"
  disable_outbound_snat = true

  protocol = "Tcp"
  frontend_port = 443
  backend_port = 443
  backend_address_pool_ids = [azurerm_lb_backend_address_pool.worker-ipv6.id]
  probe_id = azurerm_lb_probe.ingress.id
}

# Backend Address Pools

# Address pool of controllers
resource "azurerm_lb_backend_address_pool" "controller" {
  name = "controller"
  loadbalancer_id = azurerm_lb.cluster.id
}

# Address pool of workers
resource "azurerm_lb_backend_address_pool" "worker" {
  name = "worker"
# Address pools for workers
resource "azurerm_lb_backend_address_pool" "worker-ipv4" {
  name = "worker-ipv4"
  loadbalancer_id = azurerm_lb.cluster.id
}

resource "azurerm_lb_backend_address_pool" "worker-ipv6" {
  name = "worker-ipv6"
  loadbalancer_id = azurerm_lb.cluster.id
}

@@ -122,10 +157,8 @@ resource "azurerm_lb_probe" "apiserver" {
  loadbalancer_id = azurerm_lb.cluster.id
  protocol = "Tcp"
  port = 6443

  # unhealthy threshold
  number_of_probes = 3

  number_of_probes = 3
  interval_in_seconds = 5
}

@@ -136,10 +169,29 @@ resource "azurerm_lb_probe" "ingress" {
  protocol = "Http"
  port = 10254
  request_path = "/healthz"

  # unhealthy threshold
  number_of_probes = 3

  number_of_probes = 3
  interval_in_seconds = 5
}

# Outbound SNAT

resource "azurerm_lb_outbound_rule" "outbound-ipv4" {
  name = "outbound-ipv4"
  protocol = "All"
  loadbalancer_id = azurerm_lb.cluster.id
  backend_address_pool_id = azurerm_lb_backend_address_pool.worker-ipv4.id
  frontend_ip_configuration {
    name = "ingress-ipv4"
  }
}

resource "azurerm_lb_outbound_rule" "outbound-ipv6" {
  name = "outbound-ipv6"
  protocol = "All"
  loadbalancer_id = azurerm_lb.cluster.id
  backend_address_pool_id = azurerm_lb_backend_address_pool.worker-ipv6.id
  frontend_ip_configuration {
    name = "ingress-ipv6"
  }
}

azure/flatcar-linux/kubernetes/locals.tf (new file, +6)
@@ -0,0 +1,6 @@
locals {
  backend_address_pool_ids = {
    ipv4 = [azurerm_lb_backend_address_pool.worker-ipv4.id]
    ipv6 = [azurerm_lb_backend_address_pool.worker-ipv6.id]
  }
}

@@ -1,3 +1,21 @@
locals {
  # Subdivide the virtual network into subnets
  # - controllers use netnum 0
  # - workers use netnum 1
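  # Illustration (not part of this change): with the default network_cidr of
  # ipv4 = ["10.0.0.0/16"] and ipv6 = ["fd9a:0d2f:b7dc::/48"], the cidrsubnet
  # calls below yield 10.0.0.0/17 and fd9a:0d2f:b7dc::/64 for controllers,
  # and 10.0.128.0/17 and fd9a:0d2f:b7dc:1::/64 for workers.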
  controller_subnets = {
    ipv4 = [for i, cidr in var.network_cidr.ipv4 : cidrsubnet(cidr, 1, 0)]
    ipv6 = [for i, cidr in var.network_cidr.ipv6 : cidrsubnet(cidr, 16, 0)]
  }
  worker_subnets = {
    ipv4 = [for i, cidr in var.network_cidr.ipv4 : cidrsubnet(cidr, 1, 1)]
    ipv6 = [for i, cidr in var.network_cidr.ipv6 : cidrsubnet(cidr, 16, 1)]
  }
  cluster_subnets = {
    ipv4 = concat(local.controller_subnets.ipv4, local.worker_subnets.ipv4)
    ipv6 = concat(local.controller_subnets.ipv6, local.worker_subnets.ipv6)
  }
}

# Organize cluster into a resource group
resource "azurerm_resource_group" "cluster" {
  name = var.cluster_name
@@ -5,23 +23,28 @@ resource "azurerm_resource_group" "cluster" {
}

resource "azurerm_virtual_network" "network" {
  name = var.cluster_name
  resource_group_name = azurerm_resource_group.cluster.name

  name = var.cluster_name
  location = azurerm_resource_group.cluster.location
  address_space = [var.host_cidr]
  location = azurerm_resource_group.cluster.location
  address_space = concat(
    var.network_cidr.ipv4,
    var.network_cidr.ipv6
  )
}

# Subnets - separate subnets for controller and workers because Azure
# network security groups are based on IPv4 CIDR rather than instance
# tags like GCP or security group membership like AWS
# Subnets - separate subnets for controllers and workers because Azure
# network security groups are oriented around address prefixes rather
# than instance tags (GCP) or security group membership (AWS)

resource "azurerm_subnet" "controller" {
  resource_group_name = azurerm_resource_group.cluster.name

  name = "controller"
  resource_group_name = azurerm_resource_group.cluster.name
  virtual_network_name = azurerm_virtual_network.network.name
  address_prefixes = [cidrsubnet(var.host_cidr, 1, 0)]
  address_prefixes = concat(
    local.controller_subnets.ipv4,
    local.controller_subnets.ipv6,
  )
  default_outbound_access_enabled = false
}

resource "azurerm_subnet_network_security_group_association" "controller" {
@@ -30,11 +53,14 @@ resource "azurerm_subnet_network_security_group_association" "controller" {
}

resource "azurerm_subnet" "worker" {
  resource_group_name = azurerm_resource_group.cluster.name

  name = "worker"
  resource_group_name = azurerm_resource_group.cluster.name
  virtual_network_name = azurerm_virtual_network.network.name
  address_prefixes = [cidrsubnet(var.host_cidr, 1, 1)]
  address_prefixes = concat(
    local.worker_subnets.ipv4,
    local.worker_subnets.ipv6,
  )
  default_outbound_access_enabled = false
}

resource "azurerm_subnet_network_security_group_association" "worker" {

@@ -10,6 +10,11 @@ output "ingress_static_ipv4" {
  description = "IPv4 address of the load balancer for distributing traffic to Ingress controllers"
}

output "ingress_static_ipv6" {
  value = azurerm_public_ip.ingress-ipv6.ip_address
  description = "IPv6 address of the load balancer for distributing traffic to Ingress controllers"
}

# Outputs for worker pools

output "region" {
@@ -51,12 +56,12 @@ output "worker_security_group_name" {

output "controller_address_prefixes" {
  description = "Controller network subnet CIDR addresses (for source/destination)"
  value = azurerm_subnet.controller.address_prefixes
  value = local.controller_subnets
}

output "worker_address_prefixes" {
  description = "Worker network subnet CIDR addresses (for source/destination)"
  value = azurerm_subnet.worker.address_prefixes
  value = local.worker_subnets
}

# Outputs for custom load balancing
@@ -66,9 +71,12 @@ output "loadbalancer_id" {
  value = azurerm_lb.cluster.id
}

output "backend_address_pool_id" {
  description = "ID of the worker backend address pool"
  value = azurerm_lb_backend_address_pool.worker.id
output "backend_address_pool_ids" {
  description = "IDs of the worker backend address pools"
  value = {
    ipv4 = [azurerm_lb_backend_address_pool.worker-ipv4.id]
    ipv6 = [azurerm_lb_backend_address_pool.worker-ipv6.id]
  }
}

# Outputs for debug

@@ -1,214 +1,223 @@
# Controller security group

resource "azurerm_network_security_group" "controller" {
  name = "${var.cluster_name}-controller"
  resource_group_name = azurerm_resource_group.cluster.name

  name = "${var.cluster_name}-controller"
  location = azurerm_resource_group.cluster.location
  location = azurerm_resource_group.cluster.location
}

resource "azurerm_network_security_rule" "controller-icmp" {
  resource_group_name = azurerm_resource_group.cluster.name
  for_each = local.controller_subnets

  name = "allow-icmp"
  name = "allow-icmp-${each.key}"
  resource_group_name = azurerm_resource_group.cluster.name
  network_security_group_name = azurerm_network_security_group.controller.name
  priority = "1995"
  priority = 1995 + (each.key == "ipv4" ? 0 : 1)
  access = "Allow"
  direction = "Inbound"
  protocol = "Icmp"
  source_port_range = "*"
  destination_port_range = "*"
  source_address_prefixes = concat(azurerm_subnet.controller.address_prefixes, azurerm_subnet.worker.address_prefixes)
  destination_address_prefixes = azurerm_subnet.controller.address_prefixes
  source_address_prefixes = local.cluster_subnets[each.key]
  destination_address_prefixes = local.controller_subnets[each.key]
}

resource "azurerm_network_security_rule" "controller-ssh" {
  resource_group_name = azurerm_resource_group.cluster.name
  for_each = local.controller_subnets

  name = "allow-ssh"
  name = "allow-ssh-${each.key}"
  resource_group_name = azurerm_resource_group.cluster.name
  network_security_group_name = azurerm_network_security_group.controller.name
  priority = "2000"
  priority = 2000 + (each.key == "ipv4" ? 0 : 1)
  access = "Allow"
  direction = "Inbound"
  protocol = "Tcp"
  source_port_range = "*"
  destination_port_range = "22"
  source_address_prefix = "*"
  destination_address_prefixes = azurerm_subnet.controller.address_prefixes
  destination_address_prefixes = local.controller_subnets[each.key]
}

resource "azurerm_network_security_rule" "controller-etcd" {
  resource_group_name = azurerm_resource_group.cluster.name
  for_each = local.controller_subnets

  name = "allow-etcd"
  name = "allow-etcd-${each.key}"
  resource_group_name = azurerm_resource_group.cluster.name
  network_security_group_name = azurerm_network_security_group.controller.name
  priority = "2005"
  priority = 2005 + (each.key == "ipv4" ? 0 : 1)
  access = "Allow"
  direction = "Inbound"
  protocol = "Tcp"
  source_port_range = "*"
  destination_port_range = "2379-2380"
  source_address_prefixes = azurerm_subnet.controller.address_prefixes
  destination_address_prefixes = azurerm_subnet.controller.address_prefixes
  source_address_prefixes = local.controller_subnets[each.key]
  destination_address_prefixes = local.controller_subnets[each.key]
}

# Allow Prometheus to scrape etcd metrics
resource "azurerm_network_security_rule" "controller-etcd-metrics" {
  resource_group_name = azurerm_resource_group.cluster.name
  for_each = local.controller_subnets

  name = "allow-etcd-metrics"
  name = "allow-etcd-metrics-${each.key}"
  resource_group_name = azurerm_resource_group.cluster.name
  network_security_group_name = azurerm_network_security_group.controller.name
  priority = "2010"
  priority = 2010 + (each.key == "ipv4" ? 0 : 1)
  access = "Allow"
  direction = "Inbound"
  protocol = "Tcp"
  source_port_range = "*"
  destination_port_range = "2381"
  source_address_prefixes = azurerm_subnet.worker.address_prefixes
  destination_address_prefixes = azurerm_subnet.controller.address_prefixes
  source_address_prefixes = local.worker_subnets[each.key]
  destination_address_prefixes = local.controller_subnets[each.key]
}

# Allow Prometheus to scrape kube-proxy metrics
resource "azurerm_network_security_rule" "controller-kube-proxy" {
  resource_group_name = azurerm_resource_group.cluster.name
  for_each = local.controller_subnets

  name = "allow-kube-proxy-metrics"
  name = "allow-kube-proxy-metrics-${each.key}"
  resource_group_name = azurerm_resource_group.cluster.name
  network_security_group_name = azurerm_network_security_group.controller.name
  priority = "2011"
  priority = 2012 + (each.key == "ipv4" ? 0 : 1)
  access = "Allow"
  direction = "Inbound"
  protocol = "Tcp"
  source_port_range = "*"
  destination_port_range = "10249"
  source_address_prefixes = azurerm_subnet.worker.address_prefixes
  destination_address_prefixes = azurerm_subnet.controller.address_prefixes
  source_address_prefixes = local.worker_subnets[each.key]
  destination_address_prefixes = local.controller_subnets[each.key]
}

# Allow Prometheus to scrape kube-scheduler and kube-controller-manager metrics
resource "azurerm_network_security_rule" "controller-kube-metrics" {
  resource_group_name = azurerm_resource_group.cluster.name
  for_each = local.controller_subnets

  name = "allow-kube-metrics"
  name = "allow-kube-metrics-${each.key}"
  resource_group_name = azurerm_resource_group.cluster.name
  network_security_group_name = azurerm_network_security_group.controller.name
  priority = "2012"
  priority = 2014 + (each.key == "ipv4" ? 0 : 1)
  access = "Allow"
  direction = "Inbound"
  protocol = "Tcp"
  source_port_range = "*"
  destination_port_range = "10257-10259"
  source_address_prefixes = azurerm_subnet.worker.address_prefixes
  destination_address_prefixes = azurerm_subnet.controller.address_prefixes
  source_address_prefixes = local.worker_subnets[each.key]
  destination_address_prefixes = local.controller_subnets[each.key]
}

resource "azurerm_network_security_rule" "controller-apiserver" {
|
||||
resource_group_name = azurerm_resource_group.cluster.name
|
||||
for_each = local.controller_subnets
|
||||
|
||||
name = "allow-apiserver"
|
||||
name = "allow-apiserver-${each.key}"
|
||||
resource_group_name = azurerm_resource_group.cluster.name
|
||||
network_security_group_name = azurerm_network_security_group.controller.name
|
||||
priority = "2015"
|
||||
priority = 2016 + (each.key == "ipv4" ? 0 : 1)
|
||||
access = "Allow"
|
||||
direction = "Inbound"
|
||||
protocol = "Tcp"
|
||||
source_port_range = "*"
|
||||
destination_port_range = "6443"
|
||||
source_address_prefix = "*"
|
||||
destination_address_prefixes = azurerm_subnet.controller.address_prefixes
|
||||
destination_address_prefixes = local.controller_subnets[each.key]
|
||||
}
|
||||
|
||||
resource "azurerm_network_security_rule" "controller-cilium-health" {
|
||||
resource_group_name = azurerm_resource_group.cluster.name
|
||||
count = var.networking == "cilium" ? 1 : 0
|
||||
for_each = var.networking == "cilium" ? local.controller_subnets : {}
|
||||
|
||||
name = "allow-cilium-health"
|
||||
name = "allow-cilium-health-${each.key}"
|
||||
resource_group_name = azurerm_resource_group.cluster.name
|
||||
network_security_group_name = azurerm_network_security_group.controller.name
|
||||
priority = "2018"
|
||||
priority = 2018 + (each.key == "ipv4" ? 0 : 1)
|
||||
access = "Allow"
|
||||
direction = "Inbound"
|
||||
protocol = "Tcp"
|
||||
source_port_range = "*"
|
||||
destination_port_range = "4240"
|
||||
source_address_prefixes = concat(azurerm_subnet.controller.address_prefixes, azurerm_subnet.worker.address_prefixes)
|
||||
destination_address_prefixes = azurerm_subnet.controller.address_prefixes
|
||||
source_address_prefixes = local.cluster_subnets[each.key]
|
||||
destination_address_prefixes = local.controller_subnets[each.key]
|
||||
}
|
||||
|
||||
resource "azurerm_network_security_rule" "controller-cilium-metrics" {
|
||||
resource_group_name = azurerm_resource_group.cluster.name
|
||||
count = var.networking == "cilium" ? 1 : 0
|
||||
for_each = var.networking == "cilium" ? local.controller_subnets : {}
|
||||
|
||||
name = "allow-cilium-metrics"
|
||||
name = "allow-cilium-metrics-${each.key}"
|
||||
resource_group_name = azurerm_resource_group.cluster.name
|
||||
network_security_group_name = azurerm_network_security_group.controller.name
|
||||
priority = "2019"
|
||||
priority = 2035 + (each.key == "ipv4" ? 0 : 1)
|
||||
access = "Allow"
|
||||
direction = "Inbound"
|
||||
protocol = "Tcp"
|
||||
source_port_range = "*"
|
||||
destination_port_range = "9962-9965"
|
||||
source_address_prefixes = concat(azurerm_subnet.controller.address_prefixes, azurerm_subnet.worker.address_prefixes)
|
||||
destination_address_prefixes = azurerm_subnet.controller.address_prefixes
|
||||
source_address_prefixes = local.cluster_subnets[each.key]
|
||||
destination_address_prefixes = local.controller_subnets[each.key]
|
||||
}
|
||||
|
||||
resource "azurerm_network_security_rule" "controller-vxlan" {
|
||||
resource_group_name = azurerm_resource_group.cluster.name
|
||||
for_each = local.controller_subnets
|
||||
|
||||
name = "allow-vxlan"
|
||||
name = "allow-vxlan-${each.key}"
|
||||
resource_group_name = azurerm_resource_group.cluster.name
|
||||
network_security_group_name = azurerm_network_security_group.controller.name
|
||||
priority = "2020"
|
||||
priority = 2020 + (each.key == "ipv4" ? 0 : 1)
|
||||
access = "Allow"
|
||||
direction = "Inbound"
|
||||
protocol = "Udp"
|
||||
source_port_range = "*"
|
||||
destination_port_range = "4789"
|
||||
source_address_prefixes = concat(azurerm_subnet.controller.address_prefixes, azurerm_subnet.worker.address_prefixes)
|
||||
destination_address_prefixes = azurerm_subnet.controller.address_prefixes
|
||||
source_address_prefixes = local.cluster_subnets[each.key]
|
||||
destination_address_prefixes = local.controller_subnets[each.key]
|
||||
}
|
||||
|
||||
resource "azurerm_network_security_rule" "controller-linux-vxlan" {
|
||||
resource_group_name = azurerm_resource_group.cluster.name
|
||||
for_each = local.controller_subnets
|
||||
|
||||
name = "allow-linux-vxlan"
|
||||
name = "allow-linux-vxlan-${each.key}"
|
||||
resource_group_name = azurerm_resource_group.cluster.name
|
||||
network_security_group_name = azurerm_network_security_group.controller.name
|
||||
priority = "2021"
|
||||
priority = 2022 + (each.key == "ipv4" ? 0 : 1)
|
||||
access = "Allow"
|
||||
direction = "Inbound"
|
||||
protocol = "Udp"
|
||||
source_port_range = "*"
|
||||
destination_port_range = "8472"
|
||||
source_address_prefixes = concat(azurerm_subnet.controller.address_prefixes, azurerm_subnet.worker.address_prefixes)
|
||||
destination_address_prefixes = azurerm_subnet.controller.address_prefixes
|
||||
source_address_prefixes = local.cluster_subnets[each.key]
|
||||
destination_address_prefixes = local.controller_subnets[each.key]
|
||||
}
|
||||
|
||||
# Allow Prometheus to scrape node-exporter daemonset
|
||||
resource "azurerm_network_security_rule" "controller-node-exporter" {
|
||||
resource_group_name = azurerm_resource_group.cluster.name
|
||||
for_each = local.controller_subnets
|
||||
|
||||
name = "allow-node-exporter"
|
||||
name = "allow-node-exporter-${each.key}"
|
||||
resource_group_name = azurerm_resource_group.cluster.name
|
||||
network_security_group_name = azurerm_network_security_group.controller.name
|
||||
priority = "2025"
|
||||
priority = 2025 + (each.key == "ipv4" ? 0 : 1)
|
||||
access = "Allow"
|
||||
direction = "Inbound"
|
||||
protocol = "Tcp"
|
||||
source_port_range = "*"
|
||||
destination_port_range = "9100"
|
||||
source_address_prefixes = azurerm_subnet.worker.address_prefixes
|
||||
destination_address_prefixes = azurerm_subnet.controller.address_prefixes
|
||||
source_address_prefixes = local.worker_subnets[each.key]
|
||||
destination_address_prefixes = local.controller_subnets[each.key]
|
||||
}
|
||||
|
||||
# Allow apiserver to access kubelet's for exec, log, port-forward
|
||||
resource "azurerm_network_security_rule" "controller-kubelet" {
|
||||
resource_group_name = azurerm_resource_group.cluster.name
|
||||
for_each = local.controller_subnets
|
||||
|
||||
name = "allow-kubelet"
|
||||
name = "allow-kubelet-${each.key}"
|
||||
resource_group_name = azurerm_resource_group.cluster.name
|
||||
network_security_group_name = azurerm_network_security_group.controller.name
|
||||
priority = "2030"
|
||||
priority = 2030 + (each.key == "ipv4" ? 0 : 1)
|
||||
access = "Allow"
|
||||
direction = "Inbound"
|
||||
protocol = "Tcp"
|
||||
source_port_range = "*"
|
||||
destination_port_range = "10250"
|
||||
|
||||
# allow Prometheus to scrape kubelet metrics too
|
||||
source_address_prefixes = concat(azurerm_subnet.controller.address_prefixes, azurerm_subnet.worker.address_prefixes)
|
||||
destination_address_prefixes = azurerm_subnet.controller.address_prefixes
|
||||
source_address_prefixes = local.cluster_subnets[each.key]
|
||||
destination_address_prefixes = local.controller_subnets[each.key]
|
||||
}
|
||||
|
||||
# Override Azure AllowVNetInBound and AllowAzureLoadBalancerInBound
|
||||
@@ -247,182 +256,189 @@ resource "azurerm_network_security_rule" "controller-deny-all" {
# Worker security group

resource "azurerm_network_security_group" "worker" {
  name = "${var.cluster_name}-worker"
  resource_group_name = azurerm_resource_group.cluster.name

  name = "${var.cluster_name}-worker"
  location = azurerm_resource_group.cluster.location
  location = azurerm_resource_group.cluster.location
}

resource "azurerm_network_security_rule" "worker-icmp" {
  resource_group_name = azurerm_resource_group.cluster.name
  for_each = local.worker_subnets

  name = "allow-icmp"
  name = "allow-icmp-${each.key}"
  resource_group_name = azurerm_resource_group.cluster.name
  network_security_group_name = azurerm_network_security_group.worker.name
  priority = "1995"
  priority = 1995 + (each.key == "ipv4" ? 0 : 1)
  access = "Allow"
  direction = "Inbound"
  protocol = "Icmp"
  source_port_range = "*"
  destination_port_range = "*"
  source_address_prefixes = concat(azurerm_subnet.controller.address_prefixes, azurerm_subnet.worker.address_prefixes)
  destination_address_prefixes = azurerm_subnet.worker.address_prefixes
  source_address_prefixes = local.cluster_subnets[each.key]
  destination_address_prefixes = local.worker_subnets[each.key]
}

resource "azurerm_network_security_rule" "worker-ssh" {
  resource_group_name = azurerm_resource_group.cluster.name
  for_each = local.worker_subnets

  name = "allow-ssh"
  name = "allow-ssh-${each.key}"
  resource_group_name = azurerm_resource_group.cluster.name
  network_security_group_name = azurerm_network_security_group.worker.name
  priority = "2000"
  priority = 2000 + (each.key == "ipv4" ? 0 : 1)
  access = "Allow"
  direction = "Inbound"
  protocol = "Tcp"
  source_port_range = "*"
  destination_port_range = "22"
  source_address_prefixes = azurerm_subnet.controller.address_prefixes
  destination_address_prefixes = azurerm_subnet.worker.address_prefixes
  source_address_prefixes = local.controller_subnets[each.key]
  destination_address_prefixes = local.worker_subnets[each.key]
}

resource "azurerm_network_security_rule" "worker-http" {
  resource_group_name = azurerm_resource_group.cluster.name
  for_each = local.worker_subnets

  name = "allow-http"
  name = "allow-http-${each.key}"
  resource_group_name = azurerm_resource_group.cluster.name
  network_security_group_name = azurerm_network_security_group.worker.name
  priority = "2005"
  priority = 2005 + (each.key == "ipv4" ? 0 : 1)
  access = "Allow"
  direction = "Inbound"
  protocol = "Tcp"
  source_port_range = "*"
  destination_port_range = "80"
  source_address_prefix = "*"
  destination_address_prefixes = azurerm_subnet.worker.address_prefixes
  destination_address_prefixes = local.worker_subnets[each.key]
}

resource "azurerm_network_security_rule" "worker-https" {
  resource_group_name = azurerm_resource_group.cluster.name
  for_each = local.worker_subnets

  name = "allow-https"
  name = "allow-https-${each.key}"
  resource_group_name = azurerm_resource_group.cluster.name
  network_security_group_name = azurerm_network_security_group.worker.name
  priority = "2010"
  priority = 2010 + (each.key == "ipv4" ? 0 : 1)
  access = "Allow"
  direction = "Inbound"
  protocol = "Tcp"
  source_port_range = "*"
  destination_port_range = "443"
  source_address_prefix = "*"
  destination_address_prefixes = azurerm_subnet.worker.address_prefixes
  destination_address_prefixes = local.worker_subnets[each.key]
}

resource "azurerm_network_security_rule" "worker-cilium-health" {
  resource_group_name = azurerm_resource_group.cluster.name
  count = var.networking == "cilium" ? 1 : 0
  for_each = var.networking == "cilium" ? local.worker_subnets : {}

  name = "allow-cilium-health"
  name = "allow-cilium-health-${each.key}"
  resource_group_name = azurerm_resource_group.cluster.name
  network_security_group_name = azurerm_network_security_group.worker.name
  priority = "2013"
  priority = 2012 + (each.key == "ipv4" ? 0 : 1)
  access = "Allow"
  direction = "Inbound"
  protocol = "Tcp"
  source_port_range = "*"
  destination_port_range = "4240"
  source_address_prefixes = concat(azurerm_subnet.controller.address_prefixes, azurerm_subnet.worker.address_prefixes)
  destination_address_prefixes = azurerm_subnet.worker.address_prefixes
  source_address_prefixes = local.cluster_subnets[each.key]
  destination_address_prefixes = local.worker_subnets[each.key]
}

resource "azurerm_network_security_rule" "worker-cilium-metrics" {
|
||||
resource_group_name = azurerm_resource_group.cluster.name
|
||||
count = var.networking == "cilium" ? 1 : 0
|
||||
for_each = var.networking == "cilium" ? local.worker_subnets : {}
|
||||
|
||||
name = "allow-cilium-metrics"
|
||||
name = "allow-cilium-metrics-${each.key}"
|
||||
resource_group_name = azurerm_resource_group.cluster.name
|
||||
network_security_group_name = azurerm_network_security_group.worker.name
|
||||
priority = "2014"
|
||||
priority = 2014 + (each.key == "ipv4" ? 0 : 1)
|
||||
access = "Allow"
|
||||
direction = "Inbound"
|
||||
protocol = "Tcp"
|
||||
source_port_range = "*"
|
||||
destination_port_range = "9962-9965"
|
||||
source_address_prefixes = concat(azurerm_subnet.controller.address_prefixes, azurerm_subnet.worker.address_prefixes)
|
||||
destination_address_prefixes = azurerm_subnet.worker.address_prefixes
|
||||
source_address_prefixes = local.cluster_subnets[each.key]
|
||||
destination_address_prefixes = local.worker_subnets[each.key]
|
||||
}
|
||||
|
||||
resource "azurerm_network_security_rule" "worker-vxlan" {
|
||||
resource_group_name = azurerm_resource_group.cluster.name
|
||||
for_each = local.worker_subnets
|
||||
|
||||
name = "allow-vxlan"
|
||||
name = "allow-vxlan-${each.key}"
|
||||
resource_group_name = azurerm_resource_group.cluster.name
|
||||
network_security_group_name = azurerm_network_security_group.worker.name
|
||||
priority = "2015"
|
||||
priority = 2016 + (each.key == "ipv4" ? 0 : 1)
|
||||
access = "Allow"
|
||||
direction = "Inbound"
|
||||
protocol = "Udp"
|
||||
source_port_range = "*"
|
||||
destination_port_range = "4789"
|
||||
source_address_prefixes = concat(azurerm_subnet.controller.address_prefixes, azurerm_subnet.worker.address_prefixes)
|
||||
destination_address_prefixes = azurerm_subnet.worker.address_prefixes
|
||||
source_address_prefixes = local.cluster_subnets[each.key]
|
||||
destination_address_prefixes = local.worker_subnets[each.key]
|
||||
}
|
||||
|
||||
resource "azurerm_network_security_rule" "worker-linux-vxlan" {
|
||||
resource_group_name = azurerm_resource_group.cluster.name
|
||||
for_each = local.worker_subnets
|
||||
|
||||
name = "allow-linux-vxlan"
|
||||
name = "allow-linux-vxlan-${each.key}"
|
||||
resource_group_name = azurerm_resource_group.cluster.name
|
||||
network_security_group_name = azurerm_network_security_group.worker.name
|
||||
priority = "2016"
|
||||
priority = 2018 + (each.key == "ipv4" ? 0 : 1)
|
||||
access = "Allow"
|
||||
direction = "Inbound"
|
||||
protocol = "Udp"
|
||||
source_port_range = "*"
|
||||
destination_port_range = "8472"
|
||||
source_address_prefixes = concat(azurerm_subnet.controller.address_prefixes, azurerm_subnet.worker.address_prefixes)
|
||||
destination_address_prefixes = azurerm_subnet.worker.address_prefixes
|
||||
source_address_prefixes = local.cluster_subnets[each.key]
|
||||
destination_address_prefixes = local.worker_subnets[each.key]
|
||||
}
|
||||
|
||||
# Allow Prometheus to scrape node-exporter daemonset
|
||||
resource "azurerm_network_security_rule" "worker-node-exporter" {
|
||||
resource_group_name = azurerm_resource_group.cluster.name
|
||||
for_each = local.worker_subnets
|
||||
|
||||
name = "allow-node-exporter"
|
||||
name = "allow-node-exporter-${each.key}"
|
||||
resource_group_name = azurerm_resource_group.cluster.name
|
||||
network_security_group_name = azurerm_network_security_group.worker.name
|
||||
priority = "2020"
|
||||
priority = 2020 + (each.key == "ipv4" ? 0 : 1)
|
||||
access = "Allow"
|
||||
direction = "Inbound"
|
||||
protocol = "Tcp"
|
||||
source_port_range = "*"
|
||||
destination_port_range = "9100"
|
||||
source_address_prefixes = azurerm_subnet.worker.address_prefixes
|
||||
destination_address_prefixes = azurerm_subnet.worker.address_prefixes
|
||||
source_address_prefixes = local.worker_subnets[each.key]
|
||||
destination_address_prefixes = local.worker_subnets[each.key]
|
||||
}
|
||||
|
||||
# Allow Prometheus to scrape kube-proxy
|
||||
resource "azurerm_network_security_rule" "worker-kube-proxy" {
|
||||
resource_group_name = azurerm_resource_group.cluster.name
|
||||
for_each = local.worker_subnets
|
||||
|
||||
name = "allow-kube-proxy"
|
||||
name = "allow-kube-proxy-${each.key}"
|
||||
resource_group_name = azurerm_resource_group.cluster.name
|
||||
network_security_group_name = azurerm_network_security_group.worker.name
|
||||
priority = "2024"
|
||||
priority = 2024 + (each.key == "ipv4" ? 0 : 1)
|
||||
access = "Allow"
|
||||
direction = "Inbound"
|
||||
protocol = "Tcp"
|
||||
source_port_range = "*"
|
||||
destination_port_range = "10249"
|
||||
source_address_prefixes = azurerm_subnet.worker.address_prefixes
|
||||
destination_address_prefixes = azurerm_subnet.worker.address_prefixes
|
||||
source_address_prefixes = local.worker_subnets[each.key]
|
||||
destination_address_prefixes = local.worker_subnets[each.key]
|
||||
}
|
||||
|
||||
# Allow apiserver to access kubelet's for exec, log, port-forward
|
||||
resource "azurerm_network_security_rule" "worker-kubelet" {
|
||||
resource_group_name = azurerm_resource_group.cluster.name
|
||||
for_each = local.worker_subnets
|
||||
|
||||
name = "allow-kubelet"
|
||||
name = "allow-kubelet-${each.key}"
|
||||
resource_group_name = azurerm_resource_group.cluster.name
|
||||
network_security_group_name = azurerm_network_security_group.worker.name
|
||||
priority = "2025"
|
||||
priority = 2026 + (each.key == "ipv4" ? 0 : 1)
|
||||
access = "Allow"
|
||||
direction = "Inbound"
|
||||
protocol = "Tcp"
|
||||
source_port_range = "*"
|
||||
destination_port_range = "10250"
|
||||
|
||||
# allow Prometheus to scrape kubelet metrics too
|
||||
source_address_prefixes = concat(azurerm_subnet.controller.address_prefixes, azurerm_subnet.worker.address_prefixes)
|
||||
destination_address_prefixes = azurerm_subnet.worker.address_prefixes
|
||||
source_address_prefixes = local.cluster_subnets[each.key]
|
||||
destination_address_prefixes = local.worker_subnets[each.key]
|
||||
}
|
||||
|
||||
# Override Azure AllowVNetInBound and AllowAzureLoadBalancerInBound
|
||||
|
@@ -18,7 +18,7 @@ resource "null_resource" "copy-controller-secrets" {

  connection {
    type = "ssh"
    host = azurerm_public_ip.controllers.*.ip_address[count.index]
    host = azurerm_public_ip.controllers-ipv4[count.index].ip_address
    user = "core"
    timeout = "15m"
  }
@@ -45,7 +45,7 @@ resource "null_resource" "bootstrap" {

  connection {
    type = "ssh"
    host = azurerm_public_ip.controllers.*.ip_address[0]
    host = azurerm_public_ip.controllers-ipv4[0].ip_address
    user = "core"
    timeout = "15m"
  }

@@ -100,10 +100,15 @@ variable "networking" {
  default = "cilium"
}

variable "host_cidr" {
  type = string
  description = "CIDR IPv4 range to assign to instances"
  default = "10.0.0.0/16"
variable "network_cidr" {
  type = object({
    ipv4 = list(string)
    ipv6 = optional(list(string), ["fd9a:0d2f:b7dc::/48"])
  })
  description = "Virtual network CIDR ranges"
  default = {
    ipv4 = ["10.0.0.0/16"]
  }
}
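
# Illustrative usage (not part of this change): setting only the ipv4 field in
# terraform.tfvars keeps the ULA IPv6 default above, e.g.
#   network_cidr = {
#     ipv4 = ["10.1.0.0/16"]
#   }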

variable "pod_cidr" {

@@ -3,11 +3,11 @@ module "workers" {
  name = var.cluster_name

  # Azure
  resource_group_name = azurerm_resource_group.cluster.name
  region = azurerm_resource_group.cluster.location
  subnet_id = azurerm_subnet.worker.id
  security_group_id = azurerm_network_security_group.worker.id
  backend_address_pool_id = azurerm_lb_backend_address_pool.worker.id
  resource_group_name = azurerm_resource_group.cluster.name
  region = azurerm_resource_group.cluster.location
  subnet_id = azurerm_subnet.worker.id
  security_group_id = azurerm_network_security_group.worker.id
  backend_address_pool_ids = local.backend_address_pool_ids

  worker_count = var.worker_count
  vm_type = var.worker_type

@@ -25,9 +25,12 @@ variable "security_group_id" {
  description = "Must be set to the `worker_security_group_id` output by cluster"
}

variable "backend_address_pool_id" {
  type = string
  description = "Must be set to the `worker_backend_address_pool_id` output by cluster"
variable "backend_address_pool_ids" {
  type = object({
    ipv4 = list(string)
    ipv6 = list(string)
  })
  description = "Must be set to the `backend_address_pool_ids` output by cluster"
}

# instances

@@ -9,19 +9,14 @@ locals {

# Workers scale set
resource "azurerm_linux_virtual_machine_scale_set" "workers" {
  name = "${var.name}-worker"
  resource_group_name = var.resource_group_name

  name = "${var.name}-worker"
  location = var.region
  sku = var.vm_type
  instances = var.worker_count
  location = var.region
  sku = var.vm_type
  instances = var.worker_count
  # instance name prefix for instances in the set
  computer_name_prefix = "${var.name}-worker"
  single_placement_group = false
  custom_data = base64encode(data.ct_config.worker.rendered)
  boot_diagnostics {
    # defaults to a managed storage account
  }

  # storage
  os_disk {
@@ -46,13 +41,6 @@ resource "azurerm_linux_virtual_machine_scale_set" "workers" {
    }
  }

  # Azure requires setting admin_ssh_key, though Ignition custom_data handles it too
  admin_username = "core"
  admin_ssh_key {
    username = "core"
    public_key = local.azure_authorized_key
  }

  # network
  network_interface {
    name = "nic0"
@@ -60,13 +48,33 @@ resource "azurerm_linux_virtual_machine_scale_set" "workers" {
    network_security_group_id = var.security_group_id

    ip_configuration {
      name = "ip0"
      name = "ipv4"
      version = "IPv4"
      primary = true
      subnet_id = var.subnet_id

      # backend address pool to which the NIC should be added
      load_balancer_backend_address_pool_ids = [var.backend_address_pool_id]
      load_balancer_backend_address_pool_ids = var.backend_address_pool_ids.ipv4
    }
    ip_configuration {
      name = "ipv6"
      version = "IPv6"
      subnet_id = var.subnet_id
      # backend address pool to which the NIC should be added
      load_balancer_backend_address_pool_ids = var.backend_address_pool_ids.ipv6
    }
  }

  # boot
  custom_data = base64encode(data.ct_config.worker.rendered)
  boot_diagnostics {
    # defaults to a managed storage account
  }

  # Azure requires an RSA admin_ssh_key
  admin_username = "core"
  admin_ssh_key {
    username = "core"
    public_key = local.azure_authorized_key
  }

  # lifecycle
@@ -81,18 +89,15 @@ resource "azurerm_linux_virtual_machine_scale_set" "workers" {

# Scale up or down to maintain desired number, tolerating deallocations.
resource "azurerm_monitor_autoscale_setting" "workers" {
  name = "${var.name}-maintain-desired"
  resource_group_name = var.resource_group_name

  name = "${var.name}-maintain-desired"
  location = var.region

  location = var.region
  # autoscale
  enabled = true
  target_resource_id = azurerm_linux_virtual_machine_scale_set.workers.id

  profile {
    name = "default"

    capacity {
      minimum = var.worker_count
      default = var.worker_count