diff --git a/aws/container-linux/kubernetes/bootstrap.tf b/aws/container-linux/kubernetes/bootstrap.tf
index abd8c0fb..32ff2a46 100644
--- a/aws/container-linux/kubernetes/bootstrap.tf
+++ b/aws/container-linux/kubernetes/bootstrap.tf
@@ -1,6 +1,6 @@
 # Kubernetes assets (kubeconfig, manifests)
 module "bootstrap" {
-  source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=e75697ce35d7773705f0b9b28ce1ffbe99f9493c"
+  source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=af36c539360696f5ca6cf5b06bb729477a003602"
 
   cluster_name = var.cluster_name
   api_servers  = [format("%s.%s", var.cluster_name, var.dns_zone)]
diff --git a/aws/container-linux/kubernetes/security.tf b/aws/container-linux/kubernetes/security.tf
index 60727af8..5a19930d 100644
--- a/aws/container-linux/kubernetes/security.tf
+++ b/aws/container-linux/kubernetes/security.tf
@@ -13,6 +13,30 @@ resource "aws_security_group" "controller" {
   }
 }
 
+resource "aws_security_group_rule" "controller-icmp" {
+  count = var.networking == "cilium" ? 1 : 0
+
+  security_group_id = aws_security_group.controller.id
+
+  type                     = "ingress"
+  protocol                 = "icmp"
+  from_port                = 8
+  to_port                  = 0
+  source_security_group_id = aws_security_group.worker.id
+}
+
+resource "aws_security_group_rule" "controller-icmp-self" {
+  count = var.networking == "cilium" ? 1 : 0
+
+  security_group_id = aws_security_group.controller.id
+
+  type      = "ingress"
+  protocol  = "icmp"
+  from_port = 8
+  to_port   = 0
+  self      = true
+}
+
 resource "aws_security_group_rule" "controller-ssh" {
   security_group_id = aws_security_group.controller.id
 
@@ -44,39 +68,31 @@ resource "aws_security_group_rule" "controller-etcd-metrics" {
   source_security_group_id = aws_security_group.worker.id
 }
 
-# Allow Prometheus to scrape kube-proxy
-resource "aws_security_group_rule" "kube-proxy-metrics" {
+resource "aws_security_group_rule" "controller-cilium-health" {
+  count = var.networking == "cilium" ? 1 : 0
+
   security_group_id = aws_security_group.controller.id
 
   type                     = "ingress"
   protocol                 = "tcp"
-  from_port                = 10249
-  to_port                  = 10249
+  from_port                = 4240
+  to_port                  = 4240
   source_security_group_id = aws_security_group.worker.id
 }
 
-# Allow Prometheus to scrape kube-scheduler
-resource "aws_security_group_rule" "controller-scheduler-metrics" {
+resource "aws_security_group_rule" "controller-cilium-health-self" {
+  count = var.networking == "cilium" ? 1 : 0
+
   security_group_id = aws_security_group.controller.id
 
-  type                     = "ingress"
-  protocol                 = "tcp"
-  from_port                = 10251
-  to_port                  = 10251
-  source_security_group_id = aws_security_group.worker.id
-}
-
-# Allow Prometheus to scrape kube-controller-manager
-resource "aws_security_group_rule" "controller-manager-metrics" {
-  security_group_id = aws_security_group.controller.id
-
-  type                     = "ingress"
-  protocol                 = "tcp"
-  from_port                = 10252
-  to_port                  = 10252
-  source_security_group_id = aws_security_group.worker.id
+  type      = "ingress"
+  protocol  = "tcp"
+  from_port = 4240
+  to_port   = 4240
+  self      = true
 }
 
+# IANA VXLAN default
 resource "aws_security_group_rule" "controller-vxlan" {
   count = var.networking == "flannel" ? 1 : 0
 
@@ -111,6 +127,31 @@ resource "aws_security_group_rule" "controller-apiserver" {
   cidr_blocks       = ["0.0.0.0/0"]
 }
 
+# Linux VXLAN default
+resource "aws_security_group_rule" "controller-linux-vxlan" {
+  count = var.networking == "cilium" ? 1 : 0
+
+  security_group_id = aws_security_group.controller.id
+
+  type                     = "ingress"
+  protocol                 = "udp"
+  from_port                = 8472
+  to_port                  = 8472
+  source_security_group_id = aws_security_group.worker.id
+}
+
+resource "aws_security_group_rule" "controller-linux-vxlan-self" {
+  count = var.networking == "cilium" ? 1 : 0
+
+  security_group_id = aws_security_group.controller.id
+
+  type      = "ingress"
+  protocol  = "udp"
+  from_port = 8472
+  to_port   = 8472
+  self      = true
+}
+
 # Allow Prometheus to scrape node-exporter daemonset
 resource "aws_security_group_rule" "controller-node-exporter" {
   security_group_id = aws_security_group.controller.id
 
@@ -122,6 +163,17 @@ resource "aws_security_group_rule" "controller-node-exporter" {
   source_security_group_id = aws_security_group.worker.id
 }
 
+# Allow Prometheus to scrape kube-proxy
+resource "aws_security_group_rule" "kube-proxy-metrics" {
+  security_group_id = aws_security_group.controller.id
+
+  type                     = "ingress"
+  protocol                 = "tcp"
+  from_port                = 10249
+  to_port                  = 10249
+  source_security_group_id = aws_security_group.worker.id
+}
+
 # Allow apiserver to access kubelets for exec, log, port-forward
 resource "aws_security_group_rule" "controller-kubelet" {
   security_group_id = aws_security_group.controller.id
 
@@ -143,6 +195,28 @@ resource "aws_security_group_rule" "controller-kubelet-self" {
   self = true
 }
 
+# Allow Prometheus to scrape kube-scheduler
+resource "aws_security_group_rule" "controller-scheduler-metrics" {
+  security_group_id = aws_security_group.controller.id
+
+  type                     = "ingress"
+  protocol                 = "tcp"
+  from_port                = 10251
+  to_port                  = 10251
+  source_security_group_id = aws_security_group.worker.id
+}
+
+# Allow Prometheus to scrape kube-controller-manager
+resource "aws_security_group_rule" "controller-manager-metrics" {
+  security_group_id = aws_security_group.controller.id
+
+  type                     = "ingress"
+  protocol                 = "tcp"
+  from_port                = 10252
+  to_port                  = 10252
+  source_security_group_id = aws_security_group.worker.id
+}
+
 resource "aws_security_group_rule" "controller-bgp" {
   security_group_id = aws_security_group.controller.id
 
@@ -227,6 +301,30 @@ resource "aws_security_group" "worker" {
   }
 }
 
+resource "aws_security_group_rule" "worker-icmp" {
+  count = var.networking == "cilium" ? 1 : 0
+
+  security_group_id = aws_security_group.worker.id
+
+  type                     = "ingress"
+  protocol                 = "icmp"
+  from_port                = 8
+  to_port                  = 0
+  source_security_group_id = aws_security_group.controller.id
+}
+
+resource "aws_security_group_rule" "worker-icmp-self" {
+  count = var.networking == "cilium" ? 1 : 0
+
+  security_group_id = aws_security_group.worker.id
+
+  type      = "ingress"
+  protocol  = "icmp"
+  from_port = 8
+  to_port   = 0
+  self      = true
+}
+
 resource "aws_security_group_rule" "worker-ssh" {
   security_group_id = aws_security_group.worker.id
 
@@ -257,6 +355,31 @@ resource "aws_security_group_rule" "worker-https" {
   cidr_blocks       = ["0.0.0.0/0"]
 }
 
+resource "aws_security_group_rule" "worker-cilium-health" {
+  count = var.networking == "cilium" ? 1 : 0
+
+  security_group_id = aws_security_group.worker.id
+
+  type                     = "ingress"
+  protocol                 = "tcp"
+  from_port                = 4240
+  to_port                  = 4240
+  source_security_group_id = aws_security_group.controller.id
+}
+
+resource "aws_security_group_rule" "worker-cilium-health-self" {
+  count = var.networking == "cilium" ? 1 : 0
+
+  security_group_id = aws_security_group.worker.id
+
+  type      = "ingress"
+  protocol  = "tcp"
+  from_port = 4240
+  to_port   = 4240
+  self      = true
+}
+
+# IANA VXLAN default
 resource "aws_security_group_rule" "worker-vxlan" {
   count = var.networking == "flannel" ? 1 : 0
 
@@ -281,6 +404,31 @@ resource "aws_security_group_rule" "worker-vxlan-self" {
   self = true
 }
 
+# Linux VXLAN default
+resource "aws_security_group_rule" "worker-linux-vxlan" {
+  count = var.networking == "cilium" ? 1 : 0
+
+  security_group_id = aws_security_group.worker.id
+
+  type                     = "ingress"
+  protocol                 = "udp"
+  from_port                = 8472
+  to_port                  = 8472
+  source_security_group_id = aws_security_group.controller.id
+}
+
+resource "aws_security_group_rule" "worker-linux-vxlan-self" {
+  count = var.networking == "cilium" ? 1 : 0
+
+  security_group_id = aws_security_group.worker.id
+
+  type      = "ingress"
+  protocol  = "udp"
+  from_port = 8472
+  to_port   = 8472
+  self      = true
+}
+
 # Allow Prometheus to scrape node-exporter daemonset
 resource "aws_security_group_rule" "worker-node-exporter" {
   security_group_id = aws_security_group.worker.id
diff --git a/aws/fedora-coreos/kubernetes/bootstrap.tf b/aws/fedora-coreos/kubernetes/bootstrap.tf
index 5dd4c677..3af19ef4 100644
--- a/aws/fedora-coreos/kubernetes/bootstrap.tf
+++ b/aws/fedora-coreos/kubernetes/bootstrap.tf
@@ -1,6 +1,6 @@
 # Kubernetes assets (kubeconfig, manifests)
 module "bootstrap" {
-  source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=e75697ce35d7773705f0b9b28ce1ffbe99f9493c"
+  source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=af36c539360696f5ca6cf5b06bb729477a003602"
 
   cluster_name = var.cluster_name
   api_servers  = [format("%s.%s", var.cluster_name, var.dns_zone)]
diff --git a/aws/fedora-coreos/kubernetes/fcc/controller.yaml b/aws/fedora-coreos/kubernetes/fcc/controller.yaml
index 2bc88e1e..1db4af00 100644
--- a/aws/fedora-coreos/kubernetes/fcc/controller.yaml
+++ b/aws/fedora-coreos/kubernetes/fcc/controller.yaml
@@ -176,6 +176,11 @@ storage:
       contents:
         inline: |
           fs.inotify.max_user_watches=16184
+    - path: /etc/sysctl.d/reverse-path-filter.conf
+      contents:
+        inline: |
+          net.ipv4.conf.default.rp_filter=0
+          net.ipv4.conf.*.rp_filter=0
     - path: /etc/systemd/system.conf.d/accounting.conf
       contents:
         inline: |
diff --git a/aws/fedora-coreos/kubernetes/security.tf b/aws/fedora-coreos/kubernetes/security.tf
index 60727af8..5a19930d 100644
--- a/aws/fedora-coreos/kubernetes/security.tf
+++ b/aws/fedora-coreos/kubernetes/security.tf
@@ -13,6 +13,30 @@ resource "aws_security_group" "controller" {
   }
 }
 
+resource "aws_security_group_rule" "controller-icmp" {
+  count = var.networking == "cilium" ? 1 : 0
+
+  security_group_id = aws_security_group.controller.id
+
+  type                     = "ingress"
+  protocol                 = "icmp"
+  from_port                = 8
+  to_port                  = 0
+  source_security_group_id = aws_security_group.worker.id
+}
+
+resource "aws_security_group_rule" "controller-icmp-self" {
+  count = var.networking == "cilium" ? 1 : 0
+
+  security_group_id = aws_security_group.controller.id
+
+  type      = "ingress"
+  protocol  = "icmp"
+  from_port = 8
+  to_port   = 0
+  self      = true
+}
+
 resource "aws_security_group_rule" "controller-ssh" {
   security_group_id = aws_security_group.controller.id
 
@@ -44,39 +68,31 @@ resource "aws_security_group_rule" "controller-etcd-metrics" {
   source_security_group_id = aws_security_group.worker.id
 }
 
-# Allow Prometheus to scrape kube-proxy
-resource "aws_security_group_rule" "kube-proxy-metrics" {
+resource "aws_security_group_rule" "controller-cilium-health" {
+  count = var.networking == "cilium" ? 1 : 0
+
   security_group_id = aws_security_group.controller.id
 
   type                     = "ingress"
   protocol                 = "tcp"
-  from_port                = 10249
-  to_port                  = 10249
+  from_port                = 4240
+  to_port                  = 4240
   source_security_group_id = aws_security_group.worker.id
 }
 
-# Allow Prometheus to scrape kube-scheduler
-resource "aws_security_group_rule" "controller-scheduler-metrics" {
+resource "aws_security_group_rule" "controller-cilium-health-self" {
+  count = var.networking == "cilium" ? 1 : 0
+
   security_group_id = aws_security_group.controller.id
 
-  type                     = "ingress"
-  protocol                 = "tcp"
-  from_port                = 10251
-  to_port                  = 10251
-  source_security_group_id = aws_security_group.worker.id
-}
-
-# Allow Prometheus to scrape kube-controller-manager
-resource "aws_security_group_rule" "controller-manager-metrics" {
-  security_group_id = aws_security_group.controller.id
-
-  type                     = "ingress"
-  protocol                 = "tcp"
-  from_port                = 10252
-  to_port                  = 10252
-  source_security_group_id = aws_security_group.worker.id
+  type      = "ingress"
+  protocol  = "tcp"
+  from_port = 4240
+  to_port   = 4240
+  self      = true
 }
 
+# IANA VXLAN default
 resource "aws_security_group_rule" "controller-vxlan" {
   count = var.networking == "flannel" ? 1 : 0
 
@@ -111,6 +127,31 @@ resource "aws_security_group_rule" "controller-apiserver" {
   cidr_blocks       = ["0.0.0.0/0"]
 }
 
+# Linux VXLAN default
+resource "aws_security_group_rule" "controller-linux-vxlan" {
+  count = var.networking == "cilium" ? 1 : 0
+
+  security_group_id = aws_security_group.controller.id
+
+  type                     = "ingress"
+  protocol                 = "udp"
+  from_port                = 8472
+  to_port                  = 8472
+  source_security_group_id = aws_security_group.worker.id
+}
+
+resource "aws_security_group_rule" "controller-linux-vxlan-self" {
+  count = var.networking == "cilium" ? 1 : 0
+
+  security_group_id = aws_security_group.controller.id
+
+  type      = "ingress"
+  protocol  = "udp"
+  from_port = 8472
+  to_port   = 8472
+  self      = true
+}
+
 # Allow Prometheus to scrape node-exporter daemonset
 resource "aws_security_group_rule" "controller-node-exporter" {
   security_group_id = aws_security_group.controller.id
 
@@ -122,6 +163,17 @@ resource "aws_security_group_rule" "controller-node-exporter" {
   source_security_group_id = aws_security_group.worker.id
 }
 
+# Allow Prometheus to scrape kube-proxy
+resource "aws_security_group_rule" "kube-proxy-metrics" {
+  security_group_id = aws_security_group.controller.id
+
+  type                     = "ingress"
+  protocol                 = "tcp"
+  from_port                = 10249
+  to_port                  = 10249
+  source_security_group_id = aws_security_group.worker.id
+}
+
 # Allow apiserver to access kubelets for exec, log, port-forward
 resource "aws_security_group_rule" "controller-kubelet" {
   security_group_id = aws_security_group.controller.id
 
@@ -143,6 +195,28 @@ resource "aws_security_group_rule" "controller-kubelet-self" {
   self = true
 }
 
+# Allow Prometheus to scrape kube-scheduler
+resource "aws_security_group_rule" "controller-scheduler-metrics" {
+  security_group_id = aws_security_group.controller.id
+
+  type                     = "ingress"
+  protocol                 = "tcp"
+  from_port                = 10251
+  to_port                  = 10251
+  source_security_group_id = aws_security_group.worker.id
+}
+
+# Allow Prometheus to scrape kube-controller-manager
+resource "aws_security_group_rule" "controller-manager-metrics" {
+  security_group_id = aws_security_group.controller.id
+
+  type                     = "ingress"
+  protocol                 = "tcp"
+  from_port                = 10252
+  to_port                  = 10252
+  source_security_group_id = aws_security_group.worker.id
+}
+
 resource "aws_security_group_rule" "controller-bgp" {
   security_group_id = aws_security_group.controller.id
 
@@ -227,6 +301,30 @@ resource "aws_security_group" "worker" {
   }
 }
 
+resource "aws_security_group_rule" "worker-icmp" {
+  count = var.networking == "cilium" ? 1 : 0
+
+  security_group_id = aws_security_group.worker.id
+
+  type                     = "ingress"
+  protocol                 = "icmp"
+  from_port                = 8
+  to_port                  = 0
+  source_security_group_id = aws_security_group.controller.id
+}
+
+resource "aws_security_group_rule" "worker-icmp-self" {
+  count = var.networking == "cilium" ? 1 : 0
+
+  security_group_id = aws_security_group.worker.id
+
+  type      = "ingress"
+  protocol  = "icmp"
+  from_port = 8
+  to_port   = 0
+  self      = true
+}
+
 resource "aws_security_group_rule" "worker-ssh" {
   security_group_id = aws_security_group.worker.id
 
@@ -257,6 +355,31 @@ resource "aws_security_group_rule" "worker-https" {
   cidr_blocks       = ["0.0.0.0/0"]
 }
 
+resource "aws_security_group_rule" "worker-cilium-health" {
+  count = var.networking == "cilium" ? 1 : 0
+
+  security_group_id = aws_security_group.worker.id
+
+  type                     = "ingress"
+  protocol                 = "tcp"
+  from_port                = 4240
+  to_port                  = 4240
+  source_security_group_id = aws_security_group.controller.id
+}
+
+resource "aws_security_group_rule" "worker-cilium-health-self" {
+  count = var.networking == "cilium" ? 1 : 0
+
+  security_group_id = aws_security_group.worker.id
+
+  type      = "ingress"
+  protocol  = "tcp"
+  from_port = 4240
+  to_port   = 4240
+  self      = true
+}
+
+# IANA VXLAN default
 resource "aws_security_group_rule" "worker-vxlan" {
   count = var.networking == "flannel" ? 1 : 0
 
@@ -281,6 +404,31 @@ resource "aws_security_group_rule" "worker-vxlan-self" {
   self = true
 }
 
+# Linux VXLAN default
+resource "aws_security_group_rule" "worker-linux-vxlan" {
+  count = var.networking == "cilium" ? 1 : 0
+
+  security_group_id = aws_security_group.worker.id
+
+  type                     = "ingress"
+  protocol                 = "udp"
+  from_port                = 8472
+  to_port                  = 8472
+  source_security_group_id = aws_security_group.controller.id
+}
+
+resource "aws_security_group_rule" "worker-linux-vxlan-self" {
+  count = var.networking == "cilium" ? 1 : 0
+
+  security_group_id = aws_security_group.worker.id
+
+  type      = "ingress"
+  protocol  = "udp"
+  from_port = 8472
+  to_port   = 8472
+  self      = true
+}
+
 # Allow Prometheus to scrape node-exporter daemonset
 resource "aws_security_group_rule" "worker-node-exporter" {
   security_group_id = aws_security_group.worker.id
diff --git a/aws/fedora-coreos/kubernetes/workers/fcc/worker.yaml b/aws/fedora-coreos/kubernetes/workers/fcc/worker.yaml
index a5645dea..e99a2248 100644
--- a/aws/fedora-coreos/kubernetes/workers/fcc/worker.yaml
+++ b/aws/fedora-coreos/kubernetes/workers/fcc/worker.yaml
@@ -105,6 +105,11 @@ storage:
       contents:
         inline: |
           fs.inotify.max_user_watches=16184
+    - path: /etc/sysctl.d/reverse-path-filter.conf
+      contents:
+        inline: |
+          net.ipv4.conf.default.rp_filter=0
+          net.ipv4.conf.*.rp_filter=0
     - path: /etc/systemd/system.conf.d/accounting.conf
       contents:
         inline: |
diff --git a/azure/container-linux/kubernetes/bootstrap.tf b/azure/container-linux/kubernetes/bootstrap.tf
index 62012c19..81fae090 100644
--- a/azure/container-linux/kubernetes/bootstrap.tf
+++ b/azure/container-linux/kubernetes/bootstrap.tf
@@ -1,6 +1,6 @@
 # Kubernetes assets (kubeconfig, manifests)
 module "bootstrap" {
-  source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=e75697ce35d7773705f0b9b28ce1ffbe99f9493c"
+  source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=af36c539360696f5ca6cf5b06bb729477a003602"
 
   cluster_name = var.cluster_name
   api_servers  = [format("%s.%s", var.cluster_name, var.dns_zone)]
diff --git a/azure/container-linux/kubernetes/security.tf b/azure/container-linux/kubernetes/security.tf
index feb6fef5..c31d0014 100644
--- a/azure/container-linux/kubernetes/security.tf
+++ b/azure/container-linux/kubernetes/security.tf
@@ -7,6 +7,21 @@ resource "azurerm_network_security_group" "controller" {
   location = azurerm_resource_group.cluster.location
 }
 
+resource "azurerm_network_security_rule" "controller-icmp" {
+  resource_group_name = azurerm_resource_group.cluster.name
+
+  name                        = "allow-icmp"
+  network_security_group_name = azurerm_network_security_group.controller.name
+  priority                    = "1995"
+  access                      = "Allow"
+  direction                   = "Inbound"
+  protocol                    = "Icmp"
+  source_port_range           = "*"
+  destination_port_range      = "*"
+  source_address_prefixes     = [azurerm_subnet.controller.address_prefix, azurerm_subnet.worker.address_prefix]
+  destination_address_prefix  = azurerm_subnet.controller.address_prefix
+}
+
 resource "azurerm_network_security_rule" "controller-ssh" {
   resource_group_name = azurerm_resource_group.cluster.name
 
@@ -100,6 +115,22 @@ resource "azurerm_network_security_rule" "controller-apiserver" {
   destination_address_prefix  = azurerm_subnet.controller.address_prefix
 }
 
+resource "azurerm_network_security_rule" "controller-cilium-health" {
+  resource_group_name = azurerm_resource_group.cluster.name
+  count               = var.networking == "cilium" ? 1 : 0
+
+  name                        = "allow-cilium-health"
+  network_security_group_name = azurerm_network_security_group.controller.name
+  priority                    = "2019"
+  access                      = "Allow"
+  direction                   = "Inbound"
+  protocol                    = "Tcp"
+  source_port_range           = "*"
+  destination_port_range      = "4240"
+  source_address_prefixes     = [azurerm_subnet.controller.address_prefix, azurerm_subnet.worker.address_prefix]
+  destination_address_prefix  = azurerm_subnet.controller.address_prefix
+}
+
 resource "azurerm_network_security_rule" "controller-vxlan" {
   resource_group_name = azurerm_resource_group.cluster.name
 
@@ -115,6 +146,21 @@ resource "azurerm_network_security_rule" "controller-vxlan" {
   destination_address_prefix  = azurerm_subnet.controller.address_prefix
 }
 
+resource "azurerm_network_security_rule" "controller-linux-vxlan" {
+  resource_group_name = azurerm_resource_group.cluster.name
+
+  name                        = "allow-linux-vxlan"
+  network_security_group_name = azurerm_network_security_group.controller.name
+  priority                    = "2021"
+  access                      = "Allow"
+  direction                   = "Inbound"
+  protocol                    = "Udp"
+  source_port_range           = "*"
+  destination_port_range      = "8472"
+  source_address_prefixes     = [azurerm_subnet.controller.address_prefix, azurerm_subnet.worker.address_prefix]
+  destination_address_prefix  = azurerm_subnet.controller.address_prefix
+}
+
 # Allow Prometheus to scrape node-exporter daemonset
 resource "azurerm_network_security_rule" "controller-node-exporter" {
   resource_group_name = azurerm_resource_group.cluster.name
 
@@ -191,6 +237,21 @@ resource "azurerm_network_security_group" "worker" {
   location = azurerm_resource_group.cluster.location
 }
 
+resource "azurerm_network_security_rule" "worker-icmp" {
+  resource_group_name = azurerm_resource_group.cluster.name
+
+  name                        = "allow-icmp"
+  network_security_group_name = azurerm_network_security_group.worker.name
+  priority                    = "1995"
+  access                      = "Allow"
+  direction                   = "Inbound"
+  protocol                    = "Icmp"
+  source_port_range           = "*"
+  destination_port_range      = "*"
+  source_address_prefixes     = [azurerm_subnet.controller.address_prefix, azurerm_subnet.worker.address_prefix]
+  destination_address_prefix  = azurerm_subnet.worker.address_prefix
+}
+
 resource "azurerm_network_security_rule" "worker-ssh" {
   resource_group_name = azurerm_resource_group.cluster.name
 
@@ -236,6 +297,22 @@ resource "azurerm_network_security_rule" "worker-https" {
   destination_address_prefix  = azurerm_subnet.worker.address_prefix
 }
 
+resource "azurerm_network_security_rule" "worker-cilium-health" {
+  resource_group_name = azurerm_resource_group.cluster.name
+  count               = var.networking == "cilium" ? 1 : 0
+
+  name                        = "allow-cilium-health"
+  network_security_group_name = azurerm_network_security_group.worker.name
+  priority                    = "2014"
+  access                      = "Allow"
+  direction                   = "Inbound"
+  protocol                    = "Tcp"
+  source_port_range           = "*"
+  destination_port_range      = "4240"
+  source_address_prefixes     = [azurerm_subnet.controller.address_prefix, azurerm_subnet.worker.address_prefix]
+  destination_address_prefix  = azurerm_subnet.worker.address_prefix
+}
+
 resource "azurerm_network_security_rule" "worker-vxlan" {
   resource_group_name = azurerm_resource_group.cluster.name
 
@@ -251,6 +328,21 @@ resource "azurerm_network_security_rule" "worker-vxlan" {
   destination_address_prefix  = azurerm_subnet.worker.address_prefix
 }
 
+resource "azurerm_network_security_rule" "worker-linux-vxlan" {
+  resource_group_name = azurerm_resource_group.cluster.name
+
+  name                        = "allow-linux-vxlan"
+  network_security_group_name = azurerm_network_security_group.worker.name
+  priority                    = "2016"
+  access                      = "Allow"
+  direction                   = "Inbound"
+  protocol                    = "Udp"
+  source_port_range           = "*"
+  destination_port_range      = "8472"
+  source_address_prefixes     = [azurerm_subnet.controller.address_prefix, azurerm_subnet.worker.address_prefix]
+  destination_address_prefix  = azurerm_subnet.worker.address_prefix
+}
+
 # Allow Prometheus to scrape node-exporter daemonset
 resource "azurerm_network_security_rule" "worker-node-exporter" {
   resource_group_name = azurerm_resource_group.cluster.name
diff --git a/azure/fedora-coreos/kubernetes/bootstrap.tf b/azure/fedora-coreos/kubernetes/bootstrap.tf
index 6e8d6e79..a4b9ec2d 100644
--- a/azure/fedora-coreos/kubernetes/bootstrap.tf
+++ b/azure/fedora-coreos/kubernetes/bootstrap.tf
@@ -1,6 +1,6 @@
 # Kubernetes assets (kubeconfig, manifests)
 module "bootstrap" {
-  source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=e75697ce35d7773705f0b9b28ce1ffbe99f9493c"
+  source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=af36c539360696f5ca6cf5b06bb729477a003602"
 
   cluster_name = var.cluster_name
   api_servers  = [format("%s.%s", var.cluster_name, var.dns_zone)]
diff --git a/azure/fedora-coreos/kubernetes/fcc/controller.yaml b/azure/fedora-coreos/kubernetes/fcc/controller.yaml
index f64b298d..f5c0e902 100644
--- a/azure/fedora-coreos/kubernetes/fcc/controller.yaml
+++ b/azure/fedora-coreos/kubernetes/fcc/controller.yaml
@@ -175,6 +175,11 @@ storage:
       contents:
         inline: |
           fs.inotify.max_user_watches=16184
+    - path: /etc/sysctl.d/reverse-path-filter.conf
+      contents:
+        inline: |
+          net.ipv4.conf.default.rp_filter=0
+          net.ipv4.conf.*.rp_filter=0
     - path: /etc/systemd/system.conf.d/accounting.conf
       contents:
         inline: |
diff --git a/azure/fedora-coreos/kubernetes/security.tf b/azure/fedora-coreos/kubernetes/security.tf
index feb6fef5..c31d0014 100644
--- a/azure/fedora-coreos/kubernetes/security.tf
+++ b/azure/fedora-coreos/kubernetes/security.tf
@@ -7,6 +7,21 @@ resource "azurerm_network_security_group" "controller" {
   location = azurerm_resource_group.cluster.location
 }
 
+resource "azurerm_network_security_rule" "controller-icmp" {
+  resource_group_name = azurerm_resource_group.cluster.name
+
+  name                        = "allow-icmp"
+  network_security_group_name = azurerm_network_security_group.controller.name
+  priority                    = "1995"
+  access                      = "Allow"
+  direction                   = "Inbound"
+  protocol                    = "Icmp"
+  source_port_range           = "*"
+  destination_port_range      = "*"
+  source_address_prefixes     = [azurerm_subnet.controller.address_prefix, azurerm_subnet.worker.address_prefix]
+  destination_address_prefix  = azurerm_subnet.controller.address_prefix
+}
+
resource "azurerm_network_security_rule" "controller-ssh" { resource_group_name = azurerm_resource_group.cluster.name @@ -100,6 +115,22 @@ resource "azurerm_network_security_rule" "controller-apiserver" { destination_address_prefix = azurerm_subnet.controller.address_prefix } +resource "azurerm_network_security_rule" "controller-cilium-health" { + resource_group_name = azurerm_resource_group.cluster.name + count = var.networking == "cilium" ? 1 : 0 + + name = "allow-cilium-health" + network_security_group_name = azurerm_network_security_group.controller.name + priority = "2019" + access = "Allow" + direction = "Inbound" + protocol = "Tcp" + source_port_range = "*" + destination_port_range = "4240" + source_address_prefixes = [azurerm_subnet.controller.address_prefix, azurerm_subnet.worker.address_prefix] + destination_address_prefix = azurerm_subnet.controller.address_prefix +} + resource "azurerm_network_security_rule" "controller-vxlan" { resource_group_name = azurerm_resource_group.cluster.name @@ -115,6 +146,21 @@ resource "azurerm_network_security_rule" "controller-vxlan" { destination_address_prefix = azurerm_subnet.controller.address_prefix } +resource "azurerm_network_security_rule" "controller-linux-vxlan" { + resource_group_name = azurerm_resource_group.cluster.name + + name = "allow-linux-vxlan" + network_security_group_name = azurerm_network_security_group.controller.name + priority = "2021" + access = "Allow" + direction = "Inbound" + protocol = "Udp" + source_port_range = "*" + destination_port_range = "8472" + source_address_prefixes = [azurerm_subnet.controller.address_prefix, azurerm_subnet.worker.address_prefix] + destination_address_prefix = azurerm_subnet.controller.address_prefix +} + # Allow Prometheus to scrape node-exporter daemonset resource "azurerm_network_security_rule" "controller-node-exporter" { resource_group_name = azurerm_resource_group.cluster.name @@ -191,6 +237,21 @@ resource "azurerm_network_security_group" "worker" { location = azurerm_resource_group.cluster.location } +resource "azurerm_network_security_rule" "worker-icmp" { + resource_group_name = azurerm_resource_group.cluster.name + + name = "allow-icmp" + network_security_group_name = azurerm_network_security_group.worker.name + priority = "1995" + access = "Allow" + direction = "Inbound" + protocol = "Icmp" + source_port_range = "*" + destination_port_range = "*" + source_address_prefixes = [azurerm_subnet.controller.address_prefix, azurerm_subnet.worker.address_prefix] + destination_address_prefix = azurerm_subnet.worker.address_prefix +} + resource "azurerm_network_security_rule" "worker-ssh" { resource_group_name = azurerm_resource_group.cluster.name @@ -236,6 +297,22 @@ resource "azurerm_network_security_rule" "worker-https" { destination_address_prefix = azurerm_subnet.worker.address_prefix } +resource "azurerm_network_security_rule" "worker-cilium-health" { + resource_group_name = azurerm_resource_group.cluster.name + count = var.networking == "cilium" ? 
+
+  name                        = "allow-cilium-health"
+  network_security_group_name = azurerm_network_security_group.worker.name
+  priority                    = "2014"
+  access                      = "Allow"
+  direction                   = "Inbound"
+  protocol                    = "Tcp"
+  source_port_range           = "*"
+  destination_port_range      = "4240"
+  source_address_prefixes     = [azurerm_subnet.controller.address_prefix, azurerm_subnet.worker.address_prefix]
+  destination_address_prefix  = azurerm_subnet.worker.address_prefix
+}
+
 resource "azurerm_network_security_rule" "worker-vxlan" {
   resource_group_name = azurerm_resource_group.cluster.name
 
@@ -251,6 +328,21 @@ resource "azurerm_network_security_rule" "worker-vxlan" {
   destination_address_prefix  = azurerm_subnet.worker.address_prefix
 }
 
+resource "azurerm_network_security_rule" "worker-linux-vxlan" {
+  resource_group_name = azurerm_resource_group.cluster.name
+
+  name                        = "allow-linux-vxlan"
+  network_security_group_name = azurerm_network_security_group.worker.name
+  priority                    = "2016"
+  access                      = "Allow"
+  direction                   = "Inbound"
+  protocol                    = "Udp"
+  source_port_range           = "*"
+  destination_port_range      = "8472"
+  source_address_prefixes     = [azurerm_subnet.controller.address_prefix, azurerm_subnet.worker.address_prefix]
+  destination_address_prefix  = azurerm_subnet.worker.address_prefix
+}
+
 # Allow Prometheus to scrape node-exporter daemonset
 resource "azurerm_network_security_rule" "worker-node-exporter" {
   resource_group_name = azurerm_resource_group.cluster.name
diff --git a/azure/fedora-coreos/kubernetes/workers/fcc/worker.yaml b/azure/fedora-coreos/kubernetes/workers/fcc/worker.yaml
index 81e0c0ba..b901b162 100644
--- a/azure/fedora-coreos/kubernetes/workers/fcc/worker.yaml
+++ b/azure/fedora-coreos/kubernetes/workers/fcc/worker.yaml
@@ -104,6 +104,11 @@ storage:
       contents:
         inline: |
           fs.inotify.max_user_watches=16184
+    - path: /etc/sysctl.d/reverse-path-filter.conf
+      contents:
+        inline: |
+          net.ipv4.conf.default.rp_filter=0
+          net.ipv4.conf.*.rp_filter=0
     - path: /etc/systemd/system.conf.d/accounting.conf
       contents:
         inline: |
diff --git a/bare-metal/container-linux/kubernetes/bootstrap.tf b/bare-metal/container-linux/kubernetes/bootstrap.tf
index abe58d1a..4fa6b087 100644
--- a/bare-metal/container-linux/kubernetes/bootstrap.tf
+++ b/bare-metal/container-linux/kubernetes/bootstrap.tf
@@ -1,6 +1,6 @@
 # Kubernetes assets (kubeconfig, manifests)
 module "bootstrap" {
-  source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=e75697ce35d7773705f0b9b28ce1ffbe99f9493c"
+  source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=af36c539360696f5ca6cf5b06bb729477a003602"
 
   cluster_name = var.cluster_name
   api_servers  = [var.k8s_domain_name]
diff --git a/bare-metal/fedora-coreos/kubernetes/bootstrap.tf b/bare-metal/fedora-coreos/kubernetes/bootstrap.tf
index 41d0c767..89901e5f 100644
--- a/bare-metal/fedora-coreos/kubernetes/bootstrap.tf
+++ b/bare-metal/fedora-coreos/kubernetes/bootstrap.tf
@@ -1,6 +1,6 @@
 # Kubernetes assets (kubeconfig, manifests)
 module "bootstrap" {
-  source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=e75697ce35d7773705f0b9b28ce1ffbe99f9493c"
+  source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=af36c539360696f5ca6cf5b06bb729477a003602"
 
   cluster_name = var.cluster_name
   api_servers  = [var.k8s_domain_name]
diff --git a/bare-metal/fedora-coreos/kubernetes/fcc/controller.yaml b/bare-metal/fedora-coreos/kubernetes/fcc/controller.yaml
index d56c144e..b31cddba 100644
--- a/bare-metal/fedora-coreos/kubernetes/fcc/controller.yaml
+++ b/bare-metal/fedora-coreos/kubernetes/fcc/controller.yaml
@@ -186,6 +186,11 @@ storage:
       contents:
         inline: |
           fs.inotify.max_user_watches=16184
+    - path: /etc/sysctl.d/reverse-path-filter.conf
+      contents:
+        inline: |
+          net.ipv4.conf.default.rp_filter=0
+          net.ipv4.conf.*.rp_filter=0
     - path: /etc/systemd/system.conf.d/accounting.conf
       contents:
         inline: |
diff --git a/bare-metal/fedora-coreos/kubernetes/fcc/worker.yaml b/bare-metal/fedora-coreos/kubernetes/fcc/worker.yaml
index 26d2cd94..ed026ba2 100644
--- a/bare-metal/fedora-coreos/kubernetes/fcc/worker.yaml
+++ b/bare-metal/fedora-coreos/kubernetes/fcc/worker.yaml
@@ -106,6 +106,11 @@ storage:
       contents:
         inline: |
           fs.inotify.max_user_watches=16184
+    - path: /etc/sysctl.d/reverse-path-filter.conf
+      contents:
+        inline: |
+          net.ipv4.conf.default.rp_filter=0
+          net.ipv4.conf.*.rp_filter=0
     - path: /etc/systemd/system.conf.d/accounting.conf
       contents:
         inline: |
diff --git a/digital-ocean/container-linux/kubernetes/bootstrap.tf b/digital-ocean/container-linux/kubernetes/bootstrap.tf
index 87a8519e..f228b425 100644
--- a/digital-ocean/container-linux/kubernetes/bootstrap.tf
+++ b/digital-ocean/container-linux/kubernetes/bootstrap.tf
@@ -1,6 +1,6 @@
 # Kubernetes assets (kubeconfig, manifests)
 module "bootstrap" {
-  source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=e75697ce35d7773705f0b9b28ce1ffbe99f9493c"
+  source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=af36c539360696f5ca6cf5b06bb729477a003602"
 
   cluster_name = var.cluster_name
   api_servers  = [format("%s.%s", var.cluster_name, var.dns_zone)]
diff --git a/digital-ocean/container-linux/kubernetes/network.tf b/digital-ocean/container-linux/kubernetes/network.tf
index e8b0564d..4be6827a 100644
--- a/digital-ocean/container-linux/kubernetes/network.tf
+++ b/digital-ocean/container-linux/kubernetes/network.tf
@@ -6,6 +6,11 @@ resource "digitalocean_firewall" "rules" {
     digitalocean_tag.workers.name
   ]
 
+  inbound_rule {
+    protocol    = "icmp"
+    source_tags = [digitalocean_tag.controllers.name, digitalocean_tag.workers.name]
+  }
+
   # allow ssh, internal flannel, internal node-exporter, internal kubelet
   inbound_rule {
     protocol         = "tcp"
@@ -13,12 +18,27 @@ resource "digitalocean_firewall" "rules" {
     source_addresses = ["0.0.0.0/0", "::/0"]
   }
 
+  # Cilium health
+  inbound_rule {
+    protocol    = "tcp"
+    port_range  = "4240"
+    source_tags = [digitalocean_tag.controllers.name, digitalocean_tag.workers.name]
+  }
+
+  # IANA vxlan (flannel, calico)
   inbound_rule {
     protocol    = "udp"
     port_range  = "4789"
     source_tags = [digitalocean_tag.controllers.name, digitalocean_tag.workers.name]
   }
 
+  # Linux vxlan (Cilium)
+  inbound_rule {
+    protocol    = "udp"
+    port_range  = "8472"
+    source_tags = [digitalocean_tag.controllers.name, digitalocean_tag.workers.name]
+  }
+
   # Allow Prometheus to scrape node-exporter
   inbound_rule {
     protocol    = "tcp"
@@ -33,6 +53,7 @@ resource "digitalocean_firewall" "rules" {
     source_tags = [digitalocean_tag.workers.name]
   }
 
+  # Kubelet
   inbound_rule {
     protocol   = "tcp"
     port_range = "10250"
diff --git a/digital-ocean/fedora-coreos/kubernetes/bootstrap.tf b/digital-ocean/fedora-coreos/kubernetes/bootstrap.tf
index 92cebf46..53266d37 100644
--- a/digital-ocean/fedora-coreos/kubernetes/bootstrap.tf
+++ b/digital-ocean/fedora-coreos/kubernetes/bootstrap.tf
@@ -1,6 +1,6 @@
 # Kubernetes assets (kubeconfig, manifests)
 module "bootstrap" {
-  source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=e75697ce35d7773705f0b9b28ce1ffbe99f9493c"
"git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=af36c539360696f5ca6cf5b06bb729477a003602" cluster_name = var.cluster_name api_servers = [format("%s.%s", var.cluster_name, var.dns_zone)] diff --git a/digital-ocean/fedora-coreos/kubernetes/fcc/controller.yaml b/digital-ocean/fedora-coreos/kubernetes/fcc/controller.yaml index 6e32f50c..54e4000c 100644 --- a/digital-ocean/fedora-coreos/kubernetes/fcc/controller.yaml +++ b/digital-ocean/fedora-coreos/kubernetes/fcc/controller.yaml @@ -182,6 +182,11 @@ storage: contents: inline: | fs.inotify.max_user_watches=16184 + - path: /etc/sysctl.d/reverse-path-filter.conf + contents: + inline: | + net.ipv4.conf.default.rp_filter=0 + net.ipv4.conf.*.rp_filter=0 - path: /etc/systemd/system.conf.d/accounting.conf contents: inline: | diff --git a/digital-ocean/fedora-coreos/kubernetes/fcc/worker.yaml b/digital-ocean/fedora-coreos/kubernetes/fcc/worker.yaml index 44be2761..ae360545 100644 --- a/digital-ocean/fedora-coreos/kubernetes/fcc/worker.yaml +++ b/digital-ocean/fedora-coreos/kubernetes/fcc/worker.yaml @@ -109,6 +109,11 @@ storage: contents: inline: | fs.inotify.max_user_watches=16184 + - path: /etc/sysctl.d/reverse-path-filter.conf + contents: + inline: | + net.ipv4.conf.default.rp_filter=0 + net.ipv4.conf.*.rp_filter=0 - path: /etc/systemd/system.conf.d/accounting.conf contents: inline: | diff --git a/digital-ocean/fedora-coreos/kubernetes/network.tf b/digital-ocean/fedora-coreos/kubernetes/network.tf index e8b0564d..4be6827a 100644 --- a/digital-ocean/fedora-coreos/kubernetes/network.tf +++ b/digital-ocean/fedora-coreos/kubernetes/network.tf @@ -6,6 +6,11 @@ resource "digitalocean_firewall" "rules" { digitalocean_tag.workers.name ] + inbound_rule { + protocol = "icmp" + source_tags = [digitalocean_tag.controllers.name, digitalocean_tag.workers.name] + } + # allow ssh, internal flannel, internal node-exporter, internal kubelet inbound_rule { protocol = "tcp" @@ -13,12 +18,27 @@ resource "digitalocean_firewall" "rules" { source_addresses = ["0.0.0.0/0", "::/0"] } + # Cilium health + inbound_rule { + protocol = "tcp" + port_range = "4240" + source_tags = [digitalocean_tag.controllers.name, digitalocean_tag.workers.name] + } + + # IANA vxlan (flannel, calico) inbound_rule { protocol = "udp" port_range = "4789" source_tags = [digitalocean_tag.controllers.name, digitalocean_tag.workers.name] } + # Linux vxlan (Cilium) + inbound_rule { + protocol = "udp" + port_range = "8472" + source_tags = [digitalocean_tag.controllers.name, digitalocean_tag.workers.name] + } + # Allow Prometheus to scrape node-exporter inbound_rule { protocol = "tcp" @@ -33,6 +53,7 @@ resource "digitalocean_firewall" "rules" { source_tags = [digitalocean_tag.workers.name] } + # Kubelet inbound_rule { protocol = "tcp" port_range = "10250" diff --git a/google-cloud/container-linux/kubernetes/bootstrap.tf b/google-cloud/container-linux/kubernetes/bootstrap.tf index 9b906522..31fcfc22 100644 --- a/google-cloud/container-linux/kubernetes/bootstrap.tf +++ b/google-cloud/container-linux/kubernetes/bootstrap.tf @@ -1,6 +1,6 @@ # Kubernetes assets (kubeconfig, manifests) module "bootstrap" { - source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=e75697ce35d7773705f0b9b28ce1ffbe99f9493c" + source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=af36c539360696f5ca6cf5b06bb729477a003602" cluster_name = var.cluster_name api_servers = [format("%s.%s", var.cluster_name, var.dns_zone)] diff --git 
diff --git a/google-cloud/container-linux/kubernetes/network.tf b/google-cloud/container-linux/kubernetes/network.tf
index bd7067d7..67c6afde 100644
--- a/google-cloud/container-linux/kubernetes/network.tf
+++ b/google-cloud/container-linux/kubernetes/network.tf
@@ -112,6 +112,32 @@ resource "google_compute_firewall" "internal-vxlan" {
   target_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
 }
 
+# Cilium VXLAN
+resource "google_compute_firewall" "internal-linux-vxlan" {
+  count = var.networking == "cilium" ? 1 : 0
+
+  name    = "${var.cluster_name}-linux-vxlan"
+  network = google_compute_network.network.name
+
+  allow {
+    protocol = "udp"
+    ports    = [8472]
+  }
+
+  # Cilium health
+  allow {
+    protocol = "icmp"
+  }
+
+  allow {
+    protocol = "tcp"
+    ports    = [4240]
+  }
+
+  source_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
+  target_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
+}
+
 # Allow Prometheus to scrape node-exporter daemonset
 resource "google_compute_firewall" "internal-node-exporter" {
   name    = "${var.cluster_name}-internal-node-exporter"
diff --git a/google-cloud/fedora-coreos/kubernetes/bootstrap.tf b/google-cloud/fedora-coreos/kubernetes/bootstrap.tf
index 92ac39ac..3da0c90f 100644
--- a/google-cloud/fedora-coreos/kubernetes/bootstrap.tf
+++ b/google-cloud/fedora-coreos/kubernetes/bootstrap.tf
@@ -1,6 +1,6 @@
 # Kubernetes assets (kubeconfig, manifests)
 module "bootstrap" {
-  source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=e75697ce35d7773705f0b9b28ce1ffbe99f9493c"
+  source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=af36c539360696f5ca6cf5b06bb729477a003602"
 
   cluster_name = var.cluster_name
   api_servers  = [format("%s.%s", var.cluster_name, var.dns_zone)]
diff --git a/google-cloud/fedora-coreos/kubernetes/fcc/controller.yaml b/google-cloud/fedora-coreos/kubernetes/fcc/controller.yaml
index 5e0119b2..6d6c3745 100644
--- a/google-cloud/fedora-coreos/kubernetes/fcc/controller.yaml
+++ b/google-cloud/fedora-coreos/kubernetes/fcc/controller.yaml
@@ -175,6 +175,11 @@ storage:
       contents:
         inline: |
           fs.inotify.max_user_watches=16184
+    - path: /etc/sysctl.d/reverse-path-filter.conf
+      contents:
+        inline: |
+          net.ipv4.conf.default.rp_filter=0
+          net.ipv4.conf.*.rp_filter=0
     - path: /etc/systemd/system.conf.d/accounting.conf
       contents:
         inline: |
diff --git a/google-cloud/fedora-coreos/kubernetes/network.tf b/google-cloud/fedora-coreos/kubernetes/network.tf
index bd7067d7..67c6afde 100644
--- a/google-cloud/fedora-coreos/kubernetes/network.tf
+++ b/google-cloud/fedora-coreos/kubernetes/network.tf
@@ -112,6 +112,32 @@ resource "google_compute_firewall" "internal-vxlan" {
   target_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
 }
 
+# Cilium VXLAN
+resource "google_compute_firewall" "internal-linux-vxlan" {
+  count = var.networking == "cilium" ? 1 : 0
+
+  name    = "${var.cluster_name}-linux-vxlan"
+  network = google_compute_network.network.name
+
+  allow {
+    protocol = "udp"
+    ports    = [8472]
+  }
+
+  # Cilium health
+  allow {
+    protocol = "icmp"
+  }
+
+  allow {
+    protocol = "tcp"
+    ports    = [4240]
+  }
+
+  source_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
+  target_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
+}
+
 # Allow Prometheus to scrape node-exporter daemonset
 resource "google_compute_firewall" "internal-node-exporter" {
   name    = "${var.cluster_name}-internal-node-exporter"
diff --git a/google-cloud/fedora-coreos/kubernetes/workers/fcc/worker.yaml b/google-cloud/fedora-coreos/kubernetes/workers/fcc/worker.yaml
index 3febec95..147cea9b 100644
--- a/google-cloud/fedora-coreos/kubernetes/workers/fcc/worker.yaml
+++ b/google-cloud/fedora-coreos/kubernetes/workers/fcc/worker.yaml
@@ -104,6 +104,11 @@ storage:
       contents:
         inline: |
           fs.inotify.max_user_watches=16184
+    - path: /etc/sysctl.d/reverse-path-filter.conf
+      contents:
+        inline: |
+          net.ipv4.conf.default.rp_filter=0
+          net.ipv4.conf.*.rp_filter=0
     - path: /etc/systemd/system.conf.d/accounting.conf
      contents:
         inline: |
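
With these rules in place, a cluster opts into Cilium through the existing `networking` variable; the change introduces no new variables. A minimal sketch against the AWS Fedora CoreOS module (the module label, cluster name, DNS values, key, and `ref` below are illustrative placeholders, not pinned by this diff):

    module "cluster" {
      source = "git::https://github.com/poseidon/typhoon//aws/fedora-coreos/kubernetes?ref=<release>"

      # placeholder cluster identity and Route53 DNS zone
      cluster_name = "example"
      dns_zone     = "example.com"
      dns_zone_id  = "Z0000000EXAMPLE"

      # placeholder SSH public key for user "core"
      ssh_authorized_key = "ssh-ed25519 AAAA..."

      # "cilium" enables the conditional rules above: ICMP echo between
      # nodes, TCP 4240 for cilium-health checks, UDP 8472 for Linux VXLAN
      networking = "cilium"
    }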