Add experimental Cilium CNI provider
* Accept experimental CNI `networking` mode "cilium"
* Run Cilium v1.8.0-rc4 with overlay vxlan tunnels and a minimal
  set of features. We're interested in:
  * IPAM: Divide pod_cidr into /24 subnets per node
  * CNI networking pod-to-pod, pod-to-external
  * BPF masquerade
  * NetworkPolicy as defined by Kubernetes (no L7 Policy)
* Continue using kube-proxy with Cilium probe mode
* Firewall changes:
  * Require UDP 8472 for vxlan (Linux kernel default) between nodes
  * Optional ICMP echo(8) between nodes for host reachability (health)
  * Optional TCP 4240 between nodes for endpoint reachability (health)

Known Issues:

* Containers with `hostPort` don't listen on all host addresses,
  these workloads must use `hostNetwork` for now
  https://github.com/cilium/cilium/issues/12116
* Erroneous warning on Fedora CoreOS
  https://github.com/cilium/cilium/issues/10256

Note: This is experimental. It is not listed in docs and may be
changed or removed without a deprecation notice

Related:

* https://github.com/poseidon/terraform-render-bootstrap/pull/192
* https://github.com/cilium/cilium/issues/12217
parent 37f00a3882
commit e9c8520359
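For reference, a cluster opts into this mode by setting the new `networking` value on a Typhoon module. A minimal sketch, assuming the AWS Fedora CoreOS module; the cluster values below are illustrative placeholders, not part of this commit:

    module "tempest" {
      # pin a real release ref in practice
      source = "git::https://github.com/poseidon/typhoon//aws/fedora-coreos/kubernetes"

      # AWS (placeholder values)
      cluster_name = "tempest"
      dns_zone     = "example.com"
      dns_zone_id  = "Z3PAABBCEXAMPLE"

      # configuration (placeholder key)
      ssh_authorized_key = "ssh-ed25519 AAAA..."

      # experimental, undocumented; may change or be removed
      networking = "cilium"
    }

Most of the new firewall rules in the diff below are guarded with `count = var.networking == "cilium" ? 1 : 0`, so clusters that keep the default `networking` are unchanged.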
@@ -1,6 +1,6 @@
 # Kubernetes assets (kubeconfig, manifests)
 module "bootstrap" {
-  source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=e75697ce35d7773705f0b9b28ce1ffbe99f9493c"
+  source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=af36c539360696f5ca6cf5b06bb729477a003602"
 
   cluster_name = var.cluster_name
   api_servers  = [format("%s.%s", var.cluster_name, var.dns_zone)]

@@ -13,6 +13,30 @@ resource "aws_security_group" "controller" {
   }
 }
 
+resource "aws_security_group_rule" "controller-icmp" {
+  count = var.networking == "cilium" ? 1 : 0
+
+  security_group_id = aws_security_group.controller.id
+
+  type                     = "ingress"
+  protocol                 = "icmp"
+  from_port                = 8
+  to_port                  = 0
+  source_security_group_id = aws_security_group.worker.id
+}
+
+resource "aws_security_group_rule" "controller-icmp-self" {
+  count = var.networking == "cilium" ? 1 : 0
+
+  security_group_id = aws_security_group.controller.id
+
+  type      = "ingress"
+  protocol  = "icmp"
+  from_port = 8
+  to_port   = 0
+  self      = true
+}
+
 resource "aws_security_group_rule" "controller-ssh" {
   security_group_id = aws_security_group.controller.id
 
@@ -44,39 +68,31 @@ resource "aws_security_group_rule" "controller-etcd-metrics" {
   source_security_group_id = aws_security_group.worker.id
 }
 
-# Allow Prometheus to scrape kube-proxy
-resource "aws_security_group_rule" "kube-proxy-metrics" {
+resource "aws_security_group_rule" "controller-cilium-health" {
+  count = var.networking == "cilium" ? 1 : 0
+
   security_group_id = aws_security_group.controller.id
 
   type                     = "ingress"
   protocol                 = "tcp"
-  from_port                = 10249
-  to_port                  = 10249
+  from_port                = 4240
+  to_port                  = 4240
   source_security_group_id = aws_security_group.worker.id
 }
 
-# Allow Prometheus to scrape kube-scheduler
-resource "aws_security_group_rule" "controller-scheduler-metrics" {
+resource "aws_security_group_rule" "controller-cilium-health-self" {
+  count = var.networking == "cilium" ? 1 : 0
+
   security_group_id = aws_security_group.controller.id
 
-  type                     = "ingress"
-  protocol                 = "tcp"
-  from_port                = 10251
-  to_port                  = 10251
-  source_security_group_id = aws_security_group.worker.id
-}
-
-# Allow Prometheus to scrape kube-controller-manager
-resource "aws_security_group_rule" "controller-manager-metrics" {
-  security_group_id = aws_security_group.controller.id
-
-  type                     = "ingress"
-  protocol                 = "tcp"
-  from_port                = 10252
-  to_port                  = 10252
-  source_security_group_id = aws_security_group.worker.id
+  type      = "ingress"
+  protocol  = "tcp"
+  from_port = 4240
+  to_port   = 4240
+  self      = true
 }
 
 # IANA VXLAN default
 resource "aws_security_group_rule" "controller-vxlan" {
   count = var.networking == "flannel" ? 1 : 0
@@ -111,6 +127,31 @@ resource "aws_security_group_rule" "controller-apiserver" {
   cidr_blocks = ["0.0.0.0/0"]
 }
 
+# Linux VXLAN default
+resource "aws_security_group_rule" "controller-linux-vxlan" {
+  count = var.networking == "cilium" ? 1 : 0
+
+  security_group_id = aws_security_group.controller.id
+
+  type                     = "ingress"
+  protocol                 = "udp"
+  from_port                = 8472
+  to_port                  = 8472
+  source_security_group_id = aws_security_group.worker.id
+}
+
+resource "aws_security_group_rule" "controller-linux-vxlan-self" {
+  count = var.networking == "cilium" ? 1 : 0
+
+  security_group_id = aws_security_group.controller.id
+
+  type      = "ingress"
+  protocol  = "udp"
+  from_port = 8472
+  to_port   = 8472
+  self      = true
+}
+
 # Allow Prometheus to scrape node-exporter daemonset
 resource "aws_security_group_rule" "controller-node-exporter" {
   security_group_id = aws_security_group.controller.id
@@ -122,6 +163,17 @@ resource "aws_security_group_rule" "controller-node-exporter" {
   source_security_group_id = aws_security_group.worker.id
 }
 
+# Allow Prometheus to scrape kube-proxy
+resource "aws_security_group_rule" "kube-proxy-metrics" {
+  security_group_id = aws_security_group.controller.id
+
+  type                     = "ingress"
+  protocol                 = "tcp"
+  from_port                = 10249
+  to_port                  = 10249
+  source_security_group_id = aws_security_group.worker.id
+}
+
 # Allow apiserver to access kubelets for exec, log, port-forward
 resource "aws_security_group_rule" "controller-kubelet" {
   security_group_id = aws_security_group.controller.id
@@ -143,6 +195,28 @@ resource "aws_security_group_rule" "controller-kubelet-self" {
   self = true
 }
 
+# Allow Prometheus to scrape kube-scheduler
+resource "aws_security_group_rule" "controller-scheduler-metrics" {
+  security_group_id = aws_security_group.controller.id
+
+  type                     = "ingress"
+  protocol                 = "tcp"
+  from_port                = 10251
+  to_port                  = 10251
+  source_security_group_id = aws_security_group.worker.id
+}
+
+# Allow Prometheus to scrape kube-controller-manager
+resource "aws_security_group_rule" "controller-manager-metrics" {
+  security_group_id = aws_security_group.controller.id
+
+  type                     = "ingress"
+  protocol                 = "tcp"
+  from_port                = 10252
+  to_port                  = 10252
+  source_security_group_id = aws_security_group.worker.id
+}
+
 resource "aws_security_group_rule" "controller-bgp" {
   security_group_id = aws_security_group.controller.id
 
@@ -227,6 +301,30 @@ resource "aws_security_group" "worker" {
   }
 }
 
+resource "aws_security_group_rule" "worker-icmp" {
+  count = var.networking == "cilium" ? 1 : 0
+
+  security_group_id = aws_security_group.worker.id
+
+  type                     = "ingress"
+  protocol                 = "icmp"
+  from_port                = 8
+  to_port                  = 0
+  source_security_group_id = aws_security_group.controller.id
+}
+
+resource "aws_security_group_rule" "worker-icmp-self" {
+  count = var.networking == "cilium" ? 1 : 0
+
+  security_group_id = aws_security_group.worker.id
+
+  type      = "ingress"
+  protocol  = "icmp"
+  from_port = 8
+  to_port   = 0
+  self      = true
+}
+
 resource "aws_security_group_rule" "worker-ssh" {
   security_group_id = aws_security_group.worker.id
 
@@ -257,6 +355,31 @@ resource "aws_security_group_rule" "worker-https" {
   cidr_blocks = ["0.0.0.0/0"]
 }
 
+resource "aws_security_group_rule" "worker-cilium-health" {
+  count = var.networking == "cilium" ? 1 : 0
+
+  security_group_id = aws_security_group.worker.id
+
+  type                     = "ingress"
+  protocol                 = "tcp"
+  from_port                = 4240
+  to_port                  = 4240
+  source_security_group_id = aws_security_group.controller.id
+}
+
+resource "aws_security_group_rule" "worker-cilium-health-self" {
+  count = var.networking == "cilium" ? 1 : 0
+
+  security_group_id = aws_security_group.worker.id
+
+  type      = "ingress"
+  protocol  = "tcp"
+  from_port = 4240
+  to_port   = 4240
+  self      = true
+}
+
 # IANA VXLAN default
 resource "aws_security_group_rule" "worker-vxlan" {
   count = var.networking == "flannel" ? 1 : 0
@@ -281,6 +404,31 @@ resource "aws_security_group_rule" "worker-vxlan-self" {
   self = true
 }
 
+# Linux VXLAN default
+resource "aws_security_group_rule" "worker-linux-vxlan" {
+  count = var.networking == "cilium" ? 1 : 0
+
+  security_group_id = aws_security_group.worker.id
+
+  type                     = "ingress"
+  protocol                 = "udp"
+  from_port                = 8472
+  to_port                  = 8472
+  source_security_group_id = aws_security_group.controller.id
+}
+
+resource "aws_security_group_rule" "worker-linux-vxlan-self" {
+  count = var.networking == "cilium" ? 1 : 0
+
+  security_group_id = aws_security_group.worker.id
+
+  type      = "ingress"
+  protocol  = "udp"
+  from_port = 8472
+  to_port   = 8472
+  self      = true
+}
+
 # Allow Prometheus to scrape node-exporter daemonset
 resource "aws_security_group_rule" "worker-node-exporter" {
   security_group_id = aws_security_group.worker.id

@@ -1,6 +1,6 @@
 # Kubernetes assets (kubeconfig, manifests)
 module "bootstrap" {
-  source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=e75697ce35d7773705f0b9b28ce1ffbe99f9493c"
+  source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=af36c539360696f5ca6cf5b06bb729477a003602"
 
   cluster_name = var.cluster_name
   api_servers  = [format("%s.%s", var.cluster_name, var.dns_zone)]

@@ -176,6 +176,11 @@ storage:
       contents:
         inline: |
           fs.inotify.max_user_watches=16184
+    - path: /etc/sysctl.d/reverse-path-filter.conf
+      contents:
+        inline: |
+          net.ipv4.conf.default.rp_filter=0
+          net.ipv4.conf.*.rp_filter=0
     - path: /etc/systemd/system.conf.d/accounting.conf
       contents:
         inline: |

@@ -13,6 +13,30 @@ resource "aws_security_group" "controller" {
   }
 }
 
+resource "aws_security_group_rule" "controller-icmp" {
+  count = var.networking == "cilium" ? 1 : 0
+
+  security_group_id = aws_security_group.controller.id
+
+  type                     = "ingress"
+  protocol                 = "icmp"
+  from_port                = 8
+  to_port                  = 0
+  source_security_group_id = aws_security_group.worker.id
+}
+
+resource "aws_security_group_rule" "controller-icmp-self" {
+  count = var.networking == "cilium" ? 1 : 0
+
+  security_group_id = aws_security_group.controller.id
+
+  type      = "ingress"
+  protocol  = "icmp"
+  from_port = 8
+  to_port   = 0
+  self      = true
+}
+
 resource "aws_security_group_rule" "controller-ssh" {
   security_group_id = aws_security_group.controller.id
 
@@ -44,39 +68,31 @@ resource "aws_security_group_rule" "controller-etcd-metrics" {
   source_security_group_id = aws_security_group.worker.id
 }
 
-# Allow Prometheus to scrape kube-proxy
-resource "aws_security_group_rule" "kube-proxy-metrics" {
+resource "aws_security_group_rule" "controller-cilium-health" {
+  count = var.networking == "cilium" ? 1 : 0
+
   security_group_id = aws_security_group.controller.id
 
   type                     = "ingress"
   protocol                 = "tcp"
-  from_port                = 10249
-  to_port                  = 10249
+  from_port                = 4240
+  to_port                  = 4240
   source_security_group_id = aws_security_group.worker.id
 }
 
-# Allow Prometheus to scrape kube-scheduler
-resource "aws_security_group_rule" "controller-scheduler-metrics" {
+resource "aws_security_group_rule" "controller-cilium-health-self" {
+  count = var.networking == "cilium" ? 1 : 0
+
   security_group_id = aws_security_group.controller.id
 
-  type                     = "ingress"
-  protocol                 = "tcp"
-  from_port                = 10251
-  to_port                  = 10251
-  source_security_group_id = aws_security_group.worker.id
-}
-
-# Allow Prometheus to scrape kube-controller-manager
-resource "aws_security_group_rule" "controller-manager-metrics" {
-  security_group_id = aws_security_group.controller.id
-
-  type                     = "ingress"
-  protocol                 = "tcp"
-  from_port                = 10252
-  to_port                  = 10252
-  source_security_group_id = aws_security_group.worker.id
+  type      = "ingress"
+  protocol  = "tcp"
+  from_port = 4240
+  to_port   = 4240
+  self      = true
 }
 
 # IANA VXLAN default
 resource "aws_security_group_rule" "controller-vxlan" {
   count = var.networking == "flannel" ? 1 : 0
@@ -111,6 +127,31 @@ resource "aws_security_group_rule" "controller-apiserver" {
   cidr_blocks = ["0.0.0.0/0"]
 }
 
+# Linux VXLAN default
+resource "aws_security_group_rule" "controller-linux-vxlan" {
+  count = var.networking == "cilium" ? 1 : 0
+
+  security_group_id = aws_security_group.controller.id
+
+  type                     = "ingress"
+  protocol                 = "udp"
+  from_port                = 8472
+  to_port                  = 8472
+  source_security_group_id = aws_security_group.worker.id
+}
+
+resource "aws_security_group_rule" "controller-linux-vxlan-self" {
+  count = var.networking == "cilium" ? 1 : 0
+
+  security_group_id = aws_security_group.controller.id
+
+  type      = "ingress"
+  protocol  = "udp"
+  from_port = 8472
+  to_port   = 8472
+  self      = true
+}
+
 # Allow Prometheus to scrape node-exporter daemonset
 resource "aws_security_group_rule" "controller-node-exporter" {
   security_group_id = aws_security_group.controller.id
@@ -122,6 +163,17 @@ resource "aws_security_group_rule" "controller-node-exporter" {
   source_security_group_id = aws_security_group.worker.id
 }
 
+# Allow Prometheus to scrape kube-proxy
+resource "aws_security_group_rule" "kube-proxy-metrics" {
+  security_group_id = aws_security_group.controller.id
+
+  type                     = "ingress"
+  protocol                 = "tcp"
+  from_port                = 10249
+  to_port                  = 10249
+  source_security_group_id = aws_security_group.worker.id
+}
+
 # Allow apiserver to access kubelets for exec, log, port-forward
 resource "aws_security_group_rule" "controller-kubelet" {
   security_group_id = aws_security_group.controller.id
@@ -143,6 +195,28 @@ resource "aws_security_group_rule" "controller-kubelet-self" {
   self = true
 }
 
+# Allow Prometheus to scrape kube-scheduler
+resource "aws_security_group_rule" "controller-scheduler-metrics" {
+  security_group_id = aws_security_group.controller.id
+
+  type                     = "ingress"
+  protocol                 = "tcp"
+  from_port                = 10251
+  to_port                  = 10251
+  source_security_group_id = aws_security_group.worker.id
+}
+
+# Allow Prometheus to scrape kube-controller-manager
+resource "aws_security_group_rule" "controller-manager-metrics" {
+  security_group_id = aws_security_group.controller.id
+
+  type                     = "ingress"
+  protocol                 = "tcp"
+  from_port                = 10252
+  to_port                  = 10252
+  source_security_group_id = aws_security_group.worker.id
+}
+
 resource "aws_security_group_rule" "controller-bgp" {
   security_group_id = aws_security_group.controller.id
 
@@ -227,6 +301,30 @@ resource "aws_security_group" "worker" {
   }
 }
 
+resource "aws_security_group_rule" "worker-icmp" {
+  count = var.networking == "cilium" ? 1 : 0
+
+  security_group_id = aws_security_group.worker.id
+
+  type                     = "ingress"
+  protocol                 = "icmp"
+  from_port                = 8
+  to_port                  = 0
+  source_security_group_id = aws_security_group.controller.id
+}
+
+resource "aws_security_group_rule" "worker-icmp-self" {
+  count = var.networking == "cilium" ? 1 : 0
+
+  security_group_id = aws_security_group.worker.id
+
+  type      = "ingress"
+  protocol  = "icmp"
+  from_port = 8
+  to_port   = 0
+  self      = true
+}
+
 resource "aws_security_group_rule" "worker-ssh" {
   security_group_id = aws_security_group.worker.id
 
@@ -257,6 +355,31 @@ resource "aws_security_group_rule" "worker-https" {
   cidr_blocks = ["0.0.0.0/0"]
 }
 
+resource "aws_security_group_rule" "worker-cilium-health" {
+  count = var.networking == "cilium" ? 1 : 0
+
+  security_group_id = aws_security_group.worker.id
+
+  type                     = "ingress"
+  protocol                 = "tcp"
+  from_port                = 4240
+  to_port                  = 4240
+  source_security_group_id = aws_security_group.controller.id
+}
+
+resource "aws_security_group_rule" "worker-cilium-health-self" {
+  count = var.networking == "cilium" ? 1 : 0
+
+  security_group_id = aws_security_group.worker.id
+
+  type      = "ingress"
+  protocol  = "tcp"
+  from_port = 4240
+  to_port   = 4240
+  self      = true
+}
+
 # IANA VXLAN default
 resource "aws_security_group_rule" "worker-vxlan" {
   count = var.networking == "flannel" ? 1 : 0
@@ -281,6 +404,31 @@ resource "aws_security_group_rule" "worker-vxlan-self" {
   self = true
 }
 
+# Linux VXLAN default
+resource "aws_security_group_rule" "worker-linux-vxlan" {
+  count = var.networking == "cilium" ? 1 : 0
+
+  security_group_id = aws_security_group.worker.id
+
+  type                     = "ingress"
+  protocol                 = "udp"
+  from_port                = 8472
+  to_port                  = 8472
+  source_security_group_id = aws_security_group.controller.id
+}
+
+resource "aws_security_group_rule" "worker-linux-vxlan-self" {
+  count = var.networking == "cilium" ? 1 : 0
+
+  security_group_id = aws_security_group.worker.id
+
+  type      = "ingress"
+  protocol  = "udp"
+  from_port = 8472
+  to_port   = 8472
+  self      = true
+}
+
 # Allow Prometheus to scrape node-exporter daemonset
 resource "aws_security_group_rule" "worker-node-exporter" {
   security_group_id = aws_security_group.worker.id

@@ -105,6 +105,11 @@ storage:
       contents:
         inline: |
           fs.inotify.max_user_watches=16184
+    - path: /etc/sysctl.d/reverse-path-filter.conf
+      contents:
+        inline: |
+          net.ipv4.conf.default.rp_filter=0
+          net.ipv4.conf.*.rp_filter=0
     - path: /etc/systemd/system.conf.d/accounting.conf
      contents:
         inline: |

@@ -1,6 +1,6 @@
 # Kubernetes assets (kubeconfig, manifests)
 module "bootstrap" {
-  source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=e75697ce35d7773705f0b9b28ce1ffbe99f9493c"
+  source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=af36c539360696f5ca6cf5b06bb729477a003602"
 
   cluster_name = var.cluster_name
   api_servers  = [format("%s.%s", var.cluster_name, var.dns_zone)]

@@ -7,6 +7,21 @@ resource "azurerm_network_security_group" "controller" {
   location            = azurerm_resource_group.cluster.location
 }
 
+resource "azurerm_network_security_rule" "controller-icmp" {
+  resource_group_name = azurerm_resource_group.cluster.name
+
+  name                        = "allow-icmp"
+  network_security_group_name = azurerm_network_security_group.controller.name
+  priority                    = "1995"
+  access                      = "Allow"
+  direction                   = "Inbound"
+  protocol                    = "Icmp"
+  source_port_range           = "*"
+  destination_port_range      = "*"
+  source_address_prefixes     = [azurerm_subnet.controller.address_prefix, azurerm_subnet.worker.address_prefix]
+  destination_address_prefix  = azurerm_subnet.controller.address_prefix
+}
+
 resource "azurerm_network_security_rule" "controller-ssh" {
   resource_group_name = azurerm_resource_group.cluster.name
 
@@ -100,6 +115,22 @@ resource "azurerm_network_security_rule" "controller-apiserver" {
   destination_address_prefix = azurerm_subnet.controller.address_prefix
 }
 
+resource "azurerm_network_security_rule" "controller-cilium-health" {
+  resource_group_name = azurerm_resource_group.cluster.name
+  count               = var.networking == "cilium" ? 1 : 0
+
+  name                        = "allow-cilium-health"
+  network_security_group_name = azurerm_network_security_group.controller.name
+  priority                    = "2019"
+  access                      = "Allow"
+  direction                   = "Inbound"
+  protocol                    = "Tcp"
+  source_port_range           = "*"
+  destination_port_range      = "4240"
+  source_address_prefixes     = [azurerm_subnet.controller.address_prefix, azurerm_subnet.worker.address_prefix]
+  destination_address_prefix  = azurerm_subnet.controller.address_prefix
+}
+
 resource "azurerm_network_security_rule" "controller-vxlan" {
   resource_group_name = azurerm_resource_group.cluster.name
 
@@ -115,6 +146,21 @@ resource "azurerm_network_security_rule" "controller-vxlan" {
   destination_address_prefix = azurerm_subnet.controller.address_prefix
 }
 
+resource "azurerm_network_security_rule" "controller-linux-vxlan" {
+  resource_group_name = azurerm_resource_group.cluster.name
+
+  name                        = "allow-linux-vxlan"
+  network_security_group_name = azurerm_network_security_group.controller.name
+  priority                    = "2021"
+  access                      = "Allow"
+  direction                   = "Inbound"
+  protocol                    = "Udp"
+  source_port_range           = "*"
+  destination_port_range      = "8472"
+  source_address_prefixes     = [azurerm_subnet.controller.address_prefix, azurerm_subnet.worker.address_prefix]
+  destination_address_prefix  = azurerm_subnet.controller.address_prefix
+}
+
 # Allow Prometheus to scrape node-exporter daemonset
 resource "azurerm_network_security_rule" "controller-node-exporter" {
   resource_group_name = azurerm_resource_group.cluster.name
 
@@ -191,6 +237,21 @@ resource "azurerm_network_security_group" "worker" {
   location            = azurerm_resource_group.cluster.location
 }
 
+resource "azurerm_network_security_rule" "worker-icmp" {
+  resource_group_name = azurerm_resource_group.cluster.name
+
+  name                        = "allow-icmp"
+  network_security_group_name = azurerm_network_security_group.worker.name
+  priority                    = "1995"
+  access                      = "Allow"
+  direction                   = "Inbound"
+  protocol                    = "Icmp"
+  source_port_range           = "*"
+  destination_port_range      = "*"
+  source_address_prefixes     = [azurerm_subnet.controller.address_prefix, azurerm_subnet.worker.address_prefix]
+  destination_address_prefix  = azurerm_subnet.worker.address_prefix
+}
+
 resource "azurerm_network_security_rule" "worker-ssh" {
   resource_group_name = azurerm_resource_group.cluster.name
 
@@ -236,6 +297,22 @@ resource "azurerm_network_security_rule" "worker-https" {
   destination_address_prefix = azurerm_subnet.worker.address_prefix
 }
 
+resource "azurerm_network_security_rule" "worker-cilium-health" {
+  resource_group_name = azurerm_resource_group.cluster.name
+  count               = var.networking == "cilium" ? 1 : 0
+
+  name                        = "allow-cilium-health"
+  network_security_group_name = azurerm_network_security_group.worker.name
+  priority                    = "2014"
+  access                      = "Allow"
+  direction                   = "Inbound"
+  protocol                    = "Tcp"
+  source_port_range           = "*"
+  destination_port_range      = "4240"
+  source_address_prefixes     = [azurerm_subnet.controller.address_prefix, azurerm_subnet.worker.address_prefix]
+  destination_address_prefix  = azurerm_subnet.worker.address_prefix
+}
+
 resource "azurerm_network_security_rule" "worker-vxlan" {
   resource_group_name = azurerm_resource_group.cluster.name
 
@@ -251,6 +328,21 @@ resource "azurerm_network_security_rule" "worker-vxlan" {
   destination_address_prefix = azurerm_subnet.worker.address_prefix
 }
 
+resource "azurerm_network_security_rule" "worker-linux-vxlan" {
+  resource_group_name = azurerm_resource_group.cluster.name
+
+  name                        = "allow-linux-vxlan"
+  network_security_group_name = azurerm_network_security_group.worker.name
+  priority                    = "2016"
+  access                      = "Allow"
+  direction                   = "Inbound"
+  protocol                    = "Udp"
+  source_port_range           = "*"
+  destination_port_range      = "8472"
+  source_address_prefixes     = [azurerm_subnet.controller.address_prefix, azurerm_subnet.worker.address_prefix]
+  destination_address_prefix  = azurerm_subnet.worker.address_prefix
+}
+
 # Allow Prometheus to scrape node-exporter daemonset
 resource "azurerm_network_security_rule" "worker-node-exporter" {
   resource_group_name = azurerm_resource_group.cluster.name

@@ -1,6 +1,6 @@
 # Kubernetes assets (kubeconfig, manifests)
 module "bootstrap" {
-  source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=e75697ce35d7773705f0b9b28ce1ffbe99f9493c"
+  source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=af36c539360696f5ca6cf5b06bb729477a003602"
 
   cluster_name = var.cluster_name
   api_servers  = [format("%s.%s", var.cluster_name, var.dns_zone)]

@@ -175,6 +175,11 @@ storage:
       contents:
         inline: |
           fs.inotify.max_user_watches=16184
+    - path: /etc/sysctl.d/reverse-path-filter.conf
+      contents:
+        inline: |
+          net.ipv4.conf.default.rp_filter=0
+          net.ipv4.conf.*.rp_filter=0
     - path: /etc/systemd/system.conf.d/accounting.conf
       contents:
         inline: |

@@ -7,6 +7,21 @@ resource "azurerm_network_security_group" "controller" {
   location            = azurerm_resource_group.cluster.location
 }
 
+resource "azurerm_network_security_rule" "controller-icmp" {
+  resource_group_name = azurerm_resource_group.cluster.name
+
+  name                        = "allow-icmp"
+  network_security_group_name = azurerm_network_security_group.controller.name
+  priority                    = "1995"
+  access                      = "Allow"
+  direction                   = "Inbound"
+  protocol                    = "Icmp"
+  source_port_range           = "*"
+  destination_port_range      = "*"
+  source_address_prefixes     = [azurerm_subnet.controller.address_prefix, azurerm_subnet.worker.address_prefix]
+  destination_address_prefix  = azurerm_subnet.controller.address_prefix
+}
+
 resource "azurerm_network_security_rule" "controller-ssh" {
   resource_group_name = azurerm_resource_group.cluster.name
 
@@ -100,6 +115,22 @@ resource "azurerm_network_security_rule" "controller-apiserver" {
   destination_address_prefix = azurerm_subnet.controller.address_prefix
 }
 
+resource "azurerm_network_security_rule" "controller-cilium-health" {
+  resource_group_name = azurerm_resource_group.cluster.name
+  count               = var.networking == "cilium" ? 1 : 0
+
+  name                        = "allow-cilium-health"
+  network_security_group_name = azurerm_network_security_group.controller.name
+  priority                    = "2019"
+  access                      = "Allow"
+  direction                   = "Inbound"
+  protocol                    = "Tcp"
+  source_port_range           = "*"
+  destination_port_range      = "4240"
+  source_address_prefixes     = [azurerm_subnet.controller.address_prefix, azurerm_subnet.worker.address_prefix]
+  destination_address_prefix  = azurerm_subnet.controller.address_prefix
+}
+
 resource "azurerm_network_security_rule" "controller-vxlan" {
   resource_group_name = azurerm_resource_group.cluster.name
 
@@ -115,6 +146,21 @@ resource "azurerm_network_security_rule" "controller-vxlan" {
   destination_address_prefix = azurerm_subnet.controller.address_prefix
 }
 
+resource "azurerm_network_security_rule" "controller-linux-vxlan" {
+  resource_group_name = azurerm_resource_group.cluster.name
+
+  name                        = "allow-linux-vxlan"
+  network_security_group_name = azurerm_network_security_group.controller.name
+  priority                    = "2021"
+  access                      = "Allow"
+  direction                   = "Inbound"
+  protocol                    = "Udp"
+  source_port_range           = "*"
+  destination_port_range      = "8472"
+  source_address_prefixes     = [azurerm_subnet.controller.address_prefix, azurerm_subnet.worker.address_prefix]
+  destination_address_prefix  = azurerm_subnet.controller.address_prefix
+}
+
 # Allow Prometheus to scrape node-exporter daemonset
 resource "azurerm_network_security_rule" "controller-node-exporter" {
   resource_group_name = azurerm_resource_group.cluster.name
 
@@ -191,6 +237,21 @@ resource "azurerm_network_security_group" "worker" {
   location            = azurerm_resource_group.cluster.location
 }
 
+resource "azurerm_network_security_rule" "worker-icmp" {
+  resource_group_name = azurerm_resource_group.cluster.name
+
+  name                        = "allow-icmp"
+  network_security_group_name = azurerm_network_security_group.worker.name
+  priority                    = "1995"
+  access                      = "Allow"
+  direction                   = "Inbound"
+  protocol                    = "Icmp"
+  source_port_range           = "*"
+  destination_port_range      = "*"
+  source_address_prefixes     = [azurerm_subnet.controller.address_prefix, azurerm_subnet.worker.address_prefix]
+  destination_address_prefix  = azurerm_subnet.worker.address_prefix
+}
+
 resource "azurerm_network_security_rule" "worker-ssh" {
   resource_group_name = azurerm_resource_group.cluster.name
 
@@ -236,6 +297,22 @@ resource "azurerm_network_security_rule" "worker-https" {
   destination_address_prefix = azurerm_subnet.worker.address_prefix
 }
 
+resource "azurerm_network_security_rule" "worker-cilium-health" {
+  resource_group_name = azurerm_resource_group.cluster.name
+  count               = var.networking == "cilium" ? 1 : 0
+
+  name                        = "allow-cilium-health"
+  network_security_group_name = azurerm_network_security_group.worker.name
+  priority                    = "2014"
+  access                      = "Allow"
+  direction                   = "Inbound"
+  protocol                    = "Tcp"
+  source_port_range           = "*"
+  destination_port_range      = "4240"
+  source_address_prefixes     = [azurerm_subnet.controller.address_prefix, azurerm_subnet.worker.address_prefix]
+  destination_address_prefix  = azurerm_subnet.worker.address_prefix
+}
+
 resource "azurerm_network_security_rule" "worker-vxlan" {
   resource_group_name = azurerm_resource_group.cluster.name
 
@@ -251,6 +328,21 @@ resource "azurerm_network_security_rule" "worker-vxlan" {
   destination_address_prefix = azurerm_subnet.worker.address_prefix
 }
 
+resource "azurerm_network_security_rule" "worker-linux-vxlan" {
+  resource_group_name = azurerm_resource_group.cluster.name
+
+  name                        = "allow-linux-vxlan"
+  network_security_group_name = azurerm_network_security_group.worker.name
+  priority                    = "2016"
+  access                      = "Allow"
+  direction                   = "Inbound"
+  protocol                    = "Udp"
+  source_port_range           = "*"
+  destination_port_range      = "8472"
+  source_address_prefixes     = [azurerm_subnet.controller.address_prefix, azurerm_subnet.worker.address_prefix]
+  destination_address_prefix  = azurerm_subnet.worker.address_prefix
+}
+
 # Allow Prometheus to scrape node-exporter daemonset
 resource "azurerm_network_security_rule" "worker-node-exporter" {
   resource_group_name = azurerm_resource_group.cluster.name

@@ -104,6 +104,11 @@ storage:
       contents:
         inline: |
           fs.inotify.max_user_watches=16184
+    - path: /etc/sysctl.d/reverse-path-filter.conf
+      contents:
+        inline: |
+          net.ipv4.conf.default.rp_filter=0
+          net.ipv4.conf.*.rp_filter=0
     - path: /etc/systemd/system.conf.d/accounting.conf
       contents:
         inline: |

@@ -1,6 +1,6 @@
 # Kubernetes assets (kubeconfig, manifests)
 module "bootstrap" {
-  source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=e75697ce35d7773705f0b9b28ce1ffbe99f9493c"
+  source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=af36c539360696f5ca6cf5b06bb729477a003602"
 
   cluster_name = var.cluster_name
   api_servers  = [var.k8s_domain_name]

@@ -1,6 +1,6 @@
 # Kubernetes assets (kubeconfig, manifests)
 module "bootstrap" {
-  source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=e75697ce35d7773705f0b9b28ce1ffbe99f9493c"
+  source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=af36c539360696f5ca6cf5b06bb729477a003602"
 
   cluster_name = var.cluster_name
   api_servers  = [var.k8s_domain_name]

@@ -186,6 +186,11 @@ storage:
       contents:
         inline: |
           fs.inotify.max_user_watches=16184
+    - path: /etc/sysctl.d/reverse-path-filter.conf
+      contents:
+        inline: |
+          net.ipv4.conf.default.rp_filter=0
+          net.ipv4.conf.*.rp_filter=0
     - path: /etc/systemd/system.conf.d/accounting.conf
       contents:
         inline: |

@@ -106,6 +106,11 @@ storage:
      contents:
         inline: |
           fs.inotify.max_user_watches=16184
+    - path: /etc/sysctl.d/reverse-path-filter.conf
+      contents:
+        inline: |
+          net.ipv4.conf.default.rp_filter=0
+          net.ipv4.conf.*.rp_filter=0
     - path: /etc/systemd/system.conf.d/accounting.conf
       contents:
         inline: |

@@ -1,6 +1,6 @@
 # Kubernetes assets (kubeconfig, manifests)
 module "bootstrap" {
-  source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=e75697ce35d7773705f0b9b28ce1ffbe99f9493c"
+  source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=af36c539360696f5ca6cf5b06bb729477a003602"
 
   cluster_name = var.cluster_name
   api_servers  = [format("%s.%s", var.cluster_name, var.dns_zone)]

@@ -6,6 +6,11 @@ resource "digitalocean_firewall" "rules" {
     digitalocean_tag.workers.name
   ]
 
+  inbound_rule {
+    protocol    = "icmp"
+    source_tags = [digitalocean_tag.controllers.name, digitalocean_tag.workers.name]
+  }
+
   # allow ssh, internal flannel, internal node-exporter, internal kubelet
   inbound_rule {
     protocol = "tcp"
@@ -13,12 +18,27 @@ resource "digitalocean_firewall" "rules" {
     source_addresses = ["0.0.0.0/0", "::/0"]
   }
 
+  # Cilium health
+  inbound_rule {
+    protocol    = "tcp"
+    port_range  = "4240"
+    source_tags = [digitalocean_tag.controllers.name, digitalocean_tag.workers.name]
+  }
+
+  # IANA vxlan (flannel, calico)
   inbound_rule {
     protocol    = "udp"
     port_range  = "4789"
     source_tags = [digitalocean_tag.controllers.name, digitalocean_tag.workers.name]
   }
 
+  # Linux vxlan (Cilium)
+  inbound_rule {
+    protocol    = "udp"
+    port_range  = "8472"
+    source_tags = [digitalocean_tag.controllers.name, digitalocean_tag.workers.name]
+  }
+
   # Allow Prometheus to scrape node-exporter
   inbound_rule {
     protocol = "tcp"
@@ -33,6 +53,7 @@ resource "digitalocean_firewall" "rules" {
     source_tags = [digitalocean_tag.workers.name]
   }
 
+  # Kubelet
   inbound_rule {
     protocol   = "tcp"
     port_range = "10250"

@@ -1,6 +1,6 @@
 # Kubernetes assets (kubeconfig, manifests)
 module "bootstrap" {
-  source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=e75697ce35d7773705f0b9b28ce1ffbe99f9493c"
+  source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=af36c539360696f5ca6cf5b06bb729477a003602"
 
   cluster_name = var.cluster_name
   api_servers  = [format("%s.%s", var.cluster_name, var.dns_zone)]

@@ -182,6 +182,11 @@ storage:
       contents:
         inline: |
           fs.inotify.max_user_watches=16184
+    - path: /etc/sysctl.d/reverse-path-filter.conf
+      contents:
+        inline: |
+          net.ipv4.conf.default.rp_filter=0
+          net.ipv4.conf.*.rp_filter=0
     - path: /etc/systemd/system.conf.d/accounting.conf
       contents:
         inline: |

@@ -109,6 +109,11 @@ storage:
       contents:
         inline: |
           fs.inotify.max_user_watches=16184
+    - path: /etc/sysctl.d/reverse-path-filter.conf
+      contents:
+        inline: |
+          net.ipv4.conf.default.rp_filter=0
+          net.ipv4.conf.*.rp_filter=0
     - path: /etc/systemd/system.conf.d/accounting.conf
       contents:
         inline: |

@@ -6,6 +6,11 @@ resource "digitalocean_firewall" "rules" {
     digitalocean_tag.workers.name
   ]
 
+  inbound_rule {
+    protocol    = "icmp"
+    source_tags = [digitalocean_tag.controllers.name, digitalocean_tag.workers.name]
+  }
+
   # allow ssh, internal flannel, internal node-exporter, internal kubelet
   inbound_rule {
     protocol = "tcp"
@@ -13,12 +18,27 @@ resource "digitalocean_firewall" "rules" {
     source_addresses = ["0.0.0.0/0", "::/0"]
   }
 
+  # Cilium health
+  inbound_rule {
+    protocol    = "tcp"
+    port_range  = "4240"
+    source_tags = [digitalocean_tag.controllers.name, digitalocean_tag.workers.name]
+  }
+
+  # IANA vxlan (flannel, calico)
   inbound_rule {
     protocol    = "udp"
     port_range  = "4789"
     source_tags = [digitalocean_tag.controllers.name, digitalocean_tag.workers.name]
   }
 
+  # Linux vxlan (Cilium)
+  inbound_rule {
+    protocol    = "udp"
+    port_range  = "8472"
+    source_tags = [digitalocean_tag.controllers.name, digitalocean_tag.workers.name]
+  }
+
   # Allow Prometheus to scrape node-exporter
   inbound_rule {
     protocol = "tcp"
@@ -33,6 +53,7 @@ resource "digitalocean_firewall" "rules" {
     source_tags = [digitalocean_tag.workers.name]
   }
 
+  # Kubelet
   inbound_rule {
     protocol   = "tcp"
     port_range = "10250"

@@ -1,6 +1,6 @@
 # Kubernetes assets (kubeconfig, manifests)
 module "bootstrap" {
-  source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=e75697ce35d7773705f0b9b28ce1ffbe99f9493c"
+  source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=af36c539360696f5ca6cf5b06bb729477a003602"
 
   cluster_name = var.cluster_name
   api_servers  = [format("%s.%s", var.cluster_name, var.dns_zone)]

@@ -112,6 +112,32 @@ resource "google_compute_firewall" "internal-vxlan" {
   target_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
 }
 
+# Cilium VXLAN
+resource "google_compute_firewall" "internal-linux-vxlan" {
+  count = var.networking == "cilium" ? 1 : 0
+
+  name    = "${var.cluster_name}-linux-vxlan"
+  network = google_compute_network.network.name
+
+  allow {
+    protocol = "udp"
+    ports    = [8472]
+  }
+
+  # Cilium health
+  allow {
+    protocol = "icmp"
+  }
+
+  allow {
+    protocol = "tcp"
+    ports    = [4240]
+  }
+
+  source_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
+  target_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
+}
+
 # Allow Prometheus to scrape node-exporter daemonset
 resource "google_compute_firewall" "internal-node-exporter" {
   name = "${var.cluster_name}-internal-node-exporter"

@@ -1,6 +1,6 @@
 # Kubernetes assets (kubeconfig, manifests)
 module "bootstrap" {
-  source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=e75697ce35d7773705f0b9b28ce1ffbe99f9493c"
+  source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=af36c539360696f5ca6cf5b06bb729477a003602"
 
   cluster_name = var.cluster_name
   api_servers  = [format("%s.%s", var.cluster_name, var.dns_zone)]

@@ -175,6 +175,11 @@ storage:
       contents:
         inline: |
           fs.inotify.max_user_watches=16184
+    - path: /etc/sysctl.d/reverse-path-filter.conf
+      contents:
+        inline: |
+          net.ipv4.conf.default.rp_filter=0
+          net.ipv4.conf.*.rp_filter=0
     - path: /etc/systemd/system.conf.d/accounting.conf
       contents:
         inline: |

@@ -112,6 +112,32 @@ resource "google_compute_firewall" "internal-vxlan" {
   target_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
 }
 
+# Cilium VXLAN
+resource "google_compute_firewall" "internal-linux-vxlan" {
+  count = var.networking == "cilium" ? 1 : 0
+
+  name    = "${var.cluster_name}-linux-vxlan"
+  network = google_compute_network.network.name
+
+  allow {
+    protocol = "udp"
+    ports    = [8472]
+  }
+
+  # Cilium health
+  allow {
+    protocol = "icmp"
+  }
+
+  allow {
+    protocol = "tcp"
+    ports    = [4240]
+  }
+
+  source_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
+  target_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
+}
+
 # Allow Prometheus to scrape node-exporter daemonset
 resource "google_compute_firewall" "internal-node-exporter" {
   name = "${var.cluster_name}-internal-node-exporter"

@@ -104,6 +104,11 @@ storage:
      contents:
         inline: |
           fs.inotify.max_user_watches=16184
+    - path: /etc/sysctl.d/reverse-path-filter.conf
+      contents:
+        inline: |
+          net.ipv4.conf.default.rp_filter=0
+          net.ipv4.conf.*.rp_filter=0
     - path: /etc/systemd/system.conf.d/accounting.conf
      contents:
         inline: |