Mirror of https://github.com/puppetmaster/typhoon.git, synced 2025-08-03 00:51:35 +02:00

Compare commits (15 commits):

855aec5af3
0c4d59db87
2eaf04c68b
0227014fa0
fb6f40051f
316f06df06
f4d3059b00
6c5a1964aa
6e64634748
d5de41e07a
05b99178ae
ed0b781296
51906bf398
18dd7ccc09
0764bd30b5
CHANGES.md (50 lines changed)
@@ -4,6 +4,56 @@ Notable changes between versions.

 ## Latest

+## v1.10.5
+
+* Kubernetes [v1.10.5](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.10.md#v1105)
+* Update etcd from v3.3.6 to v3.3.8 ([#243](https://github.com/poseidon/typhoon/pull/243), [#247](https://github.com/poseidon/typhoon/pull/247))
+
+#### AWS
+
+* Switch `kube-apiserver` port from 443 to 6443 ([#248](https://github.com/poseidon/typhoon/pull/248))
+* Combine apiserver and ingress NLBs ([#249](https://github.com/poseidon/typhoon/pull/249))
+  * Reduce cost by ~$18/month per cluster. Typhoon AWS clusters now use one network load balancer.
+  * Ingress addon users may keep using CNAME records to the `ingress_dns_name` module output (few million RPS)
+  * Ingress users with heavy traffic (many million RPS) should create a separate NLB (or NLBs)
+  * Worker pools no longer include an extraneous load balancer. Remove the worker module's `ingress_dns_name` output
+* Disable detailed (paid) monitoring on worker nodes ([#251](https://github.com/poseidon/typhoon/pull/251))
+  * Favor Prometheus for cloud-agnostic metrics, aggregation, and alerting
+* Add `worker_target_group_http` and `worker_target_group_https` module outputs to allow custom load balancing
+* Add `target_group_http` and `target_group_https` worker module outputs to allow custom load balancing
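The new target group outputs are meant for wiring additional listeners or a dedicated NLB to the cluster's workers. A minimal sketch of the idea, assuming a cluster module named `tempest` and a separately managed second NLB (the `custom_subnet_ids` variable and all `custom` resource names are hypothetical, not part of this changeset):

```tf
variable "custom_subnet_ids" {
  type        = "list"
  description = "Subnets for the custom NLB (assumed input)"
}

# Hypothetical second NLB for heavy ingress traffic, managed outside
# the Typhoon module
resource "aws_lb" "custom" {
  name               = "custom-ingress"
  load_balancer_type = "network"
  internal           = false
  subnets            = ["${var.custom_subnet_ids}"]
}

# Forward HTTPS traffic to the cluster's workers via the new
# `worker_target_group_https` output ("tempest" is an assumed
# cluster module name)
resource "aws_lb_listener" "custom-ingress-https" {
  load_balancer_arn = "${aws_lb.custom.arn}"
  protocol          = "TCP"
  port              = 443

  default_action {
    type             = "forward"
    target_group_arn = "${module.tempest.worker_target_group_https}"
  }
}
```

Light-traffic clusters can skip this entirely and keep pointing DNS at the combined NLB's `ingress_dns_name`.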
+#### Bare-Metal
+
+* Switch `kube-apiserver` port from 443 to 6443 ([#248](https://github.com/poseidon/typhoon/pull/248))
+  * Users who exposed kube-apiserver on a WAN via their router or load balancer will need to adjust its configuration (e.g. DNAT 6443). Most apiservers are on a LAN (internal, VPN-only, etc.), so if you didn't specially configure network gear for 443, no change is needed. (possible action required)
+* Fix possible deadlock when provisioning clusters larger than 10 nodes ([#244](https://github.com/poseidon/typhoon/pull/244))
+
+#### DigitalOcean
+
+* Switch `kube-apiserver` port from 443 to 6443 ([#248](https://github.com/poseidon/typhoon/pull/248))
+  * Update firewall rules and generated kubeconfigs
+
+#### Google Cloud
+
+* Use global HTTP and TCP proxy load balancing for Kubernetes Ingress ([#252](https://github.com/poseidon/typhoon/pull/252))
+  * Switch Ingress from regional network load balancers to global HTTP/TCP proxy load balancing
+  * Reduce cost by ~$19/month per cluster. Google bills the first 5 global and regional forwarding rules separately. Typhoon clusters now use 3 global and 0 regional forwarding rules.
+  * Worker pools no longer include an extraneous load balancer. Remove the worker module's `ingress_static_ip` output
+* Allow using the nginx-ingress addon on Fedora Atomic clusters ([#200](https://github.com/poseidon/typhoon/issues/200))
+* Add `worker_instance_group` module output to allow custom global load balancing
+* Add `instance_group` worker module output to allow custom global load balancing
+* Deprecate the `ingress_static_ip` module output. Add the `ingress_static_ipv4` module output instead.
+* Deprecate the `controllers_ipv4_public` module output
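As on AWS, the instance group outputs exist so custom global load balancing can target cluster workers. A minimal sketch, assuming a cluster module named `yavin`; the health check and backend service are hypothetical additions, not part of this changeset:

```tf
# Hypothetical health check for a custom backend service (port 10254
# is assumed to be the ingress controller's health endpoint)
resource "google_compute_health_check" "custom" {
  name = "custom-ingress-check"

  http_health_check {
    port = 10254
  }
}

# Route traffic to cluster workers through the new
# `worker_instance_group` output ("yavin" is an assumed module name)
resource "google_compute_backend_service" "custom" {
  name     = "custom-ingress-backend"
  protocol = "HTTP"

  backend {
    group = "${module.yavin.worker_instance_group}"
  }

  health_checks = ["${google_compute_health_check.custom.self_link}"]
}
```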
+#### Addons
+
+* Update CLUO from v0.6.0 to v0.7.0 ([#242](https://github.com/poseidon/typhoon/pull/242))
+* Update Prometheus from v2.3.0 to v2.3.1
+* Update Grafana from 5.1.3 to 5.1.4
+* Drop `hostNetwork` from the nginx-ingress addon
+  * Both flannel and Calico support host ports via `portmap`
+  * Allows writing NetworkPolicies that reference ingress pods in `from` or `to`. HostNetwork pods were difficult to write network policy for, since they could circumvent the CNI network to communicate with pods on the same node.
+
 ## v1.10.4

 * Kubernetes [v1.10.4](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.10.md#v1104)
README.md (10 lines changed)
@@ -11,7 +11,7 @@ Typhoon distributes upstream Kubernetes, architectural conventions, and cluster

 ## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>

-* Kubernetes v1.10.4 (upstream, via [kubernetes-incubator/bootkube](https://github.com/kubernetes-incubator/bootkube))
+* Kubernetes v1.10.5 (upstream, via [kubernetes-incubator/bootkube](https://github.com/kubernetes-incubator/bootkube))
 * Single or multi-master, workloads isolated on workers, [Calico](https://www.projectcalico.org/) or [flannel](https://github.com/coreos/flannel) networking
 * On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
 * Advanced features like [worker pools](https://typhoon.psdn.io/advanced/worker-pools/) and [preemption](https://typhoon.psdn.io/google-cloud/#preemption) (varies by platform)

@@ -46,7 +46,7 @@ Define a Kubernetes cluster by using the Terraform module for your chosen platfo

 ```tf
 module "google-cloud-yavin" {
-  source = "git::https://github.com/poseidon/typhoon//google-cloud/container-linux/kubernetes?ref=v1.10.4"
+  source = "git::https://github.com/poseidon/typhoon//google-cloud/container-linux/kubernetes?ref=v1.10.5"

   providers = {
     google = "google.default"

@@ -88,9 +88,9 @@ In 4-8 minutes (varies by platform), the cluster will be ready. This Google Clou

 $ export KUBECONFIG=/home/user/.secrets/clusters/yavin/auth/kubeconfig
 $ kubectl get nodes
 NAME                                        STATUS  AGE  VERSION
-yavin-controller-0.c.example-com.internal   Ready   6m   v1.10.4
-yavin-worker-jrbf.c.example-com.internal    Ready   5m   v1.10.4
-yavin-worker-mzdm.c.example-com.internal    Ready   5m   v1.10.4
+yavin-controller-0.c.example-com.internal   Ready   6m   v1.10.5
+yavin-worker-jrbf.c.example-com.internal    Ready   5m   v1.10.5
+yavin-worker-mzdm.c.example-com.internal    Ready   5m   v1.10.5
 ```

 List the pods.
@@ -18,7 +18,7 @@ spec:
     spec:
       containers:
       - name: update-agent
-        image: quay.io/coreos/container-linux-update-operator:v0.6.0
+        image: quay.io/coreos/container-linux-update-operator:v0.7.0
         command:
         - "/bin/update-agent"
         volumeMounts:

@@ -15,7 +15,7 @@ spec:
     spec:
       containers:
      - name: update-operator
-        image: quay.io/coreos/container-linux-update-operator:v0.6.0
+        image: quay.io/coreos/container-linux-update-operator:v0.7.0
         command:
         - "/bin/update-operator"
         env:

@@ -21,7 +21,7 @@ spec:
     spec:
       containers:
      - name: grafana
-        image: grafana/grafana:5.1.3
+        image: grafana/grafana:5.1.4
         env:
         - name: GF_SERVER_HTTP_PORT
           value: "8080"

@@ -20,7 +20,6 @@ spec:
     spec:
       nodeSelector:
         node-role.kubernetes.io/node: ""
-      hostNetwork: true
       containers:
       - name: nginx-ingress-controller
         image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.15.0

@@ -20,7 +20,6 @@ spec:
     spec:
       nodeSelector:
         node-role.kubernetes.io/node: ""
-      hostNetwork: true
       containers:
       - name: nginx-ingress-controller
         image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.15.0

@@ -20,7 +20,6 @@ spec:
     spec:
       nodeSelector:
         node-role.kubernetes.io/node: ""
-      hostNetwork: true
      containers:
       - name: nginx-ingress-controller
         image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.15.0

@@ -18,7 +18,7 @@ spec:
       serviceAccountName: prometheus
       containers:
       - name: prometheus
-        image: quay.io/prometheus/prometheus:v2.3.0
+        image: quay.io/prometheus/prometheus:v2.3.1
         args:
        - --web.listen-address=0.0.0.0:9090
         - --config.file=/etc/prometheus/prometheus.yaml
@@ -11,7 +11,7 @@ Typhoon distributes upstream Kubernetes, architectural conventions, and cluster

 ## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>

-* Kubernetes v1.10.4 (upstream, via [kubernetes-incubator/bootkube](https://github.com/kubernetes-incubator/bootkube))
+* Kubernetes v1.10.5 (upstream, via [kubernetes-incubator/bootkube](https://github.com/kubernetes-incubator/bootkube))
 * Single or multi-master, workloads isolated on workers, [Calico](https://www.projectcalico.org/) or [flannel](https://github.com/coreos/flannel) networking
 * On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
 * Advanced features like [worker pools](https://typhoon.psdn.io/advanced/worker-pools/)

@@ -1,6 +1,6 @@
 # Self-hosted Kubernetes assets (kubeconfig, manifests)
 module "bootkube" {
-  source = "git::https://github.com/poseidon/terraform-render-bootkube.git?ref=0e98e89e14a074768db13c4e050ed0c13319a0c1"
+  source = "git::https://github.com/poseidon/terraform-render-bootkube.git?ref=1d4db824f09246266a6b9e54df5d4df5dcd4477a"

   cluster_name = "${var.cluster_name}"
   api_servers  = ["${format("%s.%s", var.cluster_name, var.dns_zone)}"]

@@ -7,7 +7,7 @@ systemd:
     - name: 40-etcd-cluster.conf
       contents: |
         [Service]
-        Environment="ETCD_IMAGE_TAG=v3.3.6"
+        Environment="ETCD_IMAGE_TAG=v3.3.8"
         Environment="ETCD_NAME=${etcd_name}"
         Environment="ETCD_ADVERTISE_CLIENT_URLS=https://${etcd_domain}:2379"
         Environment="ETCD_INITIAL_ADVERTISE_PEER_URLS=https://${etcd_domain}:2380"

@@ -123,7 +123,7 @@ storage:
       contents:
         inline: |
           KUBELET_IMAGE_URL=docker://k8s.gcr.io/hyperkube
-          KUBELET_IMAGE_TAG=v1.10.4
+          KUBELET_IMAGE_TAG=v1.10.5
     - path: /etc/sysctl.d/max-user-watches.conf
       filesystem: root
       contents:

@@ -68,9 +68,9 @@ data "template_file" "etcds" {
   template = "etcd$${index}=https://$${cluster_name}-etcd$${index}.$${dns_zone}:2380"

   vars {
-    index = "${count.index}"
+    index        = "${count.index}"
     cluster_name = "${var.cluster_name}"
-    dns_zone = "${var.dns_zone}"
+    dns_zone     = "${var.dns_zone}"
   }
 }
@@ -7,15 +7,15 @@ resource "aws_route53_record" "apiserver" {

   # AWS recommends their special "alias" records for ELBs
   alias {
-    name    = "${aws_lb.apiserver.dns_name}"
-    zone_id = "${aws_lb.apiserver.zone_id}"
+    name    = "${aws_lb.nlb.dns_name}"
+    zone_id = "${aws_lb.nlb.zone_id}"
     evaluate_target_health = true
   }
 }

-# Network Load Balancer for apiservers
-resource "aws_lb" "apiserver" {
-  name = "${var.cluster_name}-apiserver"
+# Network Load Balancer for apiservers and ingress
+resource "aws_lb" "nlb" {
+  name = "${var.cluster_name}-nlb"
   load_balancer_type = "network"
   internal = false

@@ -24,11 +24,11 @@ resource "aws_lb" "apiserver" {
   enable_cross_zone_load_balancing = true
 }

-# Forward TCP traffic to controllers
+# Forward TCP apiserver traffic to controllers
 resource "aws_lb_listener" "apiserver-https" {
-  load_balancer_arn = "${aws_lb.apiserver.arn}"
+  load_balancer_arn = "${aws_lb.nlb.arn}"
   protocol = "TCP"
-  port = "443"
+  port = "6443"

   default_action {
     type = "forward"

@@ -36,6 +36,30 @@ resource "aws_lb_listener" "apiserver-https" {
   }
 }

+# Forward HTTP ingress traffic to workers
+resource "aws_lb_listener" "ingress-http" {
+  load_balancer_arn = "${aws_lb.nlb.arn}"
+  protocol = "TCP"
+  port = 80
+
+  default_action {
+    type = "forward"
+    target_group_arn = "${module.workers.target_group_http}"
+  }
+}
+
+# Forward HTTPS ingress traffic to workers
+resource "aws_lb_listener" "ingress-https" {
+  load_balancer_arn = "${aws_lb.nlb.arn}"
+  protocol = "TCP"
+  port = 443
+
+  default_action {
+    type = "forward"
+    target_group_arn = "${module.workers.target_group_https}"
+  }
+}
+
 # Target group of controllers
 resource "aws_lb_target_group" "controllers" {
   name = "${var.cluster_name}-controllers"

@@ -43,12 +67,12 @@ resource "aws_lb_target_group" "controllers" {
   target_type = "instance"

   protocol = "TCP"
-  port = 443
+  port = 6443

   # TCP health check for apiserver
   health_check {
     protocol = "TCP"
-    port = 443
+    port = 6443

     # NLBs required to use same healthy and unhealthy thresholds
     healthy_threshold = 3

@@ -65,5 +89,5 @@ resource "aws_lb_target_group_attachment" "controllers" {

   target_group_arn = "${aws_lb_target_group.controllers.arn}"
   target_id = "${element(aws_instance.controllers.*.id, count.index)}"
-  port = 443
+  port = 6443
 }

@@ -1,5 +1,7 @@
 # Outputs for Kubernetes Ingress

 output "ingress_dns_name" {
-  value = "${module.workers.ingress_dns_name}"
+  value = "${aws_lb.nlb.dns_name}"
   description = "DNS name of the network load balancer for distributing traffic to Ingress controllers"
 }

@@ -23,3 +25,15 @@ output "worker_security_groups" {

 output "kubeconfig" {
   value = "${module.bootkube.kubeconfig}"
 }
+
+# Outputs for custom load balancing
+
+output "worker_target_group_http" {
+  description = "ARN of a target group of workers for HTTP traffic"
+  value = "${module.workers.target_group_http}"
+}
+
+output "worker_target_group_https" {
+  description = "ARN of a target group of workers for HTTPS traffic"
+  value = "${module.workers.target_group_https}"
+}

@@ -36,8 +36,8 @@ resource "aws_security_group_rule" "controller-apiserver" {

   type = "ingress"
   protocol = "tcp"
-  from_port = 443
-  to_port = 443
+  from_port = 6443
+  to_port = 6443
   cidr_blocks = ["0.0.0.0/0"]
 }

@@ -93,7 +93,7 @@ storage:
       contents:
         inline: |
           KUBELET_IMAGE_URL=docker://k8s.gcr.io/hyperkube
-          KUBELET_IMAGE_TAG=v1.10.4
+          KUBELET_IMAGE_TAG=v1.10.5
     - path: /etc/sysctl.d/max-user-watches.conf
       filesystem: root
       contents:

@@ -111,7 +111,7 @@ storage:
           --volume config,kind=host,source=/etc/kubernetes \
           --mount volume=config,target=/etc/kubernetes \
          --insecure-options=image \
-          docker://k8s.gcr.io/hyperkube:v1.10.4 \
+          docker://k8s.gcr.io/hyperkube:v1.10.5 \
           --net=host \
           --dns=host \
           --exec=/kubectl -- --kubeconfig=/etc/kubernetes/kubeconfig delete node $(hostname)
@@ -1,39 +1,4 @@
-# Network Load Balancer for Ingress
-resource "aws_lb" "ingress" {
-  name = "${var.name}-ingress"
-  load_balancer_type = "network"
-  internal = false
-
-  subnets = ["${var.subnet_ids}"]
-
-  enable_cross_zone_load_balancing = true
-}
-
-# Forward HTTP traffic to workers
-resource "aws_lb_listener" "ingress-http" {
-  load_balancer_arn = "${aws_lb.ingress.arn}"
-  protocol = "TCP"
-  port = 80
-
-  default_action {
-    type = "forward"
-    target_group_arn = "${aws_lb_target_group.workers-http.arn}"
-  }
-}
-
-# Forward HTTPS traffic to workers
-resource "aws_lb_listener" "ingress-https" {
-  load_balancer_arn = "${aws_lb.ingress.arn}"
-  protocol = "TCP"
-  port = 443
-
-  default_action {
-    type = "forward"
-    target_group_arn = "${aws_lb_target_group.workers-https.arn}"
-  }
-}
-
-# Network Load Balancer target groups of instances
+# Target groups of instances for use with load balancers

 resource "aws_lb_target_group" "workers-http" {
   name = "${var.name}-workers-http"

@@ -1,4 +1,9 @@
-output "ingress_dns_name" {
-  value = "${aws_lb.ingress.dns_name}"
-  description = "DNS name of the network load balancer for distributing traffic to Ingress controllers"
+output "target_group_http" {
+  description = "ARN of a target group of workers for HTTP traffic"
+  value = "${aws_lb_target_group.workers-http.arn}"
+}
+
+output "target_group_https" {
+  description = "ARN of a target group of workers for HTTPS traffic"
+  value = "${aws_lb_target_group.workers-https.arn}"
 }

@@ -41,9 +41,10 @@ resource "aws_autoscaling_group" "workers" {

 # Worker template
 resource "aws_launch_configuration" "worker" {
-  image_id = "${local.ami_id}"
-  instance_type = "${var.instance_type}"
-  spot_price = "${var.spot_price}"
+  image_id          = "${local.ami_id}"
+  instance_type     = "${var.instance_type}"
+  spot_price        = "${var.spot_price}"
+  enable_monitoring = false

   user_data = "${data.ct_config.worker_ign.rendered}"
@@ -11,7 +11,7 @@ Typhoon distributes upstream Kubernetes, architectural conventions, and cluster

 ## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>

-* Kubernetes v1.10.4 (upstream, via [kubernetes-incubator/bootkube](https://github.com/kubernetes-incubator/bootkube))
+* Kubernetes v1.10.5 (upstream, via [kubernetes-incubator/bootkube](https://github.com/kubernetes-incubator/bootkube))
 * Single or multi-master, workloads isolated on workers, [Calico](https://www.projectcalico.org/) or [flannel](https://github.com/coreos/flannel) networking
 * On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
 * Advanced features like [worker pools](https://typhoon.psdn.io/advanced/worker-pools/)

@@ -1,6 +1,6 @@
 # Self-hosted Kubernetes assets (kubeconfig, manifests)
 module "bootkube" {
-  source = "git::https://github.com/poseidon/terraform-render-bootkube.git?ref=0e98e89e14a074768db13c4e050ed0c13319a0c1"
+  source = "git::https://github.com/poseidon/terraform-render-bootkube.git?ref=1d4db824f09246266a6b9e54df5d4df5dcd4477a"

   cluster_name = "${var.cluster_name}"
   api_servers  = ["${format("%s.%s", var.cluster_name, var.dns_zone)}"]

@@ -93,8 +93,8 @@ bootcmd:
 runcmd:
 - [systemctl, daemon-reload]
 - [systemctl, restart, NetworkManager]
-- "atomic install --system --name=etcd quay.io/poseidon/etcd:v3.3.6"
-- "atomic install --system --name=kubelet quay.io/poseidon/kubelet:v1.10.4"
+- "atomic install --system --name=etcd quay.io/poseidon/etcd:v3.3.8"
+- "atomic install --system --name=kubelet quay.io/poseidon/kubelet:v1.10.5"
 - "atomic install --system --name=bootkube quay.io/poseidon/bootkube:v0.12.0"
 - [systemctl, start, --no-block, etcd.service]
 - [systemctl, enable, cloud-metadata.service]

@@ -68,8 +68,8 @@ data "template_file" "etcds" {
   template = "etcd$${index}=https://$${cluster_name}-etcd$${index}.$${dns_zone}:2380"

   vars {
-    index = "${count.index}"
+    index        = "${count.index}"
     cluster_name = "${var.cluster_name}"
-    dns_zone = "${var.dns_zone}"
+    dns_zone     = "${var.dns_zone}"
   }
 }
@@ -7,15 +7,15 @@ resource "aws_route53_record" "apiserver" {

   # AWS recommends their special "alias" records for ELBs
   alias {
-    name    = "${aws_lb.apiserver.dns_name}"
-    zone_id = "${aws_lb.apiserver.zone_id}"
+    name    = "${aws_lb.nlb.dns_name}"
+    zone_id = "${aws_lb.nlb.zone_id}"
     evaluate_target_health = true
   }
 }

-# Network Load Balancer for apiservers
-resource "aws_lb" "apiserver" {
-  name = "${var.cluster_name}-apiserver"
+# Network Load Balancer for apiservers and ingress
+resource "aws_lb" "nlb" {
+  name = "${var.cluster_name}-nlb"
   load_balancer_type = "network"
   internal = false

@@ -24,11 +24,11 @@ resource "aws_lb" "apiserver" {
   enable_cross_zone_load_balancing = true
 }

-# Forward TCP traffic to controllers
+# Forward TCP apiserver traffic to controllers
 resource "aws_lb_listener" "apiserver-https" {
-  load_balancer_arn = "${aws_lb.apiserver.arn}"
+  load_balancer_arn = "${aws_lb.nlb.arn}"
   protocol = "TCP"
-  port = "443"
+  port = "6443"

   default_action {
     type = "forward"

@@ -36,6 +36,30 @@ resource "aws_lb_listener" "apiserver-https" {
   }
 }

+# Forward HTTP ingress traffic to workers
+resource "aws_lb_listener" "ingress-http" {
+  load_balancer_arn = "${aws_lb.nlb.arn}"
+  protocol = "TCP"
+  port = 80
+
+  default_action {
+    type = "forward"
+    target_group_arn = "${module.workers.target_group_http}"
+  }
+}
+
+# Forward HTTPS ingress traffic to workers
+resource "aws_lb_listener" "ingress-https" {
+  load_balancer_arn = "${aws_lb.nlb.arn}"
+  protocol = "TCP"
+  port = 443
+
+  default_action {
+    type = "forward"
+    target_group_arn = "${module.workers.target_group_https}"
+  }
+}
+
 # Target group of controllers
 resource "aws_lb_target_group" "controllers" {
   name = "${var.cluster_name}-controllers"

@@ -43,12 +67,12 @@ resource "aws_lb_target_group" "controllers" {
   target_type = "instance"

   protocol = "TCP"
-  port = 443
+  port = 6443

   # TCP health check for apiserver
   health_check {
     protocol = "TCP"
-    port = 443
+    port = 6443

     # NLBs required to use same healthy and unhealthy thresholds
     healthy_threshold = 3

@@ -65,5 +89,5 @@ resource "aws_lb_target_group_attachment" "controllers" {

   target_group_arn = "${aws_lb_target_group.controllers.arn}"
   target_id = "${element(aws_instance.controllers.*.id, count.index)}"
-  port = 443
+  port = 6443
 }

@@ -1,5 +1,7 @@
 # Outputs for Kubernetes Ingress

 output "ingress_dns_name" {
-  value = "${module.workers.ingress_dns_name}"
+  value = "${aws_lb.nlb.dns_name}"
   description = "DNS name of the network load balancer for distributing traffic to Ingress controllers"
 }

@@ -23,3 +25,15 @@ output "worker_security_groups" {

 output "kubeconfig" {
   value = "${module.bootkube.kubeconfig}"
 }
+
+# Outputs for custom load balancing
+
+output "worker_target_group_http" {
+  description = "ARN of a target group of workers for HTTP traffic"
+  value = "${module.workers.target_group_http}"
+}
+
+output "worker_target_group_https" {
+  description = "ARN of a target group of workers for HTTPS traffic"
+  value = "${module.workers.target_group_https}"
+}

@@ -36,8 +36,8 @@ resource "aws_security_group_rule" "controller-apiserver" {

   type = "ingress"
   protocol = "tcp"
-  from_port = 443
-  to_port = 443
+  from_port = 6443
+  to_port = 6443
   cidr_blocks = ["0.0.0.0/0"]
 }
@@ -70,7 +70,7 @@ runcmd:
 - [systemctl, daemon-reload]
 - [systemctl, restart, NetworkManager]
 - [systemctl, enable, cloud-metadata.service]
-- "atomic install --system --name=kubelet quay.io/poseidon/kubelet:v1.10.4"
+- "atomic install --system --name=kubelet quay.io/poseidon/kubelet:v1.10.5"
 - [systemctl, start, --no-block, kubelet.service]
 users:
 - default

@@ -1,39 +1,4 @@
-# Network Load Balancer for Ingress
-resource "aws_lb" "ingress" {
-  name = "${var.name}-ingress"
-  load_balancer_type = "network"
-  internal = false
-
-  subnets = ["${var.subnet_ids}"]
-
-  enable_cross_zone_load_balancing = true
-}
-
-# Forward HTTP traffic to workers
-resource "aws_lb_listener" "ingress-http" {
-  load_balancer_arn = "${aws_lb.ingress.arn}"
-  protocol = "TCP"
-  port = 80
-
-  default_action {
-    type = "forward"
-    target_group_arn = "${aws_lb_target_group.workers-http.arn}"
-  }
-}
-
-# Forward HTTPS traffic to workers
-resource "aws_lb_listener" "ingress-https" {
-  load_balancer_arn = "${aws_lb.ingress.arn}"
-  protocol = "TCP"
-  port = 443
-
-  default_action {
-    type = "forward"
-    target_group_arn = "${aws_lb_target_group.workers-https.arn}"
-  }
-}
-
-# Network Load Balancer target groups of instances
+# Target groups of instances for use with load balancers

 resource "aws_lb_target_group" "workers-http" {
   name = "${var.name}-workers-http"

@@ -1,4 +1,9 @@
-output "ingress_dns_name" {
-  value = "${aws_lb.ingress.dns_name}"
-  description = "DNS name of the network load balancer for distributing traffic to Ingress controllers"
+output "target_group_http" {
+  description = "ARN of a target group of workers for HTTP traffic"
+  value = "${aws_lb_target_group.workers-http.arn}"
+}
+
+output "target_group_https" {
+  description = "ARN of a target group of workers for HTTPS traffic"
+  value = "${aws_lb_target_group.workers-https.arn}"
 }

@@ -41,9 +41,10 @@ resource "aws_autoscaling_group" "workers" {

 # Worker template
 resource "aws_launch_configuration" "worker" {
-  image_id = "${data.aws_ami.fedora.image_id}"
-  instance_type = "${var.instance_type}"
-  spot_price = "${var.spot_price}"
+  image_id          = "${data.aws_ami.fedora.image_id}"
+  instance_type     = "${var.instance_type}"
+  spot_price        = "${var.spot_price}"
+  enable_monitoring = false

   user_data = "${data.template_file.worker-cloudinit.rendered}"
@@ -11,7 +11,7 @@ Typhoon distributes upstream Kubernetes, architectural conventions, and cluster

 ## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>

-* Kubernetes v1.10.4 (upstream, via [kubernetes-incubator/bootkube](https://github.com/kubernetes-incubator/bootkube))
+* Kubernetes v1.10.5 (upstream, via [kubernetes-incubator/bootkube](https://github.com/kubernetes-incubator/bootkube))
 * Single or multi-master, workloads isolated on workers, [Calico](https://www.projectcalico.org/) or [flannel](https://github.com/coreos/flannel) networking
 * On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
 * Ready for Ingress, Prometheus, Grafana, and other optional [addons](https://typhoon.psdn.io/addons/overview/)

@@ -1,6 +1,6 @@
 # Self-hosted Kubernetes assets (kubeconfig, manifests)
 module "bootkube" {
-  source = "git::https://github.com/poseidon/terraform-render-bootkube.git?ref=0e98e89e14a074768db13c4e050ed0c13319a0c1"
+  source = "git::https://github.com/poseidon/terraform-render-bootkube.git?ref=1d4db824f09246266a6b9e54df5d4df5dcd4477a"

   cluster_name = "${var.cluster_name}"
   api_servers  = ["${var.k8s_domain_name}"]

@@ -7,7 +7,7 @@ systemd:
     - name: 40-etcd-cluster.conf
       contents: |
         [Service]
-        Environment="ETCD_IMAGE_TAG=v3.3.6"
+        Environment="ETCD_IMAGE_TAG=v3.3.8"
         Environment="ETCD_NAME=${etcd_name}"
         Environment="ETCD_ADVERTISE_CLIENT_URLS=https://${domain_name}:2379"
         Environment="ETCD_INITIAL_ADVERTISE_PEER_URLS=https://${domain_name}:2380"

@@ -124,7 +124,7 @@ storage:
       contents:
         inline: |
           KUBELET_IMAGE_URL=docker://k8s.gcr.io/hyperkube
-          KUBELET_IMAGE_TAG=v1.10.4
+          KUBELET_IMAGE_TAG=v1.10.5
     - path: /etc/hostname
       filesystem: root
       mode: 0644

@@ -85,7 +85,7 @@ storage:
       contents:
         inline: |
           KUBELET_IMAGE_URL=docker://k8s.gcr.io/hyperkube
-          KUBELET_IMAGE_TAG=v1.10.4
+          KUBELET_IMAGE_TAG=v1.10.5
     - path: /etc/hostname
       filesystem: root
       mode: 0644

@@ -1,7 +1,7 @@
 resource "matchbox_group" "install" {
   count = "${length(var.controller_names) + length(var.worker_names)}"

-  name = "${format("install-%s", element(concat(var.controller_names, var.worker_names), count.index))}"
+  name    = "${format("install-%s", element(concat(var.controller_names, var.worker_names), count.index))}"

   profile = "${local.flavor == "flatcar" ? element(matchbox_profile.flatcar-install.*.name, count.index) : var.cached_install == "true" ? element(matchbox_profile.cached-container-linux-install.*.name, count.index) : element(matchbox_profile.container-linux-install.*.name, count.index)}"

@@ -1,8 +1,9 @@
 locals {
   # coreos-stable -> coreos flavor, stable channel
   # flatcar-stable -> flatcar flavor, stable channel
-  flavor = "${element(split("-", var.os_channel), 0)}"
-  channel = "${element(split("-", var.os_channel), 1)}"
+  flavor = "${element(split("-", var.os_channel), 0)}"
+
+  channel = "${element(split("-", var.os_channel), 1)}"
 }

 // Container Linux Install profile (from release.core-os.net)

@@ -2,6 +2,14 @@
 resource "null_resource" "copy-controller-secrets" {
   count = "${length(var.controller_names)}"

+  # Without depends_on, remote-exec could start and wait for machines before
+  # matchbox groups are written, causing a deadlock.
+  depends_on = [
+    "matchbox_group.install",
+    "matchbox_group.controller",
+    "matchbox_group.worker",
+  ]
+
   connection {
     type = "ssh"
     host = "${element(var.controller_domains, count.index)}"

@@ -70,6 +78,14 @@ resource "null_resource" "copy-controller-secrets" {
 resource "null_resource" "copy-worker-secrets" {
   count = "${length(var.worker_names)}"

+  # Without depends_on, remote-exec could start and wait for machines before
+  # matchbox groups are written, causing a deadlock.
+  depends_on = [
+    "matchbox_group.install",
+    "matchbox_group.controller",
+    "matchbox_group.worker",
+  ]
+
   connection {
     type = "ssh"
     host = "${element(var.worker_domains, count.index)}"
@@ -11,7 +11,7 @@ Typhoon distributes upstream Kubernetes, architectural conventions, and cluster

 ## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>

-* Kubernetes v1.10.4 (upstream, via [kubernetes-incubator/bootkube](https://github.com/kubernetes-incubator/bootkube))
+* Kubernetes v1.10.5 (upstream, via [kubernetes-incubator/bootkube](https://github.com/kubernetes-incubator/bootkube))
 * Single or multi-master, workloads isolated on workers, [Calico](https://www.projectcalico.org/) or [flannel](https://github.com/coreos/flannel) networking
 * On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
 * Ready for Ingress, Prometheus, Grafana, and other optional [addons](https://typhoon.psdn.io/addons/overview/)

@@ -1,6 +1,6 @@
 # Self-hosted Kubernetes assets (kubeconfig, manifests)
 module "bootkube" {
-  source = "git::https://github.com/poseidon/terraform-render-bootkube.git?ref=0e98e89e14a074768db13c4e050ed0c13319a0c1"
+  source = "git::https://github.com/poseidon/terraform-render-bootkube.git?ref=1d4db824f09246266a6b9e54df5d4df5dcd4477a"

   cluster_name = "${var.cluster_name}"
   api_servers  = ["${var.k8s_domain_name}"]

@@ -84,8 +84,8 @@ runcmd:
 - [systemctl, daemon-reload]
 - [systemctl, restart, NetworkManager]
 - [hostnamectl, set-hostname, ${domain_name}]
-- "atomic install --system --name=etcd quay.io/poseidon/etcd:v3.3.6"
-- "atomic install --system --name=kubelet quay.io/poseidon/kubelet:v1.10.4"
+- "atomic install --system --name=etcd quay.io/poseidon/etcd:v3.3.8"
+- "atomic install --system --name=kubelet quay.io/poseidon/kubelet:v1.10.5"
 - "atomic install --system --name=bootkube quay.io/poseidon/bootkube:v0.12.0"
 - [systemctl, start, --no-block, etcd.service]
 - [systemctl, enable, kubelet.path]

@@ -60,7 +60,7 @@ runcmd:
 - [systemctl, daemon-reload]
 - [systemctl, restart, NetworkManager]
 - [hostnamectl, set-hostname, ${domain_name}]
-- "atomic install --system --name=kubelet quay.io/poseidon/kubelet:v1.10.4"
+- "atomic install --system --name=kubelet quay.io/poseidon/kubelet:v1.10.5"
 - [systemctl, enable, kubelet.path]
 - [systemctl, start, --no-block, kubelet.path]
 users:

@@ -2,6 +2,14 @@
 resource "null_resource" "copy-controller-secrets" {
   count = "${length(var.controller_names)}"

+  # Without depends_on, remote-exec could start and wait for machines before
+  # matchbox groups are written, causing a deadlock.
+  depends_on = [
+    "matchbox_group.install",
+    "matchbox_group.controller",
+    "matchbox_group.worker",
+  ]
+
   connection {
     type = "ssh"
     host = "${element(var.controller_domains, count.index)}"

@@ -68,6 +76,14 @@ resource "null_resource" "copy-controller-secrets" {
 resource "null_resource" "copy-worker-secrets" {
   count = "${length(var.worker_names)}"

+  # Without depends_on, remote-exec could start and wait for machines before
+  # matchbox groups are written, causing a deadlock.
+  depends_on = [
+    "matchbox_group.install",
+    "matchbox_group.controller",
+    "matchbox_group.worker",
+  ]
+
   connection {
     type = "ssh"
     host = "${element(var.worker_domains, count.index)}"
@@ -11,7 +11,7 @@ Typhoon distributes upstream Kubernetes, architectural conventions, and cluster

 ## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>

-* Kubernetes v1.10.4 (upstream, via [kubernetes-incubator/bootkube](https://github.com/kubernetes-incubator/bootkube))
+* Kubernetes v1.10.5 (upstream, via [kubernetes-incubator/bootkube](https://github.com/kubernetes-incubator/bootkube))
 * Single or multi-master, workloads isolated on workers, [flannel](https://github.com/coreos/flannel) networking
 * On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
 * Ready for Ingress, Prometheus, Grafana, and other optional [addons](https://typhoon.psdn.io/addons/overview/)

@@ -1,6 +1,6 @@
 # Self-hosted Kubernetes assets (kubeconfig, manifests)
 module "bootkube" {
-  source = "git::https://github.com/poseidon/terraform-render-bootkube.git?ref=0e98e89e14a074768db13c4e050ed0c13319a0c1"
+  source = "git::https://github.com/poseidon/terraform-render-bootkube.git?ref=1d4db824f09246266a6b9e54df5d4df5dcd4477a"

   cluster_name = "${var.cluster_name}"
   api_servers  = ["${format("%s.%s", var.cluster_name, var.dns_zone)}"]

@@ -7,7 +7,7 @@ systemd:
     - name: 40-etcd-cluster.conf
       contents: |
         [Service]
-        Environment="ETCD_IMAGE_TAG=v3.3.6"
+        Environment="ETCD_IMAGE_TAG=v3.3.8"
         Environment="ETCD_NAME=${etcd_name}"
         Environment="ETCD_ADVERTISE_CLIENT_URLS=https://${etcd_domain}:2379"
         Environment="ETCD_INITIAL_ADVERTISE_PEER_URLS=https://${etcd_domain}:2380"

@@ -129,7 +129,7 @@ storage:
       contents:
         inline: |
           KUBELET_IMAGE_URL=docker://k8s.gcr.io/hyperkube
-          KUBELET_IMAGE_TAG=v1.10.4
+          KUBELET_IMAGE_TAG=v1.10.5
     - path: /etc/sysctl.d/max-user-watches.conf
       filesystem: root
       contents:

@@ -99,7 +99,7 @@ storage:
       contents:
         inline: |
           KUBELET_IMAGE_URL=docker://k8s.gcr.io/hyperkube
-          KUBELET_IMAGE_TAG=v1.10.4
+          KUBELET_IMAGE_TAG=v1.10.5
     - path: /etc/sysctl.d/max-user-watches.conf
       filesystem: root
       contents:

@@ -117,7 +117,7 @@ storage:
           --volume config,kind=host,source=/etc/kubernetes \
           --mount volume=config,target=/etc/kubernetes \
           --insecure-options=image \
-          docker://k8s.gcr.io/hyperkube:v1.10.4 \
+          docker://k8s.gcr.io/hyperkube:v1.10.5 \
           --net=host \
           --dns=host \
           --exec=/kubectl -- --kubeconfig=/etc/kubernetes/kubeconfig delete node $(hostname)

@@ -69,7 +69,7 @@ data "template_file" "controller_config" {
     etcd_domain = "${var.cluster_name}-etcd${count.index}.${var.dns_zone}"

     # etcd0=https://cluster-etcd0.example.com,etcd1=https://cluster-etcd1.example.com,...
-    etcd_initial_cluster = "${join(",", data.template_file.etcds.*.rendered)}"
+    etcd_initial_cluster  = "${join(",", data.template_file.etcds.*.rendered)}"
     k8s_dns_service_ip = "${cidrhost(var.service_cidr, 10)}"
     cluster_domain_suffix = "${var.cluster_domain_suffix}"
   }

@@ -80,9 +80,9 @@ data "template_file" "etcds" {
   template = "etcd$${index}=https://$${cluster_name}-etcd$${index}.$${dns_zone}:2380"

   vars {
-    index = "${count.index}"
+    index        = "${count.index}"
     cluster_name = "${var.cluster_name}"
-    dns_zone = "${var.dns_zone}"
+    dns_zone     = "${var.dns_zone}"
   }
 }

@@ -3,7 +3,7 @@ resource "digitalocean_firewall" "rules" {

   tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]

-  # allow ssh, http/https ingress, and peer-to-peer traffic
+  # allow ssh, apiserver, http/https ingress, and peer-to-peer traffic
   inbound_rule = [
     {
       protocol = "tcp"

@@ -20,6 +20,11 @@ resource "digitalocean_firewall" "rules" {
       port_range = "443"
       source_addresses = ["0.0.0.0/0", "::/0"]
     },
+    {
+      protocol = "tcp"
+      port_range = "6443"
+      source_addresses = ["0.0.0.0/0", "::/0"]
+    },
     {
       protocol = "udp"
       port_range = "1-65535"
@@ -11,7 +11,7 @@ Typhoon distributes upstream Kubernetes, architectural conventions, and cluster

 ## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>

-* Kubernetes v1.10.4 (upstream, via [kubernetes-incubator/bootkube](https://github.com/kubernetes-incubator/bootkube))
+* Kubernetes v1.10.5 (upstream, via [kubernetes-incubator/bootkube](https://github.com/kubernetes-incubator/bootkube))
 * Single or multi-master, workloads isolated on workers, [Calico](https://www.projectcalico.org/) or [flannel](https://github.com/coreos/flannel) networking
 * On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
 * Ready for Ingress, Prometheus, Grafana, and other optional [addons](https://typhoon.psdn.io/addons/overview/)

@@ -1,6 +1,6 @@
 # Self-hosted Kubernetes assets (kubeconfig, manifests)
 module "bootkube" {
-  source = "git::https://github.com/poseidon/terraform-render-bootkube.git?ref=0e98e89e14a074768db13c4e050ed0c13319a0c1"
+  source = "git::https://github.com/poseidon/terraform-render-bootkube.git?ref=1d4db824f09246266a6b9e54df5d4df5dcd4477a"

   cluster_name = "${var.cluster_name}"
   api_servers  = ["${format("%s.%s", var.cluster_name, var.dns_zone)}"]

@@ -90,8 +90,8 @@ bootcmd:
 - [modprobe, ip_vs]
 runcmd:
 - [systemctl, daemon-reload]
-- "atomic install --system --name=etcd quay.io/poseidon/etcd:v3.3.6"
-- "atomic install --system --name=kubelet quay.io/poseidon/kubelet:v1.10.4"
+- "atomic install --system --name=etcd quay.io/poseidon/etcd:v3.3.8"
+- "atomic install --system --name=kubelet quay.io/poseidon/kubelet:v1.10.5"
 - "atomic install --system --name=bootkube quay.io/poseidon/bootkube:v0.12.0"
 - [systemctl, start, --no-block, etcd.service]
 - [systemctl, enable, cloud-metadata.service]

@@ -67,7 +67,7 @@ bootcmd:
 runcmd:
 - [systemctl, daemon-reload]
 - [systemctl, enable, cloud-metadata.service]
-- "atomic install --system --name=kubelet quay.io/poseidon/kubelet:v1.10.4"
+- "atomic install --system --name=kubelet quay.io/poseidon/kubelet:v1.10.5"
 - [systemctl, enable, kubelet.path]
 - [systemctl, start, --no-block, kubelet.path]
 users:

@@ -82,8 +82,8 @@ data "template_file" "etcds" {
   template = "etcd$${index}=https://$${cluster_name}-etcd$${index}.$${dns_zone}:2380"

   vars {
-    index = "${count.index}"
+    index        = "${count.index}"
     cluster_name = "${var.cluster_name}"
-    dns_zone = "${var.dns_zone}"
+    dns_zone     = "${var.dns_zone}"
   }
 }

@@ -3,7 +3,7 @@ resource "digitalocean_firewall" "rules" {

   tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]

-  # allow ssh, http/https ingress, and peer-to-peer traffic
+  # allow ssh, apiserver, http/https ingress, and peer-to-peer traffic
   inbound_rule = [
     {
       protocol = "tcp"

@@ -20,6 +20,11 @@ resource "digitalocean_firewall" "rules" {
       port_range = "443"
       source_addresses = ["0.0.0.0/0", "::/0"]
     },
+    {
+      protocol = "tcp"
+      port_range = "6443"
+      source_addresses = ["0.0.0.0/0", "::/0"]
+    },
     {
       protocol = "udp"
       port_range = "1-65535"
@@ -4,7 +4,7 @@ Nginx Ingress controller pods accept and demultiplex HTTP, HTTPS, TCP, or UDP tr

 ## AWS

-On AWS, an elastic load balancer distributes traffic across worker nodes (i.e. an auto-scaling group) running an Ingress controller deployment on host ports 80 and 443. Firewall rules allow traffic to ports 80 and 443. Health check rules ensure only workers with a healthy Ingress controller receive traffic.
+On AWS, a network load balancer (NLB) distributes traffic across a target group of worker nodes running an Ingress controller deployment on host ports 80 and 443. Firewall rules allow traffic to ports 80 and 443. Health check rules ensure only workers with a healthy Ingress controller receive traffic.

 Create the Ingress controller deployment, service, RBAC roles, RBAC bindings, default backend, and namespace.
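Once the controllers are running, app DNS records can point at the cluster's combined NLB via the `ingress_dns_name` output. A minimal sketch, assuming a Route53 zone resource and a cluster module named `tempest` (both names are hypothetical, not part of the addon manifests):

```tf
# Hypothetical Route53 record pointing an app at the cluster's network
# load balancer; `aws_route53_zone.default` and `module.tempest` are
# assumed names from elsewhere in a user's configuration.
resource "aws_route53_record" "apps" {
  zone_id = "${aws_route53_zone.default.zone_id}"
  name    = "app.example.com"
  type    = "CNAME"
  ttl     = "300"
  records = ["${module.tempest.ingress_dns_name}"]
}
```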
@@ -15,7 +15,7 @@ Create a cluster following the AWS [tutorial](../cl/aws.md#cluster). Define a wo

 ```tf
 module "tempest-worker-pool" {
-  source = "git::https://github.com/poseidon/typhoon//aws/container-linux/kubernetes/workers?ref=v1.10.4"
+  source = "git::https://github.com/poseidon/typhoon//aws/container-linux/kubernetes/workers?ref=v1.10.5"

   providers = {
     aws = "aws.default"

@@ -80,7 +80,7 @@ Create a cluster following the Google Cloud [tutorial](../cl/google-cloud.md#clu

 ```tf
 module "yavin-worker-pool" {
-  source = "git::https://github.com/poseidon/typhoon//google-cloud/container-linux/kubernetes/workers?ref=v1.10.4"
+  source = "git::https://github.com/poseidon/typhoon//google-cloud/container-linux/kubernetes/workers?ref=v1.10.5"

   providers = {
     google = "google.default"

@@ -114,11 +114,11 @@ Verify a managed instance group of workers joins the cluster within a few minute

 ```
 $ kubectl get nodes
 NAME                                           STATUS  AGE  VERSION
-yavin-controller-0.c.example-com.internal      Ready   6m   v1.10.4
-yavin-worker-jrbf.c.example-com.internal       Ready   5m   v1.10.4
-yavin-worker-mzdm.c.example-com.internal       Ready   5m   v1.10.4
-yavin-16x-worker-jrbf.c.example-com.internal   Ready   3m   v1.10.4
-yavin-16x-worker-mzdm.c.example-com.internal   Ready   3m   v1.10.4
+yavin-controller-0.c.example-com.internal      Ready   6m   v1.10.5
+yavin-worker-jrbf.c.example-com.internal       Ready   5m   v1.10.5
+yavin-worker-mzdm.c.example-com.internal       Ready   5m   v1.10.5
+yavin-16x-worker-jrbf.c.example-com.internal   Ready   3m   v1.10.5
+yavin-16x-worker-mzdm.c.example-com.internal   Ready   3m   v1.10.5
 ```

 ### Variables
@@ -3,9 +3,9 @@
 !!! danger
     Typhoon for Fedora Atomic is alpha. Expect rough edges and changes.

-In this tutorial, we'll create a Kubernetes v1.10.4 cluster on AWS with Fedora Atomic.
+In this tutorial, we'll create a Kubernetes v1.10.5 cluster on AWS with Fedora Atomic.

-We'll declare a Kubernetes cluster using the Typhoon Terraform module. Then apply the changes to create a VPC, gateway, subnets, security groups, controller instances, worker auto-scaling group, network load balancers, and TLS assets. Instances are provisioned on first boot with cloud-init.
+We'll declare a Kubernetes cluster using the Typhoon Terraform module. Then apply the changes to create a VPC, gateway, subnets, security groups, controller instances, worker auto-scaling group, network load balancer, and TLS assets. Instances are provisioned on first boot with cloud-init.

 Controllers are provisioned to run an `etcd` peer and a `kubelet` service. Workers run just a `kubelet` service. A one-time [bootkube](https://github.com/kubernetes-incubator/bootkube) bootstrap schedules the `apiserver`, `scheduler`, `controller-manager`, and `kube-dns` on controllers and schedules `kube-proxy` and `calico` (or `flannel`) on every node. A generated `kubeconfig` provides `kubectl` access to the cluster.

@@ -83,7 +83,7 @@ Define a Kubernetes cluster using the module `aws/fedora-atomic/kubernetes`.

 ```tf
 module "aws-tempest" {
-  source = "git::https://github.com/poseidon/typhoon//aws/fedora-atomic/kubernetes?ref=v1.10.4"
+  source = "git::https://github.com/poseidon/typhoon//aws/fedora-atomic/kubernetes?ref=v1.10.5"

   providers = {
     aws = "aws.default"

@@ -156,9 +156,9 @@ In 5-10 minutes, the Kubernetes cluster will be ready.

 $ export KUBECONFIG=/home/user/.secrets/clusters/tempest/auth/kubeconfig
 $ kubectl get nodes
 NAME             STATUS  AGE  VERSION
-ip-10-0-12-221   Ready   34m  v1.10.4
-ip-10-0-19-112   Ready   34m  v1.10.4
-ip-10-0-4-22     Ready   34m  v1.10.4
+ip-10-0-12-221   Ready   34m  v1.10.5
+ip-10-0-19-112   Ready   34m  v1.10.5
+ip-10-0-4-22     Ready   34m  v1.10.5
 ```

 List the pods.

@@ -241,4 +241,4 @@ Check the list of valid [instance types](https://aws.amazon.com/ec2/instance-typ
 Do not choose a `controller_type` smaller than `t2.small`. Smaller instances are not sufficient for running a controller.

 !!! tip "MTU"
-    If your EC2 instance type supports [Jumbo frames](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/network_mtu.html#jumbo_frame_instances) (most do), we recommend you change the `network_mtu` to 8991! You will get better pod-to-pod bandwidth.
+    If your EC2 instance type supports [Jumbo frames](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/network_mtu.html#jumbo_frame_instances) (most do), we recommend you change the `network_mtu` to 8981! You will get better pod-to-pod bandwidth.
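In module terms, the MTU tweak is a single variable on the cluster definition. A sketch, reusing the `aws-tempest` module from above (other required variables are elided, so this is illustrative rather than complete):

```tf
module "aws-tempest" {
  source = "git::https://github.com/poseidon/typhoon//aws/fedora-atomic/kubernetes?ref=v1.10.5"

  # ...other required variables elided...

  # Larger MTU for instance types that support jumbo frames
  network_mtu = "8981"
}
```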
@@ -3,7 +3,7 @@
 !!! danger
     Typhoon for Fedora Atomic is alpha. Expect rough edges and changes.

-In this tutorial, we'll network boot and provision a Kubernetes v1.10.4 cluster on bare-metal with Fedora Atomic.
+In this tutorial, we'll network boot and provision a Kubernetes v1.10.5 cluster on bare-metal with Fedora Atomic.

 First, we'll deploy a [Matchbox](https://github.com/coreos/matchbox) service and setup a network boot environment. Then, we'll declare a Kubernetes cluster using the Typhoon Terraform module and power on machines. On PXE boot, machines will install Fedora Atomic via kickstart, reboot into the disk install, and provision themselves as Kubernetes controllers or workers via cloud-init.

@@ -234,7 +234,7 @@ Define a Kubernetes cluster using the module `bare-metal/fedora-atomic/kubernete

 ```tf
 module "bare-metal-mercury" {
-  source = "git::https://github.com/poseidon/typhoon//bare-metal/fedora-atomic/kubernetes?ref=v1.10.4"
+  source = "git::https://github.com/poseidon/typhoon//bare-metal/fedora-atomic/kubernetes?ref=v1.10.5"

   providers = {
     local = "local.default"

@@ -360,9 +360,9 @@ bootkube[5]: Tearing down temporary bootstrap control plane...

 $ export KUBECONFIG=/home/user/.secrets/clusters/mercury/auth/kubeconfig
 $ kubectl get nodes
 NAME                STATUS  AGE  VERSION
-node1.example.com   Ready   11m  v1.10.4
-node2.example.com   Ready   11m  v1.10.4
-node3.example.com   Ready   11m  v1.10.4
+node1.example.com   Ready   11m  v1.10.5
+node2.example.com   Ready   11m  v1.10.5
+node3.example.com   Ready   11m  v1.10.5
 ```

 List the pods.
@@ -3,7 +3,7 @@
 !!! danger
     Typhoon for Fedora Atomic is alpha. Expect rough edges and changes.

-In this tutorial, we'll create a Kubernetes v1.10.4 cluster on DigitalOcean with Fedora Atomic.
+In this tutorial, we'll create a Kubernetes v1.10.5 cluster on DigitalOcean with Fedora Atomic.

 We'll declare a Kubernetes cluster using the Typhoon Terraform module. Then apply the changes to create controller droplets, worker droplets, DNS records, tags, and TLS assets. Instances are provisioned on first boot with cloud-init.

@@ -77,7 +77,7 @@ Define a Kubernetes cluster using the module `digital-ocean/fedora-atomic/kubern

 ```tf
 module "digital-ocean-nemo" {
-  source = "git::https://github.com/poseidon/typhoon//digital-ocean/fedora-atomic/kubernetes?ref=v1.10.4"
+  source = "git::https://github.com/poseidon/typhoon//digital-ocean/fedora-atomic/kubernetes?ref=v1.10.5"

   providers = {
     digitalocean = "digitalocean.default"

@@ -152,9 +152,9 @@ In 3-6 minutes, the Kubernetes cluster will be ready.

 $ export KUBECONFIG=/home/user/.secrets/clusters/nemo/auth/kubeconfig
 $ kubectl get nodes
 NAME             STATUS  AGE  VERSION
-10.132.110.130   Ready   10m  v1.10.4
-10.132.115.81    Ready   10m  v1.10.4
-10.132.124.107   Ready   10m  v1.10.4
+10.132.110.130   Ready   10m  v1.10.5
+10.132.115.81    Ready   10m  v1.10.5
+10.132.124.107   Ready   10m  v1.10.5
 ```

 List the pods.
@@ -3,7 +3,7 @@
 !!! danger
     Typhoon for Fedora Atomic is very alpha. Fedora does not publish official images for Google Cloud so you must prepare them yourself. Some addons don't work yet. Expect rough edges and changes.

-In this tutorial, we'll create a Kubernetes v1.10.4 cluster on Google Compute Engine with Fedora Atomic.
+In this tutorial, we'll create a Kubernetes v1.10.5 cluster on Google Compute Engine with Fedora Atomic.

 We'll declare a Kubernetes cluster using the Typhoon Terraform module. Then apply the changes to create a network, firewall rules, health checks, controller instances, worker managed instance group, load balancers, and TLS assets. Instances are provisioned on first boot with cloud-init.

@@ -119,7 +119,7 @@ Define a Kubernetes cluster using the module `google-cloud/fedora-atomic/kuberne

 ```tf
 module "google-cloud-yavin" {
-  source = "git::https://github.com/poseidon/typhoon//google-cloud/fedora-atomic/kubernetes?ref=v1.10.4"
+  source = "git::https://github.com/poseidon/typhoon//google-cloud/fedora-atomic/kubernetes?ref=v1.10.5"

   providers = {
     google = "google.default"

@@ -195,9 +195,9 @@ In 5-10 minutes, the Kubernetes cluster will be ready.

 $ export KUBECONFIG=/home/user/.secrets/clusters/yavin/auth/kubeconfig
 $ kubectl get nodes
 NAME                                        STATUS  AGE  VERSION
-yavin-controller-0.c.example-com.internal   Ready   6m   v1.10.4
-yavin-worker-jrbf.c.example-com.internal    Ready   5m   v1.10.4
-yavin-worker-mzdm.c.example-com.internal    Ready   5m   v1.10.4
+yavin-controller-0.c.example-com.internal   Ready   6m   v1.10.5
+yavin-worker-jrbf.c.example-com.internal    Ready   5m   v1.10.5
+yavin-worker-mzdm.c.example-com.internal    Ready   5m   v1.10.5
 ```

 List the pods.
@@ -1,8 +1,8 @@
 # AWS

-In this tutorial, we'll create a Kubernetes v1.10.4 cluster on AWS with Container Linux.
+In this tutorial, we'll create a Kubernetes v1.10.5 cluster on AWS with Container Linux.

-We'll declare a Kubernetes cluster using the Typhoon Terraform module. Then apply the changes to create a VPC, gateway, subnets, security groups, controller instances, worker auto-scaling group, network load balancers, and TLS assets.
+We'll declare a Kubernetes cluster using the Typhoon Terraform module. Then apply the changes to create a VPC, gateway, subnets, security groups, controller instances, worker auto-scaling group, network load balancer, and TLS assets.

 Controllers are provisioned to run an `etcd-member` peer and a `kubelet` service. Workers run just a `kubelet` service. A one-time [bootkube](https://github.com/kubernetes-incubator/bootkube) bootstrap schedules the `apiserver`, `scheduler`, `controller-manager`, and `kube-dns` on controllers and schedules `kube-proxy` and `calico` (or `flannel`) on every node. A generated `kubeconfig` provides `kubectl` access to the cluster.

@@ -96,7 +96,7 @@ Define a Kubernetes cluster using the module `aws/container-linux/kubernetes`.

 ```tf
 module "aws-tempest" {
-  source = "git::https://github.com/poseidon/typhoon//aws/container-linux/kubernetes?ref=v1.10.4"
+  source = "git::https://github.com/poseidon/typhoon//aws/container-linux/kubernetes?ref=v1.10.5"

   providers = {
     aws = "aws.default"

@@ -169,9 +169,9 @@ In 4-8 minutes, the Kubernetes cluster will be ready.

 $ export KUBECONFIG=/home/user/.secrets/clusters/tempest/auth/kubeconfig
 $ kubectl get nodes
 NAME            STATUS  AGE  VERSION
-ip-10-0-12-221  Ready   34m  v1.10.4
-ip-10-0-19-112  Ready   34m  v1.10.4
-ip-10-0-4-22    Ready   34m  v1.10.4
+ip-10-0-12-221  Ready   34m  v1.10.5
+ip-10-0-19-112  Ready   34m  v1.10.5
+ip-10-0-4-22    Ready   34m  v1.10.5
 ```

 List the pods.

@@ -260,4 +260,4 @@ Check the list of valid [instance types](https://aws.amazon.com/ec2/instance-types/).

 Do not choose a `controller_type` smaller than `t2.small`. Smaller instances are not sufficient for running a controller.

 !!! tip "MTU"
-    If your EC2 instance type supports [Jumbo frames](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/network_mtu.html#jumbo_frame_instances) (most do), we recommend you change the `network_mtu` to 8991! You will get better pod-to-pod bandwidth.
+    If your EC2 instance type supports [Jumbo frames](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/network_mtu.html#jumbo_frame_instances) (most do), we recommend you change the `network_mtu` to 8981! You will get better pod-to-pod bandwidth.

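To apply the new recommendation, set the MTU on the cluster module. A minimal sketch, assuming the `aws-tempest` module definition from the tutorial above (all other required arguments omitted):

```tf
module "aws-tempest" {
  source = "git::https://github.com/poseidon/typhoon//aws/container-linux/kubernetes?ref=v1.10.5"

  # 8981 (rather than 8991) leaves room for Calico's 20-byte IP-in-IP
  # header within the 9001-byte jumbo-frame MTU on most EC2 instance types
  network_mtu = 8981

  # ...remaining required arguments elided
}
```
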
@@ -1,6 +1,6 @@
 # Bare-Metal

-In this tutorial, we'll network boot and provision a Kubernetes v1.10.4 cluster on bare-metal with Container Linux.
+In this tutorial, we'll network boot and provision a Kubernetes v1.10.5 cluster on bare-metal with Container Linux.

 First, we'll deploy a [Matchbox](https://github.com/coreos/matchbox) service and set up a network boot environment. Then, we'll declare a Kubernetes cluster using the Typhoon Terraform module and power on machines. On PXE boot, machines will install Container Linux to disk, reboot into the disk install, and provision themselves as Kubernetes controllers or workers via Ignition.

@@ -174,7 +174,7 @@ Define a Kubernetes cluster using the module `bare-metal/container-linux/kubernetes`.

 ```tf
 module "bare-metal-mercury" {
-  source = "git::https://github.com/poseidon/typhoon//bare-metal/container-linux/kubernetes?ref=v1.10.4"
+  source = "git::https://github.com/poseidon/typhoon//bare-metal/container-linux/kubernetes?ref=v1.10.5"

   providers = {
     local = "local.default"

@@ -283,9 +283,9 @@ Apply complete! Resources: 55 added, 0 changed, 0 destroyed.

 To watch the install to disk (until machines reboot from disk), SSH to port 2222.

 ```
-# before v1.10.4
+# before v1.10.5
 $ ssh debug@node1.example.com
-# after v1.10.4
+# after v1.10.5
 $ ssh -p 2222 core@node1.example.com
 ```

@@ -310,9 +310,9 @@ bootkube[5]: Tearing down temporary bootstrap control plane...

 $ export KUBECONFIG=/home/user/.secrets/clusters/mercury/auth/kubeconfig
 $ kubectl get nodes
 NAME               STATUS  AGE  VERSION
-node1.example.com  Ready   11m  v1.10.4
-node2.example.com  Ready   11m  v1.10.4
-node3.example.com  Ready   11m  v1.10.4
+node1.example.com  Ready   11m  v1.10.5
+node2.example.com  Ready   11m  v1.10.5
+node3.example.com  Ready   11m  v1.10.5
 ```

 List the pods.

@@ -1,6 +1,6 @@
 # Digital Ocean

-In this tutorial, we'll create a Kubernetes v1.10.4 cluster on DigitalOcean with Container Linux.
+In this tutorial, we'll create a Kubernetes v1.10.5 cluster on DigitalOcean with Container Linux.

 We'll declare a Kubernetes cluster using the Typhoon Terraform module. Then apply the changes to create controller droplets, worker droplets, DNS records, tags, and TLS assets.

@@ -90,7 +90,7 @@ Define a Kubernetes cluster using the module `digital-ocean/container-linux/kubernetes`.

 ```tf
 module "digital-ocean-nemo" {
-  source = "git::https://github.com/poseidon/typhoon//digital-ocean/container-linux/kubernetes?ref=v1.10.4"
+  source = "git::https://github.com/poseidon/typhoon//digital-ocean/container-linux/kubernetes?ref=v1.10.5"

   providers = {
     digitalocean = "digitalocean.default"

@@ -164,9 +164,9 @@ In 3-6 minutes, the Kubernetes cluster will be ready.

 $ export KUBECONFIG=/home/user/.secrets/clusters/nemo/auth/kubeconfig
 $ kubectl get nodes
 NAME            STATUS  AGE  VERSION
-10.132.110.130  Ready   10m  v1.10.4
-10.132.115.81   Ready   10m  v1.10.4
-10.132.124.107  Ready   10m  v1.10.4
+10.132.110.130  Ready   10m  v1.10.5
+10.132.115.81   Ready   10m  v1.10.5
+10.132.124.107  Ready   10m  v1.10.5
 ```

 List the pods.

@@ -1,6 +1,6 @@
 # Google Cloud

-In this tutorial, we'll create a Kubernetes v1.10.4 cluster on Google Compute Engine with Container Linux.
+In this tutorial, we'll create a Kubernetes v1.10.5 cluster on Google Compute Engine with Container Linux.

 We'll declare a Kubernetes cluster using the Typhoon Terraform module. Then apply the changes to create a network, firewall rules, health checks, controller instances, worker managed instance group, load balancers, and TLS assets.

@@ -97,7 +97,7 @@ Define a Kubernetes cluster using the module `google-cloud/container-linux/kubernetes`.

 ```tf
 module "google-cloud-yavin" {
-  source = "git::https://github.com/poseidon/typhoon//google-cloud/container-linux/kubernetes?ref=v1.10.4"
+  source = "git::https://github.com/poseidon/typhoon//google-cloud/container-linux/kubernetes?ref=v1.10.5"

   providers = {
     google = "google.default"

@@ -172,9 +172,9 @@ In 4-8 minutes, the Kubernetes cluster will be ready.

 $ export KUBECONFIG=/home/user/.secrets/clusters/yavin/auth/kubeconfig
 $ kubectl get nodes
 NAME                                       STATUS  AGE  VERSION
-yavin-controller-0.c.example-com.internal  Ready   6m   v1.10.4
-yavin-worker-jrbf.c.example-com.internal   Ready   5m   v1.10.4
-yavin-worker-mzdm.c.example-com.internal   Ready   5m   v1.10.4
+yavin-controller-0.c.example-com.internal  Ready   6m   v1.10.5
+yavin-worker-jrbf.c.example-com.internal   Ready   5m   v1.10.5
+yavin-worker-mzdm.c.example-com.internal   Ready   5m   v1.10.5
 ```

 List the pods.

@@ -11,7 +11,7 @@ Typhoon distributes upstream Kubernetes, architectural conventions, and cluster

 ## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>

-* Kubernetes v1.10.4 (upstream, via [kubernetes-incubator/bootkube](https://github.com/kubernetes-incubator/bootkube))
+* Kubernetes v1.10.5 (upstream, via [kubernetes-incubator/bootkube](https://github.com/kubernetes-incubator/bootkube))
 * Single or multi-master, workloads isolated on workers, [Calico](https://www.projectcalico.org/) or [flannel](https://github.com/coreos/flannel) networking
 * On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
 * Advanced features like [worker pools](https://typhoon.psdn.io/advanced/worker-pools/) and [preemption](https://typhoon.psdn.io/google-cloud/#preemption) (varies by platform)

@@ -45,7 +45,7 @@ Define a Kubernetes cluster by using the Terraform module for your chosen platform

 ```tf
 module "google-cloud-yavin" {
-  source = "git::https://github.com/poseidon/typhoon//google-cloud/container-linux/kubernetes?ref=v1.10.4"
+  source = "git::https://github.com/poseidon/typhoon//google-cloud/container-linux/kubernetes?ref=v1.10.5"

   providers = {
     google = "google.default"

@@ -86,9 +86,9 @@ In 4-8 minutes (varies by platform), the cluster will be ready. This Google Cloud

 $ export KUBECONFIG=/home/user/.secrets/clusters/yavin/auth/kubeconfig
 $ kubectl get nodes
 NAME                                       STATUS  AGE  VERSION
-yavin-controller-0.c.example-com.internal  Ready   6m   v1.10.4
-yavin-worker-jrbf.c.example-com.internal   Ready   5m   v1.10.4
-yavin-worker-mzdm.c.example-com.internal   Ready   5m   v1.10.4
+yavin-controller-0.c.example-com.internal  Ready   6m   v1.10.5
+yavin-worker-jrbf.c.example-com.internal   Ready   5m   v1.10.5
+yavin-worker-mzdm.c.example-com.internal   Ready   5m   v1.10.5
 ```

 List the pods.

@@ -18,7 +18,7 @@ module "google-cloud-yavin" {
 }

 module "bare-metal-mercury" {
-  source = "git::https://github.com/poseidon/typhoon//bare-metal/container-linux/kubernetes?ref=v1.10.4"
+  source = "git::https://github.com/poseidon/typhoon//bare-metal/container-linux/kubernetes?ref=v1.10.5"
   ...
 }
 ```

@@ -25,7 +25,7 @@ Network performance varies based on the platform and CNI plugin. `iperf` was used

 |----------------------------|--------:|---------:|-------------:|
 | AWS (flannel)              | ?       | 976 MB/s | 900-999 MB/s |
 | AWS (calico, MTU 1480)     | ?       | 976 MB/s | 100-350 MB/s |
-| AWS (calico, MTU 8991)     | ?       | 976 MB/s | 900-999 MB/s |
+| AWS (calico, MTU 8981)     | ?       | 976 MB/s | 900-999 MB/s |
 | Bare-Metal (flannel)       | 1 GB/s  | 934 MB/s | 903 MB/s     |
 | Bare-Metal (calico)        | 1 GB/s  | 941 MB/s | 931 MB/s     |
 | Bare-Metal (flannel, bond) | 3 GB/s  | 2.3 GB/s | 1.17 GB/s    |

@@ -11,7 +11,7 @@ Typhoon distributes upstream Kubernetes, architectural conventions, and cluster

 ## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>

-* Kubernetes v1.10.4 (upstream, via [kubernetes-incubator/bootkube](https://github.com/kubernetes-incubator/bootkube))
+* Kubernetes v1.10.5 (upstream, via [kubernetes-incubator/bootkube](https://github.com/kubernetes-incubator/bootkube))
 * Single or multi-master, workloads isolated on workers, [Calico](https://www.projectcalico.org/) or [flannel](https://github.com/coreos/flannel) networking
 * On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
 * Ready for Ingress, Prometheus, Grafana, and other optional [addons](https://typhoon.psdn.io/addons/overview/)

@@ -1,6 +1,6 @@
 # Self-hosted Kubernetes assets (kubeconfig, manifests)
 module "bootkube" {
-  source = "git::https://github.com/poseidon/terraform-render-bootkube.git?ref=0e98e89e14a074768db13c4e050ed0c13319a0c1"
+  source = "git::https://github.com/poseidon/terraform-render-bootkube.git?ref=1d4db824f09246266a6b9e54df5d4df5dcd4477a"

   cluster_name = "${var.cluster_name}"
   api_servers  = ["${format("%s.%s", var.cluster_name, var.dns_zone)}"]

@@ -11,4 +11,7 @@ module "bootkube" {
   pod_cidr              = "${var.pod_cidr}"
   service_cidr          = "${var.service_cidr}"
   cluster_domain_suffix = "${var.cluster_domain_suffix}"
+
+  // temporary
+  apiserver_port = 443
 }

@@ -7,7 +7,7 @@ systemd:
     - name: 40-etcd-cluster.conf
       contents: |
         [Service]
-        Environment="ETCD_IMAGE_TAG=v3.3.6"
+        Environment="ETCD_IMAGE_TAG=v3.3.8"
         Environment="ETCD_NAME=${etcd_name}"
         Environment="ETCD_ADVERTISE_CLIENT_URLS=https://${etcd_domain}:2379"
         Environment="ETCD_INITIAL_ADVERTISE_PEER_URLS=https://${etcd_domain}:2380"

@@ -124,7 +124,7 @@ storage:
       contents:
         inline: |
          KUBELET_IMAGE_URL=docker://k8s.gcr.io/hyperkube
-         KUBELET_IMAGE_TAG=v1.10.4
+         KUBELET_IMAGE_TAG=v1.10.5
     - path: /etc/sysctl.d/max-user-watches.conf
      filesystem: root
      contents:

google-cloud/container-linux/kubernetes/ingress.tf (new file, 96 lines)
@@ -0,0 +1,96 @@

# Static IPv4 address for the TCP Proxy Load Balancer
resource "google_compute_global_address" "ingress-ipv4" {
  name       = "${var.cluster_name}-ingress-ip"
  ip_version = "IPV4"
}

# Forward IPv4 TCP traffic to the HTTP proxy load balancer
# Google Cloud does not allow TCP proxies for port 80. Must use HTTP proxy.
resource "google_compute_global_forwarding_rule" "ingress-http" {
  name        = "${var.cluster_name}-ingress-http"
  ip_address  = "${google_compute_global_address.ingress-ipv4.address}"
  ip_protocol = "TCP"
  port_range  = "80"
  target      = "${google_compute_target_http_proxy.ingress-http.self_link}"
}

# Forward IPv4 TCP traffic to the TCP proxy load balancer
resource "google_compute_global_forwarding_rule" "ingress-https" {
  name        = "${var.cluster_name}-ingress-https"
  ip_address  = "${google_compute_global_address.ingress-ipv4.address}"
  ip_protocol = "TCP"
  port_range  = "443"
  target      = "${google_compute_target_tcp_proxy.ingress-https.self_link}"
}

# HTTP proxy load balancer for ingress controllers
resource "google_compute_target_http_proxy" "ingress-http" {
  name        = "${var.cluster_name}-ingress-http"
  description = "Distribute HTTP load across ${var.cluster_name} workers"
  url_map     = "${google_compute_url_map.ingress-http.self_link}"
}

# TCP proxy load balancer for ingress controllers
resource "google_compute_target_tcp_proxy" "ingress-https" {
  name            = "${var.cluster_name}-ingress-https"
  description     = "Distribute HTTPS load across ${var.cluster_name} workers"
  backend_service = "${google_compute_backend_service.ingress-https.self_link}"
}

# HTTP URL Map (required)
resource "google_compute_url_map" "ingress-http" {
  name = "${var.cluster_name}-ingress-http"

  # Do not add host/path rules for applications here. Use Ingress resources.
  default_service = "${google_compute_backend_service.ingress-http.self_link}"
}

# Backend service backed by managed instance group of workers
resource "google_compute_backend_service" "ingress-http" {
  name        = "${var.cluster_name}-ingress-http"
  description = "${var.cluster_name} ingress service"

  protocol         = "HTTP"
  port_name        = "http"
  session_affinity = "NONE"
  timeout_sec      = "60"

  backend {
    group = "${module.workers.instance_group}"
  }

  health_checks = ["${google_compute_health_check.ingress.self_link}"]
}

# Backend service backed by managed instance group of workers
resource "google_compute_backend_service" "ingress-https" {
  name        = "${var.cluster_name}-ingress-https"
  description = "${var.cluster_name} ingress service"

  protocol         = "TCP"
  port_name        = "https"
  session_affinity = "NONE"
  timeout_sec      = "60"

  backend {
    group = "${module.workers.instance_group}"
  }

  health_checks = ["${google_compute_health_check.ingress.self_link}"]
}

# Ingress HTTP Health Check
resource "google_compute_health_check" "ingress" {
  name        = "${var.cluster_name}-ingress-health"
  description = "Health check for Ingress controller"

  timeout_sec        = 5
  check_interval_sec = 5

  healthy_threshold   = 2
  unhealthy_threshold = 4

  http_health_check {
    port         = 10254
    request_path = "/healthz"
  }
}

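Since the module now exports this global address (see the `ingress_static_ipv4` output below), clients can point DNS at it. A minimal sketch, assuming an existing Cloud DNS managed zone; the zone and record names are illustrative, not part of this change:

```tf
# Illustrative: resolve app hostnames to the cluster's global ingress IPv4 address
resource "google_dns_record_set" "apps" {
  managed_zone = "example-zone"         # assumed to exist
  name         = "*.apps.example.com."  # hypothetical wildcard record
  type         = "A"
  ttl          = 300
  rrdatas      = ["${module.google-cloud-yavin.ingress_static_ipv4}"]
}
```
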
@@ -161,3 +161,17 @@ resource "google_compute_firewall" "internal-kubelet-readonly" {
   source_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
   target_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
 }
+
+resource "google_compute_firewall" "google-health-checks" {
+  name    = "${var.cluster_name}-google-health-checks"
+  network = "${google_compute_network.network.name}"
+
+  allow {
+    protocol = "tcp"
+    ports    = [10254]
+  }
+
+  # https://cloud.google.com/compute/docs/load-balancing/tcp-ssl/tcp-proxy#health-checking
+  source_ranges = ["130.211.0.0/22", "35.191.0.0/16"]
+  target_tags   = ["${var.cluster_name}-worker"]
+}

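Custom backend services that health check workers on other ports need an equivalent rule admitting Google's probe ranges. A minimal sketch under the same naming conventions, with a hypothetical health-check port 9090:

```tf
resource "google_compute_firewall" "custom-health-checks" {
  name    = "${var.cluster_name}-custom-health-checks"
  network = "${google_compute_network.network.name}"

  allow {
    protocol = "tcp"
    ports    = [9090]  # hypothetical custom health-check port
  }

  # Google's health check probe ranges, per the link above
  source_ranges = ["130.211.0.0/22", "35.191.0.0/16"]
  target_tags   = ["${var.cluster_name}-worker"]
}
```
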
@@ -3,12 +3,17 @@ output "controllers_ipv4_public" {
   value = ["${google_compute_instance.controllers.*.network_interface.0.access_config.0.assigned_nat_ip}"]
 }

-output "ingress_static_ip" {
-  value = "${module.workers.ingress_static_ip}"
+# Outputs for Kubernetes Ingress
+
+output "ingress_static_ipv4" {
+  description = "Global IPv4 address for proxy load balancing to the nearest Ingress controller"
+  value       = "${google_compute_global_address.ingress-ipv4.address}"
 }

-output "network_self_link" {
-  value = "${google_compute_network.network.self_link}"
+# Deprecated, use ingress_static_ipv4
+output "ingress_static_ip" {
+  description = "Global IPv4 address for proxy load balancing to the nearest Ingress controller"
+  value       = "${google_compute_global_address.ingress-ipv4.address}"
 }

 # Outputs for worker pools

@@ -20,3 +25,16 @@ output "network_name" {
 output "kubeconfig" {
   value = "${module.bootkube.kubeconfig}"
 }
+
+# Outputs for custom firewalling
+
+output "network_self_link" {
+  value = "${google_compute_network.network.self_link}"
+}
+
+# Outputs for custom load balancing
+
+output "worker_instance_group" {
+  description = "Full URL of the worker managed instance group"
+  value       = "${module.workers.instance_group}"
+}

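As a sketch of the custom load balancing the new `worker_instance_group` output enables (resource names are hypothetical; a matching health check and firewall rule, as above, are assumed to exist):

```tf
resource "google_compute_backend_service" "custom" {
  name        = "custom-tcp"
  protocol    = "TCP"
  port_name   = "https"
  timeout_sec = "60"

  backend {
    # worker managed instance group exported by the cluster module
    group = "${module.google-cloud-yavin.worker_instance_group}"
  }

  # assumed: a google_compute_health_check named "custom" defined elsewhere
  health_checks = ["${google_compute_health_check.custom.self_link}"]
}
```
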
@@ -94,7 +94,7 @@ storage:
       contents:
         inline: |
          KUBELET_IMAGE_URL=docker://k8s.gcr.io/hyperkube
-         KUBELET_IMAGE_TAG=v1.10.4
+         KUBELET_IMAGE_TAG=v1.10.5
     - path: /etc/sysctl.d/max-user-watches.conf
      filesystem: root
      contents:

@@ -112,7 +112,7 @@ storage:
           --volume config,kind=host,source=/etc/kubernetes \
           --mount volume=config,target=/etc/kubernetes \
           --insecure-options=image \
-          docker://k8s.gcr.io/hyperkube:v1.10.4 \
+          docker://k8s.gcr.io/hyperkube:v1.10.5 \
           --net=host \
           --dns=host \
           --exec=/kubectl -- --kubeconfig=/etc/kubernetes/kubeconfig delete node $(hostname)

(deleted file, 45 lines: the workers module's old Network Load Balancer resources)
@@ -1,45 +0,0 @@

# Static IPv4 address for the Network Load Balancer
resource "google_compute_address" "ingress-ip" {
  name = "${var.name}-ingress-ip"
}

# Network Load Balancer (i.e. forwarding rules)
resource "google_compute_forwarding_rule" "worker-http-lb" {
  name       = "${var.name}-worker-http-rule"
  ip_address = "${google_compute_address.ingress-ip.address}"
  port_range = "80"
  target     = "${google_compute_target_pool.workers.self_link}"
}

resource "google_compute_forwarding_rule" "worker-https-lb" {
  name       = "${var.name}-worker-https-rule"
  ip_address = "${google_compute_address.ingress-ip.address}"
  port_range = "443"
  target     = "${google_compute_target_pool.workers.self_link}"
}

# Network Load Balancer target pool of instances.
resource "google_compute_target_pool" "workers" {
  name = "${var.name}-worker-pool"

  health_checks = [
    "${google_compute_http_health_check.ingress.name}",
  ]

  session_affinity = "NONE"
}

# Ingress HTTP Health Check
resource "google_compute_http_health_check" "ingress" {
  name        = "${var.name}-ingress-health"
  description = "Health check Ingress controller health host port"

  timeout_sec        = 5
  check_interval_sec = 5

  healthy_threshold   = 2
  unhealthy_threshold = 4

  port         = 10254
  request_path = "/healthz"
}

@@ -1,3 +1,4 @@
-output "ingress_static_ip" {
-  value = "${google_compute_address.ingress-ip.address}"
+output "instance_group" {
+  description = "Full URL of the worker managed instance group"
+  value       = "${google_compute_region_instance_group_manager.workers.instance_group}"
 }

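Worker pools created with the workers module can expose their group the same way for custom load balancing. A minimal sketch, assuming a hypothetical `yavin-worker-pool` module instance:

```tf
# Hypothetical: re-export a worker pool's managed instance group
output "pool_instance_group" {
  description = "Full URL of the worker pool's managed instance group"
  value       = "${module.yavin-worker-pool.instance_group}"
}
```
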
@@ -1,5 +1,4 @@
-# Regional managed instance group maintains a homogeneous set of workers that
-# span the zones in the region.
+# Regional managed instance group of workers
 resource "google_compute_region_instance_group_manager" "workers" {
   name        = "${var.name}-worker-group"
   description = "Compute instance group of ${var.name} workers"

@@ -11,30 +10,18 @@ resource "google_compute_region_instance_group_manager" "workers" {

   target_size = "${var.count}"

-  # target pool to which instances in the group should be added
-  target_pools = [
-    "${google_compute_target_pool.workers.self_link}",
-  ]
-}
-
-# Worker Container Linux Config
-data "template_file" "worker_config" {
-  template = "${file("${path.module}/cl/worker.yaml.tmpl")}"
-
-  vars = {
-    kubeconfig            = "${indent(10, var.kubeconfig)}"
-    ssh_authorized_key    = "${var.ssh_authorized_key}"
-    k8s_dns_service_ip    = "${cidrhost(var.service_cidr, 10)}"
-    cluster_domain_suffix = "${var.cluster_domain_suffix}"
-  }
-}
-
-data "ct_config" "worker_ign" {
-  content      = "${data.template_file.worker_config.rendered}"
-  pretty_print = false
-  snippets     = ["${var.clc_snippets}"]
-}
+  named_port {
+    name = "http"
+    port = "80"
+  }
+
+  named_port {
+    name = "https"
+    port = "443"
+  }
+}

 # Worker instance template
 resource "google_compute_instance_template" "worker" {
   name_prefix = "${var.name}-worker-"
   description = "Worker Instance template"

@@ -76,3 +63,21 @@ resource "google_compute_instance_template" "worker" {
     create_before_destroy = true
   }
 }
+
+# Worker Container Linux Config
+data "template_file" "worker_config" {
+  template = "${file("${path.module}/cl/worker.yaml.tmpl")}"
+
+  vars = {
+    kubeconfig            = "${indent(10, var.kubeconfig)}"
+    ssh_authorized_key    = "${var.ssh_authorized_key}"
+    k8s_dns_service_ip    = "${cidrhost(var.service_cidr, 10)}"
+    cluster_domain_suffix = "${var.cluster_domain_suffix}"
+  }
+}
+
+data "ct_config" "worker_ign" {
+  content      = "${data.template_file.worker_config.rendered}"
+  pretty_print = false
+  snippets     = ["${var.clc_snippets}"]
+}

@@ -1,6 +1,6 @@
 # Self-hosted Kubernetes assets (kubeconfig, manifests)
 module "bootkube" {
-  source = "git::https://github.com/poseidon/terraform-render-bootkube.git?ref=0e98e89e14a074768db13c4e050ed0c13319a0c1"
+  source = "git::https://github.com/poseidon/terraform-render-bootkube.git?ref=1d4db824f09246266a6b9e54df5d4df5dcd4477a"

   cluster_name = "${var.cluster_name}"
   api_servers  = ["${format("%s.%s", var.cluster_name, var.dns_zone)}"]

@@ -14,4 +14,7 @@ module "bootkube" {

   # Fedora
   trusted_certs_dir = "/etc/pki/tls/certs"
+
+  // temporary
+  apiserver_port = 443
 }

@@ -94,8 +94,8 @@ bootcmd:
 runcmd:
   - [systemctl, daemon-reload]
   - [systemctl, restart, NetworkManager]
-  - "atomic install --system --name=etcd quay.io/poseidon/etcd:v3.3.6"
-  - "atomic install --system --name=kubelet quay.io/poseidon/kubelet:v1.10.4"
+  - "atomic install --system --name=etcd quay.io/poseidon/etcd:v3.3.8"
+  - "atomic install --system --name=kubelet quay.io/poseidon/kubelet:v1.10.5"
   - "atomic install --system --name=bootkube quay.io/poseidon/bootkube:v0.12.0"
   - [systemctl, start, --no-block, etcd.service]
   - [systemctl, enable, cloud-metadata.service]

google-cloud/fedora-atomic/kubernetes/ingress.tf (new file, 96 lines)
@@ -0,0 +1,96 @@

# Static IPv4 address for the TCP Proxy Load Balancer
resource "google_compute_global_address" "ingress-ipv4" {
  name       = "${var.cluster_name}-ingress-ip"
  ip_version = "IPV4"
}

# Forward IPv4 TCP traffic to the HTTP proxy load balancer
# Google Cloud does not allow TCP proxies for port 80. Must use HTTP proxy.
resource "google_compute_global_forwarding_rule" "ingress-http" {
  name        = "${var.cluster_name}-ingress-http"
  ip_address  = "${google_compute_global_address.ingress-ipv4.address}"
  ip_protocol = "TCP"
  port_range  = "80"
  target      = "${google_compute_target_http_proxy.ingress-http.self_link}"
}

# Forward IPv4 TCP traffic to the TCP proxy load balancer
resource "google_compute_global_forwarding_rule" "ingress-https" {
  name        = "${var.cluster_name}-ingress-https"
  ip_address  = "${google_compute_global_address.ingress-ipv4.address}"
  ip_protocol = "TCP"
  port_range  = "443"
  target      = "${google_compute_target_tcp_proxy.ingress-https.self_link}"
}

# HTTP proxy load balancer for ingress controllers
resource "google_compute_target_http_proxy" "ingress-http" {
  name        = "${var.cluster_name}-ingress-http"
  description = "Distribute HTTP load across ${var.cluster_name} workers"
  url_map     = "${google_compute_url_map.ingress-http.self_link}"
}

# TCP proxy load balancer for ingress controllers
resource "google_compute_target_tcp_proxy" "ingress-https" {
  name            = "${var.cluster_name}-ingress-https"
  description     = "Distribute HTTPS load across ${var.cluster_name} workers"
  backend_service = "${google_compute_backend_service.ingress-https.self_link}"
}

# HTTP URL Map (required)
resource "google_compute_url_map" "ingress-http" {
  name = "${var.cluster_name}-ingress-http"

  # Do not add host/path rules for applications here. Use Ingress resources.
  default_service = "${google_compute_backend_service.ingress-http.self_link}"
}

# Backend service backed by managed instance group of workers
resource "google_compute_backend_service" "ingress-http" {
  name        = "${var.cluster_name}-ingress-http"
  description = "${var.cluster_name} ingress service"

  protocol         = "HTTP"
  port_name        = "http"
  session_affinity = "NONE"
  timeout_sec      = "60"

  backend {
    group = "${module.workers.instance_group}"
  }

  health_checks = ["${google_compute_health_check.ingress.self_link}"]
}

# Backend service backed by managed instance group of workers
resource "google_compute_backend_service" "ingress-https" {
  name        = "${var.cluster_name}-ingress-https"
  description = "${var.cluster_name} ingress service"

  protocol         = "TCP"
  port_name        = "https"
  session_affinity = "NONE"
  timeout_sec      = "60"

  backend {
    group = "${module.workers.instance_group}"
  }

  health_checks = ["${google_compute_health_check.ingress.self_link}"]
}

# Ingress HTTP Health Check
resource "google_compute_health_check" "ingress" {
  name        = "${var.cluster_name}-ingress-health"
  description = "Health check for Ingress controller"

  timeout_sec        = 5
  check_interval_sec = 5

  healthy_threshold   = 2
  unhealthy_threshold = 4

  http_health_check {
    port         = 10254
    request_path = "/healthz"
  }
}

@@ -161,3 +161,17 @@ resource "google_compute_firewall" "internal-kubelet-readonly" {
   source_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
   target_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
 }
+
+resource "google_compute_firewall" "google-health-checks" {
+  name    = "${var.cluster_name}-google-health-checks"
+  network = "${google_compute_network.network.name}"
+
+  allow {
+    protocol = "tcp"
+    ports    = [10254]
+  }
+
+  # https://cloud.google.com/compute/docs/load-balancing/tcp-ssl/tcp-proxy#health-checking
+  source_ranges = ["130.211.0.0/22", "35.191.0.0/16"]
+  target_tags   = ["${var.cluster_name}-worker"]
+}

@@ -1,9 +1,14 @@
-output "ingress_static_ip" {
-  value = "${module.workers.ingress_static_ip}"
+# Outputs for Kubernetes Ingress
+
+output "ingress_static_ipv4" {
+  description = "Global IPv4 address for proxy load balancing to the nearest Ingress controller"
+  value       = "${google_compute_global_address.ingress-ipv4.address}"
 }

-output "network_self_link" {
-  value = "${google_compute_network.network.self_link}"
+# Deprecated, use ingress_static_ipv4
+output "ingress_static_ip" {
+  description = "Global IPv4 address for proxy load balancing to the nearest Ingress controller"
+  value       = "${google_compute_global_address.ingress-ipv4.address}"
 }

 # Outputs for worker pools

@@ -15,3 +20,16 @@ output "network_name" {
 output "kubeconfig" {
   value = "${module.bootkube.kubeconfig}"
 }
+
+# Outputs for custom firewalling
+
+output "network_self_link" {
+  value = "${google_compute_network.network.self_link}"
+}
+
+# Outputs for custom load balancing
+
+output "worker_instance_group" {
+  description = "Full URL of the worker managed instance group"
+  value       = "${module.workers.instance_group}"
+}

@@ -71,7 +71,7 @@ runcmd:
   - [systemctl, daemon-reload]
   - [systemctl, restart, NetworkManager]
   - [systemctl, enable, cloud-metadata.service]
-  - "atomic install --system --name=kubelet quay.io/poseidon/kubelet:v1.10.4"
+  - "atomic install --system --name=kubelet quay.io/poseidon/kubelet:v1.10.5"
   - [systemctl, start, --no-block, kubelet.service]
 users:
   - default

(deleted file, 45 lines: the workers module's old Network Load Balancer resources)
@@ -1,45 +0,0 @@

# Static IPv4 address for the Network Load Balancer
resource "google_compute_address" "ingress-ip" {
  name = "${var.name}-ingress-ip"
}

# Network Load Balancer (i.e. forwarding rules)
resource "google_compute_forwarding_rule" "worker-http-lb" {
  name       = "${var.name}-worker-http-rule"
  ip_address = "${google_compute_address.ingress-ip.address}"
  port_range = "80"
  target     = "${google_compute_target_pool.workers.self_link}"
}

resource "google_compute_forwarding_rule" "worker-https-lb" {
  name       = "${var.name}-worker-https-rule"
  ip_address = "${google_compute_address.ingress-ip.address}"
  port_range = "443"
  target     = "${google_compute_target_pool.workers.self_link}"
}

# Network Load Balancer target pool of instances.
resource "google_compute_target_pool" "workers" {
  name = "${var.name}-worker-pool"

  health_checks = [
    "${google_compute_http_health_check.ingress.name}",
  ]

  session_affinity = "NONE"
}

# Ingress HTTP Health Check
resource "google_compute_http_health_check" "ingress" {
  name        = "${var.name}-ingress-health"
  description = "Health check Ingress controller health host port"

  timeout_sec        = 5
  check_interval_sec = 5

  healthy_threshold   = 2
  unhealthy_threshold = 4

  port         = 10254
  request_path = "/healthz"
}

@@ -1,3 +1,4 @@
-output "ingress_static_ip" {
-  value = "${google_compute_address.ingress-ip.address}"
+output "instance_group" {
+  description = "Full URL of the worker managed instance group"
+  value       = "${google_compute_region_instance_group_manager.workers.instance_group}"
 }

@@ -1,5 +1,4 @@
-# Regional managed instance group maintains a homogeneous set of workers that
-# span the zones in the region.
+# Regional managed instance group of workers
 resource "google_compute_region_instance_group_manager" "workers" {
   name        = "${var.name}-worker-group"
   description = "Compute instance group of ${var.name} workers"

@@ -11,12 +10,18 @@ resource "google_compute_region_instance_group_manager" "workers" {

   target_size = "${var.count}"

-  # target pool to which instances in the group should be added
-  target_pools = [
-    "${google_compute_target_pool.workers.self_link}",
-  ]
+  named_port {
+    name = "http"
+    port = "80"
+  }
+
+  named_port {
+    name = "https"
+    port = "443"
+  }
 }

 # Worker instance template
 resource "google_compute_instance_template" "worker" {
   name_prefix = "${var.name}-worker-"
   description = "Worker Instance template"