diff --git a/aws/fedora-cloud/kubernetes/LICENSE b/aws/fedora-cloud/kubernetes/LICENSE
new file mode 100644
index 00000000..bd9a5eea
--- /dev/null
+++ b/aws/fedora-cloud/kubernetes/LICENSE
@@ -0,0 +1,23 @@
+The MIT License (MIT)
+
+Copyright (c) 2017 Typhoon Authors
+Copyright (c) 2017 Dalton Hubble
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
diff --git a/aws/fedora-cloud/kubernetes/README.md b/aws/fedora-cloud/kubernetes/README.md
new file mode 100644
index 00000000..e8bdaa82
--- /dev/null
+++ b/aws/fedora-cloud/kubernetes/README.md
@@ -0,0 +1,23 @@
+# Typhoon
+
+Typhoon is a minimal and free Kubernetes distribution.
+
+* Minimal, stable base Kubernetes distribution
+* Declarative infrastructure and configuration
+* Free (freedom and cost) and privacy-respecting
+* Practical for labs, datacenters, and clouds
+
+Typhoon distributes upstream Kubernetes, architectural conventions, and cluster addons, much like a GNU/Linux distribution provides the Linux kernel and userspace components.
+
+## Features
+
+* Kubernetes v1.9.6 (upstream, via [kubernetes-incubator/bootkube](https://github.com/kubernetes-incubator/bootkube))
+* Single or multi-master, workloads isolated on workers, [Calico](https://www.projectcalico.org/) or [flannel](https://github.com/coreos/flannel) networking
+* On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
+* Advanced features like [worker pools](https://typhoon.psdn.io/advanced/worker-pools/)
+* Ready for Ingress, Prometheus, Grafana, and other optional [addons](https://typhoon.psdn.io/addons/overview/)
+
+## Docs
+
+Please see the [official docs](https://typhoon.psdn.io) and the AWS [tutorial](https://typhoon.psdn.io/aws/).
+
diff --git a/aws/fedora-cloud/kubernetes/ami.tf b/aws/fedora-cloud/kubernetes/ami.tf
new file mode 100644
index 00000000..8aa1a486
--- /dev/null
+++ b/aws/fedora-cloud/kubernetes/ami.tf
@@ -0,0 +1,19 @@
+data "aws_ami" "fedora" {
+ most_recent = true
+ owners = ["125523088429"]
+
+ filter {
+ name = "architecture"
+ values = ["x86_64"]
+ }
+
+ filter {
+ name = "virtualization-type"
+ values = ["hvm"]
+ }
+
+ filter {
+ name = "name"
+ values = ["Fedora-Cloud-Base-27*-standard-0"]
+ }
+}
diff --git a/aws/fedora-cloud/kubernetes/apiserver.tf b/aws/fedora-cloud/kubernetes/apiserver.tf
new file mode 100644
index 00000000..f29d1f8d
--- /dev/null
+++ b/aws/fedora-cloud/kubernetes/apiserver.tf
@@ -0,0 +1,69 @@
+# kube-apiserver Network Load Balancer DNS Record
+resource "aws_route53_record" "apiserver" {
+ zone_id = "${var.dns_zone_id}"
+
+ name = "${format("%s.%s.", var.cluster_name, var.dns_zone)}"
+ type = "A"
+
+ # AWS recommends their special "alias" records for ELBs
+ alias {
+ name = "${aws_lb.apiserver.dns_name}"
+ zone_id = "${aws_lb.apiserver.zone_id}"
+ evaluate_target_health = true
+ }
+}
+
+# Network Load Balancer for apiservers
+resource "aws_lb" "apiserver" {
+ name = "${var.cluster_name}-apiserver"
+ load_balancer_type = "network"
+ internal = false
+
+ subnets = ["${aws_subnet.public.*.id}"]
+
+ enable_cross_zone_load_balancing = true
+}
+
+# Forward TCP traffic on port 443 to controllers
+resource "aws_lb_listener" "apiserver-https" {
+ load_balancer_arn = "${aws_lb.apiserver.arn}"
+ protocol = "TCP"
+ port = "443"
+
+ default_action {
+ type = "forward"
+ target_group_arn = "${aws_lb_target_group.controllers.arn}"
+ }
+}
+
+# Target group of controllers
+resource "aws_lb_target_group" "controllers" {
+ name = "${var.cluster_name}-controllers"
+ vpc_id = "${aws_vpc.network.id}"
+ target_type = "instance"
+
+ protocol = "TCP"
+ port = 443
+
+  # TCP health check for the kube-apiserver port
+ health_check {
+ protocol = "TCP"
+ port = 443
+
+ # NLBs required to use same healthy and unhealthy thresholds
+ healthy_threshold = 3
+ unhealthy_threshold = 3
+
+ # Interval between health checks required to be 10 or 30
+ interval = 10
+ }
+}
+
+# Attach controller instances to apiserver NLB
+resource "aws_lb_target_group_attachment" "controllers" {
+ count = "${var.controller_count}"
+
+ target_group_arn = "${aws_lb_target_group.controllers.arn}"
+ target_id = "${element(aws_instance.controllers.*.id, count.index)}"
+ port = 443
+}
diff --git a/aws/fedora-cloud/kubernetes/bootkube.tf b/aws/fedora-cloud/kubernetes/bootkube.tf
new file mode 100644
index 00000000..eed1c531
--- /dev/null
+++ b/aws/fedora-cloud/kubernetes/bootkube.tf
@@ -0,0 +1,14 @@
+# Self-hosted Kubernetes assets (kubeconfig, manifests)
+module "bootkube" {
+ source = "git::https://github.com/poseidon/terraform-render-bootkube.git?ref=88b361207d42ec3121930a4add6b64ba7cf18360"
+
+ cluster_name = "${var.cluster_name}"
+ api_servers = ["${format("%s.%s", var.cluster_name, var.dns_zone)}"]
+ etcd_servers = ["${aws_route53_record.etcds.*.fqdn}"]
+ asset_dir = "${var.asset_dir}"
+ networking = "${var.networking}"
+ network_mtu = "${var.network_mtu}"
+ pod_cidr = "${var.pod_cidr}"
+ service_cidr = "${var.service_cidr}"
+ cluster_domain_suffix = "${var.cluster_domain_suffix}"
+}
diff --git a/aws/fedora-cloud/kubernetes/cloudinit/controller.yaml.tmpl b/aws/fedora-cloud/kubernetes/cloudinit/controller.yaml.tmpl
new file mode 100644
index 00000000..9a441c5f
--- /dev/null
+++ b/aws/fedora-cloud/kubernetes/cloudinit/controller.yaml.tmpl
@@ -0,0 +1,141 @@
+#cloud-config
+yum_repos:
+ kubernetes:
+ name: kubernetes
+ baseurl: https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
+ enabled: true
+ gpgcheck: true
+ gpgkey: https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
+packages:
+ - vim
+ - docker
+ - kubelet
+ - nfs-utils
+write_files:
+ - path: /etc/systemd/system/etcd-member.service
+ content: |
+ [Unit]
+ Description=etcd-member
+ Documentation=https://github.com/coreos/etcd
+ Wants=network-online.target network.target
+ After=network-online.target
+ Requires=docker.service
+ After=docker.service
+ [Service]
+ EnvironmentFile=/etc/etcd/etcd.conf
+ ExecStartPre=/bin/mkdir -p /var/lib/etcd
+ ExecStart=/usr/bin/docker run --rm --name etcd-member \
+ --net=host \
+ -v /usr/share/ca-certificates:/usr/share/ca-certificates:ro,z \
+ -v /etc/ssl/etcd:/etc/ssl/certs:ro,Z \
+ -v /var/lib/etcd:/var/lib/etcd:Z \
+ --env-file=/etc/etcd/etcd.conf \
+ quay.io/coreos/etcd:v3.3.2
+ ExecStop=/usr/bin/docker stop etcd-member
+ Restart=on-failure
+ RestartSec=10s
+ TimeoutStartSec=0
+ LimitNOFILE=40000
+ [Install]
+ WantedBy=multi-user.target
+ - path: /etc/etcd/etcd.conf
+ content: |
+ ETCD_NAME=${etcd_name}
+ ETCD_DATA_DIR=/var/lib/etcd
+ ETCD_ADVERTISE_CLIENT_URLS=https://${etcd_domain}:2379
+ ETCD_INITIAL_ADVERTISE_PEER_URLS=https://${etcd_domain}:2380
+ ETCD_LISTEN_CLIENT_URLS=https://0.0.0.0:2379
+ ETCD_LISTEN_PEER_URLS=https://0.0.0.0:2380
+ ETCD_INITIAL_CLUSTER=${etcd_initial_cluster}
+ ETCD_STRICT_RECONFIG_CHECK=true
+ ETCD_TRUSTED_CA_FILE=/etc/ssl/certs/etcd/server-ca.crt
+ ETCD_CERT_FILE=/etc/ssl/certs/etcd/server.crt
+ ETCD_KEY_FILE=/etc/ssl/certs/etcd/server.key
+ ETCD_CLIENT_CERT_AUTH=true
+ ETCD_PEER_TRUSTED_CA_FILE=/etc/ssl/certs/etcd/peer-ca.crt
+ ETCD_PEER_CERT_FILE=/etc/ssl/certs/etcd/peer.crt
+ ETCD_PEER_KEY_FILE=/etc/ssl/certs/etcd/peer.key
+ ETCD_PEER_CLIENT_CERT_AUTH=true
+ - path: /etc/systemd/system/kubelet.service.d/10-typhoon.conf
+ content: |
+ [Unit]
+ Description=Kubelet
+ Wants=rpc-statd.service
+ [Service]
+ ExecStartPre=/bin/mkdir -p /opt/cni/bin
+ ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
+ ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
+ ExecStartPre=/bin/mkdir -p /etc/kubernetes/checkpoint-secrets
+ ExecStartPre=/bin/mkdir -p /etc/kubernetes/inactive-manifests
+ ExecStartPre=/bin/mkdir -p /var/lib/cni
+ ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins
+ ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
+ ExecStart=
+ ExecStart=/usr/bin/kubelet \
+ --allow-privileged \
+ --anonymous-auth=false \
+ --cgroup-driver=systemd \
+ --client-ca-file=/etc/kubernetes/ca.crt \
+ --cluster_dns=${k8s_dns_service_ip} \
+ --cluster_domain=${cluster_domain_suffix} \
+ --cni-conf-dir=/etc/kubernetes/cni/net.d \
+ --exit-on-lock-contention \
+ --kubeconfig=/etc/kubernetes/kubeconfig \
+ --lock-file=/var/run/lock/kubelet.lock \
+ --network-plugin=cni \
+ --node-labels=node-role.kubernetes.io/master \
+ --node-labels=node-role.kubernetes.io/controller="true" \
+ --pod-manifest-path=/etc/kubernetes/manifests \
+ --register-with-taints=node-role.kubernetes.io/master=:NoSchedule \
+ --volume-plugin-dir=/var/lib/kubelet/volumeplugins
+ Restart=always
+ RestartSec=10
+ [Install]
+ WantedBy=multi-user.target
+ - path: /etc/kubernetes/kubeconfig
+ permissions: '0644'
+ content: |
+ ${kubeconfig}
+ - path: /etc/selinux/config
+ content: |
+ SELINUX=permissive
+ - path: /etc/systemd/system/bootkube.service
+ content: |
+ [Unit]
+ Description=Bootstrap a Kubernetes cluster
+ ConditionPathExists=!/opt/bootkube/init_bootkube.done
+ [Service]
+ Type=oneshot
+ RemainAfterExit=true
+ WorkingDirectory=/opt/bootkube
+ ExecStart=/opt/bootkube/bootkube-start
+ ExecStartPost=/bin/touch /opt/bootkube/init_bootkube.done
+ [Install]
+ WantedBy=multi-user.target
+ - path: /opt/bootkube/bootkube-start
+ permissions: '0544'
+ content: |
+ #!/bin/bash -e
+ # Wrapper for bootkube start
+ [ -n "$(ls /opt/bootkube/assets/manifests-*/* 2>/dev/null)" ] && mv /opt/bootkube/assets/manifests-*/* /opt/bootkube/assets/manifests && rm -rf /opt/bootkube/assets/manifests-*
+ /usr/bin/docker run --rm --name bootkube \
+ --volume /etc/kubernetes:/etc/kubernetes:Z \
+ --volume /opt/bootkube/assets:/assets:Z \
+ --entrypoint=/bootkube \
+ quay.io/coreos/bootkube:v0.11.0 start --asset-dir=/assets
+runcmd:
+ - [systemctl, daemon-reload]
+ - [systemctl, enable, docker.service]
+ - [systemctl, start, --no-block, docker.service]
+ - [systemctl, enable, etcd-member.service]
+ - [systemctl, start, --no-block, etcd-member.service]
+ - [systemctl, enable, kubelet.service]
+ - [systemctl, start, --no-block, kubelet.service]
+users:
+ - default
+ - name: fedora
+ gecos: Fedora Admin
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ groups: wheel,adm,systemd-journal,docker
+ ssh-authorized-keys:
+ - "${ssh_authorized_key}"
diff --git a/aws/fedora-cloud/kubernetes/controllers.tf b/aws/fedora-cloud/kubernetes/controllers.tf
new file mode 100644
index 00000000..9ed461c3
--- /dev/null
+++ b/aws/fedora-cloud/kubernetes/controllers.tf
@@ -0,0 +1,75 @@
+# Discrete DNS records for each controller's private IPv4 for etcd usage
+resource "aws_route53_record" "etcds" {
+ count = "${var.controller_count}"
+
+ # DNS Zone where record should be created
+ zone_id = "${var.dns_zone_id}"
+
+ name = "${format("%s-etcd%d.%s.", var.cluster_name, count.index, var.dns_zone)}"
+ type = "A"
+ ttl = 300
+
+ # private IPv4 address for etcd
+ records = ["${element(aws_instance.controllers.*.private_ip, count.index)}"]
+}
+
+# Controller instances
+resource "aws_instance" "controllers" {
+ count = "${var.controller_count}"
+
+ tags = {
+ Name = "${var.cluster_name}-controller-${count.index}"
+ }
+
+ instance_type = "${var.controller_type}"
+
+ ami = "${data.aws_ami.fedora.image_id}"
+ user_data = "${element(data.template_file.controller-cloudinit.*.rendered, count.index)}"
+
+ # storage
+ root_block_device {
+ volume_type = "standard"
+ volume_size = "${var.disk_size}"
+ }
+
+ # network
+ associate_public_ip_address = true
+ subnet_id = "${element(aws_subnet.public.*.id, count.index)}"
+ vpc_security_group_ids = ["${aws_security_group.controller.id}"]
+
+ lifecycle {
+ ignore_changes = ["ami"]
+ }
+}
+
+# Controller Cloud-Init
+data "template_file" "controller-cloudinit" {
+ count = "${var.controller_count}"
+
+ template = "${file("${path.module}/cloudinit/controller.yaml.tmpl")}"
+
+ vars = {
+ # Cannot use cyclic dependencies on controllers or their DNS records
+ etcd_name = "etcd${count.index}"
+ etcd_domain = "${var.cluster_name}-etcd${count.index}.${var.dns_zone}"
+
+ # etcd0=https://cluster-etcd0.example.com,etcd1=https://cluster-etcd1.example.com,...
+ etcd_initial_cluster = "${join(",", formatlist("%s=https://%s:2380", null_resource.repeat.*.triggers.name, null_resource.repeat.*.triggers.domain))}"
+
+ kubeconfig = "${indent(6, module.bootkube.kubeconfig)}"
+ ssh_authorized_key = "${var.ssh_authorized_key}"
+ k8s_dns_service_ip = "${cidrhost(var.service_cidr, 10)}"
+ cluster_domain_suffix = "${var.cluster_domain_suffix}"
+ }
+}
+
+# Horrible hack to generate a Terraform list of a desired length without dependencies.
+# Ideally: ${repeat("etcd", 3)} -> ["etcd", "etcd", "etcd"]
+resource null_resource "repeat" {
+ count = "${var.controller_count}"
+
+ triggers {
+ name = "etcd${count.index}"
+ domain = "${var.cluster_name}-etcd${count.index}.${var.dns_zone}"
+ }
+}
diff --git a/aws/fedora-cloud/kubernetes/network.tf b/aws/fedora-cloud/kubernetes/network.tf
new file mode 100644
index 00000000..1be5073b
--- /dev/null
+++ b/aws/fedora-cloud/kubernetes/network.tf
@@ -0,0 +1,57 @@
+data "aws_availability_zones" "all" {}
+
+# Network VPC, gateway, and routes
+
+resource "aws_vpc" "network" {
+ cidr_block = "${var.host_cidr}"
+ assign_generated_ipv6_cidr_block = true
+ enable_dns_support = true
+ enable_dns_hostnames = true
+
+ tags = "${map("Name", "${var.cluster_name}")}"
+}
+
+resource "aws_internet_gateway" "gateway" {
+ vpc_id = "${aws_vpc.network.id}"
+
+ tags = "${map("Name", "${var.cluster_name}")}"
+}
+
+resource "aws_route_table" "default" {
+ vpc_id = "${aws_vpc.network.id}"
+
+ route {
+ cidr_block = "0.0.0.0/0"
+ gateway_id = "${aws_internet_gateway.gateway.id}"
+ }
+
+ route {
+ ipv6_cidr_block = "::/0"
+ gateway_id = "${aws_internet_gateway.gateway.id}"
+ }
+
+ tags = "${map("Name", "${var.cluster_name}")}"
+}
+
+# Subnets (one per availability zone)
+
+resource "aws_subnet" "public" {
+ count = "${length(data.aws_availability_zones.all.names)}"
+
+ vpc_id = "${aws_vpc.network.id}"
+ availability_zone = "${data.aws_availability_zones.all.names[count.index]}"
+
+ cidr_block = "${cidrsubnet(var.host_cidr, 4, count.index)}"
+ ipv6_cidr_block = "${cidrsubnet(aws_vpc.network.ipv6_cidr_block, 8, count.index)}"
+ map_public_ip_on_launch = true
+ assign_ipv6_address_on_creation = true
+
+ tags = "${map("Name", "${var.cluster_name}-public-${count.index}")}"
+}
+
+resource "aws_route_table_association" "public" {
+ count = "${length(data.aws_availability_zones.all.names)}"
+
+ route_table_id = "${aws_route_table.default.id}"
+ subnet_id = "${element(aws_subnet.public.*.id, count.index)}"
+}
diff --git a/aws/fedora-cloud/kubernetes/outputs.tf b/aws/fedora-cloud/kubernetes/outputs.tf
new file mode 100644
index 00000000..1d5e3535
--- /dev/null
+++ b/aws/fedora-cloud/kubernetes/outputs.tf
@@ -0,0 +1,25 @@
+output "ingress_dns_name" {
+ value = "${module.workers.ingress_dns_name}"
+ description = "DNS name of the network load balancer for distributing traffic to Ingress controllers"
+}
+
+# Outputs for worker pools
+
+output "vpc_id" {
+ value = "${aws_vpc.network.id}"
+ description = "ID of the VPC for creating worker instances"
+}
+
+output "subnet_ids" {
+ value = ["${aws_subnet.public.*.id}"]
+ description = "List of subnet IDs for creating worker instances"
+}
+
+output "worker_security_groups" {
+ value = ["${aws_security_group.worker.id}"]
+ description = "List of worker security group IDs"
+}
+
+output "kubeconfig" {
+ value = "${module.bootkube.kubeconfig}"
+}
diff --git a/aws/fedora-cloud/kubernetes/require.tf b/aws/fedora-cloud/kubernetes/require.tf
new file mode 100644
index 00000000..e302aed7
--- /dev/null
+++ b/aws/fedora-cloud/kubernetes/require.tf
@@ -0,0 +1,25 @@
+# Terraform version and plugin versions
+
+terraform {
+ required_version = ">= 0.10.4"
+}
+
+provider "aws" {
+ version = "~> 1.11"
+}
+
+provider "local" {
+ version = "~> 1.0"
+}
+
+provider "null" {
+ version = "~> 1.0"
+}
+
+provider "template" {
+ version = "~> 1.0"
+}
+
+provider "tls" {
+ version = "~> 1.0"
+}
diff --git a/aws/fedora-cloud/kubernetes/security.tf b/aws/fedora-cloud/kubernetes/security.tf
new file mode 100644
index 00000000..8c71da6b
--- /dev/null
+++ b/aws/fedora-cloud/kubernetes/security.tf
@@ -0,0 +1,385 @@
+# Security Groups (instance firewalls)
+
+# Controller security group
+
+resource "aws_security_group" "controller" {
+ name = "${var.cluster_name}-controller"
+ description = "${var.cluster_name} controller security group"
+
+ vpc_id = "${aws_vpc.network.id}"
+
+ tags = "${map("Name", "${var.cluster_name}-controller")}"
+}
+
+resource "aws_security_group_rule" "controller-icmp" {
+ security_group_id = "${aws_security_group.controller.id}"
+
+ type = "ingress"
+ protocol = "icmp"
+ from_port = 0
+ to_port = 0
+ cidr_blocks = ["0.0.0.0/0"]
+}
+
+resource "aws_security_group_rule" "controller-ssh" {
+ security_group_id = "${aws_security_group.controller.id}"
+
+ type = "ingress"
+ protocol = "tcp"
+ from_port = 22
+ to_port = 22
+ cidr_blocks = ["0.0.0.0/0"]
+}
+
+resource "aws_security_group_rule" "controller-apiserver" {
+ security_group_id = "${aws_security_group.controller.id}"
+
+ type = "ingress"
+ protocol = "tcp"
+ from_port = 443
+ to_port = 443
+ cidr_blocks = ["0.0.0.0/0"]
+}
+
+resource "aws_security_group_rule" "controller-etcd" {
+ security_group_id = "${aws_security_group.controller.id}"
+
+ type = "ingress"
+ protocol = "tcp"
+ from_port = 2379
+ to_port = 2380
+ self = true
+}
+
+resource "aws_security_group_rule" "controller-flannel" {
+ security_group_id = "${aws_security_group.controller.id}"
+
+ type = "ingress"
+ protocol = "udp"
+ from_port = 8472
+ to_port = 8472
+ source_security_group_id = "${aws_security_group.worker.id}"
+}
+
+resource "aws_security_group_rule" "controller-flannel-self" {
+ security_group_id = "${aws_security_group.controller.id}"
+
+ type = "ingress"
+ protocol = "udp"
+ from_port = 8472
+ to_port = 8472
+ self = true
+}
+
+resource "aws_security_group_rule" "controller-node-exporter" {
+ security_group_id = "${aws_security_group.controller.id}"
+
+ type = "ingress"
+ protocol = "tcp"
+ from_port = 9100
+ to_port = 9100
+ source_security_group_id = "${aws_security_group.worker.id}"
+}
+
+resource "aws_security_group_rule" "controller-kubelet-self" {
+ security_group_id = "${aws_security_group.controller.id}"
+
+ type = "ingress"
+ protocol = "tcp"
+ from_port = 10250
+ to_port = 10250
+ self = true
+}
+
+resource "aws_security_group_rule" "controller-kubelet-read" {
+ security_group_id = "${aws_security_group.controller.id}"
+
+ type = "ingress"
+ protocol = "tcp"
+ from_port = 10255
+ to_port = 10255
+ source_security_group_id = "${aws_security_group.worker.id}"
+}
+
+resource "aws_security_group_rule" "controller-kubelet-read-self" {
+ security_group_id = "${aws_security_group.controller.id}"
+
+ type = "ingress"
+ protocol = "tcp"
+ from_port = 10255
+ to_port = 10255
+ self = true
+}
+
+resource "aws_security_group_rule" "controller-bgp" {
+ security_group_id = "${aws_security_group.controller.id}"
+
+ type = "ingress"
+ protocol = "tcp"
+ from_port = 179
+ to_port = 179
+ source_security_group_id = "${aws_security_group.worker.id}"
+}
+
+resource "aws_security_group_rule" "controller-bgp-self" {
+ security_group_id = "${aws_security_group.controller.id}"
+
+ type = "ingress"
+ protocol = "tcp"
+ from_port = 179
+ to_port = 179
+ self = true
+}
+
+resource "aws_security_group_rule" "controller-ipip" {
+ security_group_id = "${aws_security_group.controller.id}"
+
+ type = "ingress"
+ protocol = 4
+ from_port = 0
+ to_port = 0
+ source_security_group_id = "${aws_security_group.worker.id}"
+}
+
+resource "aws_security_group_rule" "controller-ipip-self" {
+ security_group_id = "${aws_security_group.controller.id}"
+
+ type = "ingress"
+ protocol = 4
+ from_port = 0
+ to_port = 0
+ self = true
+}
+
+resource "aws_security_group_rule" "controller-ipip-legacy" {
+ security_group_id = "${aws_security_group.controller.id}"
+
+ type = "ingress"
+ protocol = 94
+ from_port = 0
+ to_port = 0
+ source_security_group_id = "${aws_security_group.worker.id}"
+}
+
+resource "aws_security_group_rule" "controller-ipip-legacy-self" {
+ security_group_id = "${aws_security_group.controller.id}"
+
+ type = "ingress"
+ protocol = 94
+ from_port = 0
+ to_port = 0
+ self = true
+}
+
+resource "aws_security_group_rule" "controller-egress" {
+ security_group_id = "${aws_security_group.controller.id}"
+
+ type = "egress"
+ protocol = "-1"
+ from_port = 0
+ to_port = 0
+ cidr_blocks = ["0.0.0.0/0"]
+ ipv6_cidr_blocks = ["::/0"]
+}
+
+# Worker security group
+
+resource "aws_security_group" "worker" {
+ name = "${var.cluster_name}-worker"
+ description = "${var.cluster_name} worker security group"
+
+ vpc_id = "${aws_vpc.network.id}"
+
+ tags = "${map("Name", "${var.cluster_name}-worker")}"
+}
+
+resource "aws_security_group_rule" "worker-icmp" {
+ security_group_id = "${aws_security_group.worker.id}"
+
+ type = "ingress"
+ protocol = "icmp"
+ from_port = 0
+ to_port = 0
+ cidr_blocks = ["0.0.0.0/0"]
+}
+
+resource "aws_security_group_rule" "worker-ssh" {
+ security_group_id = "${aws_security_group.worker.id}"
+
+ type = "ingress"
+ protocol = "tcp"
+ from_port = 22
+ to_port = 22
+ cidr_blocks = ["0.0.0.0/0"]
+}
+
+resource "aws_security_group_rule" "worker-http" {
+ security_group_id = "${aws_security_group.worker.id}"
+
+ type = "ingress"
+ protocol = "tcp"
+ from_port = 80
+ to_port = 80
+ cidr_blocks = ["0.0.0.0/0"]
+}
+
+resource "aws_security_group_rule" "worker-https" {
+ security_group_id = "${aws_security_group.worker.id}"
+
+ type = "ingress"
+ protocol = "tcp"
+ from_port = 443
+ to_port = 443
+ cidr_blocks = ["0.0.0.0/0"]
+}
+
+resource "aws_security_group_rule" "worker-flannel" {
+ security_group_id = "${aws_security_group.worker.id}"
+
+ type = "ingress"
+ protocol = "udp"
+ from_port = 8472
+ to_port = 8472
+ source_security_group_id = "${aws_security_group.controller.id}"
+}
+
+resource "aws_security_group_rule" "worker-flannel-self" {
+ security_group_id = "${aws_security_group.worker.id}"
+
+ type = "ingress"
+ protocol = "udp"
+ from_port = 8472
+ to_port = 8472
+ self = true
+}
+
+resource "aws_security_group_rule" "worker-node-exporter" {
+ security_group_id = "${aws_security_group.worker.id}"
+
+ type = "ingress"
+ protocol = "tcp"
+ from_port = 9100
+ to_port = 9100
+ self = true
+}
+
+resource "aws_security_group_rule" "ingress-health" {
+ security_group_id = "${aws_security_group.worker.id}"
+
+ type = "ingress"
+ protocol = "tcp"
+ from_port = 10254
+ to_port = 10254
+ cidr_blocks = ["0.0.0.0/0"]
+}
+
+resource "aws_security_group_rule" "worker-kubelet" {
+ security_group_id = "${aws_security_group.worker.id}"
+
+ type = "ingress"
+ protocol = "tcp"
+ from_port = 10250
+ to_port = 10250
+ source_security_group_id = "${aws_security_group.controller.id}"
+}
+
+resource "aws_security_group_rule" "worker-kubelet-self" {
+ security_group_id = "${aws_security_group.worker.id}"
+
+ type = "ingress"
+ protocol = "tcp"
+ from_port = 10250
+ to_port = 10250
+ self = true
+}
+
+resource "aws_security_group_rule" "worker-kubelet-read" {
+ security_group_id = "${aws_security_group.worker.id}"
+
+ type = "ingress"
+ protocol = "tcp"
+ from_port = 10255
+ to_port = 10255
+ source_security_group_id = "${aws_security_group.controller.id}"
+}
+
+resource "aws_security_group_rule" "worker-kubelet-read-self" {
+ security_group_id = "${aws_security_group.worker.id}"
+
+ type = "ingress"
+ protocol = "tcp"
+ from_port = 10255
+ to_port = 10255
+ self = true
+}
+
+resource "aws_security_group_rule" "worker-bgp" {
+ security_group_id = "${aws_security_group.worker.id}"
+
+ type = "ingress"
+ protocol = "tcp"
+ from_port = 179
+ to_port = 179
+ source_security_group_id = "${aws_security_group.controller.id}"
+}
+
+resource "aws_security_group_rule" "worker-bgp-self" {
+ security_group_id = "${aws_security_group.worker.id}"
+
+ type = "ingress"
+ protocol = "tcp"
+ from_port = 179
+ to_port = 179
+ self = true
+}
+
+resource "aws_security_group_rule" "worker-ipip" {
+ security_group_id = "${aws_security_group.worker.id}"
+
+ type = "ingress"
+ protocol = 4
+ from_port = 0
+ to_port = 0
+ source_security_group_id = "${aws_security_group.controller.id}"
+}
+
+resource "aws_security_group_rule" "worker-ipip-self" {
+ security_group_id = "${aws_security_group.worker.id}"
+
+ type = "ingress"
+ protocol = 4
+ from_port = 0
+ to_port = 0
+ self = true
+}
+
+resource "aws_security_group_rule" "worker-ipip-legacy" {
+ security_group_id = "${aws_security_group.worker.id}"
+
+ type = "ingress"
+ protocol = 94
+ from_port = 0
+ to_port = 0
+ source_security_group_id = "${aws_security_group.controller.id}"
+}
+
+resource "aws_security_group_rule" "worker-ipip-legacy-self" {
+ security_group_id = "${aws_security_group.worker.id}"
+
+ type = "ingress"
+ protocol = 94
+ from_port = 0
+ to_port = 0
+ self = true
+}
+
+resource "aws_security_group_rule" "worker-egress" {
+ security_group_id = "${aws_security_group.worker.id}"
+
+ type = "egress"
+ protocol = "-1"
+ from_port = 0
+ to_port = 0
+ cidr_blocks = ["0.0.0.0/0"]
+ ipv6_cidr_blocks = ["::/0"]
+}
diff --git a/aws/fedora-cloud/kubernetes/ssh.tf b/aws/fedora-cloud/kubernetes/ssh.tf
new file mode 100644
index 00000000..a667727c
--- /dev/null
+++ b/aws/fedora-cloud/kubernetes/ssh.tf
@@ -0,0 +1,84 @@
+# Secure copy etcd TLS assets to controllers.
+resource "null_resource" "copy-secrets" {
+ count = "${var.controller_count}"
+
+ connection {
+ type = "ssh"
+ host = "${element(aws_instance.controllers.*.public_ip, count.index)}"
+ user = "fedora"
+ timeout = "15m"
+ }
+
+ provisioner "file" {
+ content = "${module.bootkube.etcd_ca_cert}"
+ destination = "$HOME/etcd-client-ca.crt"
+ }
+
+ provisioner "file" {
+ content = "${module.bootkube.etcd_client_cert}"
+ destination = "$HOME/etcd-client.crt"
+ }
+
+ provisioner "file" {
+ content = "${module.bootkube.etcd_client_key}"
+ destination = "$HOME/etcd-client.key"
+ }
+
+ provisioner "file" {
+ content = "${module.bootkube.etcd_server_cert}"
+ destination = "$HOME/etcd-server.crt"
+ }
+
+ provisioner "file" {
+ content = "${module.bootkube.etcd_server_key}"
+ destination = "$HOME/etcd-server.key"
+ }
+
+ provisioner "file" {
+ content = "${module.bootkube.etcd_peer_cert}"
+ destination = "$HOME/etcd-peer.crt"
+ }
+
+ provisioner "file" {
+ content = "${module.bootkube.etcd_peer_key}"
+ destination = "$HOME/etcd-peer.key"
+ }
+
+ provisioner "remote-exec" {
+ inline = [
+ "sudo mkdir -p /etc/ssl/etcd/etcd",
+ "sudo mv etcd-client* /etc/ssl/etcd/",
+ "sudo cp /etc/ssl/etcd/etcd-client-ca.crt /etc/ssl/etcd/etcd/server-ca.crt",
+ "sudo mv etcd-server.crt /etc/ssl/etcd/etcd/server.crt",
+ "sudo mv etcd-server.key /etc/ssl/etcd/etcd/server.key",
+ "sudo cp /etc/ssl/etcd/etcd-client-ca.crt /etc/ssl/etcd/etcd/peer-ca.crt",
+ "sudo mv etcd-peer.crt /etc/ssl/etcd/etcd/peer.crt",
+ "sudo mv etcd-peer.key /etc/ssl/etcd/etcd/peer.key",
+ ]
+ }
+}
+
+# Secure copy bootkube assets to ONE controller and start bootkube to perform
+# one-time self-hosted cluster bootstrapping.
+resource "null_resource" "bootkube-start" {
+ depends_on = ["module.bootkube", "null_resource.copy-secrets", "aws_route53_record.apiserver"]
+
+ connection {
+ type = "ssh"
+ host = "${aws_instance.controllers.0.public_ip}"
+ user = "fedora"
+ timeout = "15m"
+ }
+
+ provisioner "file" {
+ source = "${var.asset_dir}"
+ destination = "$HOME/assets"
+ }
+
+ provisioner "remote-exec" {
+ inline = [
+ "sudo mv assets /opt/bootkube",
+ "sudo systemctl start bootkube",
+ ]
+ }
+}
diff --git a/aws/fedora-cloud/kubernetes/variables.tf b/aws/fedora-cloud/kubernetes/variables.tf
new file mode 100644
index 00000000..44becb37
--- /dev/null
+++ b/aws/fedora-cloud/kubernetes/variables.tf
@@ -0,0 +1,96 @@
+variable "cluster_name" {
+ type = "string"
+ description = "Cluster name"
+}
+
+variable "dns_zone" {
+ type = "string"
+ description = "AWS DNS Zone (e.g. aws.dghubble.io)"
+}
+
+variable "dns_zone_id" {
+ type = "string"
+ description = "AWS DNS Zone ID (e.g. Z3PAABBCFAKEC0)"
+}
+
+variable "ssh_authorized_key" {
+ type = "string"
+  description = "SSH public key for user 'fedora'"
+}
+
+variable "disk_size" {
+ type = "string"
+ default = "40"
+ description = "The size of the disk in Gigabytes"
+}
+
+variable "host_cidr" {
+ description = "CIDR IPv4 range to assign to EC2 nodes"
+ type = "string"
+ default = "10.0.0.0/16"
+}
+
+variable "controller_count" {
+ type = "string"
+ default = "1"
+ description = "Number of controllers"
+}
+
+variable "controller_type" {
+ type = "string"
+ default = "t2.small"
+ description = "Controller EC2 instance type"
+}
+
+variable "worker_count" {
+ type = "string"
+ default = "1"
+ description = "Number of workers"
+}
+
+variable "worker_type" {
+ type = "string"
+ default = "t2.small"
+ description = "Worker EC2 instance type"
+}
+
+# bootkube assets
+
+variable "asset_dir" {
+ description = "Path to a directory where generated assets should be placed (contains secrets)"
+ type = "string"
+}
+
+variable "networking" {
+ description = "Choice of networking provider (calico or flannel)"
+ type = "string"
+ default = "calico"
+}
+
+variable "network_mtu" {
+ description = "CNI interface MTU (applies to calico only). Use 8981 if using instances types with Jumbo frames."
+ type = "string"
+ default = "1480"
+}
+
+variable "pod_cidr" {
+ description = "CIDR IPv4 range to assign Kubernetes pods"
+ type = "string"
+ default = "10.2.0.0/16"
+}
+
+variable "service_cidr" {
+  description = <<EOD
+CIDR IPv4 range to assign Kubernetes services.
+The 1st IP will be reserved for kube_apiserver, the 10th IP will be reserved for kube-dns.
+EOD
+
+  type    = "string"
+  default = "10.3.0.0/16"
+}
+
+variable "cluster_domain_suffix" {
+  description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
+  type        = "string"
+  default     = "cluster.local"
+}
diff --git a/aws/fedora-cloud/kubernetes/workers.tf b/aws/fedora-cloud/kubernetes/workers.tf
new file mode 100644
--- /dev/null
+++ b/aws/fedora-cloud/kubernetes/workers.tf
@@ -0,0 +1,18 @@
+module "workers" {
+  source = "workers"
+  name   = "${var.cluster_name}"
+
+  # AWS
+  vpc_id          = "${aws_vpc.network.id}"
+  subnet_ids      = ["${aws_subnet.public.*.id}"]
+  security_groups = ["${aws_security_group.worker.id}"]
+  count           = "${var.worker_count}"
+  instance_type   = "${var.worker_type}"
+  disk_size       = "${var.disk_size}"
+
+  # configuration
+  kubeconfig            = "${module.bootkube.kubeconfig}"
+  ssh_authorized_key    = "${var.ssh_authorized_key}"
+  service_cidr          = "${var.service_cidr}"
+  cluster_domain_suffix = "${var.cluster_domain_suffix}"
+}
diff --git a/aws/fedora-cloud/kubernetes/workers/ami.tf b/aws/fedora-cloud/kubernetes/workers/ami.tf
new file mode 100644
--- /dev/null
+++ b/aws/fedora-cloud/kubernetes/workers/ami.tf
@@ -0,0 +1,19 @@
+data "aws_ami" "fedora" {
+  most_recent = true
+  owners      = ["125523088429"]
+
+  filter {
+    name   = "architecture"
+    values = ["x86_64"]
+  }
+
+  filter {
+    name   = "virtualization-type"
+    values = ["hvm"]
+  }
+
+  filter {
+    name   = "name"
+    values = ["Fedora-Cloud-Base-27*-standard-0"]
+  }
+}
diff --git a/aws/fedora-cloud/kubernetes/workers/cloudinit/worker.yaml.tmpl b/aws/fedora-cloud/kubernetes/workers/cloudinit/worker.yaml.tmpl
new file mode 100644
--- /dev/null
+++ b/aws/fedora-cloud/kubernetes/workers/cloudinit/worker.yaml.tmpl
@@ -0,0 +1,67 @@
+#cloud-config
+yum_repos:
+  kubernetes:
+    name: kubernetes
+    baseurl: https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
+    enabled: true
+    gpgcheck: true
+    gpgkey: https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
+packages:
+  - vim
+  - docker
+  - kubelet
+  - nfs-utils
+write_files:
+  - path: /etc/systemd/system/kubelet.service.d/10-typhoon.conf
+    content: |
+      [Unit]
+      Description=Kubelet
+      Wants=rpc-statd.service
+      [Service]
+      ExecStartPre=/bin/mkdir -p /opt/cni/bin
+      ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
+      ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
+      ExecStartPre=/bin/mkdir -p /var/lib/cni
+      ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins
+      ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
+ ExecStart=
+ ExecStart=/usr/bin/kubelet \
+ --allow-privileged \
+ --anonymous-auth=false \
+ --cgroup-driver=systemd \
+ --client-ca-file=/etc/kubernetes/ca.crt \
+ --cluster_dns=${k8s_dns_service_ip} \
+ --cluster_domain=${cluster_domain_suffix} \
+ --cni-conf-dir=/etc/kubernetes/cni/net.d \
+ --exit-on-lock-contention \
+ --kubeconfig=/etc/kubernetes/kubeconfig \
+ --lock-file=/var/run/lock/kubelet.lock \
+ --network-plugin=cni \
+ --node-labels=node-role.kubernetes.io/node \
+ --pod-manifest-path=/etc/kubernetes/manifests \
+ --volume-plugin-dir=/var/lib/kubelet/volumeplugins
+ Restart=always
+ RestartSec=10
+ [Install]
+ WantedBy=multi-user.target
+ - path: /etc/kubernetes/kubeconfig
+ permissions: '0644'
+ content: |
+ ${kubeconfig}
+ - path: /etc/selinux/config
+ content: |
+ SELINUX=permissive
+runcmd:
+ - [systemctl, daemon-reload]
+ - [systemctl, enable, docker.service]
+ - [systemctl, start, --no-block, docker.service]
+ - [systemctl, enable, kubelet.service]
+ - [systemctl, start, --no-block, kubelet.service]
+users:
+ - default
+ - name: fedora
+ gecos: Fedora Admin
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ groups: wheel,adm,systemd-journal,docker
+ ssh-authorized-keys:
+ - "${ssh_authorized_key}"
diff --git a/aws/fedora-cloud/kubernetes/workers/ingress.tf b/aws/fedora-cloud/kubernetes/workers/ingress.tf
new file mode 100644
index 00000000..6e1599c3
--- /dev/null
+++ b/aws/fedora-cloud/kubernetes/workers/ingress.tf
@@ -0,0 +1,82 @@
+# Network Load Balancer for Ingress
+resource "aws_lb" "ingress" {
+ name = "${var.name}-ingress"
+ load_balancer_type = "network"
+ internal = false
+
+ subnets = ["${var.subnet_ids}"]
+
+ enable_cross_zone_load_balancing = true
+}
+
+# Forward HTTP traffic to workers
+resource "aws_lb_listener" "ingress-http" {
+ load_balancer_arn = "${aws_lb.ingress.arn}"
+ protocol = "TCP"
+ port = 80
+
+ default_action {
+ type = "forward"
+ target_group_arn = "${aws_lb_target_group.workers-http.arn}"
+ }
+}
+
+# Forward HTTPS traffic to workers
+resource "aws_lb_listener" "ingress-https" {
+ load_balancer_arn = "${aws_lb.ingress.arn}"
+ protocol = "TCP"
+ port = 443
+
+ default_action {
+ type = "forward"
+ target_group_arn = "${aws_lb_target_group.workers-https.arn}"
+ }
+}
+
+# Network Load Balancer target groups of instances
+
+resource "aws_lb_target_group" "workers-http" {
+ name = "${var.name}-workers-http"
+ vpc_id = "${var.vpc_id}"
+ target_type = "instance"
+
+ protocol = "TCP"
+ port = 80
+
+ # Ingress Controller HTTP health check
+ health_check {
+ protocol = "HTTP"
+ port = 10254
+ path = "/healthz"
+
+ # NLBs required to use same healthy and unhealthy thresholds
+ healthy_threshold = 3
+ unhealthy_threshold = 3
+
+ # Interval between health checks required to be 10 or 30
+ interval = 10
+ }
+}
+
+resource "aws_lb_target_group" "workers-https" {
+ name = "${var.name}-workers-https"
+ vpc_id = "${var.vpc_id}"
+ target_type = "instance"
+
+ protocol = "TCP"
+ port = 443
+
+ # Ingress Controller HTTP health check
+ health_check {
+ protocol = "HTTP"
+ port = 10254
+ path = "/healthz"
+
+ # NLBs required to use same healthy and unhealthy thresholds
+ healthy_threshold = 3
+ unhealthy_threshold = 3
+
+ # Interval between health checks required to be 10 or 30
+ interval = 10
+ }
+}
diff --git a/aws/fedora-cloud/kubernetes/workers/outputs.tf b/aws/fedora-cloud/kubernetes/workers/outputs.tf
new file mode 100644
index 00000000..1b9f8429
--- /dev/null
+++ b/aws/fedora-cloud/kubernetes/workers/outputs.tf
@@ -0,0 +1,4 @@
+output "ingress_dns_name" {
+ value = "${aws_lb.ingress.dns_name}"
+ description = "DNS name of the network load balancer for distributing traffic to Ingress controllers"
+}
diff --git a/aws/fedora-cloud/kubernetes/workers/variables.tf b/aws/fedora-cloud/kubernetes/workers/variables.tf
new file mode 100644
index 00000000..ae32d3e3
--- /dev/null
+++ b/aws/fedora-cloud/kubernetes/workers/variables.tf
@@ -0,0 +1,67 @@
+variable "name" {
+ type = "string"
+  description = "Unique name for the worker instance group"
+}
+
+variable "vpc_id" {
+ type = "string"
+ description = "ID of the VPC for creating instances"
+}
+
+variable "subnet_ids" {
+ type = "list"
+ description = "List of subnet IDs for creating instances"
+}
+
+variable "security_groups" {
+ type = "list"
+ description = "List of security group IDs"
+}
+
+# instances
+
+variable "count" {
+ type = "string"
+ default = "1"
+ description = "Number of instances"
+}
+
+variable "instance_type" {
+ type = "string"
+ default = "t2.small"
+ description = "EC2 instance type"
+}
+
+variable "disk_size" {
+ type = "string"
+ default = "40"
+ description = "Size of the disk in GB"
+}
+
+# configuration
+
+variable "kubeconfig" {
+ type = "string"
+ description = "Generated Kubelet kubeconfig"
+}
+
+variable "ssh_authorized_key" {
+ type = "string"
+  description = "SSH public key for user 'fedora'"
+}
+
+variable "service_cidr" {
+  description = <<EOD
+CIDR IPv4 range to assign Kubernetes services.
+The 1st IP will be reserved for kube_apiserver, the 10th IP will be reserved for kube-dns.
+EOD
+
+  type    = "string"
+  default = "10.3.0.0/16"
+}
+
+variable "cluster_domain_suffix" {
+  description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
+  type        = "string"
+  default     = "cluster.local"
+}