diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md
index 16225c6c..6796f5b2 100644
--- a/.github/ISSUE_TEMPLATE.md
+++ b/.github/ISSUE_TEMPLATE.md
@@ -5,7 +5,7 @@
### Environment
* Platform: aws, azure, bare-metal, google-cloud, digital-ocean
-* OS: container-linux, flatcar-linux, or fedora-atomic
+* OS: container-linux, flatcar-linux
* Release: Typhoon version or Git SHA (reporting latest is **not** helpful)
* Terraform: `terraform version` (reporting latest is **not** helpful)
* Plugins: Provider plugin versions (reporting latest is **not** helpful)
diff --git a/CHANGES.md b/CHANGES.md
index 6126a61a..19c0ba3d 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -11,6 +11,7 @@ Notable changes between versions.
* [Migration](https://typhoon.psdn.io/topics/maintenance/#terraform-v012x) instructions for Terraform v0.12
* Require `terraform-provider-ct` v0.3.2+ to support Terraform v0.12 (action required)
* Update Calico from v3.7.2 to [v3.7.3](https://docs.projectcalico.org/v3.7/release-notes/)
+* Remove Fedora Atomic modules (deprecated in March 2019) ([#501](https://github.com/poseidon/typhoon/pull/501))
#### AWS
diff --git a/aws/fedora-atomic/kubernetes/LICENSE b/aws/fedora-atomic/kubernetes/LICENSE
deleted file mode 100644
index bd9a5eea..00000000
--- a/aws/fedora-atomic/kubernetes/LICENSE
+++ /dev/null
@@ -1,23 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2017 Typhoon Authors
-Copyright (c) 2017 Dalton Hubble
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
diff --git a/aws/fedora-atomic/kubernetes/README.md b/aws/fedora-atomic/kubernetes/README.md
deleted file mode 100644
index 82abe431..00000000
--- a/aws/fedora-atomic/kubernetes/README.md
+++ /dev/null
@@ -1,23 +0,0 @@
-# Typhoon
-
-Typhoon is a minimal and free Kubernetes distribution.
-
-* Minimal, stable base Kubernetes distribution
-* Declarative infrastructure and configuration
-* Free (freedom and cost) and privacy-respecting
-* Practical for labs, datacenters, and clouds
-
-Typhoon distributes upstream Kubernetes, architectural conventions, and cluster addons, much like a GNU/Linux distribution provides the Linux kernel and userspace components.
-
-## Features
-
-* Kubernetes v1.15.0 (upstream, via [kubernetes-incubator/bootkube](https://github.com/kubernetes-incubator/bootkube))
-* Single or multi-master, [Calico](https://www.projectcalico.org/) or [flannel](https://github.com/coreos/flannel) networking
-* On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
-* Advanced features like [worker pools](https://typhoon.psdn.io/advanced/worker-pools/) and [spot](https://typhoon.psdn.io/cl/aws/#spot) workers
-* Ready for Ingress, Prometheus, Grafana, and other optional [addons](https://typhoon.psdn.io/addons/overview/)
-
-## Docs
-
-Please see the [official docs](https://typhoon.psdn.io) and the AWS [tutorial](https://typhoon.psdn.io/cl/aws/).
-
diff --git a/aws/fedora-atomic/kubernetes/ami.tf b/aws/fedora-atomic/kubernetes/ami.tf
deleted file mode 100644
index 7573749c..00000000
--- a/aws/fedora-atomic/kubernetes/ami.tf
+++ /dev/null
@@ -1,19 +0,0 @@
-data "aws_ami" "fedora" {
- most_recent = true
- owners = ["125523088429"]
-
- filter {
- name = "architecture"
- values = ["x86_64"]
- }
-
- filter {
- name = "virtualization-type"
- values = ["hvm"]
- }
-
- filter {
- name = "name"
- values = ["Fedora-AtomicHost-28-20180625.1.x86_64-*-gp2-*"]
- }
-}
diff --git a/aws/fedora-atomic/kubernetes/bootkube.tf b/aws/fedora-atomic/kubernetes/bootkube.tf
deleted file mode 100644
index 630894d3..00000000
--- a/aws/fedora-atomic/kubernetes/bootkube.tf
+++ /dev/null
@@ -1,18 +0,0 @@
-# Self-hosted Kubernetes assets (kubeconfig, manifests)
-module "bootkube" {
- source = "git::https://github.com/poseidon/terraform-render-bootkube.git?ref=62df9ad69cc0da35f47d40fa981370c4503ad581"
-
- cluster_name = "${var.cluster_name}"
- api_servers = ["${format("%s.%s", var.cluster_name, var.dns_zone)}"]
- etcd_servers = ["${aws_route53_record.etcds.*.fqdn}"]
- asset_dir = "${var.asset_dir}"
- networking = "${var.networking}"
- network_mtu = "${var.network_mtu}"
- pod_cidr = "${var.pod_cidr}"
- service_cidr = "${var.service_cidr}"
- cluster_domain_suffix = "${var.cluster_domain_suffix}"
- enable_reporting = "${var.enable_reporting}"
-
- # Fedora
- trusted_certs_dir = "/etc/pki/tls/certs"
-}
diff --git a/aws/fedora-atomic/kubernetes/cloudinit/controller.yaml.tmpl b/aws/fedora-atomic/kubernetes/cloudinit/controller.yaml.tmpl
deleted file mode 100644
index 64c1058e..00000000
--- a/aws/fedora-atomic/kubernetes/cloudinit/controller.yaml.tmpl
+++ /dev/null
@@ -1,93 +0,0 @@
-#cloud-config
-write_files:
- - path: /etc/etcd/etcd.conf
- content: |
- ETCD_NAME=${etcd_name}
- ETCD_DATA_DIR=/var/lib/etcd
- ETCD_ADVERTISE_CLIENT_URLS=https://${etcd_domain}:2379
- ETCD_INITIAL_ADVERTISE_PEER_URLS=https://${etcd_domain}:2380
- ETCD_LISTEN_CLIENT_URLS=https://0.0.0.0:2379
- ETCD_LISTEN_PEER_URLS=https://0.0.0.0:2380
- ETCD_LISTEN_METRICS_URLS=http://0.0.0.0:2381
- ETCD_INITIAL_CLUSTER=${etcd_initial_cluster}
- ETCD_STRICT_RECONFIG_CHECK=true
- ETCD_TRUSTED_CA_FILE=/etc/ssl/certs/etcd/server-ca.crt
- ETCD_CERT_FILE=/etc/ssl/certs/etcd/server.crt
- ETCD_KEY_FILE=/etc/ssl/certs/etcd/server.key
- ETCD_CLIENT_CERT_AUTH=true
- ETCD_PEER_TRUSTED_CA_FILE=/etc/ssl/certs/etcd/peer-ca.crt
- ETCD_PEER_CERT_FILE=/etc/ssl/certs/etcd/peer.crt
- ETCD_PEER_KEY_FILE=/etc/ssl/certs/etcd/peer.key
- ETCD_PEER_CLIENT_CERT_AUTH=true
- - path: /etc/systemd/system/kubelet.service.d/10-typhoon.conf
- content: |
- [Unit]
- Wants=rpc-statd.service
- [Service]
- ExecStartPre=/bin/mkdir -p /opt/cni/bin
- ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
- ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
- ExecStartPre=/bin/mkdir -p /etc/kubernetes/checkpoint-secrets
- ExecStartPre=/bin/mkdir -p /etc/kubernetes/inactive-manifests
- ExecStartPre=/bin/mkdir -p /var/lib/cni
- ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins
- ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
- Restart=always
- RestartSec=10
- - path: /etc/kubernetes/kubelet.conf
- content: |
- ARGS="--anonymous-auth=false \
- --authentication-token-webhook \
- --authorization-mode=Webhook \
- --client-ca-file=/etc/kubernetes/ca.crt \
- --cluster_dns=${cluster_dns_service_ip} \
- --cluster_domain=${cluster_domain_suffix} \
- --cni-conf-dir=/etc/kubernetes/cni/net.d \
- --exit-on-lock-contention \
- --kubeconfig=/etc/kubernetes/kubeconfig \
- --lock-file=/var/run/lock/kubelet.lock \
- --network-plugin=cni \
- --node-labels=node-role.kubernetes.io/master \
- --node-labels=node-role.kubernetes.io/controller="true" \
- --pod-manifest-path=/etc/kubernetes/manifests \
- --read-only-port=0 \
- --register-with-taints=node-role.kubernetes.io/master=:NoSchedule \
- --volume-plugin-dir=/var/lib/kubelet/volumeplugins"
- - path: /etc/kubernetes/kubeconfig
- permissions: '0644'
- content: |
- ${kubeconfig}
- - path: /var/lib/bootkube/.keep
- - path: /etc/NetworkManager/conf.d/typhoon.conf
- content: |
- [main]
- plugins=keyfile
- [keyfile]
- unmanaged-devices=interface-name:cali*;interface-name:tunl*
- - path: /etc/selinux/config
- owner: root:root
- permissions: '0644'
- content: |
- SELINUX=permissive
- SELINUXTYPE=targeted
-bootcmd:
- - [setenforce, Permissive]
- - [systemctl, disable, firewalld, --now]
- # https://github.com/kubernetes/kubernetes/issues/60869
- - [modprobe, ip_vs]
-runcmd:
- - [systemctl, daemon-reload]
- - [systemctl, restart, NetworkManager]
- - "atomic install --system --name=etcd quay.io/poseidon/etcd:v3.3.12"
- - "atomic install --system --name=kubelet quay.io/poseidon/kubelet:v1.14.1"
- - "atomic install --system --name=bootkube quay.io/poseidon/bootkube:v0.14.0"
- - [systemctl, start, --no-block, etcd.service]
- - [systemctl, start, --no-block, kubelet.service]
-users:
- - default
- - name: fedora
- gecos: Fedora Admin
- sudo: ALL=(ALL) NOPASSWD:ALL
- groups: wheel,adm,systemd-journal,docker
- ssh-authorized-keys:
- - "${ssh_authorized_key}"
diff --git a/aws/fedora-atomic/kubernetes/controllers.tf b/aws/fedora-atomic/kubernetes/controllers.tf
deleted file mode 100644
index 26ba3ca3..00000000
--- a/aws/fedora-atomic/kubernetes/controllers.tf
+++ /dev/null
@@ -1,79 +0,0 @@
-# Discrete DNS records for each controller's private IPv4 for etcd usage
-resource "aws_route53_record" "etcds" {
- count = "${var.controller_count}"
-
- # DNS Zone where record should be created
- zone_id = "${var.dns_zone_id}"
-
- name = "${format("%s-etcd%d.%s.", var.cluster_name, count.index, var.dns_zone)}"
- type = "A"
- ttl = 300
-
- # private IPv4 address for etcd
- records = ["${element(aws_instance.controllers.*.private_ip, count.index)}"]
-}
-
-# Controller instances
-resource "aws_instance" "controllers" {
- count = "${var.controller_count}"
-
- tags = {
- Name = "${var.cluster_name}-controller-${count.index}"
- }
-
- instance_type = "${var.controller_type}"
-
- ami = "${data.aws_ami.fedora.image_id}"
- user_data = "${element(data.template_file.controller-cloudinit.*.rendered, count.index)}"
-
- # storage
- root_block_device {
- volume_type = "${var.disk_type}"
- volume_size = "${var.disk_size}"
- iops = "${var.disk_iops}"
- }
-
- # network
- associate_public_ip_address = true
- subnet_id = "${element(aws_subnet.public.*.id, count.index)}"
- vpc_security_group_ids = ["${aws_security_group.controller.id}"]
-
- lifecycle {
- ignore_changes = [
- "ami",
- "user_data",
- ]
- }
-}
-
-# Controller Cloud-Init
-data "template_file" "controller-cloudinit" {
- count = "${var.controller_count}"
-
- template = "${file("${path.module}/cloudinit/controller.yaml.tmpl")}"
-
- vars = {
- # Cannot reference controller instances or their DNS records here (cyclic dependency)
- etcd_name = "etcd${count.index}"
- etcd_domain = "${var.cluster_name}-etcd${count.index}.${var.dns_zone}"
-
- # etcd0=https://cluster-etcd0.example.com,etcd1=https://cluster-etcd1.example.com,...
- etcd_initial_cluster = "${join(",", data.template_file.etcds.*.rendered)}"
-
- kubeconfig = "${indent(6, module.bootkube.kubeconfig-kubelet)}"
- ssh_authorized_key = "${var.ssh_authorized_key}"
- cluster_dns_service_ip = "${cidrhost(var.service_cidr, 10)}"
- cluster_domain_suffix = "${var.cluster_domain_suffix}"
- }
-}
-
-data "template_file" "etcds" {
- count = "${var.controller_count}"
- template = "etcd$${index}=https://$${cluster_name}-etcd$${index}.$${dns_zone}:2380"
-
- vars = {
- index = "${count.index}"
- cluster_name = "${var.cluster_name}"
- dns_zone = "${var.dns_zone}"
- }
-}
diff --git a/aws/fedora-atomic/kubernetes/network.tf b/aws/fedora-atomic/kubernetes/network.tf
deleted file mode 100644
index 1be5073b..00000000
--- a/aws/fedora-atomic/kubernetes/network.tf
+++ /dev/null
@@ -1,57 +0,0 @@
-data "aws_availability_zones" "all" {}
-
-# Network VPC, gateway, and routes
-
-resource "aws_vpc" "network" {
- cidr_block = "${var.host_cidr}"
- assign_generated_ipv6_cidr_block = true
- enable_dns_support = true
- enable_dns_hostnames = true
-
- tags = "${map("Name", "${var.cluster_name}")}"
-}
-
-resource "aws_internet_gateway" "gateway" {
- vpc_id = "${aws_vpc.network.id}"
-
- tags = "${map("Name", "${var.cluster_name}")}"
-}
-
-resource "aws_route_table" "default" {
- vpc_id = "${aws_vpc.network.id}"
-
- route {
- cidr_block = "0.0.0.0/0"
- gateway_id = "${aws_internet_gateway.gateway.id}"
- }
-
- route {
- ipv6_cidr_block = "::/0"
- gateway_id = "${aws_internet_gateway.gateway.id}"
- }
-
- tags = "${map("Name", "${var.cluster_name}")}"
-}
-
-# Subnets (one per availability zone)
-
-resource "aws_subnet" "public" {
- count = "${length(data.aws_availability_zones.all.names)}"
-
- vpc_id = "${aws_vpc.network.id}"
- availability_zone = "${data.aws_availability_zones.all.names[count.index]}"
-
- cidr_block = "${cidrsubnet(var.host_cidr, 4, count.index)}"
- ipv6_cidr_block = "${cidrsubnet(aws_vpc.network.ipv6_cidr_block, 8, count.index)}"
- map_public_ip_on_launch = true
- assign_ipv6_address_on_creation = true
-
- tags = "${map("Name", "${var.cluster_name}-public-${count.index}")}"
-}
-
-resource "aws_route_table_association" "public" {
- count = "${length(data.aws_availability_zones.all.names)}"
-
- route_table_id = "${aws_route_table.default.id}"
- subnet_id = "${element(aws_subnet.public.*.id, count.index)}"
-}
diff --git a/aws/fedora-atomic/kubernetes/nlb.tf b/aws/fedora-atomic/kubernetes/nlb.tf
deleted file mode 100644
index ddcc52f5..00000000
--- a/aws/fedora-atomic/kubernetes/nlb.tf
+++ /dev/null
@@ -1,93 +0,0 @@
-# Network Load Balancer DNS Record
-resource "aws_route53_record" "apiserver" {
- zone_id = "${var.dns_zone_id}"
-
- name = "${format("%s.%s.", var.cluster_name, var.dns_zone)}"
- type = "A"
-
- # AWS recommends their special "alias" records for NLBs
- alias {
- name = "${aws_lb.nlb.dns_name}"
- zone_id = "${aws_lb.nlb.zone_id}"
- evaluate_target_health = true
- }
-}
-
-# Network Load Balancer for apiservers and ingress
-resource "aws_lb" "nlb" {
- name = "${var.cluster_name}-nlb"
- load_balancer_type = "network"
- internal = false
-
- subnets = ["${aws_subnet.public.*.id}"]
-
- enable_cross_zone_load_balancing = true
-}
-
-# Forward TCP apiserver traffic to controllers
-resource "aws_lb_listener" "apiserver-https" {
- load_balancer_arn = "${aws_lb.nlb.arn}"
- protocol = "TCP"
- port = "6443"
-
- default_action {
- type = "forward"
- target_group_arn = "${aws_lb_target_group.controllers.arn}"
- }
-}
-
-# Forward HTTP ingress traffic to workers
-resource "aws_lb_listener" "ingress-http" {
- load_balancer_arn = "${aws_lb.nlb.arn}"
- protocol = "TCP"
- port = 80
-
- default_action {
- type = "forward"
- target_group_arn = "${module.workers.target_group_http}"
- }
-}
-
-# Forward HTTPS ingress traffic to workers
-resource "aws_lb_listener" "ingress-https" {
- load_balancer_arn = "${aws_lb.nlb.arn}"
- protocol = "TCP"
- port = 443
-
- default_action {
- type = "forward"
- target_group_arn = "${module.workers.target_group_https}"
- }
-}
-
-# Target group of controllers
-resource "aws_lb_target_group" "controllers" {
- name = "${var.cluster_name}-controllers"
- vpc_id = "${aws_vpc.network.id}"
- target_type = "instance"
-
- protocol = "TCP"
- port = 6443
-
- # TCP health check for apiserver
- health_check {
- protocol = "TCP"
- port = 6443
-
- # NLBs require the same healthy and unhealthy thresholds
- healthy_threshold = 3
- unhealthy_threshold = 3
-
- # Interval between health checks must be 10 or 30 (seconds)
- interval = 10
- }
-}
-
-# Attach controller instances to apiserver NLB
-resource "aws_lb_target_group_attachment" "controllers" {
- count = "${var.controller_count}"
-
- target_group_arn = "${aws_lb_target_group.controllers.arn}"
- target_id = "${element(aws_instance.controllers.*.id, count.index)}"
- port = 6443
-}
diff --git a/aws/fedora-atomic/kubernetes/outputs.tf b/aws/fedora-atomic/kubernetes/outputs.tf
deleted file mode 100644
index d042a3d4..00000000
--- a/aws/fedora-atomic/kubernetes/outputs.tf
+++ /dev/null
@@ -1,48 +0,0 @@
-output "kubeconfig-admin" {
- value = "${module.bootkube.kubeconfig-admin}"
-}
-
-# Outputs for Kubernetes Ingress
-
-output "ingress_dns_name" {
- value = "${aws_lb.nlb.dns_name}"
- description = "DNS name of the network load balancer for distributing traffic to Ingress controllers"
-}
-
-output "ingress_zone_id" {
- value = "${aws_lb.nlb.zone_id}"
- description = "Route53 zone id of the network load balancer DNS name that can be used in Route53 alias records"
-}
-
-# Outputs for worker pools
-
-output "vpc_id" {
- value = "${aws_vpc.network.id}"
- description = "ID of the VPC for creating worker instances"
-}
-
-output "subnet_ids" {
- value = ["${aws_subnet.public.*.id}"]
- description = "List of subnet IDs for creating worker instances"
-}
-
-output "worker_security_groups" {
- value = ["${aws_security_group.worker.id}"]
- description = "List of worker security group IDs"
-}
-
-output "kubeconfig" {
- value = "${module.bootkube.kubeconfig-kubelet}"
-}
-
-# Outputs for custom load balancing
-
-output "worker_target_group_http" {
- description = "ARN of a target group of workers for HTTP traffic"
- value = "${module.workers.target_group_http}"
-}
-
-output "worker_target_group_https" {
- description = "ARN of a target group of workers for HTTPS traffic"
- value = "${module.workers.target_group_https}"
-}
diff --git a/aws/fedora-atomic/kubernetes/require.tf b/aws/fedora-atomic/kubernetes/require.tf
deleted file mode 100644
index 68f475d6..00000000
--- a/aws/fedora-atomic/kubernetes/require.tf
+++ /dev/null
@@ -1,25 +0,0 @@
-# Terraform version and plugin versions
-
-terraform {
- required_version = ">= 0.11.0"
-}
-
-provider "aws" {
- version = ">= 1.13, < 3.0"
-}
-
-provider "local" {
- version = "~> 1.0"
-}
-
-provider "null" {
- version = "~> 1.0"
-}
-
-provider "template" {
- version = "~> 1.0"
-}
-
-provider "tls" {
- version = "~> 1.0"
-}
diff --git a/aws/fedora-atomic/kubernetes/security.tf b/aws/fedora-atomic/kubernetes/security.tf
deleted file mode 100644
index 7672a92e..00000000
--- a/aws/fedora-atomic/kubernetes/security.tf
+++ /dev/null
@@ -1,359 +0,0 @@
-# Security Groups (instance firewalls)
-
-# Controller security group
-
-resource "aws_security_group" "controller" {
- name = "${var.cluster_name}-controller"
- description = "${var.cluster_name} controller security group"
-
- vpc_id = "${aws_vpc.network.id}"
-
- tags = "${map("Name", "${var.cluster_name}-controller")}"
-}
-
-resource "aws_security_group_rule" "controller-ssh" {
- security_group_id = "${aws_security_group.controller.id}"
-
- type = "ingress"
- protocol = "tcp"
- from_port = 22
- to_port = 22
- cidr_blocks = ["0.0.0.0/0"]
-}
-
-resource "aws_security_group_rule" "controller-etcd" {
- security_group_id = "${aws_security_group.controller.id}"
-
- type = "ingress"
- protocol = "tcp"
- from_port = 2379
- to_port = 2380
- self = true
-}
-
-# Allow Prometheus to scrape etcd metrics
-resource "aws_security_group_rule" "controller-etcd-metrics" {
- security_group_id = "${aws_security_group.controller.id}"
-
- type = "ingress"
- protocol = "tcp"
- from_port = 2381
- to_port = 2381
- source_security_group_id = "${aws_security_group.worker.id}"
-}
-
-resource "aws_security_group_rule" "controller-vxlan" {
- count = "${var.networking == "flannel" ? 1 : 0}"
-
- security_group_id = "${aws_security_group.controller.id}"
-
- type = "ingress"
- protocol = "udp"
- from_port = 4789
- to_port = 4789
- source_security_group_id = "${aws_security_group.worker.id}"
-}
-
-resource "aws_security_group_rule" "controller-vxlan-self" {
- count = "${var.networking == "flannel" ? 1 : 0}"
-
- security_group_id = "${aws_security_group.controller.id}"
-
- type = "ingress"
- protocol = "udp"
- from_port = 4789
- to_port = 4789
- self = true
-}
-
-resource "aws_security_group_rule" "controller-apiserver" {
- security_group_id = "${aws_security_group.controller.id}"
-
- type = "ingress"
- protocol = "tcp"
- from_port = 6443
- to_port = 6443
- cidr_blocks = ["0.0.0.0/0"]
-}
-
-# Allow Prometheus to scrape node-exporter daemonset
-resource "aws_security_group_rule" "controller-node-exporter" {
- security_group_id = "${aws_security_group.controller.id}"
-
- type = "ingress"
- protocol = "tcp"
- from_port = 9100
- to_port = 9100
- source_security_group_id = "${aws_security_group.worker.id}"
-}
-
-# Allow apiserver to access kubelets for exec, log, port-forward
-resource "aws_security_group_rule" "controller-kubelet" {
- security_group_id = "${aws_security_group.controller.id}"
-
- type = "ingress"
- protocol = "tcp"
- from_port = 10250
- to_port = 10250
- source_security_group_id = "${aws_security_group.worker.id}"
-}
-
-resource "aws_security_group_rule" "controller-kubelet-self" {
- security_group_id = "${aws_security_group.controller.id}"
-
- type = "ingress"
- protocol = "tcp"
- from_port = 10250
- to_port = 10250
- self = true
-}
-
-resource "aws_security_group_rule" "controller-bgp" {
- security_group_id = "${aws_security_group.controller.id}"
-
- type = "ingress"
- protocol = "tcp"
- from_port = 179
- to_port = 179
- source_security_group_id = "${aws_security_group.worker.id}"
-}
-
-resource "aws_security_group_rule" "controller-bgp-self" {
- security_group_id = "${aws_security_group.controller.id}"
-
- type = "ingress"
- protocol = "tcp"
- from_port = 179
- to_port = 179
- self = true
-}
-
-resource "aws_security_group_rule" "controller-ipip" {
- security_group_id = "${aws_security_group.controller.id}"
-
- type = "ingress"
- protocol = 4
- from_port = 0
- to_port = 0
- source_security_group_id = "${aws_security_group.worker.id}"
-}
-
-resource "aws_security_group_rule" "controller-ipip-self" {
- security_group_id = "${aws_security_group.controller.id}"
-
- type = "ingress"
- protocol = 4
- from_port = 0
- to_port = 0
- self = true
-}
-
-resource "aws_security_group_rule" "controller-ipip-legacy" {
- security_group_id = "${aws_security_group.controller.id}"
-
- type = "ingress"
- protocol = 94
- from_port = 0
- to_port = 0
- source_security_group_id = "${aws_security_group.worker.id}"
-}
-
-resource "aws_security_group_rule" "controller-ipip-legacy-self" {
- security_group_id = "${aws_security_group.controller.id}"
-
- type = "ingress"
- protocol = 94
- from_port = 0
- to_port = 0
- self = true
-}
-
-resource "aws_security_group_rule" "controller-egress" {
- security_group_id = "${aws_security_group.controller.id}"
-
- type = "egress"
- protocol = "-1"
- from_port = 0
- to_port = 0
- cidr_blocks = ["0.0.0.0/0"]
- ipv6_cidr_blocks = ["::/0"]
-}
-
-# Worker security group
-
-resource "aws_security_group" "worker" {
- name = "${var.cluster_name}-worker"
- description = "${var.cluster_name} worker security group"
-
- vpc_id = "${aws_vpc.network.id}"
-
- tags = "${map("Name", "${var.cluster_name}-worker")}"
-}
-
-resource "aws_security_group_rule" "worker-ssh" {
- security_group_id = "${aws_security_group.worker.id}"
-
- type = "ingress"
- protocol = "tcp"
- from_port = 22
- to_port = 22
- cidr_blocks = ["0.0.0.0/0"]
-}
-
-resource "aws_security_group_rule" "worker-http" {
- security_group_id = "${aws_security_group.worker.id}"
-
- type = "ingress"
- protocol = "tcp"
- from_port = 80
- to_port = 80
- cidr_blocks = ["0.0.0.0/0"]
-}
-
-resource "aws_security_group_rule" "worker-https" {
- security_group_id = "${aws_security_group.worker.id}"
-
- type = "ingress"
- protocol = "tcp"
- from_port = 443
- to_port = 443
- cidr_blocks = ["0.0.0.0/0"]
-}
-
-resource "aws_security_group_rule" "worker-vxlan" {
- count = "${var.networking == "flannel" ? 1 : 0}"
-
- security_group_id = "${aws_security_group.worker.id}"
-
- type = "ingress"
- protocol = "udp"
- from_port = 4789
- to_port = 4789
- source_security_group_id = "${aws_security_group.controller.id}"
-}
-
-resource "aws_security_group_rule" "worker-vxlan-self" {
- count = "${var.networking == "flannel" ? 1 : 0}"
-
- security_group_id = "${aws_security_group.worker.id}"
-
- type = "ingress"
- protocol = "udp"
- from_port = 4789
- to_port = 4789
- self = true
-}
-
-# Allow Prometheus to scrape node-exporter daemonset
-resource "aws_security_group_rule" "worker-node-exporter" {
- security_group_id = "${aws_security_group.worker.id}"
-
- type = "ingress"
- protocol = "tcp"
- from_port = 9100
- to_port = 9100
- self = true
-}
-
-resource "aws_security_group_rule" "ingress-health" {
- security_group_id = "${aws_security_group.worker.id}"
-
- type = "ingress"
- protocol = "tcp"
- from_port = 10254
- to_port = 10254
- cidr_blocks = ["0.0.0.0/0"]
-}
-
-# Allow apiserver to access kubelets for exec, log, port-forward
-resource "aws_security_group_rule" "worker-kubelet" {
- security_group_id = "${aws_security_group.worker.id}"
-
- type = "ingress"
- protocol = "tcp"
- from_port = 10250
- to_port = 10250
- source_security_group_id = "${aws_security_group.controller.id}"
-}
-
-# Allow Prometheus to scrape kubelet metrics
-resource "aws_security_group_rule" "worker-kubelet-self" {
- security_group_id = "${aws_security_group.worker.id}"
-
- type = "ingress"
- protocol = "tcp"
- from_port = 10250
- to_port = 10250
- self = true
-}
-
-resource "aws_security_group_rule" "worker-bgp" {
- security_group_id = "${aws_security_group.worker.id}"
-
- type = "ingress"
- protocol = "tcp"
- from_port = 179
- to_port = 179
- source_security_group_id = "${aws_security_group.controller.id}"
-}
-
-resource "aws_security_group_rule" "worker-bgp-self" {
- security_group_id = "${aws_security_group.worker.id}"
-
- type = "ingress"
- protocol = "tcp"
- from_port = 179
- to_port = 179
- self = true
-}
-
-resource "aws_security_group_rule" "worker-ipip" {
- security_group_id = "${aws_security_group.worker.id}"
-
- type = "ingress"
- protocol = 4
- from_port = 0
- to_port = 0
- source_security_group_id = "${aws_security_group.controller.id}"
-}
-
-resource "aws_security_group_rule" "worker-ipip-self" {
- security_group_id = "${aws_security_group.worker.id}"
-
- type = "ingress"
- protocol = 4
- from_port = 0
- to_port = 0
- self = true
-}
-
-resource "aws_security_group_rule" "worker-ipip-legacy" {
- security_group_id = "${aws_security_group.worker.id}"
-
- type = "ingress"
- protocol = 94
- from_port = 0
- to_port = 0
- source_security_group_id = "${aws_security_group.controller.id}"
-}
-
-resource "aws_security_group_rule" "worker-ipip-legacy-self" {
- security_group_id = "${aws_security_group.worker.id}"
-
- type = "ingress"
- protocol = 94
- from_port = 0
- to_port = 0
- self = true
-}
-
-resource "aws_security_group_rule" "worker-egress" {
- security_group_id = "${aws_security_group.worker.id}"
-
- type = "egress"
- protocol = "-1"
- from_port = 0
- to_port = 0
- cidr_blocks = ["0.0.0.0/0"]
- ipv6_cidr_blocks = ["::/0"]
-}
diff --git a/aws/fedora-atomic/kubernetes/ssh.tf b/aws/fedora-atomic/kubernetes/ssh.tf
deleted file mode 100644
index c72a09e4..00000000
--- a/aws/fedora-atomic/kubernetes/ssh.tf
+++ /dev/null
@@ -1,89 +0,0 @@
-# Secure copy etcd TLS assets to controllers.
-resource "null_resource" "copy-controller-secrets" {
- count = "${var.controller_count}"
-
- connection {
- type = "ssh"
- host = "${element(aws_instance.controllers.*.public_ip, count.index)}"
- user = "fedora"
- timeout = "15m"
- }
-
- provisioner "file" {
- content = "${module.bootkube.etcd_ca_cert}"
- destination = "$HOME/etcd-client-ca.crt"
- }
-
- provisioner "file" {
- content = "${module.bootkube.etcd_client_cert}"
- destination = "$HOME/etcd-client.crt"
- }
-
- provisioner "file" {
- content = "${module.bootkube.etcd_client_key}"
- destination = "$HOME/etcd-client.key"
- }
-
- provisioner "file" {
- content = "${module.bootkube.etcd_server_cert}"
- destination = "$HOME/etcd-server.crt"
- }
-
- provisioner "file" {
- content = "${module.bootkube.etcd_server_key}"
- destination = "$HOME/etcd-server.key"
- }
-
- provisioner "file" {
- content = "${module.bootkube.etcd_peer_cert}"
- destination = "$HOME/etcd-peer.crt"
- }
-
- provisioner "file" {
- content = "${module.bootkube.etcd_peer_key}"
- destination = "$HOME/etcd-peer.key"
- }
-
- provisioner "remote-exec" {
- inline = [
- "sudo mkdir -p /etc/ssl/etcd/etcd",
- "sudo mv etcd-client* /etc/ssl/etcd/",
- "sudo cp /etc/ssl/etcd/etcd-client-ca.crt /etc/ssl/etcd/etcd/server-ca.crt",
- "sudo mv etcd-server.crt /etc/ssl/etcd/etcd/server.crt",
- "sudo mv etcd-server.key /etc/ssl/etcd/etcd/server.key",
- "sudo cp /etc/ssl/etcd/etcd-client-ca.crt /etc/ssl/etcd/etcd/peer-ca.crt",
- "sudo mv etcd-peer.crt /etc/ssl/etcd/etcd/peer.crt",
- "sudo mv etcd-peer.key /etc/ssl/etcd/etcd/peer.key",
- ]
- }
-}
-
-# Secure copy bootkube assets to ONE controller and start bootkube to perform
-# one-time self-hosted cluster bootstrapping.
-resource "null_resource" "bootkube-start" {
- depends_on = [
- "null_resource.copy-controller-secrets",
- "module.workers",
- "aws_route53_record.apiserver",
- ]
-
- connection {
- type = "ssh"
- host = "${aws_instance.controllers.0.public_ip}"
- user = "fedora"
- timeout = "15m"
- }
-
- provisioner "file" {
- source = "${var.asset_dir}"
- destination = "$HOME/assets"
- }
-
- provisioner "remote-exec" {
- inline = [
- "while [ ! -f /var/lib/cloud/instance/boot-finished ]; do sleep 4; done",
- "sudo mv $HOME/assets /var/lib/bootkube",
- "sudo systemctl start bootkube",
- ]
- }
-}
diff --git a/aws/fedora-atomic/kubernetes/variables.tf b/aws/fedora-atomic/kubernetes/variables.tf
deleted file mode 100644
index 538923f6..00000000
--- a/aws/fedora-atomic/kubernetes/variables.tf
+++ /dev/null
@@ -1,124 +0,0 @@
-variable "cluster_name" {
- type = "string"
- description = "Unique cluster name (prepended to dns_zone)"
-}
-
-# AWS
-
-variable "dns_zone" {
- type = "string"
- description = "AWS DNS Zone (e.g. aws.example.com)"
-}
-
-variable "dns_zone_id" {
- type = "string"
- description = "AWS DNS Zone ID (e.g. Z3PAABBCFAKEC0)"
-}
-
-# instances
-
-variable "controller_count" {
- type = "string"
- default = "1"
- description = "Number of controllers (i.e. masters)"
-}
-
-variable "worker_count" {
- type = "string"
- default = "1"
- description = "Number of workers"
-}
-
-variable "controller_type" {
- type = "string"
- default = "t3.small"
- description = "EC2 instance type for controllers"
-}
-
-variable "worker_type" {
- type = "string"
- default = "t3.small"
- description = "EC2 instance type for workers"
-}
-
-variable "disk_size" {
- type = "string"
- default = "40"
- description = "Size of the EBS volume in GB"
-}
-
-variable "disk_type" {
- type = "string"
- default = "gp2"
- description = "Type of the EBS volume (e.g. standard, gp2, io1)"
-}
-
-variable "disk_iops" {
- type = "string"
- default = "0"
- description = "IOPS of the EBS volume (e.g. 100)"
-}
-
-variable "worker_price" {
- type = "string"
- default = ""
- description = "Spot price in USD for autoscaling group spot instances. Leave as default empty string for autoscaling group to use on-demand instances. Note, switching in-place from spot to on-demand is not possible: https://github.com/terraform-providers/terraform-provider-aws/issues/4320"
-}
-
-# configuration
-
-variable "ssh_authorized_key" {
- type = "string"
- description = "SSH public key for user 'fedora'"
-}
-
-variable "asset_dir" {
- description = "Path to a directory where generated assets should be placed (contains secrets)"
- type = "string"
-}
-
-variable "networking" {
- description = "Choice of networking provider (calico or flannel)"
- type = "string"
- default = "calico"
-}
-
-variable "network_mtu" {
- description = "CNI interface MTU (applies to calico only). Use 8981 if using instances types with Jumbo frames."
- type = "string"
- default = "1480"
-}
-
-variable "host_cidr" {
- description = "CIDR IPv4 range to assign to EC2 nodes"
- type = "string"
- default = "10.0.0.0/16"
-}
-
-variable "pod_cidr" {
- description = "CIDR IPv4 range to assign Kubernetes pods"
- type = "string"
- default = "10.2.0.0/16"
-}
-
-variable "service_cidr" {
- description = < /etc/kubernetes/ca.crt"
- Restart=always
- RestartSec=10
- - path: /etc/kubernetes/kubelet.conf
- content: |
- ARGS="--anonymous-auth=false \
- --authentication-token-webhook \
- --authorization-mode=Webhook \
- --client-ca-file=/etc/kubernetes/ca.crt \
- --cluster_dns=${cluster_dns_service_ip} \
- --cluster_domain=${cluster_domain_suffix} \
- --cni-conf-dir=/etc/kubernetes/cni/net.d \
- --exit-on-lock-contention \
- --kubeconfig=/etc/kubernetes/kubeconfig \
- --lock-file=/var/run/lock/kubelet.lock \
- --network-plugin=cni \
- --node-labels=node-role.kubernetes.io/node \
- --pod-manifest-path=/etc/kubernetes/manifests \
- --read-only-port=0 \
- --volume-plugin-dir=/var/lib/kubelet/volumeplugins"
- - path: /etc/kubernetes/kubeconfig
- permissions: '0644'
- content: |
- ${kubeconfig}
- - path: /etc/NetworkManager/conf.d/typhoon.conf
- content: |
- [main]
- plugins=keyfile
- [keyfile]
- unmanaged-devices=interface-name:cali*;interface-name:tunl*
- - path: /etc/selinux/config
- owner: root:root
- permissions: '0644'
- content: |
- SELINUX=permissive
- SELINUXTYPE=targeted
-bootcmd:
- - [setenforce, Permissive]
- - [systemctl, disable, firewalld, --now]
- # https://github.com/kubernetes/kubernetes/issues/60869
- - [modprobe, ip_vs]
-runcmd:
- - [systemctl, daemon-reload]
- - [systemctl, restart, NetworkManager]
- - "atomic install --system --name=kubelet quay.io/poseidon/kubelet:v1.14.1"
- - [systemctl, start, --no-block, kubelet.service]
-users:
- - default
- - name: fedora
- gecos: Fedora Admin
- sudo: ALL=(ALL) NOPASSWD:ALL
- groups: wheel,adm,systemd-journal,docker
- ssh-authorized-keys:
- - "${ssh_authorized_key}"
diff --git a/aws/fedora-atomic/kubernetes/workers/ingress.tf b/aws/fedora-atomic/kubernetes/workers/ingress.tf
deleted file mode 100644
index bdb7362f..00000000
--- a/aws/fedora-atomic/kubernetes/workers/ingress.tf
+++ /dev/null
@@ -1,47 +0,0 @@
-# Target groups of instances for use with load balancers
-
-resource "aws_lb_target_group" "workers-http" {
- name = "${var.name}-workers-http"
- vpc_id = "${var.vpc_id}"
- target_type = "instance"
-
- protocol = "TCP"
- port = 80
-
- # HTTP health check for ingress
- health_check {
- protocol = "HTTP"
- port = 10254
- path = "/healthz"
-
- # NLBs require the same healthy and unhealthy thresholds
- healthy_threshold = 3
- unhealthy_threshold = 3
-
- # Interval between health checks must be 10 or 30 (seconds)
- interval = 10
- }
-}
-
-resource "aws_lb_target_group" "workers-https" {
- name = "${var.name}-workers-https"
- vpc_id = "${var.vpc_id}"
- target_type = "instance"
-
- protocol = "TCP"
- port = 443
-
- # HTTP health check for ingress
- health_check {
- protocol = "HTTP"
- port = 10254
- path = "/healthz"
-
- # NLBs require the same healthy and unhealthy thresholds
- healthy_threshold = 3
- unhealthy_threshold = 3
-
- # Interval between health checks must be 10 or 30 (seconds)
- interval = 10
- }
-}
diff --git a/aws/fedora-atomic/kubernetes/workers/outputs.tf b/aws/fedora-atomic/kubernetes/workers/outputs.tf
deleted file mode 100644
index 19552266..00000000
--- a/aws/fedora-atomic/kubernetes/workers/outputs.tf
+++ /dev/null
@@ -1,9 +0,0 @@
-output "target_group_http" {
- description = "ARN of a target group of workers for HTTP traffic"
- value = "${aws_lb_target_group.workers-http.arn}"
-}
-
-output "target_group_https" {
- description = "ARN of a target group of workers for HTTPS traffic"
- value = "${aws_lb_target_group.workers-https.arn}"
-}
diff --git a/aws/fedora-atomic/kubernetes/workers/variables.tf b/aws/fedora-atomic/kubernetes/workers/variables.tf
deleted file mode 100644
index 7552e297..00000000
--- a/aws/fedora-atomic/kubernetes/workers/variables.tf
+++ /dev/null
@@ -1,87 +0,0 @@
-variable "name" {
- type = "string"
- description = "Unique name for the worker pool"
-}
-
-# AWS
-
-variable "vpc_id" {
- type = "string"
- description = "Must be set to `vpc_id` output by cluster"
-}
-
-variable "subnet_ids" {
- type = "list"
- description = "Must be set to `subnet_ids` output by cluster"
-}
-
-variable "security_groups" {
- type = "list"
- description = "Must be set to `worker_security_groups` output by cluster"
-}
-
-# instances
-
-variable "count" {
- type = "string"
- default = "1"
- description = "Number of instances"
-}
-
-variable "instance_type" {
- type = "string"
- default = "t3.small"
- description = "EC2 instance type"
-}
-
-variable "disk_size" {
- type = "string"
- default = "40"
- description = "Size of the EBS volume in GB"
-}
-
-variable "disk_type" {
- type = "string"
- default = "gp2"
- description = "Type of the EBS volume (e.g. standard, gp2, io1)"
-}
-
-variable "disk_iops" {
- type = "string"
- default = "0"
- description = "IOPS of the EBS volume (required for io1)"
-}
-
-variable "spot_price" {
- type = "string"
- default = ""
- description = "Spot price in USD for autoscaling group spot instances. Leave as default empty string for autoscaling group to use on-demand instances. Note, switching in-place from spot to on-demand is not possible: https://github.com/terraform-providers/terraform-provider-aws/issues/4320"
-}
-
-# configuration
-
-variable "kubeconfig" {
- type = "string"
- description = "Must be set to `kubeconfig` output by cluster"
-}
-
-variable "ssh_authorized_key" {
- type = "string"
- description = "SSH public key for user 'fedora'"
-}
-
-variable "service_cidr" {
- description = <
-
-Typhoon is a minimal and free Kubernetes distribution.
-
-* Minimal, stable base Kubernetes distribution
-* Declarative infrastructure and configuration
-* Free (freedom and cost) and privacy-respecting
-* Practical for labs, datacenters, and clouds
-
-Typhoon distributes upstream Kubernetes, architectural conventions, and cluster addons, much like a GNU/Linux distribution provides the Linux kernel and userspace components.
-
-## Features
-
-* Kubernetes v1.15.0 (upstream, via [kubernetes-incubator/bootkube](https://github.com/kubernetes-incubator/bootkube))
-* Single or multi-master, [Calico](https://www.projectcalico.org/) or [flannel](https://github.com/coreos/flannel) networking
-* On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
-* Ready for Ingress, Prometheus, Grafana, and other optional [addons](https://typhoon.psdn.io/addons/overview/)
-
-## Docs
-
-Please see the [official docs](https://typhoon.psdn.io) and the bare-metal [tutorial](https://typhoon.psdn.io/cl/bare-metal/).
-
diff --git a/bare-metal/fedora-atomic/kubernetes/bootkube.tf b/bare-metal/fedora-atomic/kubernetes/bootkube.tf
deleted file mode 100644
index 63def25d..00000000
--- a/bare-metal/fedora-atomic/kubernetes/bootkube.tf
+++ /dev/null
@@ -1,18 +0,0 @@
-# Self-hosted Kubernetes assets (kubeconfig, manifests)
-module "bootkube" {
- source = "git::https://github.com/poseidon/terraform-render-bootkube.git?ref=62df9ad69cc0da35f47d40fa981370c4503ad581"
-
- cluster_name = "${var.cluster_name}"
- api_servers = ["${var.k8s_domain_name}"]
- etcd_servers = ["${var.controller_domains}"]
- asset_dir = "${var.asset_dir}"
- networking = "${var.networking}"
- network_mtu = "${var.network_mtu}"
- pod_cidr = "${var.pod_cidr}"
- service_cidr = "${var.service_cidr}"
- cluster_domain_suffix = "${var.cluster_domain_suffix}"
- enable_reporting = "${var.enable_reporting}"
-
- # Fedora
- trusted_certs_dir = "/etc/pki/tls/certs"
-}
diff --git a/bare-metal/fedora-atomic/kubernetes/cloudinit/controller.yaml.tmpl b/bare-metal/fedora-atomic/kubernetes/cloudinit/controller.yaml.tmpl
deleted file mode 100644
index a43d58fd..00000000
--- a/bare-metal/fedora-atomic/kubernetes/cloudinit/controller.yaml.tmpl
+++ /dev/null
@@ -1,100 +0,0 @@
-#cloud-config
-write_files:
- - path: /etc/etcd/etcd.conf
- content: |
- ETCD_NAME=${etcd_name}
- ETCD_DATA_DIR=/var/lib/etcd
- ETCD_ADVERTISE_CLIENT_URLS=https://${domain_name}:2379
- ETCD_INITIAL_ADVERTISE_PEER_URLS=https://${domain_name}:2380
- ETCD_LISTEN_CLIENT_URLS=https://0.0.0.0:2379
- ETCD_LISTEN_PEER_URLS=https://0.0.0.0:2380
- ETCD_LISTEN_METRICS_URLS=http://0.0.0.0:2381
- ETCD_INITIAL_CLUSTER=${etcd_initial_cluster}
- ETCD_STRICT_RECONFIG_CHECK=true
- ETCD_TRUSTED_CA_FILE=/etc/ssl/certs/etcd/server-ca.crt
- ETCD_CERT_FILE=/etc/ssl/certs/etcd/server.crt
- ETCD_KEY_FILE=/etc/ssl/certs/etcd/server.key
- ETCD_CLIENT_CERT_AUTH=true
- ETCD_PEER_TRUSTED_CA_FILE=/etc/ssl/certs/etcd/peer-ca.crt
- ETCD_PEER_CERT_FILE=/etc/ssl/certs/etcd/peer.crt
- ETCD_PEER_KEY_FILE=/etc/ssl/certs/etcd/peer.key
- ETCD_PEER_CLIENT_CERT_AUTH=true
- - path: /etc/systemd/system/kubelet.service.d/10-typhoon.conf
- content: |
- [Unit]
- Wants=rpc-statd.service
- [Service]
- ExecStartPre=/bin/mkdir -p /opt/cni/bin
- ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
- ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
- ExecStartPre=/bin/mkdir -p /etc/kubernetes/checkpoint-secrets
- ExecStartPre=/bin/mkdir -p /etc/kubernetes/inactive-manifests
- ExecStartPre=/bin/mkdir -p /var/lib/cni
- ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins
- ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
- Restart=always
- RestartSec=10
- - path: /etc/kubernetes/kubelet.conf
- content: |
- ARGS="--anonymous-auth=false \
- --authentication-token-webhook \
- --authorization-mode=Webhook \
- --client-ca-file=/etc/kubernetes/ca.crt \
- --cluster_dns=${cluster_dns_service_ip} \
- --cluster_domain=${cluster_domain_suffix} \
- --cni-conf-dir=/etc/kubernetes/cni/net.d \
- --exit-on-lock-contention \
- --hostname-override=${domain_name} \
- --kubeconfig=/etc/kubernetes/kubeconfig \
- --lock-file=/var/run/lock/kubelet.lock \
- --network-plugin=cni \
- --node-labels=node-role.kubernetes.io/master \
- --node-labels=node-role.kubernetes.io/controller="true" \
- --pod-manifest-path=/etc/kubernetes/manifests \
- --read-only-port=0 \
- --register-with-taints=node-role.kubernetes.io/master=:NoSchedule \
- --volume-plugin-dir=/var/lib/kubelet/volumeplugins"
- - path: /etc/systemd/system/kubelet.path
- content: |
- [Unit]
- Description=Watch for kubeconfig
- [Path]
- PathExists=/etc/kubernetes/kubeconfig
- [Install]
- WantedBy=multi-user.target
- - path: /var/lib/bootkube/.keep
- - path: /etc/NetworkManager/conf.d/typhoon.conf
- content: |
- [main]
- plugins=keyfile
- [keyfile]
- unmanaged-devices=interface-name:cali*;interface-name:tunl*
- - path: /etc/selinux/config
- owner: root:root
- permissions: '0644'
- content: |
- SELINUX=permissive
- SELINUXTYPE=targeted
-bootcmd:
- - [setenforce, Permissive]
- - [systemctl, disable, firewalld, --now]
- # https://github.com/kubernetes/kubernetes/issues/60869
- - [modprobe, ip_vs]
-runcmd:
- - [systemctl, daemon-reload]
- - [systemctl, restart, NetworkManager]
- - [hostnamectl, set-hostname, ${domain_name}]
- - "atomic install --system --name=etcd quay.io/poseidon/etcd:v3.3.12"
- - "atomic install --system --name=kubelet quay.io/poseidon/kubelet:v1.14.1"
- - "atomic install --system --name=bootkube quay.io/poseidon/bootkube:v0.14.0"
- - [systemctl, start, --no-block, etcd.service]
- - [systemctl, enable, kubelet.path]
- - [systemctl, start, --no-block, kubelet.path]
-users:
- - default
- - name: fedora
- gecos: Fedora Admin
- sudo: ALL=(ALL) NOPASSWD:ALL
- groups: wheel,adm,systemd-journal,docker
- ssh-authorized-keys:
- - "${ssh_authorized_key}"
diff --git a/bare-metal/fedora-atomic/kubernetes/cloudinit/worker.yaml.tmpl b/bare-metal/fedora-atomic/kubernetes/cloudinit/worker.yaml.tmpl
deleted file mode 100644
index 2d4c5cf0..00000000
--- a/bare-metal/fedora-atomic/kubernetes/cloudinit/worker.yaml.tmpl
+++ /dev/null
@@ -1,73 +0,0 @@
-#cloud-config
-write_files:
- - path: /etc/systemd/system/kubelet.service.d/10-typhoon.conf
- content: |
- [Unit]
- Wants=rpc-statd.service
- [Service]
- ExecStartPre=/bin/mkdir -p /opt/cni/bin
- ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
- ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
- ExecStartPre=/bin/mkdir -p /var/lib/cni
- ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins
- ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
- Restart=always
- RestartSec=10
- - path: /etc/kubernetes/kubelet.conf
- content: |
- ARGS="--anonymous-auth=false \
- --authentication-token-webhook \
- --authorization-mode=Webhook \
- --client-ca-file=/etc/kubernetes/ca.crt \
- --cluster_dns=${cluster_dns_service_ip} \
- --cluster_domain=${cluster_domain_suffix} \
- --cni-conf-dir=/etc/kubernetes/cni/net.d \
- --exit-on-lock-contention \
- --hostname-override=${domain_name} \
- --kubeconfig=/etc/kubernetes/kubeconfig \
- --lock-file=/var/run/lock/kubelet.lock \
- --network-plugin=cni \
- --node-labels=node-role.kubernetes.io/node \
- --pod-manifest-path=/etc/kubernetes/manifests \
- --read-only-port=0 \
- --volume-plugin-dir=/var/lib/kubelet/volumeplugins"
- - path: /etc/systemd/system/kubelet.path
- content: |
- [Unit]
- Description=Watch for kubeconfig
- [Path]
- PathExists=/etc/kubernetes/kubeconfig
- [Install]
- WantedBy=multi-user.target
- - path: /etc/NetworkManager/conf.d/typhoon.conf
- content: |
- [main]
- plugins=keyfile
- [keyfile]
- unmanaged-devices=interface-name:cali*;interface-name:tunl*
- - path: /etc/selinux/config
- owner: root:root
- permissions: '0644'
- content: |
- SELINUX=permissive
- SELINUXTYPE=targeted
-bootcmd:
- - [setenforce, Permissive]
- - [systemctl, disable, firewalld, --now]
- # https://github.com/kubernetes/kubernetes/issues/60869
- - [modprobe, ip_vs]
-runcmd:
- - [systemctl, daemon-reload]
- - [systemctl, restart, NetworkManager]
- - [hostnamectl, set-hostname, ${domain_name}]
- - "atomic install --system --name=kubelet quay.io/poseidon/kubelet:v1.14.1"
- - [systemctl, enable, kubelet.path]
- - [systemctl, start, --no-block, kubelet.path]
-users:
- - default
- - name: fedora
- gecos: Fedora Admin
- sudo: ALL=(ALL) NOPASSWD:ALL
- groups: wheel,adm,systemd-journal,docker
- ssh-authorized-keys:
- - "${ssh_authorized_key}"
diff --git a/bare-metal/fedora-atomic/kubernetes/groups.tf b/bare-metal/fedora-atomic/kubernetes/groups.tf
deleted file mode 100644
index 200fd3b9..00000000
--- a/bare-metal/fedora-atomic/kubernetes/groups.tf
+++ /dev/null
@@ -1,37 +0,0 @@
-// Install Fedora to disk
-resource "matchbox_group" "install" {
- count = "${length(var.controller_names) + length(var.worker_names)}"
-
- name = "${format("fedora-install-%s", element(concat(var.controller_names, var.worker_names), count.index))}"
- profile = "${element(matchbox_profile.cached-fedora-install.*.name, count.index)}"
-
- selector = {
- mac = "${element(concat(var.controller_macs, var.worker_macs), count.index)}"
- }
-
- metadata = {
- ssh_authorized_key = "${var.ssh_authorized_key}"
- }
-}
-
-resource "matchbox_group" "controller" {
- count = "${length(var.controller_names)}"
- name = "${format("%s-%s", var.cluster_name, element(var.controller_names, count.index))}"
- profile = "${element(matchbox_profile.controllers.*.name, count.index)}"
-
- selector = {
- mac = "${element(var.controller_macs, count.index)}"
- os = "installed"
- }
-}
-
-resource "matchbox_group" "worker" {
- count = "${length(var.worker_names)}"
- name = "${format("%s-%s", var.cluster_name, element(var.worker_names, count.index))}"
- profile = "${element(matchbox_profile.workers.*.name, count.index)}"
-
- selector = {
- mac = "${element(var.worker_macs, count.index)}"
- os = "installed"
- }
-}
diff --git a/bare-metal/fedora-atomic/kubernetes/kickstart/fedora-atomic.ks.tmpl b/bare-metal/fedora-atomic/kubernetes/kickstart/fedora-atomic.ks.tmpl
deleted file mode 100644
index 66e80a7c..00000000
--- a/bare-metal/fedora-atomic/kubernetes/kickstart/fedora-atomic.ks.tmpl
+++ /dev/null
@@ -1,36 +0,0 @@
-# required
-lang en_US.UTF-8
-keyboard us
-timezone --utc Etc/UTC
-
-# wipe disks
-zerombr
-clearpart --all --initlabel
-
-# locked root and temporary user
-rootpw --lock --iscrypted locked
-user --name=none
-
-# config
-autopart --type=lvm --noswap
-network --bootproto=dhcp --device=link --activate --onboot=on
-bootloader --timeout=1 --append="ds=nocloud\;seedfrom=/var/cloud-init/"
-services --enabled=cloud-init,cloud-init-local,cloud-config,cloud-final
-
-ostreesetup --osname="fedora-atomic" --remote="fedora-atomic" --url="${atomic_assets_endpoint}/repo" --ref=fedora/28/x86_64/atomic-host --nogpg
-
-reboot
-
-%post --erroronfail
-mkdir /var/cloud-init
-curl --retry 10 "${matchbox_http_endpoint}/generic?mac=${mac}&os=installed" -o /var/cloud-init/user-data
-echo "instance-id: iid-local01" > /var/cloud-init/meta-data
-
-rm -f /etc/ostree/remotes.d/fedora-atomic.conf
-ostree remote add fedora-atomic https://dl.fedoraproject.org/atomic/repo/ --set=gpgkeypath=/etc/pki/rpm-gpg/RPM-GPG-KEY-fedora-28-primary
-
-# lock root user
-passwd -l root
-# remove temporary user
-userdel -r none
-%end
diff --git a/bare-metal/fedora-atomic/kubernetes/outputs.tf b/bare-metal/fedora-atomic/kubernetes/outputs.tf
deleted file mode 100644
index a0977ea3..00000000
--- a/bare-metal/fedora-atomic/kubernetes/outputs.tf
+++ /dev/null
@@ -1,3 +0,0 @@
-output "kubeconfig-admin" {
- value = "${module.bootkube.kubeconfig-admin}"
-}
diff --git a/bare-metal/fedora-atomic/kubernetes/profiles.tf b/bare-metal/fedora-atomic/kubernetes/profiles.tf
deleted file mode 100644
index 1f8b1c5e..00000000
--- a/bare-metal/fedora-atomic/kubernetes/profiles.tf
+++ /dev/null
@@ -1,87 +0,0 @@
-locals {
- default_assets_endpoint = "${var.matchbox_http_endpoint}/assets/fedora/28"
- atomic_assets_endpoint = "${var.atomic_assets_endpoint != "" ? var.atomic_assets_endpoint : local.default_assets_endpoint}"
-}
-
-// Cached Fedora Install profile (from matchbox /assets cache)
-// Note: Admin must have downloaded Fedora kernel, initrd, and repo into
-// matchbox assets.
-resource "matchbox_profile" "cached-fedora-install" {
- count = "${length(var.controller_names) + length(var.worker_names)}"
- name = "${format("%s-cached-fedora-install-%s", var.cluster_name, element(concat(var.controller_names, var.worker_names), count.index))}"
-
- kernel = "${local.atomic_assets_endpoint}/images/pxeboot/vmlinuz"
-
- initrd = [
- "${local.atomic_assets_endpoint}/images/pxeboot/initrd.img",
- ]
-
- args = [
- "initrd=initrd.img",
- "inst.repo=${local.atomic_assets_endpoint}",
- "inst.ks=${var.matchbox_http_endpoint}/generic?mac=${element(concat(var.controller_macs, var.worker_macs), count.index)}",
- "inst.text",
- "${var.kernel_args}",
- ]
-
- # kickstart
- generic_config = "${element(data.template_file.install-kickstarts.*.rendered, count.index)}"
-}
-
-data "template_file" "install-kickstarts" {
- count = "${length(var.controller_names) + length(var.worker_names)}"
-
- template = "${file("${path.module}/kickstart/fedora-atomic.ks.tmpl")}"
-
- vars = {
- matchbox_http_endpoint = "${var.matchbox_http_endpoint}"
- atomic_assets_endpoint = "${local.atomic_assets_endpoint}"
- mac = "${element(concat(var.controller_macs, var.worker_macs), count.index)}"
- }
-}
-
-// Kubernetes Controller profiles
-resource "matchbox_profile" "controllers" {
- count = "${length(var.controller_names)}"
- name = "${format("%s-controller-%s", var.cluster_name, element(var.controller_names, count.index))}"
-
- # cloud-init
- generic_config = "${element(data.template_file.controller-configs.*.rendered, count.index)}"
-}
-
-data "template_file" "controller-configs" {
- count = "${length(var.controller_names)}"
-
- template = "${file("${path.module}/cloudinit/controller.yaml.tmpl")}"
-
- vars = {
- domain_name = "${element(var.controller_domains, count.index)}"
- etcd_name = "${element(var.controller_names, count.index)}"
- etcd_initial_cluster = "${join(",", formatlist("%s=https://%s:2380", var.controller_names, var.controller_domains))}"
- cluster_dns_service_ip = "${module.bootkube.cluster_dns_service_ip}"
- cluster_domain_suffix = "${var.cluster_domain_suffix}"
- ssh_authorized_key = "${var.ssh_authorized_key}"
- }
-}
-
-// Kubernetes Worker profiles
-resource "matchbox_profile" "workers" {
- count = "${length(var.worker_names)}"
- name = "${format("%s-worker-%s", var.cluster_name, element(var.worker_names, count.index))}"
-
- # cloud-init
- generic_config = "${element(data.template_file.worker-configs.*.rendered, count.index)}"
-}
-
-data "template_file" "worker-configs" {
- count = "${length(var.worker_names)}"
-
- template = "${file("${path.module}/cloudinit/worker.yaml.tmpl")}"
-
- vars = {
- domain_name = "${element(var.worker_domains, count.index)}"
- cluster_dns_service_ip = "${module.bootkube.cluster_dns_service_ip}"
- cluster_domain_suffix = "${var.cluster_domain_suffix}"
- ssh_authorized_key = "${var.ssh_authorized_key}"
- }
-}
diff --git a/bare-metal/fedora-atomic/kubernetes/require.tf b/bare-metal/fedora-atomic/kubernetes/require.tf
deleted file mode 100644
index a6435bec..00000000
--- a/bare-metal/fedora-atomic/kubernetes/require.tf
+++ /dev/null
@@ -1,21 +0,0 @@
-# Terraform version and plugin versions
-
-terraform {
- required_version = ">= 0.11.0"
-}
-
-provider "local" {
- version = "~> 1.0"
-}
-
-provider "null" {
- version = "~> 1.0"
-}
-
-provider "template" {
- version = "~> 1.0"
-}
-
-provider "tls" {
- version = "~> 1.0"
-}
diff --git a/bare-metal/fedora-atomic/kubernetes/ssh.tf b/bare-metal/fedora-atomic/kubernetes/ssh.tf
deleted file mode 100644
index 71ccf6e8..00000000
--- a/bare-metal/fedora-atomic/kubernetes/ssh.tf
+++ /dev/null
@@ -1,136 +0,0 @@
-# Secure copy etcd TLS assets and kubeconfig to controllers. Activates kubelet.service
-resource "null_resource" "copy-controller-secrets" {
- count = "${length(var.controller_names)}"
-
- # Without depends_on, remote-exec could start and wait for machines before
- # matchbox groups are written, causing a deadlock.
- depends_on = [
- "matchbox_group.install",
- "matchbox_group.controller",
- "matchbox_group.worker",
- ]
-
- connection {
- type = "ssh"
- host = "${element(var.controller_domains, count.index)}"
- user = "fedora"
- timeout = "60m"
- }
-
- provisioner "file" {
- content = "${module.bootkube.kubeconfig-kubelet}"
- destination = "$HOME/kubeconfig"
- }
-
- provisioner "file" {
- content = "${module.bootkube.etcd_ca_cert}"
- destination = "$HOME/etcd-client-ca.crt"
- }
-
- provisioner "file" {
- content = "${module.bootkube.etcd_client_cert}"
- destination = "$HOME/etcd-client.crt"
- }
-
- provisioner "file" {
- content = "${module.bootkube.etcd_client_key}"
- destination = "$HOME/etcd-client.key"
- }
-
- provisioner "file" {
- content = "${module.bootkube.etcd_server_cert}"
- destination = "$HOME/etcd-server.crt"
- }
-
- provisioner "file" {
- content = "${module.bootkube.etcd_server_key}"
- destination = "$HOME/etcd-server.key"
- }
-
- provisioner "file" {
- content = "${module.bootkube.etcd_peer_cert}"
- destination = "$HOME/etcd-peer.crt"
- }
-
- provisioner "file" {
- content = "${module.bootkube.etcd_peer_key}"
- destination = "$HOME/etcd-peer.key"
- }
-
- provisioner "remote-exec" {
- inline = [
- "sudo mkdir -p /etc/ssl/etcd/etcd",
- "sudo mv etcd-client* /etc/ssl/etcd/",
- "sudo cp /etc/ssl/etcd/etcd-client-ca.crt /etc/ssl/etcd/etcd/server-ca.crt",
- "sudo mv etcd-server.crt /etc/ssl/etcd/etcd/server.crt",
- "sudo mv etcd-server.key /etc/ssl/etcd/etcd/server.key",
- "sudo cp /etc/ssl/etcd/etcd-client-ca.crt /etc/ssl/etcd/etcd/peer-ca.crt",
- "sudo mv etcd-peer.crt /etc/ssl/etcd/etcd/peer.crt",
- "sudo mv etcd-peer.key /etc/ssl/etcd/etcd/peer.key",
- "sudo mv $HOME/kubeconfig /etc/kubernetes/kubeconfig",
- ]
- }
-}
-
-# Secure copy kubeconfig to all workers. Activates kubelet.service
-resource "null_resource" "copy-worker-secrets" {
- count = "${length(var.worker_names)}"
-
- # Without depends_on, remote-exec could start and wait for machines before
- # matchbox groups are written, causing a deadlock.
- depends_on = [
- "matchbox_group.install",
- "matchbox_group.controller",
- "matchbox_group.worker",
- ]
-
- connection {
- type = "ssh"
- host = "${element(var.worker_domains, count.index)}"
- user = "fedora"
- timeout = "60m"
- }
-
- provisioner "file" {
- content = "${module.bootkube.kubeconfig-kubelet}"
- destination = "$HOME/kubeconfig"
- }
-
- provisioner "remote-exec" {
- inline = [
- "sudo mv $HOME/kubeconfig /etc/kubernetes/kubeconfig",
- ]
- }
-}
-
-# Secure copy bootkube assets to ONE controller and start bootkube to perform
-# one-time self-hosted cluster bootstrapping.
-resource "null_resource" "bootkube-start" {
- # Without depends_on, this remote-exec may start before the kubeconfig copy.
- # Terraform only does one task at a time, so it would try to bootstrap
- # while no Kubelets are running.
- depends_on = [
- "null_resource.copy-controller-secrets",
- "null_resource.copy-worker-secrets",
- ]
-
- connection {
- type = "ssh"
- host = "${element(var.controller_domains, 0)}"
- user = "fedora"
- timeout = "15m"
- }
-
- provisioner "file" {
- source = "${var.asset_dir}"
- destination = "$HOME/assets"
- }
-
- provisioner "remote-exec" {
- inline = [
- "while [ ! -f /var/lib/cloud/instance/boot-finished ]; do sleep 4; done",
- "sudo mv $HOME/assets /var/lib/bootkube",
- "sudo systemctl start bootkube",
- ]
- }
-}
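For readers following the removal: this bare-metal module was consumed like any other Typhoon module. A minimal sketch, assuming the usual Typhoon module source path; the `?ref=` tag and all values below are illustrative placeholders, not taken from this diff (input names come from the variables.tf that follows):

```hcl
# Sketch only: module path, ref, and values are illustrative placeholders.
module "bare-metal-mercury" {
  source = "git::https://github.com/poseidon/typhoon//bare-metal/fedora-atomic/kubernetes?ref=v1.15.0"

  cluster_name           = "mercury"
  matchbox_http_endpoint = "http://matchbox.example.com:8080"

  # machines to provision (names pair with domains, index for index)
  controller_names   = ["node1"]
  controller_domains = ["node1.example.com"]
  worker_names       = ["node2", "node3"]
  worker_domains     = ["node2.example.com", "node3.example.com"]

  ssh_authorized_key = "ssh-rsa AAAAB3NZ..."
  asset_dir          = "/home/user/.secrets/clusters/mercury"
}
```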
diff --git a/bare-metal/fedora-atomic/kubernetes/variables.tf b/bare-metal/fedora-atomic/kubernetes/variables.tf
deleted file mode 100644
index 141bddef..00000000
--- a/bare-metal/fedora-atomic/kubernetes/variables.tf
+++ /dev/null
@@ -1,118 +0,0 @@
-variable "cluster_name" {
- type = "string"
- description = "Unique cluster name"
-}
-
-# bare-metal
-
-variable "matchbox_http_endpoint" {
- type = "string"
- description = "Matchbox HTTP read-only endpoint (e.g. http://matchbox.example.com:8080)"
-}
-
-variable "atomic_assets_endpoint" {
- type = "string"
- default = ""
-
- description = <<EOD
diff --git a/digital-ocean/fedora-atomic/kubernetes/README.md b/digital-ocean/fedora-atomic/kubernetes/README.md
deleted file mode 100644
--- a/digital-ocean/fedora-atomic/kubernetes/README.md
+++ /dev/null
-# Typhoon
-
-Typhoon is a minimal and free Kubernetes distribution.
-
-* Minimal, stable base Kubernetes distribution
-* Declarative infrastructure and configuration
-* Free (freedom and cost) and privacy-respecting
-* Practical for labs, datacenters, and clouds
-
-Typhoon distributes upstream Kubernetes, architectural conventions, and cluster addons, much like a GNU/Linux distribution provides the Linux kernel and userspace components.
-
-## Features
-
-* Kubernetes v1.15.0 (upstream, via [kubernetes-incubator/bootkube](https://github.com/kubernetes-incubator/bootkube))
-* Single or multi-master, [flannel](https://github.com/coreos/flannel) networking
-* On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled
-* Ready for Ingress, Prometheus, Grafana, and other optional [addons](https://typhoon.psdn.io/addons/overview/)
-
-## Docs
-
-Please see the [official docs](https://typhoon.psdn.io) and the Digital Ocean [tutorial](https://typhoon.psdn.io/cl/digital-ocean/).
-
diff --git a/digital-ocean/fedora-atomic/kubernetes/bootkube.tf b/digital-ocean/fedora-atomic/kubernetes/bootkube.tf
deleted file mode 100644
index b8297cdd..00000000
--- a/digital-ocean/fedora-atomic/kubernetes/bootkube.tf
+++ /dev/null
@@ -1,18 +0,0 @@
-# Self-hosted Kubernetes assets (kubeconfig, manifests)
-module "bootkube" {
- source = "git::https://github.com/poseidon/terraform-render-bootkube.git?ref=62df9ad69cc0da35f47d40fa981370c4503ad581"
-
- cluster_name = "${var.cluster_name}"
- api_servers = ["${format("%s.%s", var.cluster_name, var.dns_zone)}"]
- etcd_servers = "${digitalocean_record.etcds.*.fqdn}"
- asset_dir = "${var.asset_dir}"
- networking = "flannel"
- network_mtu = 1440
- pod_cidr = "${var.pod_cidr}"
- service_cidr = "${var.service_cidr}"
- cluster_domain_suffix = "${var.cluster_domain_suffix}"
- enable_reporting = "${var.enable_reporting}"
-
- # Fedora
- trusted_certs_dir = "/etc/pki/tls/certs"
-}
diff --git a/digital-ocean/fedora-atomic/kubernetes/cloudinit/controller.yaml.tmpl b/digital-ocean/fedora-atomic/kubernetes/cloudinit/controller.yaml.tmpl
deleted file mode 100644
index e0331281..00000000
--- a/digital-ocean/fedora-atomic/kubernetes/cloudinit/controller.yaml.tmpl
+++ /dev/null
@@ -1,107 +0,0 @@
-#cloud-config
-write_files:
- - path: /etc/etcd/etcd.conf
- content: |
- ETCD_NAME=${etcd_name}
- ETCD_DATA_DIR=/var/lib/etcd
- ETCD_ADVERTISE_CLIENT_URLS=https://${etcd_domain}:2379
- ETCD_INITIAL_ADVERTISE_PEER_URLS=https://${etcd_domain}:2380
- ETCD_LISTEN_CLIENT_URLS=https://0.0.0.0:2379
- ETCD_LISTEN_PEER_URLS=https://0.0.0.0:2380
- ETCD_LISTEN_METRICS_URLS=http://0.0.0.0:2381
- ETCD_INITIAL_CLUSTER=${etcd_initial_cluster}
- ETCD_STRICT_RECONFIG_CHECK=true
- ETCD_TRUSTED_CA_FILE=/etc/ssl/certs/etcd/server-ca.crt
- ETCD_CERT_FILE=/etc/ssl/certs/etcd/server.crt
- ETCD_KEY_FILE=/etc/ssl/certs/etcd/server.key
- ETCD_CLIENT_CERT_AUTH=true
- ETCD_PEER_TRUSTED_CA_FILE=/etc/ssl/certs/etcd/peer-ca.crt
- ETCD_PEER_CERT_FILE=/etc/ssl/certs/etcd/peer.crt
- ETCD_PEER_KEY_FILE=/etc/ssl/certs/etcd/peer.key
- ETCD_PEER_CLIENT_CERT_AUTH=true
- - path: /etc/systemd/system/cloud-metadata.service
- content: |
- [Unit]
- Description=Cloud metadata agent
- [Service]
- Type=oneshot
- Environment=OUTPUT=/run/metadata/cloud
- ExecStart=/usr/bin/mkdir -p /run/metadata
- ExecStart=/usr/bin/bash -c 'echo "HOSTNAME_OVERRIDE=$(curl\
- --url http://169.254.169.254/metadata/v1/interfaces/private/0/ipv4/address\
- --retry 10)" > $${OUTPUT}'
- [Install]
- WantedBy=multi-user.target
- - path: /etc/systemd/system/kubelet.service.d/10-typhoon.conf
- content: |
- [Unit]
- Requires=cloud-metadata.service
- After=cloud-metadata.service
- Wants=rpc-statd.service
- [Service]
- ExecStartPre=/bin/mkdir -p /opt/cni/bin
- ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
- ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
- ExecStartPre=/bin/mkdir -p /etc/kubernetes/checkpoint-secrets
- ExecStartPre=/bin/mkdir -p /etc/kubernetes/inactive-manifests
- ExecStartPre=/bin/mkdir -p /var/lib/cni
- ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins
- ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
- Restart=always
- RestartSec=10
- - path: /etc/kubernetes/kubelet.conf
- content: |
- ARGS="--anonymous-auth=false \
- --authentication-token-webhook \
- --authorization-mode=Webhook \
- --client-ca-file=/etc/kubernetes/ca.crt \
- --cluster_dns=${cluster_dns_service_ip} \
- --cluster_domain=${cluster_domain_suffix} \
- --cni-conf-dir=/etc/kubernetes/cni/net.d \
- --exit-on-lock-contention \
- --kubeconfig=/etc/kubernetes/kubeconfig \
- --lock-file=/var/run/lock/kubelet.lock \
- --network-plugin=cni \
- --node-labels=node-role.kubernetes.io/master \
- --node-labels=node-role.kubernetes.io/controller="true" \
- --pod-manifest-path=/etc/kubernetes/manifests \
- --read-only-port=0 \
- --register-with-taints=node-role.kubernetes.io/master=:NoSchedule \
- --volume-plugin-dir=/var/lib/kubelet/volumeplugins"
- - path: /etc/systemd/system/kubelet.path
- content: |
- [Unit]
- Description=Watch for kubeconfig
- [Path]
- PathExists=/etc/kubernetes/kubeconfig
- [Install]
- WantedBy=multi-user.target
- - path: /var/lib/bootkube/.keep
- - path: /etc/selinux/config
- owner: root:root
- permissions: '0644'
- content: |
- SELINUX=permissive
- SELINUXTYPE=targeted
-bootcmd:
- - [setenforce, Permissive]
- - [systemctl, disable, firewalld, --now]
- # https://github.com/kubernetes/kubernetes/issues/60869
- - [modprobe, ip_vs]
-runcmd:
- - [systemctl, daemon-reload]
- - "atomic install --system --name=etcd quay.io/poseidon/etcd:v3.3.12"
- - "atomic install --system --name=kubelet quay.io/poseidon/kubelet:v1.14.1"
- - "atomic install --system --name=bootkube quay.io/poseidon/bootkube:v0.14.0"
- - [systemctl, start, --no-block, etcd.service]
- - [systemctl, enable, cloud-metadata.service]
- - [systemctl, enable, kubelet.path]
- - [systemctl, start, --no-block, kubelet.path]
-users:
- - default
- - name: fedora
- gecos: Fedora Admin
- sudo: ALL=(ALL) NOPASSWD:ALL
- groups: wheel,adm,systemd-journal,docker
- ssh-authorized-keys:
- - "${ssh_authorized_key}"
diff --git a/digital-ocean/fedora-atomic/kubernetes/cloudinit/worker.yaml.tmpl b/digital-ocean/fedora-atomic/kubernetes/cloudinit/worker.yaml.tmpl
deleted file mode 100644
index 0bc273ba..00000000
--- a/digital-ocean/fedora-atomic/kubernetes/cloudinit/worker.yaml.tmpl
+++ /dev/null
@@ -1,80 +0,0 @@
-#cloud-config
-write_files:
- - path: /etc/systemd/system/cloud-metadata.service
- content: |
- [Unit]
- Description=Cloud metadata agent
- [Service]
- Type=oneshot
- Environment=OUTPUT=/run/metadata/cloud
- ExecStart=/usr/bin/mkdir -p /run/metadata
- ExecStart=/usr/bin/bash -c 'echo "HOSTNAME_OVERRIDE=$(curl\
- --url http://169.254.169.254/metadata/v1/interfaces/private/0/ipv4/address\
- --retry 10)" > $${OUTPUT}'
- [Install]
- WantedBy=multi-user.target
- - path: /etc/systemd/system/kubelet.service.d/10-typhoon.conf
- content: |
- [Unit]
- Requires=cloud-metadata.service
- After=cloud-metadata.service
- Wants=rpc-statd.service
- [Service]
- ExecStartPre=/bin/mkdir -p /opt/cni/bin
- ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
- ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
- ExecStartPre=/bin/mkdir -p /var/lib/cni
- ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins
- ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
- Restart=always
- RestartSec=10
- - path: /etc/kubernetes/kubelet.conf
- content: |
- ARGS="--anonymous-auth=false \
- --authentication-token-webhook \
- --authorization-mode=Webhook \
- --client-ca-file=/etc/kubernetes/ca.crt \
- --cluster_dns=${cluster_dns_service_ip} \
- --cluster_domain=${cluster_domain_suffix} \
- --cni-conf-dir=/etc/kubernetes/cni/net.d \
- --exit-on-lock-contention \
- --kubeconfig=/etc/kubernetes/kubeconfig \
- --lock-file=/var/run/lock/kubelet.lock \
- --network-plugin=cni \
- --node-labels=node-role.kubernetes.io/node \
- --pod-manifest-path=/etc/kubernetes/manifests \
- --read-only-port=0 \
- --volume-plugin-dir=/var/lib/kubelet/volumeplugins"
- - path: /etc/systemd/system/kubelet.path
- content: |
- [Unit]
- Description=Watch for kubeconfig
- [Path]
- PathExists=/etc/kubernetes/kubeconfig
- [Install]
- WantedBy=multi-user.target
- - path: /etc/selinux/config
- owner: root:root
- permissions: '0644'
- content: |
- SELINUX=permissive
- SELINUXTYPE=targeted
-bootcmd:
- - [setenforce, Permissive]
- - [systemctl, disable, firewalld, --now]
- # https://github.com/kubernetes/kubernetes/issues/60869
- - [modprobe, ip_vs]
-runcmd:
- - [systemctl, daemon-reload]
- - [systemctl, enable, cloud-metadata.service]
- - "atomic install --system --name=kubelet quay.io/poseidon/kubelet:v1.14.1"
- - [systemctl, enable, kubelet.path]
- - [systemctl, start, --no-block, kubelet.path]
-users:
- - default
- - name: fedora
- gecos: Fedora Admin
- sudo: ALL=(ALL) NOPASSWD:ALL
- groups: wheel,adm,systemd-journal,docker
- ssh-authorized-keys:
- - "${ssh_authorized_key}"
diff --git a/digital-ocean/fedora-atomic/kubernetes/controllers.tf b/digital-ocean/fedora-atomic/kubernetes/controllers.tf
deleted file mode 100644
index deaa87b1..00000000
--- a/digital-ocean/fedora-atomic/kubernetes/controllers.tf
+++ /dev/null
@@ -1,95 +0,0 @@
-# Controller Instance DNS records
-resource "digitalocean_record" "controllers" {
- count = "${var.controller_count}"
-
- # DNS zone where record should be created
- domain = "${var.dns_zone}"
-
- # DNS record (will be prepended to domain)
- name = "${var.cluster_name}"
- type = "A"
- ttl = 300
-
- # IPv4 addresses of controllers
- value = "${element(digitalocean_droplet.controllers.*.ipv4_address, count.index)}"
-}
-
-# Discrete DNS records for each controller's private IPv4 for etcd usage
-resource "digitalocean_record" "etcds" {
- count = "${var.controller_count}"
-
- # DNS zone where record should be created
- domain = "${var.dns_zone}"
-
- # DNS record (will be prepended to domain)
- name = "${var.cluster_name}-etcd${count.index}"
- type = "A"
- ttl = 300
-
- # private IPv4 address for etcd
- value = "${element(digitalocean_droplet.controllers.*.ipv4_address_private, count.index)}"
-}
-
-# Controller droplet instances
-resource "digitalocean_droplet" "controllers" {
- count = "${var.controller_count}"
-
- name = "${var.cluster_name}-controller-${count.index}"
- region = "${var.region}"
-
- image = "${var.image}"
- size = "${var.controller_type}"
-
- # network
- ipv6 = true
- private_networking = true
-
- user_data = "${element(data.template_file.controller-cloudinit.*.rendered, count.index)}"
- ssh_keys = ["${var.ssh_fingerprints}"]
-
- tags = [
- "${digitalocean_tag.controllers.id}",
- ]
-
- lifecycle {
- ignore_changes = [
- "user_data",
- ]
- }
-}
-
-# Tag to label controllers
-resource "digitalocean_tag" "controllers" {
- name = "${var.cluster_name}-controller"
-}
-
-# Controller Cloud-Init
-data "template_file" "controller-cloudinit" {
- count = "${var.controller_count}"
-
- template = "${file("${path.module}/cloudinit/controller.yaml.tmpl")}"
-
- vars = {
- # Cannot use cyclic dependencies on controllers or their DNS records
- etcd_name = "etcd${count.index}"
- etcd_domain = "${var.cluster_name}-etcd${count.index}.${var.dns_zone}"
-
- # etcd0=https://cluster-etcd0.example.com,etcd1=https://cluster-etcd1.example.com,...
- etcd_initial_cluster = "${join(",", data.template_file.etcds.*.rendered)}"
-
- ssh_authorized_key = "${var.ssh_authorized_key}"
- cluster_dns_service_ip = "${cidrhost(var.service_cidr, 10)}"
- cluster_domain_suffix = "${var.cluster_domain_suffix}"
- }
-}
-
-data "template_file" "etcds" {
- count = "${var.controller_count}"
- template = "etcd$${index}=https://$${cluster_name}-etcd$${index}.$${dns_zone}:2380"
-
- vars = {
- index = "${count.index}"
- cluster_name = "${var.cluster_name}"
- dns_zone = "${var.dns_zone}"
- }
-}
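To make the `etcd_initial_cluster` interpolation concrete: each rendered template contributes one `name=peer-url` pair, and `join(",", ...)` produces the comma-separated string etcd expects. A sketch with placeholder values:

```hcl
locals {
  # Equivalent expansion for controller_count = 3, cluster_name = "nemo",
  # and dns_zone = "do.example.com" (placeholder values):
  etcd_initial_cluster_example = "etcd0=https://nemo-etcd0.do.example.com:2380,etcd1=https://nemo-etcd1.do.example.com:2380,etcd2=https://nemo-etcd2.do.example.com:2380"
}
```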
diff --git a/digital-ocean/fedora-atomic/kubernetes/network.tf b/digital-ocean/fedora-atomic/kubernetes/network.tf
deleted file mode 100644
index 312d7966..00000000
--- a/digital-ocean/fedora-atomic/kubernetes/network.tf
+++ /dev/null
@@ -1,58 +0,0 @@
-resource "digitalocean_firewall" "rules" {
- name = "${var.cluster_name}"
-
- tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
-
- # allow ssh, apiserver, http/https ingress, and peer-to-peer traffic
- inbound_rule = [
- {
- protocol = "tcp"
- port_range = "22"
- source_addresses = ["0.0.0.0/0", "::/0"]
- },
- {
- protocol = "tcp"
- port_range = "80"
- source_addresses = ["0.0.0.0/0", "::/0"]
- },
- {
- protocol = "tcp"
- port_range = "443"
- source_addresses = ["0.0.0.0/0", "::/0"]
- },
- {
- protocol = "tcp"
- port_range = "6443"
- source_addresses = ["0.0.0.0/0", "::/0"]
- },
- {
- protocol = "udp"
- port_range = "1-65535"
- source_tags = ["${digitalocean_tag.controllers.name}", "${digitalocean_tag.workers.name}"]
- },
- {
- protocol = "tcp"
- port_range = "1-65535"
- source_tags = ["${digitalocean_tag.controllers.name}", "${digitalocean_tag.workers.name}"]
- },
- ]
-
- # allow all outbound traffic
- outbound_rule = [
- {
- protocol = "tcp"
- port_range = "1-65535"
- destination_addresses = ["0.0.0.0/0", "::/0"]
- },
- {
- protocol = "udp"
- port_range = "1-65535"
- destination_addresses = ["0.0.0.0/0", "::/0"]
- },
- {
- protocol = "icmp"
- port_range = "1-65535"
- destination_addresses = ["0.0.0.0/0", "::/0"]
- },
- ]
-}
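The inbound list above is intentionally minimal: SSH, HTTP/S, the apiserver port, and intra-cluster traffic matched by tag. A user needing more, say the Kubernetes NodePort range, would add a rule of the same shape; a hedged sketch, not part of this module:

```hcl
# Hypothetical companion firewall opening the default NodePort range to
# tagged worker droplets; schema matches the digitalocean_firewall above.
resource "digitalocean_firewall" "nodeports" {
  name = "${var.cluster_name}-nodeports"
  tags = ["${var.cluster_name}-worker"]

  inbound_rule = [
    {
      protocol         = "tcp"
      port_range       = "30000-32767" # Kubernetes default NodePort range
      source_addresses = ["0.0.0.0/0", "::/0"]
    },
  ]
}
```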
diff --git a/digital-ocean/fedora-atomic/kubernetes/outputs.tf b/digital-ocean/fedora-atomic/kubernetes/outputs.tf
deleted file mode 100644
index 86a4c76a..00000000
--- a/digital-ocean/fedora-atomic/kubernetes/outputs.tf
+++ /dev/null
@@ -1,28 +0,0 @@
-output "kubeconfig-admin" {
- value = "${module.bootkube.kubeconfig-admin}"
-}
-
-output "controllers_dns" {
- value = "${digitalocean_record.controllers.0.fqdn}"
-}
-
-output "workers_dns" {
- # Multiple A and AAAA records with the same FQDN
- value = "${digitalocean_record.workers-record-a.0.fqdn}"
-}
-
-output "controllers_ipv4" {
- value = ["${digitalocean_droplet.controllers.*.ipv4_address}"]
-}
-
-output "controllers_ipv6" {
- value = ["${digitalocean_droplet.controllers.*.ipv6_address}"]
-}
-
-output "workers_ipv4" {
- value = ["${digitalocean_droplet.workers.*.ipv4_address}"]
-}
-
-output "workers_ipv6" {
- value = ["${digitalocean_droplet.workers.*.ipv6_address}"]
-}
diff --git a/digital-ocean/fedora-atomic/kubernetes/require.tf b/digital-ocean/fedora-atomic/kubernetes/require.tf
deleted file mode 100644
index 4651337d..00000000
--- a/digital-ocean/fedora-atomic/kubernetes/require.tf
+++ /dev/null
@@ -1,25 +0,0 @@
-# Terraform version and plugin versions
-
-terraform {
- required_version = ">= 0.11.0"
-}
-
-provider "digitalocean" {
- version = "~> 1.0"
-}
-
-provider "local" {
- version = "~> 1.0"
-}
-
-provider "null" {
- version = "~> 1.0"
-}
-
-provider "template" {
- version = "~> 1.0"
-}
-
-provider "tls" {
- version = "~> 1.0"
-}
diff --git a/digital-ocean/fedora-atomic/kubernetes/ssh.tf b/digital-ocean/fedora-atomic/kubernetes/ssh.tf
deleted file mode 100644
index 77ae4aba..00000000
--- a/digital-ocean/fedora-atomic/kubernetes/ssh.tf
+++ /dev/null
@@ -1,121 +0,0 @@
-# Secure copy etcd TLS assets and kubeconfig to controllers. Activates kubelet.service
-resource "null_resource" "copy-controller-secrets" {
- count = "${var.controller_count}"
-
- depends_on = [
- "digitalocean_firewall.rules",
- ]
-
- connection {
- type = "ssh"
- host = "${element(concat(digitalocean_droplet.controllers.*.ipv4_address), count.index)}"
- user = "fedora"
- timeout = "15m"
- }
-
- provisioner "file" {
- content = "${module.bootkube.kubeconfig-kubelet}"
- destination = "$HOME/kubeconfig"
- }
-
- provisioner "file" {
- content = "${module.bootkube.etcd_ca_cert}"
- destination = "$HOME/etcd-client-ca.crt"
- }
-
- provisioner "file" {
- content = "${module.bootkube.etcd_client_cert}"
- destination = "$HOME/etcd-client.crt"
- }
-
- provisioner "file" {
- content = "${module.bootkube.etcd_client_key}"
- destination = "$HOME/etcd-client.key"
- }
-
- provisioner "file" {
- content = "${module.bootkube.etcd_server_cert}"
- destination = "$HOME/etcd-server.crt"
- }
-
- provisioner "file" {
- content = "${module.bootkube.etcd_server_key}"
- destination = "$HOME/etcd-server.key"
- }
-
- provisioner "file" {
- content = "${module.bootkube.etcd_peer_cert}"
- destination = "$HOME/etcd-peer.crt"
- }
-
- provisioner "file" {
- content = "${module.bootkube.etcd_peer_key}"
- destination = "$HOME/etcd-peer.key"
- }
-
- provisioner "remote-exec" {
- inline = [
- "sudo mkdir -p /etc/ssl/etcd/etcd",
- "sudo mv etcd-client* /etc/ssl/etcd/",
- "sudo cp /etc/ssl/etcd/etcd-client-ca.crt /etc/ssl/etcd/etcd/server-ca.crt",
- "sudo mv etcd-server.crt /etc/ssl/etcd/etcd/server.crt",
- "sudo mv etcd-server.key /etc/ssl/etcd/etcd/server.key",
- "sudo cp /etc/ssl/etcd/etcd-client-ca.crt /etc/ssl/etcd/etcd/peer-ca.crt",
- "sudo mv etcd-peer.crt /etc/ssl/etcd/etcd/peer.crt",
- "sudo mv etcd-peer.key /etc/ssl/etcd/etcd/peer.key",
- "sudo mv $HOME/kubeconfig /etc/kubernetes/kubeconfig",
- ]
- }
-}
-
-# Secure copy kubeconfig to all workers. Activates kubelet.service.
-resource "null_resource" "copy-worker-secrets" {
- count = "${var.worker_count}"
-
- connection {
- type = "ssh"
- host = "${element(concat(digitalocean_droplet.workers.*.ipv4_address), count.index)}"
- user = "fedora"
- timeout = "15m"
- }
-
- provisioner "file" {
- content = "${module.bootkube.kubeconfig-kubelet}"
- destination = "$HOME/kubeconfig"
- }
-
- provisioner "remote-exec" {
- inline = [
- "sudo mv $HOME/kubeconfig /etc/kubernetes/kubeconfig",
- ]
- }
-}
-
-# Secure copy bootkube assets to ONE controller and start bootkube to perform
-# one-time self-hosted cluster bootstrapping.
-resource "null_resource" "bootkube-start" {
- depends_on = [
- "null_resource.copy-controller-secrets",
- "null_resource.copy-worker-secrets",
- ]
-
- connection {
- type = "ssh"
- host = "${digitalocean_droplet.controllers.0.ipv4_address}"
- user = "fedora"
- timeout = "15m"
- }
-
- provisioner "file" {
- source = "${var.asset_dir}"
- destination = "$HOME/assets"
- }
-
- provisioner "remote-exec" {
- inline = [
- "while [ ! -f /var/lib/cloud/instance/boot-finished ]; do sleep 4; done",
- "sudo mv $HOME/assets /var/lib/bootkube",
- "sudo systemctl start bootkube",
- ]
- }
-}
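For reference, the Digital Ocean module was invoked along these lines; input names come from the variables.tf that follows, while the `?ref=` tag and all values are placeholders:

```hcl
# Sketch only: module path, ref, and values are illustrative placeholders.
module "digital-ocean-nemo" {
  source = "git::https://github.com/poseidon/typhoon//digital-ocean/fedora-atomic/kubernetes?ref=v1.15.0"

  cluster_name = "nemo"
  region       = "nyc1"
  dns_zone     = "do.example.com"

  worker_count       = 2
  image              = "fedora-28-x64-atomic"
  ssh_authorized_key = "ssh-rsa AAAAB3NZ..."
  ssh_fingerprints   = ["d7:9d:79:ae:56:32:73:79:95:88:e3:a2:ab:5d:45:bc"]
  asset_dir          = "/home/user/.secrets/clusters/nemo"
}
```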
diff --git a/digital-ocean/fedora-atomic/kubernetes/variables.tf b/digital-ocean/fedora-atomic/kubernetes/variables.tf
deleted file mode 100644
index accbb096..00000000
--- a/digital-ocean/fedora-atomic/kubernetes/variables.tf
+++ /dev/null
@@ -1,93 +0,0 @@
-variable "cluster_name" {
- type = "string"
- description = "Unique cluster name (prepended to dns_zone)"
-}
-
-# Digital Ocean
-
-variable "region" {
- type = "string"
- description = "Digital Ocean region (e.g. nyc1, sfo2, fra1, tor1)"
-}
-
-variable "dns_zone" {
- type = "string"
- description = "Digital Ocean domain (i.e. DNS zone) (e.g. do.example.com)"
-}
-
-# instances
-
-variable "controller_count" {
- type = "string"
- default = "1"
- description = "Number of controllers (i.e. masters)"
-}
-
-variable "worker_count" {
- type = "string"
- default = "1"
- description = "Number of workers"
-}
-
-variable "controller_type" {
- type = "string"
- default = "s-2vcpu-2gb"
- description = "Droplet type for controllers (e.g. s-2vcpu-2gb, s-2vcpu-4gb, s-4vcpu-8gb)"
-}
-
-variable "worker_type" {
- type = "string"
- default = "s-1vcpu-1gb"
- description = "Droplet type for workers (e.g. s-1vcpu-1gb, s-1vcpu-2gb, s-2vcpu-2gb)"
-}
-
-variable "image" {
- type = "string"
- default = "fedora-28-x64-atomic"
- description = "OS image from which to initialize the disk (e.g. fedora-28-x64-atomic)"
-}
-
-# configuration
-
-variable "ssh_authorized_key" {
- type = "string"
- description = "SSH public key for user 'fedora'"
-}
-
-variable "ssh_fingerprints" {
- type = "list"
- description = "SSH public key fingerprints. (e.g. see `ssh-add -l -E md5`)"
-}
-
-variable "asset_dir" {
- description = "Path to a directory where generated assets should be placed (contains secrets)"
- type = "string"
-}
-
-variable "pod_cidr" {
- description = "CIDR IPv4 range to assign Kubernetes pods"
- type = "string"
- default = "10.2.0.0/16"
-}
-
-variable "service_cidr" {
- description = <<EOD
diff --git a/google-cloud/fedora-atomic/kubernetes/README.md b/google-cloud/fedora-atomic/kubernetes/README.md
deleted file mode 100644
--- a/google-cloud/fedora-atomic/kubernetes/README.md
+++ /dev/null
-# Typhoon
-
-Typhoon is a minimal and free Kubernetes distribution.
-
-* Minimal, stable base Kubernetes distribution
-* Declarative infrastructure and configuration
-* Free (freedom and cost) and privacy-respecting
-* Practical for labs, datacenters, and clouds
-
-Typhoon distributes upstream Kubernetes, architectural conventions, and cluster addons, much like a GNU/Linux distribution provides the Linux kernel and userspace components.
-
-## Features
-
-* Kubernetes v1.15.0 (upstream, via [kubernetes-incubator/bootkube](https://github.com/kubernetes-incubator/bootkube))
-* Single or multi-master, [Calico](https://www.projectcalico.org/) or [flannel](https://github.com/coreos/flannel) networking
-* On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
-* Advanced features like [worker pools](https://typhoon.psdn.io/advanced/worker-pools/) and [preemptible](https://typhoon.psdn.io/cl/google-cloud/#preemption) workers
-* Ready for Ingress, Prometheus, Grafana, and other optional [addons](https://typhoon.psdn.io/addons/overview/)
-
-## Docs
-
-Please see the [official docs](https://typhoon.psdn.io) and the Google Cloud [tutorial](https://typhoon.psdn.io/cl/google-cloud/).
-
diff --git a/google-cloud/fedora-atomic/kubernetes/apiserver.tf b/google-cloud/fedora-atomic/kubernetes/apiserver.tf
deleted file mode 100644
index b56a4e7b..00000000
--- a/google-cloud/fedora-atomic/kubernetes/apiserver.tf
+++ /dev/null
@@ -1,97 +0,0 @@
-# TCP Proxy load balancer DNS record
-resource "google_dns_record_set" "apiserver" {
- # DNS Zone name where record should be created
- managed_zone = "${var.dns_zone_name}"
-
- # DNS record
- name = "${format("%s.%s.", var.cluster_name, var.dns_zone)}"
- type = "A"
- ttl = 300
-
- # IPv4 address of apiserver TCP Proxy load balancer
- rrdatas = ["${google_compute_global_address.apiserver-ipv4.address}"]
-}
-
-# Static IPv4 address for the TCP Proxy Load Balancer
-resource "google_compute_global_address" "apiserver-ipv4" {
- name = "${var.cluster_name}-apiserver-ip"
- ip_version = "IPV4"
-}
-
-# Forward IPv4 TCP traffic to the TCP proxy load balancer
-resource "google_compute_global_forwarding_rule" "apiserver" {
- name = "${var.cluster_name}-apiserver"
- ip_address = "${google_compute_global_address.apiserver-ipv4.address}"
- ip_protocol = "TCP"
- port_range = "443"
- target = "${google_compute_target_tcp_proxy.apiserver.self_link}"
-}
-
-# TCP Proxy Load Balancer for apiservers
-resource "google_compute_target_tcp_proxy" "apiserver" {
- name = "${var.cluster_name}-apiserver"
- description = "Distribute TCP load across ${var.cluster_name} controllers"
- backend_service = "${google_compute_backend_service.apiserver.self_link}"
-}
-
-# Backend service backed by unmanaged instance groups
-resource "google_compute_backend_service" "apiserver" {
- name = "${var.cluster_name}-apiserver"
- description = "${var.cluster_name} apiserver service"
-
- protocol = "TCP"
- port_name = "apiserver"
- session_affinity = "NONE"
- timeout_sec = "300"
-
- # controller(s) spread across zonal instance groups
- backend {
- group = "${google_compute_instance_group.controllers.0.self_link}"
- }
-
- backend {
- group = "${google_compute_instance_group.controllers.1.self_link}"
- }
-
- backend {
- group = "${google_compute_instance_group.controllers.2.self_link}"
- }
-
- health_checks = ["${google_compute_health_check.apiserver.self_link}"]
-}
-
-# Instance group of heterogeneous (unmanaged) controller instances
-resource "google_compute_instance_group" "controllers" {
- count = "${length(local.zones)}"
-
- name = "${format("%s-controllers-%s", var.cluster_name, element(local.zones, count.index))}"
- zone = "${element(local.zones, count.index)}"
-
- named_port {
- name = "apiserver"
- port = "443"
- }
-
- # add instances in the zone into the instance group
- instances = [
- "${matchkeys(google_compute_instance.controllers.*.self_link,
- google_compute_instance.controllers.*.zone,
- list(element(local.zones, count.index)))}",
- ]
-}
-
-# TCP health check for apiserver
-resource "google_compute_health_check" "apiserver" {
- name = "${var.cluster_name}-apiserver-tcp-health"
- description = "TCP health check for kube-apiserver"
-
- timeout_sec = 5
- check_interval_sec = 5
-
- healthy_threshold = 1
- unhealthy_threshold = 3
-
- tcp_health_check {
- port = "443"
- }
-}
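A note on the three fixed `backend` blocks above: Terraform 0.11 cannot generate nested blocks from a list, so the module pins controllers to exactly three zonal instance groups. Under Terraform 0.12 (which this release migrates toward) the same service could be expressed with a `dynamic` block; a hypothetical sketch in 0.12 syntax:

```hcl
# Hypothetical Terraform 0.12 rewrite: one backend per zonal instance group
# instead of three hardcoded blocks (other arguments as in the resource above).
resource "google_compute_backend_service" "apiserver" {
  name      = "${var.cluster_name}-apiserver"
  protocol  = "TCP"
  port_name = "apiserver"

  dynamic "backend" {
    for_each = google_compute_instance_group.controllers.*.self_link
    content {
      group = backend.value
    }
  }

  health_checks = [google_compute_health_check.apiserver.self_link]
}
```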
diff --git a/google-cloud/fedora-atomic/kubernetes/bootkube.tf b/google-cloud/fedora-atomic/kubernetes/bootkube.tf
deleted file mode 100644
index cbd1f104..00000000
--- a/google-cloud/fedora-atomic/kubernetes/bootkube.tf
+++ /dev/null
@@ -1,21 +0,0 @@
-# Self-hosted Kubernetes assets (kubeconfig, manifests)
-module "bootkube" {
- source = "git::https://github.com/poseidon/terraform-render-bootkube.git?ref=62df9ad69cc0da35f47d40fa981370c4503ad581"
-
- cluster_name = "${var.cluster_name}"
- api_servers = ["${format("%s.%s", var.cluster_name, var.dns_zone)}"]
- etcd_servers = ["${google_dns_record_set.etcds.*.name}"]
- asset_dir = "${var.asset_dir}"
- networking = "${var.networking}"
- network_mtu = 1440
- pod_cidr = "${var.pod_cidr}"
- service_cidr = "${var.service_cidr}"
- cluster_domain_suffix = "${var.cluster_domain_suffix}"
- enable_reporting = "${var.enable_reporting}"
-
- # Fedora
- trusted_certs_dir = "/etc/pki/tls/certs"
-
- // temporary
- apiserver_port = 443
-}
diff --git a/google-cloud/fedora-atomic/kubernetes/cloudinit/controller.yaml.tmpl b/google-cloud/fedora-atomic/kubernetes/cloudinit/controller.yaml.tmpl
deleted file mode 100644
index 64c1058e..00000000
--- a/google-cloud/fedora-atomic/kubernetes/cloudinit/controller.yaml.tmpl
+++ /dev/null
@@ -1,93 +0,0 @@
-#cloud-config
-write_files:
- - path: /etc/etcd/etcd.conf
- content: |
- ETCD_NAME=${etcd_name}
- ETCD_DATA_DIR=/var/lib/etcd
- ETCD_ADVERTISE_CLIENT_URLS=https://${etcd_domain}:2379
- ETCD_INITIAL_ADVERTISE_PEER_URLS=https://${etcd_domain}:2380
- ETCD_LISTEN_CLIENT_URLS=https://0.0.0.0:2379
- ETCD_LISTEN_PEER_URLS=https://0.0.0.0:2380
- ETCD_LISTEN_METRICS_URLS=http://0.0.0.0:2381
- ETCD_INITIAL_CLUSTER=${etcd_initial_cluster}
- ETCD_STRICT_RECONFIG_CHECK=true
- ETCD_TRUSTED_CA_FILE=/etc/ssl/certs/etcd/server-ca.crt
- ETCD_CERT_FILE=/etc/ssl/certs/etcd/server.crt
- ETCD_KEY_FILE=/etc/ssl/certs/etcd/server.key
- ETCD_CLIENT_CERT_AUTH=true
- ETCD_PEER_TRUSTED_CA_FILE=/etc/ssl/certs/etcd/peer-ca.crt
- ETCD_PEER_CERT_FILE=/etc/ssl/certs/etcd/peer.crt
- ETCD_PEER_KEY_FILE=/etc/ssl/certs/etcd/peer.key
- ETCD_PEER_CLIENT_CERT_AUTH=true
- - path: /etc/systemd/system/kubelet.service.d/10-typhoon.conf
- content: |
- [Unit]
- Wants=rpc-statd.service
- [Service]
- ExecStartPre=/bin/mkdir -p /opt/cni/bin
- ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
- ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
- ExecStartPre=/bin/mkdir -p /etc/kubernetes/checkpoint-secrets
- ExecStartPre=/bin/mkdir -p /etc/kubernetes/inactive-manifests
- ExecStartPre=/bin/mkdir -p /var/lib/cni
- ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins
- ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
- Restart=always
- RestartSec=10
- - path: /etc/kubernetes/kubelet.conf
- content: |
- ARGS="--anonymous-auth=false \
- --authentication-token-webhook \
- --authorization-mode=Webhook \
- --client-ca-file=/etc/kubernetes/ca.crt \
- --cluster_dns=${cluster_dns_service_ip} \
- --cluster_domain=${cluster_domain_suffix} \
- --cni-conf-dir=/etc/kubernetes/cni/net.d \
- --exit-on-lock-contention \
- --kubeconfig=/etc/kubernetes/kubeconfig \
- --lock-file=/var/run/lock/kubelet.lock \
- --network-plugin=cni \
- --node-labels=node-role.kubernetes.io/master \
- --node-labels=node-role.kubernetes.io/controller="true" \
- --pod-manifest-path=/etc/kubernetes/manifests \
- --read-only-port=0 \
- --register-with-taints=node-role.kubernetes.io/master=:NoSchedule \
- --volume-plugin-dir=/var/lib/kubelet/volumeplugins"
- - path: /etc/kubernetes/kubeconfig
- permissions: '0644'
- content: |
- ${kubeconfig}
- - path: /var/lib/bootkube/.keep
- - path: /etc/NetworkManager/conf.d/typhoon.conf
- content: |
- [main]
- plugins=keyfile
- [keyfile]
- unmanaged-devices=interface-name:cali*;interface-name:tunl*
- - path: /etc/selinux/config
- owner: root:root
- permissions: '0644'
- content: |
- SELINUX=permissive
- SELINUXTYPE=targeted
-bootcmd:
- - [setenforce, Permissive]
- - [systemctl, disable, firewalld, --now]
- # https://github.com/kubernetes/kubernetes/issues/60869
- - [modprobe, ip_vs]
-runcmd:
- - [systemctl, daemon-reload]
- - [systemctl, restart, NetworkManager]
- - "atomic install --system --name=etcd quay.io/poseidon/etcd:v3.3.12"
- - "atomic install --system --name=kubelet quay.io/poseidon/kubelet:v1.14.1"
- - "atomic install --system --name=bootkube quay.io/poseidon/bootkube:v0.14.0"
- - [systemctl, start, --no-block, etcd.service]
- - [systemctl, start, --no-block, kubelet.service]
-users:
- - default
- - name: fedora
- gecos: Fedora Admin
- sudo: ALL=(ALL) NOPASSWD:ALL
- groups: wheel,adm,systemd-journal,docker
- ssh-authorized-keys:
- - "${ssh_authorized_key}"
diff --git a/google-cloud/fedora-atomic/kubernetes/controllers.tf b/google-cloud/fedora-atomic/kubernetes/controllers.tf
deleted file mode 100644
index 5ccba436..00000000
--- a/google-cloud/fedora-atomic/kubernetes/controllers.tf
+++ /dev/null
@@ -1,98 +0,0 @@
-# Discrete DNS records for each controller's private IPv4 for etcd usage
-resource "google_dns_record_set" "etcds" {
- count = "${var.controller_count}"
-
- # DNS Zone name where record should be created
- managed_zone = "${var.dns_zone_name}"
-
- # DNS record
- name = "${format("%s-etcd%d.%s.", var.cluster_name, count.index, var.dns_zone)}"
- type = "A"
- ttl = 300
-
- # private IPv4 address for etcd
- rrdatas = ["${element(google_compute_instance.controllers.*.network_interface.0.network_ip, count.index)}"]
-}
-
-# Zones in the region
-data "google_compute_zones" "all" {
- region = "${var.region}"
-}
-
-locals {
- # TCP proxy load balancers require a fixed number of zonal backends. Spread
- # controllers over up to 3 zones, since all GCP regions have at least 3.
- zones = "${slice(data.google_compute_zones.all.names, 0, 3)}"
-
- controllers_ipv4_public = ["${google_compute_instance.controllers.*.network_interface.0.access_config.0.nat_ip}"]
-}
-
-# Controller instances
-resource "google_compute_instance" "controllers" {
- count = "${var.controller_count}"
-
- name = "${var.cluster_name}-controller-${count.index}"
- zone = "${element(local.zones, count.index)}"
- machine_type = "${var.controller_type}"
-
- metadata = {
- user-data = "${element(data.template_file.controller-cloudinit.*.rendered, count.index)}"
- }
-
- boot_disk {
- auto_delete = true
-
- initialize_params {
- image = "${var.os_image}"
- size = "${var.disk_size}"
- }
- }
-
- network_interface {
- network = "${google_compute_network.network.name}"
-
- # Ephemeral external IP
- access_config = {}
- }
-
- can_ip_forward = true
- tags = ["${var.cluster_name}-controller"]
-
- lifecycle {
- ignore_changes = [
- "metadata",
- ]
- }
-}
-
-# Controller Cloud-Init
-data "template_file" "controller-cloudinit" {
- count = "${var.controller_count}"
-
- template = "${file("${path.module}/cloudinit/controller.yaml.tmpl")}"
-
- vars = {
- # Cannot use cyclic dependencies on controllers or their DNS records
- etcd_name = "etcd${count.index}"
- etcd_domain = "${var.cluster_name}-etcd${count.index}.${var.dns_zone}"
-
- # etcd0=https://cluster-etcd0.example.com,etcd1=https://cluster-etcd1.example.com,...
- etcd_initial_cluster = "${join(",", data.template_file.etcds.*.rendered)}"
-
- kubeconfig = "${indent(6, module.bootkube.kubeconfig-kubelet)}"
- ssh_authorized_key = "${var.ssh_authorized_key}"
- cluster_dns_service_ip = "${cidrhost(var.service_cidr, 10)}"
- cluster_domain_suffix = "${var.cluster_domain_suffix}"
- }
-}
-
-data "template_file" "etcds" {
- count = "${var.controller_count}"
- template = "etcd$${index}=https://$${cluster_name}-etcd$${index}.$${dns_zone}:2380"
-
- vars = {
- index = "${count.index}"
- cluster_name = "${var.cluster_name}"
- dns_zone = "${var.dns_zone}"
- }
-}
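The `matchkeys` call in the instance group above filters the controller self_links down to those whose zone matches the group's zone. Its semantics, illustrated with placeholder values:

```hcl
locals {
  # matchkeys(values, keys, searchset) keeps values[i] where keys[i] is in
  # searchset. Placeholder illustration:
  links          = ["link-c0", "link-c1", "link-c2"]
  zones_of_links = ["us-central1-a", "us-central1-b", "us-central1-a"]

  # Yields ["link-c0", "link-c2"]
  in_zone_a = "${matchkeys(local.links, local.zones_of_links, list("us-central1-a"))}"
}
```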
diff --git a/google-cloud/fedora-atomic/kubernetes/ingress.tf b/google-cloud/fedora-atomic/kubernetes/ingress.tf
deleted file mode 100644
index de84765e..00000000
--- a/google-cloud/fedora-atomic/kubernetes/ingress.tf
+++ /dev/null
@@ -1,122 +0,0 @@
-# Static IPv4 address for Ingress Load Balancing
-resource "google_compute_global_address" "ingress-ipv4" {
- name = "${var.cluster_name}-ingress-ipv4"
- ip_version = "IPV4"
-}
-
-# Static IPv6 address for Ingress Load Balancing
-resource "google_compute_global_address" "ingress-ipv6" {
- name = "${var.cluster_name}-ingress-ipv6"
- ip_version = "IPV6"
-}
-
-# Forward IPv4 TCP traffic to the HTTP proxy load balancer
-# Google Cloud does not allow TCP proxies for port 80. Must use HTTP proxy.
-resource "google_compute_global_forwarding_rule" "ingress-http-ipv4" {
- name = "${var.cluster_name}-ingress-http-ipv4"
- ip_address = "${google_compute_global_address.ingress-ipv4.address}"
- ip_protocol = "TCP"
- port_range = "80"
- target = "${google_compute_target_http_proxy.ingress-http.self_link}"
-}
-
-# Forward IPv4 TCP traffic to the TCP proxy load balancer
-resource "google_compute_global_forwarding_rule" "ingress-https-ipv4" {
- name = "${var.cluster_name}-ingress-https-ipv4"
- ip_address = "${google_compute_global_address.ingress-ipv4.address}"
- ip_protocol = "TCP"
- port_range = "443"
- target = "${google_compute_target_tcp_proxy.ingress-https.self_link}"
-}
-
-# Forward IPv6 TCP traffic to the HTTP proxy load balancer
-# Google Cloud does not allow TCP proxies for port 80. Must use HTTP proxy.
-resource "google_compute_global_forwarding_rule" "ingress-http-ipv6" {
- name = "${var.cluster_name}-ingress-http-ipv6"
- ip_address = "${google_compute_global_address.ingress-ipv6.address}"
- ip_protocol = "TCP"
- port_range = "80"
- target = "${google_compute_target_http_proxy.ingress-http.self_link}"
-}
-
-# Forward IPv6 TCP traffic to the TCP proxy load balancer
-resource "google_compute_global_forwarding_rule" "ingress-https-ipv6" {
- name = "${var.cluster_name}-ingress-https-ipv6"
- ip_address = "${google_compute_global_address.ingress-ipv6.address}"
- ip_protocol = "TCP"
- port_range = "443"
- target = "${google_compute_target_tcp_proxy.ingress-https.self_link}"
-}
-
-# HTTP proxy load balancer for ingress controllers
-resource "google_compute_target_http_proxy" "ingress-http" {
- name = "${var.cluster_name}-ingress-http"
- description = "Distribute HTTP load across ${var.cluster_name} workers"
- url_map = "${google_compute_url_map.ingress-http.self_link}"
-}
-
-# TCP proxy load balancer for ingress controllers
-resource "google_compute_target_tcp_proxy" "ingress-https" {
- name = "${var.cluster_name}-ingress-https"
- description = "Distribute HTTPS load across ${var.cluster_name} workers"
- backend_service = "${google_compute_backend_service.ingress-https.self_link}"
-}
-
-# HTTP URL Map (required)
-resource "google_compute_url_map" "ingress-http" {
- name = "${var.cluster_name}-ingress-http"
-
- # Do not add host/path rules for applications here. Use Ingress resources.
- default_service = "${google_compute_backend_service.ingress-http.self_link}"
-}
-
-# Backend service backed by managed instance group of workers
-resource "google_compute_backend_service" "ingress-http" {
- name = "${var.cluster_name}-ingress-http"
- description = "${var.cluster_name} ingress service"
-
- protocol = "HTTP"
- port_name = "http"
- session_affinity = "NONE"
- timeout_sec = "60"
-
- backend {
- group = "${module.workers.instance_group}"
- }
-
- health_checks = ["${google_compute_health_check.ingress.self_link}"]
-}
-
-# Backend service backed by managed instance group of workers
-resource "google_compute_backend_service" "ingress-https" {
- name = "${var.cluster_name}-ingress-https"
- description = "${var.cluster_name} ingress service"
-
- protocol = "TCP"
- port_name = "https"
- session_affinity = "NONE"
- timeout_sec = "60"
-
- backend {
- group = "${module.workers.instance_group}"
- }
-
- health_checks = ["${google_compute_health_check.ingress.self_link}"]
-}
-
-# Ingress HTTP Health Check
-resource "google_compute_health_check" "ingress" {
- name = "${var.cluster_name}-ingress-health"
- description = "Health check for Ingress controller"
-
- timeout_sec = 5
- check_interval_sec = 5
-
- healthy_threshold = 2
- unhealthy_threshold = 4
-
- http_health_check {
- port = 10254
- request_path = "/healthz"
- }
-}
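In practice the static addresses above are paired with DNS so that applications resolve to the Ingress load balancer. A hedged sketch using the same resource types as this module; the zone and hostname are placeholders:

```hcl
# Hypothetical: point an app record at the static Ingress IPv4 address.
resource "google_dns_record_set" "apps" {
  managed_zone = "example-zone"
  name         = "apps.example.com."
  type         = "A"
  ttl          = 300
  rrdatas      = ["${google_compute_global_address.ingress-ipv4.address}"]
}
```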
diff --git a/google-cloud/fedora-atomic/kubernetes/network.tf b/google-cloud/fedora-atomic/kubernetes/network.tf
deleted file mode 100644
index ecfe7ae7..00000000
--- a/google-cloud/fedora-atomic/kubernetes/network.tf
+++ /dev/null
@@ -1,153 +0,0 @@
-resource "google_compute_network" "network" {
- name = "${var.cluster_name}"
- description = "Network for the ${var.cluster_name} cluster"
- auto_create_subnetworks = true
-}
-
-resource "google_compute_firewall" "allow-ssh" {
- name = "${var.cluster_name}-allow-ssh"
- network = "${google_compute_network.network.name}"
-
- allow {
- protocol = "tcp"
- ports = [22]
- }
-
- source_ranges = ["0.0.0.0/0"]
- target_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
-}
-
-resource "google_compute_firewall" "internal-etcd" {
- name = "${var.cluster_name}-internal-etcd"
- network = "${google_compute_network.network.name}"
-
- allow {
- protocol = "tcp"
- ports = [2379, 2380]
- }
-
- source_tags = ["${var.cluster_name}-controller"]
- target_tags = ["${var.cluster_name}-controller"]
-}
-
-# Allow Prometheus to scrape etcd metrics
-resource "google_compute_firewall" "internal-etcd-metrics" {
- name = "${var.cluster_name}-internal-etcd-metrics"
- network = "${google_compute_network.network.name}"
-
- allow {
- protocol = "tcp"
- ports = [2381]
- }
-
- source_tags = ["${var.cluster_name}-worker"]
- target_tags = ["${var.cluster_name}-controller"]
-}
-
-resource "google_compute_firewall" "allow-apiserver" {
- name = "${var.cluster_name}-allow-apiserver"
- network = "${google_compute_network.network.name}"
-
- allow {
- protocol = "tcp"
- ports = [443]
- }
-
- source_ranges = ["0.0.0.0/0"]
- target_tags = ["${var.cluster_name}-controller"]
-}
-
-# BGP and IPIP
-# https://docs.projectcalico.org/latest/reference/public-cloud/gce
-resource "google_compute_firewall" "internal-bgp" {
- count = "${var.networking != "flannel" ? 1 : 0}"
-
- name = "${var.cluster_name}-internal-bgp"
- network = "${google_compute_network.network.name}"
-
- allow {
- protocol = "tcp"
- ports = ["179"]
- }
-
- allow {
- protocol = "ipip"
- }
-
- source_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
- target_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
-}
-
-# flannel VXLAN
-resource "google_compute_firewall" "internal-vxlan" {
- count = "${var.networking == "flannel" ? 1 : 0}"
-
- name = "${var.cluster_name}-internal-vxlan"
- network = "${google_compute_network.network.name}"
-
- allow {
- protocol = "udp"
- ports = [4789]
- }
-
- source_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
- target_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
-}
-
-# Allow Prometheus to scrape node-exporter daemonset
-resource "google_compute_firewall" "internal-node-exporter" {
- name = "${var.cluster_name}-internal-node-exporter"
- network = "${google_compute_network.network.name}"
-
- allow {
- protocol = "tcp"
- ports = [9100]
- }
-
- source_tags = ["${var.cluster_name}-worker"]
- target_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
-}
-
-# Allow apiserver to access kubelets for exec, log, port-forward
-resource "google_compute_firewall" "internal-kubelet" {
- name = "${var.cluster_name}-internal-kubelet"
- network = "${google_compute_network.network.name}"
-
- allow {
- protocol = "tcp"
- ports = [10250]
- }
-
- # allow Prometheus to scrape kubelet metrics too
- source_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
- target_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
-}
-
-# Workers
-
-resource "google_compute_firewall" "allow-ingress" {
- name = "${var.cluster_name}-allow-ingress"
- network = "${google_compute_network.network.name}"
-
- allow {
- protocol = "tcp"
- ports = [80, 443]
- }
-
- source_ranges = ["0.0.0.0/0"]
- target_tags = ["${var.cluster_name}-worker"]
-}
-
-resource "google_compute_firewall" "google-health-checks" {
- name = "${var.cluster_name}-google-health-checks"
- network = "${google_compute_network.network.name}"
-
- allow {
- protocol = "tcp"
- ports = [10254]
- }
-
- # https://cloud.google.com/compute/docs/load-balancing/tcp-ssl/tcp-proxy#health-checking
- source_ranges = ["130.211.0.0/22", "35.191.0.0/16"]
- target_tags = ["${var.cluster_name}-worker"]
-}
diff --git a/google-cloud/fedora-atomic/kubernetes/outputs.tf b/google-cloud/fedora-atomic/kubernetes/outputs.tf
deleted file mode 100644
index f1a09ece..00000000
--- a/google-cloud/fedora-atomic/kubernetes/outputs.tf
+++ /dev/null
@@ -1,38 +0,0 @@
-output "kubeconfig-admin" {
- value = "${module.bootkube.kubeconfig-admin}"
-}
-
-# Outputs for Kubernetes Ingress
-
-output "ingress_static_ipv4" {
- description = "Global IPv4 address for proxy load balancing to the nearest Ingress controller"
- value = "${google_compute_global_address.ingress-ipv4.address}"
-}
-
-output "ingress_static_ipv6" {
- description = "Global IPv6 address for proxy load balancing to the nearest Ingress controller"
- value = "${google_compute_global_address.ingress-ipv6.address}"
-}
-
-# Outputs for worker pools
-
-output "network_name" {
- value = "${google_compute_network.network.name}"
-}
-
-output "kubeconfig" {
- value = "${module.bootkube.kubeconfig-kubelet}"
-}
-
-# Outputs for custom firewalling
-
-output "network_self_link" {
- value = "${google_compute_network.network.self_link}"
-}
-
-# Outputs for custom load balancing
-
-output "worker_instance_group" {
- description = "Full URL of the worker managed instance group"
- value = "${module.workers.instance_group}"
-}
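The `network_self_link` and `worker_instance_group` outputs above exist so users can attach their own firewall rules and load balancers. A hedged example of custom firewalling against a cluster created from this module; the module name, cluster name, and port range are placeholders:

```hcl
# Hypothetical: open the default NodePort range on the cluster network,
# consuming the network_self_link output defined above.
resource "google_compute_firewall" "allow-nodeports" {
  name    = "yavin-allow-nodeports"
  network = "${module.google-cloud-yavin.network_self_link}"

  allow {
    protocol = "tcp"
    ports    = ["30000-32767"] # Kubernetes default NodePort range
  }

  source_ranges = ["0.0.0.0/0"]
  target_tags   = ["yavin-worker"]
}
```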
diff --git a/google-cloud/fedora-atomic/kubernetes/require.tf b/google-cloud/fedora-atomic/kubernetes/require.tf
deleted file mode 100644
index d1f7f4d5..00000000
--- a/google-cloud/fedora-atomic/kubernetes/require.tf
+++ /dev/null
@@ -1,25 +0,0 @@
-# Terraform version and plugin versions
-
-terraform {
- required_version = ">= 0.11.0"
-}
-
-provider "google" {
- version = ">= 1.19, < 3.0"
-}
-
-provider "local" {
- version = "~> 1.0"
-}
-
-provider "null" {
- version = "~> 1.0"
-}
-
-provider "template" {
- version = "~> 1.0"
-}
-
-provider "tls" {
- version = "~> 1.0"
-}
diff --git a/google-cloud/fedora-atomic/kubernetes/ssh.tf b/google-cloud/fedora-atomic/kubernetes/ssh.tf
deleted file mode 100644
index 54517eaf..00000000
--- a/google-cloud/fedora-atomic/kubernetes/ssh.tf
+++ /dev/null
@@ -1,89 +0,0 @@
-# Secure copy etcd TLS assets to controllers.
-resource "null_resource" "copy-controller-secrets" {
- count = "${var.controller_count}"
-
- connection {
- type = "ssh"
- host = "${element(local.controllers_ipv4_public, count.index)}"
- user = "fedora"
- timeout = "15m"
- }
-
- provisioner "file" {
- content = "${module.bootkube.etcd_ca_cert}"
- destination = "$HOME/etcd-client-ca.crt"
- }
-
- provisioner "file" {
- content = "${module.bootkube.etcd_client_cert}"
- destination = "$HOME/etcd-client.crt"
- }
-
- provisioner "file" {
- content = "${module.bootkube.etcd_client_key}"
- destination = "$HOME/etcd-client.key"
- }
-
- provisioner "file" {
- content = "${module.bootkube.etcd_server_cert}"
- destination = "$HOME/etcd-server.crt"
- }
-
- provisioner "file" {
- content = "${module.bootkube.etcd_server_key}"
- destination = "$HOME/etcd-server.key"
- }
-
- provisioner "file" {
- content = "${module.bootkube.etcd_peer_cert}"
- destination = "$HOME/etcd-peer.crt"
- }
-
- provisioner "file" {
- content = "${module.bootkube.etcd_peer_key}"
- destination = "$HOME/etcd-peer.key"
- }
-
- provisioner "remote-exec" {
- inline = [
- "sudo mkdir -p /etc/ssl/etcd/etcd",
- "sudo mv etcd-client* /etc/ssl/etcd/",
- "sudo cp /etc/ssl/etcd/etcd-client-ca.crt /etc/ssl/etcd/etcd/server-ca.crt",
- "sudo mv etcd-server.crt /etc/ssl/etcd/etcd/server.crt",
- "sudo mv etcd-server.key /etc/ssl/etcd/etcd/server.key",
- "sudo cp /etc/ssl/etcd/etcd-client-ca.crt /etc/ssl/etcd/etcd/peer-ca.crt",
- "sudo mv etcd-peer.crt /etc/ssl/etcd/etcd/peer.crt",
- "sudo mv etcd-peer.key /etc/ssl/etcd/etcd/peer.key",
- ]
- }
-}
-
-# Secure copy bootkube assets to ONE controller and start bootkube to perform
-# one-time self-hosted cluster bootstrapping.
-resource "null_resource" "bootkube-start" {
- depends_on = [
- "null_resource.copy-controller-secrets",
- "module.workers",
- "google_dns_record_set.apiserver",
- ]
-
- connection {
- type = "ssh"
- host = "${element(local.controllers_ipv4_public, 0)}"
- user = "fedora"
- timeout = "15m"
- }
-
- provisioner "file" {
- source = "${var.asset_dir}"
- destination = "$HOME/assets"
- }
-
- provisioner "remote-exec" {
- inline = [
- "while [ ! -f /var/lib/cloud/instance/boot-finished ]; do sleep 4; done",
- "sudo mv $HOME/assets /var/lib/bootkube",
- "sudo systemctl start bootkube",
- ]
- }
-}
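For reference, the Google Cloud module was invoked along these lines; inputs match the variables.tf that follows, and the `?ref=` tag and values are placeholders:

```hcl
# Sketch only: module path, ref, and values are illustrative placeholders.
module "google-cloud-yavin" {
  source = "git::https://github.com/poseidon/typhoon//google-cloud/fedora-atomic/kubernetes?ref=v1.15.0"

  cluster_name  = "yavin"
  region        = "us-central1"
  dns_zone      = "google-cloud.example.com"
  dns_zone_name = "example-zone"

  os_image           = "fedora-atomic-28" # custom Fedora Atomic image name
  ssh_authorized_key = "ssh-rsa AAAAB3NZ..."
  asset_dir          = "/home/user/.secrets/clusters/yavin"
}
```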
diff --git a/google-cloud/fedora-atomic/kubernetes/variables.tf b/google-cloud/fedora-atomic/kubernetes/variables.tf
deleted file mode 100644
index 4866b1c0..00000000
--- a/google-cloud/fedora-atomic/kubernetes/variables.tf
+++ /dev/null
@@ -1,110 +0,0 @@
-variable "cluster_name" {
- type = "string"
- description = "Unique cluster name (prepended to dns_zone)"
-}
-
-# Google Cloud
-
-variable "region" {
- type = "string"
- description = "Google Cloud Region (e.g. us-central1, see `gcloud compute regions list`)"
-}
-
-variable "dns_zone" {
- type = "string"
- description = "Google Cloud DNS Zone (e.g. google-cloud.example.com)"
-}
-
-variable "dns_zone_name" {
- type = "string"
- description = "Google Cloud DNS Zone name (e.g. example-zone)"
-}
-
-# instances
-
-variable "controller_count" {
- type = "string"
- default = "1"
- description = "Number of controllers (i.e. masters)"
-}
-
-variable "worker_count" {
- type = "string"
- default = "1"
- description = "Number of workers"
-}
-
-variable "controller_type" {
- type = "string"
- default = "n1-standard-1"
- description = "Machine type for controllers (see `gcloud compute machine-types list`)"
-}
-
-variable "worker_type" {
- type = "string"
- default = "n1-standard-1"
- description = "Machine type for workers (see `gcloud compute machine-types list`)"
-}
-
-variable "os_image" {
- type = "string"
- description = "Custom Fedora Atomic image"
-}
-
-variable "disk_size" {
- type = "string"
- default = "40"
- description = "Size of the disk in GB"
-}
-
-variable "worker_preemptible" {
- type = "string"
- default = "false"
- description = "If enabled, Compute Engine will terminate workers randomly within 24 hours"
-}
-
-# configuration
-
-variable "ssh_authorized_key" {
- type = "string"
- description = "SSH public key for user 'fedora'"
-}
-
-variable "asset_dir" {
- description = "Path to a directory where generated assets should be placed (contains secrets)"
- type = "string"
-}
-
-variable "networking" {
- description = "Choice of networking provider (flannel or calico)"
- type = "string"
- default = "calico"
-}
-
-variable "pod_cidr" {
- description = "CIDR IPv4 range to assign Kubernetes pods"
- type = "string"
- default = "10.2.0.0/16"
-}
-
-variable "service_cidr" {
- description = <<EOD
diff --git a/google-cloud/fedora-atomic/kubernetes/workers/cloudinit/worker.yaml.tmpl b/google-cloud/fedora-atomic/kubernetes/workers/cloudinit/worker.yaml.tmpl
deleted file mode 100644
--- a/google-cloud/fedora-atomic/kubernetes/workers/cloudinit/worker.yaml.tmpl
+++ /dev/null
- ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
- Restart=always
- RestartSec=10
- - path: /etc/kubernetes/kubelet.conf
- content: |
- ARGS="--anonymous-auth=false \
- --authentication-token-webhook \
- --authorization-mode=Webhook \
- --client-ca-file=/etc/kubernetes/ca.crt \
- --cluster_dns=${cluster_dns_service_ip} \
- --cluster_domain=${cluster_domain_suffix} \
- --cni-conf-dir=/etc/kubernetes/cni/net.d \
- --exit-on-lock-contention \
- --kubeconfig=/etc/kubernetes/kubeconfig \
- --lock-file=/var/run/lock/kubelet.lock \
- --network-plugin=cni \
- --node-labels=node-role.kubernetes.io/node \
- --pod-manifest-path=/etc/kubernetes/manifests \
- --read-only-port=0 \
- --volume-plugin-dir=/var/lib/kubelet/volumeplugins"
- - path: /etc/kubernetes/kubeconfig
- permissions: '0644'
- content: |
- ${kubeconfig}
- - path: /etc/NetworkManager/conf.d/typhoon.conf
- content: |
- [main]
- plugins=keyfile
- [keyfile]
- unmanaged-devices=interface-name:cali*;interface-name:tunl*
- - path: /etc/selinux/config
- owner: root:root
- permissions: '0644'
- content: |
- SELINUX=permissive
- SELINUXTYPE=targeted
-bootcmd:
- - [setenforce, Permissive]
- - [systemctl, disable, firewalld, --now]
- # https://github.com/kubernetes/kubernetes/issues/60869
- - [modprobe, ip_vs]
-runcmd:
- - [systemctl, daemon-reload]
- - [systemctl, restart, NetworkManager]
- - "atomic install --system --name=kubelet quay.io/poseidon/kubelet:v1.14.1"
- - [systemctl, start, --no-block, kubelet.service]
-users:
- - default
- - name: fedora
- gecos: Fedora Admin
- sudo: ALL=(ALL) NOPASSWD:ALL
- groups: wheel,adm,systemd-journal,docker
- ssh-authorized-keys:
- - "${ssh_authorized_key}"
diff --git a/google-cloud/fedora-atomic/kubernetes/workers/outputs.tf b/google-cloud/fedora-atomic/kubernetes/workers/outputs.tf
deleted file mode 100644
index 4521d792..00000000
--- a/google-cloud/fedora-atomic/kubernetes/workers/outputs.tf
+++ /dev/null
@@ -1,4 +0,0 @@
-output "instance_group" {
- description = "Full URL of the worker managed instance group"
- value = "${google_compute_region_instance_group_manager.workers.instance_group}"
-}
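The `instance_group` output, together with the cluster's `network_name` and `kubeconfig` outputs, is what makes standalone worker pools possible. A hedged sketch of attaching an extra pool; module names and values are placeholders, and the inputs follow the variables.tf below:

```hcl
# Hypothetical worker pool attached to an existing cluster.
module "yavin-worker-pool" {
  source = "git::https://github.com/poseidon/typhoon//google-cloud/fedora-atomic/kubernetes/workers?ref=v1.15.0"

  name         = "yavin-16x"
  cluster_name = "yavin"

  region  = "us-central1"
  network = "${module.google-cloud-yavin.network_name}"

  count        = 2
  machine_type = "n1-standard-16"
  os_image     = "fedora-atomic-28"

  kubeconfig         = "${module.google-cloud-yavin.kubeconfig}"
  ssh_authorized_key = "ssh-rsa AAAAB3NZ..."
}
```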
diff --git a/google-cloud/fedora-atomic/kubernetes/workers/variables.tf b/google-cloud/fedora-atomic/kubernetes/workers/variables.tf
deleted file mode 100644
index 913afb73..00000000
--- a/google-cloud/fedora-atomic/kubernetes/workers/variables.tf
+++ /dev/null
@@ -1,94 +0,0 @@
-variable "name" {
- type = "string"
- description = "Unique name for the worker pool"
-}
-
-variable "cluster_name" {
- type = "string"
- description = "Must be set to `cluster_name` of cluster"
-}
-
-# Google Cloud
-
-variable "region" {
- type = "string"
- description = "Must be set to `region` of cluster"
-}
-
-variable "network" {
- type = "string"
- description = "Must be set to `network_name` output by cluster"
-}
-
-# instances
-
-variable "count" {
- type = "string"
- default = "1"
- description = "Number of worker compute instances the instance group should manage"
-}
-
-variable "machine_type" {
- type = "string"
- default = "n1-standard-1"
- description = "Machine type for compute instances (see `gcloud compute machine-types list`)"
-}
-
-variable "os_image" {
- type = "string"
- description = "Custom Fedora Atomic image"
-}
-
-variable "disk_size" {
- type = "string"
- default = "40"
- description = "Size of the disk in GB"
-}
-
-variable "preemptible" {
- type = "string"
- default = "false"
- description = "If enabled, Compute Engine will terminate instances randomly within 24 hours"
-}
-
-# configuration
-
-variable "kubeconfig" {
- type = "string"
- description = "Must be set to `kubeconfig` output by cluster"
-}
-
-variable "ssh_authorized_key" {
- type = "string"
- description = "SSH public key for user 'fedora'"
-}
-
-variable "service_cidr" {
- description = <<EOD