Remove Fedora Atomic modules

* Typhoon for Fedora Atomic was deprecated in March 2019
* https://typhoon.psdn.io/announce/#march-27-2019
Dalton Hubble
2019-06-23 13:38:49 -07:00
parent ca18fab5f0
commit fff7cc035d
67 changed files with 2 additions and 3939 deletions


@@ -1,23 +0,0 @@
The MIT License (MIT)

Copyright (c) 2017 Typhoon Authors
Copyright (c) 2017 Dalton Hubble

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.


@@ -1,23 +0,0 @@
# Typhoon <img align="right" src="https://storage.googleapis.com/poseidon/typhoon-logo.png">

Typhoon is a minimal and free Kubernetes distribution.

* Minimal, stable base Kubernetes distribution
* Declarative infrastructure and configuration
* Free (freedom and cost) and privacy-respecting
* Practical for labs, datacenters, and clouds

Typhoon distributes upstream Kubernetes, architectural conventions, and cluster addons, much like a GNU/Linux distribution provides the Linux kernel and userspace components.

## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>

* Kubernetes v1.15.0 (upstream, via [kubernetes-incubator/bootkube](https://github.com/kubernetes-incubator/bootkube))
* Single or multi-master, [Calico](https://www.projectcalico.org/) or [flannel](https://github.com/coreos/flannel) networking
* On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
* Advanced features like [worker pools](https://typhoon.psdn.io/advanced/worker-pools/) and [preemptible](https://typhoon.psdn.io/cl/google-cloud/#preemption) workers
* Ready for Ingress, Prometheus, Grafana, and other optional [addons](https://typhoon.psdn.io/addons/overview/)

## Docs

Please see the [official docs](https://typhoon.psdn.io) and the Google Cloud [tutorial](https://typhoon.psdn.io/cl/google-cloud/).
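
For context, creating a Fedora Atomic cluster on Google Cloud meant instantiating the module this commit removes. A minimal sketch, assuming the module path in this tree; the `ref`, DNS zone, image name, and key values below are hypothetical placeholders:

```tf
# Hypothetical instantiation of the removed Fedora Atomic Google Cloud module
module "google-cloud-yavin" {
  source = "git::https://github.com/poseidon/typhoon//google-cloud/fedora-atomic/kubernetes?ref=<pre-removal-ref>"

  # Google Cloud
  cluster_name  = "yavin"            # hypothetical cluster name
  region        = "us-central1"
  dns_zone      = "example.com"      # hypothetical managed DNS zone
  dns_zone_name = "example-zone"
  os_image      = "fedora-atomic-28" # custom Fedora Atomic image (hypothetical name)

  # configuration
  ssh_authorized_key = "ssh-rsa AAAAB3NZ..."                # SSH public key for user 'fedora'
  asset_dir          = "/home/user/.secrets/clusters/yavin" # generated assets (contains secrets)

  # optional
  worker_count = 2
}
```

The inputs correspond to the variables declared in the module's variables file further down in this diff.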


@@ -1,97 +0,0 @@
# TCP Proxy load balancer DNS record
resource "google_dns_record_set" "apiserver" {
  # DNS Zone name where record should be created
  managed_zone = "${var.dns_zone_name}"

  # DNS record
  name = "${format("%s.%s.", var.cluster_name, var.dns_zone)}"
  type = "A"
  ttl  = 300

  # IPv4 address of apiserver TCP Proxy load balancer
  rrdatas = ["${google_compute_global_address.apiserver-ipv4.address}"]
}

# Static IPv4 address for the TCP Proxy Load Balancer
resource "google_compute_global_address" "apiserver-ipv4" {
  name       = "${var.cluster_name}-apiserver-ip"
  ip_version = "IPV4"
}

# Forward IPv4 TCP traffic to the TCP proxy load balancer
resource "google_compute_global_forwarding_rule" "apiserver" {
  name        = "${var.cluster_name}-apiserver"
  ip_address  = "${google_compute_global_address.apiserver-ipv4.address}"
  ip_protocol = "TCP"
  port_range  = "443"
  target      = "${google_compute_target_tcp_proxy.apiserver.self_link}"
}

# TCP Proxy Load Balancer for apiservers
resource "google_compute_target_tcp_proxy" "apiserver" {
  name            = "${var.cluster_name}-apiserver"
  description     = "Distribute TCP load across ${var.cluster_name} controllers"
  backend_service = "${google_compute_backend_service.apiserver.self_link}"
}

# Backend service backed by unmanaged instance groups
resource "google_compute_backend_service" "apiserver" {
  name        = "${var.cluster_name}-apiserver"
  description = "${var.cluster_name} apiserver service"

  protocol         = "TCP"
  port_name        = "apiserver"
  session_affinity = "NONE"
  timeout_sec      = "300"

  # controller(s) spread across zonal instance groups
  backend {
    group = "${google_compute_instance_group.controllers.0.self_link}"
  }

  backend {
    group = "${google_compute_instance_group.controllers.1.self_link}"
  }

  backend {
    group = "${google_compute_instance_group.controllers.2.self_link}"
  }

  health_checks = ["${google_compute_health_check.apiserver.self_link}"]
}

# Instance group of heterogeneous (unmanaged) controller instances
resource "google_compute_instance_group" "controllers" {
  count = "${length(local.zones)}"

  name = "${format("%s-controllers-%s", var.cluster_name, element(local.zones, count.index))}"
  zone = "${element(local.zones, count.index)}"

  named_port {
    name = "apiserver"
    port = "443"
  }

  # add instances in the zone into the instance group
  instances = [
    "${matchkeys(google_compute_instance.controllers.*.self_link,
    google_compute_instance.controllers.*.zone,
    list(element(local.zones, count.index)))}",
  ]
}

# TCP health check for apiserver
resource "google_compute_health_check" "apiserver" {
  name        = "${var.cluster_name}-apiserver-tcp-health"
  description = "TCP health check for kube-apiserver"

  timeout_sec        = 5
  check_interval_sec = 5

  healthy_threshold   = 1
  unhealthy_threshold = 3

  tcp_health_check {
    port = "443"
  }
}


@@ -1,21 +0,0 @@
# Self-hosted Kubernetes assets (kubeconfig, manifests)
module "bootkube" {
  source = "git::https://github.com/poseidon/terraform-render-bootkube.git?ref=62df9ad69cc0da35f47d40fa981370c4503ad581"

  cluster_name = "${var.cluster_name}"
  api_servers  = ["${format("%s.%s", var.cluster_name, var.dns_zone)}"]
  etcd_servers = ["${google_dns_record_set.etcds.*.name}"]
  asset_dir    = "${var.asset_dir}"

  networking   = "${var.networking}"
  network_mtu  = 1440
  pod_cidr     = "${var.pod_cidr}"
  service_cidr = "${var.service_cidr}"

  cluster_domain_suffix = "${var.cluster_domain_suffix}"
  enable_reporting      = "${var.enable_reporting}"

  # Fedora
  trusted_certs_dir = "/etc/pki/tls/certs"

  // temporary
  apiserver_port = 443
}


@@ -1,93 +0,0 @@
#cloud-config
write_files:
  - path: /etc/etcd/etcd.conf
    content: |
      ETCD_NAME=${etcd_name}
      ETCD_DATA_DIR=/var/lib/etcd
      ETCD_ADVERTISE_CLIENT_URLS=https://${etcd_domain}:2379
      ETCD_INITIAL_ADVERTISE_PEER_URLS=https://${etcd_domain}:2380
      ETCD_LISTEN_CLIENT_URLS=https://0.0.0.0:2379
      ETCD_LISTEN_PEER_URLS=https://0.0.0.0:2380
      ETCD_LISTEN_METRICS_URLS=http://0.0.0.0:2381
      ETCD_INITIAL_CLUSTER=${etcd_initial_cluster}
      ETCD_STRICT_RECONFIG_CHECK=true
      ETCD_TRUSTED_CA_FILE=/etc/ssl/certs/etcd/server-ca.crt
      ETCD_CERT_FILE=/etc/ssl/certs/etcd/server.crt
      ETCD_KEY_FILE=/etc/ssl/certs/etcd/server.key
      ETCD_CLIENT_CERT_AUTH=true
      ETCD_PEER_TRUSTED_CA_FILE=/etc/ssl/certs/etcd/peer-ca.crt
      ETCD_PEER_CERT_FILE=/etc/ssl/certs/etcd/peer.crt
      ETCD_PEER_KEY_FILE=/etc/ssl/certs/etcd/peer.key
      ETCD_PEER_CLIENT_CERT_AUTH=true
  - path: /etc/systemd/system/kubelet.service.d/10-typhoon.conf
    content: |
      [Unit]
      Wants=rpc-statd.service
      [Service]
      ExecStartPre=/bin/mkdir -p /opt/cni/bin
      ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
      ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
      ExecStartPre=/bin/mkdir -p /etc/kubernetes/checkpoint-secrets
      ExecStartPre=/bin/mkdir -p /etc/kubernetes/inactive-manifests
      ExecStartPre=/bin/mkdir -p /var/lib/cni
      ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins
      ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
      Restart=always
      RestartSec=10
  - path: /etc/kubernetes/kubelet.conf
    content: |
      ARGS="--anonymous-auth=false \
        --authentication-token-webhook \
        --authorization-mode=Webhook \
        --client-ca-file=/etc/kubernetes/ca.crt \
        --cluster_dns=${cluster_dns_service_ip} \
        --cluster_domain=${cluster_domain_suffix} \
        --cni-conf-dir=/etc/kubernetes/cni/net.d \
        --exit-on-lock-contention \
        --kubeconfig=/etc/kubernetes/kubeconfig \
        --lock-file=/var/run/lock/kubelet.lock \
        --network-plugin=cni \
        --node-labels=node-role.kubernetes.io/master \
        --node-labels=node-role.kubernetes.io/controller="true" \
        --pod-manifest-path=/etc/kubernetes/manifests \
        --read-only-port=0 \
        --register-with-taints=node-role.kubernetes.io/master=:NoSchedule \
        --volume-plugin-dir=/var/lib/kubelet/volumeplugins"
  - path: /etc/kubernetes/kubeconfig
    permissions: '0644'
    content: |
      ${kubeconfig}
  - path: /var/lib/bootkube/.keep
  - path: /etc/NetworkManager/conf.d/typhoon.conf
    content: |
      [main]
      plugins=keyfile
      [keyfile]
      unmanaged-devices=interface-name:cali*;interface-name:tunl*
  - path: /etc/selinux/config
    owner: root:root
    permissions: '0644'
    content: |
      SELINUX=permissive
      SELINUXTYPE=targeted
bootcmd:
  - [setenforce, Permissive]
  - [systemctl, disable, firewalld, --now]
  # https://github.com/kubernetes/kubernetes/issues/60869
  - [modprobe, ip_vs]
runcmd:
  - [systemctl, daemon-reload]
  - [systemctl, restart, NetworkManager]
  - "atomic install --system --name=etcd quay.io/poseidon/etcd:v3.3.12"
  - "atomic install --system --name=kubelet quay.io/poseidon/kubelet:v1.14.1"
  - "atomic install --system --name=bootkube quay.io/poseidon/bootkube:v0.14.0"
  - [systemctl, start, --no-block, etcd.service]
  - [systemctl, start, --no-block, kubelet.service]
users:
  - default
  - name: fedora
    gecos: Fedora Admin
    sudo: ALL=(ALL) NOPASSWD:ALL
    groups: wheel,adm,systemd-journal,docker
    ssh-authorized-keys:
      - "${ssh_authorized_key}"


@@ -1,98 +0,0 @@
# Discrete DNS records for each controller's private IPv4 for etcd usage
resource "google_dns_record_set" "etcds" {
  count = "${var.controller_count}"

  # DNS Zone name where record should be created
  managed_zone = "${var.dns_zone_name}"

  # DNS record
  name = "${format("%s-etcd%d.%s.", var.cluster_name, count.index, var.dns_zone)}"
  type = "A"
  ttl  = 300

  # private IPv4 address for etcd
  rrdatas = ["${element(google_compute_instance.controllers.*.network_interface.0.network_ip, count.index)}"]
}

# Zones in the region
data "google_compute_zones" "all" {
  region = "${var.region}"
}

locals {
  # TCP proxy load balancers require a fixed number of zonal backends. Spread
  # controllers over up to 3 zones, since all GCP regions have at least 3.
  zones = "${slice(data.google_compute_zones.all.names, 0, 3)}"

  controllers_ipv4_public = ["${google_compute_instance.controllers.*.network_interface.0.access_config.0.nat_ip}"]
}

# Controller instances
resource "google_compute_instance" "controllers" {
  count = "${var.controller_count}"

  name         = "${var.cluster_name}-controller-${count.index}"
  zone         = "${element(local.zones, count.index)}"
  machine_type = "${var.controller_type}"

  metadata = {
    user-data = "${element(data.template_file.controller-cloudinit.*.rendered, count.index)}"
  }

  boot_disk {
    auto_delete = true

    initialize_params {
      image = "${var.os_image}"
      size  = "${var.disk_size}"
    }
  }

  network_interface {
    network = "${google_compute_network.network.name}"

    # Ephemeral external IP
    access_config = {}
  }

  can_ip_forward = true
  tags           = ["${var.cluster_name}-controller"]

  lifecycle {
    ignore_changes = [
      "metadata",
    ]
  }
}

# Controller Cloud-Init
data "template_file" "controller-cloudinit" {
  count = "${var.controller_count}"

  template = "${file("${path.module}/cloudinit/controller.yaml.tmpl")}"

  vars = {
    # Cannot use cyclic dependencies on controllers or their DNS records
    etcd_name   = "etcd${count.index}"
    etcd_domain = "${var.cluster_name}-etcd${count.index}.${var.dns_zone}"

    # etcd0=https://cluster-etcd0.example.com,etcd1=https://cluster-etcd1.example.com,...
    etcd_initial_cluster = "${join(",", data.template_file.etcds.*.rendered)}"

    kubeconfig             = "${indent(6, module.bootkube.kubeconfig-kubelet)}"
    ssh_authorized_key     = "${var.ssh_authorized_key}"
    cluster_dns_service_ip = "${cidrhost(var.service_cidr, 10)}"
    cluster_domain_suffix  = "${var.cluster_domain_suffix}"
  }
}

data "template_file" "etcds" {
  count    = "${var.controller_count}"
  template = "etcd$${index}=https://$${cluster_name}-etcd$${index}.$${dns_zone}:2380"

  vars = {
    index        = "${count.index}"
    cluster_name = "${var.cluster_name}"
    dns_zone     = "${var.dns_zone}"
  }
}


@@ -1,122 +0,0 @@
# Static IPv4 address for Ingress Load Balancing
resource "google_compute_global_address" "ingress-ipv4" {
  name       = "${var.cluster_name}-ingress-ipv4"
  ip_version = "IPV4"
}

# Static IPv6 address for Ingress Load Balancing
resource "google_compute_global_address" "ingress-ipv6" {
  name       = "${var.cluster_name}-ingress-ipv6"
  ip_version = "IPV6"
}

# Forward IPv4 TCP traffic to the HTTP proxy load balancer
# Google Cloud does not allow TCP proxies for port 80. Must use HTTP proxy.
resource "google_compute_global_forwarding_rule" "ingress-http-ipv4" {
  name        = "${var.cluster_name}-ingress-http-ipv4"
  ip_address  = "${google_compute_global_address.ingress-ipv4.address}"
  ip_protocol = "TCP"
  port_range  = "80"
  target      = "${google_compute_target_http_proxy.ingress-http.self_link}"
}

# Forward IPv4 TCP traffic to the TCP proxy load balancer
resource "google_compute_global_forwarding_rule" "ingress-https-ipv4" {
  name        = "${var.cluster_name}-ingress-https-ipv4"
  ip_address  = "${google_compute_global_address.ingress-ipv4.address}"
  ip_protocol = "TCP"
  port_range  = "443"
  target      = "${google_compute_target_tcp_proxy.ingress-https.self_link}"
}

# Forward IPv6 TCP traffic to the HTTP proxy load balancer
# Google Cloud does not allow TCP proxies for port 80. Must use HTTP proxy.
resource "google_compute_global_forwarding_rule" "ingress-http-ipv6" {
  name        = "${var.cluster_name}-ingress-http-ipv6"
  ip_address  = "${google_compute_global_address.ingress-ipv6.address}"
  ip_protocol = "TCP"
  port_range  = "80"
  target      = "${google_compute_target_http_proxy.ingress-http.self_link}"
}

# Forward IPv6 TCP traffic to the TCP proxy load balancer
resource "google_compute_global_forwarding_rule" "ingress-https-ipv6" {
  name        = "${var.cluster_name}-ingress-https-ipv6"
  ip_address  = "${google_compute_global_address.ingress-ipv6.address}"
  ip_protocol = "TCP"
  port_range  = "443"
  target      = "${google_compute_target_tcp_proxy.ingress-https.self_link}"
}

# HTTP proxy load balancer for ingress controllers
resource "google_compute_target_http_proxy" "ingress-http" {
  name        = "${var.cluster_name}-ingress-http"
  description = "Distribute HTTP load across ${var.cluster_name} workers"
  url_map     = "${google_compute_url_map.ingress-http.self_link}"
}

# TCP proxy load balancer for ingress controllers
resource "google_compute_target_tcp_proxy" "ingress-https" {
  name            = "${var.cluster_name}-ingress-https"
  description     = "Distribute HTTPS load across ${var.cluster_name} workers"
  backend_service = "${google_compute_backend_service.ingress-https.self_link}"
}

# HTTP URL Map (required)
resource "google_compute_url_map" "ingress-http" {
  name = "${var.cluster_name}-ingress-http"

  # Do not add host/path rules for applications here. Use Ingress resources.
  default_service = "${google_compute_backend_service.ingress-http.self_link}"
}

# Backend service backed by managed instance group of workers
resource "google_compute_backend_service" "ingress-http" {
  name        = "${var.cluster_name}-ingress-http"
  description = "${var.cluster_name} ingress service"

  protocol         = "HTTP"
  port_name        = "http"
  session_affinity = "NONE"
  timeout_sec      = "60"

  backend {
    group = "${module.workers.instance_group}"
  }

  health_checks = ["${google_compute_health_check.ingress.self_link}"]
}

# Backend service backed by managed instance group of workers
resource "google_compute_backend_service" "ingress-https" {
  name        = "${var.cluster_name}-ingress-https"
  description = "${var.cluster_name} ingress service"

  protocol         = "TCP"
  port_name        = "https"
  session_affinity = "NONE"
  timeout_sec      = "60"

  backend {
    group = "${module.workers.instance_group}"
  }

  health_checks = ["${google_compute_health_check.ingress.self_link}"]
}

# Ingress HTTP Health Check
resource "google_compute_health_check" "ingress" {
  name        = "${var.cluster_name}-ingress-health"
  description = "Health check for Ingress controller"

  timeout_sec        = 5
  check_interval_sec = 5

  healthy_threshold   = 2
  unhealthy_threshold = 4

  http_health_check {
    port         = 10254
    request_path = "/healthz"
  }
}


@@ -1,153 +0,0 @@
resource "google_compute_network" "network" {
name = "${var.cluster_name}"
description = "Network for the ${var.cluster_name} cluster"
auto_create_subnetworks = true
}
resource "google_compute_firewall" "allow-ssh" {
name = "${var.cluster_name}-allow-ssh"
network = "${google_compute_network.network.name}"
allow {
protocol = "tcp"
ports = [22]
}
source_ranges = ["0.0.0.0/0"]
target_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
}
resource "google_compute_firewall" "internal-etcd" {
name = "${var.cluster_name}-internal-etcd"
network = "${google_compute_network.network.name}"
allow {
protocol = "tcp"
ports = [2379, 2380]
}
source_tags = ["${var.cluster_name}-controller"]
target_tags = ["${var.cluster_name}-controller"]
}
# Allow Prometheus to scrape etcd metrics
resource "google_compute_firewall" "internal-etcd-metrics" {
name = "${var.cluster_name}-internal-etcd-metrics"
network = "${google_compute_network.network.name}"
allow {
protocol = "tcp"
ports = [2381]
}
source_tags = ["${var.cluster_name}-worker"]
target_tags = ["${var.cluster_name}-controller"]
}
resource "google_compute_firewall" "allow-apiserver" {
name = "${var.cluster_name}-allow-apiserver"
network = "${google_compute_network.network.name}"
allow {
protocol = "tcp"
ports = [443]
}
source_ranges = ["0.0.0.0/0"]
target_tags = ["${var.cluster_name}-controller"]
}
# BGP and IPIP
# https://docs.projectcalico.org/latest/reference/public-cloud/gce
resource "google_compute_firewall" "internal-bgp" {
count = "${var.networking != "flannel" ? 1 : 0}"
name = "${var.cluster_name}-internal-bgp"
network = "${google_compute_network.network.name}"
allow {
protocol = "tcp"
ports = ["179"]
}
allow {
protocol = "ipip"
}
source_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
target_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
}
# flannel VXLAN
resource "google_compute_firewall" "internal-vxlan" {
count = "${var.networking == "flannel" ? 1 : 0}"
name = "${var.cluster_name}-internal-vxlan"
network = "${google_compute_network.network.name}"
allow {
protocol = "udp"
ports = [4789]
}
source_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
target_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
}
# Allow Prometheus to scrape node-exporter daemonset
resource "google_compute_firewall" "internal-node-exporter" {
name = "${var.cluster_name}-internal-node-exporter"
network = "${google_compute_network.network.name}"
allow {
protocol = "tcp"
ports = [9100]
}
source_tags = ["${var.cluster_name}-worker"]
target_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
}
# Allow apiserver to access kubelets for exec, log, port-forward
resource "google_compute_firewall" "internal-kubelet" {
name = "${var.cluster_name}-internal-kubelet"
network = "${google_compute_network.network.name}"
allow {
protocol = "tcp"
ports = [10250]
}
# allow Prometheus to scrape kubelet metrics too
source_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
target_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
}
# Workers
resource "google_compute_firewall" "allow-ingress" {
name = "${var.cluster_name}-allow-ingress"
network = "${google_compute_network.network.name}"
allow {
protocol = "tcp"
ports = [80, 443]
}
source_ranges = ["0.0.0.0/0"]
target_tags = ["${var.cluster_name}-worker"]
}
resource "google_compute_firewall" "google-health-checks" {
name = "${var.cluster_name}-google-health-checks"
network = "${google_compute_network.network.name}"
allow {
protocol = "tcp"
ports = [10254]
}
# https://cloud.google.com/compute/docs/load-balancing/tcp-ssl/tcp-proxy#health-checking
source_ranges = ["130.211.0.0/22", "35.191.0.0/16"]
target_tags = ["${var.cluster_name}-worker"]
}


@@ -1,38 +0,0 @@
output "kubeconfig-admin" {
value = "${module.bootkube.kubeconfig-admin}"
}
# Outputs for Kubernetes Ingress
output "ingress_static_ipv4" {
description = "Global IPv4 address for proxy load balancing to the nearest Ingress controller"
value = "${google_compute_global_address.ingress-ipv4.address}"
}
output "ingress_static_ipv6" {
description = "Global IPv6 address for proxy load balancing to the nearest Ingress controller"
value = "${google_compute_global_address.ingress-ipv6.address}"
}
# Outputs for worker pools
output "network_name" {
value = "${google_compute_network.network.name}"
}
output "kubeconfig" {
value = "${module.bootkube.kubeconfig-kubelet}"
}
# Outputs for custom firewalling
output "network_self_link" {
value = "${google_compute_network.network.self_link}"
}
# Outputs for custom load balancing
output "worker_instance_group" {
description = "Full URL of the worker managed instance group"
value = "${module.workers.instance_group}"
}


@@ -1,25 +0,0 @@
# Terraform version and plugin versions
terraform {
  required_version = ">= 0.11.0"
}

provider "google" {
  version = ">= 1.19, < 3.0"
}

provider "local" {
  version = "~> 1.0"
}

provider "null" {
  version = "~> 1.0"
}

provider "template" {
  version = "~> 1.0"
}

provider "tls" {
  version = "~> 1.0"
}


@@ -1,89 +0,0 @@
# Secure copy etcd TLS assets to controllers.
resource "null_resource" "copy-controller-secrets" {
  count = "${var.controller_count}"

  connection {
    type    = "ssh"
    host    = "${element(local.controllers_ipv4_public, count.index)}"
    user    = "fedora"
    timeout = "15m"
  }

  provisioner "file" {
    content     = "${module.bootkube.etcd_ca_cert}"
    destination = "$HOME/etcd-client-ca.crt"
  }

  provisioner "file" {
    content     = "${module.bootkube.etcd_client_cert}"
    destination = "$HOME/etcd-client.crt"
  }

  provisioner "file" {
    content     = "${module.bootkube.etcd_client_key}"
    destination = "$HOME/etcd-client.key"
  }

  provisioner "file" {
    content     = "${module.bootkube.etcd_server_cert}"
    destination = "$HOME/etcd-server.crt"
  }

  provisioner "file" {
    content     = "${module.bootkube.etcd_server_key}"
    destination = "$HOME/etcd-server.key"
  }

  provisioner "file" {
    content     = "${module.bootkube.etcd_peer_cert}"
    destination = "$HOME/etcd-peer.crt"
  }

  provisioner "file" {
    content     = "${module.bootkube.etcd_peer_key}"
    destination = "$HOME/etcd-peer.key"
  }

  provisioner "remote-exec" {
    inline = [
      "sudo mkdir -p /etc/ssl/etcd/etcd",
      "sudo mv etcd-client* /etc/ssl/etcd/",
      "sudo cp /etc/ssl/etcd/etcd-client-ca.crt /etc/ssl/etcd/etcd/server-ca.crt",
      "sudo mv etcd-server.crt /etc/ssl/etcd/etcd/server.crt",
      "sudo mv etcd-server.key /etc/ssl/etcd/etcd/server.key",
      "sudo cp /etc/ssl/etcd/etcd-client-ca.crt /etc/ssl/etcd/etcd/peer-ca.crt",
      "sudo mv etcd-peer.crt /etc/ssl/etcd/etcd/peer.crt",
      "sudo mv etcd-peer.key /etc/ssl/etcd/etcd/peer.key",
    ]
  }
}

# Secure copy bootkube assets to ONE controller and start bootkube to perform
# one-time self-hosted cluster bootstrapping.
resource "null_resource" "bootkube-start" {
  depends_on = [
    "null_resource.copy-controller-secrets",
    "module.workers",
    "google_dns_record_set.apiserver",
  ]

  connection {
    type    = "ssh"
    host    = "${element(local.controllers_ipv4_public, 0)}"
    user    = "fedora"
    timeout = "15m"
  }

  provisioner "file" {
    source      = "${var.asset_dir}"
    destination = "$HOME/assets"
  }

  provisioner "remote-exec" {
    inline = [
      "while [ ! -f /var/lib/cloud/instance/boot-finished ]; do sleep 4; done",
      "sudo mv $HOME/assets /var/lib/bootkube",
      "sudo systemctl start bootkube",
    ]
  }
}


@@ -1,110 +0,0 @@
variable "cluster_name" {
type = "string"
description = "Unique cluster name (prepended to dns_zone)"
}
# Google Cloud
variable "region" {
type = "string"
description = "Google Cloud Region (e.g. us-central1, see `gcloud compute regions list`)"
}
variable "dns_zone" {
type = "string"
description = "Google Cloud DNS Zone (e.g. google-cloud.example.com)"
}
variable "dns_zone_name" {
type = "string"
description = "Google Cloud DNS Zone name (e.g. example-zone)"
}
# instances
variable "controller_count" {
type = "string"
default = "1"
description = "Number of controllers (i.e. masters)"
}
variable "worker_count" {
type = "string"
default = "1"
description = "Number of workers"
}
variable "controller_type" {
type = "string"
default = "n1-standard-1"
description = "Machine type for controllers (see `gcloud compute machine-types list`)"
}
variable "worker_type" {
type = "string"
default = "n1-standard-1"
description = "Machine type for controllers (see `gcloud compute machine-types list`)"
}
variable "os_image" {
type = "string"
description = "Custom Fedora Atomic image"
}
variable "disk_size" {
type = "string"
default = "40"
description = "Size of the disk in GB"
}
variable "worker_preemptible" {
type = "string"
default = "false"
description = "If enabled, Compute Engine will terminate workers randomly within 24 hours"
}
# configuration
variable "ssh_authorized_key" {
type = "string"
description = "SSH public key for user 'fedora'"
}
variable "asset_dir" {
description = "Path to a directory where generated assets should be placed (contains secrets)"
type = "string"
}
variable "networking" {
description = "Choice of networking provider (flannel or calico)"
type = "string"
default = "calico"
}
variable "pod_cidr" {
description = "CIDR IPv4 range to assign Kubernetes pods"
type = "string"
default = "10.2.0.0/16"
}
variable "service_cidr" {
description = <<EOD
CIDR IPv4 range to assign Kubernetes services.
The 1st IP will be reserved for kube_apiserver, the 10th IP will be reserved for coredns.
EOD
type = "string"
default = "10.3.0.0/16"
}
variable "cluster_domain_suffix" {
description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
type = "string"
default = "cluster.local"
}
variable "enable_reporting" {
type = "string"
description = "Enable usage or analytics reporting to upstreams (Calico)"
default = "false"
}


@@ -1,20 +0,0 @@
module "workers" {
source = "./workers"
name = "${var.cluster_name}"
cluster_name = "${var.cluster_name}"
# GCE
region = "${var.region}"
network = "${google_compute_network.network.name}"
count = "${var.worker_count}"
machine_type = "${var.worker_type}"
os_image = "${var.os_image}"
disk_size = "${var.disk_size}"
preemptible = "${var.worker_preemptible}"
# configuration
kubeconfig = "${module.bootkube.kubeconfig-kubelet}"
ssh_authorized_key = "${var.ssh_authorized_key}"
service_cidr = "${var.service_cidr}"
cluster_domain_suffix = "${var.cluster_domain_suffix}"
}


@@ -1,66 +0,0 @@
#cloud-config
write_files:
  - path: /etc/systemd/system/kubelet.service.d/10-typhoon.conf
    content: |
      [Unit]
      Wants=rpc-statd.service
      [Service]
      ExecStartPre=/bin/mkdir -p /opt/cni/bin
      ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
      ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
      ExecStartPre=/bin/mkdir -p /var/lib/cni
      ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins
      ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
      Restart=always
      RestartSec=10
  - path: /etc/kubernetes/kubelet.conf
    content: |
      ARGS="--anonymous-auth=false \
        --authentication-token-webhook \
        --authorization-mode=Webhook \
        --client-ca-file=/etc/kubernetes/ca.crt \
        --cluster_dns=${cluster_dns_service_ip} \
        --cluster_domain=${cluster_domain_suffix} \
        --cni-conf-dir=/etc/kubernetes/cni/net.d \
        --exit-on-lock-contention \
        --kubeconfig=/etc/kubernetes/kubeconfig \
        --lock-file=/var/run/lock/kubelet.lock \
        --network-plugin=cni \
        --node-labels=node-role.kubernetes.io/node \
        --pod-manifest-path=/etc/kubernetes/manifests \
        --read-only-port=0 \
        --volume-plugin-dir=/var/lib/kubelet/volumeplugins"
  - path: /etc/kubernetes/kubeconfig
    permissions: '0644'
    content: |
      ${kubeconfig}
  - path: /etc/NetworkManager/conf.d/typhoon.conf
    content: |
      [main]
      plugins=keyfile
      [keyfile]
      unmanaged-devices=interface-name:cali*;interface-name:tunl*
  - path: /etc/selinux/config
    owner: root:root
    permissions: '0644'
    content: |
      SELINUX=permissive
      SELINUXTYPE=targeted
bootcmd:
  - [setenforce, Permissive]
  - [systemctl, disable, firewalld, --now]
  # https://github.com/kubernetes/kubernetes/issues/60869
  - [modprobe, ip_vs]
runcmd:
  - [systemctl, daemon-reload]
  - [systemctl, restart, NetworkManager]
  - "atomic install --system --name=kubelet quay.io/poseidon/kubelet:v1.14.1"
  - [systemctl, start, --no-block, kubelet.service]
users:
  - default
  - name: fedora
    gecos: Fedora Admin
    sudo: ALL=(ALL) NOPASSWD:ALL
    groups: wheel,adm,systemd-journal,docker
    ssh-authorized-keys:
      - "${ssh_authorized_key}"


@@ -1,4 +0,0 @@
output "instance_group" {
description = "Full URL of the worker managed instance group"
value = "${google_compute_region_instance_group_manager.workers.instance_group}"
}


@@ -1,94 +0,0 @@
variable "name" {
type = "string"
description = "Unique name for the worker pool"
}
variable "cluster_name" {
type = "string"
description = "Must be set to `cluster_name of cluster`"
}
# Google Cloud
variable "region" {
type = "string"
description = "Must be set to `region` of cluster"
}
variable "network" {
type = "string"
description = "Must be set to `network_name` output by cluster"
}
# instances
variable "count" {
type = "string"
default = "1"
description = "Number of worker compute instances the instance group should manage"
}
variable "machine_type" {
type = "string"
default = "n1-standard-1"
description = "Machine type for compute instances (e.g. gcloud compute machine-types list)"
}
variable "os_image" {
type = "string"
description = "Custom Fedora Atomic image"
}
variable "disk_size" {
type = "string"
default = "40"
description = "Size of the disk in GB"
}
variable "preemptible" {
type = "string"
default = "false"
description = "If enabled, Compute Engine will terminate instances randomly within 24 hours"
}
# configuration
variable "kubeconfig" {
type = "string"
description = "Must be set to `kubeconfig` output by cluster"
}
variable "ssh_authorized_key" {
type = "string"
description = "SSH public key for user 'fedora'"
}
variable "service_cidr" {
description = <<EOD
CIDR IPv4 range to assign Kubernetes services.
The 1st IP will be reserved for kube_apiserver, the 10th IP will be reserved for coredns.
EOD
type = "string"
default = "10.3.0.0/16"
}
variable "cluster_domain_suffix" {
description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
type = "string"
default = "cluster.local"
}
# unofficial, undocumented, unsupported, temporary
variable "accelerator_type" {
type = "string"
default = ""
description = "Google Compute Engine accelerator type (e.g. nvidia-tesla-k80, see gcloud compute accelerator-types list)"
}
variable "accelerator_count" {
type = "string"
default = "0"
description = "Number of compute engine accelerators"
}


@@ -1,77 +0,0 @@
# Regional managed instance group of workers
resource "google_compute_region_instance_group_manager" "workers" {
name = "${var.name}-worker-group"
description = "Compute instance group of ${var.name} workers"
# instance name prefix for instances in the group
base_instance_name = "${var.name}-worker"
instance_template = "${google_compute_instance_template.worker.self_link}"
region = "${var.region}"
target_size = "${var.count}"
named_port {
name = "http"
port = "80"
}
named_port {
name = "https"
port = "443"
}
}
# Worker instance template
resource "google_compute_instance_template" "worker" {
name_prefix = "${var.name}-worker-"
description = "Worker Instance template"
machine_type = "${var.machine_type}"
metadata = {
user-data = "${data.template_file.worker-cloudinit.rendered}"
}
scheduling {
automatic_restart = "${var.preemptible ? false : true}"
preemptible = "${var.preemptible}"
}
disk {
auto_delete = true
boot = true
source_image = "${var.os_image}"
disk_size_gb = "${var.disk_size}"
}
network_interface {
network = "${var.network}"
# Ephemeral external IP
access_config = {}
}
can_ip_forward = true
tags = ["worker", "${var.cluster_name}-worker", "${var.name}-worker"]
guest_accelerator {
count = "${var.accelerator_count}"
type = "${var.accelerator_type}"
}
lifecycle {
# To update an Instance Template, Terraform should replace the existing resource
create_before_destroy = true
}
}
# Worker Cloud-Init
data "template_file" "worker-cloudinit" {
template = "${file("${path.module}/cloudinit/worker.yaml.tmpl")}"
vars = {
kubeconfig = "${indent(6, var.kubeconfig)}"
ssh_authorized_key = "${var.ssh_authorized_key}"
cluster_dns_service_ip = "${cidrhost(var.service_cidr, 10)}"
cluster_domain_suffix = "${var.cluster_domain_suffix}"
}
}
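
The `workers` module above also backed the worker pools feature linked from the README. Attaching an extra pool to an existing cluster looked roughly like the following sketch; the module `ref`, pool name, image, and key values are hypothetical, and the cluster reference reuses the module instance from the earlier README example:

```tf
# Hypothetical worker pool attached to the cluster defined earlier
module "yavin-worker-pool" {
  source = "git::https://github.com/poseidon/typhoon//google-cloud/fedora-atomic/kubernetes/workers?ref=<pre-removal-ref>"

  # Google Cloud
  region  = "us-central1"                               # must match the cluster's region
  network = "${module.google-cloud-yavin.network_name}" # cluster output for worker pools

  # configuration
  name               = "yavin-16x"                      # hypothetical pool name
  cluster_name       = "yavin"
  kubeconfig         = "${module.google-cloud-yavin.kubeconfig}"
  ssh_authorized_key = "ssh-rsa AAAAB3NZ..."
  os_image           = "fedora-atomic-28"               # hypothetical image name

  count        = 2
  machine_type = "n1-standard-16"
}
```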
