Remove Fedora Atomic modules
* Typhoon for Fedora Atomic was deprecated in March 2019
* https://typhoon.psdn.io/announce/#march-27-2019
parent ca18fab5f0
commit fff7cc035d
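For anyone still running the removed modules, the path is to recreate the cluster on a maintained OS. A minimal sketch, assuming a cluster formerly defined against a fedora-atomic module path; the container-linux path follows Typhoon's module layout and the release ref is a placeholder, not something this commit prescribes:

module "tempest" {
  # formerly: git::https://github.com/poseidon/typhoon//aws/fedora-atomic/kubernetes
  source = "git::https://github.com/poseidon/typhoon//aws/container-linux/kubernetes?ref=<release>"

  # existing settings (cluster_name, dns_zone, ssh_authorized_key, ...) carry over
}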
@@ -5,7 +5,7 @@
 ### Environment
 
 * Platform: aws, azure, bare-metal, google-cloud, digital-ocean
-* OS: container-linux, flatcar-linux, or fedora-atomic
+* OS: container-linux, flatcar-linux
 * Release: Typhoon version or Git SHA (reporting latest is **not** helpful)
 * Terraform: `terraform version` (reporting latest is **not** helpful)
 * Plugins: Provider plugin versions (reporting latest is **not** helpful)
@@ -11,6 +11,7 @@ Notable changes between versions.
 * [Migration](https://typhoon.psdn.io/topics/maintenance/#terraform-v012x) instructions for Terraform v0.12
 * Require `terraform-provider-ct` v0.3.2+ to support Terraform v0.12 (action required)
 * Update Calico from v3.7.2 to [v3.7.3](https://docs.projectcalico.org/v3.7/release-notes/)
+* Remove Fedora Atomic modules (deprecated in March) ([#501](https://github.com/poseidon/typhoon/pull/501))
 
 #### AWS
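The "action required" entry above pairs with a plugin version pin on the user side. A hedged sketch of the provider block, assuming the third-party terraform-provider-ct plugin is already installed locally in the v0.11/v0.12 style:

provider "ct" {
  version = "0.3.2"
}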
@@ -1,23 +0,0 @@
The MIT License (MIT)

Copyright (c) 2017 Typhoon Authors
Copyright (c) 2017 Dalton Hubble

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
@@ -1,23 +0,0 @@
# Typhoon <img align="right" src="https://storage.googleapis.com/poseidon/typhoon-logo.png">

Typhoon is a minimal and free Kubernetes distribution.

* Minimal, stable base Kubernetes distribution
* Declarative infrastructure and configuration
* Free (freedom and cost) and privacy-respecting
* Practical for labs, datacenters, and clouds

Typhoon distributes upstream Kubernetes, architectural conventions, and cluster addons, much like a GNU/Linux distribution provides the Linux kernel and userspace components.

## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>

* Kubernetes v1.15.0 (upstream, via [kubernetes-incubator/bootkube](https://github.com/kubernetes-incubator/bootkube))
* Single or multi-master, [Calico](https://www.projectcalico.org/) or [flannel](https://github.com/coreos/flannel) networking
* On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
* Advanced features like [worker pools](https://typhoon.psdn.io/advanced/worker-pools/) and [spot](https://typhoon.psdn.io/cl/aws/#spot) workers
* Ready for Ingress, Prometheus, Grafana, and other optional [addons](https://typhoon.psdn.io/addons/overview/)

## Docs

Please see the [official docs](https://typhoon.psdn.io) and the AWS [tutorial](https://typhoon.psdn.io/cl/aws/).
@@ -1,19 +0,0 @@
data "aws_ami" "fedora" {
  most_recent = true
  owners      = ["125523088429"]

  filter {
    name   = "architecture"
    values = ["x86_64"]
  }

  filter {
    name   = "virtualization-type"
    values = ["hvm"]
  }

  filter {
    name   = "name"
    values = ["Fedora-AtomicHost-28-20180625.1.x86_64-*-gp2-*"]
  }
}
@@ -1,18 +0,0 @@
# Self-hosted Kubernetes assets (kubeconfig, manifests)
module "bootkube" {
  source = "git::https://github.com/poseidon/terraform-render-bootkube.git?ref=62df9ad69cc0da35f47d40fa981370c4503ad581"

  cluster_name          = "${var.cluster_name}"
  api_servers           = ["${format("%s.%s", var.cluster_name, var.dns_zone)}"]
  etcd_servers          = ["${aws_route53_record.etcds.*.fqdn}"]
  asset_dir             = "${var.asset_dir}"
  networking            = "${var.networking}"
  network_mtu           = "${var.network_mtu}"
  pod_cidr              = "${var.pod_cidr}"
  service_cidr          = "${var.service_cidr}"
  cluster_domain_suffix = "${var.cluster_domain_suffix}"
  enable_reporting      = "${var.enable_reporting}"

  # Fedora
  trusted_certs_dir = "/etc/pki/tls/certs"
}
@@ -1,93 +0,0 @@
#cloud-config
write_files:
  - path: /etc/etcd/etcd.conf
    content: |
      ETCD_NAME=${etcd_name}
      ETCD_DATA_DIR=/var/lib/etcd
      ETCD_ADVERTISE_CLIENT_URLS=https://${etcd_domain}:2379
      ETCD_INITIAL_ADVERTISE_PEER_URLS=https://${etcd_domain}:2380
      ETCD_LISTEN_CLIENT_URLS=https://0.0.0.0:2379
      ETCD_LISTEN_PEER_URLS=https://0.0.0.0:2380
      ETCD_LISTEN_METRICS_URLS=http://0.0.0.0:2381
      ETCD_INITIAL_CLUSTER=${etcd_initial_cluster}
      ETCD_STRICT_RECONFIG_CHECK=true
      ETCD_TRUSTED_CA_FILE=/etc/ssl/certs/etcd/server-ca.crt
      ETCD_CERT_FILE=/etc/ssl/certs/etcd/server.crt
      ETCD_KEY_FILE=/etc/ssl/certs/etcd/server.key
      ETCD_CLIENT_CERT_AUTH=true
      ETCD_PEER_TRUSTED_CA_FILE=/etc/ssl/certs/etcd/peer-ca.crt
      ETCD_PEER_CERT_FILE=/etc/ssl/certs/etcd/peer.crt
      ETCD_PEER_KEY_FILE=/etc/ssl/certs/etcd/peer.key
      ETCD_PEER_CLIENT_CERT_AUTH=true
  - path: /etc/systemd/system/kubelet.service.d/10-typhoon.conf
    content: |
      [Unit]
      Wants=rpc-statd.service
      [Service]
      ExecStartPre=/bin/mkdir -p /opt/cni/bin
      ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
      ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
      ExecStartPre=/bin/mkdir -p /etc/kubernetes/checkpoint-secrets
      ExecStartPre=/bin/mkdir -p /etc/kubernetes/inactive-manifests
      ExecStartPre=/bin/mkdir -p /var/lib/cni
      ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins
      ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
      Restart=always
      RestartSec=10
  - path: /etc/kubernetes/kubelet.conf
    content: |
      ARGS="--anonymous-auth=false \
        --authentication-token-webhook \
        --authorization-mode=Webhook \
        --client-ca-file=/etc/kubernetes/ca.crt \
        --cluster_dns=${cluster_dns_service_ip} \
        --cluster_domain=${cluster_domain_suffix} \
        --cni-conf-dir=/etc/kubernetes/cni/net.d \
        --exit-on-lock-contention \
        --kubeconfig=/etc/kubernetes/kubeconfig \
        --lock-file=/var/run/lock/kubelet.lock \
        --network-plugin=cni \
        --node-labels=node-role.kubernetes.io/master \
        --node-labels=node-role.kubernetes.io/controller="true" \
        --pod-manifest-path=/etc/kubernetes/manifests \
        --read-only-port=0 \
        --register-with-taints=node-role.kubernetes.io/master=:NoSchedule \
        --volume-plugin-dir=/var/lib/kubelet/volumeplugins"
  - path: /etc/kubernetes/kubeconfig
    permissions: '0644'
    content: |
      ${kubeconfig}
  - path: /var/lib/bootkube/.keep
  - path: /etc/NetworkManager/conf.d/typhoon.conf
    content: |
      [main]
      plugins=keyfile
      [keyfile]
      unmanaged-devices=interface-name:cali*;interface-name:tunl*
  - path: /etc/selinux/config
    owner: root:root
    permissions: '0644'
    content: |
      SELINUX=permissive
      SELINUXTYPE=targeted
bootcmd:
  - [setenforce, Permissive]
  - [systemctl, disable, firewalld, --now]
  # https://github.com/kubernetes/kubernetes/issues/60869
  - [modprobe, ip_vs]
runcmd:
  - [systemctl, daemon-reload]
  - [systemctl, restart, NetworkManager]
  - "atomic install --system --name=etcd quay.io/poseidon/etcd:v3.3.12"
  - "atomic install --system --name=kubelet quay.io/poseidon/kubelet:v1.14.1"
  - "atomic install --system --name=bootkube quay.io/poseidon/bootkube:v0.14.0"
  - [systemctl, start, --no-block, etcd.service]
  - [systemctl, start, --no-block, kubelet.service]
users:
  - default
  - name: fedora
    gecos: Fedora Admin
    sudo: ALL=(ALL) NOPASSWD:ALL
    groups: wheel,adm,systemd-journal,docker
    ssh-authorized-keys:
      - "${ssh_authorized_key}"
@@ -1,79 +0,0 @@
# Discrete DNS records for each controller's private IPv4 for etcd usage
resource "aws_route53_record" "etcds" {
  count = "${var.controller_count}"

  # DNS Zone where record should be created
  zone_id = "${var.dns_zone_id}"

  name = "${format("%s-etcd%d.%s.", var.cluster_name, count.index, var.dns_zone)}"
  type = "A"
  ttl  = 300

  # private IPv4 address for etcd
  records = ["${element(aws_instance.controllers.*.private_ip, count.index)}"]
}

# Controller instances
resource "aws_instance" "controllers" {
  count = "${var.controller_count}"

  tags = {
    Name = "${var.cluster_name}-controller-${count.index}"
  }

  instance_type = "${var.controller_type}"

  ami       = "${data.aws_ami.fedora.image_id}"
  user_data = "${element(data.template_file.controller-cloudinit.*.rendered, count.index)}"

  # storage
  root_block_device {
    volume_type = "${var.disk_type}"
    volume_size = "${var.disk_size}"
    iops        = "${var.disk_iops}"
  }

  # network
  associate_public_ip_address = true
  subnet_id                   = "${element(aws_subnet.public.*.id, count.index)}"
  vpc_security_group_ids      = ["${aws_security_group.controller.id}"]

  lifecycle {
    ignore_changes = [
      "ami",
      "user_data",
    ]
  }
}

# Controller Cloud-Init
data "template_file" "controller-cloudinit" {
  count = "${var.controller_count}"

  template = "${file("${path.module}/cloudinit/controller.yaml.tmpl")}"

  vars = {
    # Cannot use cyclic dependencies on controllers or their DNS records
    etcd_name   = "etcd${count.index}"
    etcd_domain = "${var.cluster_name}-etcd${count.index}.${var.dns_zone}"

    # etcd0=https://cluster-etcd0.example.com,etcd1=https://cluster-etcd1.example.com,...
    etcd_initial_cluster = "${join(",", data.template_file.etcds.*.rendered)}"

    kubeconfig             = "${indent(6, module.bootkube.kubeconfig-kubelet)}"
    ssh_authorized_key     = "${var.ssh_authorized_key}"
    cluster_dns_service_ip = "${cidrhost(var.service_cidr, 10)}"
    cluster_domain_suffix  = "${var.cluster_domain_suffix}"
  }
}

data "template_file" "etcds" {
  count    = "${var.controller_count}"
  template = "etcd$${index}=https://$${cluster_name}-etcd$${index}.$${dns_zone}:2380"

  vars = {
    index        = "${count.index}"
    cluster_name = "${var.cluster_name}"
    dns_zone     = "${var.dns_zone}"
  }
}
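One detail worth noting in the removed file above: the etcd template writes "$${index}" rather than "${index}". The doubled dollar sign escapes Terraform's config-time interpolation, so the template_file engine substitutes from vars at render time instead. A standalone sketch of the same pattern (names here are illustrative):

data "template_file" "example" {
  template = "etcd$${index}=https://node$${index}.example.com:2380"

  vars = {
    index = "0"
  }
}

# rendered: "etcd0=https://node0.example.com:2380"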
@@ -1,57 +0,0 @@
data "aws_availability_zones" "all" {}

# Network VPC, gateway, and routes

resource "aws_vpc" "network" {
  cidr_block                       = "${var.host_cidr}"
  assign_generated_ipv6_cidr_block = true
  enable_dns_support               = true
  enable_dns_hostnames             = true

  tags = "${map("Name", "${var.cluster_name}")}"
}

resource "aws_internet_gateway" "gateway" {
  vpc_id = "${aws_vpc.network.id}"

  tags = "${map("Name", "${var.cluster_name}")}"
}

resource "aws_route_table" "default" {
  vpc_id = "${aws_vpc.network.id}"

  route {
    cidr_block = "0.0.0.0/0"
    gateway_id = "${aws_internet_gateway.gateway.id}"
  }

  route {
    ipv6_cidr_block = "::/0"
    gateway_id      = "${aws_internet_gateway.gateway.id}"
  }

  tags = "${map("Name", "${var.cluster_name}")}"
}

# Subnets (one per availability zone)

resource "aws_subnet" "public" {
  count = "${length(data.aws_availability_zones.all.names)}"

  vpc_id            = "${aws_vpc.network.id}"
  availability_zone = "${data.aws_availability_zones.all.names[count.index]}"

  cidr_block                      = "${cidrsubnet(var.host_cidr, 4, count.index)}"
  ipv6_cidr_block                 = "${cidrsubnet(aws_vpc.network.ipv6_cidr_block, 8, count.index)}"
  map_public_ip_on_launch         = true
  assign_ipv6_address_on_creation = true

  tags = "${map("Name", "${var.cluster_name}-public-${count.index}")}"
}

resource "aws_route_table_association" "public" {
  count = "${length(data.aws_availability_zones.all.names)}"

  route_table_id = "${aws_route_table.default.id}"
  subnet_id      = "${element(aws_subnet.public.*.id, count.index)}"
}
@@ -1,93 +0,0 @@
# Network Load Balancer DNS Record
resource "aws_route53_record" "apiserver" {
  zone_id = "${var.dns_zone_id}"

  name = "${format("%s.%s.", var.cluster_name, var.dns_zone)}"
  type = "A"

  # AWS recommends their special "alias" records for NLBs
  alias {
    name                   = "${aws_lb.nlb.dns_name}"
    zone_id                = "${aws_lb.nlb.zone_id}"
    evaluate_target_health = true
  }
}

# Network Load Balancer for apiservers and ingress
resource "aws_lb" "nlb" {
  name               = "${var.cluster_name}-nlb"
  load_balancer_type = "network"
  internal           = false

  subnets = ["${aws_subnet.public.*.id}"]

  enable_cross_zone_load_balancing = true
}

# Forward TCP apiserver traffic to controllers
resource "aws_lb_listener" "apiserver-https" {
  load_balancer_arn = "${aws_lb.nlb.arn}"
  protocol          = "TCP"
  port              = "6443"

  default_action {
    type             = "forward"
    target_group_arn = "${aws_lb_target_group.controllers.arn}"
  }
}

# Forward HTTP ingress traffic to workers
resource "aws_lb_listener" "ingress-http" {
  load_balancer_arn = "${aws_lb.nlb.arn}"
  protocol          = "TCP"
  port              = 80

  default_action {
    type             = "forward"
    target_group_arn = "${module.workers.target_group_http}"
  }
}

# Forward HTTPS ingress traffic to workers
resource "aws_lb_listener" "ingress-https" {
  load_balancer_arn = "${aws_lb.nlb.arn}"
  protocol          = "TCP"
  port              = 443

  default_action {
    type             = "forward"
    target_group_arn = "${module.workers.target_group_https}"
  }
}

# Target group of controllers
resource "aws_lb_target_group" "controllers" {
  name        = "${var.cluster_name}-controllers"
  vpc_id      = "${aws_vpc.network.id}"
  target_type = "instance"

  protocol = "TCP"
  port     = 6443

  # TCP health check for apiserver
  health_check {
    protocol = "TCP"
    port     = 6443

    # NLBs required to use same healthy and unhealthy thresholds
    healthy_threshold   = 3
    unhealthy_threshold = 3

    # Interval between health checks required to be 10 or 30
    interval = 10
  }
}

# Attach controller instances to apiserver NLB
resource "aws_lb_target_group_attachment" "controllers" {
  count = "${var.controller_count}"

  target_group_arn = "${aws_lb_target_group.controllers.arn}"
  target_id        = "${element(aws_instance.controllers.*.id, count.index)}"
  port             = 6443
}
@@ -1,48 +0,0 @@
output "kubeconfig-admin" {
  value = "${module.bootkube.kubeconfig-admin}"
}

# Outputs for Kubernetes Ingress

output "ingress_dns_name" {
  value       = "${aws_lb.nlb.dns_name}"
  description = "DNS name of the network load balancer for distributing traffic to Ingress controllers"
}

output "ingress_zone_id" {
  value       = "${aws_lb.nlb.zone_id}"
  description = "Route53 zone id of the network load balancer DNS name that can be used in Route53 alias records"
}

# Outputs for worker pools

output "vpc_id" {
  value       = "${aws_vpc.network.id}"
  description = "ID of the VPC for creating worker instances"
}

output "subnet_ids" {
  value       = ["${aws_subnet.public.*.id}"]
  description = "List of subnet IDs for creating worker instances"
}

output "worker_security_groups" {
  value       = ["${aws_security_group.worker.id}"]
  description = "List of worker security group IDs"
}

output "kubeconfig" {
  value = "${module.bootkube.kubeconfig-kubelet}"
}

# Outputs for custom load balancing

output "worker_target_group_http" {
  description = "ARN of a target group of workers for HTTP traffic"
  value       = "${module.workers.target_group_http}"
}

output "worker_target_group_https" {
  description = "ARN of a target group of workers for HTTPS traffic"
  value       = "${module.workers.target_group_https}"
}
@@ -1,25 +0,0 @@
# Terraform version and plugin versions

terraform {
  required_version = ">= 0.11.0"
}

provider "aws" {
  version = ">= 1.13, < 3.0"
}

provider "local" {
  version = "~> 1.0"
}

provider "null" {
  version = "~> 1.0"
}

provider "template" {
  version = "~> 1.0"
}

provider "tls" {
  version = "~> 1.0"
}
@@ -1,359 +0,0 @@
# Security Groups (instance firewalls)

# Controller security group

resource "aws_security_group" "controller" {
  name        = "${var.cluster_name}-controller"
  description = "${var.cluster_name} controller security group"

  vpc_id = "${aws_vpc.network.id}"

  tags = "${map("Name", "${var.cluster_name}-controller")}"
}

resource "aws_security_group_rule" "controller-ssh" {
  security_group_id = "${aws_security_group.controller.id}"

  type        = "ingress"
  protocol    = "tcp"
  from_port   = 22
  to_port     = 22
  cidr_blocks = ["0.0.0.0/0"]
}

resource "aws_security_group_rule" "controller-etcd" {
  security_group_id = "${aws_security_group.controller.id}"

  type      = "ingress"
  protocol  = "tcp"
  from_port = 2379
  to_port   = 2380
  self      = true
}

# Allow Prometheus to scrape etcd metrics
resource "aws_security_group_rule" "controller-etcd-metrics" {
  security_group_id = "${aws_security_group.controller.id}"

  type                     = "ingress"
  protocol                 = "tcp"
  from_port                = 2381
  to_port                  = 2381
  source_security_group_id = "${aws_security_group.worker.id}"
}

resource "aws_security_group_rule" "controller-vxlan" {
  count = "${var.networking == "flannel" ? 1 : 0}"

  security_group_id = "${aws_security_group.controller.id}"

  type                     = "ingress"
  protocol                 = "udp"
  from_port                = 4789
  to_port                  = 4789
  source_security_group_id = "${aws_security_group.worker.id}"
}

resource "aws_security_group_rule" "controller-vxlan-self" {
  count = "${var.networking == "flannel" ? 1 : 0}"

  security_group_id = "${aws_security_group.controller.id}"

  type      = "ingress"
  protocol  = "udp"
  from_port = 4789
  to_port   = 4789
  self      = true
}

resource "aws_security_group_rule" "controller-apiserver" {
  security_group_id = "${aws_security_group.controller.id}"

  type        = "ingress"
  protocol    = "tcp"
  from_port   = 6443
  to_port     = 6443
  cidr_blocks = ["0.0.0.0/0"]
}

# Allow Prometheus to scrape node-exporter daemonset
resource "aws_security_group_rule" "controller-node-exporter" {
  security_group_id = "${aws_security_group.controller.id}"

  type                     = "ingress"
  protocol                 = "tcp"
  from_port                = 9100
  to_port                  = 9100
  source_security_group_id = "${aws_security_group.worker.id}"
}

# Allow apiserver to access kubelets for exec, log, port-forward
resource "aws_security_group_rule" "controller-kubelet" {
  security_group_id = "${aws_security_group.controller.id}"

  type                     = "ingress"
  protocol                 = "tcp"
  from_port                = 10250
  to_port                  = 10250
  source_security_group_id = "${aws_security_group.worker.id}"
}

resource "aws_security_group_rule" "controller-kubelet-self" {
  security_group_id = "${aws_security_group.controller.id}"

  type      = "ingress"
  protocol  = "tcp"
  from_port = 10250
  to_port   = 10250
  self      = true
}

resource "aws_security_group_rule" "controller-bgp" {
  security_group_id = "${aws_security_group.controller.id}"

  type                     = "ingress"
  protocol                 = "tcp"
  from_port                = 179
  to_port                  = 179
  source_security_group_id = "${aws_security_group.worker.id}"
}

resource "aws_security_group_rule" "controller-bgp-self" {
  security_group_id = "${aws_security_group.controller.id}"

  type      = "ingress"
  protocol  = "tcp"
  from_port = 179
  to_port   = 179
  self      = true
}

resource "aws_security_group_rule" "controller-ipip" {
  security_group_id = "${aws_security_group.controller.id}"

  type                     = "ingress"
  protocol                 = 4
  from_port                = 0
  to_port                  = 0
  source_security_group_id = "${aws_security_group.worker.id}"
}

resource "aws_security_group_rule" "controller-ipip-self" {
  security_group_id = "${aws_security_group.controller.id}"

  type      = "ingress"
  protocol  = 4
  from_port = 0
  to_port   = 0
  self      = true
}

resource "aws_security_group_rule" "controller-ipip-legacy" {
  security_group_id = "${aws_security_group.controller.id}"

  type                     = "ingress"
  protocol                 = 94
  from_port                = 0
  to_port                  = 0
  source_security_group_id = "${aws_security_group.worker.id}"
}

resource "aws_security_group_rule" "controller-ipip-legacy-self" {
  security_group_id = "${aws_security_group.controller.id}"

  type      = "ingress"
  protocol  = 94
  from_port = 0
  to_port   = 0
  self      = true
}

resource "aws_security_group_rule" "controller-egress" {
  security_group_id = "${aws_security_group.controller.id}"

  type             = "egress"
  protocol         = "-1"
  from_port        = 0
  to_port          = 0
  cidr_blocks      = ["0.0.0.0/0"]
  ipv6_cidr_blocks = ["::/0"]
}

# Worker security group

resource "aws_security_group" "worker" {
  name        = "${var.cluster_name}-worker"
  description = "${var.cluster_name} worker security group"

  vpc_id = "${aws_vpc.network.id}"

  tags = "${map("Name", "${var.cluster_name}-worker")}"
}

resource "aws_security_group_rule" "worker-ssh" {
  security_group_id = "${aws_security_group.worker.id}"

  type        = "ingress"
  protocol    = "tcp"
  from_port   = 22
  to_port     = 22
  cidr_blocks = ["0.0.0.0/0"]
}

resource "aws_security_group_rule" "worker-http" {
  security_group_id = "${aws_security_group.worker.id}"

  type        = "ingress"
  protocol    = "tcp"
  from_port   = 80
  to_port     = 80
  cidr_blocks = ["0.0.0.0/0"]
}

resource "aws_security_group_rule" "worker-https" {
  security_group_id = "${aws_security_group.worker.id}"

  type        = "ingress"
  protocol    = "tcp"
  from_port   = 443
  to_port     = 443
  cidr_blocks = ["0.0.0.0/0"]
}

resource "aws_security_group_rule" "worker-vxlan" {
  count = "${var.networking == "flannel" ? 1 : 0}"

  security_group_id = "${aws_security_group.worker.id}"

  type                     = "ingress"
  protocol                 = "udp"
  from_port                = 4789
  to_port                  = 4789
  source_security_group_id = "${aws_security_group.controller.id}"
}

resource "aws_security_group_rule" "worker-vxlan-self" {
  count = "${var.networking == "flannel" ? 1 : 0}"

  security_group_id = "${aws_security_group.worker.id}"

  type      = "ingress"
  protocol  = "udp"
  from_port = 4789
  to_port   = 4789
  self      = true
}

# Allow Prometheus to scrape node-exporter daemonset
resource "aws_security_group_rule" "worker-node-exporter" {
  security_group_id = "${aws_security_group.worker.id}"

  type      = "ingress"
  protocol  = "tcp"
  from_port = 9100
  to_port   = 9100
  self      = true
}

resource "aws_security_group_rule" "ingress-health" {
  security_group_id = "${aws_security_group.worker.id}"

  type        = "ingress"
  protocol    = "tcp"
  from_port   = 10254
  to_port     = 10254
  cidr_blocks = ["0.0.0.0/0"]
}

# Allow apiserver to access kubelets for exec, log, port-forward
resource "aws_security_group_rule" "worker-kubelet" {
  security_group_id = "${aws_security_group.worker.id}"

  type                     = "ingress"
  protocol                 = "tcp"
  from_port                = 10250
  to_port                  = 10250
  source_security_group_id = "${aws_security_group.controller.id}"
}

# Allow Prometheus to scrape kubelet metrics
resource "aws_security_group_rule" "worker-kubelet-self" {
  security_group_id = "${aws_security_group.worker.id}"

  type      = "ingress"
  protocol  = "tcp"
  from_port = 10250
  to_port   = 10250
  self      = true
}

resource "aws_security_group_rule" "worker-bgp" {
  security_group_id = "${aws_security_group.worker.id}"

  type                     = "ingress"
  protocol                 = "tcp"
  from_port                = 179
  to_port                  = 179
  source_security_group_id = "${aws_security_group.controller.id}"
}

resource "aws_security_group_rule" "worker-bgp-self" {
  security_group_id = "${aws_security_group.worker.id}"

  type      = "ingress"
  protocol  = "tcp"
  from_port = 179
  to_port   = 179
  self      = true
}

resource "aws_security_group_rule" "worker-ipip" {
  security_group_id = "${aws_security_group.worker.id}"

  type                     = "ingress"
  protocol                 = 4
  from_port                = 0
  to_port                  = 0
  source_security_group_id = "${aws_security_group.controller.id}"
}

resource "aws_security_group_rule" "worker-ipip-self" {
  security_group_id = "${aws_security_group.worker.id}"

  type      = "ingress"
  protocol  = 4
  from_port = 0
  to_port   = 0
  self      = true
}

resource "aws_security_group_rule" "worker-ipip-legacy" {
  security_group_id = "${aws_security_group.worker.id}"

  type                     = "ingress"
  protocol                 = 94
  from_port                = 0
  to_port                  = 0
  source_security_group_id = "${aws_security_group.controller.id}"
}

resource "aws_security_group_rule" "worker-ipip-legacy-self" {
  security_group_id = "${aws_security_group.worker.id}"

  type      = "ingress"
  protocol  = 94
  from_port = 0
  to_port   = 0
  self      = true
}

resource "aws_security_group_rule" "worker-egress" {
  security_group_id = "${aws_security_group.worker.id}"

  type             = "egress"
  protocol         = "-1"
  from_port        = 0
  to_port          = 0
  cidr_blocks      = ["0.0.0.0/0"]
  ipv6_cidr_blocks = ["::/0"]
}
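The vxlan rules in the file above gate resource creation on the networking choice by setting count to 0 or 1, a common Terraform v0.11 idiom for conditional resources. The same pattern in isolation (resource and variable names here are illustrative):

variable "networking" {
  description = "calico or flannel"
  default     = "calico"
}

resource "aws_security_group" "example" {
  name = "example"
}

# Created only when flannel (VXLAN) networking is selected
resource "aws_security_group_rule" "example-vxlan" {
  count = "${var.networking == "flannel" ? 1 : 0}"

  security_group_id = "${aws_security_group.example.id}"

  type      = "ingress"
  protocol  = "udp"
  from_port = 4789
  to_port   = 4789
  self      = true
}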
@@ -1,89 +0,0 @@
# Secure copy etcd TLS assets to controllers.
resource "null_resource" "copy-controller-secrets" {
  count = "${var.controller_count}"

  connection {
    type    = "ssh"
    host    = "${element(aws_instance.controllers.*.public_ip, count.index)}"
    user    = "fedora"
    timeout = "15m"
  }

  provisioner "file" {
    content     = "${module.bootkube.etcd_ca_cert}"
    destination = "$HOME/etcd-client-ca.crt"
  }

  provisioner "file" {
    content     = "${module.bootkube.etcd_client_cert}"
    destination = "$HOME/etcd-client.crt"
  }

  provisioner "file" {
    content     = "${module.bootkube.etcd_client_key}"
    destination = "$HOME/etcd-client.key"
  }

  provisioner "file" {
    content     = "${module.bootkube.etcd_server_cert}"
    destination = "$HOME/etcd-server.crt"
  }

  provisioner "file" {
    content     = "${module.bootkube.etcd_server_key}"
    destination = "$HOME/etcd-server.key"
  }

  provisioner "file" {
    content     = "${module.bootkube.etcd_peer_cert}"
    destination = "$HOME/etcd-peer.crt"
  }

  provisioner "file" {
    content     = "${module.bootkube.etcd_peer_key}"
    destination = "$HOME/etcd-peer.key"
  }

  provisioner "remote-exec" {
    inline = [
      "sudo mkdir -p /etc/ssl/etcd/etcd",
      "sudo mv etcd-client* /etc/ssl/etcd/",
      "sudo cp /etc/ssl/etcd/etcd-client-ca.crt /etc/ssl/etcd/etcd/server-ca.crt",
      "sudo mv etcd-server.crt /etc/ssl/etcd/etcd/server.crt",
      "sudo mv etcd-server.key /etc/ssl/etcd/etcd/server.key",
      "sudo cp /etc/ssl/etcd/etcd-client-ca.crt /etc/ssl/etcd/etcd/peer-ca.crt",
      "sudo mv etcd-peer.crt /etc/ssl/etcd/etcd/peer.crt",
      "sudo mv etcd-peer.key /etc/ssl/etcd/etcd/peer.key",
    ]
  }
}

# Secure copy bootkube assets to ONE controller and start bootkube to perform
# one-time self-hosted cluster bootstrapping.
resource "null_resource" "bootkube-start" {
  depends_on = [
    "null_resource.copy-controller-secrets",
    "module.workers",
    "aws_route53_record.apiserver",
  ]

  connection {
    type    = "ssh"
    host    = "${aws_instance.controllers.0.public_ip}"
    user    = "fedora"
    timeout = "15m"
  }

  provisioner "file" {
    source      = "${var.asset_dir}"
    destination = "$HOME/assets"
  }

  provisioner "remote-exec" {
    inline = [
      "while [ ! -f /var/lib/cloud/instance/boot-finished ]; do sleep 4; done",
      "sudo mv $HOME/assets /var/lib/bootkube",
      "sudo systemctl start bootkube",
    ]
  }
}
@@ -1,124 +0,0 @@
variable "cluster_name" {
  type        = "string"
  description = "Unique cluster name (prepended to dns_zone)"
}

# AWS

variable "dns_zone" {
  type        = "string"
  description = "AWS DNS Zone (e.g. aws.example.com)"
}

variable "dns_zone_id" {
  type        = "string"
  description = "AWS DNS Zone ID (e.g. Z3PAABBCFAKEC0)"
}

# instances

variable "controller_count" {
  type        = "string"
  default     = "1"
  description = "Number of controllers (i.e. masters)"
}

variable "worker_count" {
  type        = "string"
  default     = "1"
  description = "Number of workers"
}

variable "controller_type" {
  type        = "string"
  default     = "t3.small"
  description = "EC2 instance type for controllers"
}

variable "worker_type" {
  type        = "string"
  default     = "t3.small"
  description = "EC2 instance type for workers"
}

variable "disk_size" {
  type        = "string"
  default     = "40"
  description = "Size of the EBS volume in GB"
}

variable "disk_type" {
  type        = "string"
  default     = "gp2"
  description = "Type of the EBS volume (e.g. standard, gp2, io1)"
}

variable "disk_iops" {
  type        = "string"
  default     = "0"
  description = "IOPS of the EBS volume (e.g. 100)"
}

variable "worker_price" {
  type        = "string"
  default     = ""
  description = "Spot price in USD for autoscaling group spot instances. Leave as default empty string for autoscaling group to use on-demand instances. Note, switching in-place from spot to on-demand is not possible: https://github.com/terraform-providers/terraform-provider-aws/issues/4320"
}

# configuration

variable "ssh_authorized_key" {
  type        = "string"
  description = "SSH public key for user 'fedora'"
}

variable "asset_dir" {
  description = "Path to a directory where generated assets should be placed (contains secrets)"
  type        = "string"
}

variable "networking" {
  description = "Choice of networking provider (calico or flannel)"
  type        = "string"
  default     = "calico"
}

variable "network_mtu" {
  description = "CNI interface MTU (applies to calico only). Use 8981 if using instances types with Jumbo frames."
  type        = "string"
  default     = "1480"
}

variable "host_cidr" {
  description = "CIDR IPv4 range to assign to EC2 nodes"
  type        = "string"
  default     = "10.0.0.0/16"
}

variable "pod_cidr" {
  description = "CIDR IPv4 range to assign Kubernetes pods"
  type        = "string"
  default     = "10.2.0.0/16"
}

variable "service_cidr" {
  description = <<EOD
CIDR IPv4 range to assign Kubernetes services.
The 1st IP will be reserved for kube_apiserver, the 10th IP will be reserved for coredns.
EOD

  type    = "string"
  default = "10.3.0.0/16"
}

variable "cluster_domain_suffix" {
  description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local)"
  type        = "string"
  default     = "cluster.local"
}

variable "enable_reporting" {
  type        = "string"
  description = "Enable usage or analytics reporting to upstreams (Calico)"
  default     = "false"
}
@@ -1,19 +0,0 @@
module "workers" {
  source = "./workers"
  name   = "${var.cluster_name}"

  # AWS
  vpc_id          = "${aws_vpc.network.id}"
  subnet_ids      = ["${aws_subnet.public.*.id}"]
  security_groups = ["${aws_security_group.worker.id}"]
  count           = "${var.worker_count}"
  instance_type   = "${var.worker_type}"
  disk_size       = "${var.disk_size}"
  spot_price      = "${var.worker_price}"

  # configuration
  kubeconfig            = "${module.bootkube.kubeconfig-kubelet}"
  ssh_authorized_key    = "${var.ssh_authorized_key}"
  service_cidr          = "${var.service_cidr}"
  cluster_domain_suffix = "${var.cluster_domain_suffix}"
}
@@ -1,19 +0,0 @@
data "aws_ami" "fedora" {
  most_recent = true
  owners      = ["125523088429"]

  filter {
    name   = "architecture"
    values = ["x86_64"]
  }

  filter {
    name   = "virtualization-type"
    values = ["hvm"]
  }

  filter {
    name   = "name"
    values = ["Fedora-AtomicHost-28-20180625.1.x86_64-*-gp2-*"]
  }
}
@@ -1,66 +0,0 @@
#cloud-config
write_files:
  - path: /etc/systemd/system/kubelet.service.d/10-typhoon.conf
    content: |
      [Unit]
      Wants=rpc-statd.service
      [Service]
      ExecStartPre=/bin/mkdir -p /opt/cni/bin
      ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
      ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
      ExecStartPre=/bin/mkdir -p /var/lib/cni
      ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins
      ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
      Restart=always
      RestartSec=10
  - path: /etc/kubernetes/kubelet.conf
    content: |
      ARGS="--anonymous-auth=false \
        --authentication-token-webhook \
        --authorization-mode=Webhook \
        --client-ca-file=/etc/kubernetes/ca.crt \
        --cluster_dns=${cluster_dns_service_ip} \
        --cluster_domain=${cluster_domain_suffix} \
        --cni-conf-dir=/etc/kubernetes/cni/net.d \
        --exit-on-lock-contention \
        --kubeconfig=/etc/kubernetes/kubeconfig \
        --lock-file=/var/run/lock/kubelet.lock \
        --network-plugin=cni \
        --node-labels=node-role.kubernetes.io/node \
        --pod-manifest-path=/etc/kubernetes/manifests \
        --read-only-port=0 \
        --volume-plugin-dir=/var/lib/kubelet/volumeplugins"
  - path: /etc/kubernetes/kubeconfig
    permissions: '0644'
    content: |
      ${kubeconfig}
  - path: /etc/NetworkManager/conf.d/typhoon.conf
    content: |
      [main]
      plugins=keyfile
      [keyfile]
      unmanaged-devices=interface-name:cali*;interface-name:tunl*
  - path: /etc/selinux/config
    owner: root:root
    permissions: '0644'
    content: |
      SELINUX=permissive
      SELINUXTYPE=targeted
bootcmd:
  - [setenforce, Permissive]
  - [systemctl, disable, firewalld, --now]
  # https://github.com/kubernetes/kubernetes/issues/60869
  - [modprobe, ip_vs]
runcmd:
  - [systemctl, daemon-reload]
  - [systemctl, restart, NetworkManager]
  - "atomic install --system --name=kubelet quay.io/poseidon/kubelet:v1.14.1"
  - [systemctl, start, --no-block, kubelet.service]
users:
  - default
  - name: fedora
    gecos: Fedora Admin
    sudo: ALL=(ALL) NOPASSWD:ALL
    groups: wheel,adm,systemd-journal,docker
    ssh-authorized-keys:
      - "${ssh_authorized_key}"
@@ -1,47 +0,0 @@
# Target groups of instances for use with load balancers

resource "aws_lb_target_group" "workers-http" {
  name        = "${var.name}-workers-http"
  vpc_id      = "${var.vpc_id}"
  target_type = "instance"

  protocol = "TCP"
  port     = 80

  # HTTP health check for ingress
  health_check {
    protocol = "HTTP"
    port     = 10254
    path     = "/healthz"

    # NLBs required to use same healthy and unhealthy thresholds
    healthy_threshold   = 3
    unhealthy_threshold = 3

    # Interval between health checks required to be 10 or 30
    interval = 10
  }
}

resource "aws_lb_target_group" "workers-https" {
  name        = "${var.name}-workers-https"
  vpc_id      = "${var.vpc_id}"
  target_type = "instance"

  protocol = "TCP"
  port     = 443

  # HTTP health check for ingress
  health_check {
    protocol = "HTTP"
    port     = 10254
    path     = "/healthz"

    # NLBs required to use same healthy and unhealthy thresholds
    healthy_threshold   = 3
    unhealthy_threshold = 3

    # Interval between health checks required to be 10 or 30
    interval = 10
  }
}
@@ -1,9 +0,0 @@
output "target_group_http" {
  description = "ARN of a target group of workers for HTTP traffic"
  value       = "${aws_lb_target_group.workers-http.arn}"
}

output "target_group_https" {
  description = "ARN of a target group of workers for HTTPS traffic"
  value       = "${aws_lb_target_group.workers-https.arn}"
}
@@ -1,87 +0,0 @@
variable "name" {
  type        = "string"
  description = "Unique name for the worker pool"
}

# AWS

variable "vpc_id" {
  type        = "string"
  description = "Must be set to `vpc_id` output by cluster"
}

variable "subnet_ids" {
  type        = "list"
  description = "Must be set to `subnet_ids` output by cluster"
}

variable "security_groups" {
  type        = "list"
  description = "Must be set to `worker_security_groups` output by cluster"
}

# instances

variable "count" {
  type        = "string"
  default     = "1"
  description = "Number of instances"
}

variable "instance_type" {
  type        = "string"
  default     = "t3.small"
  description = "EC2 instance type"
}

variable "disk_size" {
  type        = "string"
  default     = "40"
  description = "Size of the EBS volume in GB"
}

variable "disk_type" {
  type        = "string"
  default     = "gp2"
  description = "Type of the EBS volume (e.g. standard, gp2, io1)"
}

variable "disk_iops" {
  type        = "string"
  default     = "0"
  description = "IOPS of the EBS volume (required for io1)"
}

variable "spot_price" {
  type        = "string"
  default     = ""
  description = "Spot price in USD for autoscaling group spot instances. Leave as default empty string for autoscaling group to use on-demand instances. Note, switching in-place from spot to on-demand is not possible: https://github.com/terraform-providers/terraform-provider-aws/issues/4320"
}

# configuration

variable "kubeconfig" {
  type        = "string"
  description = "Must be set to `kubeconfig` output by cluster"
}

variable "ssh_authorized_key" {
  type        = "string"
  description = "SSH public key for user 'fedora'"
}

variable "service_cidr" {
  description = <<EOD
CIDR IPv4 range to assign Kubernetes services.
The 1st IP will be reserved for kube_apiserver, the 10th IP will be reserved for coredns.
EOD

  type    = "string"
  default = "10.3.0.0/16"
}

variable "cluster_domain_suffix" {
  description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local)"
  type        = "string"
  default     = "cluster.local"
}
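The "Must be set to ... output by cluster" descriptions above show how a worker pool module was wired to a cluster's outputs. A sketch of the equivalent wiring against a maintained module, assuming a cluster module named "tempest"; the module path, pool name, and release ref are illustrative:

module "tempest-worker-pool" {
  source = "git::https://github.com/poseidon/typhoon//aws/container-linux/kubernetes/workers?ref=<release>"

  # AWS
  vpc_id          = "${module.tempest.vpc_id}"
  subnet_ids      = ["${module.tempest.subnet_ids}"]
  security_groups = ["${module.tempest.worker_security_groups}"]

  # configuration
  name               = "tempest-pool"
  kubeconfig         = "${module.tempest.kubeconfig}"
  ssh_authorized_key = "${var.ssh_authorized_key}"
}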
@@ -1,78 +0,0 @@
# Workers AutoScaling Group
resource "aws_autoscaling_group" "workers" {
  name = "${var.name}-worker ${aws_launch_configuration.worker.name}"

  # count
  desired_capacity          = "${var.count}"
  min_size                  = "${var.count}"
  max_size                  = "${var.count + 2}"
  default_cooldown          = 30
  health_check_grace_period = 30

  # network
  vpc_zone_identifier = ["${var.subnet_ids}"]

  # template
  launch_configuration = "${aws_launch_configuration.worker.name}"

  # target groups to which instances should be added
  target_group_arns = [
    "${aws_lb_target_group.workers-http.id}",
    "${aws_lb_target_group.workers-https.id}",
  ]

  lifecycle {
    # override the default destroy and replace update behavior
    create_before_destroy = true
  }

  # Waiting for instance creation delays adding the ASG to state. If instances
  # can't be created (e.g. spot price too low), the ASG will be orphaned.
  # Orphaned ASGs escape cleanup, can't be updated, and keep bidding if spot is
  # used. Disable wait to avoid issues and align with other clouds.
  wait_for_capacity_timeout = "0"

  tags = [{
    key                 = "Name"
    value               = "${var.name}-worker"
    propagate_at_launch = true
  }]
}

# Worker template
resource "aws_launch_configuration" "worker" {
  image_id          = "${data.aws_ami.fedora.image_id}"
  instance_type     = "${var.instance_type}"
  spot_price        = "${var.spot_price}"
  enable_monitoring = false

  user_data = "${data.template_file.worker-cloudinit.rendered}"

  # storage
  root_block_device {
    volume_type = "${var.disk_type}"
    volume_size = "${var.disk_size}"
    iops        = "${var.disk_iops}"
  }

  # network
  security_groups = ["${var.security_groups}"]

  lifecycle {
    // Override the default destroy and replace update behavior
    create_before_destroy = true
    ignore_changes        = ["image_id"]
  }
}

# Worker Cloud-Init
data "template_file" "worker-cloudinit" {
  template = "${file("${path.module}/cloudinit/worker.yaml.tmpl")}"

  vars = {
    kubeconfig             = "${indent(6, var.kubeconfig)}"
    ssh_authorized_key     = "${var.ssh_authorized_key}"
    cluster_dns_service_ip = "${cidrhost(var.service_cidr, 10)}"
    cluster_domain_suffix  = "${var.cluster_domain_suffix}"
  }
}
@@ -1,23 +0,0 @@
The MIT License (MIT)

Copyright (c) 2017 Typhoon Authors
Copyright (c) 2017 Dalton Hubble

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
@@ -1,22 +0,0 @@
# Typhoon <img align="right" src="https://storage.googleapis.com/poseidon/typhoon-logo.png">

Typhoon is a minimal and free Kubernetes distribution.

* Minimal, stable base Kubernetes distribution
* Declarative infrastructure and configuration
* Free (freedom and cost) and privacy-respecting
* Practical for labs, datacenters, and clouds

Typhoon distributes upstream Kubernetes, architectural conventions, and cluster addons, much like a GNU/Linux distribution provides the Linux kernel and userspace components.

## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>

* Kubernetes v1.15.0 (upstream, via [kubernetes-incubator/bootkube](https://github.com/kubernetes-incubator/bootkube))
* Single or multi-master, [Calico](https://www.projectcalico.org/) or [flannel](https://github.com/coreos/flannel) networking
* On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
* Ready for Ingress, Prometheus, Grafana, and other optional [addons](https://typhoon.psdn.io/addons/overview/)

## Docs

Please see the [official docs](https://typhoon.psdn.io) and the bare-metal [tutorial](https://typhoon.psdn.io/cl/bare-metal/).
@@ -1,18 +0,0 @@
# Self-hosted Kubernetes assets (kubeconfig, manifests)
module "bootkube" {
  source = "git::https://github.com/poseidon/terraform-render-bootkube.git?ref=62df9ad69cc0da35f47d40fa981370c4503ad581"

  cluster_name          = "${var.cluster_name}"
  api_servers           = ["${var.k8s_domain_name}"]
  etcd_servers          = ["${var.controller_domains}"]
  asset_dir             = "${var.asset_dir}"
  networking            = "${var.networking}"
  network_mtu           = "${var.network_mtu}"
  pod_cidr              = "${var.pod_cidr}"
  service_cidr          = "${var.service_cidr}"
  cluster_domain_suffix = "${var.cluster_domain_suffix}"
  enable_reporting      = "${var.enable_reporting}"

  # Fedora
  trusted_certs_dir = "/etc/pki/tls/certs"
}
@@ -1,100 +0,0 @@
#cloud-config
write_files:
  - path: /etc/etcd/etcd.conf
    content: |
      ETCD_NAME=${etcd_name}
      ETCD_DATA_DIR=/var/lib/etcd
      ETCD_ADVERTISE_CLIENT_URLS=https://${domain_name}:2379
      ETCD_INITIAL_ADVERTISE_PEER_URLS=https://${domain_name}:2380
      ETCD_LISTEN_CLIENT_URLS=https://0.0.0.0:2379
      ETCD_LISTEN_PEER_URLS=https://0.0.0.0:2380
      ETCD_LISTEN_METRICS_URLS=http://0.0.0.0:2381
      ETCD_INITIAL_CLUSTER=${etcd_initial_cluster}
      ETCD_STRICT_RECONFIG_CHECK=true
      ETCD_TRUSTED_CA_FILE=/etc/ssl/certs/etcd/server-ca.crt
      ETCD_CERT_FILE=/etc/ssl/certs/etcd/server.crt
      ETCD_KEY_FILE=/etc/ssl/certs/etcd/server.key
      ETCD_CLIENT_CERT_AUTH=true
      ETCD_PEER_TRUSTED_CA_FILE=/etc/ssl/certs/etcd/peer-ca.crt
      ETCD_PEER_CERT_FILE=/etc/ssl/certs/etcd/peer.crt
      ETCD_PEER_KEY_FILE=/etc/ssl/certs/etcd/peer.key
      ETCD_PEER_CLIENT_CERT_AUTH=true
  - path: /etc/systemd/system/kubelet.service.d/10-typhoon.conf
    content: |
      [Unit]
      Wants=rpc-statd.service
      [Service]
      ExecStartPre=/bin/mkdir -p /opt/cni/bin
      ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
      ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
      ExecStartPre=/bin/mkdir -p /etc/kubernetes/checkpoint-secrets
      ExecStartPre=/bin/mkdir -p /etc/kubernetes/inactive-manifests
      ExecStartPre=/bin/mkdir -p /var/lib/cni
      ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins
      ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
      Restart=always
      RestartSec=10
  - path: /etc/kubernetes/kubelet.conf
    content: |
      ARGS="--anonymous-auth=false \
        --authentication-token-webhook \
        --authorization-mode=Webhook \
        --client-ca-file=/etc/kubernetes/ca.crt \
        --cluster_dns=${cluster_dns_service_ip} \
        --cluster_domain=${cluster_domain_suffix} \
        --cni-conf-dir=/etc/kubernetes/cni/net.d \
        --exit-on-lock-contention \
        --hostname-override=${domain_name} \
        --kubeconfig=/etc/kubernetes/kubeconfig \
        --lock-file=/var/run/lock/kubelet.lock \
        --network-plugin=cni \
        --node-labels=node-role.kubernetes.io/master \
        --node-labels=node-role.kubernetes.io/controller="true" \
        --pod-manifest-path=/etc/kubernetes/manifests \
        --read-only-port=0 \
        --register-with-taints=node-role.kubernetes.io/master=:NoSchedule \
        --volume-plugin-dir=/var/lib/kubelet/volumeplugins"
  - path: /etc/systemd/system/kubelet.path
    content: |
      [Unit]
      Description=Watch for kubeconfig
      [Path]
      PathExists=/etc/kubernetes/kubeconfig
      [Install]
      WantedBy=multi-user.target
  - path: /var/lib/bootkube/.keep
  - path: /etc/NetworkManager/conf.d/typhoon.conf
    content: |
      [main]
      plugins=keyfile
      [keyfile]
      unmanaged-devices=interface-name:cali*;interface-name:tunl*
  - path: /etc/selinux/config
    owner: root:root
    permissions: '0644'
    content: |
      SELINUX=permissive
      SELINUXTYPE=targeted
bootcmd:
  - [setenforce, Permissive]
  - [systemctl, disable, firewalld, --now]
  # https://github.com/kubernetes/kubernetes/issues/60869
  - [modprobe, ip_vs]
runcmd:
  - [systemctl, daemon-reload]
  - [systemctl, restart, NetworkManager]
  - [hostnamectl, set-hostname, ${domain_name}]
  - "atomic install --system --name=etcd quay.io/poseidon/etcd:v3.3.12"
  - "atomic install --system --name=kubelet quay.io/poseidon/kubelet:v1.14.1"
  - "atomic install --system --name=bootkube quay.io/poseidon/bootkube:v0.14.0"
  - [systemctl, start, --no-block, etcd.service]
  - [systemctl, enable, kubelet.path]
  - [systemctl, start, --no-block, kubelet.path]
users:
  - default
  - name: fedora
    gecos: Fedora Admin
    sudo: ALL=(ALL) NOPASSWD:ALL
    groups: wheel,adm,systemd-journal,docker
    ssh-authorized-keys:
      - "${ssh_authorized_key}"
@@ -1,73 +0,0 @@
#cloud-config
write_files:
  - path: /etc/systemd/system/kubelet.service.d/10-typhoon.conf
    content: |
      [Unit]
      Wants=rpc-statd.service
      [Service]
      ExecStartPre=/bin/mkdir -p /opt/cni/bin
      ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
      ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
      ExecStartPre=/bin/mkdir -p /var/lib/cni
      ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins
      ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
      Restart=always
      RestartSec=10
  - path: /etc/kubernetes/kubelet.conf
    content: |
      ARGS="--anonymous-auth=false \
        --authentication-token-webhook \
        --authorization-mode=Webhook \
        --client-ca-file=/etc/kubernetes/ca.crt \
        --cluster_dns=${cluster_dns_service_ip} \
        --cluster_domain=${cluster_domain_suffix} \
        --cni-conf-dir=/etc/kubernetes/cni/net.d \
        --exit-on-lock-contention \
        --hostname-override=${domain_name} \
        --kubeconfig=/etc/kubernetes/kubeconfig \
        --lock-file=/var/run/lock/kubelet.lock \
        --network-plugin=cni \
        --node-labels=node-role.kubernetes.io/node \
        --pod-manifest-path=/etc/kubernetes/manifests \
        --read-only-port=0 \
        --volume-plugin-dir=/var/lib/kubelet/volumeplugins"
  - path: /etc/systemd/system/kubelet.path
    content: |
      [Unit]
      Description=Watch for kubeconfig
      [Path]
      PathExists=/etc/kubernetes/kubeconfig
      [Install]
      WantedBy=multi-user.target
  - path: /etc/NetworkManager/conf.d/typhoon.conf
    content: |
      [main]
      plugins=keyfile
      [keyfile]
      unmanaged-devices=interface-name:cali*;interface-name:tunl*
  - path: /etc/selinux/config
    owner: root:root
    permissions: '0644'
    content: |
      SELINUX=permissive
      SELINUXTYPE=targeted
bootcmd:
  - [setenforce, Permissive]
  - [systemctl, disable, firewalld, --now]
  # https://github.com/kubernetes/kubernetes/issues/60869
  - [modprobe, ip_vs]
runcmd:
  - [systemctl, daemon-reload]
  - [systemctl, restart, NetworkManager]
  - [hostnamectl, set-hostname, ${domain_name}]
  - "atomic install --system --name=kubelet quay.io/poseidon/kubelet:v1.14.1"
  - [systemctl, enable, kubelet.path]
  - [systemctl, start, --no-block, kubelet.path]
users:
  - default
  - name: fedora
    gecos: Fedora Admin
    sudo: ALL=(ALL) NOPASSWD:ALL
    groups: wheel,adm,systemd-journal,docker
    ssh-authorized-keys:
      - "${ssh_authorized_key}"
@@ -1,37 +0,0 @@
// Install Fedora to disk
resource "matchbox_group" "install" {
  count = "${length(var.controller_names) + length(var.worker_names)}"

  name    = "${format("fedora-install-%s", element(concat(var.controller_names, var.worker_names), count.index))}"
  profile = "${element(matchbox_profile.cached-fedora-install.*.name, count.index)}"

  selector = {
    mac = "${element(concat(var.controller_macs, var.worker_macs), count.index)}"
  }

  metadata = {
    ssh_authorized_key = "${var.ssh_authorized_key}"
  }
}

resource "matchbox_group" "controller" {
  count   = "${length(var.controller_names)}"
  name    = "${format("%s-%s", var.cluster_name, element(var.controller_names, count.index))}"
  profile = "${element(matchbox_profile.controllers.*.name, count.index)}"

  selector = {
    mac = "${element(var.controller_macs, count.index)}"
    os  = "installed"
  }
}

resource "matchbox_group" "worker" {
  count   = "${length(var.worker_names)}"
  name    = "${format("%s-%s", var.cluster_name, element(var.worker_names, count.index))}"
  profile = "${element(matchbox_profile.workers.*.name, count.index)}"

  selector = {
    mac = "${element(var.worker_macs, count.index)}"
    os  = "installed"
  }
}
@@ -1,36 +0,0 @@
# required
lang en_US.UTF-8
keyboard us
timezone --utc Etc/UTC

# wipe disks
zerombr
clearpart --all --initlabel

# locked root and temporary user
rootpw --lock --iscrypted locked
user --name=none

# config
autopart --type=lvm --noswap
network --bootproto=dhcp --device=link --activate --onboot=on
bootloader --timeout=1 --append="ds=nocloud\;seedfrom=/var/cloud-init/"
services --enabled=cloud-init,cloud-init-local,cloud-config,cloud-final

ostreesetup --osname="fedora-atomic" --remote="fedora-atomic" --url="${atomic_assets_endpoint}/repo" --ref=fedora/28/x86_64/atomic-host --nogpg

reboot

%post --erroronfail
mkdir /var/cloud-init
curl --retry 10 "${matchbox_http_endpoint}/generic?mac=${mac}&os=installed" -o /var/cloud-init/user-data
echo "instance-id: iid-local01" > /var/cloud-init/meta-data

rm -f /etc/ostree/remotes.d/fedora-atomic.conf
ostree remote add fedora-atomic https://dl.fedoraproject.org/atomic/repo/ --set=gpgkeypath=/etc/pki/rpm-gpg/RPM-GPG-KEY-fedora-28-primary

# lock root user
passwd -l root
# remove temporary user
userdel -r none
%end
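A note on the boot flow above: the `%post` section seeds cloud-init's NoCloud datasource, and the `ds=nocloud\;seedfrom=/var/cloud-init/` kernel argument points cloud-init at that seed on first boot. A sketch of the seeded directory (the MAC shown is a hypothetical placeholder):

/var/cloud-init/
  user-data   # cloud-config fetched from ${matchbox_http_endpoint}/generic?mac=52:54:00:a1:9c:ae&os=installed
  meta-data   # contains the single line: instance-id: iid-local01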
@@ -1,3 +0,0 @@
output "kubeconfig-admin" {
  value = "${module.bootkube.kubeconfig-admin}"
}
@@ -1,87 +0,0 @@
locals {
  default_assets_endpoint = "${var.matchbox_http_endpoint}/assets/fedora/28"
  atomic_assets_endpoint  = "${var.atomic_assets_endpoint != "" ? var.atomic_assets_endpoint : local.default_assets_endpoint}"
}

// Cached Fedora Install profile (from matchbox /assets cache)
// Note: Admin must have downloaded Fedora kernel, initrd, and repo into
// matchbox assets.
resource "matchbox_profile" "cached-fedora-install" {
  count = "${length(var.controller_names) + length(var.worker_names)}"
  name  = "${format("%s-cached-fedora-install-%s", var.cluster_name, element(concat(var.controller_names, var.worker_names), count.index))}"

  kernel = "${local.atomic_assets_endpoint}/images/pxeboot/vmlinuz"

  initrd = [
    "${local.atomic_assets_endpoint}/images/pxeboot/initrd.img",
  ]

  args = [
    "initrd=initrd.img",
    "inst.repo=${local.atomic_assets_endpoint}",
    "inst.ks=${var.matchbox_http_endpoint}/generic?mac=${element(concat(var.controller_macs, var.worker_macs), count.index)}",
    "inst.text",
    "${var.kernel_args}",
  ]

  # kickstart
  generic_config = "${element(data.template_file.install-kickstarts.*.rendered, count.index)}"
}

data "template_file" "install-kickstarts" {
  count = "${length(var.controller_names) + length(var.worker_names)}"

  template = "${file("${path.module}/kickstart/fedora-atomic.ks.tmpl")}"

  vars = {
    matchbox_http_endpoint = "${var.matchbox_http_endpoint}"
    atomic_assets_endpoint = "${local.atomic_assets_endpoint}"
    mac                    = "${element(concat(var.controller_macs, var.worker_macs), count.index)}"
  }
}

// Kubernetes Controller profiles
resource "matchbox_profile" "controllers" {
  count = "${length(var.controller_names)}"
  name  = "${format("%s-controller-%s", var.cluster_name, element(var.controller_names, count.index))}"

  # cloud-init
  generic_config = "${element(data.template_file.controller-configs.*.rendered, count.index)}"
}

data "template_file" "controller-configs" {
  count = "${length(var.controller_names)}"

  template = "${file("${path.module}/cloudinit/controller.yaml.tmpl")}"

  vars = {
    domain_name            = "${element(var.controller_domains, count.index)}"
    etcd_name              = "${element(var.controller_names, count.index)}"
    etcd_initial_cluster   = "${join(",", formatlist("%s=https://%s:2380", var.controller_names, var.controller_domains))}"
    cluster_dns_service_ip = "${module.bootkube.cluster_dns_service_ip}"
    cluster_domain_suffix  = "${var.cluster_domain_suffix}"
    ssh_authorized_key     = "${var.ssh_authorized_key}"
  }
}

// Kubernetes Worker profiles
resource "matchbox_profile" "workers" {
  count = "${length(var.worker_names)}"
  name  = "${format("%s-worker-%s", var.cluster_name, element(var.worker_names, count.index))}"

  # cloud-init
  generic_config = "${element(data.template_file.worker-configs.*.rendered, count.index)}"
}

data "template_file" "worker-configs" {
  count = "${length(var.worker_names)}"

  template = "${file("${path.module}/cloudinit/worker.yaml.tmpl")}"

  vars = {
    domain_name            = "${element(var.worker_domains, count.index)}"
    cluster_dns_service_ip = "${module.bootkube.cluster_dns_service_ip}"
    cluster_domain_suffix  = "${var.cluster_domain_suffix}"
    ssh_authorized_key     = "${var.ssh_authorized_key}"
  }
}
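To make the install profile concrete: for a node with MAC 52:54:00:a1:9c:ae and the default assets endpoint, the rendered PXE kernel arguments would look roughly like the following (the matchbox endpoint and MAC are hypothetical placeholders):

initrd=initrd.img inst.repo=http://matchbox.example.com:8080/assets/fedora/28 inst.ks=http://matchbox.example.com:8080/generic?mac=52:54:00:a1:9c:ae inst.text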
@@ -1,21 +0,0 @@
# Terraform version and plugin versions

terraform {
  required_version = ">= 0.11.0"
}

provider "local" {
  version = "~> 1.0"
}

provider "null" {
  version = "~> 1.0"
}

provider "template" {
  version = "~> 1.0"
}

provider "tls" {
  version = "~> 1.0"
}
@@ -1,136 +0,0 @@
# Secure copy etcd TLS assets and kubeconfig to controllers. Activates kubelet.service
resource "null_resource" "copy-controller-secrets" {
  count = "${length(var.controller_names)}"

  # Without depends_on, remote-exec could start and wait for machines before
  # matchbox groups are written, causing a deadlock.
  depends_on = [
    "matchbox_group.install",
    "matchbox_group.controller",
    "matchbox_group.worker",
  ]

  connection {
    type    = "ssh"
    host    = "${element(var.controller_domains, count.index)}"
    user    = "fedora"
    timeout = "60m"
  }

  provisioner "file" {
    content     = "${module.bootkube.kubeconfig-kubelet}"
    destination = "$HOME/kubeconfig"
  }

  provisioner "file" {
    content     = "${module.bootkube.etcd_ca_cert}"
    destination = "$HOME/etcd-client-ca.crt"
  }

  provisioner "file" {
    content     = "${module.bootkube.etcd_client_cert}"
    destination = "$HOME/etcd-client.crt"
  }

  provisioner "file" {
    content     = "${module.bootkube.etcd_client_key}"
    destination = "$HOME/etcd-client.key"
  }

  provisioner "file" {
    content     = "${module.bootkube.etcd_server_cert}"
    destination = "$HOME/etcd-server.crt"
  }

  provisioner "file" {
    content     = "${module.bootkube.etcd_server_key}"
    destination = "$HOME/etcd-server.key"
  }

  provisioner "file" {
    content     = "${module.bootkube.etcd_peer_cert}"
    destination = "$HOME/etcd-peer.crt"
  }

  provisioner "file" {
    content     = "${module.bootkube.etcd_peer_key}"
    destination = "$HOME/etcd-peer.key"
  }

  provisioner "remote-exec" {
    inline = [
      "sudo mkdir -p /etc/ssl/etcd/etcd",
      "sudo mv etcd-client* /etc/ssl/etcd/",
      "sudo cp /etc/ssl/etcd/etcd-client-ca.crt /etc/ssl/etcd/etcd/server-ca.crt",
      "sudo mv etcd-server.crt /etc/ssl/etcd/etcd/server.crt",
      "sudo mv etcd-server.key /etc/ssl/etcd/etcd/server.key",
      "sudo cp /etc/ssl/etcd/etcd-client-ca.crt /etc/ssl/etcd/etcd/peer-ca.crt",
      "sudo mv etcd-peer.crt /etc/ssl/etcd/etcd/peer.crt",
      "sudo mv etcd-peer.key /etc/ssl/etcd/etcd/peer.key",
      "sudo mv $HOME/kubeconfig /etc/kubernetes/kubeconfig",
    ]
  }
}

# Secure copy kubeconfig to all workers. Activates kubelet.service
resource "null_resource" "copy-worker-secrets" {
  count = "${length(var.worker_names)}"

  # Without depends_on, remote-exec could start and wait for machines before
  # matchbox groups are written, causing a deadlock.
  depends_on = [
    "matchbox_group.install",
    "matchbox_group.controller",
    "matchbox_group.worker",
  ]

  connection {
    type    = "ssh"
    host    = "${element(var.worker_domains, count.index)}"
    user    = "fedora"
    timeout = "60m"
  }

  provisioner "file" {
    content     = "${module.bootkube.kubeconfig-kubelet}"
    destination = "$HOME/kubeconfig"
  }

  provisioner "remote-exec" {
    inline = [
      "sudo mv $HOME/kubeconfig /etc/kubernetes/kubeconfig",
    ]
  }
}

# Secure copy bootkube assets to ONE controller and start bootkube to perform
# one-time self-hosted cluster bootstrapping.
resource "null_resource" "bootkube-start" {
  # Without depends_on, this remote-exec may start before the kubeconfig copy.
  # Terraform only does one task at a time, so it would try to bootstrap
  # while no Kubelets are running.
  depends_on = [
    "null_resource.copy-controller-secrets",
    "null_resource.copy-worker-secrets",
  ]

  connection {
    type    = "ssh"
    host    = "${element(var.controller_domains, 0)}"
    user    = "fedora"
    timeout = "15m"
  }

  provisioner "file" {
    source      = "${var.asset_dir}"
    destination = "$HOME/assets"
  }

  provisioner "remote-exec" {
    inline = [
      "while [ ! -f /var/lib/cloud/instance/boot-finished ]; do sleep 4; done",
      "sudo mv $HOME/assets /var/lib/bootkube",
      "sudo systemctl start bootkube",
    ]
  }
}
@@ -1,118 +0,0 @@
variable "cluster_name" {
  type        = "string"
  description = "Unique cluster name"
}

# bare-metal

variable "matchbox_http_endpoint" {
  type        = "string"
  description = "Matchbox HTTP read-only endpoint (e.g. http://matchbox.example.com:8080)"
}

variable "atomic_assets_endpoint" {
  type    = "string"
  default = ""

  description = <<EOD
HTTP endpoint serving the Fedora Atomic Host vmlinuz, initrd, os repo, and ostree repo (e.g. `http://example.com/some/path`).

Ensure the HTTP server directory contains `vmlinuz` and `initrd` files and `os` and `repo` directories. Leave unset to assume ${matchbox_http_endpoint}/assets/fedora/28
EOD
}

# machines
# Terraform's crude "type system" does not properly support lists of maps so we do this.

variable "controller_names" {
  type        = "list"
  description = "Ordered list of controller names (e.g. [node1])"
}

variable "controller_macs" {
  type        = "list"
  description = "Ordered list of controller identifying MAC addresses (e.g. [52:54:00:a1:9c:ae])"
}

variable "controller_domains" {
  type        = "list"
  description = "Ordered list of controller FQDNs (e.g. [node1.example.com])"
}

variable "worker_names" {
  type        = "list"
  description = "Ordered list of worker names (e.g. [node2, node3])"
}

variable "worker_macs" {
  type        = "list"
  description = "Ordered list of worker identifying MAC addresses (e.g. [52:54:00:b2:2f:86, 52:54:00:c3:61:77])"
}

variable "worker_domains" {
  type        = "list"
  description = "Ordered list of worker FQDNs (e.g. [node2.example.com, node3.example.com])"
}

# configuration

variable "k8s_domain_name" {
  description = "Controller DNS name which resolves to a controller instance. Workers and kubeconfigs will communicate with this endpoint (e.g. cluster.example.com)"
  type        = "string"
}

variable "ssh_authorized_key" {
  type        = "string"
  description = "SSH public key for user 'fedora'"
}

variable "asset_dir" {
  description = "Path to a directory where generated assets should be placed (contains secrets)"
  type        = "string"
}

variable "networking" {
  description = "Choice of networking provider (flannel or calico)"
  type        = "string"
  default     = "calico"
}

variable "network_mtu" {
  description = "CNI interface MTU (applies to calico only)"
  type        = "string"
  default     = "1480"
}

variable "pod_cidr" {
  description = "CIDR IPv4 range to assign Kubernetes pods"
  type        = "string"
  default     = "10.2.0.0/16"
}

variable "service_cidr" {
  description = <<EOD
CIDR IPv4 range to assign Kubernetes services.
The 1st IP will be reserved for kube_apiserver, the 10th IP will be reserved for coredns.
EOD

  type    = "string"
  default = "10.3.0.0/16"
}

variable "cluster_domain_suffix" {
  description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local)"
  type        = "string"
  default     = "cluster.local"
}

variable "kernel_args" {
  description = "Additional kernel arguments to provide at PXE boot."
  type        = "list"
  default     = []
}

variable "enable_reporting" {
  type        = "string"
  description = "Enable usage or analytics reporting to upstreams (Calico)"
  default     = "false"
}
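For reference, a minimal sketch of how this (now removed) bare-metal module was typically invoked, assuming Typhoon's usual module layout; the cluster name, endpoint, key, MACs, and domains are hypothetical placeholders drawn from the variable descriptions above:

module "bare-metal-mercury" {
  source = "git::https://github.com/poseidon/typhoon//bare-metal/fedora-atomic/kubernetes"

  # hypothetical example values
  cluster_name           = "mercury"
  matchbox_http_endpoint = "http://matchbox.example.com:8080"
  k8s_domain_name        = "node1.example.com"
  ssh_authorized_key     = "ssh-rsa AAAAB3Nz..."
  asset_dir              = "/home/user/.secrets/clusters/mercury"

  controller_names   = ["node1"]
  controller_macs    = ["52:54:00:a1:9c:ae"]
  controller_domains = ["node1.example.com"]
  worker_names       = ["node2"]
  worker_macs        = ["52:54:00:b2:2f:86"]
  worker_domains     = ["node2.example.com"]
}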
@@ -1,23 +0,0 @@
The MIT License (MIT)

Copyright (c) 2017 Typhoon Authors
Copyright (c) 2017 Dalton Hubble

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
@@ -1,22 +0,0 @@
# Typhoon <img align="right" src="https://storage.googleapis.com/poseidon/typhoon-logo.png">

Typhoon is a minimal and free Kubernetes distribution.

* Minimal, stable base Kubernetes distribution
* Declarative infrastructure and configuration
* Free (freedom and cost) and privacy-respecting
* Practical for labs, datacenters, and clouds

Typhoon distributes upstream Kubernetes, architectural conventions, and cluster addons, much like a GNU/Linux distribution provides the Linux kernel and userspace components.

## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>

* Kubernetes v1.15.0 (upstream, via [kubernetes-incubator/bootkube](https://github.com/kubernetes-incubator/bootkube))
* Single or multi-master, [flannel](https://github.com/coreos/flannel) networking
* On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled
* Ready for Ingress, Prometheus, Grafana, and other optional [addons](https://typhoon.psdn.io/addons/overview/)

## Docs

Please see the [official docs](https://typhoon.psdn.io) and the Digital Ocean [tutorial](https://typhoon.psdn.io/cl/digital-ocean/).
@@ -1,18 +0,0 @@
# Self-hosted Kubernetes assets (kubeconfig, manifests)
module "bootkube" {
  source = "git::https://github.com/poseidon/terraform-render-bootkube.git?ref=62df9ad69cc0da35f47d40fa981370c4503ad581"

  cluster_name          = "${var.cluster_name}"
  api_servers           = ["${format("%s.%s", var.cluster_name, var.dns_zone)}"]
  etcd_servers          = "${digitalocean_record.etcds.*.fqdn}"
  asset_dir             = "${var.asset_dir}"
  networking            = "flannel"
  network_mtu           = 1440
  pod_cidr              = "${var.pod_cidr}"
  service_cidr          = "${var.service_cidr}"
  cluster_domain_suffix = "${var.cluster_domain_suffix}"
  enable_reporting      = "${var.enable_reporting}"

  # Fedora
  trusted_certs_dir = "/etc/pki/tls/certs"
}
@@ -1,107 +0,0 @@
#cloud-config
write_files:
  - path: /etc/etcd/etcd.conf
    content: |
      ETCD_NAME=${etcd_name}
      ETCD_DATA_DIR=/var/lib/etcd
      ETCD_ADVERTISE_CLIENT_URLS=https://${etcd_domain}:2379
      ETCD_INITIAL_ADVERTISE_PEER_URLS=https://${etcd_domain}:2380
      ETCD_LISTEN_CLIENT_URLS=https://0.0.0.0:2379
      ETCD_LISTEN_PEER_URLS=https://0.0.0.0:2380
      ETCD_LISTEN_METRICS_URLS=http://0.0.0.0:2381
      ETCD_INITIAL_CLUSTER=${etcd_initial_cluster}
      ETCD_STRICT_RECONFIG_CHECK=true
      ETCD_TRUSTED_CA_FILE=/etc/ssl/certs/etcd/server-ca.crt
      ETCD_CERT_FILE=/etc/ssl/certs/etcd/server.crt
      ETCD_KEY_FILE=/etc/ssl/certs/etcd/server.key
      ETCD_CLIENT_CERT_AUTH=true
      ETCD_PEER_TRUSTED_CA_FILE=/etc/ssl/certs/etcd/peer-ca.crt
      ETCD_PEER_CERT_FILE=/etc/ssl/certs/etcd/peer.crt
      ETCD_PEER_KEY_FILE=/etc/ssl/certs/etcd/peer.key
      ETCD_PEER_CLIENT_CERT_AUTH=true
  - path: /etc/systemd/system/cloud-metadata.service
    content: |
      [Unit]
      Description=Cloud metadata agent
      [Service]
      Type=oneshot
      Environment=OUTPUT=/run/metadata/cloud
      ExecStart=/usr/bin/mkdir -p /run/metadata
      ExecStart=/usr/bin/bash -c 'echo "HOSTNAME_OVERRIDE=$(curl\
        --url http://169.254.169.254/metadata/v1/interfaces/private/0/ipv4/address\
        --retry 10)" > $${OUTPUT}'
      [Install]
      WantedBy=multi-user.target
  - path: /etc/systemd/system/kubelet.service.d/10-typhoon.conf
    content: |
      [Unit]
      Requires=cloud-metadata.service
      After=cloud-metadata.service
      Wants=rpc-statd.service
      [Service]
      ExecStartPre=/bin/mkdir -p /opt/cni/bin
      ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
      ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
      ExecStartPre=/bin/mkdir -p /etc/kubernetes/checkpoint-secrets
      ExecStartPre=/bin/mkdir -p /etc/kubernetes/inactive-manifests
      ExecStartPre=/bin/mkdir -p /var/lib/cni
      ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins
      ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
      Restart=always
      RestartSec=10
  - path: /etc/kubernetes/kubelet.conf
    content: |
      ARGS="--anonymous-auth=false \
        --authentication-token-webhook \
        --authorization-mode=Webhook \
        --client-ca-file=/etc/kubernetes/ca.crt \
        --cluster_dns=${cluster_dns_service_ip} \
        --cluster_domain=${cluster_domain_suffix} \
        --cni-conf-dir=/etc/kubernetes/cni/net.d \
        --exit-on-lock-contention \
        --kubeconfig=/etc/kubernetes/kubeconfig \
        --lock-file=/var/run/lock/kubelet.lock \
        --network-plugin=cni \
        --node-labels=node-role.kubernetes.io/master \
        --node-labels=node-role.kubernetes.io/controller="true" \
        --pod-manifest-path=/etc/kubernetes/manifests \
        --read-only-port=0 \
        --register-with-taints=node-role.kubernetes.io/master=:NoSchedule \
        --volume-plugin-dir=/var/lib/kubelet/volumeplugins"
  - path: /etc/systemd/system/kubelet.path
    content: |
      [Unit]
      Description=Watch for kubeconfig
      [Path]
      PathExists=/etc/kubernetes/kubeconfig
      [Install]
      WantedBy=multi-user.target
  - path: /var/lib/bootkube/.keep
  - path: /etc/selinux/config
    owner: root:root
    permissions: '0644'
    content: |
      SELINUX=permissive
      SELINUXTYPE=targeted
bootcmd:
  - [setenforce, Permissive]
  - [systemctl, disable, firewalld, --now]
  # https://github.com/kubernetes/kubernetes/issues/60869
  - [modprobe, ip_vs]
runcmd:
  - [systemctl, daemon-reload]
  - "atomic install --system --name=etcd quay.io/poseidon/etcd:v3.3.12"
  - "atomic install --system --name=kubelet quay.io/poseidon/kubelet:v1.14.1"
  - "atomic install --system --name=bootkube quay.io/poseidon/bootkube:v0.14.0"
  - [systemctl, start, --no-block, etcd.service]
  - [systemctl, enable, cloud-metadata.service]
  - [systemctl, enable, kubelet.path]
  - [systemctl, start, --no-block, kubelet.path]
users:
  - default
  - name: fedora
    gecos: Fedora Admin
    sudo: ALL=(ALL) NOPASSWD:ALL
    groups: wheel,adm,systemd-journal,docker
    ssh-authorized-keys:
      - "${ssh_authorized_key}"
@@ -1,80 +0,0 @@
#cloud-config
write_files:
  - path: /etc/systemd/system/cloud-metadata.service
    content: |
      [Unit]
      Description=Cloud metadata agent
      [Service]
      Type=oneshot
      Environment=OUTPUT=/run/metadata/cloud
      ExecStart=/usr/bin/mkdir -p /run/metadata
      ExecStart=/usr/bin/bash -c 'echo "HOSTNAME_OVERRIDE=$(curl\
        --url http://169.254.169.254/metadata/v1/interfaces/private/0/ipv4/address\
        --retry 10)" > $${OUTPUT}'
      [Install]
      WantedBy=multi-user.target
  - path: /etc/systemd/system/kubelet.service.d/10-typhoon.conf
    content: |
      [Unit]
      Requires=cloud-metadata.service
      After=cloud-metadata.service
      Wants=rpc-statd.service
      [Service]
      ExecStartPre=/bin/mkdir -p /opt/cni/bin
      ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
      ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
      ExecStartPre=/bin/mkdir -p /var/lib/cni
      ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins
      ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
      Restart=always
      RestartSec=10
  - path: /etc/kubernetes/kubelet.conf
    content: |
      ARGS="--anonymous-auth=false \
        --authentication-token-webhook \
        --authorization-mode=Webhook \
        --client-ca-file=/etc/kubernetes/ca.crt \
        --cluster_dns=${cluster_dns_service_ip} \
        --cluster_domain=${cluster_domain_suffix} \
        --cni-conf-dir=/etc/kubernetes/cni/net.d \
        --exit-on-lock-contention \
        --kubeconfig=/etc/kubernetes/kubeconfig \
        --lock-file=/var/run/lock/kubelet.lock \
        --network-plugin=cni \
        --node-labels=node-role.kubernetes.io/node \
        --pod-manifest-path=/etc/kubernetes/manifests \
        --read-only-port=0 \
        --volume-plugin-dir=/var/lib/kubelet/volumeplugins"
  - path: /etc/systemd/system/kubelet.path
    content: |
      [Unit]
      Description=Watch for kubeconfig
      [Path]
      PathExists=/etc/kubernetes/kubeconfig
      [Install]
      WantedBy=multi-user.target
  - path: /etc/selinux/config
    owner: root:root
    permissions: '0644'
    content: |
      SELINUX=permissive
      SELINUXTYPE=targeted
bootcmd:
  - [setenforce, Permissive]
  - [systemctl, disable, firewalld, --now]
  # https://github.com/kubernetes/kubernetes/issues/60869
  - [modprobe, ip_vs]
runcmd:
  - [systemctl, daemon-reload]
  - [systemctl, enable, cloud-metadata.service]
  - "atomic install --system --name=kubelet quay.io/poseidon/kubelet:v1.14.1"
  - [systemctl, enable, kubelet.path]
  - [systemctl, start, --no-block, kubelet.path]
users:
  - default
  - name: fedora
    gecos: Fedora Admin
    sudo: ALL=(ALL) NOPASSWD:ALL
    groups: wheel,adm,systemd-journal,docker
    ssh-authorized-keys:
      - "${ssh_authorized_key}"
@@ -1,95 +0,0 @@
# Controller Instance DNS records
resource "digitalocean_record" "controllers" {
  count = "${var.controller_count}"

  # DNS zone where record should be created
  domain = "${var.dns_zone}"

  # DNS record (will be prepended to domain)
  name = "${var.cluster_name}"
  type = "A"
  ttl  = 300

  # IPv4 addresses of controllers
  value = "${element(digitalocean_droplet.controllers.*.ipv4_address, count.index)}"
}

# Discrete DNS records for each controller's private IPv4 for etcd usage
resource "digitalocean_record" "etcds" {
  count = "${var.controller_count}"

  # DNS zone where record should be created
  domain = "${var.dns_zone}"

  # DNS record (will be prepended to domain)
  name = "${var.cluster_name}-etcd${count.index}"
  type = "A"
  ttl  = 300

  # private IPv4 address for etcd
  value = "${element(digitalocean_droplet.controllers.*.ipv4_address_private, count.index)}"
}

# Controller droplet instances
resource "digitalocean_droplet" "controllers" {
  count = "${var.controller_count}"

  name   = "${var.cluster_name}-controller-${count.index}"
  region = "${var.region}"

  image = "${var.image}"
  size  = "${var.controller_type}"

  # network
  ipv6               = true
  private_networking = true

  user_data = "${element(data.template_file.controller-cloudinit.*.rendered, count.index)}"
  ssh_keys  = ["${var.ssh_fingerprints}"]

  tags = [
    "${digitalocean_tag.controllers.id}",
  ]

  lifecycle {
    ignore_changes = [
      "user_data",
    ]
  }
}

# Tag to label controllers
resource "digitalocean_tag" "controllers" {
  name = "${var.cluster_name}-controller"
}

# Controller Cloud-Init
data "template_file" "controller-cloudinit" {
  count = "${var.controller_count}"

  template = "${file("${path.module}/cloudinit/controller.yaml.tmpl")}"

  vars = {
    # Cannot use cyclic dependencies on controllers or their DNS records
    etcd_name   = "etcd${count.index}"
    etcd_domain = "${var.cluster_name}-etcd${count.index}.${var.dns_zone}"

    # etcd0=https://cluster-etcd0.example.com,etcd1=https://cluster-etcd1.example.com,...
    etcd_initial_cluster = "${join(",", data.template_file.etcds.*.rendered)}"

    ssh_authorized_key     = "${var.ssh_authorized_key}"
    cluster_dns_service_ip = "${cidrhost(var.service_cidr, 10)}"
    cluster_domain_suffix  = "${var.cluster_domain_suffix}"
  }
}

data "template_file" "etcds" {
  count    = "${var.controller_count}"
  template = "etcd$${index}=https://$${cluster_name}-etcd$${index}.$${dns_zone}:2380"

  vars = {
    index        = "${count.index}"
    cluster_name = "${var.cluster_name}"
    dns_zone     = "${var.dns_zone}"
  }
}
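As a worked example of the template just above: for a three-controller cluster named nemo in DNS zone do.example.com (hypothetical values), the joined etcd_initial_cluster string renders to:

etcd0=https://nemo-etcd0.do.example.com:2380,etcd1=https://nemo-etcd1.do.example.com:2380,etcd2=https://nemo-etcd2.do.example.com:2380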
@@ -1,58 +0,0 @@
resource "digitalocean_firewall" "rules" {
  name = "${var.cluster_name}"

  tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]

  # allow ssh, apiserver, http/https ingress, and peer-to-peer traffic
  inbound_rule = [
    {
      protocol         = "tcp"
      port_range       = "22"
      source_addresses = ["0.0.0.0/0", "::/0"]
    },
    {
      protocol         = "tcp"
      port_range       = "80"
      source_addresses = ["0.0.0.0/0", "::/0"]
    },
    {
      protocol         = "tcp"
      port_range       = "443"
      source_addresses = ["0.0.0.0/0", "::/0"]
    },
    {
      protocol         = "tcp"
      port_range       = "6443"
      source_addresses = ["0.0.0.0/0", "::/0"]
    },
    {
      protocol    = "udp"
      port_range  = "1-65535"
      source_tags = ["${digitalocean_tag.controllers.name}", "${digitalocean_tag.workers.name}"]
    },
    {
      protocol    = "tcp"
      port_range  = "1-65535"
      source_tags = ["${digitalocean_tag.controllers.name}", "${digitalocean_tag.workers.name}"]
    },
  ]

  # allow all outbound traffic
  outbound_rule = [
    {
      protocol              = "tcp"
      port_range            = "1-65535"
      destination_addresses = ["0.0.0.0/0", "::/0"]
    },
    {
      protocol              = "udp"
      port_range            = "1-65535"
      destination_addresses = ["0.0.0.0/0", "::/0"]
    },
    {
      protocol              = "icmp"
      port_range            = "1-65535"
      destination_addresses = ["0.0.0.0/0", "::/0"]
    },
  ]
}
@@ -1,28 +0,0 @@
output "kubeconfig-admin" {
  value = "${module.bootkube.kubeconfig-admin}"
}

output "controllers_dns" {
  value = "${digitalocean_record.controllers.0.fqdn}"
}

output "workers_dns" {
  # Multiple A and AAAA records with the same FQDN
  value = "${digitalocean_record.workers-record-a.0.fqdn}"
}

output "controllers_ipv4" {
  value = ["${digitalocean_droplet.controllers.*.ipv4_address}"]
}

output "controllers_ipv6" {
  value = ["${digitalocean_droplet.controllers.*.ipv6_address}"]
}

output "workers_ipv4" {
  value = ["${digitalocean_droplet.workers.*.ipv4_address}"]
}

output "workers_ipv6" {
  value = ["${digitalocean_droplet.workers.*.ipv6_address}"]
}
@@ -1,25 +0,0 @@
# Terraform version and plugin versions

terraform {
  required_version = ">= 0.11.0"
}

provider "digitalocean" {
  version = "~> 1.0"
}

provider "local" {
  version = "~> 1.0"
}

provider "null" {
  version = "~> 1.0"
}

provider "template" {
  version = "~> 1.0"
}

provider "tls" {
  version = "~> 1.0"
}
@@ -1,121 +0,0 @@
# Secure copy etcd TLS assets and kubeconfig to controllers. Activates kubelet.service
resource "null_resource" "copy-controller-secrets" {
  count = "${var.controller_count}"

  depends_on = [
    "digitalocean_firewall.rules",
  ]

  connection {
    type    = "ssh"
    host    = "${element(concat(digitalocean_droplet.controllers.*.ipv4_address), count.index)}"
    user    = "fedora"
    timeout = "15m"
  }

  provisioner "file" {
    content     = "${module.bootkube.kubeconfig-kubelet}"
    destination = "$HOME/kubeconfig"
  }

  provisioner "file" {
    content     = "${module.bootkube.etcd_ca_cert}"
    destination = "$HOME/etcd-client-ca.crt"
  }

  provisioner "file" {
    content     = "${module.bootkube.etcd_client_cert}"
    destination = "$HOME/etcd-client.crt"
  }

  provisioner "file" {
    content     = "${module.bootkube.etcd_client_key}"
    destination = "$HOME/etcd-client.key"
  }

  provisioner "file" {
    content     = "${module.bootkube.etcd_server_cert}"
    destination = "$HOME/etcd-server.crt"
  }

  provisioner "file" {
    content     = "${module.bootkube.etcd_server_key}"
    destination = "$HOME/etcd-server.key"
  }

  provisioner "file" {
    content     = "${module.bootkube.etcd_peer_cert}"
    destination = "$HOME/etcd-peer.crt"
  }

  provisioner "file" {
    content     = "${module.bootkube.etcd_peer_key}"
    destination = "$HOME/etcd-peer.key"
  }

  provisioner "remote-exec" {
    inline = [
      "sudo mkdir -p /etc/ssl/etcd/etcd",
      "sudo mv etcd-client* /etc/ssl/etcd/",
      "sudo cp /etc/ssl/etcd/etcd-client-ca.crt /etc/ssl/etcd/etcd/server-ca.crt",
      "sudo mv etcd-server.crt /etc/ssl/etcd/etcd/server.crt",
      "sudo mv etcd-server.key /etc/ssl/etcd/etcd/server.key",
      "sudo cp /etc/ssl/etcd/etcd-client-ca.crt /etc/ssl/etcd/etcd/peer-ca.crt",
      "sudo mv etcd-peer.crt /etc/ssl/etcd/etcd/peer.crt",
      "sudo mv etcd-peer.key /etc/ssl/etcd/etcd/peer.key",
      "sudo mv $HOME/kubeconfig /etc/kubernetes/kubeconfig",
    ]
  }
}

# Secure copy kubeconfig to all workers. Activates kubelet.service.
resource "null_resource" "copy-worker-secrets" {
  count = "${var.worker_count}"

  connection {
    type    = "ssh"
    host    = "${element(concat(digitalocean_droplet.workers.*.ipv4_address), count.index)}"
    user    = "fedora"
    timeout = "15m"
  }

  provisioner "file" {
    content     = "${module.bootkube.kubeconfig-kubelet}"
    destination = "$HOME/kubeconfig"
  }

  provisioner "remote-exec" {
    inline = [
      "sudo mv $HOME/kubeconfig /etc/kubernetes/kubeconfig",
    ]
  }
}

# Secure copy bootkube assets to ONE controller and start bootkube to perform
# one-time self-hosted cluster bootstrapping.
resource "null_resource" "bootkube-start" {
  depends_on = [
    "null_resource.copy-controller-secrets",
    "null_resource.copy-worker-secrets",
  ]

  connection {
    type    = "ssh"
    host    = "${digitalocean_droplet.controllers.0.ipv4_address}"
    user    = "fedora"
    timeout = "15m"
  }

  provisioner "file" {
    source      = "${var.asset_dir}"
    destination = "$HOME/assets"
  }

  provisioner "remote-exec" {
    inline = [
      "while [ ! -f /var/lib/cloud/instance/boot-finished ]; do sleep 4; done",
      "sudo mv $HOME/assets /var/lib/bootkube",
      "sudo systemctl start bootkube",
    ]
  }
}
@@ -1,93 +0,0 @@
variable "cluster_name" {
  type        = "string"
  description = "Unique cluster name (prepended to dns_zone)"
}

# Digital Ocean

variable "region" {
  type        = "string"
  description = "Digital Ocean region (e.g. nyc1, sfo2, fra1, tor1)"
}

variable "dns_zone" {
  type        = "string"
  description = "Digital Ocean domain (i.e. DNS zone) (e.g. do.example.com)"
}

# instances

variable "controller_count" {
  type        = "string"
  default     = "1"
  description = "Number of controllers (i.e. masters)"
}

variable "worker_count" {
  type        = "string"
  default     = "1"
  description = "Number of workers"
}

variable "controller_type" {
  type        = "string"
  default     = "s-2vcpu-2gb"
  description = "Droplet type for controllers (e.g. s-2vcpu-2gb, s-2vcpu-4gb, s-4vcpu-8gb)"
}

variable "worker_type" {
  type        = "string"
  default     = "s-1vcpu-1gb"
  description = "Droplet type for workers (e.g. s-1vcpu-1gb, s-1vcpu-2gb, s-2vcpu-2gb)"
}

variable "image" {
  type        = "string"
  default     = "fedora-28-x64-atomic"
  description = "OS image from which to initialize the disk (e.g. fedora-28-x64-atomic)"
}

# configuration

variable "ssh_authorized_key" {
  type        = "string"
  description = "SSH public key for user 'fedora'"
}

variable "ssh_fingerprints" {
  type        = "list"
  description = "SSH public key fingerprints (e.g. see `ssh-add -l -E md5`)"
}

variable "asset_dir" {
  description = "Path to a directory where generated assets should be placed (contains secrets)"
  type        = "string"
}

variable "pod_cidr" {
  description = "CIDR IPv4 range to assign Kubernetes pods"
  type        = "string"
  default     = "10.2.0.0/16"
}

variable "service_cidr" {
  description = <<EOD
CIDR IPv4 range to assign Kubernetes services.
The 1st IP will be reserved for kube_apiserver, the 10th IP will be reserved for coredns.
EOD

  type    = "string"
  default = "10.3.0.0/16"
}

variable "cluster_domain_suffix" {
  description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local)"
  type        = "string"
  default     = "cluster.local"
}

variable "enable_reporting" {
  type        = "string"
  description = "Enable usage or analytics reporting to upstreams (Calico)"
  default     = "false"
}
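Likewise, a minimal sketch of a typical invocation of this (now removed) Digital Ocean module, assuming Typhoon's usual module layout; the cluster name, region, zone, key, and fingerprint are hypothetical placeholders drawn from the variable descriptions above:

module "digital-ocean-nemo" {
  source = "git::https://github.com/poseidon/typhoon//digital-ocean/fedora-atomic/kubernetes"

  # hypothetical example values
  cluster_name = "nemo"
  region       = "nyc1"
  dns_zone     = "do.example.com"

  ssh_authorized_key = "ssh-rsa AAAAB3Nz..."
  ssh_fingerprints   = ["d7:9d:79:ae:56:32:73:79:95:88:e3:a2:ab:5d:45:e7"]
  asset_dir          = "/home/user/.secrets/clusters/nemo"

  worker_count = 2
}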
@@ -1,66 +0,0 @@
# Worker DNS records
resource "digitalocean_record" "workers-record-a" {
  count = "${var.worker_count}"

  # DNS zone where record should be created
  domain = "${var.dns_zone}"

  name  = "${var.cluster_name}-workers"
  type  = "A"
  ttl   = 300
  value = "${element(digitalocean_droplet.workers.*.ipv4_address, count.index)}"
}

resource "digitalocean_record" "workers-record-aaaa" {
  count = "${var.worker_count}"

  # DNS zone where record should be created
  domain = "${var.dns_zone}"

  name  = "${var.cluster_name}-workers"
  type  = "AAAA"
  ttl   = 300
  value = "${element(digitalocean_droplet.workers.*.ipv6_address, count.index)}"
}

# Worker droplet instances
resource "digitalocean_droplet" "workers" {
  count = "${var.worker_count}"

  name   = "${var.cluster_name}-worker-${count.index}"
  region = "${var.region}"

  image = "${var.image}"
  size  = "${var.worker_type}"

  # network
  ipv6               = true
  private_networking = true

  user_data = "${data.template_file.worker-cloudinit.rendered}"
  ssh_keys  = ["${var.ssh_fingerprints}"]

  tags = [
    "${digitalocean_tag.workers.id}",
  ]

  lifecycle {
    create_before_destroy = true
  }
}

# Tag to label workers
resource "digitalocean_tag" "workers" {
  name = "${var.cluster_name}-worker"
}

# Worker Cloud-Init
data "template_file" "worker-cloudinit" {
  template = "${file("${path.module}/cloudinit/worker.yaml.tmpl")}"

  vars = {
    ssh_authorized_key     = "${var.ssh_authorized_key}"
    cluster_dns_service_ip = "${cidrhost(var.service_cidr, 10)}"
    cluster_domain_suffix  = "${var.cluster_domain_suffix}"
  }
}
@@ -1,23 +0,0 @@
The MIT License (MIT)

Copyright (c) 2017 Typhoon Authors
Copyright (c) 2017 Dalton Hubble

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
@@ -1,23 +0,0 @@
# Typhoon <img align="right" src="https://storage.googleapis.com/poseidon/typhoon-logo.png">

Typhoon is a minimal and free Kubernetes distribution.

* Minimal, stable base Kubernetes distribution
* Declarative infrastructure and configuration
* Free (freedom and cost) and privacy-respecting
* Practical for labs, datacenters, and clouds

Typhoon distributes upstream Kubernetes, architectural conventions, and cluster addons, much like a GNU/Linux distribution provides the Linux kernel and userspace components.

## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>

* Kubernetes v1.15.0 (upstream, via [kubernetes-incubator/bootkube](https://github.com/kubernetes-incubator/bootkube))
* Single or multi-master, [Calico](https://www.projectcalico.org/) or [flannel](https://github.com/coreos/flannel) networking
* On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
* Advanced features like [worker pools](https://typhoon.psdn.io/advanced/worker-pools/) and [preemptible](https://typhoon.psdn.io/cl/google-cloud/#preemption) workers
* Ready for Ingress, Prometheus, Grafana, and other optional [addons](https://typhoon.psdn.io/addons/overview/)

## Docs

Please see the [official docs](https://typhoon.psdn.io) and the Google Cloud [tutorial](https://typhoon.psdn.io/cl/google-cloud/).
@ -1,97 +0,0 @@
|
||||||
# TCP Proxy load balancer DNS record
|
|
||||||
resource "google_dns_record_set" "apiserver" {
|
|
||||||
# DNS Zone name where record should be created
|
|
||||||
managed_zone = "${var.dns_zone_name}"
|
|
||||||
|
|
||||||
# DNS record
|
|
||||||
name = "${format("%s.%s.", var.cluster_name, var.dns_zone)}"
|
|
||||||
type = "A"
|
|
||||||
ttl = 300
|
|
||||||
|
|
||||||
# IPv4 address of apiserver TCP Proxy load balancer
|
|
||||||
rrdatas = ["${google_compute_global_address.apiserver-ipv4.address}"]
|
|
||||||
}
|
|
||||||
|
|
||||||
# Static IPv4 address for the TCP Proxy Load Balancer
|
|
||||||
resource "google_compute_global_address" "apiserver-ipv4" {
|
|
||||||
name = "${var.cluster_name}-apiserver-ip"
|
|
||||||
ip_version = "IPV4"
|
|
||||||
}
|
|
||||||
|
|
||||||
# Forward IPv4 TCP traffic to the TCP proxy load balancer
|
|
||||||
resource "google_compute_global_forwarding_rule" "apiserver" {
|
|
||||||
name = "${var.cluster_name}-apiserver"
|
|
||||||
ip_address = "${google_compute_global_address.apiserver-ipv4.address}"
|
|
||||||
ip_protocol = "TCP"
|
|
||||||
port_range = "443"
|
|
||||||
target = "${google_compute_target_tcp_proxy.apiserver.self_link}"
|
|
||||||
}
|
|
||||||
|
|
||||||
# TCP Proxy Load Balancer for apiservers
|
|
||||||
resource "google_compute_target_tcp_proxy" "apiserver" {
|
|
||||||
name = "${var.cluster_name}-apiserver"
|
|
||||||
description = "Distribute TCP load across ${var.cluster_name} controllers"
|
|
||||||
backend_service = "${google_compute_backend_service.apiserver.self_link}"
|
|
||||||
}
|
|
||||||
|
|
||||||
# Backend service backed by unmanaged instance groups
|
|
||||||
resource "google_compute_backend_service" "apiserver" {
|
|
||||||
name = "${var.cluster_name}-apiserver"
|
|
||||||
description = "${var.cluster_name} apiserver service"
|
|
||||||
|
|
||||||
protocol = "TCP"
|
|
||||||
port_name = "apiserver"
|
|
||||||
session_affinity = "NONE"
|
|
||||||
timeout_sec = "300"
|
|
||||||
|
|
||||||
# controller(s) spread across zonal instance groups
|
|
||||||
backend {
|
|
||||||
group = "${google_compute_instance_group.controllers.0.self_link}"
|
|
||||||
}
|
|
||||||
|
|
||||||
backend {
|
|
||||||
group = "${google_compute_instance_group.controllers.1.self_link}"
|
|
||||||
}
|
|
||||||
|
|
||||||
backend {
|
|
||||||
group = "${google_compute_instance_group.controllers.2.self_link}"
|
|
||||||
}
|
|
||||||
|
|
||||||
health_checks = ["${google_compute_health_check.apiserver.self_link}"]
|
|
||||||
}
|
|
||||||
|
|
||||||
# Instance group of heterogeneous (unmanaged) controller instances
resource "google_compute_instance_group" "controllers" {
  count = "${length(local.zones)}"

  name = "${format("%s-controllers-%s", var.cluster_name, element(local.zones, count.index))}"
  zone = "${element(local.zones, count.index)}"

  named_port {
    name = "apiserver"
    port = "443"
  }

  # add instances in the zone into the instance group
  instances = [
    "${matchkeys(google_compute_instance.controllers.*.self_link,
    google_compute_instance.controllers.*.zone,
    list(element(local.zones, count.index)))}",
  ]
}
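
The `matchkeys(values, keys, searchset)` interpolation returns the elements of `values` whose corresponding entry in `keys` appears in `searchset`, so each zonal instance group picks up only the controllers placed in its zone. A toy sketch with assumed self-links and zones:

```hcl
output "zone_a_controllers" {
  # keep values whose zone (the key) is in the searchset
  value = "${matchkeys(list("link-0", "link-1", "link-2"), list("us-central1-a", "us-central1-b", "us-central1-a"), list("us-central1-a"))}"

  # => ["link-0", "link-2"]
}
```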

# TCP health check for apiserver
resource "google_compute_health_check" "apiserver" {
  name        = "${var.cluster_name}-apiserver-tcp-health"
  description = "TCP health check for kube-apiserver"

  timeout_sec        = 5
  check_interval_sec = 5

  healthy_threshold   = 1
  unhealthy_threshold = 3

  tcp_health_check {
    port = "443"
  }
}

@@ -1,21 +0,0 @@

# Self-hosted Kubernetes assets (kubeconfig, manifests)
module "bootkube" {
  source = "git::https://github.com/poseidon/terraform-render-bootkube.git?ref=62df9ad69cc0da35f47d40fa981370c4503ad581"

  cluster_name          = "${var.cluster_name}"
  api_servers           = ["${format("%s.%s", var.cluster_name, var.dns_zone)}"]
  etcd_servers          = ["${google_dns_record_set.etcds.*.name}"]
  asset_dir             = "${var.asset_dir}"
  networking            = "${var.networking}"
  network_mtu           = 1440
  pod_cidr              = "${var.pod_cidr}"
  service_cidr          = "${var.service_cidr}"
  cluster_domain_suffix = "${var.cluster_domain_suffix}"
  enable_reporting      = "${var.enable_reporting}"

  # Fedora
  trusted_certs_dir = "/etc/pki/tls/certs"

  // temporary
  apiserver_port = 443
}

@@ -1,93 +0,0 @@

#cloud-config
write_files:
  - path: /etc/etcd/etcd.conf
    content: |
      ETCD_NAME=${etcd_name}
      ETCD_DATA_DIR=/var/lib/etcd
      ETCD_ADVERTISE_CLIENT_URLS=https://${etcd_domain}:2379
      ETCD_INITIAL_ADVERTISE_PEER_URLS=https://${etcd_domain}:2380
      ETCD_LISTEN_CLIENT_URLS=https://0.0.0.0:2379
      ETCD_LISTEN_PEER_URLS=https://0.0.0.0:2380
      ETCD_LISTEN_METRICS_URLS=http://0.0.0.0:2381
      ETCD_INITIAL_CLUSTER=${etcd_initial_cluster}
      ETCD_STRICT_RECONFIG_CHECK=true
      ETCD_TRUSTED_CA_FILE=/etc/ssl/certs/etcd/server-ca.crt
      ETCD_CERT_FILE=/etc/ssl/certs/etcd/server.crt
      ETCD_KEY_FILE=/etc/ssl/certs/etcd/server.key
      ETCD_CLIENT_CERT_AUTH=true
      ETCD_PEER_TRUSTED_CA_FILE=/etc/ssl/certs/etcd/peer-ca.crt
      ETCD_PEER_CERT_FILE=/etc/ssl/certs/etcd/peer.crt
      ETCD_PEER_KEY_FILE=/etc/ssl/certs/etcd/peer.key
      ETCD_PEER_CLIENT_CERT_AUTH=true
  - path: /etc/systemd/system/kubelet.service.d/10-typhoon.conf
    content: |
      [Unit]
      Wants=rpc-statd.service
      [Service]
      ExecStartPre=/bin/mkdir -p /opt/cni/bin
      ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
      ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
      ExecStartPre=/bin/mkdir -p /etc/kubernetes/checkpoint-secrets
      ExecStartPre=/bin/mkdir -p /etc/kubernetes/inactive-manifests
      ExecStartPre=/bin/mkdir -p /var/lib/cni
      ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins
      ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
      Restart=always
      RestartSec=10
  - path: /etc/kubernetes/kubelet.conf
    content: |
      ARGS="--anonymous-auth=false \
      --authentication-token-webhook \
      --authorization-mode=Webhook \
      --client-ca-file=/etc/kubernetes/ca.crt \
      --cluster_dns=${cluster_dns_service_ip} \
      --cluster_domain=${cluster_domain_suffix} \
      --cni-conf-dir=/etc/kubernetes/cni/net.d \
      --exit-on-lock-contention \
      --kubeconfig=/etc/kubernetes/kubeconfig \
      --lock-file=/var/run/lock/kubelet.lock \
      --network-plugin=cni \
      --node-labels=node-role.kubernetes.io/master \
      --node-labels=node-role.kubernetes.io/controller="true" \
      --pod-manifest-path=/etc/kubernetes/manifests \
      --read-only-port=0 \
      --register-with-taints=node-role.kubernetes.io/master=:NoSchedule \
      --volume-plugin-dir=/var/lib/kubelet/volumeplugins"
  - path: /etc/kubernetes/kubeconfig
    permissions: '0644'
    content: |
      ${kubeconfig}
  - path: /var/lib/bootkube/.keep
  - path: /etc/NetworkManager/conf.d/typhoon.conf
    content: |
      [main]
      plugins=keyfile
      [keyfile]
      unmanaged-devices=interface-name:cali*;interface-name:tunl*
  - path: /etc/selinux/config
    owner: root:root
    permissions: '0644'
    content: |
      SELINUX=permissive
      SELINUXTYPE=targeted
bootcmd:
  - [setenforce, Permissive]
  - [systemctl, disable, firewalld, --now]
  # https://github.com/kubernetes/kubernetes/issues/60869
  - [modprobe, ip_vs]
runcmd:
  - [systemctl, daemon-reload]
  - [systemctl, restart, NetworkManager]
  - "atomic install --system --name=etcd quay.io/poseidon/etcd:v3.3.12"
  - "atomic install --system --name=kubelet quay.io/poseidon/kubelet:v1.14.1"
  - "atomic install --system --name=bootkube quay.io/poseidon/bootkube:v0.14.0"
  - [systemctl, start, --no-block, etcd.service]
  - [systemctl, start, --no-block, kubelet.service]
users:
  - default
  - name: fedora
    gecos: Fedora Admin
    sudo: ALL=(ALL) NOPASSWD:ALL
    groups: wheel,adm,systemd-journal,docker
    ssh-authorized-keys:
      - "${ssh_authorized_key}"

@@ -1,98 +0,0 @@

# Discrete DNS records for each controller's private IPv4 for etcd usage
resource "google_dns_record_set" "etcds" {
  count = "${var.controller_count}"

  # DNS Zone name where record should be created
  managed_zone = "${var.dns_zone_name}"

  # DNS record
  name = "${format("%s-etcd%d.%s.", var.cluster_name, count.index, var.dns_zone)}"
  type = "A"
  ttl  = 300

  # private IPv4 address for etcd
  rrdatas = ["${element(google_compute_instance.controllers.*.network_interface.0.network_ip, count.index)}"]
}

# Zones in the region
data "google_compute_zones" "all" {
  region = "${var.region}"
}

locals {
  # TCP proxy load balancers require a fixed number of zonal backends. Spread
  # controllers over up to 3 zones, since all GCP regions have at least 3.
  zones = "${slice(data.google_compute_zones.all.names, 0, 3)}"

  controllers_ipv4_public = ["${google_compute_instance.controllers.*.network_interface.0.access_config.0.nat_ip}"]
}
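
`slice(list, from, to)` is exclusive of `to`, so a region reporting four zones still yields exactly three zonal backends. A sketch with assumed zone names:

```hcl
output "zones_example" {
  # indexes 0, 1, 2 are kept; index 3 (us-central1-f) is dropped
  value = "${slice(list("us-central1-a", "us-central1-b", "us-central1-c", "us-central1-f"), 0, 3)}"

  # => ["us-central1-a", "us-central1-b", "us-central1-c"]
}
```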

# Controller instances
resource "google_compute_instance" "controllers" {
  count = "${var.controller_count}"

  name         = "${var.cluster_name}-controller-${count.index}"
  zone         = "${element(local.zones, count.index)}"
  machine_type = "${var.controller_type}"

  metadata = {
    user-data = "${element(data.template_file.controller-cloudinit.*.rendered, count.index)}"
  }

  boot_disk {
    auto_delete = true

    initialize_params {
      image = "${var.os_image}"
      size  = "${var.disk_size}"
    }
  }

  network_interface {
    network = "${google_compute_network.network.name}"

    # Ephemeral external IP
    access_config = {}
  }

  can_ip_forward = true
  tags           = ["${var.cluster_name}-controller"]

  lifecycle {
    ignore_changes = [
      "metadata",
    ]
  }
}

# Controller Cloud-Init
data "template_file" "controller-cloudinit" {
  count = "${var.controller_count}"

  template = "${file("${path.module}/cloudinit/controller.yaml.tmpl")}"

  vars = {
    # Cannot use cyclic dependencies on controllers or their DNS records
    etcd_name   = "etcd${count.index}"
    etcd_domain = "${var.cluster_name}-etcd${count.index}.${var.dns_zone}"

    # etcd0=https://cluster-etcd0.example.com,etcd1=https://cluster-etcd1.example.com,...
    etcd_initial_cluster = "${join(",", data.template_file.etcds.*.rendered)}"

    kubeconfig             = "${indent(6, module.bootkube.kubeconfig-kubelet)}"
    ssh_authorized_key     = "${var.ssh_authorized_key}"
    cluster_dns_service_ip = "${cidrhost(var.service_cidr, 10)}"
    cluster_domain_suffix  = "${var.cluster_domain_suffix}"
  }
}

data "template_file" "etcds" {
  count    = "${var.controller_count}"
  template = "etcd$${index}=https://$${cluster_name}-etcd$${index}.$${dns_zone}:2380"

  vars = {
    index        = "${count.index}"
    cluster_name = "${var.cluster_name}"
    dns_zone     = "${var.dns_zone}"
  }
}

@@ -1,122 +0,0 @@

# Static IPv4 address for Ingress Load Balancing
resource "google_compute_global_address" "ingress-ipv4" {
  name       = "${var.cluster_name}-ingress-ipv4"
  ip_version = "IPV4"
}

# Static IPv6 address for Ingress Load Balancing
resource "google_compute_global_address" "ingress-ipv6" {
  name       = "${var.cluster_name}-ingress-ipv6"
  ip_version = "IPV6"
}

# Forward IPv4 TCP traffic to the HTTP proxy load balancer
# Google Cloud does not allow TCP proxies for port 80. Must use HTTP proxy.
resource "google_compute_global_forwarding_rule" "ingress-http-ipv4" {
  name        = "${var.cluster_name}-ingress-http-ipv4"
  ip_address  = "${google_compute_global_address.ingress-ipv4.address}"
  ip_protocol = "TCP"
  port_range  = "80"
  target      = "${google_compute_target_http_proxy.ingress-http.self_link}"
}

# Forward IPv4 TCP traffic to the TCP proxy load balancer
resource "google_compute_global_forwarding_rule" "ingress-https-ipv4" {
  name        = "${var.cluster_name}-ingress-https-ipv4"
  ip_address  = "${google_compute_global_address.ingress-ipv4.address}"
  ip_protocol = "TCP"
  port_range  = "443"
  target      = "${google_compute_target_tcp_proxy.ingress-https.self_link}"
}

# Forward IPv6 TCP traffic to the HTTP proxy load balancer
# Google Cloud does not allow TCP proxies for port 80. Must use HTTP proxy.
resource "google_compute_global_forwarding_rule" "ingress-http-ipv6" {
  name        = "${var.cluster_name}-ingress-http-ipv6"
  ip_address  = "${google_compute_global_address.ingress-ipv6.address}"
  ip_protocol = "TCP"
  port_range  = "80"
  target      = "${google_compute_target_http_proxy.ingress-http.self_link}"
}

# Forward IPv6 TCP traffic to the TCP proxy load balancer
resource "google_compute_global_forwarding_rule" "ingress-https-ipv6" {
  name        = "${var.cluster_name}-ingress-https-ipv6"
  ip_address  = "${google_compute_global_address.ingress-ipv6.address}"
  ip_protocol = "TCP"
  port_range  = "443"
  target      = "${google_compute_target_tcp_proxy.ingress-https.self_link}"
}

# HTTP proxy load balancer for ingress controllers
resource "google_compute_target_http_proxy" "ingress-http" {
  name        = "${var.cluster_name}-ingress-http"
  description = "Distribute HTTP load across ${var.cluster_name} workers"
  url_map     = "${google_compute_url_map.ingress-http.self_link}"
}

# TCP proxy load balancer for ingress controllers
resource "google_compute_target_tcp_proxy" "ingress-https" {
  name            = "${var.cluster_name}-ingress-https"
  description     = "Distribute HTTPS load across ${var.cluster_name} workers"
  backend_service = "${google_compute_backend_service.ingress-https.self_link}"
}

# HTTP URL Map (required)
resource "google_compute_url_map" "ingress-http" {
  name = "${var.cluster_name}-ingress-http"

  # Do not add host/path rules for applications here. Use Ingress resources.
  default_service = "${google_compute_backend_service.ingress-http.self_link}"
}

# Backend service backed by managed instance group of workers
resource "google_compute_backend_service" "ingress-http" {
  name        = "${var.cluster_name}-ingress-http"
  description = "${var.cluster_name} ingress service"

  protocol         = "HTTP"
  port_name        = "http"
  session_affinity = "NONE"
  timeout_sec      = "60"

  backend {
    group = "${module.workers.instance_group}"
  }

  health_checks = ["${google_compute_health_check.ingress.self_link}"]
}

# Backend service backed by managed instance group of workers
resource "google_compute_backend_service" "ingress-https" {
  name        = "${var.cluster_name}-ingress-https"
  description = "${var.cluster_name} ingress service"

  protocol         = "TCP"
  port_name        = "https"
  session_affinity = "NONE"
  timeout_sec      = "60"

  backend {
    group = "${module.workers.instance_group}"
  }

  health_checks = ["${google_compute_health_check.ingress.self_link}"]
}

# Ingress HTTP Health Check
resource "google_compute_health_check" "ingress" {
  name        = "${var.cluster_name}-ingress-health"
  description = "Health check for Ingress controller"

  timeout_sec        = 5
  check_interval_sec = 5

  healthy_threshold   = 2
  unhealthy_threshold = 4

  http_health_check {
    port         = 10254
    request_path = "/healthz"
  }
}

@@ -1,153 +0,0 @@

resource "google_compute_network" "network" {
  name                    = "${var.cluster_name}"
  description             = "Network for the ${var.cluster_name} cluster"
  auto_create_subnetworks = true
}

resource "google_compute_firewall" "allow-ssh" {
  name    = "${var.cluster_name}-allow-ssh"
  network = "${google_compute_network.network.name}"

  allow {
    protocol = "tcp"
    ports    = [22]
  }

  source_ranges = ["0.0.0.0/0"]
  target_tags   = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
}

resource "google_compute_firewall" "internal-etcd" {
  name    = "${var.cluster_name}-internal-etcd"
  network = "${google_compute_network.network.name}"

  allow {
    protocol = "tcp"
    ports    = [2379, 2380]
  }

  source_tags = ["${var.cluster_name}-controller"]
  target_tags = ["${var.cluster_name}-controller"]
}

# Allow Prometheus to scrape etcd metrics
resource "google_compute_firewall" "internal-etcd-metrics" {
  name    = "${var.cluster_name}-internal-etcd-metrics"
  network = "${google_compute_network.network.name}"

  allow {
    protocol = "tcp"
    ports    = [2381]
  }

  source_tags = ["${var.cluster_name}-worker"]
  target_tags = ["${var.cluster_name}-controller"]
}

resource "google_compute_firewall" "allow-apiserver" {
  name    = "${var.cluster_name}-allow-apiserver"
  network = "${google_compute_network.network.name}"

  allow {
    protocol = "tcp"
    ports    = [443]
  }

  source_ranges = ["0.0.0.0/0"]
  target_tags   = ["${var.cluster_name}-controller"]
}

# BGP and IPIP
# https://docs.projectcalico.org/latest/reference/public-cloud/gce
resource "google_compute_firewall" "internal-bgp" {
  count = "${var.networking != "flannel" ? 1 : 0}"

  name    = "${var.cluster_name}-internal-bgp"
  network = "${google_compute_network.network.name}"

  allow {
    protocol = "tcp"
    ports    = ["179"]
  }

  allow {
    protocol = "ipip"
  }

  source_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
  target_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
}

# flannel VXLAN
resource "google_compute_firewall" "internal-vxlan" {
  count = "${var.networking == "flannel" ? 1 : 0}"

  name    = "${var.cluster_name}-internal-vxlan"
  network = "${google_compute_network.network.name}"

  allow {
    protocol = "udp"
    ports    = [4789]
  }

  source_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
  target_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
}

# Allow Prometheus to scrape node-exporter daemonset
resource "google_compute_firewall" "internal-node-exporter" {
  name    = "${var.cluster_name}-internal-node-exporter"
  network = "${google_compute_network.network.name}"

  allow {
    protocol = "tcp"
    ports    = [9100]
  }

  source_tags = ["${var.cluster_name}-worker"]
  target_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
}

# Allow apiserver to access kubelets for exec, log, port-forward
resource "google_compute_firewall" "internal-kubelet" {
  name    = "${var.cluster_name}-internal-kubelet"
  network = "${google_compute_network.network.name}"

  allow {
    protocol = "tcp"
    ports    = [10250]
  }

  # allow Prometheus to scrape kubelet metrics too
  source_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
  target_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
}

# Workers

resource "google_compute_firewall" "allow-ingress" {
  name    = "${var.cluster_name}-allow-ingress"
  network = "${google_compute_network.network.name}"

  allow {
    protocol = "tcp"
    ports    = [80, 443]
  }

  source_ranges = ["0.0.0.0/0"]
  target_tags   = ["${var.cluster_name}-worker"]
}

resource "google_compute_firewall" "google-health-checks" {
  name    = "${var.cluster_name}-google-health-checks"
  network = "${google_compute_network.network.name}"

  allow {
    protocol = "tcp"
    ports    = [10254]
  }

  # https://cloud.google.com/compute/docs/load-balancing/tcp-ssl/tcp-proxy#health-checking
  source_ranges = ["130.211.0.0/22", "35.191.0.0/16"]
  target_tags   = ["${var.cluster_name}-worker"]
}

@@ -1,38 +0,0 @@

output "kubeconfig-admin" {
  value = "${module.bootkube.kubeconfig-admin}"
}

# Outputs for Kubernetes Ingress

output "ingress_static_ipv4" {
  description = "Global IPv4 address for proxy load balancing to the nearest Ingress controller"
  value       = "${google_compute_global_address.ingress-ipv4.address}"
}

output "ingress_static_ipv6" {
  description = "Global IPv6 address for proxy load balancing to the nearest Ingress controller"
  value       = "${google_compute_global_address.ingress-ipv6.address}"
}

# Outputs for worker pools

output "network_name" {
  value = "${google_compute_network.network.name}"
}

output "kubeconfig" {
  value = "${module.bootkube.kubeconfig-kubelet}"
}

# Outputs for custom firewalling

output "network_self_link" {
  value = "${google_compute_network.network.self_link}"
}

# Outputs for custom load balancing

output "worker_instance_group" {
  description = "Full URL of the worker managed instance group"
  value       = "${module.workers.instance_group}"
}
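
The `worker_instance_group` output was intended for custom load balancing. A minimal sketch of attaching it to a user-defined backend service; the module name `yavin` and the health check below are assumptions for illustration:

```hcl
# assumed TCP health check for the sketch
resource "google_compute_health_check" "custom" {
  name = "custom-tcp-health"

  tcp_health_check {
    port = "443"
  }
}

resource "google_compute_backend_service" "custom" {
  name      = "custom-tcp"
  protocol  = "TCP"
  port_name = "https"

  backend {
    # full URL of the managed instance group output by the cluster module
    group = "${module.yavin.worker_instance_group}"
  }

  health_checks = ["${google_compute_health_check.custom.self_link}"]
}
```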

@@ -1,25 +0,0 @@

# Terraform version and plugin versions

terraform {
  required_version = ">= 0.11.0"
}

provider "google" {
  version = ">= 1.19, < 3.0"
}

provider "local" {
  version = "~> 1.0"
}

provider "null" {
  version = "~> 1.0"
}

provider "template" {
  version = "~> 1.0"
}

provider "tls" {
  version = "~> 1.0"
}

@@ -1,89 +0,0 @@

# Secure copy etcd TLS assets to controllers.
resource "null_resource" "copy-controller-secrets" {
  count = "${var.controller_count}"

  connection {
    type    = "ssh"
    host    = "${element(local.controllers_ipv4_public, count.index)}"
    user    = "fedora"
    timeout = "15m"
  }

  provisioner "file" {
    content     = "${module.bootkube.etcd_ca_cert}"
    destination = "$HOME/etcd-client-ca.crt"
  }

  provisioner "file" {
    content     = "${module.bootkube.etcd_client_cert}"
    destination = "$HOME/etcd-client.crt"
  }

  provisioner "file" {
    content     = "${module.bootkube.etcd_client_key}"
    destination = "$HOME/etcd-client.key"
  }

  provisioner "file" {
    content     = "${module.bootkube.etcd_server_cert}"
    destination = "$HOME/etcd-server.crt"
  }

  provisioner "file" {
    content     = "${module.bootkube.etcd_server_key}"
    destination = "$HOME/etcd-server.key"
  }

  provisioner "file" {
    content     = "${module.bootkube.etcd_peer_cert}"
    destination = "$HOME/etcd-peer.crt"
  }

  provisioner "file" {
    content     = "${module.bootkube.etcd_peer_key}"
    destination = "$HOME/etcd-peer.key"
  }

  provisioner "remote-exec" {
    inline = [
      "sudo mkdir -p /etc/ssl/etcd/etcd",
      "sudo mv etcd-client* /etc/ssl/etcd/",
      "sudo cp /etc/ssl/etcd/etcd-client-ca.crt /etc/ssl/etcd/etcd/server-ca.crt",
      "sudo mv etcd-server.crt /etc/ssl/etcd/etcd/server.crt",
      "sudo mv etcd-server.key /etc/ssl/etcd/etcd/server.key",
      "sudo cp /etc/ssl/etcd/etcd-client-ca.crt /etc/ssl/etcd/etcd/peer-ca.crt",
      "sudo mv etcd-peer.crt /etc/ssl/etcd/etcd/peer.crt",
      "sudo mv etcd-peer.key /etc/ssl/etcd/etcd/peer.key",
    ]
  }
}

# Secure copy bootkube assets to ONE controller and start bootkube to perform
# one-time self-hosted cluster bootstrapping.
resource "null_resource" "bootkube-start" {
  depends_on = [
    "null_resource.copy-controller-secrets",
    "module.workers",
    "google_dns_record_set.apiserver",
  ]

  connection {
    type    = "ssh"
    host    = "${element(local.controllers_ipv4_public, 0)}"
    user    = "fedora"
    timeout = "15m"
  }

  provisioner "file" {
    source      = "${var.asset_dir}"
    destination = "$HOME/assets"
  }

  provisioner "remote-exec" {
    inline = [
      "while [ ! -f /var/lib/cloud/instance/boot-finished ]; do sleep 4; done",
      "sudo mv $HOME/assets /var/lib/bootkube",
      "sudo systemctl start bootkube",
    ]
  }
}

@@ -1,110 +0,0 @@

variable "cluster_name" {
  type        = "string"
  description = "Unique cluster name (prepended to dns_zone)"
}

# Google Cloud

variable "region" {
  type        = "string"
  description = "Google Cloud Region (e.g. us-central1, see `gcloud compute regions list`)"
}

variable "dns_zone" {
  type        = "string"
  description = "Google Cloud DNS Zone (e.g. google-cloud.example.com)"
}

variable "dns_zone_name" {
  type        = "string"
  description = "Google Cloud DNS Zone name (e.g. example-zone)"
}

# instances

variable "controller_count" {
  type        = "string"
  default     = "1"
  description = "Number of controllers (i.e. masters)"
}

variable "worker_count" {
  type        = "string"
  default     = "1"
  description = "Number of workers"
}

variable "controller_type" {
  type        = "string"
  default     = "n1-standard-1"
  description = "Machine type for controllers (see `gcloud compute machine-types list`)"
}

variable "worker_type" {
  type        = "string"
  default     = "n1-standard-1"
description = "Machine type for controllers (see `gcloud compute machine-types list`)"
}

variable "os_image" {
  type        = "string"
  description = "Custom Fedora Atomic image"
}

variable "disk_size" {
  type        = "string"
  default     = "40"
  description = "Size of the disk in GB"
}

variable "worker_preemptible" {
  type        = "string"
  default     = "false"
  description = "If enabled, Compute Engine will terminate workers randomly within 24 hours"
}

# configuration

variable "ssh_authorized_key" {
  type        = "string"
  description = "SSH public key for user 'fedora'"
}

variable "asset_dir" {
  description = "Path to a directory where generated assets should be placed (contains secrets)"
  type        = "string"
}

variable "networking" {
  description = "Choice of networking provider (flannel or calico)"
  type        = "string"
  default     = "calico"
}

variable "pod_cidr" {
  description = "CIDR IPv4 range to assign Kubernetes pods"
  type        = "string"
  default     = "10.2.0.0/16"
}

variable "service_cidr" {
  description = <<EOD
CIDR IPv4 range to assign Kubernetes services.
The 1st IP will be reserved for kube_apiserver, the 10th IP will be reserved for coredns.
EOD

  type    = "string"
  default = "10.3.0.0/16"
}
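
As a worked example of those reservations with the default `10.3.0.0/16` (illustrative only):

```hcl
output "reserved_service_ips" {
  # cidrhost(prefix, hostnum) returns the hostnum-th address in the range
  value = "${list(cidrhost("10.3.0.0/16", 1), cidrhost("10.3.0.0/16", 10))}"

  # => ["10.3.0.1", "10.3.0.10"] (kube-apiserver service, coredns)
}
```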

variable "cluster_domain_suffix" {
  description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local)"
  type        = "string"
  default     = "cluster.local"
}

variable "enable_reporting" {
  type        = "string"
  description = "Enable usage or analytics reporting to upstreams (Calico)"
  default     = "false"
}

@@ -1,20 +0,0 @@

module "workers" {
  source       = "./workers"
  name         = "${var.cluster_name}"
  cluster_name = "${var.cluster_name}"

  # GCE
  region       = "${var.region}"
  network      = "${google_compute_network.network.name}"
  count        = "${var.worker_count}"
  machine_type = "${var.worker_type}"
  os_image     = "${var.os_image}"
  disk_size    = "${var.disk_size}"
  preemptible  = "${var.worker_preemptible}"

  # configuration
  kubeconfig            = "${module.bootkube.kubeconfig-kubelet}"
  ssh_authorized_key    = "${var.ssh_authorized_key}"
  service_cidr          = "${var.service_cidr}"
  cluster_domain_suffix = "${var.cluster_domain_suffix}"
}

@@ -1,66 +0,0 @@

#cloud-config
write_files:
  - path: /etc/systemd/system/kubelet.service.d/10-typhoon.conf
    content: |
      [Unit]
      Wants=rpc-statd.service
      [Service]
      ExecStartPre=/bin/mkdir -p /opt/cni/bin
      ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
      ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
      ExecStartPre=/bin/mkdir -p /var/lib/cni
      ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins
      ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
      Restart=always
      RestartSec=10
  - path: /etc/kubernetes/kubelet.conf
    content: |
      ARGS="--anonymous-auth=false \
      --authentication-token-webhook \
      --authorization-mode=Webhook \
      --client-ca-file=/etc/kubernetes/ca.crt \
      --cluster_dns=${cluster_dns_service_ip} \
      --cluster_domain=${cluster_domain_suffix} \
      --cni-conf-dir=/etc/kubernetes/cni/net.d \
      --exit-on-lock-contention \
      --kubeconfig=/etc/kubernetes/kubeconfig \
      --lock-file=/var/run/lock/kubelet.lock \
      --network-plugin=cni \
      --node-labels=node-role.kubernetes.io/node \
      --pod-manifest-path=/etc/kubernetes/manifests \
      --read-only-port=0 \
      --volume-plugin-dir=/var/lib/kubelet/volumeplugins"
  - path: /etc/kubernetes/kubeconfig
    permissions: '0644'
    content: |
      ${kubeconfig}
  - path: /etc/NetworkManager/conf.d/typhoon.conf
    content: |
      [main]
      plugins=keyfile
      [keyfile]
      unmanaged-devices=interface-name:cali*;interface-name:tunl*
  - path: /etc/selinux/config
    owner: root:root
    permissions: '0644'
    content: |
      SELINUX=permissive
      SELINUXTYPE=targeted
bootcmd:
  - [setenforce, Permissive]
  - [systemctl, disable, firewalld, --now]
  # https://github.com/kubernetes/kubernetes/issues/60869
  - [modprobe, ip_vs]
runcmd:
  - [systemctl, daemon-reload]
  - [systemctl, restart, NetworkManager]
  - "atomic install --system --name=kubelet quay.io/poseidon/kubelet:v1.14.1"
  - [systemctl, start, --no-block, kubelet.service]
users:
  - default
  - name: fedora
    gecos: Fedora Admin
    sudo: ALL=(ALL) NOPASSWD:ALL
    groups: wheel,adm,systemd-journal,docker
    ssh-authorized-keys:
      - "${ssh_authorized_key}"

@@ -1,4 +0,0 @@

output "instance_group" {
  description = "Full URL of the worker managed instance group"
  value       = "${google_compute_region_instance_group_manager.workers.instance_group}"
}

@@ -1,94 +0,0 @@

variable "name" {
  type        = "string"
  description = "Unique name for the worker pool"
}

variable "cluster_name" {
  type        = "string"
  description = "Must be set to `cluster_name` of cluster"
}

# Google Cloud

variable "region" {
  type        = "string"
  description = "Must be set to `region` of cluster"
}

variable "network" {
  type        = "string"
  description = "Must be set to `network_name` output by cluster"
}

# instances

variable "count" {
  type        = "string"
  default     = "1"
  description = "Number of worker compute instances the instance group should manage"
}

variable "machine_type" {
  type        = "string"
  default     = "n1-standard-1"
  description = "Machine type for compute instances (e.g. gcloud compute machine-types list)"
}

variable "os_image" {
  type        = "string"
  description = "Custom Fedora Atomic image"
}

variable "disk_size" {
  type        = "string"
  default     = "40"
  description = "Size of the disk in GB"
}

variable "preemptible" {
  type        = "string"
  default     = "false"
  description = "If enabled, Compute Engine will terminate instances randomly within 24 hours"
}

# configuration

variable "kubeconfig" {
  type        = "string"
  description = "Must be set to `kubeconfig` output by cluster"
}

variable "ssh_authorized_key" {
  type        = "string"
  description = "SSH public key for user 'fedora'"
}

variable "service_cidr" {
  description = <<EOD
CIDR IPv4 range to assign Kubernetes services.
The 1st IP will be reserved for kube_apiserver, the 10th IP will be reserved for coredns.
EOD

  type    = "string"
  default = "10.3.0.0/16"
}

variable "cluster_domain_suffix" {
  description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local)"
  type        = "string"
  default     = "cluster.local"
}

# unofficial, undocumented, unsupported, temporary

variable "accelerator_type" {
  type        = "string"
  default     = ""
  description = "Google Compute Engine accelerator type (e.g. nvidia-tesla-k80, see gcloud compute accelerator-types list)"
}

variable "accelerator_count" {
  type        = "string"
  default     = "0"
  description = "Number of compute engine accelerators"
}

@@ -1,77 +0,0 @@

# Regional managed instance group of workers
resource "google_compute_region_instance_group_manager" "workers" {
  name        = "${var.name}-worker-group"
  description = "Compute instance group of ${var.name} workers"

  # instance name prefix for instances in the group
  base_instance_name = "${var.name}-worker"
  instance_template  = "${google_compute_instance_template.worker.self_link}"
  region             = "${var.region}"

  target_size = "${var.count}"

  named_port {
    name = "http"
    port = "80"
  }

  named_port {
    name = "https"
    port = "443"
  }
}

# Worker instance template
resource "google_compute_instance_template" "worker" {
  name_prefix  = "${var.name}-worker-"
  description  = "Worker Instance template"
  machine_type = "${var.machine_type}"

  metadata = {
    user-data = "${data.template_file.worker-cloudinit.rendered}"
  }

  scheduling {
    automatic_restart = "${var.preemptible ? false : true}"
    preemptible       = "${var.preemptible}"
  }

  disk {
    auto_delete  = true
    boot         = true
    source_image = "${var.os_image}"
    disk_size_gb = "${var.disk_size}"
  }

  network_interface {
    network = "${var.network}"

    # Ephemeral external IP
    access_config = {}
  }

  can_ip_forward = true
  tags           = ["worker", "${var.cluster_name}-worker", "${var.name}-worker"]

  guest_accelerator {
    count = "${var.accelerator_count}"
    type  = "${var.accelerator_type}"
  }

  lifecycle {
    # To update an Instance Template, Terraform should replace the existing resource
    create_before_destroy = true
  }
}

# Worker Cloud-Init
data "template_file" "worker-cloudinit" {
  template = "${file("${path.module}/cloudinit/worker.yaml.tmpl")}"

  vars = {
    kubeconfig             = "${indent(6, var.kubeconfig)}"
    ssh_authorized_key     = "${var.ssh_authorized_key}"
    cluster_dns_service_ip = "${cidrhost(var.service_cidr, 10)}"
    cluster_domain_suffix  = "${var.cluster_domain_suffix}"
  }
}