Rename container-linux modules to flatcar-linux

* CoreOS Container Linux support was deprecated in v1.18.3
* Continue transitioning docs and modules from supporting both
the CoreOS and Flatcar "variants" of Container Linux to
supporting Flatcar Linux and equivalents

Action Required: Update the Flatcar Linux modules' `source`
to replace `container-linux` with `flatcar-linux` (i.e.
`s/container-linux/flatcar-linux/`). See docs for examples,
or the sketch below.
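A hypothetical sketch (the cluster name `nemo`, the platform path, and the ref are illustrative; only `container-linux` changes to `flatcar-linux`):

module "nemo" {
  # before: source = "git::https://github.com/poseidon/typhoon//digital-ocean/container-linux/kubernetes?ref=v1.19.3"
  source = "git::https://github.com/poseidon/typhoon//digital-ocean/flatcar-linux/kubernetes?ref=v1.19.3"

  # ...other arguments unchanged
}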
commit 7c3f3ab6d0 (parent a99a990d49)
Author: Dalton Hubble
Date: 2020-10-20 22:47:19 -07:00
98 changed files with 58 additions and 57 deletions


@@ -0,0 +1,23 @@
The MIT License (MIT)
Copyright (c) 2017 Typhoon Authors
Copyright (c) 2017 Dalton Hubble
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.


@@ -0,0 +1,23 @@
# Typhoon <img align="right" src="https://storage.googleapis.com/poseidon/typhoon-logo.png">
Typhoon is a minimal and free Kubernetes distribution.
* Minimal, stable base Kubernetes distribution
* Declarative infrastructure and configuration
* Free (freedom and cost) and privacy-respecting
* Practical for labs, datacenters, and clouds
Typhoon distributes upstream Kubernetes, architectural conventions, and cluster addons, much like a GNU/Linux distribution provides the Linux kernel and userspace components.
## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>
* Kubernetes v1.19.3 (upstream)
* Single or multi-master, [Calico](https://www.projectcalico.org/) or [Cilium](https://github.com/cilium/cilium) or [flannel](https://github.com/coreos/flannel) networking
* On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
* Advanced features like [snippets](https://typhoon.psdn.io/advanced/customization/#hosts) customization
* Ready for Ingress, Prometheus, Grafana, CSI, and other [addons](https://typhoon.psdn.io/addons/overview/)
## Docs
Please see the [official docs](https://typhoon.psdn.io) and the Digital Ocean [tutorial](https://typhoon.psdn.io/cl/digital-ocean/).


@@ -0,0 +1,21 @@
# Kubernetes assets (kubeconfig, manifests)
module "bootstrap" {
source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=9037d7311b949439b217cd9c657d4500eab3e16b"
cluster_name = var.cluster_name
api_servers = [format("%s.%s", var.cluster_name, var.dns_zone)]
etcd_servers = digitalocean_record.etcds.*.fqdn
networking = var.networking
# only effective with Calico networking
network_encapsulation = "vxlan"
network_mtu = "1450"
pod_cidr = var.pod_cidr
service_cidr = var.service_cidr
cluster_domain_suffix = var.cluster_domain_suffix
enable_reporting = var.enable_reporting
enable_aggregation = var.enable_aggregation
}


@@ -0,0 +1,196 @@
---
systemd:
units:
- name: etcd-member.service
enabled: true
dropins:
- name: 40-etcd-cluster.conf
contents: |
[Service]
Environment="ETCD_IMAGE_TAG=v3.4.12"
Environment="ETCD_IMAGE_URL=docker://quay.io/coreos/etcd"
Environment="RKT_RUN_ARGS=--insecure-options=image"
Environment="ETCD_NAME=${etcd_name}"
Environment="ETCD_ADVERTISE_CLIENT_URLS=https://${etcd_domain}:2379"
Environment="ETCD_INITIAL_ADVERTISE_PEER_URLS=https://${etcd_domain}:2380"
Environment="ETCD_LISTEN_CLIENT_URLS=https://0.0.0.0:2379"
Environment="ETCD_LISTEN_PEER_URLS=https://0.0.0.0:2380"
Environment="ETCD_LISTEN_METRICS_URLS=http://0.0.0.0:2381"
Environment="ETCD_INITIAL_CLUSTER=${etcd_initial_cluster}"
Environment="ETCD_STRICT_RECONFIG_CHECK=true"
Environment="ETCD_SSL_DIR=/etc/ssl/etcd"
Environment="ETCD_TRUSTED_CA_FILE=/etc/ssl/certs/etcd/server-ca.crt"
Environment="ETCD_CERT_FILE=/etc/ssl/certs/etcd/server.crt"
Environment="ETCD_KEY_FILE=/etc/ssl/certs/etcd/server.key"
Environment="ETCD_CLIENT_CERT_AUTH=true"
Environment="ETCD_PEER_TRUSTED_CA_FILE=/etc/ssl/certs/etcd/peer-ca.crt"
Environment="ETCD_PEER_CERT_FILE=/etc/ssl/certs/etcd/peer.crt"
Environment="ETCD_PEER_KEY_FILE=/etc/ssl/certs/etcd/peer.key"
Environment="ETCD_PEER_CLIENT_CERT_AUTH=true"
- name: docker.service
enabled: true
- name: locksmithd.service
mask: true
- name: kubelet.path
enabled: true
contents: |
[Unit]
Description=Watch for kubeconfig
[Path]
PathExists=/etc/kubernetes/kubeconfig
[Install]
WantedBy=multi-user.target
- name: wait-for-dns.service
enabled: true
contents: |
[Unit]
Description=Wait for DNS entries
Wants=systemd-resolved.service
Before=kubelet.service
[Service]
Type=oneshot
RemainAfterExit=true
ExecStart=/bin/sh -c 'while ! /usr/bin/grep '^[^#[:space:]]' /etc/resolv.conf > /dev/null; do sleep 1; done'
[Install]
RequiredBy=kubelet.service
RequiredBy=etcd-member.service
- name: kubelet.service
contents: |
[Unit]
Description=Kubelet
Requires=docker.service
After=docker.service
Requires=coreos-metadata.service
After=coreos-metadata.service
Wants=rpc-statd.service
[Service]
Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.19.3
EnvironmentFile=/run/metadata/coreos
ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
ExecStartPre=/bin/mkdir -p /opt/cni/bin
ExecStartPre=/bin/mkdir -p /var/lib/calico
ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins
ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
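# Podman, rkt, or runc run container processes, whereas docker run
# is a client to a daemon and requires workarounds to use within a
# systemd unit. https://github.com/moby/moby/issues/6791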
ExecStartPre=/usr/bin/docker run -d \
--name kubelet \
--privileged \
--pid host \
--network host \
-v /etc/kubernetes:/etc/kubernetes:ro \
-v /etc/machine-id:/etc/machine-id:ro \
-v /usr/lib/os-release:/etc/os-release:ro \
-v /lib/modules:/lib/modules:ro \
-v /run:/run \
-v /sys/fs/cgroup:/sys/fs/cgroup:ro \
-v /sys/fs/cgroup/systemd:/sys/fs/cgroup/systemd \
-v /var/lib/calico:/var/lib/calico:ro \
-v /var/lib/docker:/var/lib/docker \
-v /var/lib/kubelet:/var/lib/kubelet:rshared \
-v /var/log:/var/log \
-v /opt/cni/bin:/opt/cni/bin \
$${KUBELET_IMAGE} \
--anonymous-auth=false \
--authentication-token-webhook \
--authorization-mode=Webhook \
--bootstrap-kubeconfig=/etc/kubernetes/kubeconfig \
--client-ca-file=/etc/kubernetes/ca.crt \
--cluster_dns=${cluster_dns_service_ip} \
--cluster_domain=${cluster_domain_suffix} \
--cni-conf-dir=/etc/kubernetes/cni/net.d \
--healthz-port=0 \
--hostname-override=$${COREOS_DIGITALOCEAN_IPV4_PRIVATE_0} \
--kubeconfig=/var/lib/kubelet/kubeconfig \
--network-plugin=cni \
--node-labels=node.kubernetes.io/controller="true" \
--pod-manifest-path=/etc/kubernetes/manifests \
--read-only-port=0 \
--register-with-taints=node-role.kubernetes.io/controller=:NoSchedule \
--rotate-certificates \
--volume-plugin-dir=/var/lib/kubelet/volumeplugins
ExecStart=docker logs -f kubelet
ExecStop=docker stop kubelet
ExecStopPost=docker rm kubelet
Restart=always
RestartSec=10
[Install]
WantedBy=multi-user.target
- name: bootstrap.service
contents: |
[Unit]
Description=Kubernetes control plane
ConditionPathExists=!/opt/bootstrap/bootstrap.done
[Service]
Type=oneshot
RemainAfterExit=true
WorkingDirectory=/opt/bootstrap
ExecStart=/usr/bin/rkt run \
--trust-keys-from-https \
--volume config,kind=host,source=/etc/kubernetes/bootstrap-secrets \
--mount volume=config,target=/etc/kubernetes/secrets \
--volume assets,kind=host,source=/opt/bootstrap/assets \
--mount volume=assets,target=/assets \
--volume script,kind=host,source=/opt/bootstrap/apply \
--mount volume=script,target=/apply \
--insecure-options=image \
docker://quay.io/poseidon/kubelet:v1.19.3 \
--net=host \
--dns=host \
--exec=/apply
ExecStartPost=/bin/touch /opt/bootstrap/bootstrap.done
[Install]
WantedBy=multi-user.target
storage:
directories:
- path: /var/lib/etcd
filesystem: root
mode: 0700
overwrite: true
- path: /etc/kubernetes
filesystem: root
mode: 0755
files:
- path: /opt/bootstrap/layout
filesystem: root
mode: 0544
contents:
inline: |
#!/bin/bash -e
mkdir -p -- auth tls/etcd tls/k8s static-manifests manifests/coredns manifests-networking
awk '/#####/ {filename=$2; next} {print > filename}' assets
mkdir -p /etc/ssl/etcd/etcd
mkdir -p /etc/kubernetes/bootstrap-secrets
mv tls/etcd/{peer*,server*} /etc/ssl/etcd/etcd/
mv tls/etcd/etcd-client* /etc/kubernetes/bootstrap-secrets/
chown -R etcd:etcd /etc/ssl/etcd
chmod -R 500 /etc/ssl/etcd
chmod -R 700 /var/lib/etcd
mv auth/kubeconfig /etc/kubernetes/bootstrap-secrets/
mv tls/k8s/* /etc/kubernetes/bootstrap-secrets/
mkdir -p /etc/kubernetes/manifests
mv static-manifests/* /etc/kubernetes/manifests/
mkdir -p /opt/bootstrap/assets
mv manifests /opt/bootstrap/assets/manifests
mv manifests-networking/* /opt/bootstrap/assets/manifests/
rm -rf assets auth static-manifests tls manifests-networking
- path: /opt/bootstrap/apply
filesystem: root
mode: 0544
contents:
inline: |
#!/bin/bash -e
export KUBECONFIG=/etc/kubernetes/secrets/kubeconfig
until kubectl version; do
echo "Waiting for static pod control plane"
sleep 5
done
until kubectl apply -f /assets/manifests -R; do
echo "Retry applying manifests"
sleep 5
done
- path: /etc/sysctl.d/max-user-watches.conf
filesystem: root
mode: 0644
contents:
inline: |
fs.inotify.max_user_watches=16184


@@ -0,0 +1,117 @@
---
systemd:
units:
- name: docker.service
enabled: true
- name: locksmithd.service
mask: true
- name: kubelet.path
enabled: true
contents: |
[Unit]
Description=Watch for kubeconfig
[Path]
PathExists=/etc/kubernetes/kubeconfig
[Install]
WantedBy=multi-user.target
- name: wait-for-dns.service
enabled: true
contents: |
[Unit]
Description=Wait for DNS entries
Wants=systemd-resolved.service
Before=kubelet.service
[Service]
Type=oneshot
RemainAfterExit=true
ExecStart=/bin/sh -c 'while ! /usr/bin/grep '^[^#[:space:]]' /etc/resolv.conf > /dev/null; do sleep 1; done'
[Install]
RequiredBy=kubelet.service
- name: kubelet.service
contents: |
[Unit]
Description=Kubelet
Requires=docker.service
After=docker.service
Requires=coreos-metadata.service
After=coreos-metadata.service
Wants=rpc-statd.service
[Service]
Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.19.3
EnvironmentFile=/run/metadata/coreos
ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
ExecStartPre=/bin/mkdir -p /opt/cni/bin
ExecStartPre=/bin/mkdir -p /var/lib/calico
ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins
ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
# Podman, rkt, or runc run container processes, whereas docker run
# is a client to a daemon and requires workarounds to use within a
# systemd unit. https://github.com/moby/moby/issues/6791
ExecStartPre=/usr/bin/docker run -d \
--name kubelet \
--privileged \
--pid host \
--network host \
-v /etc/kubernetes:/etc/kubernetes:ro \
-v /etc/machine-id:/etc/machine-id:ro \
-v /usr/lib/os-release:/etc/os-release:ro \
-v /lib/modules:/lib/modules:ro \
-v /run:/run \
-v /sys/fs/cgroup:/sys/fs/cgroup:ro \
-v /sys/fs/cgroup/systemd:/sys/fs/cgroup/systemd \
-v /var/lib/calico:/var/lib/calico:ro \
-v /var/lib/docker:/var/lib/docker \
-v /var/lib/kubelet:/var/lib/kubelet:rshared \
-v /var/log:/var/log \
-v /opt/cni/bin:/opt/cni/bin \
$${KUBELET_IMAGE} \
--anonymous-auth=false \
--authentication-token-webhook \
--authorization-mode=Webhook \
--bootstrap-kubeconfig=/etc/kubernetes/kubeconfig \
--client-ca-file=/etc/kubernetes/ca.crt \
--cluster_dns=${cluster_dns_service_ip} \
--cluster_domain=${cluster_domain_suffix} \
--cni-conf-dir=/etc/kubernetes/cni/net.d \
--healthz-port=0 \
--hostname-override=$${COREOS_DIGITALOCEAN_IPV4_PRIVATE_0} \
--kubeconfig=/var/lib/kubelet/kubeconfig \
--network-plugin=cni \
--node-labels=node.kubernetes.io/node \
--pod-manifest-path=/etc/kubernetes/manifests \
--read-only-port=0 \
--rotate-certificates \
--volume-plugin-dir=/var/lib/kubelet/volumeplugins
ExecStart=docker logs -f kubelet
ExecStop=docker stop kubelet
ExecStopPost=docker rm kubelet
Restart=always
RestartSec=5
[Install]
WantedBy=multi-user.target
- name: delete-node.service
enabled: true
contents: |
[Unit]
Description=Delete Kubernetes node on shutdown
[Service]
Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.19.3
Type=oneshot
RemainAfterExit=true
ExecStart=/bin/true
ExecStop=/bin/bash -c '/usr/bin/docker run -v /var/lib/kubelet:/var/lib/kubelet:ro --entrypoint /usr/local/bin/kubectl $${KUBELET_IMAGE} --kubeconfig=/var/lib/kubelet/kubeconfig delete node $HOSTNAME'
[Install]
WantedBy=multi-user.target
storage:
directories:
- path: /etc/kubernetes
filesystem: root
mode: 0755
files:
- path: /etc/sysctl.d/max-user-watches.conf
filesystem: root
mode: 0644
contents:
inline: |
fs.inotify.max_user_watches=16184


@@ -0,0 +1,106 @@
locals {
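# Flatcar Linux is uploaded as a custom image, so no official DigitalOcean
# image slugs are listed; is_official_image stays false and gates the
# IPv6 features below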
official_images = []
is_official_image = contains(local.official_images, var.os_image)
}
# Controller Instance DNS records
resource "digitalocean_record" "controllers" {
count = var.controller_count
# DNS zone where record should be created
domain = var.dns_zone
# DNS record (will be prepended to domain)
name = var.cluster_name
type = "A"
ttl = 300
# IPv4 addresses of controllers
value = digitalocean_droplet.controllers.*.ipv4_address[count.index]
}
# Discrete DNS records for each controller's private IPv4 for etcd usage
resource "digitalocean_record" "etcds" {
count = var.controller_count
# DNS zone where record should be created
domain = var.dns_zone
# DNS record (will be prepended to domain)
name = "${var.cluster_name}-etcd${count.index}"
type = "A"
ttl = 300
# private IPv4 address for etcd
value = digitalocean_droplet.controllers.*.ipv4_address_private[count.index]
}
# Controller droplet instances
resource "digitalocean_droplet" "controllers" {
count = var.controller_count
name = "${var.cluster_name}-controller-${count.index}"
region = var.region
image = var.os_image
size = var.controller_type
# network
private_networking = true
vpc_uuid = digitalocean_vpc.network.id
# TODO: Only official DigitalOcean images support IPv6
ipv6 = false
user_data = data.ct_config.controller-ignitions.*.rendered[count.index]
ssh_keys = var.ssh_fingerprints
tags = [
digitalocean_tag.controllers.id,
]
lifecycle {
ignore_changes = [user_data]
}
}
# Tag to label controllers
resource "digitalocean_tag" "controllers" {
name = "${var.cluster_name}-controller"
}
# Controller Ignition configs
data "ct_config" "controller-ignitions" {
count = var.controller_count
content = data.template_file.controller-configs.*.rendered[count.index]
strict = true
snippets = var.controller_snippets
}
# Controller Container Linux configs
data "template_file" "controller-configs" {
count = var.controller_count
template = file("${path.module}/cl/controller.yaml")
vars = {
# Cannot use cyclic dependencies on controllers or their DNS records
etcd_name = "etcd${count.index}"
etcd_domain = "${var.cluster_name}-etcd${count.index}.${var.dns_zone}"
# etcd0=https://cluster-etcd0.example.com,etcd1=https://cluster-etcd1.example.com,...
etcd_initial_cluster = join(",", data.template_file.etcds.*.rendered)
cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
cluster_domain_suffix = var.cluster_domain_suffix
}
}
data "template_file" "etcds" {
count = var.controller_count
template = "etcd$${index}=https://$${cluster_name}-etcd$${index}.$${dns_zone}:2380"
vars = {
index = count.index
cluster_name = var.cluster_name
dns_zone = var.dns_zone
}
}


@@ -0,0 +1,148 @@
# Network VPC
resource "digitalocean_vpc" "network" {
name = var.cluster_name
region = var.region
description = "Network for ${var.cluster_name} cluster"
}
resource "digitalocean_firewall" "rules" {
name = var.cluster_name
tags = [
digitalocean_tag.controllers.name,
digitalocean_tag.workers.name
]
inbound_rule {
protocol = "icmp"
source_tags = [digitalocean_tag.controllers.name, digitalocean_tag.workers.name]
}
# allow ssh, internal flannel, internal node-exporter, internal kubelet
inbound_rule {
protocol = "tcp"
port_range = "22"
source_addresses = ["0.0.0.0/0", "::/0"]
}
# Cilium health
inbound_rule {
protocol = "tcp"
port_range = "4240"
source_tags = [digitalocean_tag.controllers.name, digitalocean_tag.workers.name]
}
# IANA vxlan (flannel, calico)
inbound_rule {
protocol = "udp"
port_range = "4789"
source_tags = [digitalocean_tag.controllers.name, digitalocean_tag.workers.name]
}
# Linux vxlan (Cilium)
inbound_rule {
protocol = "udp"
port_range = "8472"
source_tags = [digitalocean_tag.controllers.name, digitalocean_tag.workers.name]
}
# Allow Prometheus to scrape node-exporter
inbound_rule {
protocol = "tcp"
port_range = "9100"
source_tags = [digitalocean_tag.workers.name]
}
# Allow Prometheus to scrape kube-proxy
inbound_rule {
protocol = "tcp"
port_range = "10249"
source_tags = [digitalocean_tag.workers.name]
}
# Kubelet
inbound_rule {
protocol = "tcp"
port_range = "10250"
source_tags = [digitalocean_tag.controllers.name, digitalocean_tag.workers.name]
}
# allow all outbound traffic
outbound_rule {
protocol = "tcp"
port_range = "1-65535"
destination_addresses = ["0.0.0.0/0", "::/0"]
}
outbound_rule {
protocol = "udp"
port_range = "1-65535"
destination_addresses = ["0.0.0.0/0", "::/0"]
}
outbound_rule {
protocol = "icmp"
port_range = "1-65535"
destination_addresses = ["0.0.0.0/0", "::/0"]
}
}
resource "digitalocean_firewall" "controllers" {
name = "${var.cluster_name}-controllers"
tags = [digitalocean_tag.controllers.name]
# etcd
inbound_rule {
protocol = "tcp"
port_range = "2379-2380"
source_tags = [digitalocean_tag.controllers.name]
}
# etcd metrics
inbound_rule {
protocol = "tcp"
port_range = "2381"
source_tags = [digitalocean_tag.workers.name]
}
# kube-apiserver
inbound_rule {
protocol = "tcp"
port_range = "6443"
source_addresses = ["0.0.0.0/0", "::/0"]
}
# kube-scheduler metrics, kube-controller-manager metrics
inbound_rule {
protocol = "tcp"
port_range = "10251-10252"
source_tags = [digitalocean_tag.workers.name]
}
}
resource "digitalocean_firewall" "workers" {
name = "${var.cluster_name}-workers"
tags = [digitalocean_tag.workers.name]
# allow HTTP/HTTPS ingress
inbound_rule {
protocol = "tcp"
port_range = "80"
source_addresses = ["0.0.0.0/0", "::/0"]
}
inbound_rule {
protocol = "tcp"
port_range = "443"
source_addresses = ["0.0.0.0/0", "::/0"]
}
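# allow health checks of ingress-nginx (healthz on port 10254)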
inbound_rule {
protocol = "tcp"
port_range = "10254"
source_addresses = ["0.0.0.0/0"]
}
}


@@ -0,0 +1,62 @@
output "kubeconfig-admin" {
value = module.bootstrap.kubeconfig-admin
}
# Outputs for Kubernetes Ingress
output "controllers_dns" {
value = digitalocean_record.controllers[0].fqdn
}
output "workers_dns" {
# Multiple A and AAAA records with the same FQDN
value = digitalocean_record.workers-record-a[0].fqdn
}
output "controllers_ipv4" {
value = digitalocean_droplet.controllers.*.ipv4_address
}
output "controllers_ipv6" {
value = digitalocean_droplet.controllers.*.ipv6_address
}
output "workers_ipv4" {
value = digitalocean_droplet.workers.*.ipv4_address
}
output "workers_ipv6" {
value = digitalocean_droplet.workers.*.ipv6_address
}
# Outputs for worker pools
output "kubeconfig" {
value = module.bootstrap.kubeconfig-kubelet
}
# Outputs for custom firewalls
output "controller_tag" {
description = "Tag applied to controller droplets"
value = digitalocean_tag.controllers.name
}
output "worker_tag" {
description = "Tag applied to worker droplets"
value = digitalocean_tag.workers.name
}
# Outputs for custom load balancing
output "vpc_id" {
description = "ID of the cluster VPC"
value = digitalocean_vpc.network.id
}
# Outputs for debug
output "assets_dist" {
value = module.bootstrap.assets_dist
}
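The `controller_tag` and `worker_tag` outputs exist so custom firewall rules can target cluster droplets. A minimal sketch, assuming a cluster module named `nemo` and an arbitrary NodePort (both illustrative):

resource "digitalocean_firewall" "custom-rules" {
  name = "nemo-custom"
  tags = [module.nemo.worker_tag]

  # example: expose a NodePort service publicly
  inbound_rule {
    protocol         = "tcp"
    port_range       = "30080"
    source_addresses = ["0.0.0.0/0", "::/0"]
  }
}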


@@ -0,0 +1,87 @@
locals {
# format assets for distribution
assets_bundle = [
# header with the unpack location
for key, value in module.bootstrap.assets_dist :
format("##### %s\n%s", key, value)
]
}
# Secure copy assets to controllers. Activates kubelet.service
resource "null_resource" "copy-controller-secrets" {
count = var.controller_count
depends_on = [
module.bootstrap,
digitalocean_firewall.rules
]
connection {
type = "ssh"
host = digitalocean_droplet.controllers.*.ipv4_address[count.index]
user = "core"
timeout = "15m"
}
provisioner "file" {
content = module.bootstrap.kubeconfig-kubelet
destination = "$HOME/kubeconfig"
}
provisioner "file" {
content = join("\n", local.assets_bundle)
destination = "$HOME/assets"
}
provisioner "remote-exec" {
inline = [
"sudo mv $HOME/kubeconfig /etc/kubernetes/kubeconfig",
"sudo /opt/bootstrap/layout",
]
}
}
# Secure copy kubeconfig to all workers. Activates kubelet.service.
resource "null_resource" "copy-worker-secrets" {
count = var.worker_count
connection {
type = "ssh"
host = digitalocean_droplet.workers.*.ipv4_address[count.index]
user = "core"
timeout = "15m"
}
provisioner "file" {
content = module.bootstrap.kubeconfig-kubelet
destination = "$HOME/kubeconfig"
}
provisioner "remote-exec" {
inline = [
"sudo mv $HOME/kubeconfig /etc/kubernetes/kubeconfig",
]
}
}
# Connect to a controller to perform one-time cluster bootstrap.
resource "null_resource" "bootstrap" {
depends_on = [
null_resource.copy-controller-secrets,
null_resource.copy-worker-secrets,
]
connection {
type = "ssh"
host = digitalocean_droplet.controllers[0].ipv4_address
user = "core"
timeout = "15m"
}
provisioner "remote-exec" {
inline = [
"sudo systemctl start bootstrap",
]
}
}


@@ -0,0 +1,108 @@
variable "cluster_name" {
type = string
description = "Unique cluster name (prepended to dns_zone)"
}
# Digital Ocean
variable "region" {
type = string
description = "Digital Ocean region (e.g. nyc1, sfo2, fra1, tor1)"
}
variable "dns_zone" {
type = string
description = "Digital Ocean domain (i.e. DNS zone) (e.g. do.example.com)"
}
# instances
variable "controller_count" {
type = number
description = "Number of controllers (i.e. masters)"
default = 1
}
variable "worker_count" {
type = number
description = "Number of workers"
default = 1
}
variable "controller_type" {
type = string
description = "Droplet type for controllers (e.g. s-2vcpu-2gb, s-2vcpu-4gb, s-4vcpu-8gb)."
default = "s-2vcpu-2gb"
}
variable "worker_type" {
type = string
description = "Droplet type for workers (e.g. s-1vcpu-2gb, s-2vcpu-2gb)"
default = "s-1vcpu-2gb"
}
variable "os_image" {
type = string
description = "Flatcar Linux image for instances (e.g. custom-image-id)"
}
variable "controller_snippets" {
type = list(string)
description = "Controller Container Linux Config snippets"
default = []
}
variable "worker_snippets" {
type = list(string)
description = "Worker Container Linux Config snippets"
default = []
}
# configuration
variable "ssh_fingerprints" {
type = list(string)
description = "SSH public key fingerprints. (e.g. see `ssh-add -l -E md5`)"
}
variable "networking" {
type = string
description = "Choice of networking provider (flannel or calico)"
default = "calico"
}
variable "pod_cidr" {
type = string
description = "CIDR IPv4 range to assign Kubernetes pods"
default = "10.2.0.0/16"
}
variable "service_cidr" {
type = string
description = <<EOD
CIDR IPv4 range to assign Kubernetes services.
The 1st IP will be reserved for kube_apiserver, the 10th IP will be reserved for coredns.
EOD
default = "10.3.0.0/16"
}
variable "enable_reporting" {
type = bool
description = "Enable usage or analytics reporting to upstreams (Calico)"
default = false
}
variable "enable_aggregation" {
type = bool
description = "Enable the Kubernetes Aggregation Layer (defaults to false)"
default = false
}
# unofficial, undocumented, unsupported
variable "cluster_domain_suffix" {
type = string
description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local)"
default = "cluster.local"
}
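The `controller_snippets` and `worker_snippets` variables above accept Container Linux Config snippets. A hypothetical sketch passing a systemd dropin to workers (the module name, unit, and limit are illustrative):

module "nemo" {
  # ...
  worker_snippets = [
    <<-EOT
    systemd:
      units:
        - name: docker.service
          dropins:
            - name: 50-limits.conf
              contents: |
                [Service]
                LimitNOFILE=infinity
    EOT
  ]
}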


@@ -0,0 +1,20 @@
# Terraform version and plugin versions
terraform {
required_version = ">= 0.12.26, < 0.14.0"
required_providers {
template = "~> 2.1"
null = "~> 2.1"
ct = {
source = "poseidon/ct"
version = "~> 0.6.1"
}
digitalocean = {
source = "digitalocean/digitalocean"
version = "~> 1.20"
}
}
}


@@ -0,0 +1,76 @@
# Worker DNS records
resource "digitalocean_record" "workers-record-a" {
count = var.worker_count
# DNS zone where record should be created
domain = var.dns_zone
name = "${var.cluster_name}-workers"
type = "A"
ttl = 300
value = digitalocean_droplet.workers.*.ipv4_address[count.index]
}
resource "digitalocean_record" "workers-record-aaaa" {
# only official DigitalOcean images support IPv6
count = local.is_official_image ? var.worker_count : 0
# DNS zone where record should be created
domain = var.dns_zone
name = "${var.cluster_name}-workers"
type = "AAAA"
ttl = 300
value = digitalocean_droplet.workers.*.ipv6_address[count.index]
}
# Worker droplet instances
resource "digitalocean_droplet" "workers" {
count = var.worker_count
name = "${var.cluster_name}-worker-${count.index}"
region = var.region
image = var.os_image
size = var.worker_type
# network
private_networking = true
vpc_uuid = digitalocean_vpc.network.id
# only official DigitalOcean images support IPv6
ipv6 = local.is_official_image
user_data = data.ct_config.worker-ignition.rendered
ssh_keys = var.ssh_fingerprints
tags = [
digitalocean_tag.workers.id,
]
lifecycle {
create_before_destroy = true
}
}
# Tag to label workers
resource "digitalocean_tag" "workers" {
name = "${var.cluster_name}-worker"
}
# Worker Ignition config
data "ct_config" "worker-ignition" {
content = data.template_file.worker-config.rendered
strict = true
snippets = var.worker_snippets
}
# Worker Container Linux config
data "template_file" "worker-config" {
template = file("${path.module}/cl/worker.yaml")
vars = {
cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
cluster_domain_suffix = var.cluster_domain_suffix
}
}