Add fedora-cloud module for Digital Ocean

Dalton Hubble 2018-03-25 00:08:03 -07:00
parent a54f76db2a
commit 485586e5d8
12 changed files with 726 additions and 0 deletions


@@ -0,0 +1,23 @@
The MIT License (MIT)
Copyright (c) 2017 Typhoon Authors
Copyright (c) 2017 Dalton Hubble
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.


@@ -0,0 +1,22 @@
# Typhoon <img align="right" src="https://storage.googleapis.com/poseidon/typhoon-logo.png">
Typhoon is a minimal and free Kubernetes distribution.
* Minimal, stable base Kubernetes distribution
* Declarative infrastructure and configuration
* Free (freedom and cost) and privacy-respecting
* Practical for labs, datacenters, and clouds
Typhoon distributes upstream Kubernetes, architectural conventions, and cluster addons, much like a GNU/Linux distribution provides the Linux kernel and userspace components.
## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>
* Kubernetes v1.9.6 (upstream, via [kubernetes-incubator/bootkube](https://github.com/kubernetes-incubator/bootkube))
* Single or multi-master, workloads isolated on workers, [Calico](https://www.projectcalico.org/) or [flannel](https://github.com/coreos/flannel) networking
* On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
* Ready for Ingress, Prometheus, Grafana, and other optional [addons](https://typhoon.psdn.io/addons/overview/)
## Docs
Please see the [official docs](https://typhoon.psdn.io) and the Digital Ocean [tutorial](https://typhoon.psdn.io/digital-ocean/).
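A usage sketch (the module path, ref, and example values below are illustrative assumptions, not defined by this commit):

```tf
module "digital-ocean-nemo" {
  source = "git::https://github.com/poseidon/typhoon//digital-ocean/fedora-cloud/kubernetes?ref=<ref>"

  # Digital Ocean
  region   = "nyc1"
  dns_zone = "do.example.com"

  # configuration
  cluster_name       = "nemo"
  image              = "fedora-27-x64"
  ssh_authorized_key = "ssh-rsa AAAAB3Nz..."
  asset_dir          = "/home/user/.secrets/clusters/nemo"

  worker_count = 2
}
```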


@@ -0,0 +1,14 @@
# Self-hosted Kubernetes assets (kubeconfig, manifests)
module "bootkube" {
source = "git::https://github.com/poseidon/terraform-render-bootkube.git?ref=88b361207d42ec3121930a4add6b64ba7cf18360"
cluster_name = "${var.cluster_name}"
api_servers = ["${format("%s.%s", var.cluster_name, var.dns_zone)}"]
etcd_servers = "${digitalocean_record.etcds.*.fqdn}"
asset_dir = "${var.asset_dir}"
networking = "${var.networking}"
network_mtu = 1440
pod_cidr = "${var.pod_cidr}"
service_cidr = "${var.service_cidr}"
cluster_domain_suffix = "${var.cluster_domain_suffix}"
}
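# Worked example (assumed values, for illustration only): with
# cluster_name = "nemo", dns_zone = "do.example.com", and controller_count = 2,
# the interpolations above resolve to:
#
#   api_servers  = ["nemo.do.example.com"]
#   etcd_servers = ["nemo-etcd0.do.example.com", "nemo-etcd1.do.example.com"]
#
# Each etcd FQDN comes from the discrete digitalocean_record.etcds records
# declared in controllers.tf.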


@@ -0,0 +1,164 @@
#cloud-config
yum_repos:
kubernetes:
name: kubernetes
baseurl: https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
enabled: true
gpgcheck: true
gpgkey: https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
packages:
- vim
- docker
- kubelet
- nfs-utils
write_files:
- path: /etc/systemd/system/cloud-metadata.service
content: |
[Unit]
Description=Digital Ocean metadata agent
[Service]
Type=oneshot
Environment=OUTPUT=/run/metadata/digitalocean
ExecStart=/usr/bin/mkdir -p /run/metadata
ExecStart=/usr/bin/bash -c 'echo "DIGITALOCEAN_IPV4_PUBLIC_0=$(curl\
--url http://169.254.169.254/metadata/v1/interfaces/public/0/ipv4/address\
--retry 10)\nDIGITALOCEAN_IPV4_PRIVATE_0=$(curl\
--url http://169.254.169.254/metadata/v1/interfaces/private/0/ipv4/address\
--retry 10)" > $${OUTPUT}'
- path: /etc/systemd/system/etcd-member.service
content: |
[Unit]
Description=etcd-member
Documentation=https://github.com/coreos/etcd
Wants=network-online.target network.target
After=network-online.target
Requires=docker.service
After=docker.service
[Service]
EnvironmentFile=/etc/etcd/etcd.conf
ExecStartPre=/bin/mkdir -p /var/lib/etcd
ExecStart=/usr/bin/docker run --rm --name etcd-member \
--net=host \
-v /usr/share/ca-certificates:/usr/share/ca-certificates:ro,z \
-v /etc/ssl/etcd:/etc/ssl/certs:ro,Z \
-v /var/lib/etcd:/var/lib/etcd:Z \
--env-file=/etc/etcd/etcd.conf \
quay.io/coreos/etcd:v3.3.2
ExecStop=/usr/bin/docker stop etcd-member
Restart=on-failure
RestartSec=10s
TimeoutStartSec=0
LimitNOFILE=40000
[Install]
WantedBy=multi-user.target
- path: /etc/etcd/etcd.conf
content: |
ETCD_NAME=${etcd_name}
ETCD_DATA_DIR=/var/lib/etcd
ETCD_ADVERTISE_CLIENT_URLS=https://${etcd_domain}:2379
ETCD_INITIAL_ADVERTISE_PEER_URLS=https://${etcd_domain}:2380
ETCD_LISTEN_CLIENT_URLS=https://0.0.0.0:2379
ETCD_LISTEN_PEER_URLS=https://0.0.0.0:2380
ETCD_INITIAL_CLUSTER=${etcd_initial_cluster}
ETCD_STRICT_RECONFIG_CHECK=true
ETCD_TRUSTED_CA_FILE=/etc/ssl/certs/etcd/server-ca.crt
ETCD_CERT_FILE=/etc/ssl/certs/etcd/server.crt
ETCD_KEY_FILE=/etc/ssl/certs/etcd/server.key
ETCD_CLIENT_CERT_AUTH=true
ETCD_PEER_TRUSTED_CA_FILE=/etc/ssl/certs/etcd/peer-ca.crt
ETCD_PEER_CERT_FILE=/etc/ssl/certs/etcd/peer.crt
ETCD_PEER_KEY_FILE=/etc/ssl/certs/etcd/peer.key
ETCD_PEER_CLIENT_CERT_AUTH=true
- path: /etc/systemd/system/kubelet.service.d/10-typhoon.conf
content: |
[Unit]
Description=Kubelet
Requires=cloud-metadata.service
After=cloud-metadata.service
Wants=rpc-statd.service
[Service]
EnvironmentFile=/run/metadata/digitalocean
ExecStartPre=/bin/mkdir -p /opt/cni/bin
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
ExecStartPre=/bin/mkdir -p /etc/kubernetes/checkpoint-secrets
ExecStartPre=/bin/mkdir -p /etc/kubernetes/inactive-manifests
ExecStartPre=/bin/mkdir -p /var/lib/cni
ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins
ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
ExecStart=
ExecStart=/usr/bin/kubelet \
--allow-privileged \
--anonymous-auth=false \
--cgroup-driver=systemd \
--client-ca-file=/etc/kubernetes/ca.crt \
--cluster_dns=${k8s_dns_service_ip} \
--cluster_domain=${cluster_domain_suffix} \
--cni-conf-dir=/etc/kubernetes/cni/net.d \
--exit-on-lock-contention \
--hostname-override=$${DIGITALOCEAN_IPV4_PRIVATE_0} \
--kubeconfig=/etc/kubernetes/kubeconfig \
--lock-file=/var/run/lock/kubelet.lock \
--network-plugin=cni \
--node-labels=node-role.kubernetes.io/master \
--node-labels=node-role.kubernetes.io/controller="true" \
--pod-manifest-path=/etc/kubernetes/manifests \
--register-with-taints=node-role.kubernetes.io/master=:NoSchedule \
--volume-plugin-dir=/var/lib/kubelet/volumeplugins
Restart=always
RestartSec=10
[Install]
WantedBy=multi-user.target
- path: /etc/systemd/system/kubelet.path
content: |
[Unit]
Description=Watch for kubeconfig
[Path]
PathExists=/etc/kubernetes/kubeconfig
[Install]
WantedBy=multi-user.target
- path: /etc/kubernetes/.keep
- path: /etc/selinux/config
content: |
SELINUX=permissive
- path: /etc/systemd/system/bootkube.service
content: |
[Unit]
Description=Bootstrap a Kubernetes cluster
ConditionPathExists=!/opt/bootkube/init_bootkube.done
[Service]
Type=oneshot
RemainAfterExit=true
WorkingDirectory=/opt/bootkube
ExecStart=/opt/bootkube/bootkube-start
ExecStartPost=/bin/touch /opt/bootkube/init_bootkube.done
[Install]
WantedBy=multi-user.target
- path: /opt/bootkube/bootkube-start
permissions: '0544'
content: |
#!/bin/bash -e
# Wrapper for bootkube start
[ -n "$(ls /opt/bootkube/assets/manifests-*/* 2>/dev/null)" ] && mv /opt/bootkube/assets/manifests-*/* /opt/bootkube/assets/manifests && rm -rf /opt/bootkube/assets/manifests-*
/usr/bin/docker run --rm --name bootkube \
--volume /etc/kubernetes:/etc/kubernetes:Z \
--volume /opt/bootkube/assets:/assets:Z \
--entrypoint=/bootkube \
quay.io/coreos/bootkube:v0.11.0 start --asset-dir=/assets
runcmd:
- [systemctl, daemon-reload]
- [systemctl, enable, docker.service]
- [systemctl, start, --no-block, docker.service]
- [systemctl, enable, etcd-member.service]
- [systemctl, start, --no-block, etcd-member.service]
- [systemctl, enable, cloud-metadata.service]
- [systemctl, enable, kubelet.path]
- [systemctl, start, --no-block, kubelet.path]
users:
- default
- name: fedora
gecos: Fedora Admin
sudo: ALL=(ALL) NOPASSWD:ALL
groups: wheel,adm,systemd-journal,docker
ssh-authorized-keys:
- "${ssh_authorized_key}"


@@ -0,0 +1,92 @@
#cloud-config
yum_repos:
kubernetes:
name: kubernetes
baseurl: https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
enabled: true
gpgcheck: true
gpgkey: https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
packages:
- vim
- docker
- kubelet
- nfs-utils
write_files:
- path: /etc/systemd/system/cloud-metadata.service
content: |
[Unit]
Description=Digital Ocean metadata agent
[Service]
Type=oneshot
Environment=OUTPUT=/run/metadata/digitalocean
ExecStart=/usr/bin/mkdir -p /run/metadata
ExecStart=/usr/bin/bash -c 'echo "DIGITALOCEAN_IPV4_PUBLIC_0=$(curl\
--url http://169.254.169.254/metadata/v1/interfaces/public/0/ipv4/address\
--retry 10)\nDIGITALOCEAN_IPV4_PRIVATE_0=$(curl\
--url http://169.254.169.254/metadata/v1/interfaces/private/0/ipv4/address\
--retry 10)" > $${OUTPUT}'
- path: /etc/systemd/system/kubelet.service.d/10-typhoon.conf
content: |
[Unit]
Description=Kubelet
Requires=cloud-metadata.service
After=cloud-metadata.service
Wants=rpc-statd.service
[Service]
EnvironmentFile=/run/metadata/digitalocean
ExecStartPre=/bin/mkdir -p /opt/cni/bin
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
ExecStartPre=/bin/mkdir -p /etc/kubernetes/checkpoint-secrets
ExecStartPre=/bin/mkdir -p /etc/kubernetes/inactive-manifests
ExecStartPre=/bin/mkdir -p /var/lib/cni
ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins
ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
ExecStart=
ExecStart=/usr/bin/kubelet \
--allow-privileged \
--anonymous-auth=false \
--cgroup-driver=systemd \
--client-ca-file=/etc/kubernetes/ca.crt \
--cluster_dns=${k8s_dns_service_ip} \
--cluster_domain=${cluster_domain_suffix} \
--cni-conf-dir=/etc/kubernetes/cni/net.d \
--exit-on-lock-contention \
--hostname-override=$${DIGITALOCEAN_IPV4_PRIVATE_0} \
--kubeconfig=/etc/kubernetes/kubeconfig \
--lock-file=/var/run/lock/kubelet.lock \
--network-plugin=cni \
--node-labels=node-role.kubernetes.io/node \
--pod-manifest-path=/etc/kubernetes/manifests \
--volume-plugin-dir=/var/lib/kubelet/volumeplugins
Restart=always
RestartSec=10
[Install]
WantedBy=multi-user.target
- path: /etc/systemd/system/kubelet.path
content: |
[Unit]
Description=Watch for kubeconfig
[Path]
PathExists=/etc/kubernetes/kubeconfig
[Install]
WantedBy=multi-user.target
- path: /etc/kubernetes/.keep
- path: /etc/selinux/config
content: |
SELINUX=permissive
runcmd:
- [systemctl, daemon-reload]
- [systemctl, enable, docker.service]
- [systemctl, start, --no-block, docker.service]
- [systemctl, enable, cloud-metadata.service]
- [systemctl, enable, kubelet.path]
- [systemctl, start, --no-block, kubelet.path]
users:
- default
- name: fedora
gecos: Fedora Admin
sudo: ALL=(ALL) NOPASSWD:ALL
groups: wheel,adm,systemd-journal,docker
ssh-authorized-keys:
- "${ssh_authorized_key}"


@@ -0,0 +1,87 @@
# Controller Instance DNS records
resource "digitalocean_record" "controllers" {
count = "${var.controller_count}"
# DNS zone where record should be created
domain = "${var.dns_zone}"
# DNS record (will be prepended to domain)
name = "${var.cluster_name}"
type = "A"
ttl = 300
# IPv4 addresses of controllers
value = "${element(digitalocean_droplet.controllers.*.ipv4_address, count.index)}"
}
# Discrete DNS records for each controller's private IPv4 for etcd usage
resource "digitalocean_record" "etcds" {
count = "${var.controller_count}"
# DNS zone where record should be created
domain = "${var.dns_zone}"
# DNS record (will be prepended to domain)
name = "${var.cluster_name}-etcd${count.index}"
type = "A"
ttl = 300
# private IPv4 address for etcd
value = "${element(digitalocean_droplet.controllers.*.ipv4_address_private, count.index)}"
}
# Controller droplet instances
resource "digitalocean_droplet" "controllers" {
count = "${var.controller_count}"
name = "${var.cluster_name}-controller-${count.index}"
region = "${var.region}"
image = "${var.image}"
size = "${var.controller_type}"
# network
ipv6 = true
private_networking = true
user_data = "${element(data.template_file.controller-cloudinit.*.rendered, count.index)}"
tags = [
"${digitalocean_tag.controllers.id}",
]
}
# Tag to label controllers
resource "digitalocean_tag" "controllers" {
name = "${var.cluster_name}-controller"
}
# Controller Cloud-init
data "template_file" "controller-cloudinit" {
count = "${var.controller_count}"
template = "${file("${path.module}/cloudinit/controller.yaml.tmpl")}"
vars = {
# Cannot use cyclic dependencies on controllers or their DNS records
etcd_name = "etcd${count.index}"
etcd_domain = "${var.cluster_name}-etcd${count.index}.${var.dns_zone}"
# etcd0=https://cluster-etcd0.example.com,etcd1=https://cluster-etcd1.example.com,...
etcd_initial_cluster = "${join(",", formatlist("%s=https://%s:2380", null_resource.repeat.*.triggers.name, null_resource.repeat.*.triggers.domain))}"
ssh_authorized_key = "${var.ssh_authorized_key}"
k8s_dns_service_ip = "${cidrhost(var.service_cidr, 10)}"
cluster_domain_suffix = "${var.cluster_domain_suffix}"
}
}
# Horrible hack to generate a Terraform list of a desired length without dependencies.
# Ideally, a built-in repeat would exist: ${repeat("etcd", 3)} -> ["etcd", "etcd", "etcd"]
resource "null_resource" "repeat" {
count = "${var.controller_count}"
triggers {
name = "etcd${count.index}"
domain = "${var.cluster_name}-etcd${count.index}.${var.dns_zone}"
}
}
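# To make the hack concrete (assumed example values): with controller_count = 2,
# cluster_name = "nemo", and dns_zone = "do.example.com", the repeat resource's
# triggers yield names ["etcd0", "etcd1"] and domains
# ["nemo-etcd0.do.example.com", "nemo-etcd1.do.example.com"], so the
# formatlist/join above renders:
#
#   etcd_initial_cluster = "etcd0=https://nemo-etcd0.do.example.com:2380,etcd1=https://nemo-etcd1.do.example.com:2380"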


@@ -0,0 +1,53 @@
resource "digitalocean_firewall" "rules" {
name = "${var.cluster_name}"
tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
# allow ssh, http/https ingress, and peer-to-peer traffic
inbound_rule = [
{
protocol = "tcp"
port_range = "22"
source_addresses = ["0.0.0.0/0", "::/0"]
},
{
protocol = "tcp"
port_range = "80"
source_addresses = ["0.0.0.0/0", "::/0"]
},
{
protocol = "tcp"
port_range = "443"
source_addresses = ["0.0.0.0/0", "::/0"]
},
{
protocol = "udp"
port_range = "1-65535"
source_tags = ["${digitalocean_tag.controllers.name}", "${digitalocean_tag.workers.name}"]
},
{
protocol = "tcp"
port_range = "1-65535"
source_tags = ["${digitalocean_tag.controllers.name}", "${digitalocean_tag.workers.name}"]
},
]
# allow all outbound traffic
outbound_rule = [
{
protocol = "tcp"
port_range = "1-65535"
destination_addresses = ["0.0.0.0/0", "::/0"]
},
{
protocol = "udp"
port_range = "1-65535"
destination_addresses = ["0.0.0.0/0", "::/0"]
},
{
protocol = "icmp"
port_range = "1-65535"
destination_addresses = ["0.0.0.0/0", "::/0"]
},
]
}


@@ -0,0 +1,23 @@
output "controllers_dns" {
value = "${digitalocean_record.controllers.0.fqdn}"
}
output "workers_dns" {
value = "${digitalocean_record.workers.0.fqdn}"
}
output "controllers_ipv4" {
value = ["${digitalocean_droplet.controllers.*.ipv4_address}"]
}
output "controllers_ipv6" {
value = ["${digitalocean_droplet.controllers.*.ipv6_address}"]
}
output "workers_ipv4" {
value = ["${digitalocean_droplet.workers.*.ipv4_address}"]
}
output "workers_ipv6" {
value = ["${digitalocean_droplet.workers.*.ipv6_address}"]
}


@@ -0,0 +1,25 @@
# Terraform version and plugin versions
terraform {
required_version = ">= 0.10.4"
}
provider "digitalocean" {
version = "~> 0.1.2"
}
provider "local" {
version = "~> 1.0"
}
provider "null" {
version = "~> 1.0"
}
provider "template" {
version = "~> 1.0"
}
provider "tls" {
version = "~> 1.0"
}


@@ -0,0 +1,91 @@
# Secure copy kubeconfig to all nodes. Activates kubelet.service
resource "null_resource" "copy-secrets" {
count = "${var.controller_count + var.worker_count}"
connection {
type = "ssh"
host = "${element(concat(digitalocean_droplet.controllers.*.ipv4_address, digitalocean_droplet.workers.*.ipv4_address), count.index)}"
user = "fedora"
timeout = "15m"
}
provisioner "file" {
content = "${module.bootkube.kubeconfig}"
destination = "$HOME/kubeconfig"
}
provisioner "file" {
content = "${module.bootkube.etcd_ca_cert}"
destination = "$HOME/etcd-client-ca.crt"
}
provisioner "file" {
content = "${module.bootkube.etcd_client_cert}"
destination = "$HOME/etcd-client.crt"
}
provisioner "file" {
content = "${module.bootkube.etcd_client_key}"
destination = "$HOME/etcd-client.key"
}
provisioner "file" {
content = "${module.bootkube.etcd_server_cert}"
destination = "$HOME/etcd-server.crt"
}
provisioner "file" {
content = "${module.bootkube.etcd_server_key}"
destination = "$HOME/etcd-server.key"
}
provisioner "file" {
content = "${module.bootkube.etcd_peer_cert}"
destination = "$HOME/etcd-peer.crt"
}
provisioner "file" {
content = "${module.bootkube.etcd_peer_key}"
destination = "$HOME/etcd-peer.key"
}
provisioner "remote-exec" {
inline = [
"while [ ! -f /var/lib/cloud/instance/boot-finished ]; do sleep 4; done",
"sudo mkdir -p /etc/ssl/etcd/etcd",
"sudo mv etcd-client* /etc/ssl/etcd/",
"sudo cp /etc/ssl/etcd/etcd-client-ca.crt /etc/ssl/etcd/etcd/server-ca.crt",
"sudo mv etcd-server.crt /etc/ssl/etcd/etcd/server.crt",
"sudo mv etcd-server.key /etc/ssl/etcd/etcd/server.key",
"sudo cp /etc/ssl/etcd/etcd-client-ca.crt /etc/ssl/etcd/etcd/peer-ca.crt",
"sudo mv etcd-peer.crt /etc/ssl/etcd/etcd/peer.crt",
"sudo mv etcd-peer.key /etc/ssl/etcd/etcd/peer.key",
"sudo mv kubeconfig /etc/kubernetes/kubeconfig",
]
}
}
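# Note on sequencing (a reading of the units defined in cloud-init, not new
# behavior): kubelet.path watches for /etc/kubernetes/kubeconfig, so the
# "sudo mv kubeconfig /etc/kubernetes/kubeconfig" step above is what finally
# activates kubelet.service on each node.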
# Secure copy bootkube assets to ONE controller and start bootkube to perform
# one-time self-hosted cluster bootstrapping.
resource "null_resource" "bootkube-start" {
depends_on = ["module.bootkube", "null_resource.copy-secrets"]
connection {
type = "ssh"
host = "${digitalocean_droplet.controllers.0.ipv4_address}"
user = "fedora"
timeout = "15m"
}
provisioner "file" {
source = "${var.asset_dir}"
destination = "$HOME/assets"
}
provisioner "remote-exec" {
inline = [
"sudo mv assets /opt/bootkube",
"sudo systemctl start bootkube",
]
}
}


@@ -0,0 +1,84 @@
variable "cluster_name" {
type = "string"
description = "Unique cluster name"
}
variable "region" {
type = "string"
description = "Digital Ocean region (e.g. nyc1, sfo2, fra1, tor1)"
}
variable "dns_zone" {
type = "string"
description = "Digital Ocean domain (i.e. DNS zone) (e.g. do.example.com)"
}
variable "image" {
type = "string"
default = "fedora-27-x64"
description = "OS image from which to initialize the disk (e.g. fedora-27-x64)"
}
variable "controller_count" {
type = "string"
default = "1"
description = "Number of controllers"
}
variable "controller_type" {
type = "string"
default = "s-2vcpu-2gb"
description = "Digital Ocean droplet size (e.g. s-2vcpu-2gb, s-2vcpu-4gb, s-4vcpu-8gb)."
}
variable "worker_count" {
type = "string"
default = "1"
description = "Number of workers"
}
variable "worker_type" {
type = "string"
default = "s-1vcpu-1gb"
description = "Digital Ocean droplet size (e.g. s-1vcpu-1gb, s-1vcpu-2gb, s-2vcpu-2gb)"
}
variable "ssh_authorized_key" {
type = "string"
description = "SSH public key for user 'fedora'"
}
# bootkube assets
variable "asset_dir" {
description = "Path to a directory where generated assets should be placed (contains secrets)"
type = "string"
}
variable "networking" {
description = "Choice of networking provider (flannel or calico)"
type = "string"
default = "flannel"
}
variable "pod_cidr" {
description = "CIDR IP range to assign Kubernetes pods"
type = "string"
default = "10.2.0.0/16"
}
variable "service_cidr" {
description = <<EOD
CIDR IP range to assign Kubernetes services.
The 1st IP is reserved for kube-apiserver and the 10th IP for kube-dns.
EOD
type = "string"
default = "10.3.0.0/16"
}
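# Worked example of those reservations (illustrative, using the default CIDR):
# Terraform's cidrhost() returns the Nth host address within a CIDR range.
#
#   cidrhost("10.3.0.0/16", 1)  = "10.3.0.1"    # reserved for kube-apiserver
#   cidrhost("10.3.0.0/16", 10) = "10.3.0.10"   # reserved for kube-dns; passed to
#                                               # kubelets as k8s_dns_service_ip in
#                                               # controllers.tf and workers.tf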
variable "cluster_domain_suffix" {
description = "Queries for domains with the suffix will be answered by kube-dns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
type = "string"
default = "cluster.local"
}


@@ -0,0 +1,48 @@
# Worker DNS records
resource "digitalocean_record" "workers" {
count = "${var.worker_count}"
# DNS zone where record should be created
domain = "${var.dns_zone}"
name = "${var.cluster_name}-workers"
type = "A"
ttl = 300
value = "${element(digitalocean_droplet.workers.*.ipv4_address, count.index)}"
}
# Worker droplet instances
resource "digitalocean_droplet" "workers" {
count = "${var.worker_count}"
name = "${var.cluster_name}-worker-${count.index}"
region = "${var.region}"
image = "${var.image}"
size = "${var.worker_type}"
# network
ipv6 = true
private_networking = true
user_data = "${data.template_file.worker-cloudinit.rendered}"
tags = [
"${digitalocean_tag.workers.id}",
]
}
# Tag to label workers
resource "digitalocean_tag" "workers" {
name = "${var.cluster_name}-worker"
}
# Worker Cloud-init
data "template_file" "worker-cloudinit" {
template = "${file("${path.module}/cloudinit/worker.yaml.tmpl")}"
vars = {
ssh_authorized_key = "${var.ssh_authorized_key}"
k8s_dns_service_ip = "${cidrhost(var.service_cidr, 10)}"
cluster_domain_suffix = "${var.cluster_domain_suffix}"
}
}