Add Google Cloud fedora-atomic module
* Network load balancer for ingress doesn't work yet because Compute Engine packages are missing
* port-forward / socat is broken
This commit is contained in:
parent 24d230505a
commit 2b74aba564
google-cloud/fedora-atomic/kubernetes/LICENSE (Normal file, 23 lines)
@@ -0,0 +1,23 @@
The MIT License (MIT)

Copyright (c) 2017 Typhoon Authors
Copyright (c) 2017 Dalton Hubble

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
google-cloud/fedora-atomic/kubernetes/README.md (Normal file, 22 lines)
@@ -0,0 +1,22 @@
# Typhoon <img align="right" src="https://storage.googleapis.com/poseidon/typhoon-logo.png">

Typhoon is a minimal and free Kubernetes distribution.

* Minimal, stable base Kubernetes distribution
* Declarative infrastructure and configuration
* Free (freedom and cost) and privacy-respecting
* Practical for labs, datacenters, and clouds

Typhoon distributes upstream Kubernetes, architectural conventions, and cluster addons, much like a GNU/Linux distribution provides the Linux kernel and userspace components.

## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>

* Kubernetes v1.10.0 (upstream, via [kubernetes-incubator/bootkube](https://github.com/kubernetes-incubator/bootkube))
* Single or multi-master, workloads isolated on workers, [Calico](https://www.projectcalico.org/) or [flannel](https://github.com/coreos/flannel) networking
* On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
* Ready for Ingress, Prometheus, Grafana, and other optional [addons](https://typhoon.psdn.io/addons/overview/)

## Docs

Please see the [official docs](https://typhoon.psdn.io) and the Google Cloud [tutorial](https://typhoon.psdn.io/google-cloud/).
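The linked docs cover usage in full; as a rough sketch only (module ref, image name, key, and paths below are illustrative assumptions, not part of this commit), the new module would be instantiated like:

module "google-cloud-yavin" {
  source = "git::https://github.com/poseidon/typhoon//google-cloud/fedora-atomic/kubernetes"

  # Google Cloud (illustrative values)
  region        = "us-central1"
  dns_zone      = "example.com"
  dns_zone_name = "example-zone"

  # cluster (illustrative values)
  cluster_name       = "yavin"
  os_image           = "fedora-atomic-27"
  controller_count   = 1
  worker_count       = 2
  ssh_authorized_key = "ssh-rsa AAAAB3Nz..."
  asset_dir          = "/home/user/.secrets/clusters/yavin"
}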
google-cloud/fedora-atomic/kubernetes/apiserver.tf (Normal file, 95 lines)
@@ -0,0 +1,95 @@
# TCP Proxy load balancer DNS record
resource "google_dns_record_set" "apiserver" {
  # DNS Zone name where record should be created
  managed_zone = "${var.dns_zone_name}"

  # DNS record
  name = "${format("%s.%s.", var.cluster_name, var.dns_zone)}"
  type = "A"
  ttl  = 300

  # IPv4 address of apiserver TCP Proxy load balancer
  rrdatas = ["${google_compute_global_address.apiserver-ipv4.address}"]
}

# Static IPv4 address for the TCP Proxy Load Balancer
resource "google_compute_global_address" "apiserver-ipv4" {
  name       = "${var.cluster_name}-apiserver-ip"
  ip_version = "IPV4"
}

# Forward IPv4 TCP traffic to the TCP proxy load balancer
resource "google_compute_global_forwarding_rule" "apiserver" {
  name        = "${var.cluster_name}-apiserver"
  ip_address  = "${google_compute_global_address.apiserver-ipv4.address}"
  ip_protocol = "TCP"
  port_range  = "443"
  target      = "${google_compute_target_tcp_proxy.apiserver.self_link}"
}

# TCP Proxy Load Balancer for apiservers
resource "google_compute_target_tcp_proxy" "apiserver" {
  name            = "${var.cluster_name}-apiserver"
  description     = "Distribute TCP load across ${var.cluster_name} controllers"
  backend_service = "${google_compute_backend_service.apiserver.self_link}"
}

# Backend service backed by unmanaged instance groups
resource "google_compute_backend_service" "apiserver" {
  name        = "${var.cluster_name}-apiserver"
  description = "${var.cluster_name} apiserver service"

  protocol         = "TCP"
  port_name        = "apiserver"
  session_affinity = "NONE"
  timeout_sec      = "60"

  # controller(s) spread across zonal instance groups
  backend {
    group = "${google_compute_instance_group.controllers.0.self_link}"
  }

  backend {
    group = "${google_compute_instance_group.controllers.1.self_link}"
  }

  backend {
    group = "${google_compute_instance_group.controllers.2.self_link}"
  }

  health_checks = ["${google_compute_health_check.apiserver.self_link}"]
}

# Instance group of heterogeneous (unmanaged) controller instances
resource "google_compute_instance_group" "controllers" {
  count = "${length(local.zones)}"

  name = "${format("%s-controllers-%s", var.cluster_name, element(local.zones, count.index))}"
  zone = "${element(local.zones, count.index)}"

  named_port {
    name = "apiserver"
    port = "443"
  }

  # add instances in the zone into the instance group
  instances = [
    "${matchkeys(google_compute_instance.controllers.*.self_link,
    google_compute_instance.controllers.*.zone,
    list(element(local.zones, count.index)))}"
  ]
}

# TCP health check for apiserver
resource "google_compute_health_check" "apiserver" {
  name        = "${var.cluster_name}-apiserver-tcp-health"
  description = "TCP health check for kube-apiserver"

  timeout_sec        = 5
  check_interval_sec = 5

  healthy_threshold   = 1
  unhealthy_threshold = 3

  tcp_health_check {
    port = "443"
  }
}
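The matchkeys(values, keys, searchset) interpolation above keeps each element of values whose corresponding element of keys appears in searchset, so each zonal instance group receives only the controllers in its own zone. A worked example with illustrative values:

# values    = [link-0, link-1, link-2]                            (controller self_links)
# keys      = ["us-central1-a", "us-central1-b", "us-central1-a"] (controller zones)
# searchset = ["us-central1-a"]
# result    = [link-0, link-2]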
google-cloud/fedora-atomic/kubernetes/bootkube.tf (Normal file, 17 lines)
@@ -0,0 +1,17 @@
# Self-hosted Kubernetes assets (kubeconfig, manifests)
module "bootkube" {
  source = "git::https://github.com/poseidon/terraform-render-bootkube.git?ref=db36b92abced3c4b0af279adfd5ed4bf0cf8c39f"

  cluster_name          = "${var.cluster_name}"
  api_servers           = ["${format("%s.%s", var.cluster_name, var.dns_zone)}"]
  etcd_servers          = ["${null_resource.repeat.*.triggers.domain}"]
  asset_dir             = "${var.asset_dir}"
  networking            = "${var.networking}"
  network_mtu           = 1440
  pod_cidr              = "${var.pod_cidr}"
  service_cidr          = "${var.service_cidr}"
  cluster_domain_suffix = "${var.cluster_domain_suffix}"

  # Fedora
  trusted_certs_dir = "/etc/pki/tls/certs"
}
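The bootkube module renders cluster assets onto the machine running Terraform. As an assumption from terraform-render-bootkube conventions (the layout is not shown in this commit), asset_dir roughly contains:

asset_dir/
  auth/kubeconfig          # admin kubeconfig (module.bootkube.kubeconfig)
  bootstrap-manifests/     # static pods used only during bootstrapping
  manifests/               # self-hosted control plane manifests
  tls/                     # generated CA and certificates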
google-cloud/fedora-atomic/kubernetes/cloudinit/controller.yaml.tmpl (Normal file, 124 lines)
@@ -0,0 +1,124 @@
#cloud-config
write_files:
  - path: /etc/etcd/etcd.conf
    content: |
      ETCD_NAME=${etcd_name}
      ETCD_DATA_DIR=/var/lib/etcd
      ETCD_ADVERTISE_CLIENT_URLS=https://${etcd_domain}:2379
      ETCD_INITIAL_ADVERTISE_PEER_URLS=https://${etcd_domain}:2380
      ETCD_LISTEN_CLIENT_URLS=https://0.0.0.0:2379
      ETCD_LISTEN_PEER_URLS=https://0.0.0.0:2380
      ETCD_LISTEN_METRICS_URLS=http://0.0.0.0:2381
      ETCD_INITIAL_CLUSTER=${etcd_initial_cluster}
      ETCD_STRICT_RECONFIG_CHECK=true
      ETCD_TRUSTED_CA_FILE=/etc/ssl/certs/etcd/server-ca.crt
      ETCD_CERT_FILE=/etc/ssl/certs/etcd/server.crt
      ETCD_KEY_FILE=/etc/ssl/certs/etcd/server.key
      ETCD_CLIENT_CERT_AUTH=true
      ETCD_PEER_TRUSTED_CA_FILE=/etc/ssl/certs/etcd/peer-ca.crt
      ETCD_PEER_CERT_FILE=/etc/ssl/certs/etcd/peer.crt
      ETCD_PEER_KEY_FILE=/etc/ssl/certs/etcd/peer.key
      ETCD_PEER_CLIENT_CERT_AUTH=true
  - path: /etc/systemd/system/cloud-metadata.service
    content: |
      [Unit]
      Description=Cloud metadata agent
      [Service]
      Type=oneshot
      Environment=OUTPUT=/run/metadata/cloud
      ExecStart=/usr/bin/mkdir -p /run/metadata
      ExecStart=/usr/bin/bash -c 'echo "HOSTNAME_OVERRIDE=$(curl\
        -H "Metadata-Flavor: Google"\
        --url http://metadata.google.internal/computeMetadata/v1/instance/hostname\
        --retry 10)" > $${OUTPUT}'
      [Install]
      WantedBy=multi-user.target
  - path: /etc/systemd/system/kubelet.service.d/10-typhoon.conf
    content: |
      [Unit]
      Requires=cloud-metadata.service
      After=cloud-metadata.service
      Wants=rpc-statd.service
      [Service]
      ExecStartPre=/bin/mkdir -p /opt/cni/bin
      ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
      ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
      ExecStartPre=/bin/mkdir -p /etc/kubernetes/checkpoint-secrets
      ExecStartPre=/bin/mkdir -p /etc/kubernetes/inactive-manifests
      ExecStartPre=/bin/mkdir -p /var/lib/cni
      ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins
      ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
      Restart=always
      RestartSec=10
  - path: /etc/kubernetes/kubelet.conf
    content: |
      ARGS="--allow-privileged \
        --anonymous-auth=false \
        --client-ca-file=/etc/kubernetes/ca.crt \
        --cluster_dns=${k8s_dns_service_ip} \
        --cluster_domain=${cluster_domain_suffix} \
        --cni-conf-dir=/etc/kubernetes/cni/net.d \
        --exit-on-lock-contention \
        --kubeconfig=/etc/kubernetes/kubeconfig \
        --lock-file=/var/run/lock/kubelet.lock \
        --network-plugin=cni \
        --node-labels=node-role.kubernetes.io/master \
        --node-labels=node-role.kubernetes.io/controller="true" \
        --pod-manifest-path=/etc/kubernetes/manifests \
        --register-with-taints=node-role.kubernetes.io/master=:NoSchedule \
        --volume-plugin-dir=/var/lib/kubelet/volumeplugins"
  - path: /etc/kubernetes/kubeconfig
    permissions: '0644'
    content: |
      ${kubeconfig}
  - path: /etc/systemd/system/bootkube.service
    content: |
      [Unit]
      Description=Bootstrap a Kubernetes cluster
      ConditionPathExists=!/var/bootkube/init_bootkube.done
      [Service]
      Type=oneshot
      RemainAfterExit=true
      WorkingDirectory=/var/bootkube
      ExecStartPre=/bin/mkdir -p /var/bootkube
      ExecStart=/usr/local/bin/bootkube-start
      ExecStartPost=/bin/touch /var/bootkube/init_bootkube.done
      [Install]
      WantedBy=multi-user.target
  - path: /var/bootkube/.keep
  - path: /usr/local/bin/bootkube-start
    permissions: '0755'
    content: |
      #!/bin/bash -e
      # Wrapper for bootkube start
      [ -n "$(ls /var/bootkube/assets/manifests-*/* 2>/dev/null)" ] && mv /var/bootkube/assets/manifests-*/* /var/bootkube/assets/manifests && rm -rf /var/bootkube/assets/manifests-*
      /usr/bin/docker run --rm --name bootkube \
        --net=host \
        --volume /etc/kubernetes:/etc/kubernetes:Z \
        --volume /var/bootkube/assets:/assets:Z \
        --entrypoint=/bootkube \
        quay.io/coreos/bootkube:v0.12.0 start --asset-dir=/assets
  - path: /etc/selinux/config
    owner: root:root
    permissions: '0644'
    content: |
      SELINUX=permissive
      SELINUXTYPE=targeted
bootcmd:
  - [setenforce, Permissive]
  - [systemctl, disable, firewalld, --now]
runcmd:
  - [systemctl, daemon-reload]
  - "atomic install --system --name=etcd quay.io/dghubble/etcd:0265e6680d2533f3fbf4512af868d29ff07451ca"
  - [systemctl, start, --no-block, etcd.service]
  - [systemctl, enable, cloud-metadata.service]
  - "atomic install --system --name=kubelet quay.io/dghubble/kubelet:8767d4433f7c5a38c55edf6e682efb53fcd06f61"
  - [systemctl, start, --no-block, kubelet.service]
users:
  - default
  - name: fedora
    gecos: Fedora Admin
    sudo: ALL=(ALL) NOPASSWD:ALL
    groups: wheel,adm,systemd-journal,docker
    ssh-authorized-keys:
      - "${ssh_authorized_key}"
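runcmd installs etcd and the kubelet as Atomic system containers and starts them. A hedged spot-check on a booted controller (assumes the Atomic CLI and the units above behave as configured):

sudo atomic containers list          # expect etcd and kubelet system containers
sudo systemctl status etcd kubelet   # both should reach active (running)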
google-cloud/fedora-atomic/kubernetes/controllers.tf (Normal file, 91 lines)
@@ -0,0 +1,91 @@
# Discrete DNS records for each controller's private IPv4 for etcd usage
resource "google_dns_record_set" "etcds" {
  count = "${var.controller_count}"

  # DNS Zone name where record should be created
  managed_zone = "${var.dns_zone_name}"

  # DNS record
  name = "${format("%s-etcd%d.%s.", var.cluster_name, count.index, var.dns_zone)}"
  type = "A"
  ttl  = 300

  # private IPv4 address for etcd
  rrdatas = ["${element(google_compute_instance.controllers.*.network_interface.0.address, count.index)}"]
}

# Zones in the region
data "google_compute_zones" "all" {
  region = "${var.region}"
}

locals {
  # TCP proxy load balancers require a fixed number of zonal backends. Spread
  # controllers over up to 3 zones, since all GCP regions have at least 3.
  zones = "${slice(data.google_compute_zones.all.names, 0, 3)}"

  controllers_ipv4_public = ["${google_compute_instance.controllers.*.network_interface.0.access_config.0.assigned_nat_ip}"]
}

# Controller instances
resource "google_compute_instance" "controllers" {
  count = "${var.controller_count}"

  name         = "${var.cluster_name}-controller-${count.index}"
  zone         = "${element(local.zones, count.index)}"
  machine_type = "${var.controller_type}"

  metadata {
    user-data = "${element(data.template_file.controller-cloudinit.*.rendered, count.index)}"
  }

  boot_disk {
    auto_delete = true

    initialize_params {
      image = "${var.os_image}"
      size  = "${var.disk_size}"
    }
  }

  network_interface {
    network = "${google_compute_network.network.name}"

    # Ephemeral external IP
    access_config = {}
  }

  can_ip_forward = true
  tags           = ["${var.cluster_name}-controller"]
}

# Controller Cloud-Init
data "template_file" "controller-cloudinit" {
  count = "${var.controller_count}"

  template = "${file("${path.module}/cloudinit/controller.yaml.tmpl")}"

  vars = {
    # Cannot use cyclic dependencies on controllers or their DNS records
    etcd_name   = "etcd${count.index}"
    etcd_domain = "${var.cluster_name}-etcd${count.index}.${var.dns_zone}"

    # etcd0=https://cluster-etcd0.example.com,etcd1=https://cluster-etcd1.example.com,...
    etcd_initial_cluster = "${join(",", formatlist("%s=https://%s:2380", null_resource.repeat.*.triggers.name, null_resource.repeat.*.triggers.domain))}"

    kubeconfig            = "${indent(6, module.bootkube.kubeconfig)}"
    ssh_authorized_key    = "${var.ssh_authorized_key}"
    k8s_dns_service_ip    = "${cidrhost(var.service_cidr, 10)}"
    cluster_domain_suffix = "${var.cluster_domain_suffix}"
  }
}

# Horrible hack to generate a Terraform list of a desired length without dependencies.
# Ideal: ${repeat("etcd", 3)} -> ["etcd", "etcd", "etcd"]
resource "null_resource" "repeat" {
  count = "${var.controller_count}"

  triggers {
    name   = "etcd${count.index}"
    domain = "${var.cluster_name}-etcd${count.index}.${var.dns_zone}"
  }
}
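For a concrete sense of the etcd_initial_cluster interpolation above: with controller_count = 3, cluster_name = "yavin", and dns_zone = "example.com" (illustrative values matching the inline comment), it renders:

etcd0=https://yavin-etcd0.example.com:2380,etcd1=https://yavin-etcd1.example.com:2380,etcd2=https://yavin-etcd2.example.com:2380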
google-cloud/fedora-atomic/kubernetes/network.tf (Normal file, 149 lines)
@@ -0,0 +1,149 @@
resource "google_compute_network" "network" {
  name                    = "${var.cluster_name}"
  description             = "Network for the ${var.cluster_name} cluster"
  auto_create_subnetworks = true
}

resource "google_compute_firewall" "allow-ssh" {
  name    = "${var.cluster_name}-allow-ssh"
  network = "${google_compute_network.network.name}"

  allow {
    protocol = "tcp"
    ports    = [22]
  }

  source_ranges = ["0.0.0.0/0"]
  target_tags   = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
}

resource "google_compute_firewall" "allow-apiserver" {
  name    = "${var.cluster_name}-allow-apiserver"
  network = "${google_compute_network.network.name}"

  allow {
    protocol = "tcp"
    ports    = [443]
  }

  source_ranges = ["0.0.0.0/0"]
  target_tags   = ["${var.cluster_name}-controller"]
}

resource "google_compute_firewall" "allow-ingress" {
  name    = "${var.cluster_name}-allow-ingress"
  network = "${google_compute_network.network.name}"

  allow {
    protocol = "tcp"
    ports    = [80, 443]
  }

  source_ranges = ["0.0.0.0/0"]
  target_tags   = ["${var.cluster_name}-worker"]
}

resource "google_compute_firewall" "internal-etcd" {
  name    = "${var.cluster_name}-internal-etcd"
  network = "${google_compute_network.network.name}"

  allow {
    protocol = "tcp"
    ports    = [2380]
  }

  source_tags = ["${var.cluster_name}-controller"]
  target_tags = ["${var.cluster_name}-controller"]
}

# Allow Prometheus to scrape etcd metrics
resource "google_compute_firewall" "internal-etcd-metrics" {
  name    = "${var.cluster_name}-internal-etcd-metrics"
  network = "${google_compute_network.network.name}"

  allow {
    protocol = "tcp"
    ports    = [2381]
  }

  source_tags = ["${var.cluster_name}-worker"]
  target_tags = ["${var.cluster_name}-controller"]
}

# Calico BGP and IPIP
# https://docs.projectcalico.org/v2.5/reference/public-cloud/gce
resource "google_compute_firewall" "internal-calico" {
  count = "${var.networking == "calico" ? 1 : 0}"

  name    = "${var.cluster_name}-internal-calico"
  network = "${google_compute_network.network.name}"

  allow {
    protocol = "tcp"
    ports    = ["179"]
  }

  allow {
    protocol = "ipip"
  }

  source_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
  target_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
}

# flannel
resource "google_compute_firewall" "internal-flannel" {
  count = "${var.networking == "flannel" ? 1 : 0}"

  name    = "${var.cluster_name}-internal-flannel"
  network = "${google_compute_network.network.name}"

  allow {
    protocol = "udp"
    ports    = [8472]
  }

  source_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
  target_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
}

# Allow Prometheus to scrape node-exporter daemonset
resource "google_compute_firewall" "internal-node-exporter" {
  name    = "${var.cluster_name}-internal-node-exporter"
  network = "${google_compute_network.network.name}"

  allow {
    protocol = "tcp"
    ports    = [9100]
  }

  source_tags = ["${var.cluster_name}-worker"]
  target_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
}

# kubelet API to allow kubectl exec and kubectl logs
resource "google_compute_firewall" "internal-kubelet" {
  name    = "${var.cluster_name}-internal-kubelet"
  network = "${google_compute_network.network.name}"

  allow {
    protocol = "tcp"
    ports    = [10250]
  }

  source_tags = ["${var.cluster_name}-controller"]
  target_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
}

resource "google_compute_firewall" "internal-kubelet-readonly" {
  name    = "${var.cluster_name}-internal-kubelet-readonly"
  network = "${google_compute_network.network.name}"

  allow {
    protocol = "tcp"
    ports    = [10255]
  }

  source_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
  target_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
}
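Since every rule above is named "${var.cluster_name}-...", the applied set can be sanity-checked with a standard gcloud name filter (cluster name illustrative):

gcloud compute firewall-rules list --filter="name~'yavin-'"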
google-cloud/fedora-atomic/kubernetes/outputs.tf (Normal file, 17 lines)
@@ -0,0 +1,17 @@
output "ingress_static_ip" {
  value = "${module.workers.ingress_static_ip}"
}

output "network_self_link" {
  value = "${google_compute_network.network.self_link}"
}

# Outputs for worker pools

output "network_name" {
  value = "${google_compute_network.network.name}"
}

output "kubeconfig" {
  value = "${module.bootkube.kubeconfig}"
}
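ingress_static_ip is exposed so applications can be pointed at the worker Network Load Balancer. A hedged sketch of wiring an app's DNS record to it (zone, names, and the module label from the earlier sketch are illustrative):

resource "google_dns_record_set" "app" {
  managed_zone = "example-zone"
  name         = "app.example.com."
  type         = "A"
  ttl          = 300
  rrdatas      = ["${module.google-cloud-yavin.ingress_static_ip}"]
}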
google-cloud/fedora-atomic/kubernetes/require.tf (Normal file, 25 lines)
@@ -0,0 +1,25 @@
# Terraform version and plugin versions

terraform {
  required_version = ">= 0.10.4"
}

provider "google" {
  version = "~> 1.6"
}

provider "local" {
  version = "~> 1.0"
}

provider "null" {
  version = "~> 1.0"
}

provider "template" {
  version = "~> 1.0"
}

provider "tls" {
  version = "~> 1.0"
}
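With versions pinned as above, the standard Terraform workflow fetches matching plugins before planning:

terraform init    # install the pinned providers
terraform plan    # review the cluster resources to be created
terraform apply   # create the cluster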
google-cloud/fedora-atomic/kubernetes/ssh.tf (Normal file, 89 lines)
@@ -0,0 +1,89 @@
# Secure copy etcd TLS assets to controllers.
resource "null_resource" "copy-controller-secrets" {
  count = "${var.controller_count}"

  connection {
    type    = "ssh"
    host    = "${element(local.controllers_ipv4_public, count.index)}"
    user    = "fedora"
    timeout = "15m"
  }

  provisioner "file" {
    content     = "${module.bootkube.etcd_ca_cert}"
    destination = "$HOME/etcd-client-ca.crt"
  }

  provisioner "file" {
    content     = "${module.bootkube.etcd_client_cert}"
    destination = "$HOME/etcd-client.crt"
  }

  provisioner "file" {
    content     = "${module.bootkube.etcd_client_key}"
    destination = "$HOME/etcd-client.key"
  }

  provisioner "file" {
    content     = "${module.bootkube.etcd_server_cert}"
    destination = "$HOME/etcd-server.crt"
  }

  provisioner "file" {
    content     = "${module.bootkube.etcd_server_key}"
    destination = "$HOME/etcd-server.key"
  }

  provisioner "file" {
    content     = "${module.bootkube.etcd_peer_cert}"
    destination = "$HOME/etcd-peer.crt"
  }

  provisioner "file" {
    content     = "${module.bootkube.etcd_peer_key}"
    destination = "$HOME/etcd-peer.key"
  }

  provisioner "remote-exec" {
    inline = [
      "sudo mkdir -p /etc/ssl/etcd/etcd",
      "sudo mv etcd-client* /etc/ssl/etcd/",
      "sudo cp /etc/ssl/etcd/etcd-client-ca.crt /etc/ssl/etcd/etcd/server-ca.crt",
      "sudo mv etcd-server.crt /etc/ssl/etcd/etcd/server.crt",
      "sudo mv etcd-server.key /etc/ssl/etcd/etcd/server.key",
      "sudo cp /etc/ssl/etcd/etcd-client-ca.crt /etc/ssl/etcd/etcd/peer-ca.crt",
      "sudo mv etcd-peer.crt /etc/ssl/etcd/etcd/peer.crt",
      "sudo mv etcd-peer.key /etc/ssl/etcd/etcd/peer.key",
    ]
  }
}

# Secure copy bootkube assets to ONE controller and start bootkube to perform
# one-time self-hosted cluster bootstrapping.
resource "null_resource" "bootkube-start" {
  depends_on = [
    "null_resource.copy-controller-secrets",
    "module.workers",
    "google_dns_record_set.apiserver",
  ]

  connection {
    type    = "ssh"
    host    = "${element(local.controllers_ipv4_public, 0)}"
    user    = "fedora"
    timeout = "15m"
  }

  provisioner "file" {
    source      = "${var.asset_dir}"
    destination = "$HOME/assets"
  }

  provisioner "remote-exec" {
    inline = [
      "while [ ! -f /var/lib/cloud/instance/boot-finished ]; do sleep 4; done",
      "sudo mv $HOME/assets /var/bootkube",
      "sudo systemctl start bootkube",
    ]
  }
}
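bootkube-start runs once on controller 0 and exits when the self-hosted control plane takes over. Progress can be followed with a hedged one-liner (address illustrative):

ssh fedora@controller-0.example.com 'journalctl -f -u bootkube'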
google-cloud/fedora-atomic/kubernetes/variables.tf (Normal file, 104 lines)
@@ -0,0 +1,104 @@
variable "cluster_name" {
  type        = "string"
  description = "Unique cluster name (prepended to dns_zone)"
}

# Google Cloud

variable "region" {
  type        = "string"
  description = "Google Cloud Region (e.g. us-central1, see `gcloud compute regions list`)"
}

variable "dns_zone" {
  type        = "string"
  description = "Google Cloud DNS Zone (e.g. google-cloud.example.com)"
}

variable "dns_zone_name" {
  type        = "string"
  description = "Google Cloud DNS Zone name (e.g. example-zone)"
}

# instances

variable "controller_count" {
  type        = "string"
  default     = "1"
  description = "Number of controllers (i.e. masters)"
}

variable "worker_count" {
  type        = "string"
  default     = "1"
  description = "Number of workers"
}

variable "controller_type" {
  type        = "string"
  default     = "n1-standard-1"
  description = "Machine type for controllers (see `gcloud compute machine-types list`)"
}

variable "worker_type" {
  type        = "string"
  default     = "n1-standard-1"
  description = "Machine type for workers (see `gcloud compute machine-types list`)"
}

variable "os_image" {
  type        = "string"
  description = "Custom Fedora Atomic image"
}

variable "disk_size" {
  type        = "string"
  default     = "40"
  description = "Size of the disk in GB"
}

variable "worker_preemptible" {
  type        = "string"
  default     = "false"
  description = "If enabled, Compute Engine will terminate workers randomly within 24 hours"
}

# configuration

variable "ssh_authorized_key" {
  type        = "string"
  description = "SSH public key for user 'fedora'"
}

variable "asset_dir" {
  description = "Path to a directory where generated assets should be placed (contains secrets)"
  type        = "string"
}

variable "networking" {
  description = "Choice of networking provider (flannel or calico)"
  type        = "string"
  default     = "calico"
}

variable "pod_cidr" {
  description = "CIDR IPv4 range to assign Kubernetes pods"
  type        = "string"
  default     = "10.2.0.0/16"
}

variable "service_cidr" {
  description = <<EOD
CIDR IPv4 range to assign Kubernetes services.
The 1st IP will be reserved for kube_apiserver, the 10th IP will be reserved for kube-dns.
EOD

  type    = "string"
  default = "10.3.0.0/16"
}

variable "cluster_domain_suffix" {
  description = "Queries for domains with the suffix will be answered by kube-dns. Default is cluster.local (e.g. foo.default.svc.cluster.local)"
  type        = "string"
  default     = "cluster.local"
}
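The service_cidr reservations correspond directly to Terraform's cidrhost(), which controllers.tf uses for k8s_dns_service_ip. A worked example with the default range:

# cidrhost("10.3.0.0/16", 1)  = 10.3.0.1   (reserved for kube-apiserver)
# cidrhost("10.3.0.0/16", 10) = 10.3.0.10  (reserved for kube-dns, the k8s_dns_service_ip)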
google-cloud/fedora-atomic/kubernetes/workers.tf (Normal file, 20 lines)
@@ -0,0 +1,20 @@
module "workers" {
  source       = "workers"
  name         = "${var.cluster_name}"
  cluster_name = "${var.cluster_name}"

  # GCE
  region       = "${var.region}"
  network      = "${google_compute_network.network.name}"
  count        = "${var.worker_count}"
  machine_type = "${var.worker_type}"
  os_image     = "${var.os_image}"
  disk_size    = "${var.disk_size}"
  preemptible  = "${var.worker_preemptible}"

  # configuration
  kubeconfig            = "${module.bootkube.kubeconfig}"
  ssh_authorized_key    = "${var.ssh_authorized_key}"
  service_cidr          = "${var.service_cidr}"
  cluster_domain_suffix = "${var.cluster_domain_suffix}"
}
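This internal workers module is also what the network_name and kubeconfig outputs exist for: extra worker pools can reuse a cluster. A hedged sketch of an additional pool (pool name, source ref, and values illustrative, module label from the earlier sketch):

module "yavin-worker-pool" {
  source = "git::https://github.com/poseidon/typhoon//google-cloud/fedora-atomic/kubernetes/workers"

  # cluster outputs feed the pool
  name         = "yavin-pool"
  cluster_name = "yavin"
  region       = "us-central1"
  network      = "${module.google-cloud-yavin.network_name}"
  kubeconfig   = "${module.google-cloud-yavin.kubeconfig}"

  count              = 2
  os_image           = "fedora-atomic-27"
  ssh_authorized_key = "ssh-rsa AAAAB3Nz..."
}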
google-cloud/fedora-atomic/kubernetes/workers/cloudinit/worker.yaml.tmpl (Normal file, 72 lines)
@@ -0,0 +1,72 @@
#cloud-config
write_files:
  - path: /etc/systemd/system/cloud-metadata.service
    content: |
      [Unit]
      Description=Cloud metadata agent
      [Service]
      Type=oneshot
      Environment=OUTPUT=/run/metadata/cloud
      ExecStart=/usr/bin/mkdir -p /run/metadata
      ExecStart=/usr/bin/bash -c 'echo "HOSTNAME_OVERRIDE=$(curl\
        -H "Metadata-Flavor: Google"\
        --url http://metadata.google.internal/computeMetadata/v1/instance/hostname\
        --retry 10)" > $${OUTPUT}'
      [Install]
      WantedBy=multi-user.target
  - path: /etc/systemd/system/kubelet.service.d/10-typhoon.conf
    content: |
      [Unit]
      Requires=cloud-metadata.service
      After=cloud-metadata.service
      Wants=rpc-statd.service
      [Service]
      ExecStartPre=/bin/mkdir -p /opt/cni/bin
      ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
      ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
      ExecStartPre=/bin/mkdir -p /var/lib/cni
      ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins
      ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
      Restart=always
      RestartSec=10
  - path: /etc/kubernetes/kubelet.conf
    content: |
      ARGS="--allow-privileged \
        --anonymous-auth=false \
        --client-ca-file=/etc/kubernetes/ca.crt \
        --cluster_dns=${k8s_dns_service_ip} \
        --cluster_domain=${cluster_domain_suffix} \
        --cni-conf-dir=/etc/kubernetes/cni/net.d \
        --exit-on-lock-contention \
        --kubeconfig=/etc/kubernetes/kubeconfig \
        --lock-file=/var/run/lock/kubelet.lock \
        --network-plugin=cni \
        --node-labels=node-role.kubernetes.io/node \
        --pod-manifest-path=/etc/kubernetes/manifests \
        --volume-plugin-dir=/var/lib/kubelet/volumeplugins"
  - path: /etc/kubernetes/kubeconfig
    permissions: '0644'
    content: |
      ${kubeconfig}
  - path: /etc/selinux/config
    owner: root:root
    permissions: '0644'
    content: |
      SELINUX=permissive
      SELINUXTYPE=targeted
bootcmd:
  - [setenforce, Permissive]
  - [systemctl, disable, firewalld, --now]
runcmd:
  - [systemctl, daemon-reload]
  - [systemctl, enable, cloud-metadata.service]
  - "atomic install --system --name=kubelet quay.io/dghubble/kubelet:8767d4433f7c5a38c55edf6e682efb53fcd06f61"
  - [systemctl, start, --no-block, kubelet.service]
users:
  - default
  - name: fedora
    gecos: Fedora Admin
    sudo: ALL=(ALL) NOPASSWD:ALL
    groups: wheel,adm,systemd-journal,docker
    ssh-authorized-keys:
      - "${ssh_authorized_key}"
google-cloud/fedora-atomic/kubernetes/workers/ingress.tf (Normal file, 45 lines)
@@ -0,0 +1,45 @@
# Static IPv4 address for the Network Load Balancer
resource "google_compute_address" "ingress-ip" {
  name = "${var.name}-ingress-ip"
}

# Network Load Balancer (i.e. forwarding rules)
resource "google_compute_forwarding_rule" "worker-http-lb" {
  name       = "${var.name}-worker-http-rule"
  ip_address = "${google_compute_address.ingress-ip.address}"
  port_range = "80"
  target     = "${google_compute_target_pool.workers.self_link}"
}

resource "google_compute_forwarding_rule" "worker-https-lb" {
  name       = "${var.name}-worker-https-rule"
  ip_address = "${google_compute_address.ingress-ip.address}"
  port_range = "443"
  target     = "${google_compute_target_pool.workers.self_link}"
}

# Network Load Balancer target pool of instances.
resource "google_compute_target_pool" "workers" {
  name = "${var.name}-worker-pool"

  health_checks = [
    "${google_compute_http_health_check.ingress.name}",
  ]

  session_affinity = "NONE"
}

# Ingress HTTP Health Check
resource "google_compute_http_health_check" "ingress" {
  name        = "${var.name}-ingress-health"
  description = "Health check for the Ingress controller's health port"

  timeout_sec        = 5
  check_interval_sec = 5

  healthy_threshold   = 2
  unhealthy_threshold = 4

  port         = 10254
  request_path = "/healthz"
}
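Port 10254 with /healthz matches the default health endpoint of the nginx Ingress controller addon, which this check presumably expects to run on workers. A hedged manual probe from inside the network (worker IP illustrative):

curl http://10.128.0.3:10254/healthz   # expect HTTP 200 once an Ingress controller is running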
google-cloud/fedora-atomic/kubernetes/workers/outputs.tf (Normal file, 3 lines)
@@ -0,0 +1,3 @@
output "ingress_static_ip" {
  value = "${google_compute_address.ingress-ip.address}"
}
google-cloud/fedora-atomic/kubernetes/workers/variables.tf (Normal file, 94 lines)
@@ -0,0 +1,94 @@
variable "name" {
  type        = "string"
  description = "Unique name for the worker pool"
}

variable "cluster_name" {
  type        = "string"
  description = "Must be set to `cluster_name` of cluster"
}

# Google Cloud

variable "region" {
  type        = "string"
  description = "Must be set to `region` of cluster"
}

variable "network" {
  type        = "string"
  description = "Must be set to `network_name` output by cluster"
}

# instances

variable "count" {
  type        = "string"
  default     = "1"
  description = "Number of worker compute instances the instance group should manage"
}

variable "machine_type" {
  type        = "string"
  default     = "n1-standard-1"
  description = "Machine type for compute instances (see `gcloud compute machine-types list`)"
}

variable "os_image" {
  type        = "string"
  description = "Custom Fedora Atomic image"
}

variable "disk_size" {
  type        = "string"
  default     = "40"
  description = "Size of the disk in GB"
}

variable "preemptible" {
  type        = "string"
  default     = "false"
  description = "If enabled, Compute Engine will terminate instances randomly within 24 hours"
}

# configuration

variable "kubeconfig" {
  type        = "string"
  description = "Must be set to `kubeconfig` output by cluster"
}

variable "ssh_authorized_key" {
  type        = "string"
  description = "SSH public key for user 'fedora'"
}

variable "service_cidr" {
  description = <<EOD
CIDR IPv4 range to assign Kubernetes services.
The 1st IP will be reserved for kube_apiserver, the 10th IP will be reserved for kube-dns.
EOD

  type    = "string"
  default = "10.3.0.0/16"
}

variable "cluster_domain_suffix" {
  description = "Queries for domains with the suffix will be answered by kube-dns. Default is cluster.local (e.g. foo.default.svc.cluster.local)"
  type        = "string"
  default     = "cluster.local"
}

# unofficial, undocumented, unsupported, temporary

variable "accelerator_type" {
  type        = "string"
  default     = ""
  description = "Google Compute Engine accelerator type (e.g. nvidia-tesla-k80, see `gcloud compute accelerator-types list`)"
}

variable "accelerator_count" {
  type        = "string"
  default     = "0"
  description = "Number of compute engine accelerators"
}
google-cloud/fedora-atomic/kubernetes/workers/workers.tf (Normal file, 72 lines)
@@ -0,0 +1,72 @@
# Regional managed instance group maintains a homogeneous set of workers that
# span the zones in the region.
resource "google_compute_region_instance_group_manager" "workers" {
  name        = "${var.name}-worker-group"
  description = "Compute instance group of ${var.name} workers"

  # instance name prefix for instances in the group
  base_instance_name = "${var.name}-worker"
  instance_template  = "${google_compute_instance_template.worker.self_link}"
  region             = "${var.region}"

  target_size = "${var.count}"

  # target pool to which instances in the group should be added
  target_pools = [
    "${google_compute_target_pool.workers.self_link}",
  ]
}

resource "google_compute_instance_template" "worker" {
  name_prefix  = "${var.name}-worker-"
  description  = "Worker Instance template"
  machine_type = "${var.machine_type}"

  metadata {
    user-data = "${data.template_file.worker-cloudinit.rendered}"
  }

  scheduling {
    automatic_restart = "${var.preemptible ? false : true}"
    preemptible       = "${var.preemptible}"
  }

  disk {
    auto_delete  = true
    boot         = true
    source_image = "${var.os_image}"
    disk_size_gb = "${var.disk_size}"
  }

  network_interface {
    network = "${var.network}"

    # Ephemeral external IP
    access_config = {}
  }

  can_ip_forward = true
  tags           = ["worker", "${var.cluster_name}-worker", "${var.name}-worker"]

  guest_accelerator {
    count = "${var.accelerator_count}"
    type  = "${var.accelerator_type}"
  }

  lifecycle {
    # To update an Instance Template, Terraform should replace the existing resource
    create_before_destroy = true
  }
}

# Worker Cloud-Init
data "template_file" "worker-cloudinit" {
  template = "${file("${path.module}/cloudinit/worker.yaml.tmpl")}"

  vars = {
    kubeconfig            = "${indent(6, var.kubeconfig)}"
    ssh_authorized_key    = "${var.ssh_authorized_key}"
    k8s_dns_service_ip    = "${cidrhost(var.service_cidr, 10)}"
    cluster_domain_suffix = "${var.cluster_domain_suffix}"
  }
}
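When the template changes, name_prefix plus create_before_destroy lets Terraform swap in a replacement template, and instances created thereafter use it. The group can be inspected with standard gcloud commands (group name and region illustrative):

gcloud compute instance-groups managed list
gcloud compute instance-groups managed list-instances yavin-worker-group --region=us-central1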