Mirror of https://github.com/puppetmaster/typhoon.git (synced 2025-07-31 16:01:35 +02:00)
Add Google Cloud fedora-atomic module

* Network load balancer for ingress doesn't work yet because Compute Engine packages are missing
* port-forward / socat is broken
google-cloud/fedora-atomic/kubernetes/workers/cloudinit/worker.yaml.tmpl (new file, 72 lines)
@@ -0,0 +1,72 @@
#cloud-config
write_files:
  - path: /etc/systemd/system/cloud-metadata.service
    content: |
      [Unit]
      Description=Cloud metadata agent
      [Service]
      Type=oneshot
      Environment=OUTPUT=/run/metadata/cloud
      ExecStart=/usr/bin/mkdir -p /run/metadata
      ExecStart=/usr/bin/bash -c 'echo "HOSTNAME_OVERRIDE=$(curl\
        -H "Metadata-Flavor: Google"\
        --url http://metadata.google.internal/computeMetadata/v1/instance/hostname\
        --retry 10)" > $${OUTPUT}'
      [Install]
      WantedBy=multi-user.target
  - path: /etc/systemd/system/kubelet.service.d/10-typhoon.conf
    content: |
      [Unit]
      Requires=cloud-metadata.service
      After=cloud-metadata.service
      Wants=rpc-statd.service
      [Service]
      ExecStartPre=/bin/mkdir -p /opt/cni/bin
      ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
      ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
      ExecStartPre=/bin/mkdir -p /var/lib/cni
      ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins
      ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
      Restart=always
      RestartSec=10
  - path: /etc/kubernetes/kubelet.conf
    content: |
      ARGS="--allow-privileged \
      --anonymous-auth=false \
      --client-ca-file=/etc/kubernetes/ca.crt \
      --cluster_dns=${k8s_dns_service_ip} \
      --cluster_domain=${cluster_domain_suffix} \
      --cni-conf-dir=/etc/kubernetes/cni/net.d \
      --exit-on-lock-contention \
      --kubeconfig=/etc/kubernetes/kubeconfig \
      --lock-file=/var/run/lock/kubelet.lock \
      --network-plugin=cni \
      --node-labels=node-role.kubernetes.io/node \
      --pod-manifest-path=/etc/kubernetes/manifests \
      --volume-plugin-dir=/var/lib/kubelet/volumeplugins"
  - path: /etc/kubernetes/kubeconfig
    permissions: '0644'
    content: |
      ${kubeconfig}
  - path: /etc/selinux/config
    owner: root:root
    permissions: '0644'
    content: |
      SELINUX=permissive
      SELINUXTYPE=targeted
bootcmd:
  - [setenforce, Permissive]
  - [systemctl, disable, firewalld, --now]
runcmd:
  - [systemctl, daemon-reload]
  - [systemctl, enable, cloud-metadata.service]
  - "atomic install --system --name=kubelet quay.io/dghubble/kubelet:8767d4433f7c5a38c55edf6e682efb53fcd06f61"
  - [systemctl, start, --no-block, kubelet.service]
users:
  - default
  - name: fedora
    gecos: Fedora Admin
    sudo: ALL=(ALL) NOPASSWD:ALL
    groups: wheel,adm,systemd-journal,docker
    ssh-authorized-keys:
      - "${ssh_authorized_key}"

google-cloud/fedora-atomic/kubernetes/workers/ingress.tf (new file, 45 lines)
@@ -0,0 +1,45 @@
# Static IPv4 address for the Network Load Balancer
resource "google_compute_address" "ingress-ip" {
  name = "${var.name}-ingress-ip"
}

# Network Load Balancer (i.e. forwarding rules)
resource "google_compute_forwarding_rule" "worker-http-lb" {
  name       = "${var.name}-worker-http-rule"
  ip_address = "${google_compute_address.ingress-ip.address}"
  port_range = "80"
  target     = "${google_compute_target_pool.workers.self_link}"
}

resource "google_compute_forwarding_rule" "worker-https-lb" {
  name       = "${var.name}-worker-https-rule"
  ip_address = "${google_compute_address.ingress-ip.address}"
  port_range = "443"
  target     = "${google_compute_target_pool.workers.self_link}"
}

# Network Load Balancer target pool of instances.
resource "google_compute_target_pool" "workers" {
  name = "${var.name}-worker-pool"

  health_checks = [
    "${google_compute_http_health_check.ingress.name}",
  ]

  session_affinity = "NONE"
}

# Ingress HTTP Health Check
resource "google_compute_http_health_check" "ingress" {
  name        = "${var.name}-ingress-health"
  description = "Health check for the Ingress controller health host port"

  timeout_sec        = 5
  check_interval_sec = 5

  healthy_threshold   = 2
  unhealthy_threshold = 4

  port         = 10254
  request_path = "/healthz"
}
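
In practice the static address above gets a DNS record. A sketch, assuming a record defined alongside these resources in a Cloud DNS managed zone; the zone name `example-zone` and domain are invented for illustration:

# Sketch: wildcard record routing Ingress hostnames to the NLB address.
# "example-zone" and "example.com" are assumptions, not defined by this commit.
resource "google_dns_record_set" "ingress-wildcard" {
  managed_zone = "example-zone"
  name         = "*.example.com."
  type         = "A"
  ttl          = 300
  rrdatas      = ["${google_compute_address.ingress-ip.address}"]
}

Per the commit message, the load balancer does not pass traffic yet, so such a record would resolve but not serve.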

google-cloud/fedora-atomic/kubernetes/workers/outputs.tf (new file, 3 lines)
@@ -0,0 +1,3 @@
output "ingress_static_ip" {
  value = "${google_compute_address.ingress-ip.address}"
}
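
A caller reads this value through the module reference; a minimal sketch, assuming the pool is instantiated as `module "workers"`:

# Sketch: surface the pool's ingress IP from a root configuration.
# The module name "workers" is an assumption.
output "worker_ingress_ip" {
  value = "${module.workers.ingress_static_ip}"
}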

google-cloud/fedora-atomic/kubernetes/workers/variables.tf (new file, 94 lines)
@@ -0,0 +1,94 @@
variable "name" {
  type        = "string"
  description = "Unique name for the worker pool"
}

variable "cluster_name" {
  type        = "string"
  description = "Must be set to `cluster_name` of cluster"
}

# Google Cloud

variable "region" {
  type        = "string"
  description = "Must be set to `region` of cluster"
}

variable "network" {
  type        = "string"
  description = "Must be set to `network_name` output by cluster"
}

# instances

variable "count" {
  type        = "string"
  default     = "1"
  description = "Number of worker compute instances the instance group should manage"
}

variable "machine_type" {
  type        = "string"
  default     = "n1-standard-1"
  description = "Machine type for compute instances (e.g. gcloud compute machine-types list)"
}

variable "os_image" {
  type        = "string"
  description = "Custom Fedora Atomic image"
}

variable "disk_size" {
  type        = "string"
  default     = "40"
  description = "Size of the disk in GB"
}

variable "preemptible" {
  type        = "string"
  default     = "false"
  description = "If enabled, Compute Engine will terminate instances randomly within 24 hours"
}

# configuration

variable "kubeconfig" {
  type        = "string"
  description = "Must be set to `kubeconfig` output by cluster"
}

variable "ssh_authorized_key" {
  type        = "string"
  description = "SSH public key for user 'fedora'"
}

variable "service_cidr" {
  description = <<EOD
CIDR IPv4 range to assign Kubernetes services.
The 1st IP will be reserved for kube_apiserver, the 10th IP will be reserved for kube-dns.
EOD

  type    = "string"
  default = "10.3.0.0/16"
}

variable "cluster_domain_suffix" {
  description = "Queries for domains with the suffix will be answered by kube-dns. Default is cluster.local (e.g. foo.default.svc.cluster.local)"
  type        = "string"
  default     = "cluster.local"
}

# unofficial, undocumented, unsupported, temporary

variable "accelerator_type" {
  type        = "string"
  default     = ""
  description = "Google Compute Engine accelerator type (e.g. nvidia-tesla-k80, see gcloud compute accelerator-types list)"
}

variable "accelerator_count" {
  type        = "string"
  default     = "0"
  description = "Number of compute engine accelerators"
}
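
For orientation, a minimal sketch of instantiating the pool with these variables; the source path, the `gcp-cluster` module, and all literal values are assumptions for illustration:

# Sketch: attach a worker pool to an existing cluster.
# "./workers", "gcp-cluster", and the literal values are hypothetical.
module "worker-pool" {
  source = "./workers"

  name         = "pool-1"
  cluster_name = "example"
  region       = "us-central1"
  network      = "${module.gcp-cluster.network_name}"

  count    = "2"
  os_image = "fedora-atomic-27"  # placeholder custom image name

  kubeconfig         = "${module.gcp-cluster.kubeconfig}"
  ssh_authorized_key = "ssh-rsa AAAA... user@example"
}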

google-cloud/fedora-atomic/kubernetes/workers/workers.tf (new file, 72 lines)
@@ -0,0 +1,72 @@
# Regional managed instance group maintains a homogeneous set of workers that
# span the zones in the region.
resource "google_compute_region_instance_group_manager" "workers" {
  name        = "${var.name}-worker-group"
  description = "Compute instance group of ${var.name} workers"

  # instance name prefix for instances in the group
  base_instance_name = "${var.name}-worker"
  instance_template  = "${google_compute_instance_template.worker.self_link}"
  region             = "${var.region}"

  target_size = "${var.count}"

  # target pool to which instances in the group should be added
  target_pools = [
    "${google_compute_target_pool.workers.self_link}",
  ]
}

resource "google_compute_instance_template" "worker" {
  name_prefix  = "${var.name}-worker-"
  description  = "Worker Instance template"
  machine_type = "${var.machine_type}"

  metadata {
    user-data = "${data.template_file.worker-cloudinit.rendered}"
  }

  scheduling {
    automatic_restart = "${var.preemptible ? false : true}"
    preemptible       = "${var.preemptible}"
  }

  disk {
    auto_delete  = true
    boot         = true
    source_image = "${var.os_image}"
    disk_size_gb = "${var.disk_size}"
  }

  network_interface {
    network = "${var.network}"

    # Ephemeral external IP
    access_config = {}
  }

  can_ip_forward = true
  tags           = ["worker", "${var.cluster_name}-worker", "${var.name}-worker"]

  guest_accelerator {
    count = "${var.accelerator_count}"
    type  = "${var.accelerator_type}"
  }

  lifecycle {
    # To update an Instance Template, Terraform should replace the existing resource
    create_before_destroy = true
  }
}

# Worker Cloud-Init
data "template_file" "worker-cloudinit" {
  template = "${file("${path.module}/cloudinit/worker.yaml.tmpl")}"

  vars = {
    kubeconfig            = "${indent(6, var.kubeconfig)}"
    ssh_authorized_key    = "${var.ssh_authorized_key}"
    k8s_dns_service_ip    = "${cidrhost(var.service_cidr, 10)}"
    cluster_domain_suffix = "${var.cluster_domain_suffix}"
  }
}
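
The `indent(6, var.kubeconfig)` call is what lets a multi-line kubeconfig nest under the template's six-space-deep `content: |` block: it prefixes every line after the first with six spaces. A tiny illustration (the output name is arbitrary):

# Sketch: indent() pads all lines after the first with N spaces.
# This renders as "line-1\n      line-2", matching the YAML nesting depth.
output "indent_demo" {
  value = "${indent(6, "line-1\nline-2")}"
}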