Mirror of https://github.com/puppetmaster/typhoon.git
Organize modules by platform and OS distribution
google-cloud/container-linux/workers/cl/worker.yaml.tmpl (new file, 127 lines)
@@ -0,0 +1,127 @@
---
systemd:
  units:
    - name: docker.service
      enable: true
    - name: locksmithd.service
      mask: true
    - name: wait-for-dns.service
      enable: true
      contents: |
        [Unit]
        Description=Wait for DNS entries
        Wants=systemd-resolved.service
        Before=kubelet.service
        [Service]
        Type=oneshot
        RemainAfterExit=true
        ExecStart=/bin/sh -c 'while ! /usr/bin/grep '^[^#[:space:]]' /etc/resolv.conf > /dev/null; do sleep 1; done'
        [Install]
        RequiredBy=kubelet.service
    - name: kubelet.service
      enable: true
      contents: |
        [Unit]
        Description=Kubelet via Hyperkube ACI
        [Service]
        EnvironmentFile=/etc/kubernetes/kubelet.env
        Environment="RKT_RUN_ARGS=--uuid-file-save=/var/run/kubelet-pod.uuid \
          --volume=resolv,kind=host,source=/etc/resolv.conf \
          --mount volume=resolv,target=/etc/resolv.conf \
          --volume var-lib-cni,kind=host,source=/var/lib/cni \
          --mount volume=var-lib-cni,target=/var/lib/cni \
          --volume var-log,kind=host,source=/var/log \
          --mount volume=var-log,target=/var/log"
        ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
        ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
        ExecStartPre=/bin/mkdir -p /etc/kubernetes/checkpoint-secrets
        ExecStartPre=/bin/mkdir -p /etc/kubernetes/inactive-manifests
        ExecStartPre=/bin/mkdir -p /var/lib/cni
        ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
        ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/run/kubelet-pod.uuid
        ExecStart=/usr/lib/coreos/kubelet-wrapper \
          --kubeconfig=/etc/kubernetes/kubeconfig \
          --require-kubeconfig \
          --client-ca-file=/etc/kubernetes/ca.crt \
          --anonymous-auth=false \
          --cni-conf-dir=/etc/kubernetes/cni/net.d \
          --network-plugin=cni \
          --lock-file=/var/run/lock/kubelet.lock \
          --exit-on-lock-contention \
          --pod-manifest-path=/etc/kubernetes/manifests \
          --allow-privileged \
          --node-labels=node-role.kubernetes.io/node \
          --cluster_dns=${k8s_dns_service_ip} \
          --cluster_domain=cluster.local
        ExecStop=-/usr/bin/rkt stop --uuid-file=/var/run/kubelet-pod.uuid
        Restart=always
        RestartSec=5
        [Install]
        WantedBy=multi-user.target
    - name: delete-node.service
      enable: true
      contents: |
        [Unit]
        Description=Waiting to delete Kubernetes node on shutdown
        [Service]
        Type=oneshot
        RemainAfterExit=true
        ExecStart=/bin/true
        ExecStop=/etc/kubernetes/delete-node
        [Install]
        WantedBy=multi-user.target
storage:
  files:
    - path: /etc/kubernetes/kubeconfig
      filesystem: root
      mode: 0644
      contents:
        inline: |
          apiVersion: v1
          kind: Config
          clusters:
          - name: local
            cluster:
              server: ${kubeconfig_server}
              certificate-authority-data: ${kubeconfig_ca_cert}
          users:
          - name: kubelet
            user:
              client-certificate-data: ${kubeconfig_kubelet_cert}
              client-key-data: ${kubeconfig_kubelet_key}
          contexts:
          - context:
              cluster: local
              user: kubelet
    - path: /etc/kubernetes/kubelet.env
      filesystem: root
      mode: 0644
      contents:
        inline: |
          KUBELET_IMAGE_URL=quay.io/coreos/hyperkube
          KUBELET_IMAGE_TAG=v1.7.1_coreos.0
    - path: /etc/sysctl.d/max-user-watches.conf
      filesystem: root
      contents:
        inline: |
          fs.inotify.max_user_watches=16184
    - path: /etc/kubernetes/delete-node
      filesystem: root
      mode: 0744
      contents:
        inline: |
          #!/bin/bash
          set -e
          exec /usr/bin/rkt run \
            --trust-keys-from-https \
            --volume config,kind=host,source=/etc/kubernetes \
            --mount volume=config,target=/etc/kubernetes \
            quay.io/coreos/hyperkube:v1.7.1_coreos.0 \
            --net=host \
            --dns=host \
            --exec=/kubectl -- --kubeconfig=/etc/kubernetes/kubeconfig delete node $(hostname)
passwd:
  users:
    - name: core
      ssh_authorized_keys:
        - "${ssh_authorized_key}"
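Note that worker.yaml.tmpl is a Container Linux Config, not raw Ignition. workers.tf below renders it with template_file and transpiles it through the ct_config data source, which comes from the external terraform-provider-ct plugin rather than core Terraform. A hypothetical ~/.terraformrc registration for that plugin (the binary path is illustrative) might look like:

# ~/.terraformrc: registers the third-party ct provider plugin
# (point the path at wherever terraform-provider-ct is installed)
providers {
  ct = "/usr/local/bin/terraform-provider-ct"
}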
google-cloud/container-linux/workers/network.tf (new file, 45 lines)
@@ -0,0 +1,45 @@
# Static IP for the Network Load Balancer
resource "google_compute_address" "ingress-ip" {
  name = "${var.cluster_name}-ingress-ip"
}

# Network Load Balancer (i.e. forwarding rules)
resource "google_compute_forwarding_rule" "worker-http-lb" {
  name       = "${var.cluster_name}-worker-http-rule"
  ip_address = "${google_compute_address.ingress-ip.address}"
  port_range = "80"
  target     = "${google_compute_target_pool.workers.self_link}"
}

resource "google_compute_forwarding_rule" "worker-https-lb" {
  name       = "${var.cluster_name}-worker-https-rule"
  ip_address = "${google_compute_address.ingress-ip.address}"
  port_range = "443"
  target     = "${google_compute_target_pool.workers.self_link}"
}

# Network Load Balancer target pool of instances
resource "google_compute_target_pool" "workers" {
  name = "${var.cluster_name}-worker-pool"

  health_checks = [
    "${google_compute_http_health_check.ingress.name}",
  ]

  session_affinity = "NONE"
}

# Ingress HTTP Health Check
resource "google_compute_http_health_check" "ingress" {
  name        = "${var.cluster_name}-ingress-health"
  description = "Health check for the Ingress controller's health host port"

  timeout_sec        = 5
  check_interval_sec = 5

  healthy_threshold   = 2
  unhealthy_threshold = 4

  port         = 10254
  request_path = "/healthz"
}
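The target pool health check above probes each worker directly on port 10254, the Ingress controller's health endpoint, so the network must admit those probes. A minimal sketch of a permitting rule, assuming Google's documented health-check source ranges and the "worker" tag applied in workers.tf (the rule name and resource are illustrative, not part of this commit):

resource "google_compute_firewall" "allow-ingress-health" {
  name    = "${var.cluster_name}-allow-ingress-health"
  network = "${var.network}"

  allow {
    protocol = "tcp"
    ports    = ["10254"]
  }

  # Network LB health checks originate from these Google source ranges (assumed)
  source_ranges = ["209.85.152.0/22", "209.85.204.0/22", "35.191.0.0/16"]
  target_tags   = ["worker"]
}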
google-cloud/container-linux/workers/outputs.tf (new file, 3 lines)
@@ -0,0 +1,3 @@
output "ingress_static_ip" {
  value = "${google_compute_address.ingress-ip.address}"
}
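The static ingress IP is exported so callers can point DNS at the load balancer. For illustration only, a consumer with a Google Cloud DNS managed zone (zone name and domain are hypothetical) could record it like so:

resource "google_dns_record_set" "ingress" {
  # "example-zone" and the domain are placeholders, not part of this commit
  managed_zone = "example-zone"
  name         = "ingress.example.com."
  type         = "A"
  ttl          = 300
  rrdatas      = ["${module.workers.ingress_static_ip}"]
}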
google-cloud/container-linux/workers/variables.tf (new file, 82 lines)
@@ -0,0 +1,82 @@
variable "cluster_name" {
  type        = "string"
  description = "Unique cluster name"
}

variable "ssh_authorized_key" {
  type        = "string"
  description = "SSH public key for logging in as user 'core'"
}

variable "network" {
  type        = "string"
  description = "Name of the network to attach to the compute instance interfaces"
}

# instances

variable "count" {
  type        = "string"
  description = "Number of worker compute instances the instance group should manage"
}

variable "zone" {
  type        = "string"
  description = "Google zone that compute instances in the group should be created in (e.g. gcloud compute zones list)"
}

variable "machine_type" {
  type        = "string"
  description = "Machine type for compute instances (e.g. gcloud compute machine-types list)"
}

variable "os_image" {
  type        = "string"
  description = "OS image from which to initialize the disk (e.g. gcloud compute images list)"
}

variable "disk_size" {
  type        = "string"
  default     = "40"
  description = "The size of the disk in gigabytes."
}

variable "preemptible" {
  type        = "string"
  default     = "false"
  description = "If enabled, Compute Engine will terminate instances randomly within 24 hours"
}

# configuration

variable "service_cidr" {
  description = <<EOD
CIDR IP range to assign Kubernetes services.
The 1st IP will be reserved for kube_apiserver, the 10th IP will be reserved for kube-dns, the 15th IP will be reserved for self-hosted etcd, and the 200th IP will be reserved for bootstrap self-hosted etcd.
EOD

  type    = "string"
  default = "10.3.0.0/16"
}

# kubeconfig

variable "kubeconfig_ca_cert" {
  type        = "string"
  description = "Generated kubeconfig CA certificate"
}

variable "kubeconfig_kubelet_cert" {
  type        = "string"
  description = "Generated kubeconfig kubelet certificate"
}

variable "kubeconfig_kubelet_key" {
  type        = "string"
  description = "Generated kubeconfig kubelet private key"
}

variable "kubeconfig_server" {
  type        = "string"
  description = "Generated kubeconfig server"
}
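To make the service_cidr reservations concrete: with the default 10.3.0.0/16, Terraform's cidrhost() yields 10.3.0.1 (kube_apiserver), 10.3.0.10 (kube-dns), 10.3.0.15 (self-hosted etcd), and 10.3.0.200 (bootstrap etcd). A throwaway sketch showing the same arithmetic (output names are illustrative):

# Reserved service IPs computed from the default CIDR
output "kube_apiserver_ip" {
  value = "${cidrhost("10.3.0.0/16", 1)}" # 10.3.0.1
}

output "kube_dns_ip" {
  value = "${cidrhost("10.3.0.0/16", 10)}" # 10.3.0.10
}

output "etcd_service_ip" {
  value = "${cidrhost("10.3.0.0/16", 15)}" # 10.3.0.15
}

output "bootstrap_etcd_ip" {
  value = "${cidrhost("10.3.0.0/16", 200)}" # 10.3.0.200
}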
google-cloud/container-linux/workers/workers.tf (new file, 87 lines)
@@ -0,0 +1,87 @@
# Managed Instance Group
resource "google_compute_instance_group_manager" "workers" {
  name        = "${var.cluster_name}-worker-group"
  description = "Compute instance group of ${var.cluster_name} workers"

  # Instance name prefix for instances in the group
  base_instance_name = "${var.cluster_name}-worker"
  instance_template  = "${google_compute_instance_template.worker.self_link}"
  update_strategy    = "RESTART"
  zone               = "${var.zone}"
  target_size        = "${var.count}"

  # Target pool instances in the group should be added into
  target_pools = [
    "${google_compute_target_pool.workers.self_link}",
  ]
}

# Worker Container Linux Config
data "template_file" "worker_config" {
  template = "${file("${path.module}/cl/worker.yaml.tmpl")}"

  vars = {
    k8s_dns_service_ip      = "${cidrhost(var.service_cidr, 10)}"
    k8s_etcd_service_ip     = "${cidrhost(var.service_cidr, 15)}"
    ssh_authorized_key      = "${var.ssh_authorized_key}"
    kubeconfig_ca_cert      = "${var.kubeconfig_ca_cert}"
    kubeconfig_kubelet_cert = "${var.kubeconfig_kubelet_cert}"
    kubeconfig_kubelet_key  = "${var.kubeconfig_kubelet_key}"
    kubeconfig_server       = "${var.kubeconfig_server}"
  }
}

data "ct_config" "worker_ign" {
  content      = "${data.template_file.worker_config.rendered}"
  pretty_print = false
}

resource "google_compute_instance_template" "worker" {
  name_prefix  = "${var.cluster_name}-worker-"
  description  = "Worker Instance template"
  machine_type = "${var.machine_type}"

  metadata {
    user-data = "${data.ct_config.worker_ign.rendered}"
  }

  scheduling {
    automatic_restart = "${var.preemptible ? false : true}"
    preemptible       = "${var.preemptible}"
  }

  # QUIRK: Undocumented field defaults to true if not set
  automatic_restart = "${var.preemptible ? false : true}"

  disk {
    auto_delete  = true
    boot         = true
    source_image = "${var.os_image}"
    disk_size_gb = "${var.disk_size}"
  }

  network_interface {
    network = "${var.network}"

    # Ephemeral external IP
    access_config = {}
  }

  can_ip_forward = true

  service_account {
    scopes = [
      "storage-ro",
      "compute-rw",
      "datastore",
      "userinfo-email",
    ]
  }

  tags = ["worker"]

  lifecycle {
    # To update an Instance Template, Terraform should replace the existing resource
    create_before_destroy = true
  }
}
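Because this commit organizes modules by platform and OS distribution, consumers source the workers module from its new google-cloud/container-linux/workers path. A hypothetical instantiation (the ref and all variable values are illustrative, not from this commit):

module "workers" {
  source = "git::https://github.com/puppetmaster/typhoon.git//google-cloud/container-linux/workers"

  cluster_name       = "example"
  ssh_authorized_key = "ssh-rsa AAAA..."
  network            = "example-network"

  count        = "2"
  zone         = "us-central1-c"
  machine_type = "n1-standard-1"
  os_image     = "coreos-stable"

  # kubeconfig material generated elsewhere (e.g. by a cluster/bootstrap module)
  kubeconfig_ca_cert      = "..."
  kubeconfig_kubelet_cert = "..."
  kubeconfig_kubelet_key  = "..."
  kubeconfig_server       = "https://..."
}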