Mirror of https://github.com/puppetmaster/typhoon.git (synced 2025-07-22 18:21:33 +02:00)

Commit: Add module for Typhoon Azure with Container Linux
azure/container-linux/kubernetes/workers/cl/worker.yaml.tmpl (new file, 121 lines)
@@ -0,0 +1,121 @@
---
systemd:
  units:
    - name: docker.service
      enable: true
    - name: locksmithd.service
      mask: true
    - name: wait-for-dns.service
      enable: true
      contents: |
        [Unit]
        Description=Wait for DNS entries
        Wants=systemd-resolved.service
        Before=kubelet.service
        [Service]
        Type=oneshot
        RemainAfterExit=true
        ExecStart=/bin/sh -c 'while ! /usr/bin/grep '^[^#[:space:]]' /etc/resolv.conf > /dev/null; do sleep 1; done'
        [Install]
        RequiredBy=kubelet.service
    - name: kubelet.service
      enable: true
      contents: |
        [Unit]
        Description=Kubelet via Hyperkube
        Wants=rpc-statd.service
        [Service]
        EnvironmentFile=/etc/kubernetes/kubelet.env
        Environment="RKT_RUN_ARGS=--uuid-file-save=/var/cache/kubelet-pod.uuid \
          --volume=resolv,kind=host,source=/etc/resolv.conf \
          --mount volume=resolv,target=/etc/resolv.conf \
          --volume var-lib-cni,kind=host,source=/var/lib/cni \
          --mount volume=var-lib-cni,target=/var/lib/cni \
          --volume var-lib-calico,kind=host,source=/var/lib/calico \
          --mount volume=var-lib-calico,target=/var/lib/calico \
          --volume opt-cni-bin,kind=host,source=/opt/cni/bin \
          --mount volume=opt-cni-bin,target=/opt/cni/bin \
          --volume var-log,kind=host,source=/var/log \
          --mount volume=var-log,target=/var/log \
          --insecure-options=image"
        ExecStartPre=/bin/mkdir -p /opt/cni/bin
        ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
        ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
        ExecStartPre=/bin/mkdir -p /var/lib/cni
        ExecStartPre=/bin/mkdir -p /var/lib/calico
        ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins
        ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
        ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/cache/kubelet-pod.uuid
        ExecStart=/usr/lib/coreos/kubelet-wrapper \
          --anonymous-auth=false \
          --authentication-token-webhook \
          --authorization-mode=Webhook \
          --client-ca-file=/etc/kubernetes/ca.crt \
          --cluster_dns=${k8s_dns_service_ip} \
          --cluster_domain=${cluster_domain_suffix} \
          --cni-conf-dir=/etc/kubernetes/cni/net.d \
          --exit-on-lock-contention \
          --kubeconfig=/etc/kubernetes/kubeconfig \
          --lock-file=/var/run/lock/kubelet.lock \
          --network-plugin=cni \
          --node-labels=node-role.kubernetes.io/node \
          --pod-manifest-path=/etc/kubernetes/manifests \
          --volume-plugin-dir=/var/lib/kubelet/volumeplugins
        ExecStop=-/usr/bin/rkt stop --uuid-file=/var/cache/kubelet-pod.uuid
        Restart=always
        RestartSec=5
        [Install]
        WantedBy=multi-user.target
    - name: delete-node.service
      enable: true
      contents: |
        [Unit]
        Description=Waiting to delete Kubernetes node on shutdown
        [Service]
        Type=oneshot
        RemainAfterExit=true
        ExecStart=/bin/true
        ExecStop=/etc/kubernetes/delete-node
        [Install]
        WantedBy=multi-user.target
storage:
  files:
    - path: /etc/kubernetes/kubeconfig
      filesystem: root
      mode: 0644
      contents:
        inline: |
          ${kubeconfig}
    - path: /etc/kubernetes/kubelet.env
      filesystem: root
      mode: 0644
      contents:
        inline: |
          KUBELET_IMAGE_URL=docker://k8s.gcr.io/hyperkube
          KUBELET_IMAGE_TAG=v1.11.2
    - path: /etc/sysctl.d/max-user-watches.conf
      filesystem: root
      contents:
        inline: |
          fs.inotify.max_user_watches=16184
    - path: /etc/kubernetes/delete-node
      filesystem: root
      mode: 0744
      contents:
        inline: |
          #!/bin/bash
          set -e
          exec /usr/bin/rkt run \
            --trust-keys-from-https \
            --volume config,kind=host,source=/etc/kubernetes \
            --mount volume=config,target=/etc/kubernetes \
            --insecure-options=image \
            docker://k8s.gcr.io/hyperkube:v1.11.2 \
            --net=host \
            --dns=host \
            --exec=/kubectl -- --kubeconfig=/etc/kubernetes/kubeconfig delete node $(hostname)
passwd:
  users:
    - name: core
      ssh_authorized_keys:
        - "${ssh_authorized_key}"
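Note that the `${kubeconfig}`, `${ssh_authorized_key}`, `${k8s_dns_service_ip}`, and `${cluster_domain_suffix}` tokens above are Terraform template interpolations, filled in by the `template_file` data source in workers.tf below; they are not shell or systemd variables. As a minimal sketch (placeholder values, not part of this commit), the template could be rendered standalone like so:

data "template_file" "worker-example" {
  template = "${file("cl/worker.yaml.tmpl")}"

  vars = {
    kubeconfig            = "..."              # placeholder; the real value is the cluster's kubeconfig
    ssh_authorized_key    = "ssh-rsa AAAA..."  # placeholder public key
    k8s_dns_service_ip    = "10.3.0.10"        # cidrhost("10.3.0.0/16", 10)
    cluster_domain_suffix = "cluster.local"
  }
}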
azure/container-linux/kubernetes/workers/ingress.tf (new file, 1 line)
@@ -0,0 +1 @@
(single blank line)
azure/container-linux/kubernetes/workers/variables.tf (new file, 97 lines)
@@ -0,0 +1,97 @@
variable "name" {
  type        = "string"
  description = "Unique name for the worker pool"
}

# Azure

variable "region" {
  type        = "string"
  description = "Must be set to the Azure Region of cluster"
}

variable "resource_group_name" {
  type        = "string"
  description = "Must be set to the resource group name of cluster"
}

variable "subnet_id" {
  type        = "string"
  description = "Must be set to the `worker_subnet_id` output by cluster"
}

variable "security_group_id" {
  type        = "string"
  description = "Must be set to the `worker_security_group_id` output by cluster"
}

variable "backend_address_pool_id" {
  type        = "string"
  description = "Must be set to the `worker_backend_address_pool_id` output by cluster"
}

# instances

variable "count" {
  type        = "string"
  default     = "1"
  description = "Number of instances"
}

variable "vm_type" {
  type        = "string"
  default     = "Standard_DS1_v2"
  description = "Machine type for instances (see `az vm list-skus --location centralus`)"
}

variable "os_image" {
  type        = "string"
  default     = "coreos-stable"
  description = "Channel for a Container Linux derivative (coreos-stable, coreos-beta, coreos-alpha)"
}

variable "disk_size" {
  type        = "string"
  default     = "40"
  description = "Size of the disk in GB"
}

variable "priority" {
  type        = "string"
  default     = "Regular"
  description = "Set priority to Low to use reduced cost surplus capacity, with the tradeoff that instances can be evicted at any time."
}

variable "clc_snippets" {
  type        = "list"
  description = "Container Linux Config snippets"
  default     = []
}

# configuration

variable "kubeconfig" {
  type        = "string"
  description = "Must be set to `kubeconfig` output by cluster"
}

variable "ssh_authorized_key" {
  type        = "string"
  description = "SSH public key for user 'core'"
}

variable "service_cidr" {
  description = <<EOD
CIDR IPv4 range to assign Kubernetes services.
The 1st IP will be reserved for kube_apiserver, the 10th IP will be reserved for coredns.
EOD

  type    = "string"
  default = "10.3.0.0/16"
}

variable "cluster_domain_suffix" {
  description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local)"
  type        = "string"
  default     = "cluster.local"
}
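For context, a hypothetical caller would wire these inputs to a cluster module's outputs roughly as follows. The `worker_*` output names come from the variable descriptions above; the module names, source path, and values here are illustrative only, not part of this commit:

module "worker-pool" {
  source = "path/to/azure/container-linux/kubernetes/workers"

  # Azure
  name                    = "pool-1"
  region                  = "centralus"
  resource_group_name     = "${module.cluster.resource_group_name}"  # assumed output name
  subnet_id               = "${module.cluster.worker_subnet_id}"
  security_group_id       = "${module.cluster.worker_security_group_id}"
  backend_address_pool_id = "${module.cluster.worker_backend_address_pool_id}"

  # instances
  count   = "2"
  vm_type = "Standard_DS1_v2"

  # configuration
  kubeconfig         = "${module.cluster.kubeconfig}"  # assumed output name
  ssh_authorized_key = "ssh-rsa AAAA..."               # placeholder public key
}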
azure/container-linux/kubernetes/workers/workers.tf (new file, 114 lines)
@@ -0,0 +1,114 @@
locals {
  # Channel for a Container Linux derivative
  # coreos-stable -> Container Linux Stable
  channel = "${element(split("-", var.os_image), 1)}"
}

# Workers scale set
resource "azurerm_virtual_machine_scale_set" "workers" {
  resource_group_name = "${var.resource_group_name}"

  name     = "${var.name}-workers"
  location = "${var.region}"

  sku {
    name     = "${var.vm_type}"
    tier     = "standard"
    capacity = "${var.count}"
  }

  # boot
  storage_profile_image_reference {
    publisher = "CoreOS"
    offer     = "CoreOS"
    sku       = "${local.channel}"
    version   = "latest"
  }

  # storage
  storage_profile_os_disk {
    create_option     = "FromImage"
    caching           = "ReadWrite"
    os_type           = "linux"
    managed_disk_type = "Standard_LRS"
  }

  os_profile {
    computer_name_prefix = "${var.name}-worker-"
    admin_username       = "core"

    # Required by Azure, but password auth is disabled below
    admin_password = ""
    custom_data    = "${element(data.ct_config.worker-ignitions.*.rendered, count.index)}"
  }

  # Azure mandates setting an ssh_key, even though Ignition custom_data handles it too
  os_profile_linux_config {
    disable_password_authentication = true

    ssh_keys {
      path     = "/home/core/.ssh/authorized_keys"
      key_data = "${var.ssh_authorized_key}"
    }
  }

  # network
  network_profile {
    name                      = "nic0"
    primary                   = true
    network_security_group_id = "${var.security_group_id}"

    ip_configuration {
      name      = "ip0"
      subnet_id = "${var.subnet_id}"

      # backend address pool to which the NIC should be added
      load_balancer_backend_address_pool_ids = ["${var.backend_address_pool_id}"]
    }
  }

  # lifecycle
  priority            = "${var.priority}"
  upgrade_policy_mode = "Manual"
}

# Scale up or down to maintain desired number, tolerating deallocations.
resource "azurerm_autoscale_setting" "workers" {
  resource_group_name = "${var.resource_group_name}"

  name     = "maintain-desired"
  location = "${var.region}"

  # autoscale
  enabled            = true
  target_resource_id = "${azurerm_virtual_machine_scale_set.workers.id}"

  profile {
    name = "default"

    capacity {
      minimum = "${var.count}"
      default = "${var.count}"
      maximum = "${var.count}"
    }
  }
}

# Worker Ignition configs
data "ct_config" "worker-ignitions" {
  content      = "${data.template_file.worker-configs.rendered}"
  pretty_print = false
  snippets     = ["${var.clc_snippets}"]
}

# Worker Container Linux configs
data "template_file" "worker-configs" {
  template = "${file("${path.module}/cl/worker.yaml.tmpl")}"

  vars = {
    kubeconfig            = "${indent(10, var.kubeconfig)}"
    ssh_authorized_key    = "${var.ssh_authorized_key}"
    k8s_dns_service_ip    = "${cidrhost(var.service_cidr, 10)}"
    cluster_domain_suffix = "${var.cluster_domain_suffix}"
  }
}
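A few of the interpolations above are worth tracing with the defaults from variables.tf (these follow Terraform 0.11's documented `split`/`element`/`cidrhost`/`indent` semantics):

# element(split("-", "coreos-stable"), 1)  => "stable"     (the Azure image SKU in local.channel)
# cidrhost("10.3.0.0/16", 10)              => "10.3.0.10"  (k8s_dns_service_ip passed to the kubelet)
# indent(10, var.kubeconfig) prepends 10 spaces to all but the first line, so the
# kubeconfig nests correctly under `inline: |` in cl/worker.yaml.tmpl.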