Rename container-linux modules to flatcar-linux

* CoreOS Container Linux was deprecated in v1.18.3
* Continue transitioning docs and modules from supporting both the
CoreOS and Flatcar "variants" of Container Linux to supporting
Flatcar Linux and equivalents

Action Required: Update each Flatcar Linux module's `source` to
replace `container-linux` with `flatcar-linux` (i.e.
`s/container-linux/flatcar-linux`), as shown in the example below.
See the docs for more examples.
Dalton Hubble committed on 2020-10-20 22:47:19 -07:00
parent a99a990d49
commit 7c3f3ab6d0
98 changed files with 58 additions and 57 deletions
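
For example, a Google Cloud worker pool module would update its `source` like this (the module name and `ref` below are illustrative; this commit is from the v1.19.3 era):

```tf
module "yavin-worker-pool" {
  # Before
  # source = "git::https://github.com/poseidon/typhoon//google-cloud/container-linux/kubernetes/workers?ref=v1.19.3"

  # After: s/container-linux/flatcar-linux
  source = "git::https://github.com/poseidon/typhoon//google-cloud/flatcar-linux/kubernetes/workers?ref=v1.19.3"

  # ...other arguments unchanged
}
```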


@@ -0,0 +1,115 @@
---
systemd:
  units:
    - name: docker.service
      enabled: true
    - name: locksmithd.service
      mask: true
    - name: wait-for-dns.service
      enabled: true
      contents: |
        [Unit]
        Description=Wait for DNS entries
        Wants=systemd-resolved.service
        Before=kubelet.service
        [Service]
        Type=oneshot
        RemainAfterExit=true
        ExecStart=/bin/sh -c 'while ! /usr/bin/grep '^[^#[:space:]]' /etc/resolv.conf > /dev/null; do sleep 1; done'
        [Install]
        RequiredBy=kubelet.service
    - name: kubelet.service
      enabled: true
      contents: |
        [Unit]
        Description=Kubelet
        Requires=docker.service
        After=docker.service
        Wants=rpc-statd.service
        [Service]
        Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.19.3
        ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
        ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
        ExecStartPre=/bin/mkdir -p /opt/cni/bin
        ExecStartPre=/bin/mkdir -p /var/lib/calico
        ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins
        ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
        # Podman, rkt, or runc run container processes, whereas docker run
        # is a client to a daemon and requires workarounds to use within a
        # systemd unit. https://github.com/moby/moby/issues/6791
        ExecStartPre=/usr/bin/docker run -d \
          --name kubelet \
          --privileged \
          --pid host \
          --network host \
          -v /etc/kubernetes:/etc/kubernetes:ro \
          -v /etc/machine-id:/etc/machine-id:ro \
          -v /usr/lib/os-release:/etc/os-release:ro \
          -v /lib/modules:/lib/modules:ro \
          -v /run:/run \
          -v /sys/fs/cgroup:/sys/fs/cgroup:ro \
          -v /sys/fs/cgroup/systemd:/sys/fs/cgroup/systemd \
          -v /var/lib/calico:/var/lib/calico:ro \
          -v /var/lib/docker:/var/lib/docker \
          -v /var/lib/kubelet:/var/lib/kubelet:rshared \
          -v /var/log:/var/log \
          -v /opt/cni/bin:/opt/cni/bin \
          $${KUBELET_IMAGE} \
          --anonymous-auth=false \
          --authentication-token-webhook \
          --authorization-mode=Webhook \
          --bootstrap-kubeconfig=/etc/kubernetes/kubeconfig \
          --client-ca-file=/etc/kubernetes/ca.crt \
          --cluster_dns=${cluster_dns_service_ip} \
          --cluster_domain=${cluster_domain_suffix} \
          --cni-conf-dir=/etc/kubernetes/cni/net.d \
          --healthz-port=0 \
          --kubeconfig=/var/lib/kubelet/kubeconfig \
          --network-plugin=cni \
          --node-labels=node.kubernetes.io/node \
          %{~ for label in split(",", node_labels) ~}
          --node-labels=${label} \
          %{~ endfor ~}
          --pod-manifest-path=/etc/kubernetes/manifests \
          --read-only-port=0 \
          --rotate-certificates \
          --volume-plugin-dir=/var/lib/kubelet/volumeplugins
        ExecStart=docker logs -f kubelet
        ExecStop=docker stop kubelet
        ExecStopPost=docker rm kubelet
        Restart=always
        RestartSec=5
        [Install]
        WantedBy=multi-user.target
    - name: delete-node.service
      enabled: true
      contents: |
        [Unit]
        Description=Delete Kubernetes node on shutdown
        [Service]
        Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.19.3
        Type=oneshot
        RemainAfterExit=true
        ExecStart=/bin/true
        ExecStop=/bin/bash -c '/usr/bin/docker run -v /var/lib/kubelet:/var/lib/kubelet:ro --entrypoint /usr/local/bin/kubectl $${KUBELET_IMAGE} --kubeconfig=/var/lib/kubelet/kubeconfig delete node $HOSTNAME'
        [Install]
        WantedBy=multi-user.target
storage:
  files:
    - path: /etc/kubernetes/kubeconfig
      filesystem: root
      mode: 0644
      contents:
        inline: |
          ${kubeconfig}
    - path: /etc/sysctl.d/max-user-watches.conf
      filesystem: root
      mode: 0644
      contents:
        inline: |
          fs.inotify.max_user_watches=16184
passwd:
  users:
    - name: core
      ssh_authorized_keys:
        - "${ssh_authorized_key}"


@@ -0,0 +1,14 @@
# Outputs for global load balancing

output "instance_group" {
  description = "Worker managed instance group full URL"
  value       = google_compute_region_instance_group_manager.workers.instance_group
}

# Outputs for regional load balancing

output "target_pool" {
  description = "Worker target pool self link"
  value       = google_compute_target_pool.workers.self_link
}
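
The `instance_group` output is the full instance group URL, which a parent configuration could attach to a global backend service for HTTP(S) load balancing. A minimal sketch, assuming a module instance named `workers` (the backend service and health check below are hypothetical, not part of this commit):

```tf
# Hypothetical health check matching the Ingress controller's /healthz
resource "google_compute_health_check" "workers-http" {
  name = "workers-http"

  http_health_check {
    port         = 10254
    request_path = "/healthz"
  }
}

# Hypothetical backend service consuming the instance_group output
resource "google_compute_backend_service" "workers-http" {
  name          = "workers-http"
  health_checks = [google_compute_health_check.workers-http.self_link]

  backend {
    group = module.workers.instance_group
  }
}
```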


@@ -0,0 +1,23 @@
# Target pool for TCP/UDP load balancing

resource "google_compute_target_pool" "workers" {
  name             = "${var.name}-worker-pool"
  region           = var.region
  session_affinity = "NONE"

  health_checks = [
    google_compute_http_health_check.workers.name,
  ]
}

# HTTP Health Check (for TCP/UDP load balancing)
# Forward rules (regional) to target pools don't support different external
# and internal ports. Health check for nodes with Ingress controllers that
# may support proxying or otherwise satisfy the check.
resource "google_compute_http_health_check" "workers" {
  name        = "${var.name}-target-pool-health"
  description = "Health check for the worker target pool"

  port         = 10254
  request_path = "/healthz"
}
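
The target pool only receives traffic once a regional forwarding rule points at it. A sketch of such a rule, assuming it lives in the same configuration (the rule name is illustrative):

```tf
# Hypothetical regional forwarding rule sending TCP :80 to the worker pool
resource "google_compute_forwarding_rule" "worker-http" {
  name       = "worker-http"
  region     = var.region
  port_range = "80"
  target     = google_compute_target_pool.workers.self_link
}
```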


@@ -0,0 +1,106 @@
variable "name" {
  type        = string
  description = "Unique name for the worker pool"
}

variable "cluster_name" {
  type        = string
  description = "Must be set to `cluster_name` of cluster"
}

# Google Cloud

variable "region" {
  type        = string
  description = "Must be set to `region` of cluster"
}

variable "network" {
  type        = string
  description = "Must be set to `network_name` output by cluster"
}

# instances

variable "worker_count" {
  type        = number
  description = "Number of worker compute instances the instance group should manage"
  default     = 1
}

variable "machine_type" {
  type        = string
  description = "Machine type for compute instances (e.g. gcloud compute machine-types list)"
  default     = "n1-standard-1"
}

variable "os_image" {
  type        = string
  description = "Flatcar Linux image for compute instances (e.g. gcloud compute images list)"
}

variable "disk_size" {
  type        = number
  description = "Size of the disk in GB"
  default     = 40
}

variable "preemptible" {
  type        = bool
  description = "If enabled, Compute Engine will terminate instances randomly within 24 hours"
  default     = false
}

variable "snippets" {
  type        = list(string)
  description = "Container Linux Config snippets"
  default     = []
}

# configuration

variable "kubeconfig" {
  type        = string
  description = "Must be set to `kubeconfig` output by cluster"
}

variable "ssh_authorized_key" {
  type        = string
  description = "SSH public key for user 'core'"
}

variable "service_cidr" {
  type        = string
  description = <<EOD
CIDR IPv4 range to assign Kubernetes services.
The 1st IP will be reserved for kube_apiserver, the 10th IP will be reserved for coredns.
EOD
  default     = "10.3.0.0/16"
}

variable "node_labels" {
  type        = list(string)
  description = "List of initial node labels"
  default     = []
}

# unofficial, undocumented, unsupported, temporary

variable "cluster_domain_suffix" {
  type        = string
  description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local)"
  default     = "cluster.local"
}

variable "accelerator_type" {
  type        = string
  default     = ""
  description = "Google Compute Engine accelerator type (e.g. nvidia-tesla-k80, see gcloud compute accelerator-types list)"
}

variable "accelerator_count" {
  type        = string
  default     = "0"
  description = "Number of compute engine accelerators"
}
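
Together, these variables describe one worker pool. A hedged sketch of how a caller might satisfy them (the module name, cluster outputs, image name, and `ref` are illustrative, not prescribed by this commit):

```tf
module "yavin-worker-pool" {
  source = "git::https://github.com/poseidon/typhoon//google-cloud/flatcar-linux/kubernetes/workers?ref=v1.19.3"

  # Google Cloud
  region       = "us-central1"
  network      = module.yavin.network_name
  cluster_name = "yavin"

  # configuration
  name               = "yavin-pool"
  kubeconfig         = module.yavin.kubeconfig
  ssh_authorized_key = "ssh-rsa AAAAB3NZ..."
  os_image           = "flatcar-linux-2303-4-0" # illustrative uploaded image

  # optional
  worker_count = 2
  machine_type = "n1-standard-1"
}
```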


@@ -0,0 +1,14 @@
# Terraform version and plugin versions

terraform {
  required_version = ">= 0.12.26, < 0.14.0"
  required_providers {
    google   = ">= 2.19, < 4.0"
    template = "~> 2.1"

    ct = {
      source  = "poseidon/ct"
      version = "~> 0.6.1"
    }
  }
}


@@ -0,0 +1,91 @@
# Managed instance group of workers
resource "google_compute_region_instance_group_manager" "workers" {
  name        = "${var.name}-worker-group"
  description = "Compute instance group of ${var.name} workers"

  # instance name prefix for instances in the group
  base_instance_name = "${var.name}-worker"
  region             = var.region

  version {
    name              = "default"
    instance_template = google_compute_instance_template.worker.self_link
  }

  target_size  = var.worker_count
  target_pools = [google_compute_target_pool.workers.self_link]

  named_port {
    name = "http"
    port = "80"
  }

  named_port {
    name = "https"
    port = "443"
  }
}

# Worker instance template
resource "google_compute_instance_template" "worker" {
  name_prefix  = "${var.name}-worker-"
  description  = "Worker Instance template"
  machine_type = var.machine_type

  metadata = {
    user-data = data.ct_config.worker-ignition.rendered
  }

  scheduling {
    automatic_restart = var.preemptible ? false : true
    preemptible       = var.preemptible
  }

  disk {
    auto_delete  = true
    boot         = true
    source_image = var.os_image
    disk_size_gb = var.disk_size
  }

  network_interface {
    network = var.network
    # Ephemeral external IP
    access_config {
    }
  }

  can_ip_forward = true
  tags           = ["worker", "${var.cluster_name}-worker", "${var.name}-worker"]

  guest_accelerator {
    count = var.accelerator_count
    type  = var.accelerator_type
  }

  lifecycle {
    # To update an Instance Template, Terraform should replace the existing resource
    create_before_destroy = true
  }
}

# Worker Ignition config
data "ct_config" "worker-ignition" {
  content  = data.template_file.worker-config.rendered
  strict   = true
  snippets = var.snippets
}

# Worker Container Linux config
data "template_file" "worker-config" {
  template = file("${path.module}/cl/worker.yaml")

  vars = {
    kubeconfig             = indent(10, var.kubeconfig)
    ssh_authorized_key     = var.ssh_authorized_key
    cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
    cluster_domain_suffix  = var.cluster_domain_suffix
    node_labels            = join(",", var.node_labels)
  }
}
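
Because `data "ct_config" "worker-ignition"` merges `var.snippets` into the rendered template, callers can layer extra Container Linux Config fragments onto workers without forking `cl/worker.yaml`. An illustrative snippet (the sysctl file and value are examples only):

```tf
module "workers" {
  # ...required arguments omitted for brevity

  snippets = [
    <<-EOF
    storage:
      files:
        - path: /etc/sysctl.d/custom.conf
          filesystem: root
          mode: 0644
          contents:
            inline: |
              net.core.somaxconn=1024
    EOF
  ]
}
```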