Migrate GCP, DO, Azure to static pod control plane

* Run kube-apiserver, kube-scheduler, and kube-controller-manager as
static pods on each controller node. Previously, kube-apiserver was
self-hosted as a DaemonSet across controllers, while kube-scheduler
and kube-controller-manager were Deployments (with 2 or
controller_count replicas).
* Remove the bootkube bootstrap and the pivot to a self-hosted control plane
* Remove pod-checkpointer manifests (no longer needed)
Dalton Hubble 2019-09-05 23:12:09 -07:00
parent c933bdfc26
commit db947537d1
21 changed files with 235 additions and 199 deletions
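
For context, the practical difference is visible on a provisioned cluster. A quick check, as a sketch only — the `controller-0` node name and SSH host below are placeholders, not values from this change:

```sh
# The kubelet now runs the control plane from on-disk manifests on each controller
ssh core@controller-0.example.com 'ls /etc/kubernetes/manifests'
# (typically one manifest per component: kube-apiserver, kube-controller-manager, kube-scheduler)

# Static (mirror) pods carry the node name as a suffix instead of a ReplicaSet hash
kubectl -n kube-system get pods -o wide | grep controller-0
```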

View File

@@ -97,16 +97,12 @@ kube-system calico-node-d1l5b 2/2 Running 0
kube-system calico-node-sp9ps 2/2 Running 0 6m
kube-system coredns-1187388186-zj5dl 1/1 Running 0 6m
kube-system coredns-1187388186-dkh3o 1/1 Running 0 6m
kube-system kube-apiserver-zppls 1/1 Running 0 6m
kube-system kube-controller-manager-3271970485-gh9kt 1/1 Running 0 6m
kube-system kube-controller-manager-3271970485-h90v8 1/1 Running 1 6m
kube-system kube-apiserver-controller-0 1/1 Running 0 6m
kube-system kube-controller-manager-controller-0 1/1 Running 0 6m
kube-system kube-proxy-117v6 1/1 Running 0 6m
kube-system kube-proxy-9886n 1/1 Running 0 6m
kube-system kube-proxy-njn47 1/1 Running 0 6m
kube-system kube-scheduler-3895335239-5x87r 1/1 Running 0 6m
kube-system kube-scheduler-3895335239-bzrrt 1/1 Running 1 6m
kube-system pod-checkpointer-l6lrt 1/1 Running 0 6m
kube-system pod-checkpointer-l6lrt-controller-0 1/1 Running 0 6m
kube-system kube-scheduler-controller-0 1/1 Running 0 6m
```
## Non-Goals

View File

@@ -11,7 +11,7 @@ Typhoon distributes upstream Kubernetes, architectural conventions, and cluster
## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>
* Kubernetes v1.15.3 (upstream, via [kubernetes-incubator/bootkube](https://github.com/kubernetes-incubator/bootkube))
* Kubernetes v1.15.3 (upstream)
* Single or multi-master, [Calico](https://www.projectcalico.org/) or [flannel](https://github.com/coreos/flannel) networking
* On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
* Advanced features like [worker pools](https://typhoon.psdn.io/advanced/worker-pools/), [low-priority](https://typhoon.psdn.io/cl/azure/#low-priority) workers, and [snippets](https://typhoon.psdn.io/advanced/customization/#container-linux) customization

View File

@@ -1,6 +1,6 @@
# Self-hosted Kubernetes assets (kubeconfig, manifests)
module "bootkube" {
source = "git::https://github.com/poseidon/terraform-render-bootkube.git?ref=98cc19f80f2c4c3ddc63fc7aea6320e74bec561a"
source = "git::https://github.com/poseidon/terraform-render-bootkube.git?ref=6e59af71138bc5f784453873074de16e7ee150eb"
cluster_name = var.cluster_name
api_servers = [format("%s.%s", var.cluster_name, var.dns_zone)]

View File

@@ -96,17 +96,28 @@ systemd:
RestartSec=10
[Install]
WantedBy=multi-user.target
- name: bootkube.service
- name: bootstrap.service
contents: |
[Unit]
Description=Bootstrap a Kubernetes cluster
ConditionPathExists=!/opt/bootkube/init_bootkube.done
Description=Kubernetes control plane
ConditionPathExists=!/opt/bootstrap/bootstrap.done
[Service]
Type=oneshot
RemainAfterExit=true
WorkingDirectory=/opt/bootkube
ExecStart=/opt/bootkube/bootkube-start
ExecStartPost=/bin/touch /opt/bootkube/init_bootkube.done
WorkingDirectory=/opt/bootstrap
ExecStartPre=-/usr/bin/bash -c 'set -x && [ -n "$(ls /opt/bootstrap/assets/manifests-*/* 2>/dev/null)" ] && mv /opt/bootstrap/assets/manifests-*/* /opt/bootstrap/assets/manifests && rm -rf /opt/bootstrap/assets/manifests-*'
ExecStart=/usr/bin/rkt run \
--trust-keys-from-https \
--volume assets,kind=host,source=/opt/bootstrap/assets \
--mount volume=assets,target=/assets \
--volume script,kind=host,source=/opt/bootstrap/apply \
--mount volume=script,target=/apply \
--insecure-options=image \
docker://k8s.gcr.io/hyperkube:v1.15.3 \
--net=host \
--dns=host \
--exec=/apply
ExecStartPost=/bin/touch /opt/bootstrap/bootstrap.done
[Install]
WantedBy=multi-user.target
storage:
@@ -124,36 +135,26 @@ storage:
inline: |
KUBELET_IMAGE_URL=docker://k8s.gcr.io/hyperkube
KUBELET_IMAGE_TAG=v1.15.3
- path: /opt/bootstrap/apply
filesystem: root
mode: 0544
contents:
inline: |
#!/bin/bash -e
export KUBECONFIG=/assets/auth/kubeconfig
until kubectl version; do
echo "Waiting for static pod control plane"
sleep 5
done
until kubectl apply -f /assets/manifests -R; do
echo "Retry applying manifests"
sleep 5
done
- path: /etc/sysctl.d/max-user-watches.conf
filesystem: root
contents:
inline: |
fs.inotify.max_user_watches=16184
- path: /opt/bootkube/bootkube-start
filesystem: root
mode: 0544
user:
id: 500
group:
id: 500
contents:
inline: |
#!/bin/bash
# Wrapper for bootkube start
set -e
# Move experimental manifests
[ -n "$(ls /opt/bootkube/assets/manifests-*/* 2>/dev/null)" ] && mv /opt/bootkube/assets/manifests-*/* /opt/bootkube/assets/manifests && rm -rf /opt/bootkube/assets/manifests-*
exec /usr/bin/rkt run \
--trust-keys-from-https \
--volume assets,kind=host,source=/opt/bootkube/assets \
--mount volume=assets,target=/assets \
--volume bootstrap,kind=host,source=/etc/kubernetes \
--mount volume=bootstrap,target=/etc/kubernetes \
$${RKT_OPTS} \
quay.io/coreos/bootkube:v0.14.0 \
--net=host \
--dns=host \
--exec=/bootkube -- start --asset-dir=/assets "$@"
passwd:
users:
- name: core
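
A note on the unit above: `bootstrap.service` is a oneshot guarded by `/opt/bootstrap/bootstrap.done`, and `/opt/bootstrap/apply` simply loops until the static pod apiserver answers and the manifests apply cleanly. If a run is interrupted, retrying should look roughly like the following sketch (run on the controller as `core`; not part of this change):

```sh
sudo systemctl status bootstrap           # inspect the previous run
sudo journalctl -u bootstrap --no-pager   # apply-loop output, e.g. "Waiting for static pod control plane"
sudo rm -f /opt/bootstrap/bootstrap.done  # clear the ConditionPathExists guard
sudo systemctl start bootstrap            # re-run the one-time apply
```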

View File

@@ -53,6 +53,22 @@ resource "azurerm_network_security_rule" "controller-etcd-metrics" {
destination_address_prefix = azurerm_subnet.controller.address_prefix
}
# Allow Prometheus to scrape kube-scheduler and kube-controller-manager metrics
resource "azurerm_network_security_rule" "controller-kube-metrics" {
resource_group_name = azurerm_resource_group.cluster.name
name = "allow-kube-metrics"
network_security_group_name = azurerm_network_security_group.controller.name
priority = "2011"
access = "Allow"
direction = "Inbound"
protocol = "Tcp"
source_port_range = "*"
destination_port_range = "10251-10252"
source_address_prefix = azurerm_subnet.worker.address_prefix
destination_address_prefix = azurerm_subnet.controller.address_prefix
}
resource "azurerm_network_security_rule" "controller-apiserver" {
resource_group_name = azurerm_resource_group.cluster.name
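
The new `allow-kube-metrics` rule admits worker-to-controller traffic on 10251-10252 so Prometheus can scrape `kube-scheduler` and `kube-controller-manager` now that they run on the controller hosts. A rough connectivity check from a worker, as a sketch — the controller address is a placeholder, and this assumes the v1.15 components still serve plaintext metrics on their default insecure ports:

```sh
curl -sf http://10.0.0.4:10251/metrics | head   # kube-scheduler
curl -sf http://10.0.0.4:10252/metrics | head   # kube-controller-manager
```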

View File

@@ -1,12 +1,15 @@
# Secure copy etcd TLS assets to controllers.
# Secure copy assets to controllers.
resource "null_resource" "copy-controller-secrets" {
count = var.controller_count
depends_on = [azurerm_virtual_machine.controllers]
depends_on = [
module.bootkube,
azurerm_virtual_machine.controllers
]
connection {
type = "ssh"
host = element(azurerm_public_ip.controllers.*.ip_address, count.index)
host = azurerm_public_ip.controllers.*.ip_address[count.index]
user = "core"
timeout = "15m"
}
@@ -46,6 +49,11 @@ resource "null_resource" "copy-controller-secrets" {
destination = "$HOME/etcd-peer.key"
}
provisioner "file" {
source = var.asset_dir
destination = "$HOME/assets"
}
provisioner "remote-exec" {
inline = [
"sudo mkdir -p /etc/ssl/etcd/etcd",
@@ -58,18 +66,21 @@ resource "null_resource" "copy-controller-secrets" {
"sudo mv etcd-peer.key /etc/ssl/etcd/etcd/peer.key",
"sudo chown -R etcd:etcd /etc/ssl/etcd",
"sudo chmod -R 500 /etc/ssl/etcd",
"sudo mv $HOME/assets /opt/bootstrap/assets",
"sudo mkdir -p /etc/kubernetes/bootstrap-secrets",
"sudo cp -r /opt/bootstrap/assets/tls/* /etc/kubernetes/bootstrap-secrets/",
"sudo cp /opt/bootstrap/assets/auth/kubeconfig /etc/kubernetes/bootstrap-secrets/",
"sudo cp -r /opt/bootstrap/assets/static-manifests/* /etc/kubernetes/manifests/",
]
}
}
# Secure copy bootkube assets to ONE controller and start bootkube to perform
# one-time self-hosted cluster bootstrapping.
resource "null_resource" "bootkube-start" {
# Connect to a controller to perform one-time cluster bootstrap.
resource "null_resource" "bootstrap" {
depends_on = [
module.bootkube,
null_resource.copy-controller-secrets,
module.workers,
azurerm_dns_a_record.apiserver,
null_resource.copy-controller-secrets,
]
connection {
@@ -79,15 +90,9 @@ resource "null_resource" "bootkube-start" {
timeout = "15m"
}
provisioner "file" {
source = var.asset_dir
destination = "$HOME/assets"
}
provisioner "remote-exec" {
inline = [
"sudo mv $HOME/assets /opt/bootkube",
"sudo systemctl start bootkube",
"sudo systemctl start bootstrap",
]
}
}
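
Since Terraform only issues `sudo systemctl start bootstrap` and then waits on the resource, progress is easiest to follow from the controller itself. A sketch, with the host as a placeholder:

```sh
# Follow the one-time bootstrap while `terraform apply` waits on null_resource.bootstrap
ssh core@<controller-public-ip> 'journalctl -u bootstrap -f'
```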

View File

@@ -99,7 +99,7 @@ resource "null_resource" "copy-worker-secrets" {
connection {
type = "ssh"
host = element(var.worker_domains, count.index)
host = var.worker_domains[count.index]
user = "core"
timeout = "60m"
}

View File

@@ -11,7 +11,7 @@ Typhoon distributes upstream Kubernetes, architectural conventions, and cluster
## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>
* Kubernetes v1.15.3 (upstream, via [kubernetes-incubator/bootkube](https://github.com/kubernetes-incubator/bootkube))
* Kubernetes v1.15.3 (upstream)
* Single or multi-master, [Calico](https://www.projectcalico.org/) or [flannel](https://github.com/coreos/flannel) networking
* On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
* Advanced features like [snippets](https://typhoon.psdn.io/advanced/customization/#container-linux) customization

View File

@@ -1,6 +1,6 @@
# Self-hosted Kubernetes assets (kubeconfig, manifests)
module "bootkube" {
source = "git::https://github.com/poseidon/terraform-render-bootkube.git?ref=98cc19f80f2c4c3ddc63fc7aea6320e74bec561a"
source = "git::https://github.com/poseidon/terraform-render-bootkube.git?ref=6e59af71138bc5f784453873074de16e7ee150eb"
cluster_name = var.cluster_name
api_servers = [format("%s.%s", var.cluster_name, var.dns_zone)]

View File

@@ -108,17 +108,28 @@ systemd:
RestartSec=10
[Install]
WantedBy=multi-user.target
- name: bootkube.service
- name: bootstrap.service
contents: |
[Unit]
Description=Bootstrap a Kubernetes cluster
ConditionPathExists=!/opt/bootkube/init_bootkube.done
Description=Kubernetes control plane
ConditionPathExists=!/opt/bootstrap/bootstrap.done
[Service]
Type=oneshot
RemainAfterExit=true
WorkingDirectory=/opt/bootkube
ExecStart=/opt/bootkube/bootkube-start
ExecStartPost=/bin/touch /opt/bootkube/init_bootkube.done
WorkingDirectory=/opt/bootstrap
ExecStartPre=-/usr/bin/bash -c 'set -x && [ -n "$(ls /opt/bootstrap/assets/manifests-*/* 2>/dev/null)" ] && mv /opt/bootstrap/assets/manifests-*/* /opt/bootstrap/assets/manifests && rm -rf /opt/bootstrap/assets/manifests-*'
ExecStart=/usr/bin/rkt run \
--trust-keys-from-https \
--volume assets,kind=host,source=/opt/bootstrap/assets \
--mount volume=assets,target=/assets \
--volume script,kind=host,source=/opt/bootstrap/apply \
--mount volume=script,target=/apply \
--insecure-options=image \
docker://k8s.gcr.io/hyperkube:v1.15.3 \
--net=host \
--dns=host \
--exec=/apply
ExecStartPost=/bin/touch /opt/bootstrap/bootstrap.done
[Install]
WantedBy=multi-user.target
storage:
@@ -130,33 +141,23 @@ storage:
inline: |
KUBELET_IMAGE_URL=docker://k8s.gcr.io/hyperkube
KUBELET_IMAGE_TAG=v1.15.3
- path: /opt/bootstrap/apply
filesystem: root
mode: 0544
contents:
inline: |
#!/bin/bash -e
export KUBECONFIG=/assets/auth/kubeconfig
until kubectl version; do
echo "Waiting for static pod control plane"
sleep 5
done
until kubectl apply -f /assets/manifests -R; do
echo "Retry applying manifests"
sleep 5
done
- path: /etc/sysctl.d/max-user-watches.conf
filesystem: root
contents:
inline: |
fs.inotify.max_user_watches=16184
- path: /opt/bootkube/bootkube-start
filesystem: root
mode: 0544
user:
id: 500
group:
id: 500
contents:
inline: |
#!/bin/bash
# Wrapper for bootkube start
set -e
# Move experimental manifests
[ -n "$(ls /opt/bootkube/assets/manifests-*/* 2>/dev/null)" ] && mv /opt/bootkube/assets/manifests-*/* /opt/bootkube/assets/manifests && rm -rf /opt/bootkube/assets/manifests-*
exec /usr/bin/rkt run \
--trust-keys-from-https \
--volume assets,kind=host,source=/opt/bootkube/assets \
--mount volume=assets,target=/assets \
--volume bootstrap,kind=host,source=/etc/kubernetes \
--mount volume=bootstrap,target=/etc/kubernetes \
$${RKT_OPTS} \
quay.io/coreos/bootkube:v0.14.0 \
--net=host \
--dns=host \
--exec=/bootkube -- start --asset-dir=/assets "$@"

View File

@@ -53,24 +53,33 @@ resource "digitalocean_firewall" "controllers" {
tags = ["${var.cluster_name}-controller"]
# etcd, kube-apiserver, kubelet
# etcd
inbound_rule {
protocol = "tcp"
port_range = "2379-2380"
source_tags = [digitalocean_tag.controllers.name]
}
# etcd metrics
inbound_rule {
protocol = "tcp"
port_range = "2381"
source_tags = [digitalocean_tag.workers.name]
}
# kube-apiserver
inbound_rule {
protocol = "tcp"
port_range = "6443"
source_addresses = ["0.0.0.0/0", "::/0"]
}
# kube-scheduler metrics, kube-controller-manager metrics
inbound_rule {
protocol = "tcp"
port_range = "10251-10252"
source_tags = [digitalocean_tag.workers.name]
}
}
resource "digitalocean_firewall" "workers" {

View File

@@ -1,14 +1,15 @@
# Secure copy etcd TLS assets and kubeconfig to controllers. Activates kubelet.service
# Secure copy assets to controllers. Activates kubelet.service
resource "null_resource" "copy-controller-secrets" {
count = var.controller_count
depends_on = [
module.bootkube,
digitalocean_firewall.rules
]
connection {
type = "ssh"
host = element(digitalocean_droplet.controllers.*.ipv4_address, count.index)
host = digitalocean_droplet.controllers.*.ipv4_address[count.index]
user = "core"
timeout = "15m"
}
@@ -53,6 +54,11 @@ resource "null_resource" "copy-controller-secrets" {
destination = "$HOME/etcd-peer.key"
}
provisioner "file" {
source = var.asset_dir
destination = "$HOME/assets"
}
provisioner "remote-exec" {
inline = [
"sudo mkdir -p /etc/ssl/etcd/etcd",
@@ -66,6 +72,11 @@ resource "null_resource" "copy-controller-secrets" {
"sudo chown -R etcd:etcd /etc/ssl/etcd",
"sudo chmod -R 500 /etc/ssl/etcd",
"sudo mv $HOME/kubeconfig /etc/kubernetes/kubeconfig",
"sudo mv $HOME/assets /opt/bootstrap/assets",
"sudo mkdir -p /etc/kubernetes/bootstrap-secrets",
"sudo cp -r /opt/bootstrap/assets/tls/* /etc/kubernetes/bootstrap-secrets/",
"sudo cp /opt/bootstrap/assets/auth/kubeconfig /etc/kubernetes/bootstrap-secrets/",
"sudo cp -r /opt/bootstrap/assets/static-manifests/* /etc/kubernetes/manifests/",
]
}
}
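
With `static-manifests/*` copied into `/etc/kubernetes/manifests` above, the kubelet brings up the control plane containers before `bootstrap.service` applies anything. One way to confirm, as a sketch — the address is a placeholder, and this assumes Docker is the container runtime, as on these Container Linux nodes:

```sh
ssh core@<controller-ipv4> 'docker ps --format "{{.Names}}" | grep -E "kube-(apiserver|scheduler|controller-manager)"'
```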
@@ -76,7 +87,7 @@ resource "null_resource" "copy-worker-secrets" {
connection {
type = "ssh"
host = element(digitalocean_droplet.workers.*.ipv4_address, count.index)
host = digitalocean_droplet.workers.*.ipv4_address[count.index]
user = "core"
timeout = "15m"
}
@@ -93,11 +104,9 @@ resource "null_resource" "copy-worker-secrets" {
}
}
# Secure copy bootkube assets to ONE controller and start bootkube to perform
# one-time self-hosted cluster bootstrapping.
resource "null_resource" "bootkube-start" {
# Connect to a controller to perform one-time cluster bootstrap.
resource "null_resource" "bootstrap" {
depends_on = [
module.bootkube,
null_resource.copy-controller-secrets,
null_resource.copy-worker-secrets,
]
@@ -109,15 +118,9 @@ resource "null_resource" "bootkube-start" {
timeout = "15m"
}
provisioner "file" {
source = var.asset_dir
destination = "$HOME/assets"
}
provisioner "remote-exec" {
inline = [
"sudo mv $HOME/assets /opt/bootkube",
"sudo systemctl start bootkube",
"sudo systemctl start bootstrap",
]
}
}

View File

@@ -7,7 +7,7 @@ In this tutorial, we'll create a Kubernetes v1.15.3 cluster on Azure with Contai
We'll declare a Kubernetes cluster using the Typhoon Terraform module. Then apply the changes to create a resource group, virtual network, subnets, security groups, controller availability set, worker scale set, load balancer, and TLS assets.
Controllers are provisioned to run an `etcd-member` peer and a `kubelet` service. Workers run just a `kubelet` service. A one-time [bootkube](https://github.com/kubernetes-incubator/bootkube) bootstrap schedules the `apiserver`, `scheduler`, `controller-manager`, and `coredns` on controllers and schedules `kube-proxy` and `flannel` on every node. A generated `kubeconfig` provides `kubectl` access to the cluster.
Controller hosts are provisioned to run an `etcd-member` peer and a `kubelet` service. Worker hosts run a `kubelet` service. Controller nodes run `kube-apiserver`, `kube-scheduler`, `kube-controller-manager`, and `coredns`, while `kube-proxy` and `calico` (or `flannel`) run on every node. A generated `kubeconfig` provides `kubectl` access to the cluster.
## Requirements
@@ -88,7 +88,7 @@ Reference the [variables docs](#variables) or the [variables.tf](https://github.
## ssh-agent
Initial bootstrapping requires `bootkube.service` be started on one controller node. Terraform uses `ssh-agent` to automate this step. Add your SSH private key to `ssh-agent`.
Initial bootstrapping requires `bootstrap.service` be started on one controller node. Terraform uses `ssh-agent` to automate this step. Add your SSH private key to `ssh-agent`.
```sh
ssh-add ~/.ssh/id_rsa
@@ -115,9 +115,9 @@ Apply the changes to create the cluster.
```sh
$ terraform apply
...
module.azure-ramius.null_resource.bootkube-start: Still creating... (6m50s elapsed)
module.azure-ramius.null_resource.bootkube-start: Still creating... (7m0s elapsed)
module.azure-ramius.null_resource.bootkube-start: Creation complete after 7m8s (ID: 3961816482286168143)
module.azure-ramius.null_resource.bootstrap: Still creating... (6m50s elapsed)
module.azure-ramius.null_resource.bootstrap: Still creating... (7m0s elapsed)
module.azure-ramius.null_resource.bootstrap: Creation complete after 7m8s (ID: 3961816482286168143)
Apply complete! Resources: 86 added, 0 changed, 0 destroyed.
```
@@ -144,19 +144,15 @@ $ kubectl get pods --all-namespaces
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system coredns-7c6fbb4f4b-b6qzx 1/1 Running 0 26m
kube-system coredns-7c6fbb4f4b-j2k3d 1/1 Running 0 26m
kube-system flannel-bwf24 2/2 Running 2 26m
kube-system flannel-bwf24 2/2 Running 0 26m
kube-system flannel-ks5qb 2/2 Running 0 26m
kube-system flannel-tq2wg 2/2 Running 0 26m
kube-system kube-apiserver-hxgsx 1/1 Running 3 26m
kube-system kube-controller-manager-5ff9cd7bb6-b942n 1/1 Running 0 26m
kube-system kube-controller-manager-5ff9cd7bb6-bbr6w 1/1 Running 0 26m
kube-system kube-apiserver-ramius-controller-0 1/1 Running 0 26m
kube-system kube-controller-manager-ramius-controller-0 1/1 Running 0 26m
kube-system kube-proxy-j4vpq 1/1 Running 0 26m
kube-system kube-proxy-jxr5d 1/1 Running 0 26m
kube-system kube-proxy-lbdw5 1/1 Running 0 26m
kube-system kube-scheduler-5f76d69686-s4fbx 1/1 Running 0 26m
kube-system kube-scheduler-5f76d69686-vgdgn 1/1 Running 0 26m
kube-system pod-checkpointer-cnqdg 1/1 Running 0 26m
kube-system pod-checkpointer-cnqdg-ramius-controller-0 1/1 Running 0 25m
kube-system kube-scheduler-ramius-controller-0 1/1 Running 0 26m
```
## Going Further

View File

@@ -4,7 +4,7 @@ In this tutorial, we'll create a Kubernetes v1.15.3 cluster on DigitalOcean with
We'll declare a Kubernetes cluster using the Typhoon Terraform module. Then apply the changes to create controller droplets, worker droplets, DNS records, tags, and TLS assets.
Controllers are provisioned to run an `etcd-member` peer and a `kubelet` service. Workers run just a `kubelet` service. A one-time [bootkube](https://github.com/kubernetes-incubator/bootkube) bootstrap schedules the `apiserver`, `scheduler`, `controller-manager`, and `coredns` on controllers and schedules `kube-proxy` and `flannel` on every node. A generated `kubeconfig` provides `kubectl` access to the cluster.
Controller hosts are provisioned to run an `etcd-member` peer and a `kubelet` service. Worker hosts run a `kubelet` service. Controller nodes run `kube-apiserver`, `kube-scheduler`, `kube-controller-manager`, and `coredns`, while `kube-proxy` and `calico` (or `flannel`) run on every node. A generated `kubeconfig` provides `kubectl` access to the cluster.
## Requirements
@@ -85,7 +85,7 @@ Reference the [variables docs](#variables) or the [variables.tf](https://github.
## ssh-agent
Initial bootstrapping requires `bootkube.service` be started on one controller node. Terraform uses `ssh-agent` to automate this step. Add your SSH private key to `ssh-agent`.
Initial bootstrapping requires `bootstrap.service` be started on one controller node. Terraform uses `ssh-agent` to automate this step. Add your SSH private key to `ssh-agent`.
```sh
ssh-add ~/.ssh/id_rsa
@@ -111,11 +111,11 @@ Apply the changes to create the cluster.
```sh
$ terraform apply
module.digital-ocean-nemo.null_resource.bootkube-start: Still creating... (30s elapsed)
module.digital-ocean-nemo.null_resource.bootkube-start: Provisioning with 'remote-exec'...
module.digital-ocean-nemo.null_resource.bootstrap: Still creating... (30s elapsed)
module.digital-ocean-nemo.null_resource.bootstrap: Provisioning with 'remote-exec'...
...
module.digital-ocean-nemo.null_resource.bootkube-start: Still creating... (6m20s elapsed)
module.digital-ocean-nemo.null_resource.bootkube-start: Creation complete (ID: 7599298447329218468)
module.digital-ocean-nemo.null_resource.bootstrap: Still creating... (6m20s elapsed)
module.digital-ocean-nemo.null_resource.bootstrap: Creation complete (ID: 7599298447329218468)
Apply complete! Resources: 54 added, 0 changed, 0 destroyed.
```
@@ -142,18 +142,14 @@ NAMESPACE NAME READY STATUS RES
kube-system coredns-1187388186-ld1j7 1/1 Running 0 11m
kube-system coredns-1187388186-rdhf7 1/1 Running 0 11m
kube-system flannel-1cq1v 2/2 Running 0 11m
kube-system flannel-hq9t0 2/2 Running 1 11m
kube-system flannel-hq9t0 2/2 Running 0 11m
kube-system flannel-v0g9w 2/2 Running 0 11m
kube-system kube-apiserver-n10qr 1/1 Running 0 11m
kube-system kube-controller-manager-3271970485-37gtw 1/1 Running 1 11m
kube-system kube-controller-manager-3271970485-p52t5 1/1 Running 0 11m
kube-system kube-apiserver-ip-10.132.115.81 1/1 Running 0 11m
kube-system kube-controller-manager-ip-10.132.115.81 1/1 Running 0 11m
kube-system kube-proxy-6kxjf 1/1 Running 0 11m
kube-system kube-proxy-fh3td 1/1 Running 0 11m
kube-system kube-proxy-k35rc 1/1 Running 0 11m
kube-system kube-scheduler-3895335239-2bc4c 1/1 Running 0 11m
kube-system kube-scheduler-3895335239-b7q47 1/1 Running 1 11m
kube-system pod-checkpointer-pr1lq 1/1 Running 0 11m
kube-system pod-checkpointer-pr1lq-10.132.115.81 1/1 Running 0 10m
kube-system kube-scheduler-ip-10.132.115.81 1/1 Running 0 11m
```
## Going Further

View File

@@ -4,7 +4,7 @@ In this tutorial, we'll create a Kubernetes v1.15.3 cluster on Google Compute En
We'll declare a Kubernetes cluster using the Typhoon Terraform module. Then apply the changes to create a network, firewall rules, health checks, controller instances, worker managed instance group, load balancers, and TLS assets.
Controllers are provisioned to run an `etcd-member` peer and a `kubelet` service. Workers run just a `kubelet` service. A one-time [bootkube](https://github.com/kubernetes-incubator/bootkube) bootstrap schedules the `apiserver`, `scheduler`, `controller-manager`, and `coredns` on controllers and schedules `kube-proxy` and `calico` (or `flannel`) on every node. A generated `kubeconfig` provides `kubectl` access to the cluster.
Controller hosts are provisioned to run an `etcd-member` peer and a `kubelet` service. Worker hosts run a `kubelet` service. Controller nodes run `kube-apiserver`, `kube-scheduler`, `kube-controller-manager`, and `coredns`, while `kube-proxy` and `calico` (or `flannel`) run on every node. A generated `kubeconfig` provides `kubectl` access to the cluster.
## Requirements
@@ -92,7 +92,7 @@ Reference the [variables docs](#variables) or the [variables.tf](https://github.
## ssh-agent
Initial bootstrapping requires `bootkube.service` be started on one controller node. Terraform uses `ssh-agent` to automate this step. Add your SSH private key to `ssh-agent`.
Initial bootstrapping requires `bootstrap.service` be started on one controller node. Terraform uses `ssh-agent` to automate this step. Add your SSH private key to `ssh-agent`.
```sh
ssh-add ~/.ssh/id_rsa
@@ -118,12 +118,11 @@ Apply the changes to create the cluster.
```sh
$ terraform apply
module.google-cloud-yavin.null_resource.bootkube-start: Still creating... (10s elapsed)
module.google-cloud-yavin.null_resource.bootstrap: Still creating... (10s elapsed)
...
module.google-cloud-yavin.null_resource.bootkube-start: Still creating... (5m30s elapsed)
module.google-cloud-yavin.null_resource.bootkube-start: Still creating... (5m40s elapsed)
module.google-cloud-yavin.null_resource.bootkube-start: Creation complete (ID: 5768638456220583358)
module.google-cloud-yavin.null_resource.bootstrap: Still creating... (5m30s elapsed)
module.google-cloud-yavin.null_resource.bootstrap: Still creating... (5m40s elapsed)
module.google-cloud-yavin.null_resource.bootstrap: Creation complete (ID: 5768638456220583358)
Apply complete! Resources: 64 added, 0 changed, 0 destroyed.
```
@@ -153,15 +152,12 @@ kube-system calico-node-d1l5b 2/2 Running 0
kube-system calico-node-sp9ps 2/2 Running 0 6m
kube-system coredns-1187388186-dkh3o 1/1 Running 0 6m
kube-system coredns-1187388186-zj5dl 1/1 Running 0 6m
kube-system kube-apiserver-zppls 1/1 Running 0 6m
kube-system kube-controller-manager-3271970485-gh9kt 1/1 Running 0 6m
kube-system kube-controller-manager-3271970485-h90v8 1/1 Running 1 6m
kube-system kube-apiserver-controller-0 1/1 Running 0 6m
kube-system kube-controller-manager-controller-0 1/1 Running 0 6m
kube-system kube-proxy-117v6 1/1 Running 0 6m
kube-system kube-proxy-9886n 1/1 Running 0 6m
kube-system kube-proxy-njn47 1/1 Running 0 6m
kube-system kube-scheduler-3895335239-5x87r 1/1 Running 0 6m
kube-system kube-scheduler-3895335239-bzrrt 1/1 Running 1 6m
kube-system pod-checkpointer-l6lrt 1/1 Running 0 6m
kube-system kube-scheduler-controller-0 1/1 Running 0 6m
```
## Going Further

View File

@@ -95,16 +95,12 @@ kube-system calico-node-d1l5b 2/2 Running 0
kube-system calico-node-sp9ps 2/2 Running 0 6m
kube-system coredns-1187388186-dkh3o 1/1 Running 0 6m
kube-system coredns-1187388186-zj5dl 1/1 Running 0 6m
kube-system kube-apiserver-zppls 1/1 Running 0 6m
kube-system kube-controller-manager-3271970485-gh9kt 1/1 Running 0 6m
kube-system kube-controller-manager-3271970485-h90v8 1/1 Running 1 6m
kube-system kube-apiserver-controller-0 1/1 Running 0 6m
kube-system kube-controller-manager-controller-0 1/1 Running 0 6m
kube-system kube-proxy-117v6 1/1 Running 0 6m
kube-system kube-proxy-9886n 1/1 Running 0 6m
kube-system kube-proxy-njn47 1/1 Running 0 6m
kube-system kube-scheduler-3895335239-5x87r 1/1 Running 0 6m
kube-system kube-scheduler-3895335239-bzrrt 1/1 Running 1 6m
kube-system pod-checkpointer-l6lrt 1/1 Running 0 6m
kube-system pod-checkpointer-l6lrt-controller-0 1/1 Running 0 6m
kube-system kube-scheduler-controller-0 1/1 Running 0 6m
```
## Help

View File

@@ -11,7 +11,7 @@ Typhoon distributes upstream Kubernetes, architectural conventions, and cluster
## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>
* Kubernetes v1.15.3 (upstream, via [kubernetes-incubator/bootkube](https://github.com/kubernetes-incubator/bootkube))
* Kubernetes v1.15.3 (upstream)
* Single or multi-master, [Calico](https://www.projectcalico.org/) or [flannel](https://github.com/coreos/flannel) networking
* On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
* Advanced features like [worker pools](https://typhoon.psdn.io/advanced/worker-pools/), [preemptible](https://typhoon.psdn.io/cl/google-cloud/#preemption) workers, and [snippets](https://typhoon.psdn.io/advanced/customization/#container-linux) customization

View File

@@ -1,6 +1,6 @@
# Self-hosted Kubernetes assets (kubeconfig, manifests)
module "bootkube" {
source = "git::https://github.com/poseidon/terraform-render-bootkube.git?ref=98cc19f80f2c4c3ddc63fc7aea6320e74bec561a"
source = "git::https://github.com/poseidon/terraform-render-bootkube.git?ref=6e59af71138bc5f784453873074de16e7ee150eb"
cluster_name = var.cluster_name
api_servers = [format("%s.%s", var.cluster_name, var.dns_zone)]

View File

@@ -97,17 +97,28 @@ systemd:
RestartSec=10
[Install]
WantedBy=multi-user.target
- name: bootkube.service
- name: bootstrap.service
contents: |
[Unit]
Description=Bootstrap a Kubernetes cluster
ConditionPathExists=!/opt/bootkube/init_bootkube.done
Description=Kubernetes control plane
ConditionPathExists=!/opt/bootstrap/bootstrap.done
[Service]
Type=oneshot
RemainAfterExit=true
WorkingDirectory=/opt/bootkube
ExecStart=/opt/bootkube/bootkube-start
ExecStartPost=/bin/touch /opt/bootkube/init_bootkube.done
WorkingDirectory=/opt/bootstrap
ExecStartPre=-/usr/bin/bash -c 'set -x && [ -n "$(ls /opt/bootstrap/assets/manifests-*/* 2>/dev/null)" ] && mv /opt/bootstrap/assets/manifests-*/* /opt/bootstrap/assets/manifests && rm -rf /opt/bootstrap/assets/manifests-*'
ExecStart=/usr/bin/rkt run \
--trust-keys-from-https \
--volume assets,kind=host,source=/opt/bootstrap/assets \
--mount volume=assets,target=/assets \
--volume script,kind=host,source=/opt/bootstrap/apply \
--mount volume=script,target=/apply \
--insecure-options=image \
docker://k8s.gcr.io/hyperkube:v1.15.3 \
--net=host \
--dns=host \
--exec=/apply
ExecStartPost=/bin/touch /opt/bootstrap/bootstrap.done
[Install]
WantedBy=multi-user.target
storage:
@@ -125,36 +136,26 @@ storage:
inline: |
KUBELET_IMAGE_URL=docker://k8s.gcr.io/hyperkube
KUBELET_IMAGE_TAG=v1.15.3
- path: /opt/bootstrap/apply
filesystem: root
mode: 0544
contents:
inline: |
#!/bin/bash -e
export KUBECONFIG=/assets/auth/kubeconfig
until kubectl version; do
echo "Waiting for static pod control plane"
sleep 5
done
until kubectl apply -f /assets/manifests -R; do
echo "Retry applying manifests"
sleep 5
done
- path: /etc/sysctl.d/max-user-watches.conf
filesystem: root
contents:
inline: |
fs.inotify.max_user_watches=16184
- path: /opt/bootkube/bootkube-start
filesystem: root
mode: 0544
user:
id: 500
group:
id: 500
contents:
inline: |
#!/bin/bash
# Wrapper for bootkube start
set -e
# Move experimental manifests
[ -n "$(ls /opt/bootkube/assets/manifests-*/* 2>/dev/null)" ] && mv /opt/bootkube/assets/manifests-*/* /opt/bootkube/assets/manifests && rm -rf /opt/bootkube/assets/manifests-*
exec /usr/bin/rkt run \
--trust-keys-from-https \
--volume assets,kind=host,source=/opt/bootkube/assets \
--mount volume=assets,target=/assets \
--volume bootstrap,kind=host,source=/etc/kubernetes \
--mount volume=bootstrap,target=/etc/kubernetes \
$${RKT_OPTS} \
quay.io/coreos/bootkube:v0.14.0 \
--net=host \
--dns=host \
--exec=/bootkube -- start --asset-dir=/assets "$@"
passwd:
users:
- name: core

View File

@@ -48,6 +48,20 @@ resource "google_compute_firewall" "internal-etcd-metrics" {
target_tags = ["${var.cluster_name}-controller"]
}
# Allow Prometheus to scrape kube-scheduler and kube-controller-manager metrics
resource "google_compute_firewall" "internal-kube-metrics" {
name = "${var.cluster_name}-internal-kube-metrics"
network = google_compute_network.network.name
allow {
protocol = "tcp"
ports = [10251, 10252]
}
source_tags = ["${var.cluster_name}-worker"]
target_tags = ["${var.cluster_name}-controller"]
}
resource "google_compute_firewall" "allow-apiserver" {
name = "${var.cluster_name}-allow-apiserver"
network = google_compute_network.network.name

View File

@@ -1,10 +1,14 @@
# Secure copy etcd TLS assets to controllers.
# Secure copy assets to controllers.
resource "null_resource" "copy-controller-secrets" {
count = var.controller_count
depends_on = [
module.bootkube,
]
connection {
type = "ssh"
host = element(local.controllers_ipv4_public, count.index)
host = local.controllers_ipv4_public[count.index]
user = "core"
timeout = "15m"
}
@@ -44,6 +48,11 @@ resource "null_resource" "copy-controller-secrets" {
destination = "$HOME/etcd-peer.key"
}
provisioner "file" {
source = var.asset_dir
destination = "$HOME/assets"
}
provisioner "remote-exec" {
inline = [
"sudo mkdir -p /etc/ssl/etcd/etcd",
@@ -56,36 +65,33 @@ resource "null_resource" "copy-controller-secrets" {
"sudo mv etcd-peer.key /etc/ssl/etcd/etcd/peer.key",
"sudo chown -R etcd:etcd /etc/ssl/etcd",
"sudo chmod -R 500 /etc/ssl/etcd",
"sudo mv $HOME/assets /opt/bootstrap/assets",
"sudo mkdir -p /etc/kubernetes/bootstrap-secrets",
"sudo cp -r /opt/bootstrap/assets/tls/* /etc/kubernetes/bootstrap-secrets/",
"sudo cp /opt/bootstrap/assets/auth/kubeconfig /etc/kubernetes/bootstrap-secrets/",
"sudo cp -r /opt/bootstrap/assets/static-manifests/* /etc/kubernetes/manifests/",
]
}
}
# Secure copy bootkube assets to ONE controller and start bootkube to perform
# one-time self-hosted cluster bootstrapping.
resource "null_resource" "bootkube-start" {
# Connect to a controller to perform one-time cluster bootstrap.
resource "null_resource" "bootstrap" {
depends_on = [
module.bootkube,
null_resource.copy-controller-secrets,
module.workers,
google_dns_record_set.apiserver,
null_resource.copy-controller-secrets,
]
connection {
type = "ssh"
host = element(local.controllers_ipv4_public, 0)
host = local.controllers_ipv4_public[0]
user = "core"
timeout = "15m"
}
provisioner "file" {
source = var.asset_dir
destination = "$HOME/assets"
}
provisioner "remote-exec" {
inline = [
"sudo mv $HOME/assets /opt/bootkube",
"sudo systemctl start bootkube",
"sudo systemctl start bootstrap",
]
}
}