diff --git a/bare-metal/container-linux/kubernetes/README.md b/bare-metal/container-linux/kubernetes/README.md index e902faa8..4869b159 100644 --- a/bare-metal/container-linux/kubernetes/README.md +++ b/bare-metal/container-linux/kubernetes/README.md @@ -11,7 +11,7 @@ Typhoon distributes upstream Kubernetes, architectural conventions, and cluster ## Features -* Kubernetes v1.15.3 (upstream, via [kubernetes-incubator/bootkube](https://github.com/kubernetes-incubator/bootkube)) +* Kubernetes v1.15.3 (upstream) * Single or multi-master, [Calico](https://www.projectcalico.org/) or [flannel](https://github.com/coreos/flannel) networking * On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/) * Advanced features like [snippets](https://typhoon.psdn.io/advanced/customization/#container-linux) customization diff --git a/bare-metal/container-linux/kubernetes/bootkube.tf b/bare-metal/container-linux/kubernetes/bootkube.tf index f19b57f3..67efc0c7 100644 --- a/bare-metal/container-linux/kubernetes/bootkube.tf +++ b/bare-metal/container-linux/kubernetes/bootkube.tf @@ -1,6 +1,6 @@ -# Self-hosted Kubernetes assets (kubeconfig, manifests) +# Kubernetes assets (kubeconfig, manifests) module "bootkube" { - source = "git::https://github.com/poseidon/terraform-render-bootkube.git?ref=98cc19f80f2c4c3ddc63fc7aea6320e74bec561a" + source = "git::https://github.com/poseidon/terraform-render-bootkube.git?ref=6e59af71138bc5f784453873074de16e7ee150eb" cluster_name = var.cluster_name api_servers = [var.k8s_domain_name] diff --git a/bare-metal/container-linux/kubernetes/cl/controller.yaml.tmpl b/bare-metal/container-linux/kubernetes/cl/controller.yaml.tmpl index 234c00f5..e3aeeac8 100644 --- a/bare-metal/container-linux/kubernetes/cl/controller.yaml.tmpl +++ b/bare-metal/container-linux/kubernetes/cl/controller.yaml.tmpl @@ -111,17 +111,30 @@ systemd: 
RestartSec=10 [Install] WantedBy=multi-user.target - - name: bootkube.service + - name: bootstrap.service contents: | [Unit] - Description=Bootstrap a Kubernetes control plane with a temp api-server - ConditionPathExists=!/opt/bootkube/init_bootkube.done + Description=Kubernetes control plane + ConditionPathExists=!/opt/bootstrap/bootstrap.done [Service] Type=oneshot RemainAfterExit=true - WorkingDirectory=/opt/bootkube - ExecStart=/opt/bootkube/bootkube-start - ExecStartPost=/bin/touch /opt/bootkube/init_bootkube.done + WorkingDirectory=/opt/bootstrap + ExecStartPre=-/usr/bin/bash -c 'set -x && [ -n "$(ls /opt/bootstrap/assets/manifests-*/* 2>/dev/null)" ] && mv /opt/bootstrap/assets/manifests-*/* /opt/bootstrap/assets/manifests && rm -rf /opt/bootstrap/assets/manifests-*' + ExecStart=/usr/bin/rkt run \ + --trust-keys-from-https \ + --volume assets,kind=host,source=/opt/bootstrap/assets \ + --mount volume=assets,target=/assets \ + --volume script,kind=host,source=/opt/bootstrap/apply \ + --mount volume=script,target=/apply \ + --insecure-options=image \ + docker://k8s.gcr.io/hyperkube:v1.15.3 \ + --net=host \ + --dns=host \ + --exec=/apply + ExecStartPost=/bin/touch /opt/bootstrap/bootstrap.done + [Install] + WantedBy=multi-user.target storage: files: - path: /etc/kubernetes/kubelet.env @@ -137,36 +150,26 @@ storage: contents: inline: ${domain_name} + - path: /opt/bootstrap/apply + filesystem: root + mode: 0544 + contents: + inline: | + #!/bin/bash -e + export KUBECONFIG=/assets/auth/kubeconfig + until kubectl version; do + echo "Waiting for static pod control plane" + sleep 5 + done + until kubectl apply -f /assets/manifests -R; do + echo "Retry applying manifests" + sleep 5 + done - path: /etc/sysctl.d/max-user-watches.conf filesystem: root contents: inline: | fs.inotify.max_user_watches=16184 - - path: /opt/bootkube/bootkube-start - filesystem: root - mode: 0544 - user: - id: 500 - group: - id: 500 - contents: - inline: | - #!/bin/bash - # Wrapper for bootkube 
start - set -e - # Move experimental manifests - [ -n "$(ls /opt/bootkube/assets/manifests-*/* 2>/dev/null)" ] && mv /opt/bootkube/assets/manifests-*/* /opt/bootkube/assets/manifests && rm -rf /opt/bootkube/assets/manifests-* - exec /usr/bin/rkt run \ - --trust-keys-from-https \ - --volume assets,kind=host,source=/opt/bootkube/assets \ - --mount volume=assets,target=/assets \ - --volume bootstrap,kind=host,source=/etc/kubernetes \ - --mount volume=bootstrap,target=/etc/kubernetes \ - $${RKT_OPTS} \ - quay.io/coreos/bootkube:v0.14.0 \ - --net=host \ - --dns=host \ - --exec=/bootkube -- start --asset-dir=/assets "$@" passwd: users: - name: core diff --git a/bare-metal/container-linux/kubernetes/ssh.tf b/bare-metal/container-linux/kubernetes/ssh.tf index 72f3c58f..4a8f80ea 100644 --- a/bare-metal/container-linux/kubernetes/ssh.tf +++ b/bare-metal/container-linux/kubernetes/ssh.tf @@ -1,4 +1,4 @@ -# Secure copy etcd TLS assets and kubeconfig to controllers. Activates kubelet.service +# Secure copy assets to controllers. 
Activates kubelet.service resource "null_resource" "copy-controller-secrets" { count = length(var.controller_names) @@ -8,11 +8,12 @@ resource "null_resource" "copy-controller-secrets" { matchbox_group.install, matchbox_group.controller, matchbox_group.worker, + module.bootkube, ] connection { type = "ssh" - host = element(var.controller_domains, count.index) + host = var.controller_domains[count.index] user = "core" timeout = "60m" } @@ -56,6 +57,11 @@ resource "null_resource" "copy-controller-secrets" { content = module.bootkube.etcd_peer_key destination = "$HOME/etcd-peer.key" } + + provisioner "file" { + source = var.asset_dir + destination = "$HOME/assets" + } provisioner "remote-exec" { inline = [ @@ -70,6 +76,11 @@ resource "null_resource" "copy-controller-secrets" { "sudo chown -R etcd:etcd /etc/ssl/etcd", "sudo chmod -R 500 /etc/ssl/etcd", "sudo mv $HOME/kubeconfig /etc/kubernetes/kubeconfig", + "sudo mv $HOME/assets /opt/bootstrap/assets", + "sudo mkdir -p /etc/kubernetes/bootstrap-secrets", + "sudo cp -r /opt/bootstrap/assets/tls/* /etc/kubernetes/bootstrap-secrets/", + "sudo cp /opt/bootstrap/assets/auth/kubeconfig /etc/kubernetes/bootstrap-secrets/", + "sudo cp -r /opt/bootstrap/assets/static-manifests/* /etc/kubernetes/manifests/", ] } } @@ -105,9 +116,8 @@ resource "null_resource" "copy-worker-secrets" { } } -# Secure copy bootkube assets to ONE controller and start bootkube to perform -# one-time self-hosted cluster bootstrapping. -resource "null_resource" "bootkube-start" { +# Connect to a controller to perform one-time cluster bootstrap. +resource "null_resource" "bootstrap" { # Without depends_on, this remote-exec may start before the kubeconfig copy. # Terraform only does one task at a time, so it would try to bootstrap # while no Kubelets are running. 
@@ -118,20 +128,14 @@ resource "null_resource" "bootkube-start" { connection { type = "ssh" - host = element(var.controller_domains, 0) + host = var.controller_domains[0] user = "core" timeout = "15m" } - provisioner "file" { - source = var.asset_dir - destination = "$HOME/assets" - } - provisioner "remote-exec" { inline = [ - "sudo mv $HOME/assets /opt/bootkube", - "sudo systemctl start bootkube", + "sudo systemctl start bootstrap", ] } } diff --git a/docs/cl/bare-metal.md b/docs/cl/bare-metal.md index 3e73d4c0..c608608e 100644 --- a/docs/cl/bare-metal.md +++ b/docs/cl/bare-metal.md @@ -4,7 +4,7 @@ In this tutorial, we'll network boot and provision a Kubernetes v1.15.3 cluster First, we'll deploy a [Matchbox](https://github.com/poseidon/matchbox) service and setup a network boot environment. Then, we'll declare a Kubernetes cluster using the Typhoon Terraform module and power on machines. On PXE boot, machines will install Container Linux to disk, reboot into the disk install, and provision themselves as Kubernetes controllers or workers via Ignition. -Controllers are provisioned to run an `etcd-member` peer and a `kubelet` service. Workers run just a `kubelet` service. A one-time [bootkube](https://github.com/kubernetes-incubator/bootkube) bootstrap schedules the `apiserver`, `scheduler`, `controller-manager`, and `coredns` on controllers and schedules `kube-proxy` and `calico` (or `flannel`) on every node. A generated `kubeconfig` provides `kubectl` access to the cluster. +Controller hosts are provisioned to run an `etcd-member` peer and a `kubelet` service. Worker hosts run a `kubelet` service. Controller nodes run `kube-apiserver`, `kube-scheduler`, `kube-controller-manager`, and `coredns` while `kube-proxy` and `calico` (or `flannel`) run on every node. A generated `kubeconfig` provides `kubectl` access to the cluster. ## Requirements @@ -199,7 +199,7 @@ Reference the [variables docs](#variables) or the [variables.tf](https://github. 
## ssh-agent -Initial bootstrapping requires `bootkube.service` be started on one controller node. Terraform uses `ssh-agent` to automate this step. Add your SSH private key to `ssh-agent`. +Initial bootstrapping requires `bootstrap.service` be started on one controller node. Terraform uses `ssh-agent` to automate this step. Add your SSH private key to `ssh-agent`. ```sh ssh-add ~/.ssh/id_rsa @@ -221,14 +221,12 @@ $ terraform plan Plan: 55 to add, 0 to change, 0 to destroy. ``` -Apply the changes. Terraform will generate bootkube assets to `asset_dir` and create Matchbox profiles (e.g. controller, worker) and matching rules via the Matchbox API. +Apply the changes. Terraform will generate bootstrap assets to `asset_dir` and create Matchbox profiles (e.g. controller, worker) and matching rules via the Matchbox API. ```sh $ terraform apply -module.bare-metal-mercury.null_resource.copy-kubeconfig.0: Provisioning with 'file'... -module.bare-metal-mercury.null_resource.copy-etcd-secrets.0: Provisioning with 'file'... -module.bare-metal-mercury.null_resource.copy-kubeconfig.0: Still creating... (10s elapsed) -module.bare-metal-mercury.null_resource.copy-etcd-secrets.0: Still creating... (10s elapsed) +module.bare-metal-mercury.null_resource.copy-controller-secrets.0: Still creating... (10s elapsed) +module.bare-metal-mercury.null_resource.copy-worker-secrets.0: Still creating... (10s elapsed) ... ``` @@ -250,14 +248,14 @@ Machines will network boot, install Container Linux to disk, reboot into the dis ### Bootstrap -Wait for the `bootkube-start` step to finish bootstrapping the Kubernetes control plane. This may take 5-15 minutes depending on your network. +Wait for the `bootstrap` step to finish bootstrapping the Kubernetes control plane. This may take 5-15 minutes depending on your network. ``` -module.bare-metal-mercury.null_resource.bootkube-start: Still creating... (6m10s elapsed) -module.bare-metal-mercury.null_resource.bootkube-start: Still creating... 
(6m20s elapsed) -module.bare-metal-mercury.null_resource.bootkube-start: Still creating... (6m30s elapsed) -module.bare-metal-mercury.null_resource.bootkube-start: Still creating... (6m40s elapsed) -module.bare-metal-mercury.null_resource.bootkube-start: Creation complete (ID: 5441741360626669024) +module.bare-metal-mercury.null_resource.bootstrap: Still creating... (6m10s elapsed) +module.bare-metal-mercury.null_resource.bootstrap: Still creating... (6m20s elapsed) +module.bare-metal-mercury.null_resource.bootstrap: Still creating... (6m30s elapsed) +module.bare-metal-mercury.null_resource.bootstrap: Still creating... (6m40s elapsed) +module.bare-metal-mercury.null_resource.bootstrap: Creation complete (ID: 5441741360626669024) Apply complete! Resources: 55 added, 0 changed, 0 destroyed. ``` @@ -275,13 +273,12 @@ To watch the bootstrap process in detail, SSH to the first controller and journa ``` $ ssh core@node1.example.com -$ journalctl -f -u bootkube -bootkube[5]: Pod Status: pod-checkpointer Running -bootkube[5]: Pod Status: kube-apiserver Running -bootkube[5]: Pod Status: kube-scheduler Running -bootkube[5]: Pod Status: kube-controller-manager Running -bootkube[5]: All self-hosted control plane components successfully started -bootkube[5]: Tearing down temporary bootstrap control plane... +$ journalctl -f -u bootstrap +rkt[1750]: The connection to the server cluster.example.com:6443 was refused - did you specify the right host or port? +rkt[1750]: Waiting for static pod control plane +... +rkt[1750]: serviceaccount/calico-node unchanged +systemd[1]: Started Kubernetes control plane.
``` ## Verify @@ -307,16 +304,12 @@ kube-system calico-node-gnjrm 2/2 Running 0 kube-system calico-node-llbgt 2/2 Running 0 11m kube-system coredns-1187388186-dj3pd 1/1 Running 0 11m kube-system coredns-1187388186-mx9rt 1/1 Running 0 11m -kube-system kube-apiserver-7336w 1/1 Running 0 11m -kube-system kube-controller-manager-3271970485-b9chx 1/1 Running 0 11m -kube-system kube-controller-manager-3271970485-v30js 1/1 Running 1 11m +kube-system kube-apiserver-node1.example.com 1/1 Running 0 11m +kube-system kube-controller-node1.example.com 1/1 Running 1 11m kube-system kube-proxy-50sd4 1/1 Running 0 11m kube-system kube-proxy-bczhp 1/1 Running 0 11m kube-system kube-proxy-mp2fw 1/1 Running 0 11m -kube-system kube-scheduler-3895335239-fd3l7 1/1 Running 1 11m -kube-system kube-scheduler-3895335239-hfjv0 1/1 Running 0 11m -kube-system pod-checkpointer-wf65d 1/1 Running 0 11m -kube-system pod-checkpointer-wf65d-node1.example.com 1/1 Running 0 11m +kube-system kube-scheduler-node1.example.com 1/1 Running 0 11m ``` ## Going Further