Add bare-metal support for Container Linux with Matchbox
parent 833c92b2bf · commit da596e06bb

@@ -0,0 +1,12 @@
# Self-hosted Kubernetes assets (kubeconfig, manifests)
module "bootkube" {
  source = "git::https://github.com/dghubble/bootkube-terraform.git?ref=v0.6.0"

  cluster_name = "${var.cluster_name}"
  api_servers  = ["${var.k8s_domain_name}"]
  etcd_servers = ["${var.controller_domains}"]
  asset_dir    = "${var.asset_dir}"
  pod_cidr     = "${var.pod_cidr}"
  service_cidr = "${var.service_cidr}"
  experimental_self_hosted_etcd = "${var.experimental_self_hosted_etcd}"
}

@@ -0,0 +1,37 @@
---
systemd:
  units:
    - name: installer.service
      enable: true
      contents: |
        [Unit]
        Requires=network-online.target
        After=network-online.target
        [Service]
        Type=simple
        ExecStart=/opt/installer
        [Install]
        WantedBy=multi-user.target
storage:
  files:
    - path: /opt/installer
      filesystem: root
      mode: 0500
      contents:
        inline: |
          #!/bin/bash -ex
          curl --retry 10 "${ignition_endpoint}?{{.request.raw_query}}&os=installed" -o ignition.json
          coreos-install \
            -d ${install_disk} \
            -C ${container_linux_channel} \
            -V ${container_linux_version} \
            -o "${container_linux_oem}" \
            ${baseurl_flag} \
            -i ignition.json
          udevadm settle
          systemctl reboot
passwd:
  users:
    - name: core
      ssh_authorized_keys:
        - {{.ssh_authorized_key}}

@@ -0,0 +1,177 @@
---
systemd:
  units:
    {{ if eq .etcd_on_host "true" }}
    - name: etcd-member.service
      enable: true
      dropins:
        - name: 40-etcd-cluster.conf
          contents: |
            [Service]
            Environment="ETCD_IMAGE_TAG=v3.2.0"
            Environment="ETCD_NAME={{.etcd_name}}"
            Environment="ETCD_ADVERTISE_CLIENT_URLS=https://{{.domain_name}}:2379"
            Environment="ETCD_INITIAL_ADVERTISE_PEER_URLS=https://{{.domain_name}}:2380"
            Environment="ETCD_LISTEN_CLIENT_URLS=https://0.0.0.0:2379"
            Environment="ETCD_LISTEN_PEER_URLS=https://0.0.0.0:2380"
            Environment="ETCD_INITIAL_CLUSTER={{.etcd_initial_cluster}}"
            Environment="ETCD_STRICT_RECONFIG_CHECK=true"
            Environment="ETCD_SSL_DIR=/etc/ssl/etcd"
            Environment="ETCD_TRUSTED_CA_FILE=/etc/ssl/certs/etcd/server-ca.crt"
            Environment="ETCD_CERT_FILE=/etc/ssl/certs/etcd/server.crt"
            Environment="ETCD_KEY_FILE=/etc/ssl/certs/etcd/server.key"
            Environment="ETCD_CLIENT_CERT_AUTH=true"
            Environment="ETCD_PEER_TRUSTED_CA_FILE=/etc/ssl/certs/etcd/peer-ca.crt"
            Environment="ETCD_PEER_CERT_FILE=/etc/ssl/certs/etcd/peer.crt"
            Environment="ETCD_PEER_KEY_FILE=/etc/ssl/certs/etcd/peer.key"
            Environment="ETCD_PEER_CLIENT_CERT_AUTH=true"
    {{ end }}
    - name: docker.service
      enable: true
    - name: locksmithd.service
      mask: true
    - name: kubelet.path
      enable: true
      contents: |
        [Unit]
        Description=Watch for kubeconfig
        [Path]
        PathExists=/etc/kubernetes/kubeconfig
        [Install]
        WantedBy=multi-user.target
    - name: wait-for-dns.service
      enable: true
      contents: |
        [Unit]
        Description=Wait for DNS entries
        Wants=systemd-resolved.service
        Before=kubelet.service
        [Service]
        Type=oneshot
        RemainAfterExit=true
        ExecStart=/bin/sh -c 'while ! /usr/bin/grep '^[^#[:space:]]' /etc/resolv.conf > /dev/null; do sleep 1; done'
        [Install]
        RequiredBy=kubelet.service
    - name: kubelet.service
      contents: |
        [Unit]
        Description=Kubelet via Hyperkube ACI
        [Service]
        EnvironmentFile=/etc/kubernetes/kubelet.env
        Environment="RKT_RUN_ARGS=--uuid-file-save=/var/run/kubelet-pod.uuid \
          --volume=resolv,kind=host,source=/etc/resolv.conf \
          --mount volume=resolv,target=/etc/resolv.conf \
          --volume var-lib-cni,kind=host,source=/var/lib/cni \
          --mount volume=var-lib-cni,target=/var/lib/cni \
          --volume var-log,kind=host,source=/var/log \
          --mount volume=var-log,target=/var/log"
        ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
        ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
        ExecStartPre=/bin/mkdir -p /etc/kubernetes/checkpoint-secrets
        ExecStartPre=/bin/mkdir -p /etc/kubernetes/inactive-manifests
        ExecStartPre=/bin/mkdir -p /var/lib/cni
        ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
        ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/run/kubelet-pod.uuid
        ExecStart=/usr/lib/coreos/kubelet-wrapper \
          --kubeconfig=/etc/kubernetes/kubeconfig \
          --require-kubeconfig \
          --client-ca-file=/etc/kubernetes/ca.crt \
          --anonymous-auth=false \
          --cni-conf-dir=/etc/kubernetes/cni/net.d \
          --network-plugin=cni \
          --lock-file=/var/run/lock/kubelet.lock \
          --exit-on-lock-contention \
          --pod-manifest-path=/etc/kubernetes/manifests \
          --allow-privileged \
          --hostname-override={{.domain_name}} \
          --node-labels=node-role.kubernetes.io/master \
          --register-with-taints=node-role.kubernetes.io/master=:NoSchedule \
          --cluster_dns={{.k8s_dns_service_ip}} \
          --cluster_domain=cluster.local
        ExecStop=-/usr/bin/rkt stop --uuid-file=/var/run/kubelet-pod.uuid
        Restart=always
        RestartSec=10
        [Install]
        WantedBy=multi-user.target
    - name: bootkube.service
      contents: |
        [Unit]
        Description=Bootstrap a Kubernetes control plane with a temp api-server
        ConditionPathExists=!/opt/bootkube/init_bootkube.done
        [Service]
        Type=oneshot
        RemainAfterExit=true
        WorkingDirectory=/opt/bootkube
        ExecStart=/opt/bootkube/bootkube-start
        ExecStartPost=/bin/touch /opt/bootkube/init_bootkube.done
storage:
  {{ if index . "pxe" }}
  disks:
    - device: /dev/sda
      wipe_table: true
      partitions:
        - label: ROOT
  filesystems:
    - name: root
      mount:
        device: "/dev/sda1"
        format: "ext4"
        create:
          force: true
          options:
            - "-LROOT"
  {{end}}
  files:
    - path: /etc/kubernetes/kubelet.env
      filesystem: root
      mode: 0644
      contents:
        inline: |
          KUBELET_IMAGE_URL=quay.io/coreos/hyperkube
          KUBELET_IMAGE_TAG=v1.7.1_coreos.0
    - path: /etc/hostname
      filesystem: root
      mode: 0644
      contents:
        inline:
          {{.domain_name}}
    - path: /etc/sysctl.d/max-user-watches.conf
      filesystem: root
      contents:
        inline: |
          fs.inotify.max_user_watches=16184
    - path: /opt/bootkube/bootkube-start
      filesystem: root
      mode: 0544
      user:
        id: 500
      group:
        id: 500
      contents:
        inline: |
          #!/bin/bash
          # Wrapper for bootkube start
          set -e
          # Move experimental manifests
          [ -d /opt/bootkube/assets/experimental/manifests ] && mv /opt/bootkube/assets/experimental/manifests/* /opt/bootkube/assets/manifests && rm -r /opt/bootkube/assets/experimental/manifests
          [ -d /opt/bootkube/assets/experimental/bootstrap-manifests ] && mv /opt/bootkube/assets/experimental/bootstrap-manifests/* /opt/bootkube/assets/bootstrap-manifests && rm -r /opt/bootkube/assets/experimental/bootstrap-manifests
          BOOTKUBE_ACI="${BOOTKUBE_ACI:-quay.io/coreos/bootkube}"
          BOOTKUBE_VERSION="${BOOTKUBE_VERSION:-v0.6.0}"
          BOOTKUBE_ASSETS="${BOOTKUBE_ASSETS:-/opt/bootkube/assets}"
          exec /usr/bin/rkt run \
            --trust-keys-from-https \
            --volume assets,kind=host,source=$BOOTKUBE_ASSETS \
            --mount volume=assets,target=/assets \
            --volume bootstrap,kind=host,source=/etc/kubernetes \
            --mount volume=bootstrap,target=/etc/kubernetes \
            $RKT_OPTS \
            ${BOOTKUBE_ACI}:${BOOTKUBE_VERSION} \
            --net=host \
            --dns=host \
            --exec=/bootkube -- start --asset-dir=/assets "$@"
passwd:
  users:
    - name: core
      ssh_authorized_keys:
        - {{.ssh_authorized_key}}

@@ -0,0 +1,112 @@
---
systemd:
  units:
    - name: docker.service
      enable: true
    - name: locksmithd.service
      mask: true
    - name: kubelet.path
      enable: true
      contents: |
        [Unit]
        Description=Watch for kubeconfig
        [Path]
        PathExists=/etc/kubernetes/kubeconfig
        [Install]
        WantedBy=multi-user.target
    - name: wait-for-dns.service
      enable: true
      contents: |
        [Unit]
        Description=Wait for DNS entries
        Wants=systemd-resolved.service
        Before=kubelet.service
        [Service]
        Type=oneshot
        RemainAfterExit=true
        ExecStart=/bin/sh -c 'while ! /usr/bin/grep '^[^#[:space:]]' /etc/resolv.conf > /dev/null; do sleep 1; done'
        [Install]
        RequiredBy=kubelet.service
    - name: kubelet.service
      contents: |
        [Unit]
        Description=Kubelet via Hyperkube ACI
        [Service]
        EnvironmentFile=/etc/kubernetes/kubelet.env
        Environment="RKT_RUN_ARGS=--uuid-file-save=/var/run/kubelet-pod.uuid \
          --volume=resolv,kind=host,source=/etc/resolv.conf \
          --mount volume=resolv,target=/etc/resolv.conf \
          --volume var-lib-cni,kind=host,source=/var/lib/cni \
          --mount volume=var-lib-cni,target=/var/lib/cni \
          --volume var-log,kind=host,source=/var/log \
          --mount volume=var-log,target=/var/log"
        ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
        ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
        ExecStartPre=/bin/mkdir -p /etc/kubernetes/checkpoint-secrets
        ExecStartPre=/bin/mkdir -p /etc/kubernetes/inactive-manifests
        ExecStartPre=/bin/mkdir -p /var/lib/cni
        ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
        ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/run/kubelet-pod.uuid
        ExecStart=/usr/lib/coreos/kubelet-wrapper \
          --kubeconfig=/etc/kubernetes/kubeconfig \
          --require-kubeconfig \
          --client-ca-file=/etc/kubernetes/ca.crt \
          --anonymous-auth=false \
          --cni-conf-dir=/etc/kubernetes/cni/net.d \
          --network-plugin=cni \
          --lock-file=/var/run/lock/kubelet.lock \
          --exit-on-lock-contention \
          --pod-manifest-path=/etc/kubernetes/manifests \
          --allow-privileged \
          --hostname-override={{.domain_name}} \
          --node-labels=node-role.kubernetes.io/node \
          --cluster_dns={{.k8s_dns_service_ip}} \
          --cluster_domain=cluster.local
        ExecStop=-/usr/bin/rkt stop --uuid-file=/var/run/kubelet-pod.uuid
        Restart=always
        RestartSec=5
        [Install]
        WantedBy=multi-user.target

storage:
  {{ if index . "pxe" }}
  disks:
    - device: /dev/sda
      wipe_table: true
      partitions:
        - label: ROOT
  filesystems:
    - name: root
      mount:
        device: "/dev/sda1"
        format: "ext4"
        create:
          force: true
          options:
            - "-LROOT"
  {{end}}
  files:
    - path: /etc/kubernetes/kubelet.env
      filesystem: root
      mode: 0644
      contents:
        inline: |
          KUBELET_IMAGE_URL=quay.io/coreos/hyperkube
          KUBELET_IMAGE_TAG=v1.7.1_coreos.0
    - path: /etc/hostname
      filesystem: root
      mode: 0644
      contents:
        inline:
          {{.domain_name}}
    - path: /etc/sysctl.d/max-user-watches.conf
      filesystem: root
      contents:
        inline: |
          fs.inotify.max_user_watches=16184
passwd:
  users:
    - name: core
      ssh_authorized_keys:
        - {{.ssh_authorized_key}}

@@ -0,0 +1,53 @@
// Install Container Linux to disk
resource "matchbox_group" "container-linux-install" {
  count = "${length(var.controller_names) + length(var.worker_names)}"

  name    = "${format("container-linux-install-%s", element(concat(var.controller_names, var.worker_names), count.index))}"
  profile = "${var.cached_install == "true" ? matchbox_profile.cached-container-linux-install.name : matchbox_profile.container-linux-install.name}"

  selector {
    mac = "${element(concat(var.controller_macs, var.worker_macs), count.index)}"
  }

  metadata {
    ssh_authorized_key = "${var.ssh_authorized_key}"
  }
}

resource "matchbox_group" "controller" {
  count   = "${length(var.controller_names)}"
  name    = "${format("%s-%s", var.cluster_name, element(var.controller_names, count.index))}"
  profile = "${matchbox_profile.controller.name}"

  selector {
    mac = "${element(var.controller_macs, count.index)}"
    os  = "installed"
  }

  metadata {
    domain_name          = "${element(var.controller_domains, count.index)}"
    etcd_name            = "${element(var.controller_names, count.index)}"
    etcd_initial_cluster = "${join(",", formatlist("%s=https://%s:2380", var.controller_names, var.controller_domains))}"
    etcd_on_host         = "${var.experimental_self_hosted_etcd ? "false" : "true"}"
    k8s_dns_service_ip   = "${module.bootkube.kube_dns_service_ip}"
    ssh_authorized_key   = "${var.ssh_authorized_key}"
  }
}

resource "matchbox_group" "worker" {
  count   = "${length(var.worker_names)}"
  name    = "${format("%s-%s", var.cluster_name, element(var.worker_names, count.index))}"
  profile = "${matchbox_profile.worker.name}"

  selector {
    mac = "${element(var.worker_macs, count.index)}"
    os  = "installed"
  }

  metadata {
    domain_name        = "${element(var.worker_domains, count.index)}"
    etcd_on_host       = "${var.experimental_self_hosted_etcd ? "false" : "true"}"
    k8s_dns_service_ip = "${module.bootkube.kube_dns_service_ip}"
    ssh_authorized_key = "${var.ssh_authorized_key}"
  }
}

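These groups match each machine twice: first by MAC alone, which serves the install profile, and again by MAC plus the os=installed selector that the installer script appends when it re-requests Ignition after installing to disk. Applying these matchbox_group and matchbox_profile resources presupposes a configured matchbox provider in the root configuration; a minimal sketch, assuming the provider's usual endpoint and TLS client arguments, with example paths:

// Example only: matchbox gRPC API endpoint and client TLS credentials
provider "matchbox" {
  endpoint    = "matchbox.example.com:8081"
  client_cert = "${file("~/.matchbox/client.crt")}"
  client_key  = "${file("~/.matchbox/client.key")}"
  ca          = "${file("~/.matchbox/ca.crt")}"
}
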
@@ -0,0 +1,3 @@
output "kubeconfig" {
  value = "${module.bootkube.kubeconfig}"
}

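The kubeconfig output exposes the generated admin kubeconfig to the calling configuration. As one possible use, a root module could persist it to disk for kubectl; a sketch assuming this module is instantiated as module "cluster" and the local provider is available:

// Example only: write the module's kubeconfig output to a local file
resource "local_file" "kubeconfig" {
  content  = "${module.cluster.kubeconfig}"
  filename = "${path.module}/assets/kubeconfig"
}
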
@@ -0,0 +1,81 @@
// Container Linux Install profile (from release.core-os.net)
resource "matchbox_profile" "container-linux-install" {
  name   = "container-linux-install"
  kernel = "http://${var.container_linux_channel}.release.core-os.net/amd64-usr/${var.container_linux_version}/coreos_production_pxe.vmlinuz"

  initrd = [
    "http://${var.container_linux_channel}.release.core-os.net/amd64-usr/${var.container_linux_version}/coreos_production_pxe_image.cpio.gz",
  ]

  args = [
    "coreos.config.url=${var.matchbox_http_endpoint}/ignition?uuid=$${uuid}&mac=$${mac:hexhyp}",
    "coreos.first_boot=yes",
    "console=tty0",
    "console=ttyS0",
  ]

  container_linux_config = "${data.template_file.container-linux-install-config.rendered}"
}

data "template_file" "container-linux-install-config" {
  template = "${file("${path.module}/cl/container-linux-install.yaml.tmpl")}"

  vars {
    container_linux_channel = "${var.container_linux_channel}"
    container_linux_version = "${var.container_linux_version}"
    ignition_endpoint       = "${format("%s/ignition", var.matchbox_http_endpoint)}"
    install_disk            = "${var.install_disk}"
    container_linux_oem     = "${var.container_linux_oem}"

    # only the cached-container-linux profile adds -b baseurl
    baseurl_flag = ""
  }
}

// Container Linux Install profile (from matchbox /assets cache)
// Note: Admin must have downloaded container_linux_version into matchbox assets.
resource "matchbox_profile" "cached-container-linux-install" {
  name   = "cached-container-linux-install"
  kernel = "/assets/coreos/${var.container_linux_version}/coreos_production_pxe.vmlinuz"

  initrd = [
    "/assets/coreos/${var.container_linux_version}/coreos_production_pxe_image.cpio.gz",
  ]

  args = [
    "coreos.config.url=${var.matchbox_http_endpoint}/ignition?uuid=$${uuid}&mac=$${mac:hexhyp}",
    "coreos.first_boot=yes",
    "console=tty0",
    "console=ttyS0",
  ]

  container_linux_config = "${data.template_file.cached-container-linux-install-config.rendered}"
}

data "template_file" "cached-container-linux-install-config" {
  template = "${file("${path.module}/cl/container-linux-install.yaml.tmpl")}"

  vars {
    container_linux_channel = "${var.container_linux_channel}"
    container_linux_version = "${var.container_linux_version}"
    ignition_endpoint       = "${format("%s/ignition", var.matchbox_http_endpoint)}"
    install_disk            = "${var.install_disk}"
    container_linux_oem     = "${var.container_linux_oem}"

    # this profile uses -b baseurl to install from the matchbox cache
    baseurl_flag = "-b ${var.matchbox_http_endpoint}/assets/coreos"
  }
}

// Kubernetes Controller profile
resource "matchbox_profile" "controller" {
  name                   = "controller"
  container_linux_config = "${file("${path.module}/cl/controller.yaml.tmpl")}"
}

// Kubernetes Worker profile
resource "matchbox_profile" "worker" {
  name                   = "worker"
  container_linux_config = "${file("${path.module}/cl/worker.yaml.tmpl")}"
}

@@ -0,0 +1,97 @@
# Secure copy etcd TLS assets and kubeconfig to all nodes. Activates kubelet.service
resource "null_resource" "copy-secrets" {
  count = "${length(var.controller_names) + length(var.worker_names)}"

  connection {
    type    = "ssh"
    host    = "${element(concat(var.controller_domains, var.worker_domains), count.index)}"
    user    = "core"
    timeout = "60m"
  }

  provisioner "file" {
    content     = "${module.bootkube.kubeconfig}"
    destination = "$HOME/kubeconfig"
  }

  provisioner "file" {
    content     = "${module.bootkube.etcd_ca_cert}"
    destination = "$HOME/etcd-client-ca.crt"
  }

  provisioner "file" {
    content     = "${module.bootkube.etcd_client_cert}"
    destination = "$HOME/etcd-client.crt"
  }

  provisioner "file" {
    content     = "${module.bootkube.etcd_client_key}"
    destination = "$HOME/etcd-client.key"
  }

  provisioner "file" {
    content     = "${module.bootkube.etcd_server_cert}"
    destination = "$HOME/etcd-server.crt"
  }

  provisioner "file" {
    content     = "${module.bootkube.etcd_server_key}"
    destination = "$HOME/etcd-server.key"
  }

  provisioner "file" {
    content     = "${module.bootkube.etcd_peer_cert}"
    destination = "$HOME/etcd-peer.crt"
  }

  provisioner "file" {
    content     = "${module.bootkube.etcd_peer_key}"
    destination = "$HOME/etcd-peer.key"
  }

  provisioner "remote-exec" {
    inline = [
      "sudo mkdir -p /etc/ssl/etcd/etcd",
      "sudo mv etcd-client* /etc/ssl/etcd/",
      "sudo cp /etc/ssl/etcd/etcd-client-ca.crt /etc/ssl/etcd/etcd/server-ca.crt",
      "sudo mv etcd-server.crt /etc/ssl/etcd/etcd/server.crt",
      "sudo mv etcd-server.key /etc/ssl/etcd/etcd/server.key",
      "sudo cp /etc/ssl/etcd/etcd-client-ca.crt /etc/ssl/etcd/etcd/peer-ca.crt",
      "sudo mv etcd-peer.crt /etc/ssl/etcd/etcd/peer.crt",
      "sudo mv etcd-peer.key /etc/ssl/etcd/etcd/peer.key",
      "sudo chown -R etcd:etcd /etc/ssl/etcd",
      "sudo chmod -R 500 /etc/ssl/etcd",
      "sudo mv /home/core/kubeconfig /etc/kubernetes/kubeconfig",
    ]
  }
}

# Secure copy bootkube assets to ONE controller and start bootkube to perform
# one-time self-hosted cluster bootstrapping.
resource "null_resource" "bootkube-start" {
  # Without depends_on, this remote-exec may start before the kubeconfig copy.
  # Terraform only does one task at a time, so it would try to bootstrap
  # Kubernetes and Tectonic while no Kubelets are running. Ensure all nodes
  # receive a kubeconfig before proceeding with bootkube and tectonic.
  depends_on = ["null_resource.copy-secrets"]

  connection {
    type    = "ssh"
    host    = "${element(var.controller_domains, 0)}"
    user    = "core"
    timeout = "60m"
  }

  provisioner "file" {
    source      = "${var.asset_dir}"
    destination = "$HOME/assets"
  }

  provisioner "remote-exec" {
    inline = [
      "sudo mv /home/core/assets /opt/bootkube",
      "sudo systemctl start bootkube",
    ]
  }
}

@@ -0,0 +1,104 @@
variable "matchbox_http_endpoint" {
  type        = "string"
  description = "Matchbox HTTP read-only endpoint (e.g. http://matchbox.example.com:8080)"
}

variable "container_linux_channel" {
  type        = "string"
  description = "Container Linux channel corresponding to the container_linux_version"
}

variable "container_linux_version" {
  type        = "string"
  description = "Container Linux version of the kernel/initrd to PXE or the image to install"
}

variable "cluster_name" {
  type        = "string"
  description = "Cluster name"
}

variable "ssh_authorized_key" {
  type        = "string"
  description = "SSH public key to set as an authorized_key on machines"
}

# Machines
# Terraform's crude "type system" does not properly support lists of maps, so we do this.

variable "controller_names" {
  type = "list"
}

variable "controller_macs" {
  type = "list"
}

variable "controller_domains" {
  type = "list"
}

variable "worker_names" {
  type = "list"
}

variable "worker_macs" {
  type = "list"
}

variable "worker_domains" {
  type = "list"
}

# bootkube assets

variable "k8s_domain_name" {
  description = "Controller DNS name which resolves to a controller instance. Workers and kubeconfigs will communicate with this endpoint (e.g. cluster.example.com)"
  type        = "string"
}

variable "asset_dir" {
  description = "Path to a directory where generated assets should be placed (contains secrets)"
  type        = "string"
}

variable "pod_cidr" {
  description = "CIDR IP range to assign Kubernetes pods"
  type        = "string"
  default     = "10.2.0.0/16"
}

variable "service_cidr" {
  description = <<EOD
CIDR IP range to assign Kubernetes services.
The 1st IP will be reserved for kube_apiserver, the 10th IP will be reserved for kube-dns, the 15th IP will be reserved for self-hosted etcd, and the 200th IP will be reserved for bootstrap self-hosted etcd.
EOD

  type    = "string"
  default = "10.3.0.0/16"
}

# optional

variable "cached_install" {
  type        = "string"
  default     = "false"
  description = "Whether Container Linux should PXE boot and install from the matchbox /assets cache. Note that the admin must have downloaded the container_linux_version into matchbox assets."
}

variable "install_disk" {
  type        = "string"
  default     = "/dev/sda"
  description = "Disk device to which the install profiles should install Container Linux (e.g. /dev/sda)"
}

variable "container_linux_oem" {
  type        = "string"
  default     = ""
  description = "Specify an OEM image id to use as base for the installation (e.g. ami, vmware_raw, xen) or leave blank for the default image"
}

variable "experimental_self_hosted_etcd" {
  default     = "false"
  description = "Create a self-hosted etcd cluster as pods on Kubernetes, instead of running etcd on the hosts"
}

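Taken together, a root configuration instantiates this module with machine lists whose names, MACs, and domains align by index. A minimal sketch with a hypothetical module path and example hostnames, MAC addresses, and version:

module "bare-metal-cluster" {
  // example source path; adjust to wherever this module lives
  source = "./bare-metal"

  matchbox_http_endpoint  = "http://matchbox.example.com:8080"
  container_linux_channel = "stable"
  container_linux_version = "1465.6.0"
  cluster_name            = "example"
  ssh_authorized_key      = "ssh-rsa AAAAB3Nz..."

  k8s_domain_name    = "cluster.example.com"
  controller_names   = ["node1"]
  controller_macs    = ["52:54:00:a1:9c:ae"]
  controller_domains = ["node1.example.com"]
  worker_names       = ["node2"]
  worker_macs        = ["52:54:00:b2:2f:86"]
  worker_domains     = ["node2.example.com"]

  asset_dir = "/home/user/clusters/example/assets"
}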