Add bare-metal Fedora Atomic module

* Several known hacks and broken areas
* Download v1.10 Kubelet from release tarball
* Install flannel CNI binaries to /opt/cni
* Switch SELinux to Permissive
* Disable firewalld service
* port-forward won't work, socat missing

parent: b80a2eb8a0
commit: ddc75e99ac
The MIT License (MIT)

Copyright (c) 2017 Typhoon Authors
Copyright (c) 2017 Dalton Hubble

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
|
# Typhoon <img align="right" src="https://storage.googleapis.com/poseidon/typhoon-logo.png">

Typhoon is a minimal and free Kubernetes distribution.

* Minimal, stable base Kubernetes distribution
* Declarative infrastructure and configuration
* Free (freedom and cost) and privacy-respecting
* Practical for labs, datacenters, and clouds

Typhoon distributes upstream Kubernetes, architectural conventions, and cluster addons, much like a GNU/Linux distribution provides the Linux kernel and userspace components.

## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>

* Kubernetes v1.10.0 (upstream, via [kubernetes-incubator/bootkube](https://github.com/kubernetes-incubator/bootkube))
* Single or multi-master, workloads isolated on workers, [Calico](https://www.projectcalico.org/) or [flannel](https://github.com/coreos/flannel) networking
* On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
* Ready for Ingress, Prometheus, Grafana, and other optional [addons](https://typhoon.psdn.io/addons/overview/)

## Docs

Please see the [official docs](https://typhoon.psdn.io) and the bare-metal [tutorial](https://typhoon.psdn.io/bare-metal/).
@ -0,0 +1,17 @@
|
||||||
|
# Render self-hosted Kubernetes assets (kubeconfig, bootstrap manifests)
# into var.asset_dir using the pinned terraform-render-bootkube module.
module "bootkube" {
  source = "git::https://github.com/poseidon/terraform-render-bootkube.git?ref=61fb176647e15d4d0e72fdccb34d27e47430407c"

  cluster_name          = "${var.cluster_name}"
  api_servers           = ["${var.k8s_domain_name}"]
  etcd_servers          = ["${var.controller_domains}"]
  asset_dir             = "${var.asset_dir}"
  networking            = "${var.networking}"
  network_mtu           = "${var.network_mtu}"
  pod_cidr              = "${var.pod_cidr}"
  service_cidr          = "${var.service_cidr}"
  cluster_domain_suffix = "${var.cluster_domain_suffix}"

  # Fedora stores system CA certificates under /etc/pki rather than /etc/ssl
  trusted_certs_dir = "/etc/pki/tls/certs"
}
|
|
@ -0,0 +1,147 @@
|
||||||
|
#cloud-config
# Controller cloud-init for Fedora Atomic: runs etcd-member in Docker,
# fetches the v1.10.0 Kubelet from the release tarball, and stages the
# one-shot bootkube bootstrap service.
write_files:
  - path: /etc/systemd/system/etcd-member.service
    content: |
      [Unit]
      Description=etcd-member
      Documentation=https://github.com/coreos/etcd
      Wants=network-online.target network.target
      After=network-online.target
      Requires=docker.service
      After=docker.service
      [Service]
      EnvironmentFile=/etc/etcd/etcd.conf
      ExecStartPre=/bin/mkdir -p /var/lib/etcd
      ExecStart=/usr/bin/docker run --rm --name etcd-member \
        --net=host \
        -v /etc/pki/tls/certs:/usr/share/ca-certificates:ro,z \
        -v /etc/ssl/etcd:/etc/ssl/certs:ro,Z \
        -v /var/lib/etcd:/var/lib/etcd:Z \
        --env-file=/etc/etcd/etcd.conf \
        quay.io/coreos/etcd:v3.3.3
      ExecStop=/usr/bin/docker stop etcd-member
      Restart=on-failure
      RestartSec=10s
      TimeoutStartSec=0
      LimitNOFILE=40000
      [Install]
      WantedBy=multi-user.target
  - path: /etc/etcd/etcd.conf
    content: |
      ETCD_NAME=${etcd_name}
      ETCD_DATA_DIR=/var/lib/etcd
      ETCD_ADVERTISE_CLIENT_URLS=https://${domain_name}:2379
      ETCD_INITIAL_ADVERTISE_PEER_URLS=https://${domain_name}:2380
      ETCD_LISTEN_CLIENT_URLS=https://0.0.0.0:2379
      ETCD_LISTEN_PEER_URLS=https://0.0.0.0:2380
      ETCD_INITIAL_CLUSTER=${etcd_initial_cluster}
      ETCD_STRICT_RECONFIG_CHECK=true
      ETCD_TRUSTED_CA_FILE=/etc/ssl/certs/etcd/server-ca.crt
      ETCD_CERT_FILE=/etc/ssl/certs/etcd/server.crt
      ETCD_KEY_FILE=/etc/ssl/certs/etcd/server.key
      ETCD_CLIENT_CERT_AUTH=true
      ETCD_PEER_TRUSTED_CA_FILE=/etc/ssl/certs/etcd/peer-ca.crt
      ETCD_PEER_CERT_FILE=/etc/ssl/certs/etcd/peer.crt
      ETCD_PEER_KEY_FILE=/etc/ssl/certs/etcd/peer.key
      ETCD_PEER_CLIENT_CERT_AUTH=true
  - path: /etc/systemd/system/kubelet.service
    content: |
      [Unit]
      Description=Kubelet
      Wants=rpc-statd.service
      [Service]
      WorkingDirectory=/etc/kubernetes
      ExecStartPre=/bin/mkdir -p /opt/cni/bin
      ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
      ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
      ExecStartPre=/bin/mkdir -p /etc/kubernetes/checkpoint-secrets
      ExecStartPre=/bin/mkdir -p /etc/kubernetes/inactive-manifests
      ExecStartPre=/bin/mkdir -p /var/lib/cni
      ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins
      ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
      # Atomic's system containers and RPMs are old and unfriendly. Use this.
      ExecStartPre=/usr/bin/curl -L https://dl.k8s.io/v1.10.0/kubernetes-node-linux-amd64.tar.gz -o kubernetes-node-linux-amd64.tar.gz
      ExecStartPre=/usr/bin/tar xzf kubernetes-node-linux-amd64.tar.gz -C /usr/local/bin --strip-components=3 kubernetes/node/bin/kubelet
      ExecStart=
      ExecStart=/usr/local/bin/kubelet \
        --allow-privileged \
        --anonymous-auth=false \
        --cgroup-driver=systemd \
        --client-ca-file=/etc/kubernetes/ca.crt \
        --cluster_dns=${k8s_dns_service_ip} \
        --cluster_domain=${cluster_domain_suffix} \
        --cni-conf-dir=/etc/kubernetes/cni/net.d \
        --exit-on-lock-contention \
        --hostname-override=${domain_name} \
        --kubeconfig=/etc/kubernetes/kubeconfig \
        --lock-file=/var/run/lock/kubelet.lock \
        --network-plugin=cni \
        --node-labels=node-role.kubernetes.io/master \
        --node-labels=node-role.kubernetes.io/controller="true" \
        --pod-manifest-path=/etc/kubernetes/manifests \
        --register-with-taints=node-role.kubernetes.io/master=:NoSchedule \
        --volume-plugin-dir=/var/lib/kubelet/volumeplugins
      Restart=always
      RestartSec=10
      [Install]
      WantedBy=multi-user.target
  - path: /etc/systemd/system/kubelet.path
    content: |
      [Unit]
      Description=Watch for kubeconfig
      [Path]
      PathExists=/etc/kubernetes/kubeconfig
      [Install]
      WantedBy=multi-user.target
  - path: /etc/systemd/system/bootkube.service
    content: |
      [Unit]
      Description=Bootstrap a Kubernetes cluster
      ConditionPathExists=!/var/bootkube/init_bootkube.done
      [Service]
      Type=oneshot
      RemainAfterExit=true
      WorkingDirectory=/var/bootkube
      ExecStartPre=/bin/mkdir -p /var/bootkube
      ExecStart=/usr/local/bin/bootkube-start
      ExecStartPost=/bin/touch /var/bootkube/init_bootkube.done
      [Install]
      WantedBy=multi-user.target
  - path: /etc/kubernetes/.keep
  - path: /var/bootkube/.keep
  - path: /etc/selinux/config
    owner: root:root
    permissions: '0644'
    content: |
      SELINUX=permissive
      SELINUXTYPE=targeted
  - path: /usr/local/bin/bootkube-start
    permissions: '0755'
    content: |
      #!/bin/bash -e
      # Wrapper for bootkube start
      [ -n "$(ls /var/bootkube/assets/manifests-*/* 2>/dev/null)" ] && mv /var/bootkube/assets/manifests-*/* /var/bootkube/assets/manifests && rm -rf /var/bootkube/assets/manifests-*
      /usr/bin/docker run --rm --name bootkube \
        --net=host \
        --volume /etc/kubernetes:/etc/kubernetes:Z \
        --volume /var/bootkube/assets:/assets:Z \
        --entrypoint=/bootkube \
        quay.io/coreos/bootkube:v0.11.0 start --asset-dir=/assets
bootcmd:
  - [setenforce, Permissive]
runcmd:
  - [systemctl, daemon-reload]
  - [systemctl, enable, etcd-member.service]
  - [systemctl, start, --no-block, etcd-member.service]
  - [hostnamectl, set-hostname, ${domain_name}]
  - [systemctl, enable, kubelet.path]
  - [systemctl, start, --no-block, kubelet.path]
  - [systemctl, disable, firewalld, --now]
users:
  - default
  - name: fedora
    gecos: Fedora Admin
    sudo: ALL=(ALL) NOPASSWD:ALL
    groups: wheel,adm,systemd-journal,docker
    ssh-authorized-keys:
      - "${ssh_authorized_key}"
|
|
@ -0,0 +1,72 @@
|
||||||
|
#cloud-config
# Worker cloud-init for Fedora Atomic: fetches the v1.10.0 Kubelet from
# the release tarball and starts it once /etc/kubernetes/kubeconfig appears.
write_files:
  - path: /etc/systemd/system/kubelet.service
    content: |
      [Unit]
      Description=Kubelet
      Wants=rpc-statd.service
      [Service]
      WorkingDirectory=/etc/kubernetes
      ExecStartPre=/bin/mkdir -p /opt/cni/bin
      ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
      ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
      ExecStartPre=/bin/mkdir -p /etc/kubernetes/checkpoint-secrets
      ExecStartPre=/bin/mkdir -p /etc/kubernetes/inactive-manifests
      ExecStartPre=/bin/mkdir -p /var/lib/cni
      ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins
      ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
      # Atomic's system containers and RPMs are old and unfriendly. Use this.
      ExecStartPre=/usr/bin/curl -L https://dl.k8s.io/v1.10.0/kubernetes-node-linux-amd64.tar.gz -o kubernetes-node-linux-amd64.tar.gz
      ExecStartPre=/usr/bin/tar xzf kubernetes-node-linux-amd64.tar.gz -C /usr/local/bin --strip-components=3 kubernetes/node/bin/kubelet
      ExecStart=
      ExecStart=/usr/local/bin/kubelet \
        --allow-privileged \
        --anonymous-auth=false \
        --cgroup-driver=systemd \
        --client-ca-file=/etc/kubernetes/ca.crt \
        --cluster_dns=${k8s_dns_service_ip} \
        --cluster_domain=${cluster_domain_suffix} \
        --cni-conf-dir=/etc/kubernetes/cni/net.d \
        --exit-on-lock-contention \
        --hostname-override=${domain_name} \
        --kubeconfig=/etc/kubernetes/kubeconfig \
        --lock-file=/var/run/lock/kubelet.lock \
        --network-plugin=cni \
        --node-labels=node-role.kubernetes.io/node \
        --pod-manifest-path=/etc/kubernetes/manifests \
        --volume-plugin-dir=/var/lib/kubelet/volumeplugins
      Restart=always
      RestartSec=10
      [Install]
      WantedBy=multi-user.target
  - path: /etc/systemd/system/kubelet.path
    content: |
      [Unit]
      Description=Watch for kubeconfig
      [Path]
      PathExists=/etc/kubernetes/kubeconfig
      [Install]
      WantedBy=multi-user.target
  - path: /etc/kubernetes/.keep
  - path: /etc/selinux/config
    owner: root:root
    permissions: '0644'
    content: |
      SELINUX=permissive
      SELINUXTYPE=targeted
bootcmd:
  - [setenforce, Permissive]
runcmd:
  - [systemctl, daemon-reload]
  - [hostnamectl, set-hostname, ${domain_name}]
  - [systemctl, enable, kubelet.path]
  - [systemctl, start, --no-block, kubelet.path]
  - [systemctl, disable, firewalld, --now]
users:
  - default
  - name: fedora
    gecos: Fedora Admin
    sudo: ALL=(ALL) NOPASSWD:ALL
    groups: wheel,adm,systemd-journal,docker
    ssh-authorized-keys:
      - "${ssh_authorized_key}"
|
|
@ -0,0 +1,37 @@
|
||||||
|
// Install Fedora to disk: matches nodes by MAC (no "os" selector yet)
// and serves the kickstart-driven install profile.
resource "matchbox_group" "fedora-install" {
  count = "${length(var.controller_names) + length(var.worker_names)}"

  name    = "${format("fedora-install-%s", element(concat(var.controller_names, var.worker_names), count.index))}"
  profile = "${element(matchbox_profile.cached-fedora-install.*.name, count.index)}"

  selector {
    mac = "${element(concat(var.controller_macs, var.worker_macs), count.index)}"
  }

  metadata {
    ssh_authorized_key = "${var.ssh_authorized_key}"
  }
}

// After install, nodes report os=installed and receive controller cloud-init.
resource "matchbox_group" "controller" {
  count   = "${length(var.controller_names)}"
  name    = "${format("%s-%s", var.cluster_name, element(var.controller_names, count.index))}"
  profile = "${element(matchbox_profile.controllers.*.name, count.index)}"

  selector {
    mac = "${element(var.controller_macs, count.index)}"
    os  = "installed"
  }
}

// After install, nodes report os=installed and receive worker cloud-init.
resource "matchbox_group" "worker" {
  count   = "${length(var.worker_names)}"
  name    = "${format("%s-%s", var.cluster_name, element(var.worker_names, count.index))}"
  profile = "${element(matchbox_profile.workers.*.name, count.index)}"

  selector {
    mac = "${element(var.worker_macs, count.index)}"
    os  = "installed"
  }
}
|
|
@ -0,0 +1,36 @@
|
||||||
|
# required
lang en_US.UTF-8
keyboard us
timezone --utc Etc/UTC

# wipe disks
zerombr
clearpart --all --initlabel

# locked root and temporary user
rootpw --lock --iscrypted locked
user --name=none

# config
autopart --type=lvm --noswap
network --bootproto=dhcp --device=link --activate --onboot=on
bootloader --timeout=1 --append="ds=nocloud\;seedfrom=/var/cloud-init/"
services --enabled=cloud-init,cloud-init-local,cloud-config,cloud-final

ostreesetup --osname="fedora-atomic" --remote="fedora-atomic-27" --url="${matchbox_http_endpoint}/assets/fedora/27/repo" --ref=fedora/27/x86_64/atomic-host --nogpg

reboot

%post --erroronfail
# Seed cloud-init's NoCloud datasource with this node's user-data so the
# os=installed cloud-config runs on first boot.
mkdir /var/cloud-init
curl --retry 10 "${matchbox_http_endpoint}/generic?mac=${mac}&os=installed" -o /var/cloud-init/user-data
echo "instance-id: iid-local01" > /var/cloud-init/meta-data

# Re-point the ostree remote at the official Fedora repo (with GPG key)
# instead of the matchbox install-time mirror.
rm -f /etc/ostree/remotes.d/fedora-atomic-27.conf
ostree remote add fedora-atomic-27 https://kojipkgs.fedoraproject.org/atomic/27 --set=gpgkeypath=/etc/pki/rpm-gpg/RPM-GPG-KEY-fedora-27-primary

# lock root user
passwd -l root
# remove temporary user
userdel -r none
%end
|
|
@ -0,0 +1,3 @@
|
||||||
|
# Admin kubeconfig rendered by the bootkube module.
output "kubeconfig" {
  value = "${module.bootkube.kubeconfig}"
}
|
|
@ -0,0 +1,79 @@
|
||||||
|
// Cached Fedora Install profile (from matchbox /assets cache)
// Note: Admin must have downloaded Fedora kernel, initrd, and repo into
// matchbox assets.
resource "matchbox_profile" "cached-fedora-install" {
  count = "${length(var.controller_names) + length(var.worker_names)}"
  name  = "${format("%s-cached-fedora-install-%s", var.cluster_name, element(concat(var.controller_names, var.worker_names), count.index))}"

  kernel = "/assets/fedora/27/vmlinuz"

  initrd = [
    "/assets/fedora/27/initrd.img",
  ]

  args = [
    "initrd=initrd.img",
    "inst.repo=${var.matchbox_http_endpoint}/assets/fedora/27/Server/x86_64/os/",
    "inst.ks=${var.matchbox_http_endpoint}/generic?mac=${element(concat(var.controller_macs, var.worker_macs), count.index)}",
    "inst.text",
    "${var.kernel_args}",
  ]

  # kickstart
  generic_config = "${element(data.template_file.install-kickstarts.*.rendered, count.index)}"
}

data "template_file" "install-kickstarts" {
  count = "${length(var.controller_names) + length(var.worker_names)}"

  template = "${file("${path.module}/kickstart/fedora-atomic.ks.tmpl")}"

  vars {
    matchbox_http_endpoint = "${var.matchbox_http_endpoint}"
    mac                    = "${element(concat(var.controller_macs, var.worker_macs), count.index)}"
  }
}

// Kubernetes Controller profiles
resource "matchbox_profile" "controllers" {
  count = "${length(var.controller_names)}"
  name  = "${format("%s-controller-%s", var.cluster_name, element(var.controller_names, count.index))}"

  # cloud-init
  generic_config = "${element(data.template_file.controller-configs.*.rendered, count.index)}"
}

data "template_file" "controller-configs" {
  count = "${length(var.controller_names)}"

  template = "${file("${path.module}/cloudinit/controller.yaml.tmpl")}"

  vars {
    domain_name           = "${element(var.controller_domains, count.index)}"
    etcd_name             = "${element(var.controller_names, count.index)}"
    etcd_initial_cluster  = "${join(",", formatlist("%s=https://%s:2380", var.controller_names, var.controller_domains))}"
    k8s_dns_service_ip    = "${module.bootkube.kube_dns_service_ip}"
    cluster_domain_suffix = "${var.cluster_domain_suffix}"
    ssh_authorized_key    = "${var.ssh_authorized_key}"
  }
}

// Kubernetes Worker profiles
resource "matchbox_profile" "workers" {
  count = "${length(var.worker_names)}"
  name  = "${format("%s-worker-%s", var.cluster_name, element(var.worker_names, count.index))}"

  # cloud-init
  generic_config = "${element(data.template_file.worker-configs.*.rendered, count.index)}"
}

data "template_file" "worker-configs" {
  count = "${length(var.worker_names)}"

  template = "${file("${path.module}/cloudinit/worker.yaml.tmpl")}"

  vars {
    domain_name           = "${element(var.worker_domains, count.index)}"
    k8s_dns_service_ip    = "${module.bootkube.kube_dns_service_ip}"
    cluster_domain_suffix = "${var.cluster_domain_suffix}"
    ssh_authorized_key    = "${var.ssh_authorized_key}"
  }
}
|
|
@ -0,0 +1,21 @@
|
||||||
|
# Terraform version and plugin versions

terraform {
  required_version = ">= 0.10.4"
}

provider "local" {
  version = "~> 1.0"
}

provider "null" {
  version = "~> 1.0"
}

provider "template" {
  version = "~> 1.0"
}

provider "tls" {
  version = "~> 1.0"
}
|
|
@ -0,0 +1,120 @@
|
||||||
|
# Secure copy etcd TLS assets and kubeconfig to controllers. Activates kubelet.service
resource "null_resource" "copy-controller-secrets" {
  count = "${length(var.controller_names)}"

  connection {
    type    = "ssh"
    host    = "${element(var.controller_domains, count.index)}"
    user    = "fedora"
    timeout = "60m"
  }

  provisioner "file" {
    content     = "${module.bootkube.kubeconfig}"
    destination = "$HOME/kubeconfig"
  }

  provisioner "file" {
    content     = "${module.bootkube.etcd_ca_cert}"
    destination = "$HOME/etcd-client-ca.crt"
  }

  provisioner "file" {
    content     = "${module.bootkube.etcd_client_cert}"
    destination = "$HOME/etcd-client.crt"
  }

  provisioner "file" {
    content     = "${module.bootkube.etcd_client_key}"
    destination = "$HOME/etcd-client.key"
  }

  provisioner "file" {
    content     = "${module.bootkube.etcd_server_cert}"
    destination = "$HOME/etcd-server.crt"
  }

  provisioner "file" {
    content     = "${module.bootkube.etcd_server_key}"
    destination = "$HOME/etcd-server.key"
  }

  provisioner "file" {
    content     = "${module.bootkube.etcd_peer_cert}"
    destination = "$HOME/etcd-peer.crt"
  }

  provisioner "file" {
    content     = "${module.bootkube.etcd_peer_key}"
    destination = "$HOME/etcd-peer.key"
  }

  # Move certs into the layout expected by /etc/etcd/etcd.conf; the CA cert
  # doubles as both server-ca and peer-ca.
  provisioner "remote-exec" {
    inline = [
      "sudo mkdir -p /etc/ssl/etcd/etcd",
      "sudo mv etcd-client* /etc/ssl/etcd/",
      "sudo cp /etc/ssl/etcd/etcd-client-ca.crt /etc/ssl/etcd/etcd/server-ca.crt",
      "sudo mv etcd-server.crt /etc/ssl/etcd/etcd/server.crt",
      "sudo mv etcd-server.key /etc/ssl/etcd/etcd/server.key",
      "sudo cp /etc/ssl/etcd/etcd-client-ca.crt /etc/ssl/etcd/etcd/peer-ca.crt",
      "sudo mv etcd-peer.crt /etc/ssl/etcd/etcd/peer.crt",
      "sudo mv etcd-peer.key /etc/ssl/etcd/etcd/peer.key",
      "sudo mv $HOME/kubeconfig /etc/kubernetes/kubeconfig",
    ]
  }
}

# Secure copy kubeconfig to all workers. Activates kubelet.service
resource "null_resource" "copy-worker-secrets" {
  count = "${length(var.worker_names)}"

  connection {
    type    = "ssh"
    host    = "${element(var.worker_domains, count.index)}"
    user    = "fedora"
    timeout = "60m"
  }

  provisioner "file" {
    content     = "${module.bootkube.kubeconfig}"
    destination = "$HOME/kubeconfig"
  }

  provisioner "remote-exec" {
    inline = [
      "sudo mv $HOME/kubeconfig /etc/kubernetes/kubeconfig",
    ]
  }
}

# Secure copy bootkube assets to ONE controller and start bootkube to perform
# one-time self-hosted cluster bootstrapping.
resource "null_resource" "bootkube-start" {
  # Without depends_on, this remote-exec may start before the kubeconfig copy.
  # Terraform only does one task at a time, so it would try to bootstrap
  # while no Kubelets are running.
  depends_on = [
    "null_resource.copy-controller-secrets",
    "null_resource.copy-worker-secrets",
  ]

  connection {
    type    = "ssh"
    host    = "${element(var.controller_domains, 0)}"
    user    = "fedora"
    timeout = "15m"
  }

  provisioner "file" {
    source      = "${var.asset_dir}"
    destination = "$HOME/assets"
  }

  provisioner "remote-exec" {
    inline = [
      "while [ ! -f /var/lib/cloud/instance/boot-finished ]; do sleep 4; done",
      "sudo mv $HOME/assets /var/bootkube",
      "sudo systemctl start bootkube",
    ]
  }
}
|
|
@ -0,0 +1,97 @@
|
||||||
|
variable "cluster_name" {
  type        = "string"
  description = "Unique cluster name"
}

# bare-metal

variable "matchbox_http_endpoint" {
  type        = "string"
  description = "Matchbox HTTP read-only endpoint (e.g. http://matchbox.example.com:8080)"
}

# machines
# Terraform's crude "type system" does not properly support lists of maps so we do this.

variable "controller_names" {
  type = "list"
}

variable "controller_macs" {
  type = "list"
}

variable "controller_domains" {
  type = "list"
}

variable "worker_names" {
  type = "list"
}

variable "worker_macs" {
  type = "list"
}

variable "worker_domains" {
  type = "list"
}

# configuration

variable "k8s_domain_name" {
  description = "Controller DNS name which resolves to a controller instance. Workers and kubeconfig's will communicate with this endpoint (e.g. cluster.example.com)"
  type        = "string"
}

variable "ssh_authorized_key" {
  type        = "string"
  description = "SSH public key for user 'fedora'"
}

variable "asset_dir" {
  description = "Path to a directory where generated assets should be placed (contains secrets)"
  type        = "string"
}

variable "networking" {
  description = "Choice of networking provider (flannel or calico)"
  type        = "string"
  default     = "calico"
}

variable "network_mtu" {
  description = "CNI interface MTU (applies to calico only)"
  type        = "string"
  default     = "1480"
}

variable "pod_cidr" {
  description = "CIDR IPv4 range to assign Kubernetes pods"
  type        = "string"
  default     = "10.2.0.0/16"
}

variable "service_cidr" {
  description = <<EOD
CIDR IPv4 range to assign Kubernetes services.
The 1st IP will be reserved for kube_apiserver, the 10th IP will be reserved for kube-dns.
EOD

  type    = "string"
  default = "10.3.0.0/16"
}

# optional

variable "cluster_domain_suffix" {
  description = "Queries for domains with the suffix will be answered by kube-dns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
  type        = "string"
  default     = "cluster.local"
}

variable "kernel_args" {
  description = "Additional kernel arguments to provide at PXE boot."
  type        = "list"
  default     = []
}
|
Loading…
Reference in New Issue