Merge pull request #22 from poseidon/better-templating
bare-metal: Use Terraform templating for Container Linux configs
commit 69cabd9486
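In short: the Container Linux configs below stop using Go-template placeholders ({{.var}}), which matchbox rendered from per-group metadata, and switch to Terraform placeholders (${var}) rendered by template_file data sources before the configs reach the matchbox profiles. A minimal sketch of the pattern, using illustrative names (node_names, node_domains, example.yaml.tmpl) rather than the module's exact ones, and assuming the Terraform 0.10-era syntax used in this repo:

# Render one Container Linux config per machine from a *.yaml.tmpl file.
data "template_file" "example-configs" {
  count = "${length(var.node_names)}"

  # ${...} placeholders in the template are filled from the vars below.
  template = "${file("${path.module}/cl/example.yaml.tmpl")}"

  vars {
    domain_name = "${element(var.node_domains, count.index)}"
  }
}

# One matchbox profile per machine, embedding the fully rendered config.
resource "matchbox_profile" "examples" {
  count                  = "${length(var.node_names)}"
  name                   = "${format("%s-example-%s", var.cluster_name, element(var.node_names, count.index))}"
  container_linux_config = "${element(data.template_file.example-configs.*.rendered, count.index)}"
}

The real controller and worker versions of this wiring appear in the final hunk of the diff.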
@@ -9,12 +9,12 @@ systemd:
       contents: |
         [Service]
         Environment="ETCD_IMAGE_TAG=v3.2.0"
-        Environment="ETCD_NAME={{.etcd_name}}"
-        Environment="ETCD_ADVERTISE_CLIENT_URLS=https://{{.domain_name}}:2379"
-        Environment="ETCD_INITIAL_ADVERTISE_PEER_URLS=https://{{.domain_name}}:2380"
+        Environment="ETCD_NAME=${etcd_name}"
+        Environment="ETCD_ADVERTISE_CLIENT_URLS=https://${domain_name}:2379"
+        Environment="ETCD_INITIAL_ADVERTISE_PEER_URLS=https://${domain_name}:2380"
         Environment="ETCD_LISTEN_CLIENT_URLS=https://0.0.0.0:2379"
         Environment="ETCD_LISTEN_PEER_URLS=https://0.0.0.0:2380"
-        Environment="ETCD_INITIAL_CLUSTER={{.etcd_initial_cluster}}"
+        Environment="ETCD_INITIAL_CLUSTER=${etcd_initial_cluster}"
         Environment="ETCD_STRICT_RECONFIG_CHECK=true"
         Environment="ETCD_SSL_DIR=/etc/ssl/etcd"
         Environment="ETCD_TRUSTED_CA_FILE=/etc/ssl/certs/etcd/server-ca.crt"
@@ -79,11 +79,11 @@ systemd:
           --allow-privileged \
           --anonymous-auth=false \
           --client-ca-file=/etc/kubernetes/ca.crt \
-          --cluster_dns={{.k8s_dns_service_ip}} \
+          --cluster_dns=${k8s_dns_service_ip} \
           --cluster_domain=cluster.local \
           --cni-conf-dir=/etc/kubernetes/cni/net.d \
           --exit-on-lock-contention \
-          --hostname-override={{.domain_name}} \
+          --hostname-override=${domain_name} \
           --kubeconfig=/etc/kubernetes/kubeconfig \
           --lock-file=/var/run/lock/kubelet.lock \
           --network-plugin=cni \
@@ -137,7 +137,7 @@ storage:
       mode: 0644
       contents:
         inline:
-          {{.domain_name}}
+          ${domain_name}
     - path: /etc/sysctl.d/max-user-watches.conf
       filesystem: root
       contents:
@@ -159,17 +159,17 @@ storage:
         [ -d /opt/bootkube/assets/manifests-* ] && mv /opt/bootkube/assets/manifests-*/* /opt/bootkube/assets/manifests && rm -rf /opt/bootkube/assets/manifests-*
         [ -d /opt/bootkube/assets/experimental/manifests ] && mv /opt/bootkube/assets/experimental/manifests/* /opt/bootkube/assets/manifests && rm -r /opt/bootkube/assets/experimental/manifests
         [ -d /opt/bootkube/assets/experimental/bootstrap-manifests ] && mv /opt/bootkube/assets/experimental/bootstrap-manifests/* /opt/bootkube/assets/bootstrap-manifests && rm -r /opt/bootkube/assets/experimental/bootstrap-manifests
-        BOOTKUBE_ACI="${BOOTKUBE_ACI:-quay.io/coreos/bootkube}"
-        BOOTKUBE_VERSION="${BOOTKUBE_VERSION:-v0.6.2}"
-        BOOTKUBE_ASSETS="${BOOTKUBE_ASSETS:-/opt/bootkube/assets}"
+        BOOTKUBE_ACI="$${BOOTKUBE_ACI:-quay.io/coreos/bootkube}"
+        BOOTKUBE_VERSION="$${BOOTKUBE_VERSION:-v0.6.2}"
+        BOOTKUBE_ASSETS="$${BOOTKUBE_ASSETS:-/opt/bootkube/assets}"
         exec /usr/bin/rkt run \
           --trust-keys-from-https \
           --volume assets,kind=host,source=$BOOTKUBE_ASSETS \
           --mount volume=assets,target=/assets \
           --volume bootstrap,kind=host,source=/etc/kubernetes \
           --mount volume=bootstrap,target=/etc/kubernetes \
-          $RKT_OPTS \
-          ${BOOTKUBE_ACI}:${BOOTKUBE_VERSION} \
+          $$RKT_OPTS \
+          $${BOOTKUBE_ACI}:$${BOOTKUBE_VERSION} \
           --net=host \
           --dns=host \
           --exec=/bootkube -- start --asset-dir=/assets "$@"
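A note on the $$ escapes above: this script body is now rendered by Terraform's template engine, so a shell expansion such as ${BOOTKUBE_VERSION:-v0.6.2} that must reach the script verbatim is written as $${BOOTKUBE_VERSION:-v0.6.2}; the template engine emits a literal ${...} for $${...} instead of treating it as a template variable to fill. The lowercase placeholders like ${etcd_name} and ${domain_name} elsewhere in the file are the ones Terraform is meant to substitute from vars.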
@@ -177,5 +177,4 @@ passwd:
   users:
   - name: core
     ssh_authorized_keys:
-    - {{.ssh_authorized_key}}
+    - ${ssh_authorized_key}
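The hunks above come from the controller Container Linux template (cl/controller.yaml.tmpl, referenced by the Terraform later in this diff). The hunks that follow apply the same placeholder change to the worker template (cl/worker.yaml.tmpl), where only the kubelet flags, the hostname file, and the SSH key line change.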
@@ -54,11 +54,11 @@ systemd:
           --allow-privileged \
           --anonymous-auth=false \
           --client-ca-file=/etc/kubernetes/ca.crt \
-          --cluster_dns={{.k8s_dns_service_ip}} \
+          --cluster_dns=${k8s_dns_service_ip} \
           --cluster_domain=cluster.local \
           --cni-conf-dir=/etc/kubernetes/cni/net.d \
           --exit-on-lock-contention \
-          --hostname-override={{.domain_name}} \
+          --hostname-override=${domain_name} \
           --kubeconfig=/etc/kubernetes/kubeconfig \
           --lock-file=/var/run/lock/kubelet.lock \
           --network-plugin=cni \
@@ -101,7 +101,7 @@ storage:
       mode: 0644
       contents:
         inline:
-          {{.domain_name}}
+          ${domain_name}
     - path: /etc/sysctl.d/max-user-watches.conf
       filesystem: root
       contents:
@@ -111,5 +111,5 @@ passwd:
   users:
   - name: core
     ssh_authorized_keys:
-    - {{.ssh_authorized_key}}
+    - ${ssh_authorized_key}
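The remaining hunks change the Terraform that wires these templates to matchbox: the matchbox_group resources stop carrying per-node metadata (matchbox no longer performs the templating) and instead point at per-node profiles, while new template_file data sources render the configs those profiles serve.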
@@ -17,7 +17,7 @@ resource "matchbox_group" "container-linux-install" {
 resource "matchbox_group" "controller" {
   count   = "${length(var.controller_names)}"
   name    = "${format("%s-%s", var.cluster_name, element(var.controller_names, count.index))}"
-  profile = "${matchbox_profile.controller.name}"
+  profile = "${element(matchbox_profile.controllers.*.name, count.index)}"

   selector {
     mac = "${element(var.controller_macs, count.index)}"
@@ -25,29 +25,17 @@ resource "matchbox_group" "controller" {
   }

   metadata {
-    domain_name          = "${element(var.controller_domains, count.index)}"
-    etcd_name            = "${element(var.controller_names, count.index)}"
-    etcd_initial_cluster = "${join(",", formatlist("%s=https://%s:2380", var.controller_names, var.controller_domains))}"
     etcd_on_host         = "${var.experimental_self_hosted_etcd ? "false" : "true"}"
-    k8s_dns_service_ip   = "${module.bootkube.kube_dns_service_ip}"
-    ssh_authorized_key   = "${var.ssh_authorized_key}"
   }
 }

 resource "matchbox_group" "worker" {
   count   = "${length(var.worker_names)}"
   name    = "${format("%s-%s", var.cluster_name, element(var.worker_names, count.index))}"
-  profile = "${matchbox_profile.worker.name}"
+  profile = "${element(matchbox_profile.workers.*.name, count.index)}"

   selector {
     mac = "${element(var.worker_macs, count.index)}"
     os  = "installed"
   }
-
-  metadata {
-    domain_name        = "${element(var.worker_domains, count.index)}"
-    etcd_on_host       = "${var.experimental_self_hosted_etcd ? "false" : "true"}"
-    k8s_dns_service_ip = "${module.bootkube.kube_dns_service_ip}"
-    ssh_authorized_key = "${var.ssh_authorized_key}"
-  }
 }
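Below, the single shared controller and worker profiles become one matchbox_profile per machine, each embedding a fully rendered Container Linux config. The values that previously sat in the group metadata blocks above now feed the vars of the corresponding template_file data sources; only etcd_on_host remains as group metadata.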
@@ -67,14 +67,42 @@ data "template_file" "cached-container-linux-install-config" {
   }
 }

-// Kubernetes Controller profile
-resource "matchbox_profile" "controller" {
-  name = "controller"
-  container_linux_config = "${file("${path.module}/cl/controller.yaml.tmpl")}"
+// Kubernetes Controller profiles
+resource "matchbox_profile" "controllers" {
+  count                  = "${length(var.controller_names)}"
+  name                   = "${format("%s-controller-%s", var.cluster_name, element(var.controller_names, count.index))}"
+  container_linux_config = "${element(data.template_file.controller-configs.*.rendered, count.index)}"
 }

-// Kubernetes Worker profile
-resource "matchbox_profile" "worker" {
-  name = "worker"
-  container_linux_config = "${file("${path.module}/cl/worker.yaml.tmpl")}"
+data "template_file" "controller-configs" {
+  count = "${length(var.controller_names)}"
+
+  template = "${file("${path.module}/cl/controller.yaml.tmpl")}"
+
+  vars {
+    domain_name          = "${element(var.controller_domains, count.index)}"
+    etcd_name            = "${element(var.controller_names, count.index)}"
+    etcd_initial_cluster = "${join(",", formatlist("%s=https://%s:2380", var.controller_names, var.controller_domains))}"
+    k8s_dns_service_ip   = "${module.bootkube.kube_dns_service_ip}"
+    ssh_authorized_key   = "${var.ssh_authorized_key}"
+  }
+}
+
+// Kubernetes Worker profiles
+resource "matchbox_profile" "workers" {
+  count                  = "${length(var.worker_names)}"
+  name                   = "${format("%s-worker-%s", var.cluster_name, element(var.worker_names, count.index))}"
+  container_linux_config = "${element(data.template_file.worker-configs.*.rendered, count.index)}"
+}
+
+data "template_file" "worker-configs" {
+  count = "${length(var.worker_names)}"
+
+  template = "${file("${path.module}/cl/worker.yaml.tmpl")}"
+
+  vars {
+    domain_name        = "${element(var.worker_domains, count.index)}"
+    k8s_dns_service_ip = "${module.bootkube.kube_dns_service_ip}"
+    ssh_authorized_key = "${var.ssh_authorized_key}"
+  }
 }
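One way to eyeball a rendered config after this change is a temporary output placed alongside these resources; this is a hypothetical debugging snippet, not part of the PR:

output "controller-config-0" {
  value = "${element(data.template_file.controller-configs.*.rendered, 0)}"
}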