Mirror of https://github.com/puppetmaster/typhoon.git, synced 2025-08-02 22:31:33 +02:00
Compare commits

53 Commits (SHA1; author and date columns were not captured):
0595915a19, e6bc5143aa, e4ac1027c8, 24fc440d83, a6702573a2, 69188af565, d874bdd17d, 5b9dab6659,
5196709fe0, ab72f1ab2d, 5ef4155e08, 15c4b793c3, 36ed53924f, 19de38b30d, 995824fa6d, 1c5ed84fc2,
ca7d62720e, 26f8d76755, fdd6882a87, f82266ac8c, 7bcf2d7831, 78bfff0afe, a6de245d8a, 96afa6a531,
a407ff72df, f453c54956, 3e34fb075b, 9bfb1c5faf, 99ab81f79c, 8703f2c3c5, 078f084220, 81a1ae38e6,
5b06e0e869, b951aca66f, 9da3725738, fd12f3612b, 96b646cf6d, b15c60fa2f, db947537d1, c933bdfc26,
21632c6674, 74780fb09f, b60a2ecdf7, 4a7083d94a, c20683067d, dc436b8fe9, efb9a2d09a, e8d586f3b3,
b74f470701, 45bc52d156, 4d5f962d76, e7d805d9a4, d95bf2d1ea
CHANGES.md (96)

@@ -4,6 +4,102 @@ Notable changes between versions.

## Latest

* Kubernetes [v1.16.2](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.16.md#v1162)
* Update etcd from v3.4.1 to v3.4.2 ([#570](https://github.com/poseidon/typhoon/pull/570))
* Update Calico from v3.9.1 to [v3.9.2](https://docs.projectcalico.org/v3.9/release-notes/)
* Default to using Calico and supporting NetworkPolicy on all platforms

#### Azure

* Change default networking provider from "flannel" to "calico" ([#573](https://github.com/poseidon/typhoon/pull/573))
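Clusters that prefer to stay on flannel can pin the provider explicitly rather than inheriting the new default. A minimal sketch (the module name and source ref are illustrative, not from this diff):

```tf
# Hypothetical Azure cluster definition; only the networking override matters here.
module "azure-cluster" {
  source = "git::https://github.com/poseidon/typhoon//azure/container-linux/kubernetes?ref=v1.16.2"

  # ... other cluster settings ...

  # keep flannel instead of the new "calico" default
  networking = "flannel"
}
```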
#### Bare-Metal

* Add `controllers` and `workers` as typed lists of machine detail objects ([#566](https://github.com/poseidon/typhoon/pull/566))
  * Define clusters' machines cleanly and with Terraform v0.12 type constraints (**action required**, see PR example)
  * Remove `controller_names`, `controller_macs`, and `controller_domains` variables
  * Remove `worker_names`, `worker_macs`, and `worker_domains` variables
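A hedged sketch of the new machine definitions on bare-metal. The object fields are inferred from the removed `*_names`/`*_macs`/`*_domains` variables; names, MACs, and domains are placeholders, so consult the PR example for the exact shape:

```tf
module "bare-metal-cluster" {
  # ... other cluster settings ...

  # typed lists of machine detail objects (placeholder values)
  controllers = [{
    name   = "node1"
    mac    = "52:54:00:a1:9c:ae"
    domain = "node1.example.com"
  }]

  workers = [
    {
      name   = "node2"
      mac    = "52:54:00:b2:2f:86"
      domain = "node2.example.com"
    },
    {
      name   = "node3"
      mac    = "52:54:00:c3:61:77"
      domain = "node3.example.com"
    },
  ]
}
```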
#### DigitalOcean

* Change default networking provider from "flannel" to "calico" ([#573](https://github.com/poseidon/typhoon/pull/573))

#### Addons

* Update Grafana from v6.4.1 to [v6.4.2](https://github.com/grafana/grafana/releases/tag/v6.4.2)
* Change CLUO label from "app" to "name"

## v1.16.1

* Kubernetes [v1.16.1](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.16.md#v1161)
* Update etcd from v3.4.0 to [v3.4.1](https://github.com/etcd-io/etcd/releases/tag/v3.4.1)
* Update Calico from v3.8.2 to [v3.9.1](https://docs.projectcalico.org/v3.9/release-notes/)
* Add Terraform v0.12 variables types ([#553](https://github.com/poseidon/typhoon/pull/553), [#557](https://github.com/poseidon/typhoon/pull/557), [#560](https://github.com/poseidon/typhoon/pull/560), [#556](https://github.com/poseidon/typhoon/pull/556), [#562](https://github.com/poseidon/typhoon/pull/562))
* Deprecate `cluster_domain_suffix` variable
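With typed variables, module callers pass native Terraform values instead of quoted strings. A minimal sketch (the module name and values are illustrative; the types match the variables.tf hunks later in this diff):

```tf
module "cluster" {
  # ... platform and cluster settings ...

  # Terraform v0.12 typed values (illustrative)
  controller_count   = 1      # number, no longer "1"
  worker_count       = 2
  enable_aggregation = true   # bool, no longer "true"
  worker_price       = 0.03   # number; 0 selects on-demand instances
}
```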
#### AWS

* Add `worker_node_labels` variable to set initial worker node labels ([#550](https://github.com/poseidon/typhoon/pull/550))
* Add `node_labels` variable to internal `workers` pool module ([#550](https://github.com/poseidon/typhoon/pull/550))
* For Fedora CoreOS, detect most recent AMI in the region
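For example, a hedged sketch of labeling a cluster's workers at provisioning time. The variable is a `list(string)` per the variables.tf hunk below; the label keys and values here are placeholders passed through to the kubelet's `--node-labels` flag:

```tf
module "aws-cluster" {
  # ... cluster settings ...

  # placeholder labels applied to workers at kubelet registration
  worker_node_labels = ["pool=default", "node.example.com/tier=web"]
}
```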
#### Azure

* Promote `networking` provider Calico VXLAN out of experimental (set `networking = "calico"`)
* Add `worker_node_labels` variable to set initial worker node labels ([#550](https://github.com/poseidon/typhoon/pull/550))
* Add `node_labels` variable to internal `workers` pool module ([#550](https://github.com/poseidon/typhoon/pull/550))
* Change `workers` module default `vm_type` to `Standard_DS1_v2` (followup to [#539](https://github.com/poseidon/typhoon/pull/539))

#### Bare-Metal

* For Fedora CoreOS, use new kernel, initrd, and raw paths ([#563](https://github.com/poseidon/typhoon/pull/563))
* Fix Terraform missing comma error ([#549](https://github.com/poseidon/typhoon/pull/549))
* Remove deprecated `container_linux_oem` variable ([#562](https://github.com/poseidon/typhoon/pull/562))

#### DigitalOcean

* Promote `networking` provider Calico VXLAN out of experimental (set `networking = "calico"`)
* Fix Terraform missing comma error ([#549](https://github.com/poseidon/typhoon/pull/549))

#### Google Cloud

* Add `worker_node_labels` variable to set initial worker node labels ([#550](https://github.com/poseidon/typhoon/pull/550))
* Add `node_labels` variable to internal `workers` module ([#550](https://github.com/poseidon/typhoon/pull/550))

#### Addons

* Update Prometheus from v2.12.0 to [v2.13.0](https://github.com/prometheus/prometheus/releases/tag/v2.13.0)
* Fix Prometheus etcd target discovery and scraping ([#561](https://github.com/poseidon/typhoon/pull/561), regressed with Kubernetes v1.16.0)
* Update kube-state-metrics from v1.7.2 to v1.8.0
* Update nginx-ingress from v0.25.1 to [v0.26.1](https://github.com/kubernetes/ingress-nginx/releases/tag/nginx-0.26.1) ([#555](https://github.com/poseidon/typhoon/pull/555))
  * Add lifecycle hook to allow draining for up to 5 minutes
* Update Grafana from v6.3.5 to [v6.4.1](https://github.com/grafana/grafana/releases/tag/v6.4.1)

## v1.16.0

* Kubernetes [v1.16.0](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.16.md#v1160) ([#543](https://github.com/poseidon/typhoon/pull/543))
  * Read about several Kubernetes API [deprecations](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.16.md#deprecations-and-removals)!
  * Remove legacy node role labels (no longer shown in `kubectl get nodes`)
  * Rename node labels to `node.kubernetes.io/master` and `node.kubernetes.io/node` (migratory)
* Migrate control plane from self-hosted to static pods ([#536](https://github.com/poseidon/typhoon/pull/536))
  * Run `kube-apiserver`, `kube-scheduler`, and `kube-controller-manager` as static pods on each controller
  * `kubectl` edits to `kube-apiserver`, `kube-scheduler`, and `kube-controller-manager` are no longer possible (change)
  * Remove bootkube, self-hosted pivot, and `pod-checkpointer`
* Update CoreDNS from v1.5.0 to v1.6.2 ([#535](https://github.com/poseidon/typhoon/pull/535))
* Update etcd from v3.3.15 to [v3.4.0](https://github.com/etcd-io/etcd/releases/tag/v3.4.0)
* Recommend updating `terraform-provider-ct` plugin from v0.3.2 to [v0.4.0](https://github.com/poseidon/terraform-provider-ct/releases/tag/v0.4.0)

#### Azure

* Change default `controller_type` to `Standard_B2s` ([#539](https://github.com/poseidon/typhoon/pull/539))
  * `B2s` is cheaper by $17/month and provides 2 vCPU, 4GB RAM
* Change default `worker_type` to `Standard_DS1_v2` ([#539](https://github.com/poseidon/typhoon/pull/539))
  * `F1` is previous generation. `DS1_v2` is newer, similar cost, and supports Low Priority mode
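A hedged sketch of pinning these instance sizes explicitly in an Azure cluster definition (the module name is illustrative; the values are the new defaults from the changelog above):

```tf
module "azure-cluster" {
  # ... cluster settings ...

  # match the new defaults explicitly
  controller_type = "Standard_B2s"
  worker_type     = "Standard_DS1_v2"
}
```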
#### Addons

* Update Grafana from v6.3.3 to v6.3.5

## v1.15.3

* Kubernetes [v1.15.3](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.15.md#v1153)
README.md (24)

@@ -1,4 +1,4 @@
# Typhoon []() <img align="right" src="https://storage.googleapis.com/poseidon/typhoon-logo.png">
# Typhoon <img align="right" src="https://storage.googleapis.com/poseidon/typhoon-logo.png">

Typhoon is a minimal and free Kubernetes distribution.

@@ -11,7 +11,7 @@ Typhoon distributes upstream Kubernetes, architectural conventions, and cluster

## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>

* Kubernetes v1.15.3 (upstream, via [kubernetes-incubator/bootkube](https://github.com/kubernetes-incubator/bootkube))
* Kubernetes v1.16.2 (upstream)
* Single or multi-master, [Calico](https://www.projectcalico.org/) or [flannel](https://github.com/coreos/flannel) networking
* On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
* Advanced features like [worker pools](https://typhoon.psdn.io/advanced/worker-pools/), [preemptible](https://typhoon.psdn.io/cl/google-cloud/#preemption) workers, and [snippets](https://typhoon.psdn.io/advanced/customization/#container-linux) customization

@@ -48,7 +48,7 @@ Define a Kubernetes cluster by using the Terraform module for your chosen platfo

```tf
module "google-cloud-yavin" {
  source = "git::https://github.com/poseidon/typhoon//google-cloud/container-linux/kubernetes?ref=v1.15.3"
  source = "git::https://github.com/poseidon/typhoon//google-cloud/container-linux/kubernetes?ref=v1.16.2"

  # Google Cloud
  cluster_name = "yavin"

@@ -81,10 +81,10 @@ In 4-8 minutes (varies by platform), the cluster will be ready. This Google Clou

```sh
$ export KUBECONFIG=/home/user/.secrets/clusters/yavin/auth/kubeconfig
$ kubectl get nodes
NAME ROLES STATUS AGE VERSION
yavin-controller-0.c.example-com.internal controller,master Ready 6m v1.15.3
yavin-worker-jrbf.c.example-com.internal node Ready 5m v1.15.3
yavin-worker-mzdm.c.example-com.internal node Ready 5m v1.15.3
NAME ROLES STATUS AGE VERSION
yavin-controller-0.c.example-com.internal <none> Ready 6m v1.16.2
yavin-worker-jrbf.c.example-com.internal <none> Ready 5m v1.16.2
yavin-worker-mzdm.c.example-com.internal <none> Ready 5m v1.16.2
```

List the pods.

@@ -97,16 +97,12 @@ kube-system calico-node-d1l5b 2/2 Running 0
kube-system calico-node-sp9ps 2/2 Running 0 6m
kube-system coredns-1187388186-zj5dl 1/1 Running 0 6m
kube-system coredns-1187388186-dkh3o 1/1 Running 0 6m
kube-system kube-apiserver-zppls 1/1 Running 0 6m
kube-system kube-controller-manager-3271970485-gh9kt 1/1 Running 0 6m
kube-system kube-controller-manager-3271970485-h90v8 1/1 Running 1 6m
kube-system kube-apiserver-controller-0 1/1 Running 0 6m
kube-system kube-controller-manager-controller-0 1/1 Running 0 6m
kube-system kube-proxy-117v6 1/1 Running 0 6m
kube-system kube-proxy-9886n 1/1 Running 0 6m
kube-system kube-proxy-njn47 1/1 Running 0 6m
kube-system kube-scheduler-3895335239-5x87r 1/1 Running 0 6m
kube-system kube-scheduler-3895335239-bzrrt 1/1 Running 1 6m
kube-system pod-checkpointer-l6lrt 1/1 Running 0 6m
kube-system pod-checkpointer-l6lrt-controller-0 1/1 Running 0 6m
kube-system kube-scheduler-controller-0 1/1 Running 0 6m
```

## Non-Goals
@@ -10,11 +10,11 @@ spec:
maxUnavailable: 1
selector:
matchLabels:
app: container-linux-update-agent
name: container-linux-update-agent
template:
metadata:
labels:
app: container-linux-update-agent
name: container-linux-update-agent
annotations:
seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
spec:

@@ -7,11 +7,11 @@ spec:
replicas: 1
selector:
matchLabels:
app: container-linux-update-operator
name: container-linux-update-operator
template:
metadata:
labels:
app: container-linux-update-operator
name: container-linux-update-operator
annotations:
seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
spec:
@@ -23,7 +23,7 @@ spec:
spec:
containers:
- name: grafana
image: docker.io/grafana/grafana:6.3.3
image: docker.io/grafana/grafana:6.4.2
env:
- name: GF_PATHS_CONFIG
value: "/etc/grafana/custom.ini"
@@ -20,11 +20,9 @@ spec:
annotations:
seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
spec:
nodeSelector:
node-role.kubernetes.io/node: ""
containers:
- name: nginx-ingress-controller
image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.25.1
image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.26.1
args:
- /nginx-ingress-controller
- --ingress-class=public

@@ -67,6 +65,11 @@ spec:
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
lifecycle:
preStop:
exec:
command:
- /wait-shutdown
securityContext:
capabilities:
add:

@@ -75,4 +78,4 @@ spec:
- ALL
runAsUser: 33 # www-data
restartPolicy: Always
terminationGracePeriodSeconds: 60
terminationGracePeriodSeconds: 300
@@ -20,11 +20,9 @@ spec:
annotations:
seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
spec:
nodeSelector:
node-role.kubernetes.io/node: ""
containers:
- name: nginx-ingress-controller
image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.25.1
image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.26.1
args:
- /nginx-ingress-controller
- --ingress-class=public

@@ -67,6 +65,11 @@ spec:
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
lifecycle:
preStop:
exec:
command:
- /wait-shutdown
securityContext:
capabilities:
add:

@@ -75,4 +78,4 @@ spec:
- ALL
runAsUser: 33 # www-data
restartPolicy: Always
terminationGracePeriodSeconds: 60
terminationGracePeriodSeconds: 300
@@ -22,7 +22,7 @@ spec:
spec:
containers:
- name: nginx-ingress-controller
image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.25.1
image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.26.1
args:
- /nginx-ingress-controller
- --ingress-class=public

@@ -62,6 +62,11 @@ spec:
successThreshold: 1
failureThreshold: 3
timeoutSeconds: 5
lifecycle:
preStop:
exec:
command:
- /wait-shutdown
securityContext:
capabilities:
add:

@@ -70,5 +75,5 @@ spec:
- ALL
runAsUser: 33 # www-data
restartPolicy: Always
terminationGracePeriodSeconds: 60
terminationGracePeriodSeconds: 300
@@ -20,11 +20,9 @@ spec:
annotations:
seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
spec:
nodeSelector:
node-role.kubernetes.io/node: ""
containers:
- name: nginx-ingress-controller
image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.25.1
image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.26.1
args:
- /nginx-ingress-controller
- --ingress-class=public

@@ -67,6 +65,11 @@ spec:
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
lifecycle:
preStop:
exec:
command:
- /wait-shutdown
securityContext:
capabilities:
add:

@@ -75,4 +78,4 @@ spec:
- ALL
runAsUser: 33 # www-data
restartPolicy: Always
terminationGracePeriodSeconds: 60
terminationGracePeriodSeconds: 300
@@ -20,11 +20,9 @@ spec:
annotations:
seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
spec:
nodeSelector:
node-role.kubernetes.io/node: ""
containers:
- name: nginx-ingress-controller
image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.25.1
image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.26.1
args:
- /nginx-ingress-controller
- --ingress-class=public

@@ -67,6 +65,11 @@ spec:
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
lifecycle:
preStop:
exec:
command:
- /wait-shutdown
securityContext:
capabilities:
add:

@@ -75,4 +78,4 @@ spec:
- ALL
runAsUser: 33 # www-data
restartPolicy: Always
terminationGracePeriodSeconds: 60
terminationGracePeriodSeconds: 300
@@ -115,7 +115,7 @@ data:
- role: node
scheme: http
relabel_configs:
- source_labels: [__meta_kubernetes_node_label_node_role_kubernetes_io_controller]
- source_labels: [__meta_kubernetes_node_label_node_kubernetes_io_controller]
action: keep
regex: 'true'
- action: labelmap
@@ -20,7 +20,7 @@ spec:
serviceAccountName: prometheus
containers:
- name: prometheus
image: quay.io/prometheus/prometheus:v2.12.0
image: quay.io/prometheus/prometheus:v2.13.0
args:
- --web.listen-address=0.0.0.0:9090
- --config.file=/etc/prometheus/prometheus.yaml
@@ -24,7 +24,7 @@ spec:
serviceAccountName: kube-state-metrics
containers:
- name: kube-state-metrics
image: quay.io/coreos/kube-state-metrics:v1.7.2
image: quay.io/coreos/kube-state-metrics:v1.8.0
ports:
- name: metrics
containerPort: 8080
@@ -11,7 +11,7 @@ Typhoon distributes upstream Kubernetes, architectural conventions, and cluster

## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>

* Kubernetes v1.15.3 (upstream, via [kubernetes-incubator/bootkube](https://github.com/kubernetes-incubator/bootkube))
* Kubernetes v1.16.2 (upstream)
* Single or multi-master, [Calico](https://www.projectcalico.org/) or [flannel](https://github.com/coreos/flannel) networking
* On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
* Advanced features like [worker pools](https://typhoon.psdn.io/advanced/worker-pools/), [spot](https://typhoon.psdn.io/cl/aws/#spot) workers, and [snippets](https://typhoon.psdn.io/advanced/customization/#container-linux) customization
@@ -1,6 +1,6 @@
# Self-hosted Kubernetes assets (kubeconfig, manifests)
module "bootkube" {
source = "git::https://github.com/poseidon/terraform-render-bootkube.git?ref=248675e7a95660e5c8a65f8b7ce86be89ef55a0e"
# Kubernetes assets (kubeconfig, manifests)
module "bootstrap" {
source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=0fcc067476fa1463d057fd43760df222b7262b27"

cluster_name = var.cluster_name
api_servers = [format("%s.%s", var.cluster_name, var.dns_zone)]
@@ -7,7 +7,7 @@ systemd:
- name: 40-etcd-cluster.conf
contents: |
[Service]
Environment="ETCD_IMAGE_TAG=v3.3.15"
Environment="ETCD_IMAGE_TAG=v3.4.2"
Environment="ETCD_NAME=${etcd_name}"
Environment="ETCD_ADVERTISE_CLIENT_URLS=https://${etcd_domain}:2379"
Environment="ETCD_INITIAL_ADVERTISE_PEER_URLS=https://${etcd_domain}:2380"
@@ -64,11 +64,9 @@ systemd:
--mount volume=var-log,target=/var/log \
--insecure-options=image"
Environment=KUBELET_CGROUP_DRIVER=${cgroup_driver}
ExecStartPre=/bin/mkdir -p /opt/cni/bin
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
ExecStartPre=/bin/mkdir -p /etc/kubernetes/checkpoint-secrets
ExecStartPre=/bin/mkdir -p /etc/kubernetes/inactive-manifests
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
ExecStartPre=/bin/mkdir -p /opt/cni/bin
ExecStartPre=/bin/mkdir -p /var/lib/cni
ExecStartPre=/bin/mkdir -p /var/lib/calico
ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins

@@ -87,8 +85,8 @@ systemd:
--kubeconfig=/etc/kubernetes/kubeconfig \
--lock-file=/var/run/lock/kubelet.lock \
--network-plugin=cni \
--node-labels=node-role.kubernetes.io/master \
--node-labels=node-role.kubernetes.io/controller="true" \
--node-labels=node.kubernetes.io/master \
--node-labels=node.kubernetes.io/controller="true" \
--pod-manifest-path=/etc/kubernetes/manifests \
--read-only-port=0 \
--register-with-taints=node-role.kubernetes.io/master=:NoSchedule \
@@ -98,17 +96,28 @@ systemd:
RestartSec=10
[Install]
WantedBy=multi-user.target
- name: bootkube.service
- name: bootstrap.service
contents: |
[Unit]
Description=Bootstrap a Kubernetes cluster
ConditionPathExists=!/opt/bootkube/init_bootkube.done
Description=Kubernetes control plane
ConditionPathExists=!/opt/bootstrap/bootstrap.done
[Service]
Type=oneshot
RemainAfterExit=true
WorkingDirectory=/opt/bootkube
ExecStart=/opt/bootkube/bootkube-start
ExecStartPost=/bin/touch /opt/bootkube/init_bootkube.done
WorkingDirectory=/opt/bootstrap
ExecStartPre=-/usr/bin/bash -c 'set -x && [ -n "$(ls /opt/bootstrap/assets/manifests-*/* 2>/dev/null)" ] && mv /opt/bootstrap/assets/manifests-*/* /opt/bootstrap/assets/manifests && rm -rf /opt/bootstrap/assets/manifests-*'
ExecStart=/usr/bin/rkt run \
--trust-keys-from-https \
--volume assets,kind=host,source=/opt/bootstrap/assets \
--mount volume=assets,target=/assets \
--volume script,kind=host,source=/opt/bootstrap/apply \
--mount volume=script,target=/apply \
--insecure-options=image \
docker://k8s.gcr.io/hyperkube:v1.16.2 \
--net=host \
--dns=host \
--exec=/apply
ExecStartPost=/bin/touch /opt/bootstrap/bootstrap.done
[Install]
WantedBy=multi-user.target
storage:
@@ -125,37 +134,27 @@ storage:
contents:
inline: |
KUBELET_IMAGE_URL=docker://k8s.gcr.io/hyperkube
KUBELET_IMAGE_TAG=v1.15.3
KUBELET_IMAGE_TAG=v1.16.2
- path: /opt/bootstrap/apply
filesystem: root
mode: 0544
contents:
inline: |
#!/bin/bash -e
export KUBECONFIG=/assets/auth/kubeconfig
until kubectl version; do
echo "Waiting for static pod control plane"
sleep 5
done
until kubectl apply -f /assets/manifests -R; do
echo "Retry applying manifests"
sleep 5
done
- path: /etc/sysctl.d/max-user-watches.conf
filesystem: root
contents:
inline: |
fs.inotify.max_user_watches=16184
- path: /opt/bootkube/bootkube-start
filesystem: root
mode: 0544
user:
id: 500
group:
id: 500
contents:
inline: |
#!/bin/bash
# Wrapper for bootkube start
set -e
# Move experimental manifests
[ -n "$(ls /opt/bootkube/assets/manifests-*/* 2>/dev/null)" ] && mv /opt/bootkube/assets/manifests-*/* /opt/bootkube/assets/manifests && rm -rf /opt/bootkube/assets/manifests-*
exec /usr/bin/rkt run \
--trust-keys-from-https \
--volume assets,kind=host,source=/opt/bootkube/assets \
--mount volume=assets,target=/assets \
--volume bootstrap,kind=host,source=/etc/kubernetes \
--mount volume=bootstrap,target=/etc/kubernetes \
$${RKT_OPTS} \
quay.io/coreos/bootkube:v0.14.0 \
--net=host \
--dns=host \
--exec=/bootkube -- start --asset-dir=/assets "$@"
passwd:
users:
- name: core
@@ -71,7 +71,7 @@ data "template_file" "controller-configs" {
# etcd0=https://cluster-etcd0.example.com,etcd1=https://cluster-etcd1.example.com,...
etcd_initial_cluster = join(",", data.template_file.etcds.*.rendered)
cgroup_driver = local.flavor == "flatcar" && local.channel == "edge" ? "systemd" : "cgroupfs"
kubeconfig = indent(10, module.bootkube.kubeconfig-kubelet)
kubeconfig = indent(10, module.bootstrap.kubeconfig-kubelet)
ssh_authorized_key = var.ssh_authorized_key
cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
cluster_domain_suffix = var.cluster_domain_suffix
@@ -1,5 +1,5 @@
output "kubeconfig-admin" {
value = module.bootkube.kubeconfig-admin
value = module.bootstrap.kubeconfig-admin
}

# Outputs for Kubernetes Ingress

@@ -32,7 +32,7 @@ output "worker_security_groups" {
}

output "kubeconfig" {
value = module.bootkube.kubeconfig-kubelet
value = module.bootstrap.kubeconfig-kubelet
}

# Outputs for custom load balancing
@@ -33,6 +33,28 @@ resource "aws_security_group_rule" "controller-etcd" {
self = true
}

# Allow Prometheus to scrape kube-scheduler
resource "aws_security_group_rule" "controller-scheduler-metrics" {
security_group_id = aws_security_group.controller.id

type = "ingress"
protocol = "tcp"
from_port = 10251
to_port = 10251
source_security_group_id = aws_security_group.worker.id
}

# Allow Prometheus to scrape kube-controller-manager
resource "aws_security_group_rule" "controller-manager-metrics" {
security_group_id = aws_security_group.controller.id

type = "ingress"
protocol = "tcp"
from_port = 10252
to_port = 10252
source_security_group_id = aws_security_group.worker.id
}

# Allow Prometheus to scrape etcd metrics
resource "aws_security_group_rule" "controller-etcd-metrics" {
security_group_id = aws_security_group.controller.id
@@ -1,48 +1,57 @@
# Secure copy etcd TLS assets to controllers.
# Secure copy assets to controllers.
resource "null_resource" "copy-controller-secrets" {
count = var.controller_count

depends_on = [
module.bootstrap,
]

connection {
type = "ssh"
host = element(aws_instance.controllers.*.public_ip, count.index)
host = aws_instance.controllers.*.public_ip[count.index]
user = "core"
timeout = "15m"
}

provisioner "file" {
content = module.bootkube.etcd_ca_cert
content = module.bootstrap.etcd_ca_cert
destination = "$HOME/etcd-client-ca.crt"
}

provisioner "file" {
content = module.bootkube.etcd_client_cert
content = module.bootstrap.etcd_client_cert
destination = "$HOME/etcd-client.crt"
}

provisioner "file" {
content = module.bootkube.etcd_client_key
content = module.bootstrap.etcd_client_key
destination = "$HOME/etcd-client.key"
}

provisioner "file" {
content = module.bootkube.etcd_server_cert
content = module.bootstrap.etcd_server_cert
destination = "$HOME/etcd-server.crt"
}

provisioner "file" {
content = module.bootkube.etcd_server_key
content = module.bootstrap.etcd_server_key
destination = "$HOME/etcd-server.key"
}

provisioner "file" {
content = module.bootkube.etcd_peer_cert
content = module.bootstrap.etcd_peer_cert
destination = "$HOME/etcd-peer.crt"
}

provisioner "file" {
content = module.bootkube.etcd_peer_key
content = module.bootstrap.etcd_peer_key
destination = "$HOME/etcd-peer.key"
}

provisioner "file" {
source = var.asset_dir
destination = "$HOME/assets"
}

provisioner "remote-exec" {
inline = [
@@ -56,18 +65,22 @@ resource "null_resource" "copy-controller-secrets" {
"sudo mv etcd-peer.key /etc/ssl/etcd/etcd/peer.key",
"sudo chown -R etcd:etcd /etc/ssl/etcd",
"sudo chmod -R 500 /etc/ssl/etcd",
"sudo mv $HOME/assets /opt/bootstrap/assets",
"sudo mkdir -p /etc/kubernetes/manifests",
"sudo mkdir -p /etc/kubernetes/bootstrap-secrets",
"sudo cp -r /opt/bootstrap/assets/tls/* /etc/kubernetes/bootstrap-secrets/",
"sudo cp /opt/bootstrap/assets/auth/kubeconfig /etc/kubernetes/bootstrap-secrets/",
"sudo cp -r /opt/bootstrap/assets/static-manifests/* /etc/kubernetes/manifests/",
]
}
}

# Secure copy bootkube assets to ONE controller and start bootkube to perform
# one-time self-hosted cluster bootstrapping.
resource "null_resource" "bootkube-start" {
# Connect to a controller to perform one-time cluster bootstrap.
resource "null_resource" "bootstrap" {
depends_on = [
module.bootkube,
null_resource.copy-controller-secrets,
module.workers,
aws_route53_record.apiserver,
null_resource.copy-controller-secrets,
]

connection {

@@ -77,15 +90,9 @@ resource "null_resource" "bootkube-start" {
timeout = "15m"
}

provisioner "file" {
source = var.asset_dir
destination = "$HOME/assets"
}

provisioner "remote-exec" {
inline = [
"sudo mv $HOME/assets /opt/bootkube",
"sudo systemctl start bootkube",
"sudo systemctl start bootstrap",
]
}
}
@@ -18,57 +18,57 @@ variable "dns_zone_id" {
# instances

variable "controller_count" {
type = string
default = "1"
type = number
description = "Number of controllers (i.e. masters)"
default = 1
}

variable "worker_count" {
type = string
default = "1"
type = number
description = "Number of workers"
default = 1
}

variable "controller_type" {
type = string
default = "t3.small"
description = "EC2 instance type for controllers"
default = "t3.small"
}

variable "worker_type" {
type = string
default = "t3.small"
description = "EC2 instance type for workers"
default = "t3.small"
}

variable "os_image" {
type = string
default = "coreos-stable"
description = "AMI channel for a Container Linux derivative (coreos-stable, coreos-beta, coreos-alpha, flatcar-stable, flatcar-beta, flatcar-alpha, flatcar-edge)"
default = "coreos-stable"
}

variable "disk_size" {
type = string
default = "40"
type = number
description = "Size of the EBS volume in GB"
default = 40
}

variable "disk_type" {
type = string
default = "gp2"
description = "Type of the EBS volume (e.g. standard, gp2, io1)"
default = "gp2"
}

variable "disk_iops" {
type = string
default = "0"
type = number
description = "IOPS of the EBS volume (e.g. 100)"
default = 0
}

variable "worker_price" {
type = string
default = ""
description = "Spot price in USD for autoscaling group spot instances. Leave as default empty string for autoscaling group to use on-demand instances. Note, switching in-place from spot to on-demand is not possible: https://github.com/terraform-providers/terraform-provider-aws/issues/4320"
type = number
description = "Spot price in USD for worker instances or 0 to use on-demand instances"
default = 0
}

variable "worker_target_groups" {
@@ -97,60 +97,66 @@ variable "ssh_authorized_key" {
}

variable "asset_dir" {
description = "Path to a directory where generated assets should be placed (contains secrets)"
type = string
description = "Absolute path to a directory where generated assets should be placed (contains secrets)"
}

variable "networking" {
description = "Choice of networking provider (calico or flannel)"
type = string
description = "Choice of networking provider (calico or flannel)"
default = "calico"
}

variable "network_mtu" {
type = number
description = "CNI interface MTU (applies to calico only). Use 8981 if using instances types with Jumbo frames."
type = string
default = "1480"
default = 1480
}

variable "host_cidr" {
description = "CIDR IPv4 range to assign to EC2 nodes"
type = string
description = "CIDR IPv4 range to assign to EC2 nodes"
default = "10.0.0.0/16"
}

variable "pod_cidr" {
description = "CIDR IPv4 range to assign Kubernetes pods"
type = string
description = "CIDR IPv4 range to assign Kubernetes pods"
default = "10.2.0.0/16"
}

variable "service_cidr" {
type = string
description = <<EOD
CIDR IPv4 range to assign Kubernetes services.
The 1st IP will be reserved for kube_apiserver, the 10th IP will be reserved for coredns.
EOD

type = string
default = "10.3.0.0/16"
}

variable "cluster_domain_suffix" {
description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
type = string
default = "cluster.local"
}

variable "enable_reporting" {
type = string
type = bool
description = "Enable usage or analytics reporting to upstreams (Calico)"
default = "false"
default = false
}

variable "enable_aggregation" {
type = bool
description = "Enable the Kubernetes Aggregation Layer (defaults to false)"
type = string
default = "false"
default = false
}

variable "worker_node_labels" {
type = list(string)
description = "List of initial worker node labels"
default = []
}

# unofficial, undocumented, unsupported

variable "cluster_domain_suffix" {
type = string
description = "Queries for domains with the suffix will be answered by CoreDNS. Default is cluster.local (e.g. foo.default.svc.cluster.local)"
default = "cluster.local"
}
@@ -14,10 +14,11 @@ module "workers" {
target_groups = var.worker_target_groups

# configuration
kubeconfig = module.bootkube.kubeconfig-kubelet
kubeconfig = module.bootstrap.kubeconfig-kubelet
ssh_authorized_key = var.ssh_authorized_key
service_cidr = var.service_cidr
cluster_domain_suffix = var.cluster_domain_suffix
clc_snippets = var.worker_clc_snippets
node_labels = var.worker_node_labels
}
@@ -39,9 +39,9 @@ systemd:
--mount volume=var-log,target=/var/log \
--insecure-options=image"
Environment=KUBELET_CGROUP_DRIVER=${cgroup_driver}
ExecStartPre=/bin/mkdir -p /opt/cni/bin
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
ExecStartPre=/bin/mkdir -p /opt/cni/bin
ExecStartPre=/bin/mkdir -p /var/lib/cni
ExecStartPre=/bin/mkdir -p /var/lib/calico
ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins

@@ -60,7 +60,10 @@ systemd:
--kubeconfig=/etc/kubernetes/kubeconfig \
--lock-file=/var/run/lock/kubelet.lock \
--network-plugin=cni \
--node-labels=node-role.kubernetes.io/node \
--node-labels=node.kubernetes.io/node \
%{ for label in split(",", node_labels) }
--node-labels=${label} \
%{ endfor ~}
--pod-manifest-path=/etc/kubernetes/manifests \
--read-only-port=0 \
--volume-plugin-dir=/var/lib/kubelet/volumeplugins

@@ -95,7 +98,7 @@ storage:
contents:
inline: |
KUBELET_IMAGE_URL=docker://k8s.gcr.io/hyperkube
KUBELET_IMAGE_TAG=v1.15.3
KUBELET_IMAGE_TAG=v1.16.2
- path: /etc/sysctl.d/max-user-watches.conf
filesystem: root
contents:

@@ -113,7 +116,7 @@ storage:
--volume config,kind=host,source=/etc/kubernetes \
--mount volume=config,target=/etc/kubernetes \
--insecure-options=image \
docker://k8s.gcr.io/hyperkube:v1.15.3 \
docker://k8s.gcr.io/hyperkube:v1.16.2 \
--net=host \
--dns=host \
--exec=/kubectl -- --kubeconfig=/etc/kubernetes/kubeconfig delete node $(hostname)
@@ -23,45 +23,45 @@ variable "security_groups" {
# instances

variable "worker_count" {
type = string
default = "1"
type = number
description = "Number of instances"
default = 1
}

variable "instance_type" {
type = string
default = "t3.small"
description = "EC2 instance type"
default = "t3.small"
}

variable "os_image" {
type = string
default = "coreos-stable"
description = "AMI channel for a Container Linux derivative (coreos-stable, coreos-beta, coreos-alpha, flatcar-stable, flatcar-beta, flatcar-alpha, flatcar-edge)"
default = "coreos-stable"
}

variable "disk_size" {
type = string
default = "40"
type = number
description = "Size of the EBS volume in GB"
default = 40
}

variable "disk_type" {
type = string
default = "gp2"
description = "Type of the EBS volume (e.g. standard, gp2, io1)"
default = "gp2"
}

variable "disk_iops" {
type = string
default = "0"
type = number
description = "IOPS of the EBS volume (required for io1)"
default = 0
}

variable "spot_price" {
type = string
default = ""
description = "Spot price in USD for autoscaling group spot instances. Leave as default empty string for autoscaling group to use on-demand instances. Note, switching in-place from spot to on-demand is not possible: https://github.com/terraform-providers/terraform-provider-aws/issues/4320"
type = number
description = "Spot price in USD for worker instances or 0 to use on-demand instances"
default = 0
}

variable "target_groups" {

@@ -89,19 +89,22 @@ variable "ssh_authorized_key" {
}

variable "service_cidr" {
type = string
description = <<EOD
CIDR IPv4 range to assign Kubernetes services.
The 1st IP will be reserved for kube_apiserver, the 10th IP will be reserved for coredns.
EOD

type = string
default = "10.3.0.0/16"
}

variable "cluster_domain_suffix" {
description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
type = string
description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
default = "cluster.local"
}

variable "node_labels" {
type = list(string)
description = "List of initial node labels"
default = []
}
@@ -46,7 +46,7 @@ resource "aws_autoscaling_group" "workers" {
resource "aws_launch_configuration" "worker" {
image_id = local.ami_id
instance_type = var.instance_type
spot_price = var.spot_price
spot_price = var.spot_price > 0 ? var.spot_price : null
enable_monitoring = false

user_data = data.ct_config.worker-ignition.rendered

@@ -86,6 +86,7 @@ data "template_file" "worker-config" {
cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
cluster_domain_suffix = var.cluster_domain_suffix
cgroup_driver = local.flavor == "flatcar" && local.channel == "edge" ? "systemd" : "cgroupfs"
node_labels = join(",", var.node_labels)
}
}
@@ -11,7 +11,7 @@ Typhoon distributes upstream Kubernetes, architectural conventions, and cluster

## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>

* Kubernetes v1.15.3 (upstream, via [kubernetes-incubator/bootkube](https://github.com/kubernetes-incubator/bootkube))
* Kubernetes v1.16.2 (upstream)
* Single or multi-master, [Calico](https://www.projectcalico.org/) or [flannel](https://github.com/coreos/flannel) networking
* On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
* Advanced features like [worker pools](https://typhoon.psdn.io/advanced/worker-pools/), [spot](https://typhoon.psdn.io/cl/aws/#spot) workers, and [snippets](https://typhoon.psdn.io/advanced/customization/#container-linux) customization
@@ -13,9 +13,11 @@ data "aws_ami" "fedora-coreos" {
values = ["hvm"]
}

// pin on known ok versions as preview matures
filter {
name = "name"
values = ["fedora-coreos-30.20190801.0-hvm"]
values = ["fedora-coreos-30.*.*-hvm"]
}

# try to filter out dev images (AWS filters can't)
name_regex = "^fedora-coreos-30.[0-9]*.[0-9]*-hvm*"
}
@@ -1,6 +1,6 @@
# Self-hosted Kubernetes assets (kubeconfig, manifests)
module "bootkube" {
source = "git::https://github.com/poseidon/terraform-render-bootkube.git?ref=248675e7a95660e5c8a65f8b7ce86be89ef55a0e"
# Kubernetes assets (kubeconfig, manifests)
module "bootstrap" {
source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=0fcc067476fa1463d057fd43760df222b7262b27"

cluster_name = var.cluster_name
api_servers = [format("%s.%s", var.cluster_name, var.dns_zone)]

@@ -67,7 +67,7 @@ data "template_file" "controller-configs" {
etcd_domain = "${var.cluster_name}-etcd${count.index}.${var.dns_zone}"
# etcd0=https://cluster-etcd0.example.com,etcd1=https://cluster-etcd1.example.com,...
etcd_initial_cluster = join(",", data.template_file.etcds.*.rendered)
kubeconfig = indent(10, module.bootkube.kubeconfig-kubelet)
kubeconfig = indent(10, module.bootstrap.kubeconfig-kubelet)
ssh_authorized_key = var.ssh_authorized_key
cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
cluster_domain_suffix = var.cluster_domain_suffix
@@ -28,7 +28,7 @@ systemd:
--network host \
--volume /var/lib/etcd:/var/lib/etcd:rw,Z \
--volume /etc/ssl/etcd:/etc/ssl/certs:ro,Z \
quay.io/coreos/etcd:v3.3.15
quay.io/coreos/etcd:v3.4.2
ExecStop=/usr/bin/podman stop etcd
[Install]
WantedBy=multi-user.target

@@ -56,9 +56,9 @@ systemd:
[Service]
ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
ExecStartPre=/bin/mkdir -p /opt/cni/bin
ExecStartPre=/bin/mkdir -p /var/lib/calico
ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins
ExecStartPre=/bin/mkdir -p /opt/cni/bin
ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
ExecStartPre=-/usr/bin/podman rm kubelet
ExecStart=/usr/bin/podman run --name kubelet \

@@ -80,13 +80,13 @@ systemd:
--volume /var/run:/var/run \
--volume /var/run/lock:/var/run/lock:z \
--volume /opt/cni/bin:/opt/cni/bin:z \
k8s.gcr.io/hyperkube:v1.15.3 /hyperkube kubelet \
k8s.gcr.io/hyperkube:v1.16.2 /hyperkube kubelet \
--anonymous-auth=false \
--authentication-token-webhook \
--authorization-mode=Webhook \
--cgroup-driver=systemd \
--cgroups-per-qos=false \
--enforce-node-allocatable="" \
--cgroups-per-qos=true \
--enforce-node-allocatable=pods \
--client-ca-file=/etc/kubernetes/ca.crt \
--cluster_dns=${cluster_dns_service_ip} \
--cluster_domain=${cluster_domain_suffix} \

@@ -95,8 +95,8 @@ systemd:
--kubeconfig=/etc/kubernetes/kubeconfig \
--lock-file=/var/run/lock/kubelet.lock \
--network-plugin=cni \
--node-labels=node-role.kubernetes.io/master \
--node-labels=node-role.kubernetes.io/controller="true" \
--node-labels=node.kubernetes.io/master \
--node-labels=node.kubernetes.io/controller="true" \
--pod-manifest-path=/etc/kubernetes/manifests \
--read-only-port=0 \
--register-with-taints=node-role.kubernetes.io/master=:NoSchedule \
@@ -107,33 +107,48 @@ systemd:
RestartSec=10
[Install]
WantedBy=multi-user.target
- name: bootkube.service
- name: bootstrap.service
contents: |
[Unit]
Description=Bootstrap a Kubernetes control plane
ConditionPathExists=!/opt/bootkube/init_bootkube.done
Description=Kubernetes control plane
ConditionPathExists=!/opt/bootstrap/bootstrap.done
[Service]
Type=oneshot
RemainAfterExit=true
WorkingDirectory=/opt/bootkube
ExecStart=/usr/bin/bash -c 'set -x && \
[ -n "$(ls /opt/bootkube/assets/manifests-*/* 2>/dev/null)" ] && mv /opt/bootkube/assets/manifests-*/* /opt/bootkube/assets/manifests && rm -rf /opt/bootkube/assets/manifests-* && exec podman run --name bootkube --privileged \
WorkingDirectory=/opt/bootstrap
ExecStartPre=-/usr/bin/bash -c 'set -x && [ -n "$(ls /opt/bootstrap/assets/manifests-*/* 2>/dev/null)" ] && mv /opt/bootstrap/assets/manifests-*/* /opt/bootstrap/assets/manifests && rm -rf /opt/bootstrap/assets/manifests-*'
ExecStart=/usr/bin/podman run --name bootstrap \
--network host \
--volume /opt/bootkube/assets:/assets \
--volume /etc/kubernetes:/etc/kubernetes \
quay.io/coreos/bootkube:v0.14.0 \
/bootkube start --asset-dir=/assets'
ExecStartPost=/bin/touch /opt/bootkube/init_bootkube.done
--volume /opt/bootstrap/assets:/assets:ro,Z \
--volume /opt/bootstrap/apply:/apply:ro,Z \
k8s.gcr.io/hyperkube:v1.16.2 \
/apply
ExecStartPost=/bin/touch /opt/bootstrap/bootstrap.done
ExecStartPost=-/usr/bin/podman stop bootstrap
storage:
directories:
- path: /etc/kubernetes
- path: /opt/bootkube
- path: /opt/bootstrap
files:
- path: /etc/kubernetes/kubeconfig
mode: 0644
contents:
inline: |
${kubeconfig}
- path: /opt/bootstrap/apply
mode: 0544
contents:
inline: |
#!/bin/bash -e
export KUBECONFIG=/assets/auth/kubeconfig
until kubectl version; do
echo "Waiting for static pod control plane"
sleep 5
done
until kubectl apply -f /assets/manifests -R; do
echo "Retry applying manifests"
sleep 5
done
- path: /etc/sysctl.d/reverse-path-filter.conf
contents:
inline: |
@@ -1,5 +1,5 @@
output "kubeconfig-admin" {
value = module.bootkube.kubeconfig-admin
value = module.bootstrap.kubeconfig-admin
}

# Outputs for Kubernetes Ingress

@@ -32,7 +32,7 @@ output "worker_security_groups" {
}

output "kubeconfig" {
value = module.bootkube.kubeconfig-kubelet
value = module.bootstrap.kubeconfig-kubelet
}

# Outputs for custom load balancing
@@ -44,6 +44,28 @@ resource "aws_security_group_rule" "controller-etcd-metrics" {
source_security_group_id = aws_security_group.worker.id
}

# Allow Prometheus to scrape kube-scheduler
resource "aws_security_group_rule" "controller-scheduler-metrics" {
security_group_id = aws_security_group.controller.id

type = "ingress"
protocol = "tcp"
from_port = 10251
to_port = 10251
source_security_group_id = aws_security_group.worker.id
}

# Allow Prometheus to scrape kube-controller-manager
resource "aws_security_group_rule" "controller-manager-metrics" {
security_group_id = aws_security_group.controller.id

type = "ingress"
protocol = "tcp"
from_port = 10252
to_port = 10252
source_security_group_id = aws_security_group.worker.id
}

resource "aws_security_group_rule" "controller-vxlan" {
count = var.networking == "flannel" ? 1 : 0
@@ -1,6 +1,10 @@
# Secure copy etcd TLS assets to controllers.
# Secure copy assets to controllers.
resource "null_resource" "copy-controller-secrets" {
count = var.controller_count

depends_on = [
module.bootstrap,
]

connection {
type = "ssh"

@@ -10,40 +14,45 @@ resource "null_resource" "copy-controller-secrets" {
}

provisioner "file" {
content = module.bootkube.etcd_ca_cert
content = module.bootstrap.etcd_ca_cert
destination = "$HOME/etcd-client-ca.crt"
}

provisioner "file" {
content = module.bootkube.etcd_client_cert
content = module.bootstrap.etcd_client_cert
destination = "$HOME/etcd-client.crt"
}

provisioner "file" {
content = module.bootkube.etcd_client_key
content = module.bootstrap.etcd_client_key
destination = "$HOME/etcd-client.key"
}

provisioner "file" {
content = module.bootkube.etcd_server_cert
content = module.bootstrap.etcd_server_cert
destination = "$HOME/etcd-server.crt"
}

provisioner "file" {
content = module.bootkube.etcd_server_key
content = module.bootstrap.etcd_server_key
destination = "$HOME/etcd-server.key"
}

provisioner "file" {
content = module.bootkube.etcd_peer_cert
content = module.bootstrap.etcd_peer_cert
destination = "$HOME/etcd-peer.crt"
}

provisioner "file" {
content = module.bootkube.etcd_peer_key
content = module.bootstrap.etcd_peer_key
destination = "$HOME/etcd-peer.key"
}

provisioner "file" {
source = var.asset_dir
destination = "$HOME/assets"
}

provisioner "remote-exec" {
inline = [
"sudo mkdir -p /etc/ssl/etcd/etcd",
@@ -56,18 +65,22 @@ resource "null_resource" "copy-controller-secrets" {
"sudo mv etcd-peer.key /etc/ssl/etcd/etcd/peer.key",
"sudo chown -R etcd:etcd /etc/ssl/etcd",
"sudo chmod -R 500 /etc/ssl/etcd",
"sudo mv $HOME/assets /opt/bootstrap/assets",
"sudo mkdir -p /etc/kubernetes/manifests",
"sudo mkdir -p /etc/kubernetes/bootstrap-secrets",
"sudo cp -r /opt/bootstrap/assets/tls/* /etc/kubernetes/bootstrap-secrets/",
"sudo cp /opt/bootstrap/assets/auth/kubeconfig /etc/kubernetes/bootstrap-secrets/",
"sudo cp -r /opt/bootstrap/assets/static-manifests/* /etc/kubernetes/manifests/"
]
}
}

# Secure copy bootkube assets to ONE controller and start bootkube to perform
# one-time self-hosted cluster bootstrapping.
resource "null_resource" "bootkube-start" {
# Connect to a controller to perform one-time cluster bootstrap.
resource "null_resource" "bootstrap" {
depends_on = [
module.bootkube,
null_resource.copy-controller-secrets,
module.workers,
aws_route53_record.apiserver,
null_resource.copy-controller-secrets,
]

connection {

@@ -77,15 +90,9 @@ resource "null_resource" "bootkube-start" {
timeout = "15m"
}

provisioner "file" {
source = var.asset_dir
destination = "$HOME/assets"
}

provisioner "remote-exec" {
inline = [
"sudo mv $HOME/assets /opt/bootkube",
"sudo systemctl start bootkube",
"sudo systemctl start bootstrap",
]
}
}
@@ -18,57 +18,57 @@ variable "dns_zone_id" {
# instances

variable "controller_count" {
type = string
default = "1"
type = number
description = "Number of controllers (i.e. masters)"
default = 1
}

variable "worker_count" {
type = string
default = "1"
type = number
description = "Number of workers"
default = 1
}

variable "controller_type" {
type = string
default = "t3.small"
description = "EC2 instance type for controllers"
default = "t3.small"
}

variable "worker_type" {
type = string
default = "t3.small"
description = "EC2 instance type for workers"
default = "t3.small"
}

variable "os_image" {
type = string
default = "coreos-stable"
description = "AMI channel for Fedora CoreOS (not yet used)"
default = "coreos-stable"
}

variable "disk_size" {
type = string
default = "40"
type = number
description = "Size of the EBS volume in GB"
default = 40
}

variable "disk_type" {
type = string
default = "gp2"
description = "Type of the EBS volume (e.g. standard, gp2, io1)"
default = "gp2"
}

variable "disk_iops" {
type = string
default = "0"
type = number
description = "IOPS of the EBS volume (e.g. 100)"
default = 0
}

variable "worker_price" {
type = string
default = ""
description = "Spot price in USD for autoscaling group spot instances. Leave as default empty string for autoscaling group to use on-demand instances. Note, switching in-place from spot to on-demand is not possible: https://github.com/terraform-providers/terraform-provider-aws/issues/4320"
type = number
description = "Spot price in USD for worker instances or 0 to use on-demand instances"
default = 0
}

variable "worker_target_groups" {
@@ -97,60 +97,66 @@ variable "ssh_authorized_key" {
}

variable "asset_dir" {
  description = "Path to a directory where generated assets should be placed (contains secrets)"
  type = string
  description = "Absolute path to a directory where generated assets should be placed (contains secrets)"
}

variable "networking" {
  description = "Choice of networking provider (calico or flannel)"
  type = string
  description = "Choice of networking provider (calico or flannel)"
  default = "calico"
}

variable "network_mtu" {
  type = number
  description = "CNI interface MTU (applies to calico only). Use 8981 if using instances types with Jumbo frames."
  type = string
  default = "1480"
  default = 1480
}

variable "host_cidr" {
  description = "CIDR IPv4 range to assign to EC2 nodes"
  type = string
  description = "CIDR IPv4 range to assign to EC2 nodes"
  default = "10.0.0.0/16"
}

variable "pod_cidr" {
  description = "CIDR IPv4 range to assign Kubernetes pods"
  type = string
  description = "CIDR IPv4 range to assign Kubernetes pods"
  default = "10.2.0.0/16"
}

variable "service_cidr" {
  type = string
  description = <<EOD
CIDR IPv4 range to assign Kubernetes services.
The 1st IP will be reserved for kube_apiserver, the 10th IP will be reserved for coredns.
EOD


  type = string
  default = "10.3.0.0/16"
}

variable "cluster_domain_suffix" {
  description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
  type = string
  default = "cluster.local"
}

variable "enable_reporting" {
  type = string
  type = bool
  description = "Enable usage or analytics reporting to upstreams (Calico)"
  default = "false"
  default = false
}

variable "enable_aggregation" {
  type = bool
  description = "Enable the Kubernetes Aggregation Layer (defaults to false)"
  type = string
  default = "false"
  default = false
}

variable "worker_node_labels" {
  type = list(string)
  description = "List of initial worker node labels"
  default = []
}

# unofficial, undocumented, unsupported

variable "cluster_domain_suffix" {
  type = string
  description = "Queries for domains with the suffix will be answered by CoreDNS. Default is cluster.local (e.g. foo.default.svc.cluster.local)"
  default = "cluster.local"
}

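The same hunk converts enable_reporting and enable_aggregation to bool and adds the worker_node_labels list. A hedged sketch of how these appear in a terraform.tfvars or module block under the new types, plus the reserved service IPs implied by the service_cidr description (the label value is an example, not from this diff):

enable_reporting   = false              # bool, not the string "false"
enable_aggregation = false
worker_node_labels = ["pool=default"]   # list(string)

# Reserved service IPs derived from the default service_cidr:
#   cidrhost("10.3.0.0/16", 1)  = 10.3.0.1   (kube-apiserver)
#   cidrhost("10.3.0.0/16", 10) = 10.3.0.10  (CoreDNS)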
@@ -14,10 +14,11 @@ module "workers" {
  target_groups = var.worker_target_groups

  # configuration
  kubeconfig = module.bootkube.kubeconfig-kubelet
  kubeconfig = module.bootstrap.kubeconfig-kubelet
  ssh_authorized_key = var.ssh_authorized_key
  service_cidr = var.service_cidr
  cluster_domain_suffix = var.cluster_domain_suffix
  snippets = var.worker_snippets
  node_labels = var.worker_node_labels
}

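The internal workers module now reads its kubeconfig from module.bootstrap and accepts a node_labels list. A rough sketch of a standalone worker pool passing the new input; the pool name, label, and the omitted network/auth arguments are assumptions for illustration only:

module "tempest-pool" {
  source = "git::https://github.com/poseidon/typhoon//aws/container-linux/kubernetes/workers?ref=v1.16.2"

  # illustrative pool settings; required VPC, subnet, security group,
  # kubeconfig, and SSH key inputs are omitted here
  name         = "tempest-pool"
  worker_count = 2
  node_labels  = ["pool=batch"]
}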
@@ -13,9 +13,11 @@ data "aws_ami" "fedora-coreos" {
    values = ["hvm"]
  }

  // pin on known ok versions as preview matures
  filter {
    name = "name"
    values = ["fedora-coreos-30.20190801.0-hvm"]
    values = ["fedora-coreos-30.*.*-hvm"]
  }

  # try to filter out dev images (AWS filters can't)
  name_regex = "^fedora-coreos-30.[0-9]*.[0-9]*-hvm*"
}

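Assembled for context, the AMI lookup after this change might look roughly like the following. The first filter's name and the most_recent argument are assumptions (they lie outside the hunk); the name glob and name_regex are the ones shown above.

data "aws_ami" "fedora-coreos" {
  most_recent = true

  filter {
    name   = "virtualization-type"
    values = ["hvm"]
  }

  filter {
    name   = "name"
    values = ["fedora-coreos-30.*.*-hvm"]
  }

  # AWS API filters cannot exclude dev images, so a regex narrows the match
  name_regex = "^fedora-coreos-30.[0-9]*.[0-9]*-hvm*"
}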
@ -26,9 +26,9 @@ systemd:
|
||||
[Service]
|
||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
|
||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
|
||||
ExecStartPre=/bin/mkdir -p /opt/cni/bin
|
||||
ExecStartPre=/bin/mkdir -p /var/lib/calico
|
||||
ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins
|
||||
ExecStartPre=/bin/mkdir -p /opt/cni/bin
|
||||
ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
|
||||
ExecStartPre=-/usr/bin/podman rm kubelet
|
||||
ExecStart=/usr/bin/podman run --name kubelet \
|
||||
@ -50,7 +50,7 @@ systemd:
|
||||
--volume /var/run:/var/run \
|
||||
--volume /var/run/lock:/var/run/lock:z \
|
||||
--volume /opt/cni/bin:/opt/cni/bin:z \
|
||||
k8s.gcr.io/hyperkube:v1.15.3 /hyperkube kubelet \
|
||||
k8s.gcr.io/hyperkube:v1.16.2 /hyperkube kubelet \
|
||||
--anonymous-auth=false \
|
||||
--authentication-token-webhook \
|
||||
--authorization-mode=Webhook \
|
||||
@ -65,7 +65,10 @@ systemd:
|
||||
--kubeconfig=/etc/kubernetes/kubeconfig \
|
||||
--lock-file=/var/run/lock/kubelet.lock \
|
||||
--network-plugin=cni \
|
||||
--node-labels=node-role.kubernetes.io/node \
|
||||
--node-labels=node.kubernetes.io/node \
|
||||
%{ for label in split(",", node_labels) }
|
||||
--node-labels=${label} \
|
||||
%{ endfor ~}
|
||||
--pod-manifest-path=/etc/kubernetes/manifests \
|
||||
--read-only-port=0 \
|
||||
--volume-plugin-dir=/var/lib/kubelet/volumeplugins
|
||||
@ -78,7 +81,6 @@ systemd:
|
||||
storage:
|
||||
directories:
|
||||
- path: /etc/kubernetes
|
||||
- path: /opt/bootkube
|
||||
files:
|
||||
- path: /etc/kubernetes/kubeconfig
|
||||
mode: 0644
|
||||
|
@ -23,45 +23,45 @@ variable "security_groups" {
|
||||
# instances
|
||||
|
||||
variable "worker_count" {
|
||||
type = string
|
||||
default = "1"
|
||||
type = number
|
||||
description = "Number of instances"
|
||||
default = 1
|
||||
}
|
||||
|
||||
variable "instance_type" {
|
||||
type = string
|
||||
default = "t3.small"
|
||||
description = "EC2 instance type"
|
||||
default = "t3.small"
|
||||
}
|
||||
|
||||
variable "os_image" {
|
||||
type = string
|
||||
default = "coreos-stable"
|
||||
description = "AMI channel for Fedora CoreOS (not yet used)"
|
||||
default = "coreos-stable"
|
||||
}
|
||||
|
||||
variable "disk_size" {
|
||||
type = string
|
||||
default = "40"
|
||||
type = number
|
||||
description = "Size of the EBS volume in GB"
|
||||
default = 40
|
||||
}
|
||||
|
||||
variable "disk_type" {
|
||||
type = string
|
||||
default = "gp2"
|
||||
description = "Type of the EBS volume (e.g. standard, gp2, io1)"
|
||||
default = "gp2"
|
||||
}
|
||||
|
||||
variable "disk_iops" {
|
||||
type = string
|
||||
default = "0"
|
||||
type = number
|
||||
description = "IOPS of the EBS volume (required for io1)"
|
||||
default = 0
|
||||
}
|
||||
|
||||
variable "spot_price" {
|
||||
type = string
|
||||
default = ""
|
||||
description = "Spot price in USD for autoscaling group spot instances. Leave as default empty string for autoscaling group to use on-demand instances. Note, switching in-place from spot to on-demand is not possible: https://github.com/terraform-providers/terraform-provider-aws/issues/4320"
|
||||
type = number
|
||||
description = "Spot price in USD for worker instances or 0 to use on-demand instances"
|
||||
default = 0
|
||||
}
|
||||
|
||||
variable "target_groups" {
|
||||
@ -89,19 +89,22 @@ variable "ssh_authorized_key" {
|
||||
}
|
||||
|
||||
variable "service_cidr" {
|
||||
type = string
|
||||
description = <<EOD
|
||||
CIDR IPv4 range to assign Kubernetes services.
|
||||
The 1st IP will be reserved for kube_apiserver, the 10th IP will be reserved for coredns.
|
||||
EOD
|
||||
|
||||
|
||||
type = string
|
||||
default = "10.3.0.0/16"
|
||||
}
|
||||
|
||||
variable "cluster_domain_suffix" {
|
||||
description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
|
||||
type = string
|
||||
description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
|
||||
default = "cluster.local"
|
||||
}
|
||||
|
||||
variable "node_labels" {
|
||||
type = list(string)
|
||||
description = "List of initial node labels"
|
||||
default = []
|
||||
}
|
||||
|
@ -46,7 +46,7 @@ resource "aws_autoscaling_group" "workers" {
|
||||
resource "aws_launch_configuration" "worker" {
|
||||
image_id = data.aws_ami.fedora-coreos.image_id
|
||||
instance_type = var.instance_type
|
||||
spot_price = var.spot_price
|
||||
spot_price = var.spot_price > 0 ? var.spot_price : null
|
||||
enable_monitoring = false
|
||||
|
||||
user_data = data.ct_config.worker-ignition.rendered
|
||||
@ -85,6 +85,7 @@ data "template_file" "worker-config" {
|
||||
ssh_authorized_key = var.ssh_authorized_key
|
||||
cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
|
||||
cluster_domain_suffix = var.cluster_domain_suffix
|
||||
node_labels = join(",", var.node_labels)
|
||||
}
|
||||
}
|
||||
|
||||
|
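Worker node labels flow from a Terraform list into the Container Linux template as a comma-separated string, which the %{ for ... } directive above splits back into repeated --node-labels kubelet flags. A small sketch of the join step, with illustrative label values:

locals {
  # illustrative labels a user might pass as worker_node_labels
  worker_node_labels = ["pool=default", "tier=web"]

  # what the module hands to the template, mirroring join(",", var.node_labels)
  node_labels_csv = join(",", local.worker_node_labels) # "pool=default,tier=web"
}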
@@ -11,7 +11,7 @@ Typhoon distributes upstream Kubernetes, architectural conventions, and cluster

## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>

* Kubernetes v1.15.3 (upstream, via [kubernetes-incubator/bootkube](https://github.com/kubernetes-incubator/bootkube))
* Kubernetes v1.16.2 (upstream)
* Single or multi-master, [Calico](https://www.projectcalico.org/) or [flannel](https://github.com/coreos/flannel) networking
* On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
* Advanced features like [worker pools](https://typhoon.psdn.io/advanced/worker-pools/), [low-priority](https://typhoon.psdn.io/cl/azure/#low-priority) workers, and [snippets](https://typhoon.psdn.io/advanced/customization/#container-linux) customization

@@ -1,6 +1,6 @@
# Self-hosted Kubernetes assets (kubeconfig, manifests)
module "bootkube" {
  source = "git::https://github.com/poseidon/terraform-render-bootkube.git?ref=248675e7a95660e5c8a65f8b7ce86be89ef55a0e"
# Kubernetes assets (kubeconfig, manifests)
module "bootstrap" {
  source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=0fcc067476fa1463d057fd43760df222b7262b27"

  cluster_name = var.cluster_name
  api_servers = [format("%s.%s", var.cluster_name, var.dns_zone)]
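Downstream references simply change from module.bootkube.* to module.bootstrap.*; the outputs keep their names. A hedged sketch of consuming the admin kubeconfig (the local_file resource and path are illustrative, not part of this change):

resource "local_file" "kubeconfig-admin" {
  content  = module.bootstrap.kubeconfig-admin
  filename = "${path.module}/assets/auth/kubeconfig"
}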
@ -7,7 +7,7 @@ systemd:
|
||||
- name: 40-etcd-cluster.conf
|
||||
contents: |
|
||||
[Service]
|
||||
Environment="ETCD_IMAGE_TAG=v3.3.15"
|
||||
Environment="ETCD_IMAGE_TAG=v3.4.2"
|
||||
Environment="ETCD_NAME=${etcd_name}"
|
||||
Environment="ETCD_ADVERTISE_CLIENT_URLS=https://${etcd_domain}:2379"
|
||||
Environment="ETCD_INITIAL_ADVERTISE_PEER_URLS=https://${etcd_domain}:2380"
|
||||
@ -63,11 +63,9 @@ systemd:
|
||||
--volume var-log,kind=host,source=/var/log \
|
||||
--mount volume=var-log,target=/var/log \
|
||||
--insecure-options=image"
|
||||
ExecStartPre=/bin/mkdir -p /opt/cni/bin
|
||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
|
||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
|
||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/checkpoint-secrets
|
||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/inactive-manifests
|
||||
ExecStartPre=/bin/mkdir -p /opt/cni/bin
|
||||
ExecStartPre=/bin/mkdir -p /var/lib/cni
|
||||
ExecStartPre=/bin/mkdir -p /var/lib/calico
|
||||
ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins
|
||||
@ -85,8 +83,8 @@ systemd:
|
||||
--kubeconfig=/etc/kubernetes/kubeconfig \
|
||||
--lock-file=/var/run/lock/kubelet.lock \
|
||||
--network-plugin=cni \
|
||||
--node-labels=node-role.kubernetes.io/master \
|
||||
--node-labels=node-role.kubernetes.io/controller="true" \
|
||||
--node-labels=node.kubernetes.io/master \
|
||||
--node-labels=node.kubernetes.io/controller="true" \
|
||||
--pod-manifest-path=/etc/kubernetes/manifests \
|
||||
--read-only-port=0 \
|
||||
--register-with-taints=node-role.kubernetes.io/master=:NoSchedule \
|
||||
@ -96,17 +94,28 @@ systemd:
|
||||
RestartSec=10
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
- name: bootkube.service
|
||||
- name: bootstrap.service
|
||||
contents: |
|
||||
[Unit]
|
||||
Description=Bootstrap a Kubernetes cluster
|
||||
ConditionPathExists=!/opt/bootkube/init_bootkube.done
|
||||
Description=Kubernetes control plane
|
||||
ConditionPathExists=!/opt/bootstrap/bootstrap.done
|
||||
[Service]
|
||||
Type=oneshot
|
||||
RemainAfterExit=true
|
||||
WorkingDirectory=/opt/bootkube
|
||||
ExecStart=/opt/bootkube/bootkube-start
|
||||
ExecStartPost=/bin/touch /opt/bootkube/init_bootkube.done
|
||||
WorkingDirectory=/opt/bootstrap
|
||||
ExecStartPre=-/usr/bin/bash -c 'set -x && [ -n "$(ls /opt/bootstrap/assets/manifests-*/* 2>/dev/null)" ] && mv /opt/bootstrap/assets/manifests-*/* /opt/bootstrap/assets/manifests && rm -rf /opt/bootstrap/assets/manifests-*'
|
||||
ExecStart=/usr/bin/rkt run \
|
||||
--trust-keys-from-https \
|
||||
--volume assets,kind=host,source=/opt/bootstrap/assets \
|
||||
--mount volume=assets,target=/assets \
|
||||
--volume script,kind=host,source=/opt/bootstrap/apply \
|
||||
--mount volume=script,target=/apply \
|
||||
--insecure-options=image \
|
||||
docker://k8s.gcr.io/hyperkube:v1.16.2 \
|
||||
--net=host \
|
||||
--dns=host \
|
||||
--exec=/apply
|
||||
ExecStartPost=/bin/touch /opt/bootstrap/bootstrap.done
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
storage:
|
||||
@@ -123,37 +132,27 @@ storage:
      contents:
        inline: |
          KUBELET_IMAGE_URL=docker://k8s.gcr.io/hyperkube
          KUBELET_IMAGE_TAG=v1.15.3
          KUBELET_IMAGE_TAG=v1.16.2
    - path: /opt/bootstrap/apply
      filesystem: root
      mode: 0544
      contents:
        inline: |
          #!/bin/bash -e
          export KUBECONFIG=/assets/auth/kubeconfig
          until kubectl version; do
            echo "Waiting for static pod control plane"
            sleep 5
          done
          until kubectl apply -f /assets/manifests -R; do
            echo "Retry applying manifests"
            sleep 5
          done
||||
- path: /etc/sysctl.d/max-user-watches.conf
|
||||
filesystem: root
|
||||
contents:
|
||||
inline: |
|
||||
fs.inotify.max_user_watches=16184
|
||||
- path: /opt/bootkube/bootkube-start
|
||||
filesystem: root
|
||||
mode: 0544
|
||||
user:
|
||||
id: 500
|
||||
group:
|
||||
id: 500
|
||||
contents:
|
||||
inline: |
|
||||
#!/bin/bash
|
||||
# Wrapper for bootkube start
|
||||
set -e
|
||||
# Move experimental manifests
|
||||
[ -n "$(ls /opt/bootkube/assets/manifests-*/* 2>/dev/null)" ] && mv /opt/bootkube/assets/manifests-*/* /opt/bootkube/assets/manifests && rm -rf /opt/bootkube/assets/manifests-*
|
||||
exec /usr/bin/rkt run \
|
||||
--trust-keys-from-https \
|
||||
--volume assets,kind=host,source=/opt/bootkube/assets \
|
||||
--mount volume=assets,target=/assets \
|
||||
--volume bootstrap,kind=host,source=/etc/kubernetes \
|
||||
--mount volume=bootstrap,target=/etc/kubernetes \
|
||||
$${RKT_OPTS} \
|
||||
quay.io/coreos/bootkube:v0.14.0 \
|
||||
--net=host \
|
||||
--dns=host \
|
||||
--exec=/bootkube -- start --asset-dir=/assets "$@"
|
||||
passwd:
|
||||
users:
|
||||
- name: core
|
||||
|
@ -155,7 +155,7 @@ data "template_file" "controller-configs" {
|
||||
etcd_domain = "${var.cluster_name}-etcd${count.index}.${var.dns_zone}"
|
||||
# etcd0=https://cluster-etcd0.example.com,etcd1=https://cluster-etcd1.example.com,...
|
||||
etcd_initial_cluster = join(",", data.template_file.etcds.*.rendered)
|
||||
kubeconfig = indent(10, module.bootkube.kubeconfig-kubelet)
|
||||
kubeconfig = indent(10, module.bootstrap.kubeconfig-kubelet)
|
||||
ssh_authorized_key = var.ssh_authorized_key
|
||||
cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
|
||||
cluster_domain_suffix = var.cluster_domain_suffix
|
||||
|
@ -1,5 +1,5 @@
|
||||
output "kubeconfig-admin" {
|
||||
value = module.bootkube.kubeconfig-admin
|
||||
value = module.bootstrap.kubeconfig-admin
|
||||
}
|
||||
|
||||
# Outputs for Kubernetes Ingress
|
||||
@ -28,7 +28,7 @@ output "security_group_id" {
|
||||
}
|
||||
|
||||
output "kubeconfig" {
|
||||
value = module.bootkube.kubeconfig-kubelet
|
||||
value = module.bootstrap.kubeconfig-kubelet
|
||||
}
|
||||
|
||||
# Outputs for custom firewalling
|
||||
|
@ -53,6 +53,22 @@ resource "azurerm_network_security_rule" "controller-etcd-metrics" {
|
||||
destination_address_prefix = azurerm_subnet.controller.address_prefix
|
||||
}
|
||||
|
||||
# Allow Prometheus to scrape kube-scheduler and kube-controller-manager metrics
|
||||
resource "azurerm_network_security_rule" "controller-kube-metrics" {
|
||||
resource_group_name = azurerm_resource_group.cluster.name
|
||||
|
||||
name = "allow-kube-metrics"
|
||||
network_security_group_name = azurerm_network_security_group.controller.name
|
||||
priority = "2011"
|
||||
access = "Allow"
|
||||
direction = "Inbound"
|
||||
protocol = "Tcp"
|
||||
source_port_range = "*"
|
||||
destination_port_range = "10251-10252"
|
||||
source_address_prefix = azurerm_subnet.worker.address_prefix
|
||||
destination_address_prefix = azurerm_subnet.controller.address_prefix
|
||||
}
|
||||
|
||||
resource "azurerm_network_security_rule" "controller-apiserver" {
|
||||
resource_group_name = azurerm_resource_group.cluster.name
|
||||
|
||||
|
@ -1,50 +1,58 @@
|
||||
# Secure copy etcd TLS assets to controllers.
|
||||
# Secure copy assets to controllers.
|
||||
resource "null_resource" "copy-controller-secrets" {
|
||||
count = var.controller_count
|
||||
|
||||
depends_on = [azurerm_virtual_machine.controllers]
|
||||
depends_on = [
|
||||
module.bootstrap,
|
||||
azurerm_virtual_machine.controllers
|
||||
]
|
||||
|
||||
connection {
|
||||
type = "ssh"
|
||||
host = element(azurerm_public_ip.controllers.*.ip_address, count.index)
|
||||
host = azurerm_public_ip.controllers.*.ip_address[count.index]
|
||||
user = "core"
|
||||
timeout = "15m"
|
||||
}
|
||||
|
||||
provisioner "file" {
|
||||
content = module.bootkube.etcd_ca_cert
|
||||
content = module.bootstrap.etcd_ca_cert
|
||||
destination = "$HOME/etcd-client-ca.crt"
|
||||
}
|
||||
|
||||
provisioner "file" {
|
||||
content = module.bootkube.etcd_client_cert
|
||||
content = module.bootstrap.etcd_client_cert
|
||||
destination = "$HOME/etcd-client.crt"
|
||||
}
|
||||
|
||||
provisioner "file" {
|
||||
content = module.bootkube.etcd_client_key
|
||||
content = module.bootstrap.etcd_client_key
|
||||
destination = "$HOME/etcd-client.key"
|
||||
}
|
||||
|
||||
provisioner "file" {
|
||||
content = module.bootkube.etcd_server_cert
|
||||
content = module.bootstrap.etcd_server_cert
|
||||
destination = "$HOME/etcd-server.crt"
|
||||
}
|
||||
|
||||
provisioner "file" {
|
||||
content = module.bootkube.etcd_server_key
|
||||
content = module.bootstrap.etcd_server_key
|
||||
destination = "$HOME/etcd-server.key"
|
||||
}
|
||||
|
||||
provisioner "file" {
|
||||
content = module.bootkube.etcd_peer_cert
|
||||
content = module.bootstrap.etcd_peer_cert
|
||||
destination = "$HOME/etcd-peer.crt"
|
||||
}
|
||||
|
||||
provisioner "file" {
|
||||
content = module.bootkube.etcd_peer_key
|
||||
content = module.bootstrap.etcd_peer_key
|
||||
destination = "$HOME/etcd-peer.key"
|
||||
}
|
||||
|
||||
provisioner "file" {
|
||||
source = var.asset_dir
|
||||
destination = "$HOME/assets"
|
||||
}
|
||||
|
||||
provisioner "remote-exec" {
|
||||
inline = [
|
||||
@ -58,18 +66,22 @@ resource "null_resource" "copy-controller-secrets" {
|
||||
"sudo mv etcd-peer.key /etc/ssl/etcd/etcd/peer.key",
|
||||
"sudo chown -R etcd:etcd /etc/ssl/etcd",
|
||||
"sudo chmod -R 500 /etc/ssl/etcd",
|
||||
"sudo mv $HOME/assets /opt/bootstrap/assets",
|
||||
"sudo mkdir -p /etc/kubernetes/manifests",
|
||||
"sudo mkdir -p /etc/kubernetes/bootstrap-secrets",
|
||||
"sudo cp -r /opt/bootstrap/assets/tls/* /etc/kubernetes/bootstrap-secrets/",
|
||||
"sudo cp /opt/bootstrap/assets/auth/kubeconfig /etc/kubernetes/bootstrap-secrets/",
|
||||
"sudo cp -r /opt/bootstrap/assets/static-manifests/* /etc/kubernetes/manifests/",
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
# Secure copy bootkube assets to ONE controller and start bootkube to perform
|
||||
# one-time self-hosted cluster bootstrapping.
|
||||
resource "null_resource" "bootkube-start" {
|
||||
# Connect to a controller to perform one-time cluster bootstrap.
|
||||
resource "null_resource" "bootstrap" {
|
||||
depends_on = [
|
||||
module.bootkube,
|
||||
null_resource.copy-controller-secrets,
|
||||
module.workers,
|
||||
azurerm_dns_a_record.apiserver,
|
||||
null_resource.copy-controller-secrets,
|
||||
]
|
||||
|
||||
connection {
|
||||
@ -79,15 +91,9 @@ resource "null_resource" "bootkube-start" {
|
||||
timeout = "15m"
|
||||
}
|
||||
|
||||
provisioner "file" {
|
||||
source = var.asset_dir
|
||||
destination = "$HOME/assets"
|
||||
}
|
||||
|
||||
provisioner "remote-exec" {
|
||||
inline = [
|
||||
"sudo mv $HOME/assets /opt/bootkube",
|
||||
"sudo systemctl start bootkube",
|
||||
"sudo systemctl start bootstrap",
|
||||
]
|
||||
}
|
||||
}
|
||||
|
@ -23,27 +23,27 @@ variable "dns_zone_group" {
|
||||
# instances
|
||||
|
||||
variable "controller_count" {
|
||||
type = string
|
||||
default = "1"
|
||||
type = number
|
||||
description = "Number of controllers (i.e. masters)"
|
||||
default = 1
|
||||
}
|
||||
|
||||
variable "worker_count" {
|
||||
type = string
|
||||
default = "1"
|
||||
type = number
|
||||
description = "Number of workers"
|
||||
default = 1
|
||||
}
|
||||
|
||||
variable "controller_type" {
|
||||
type = string
|
||||
default = "Standard_DS1_v2"
|
||||
description = "Machine type for controllers (see `az vm list-skus --location centralus`)"
|
||||
default = "Standard_B2s"
|
||||
}
|
||||
|
||||
variable "worker_type" {
|
||||
type = string
|
||||
default = "Standard_F1"
|
||||
description = "Machine type for workers (see `az vm list-skus --location centralus`)"
|
||||
default = "Standard_DS1_v2"
|
||||
}
|
||||
|
||||
variable "os_image" {
|
||||
@ -53,15 +53,15 @@ variable "os_image" {
|
||||
}
|
||||
|
||||
variable "disk_size" {
|
||||
type = string
|
||||
default = "40"
|
||||
type = number
|
||||
description = "Size of the disk in GB"
|
||||
default = 40
|
||||
}
|
||||
|
||||
variable "worker_priority" {
|
||||
type = string
|
||||
default = "Regular"
|
||||
description = "Set worker priority to Low to use reduced cost surplus capacity, with the tradeoff that instances can be deallocated at any time."
|
||||
default = "Regular"
|
||||
}
|
||||
|
||||
variable "controller_clc_snippets" {
|
||||
@ -84,54 +84,60 @@ variable "ssh_authorized_key" {
|
||||
}
|
||||
|
||||
variable "asset_dir" {
|
||||
description = "Path to a directory where generated assets should be placed (contains secrets)"
|
||||
type = string
|
||||
description = "Absolute path to a directory where generated assets should be placed (contains secrets)"
|
||||
}
|
||||
|
||||
variable "networking" {
|
||||
description = "Choice of networking provider (flannel or calico)"
|
||||
type = string
|
||||
default = "flannel"
|
||||
description = "Choice of networking provider (flannel or calico)"
|
||||
default = "calico"
|
||||
}
|
||||
|
||||
variable "host_cidr" {
|
||||
description = "CIDR IPv4 range to assign to instances"
|
||||
type = string
|
||||
description = "CIDR IPv4 range to assign to instances"
|
||||
default = "10.0.0.0/16"
|
||||
}
|
||||
|
||||
variable "pod_cidr" {
|
||||
description = "CIDR IPv4 range to assign Kubernetes pods"
|
||||
type = string
|
||||
description = "CIDR IPv4 range to assign Kubernetes pods"
|
||||
default = "10.2.0.0/16"
|
||||
}
|
||||
|
||||
variable "service_cidr" {
|
||||
type = string
|
||||
description = <<EOD
|
||||
CIDR IPv4 range to assign Kubernetes services.
|
||||
The 1st IP will be reserved for kube_apiserver, the 10th IP will be reserved for coredns.
|
||||
EOD
|
||||
|
||||
|
||||
type = string
|
||||
default = "10.3.0.0/16"
|
||||
}
|
||||
|
||||
variable "cluster_domain_suffix" {
|
||||
description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
|
||||
type = string
|
||||
default = "cluster.local"
|
||||
}
|
||||
|
||||
variable "enable_reporting" {
|
||||
type = string
|
||||
type = bool
|
||||
description = "Enable usage or analytics reporting to upstreams (Calico)"
|
||||
default = "false"
|
||||
default = false
|
||||
}
|
||||
|
||||
variable "enable_aggregation" {
|
||||
type = bool
|
||||
description = "Enable the Kubernetes Aggregation Layer (defaults to false)"
|
||||
type = string
|
||||
default = "false"
|
||||
default = false
|
||||
}
|
||||
|
||||
variable "worker_node_labels" {
|
||||
type = list(string)
|
||||
description = "List of initial worker node labels"
|
||||
default = []
|
||||
}
|
||||
|
||||
# unofficial, undocumented, unsupported
|
||||
|
||||
variable "cluster_domain_suffix" {
|
||||
type = string
|
||||
description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
|
||||
default = "cluster.local"
|
||||
}
|
||||
|
||||
|
@ -15,10 +15,11 @@ module "workers" {
|
||||
priority = var.worker_priority
|
||||
|
||||
# configuration
|
||||
kubeconfig = module.bootkube.kubeconfig-kubelet
|
||||
kubeconfig = module.bootstrap.kubeconfig-kubelet
|
||||
ssh_authorized_key = var.ssh_authorized_key
|
||||
service_cidr = var.service_cidr
|
||||
cluster_domain_suffix = var.cluster_domain_suffix
|
||||
clc_snippets = var.worker_clc_snippets
|
||||
node_labels = var.worker_node_labels
|
||||
}
|
||||
|
||||
|
@ -38,9 +38,9 @@ systemd:
|
||||
--volume var-log,kind=host,source=/var/log \
|
||||
--mount volume=var-log,target=/var/log \
|
||||
--insecure-options=image"
|
||||
ExecStartPre=/bin/mkdir -p /opt/cni/bin
|
||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
|
||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
|
||||
ExecStartPre=/bin/mkdir -p /opt/cni/bin
|
||||
ExecStartPre=/bin/mkdir -p /var/lib/cni
|
||||
ExecStartPre=/bin/mkdir -p /var/lib/calico
|
||||
ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins
|
||||
@ -58,7 +58,10 @@ systemd:
|
||||
--kubeconfig=/etc/kubernetes/kubeconfig \
|
||||
--lock-file=/var/run/lock/kubelet.lock \
|
||||
--network-plugin=cni \
|
||||
--node-labels=node-role.kubernetes.io/node \
|
||||
--node-labels=node.kubernetes.io/node \
|
||||
%{ for label in split(",", node_labels) }
|
||||
--node-labels=${label} \
|
||||
%{ endfor ~}
|
||||
--pod-manifest-path=/etc/kubernetes/manifests \
|
||||
--read-only-port=0 \
|
||||
--volume-plugin-dir=/var/lib/kubelet/volumeplugins
|
||||
@ -93,7 +96,7 @@ storage:
|
||||
contents:
|
||||
inline: |
|
||||
KUBELET_IMAGE_URL=docker://k8s.gcr.io/hyperkube
|
||||
KUBELET_IMAGE_TAG=v1.15.3
|
||||
KUBELET_IMAGE_TAG=v1.16.2
|
||||
- path: /etc/sysctl.d/max-user-watches.conf
|
||||
filesystem: root
|
||||
contents:
|
||||
@ -111,7 +114,7 @@ storage:
|
||||
--volume config,kind=host,source=/etc/kubernetes \
|
||||
--mount volume=config,target=/etc/kubernetes \
|
||||
--insecure-options=image \
|
||||
docker://k8s.gcr.io/hyperkube:v1.15.3 \
|
||||
docker://k8s.gcr.io/hyperkube:v1.16.2 \
|
||||
--net=host \
|
||||
--dns=host \
|
||||
--exec=/kubectl -- --kubeconfig=/etc/kubernetes/kubeconfig delete node $(hostname | tr '[:upper:]' '[:lower:]')
|
||||
|
@ -33,27 +33,27 @@ variable "backend_address_pool_id" {
|
||||
# instances
|
||||
|
||||
variable "worker_count" {
|
||||
type = string
|
||||
default = "1"
|
||||
type = number
|
||||
description = "Number of instances"
|
||||
default = 1
|
||||
}
|
||||
|
||||
variable "vm_type" {
|
||||
type = string
|
||||
default = "Standard_F1"
|
||||
description = "Machine type for instances (see `az vm list-skus --location centralus`)"
|
||||
default = "Standard_DS1_v2"
|
||||
}
|
||||
|
||||
variable "os_image" {
|
||||
type = string
|
||||
default = "coreos-stable"
|
||||
description = "Channel for a Container Linux derivative (coreos-stable, coreos-beta, coreos-alpha)"
|
||||
default = "coreos-stable"
|
||||
}
|
||||
|
||||
variable "priority" {
|
||||
type = string
|
||||
default = "Regular"
|
||||
description = "Set priority to Low to use reduced cost surplus capacity, with the tradeoff that instances can be evicted at any time."
|
||||
default = "Regular"
|
||||
}
|
||||
|
||||
variable "clc_snippets" {
|
||||
@ -75,16 +75,22 @@ variable "ssh_authorized_key" {
|
||||
}
|
||||
|
||||
variable "service_cidr" {
|
||||
type = string
|
||||
description = <<EOD
|
||||
CIDR IPv4 range to assign Kubernetes services.
|
||||
The 1st IP will be reserved for kube_apiserver, the 10th IP will be reserved for coredns.
|
||||
EOD
|
||||
|
||||
|
||||
type = string
|
||||
default = "10.3.0.0/16"
|
||||
}
|
||||
|
||||
variable "node_labels" {
|
||||
type = list(string)
|
||||
description = "List of initial node labels"
|
||||
default = []
|
||||
}
|
||||
|
||||
# unofficial, undocumented, unsupported
|
||||
|
||||
variable "cluster_domain_suffix" {
|
||||
description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
|
||||
type = string
|
||||
|
@ -111,6 +111,7 @@ data "template_file" "worker-config" {
|
||||
ssh_authorized_key = var.ssh_authorized_key
|
||||
cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
|
||||
cluster_domain_suffix = var.cluster_domain_suffix
|
||||
node_labels = join(",", var.node_labels)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -11,7 +11,7 @@ Typhoon distributes upstream Kubernetes, architectural conventions, and cluster
|
||||
|
||||
## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>
|
||||
|
||||
* Kubernetes v1.15.3 (upstream, via [kubernetes-incubator/bootkube](https://github.com/kubernetes-incubator/bootkube))
|
||||
* Kubernetes v1.16.2 (upstream)
|
||||
* Single or multi-master, [Calico](https://www.projectcalico.org/) or [flannel](https://github.com/coreos/flannel) networking
|
||||
* On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
|
||||
* Advanced features like [snippets](https://typhoon.psdn.io/advanced/customization/#container-linux) customization
|
||||
|
@ -1,10 +1,10 @@
|
||||
# Self-hosted Kubernetes assets (kubeconfig, manifests)
|
||||
module "bootkube" {
|
||||
source = "git::https://github.com/poseidon/terraform-render-bootkube.git?ref=248675e7a95660e5c8a65f8b7ce86be89ef55a0e"
|
||||
# Kubernetes assets (kubeconfig, manifests)
|
||||
module "bootstrap" {
|
||||
source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=0fcc067476fa1463d057fd43760df222b7262b27"
|
||||
|
||||
cluster_name = var.cluster_name
|
||||
api_servers = [var.k8s_domain_name]
|
||||
etcd_servers = var.controller_domains
|
||||
etcd_servers = var.controllers.*.domain
|
||||
asset_dir = var.asset_dir
|
||||
networking = var.networking
|
||||
network_mtu = var.network_mtu
|
@ -7,7 +7,7 @@ systemd:
|
||||
- name: 40-etcd-cluster.conf
|
||||
contents: |
|
||||
[Service]
|
||||
Environment="ETCD_IMAGE_TAG=v3.3.15"
|
||||
Environment="ETCD_IMAGE_TAG=v3.4.2"
|
||||
Environment="ETCD_NAME=${etcd_name}"
|
||||
Environment="ETCD_ADVERTISE_CLIENT_URLS=https://${domain_name}:2379"
|
||||
Environment="ETCD_INITIAL_ADVERTISE_PEER_URLS=https://${domain_name}:2380"
|
||||
@ -76,11 +76,9 @@ systemd:
|
||||
--mount volume=iscsiadm,target=/sbin/iscsiadm \
|
||||
--insecure-options=image"
|
||||
Environment=KUBELET_CGROUP_DRIVER=${cgroup_driver}
|
||||
ExecStartPre=/bin/mkdir -p /opt/cni/bin
|
||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
|
||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
|
||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/checkpoint-secrets
|
||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/inactive-manifests
|
||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
|
||||
ExecStartPre=/bin/mkdir -p /opt/cni/bin
|
||||
ExecStartPre=/bin/mkdir -p /var/lib/cni
|
||||
ExecStartPre=/bin/mkdir -p /var/lib/calico
|
||||
ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins
|
||||
@ -100,8 +98,8 @@ systemd:
|
||||
--kubeconfig=/etc/kubernetes/kubeconfig \
|
||||
--lock-file=/var/run/lock/kubelet.lock \
|
||||
--network-plugin=cni \
|
||||
--node-labels=node-role.kubernetes.io/master \
|
||||
--node-labels=node-role.kubernetes.io/controller="true" \
|
||||
--node-labels=node.kubernetes.io/master \
|
||||
--node-labels=node.kubernetes.io/controller="true" \
|
||||
--pod-manifest-path=/etc/kubernetes/manifests \
|
||||
--read-only-port=0 \
|
||||
--register-with-taints=node-role.kubernetes.io/master=:NoSchedule \
|
||||
@ -111,17 +109,30 @@ systemd:
|
||||
RestartSec=10
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
- name: bootkube.service
|
||||
- name: bootstrap.service
|
||||
contents: |
|
||||
[Unit]
|
||||
Description=Bootstrap a Kubernetes control plane with a temp api-server
|
||||
ConditionPathExists=!/opt/bootkube/init_bootkube.done
|
||||
Description=Kubernetes control plane
|
||||
ConditionPathExists=!/opt/bootstrap/bootstrap.done
|
||||
[Service]
|
||||
Type=oneshot
|
||||
RemainAfterExit=true
|
||||
WorkingDirectory=/opt/bootkube
|
||||
ExecStart=/opt/bootkube/bootkube-start
|
||||
ExecStartPost=/bin/touch /opt/bootkube/init_bootkube.done
|
||||
WorkingDirectory=/opt/bootstrap
|
||||
ExecStartPre=-/usr/bin/bash -c 'set -x && [ -n "$(ls /opt/bootstrap/assets/manifests-*/* 2>/dev/null)" ] && mv /opt/bootstrap/assets/manifests-*/* /opt/bootstrap/assets/manifests && rm -rf /opt/bootstrap/assets/manifests-*'
|
||||
ExecStart=/usr/bin/rkt run \
|
||||
--trust-keys-from-https \
|
||||
--volume assets,kind=host,source=/opt/bootstrap/assets \
|
||||
--mount volume=assets,target=/assets \
|
||||
--volume script,kind=host,source=/opt/bootstrap/apply \
|
||||
--mount volume=script,target=/apply \
|
||||
--insecure-options=image \
|
||||
docker://k8s.gcr.io/hyperkube:v1.16.2 \
|
||||
--net=host \
|
||||
--dns=host \
|
||||
--exec=/apply
|
||||
ExecStartPost=/bin/touch /opt/bootstrap/bootstrap.done
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
storage:
|
||||
files:
|
||||
- path: /etc/kubernetes/kubelet.env
|
||||
@ -130,43 +141,33 @@ storage:
|
||||
contents:
|
||||
inline: |
|
||||
KUBELET_IMAGE_URL=docker://k8s.gcr.io/hyperkube
|
||||
KUBELET_IMAGE_TAG=v1.15.3
|
||||
KUBELET_IMAGE_TAG=v1.16.2
|
||||
- path: /etc/hostname
|
||||
filesystem: root
|
||||
mode: 0644
|
||||
contents:
|
||||
inline:
|
||||
${domain_name}
|
||||
- path: /opt/bootstrap/apply
|
||||
filesystem: root
|
||||
mode: 0544
|
||||
contents:
|
||||
inline: |
|
||||
#!/bin/bash -e
|
||||
export KUBECONFIG=/assets/auth/kubeconfig
|
||||
until kubectl version; do
|
||||
echo "Waiting for static pod control plane"
|
||||
sleep 5
|
||||
done
|
||||
until kubectl apply -f /assets/manifests -R; do
|
||||
echo "Retry applying manifests"
|
||||
sleep 5
|
||||
done
|
||||
- path: /etc/sysctl.d/max-user-watches.conf
|
||||
filesystem: root
|
||||
contents:
|
||||
inline: |
|
||||
fs.inotify.max_user_watches=16184
|
||||
- path: /opt/bootkube/bootkube-start
|
||||
filesystem: root
|
||||
mode: 0544
|
||||
user:
|
||||
id: 500
|
||||
group:
|
||||
id: 500
|
||||
contents:
|
||||
inline: |
|
||||
#!/bin/bash
|
||||
# Wrapper for bootkube start
|
||||
set -e
|
||||
# Move experimental manifests
|
||||
[ -n "$(ls /opt/bootkube/assets/manifests-*/* 2>/dev/null)" ] && mv /opt/bootkube/assets/manifests-*/* /opt/bootkube/assets/manifests && rm -rf /opt/bootkube/assets/manifests-*
|
||||
exec /usr/bin/rkt run \
|
||||
--trust-keys-from-https \
|
||||
--volume assets,kind=host,source=/opt/bootkube/assets \
|
||||
--mount volume=assets,target=/assets \
|
||||
--volume bootstrap,kind=host,source=/etc/kubernetes \
|
||||
--mount volume=bootstrap,target=/etc/kubernetes \
|
||||
$${RKT_OPTS} \
|
||||
quay.io/coreos/bootkube:v0.14.0 \
|
||||
--net=host \
|
||||
--dns=host \
|
||||
--exec=/bootkube -- start --asset-dir=/assets "$@"
|
||||
passwd:
|
||||
users:
|
||||
- name: core
|
||||
|
@ -35,7 +35,6 @@ storage:
|
||||
-d ${install_disk} \
|
||||
-C ${os_channel} \
|
||||
-V ${os_version} \
|
||||
-o "${container_linux_oem}" \
|
||||
${baseurl_flag} \
|
||||
-i ignition.json
|
||||
udevadm settle
|
||||
|
@ -51,9 +51,9 @@ systemd:
|
||||
--mount volume=iscsiadm,target=/sbin/iscsiadm \
|
||||
--insecure-options=image"
|
||||
Environment=KUBELET_CGROUP_DRIVER=${cgroup_driver}
|
||||
ExecStartPre=/bin/mkdir -p /opt/cni/bin
|
||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
|
||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
|
||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
|
||||
ExecStartPre=/bin/mkdir -p /opt/cni/bin
|
||||
ExecStartPre=/bin/mkdir -p /var/lib/cni
|
||||
ExecStartPre=/bin/mkdir -p /var/lib/calico
|
||||
ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins
|
||||
@ -73,7 +73,7 @@ systemd:
|
||||
--kubeconfig=/etc/kubernetes/kubeconfig \
|
||||
--lock-file=/var/run/lock/kubelet.lock \
|
||||
--network-plugin=cni \
|
||||
--node-labels=node-role.kubernetes.io/node \
|
||||
--node-labels=node.kubernetes.io/node \
|
||||
--pod-manifest-path=/etc/kubernetes/manifests \
|
||||
--read-only-port=0 \
|
||||
--volume-plugin-dir=/var/lib/kubelet/volumeplugins
|
||||
@ -91,7 +91,7 @@ storage:
|
||||
contents:
|
||||
inline: |
|
||||
KUBELET_IMAGE_URL=docker://k8s.gcr.io/hyperkube
|
||||
KUBELET_IMAGE_TAG=v1.15.3
|
||||
KUBELET_IMAGE_TAG=v1.16.2
|
||||
- path: /etc/hostname
|
||||
filesystem: root
|
||||
mode: 0644
|
||||
|
@ -1,34 +1,34 @@
|
||||
resource "matchbox_group" "install" {
|
||||
count = length(var.controller_names) + length(var.worker_names)
|
||||
count = length(var.controllers) + length(var.workers)
|
||||
|
||||
name = format("install-%s", element(concat(var.controller_names, var.worker_names), count.index))
|
||||
name = format("install-%s", concat(var.controllers.*.name, var.workers.*.name)[count.index])
|
||||
|
||||
# pick one of 4 Matchbox profiles (Container Linux or Flatcar, cached or non-cached)
|
||||
profile = local.flavor == "flatcar" ? var.cached_install == "true" ? element(matchbox_profile.cached-flatcar-linux-install.*.name, count.index) : element(matchbox_profile.flatcar-install.*.name, count.index) : var.cached_install == "true" ? element(matchbox_profile.cached-container-linux-install.*.name, count.index) : element(matchbox_profile.container-linux-install.*.name, count.index)
|
||||
profile = local.flavor == "flatcar" ? var.cached_install ? matchbox_profile.cached-flatcar-linux-install.*.name[count.index] : matchbox_profile.flatcar-install.*.name[count.index] : var.cached_install ? matchbox_profile.cached-container-linux-install.*.name[count.index] : matchbox_profile.container-linux-install.*.name[count.index]
|
||||
|
||||
selector = {
|
||||
mac = element(concat(var.controller_macs, var.worker_macs), count.index)
|
||||
mac = concat(var.controllers.*.mac, var.workers.*.mac)[count.index]
|
||||
}
|
||||
}
|
||||
|
||||
resource "matchbox_group" "controller" {
|
||||
count = length(var.controller_names)
|
||||
name = format("%s-%s", var.cluster_name, element(var.controller_names, count.index))
|
||||
profile = element(matchbox_profile.controllers.*.name, count.index)
|
||||
count = length(var.controllers)
|
||||
name = format("%s-%s", var.cluster_name, var.controllers[count.index].name)
|
||||
profile = matchbox_profile.controllers.*.name[count.index]
|
||||
|
||||
selector = {
|
||||
mac = element(var.controller_macs, count.index)
|
||||
mac = var.controllers[count.index].mac
|
||||
os = "installed"
|
||||
}
|
||||
}
|
||||
|
||||
resource "matchbox_group" "worker" {
|
||||
count = length(var.worker_names)
|
||||
name = format("%s-%s", var.cluster_name, element(var.worker_names, count.index))
|
||||
profile = element(matchbox_profile.workers.*.name, count.index)
|
||||
count = length(var.workers)
|
||||
name = format("%s-%s", var.cluster_name, var.workers[count.index].name)
|
||||
profile = matchbox_profile.workers.*.name[count.index]
|
||||
|
||||
selector = {
|
||||
mac = element(var.worker_macs, count.index)
|
||||
mac = var.workers[count.index].mac
|
||||
os = "installed"
|
||||
}
|
||||
}
|
||||
|
@ -1,4 +1,4 @@
|
||||
output "kubeconfig-admin" {
|
||||
value = module.bootkube.kubeconfig-admin
|
||||
value = module.bootstrap.kubeconfig-admin
|
||||
}
|
||||
|
||||
|
@@ -1,15 +1,14 @@
locals {
  # coreos-stable -> coreos flavor, stable channel
  # flatcar-stable -> flatcar flavor, stable channel
  flavor = element(split("-", var.os_channel), 0)

  channel = element(split("-", var.os_channel), 1)
  flavor = split("-", var.os_channel)[0]
  channel = split("-", var.os_channel)[1]
}

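The locals now index the split result directly instead of calling element. For example, with an illustrative channel value:

locals {
  os_channel = "flatcar-stable" # illustrative input

  flavor  = split("-", local.os_channel)[0] # "flatcar"
  channel = split("-", local.os_channel)[1] # "stable"
}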
||||
// Container Linux Install profile (from release.core-os.net)
|
||||
resource "matchbox_profile" "container-linux-install" {
|
||||
count = length(var.controller_names) + length(var.worker_names)
|
||||
name = format("%s-container-linux-install-%s", var.cluster_name, element(concat(var.controller_names, var.worker_names), count.index))
|
||||
count = length(var.controllers) + length(var.workers)
|
||||
name = format("%s-container-linux-install-%s", var.cluster_name, concat(var.controllers.*.name, var.workers.*.name)[count.index])
|
||||
|
||||
kernel = "${var.download_protocol}://${local.channel}.release.core-os.net/amd64-usr/${var.os_version}/coreos_production_pxe.vmlinuz"
|
||||
|
||||
@ -26,11 +25,11 @@ resource "matchbox_profile" "container-linux-install" {
|
||||
var.kernel_args,
|
||||
])
|
||||
|
||||
container_linux_config = element(data.template_file.container-linux-install-configs.*.rendered, count.index)
|
||||
container_linux_config = data.template_file.container-linux-install-configs.*.rendered[count.index]
|
||||
}
|
||||
|
||||
data "template_file" "container-linux-install-configs" {
|
||||
count = length(var.controller_names) + length(var.worker_names)
|
||||
count = length(var.controllers) + length(var.workers)
|
||||
|
||||
template = file("${path.module}/cl/install.yaml.tmpl")
|
||||
|
||||
@ -40,7 +39,6 @@ data "template_file" "container-linux-install-configs" {
|
||||
os_version = var.os_version
|
||||
ignition_endpoint = format("%s/ignition", var.matchbox_http_endpoint)
|
||||
install_disk = var.install_disk
|
||||
container_linux_oem = var.container_linux_oem
|
||||
ssh_authorized_key = var.ssh_authorized_key
|
||||
# only cached-container-linux profile adds -b baseurl
|
||||
baseurl_flag = ""
|
||||
@ -50,8 +48,8 @@ data "template_file" "container-linux-install-configs" {
|
||||
// Container Linux Install profile (from matchbox /assets cache)
|
||||
// Note: Admin must have downloaded os_version into matchbox assets/coreos.
|
||||
resource "matchbox_profile" "cached-container-linux-install" {
|
||||
count = length(var.controller_names) + length(var.worker_names)
|
||||
name = format("%s-cached-container-linux-install-%s", var.cluster_name, element(concat(var.controller_names, var.worker_names), count.index))
|
||||
count = length(var.controllers) + length(var.workers)
|
||||
name = format("%s-cached-container-linux-install-%s", var.cluster_name, concat(var.controllers.*.name, var.workers.*.name)[count.index])
|
||||
|
||||
kernel = "/assets/coreos/${var.os_version}/coreos_production_pxe.vmlinuz"
|
||||
|
||||
@ -68,11 +66,11 @@ resource "matchbox_profile" "cached-container-linux-install" {
|
||||
var.kernel_args,
|
||||
])
|
||||
|
||||
container_linux_config = element(data.template_file.cached-container-linux-install-configs.*.rendered, count.index)
|
||||
container_linux_config = data.template_file.cached-container-linux-install-configs.*.rendered[count.index]
|
||||
}
|
||||
|
||||
data "template_file" "cached-container-linux-install-configs" {
|
||||
count = length(var.controller_names) + length(var.worker_names)
|
||||
count = length(var.controllers) + length(var.workers)
|
||||
|
||||
template = file("${path.module}/cl/install.yaml.tmpl")
|
||||
|
||||
@ -82,7 +80,6 @@ data "template_file" "cached-container-linux-install-configs" {
|
||||
os_version = var.os_version
|
||||
ignition_endpoint = format("%s/ignition", var.matchbox_http_endpoint)
|
||||
install_disk = var.install_disk
|
||||
container_linux_oem = var.container_linux_oem
|
||||
ssh_authorized_key = var.ssh_authorized_key
|
||||
# profile uses -b baseurl to install from matchbox cache
|
||||
baseurl_flag = "-b ${var.matchbox_http_endpoint}/assets/${local.flavor}"
|
||||
@ -91,8 +88,8 @@ data "template_file" "cached-container-linux-install-configs" {
|
||||
|
||||
// Flatcar Linux install profile (from release.flatcar-linux.net)
|
||||
resource "matchbox_profile" "flatcar-install" {
|
||||
count = length(var.controller_names) + length(var.worker_names)
|
||||
name = format("%s-flatcar-install-%s", var.cluster_name, element(concat(var.controller_names, var.worker_names), count.index))
|
||||
count = length(var.controllers) + length(var.workers)
|
||||
name = format("%s-flatcar-install-%s", var.cluster_name, concat(var.controllers.*.name, var.workers.*.name)[count.index])
|
||||
|
||||
kernel = "${var.download_protocol}://${local.channel}.release.flatcar-linux.net/amd64-usr/${var.os_version}/flatcar_production_pxe.vmlinuz"
|
||||
|
||||
@ -109,14 +106,14 @@ resource "matchbox_profile" "flatcar-install" {
|
||||
var.kernel_args,
|
||||
])
|
||||
|
||||
container_linux_config = element(data.template_file.container-linux-install-configs.*.rendered, count.index)
|
||||
container_linux_config = data.template_file.container-linux-install-configs.*.rendered[count.index]
|
||||
}
|
||||
|
||||
// Flatcar Linux Install profile (from matchbox /assets cache)
|
||||
// Note: Admin must have downloaded os_version into matchbox assets/flatcar.
|
||||
resource "matchbox_profile" "cached-flatcar-linux-install" {
|
||||
count = length(var.controller_names) + length(var.worker_names)
|
||||
name = format("%s-cached-flatcar-linux-install-%s", var.cluster_name, element(concat(var.controller_names, var.worker_names), count.index))
|
||||
count = length(var.controllers) + length(var.workers)
|
||||
name = format("%s-cached-flatcar-linux-install-%s", var.cluster_name, concat(var.controllers.*.name, var.workers.*.name)[count.index])
|
||||
|
||||
kernel = "/assets/flatcar/${var.os_version}/flatcar_production_pxe.vmlinuz"
|
||||
|
||||
@ -133,34 +130,34 @@ resource "matchbox_profile" "cached-flatcar-linux-install" {
|
||||
var.kernel_args,
|
||||
])
|
||||
|
||||
container_linux_config = element(data.template_file.cached-container-linux-install-configs.*.rendered, count.index)
|
||||
container_linux_config = data.template_file.cached-container-linux-install-configs.*.rendered[count.index]
|
||||
}
|
||||
|
||||
// Kubernetes Controller profiles
|
||||
resource "matchbox_profile" "controllers" {
|
||||
count = length(var.controller_names)
|
||||
name = format("%s-controller-%s", var.cluster_name, element(var.controller_names, count.index))
|
||||
raw_ignition = element(data.ct_config.controller-ignitions.*.rendered, count.index)
|
||||
count = length(var.controllers)
|
||||
name = format("%s-controller-%s", var.cluster_name, var.controllers.*.name[count.index])
|
||||
raw_ignition = data.ct_config.controller-ignitions.*.rendered[count.index]
|
||||
}
|
||||
|
||||
data "ct_config" "controller-ignitions" {
|
||||
count = length(var.controller_names)
|
||||
content = element(data.template_file.controller-configs.*.rendered, count.index)
|
||||
count = length(var.controllers)
|
||||
content = data.template_file.controller-configs.*.rendered[count.index]
|
||||
pretty_print = false
|
||||
snippets = local.clc_map[element(var.controller_names, count.index)]
|
||||
snippets = local.clc_map[var.controllers.*.name[count.index]]
|
||||
}
|
||||
|
||||
data "template_file" "controller-configs" {
|
||||
count = length(var.controller_names)
|
||||
count = length(var.controllers)
|
||||
|
||||
template = file("${path.module}/cl/controller.yaml.tmpl")
|
||||
|
||||
vars = {
|
||||
domain_name = element(var.controller_domains, count.index)
|
||||
etcd_name = element(var.controller_names, count.index)
|
||||
etcd_initial_cluster = join(",", formatlist("%s=https://%s:2380", var.controller_names, var.controller_domains))
|
||||
domain_name = var.controllers.*.domain[count.index]
|
||||
etcd_name = var.controllers.*.name[count.index]
|
||||
etcd_initial_cluster = join(",", formatlist("%s=https://%s:2380", var.controllers.*.name, var.controllers.*.domain))
|
||||
cgroup_driver = var.os_channel == "flatcar-edge" ? "systemd" : "cgroupfs"
|
||||
cluster_dns_service_ip = module.bootkube.cluster_dns_service_ip
|
||||
cluster_dns_service_ip = module.bootstrap.cluster_dns_service_ip
|
||||
cluster_domain_suffix = var.cluster_domain_suffix
|
||||
ssh_authorized_key = var.ssh_authorized_key
|
||||
}
|
||||
@ -168,27 +165,27 @@ data "template_file" "controller-configs" {
|
||||
|
||||
// Kubernetes Worker profiles
|
||||
resource "matchbox_profile" "workers" {
|
||||
count = length(var.worker_names)
|
||||
name = format("%s-worker-%s", var.cluster_name, element(var.worker_names, count.index))
|
||||
raw_ignition = element(data.ct_config.worker-ignitions.*.rendered, count.index)
|
||||
count = length(var.workers)
|
||||
name = format("%s-worker-%s", var.cluster_name, var.workers.*.name[count.index])
|
||||
raw_ignition = data.ct_config.worker-ignitions.*.rendered[count.index]
|
||||
}
|
||||
|
||||
data "ct_config" "worker-ignitions" {
|
||||
count = length(var.worker_names)
|
||||
content = element(data.template_file.worker-configs.*.rendered, count.index)
|
||||
count = length(var.workers)
|
||||
content = data.template_file.worker-configs.*.rendered[count.index]
|
||||
pretty_print = false
|
||||
snippets = local.clc_map[element(var.worker_names, count.index)]
|
||||
snippets = local.clc_map[var.workers.*.name[count.index]]
|
||||
}
|
||||
|
||||
data "template_file" "worker-configs" {
|
||||
count = length(var.worker_names)
|
||||
count = length(var.workers)
|
||||
|
||||
template = file("${path.module}/cl/worker.yaml.tmpl")
|
||||
|
||||
vars = {
|
||||
domain_name = element(var.worker_domains, count.index)
|
||||
domain_name = var.workers.*.domain[count.index]
|
||||
cgroup_driver = var.os_channel == "flatcar-edge" ? "systemd" : "cgroupfs"
|
||||
cluster_dns_service_ip = module.bootkube.cluster_dns_service_ip
|
||||
cluster_dns_service_ip = module.bootstrap.cluster_dns_service_ip
|
||||
cluster_domain_suffix = var.cluster_domain_suffix
|
||||
ssh_authorized_key = var.ssh_authorized_key
|
||||
}
|
||||
@ -200,7 +197,7 @@ locals {
|
||||
# Default Container Linux config snippets map every node names to list("\n") so
|
||||
# all lookups succeed
|
||||
clc_defaults = zipmap(
|
||||
concat(var.controller_names, var.worker_names),
|
||||
concat(var.controllers.*.name, var.workers.*.name),
|
||||
chunklist(data.template_file.clc-default-snippets.*.rendered, 1),
|
||||
)
|
||||
|
||||
@ -210,7 +207,7 @@ locals {
|
||||
|
||||
// Horrible hack to generate a Terraform list of node count length
|
||||
data "template_file" "clc-default-snippets" {
|
||||
count = length(var.controller_names) + length(var.worker_names)
|
||||
count = length(var.controllers) + length(var.workers)
|
||||
template = "\n"
|
||||
}
|
||||
|
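For a two-node cluster, the clc_defaults expression above evaluates to a map from each node name to a single-element snippet list. A hedged sketch with illustrative names:

locals {
  # illustrative machine names, standing in for concat(controllers, workers)
  names = ["node1", "node2"]

  # chunklist wraps each rendered "\n" into its own list, so every
  # node name maps to ["\n"] and later snippet lookups always succeed
  clc_defaults_example = zipmap(local.names, chunklist(["\n", "\n"], 1))
  # => { node1 = ["\n"], node2 = ["\n"] }
}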
||||
|
@ -1,6 +1,6 @@
|
||||
# Secure copy etcd TLS assets and kubeconfig to controllers. Activates kubelet.service
|
||||
# Secure copy assets to controllers. Activates kubelet.service
|
||||
resource "null_resource" "copy-controller-secrets" {
|
||||
count = length(var.controller_names)
|
||||
count = length(var.controllers)
|
||||
|
||||
# Without depends_on, remote-exec could start and wait for machines before
|
||||
# matchbox groups are written, causing a deadlock.
|
||||
@ -8,54 +8,60 @@ resource "null_resource" "copy-controller-secrets" {
|
||||
matchbox_group.install,
|
||||
matchbox_group.controller,
|
||||
matchbox_group.worker,
|
||||
module.bootstrap,
|
||||
]
|
||||
|
||||
connection {
|
||||
type = "ssh"
|
||||
host = element(var.controller_domains, count.index)
|
||||
host = var.controllers.*.domain[count.index]
|
||||
user = "core"
|
||||
timeout = "60m"
|
||||
}
|
||||
|
||||
provisioner "file" {
|
||||
content = module.bootkube.kubeconfig-kubelet
|
||||
content = module.bootstrap.kubeconfig-kubelet
|
||||
destination = "$HOME/kubeconfig"
|
||||
}
|
||||
|
||||
provisioner "file" {
|
||||
content = module.bootkube.etcd_ca_cert
|
||||
content = module.bootstrap.etcd_ca_cert
|
||||
destination = "$HOME/etcd-client-ca.crt"
|
||||
}
|
||||
|
||||
provisioner "file" {
|
||||
content = module.bootkube.etcd_client_cert
|
||||
content = module.bootstrap.etcd_client_cert
|
||||
destination = "$HOME/etcd-client.crt"
|
||||
}
|
||||
|
||||
provisioner "file" {
|
||||
content = module.bootkube.etcd_client_key
|
||||
content = module.bootstrap.etcd_client_key
|
||||
destination = "$HOME/etcd-client.key"
|
||||
}
|
||||
|
||||
provisioner "file" {
|
||||
content = module.bootkube.etcd_server_cert
|
||||
content = module.bootstrap.etcd_server_cert
|
||||
destination = "$HOME/etcd-server.crt"
|
||||
}
|
||||
|
||||
provisioner "file" {
|
||||
content = module.bootkube.etcd_server_key
|
||||
content = module.bootstrap.etcd_server_key
|
||||
destination = "$HOME/etcd-server.key"
|
||||
}
|
||||
|
||||
provisioner "file" {
|
||||
content = module.bootkube.etcd_peer_cert
|
||||
content = module.bootstrap.etcd_peer_cert
|
||||
destination = "$HOME/etcd-peer.crt"
|
||||
}
|
||||
|
||||
provisioner "file" {
|
||||
content = module.bootkube.etcd_peer_key
|
||||
content = module.bootstrap.etcd_peer_key
|
||||
destination = "$HOME/etcd-peer.key"
|
||||
}
|
||||
|
||||
provisioner "file" {
|
||||
source = var.asset_dir
|
||||
destination = "$HOME/assets"
|
||||
}
|
||||
|
||||
provisioner "remote-exec" {
|
||||
inline = [
|
||||
@ -69,14 +75,20 @@ resource "null_resource" "copy-controller-secrets" {
|
||||
"sudo mv etcd-peer.key /etc/ssl/etcd/etcd/peer.key",
|
||||
"sudo chown -R etcd:etcd /etc/ssl/etcd",
|
||||
"sudo chmod -R 500 /etc/ssl/etcd",
|
||||
"sudo mv $HOME/assets /opt/bootstrap/assets",
|
||||
"sudo mkdir -p /etc/kubernetes/manifests",
|
||||
"sudo mkdir -p /etc/kubernetes/bootstrap-secrets",
|
||||
"sudo mv $HOME/kubeconfig /etc/kubernetes/kubeconfig",
|
||||
"sudo cp -r /opt/bootstrap/assets/tls/* /etc/kubernetes/bootstrap-secrets/",
|
||||
"sudo cp /opt/bootstrap/assets/auth/kubeconfig /etc/kubernetes/bootstrap-secrets/",
|
||||
"sudo cp -r /opt/bootstrap/assets/static-manifests/* /etc/kubernetes/manifests/",
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
# Secure copy kubeconfig to all workers. Activates kubelet.service
|
||||
resource "null_resource" "copy-worker-secrets" {
|
||||
count = length(var.worker_names)
|
||||
count = length(var.workers)
|
||||
|
||||
# Without depends_on, remote-exec could start and wait for machines before
|
||||
# matchbox groups are written, causing a deadlock.
|
||||
@ -88,13 +100,13 @@ resource "null_resource" "copy-worker-secrets" {
|
||||
|
||||
connection {
|
||||
type = "ssh"
|
||||
host = element(var.worker_domains, count.index)
|
||||
host = var.workers.*.domain[count.index]
|
||||
user = "core"
|
||||
timeout = "60m"
|
||||
}
|
||||
|
||||
provisioner "file" {
|
||||
content = module.bootkube.kubeconfig-kubelet
|
||||
content = module.bootstrap.kubeconfig-kubelet
|
||||
destination = "$HOME/kubeconfig"
|
||||
}
|
||||
|
||||
@ -105,9 +117,8 @@ resource "null_resource" "copy-worker-secrets" {
|
||||
}
|
||||
}
|
||||
|
||||
# Secure copy bootkube assets to ONE controller and start bootkube to perform
|
||||
# one-time self-hosted cluster bootstrapping.
|
||||
resource "null_resource" "bootkube-start" {
|
||||
# Connect to a controller to perform one-time cluster bootstrap.
|
||||
resource "null_resource" "bootstrap" {
|
||||
# Without depends_on, this remote-exec may start before the kubeconfig copy.
|
||||
# Terraform only does one task at a time, so it would try to bootstrap
|
||||
# while no Kubelets are running.
|
||||
@ -118,20 +129,14 @@ resource "null_resource" "bootkube-start" {
|
||||
|
||||
connection {
|
||||
type = "ssh"
|
||||
host = element(var.controller_domains, 0)
|
||||
host = var.controllers[0].domain
|
||||
user = "core"
|
||||
timeout = "15m"
|
||||
}
|
||||
|
||||
provisioner "file" {
|
||||
source = var.asset_dir
|
||||
destination = "$HOME/assets"
|
||||
}
|
||||
|
||||
provisioner "remote-exec" {
|
||||
inline = [
|
||||
"sudo mv $HOME/assets /opt/bootkube",
|
||||
"sudo systemctl start bootkube",
|
||||
"sudo systemctl start bootstrap",
|
||||
]
|
||||
}
|
||||
}
|
||||
|
@@ -21,36 +21,32 @@ variable "os_version" {
}

# machines
# Terraform's crude "type system" does not properly support lists of maps so we do this.

variable "controller_names" {
type = list(string)
description = "Ordered list of controller names (e.g. [node1])"
variable "controllers" {
type = list(object({
name = string
mac = string
domain = string
}))
description = <<EOD
List of controller machine details (unique name, identifying MAC address, FQDN)
[{ name = "node1", mac = "52:54:00:a1:9c:ae", domain = "node1.example.com"}]
EOD
}

variable "controller_macs" {
type = list(string)
description = "Ordered list of controller identifying MAC addresses (e.g. [52:54:00:a1:9c:ae])"
}

variable "controller_domains" {
type = list(string)
description = "Ordered list of controller FQDNs (e.g. [node1.example.com])"
}

variable "worker_names" {
type = list(string)
description = "Ordered list of worker names (e.g. [node2, node3])"
}

variable "worker_macs" {
type = list(string)
description = "Ordered list of worker identifying MAC addresses (e.g. [52:54:00:b2:2f:86, 52:54:00:c3:61:77])"
}

variable "worker_domains" {
type = list(string)
description = "Ordered list of worker FQDNs (e.g. [node2.example.com, node3.example.com])"
variable "workers" {
type = list(object({
name = string
mac = string
domain = string
}))
description = <<EOD
List of worker machine details (unique name, identifying MAC address, FQDN)
[
{ name = "node2", mac = "52:54:00:b2:2f:86", domain = "node2.example.com"},
{ name = "node3", mac = "52:54:00:c3:61:77", domain = "node3.example.com"}
]
EOD
}

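# Editor's note: an illustrative sketch (not part of this module) of how a
# cluster definition passes machines as typed objects instead of the removed
# parallel name/mac/domain lists. Machine values mirror the examples in the
# variable descriptions above; the module source and ref are hypothetical
# placeholders.
#
# module "bare-metal-mercury" {
#   source = "git::https://github.com/poseidon/typhoon//bare-metal/container-linux/kubernetes?ref=<release>"
#   # ...other required variables...
#   controllers = [{
#     name   = "node1"
#     mac    = "52:54:00:a1:9c:ae"
#     domain = "node1.example.com"
#   }]
#   workers = [
#     { name = "node2", mac = "52:54:00:b2:2f:86", domain = "node2.example.com" },
#     { name = "node3", mac = "52:54:00:c3:61:77", domain = "node3.example.com" },
#   ]
# }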
variable "clc_snippets" {
|
||||
@ -62,8 +58,8 @@ variable "clc_snippets" {
|
||||
# configuration
|
||||
|
||||
variable "k8s_domain_name" {
|
||||
description = "Controller DNS name which resolves to a controller instance. Workers and kubeconfig's will communicate with this endpoint (e.g. cluster.example.com)"
|
||||
type = string
|
||||
description = "Controller DNS name which resolves to a controller instance. Workers and kubeconfig's will communicate with this endpoint (e.g. cluster.example.com)"
|
||||
}
|
||||
|
||||
variable "ssh_authorized_key" {
|
||||
@ -72,63 +68,55 @@ variable "ssh_authorized_key" {
|
||||
}
|
||||
|
||||
variable "asset_dir" {
|
||||
description = "Path to a directory where generated assets should be placed (contains secrets)"
|
||||
type = string
|
||||
description = "Absolute path to a directory where generated assets should be placed (contains secrets)"
|
||||
}
|
||||
|
||||
variable "networking" {
|
||||
description = "Choice of networking provider (flannel or calico)"
|
||||
type = string
|
||||
description = "Choice of networking provider (flannel or calico)"
|
||||
default = "calico"
|
||||
}
|
||||
|
||||
variable "network_mtu" {
|
||||
type = number
|
||||
description = "CNI interface MTU (applies to calico only)"
|
||||
type = string
|
||||
default = "1480"
|
||||
default = 1480
|
||||
}
|
||||
|
||||
variable "network_ip_autodetection_method" {
|
||||
description = "Method to autodetect the host IPv4 address (applies to calico only)"
|
||||
type = string
|
||||
description = "Method to autodetect the host IPv4 address (applies to calico only)"
|
||||
default = "first-found"
|
||||
}
|
||||
|
||||
variable "pod_cidr" {
|
||||
description = "CIDR IPv4 range to assign Kubernetes pods"
|
||||
type = string
|
||||
description = "CIDR IPv4 range to assign Kubernetes pods"
|
||||
default = "10.2.0.0/16"
|
||||
}
|
||||
|
||||
variable "service_cidr" {
|
||||
type = string
|
||||
description = <<EOD
|
||||
CIDR IPv4 range to assign Kubernetes services.
|
||||
The 1st IP will be reserved for kube_apiserver, the 10th IP will be reserved for coredns.
|
||||
EOD
|
||||
|
||||
|
||||
type = string
|
||||
default = "10.3.0.0/16"
|
||||
}
|
||||
|
||||
# optional
|
||||
|
||||
variable "cluster_domain_suffix" {
|
||||
description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
|
||||
type = string
|
||||
default = "cluster.local"
|
||||
}
|
||||
|
||||
variable "download_protocol" {
|
||||
type = string
|
||||
default = "https"
|
||||
description = "Protocol iPXE should use to download the kernel and initrd. Defaults to https, which requires iPXE compiled with crypto support. Unused if cached_install is true."
|
||||
default = "https"
|
||||
}
|
||||
|
||||
variable "cached_install" {
|
||||
type = string
|
||||
default = "false"
|
||||
type = bool
|
||||
description = "Whether Container Linux should PXE boot and install from matchbox /assets cache. Note that the admin must have downloaded the os_version into matchbox assets."
|
||||
default = false
|
||||
}
|
||||
|
||||
variable "install_disk" {
|
||||
@ -137,27 +125,29 @@ variable "install_disk" {
|
||||
description = "Disk device to which the install profiles should install Container Linux (e.g. /dev/sda)"
|
||||
}
|
||||
|
||||
variable "container_linux_oem" {
|
||||
type = string
|
||||
default = ""
|
||||
description = "DEPRECATED: Specify an OEM image id to use as base for the installation (e.g. ami, vmware_raw, xen) or leave blank for the default image"
|
||||
}
|
||||
|
||||
variable "kernel_args" {
|
||||
description = "Additional kernel arguments to provide at PXE boot."
|
||||
type = list(string)
|
||||
description = "Additional kernel arguments to provide at PXE boot."
|
||||
default = []
|
||||
}
|
||||
|
||||
variable "enable_reporting" {
|
||||
type = string
|
||||
type = bool
|
||||
description = "Enable usage or analytics reporting to upstreams (Calico)"
|
||||
default = "false"
|
||||
default = false
|
||||
}
|
||||
|
||||
variable "enable_aggregation" {
|
||||
type = bool
|
||||
description = "Enable the Kubernetes Aggregation Layer (defaults to false)"
|
||||
type = string
|
||||
default = "false"
|
||||
default = false
|
||||
}
|
||||
|
||||
# unofficial, undocumented, unsupported
|
||||
|
||||
variable "cluster_domain_suffix" {
|
||||
type = string
|
||||
description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
|
||||
default = "cluster.local"
|
||||
}
|
||||
|
||||
|
@@ -11,7 +11,7 @@ Typhoon distributes upstream Kubernetes, architectural conventions, and cluster

## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>

* Kubernetes v1.15.3 (upstream, via [kubernetes-incubator/bootkube](https://github.com/kubernetes-incubator/bootkube))
* Kubernetes v1.16.2 (upstream)
* Single or multi-master, [Calico](https://www.projectcalico.org/) or [flannel](https://github.com/coreos/flannel) networking
* On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
* Advanced features like [snippets](https://typhoon.psdn.io/advanced/customization/#container-linux) customization
|
||||
|
@ -1,10 +1,10 @@
|
||||
# Self-hosted Kubernetes assets (kubeconfig, manifests)
|
||||
module "bootkube" {
|
||||
source = "git::https://github.com/poseidon/terraform-render-bootkube.git?ref=248675e7a95660e5c8a65f8b7ce86be89ef55a0e"
|
||||
# Kubernetes assets (kubeconfig, manifests)
|
||||
module "bootstrap" {
|
||||
source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=0fcc067476fa1463d057fd43760df222b7262b27"
|
||||
|
||||
cluster_name = var.cluster_name
|
||||
api_servers = [var.k8s_domain_name]
|
||||
etcd_servers = var.controller_domains
|
||||
etcd_servers = var.controllers.*.domain
|
||||
asset_dir = var.asset_dir
|
||||
networking = var.networking
|
||||
network_mtu = var.network_mtu
|
@ -28,7 +28,7 @@ systemd:
|
||||
--network host \
|
||||
--volume /var/lib/etcd:/var/lib/etcd:rw,Z \
|
||||
--volume /etc/ssl/etcd:/etc/ssl/certs:ro,Z \
|
||||
quay.io/coreos/etcd:v3.3.15
|
||||
quay.io/coreos/etcd:v3.4.2
|
||||
ExecStop=/usr/bin/podman stop etcd
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
@ -55,9 +55,9 @@ systemd:
|
||||
[Service]
|
||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
|
||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
|
||||
ExecStartPre=/bin/mkdir -p /opt/cni/bin
|
||||
ExecStartPre=/bin/mkdir -p /var/lib/calico
|
||||
ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins
|
||||
ExecStartPre=/bin/mkdir -p /opt/cni/bin
|
||||
ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
|
||||
ExecStartPre=-/usr/bin/podman rm kubelet
|
||||
ExecStart=/usr/bin/podman run --name kubelet \
|
||||
@ -81,13 +81,13 @@ systemd:
|
||||
--volume /opt/cni/bin:/opt/cni/bin:z \
|
||||
--volume /etc/iscsi:/etc/iscsi \
|
||||
--volume /sbin/iscsiadm:/sbin/iscsiadm \
|
||||
k8s.gcr.io/hyperkube:v1.15.3 /hyperkube kubelet \
|
||||
k8s.gcr.io/hyperkube:v1.16.2 /hyperkube kubelet \
|
||||
--anonymous-auth=false \
|
||||
--authentication-token-webhook \
|
||||
--authorization-mode=Webhook \
|
||||
--cgroup-driver=systemd \
|
||||
--cgroups-per-qos=false \
|
||||
--enforce-node-allocatable="" \
|
||||
--cgroups-per-qos=true \
|
||||
--enforce-node-allocatable=pods \
|
||||
--client-ca-file=/etc/kubernetes/ca.crt \
|
||||
--cluster_dns=${cluster_dns_service_ip} \
|
||||
--cluster_domain=${cluster_domain_suffix} \
|
||||
@ -97,8 +97,8 @@ systemd:
|
||||
--kubeconfig=/etc/kubernetes/kubeconfig \
|
||||
--lock-file=/var/run/lock/kubelet.lock \
|
||||
--network-plugin=cni \
|
||||
--node-labels=node-role.kubernetes.io/master \
|
||||
--node-labels=node-role.kubernetes.io/controller="true" \
|
||||
--node-labels=node.kubernetes.io/master \
|
||||
--node-labels=node.kubernetes.io/controller="true" \
|
||||
--pod-manifest-path=/etc/kubernetes/manifests \
|
||||
--read-only-port=0 \
|
||||
--register-with-taints=node-role.kubernetes.io/master=:NoSchedule \
|
||||
@ -118,33 +118,48 @@ systemd:
|
||||
PathExists=/etc/kubernetes/kubeconfig
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
- name: bootkube.service
|
||||
- name: bootstrap.service
|
||||
contents: |
|
||||
[Unit]
|
||||
Description=Bootstrap a Kubernetes control plane
|
||||
ConditionPathExists=!/opt/bootkube/init_bootkube.done
|
||||
Description=Kubernetes control plane
|
||||
ConditionPathExists=!/opt/bootstrap/bootstrap.done
|
||||
[Service]
|
||||
Type=oneshot
|
||||
RemainAfterExit=true
|
||||
WorkingDirectory=/opt/bootkube
|
||||
ExecStart=/usr/bin/bash -c 'set -x && \
|
||||
[ -n "$(ls /opt/bootkube/assets/manifests-*/* 2>/dev/null)" ] && mv /opt/bootkube/assets/manifests-*/* /opt/bootkube/assets/manifests && rm -rf /opt/bootkube/assets/manifests-* && exec podman run --name bootkube --privileged \
|
||||
WorkingDirectory=/opt/bootstrap
|
||||
ExecStartPre=-/usr/bin/bash -c 'set -x && [ -n "$(ls /opt/bootstrap/assets/manifests-*/* 2>/dev/null)" ] && mv /opt/bootstrap/assets/manifests-*/* /opt/bootstrap/assets/manifests && rm -rf /opt/bootstrap/assets/manifests-*'
|
||||
ExecStart=/usr/bin/podman run --name bootstrap \
|
||||
--network host \
|
||||
--volume /opt/bootkube/assets:/assets \
|
||||
--volume /etc/kubernetes:/etc/kubernetes \
|
||||
quay.io/coreos/bootkube:v0.14.0 \
|
||||
/bootkube start --asset-dir=/assets'
|
||||
ExecStartPost=/bin/touch /opt/bootkube/init_bootkube.done
|
||||
--volume /opt/bootstrap/assets:/assets:ro,Z \
|
||||
--volume /opt/bootstrap/apply:/apply:ro,Z \
|
||||
k8s.gcr.io/hyperkube:v1.16.2 \
|
||||
/apply
|
||||
ExecStartPost=/bin/touch /opt/bootstrap/bootstrap.done
|
||||
ExecStartPost=-/usr/bin/podman stop bootstrap
|
||||
storage:
|
||||
directories:
|
||||
- path: /etc/kubernetes
|
||||
- path: /opt/bootkube
|
||||
- path: /opt/bootstrap
|
||||
files:
|
||||
- path: /etc/hostname
|
||||
mode: 0644
|
||||
contents:
|
||||
inline:
|
||||
${domain_name}
|
||||
- path: /opt/bootstrap/apply
|
||||
mode: 0544
|
||||
contents:
|
||||
inline: |
|
||||
#!/bin/bash -e
|
||||
export KUBECONFIG=/assets/auth/kubeconfig
|
||||
until kubectl version; do
|
||||
echo "Waiting for static pod control plane"
|
||||
sleep 5
|
||||
done
|
||||
until kubectl apply -f /assets/manifests -R; do
|
||||
echo "Retry applying manifests"
|
||||
sleep 5
|
||||
done
|
||||
- path: /etc/sysctl.d/reverse-path-filter.conf
|
||||
contents:
|
||||
inline: |
|
||||
|
@ -25,9 +25,9 @@ systemd:
|
||||
[Service]
|
||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
|
||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
|
||||
ExecStartPre=/bin/mkdir -p /opt/cni/bin
|
||||
ExecStartPre=/bin/mkdir -p /var/lib/calico
|
||||
ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins
|
||||
ExecStartPre=/bin/mkdir -p /opt/cni/bin
|
||||
ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
|
||||
ExecStartPre=-/usr/bin/podman rm kubelet
|
||||
ExecStart=/usr/bin/podman run --name kubelet \
|
||||
@ -51,7 +51,7 @@ systemd:
|
||||
--volume /opt/cni/bin:/opt/cni/bin:z \
|
||||
--volume /etc/iscsi:/etc/iscsi \
|
||||
--volume /sbin/iscsiadm:/sbin/iscsiadm \
|
||||
k8s.gcr.io/hyperkube:v1.15.3 /hyperkube kubelet \
|
||||
k8s.gcr.io/hyperkube:v1.16.2 /hyperkube kubelet \
|
||||
--anonymous-auth=false \
|
||||
--authentication-token-webhook \
|
||||
--authorization-mode=Webhook \
|
||||
@ -67,7 +67,7 @@ systemd:
|
||||
--kubeconfig=/etc/kubernetes/kubeconfig \
|
||||
--lock-file=/var/run/lock/kubelet.lock \
|
||||
--network-plugin=cni \
|
||||
--node-labels=node-role.kubernetes.io/node \
|
||||
--node-labels=node.kubernetes.io/node \
|
||||
--pod-manifest-path=/etc/kubernetes/manifests \
|
||||
--read-only-port=0 \
|
||||
--volume-plugin-dir=/var/lib/kubelet/volumeplugins
|
||||
@ -89,7 +89,6 @@ systemd:
|
||||
storage:
|
||||
directories:
|
||||
- path: /etc/kubernetes
|
||||
- path: /opt/bootkube
|
||||
files:
|
||||
- path: /etc/hostname
|
||||
mode: 0644
|
||||
|
@ -1,22 +1,22 @@
|
||||
# Match each controller or worker to a profile
|
||||
|
||||
resource "matchbox_group" "controller" {
|
||||
count = length(var.controller_names)
|
||||
name = format("%s-%s", var.cluster_name, var.controller_names[count.index])
|
||||
count = length(var.controllers)
|
||||
name = format("%s-%s", var.cluster_name, var.controllers.*.name[count.index])
|
||||
profile = matchbox_profile.controllers.*.name[count.index]
|
||||
|
||||
selector = {
|
||||
mac = var.controller_macs[count.index]
|
||||
mac = var.controllers.*.mac[count.index]
|
||||
}
|
||||
}
|
||||
|
||||
resource "matchbox_group" "worker" {
|
||||
count = length(var.worker_names)
|
||||
name = format("%s-%s", var.cluster_name, var.worker_names[count.index])
|
||||
count = length(var.workers)
|
||||
name = format("%s-%s", var.cluster_name, var.workers.*.name[count.index])
|
||||
profile = matchbox_profile.workers.*.name[count.index]
|
||||
|
||||
selector = {
|
||||
mac = var.worker_macs[count.index]
|
||||
mac = var.workers.*.mac[count.index]
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1,4 +1,4 @@
|
||||
output "kubeconfig-admin" {
|
||||
value = module.bootkube.kubeconfig-admin
|
||||
value = module.bootstrap.kubeconfig-admin
|
||||
}
|
||||
|
||||
|
@ -1,36 +1,36 @@
|
||||
locals {
|
||||
remote_kernel = "https://builds.coreos.fedoraproject.org/prod/streams/${var.os_stream}/builds/${var.os_version}/x86_64/fedora-coreos-${var.os_version}-installer-kernel"
|
||||
remote_initrd = "https://builds.coreos.fedoraproject.org/prod/streams/${var.os_stream}/builds/${var.os_version}/x86_64/fedora-coreos-${var.os_version}-installer-initramfs.img"
|
||||
remote_kernel = "https://builds.coreos.fedoraproject.org/prod/streams/${var.os_stream}/builds/${var.os_version}/x86_64/fedora-coreos-${var.os_version}-installer-kernel-x86_64"
|
||||
remote_initrd = "https://builds.coreos.fedoraproject.org/prod/streams/${var.os_stream}/builds/${var.os_version}/x86_64/fedora-coreos-${var.os_version}-installer-initramfs.x86_64.img"
|
||||
remote_args = [
|
||||
"ip=dhcp",
|
||||
"rd.neednet=1",
|
||||
"coreos.inst=yes",
|
||||
"coreos.inst.image_url=https://builds.coreos.fedoraproject.org/prod/streams/${var.os_stream}/builds/${var.os_version}/x86_64/fedora-coreos-${var.os_version}-metal.raw.xz",
|
||||
"coreos.inst.image_url=https://builds.coreos.fedoraproject.org/prod/streams/${var.os_stream}/builds/${var.os_version}/x86_64/fedora-coreos-${var.os_version}-metal.x86_64.raw.xz",
|
||||
"coreos.inst.ignition_url=${var.matchbox_http_endpoint}/ignition?uuid=$${uuid}&mac=$${mac:hexhyp}",
|
||||
"coreos.inst.install_dev=${var.install_disk}"
|
||||
]
|
||||
|
||||
cached_kernel = "/assets/fedora-coreos/fedora-coreos-${var.os_version}-installer-kernel"
|
||||
cached_initrd = "/assets/fedora-coreos/fedora-coreos-${var.os_version}-installer-initramfs.img"
|
||||
cached_kernel = "/assets/fedora-coreos/fedora-coreos-${var.os_version}-installer-kernel-x86_64"
|
||||
cached_initrd = "/assets/fedora-coreos/fedora-coreos-${var.os_version}-installer-initramfs.x86_64.img"
|
||||
cached_args = [
|
||||
"ip=dhcp",
|
||||
"rd.neednet=1",
|
||||
"coreos.inst=yes",
|
||||
"coreos.inst.image_url=${var.matchbox_http_endpoint}/assets/fedora-coreos/fedora-coreos-${var.os_version}-metal.raw.xz",
|
||||
"coreos.inst.image_url=${var.matchbox_http_endpoint}/assets/fedora-coreos/fedora-coreos-${var.os_version}-metal.x86_64.raw.xz",
|
||||
"coreos.inst.ignition_url=${var.matchbox_http_endpoint}/ignition?uuid=$${uuid}&mac=$${mac:hexhyp}",
|
||||
"coreos.inst.install_dev=${var.install_disk}"
|
||||
]
|
||||
|
||||
kernel = var.cached_install == "true" ? local.cached_kernel : local.remote_kernel
|
||||
initrd = var.cached_install == "true" ? local.cached_initrd : local.remote_initrd
|
||||
args = var.cached_install == "true" ? local.cached_args : local.remote_args
|
||||
kernel = var.cached_install ? local.cached_kernel : local.remote_kernel
|
||||
initrd = var.cached_install ? local.cached_initrd : local.remote_initrd
|
||||
args = var.cached_install ? local.cached_args : local.remote_args
|
||||
}
|
||||
|
||||
|
||||
// Fedora CoreOS controller profile
|
||||
resource "matchbox_profile" "controllers" {
|
||||
count = length(var.controller_names)
|
||||
name = format("%s-controller-%s", var.cluster_name, var.controller_names[count.index])
|
||||
count = length(var.controllers)
|
||||
name = format("%s-controller-%s", var.cluster_name, var.controllers.*.name[count.index])
|
||||
|
||||
kernel = local.kernel
|
||||
initrd = [
|
||||
@ -42,21 +42,21 @@ resource "matchbox_profile" "controllers" {
|
||||
}
|
||||
|
||||
data "ct_config" "controller-ignitions" {
|
||||
count = length(var.controller_names)
|
||||
count = length(var.controllers)
|
||||
|
||||
content = data.template_file.controller-configs.*.rendered[count.index]
|
||||
strict = true
|
||||
}
|
||||
|
||||
data "template_file" "controller-configs" {
|
||||
count = length(var.controller_names)
|
||||
count = length(var.controllers)
|
||||
|
||||
template = file("${path.module}/fcc/controller.yaml")
|
||||
vars = {
|
||||
domain_name = var.controller_domains[count.index]
|
||||
etcd_name = var.controller_names[count.index]
|
||||
etcd_initial_cluster = join(",", formatlist("%s=https://%s:2380", var.controller_names, var.controller_domains))
|
||||
cluster_dns_service_ip = module.bootkube.cluster_dns_service_ip
|
||||
domain_name = var.controllers.*.domain[count.index]
|
||||
etcd_name = var.controllers.*.name[count.index]
|
||||
etcd_initial_cluster = join(",", formatlist("%s=https://%s:2380", var.controllers.*.name, var.controllers.*.domain))
|
||||
cluster_dns_service_ip = module.bootstrap.cluster_dns_service_ip
|
||||
cluster_domain_suffix = var.cluster_domain_suffix
|
||||
ssh_authorized_key = var.ssh_authorized_key
|
||||
}
|
||||
@ -64,8 +64,8 @@ data "template_file" "controller-configs" {
|
||||
|
||||
// Fedora CoreOS worker profile
|
||||
resource "matchbox_profile" "workers" {
|
||||
count = length(var.worker_names)
|
||||
name = format("%s-worker-%s", var.cluster_name, var.worker_names[count.index])
|
||||
count = length(var.workers)
|
||||
name = format("%s-worker-%s", var.cluster_name, var.workers.*.name[count.index])
|
||||
|
||||
kernel = local.kernel
|
||||
initrd = [
|
||||
@ -77,19 +77,19 @@ resource "matchbox_profile" "workers" {
|
||||
}
|
||||
|
||||
data "ct_config" "worker-ignitions" {
|
||||
count = length(var.worker_names)
|
||||
count = length(var.workers)
|
||||
|
||||
content = data.template_file.worker-configs.*.rendered[count.index]
|
||||
strict = true
|
||||
}
|
||||
|
||||
data "template_file" "worker-configs" {
|
||||
count = length(var.worker_names)
|
||||
count = length(var.workers)
|
||||
|
||||
template = file("${path.module}/fcc/worker.yaml")
|
||||
vars = {
|
||||
domain_name = var.worker_domains[count.index]
|
||||
cluster_dns_service_ip = module.bootkube.cluster_dns_service_ip
|
||||
domain_name = var.workers.*.domain[count.index]
|
||||
cluster_dns_service_ip = module.bootstrap.cluster_dns_service_ip
|
||||
cluster_domain_suffix = var.cluster_domain_suffix
|
||||
ssh_authorized_key = var.ssh_authorized_key
|
||||
}
|
||||
|
@ -1,60 +1,66 @@
|
||||
# Secure copy etcd TLS assets and kubeconfig to controllers. Activates kubelet.service
|
||||
# Secure copy assets to controllers. Activates kubelet.service
|
||||
resource "null_resource" "copy-controller-secrets" {
|
||||
count = length(var.controller_names)
|
||||
count = length(var.controllers)
|
||||
|
||||
# Without depends_on, remote-exec could start and wait for machines before
|
||||
# matchbox groups are written, causing a deadlock.
|
||||
depends_on = [
|
||||
matchbox_group.controller,
|
||||
matchbox_group.worker,
|
||||
module.bootstrap,
|
||||
]
|
||||
|
||||
connection {
|
||||
type = "ssh"
|
||||
host = var.controller_domains[count.index]
|
||||
host = var.controllers.*.domain[count.index]
|
||||
user = "core"
|
||||
timeout = "60m"
|
||||
}
|
||||
|
||||
provisioner "file" {
|
||||
content = module.bootkube.kubeconfig-kubelet
|
||||
content = module.bootstrap.kubeconfig-kubelet
|
||||
destination = "$HOME/kubeconfig"
|
||||
}
|
||||
|
||||
provisioner "file" {
|
||||
content = module.bootkube.etcd_ca_cert
|
||||
content = module.bootstrap.etcd_ca_cert
|
||||
destination = "$HOME/etcd-client-ca.crt"
|
||||
}
|
||||
|
||||
provisioner "file" {
|
||||
content = module.bootkube.etcd_client_cert
|
||||
content = module.bootstrap.etcd_client_cert
|
||||
destination = "$HOME/etcd-client.crt"
|
||||
}
|
||||
|
||||
provisioner "file" {
|
||||
content = module.bootkube.etcd_client_key
|
||||
content = module.bootstrap.etcd_client_key
|
||||
destination = "$HOME/etcd-client.key"
|
||||
}
|
||||
|
||||
provisioner "file" {
|
||||
content = module.bootkube.etcd_server_cert
|
||||
content = module.bootstrap.etcd_server_cert
|
||||
destination = "$HOME/etcd-server.crt"
|
||||
}
|
||||
|
||||
provisioner "file" {
|
||||
content = module.bootkube.etcd_server_key
|
||||
content = module.bootstrap.etcd_server_key
|
||||
destination = "$HOME/etcd-server.key"
|
||||
}
|
||||
|
||||
provisioner "file" {
|
||||
content = module.bootkube.etcd_peer_cert
|
||||
content = module.bootstrap.etcd_peer_cert
|
||||
destination = "$HOME/etcd-peer.crt"
|
||||
}
|
||||
|
||||
provisioner "file" {
|
||||
content = module.bootkube.etcd_peer_key
|
||||
content = module.bootstrap.etcd_peer_key
|
||||
destination = "$HOME/etcd-peer.key"
|
||||
}
|
||||
|
||||
provisioner "file" {
|
||||
source = var.asset_dir
|
||||
destination = "$HOME/assets"
|
||||
}
|
||||
|
||||
provisioner "remote-exec" {
|
||||
inline = [
|
||||
@ -66,14 +72,20 @@ resource "null_resource" "copy-controller-secrets" {
|
||||
"sudo cp /etc/ssl/etcd/etcd-client-ca.crt /etc/ssl/etcd/etcd/peer-ca.crt",
|
||||
"sudo mv etcd-peer.crt /etc/ssl/etcd/etcd/peer.crt",
|
||||
"sudo mv etcd-peer.key /etc/ssl/etcd/etcd/peer.key",
|
||||
"sudo mv $HOME/assets /opt/bootstrap/assets",
|
||||
"sudo mkdir -p /etc/kubernetes/manifests",
|
||||
"sudo mkdir -p /etc/kubernetes/bootstrap-secrets",
|
||||
"sudo mv $HOME/kubeconfig /etc/kubernetes/kubeconfig",
|
||||
"sudo cp -r /opt/bootstrap/assets/tls/* /etc/kubernetes/bootstrap-secrets/",
|
||||
"sudo cp /opt/bootstrap/assets/auth/kubeconfig /etc/kubernetes/bootstrap-secrets/",
|
||||
"sudo cp -r /opt/bootstrap/assets/static-manifests/* /etc/kubernetes/manifests/"
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
# Secure copy kubeconfig to all workers. Activates kubelet.service
|
||||
resource "null_resource" "copy-worker-secrets" {
|
||||
count = length(var.worker_names)
|
||||
count = length(var.workers)
|
||||
|
||||
# Without depends_on, remote-exec could start and wait for machines before
|
||||
# matchbox groups are written, causing a deadlock.
|
||||
@ -84,13 +96,13 @@ resource "null_resource" "copy-worker-secrets" {
|
||||
|
||||
connection {
|
||||
type = "ssh"
|
||||
host = var.worker_domains[count.index]
|
||||
host = var.workers.*.domain[count.index]
|
||||
user = "core"
|
||||
timeout = "60m"
|
||||
}
|
||||
|
||||
provisioner "file" {
|
||||
content = module.bootkube.kubeconfig-kubelet
|
||||
content = module.bootstrap.kubeconfig-kubelet
|
||||
destination = "$HOME/kubeconfig"
|
||||
}
|
||||
|
||||
@ -101,9 +113,8 @@ resource "null_resource" "copy-worker-secrets" {
|
||||
}
|
||||
}
|
||||
|
||||
# Secure copy bootkube assets to ONE controller and start bootkube to perform
|
||||
# one-time self-hosted cluster bootstrapping.
|
||||
resource "null_resource" "bootkube-start" {
|
||||
# Connect to a controller to perform one-time cluster bootstrap.
|
||||
resource "null_resource" "bootstrap" {
|
||||
# Without depends_on, this remote-exec may start before the kubeconfig copy.
|
||||
# Terraform only does one task at a time, so it would try to bootstrap
|
||||
# while no Kubelets are running.
|
||||
@ -114,20 +125,14 @@ resource "null_resource" "bootkube-start" {
|
||||
|
||||
connection {
|
||||
type = "ssh"
|
||||
host = var.controller_domains[0]
|
||||
host = var.controllers[0].domain
|
||||
user = "core"
|
||||
timeout = "15m"
|
||||
}
|
||||
|
||||
provisioner "file" {
|
||||
source = var.asset_dir
|
||||
destination = "$HOME/assets"
|
||||
}
|
||||
|
||||
provisioner "remote-exec" {
|
||||
inline = [
|
||||
"sudo mv $HOME/assets /opt/bootkube",
|
||||
"sudo systemctl start bootkube",
|
||||
"sudo systemctl start bootstrap",
|
||||
]
|
||||
}
|
||||
}
|
||||
|
@ -22,36 +22,32 @@ variable "os_version" {
|
||||
}
|
||||
|
||||
# machines
|
||||
# Terraform's crude "type system" does not properly support lists of maps so we do this.
|
||||
|
||||
variable "controller_names" {
|
||||
type = list(string)
|
||||
description = "Ordered list of controller names (e.g. [node1])"
|
||||
variable "controllers" {
|
||||
type = list(object({
|
||||
name = string
|
||||
mac = string
|
||||
domain = string
|
||||
}))
|
||||
description = <<EOD
|
||||
List of controller machine details (unique name, identifying MAC address, FQDN)
|
||||
[{ name = "node1", mac = "52:54:00:a1:9c:ae", domain = "node1.example.com"}]
|
||||
EOD
|
||||
}
|
||||
|
||||
variable "controller_macs" {
|
||||
type = list(string)
|
||||
description = "Ordered list of controller identifying MAC addresses (e.g. [52:54:00:a1:9c:ae])"
|
||||
}
|
||||
|
||||
variable "controller_domains" {
|
||||
type = list(string)
|
||||
description = "Ordered list of controller FQDNs (e.g. [node1.example.com])"
|
||||
}
|
||||
|
||||
variable "worker_names" {
|
||||
type = list(string)
|
||||
description = "Ordered list of worker names (e.g. [node2, node3])"
|
||||
}
|
||||
|
||||
variable "worker_macs" {
|
||||
type = list(string)
|
||||
description = "Ordered list of worker identifying MAC addresses (e.g. [52:54:00:b2:2f:86, 52:54:00:c3:61:77])"
|
||||
}
|
||||
|
||||
variable "worker_domains" {
|
||||
type = list(string)
|
||||
description = "Ordered list of worker FQDNs (e.g. [node2.example.com, node3.example.com])"
|
||||
variable "workers" {
|
||||
type = list(object({
|
||||
name = string
|
||||
mac = string
|
||||
domain = string
|
||||
}))
|
||||
description = <<EOD
|
||||
List of worker machine details (unique name, identifying MAC address, FQDN)
|
||||
[
|
||||
{ name = "node2", mac = "52:54:00:b2:2f:86", domain = "node2.example.com"},
|
||||
{ name = "node3", mac = "52:54:00:c3:61:77", domain = "node3.example.com"}
|
||||
]
|
||||
EOD
|
||||
}
|
||||
|
||||
variable "snippets" {
|
||||
@ -63,8 +59,8 @@ variable "snippets" {
|
||||
# configuration
|
||||
|
||||
variable "k8s_domain_name" {
|
||||
description = "Controller DNS name which resolves to a controller instance. Workers and kubeconfig's will communicate with this endpoint (e.g. cluster.example.com)"
|
||||
type = string
|
||||
description = "Controller DNS name which resolves to a controller instance. Workers and kubeconfig's will communicate with this endpoint (e.g. cluster.example.com)"
|
||||
}
|
||||
|
||||
variable "ssh_authorized_key" {
|
||||
@ -73,80 +69,80 @@ variable "ssh_authorized_key" {
|
||||
}
|
||||
|
||||
variable "asset_dir" {
|
||||
description = "Path to a directory where generated assets should be placed (contains secrets)"
|
||||
type = string
|
||||
description = "Absolute path to a directory where generated assets should be placed (contains secrets)"
|
||||
}
|
||||
|
||||
variable "networking" {
|
||||
description = "Choice of networking provider (flannel or calico)"
|
||||
type = string
|
||||
description = "Choice of networking provider (flannel or calico)"
|
||||
default = "calico"
|
||||
}
|
||||
|
||||
variable "network_mtu" {
|
||||
type = number
|
||||
description = "CNI interface MTU (applies to calico only)"
|
||||
type = string
|
||||
default = "1480"
|
||||
default = 1480
|
||||
}
|
||||
|
||||
variable "network_ip_autodetection_method" {
|
||||
description = "Method to autodetect the host IPv4 address (applies to calico only)"
|
||||
type = string
|
||||
description = "Method to autodetect the host IPv4 address (applies to calico only)"
|
||||
default = "first-found"
|
||||
}
|
||||
|
||||
variable "pod_cidr" {
|
||||
description = "CIDR IPv4 range to assign Kubernetes pods"
|
||||
type = string
|
||||
description = "CIDR IPv4 range to assign Kubernetes pods"
|
||||
default = "10.2.0.0/16"
|
||||
}
|
||||
|
||||
variable "service_cidr" {
|
||||
type = string
|
||||
description = <<EOD
|
||||
CIDR IPv4 range to assign Kubernetes services.
|
||||
The 1st IP will be reserved for kube_apiserver, the 10th IP will be reserved for coredns.
|
||||
EOD
|
||||
|
||||
|
||||
type = string
|
||||
default = "10.3.0.0/16"
|
||||
}
|
||||
|
||||
# optional
|
||||
|
||||
variable "cached_install" {
|
||||
type = bool
|
||||
description = "Whether Fedora CoreOS should PXE boot and install from matchbox /assets cache. Note that the admin must have downloaded the os_version into matchbox assets."
|
||||
default = false
|
||||
}
|
||||
|
||||
variable "install_disk" {
|
||||
type = string
|
||||
description = "Disk device to install Fedora CoreOS (e.g. sda)"
|
||||
default = "sda"
|
||||
}
|
||||
|
||||
variable "kernel_args" {
|
||||
type = list(string)
|
||||
description = "Additional kernel arguments to provide at PXE boot."
|
||||
default = []
|
||||
}
|
||||
|
||||
variable "enable_reporting" {
|
||||
type = bool
|
||||
description = "Enable usage or analytics reporting to upstreams (Calico)"
|
||||
default = false
|
||||
}
|
||||
|
||||
variable "enable_aggregation" {
|
||||
type = bool
|
||||
description = "Enable the Kubernetes Aggregation Layer (defaults to false)"
|
||||
default = false
|
||||
}
|
||||
|
||||
# unofficial, undocumented, unsupported
|
||||
|
||||
variable "cluster_domain_suffix" {
|
||||
description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
|
||||
type = string
|
||||
default = "cluster.local"
|
||||
}
|
||||
|
||||
variable "cached_install" {
|
||||
type = string
|
||||
default = "false"
|
||||
description = "Whether Fedora CoreOS should PXE boot and install from matchbox /assets cache. Note that the admin must have downloaded the os_version into matchbox assets."
|
||||
}
|
||||
|
||||
variable "install_disk" {
|
||||
type = string
|
||||
default = "sda"
|
||||
description = "Disk device to install Fedora CoreOS (e.g. sda)"
|
||||
}
|
||||
|
||||
variable "kernel_args" {
|
||||
description = "Additional kernel arguments to provide at PXE boot."
|
||||
type = list(string)
|
||||
default = []
|
||||
}
|
||||
|
||||
variable "enable_reporting" {
|
||||
type = string
|
||||
description = "Enable usage or analytics reporting to upstreams (Calico)"
|
||||
default = "false"
|
||||
}
|
||||
|
||||
variable "enable_aggregation" {
|
||||
description = "Enable the Kubernetes Aggregation Layer (defaults to false)"
|
||||
type = string
|
||||
default = "false"
|
||||
}
|
||||
|
||||
|
@ -11,7 +11,7 @@ Typhoon distributes upstream Kubernetes, architectural conventions, and cluster
|
||||
|
||||
## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>
|
||||
|
||||
* Kubernetes v1.15.3 (upstream, via [kubernetes-incubator/bootkube](https://github.com/kubernetes-incubator/bootkube))
|
||||
* Kubernetes v1.16.2 (upstream)
|
||||
* Single or multi-master, [Calico](https://www.projectcalico.org/) or [flannel](https://github.com/coreos/flannel) networking
|
||||
* On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
|
||||
* Advanced features like [snippets](https://typhoon.psdn.io/advanced/customization/#container-linux) customization
|
||||
|
@ -1,6 +1,6 @@
|
||||
# Self-hosted Kubernetes assets (kubeconfig, manifests)
|
||||
module "bootkube" {
|
||||
source = "git::https://github.com/poseidon/terraform-render-bootkube.git?ref=248675e7a95660e5c8a65f8b7ce86be89ef55a0e"
|
||||
# Kubernetes assets (kubeconfig, manifests)
|
||||
module "bootstrap" {
|
||||
source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=0fcc067476fa1463d057fd43760df222b7262b27"
|
||||
|
||||
cluster_name = var.cluster_name
|
||||
api_servers = [format("%s.%s", var.cluster_name, var.dns_zone)]
|
@ -7,7 +7,7 @@ systemd:
|
||||
- name: 40-etcd-cluster.conf
|
||||
contents: |
|
||||
[Service]
|
||||
Environment="ETCD_IMAGE_TAG=v3.3.15"
|
||||
Environment="ETCD_IMAGE_TAG=v3.4.2"
|
||||
Environment="ETCD_NAME=${etcd_name}"
|
||||
Environment="ETCD_ADVERTISE_CLIENT_URLS=https://${etcd_domain}:2379"
|
||||
Environment="ETCD_INITIAL_ADVERTISE_PEER_URLS=https://${etcd_domain}:2380"
|
||||
@ -74,11 +74,9 @@ systemd:
|
||||
--volume var-log,kind=host,source=/var/log \
|
||||
--mount volume=var-log,target=/var/log \
|
||||
--insecure-options=image"
|
||||
ExecStartPre=/bin/mkdir -p /opt/cni/bin
|
||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
|
||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
|
||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/checkpoint-secrets
|
||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/inactive-manifests
|
||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
|
||||
ExecStartPre=/bin/mkdir -p /opt/cni/bin
|
||||
ExecStartPre=/bin/mkdir -p /var/lib/cni
|
||||
ExecStartPre=/bin/mkdir -p /var/lib/calico
|
||||
ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins
|
||||
@ -97,8 +95,8 @@ systemd:
|
||||
--kubeconfig=/etc/kubernetes/kubeconfig \
|
||||
--lock-file=/var/run/lock/kubelet.lock \
|
||||
--network-plugin=cni \
|
||||
--node-labels=node-role.kubernetes.io/master \
|
||||
--node-labels=node-role.kubernetes.io/controller="true" \
|
||||
--node-labels=node.kubernetes.io/master \
|
||||
--node-labels=node.kubernetes.io/controller="true" \
|
||||
--pod-manifest-path=/etc/kubernetes/manifests \
|
||||
--read-only-port=0 \
|
||||
--register-with-taints=node-role.kubernetes.io/master=:NoSchedule \
|
||||
@ -108,17 +106,28 @@ systemd:
|
||||
RestartSec=10
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
- name: bootkube.service
|
||||
- name: bootstrap.service
|
||||
contents: |
|
||||
[Unit]
|
||||
Description=Bootstrap a Kubernetes cluster
|
||||
ConditionPathExists=!/opt/bootkube/init_bootkube.done
|
||||
Description=Kubernetes control plane
|
||||
ConditionPathExists=!/opt/bootstrap/bootstrap.done
|
||||
[Service]
|
||||
Type=oneshot
|
||||
RemainAfterExit=true
|
||||
WorkingDirectory=/opt/bootkube
|
||||
ExecStart=/opt/bootkube/bootkube-start
|
||||
ExecStartPost=/bin/touch /opt/bootkube/init_bootkube.done
|
||||
WorkingDirectory=/opt/bootstrap
|
||||
ExecStartPre=-/usr/bin/bash -c 'set -x && [ -n "$(ls /opt/bootstrap/assets/manifests-*/* 2>/dev/null)" ] && mv /opt/bootstrap/assets/manifests-*/* /opt/bootstrap/assets/manifests && rm -rf /opt/bootstrap/assets/manifests-*'
|
||||
ExecStart=/usr/bin/rkt run \
|
||||
--trust-keys-from-https \
|
||||
--volume assets,kind=host,source=/opt/bootstrap/assets \
|
||||
--mount volume=assets,target=/assets \
|
||||
--volume script,kind=host,source=/opt/bootstrap/apply \
|
||||
--mount volume=script,target=/apply \
|
||||
--insecure-options=image \
|
||||
docker://k8s.gcr.io/hyperkube:v1.16.2 \
|
||||
--net=host \
|
||||
--dns=host \
|
||||
--exec=/apply
|
||||
ExecStartPost=/bin/touch /opt/bootstrap/bootstrap.done
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
storage:
|
||||
@ -129,34 +138,24 @@ storage:
|
||||
contents:
|
||||
inline: |
|
||||
KUBELET_IMAGE_URL=docker://k8s.gcr.io/hyperkube
|
||||
KUBELET_IMAGE_TAG=v1.15.3
|
||||
KUBELET_IMAGE_TAG=v1.16.2
|
||||
- path: /opt/bootstrap/apply
|
||||
filesystem: root
|
||||
mode: 0544
|
||||
contents:
|
||||
inline: |
|
||||
#!/bin/bash -e
|
||||
export KUBECONFIG=/assets/auth/kubeconfig
|
||||
until kubectl version; do
|
||||
echo "Waiting for static pod control plane"
|
||||
sleep 5
|
||||
done
|
||||
until kubectl apply -f /assets/manifests -R; do
|
||||
echo "Retry applying manifests"
|
||||
sleep 5
|
||||
done
|
||||
- path: /etc/sysctl.d/max-user-watches.conf
|
||||
filesystem: root
|
||||
contents:
|
||||
inline: |
|
||||
fs.inotify.max_user_watches=16184
|
||||
- path: /opt/bootkube/bootkube-start
|
||||
filesystem: root
|
||||
mode: 0544
|
||||
user:
|
||||
id: 500
|
||||
group:
|
||||
id: 500
|
||||
contents:
|
||||
inline: |
|
||||
#!/bin/bash
|
||||
# Wrapper for bootkube start
|
||||
set -e
|
||||
# Move experimental manifests
|
||||
[ -n "$(ls /opt/bootkube/assets/manifests-*/* 2>/dev/null)" ] && mv /opt/bootkube/assets/manifests-*/* /opt/bootkube/assets/manifests && rm -rf /opt/bootkube/assets/manifests-*
|
||||
exec /usr/bin/rkt run \
|
||||
--trust-keys-from-https \
|
||||
--volume assets,kind=host,source=/opt/bootkube/assets \
|
||||
--mount volume=assets,target=/assets \
|
||||
--volume bootstrap,kind=host,source=/etc/kubernetes \
|
||||
--mount volume=bootstrap,target=/etc/kubernetes \
|
||||
$${RKT_OPTS} \
|
||||
quay.io/coreos/bootkube:v0.14.0 \
|
||||
--net=host \
|
||||
--dns=host \
|
||||
--exec=/bootkube -- start --asset-dir=/assets "$@"
|
||||
|
@ -49,9 +49,9 @@ systemd:
|
||||
--volume var-log,kind=host,source=/var/log \
|
||||
--mount volume=var-log,target=/var/log \
|
||||
--insecure-options=image"
|
||||
ExecStartPre=/bin/mkdir -p /opt/cni/bin
|
||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
|
||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
|
||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
|
||||
ExecStartPre=/bin/mkdir -p /opt/cni/bin
|
||||
ExecStartPre=/bin/mkdir -p /var/lib/cni
|
||||
ExecStartPre=/bin/mkdir -p /var/lib/calico
|
||||
ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins
|
||||
@ -70,7 +70,7 @@ systemd:
|
||||
--kubeconfig=/etc/kubernetes/kubeconfig \
|
||||
--lock-file=/var/run/lock/kubelet.lock \
|
||||
--network-plugin=cni \
|
||||
--node-labels=node-role.kubernetes.io/node \
|
||||
--node-labels=node.kubernetes.io/node \
|
||||
--pod-manifest-path=/etc/kubernetes/manifests \
|
||||
--read-only-port=0 \
|
||||
--volume-plugin-dir=/var/lib/kubelet/volumeplugins
|
||||
@ -99,7 +99,7 @@ storage:
|
||||
contents:
|
||||
inline: |
|
||||
KUBELET_IMAGE_URL=docker://k8s.gcr.io/hyperkube
|
||||
KUBELET_IMAGE_TAG=v1.15.3
|
||||
KUBELET_IMAGE_TAG=v1.16.2
|
||||
- path: /etc/sysctl.d/max-user-watches.conf
|
||||
filesystem: root
|
||||
contents:
|
||||
@ -117,7 +117,7 @@ storage:
|
||||
--volume config,kind=host,source=/etc/kubernetes \
|
||||
--mount volume=config,target=/etc/kubernetes \
|
||||
--insecure-options=image \
|
||||
docker://k8s.gcr.io/hyperkube:v1.15.3 \
|
||||
docker://k8s.gcr.io/hyperkube:v1.16.2 \
|
||||
--net=host \
|
||||
--dns=host \
|
||||
--exec=/kubectl -- --kubeconfig=/etc/kubernetes/kubeconfig delete node $(hostname)
|
||||
|
@ -53,24 +53,33 @@ resource "digitalocean_firewall" "controllers" {
|
||||
|
||||
tags = ["${var.cluster_name}-controller"]
|
||||
|
||||
# etcd, kube-apiserver, kubelet
|
||||
# etcd
|
||||
inbound_rule {
|
||||
protocol = "tcp"
|
||||
port_range = "2379-2380"
|
||||
source_tags = [digitalocean_tag.controllers.name]
|
||||
}
|
||||
|
||||
# etcd metrics
|
||||
inbound_rule {
|
||||
protocol = "tcp"
|
||||
port_range = "2381"
|
||||
source_tags = [digitalocean_tag.workers.name]
|
||||
}
|
||||
|
||||
# kube-apiserver
|
||||
inbound_rule {
|
||||
protocol = "tcp"
|
||||
port_range = "6443"
|
||||
source_addresses = ["0.0.0.0/0", "::/0"]
|
||||
}
|
||||
|
||||
# kube-scheduler metrics, kube-controller-manager metrics
|
||||
inbound_rule {
|
||||
protocol = "tcp"
|
||||
port_range = "10251-10252"
|
||||
source_tags = [digitalocean_tag.workers.name]
|
||||
}
|
||||
}
|
||||
|
||||
resource "digitalocean_firewall" "workers" {
|
||||
|
@ -1,5 +1,5 @@
|
||||
output "kubeconfig-admin" {
|
||||
value = module.bootkube.kubeconfig-admin
|
||||
value = module.bootstrap.kubeconfig-admin
|
||||
}
|
||||
|
||||
output "controllers_dns" {
|
||||
|
@ -1,57 +1,63 @@
|
||||
# Secure copy etcd TLS assets and kubeconfig to controllers. Activates kubelet.service
|
||||
# Secure copy assets to controllers. Activates kubelet.service
|
||||
resource "null_resource" "copy-controller-secrets" {
|
||||
count = var.controller_count
|
||||
|
||||
depends_on = [
|
||||
module.bootstrap,
|
||||
digitalocean_firewall.rules
|
||||
]
|
||||
|
||||
connection {
|
||||
type = "ssh"
|
||||
host = element(digitalocean_droplet.controllers.*.ipv4_address, count.index)
|
||||
host = digitalocean_droplet.controllers.*.ipv4_address[count.index]
|
||||
user = "core"
|
||||
timeout = "15m"
|
||||
}
|
||||
|
||||
provisioner "file" {
|
||||
content = module.bootkube.kubeconfig-kubelet
|
||||
content = module.bootstrap.kubeconfig-kubelet
|
||||
destination = "$HOME/kubeconfig"
|
||||
}
|
||||
|
||||
provisioner "file" {
|
||||
content = module.bootkube.etcd_ca_cert
|
||||
content = module.bootstrap.etcd_ca_cert
|
||||
destination = "$HOME/etcd-client-ca.crt"
|
||||
}
|
||||
|
||||
provisioner "file" {
|
||||
content = module.bootkube.etcd_client_cert
|
||||
content = module.bootstrap.etcd_client_cert
|
||||
destination = "$HOME/etcd-client.crt"
|
||||
}
|
||||
|
||||
provisioner "file" {
|
||||
content = module.bootkube.etcd_client_key
|
||||
content = module.bootstrap.etcd_client_key
|
||||
destination = "$HOME/etcd-client.key"
|
||||
}
|
||||
|
||||
provisioner "file" {
|
||||
content = module.bootkube.etcd_server_cert
|
||||
content = module.bootstrap.etcd_server_cert
|
||||
destination = "$HOME/etcd-server.crt"
|
||||
}
|
||||
|
||||
provisioner "file" {
|
||||
content = module.bootkube.etcd_server_key
|
||||
content = module.bootstrap.etcd_server_key
|
||||
destination = "$HOME/etcd-server.key"
|
||||
}
|
||||
|
||||
provisioner "file" {
|
||||
content = module.bootkube.etcd_peer_cert
|
||||
content = module.bootstrap.etcd_peer_cert
|
||||
destination = "$HOME/etcd-peer.crt"
|
||||
}
|
||||
|
||||
provisioner "file" {
|
||||
content = module.bootkube.etcd_peer_key
|
||||
content = module.bootstrap.etcd_peer_key
|
||||
destination = "$HOME/etcd-peer.key"
|
||||
}
|
||||
|
||||
provisioner "file" {
|
||||
source = var.asset_dir
|
||||
destination = "$HOME/assets"
|
||||
}
|
||||
|
||||
provisioner "remote-exec" {
|
||||
inline = [
|
||||
@ -65,7 +71,13 @@ resource "null_resource" "copy-controller-secrets" {
|
||||
"sudo mv etcd-peer.key /etc/ssl/etcd/etcd/peer.key",
|
||||
"sudo chown -R etcd:etcd /etc/ssl/etcd",
|
||||
"sudo chmod -R 500 /etc/ssl/etcd",
|
||||
"sudo mv $HOME/assets /opt/bootstrap/assets",
|
||||
"sudo mkdir -p /etc/kubernetes/manifests",
|
||||
"sudo mkdir -p /etc/kubernetes/bootstrap-secrets",
|
||||
"sudo mv $HOME/kubeconfig /etc/kubernetes/kubeconfig",
|
||||
"sudo cp -r /opt/bootstrap/assets/tls/* /etc/kubernetes/bootstrap-secrets/",
|
||||
"sudo cp /opt/bootstrap/assets/auth/kubeconfig /etc/kubernetes/bootstrap-secrets/",
|
||||
"sudo cp -r /opt/bootstrap/assets/static-manifests/* /etc/kubernetes/manifests/",
|
||||
]
|
||||
}
|
||||
}
|
||||
@ -76,13 +88,13 @@ resource "null_resource" "copy-worker-secrets" {
|
||||
|
||||
connection {
|
||||
type = "ssh"
|
||||
host = element(digitalocean_droplet.workers.*.ipv4_address, count.index)
|
||||
host = digitalocean_droplet.workers.*.ipv4_address[count.index]
|
||||
user = "core"
|
||||
timeout = "15m"
|
||||
}
|
||||
|
||||
provisioner "file" {
|
||||
content = module.bootkube.kubeconfig-kubelet
|
||||
content = module.bootstrap.kubeconfig-kubelet
|
||||
destination = "$HOME/kubeconfig"
|
||||
}
|
||||
|
||||
@ -93,11 +105,9 @@ resource "null_resource" "copy-worker-secrets" {
|
||||
}
|
||||
}
|
||||
|
||||
# Secure copy bootkube assets to ONE controller and start bootkube to perform
|
||||
# one-time self-hosted cluster bootstrapping.
|
||||
resource "null_resource" "bootkube-start" {
|
||||
# Connect to a controller to perform one-time cluster bootstrap.
|
||||
resource "null_resource" "bootstrap" {
|
||||
depends_on = [
|
||||
module.bootkube,
|
||||
null_resource.copy-controller-secrets,
|
||||
null_resource.copy-worker-secrets,
|
||||
]
|
||||
@ -109,15 +119,9 @@ resource "null_resource" "bootkube-start" {
|
||||
timeout = "15m"
|
||||
}
|
||||
|
||||
provisioner "file" {
|
||||
source = var.asset_dir
|
||||
destination = "$HOME/assets"
|
||||
}
|
||||
|
||||
provisioner "remote-exec" {
|
||||
inline = [
|
||||
"sudo mv $HOME/assets /opt/bootkube",
|
||||
"sudo systemctl start bootkube",
|
||||
"sudo systemctl start bootstrap",
|
||||
]
|
||||
}
|
||||
}
|
||||
|
@ -18,33 +18,33 @@ variable "dns_zone" {
|
||||
# instances
|
||||
|
||||
variable "controller_count" {
|
||||
type = string
|
||||
default = "1"
|
||||
type = number
|
||||
description = "Number of controllers (i.e. masters)"
|
||||
default = 1
|
||||
}
|
||||
|
||||
variable "worker_count" {
|
||||
type = string
|
||||
default = "1"
|
||||
type = number
|
||||
description = "Number of workers"
|
||||
default = 1
|
||||
}
|
||||
|
||||
variable "controller_type" {
|
||||
type = string
|
||||
default = "s-2vcpu-2gb"
|
||||
description = "Droplet type for controllers (e.g. s-2vcpu-2gb, s-2vcpu-4gb, s-4vcpu-8gb)."
|
||||
default = "s-2vcpu-2gb"
|
||||
}
|
||||
|
||||
variable "worker_type" {
|
||||
type = string
|
||||
default = "s-1vcpu-2gb"
|
||||
description = "Droplet type for workers (e.g. s-1vcpu-2gb, s-2vcpu-2gb)"
|
||||
default = "s-1vcpu-2gb"
|
||||
}
|
||||
|
||||
variable "image" {
|
||||
type = string
|
||||
default = "coreos-stable"
|
||||
description = "Container Linux image for instances (e.g. coreos-stable)"
|
||||
default = "coreos-stable"
|
||||
}
|
||||
|
||||
variable "controller_clc_snippets" {
|
||||
@ -67,48 +67,48 @@ variable "ssh_fingerprints" {
|
||||
}
|
||||
|
||||
variable "asset_dir" {
|
||||
description = "Path to a directory where generated assets should be placed (contains secrets)"
|
||||
type = string
|
||||
description = "Absolute path to a directory where generated assets should be placed (contains secrets)"
|
||||
}
|
||||
|
||||
variable "networking" {
|
||||
description = "Choice of networking provider (flannel or calico)"
|
||||
type = string
|
||||
default = "flannel"
|
||||
description = "Choice of networking provider (flannel or calico)"
|
||||
default = "calico"
|
||||
}
|
||||
|
||||
variable "pod_cidr" {
|
||||
description = "CIDR IPv4 range to assign Kubernetes pods"
|
||||
type = string
|
||||
description = "CIDR IPv4 range to assign Kubernetes pods"
|
||||
default = "10.2.0.0/16"
|
||||
}
|
||||
|
||||
variable "service_cidr" {
|
||||
type = string
|
||||
description = <<EOD
|
||||
CIDR IPv4 range to assign Kubernetes services.
|
||||
The 1st IP will be reserved for kube_apiserver, the 10th IP will be reserved for coredns.
|
||||
EOD
|
||||
|
||||
|
||||
type = string
|
||||
default = "10.3.0.0/16"
|
||||
}
|
||||
|
||||
variable "cluster_domain_suffix" {
|
||||
description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
|
||||
type = string
|
||||
default = "cluster.local"
|
||||
}
|
||||
|
||||
variable "enable_reporting" {
|
||||
type = string
|
||||
type = bool
|
||||
description = "Enable usage or analytics reporting to upstreams (Calico)"
|
||||
default = "false"
|
||||
default = false
|
||||
}
|
||||
|
||||
variable "enable_aggregation" {
|
||||
type = bool
|
||||
description = "Enable the Kubernetes Aggregation Layer (defaults to false)"
|
||||
type = string
|
||||
default = "false"
|
||||
default = false
|
||||
}
|
||||
|
||||
# unofficial, undocumented, unsupported
|
||||
|
||||
variable "cluster_domain_suffix" {
|
||||
type = string
|
||||
description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
|
||||
default = "cluster.local"
|
||||
}
|
||||
|
||||
|
@ -147,5 +147,5 @@ module "digital-ocean-nemo" {
|
||||
}
|
||||
```
|
||||
|
||||
To customize lower-level Kubernetes control plane bootstrapping, see the [poseidon/terraform-render-bootkube](https://github.com/poseidon/terraform-render-bootkube) Terraform module.
|
||||
To customize low-level Kubernetes control plane bootstrapping, see the [poseidon/terraform-render-bootstrap](https://github.com/poseidon/terraform-render-bootstrap) Terraform module.
|
||||
|
||||
|
@@ -62,11 +62,14 @@ The AWS internal `workers` module supports a number of [variables](https://githu
|:-----|:------------|:--------|:--------|
| worker_count | Number of instances | 1 | 3 |
| instance_type | EC2 instance type | "t3.small" | "t3.medium" |
| os_image | AMI channel for a Container Linux derivative | coreos-stable | coreos-stable, coreos-beta, coreos-alpha, flatcar-stable, flatcar-beta, flatcar-alpha |
| disk_size | Size of the disk in GB | 40 | 100 |
| spot_price | Spot price in USD for workers. Leave as default empty string for regular on-demand instances | "" | "0.10" |
| os_image | AMI channel for a Container Linux derivative | "coreos-stable" | coreos-stable, coreos-beta, coreos-alpha, flatcar-stable, flatcar-beta, flatcar-alpha |
| disk_size | Size of the EBS volume in GB | 40 | 100 |
| disk_type | Type of the EBS volume | "gp2" | standard, gp2, io1 |
| disk_iops | IOPS of the EBS volume | 0 (i.e. auto) | 400 |
| spot_price | Spot price in USD for worker instances or 0 to use on-demand instances | 0 | 0.10 |
| clc_snippets | Container Linux Config snippets | [] | [example](/advanced/customization/#usage) |
| service_cidr | Must match `service_cidr` of cluster | "10.3.0.0/16" | "10.3.0.0/24" |
| cluster_domain_suffix | Must match `cluster_domain_suffix` of cluster | "cluster.local" | "k8s.example.com" |
| node_labels | List of initial node labels | [] | ["worker-pool=foo"] |

Check the list of valid [instance types](https://aws.amazon.com/ec2/instance-types/) or per-region and per-type [spot prices](https://aws.amazon.com/ec2/spot/pricing/).
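
For reference, a pool that exercises several of the optional variables above might look like the sketch below. The pool name and values are illustrative (they mirror the example column), the `ref` is assumed to track the cluster's release, and required cluster/network arguments are omitted rather than guessed.

```tf
module "tempest-worker-pool" {
  source = "git::https://github.com/poseidon/typhoon//aws/container-linux/kubernetes/workers?ref=v1.16.2"

  # required cluster/network arguments omitted for brevity

  # optional
  worker_count  = 2
  instance_type = "t3.medium"
  os_image      = "flatcar-stable"
  disk_size     = 100
  spot_price    = 0.10
  node_labels   = ["worker-pool=foo"]
}
```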
|
||||
|
||||
@ -76,7 +79,7 @@ Create a cluster following the Azure [tutorial](../cl/azure.md#cluster). Define
|
||||
|
||||
```tf
|
||||
module "ramius-worker-pool" {
|
||||
source = "git::https://github.com/poseidon/typhoon//azure/container-linux/kubernetes/workers?ref=v1.15.3"
|
||||
source = "git::https://github.com/poseidon/typhoon//azure/container-linux/kubernetes/workers?ref=v1.16.2"
|
||||
|
||||
# Azure
|
||||
region = module.azure-ramius.region
|
||||
@ -127,12 +130,12 @@ The Azure internal `workers` module supports a number of [variables](https://git
|
||||
| Name | Description | Default | Example |
|
||||
|:-----|:------------|:--------|:--------|
|
||||
| worker_count | Number of instances | 1 | 3 |
|
||||
| vm_type | Machine type for instances | "Standard_F1" | See below |
|
||||
| os_image | Channel for a Container Linux derivative | coreos-stable | coreos-stable, coreos-beta, coreos-alpha |
|
||||
| priority | Set priority to Low to use reduced cost surplus capacity, with the tradeoff that instances can be deallocated at any time | Regular | Low |
|
||||
| vm_type | Machine type for instances | "Standard_DS1_v2" | See below |
|
||||
| os_image | Channel for a Container Linux derivative | "coreos-stable" | coreos-stable, coreos-beta, coreos-alpha |
|
||||
| priority | Set priority to Low to use reduced cost surplus capacity, with the tradeoff that instances can be deallocated at any time | "Regular" | "Low" |
|
||||
| clc_snippets | Container Linux Config snippets | [] | [example](/advanced/customization/#usage) |
|
||||
| service_cidr | CIDR IPv4 range to assign to Kubernetes services | "10.3.0.0/16" | "10.3.0.0/24" |
|
||||
| cluster_domain_suffix | FQDN suffix for Kubernetes services answered by coredns. | "cluster.local" | "k8s.example.com" |
|
||||
| node_labels | List of initial node labels | [] | ["worker-pool=foo"] |
|
||||
|
||||
Check the list of valid [machine types](https://azure.microsoft.com/en-us/pricing/details/virtual-machines/linux/) and their [specs](https://docs.microsoft.com/en-us/azure/virtual-machines/linux/sizes-general). Use `az vm list-skus` to get the identifier.
|
||||
|
||||
@ -142,7 +145,7 @@ Create a cluster following the Google Cloud [tutorial](../cl/google-cloud.md#clu
|
||||
|
||||
```tf
|
||||
module "yavin-worker-pool" {
|
||||
source = "git::https://github.com/poseidon/typhoon//google-cloud/container-linux/kubernetes/workers?ref=v1.15.3"
|
||||
source = "git::https://github.com/poseidon/typhoon//google-cloud/container-linux/kubernetes/workers?ref=v1.16.2"
|
||||
|
||||
# Google Cloud
|
||||
region = "europe-west2"
|
||||
@ -173,11 +176,11 @@ Verify a managed instance group of workers joins the cluster within a few minute
|
||||
```
|
||||
$ kubectl get nodes
|
||||
NAME STATUS AGE VERSION
|
||||
yavin-controller-0.c.example-com.internal Ready 6m v1.15.3
|
||||
yavin-worker-jrbf.c.example-com.internal Ready 5m v1.15.3
|
||||
yavin-worker-mzdm.c.example-com.internal Ready 5m v1.15.3
|
||||
yavin-16x-worker-jrbf.c.example-com.internal Ready 3m v1.15.3
|
||||
yavin-16x-worker-mzdm.c.example-com.internal Ready 3m v1.15.3
|
||||
yavin-controller-0.c.example-com.internal Ready 6m v1.16.2
|
||||
yavin-worker-jrbf.c.example-com.internal Ready 5m v1.16.2
|
||||
yavin-worker-mzdm.c.example-com.internal Ready 5m v1.16.2
|
||||
yavin-16x-worker-jrbf.c.example-com.internal Ready 3m v1.16.2
|
||||
yavin-16x-worker-mzdm.c.example-com.internal Ready 3m v1.16.2
|
||||
```
|
||||
|
||||
### Variables

@@ -189,9 +192,9 @@ The Google Cloud internal `workers` module supports a number of [variables](http
| Name | Description | Example |
|:-----|:------------|:--------|
| name | Unique name (distinct from cluster name) | "yavin-16x" |
| cluster_name | Must be set to `cluster_name` of cluster | "yavin" |
| region | Region for the worker pool instances. May differ from the cluster's region | "europe-west2" |
| network | Must be set to `network_name` output by cluster | module.cluster.network_name |
| cluster_name | Must be set to `cluster_name` of cluster | "yavin" |
| kubeconfig | Must be set to `kubeconfig` output by cluster | module.cluster.kubeconfig |
| ssh_authorized_key | SSH public key for user 'core' | "ssh-rsa AAAAB3NZ..." |

@@ -206,8 +209,9 @@ Check the list of regions [docs](https://cloud.google.com/compute/docs/regions-z
| os_image | Container Linux image for compute instances | "coreos-stable" | "coreos-alpha", "coreos-beta" |
| disk_size | Size of the disk in GB | 40 | 100 |
| preemptible | If true, Compute Engine will terminate instances randomly within 24 hours | false | true |
| clc_snippets | Container Linux Config snippets | [] | [example](/advanced/customization/#usage) |
| service_cidr | Must match `service_cidr` of cluster | "10.3.0.0/16" | "10.3.0.0/24" |
| cluster_domain_suffix | Must match `cluster_domain_suffix` of cluster | "cluster.local" | "k8s.example.com" |
| node_labels | List of initial node labels | [] | ["worker-pool=foo"] |

Check the list of valid [machine types](https://cloud.google.com/compute/docs/machine-types).
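
A hypothetical pool definition using a few of these optional variables (values mirror the example column; required name/cluster/network arguments are omitted rather than guessed, and the `ref` is assumed to match the cluster's release):

```tf
module "yavin-16x-worker-pool" {
  source = "git::https://github.com/poseidon/typhoon//google-cloud/container-linux/kubernetes/workers?ref=v1.16.2"

  # required name/cluster/network arguments omitted for brevity

  # optional
  os_image    = "coreos-stable"
  disk_size   = 100
  preemptible = true
  node_labels = ["worker-pool=foo"]
}
```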
|
||||
|
||||
|
@ -30,7 +30,7 @@ Together, they diversify Typhoon to support a range of container technologies.
|
||||
|-------------------|-----------------|---------------|
|
||||
| single-master | all platforms | all platforms |
|
||||
| multi-master | all platforms | all platforms |
|
||||
| control plane | self-hosted | self-hosted |
|
||||
| control plane | static pods | static pods |
|
||||
| kubelet image | upstream hyperkube | upstream hyperkube |
|
||||
| control plane images | upstream hyperkube | upstream hyperkube |
|
||||
| on-host etcd | rkt-fly | podman |
|
||||
|
@ -1,10 +1,10 @@
|
||||
# AWS
|
||||
|
||||
In this tutorial, we'll create a Kubernetes v1.15.3 cluster on AWS with Container Linux.
|
||||
In this tutorial, we'll create a Kubernetes v1.16.2 cluster on AWS with Container Linux.
|
||||
|
||||
We'll declare a Kubernetes cluster using the Typhoon Terraform module. Then apply the changes to create a VPC, gateway, subnets, security groups, controller instances, worker auto-scaling group, network load balancer, and TLS assets.
|
||||
|
||||
Controllers are provisioned to run an `etcd-member` peer and a `kubelet` service. Workers run just a `kubelet` service. A one-time [bootkube](https://github.com/kubernetes-incubator/bootkube) bootstrap schedules the `apiserver`, `scheduler`, `controller-manager`, and `coredns` on controllers and schedules `kube-proxy` and `calico` (or `flannel`) on every node. A generated `kubeconfig` provides `kubectl` access to the cluster.
|
||||
Controller hosts are provisioned to run an `etcd-member` peer and a `kubelet` service. Worker hosts run a `kubelet` service. Controller nodes run `kube-apiserver`, `kube-scheduler`, `kube-controller-manager`, and `coredns`, while `kube-proxy` and `calico` (or `flannel`) run on every node. A generated `kubeconfig` provides `kubectl` access to the cluster.
|
||||
|
||||
## Requirements
|
||||
|
||||
@ -18,15 +18,15 @@ Install [Terraform](https://www.terraform.io/downloads.html) v0.12.x on your sys
|
||||
|
||||
```sh
|
||||
$ terraform version
|
||||
Terraform v0.12.6
|
||||
Terraform v0.12.9
|
||||
```
|
||||
|
||||
Add the [terraform-provider-ct](https://github.com/poseidon/terraform-provider-ct) plugin binary for your system to `~/.terraform.d/plugins/`, noting the final name.
|
||||
|
||||
```sh
|
||||
wget https://github.com/poseidon/terraform-provider-ct/releases/download/v0.3.2/terraform-provider-ct-v0.3.2-linux-amd64.tar.gz
|
||||
tar xzf terraform-provider-ct-v0.3.2-linux-amd64.tar.gz
|
||||
mv terraform-provider-ct-v0.3.2-linux-amd64/terraform-provider-ct ~/.terraform.d/plugins/terraform-provider-ct_v0.3.2
|
||||
wget https://github.com/poseidon/terraform-provider-ct/releases/download/v0.4.0/terraform-provider-ct-v0.4.0-linux-amd64.tar.gz
|
||||
tar xzf terraform-provider-ct-v0.4.0-linux-amd64.tar.gz
|
||||
mv terraform-provider-ct-v0.4.0-linux-amd64/terraform-provider-ct ~/.terraform.d/plugins/terraform-provider-ct_v0.4.0
|
||||
```
|
||||
|
||||
Read [concepts](/architecture/concepts/) to learn about Terraform, modules, and organizing resources. Change to your infrastructure repository (e.g. `infra`).
|
||||
@ -49,13 +49,13 @@ Configure the AWS provider to use your access key credentials in a `providers.tf
|
||||
|
||||
```tf
|
||||
provider "aws" {
|
||||
version = "2.23.0"
|
||||
version = "2.31.0"
|
||||
region = "eu-central-1"
|
||||
shared_credentials_file = "/home/user/.config/aws/credentials"
|
||||
}
|
||||
|
||||
provider "ct" {
|
||||
version = "0.3.2"
|
||||
version = "0.4.0"
|
||||
}
|
||||
```
|
||||
|
||||
@ -70,7 +70,7 @@ Define a Kubernetes cluster using the module `aws/container-linux/kubernetes`.
|
||||
|
||||
```tf
|
||||
module "tempest" {
|
||||
source = "git::https://github.com/poseidon/typhoon//aws/container-linux/kubernetes?ref=v1.15.3"
|
||||
source = "git::https://github.com/poseidon/typhoon//aws/container-linux/kubernetes?ref=v1.16.2"
|
||||
|
||||
# AWS
|
||||
cluster_name = "tempest"
|
||||
@ -91,7 +91,7 @@ Reference the [variables docs](#variables) or the [variables.tf](https://github.
|
||||
|
||||
## ssh-agent
|
||||
|
||||
Initial bootstrapping requires `bootkube.service` be started on one controller node. Terraform uses `ssh-agent` to automate this step. Add your SSH private key to `ssh-agent`.
|
||||
Initial bootstrapping requires `bootstrap.service` be started on one controller node. Terraform uses `ssh-agent` to automate this step. Add your SSH private key to `ssh-agent`.
|
||||
|
||||
```sh
|
||||
ssh-add ~/.ssh/id_rsa
|
||||
@ -118,9 +118,9 @@ Apply the changes to create the cluster.
|
||||
```sh
|
||||
$ terraform apply
|
||||
...
|
||||
module.aws-tempest.null_resource.bootkube-start: Still creating... (4m50s elapsed)
|
||||
module.aws-tempest.null_resource.bootkube-start: Still creating... (5m0s elapsed)
|
||||
module.aws-tempest.null_resource.bootkube-start: Creation complete after 11m8s (ID: 3961816482286168143)
|
||||
module.aws-tempest.null_resource.bootstrap: Still creating... (4m50s elapsed)
|
||||
module.aws-tempest.null_resource.bootstrap: Still creating... (5m0s elapsed)
|
||||
module.aws-tempest.null_resource.bootstrap: Creation complete after 11m8s (ID: 3961816482286168143)
|
||||
|
||||
Apply complete! Resources: 98 added, 0 changed, 0 destroyed.
|
||||
```
|
||||
@ -134,10 +134,10 @@ In 4-8 minutes, the Kubernetes cluster will be ready.
|
||||
```
|
||||
$ export KUBECONFIG=/home/user/.secrets/clusters/tempest/auth/kubeconfig
|
||||
$ kubectl get nodes
|
||||
NAME STATUS ROLES AGE VERSION
|
||||
ip-10-0-3-155 Ready controller,master 10m v1.15.3
|
||||
ip-10-0-26-65 Ready node 10m v1.15.3
|
||||
ip-10-0-41-21 Ready node 10m v1.15.3
|
||||
NAME STATUS ROLES AGE VERSION
|
||||
ip-10-0-3-155 Ready <none> 10m v1.16.2
|
||||
ip-10-0-26-65 Ready <none> 10m v1.16.2
|
||||
ip-10-0-41-21 Ready <none> 10m v1.16.2
|
||||
```
|
||||
|
||||
List the pods.
|
||||
@ -150,16 +150,12 @@ kube-system calico-node-7jmr1 2/2 Running 0
|
||||
kube-system calico-node-bknc8 2/2 Running 0 34m
|
||||
kube-system coredns-1187388186-wx1lg 1/1 Running 0 34m
|
||||
kube-system coredns-1187388186-qjnvp 1/1 Running 0 34m
|
||||
kube-system kube-apiserver-4mjbk 1/1 Running 0 34m
|
||||
kube-system kube-controller-manager-3597210155-j2jbt 1/1 Running 1 34m
|
||||
kube-system kube-controller-manager-3597210155-j7g7x 1/1 Running 0 34m
|
||||
kube-system kube-apiserver-ip-10-0-3-155 1/1 Running 0 34m
|
||||
kube-system kube-controller-manager-ip-10-0-3-155 1/1 Running 0 34m
|
||||
kube-system kube-proxy-14wxv 1/1 Running 0 34m
|
||||
kube-system kube-proxy-9vxh2 1/1 Running 0 34m
|
||||
kube-system kube-proxy-sbbsh 1/1 Running 0 34m
|
||||
kube-system kube-scheduler-3359497473-5plhf 1/1 Running 0 34m
|
||||
kube-system kube-scheduler-3359497473-r7zg7 1/1 Running 1 34m
|
||||
kube-system pod-checkpointer-4kxtl 1/1 Running 0 34m
|
||||
kube-system pod-checkpointer-4kxtl-ip-10-0-3-155 1/1 Running 0 33m
|
||||
kube-system kube-scheduler-ip-10-0-3-155 1/1 Running 1 34m
|
||||
```
|
||||
|
||||
## Going Further
|
||||
@ -181,7 +177,7 @@ Check the [variables.tf](https://github.com/poseidon/typhoon/blob/master/aws/con
|
||||
| dns_zone | AWS Route53 DNS zone | "aws.example.com" |
|
||||
| dns_zone_id | AWS Route53 DNS zone id | "Z3PAABBCFAKEC0" |
|
||||
| ssh_authorized_key | SSH public key for user 'core' | "ssh-rsa AAAAB3NZ..." |
|
||||
| asset_dir | Path to a directory where generated assets should be placed (contains secrets) | "/home/user/.secrets/clusters/tempest" |
|
||||
| asset_dir | Absolute path to a directory where generated assets should be placed (contains secrets) | "/home/user/.secrets/clusters/tempest" |
|
||||
|
||||
#### DNS Zone
|
||||
|
||||
@ -195,7 +191,7 @@ resource "aws_route53_zone" "zone-for-clusters" {
|
||||
}
|
||||
```
|
||||
|
||||
Reference the DNS zone id with `"${aws_route53_zone.zone-for-clusters.zone_id}"`.
|
||||
Reference the DNS zone id with `aws_route53_zone.zone-for-clusters.zone_id`.
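
For example, the cluster definition from this tutorial would consume the zone and its id (fragment; other `module "tempest"` variables as shown earlier):

```tf
module "tempest" {
  # ... other variables as defined above ...

  dns_zone    = "aws.example.com"
  dns_zone_id = aws_route53_zone.zone-for-clusters.zone_id
}
```
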
!!! tip ""
|
||||
If you have an existing domain name with a zone file elsewhere, just delegate a subdomain that can be managed on Route53 (e.g. aws.mydomain.com) and [update nameservers](http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/SOA-NSrecords.html).
|
||||
@ -209,11 +205,11 @@ Reference the DNS zone id with `"${aws_route53_zone.zone-for-clusters.zone_id}"`
|
||||
| controller_type | EC2 instance type for controllers | "t3.small" | See below |
|
||||
| worker_type | EC2 instance type for workers | "t3.small" | See below |
|
||||
| os_image | AMI channel for a Container Linux derivative | coreos-stable | coreos-stable, coreos-beta, coreos-alpha, flatcar-stable, flatcar-beta, flatcar-alpha, flatcar-edge |
|
||||
| disk_size | Size of the EBS volume in GB | "40" | "100" |
|
||||
| disk_size | Size of the EBS volume in GB | 40 | 100 |
|
||||
| disk_type | Type of the EBS volume | "gp2" | standard, gp2, io1 |
|
||||
| disk_iops | IOPS of the EBS volume | "0" (i.e. auto) | "400" |
|
||||
| worker_target_groups | Target group ARNs to which worker instances should be added | [] | ["${aws_lb_target_group.app.id}"] |
|
||||
| worker_price | Spot price in USD for workers. Leave as default empty string for regular on-demand instances | "" | "0.10" |
|
||||
| disk_iops | IOPS of the EBS volume | 0 (i.e. auto) | 400 |
|
||||
| worker_target_groups | Target group ARNs to which worker instances should be added | [] | [aws_lb_target_group.app.id] |
|
||||
| worker_price | Spot price in USD for worker instances or 0 to use on-demand instances | 0/null | 0.10 |
|
||||
| controller_clc_snippets | Controller Container Linux Config snippets | [] | [example](/advanced/customization/) |
|
||||
| worker_clc_snippets | Worker Container Linux Config snippets | [] | [example](/advanced/customization/) |
|
||||
| networking | Choice of networking provider | "calico" | "calico" or "flannel" |
|
||||
@ -221,7 +217,7 @@ Reference the DNS zone id with `"${aws_route53_zone.zone-for-clusters.zone_id}"`
|
||||
| host_cidr | CIDR IPv4 range to assign to EC2 instances | "10.0.0.0/16" | "10.1.0.0/16" |
|
||||
| pod_cidr | CIDR IPv4 range to assign to Kubernetes pods | "10.2.0.0/16" | "10.22.0.0/16" |
|
||||
| service_cidr | CIDR IPv4 range to assign to Kubernetes services | "10.3.0.0/16" | "10.3.0.0/24" |
|
||||
| cluster_domain_suffix | FQDN suffix for Kubernetes services answered by coredns. | "cluster.local" | "k8s.example.com" |
|
||||
| worker_node_labels | List of initial worker node labels | [] | ["worker-pool=default"] |
|
||||
|
||||
Check the list of valid [instance types](https://aws.amazon.com/ec2/instance-types/).
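
For example, spot-priced workers registered to an existing target group could be configured inside the `module "tempest"` block with a fragment like this (the `aws_lb_target_group.app` resource is assumed to be defined elsewhere):

```tf
  worker_type          = "t3.small"
  worker_price         = 0.10                         # spot bid in USD; 0 keeps on-demand workers
  worker_target_groups = [aws_lb_target_group.app.id] # assumed target group, defined elsewhere
  worker_node_labels   = ["worker-pool=default"]
```
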
@ -3,11 +3,11 @@
|
||||
!!! danger
|
||||
Typhoon for Azure is alpha. For production, use AWS, Google Cloud, or bare-metal. As Azure matures, check [errata](https://github.com/poseidon/typhoon/wiki/Errata) for known shortcomings.
|
||||
|
||||
In this tutorial, we'll create a Kubernetes v1.15.3 cluster on Azure with Container Linux.
|
||||
In this tutorial, we'll create a Kubernetes v1.16.2 cluster on Azure with Container Linux.
|
||||
|
||||
We'll declare a Kubernetes cluster using the Typhoon Terraform module. Then apply the changes to create a resource group, virtual network, subnets, security groups, controller availability set, worker scale set, load balancer, and TLS assets.
|
||||
|
||||
Controllers are provisioned to run an `etcd-member` peer and a `kubelet` service. Workers run just a `kubelet` service. A one-time [bootkube](https://github.com/kubernetes-incubator/bootkube) bootstrap schedules the `apiserver`, `scheduler`, `controller-manager`, and `coredns` on controllers and schedules `kube-proxy` and `flannel` on every node. A generated `kubeconfig` provides `kubectl` access to the cluster.
|
||||
Controller hosts are provisioned to run an `etcd-member` peer and a `kubelet` service. Worker hosts run a `kubelet` service. Controller nodes run `kube-apiserver`, `kube-scheduler`, `kube-controller-manager`, and `coredns`, while `kube-proxy` and `calico` (or `flannel`) run on every node. A generated `kubeconfig` provides `kubectl` access to the cluster.
|
||||
|
||||
## Requirements
|
||||
|
||||
@ -21,15 +21,15 @@ Install [Terraform](https://www.terraform.io/downloads.html) v0.12.x on your sys
|
||||
|
||||
```sh
|
||||
$ terraform version
|
||||
Terraform v0.12.6
|
||||
Terraform v0.12.9
|
||||
```
|
||||
|
||||
Add the [terraform-provider-ct](https://github.com/poseidon/terraform-provider-ct) plugin binary for your system to `~/.terraform.d/plugins/`, noting the final name.
|
||||
|
||||
```sh
|
||||
wget https://github.com/poseidon/terraform-provider-ct/releases/download/v0.3.2/terraform-provider-ct-v0.3.2-linux-amd64.tar.gz
|
||||
tar xzf terraform-provider-ct-v0.3.2-linux-amd64.tar.gz
|
||||
mv terraform-provider-ct-v0.3.2-linux-amd64/terraform-provider-ct ~/.terraform.d/plugins/terraform-provider-ct_v0.3.2
|
||||
wget https://github.com/poseidon/terraform-provider-ct/releases/download/v0.4.0/terraform-provider-ct-v0.4.0-linux-amd64.tar.gz
|
||||
tar xzf terraform-provider-ct-v0.4.0-linux-amd64.tar.gz
|
||||
mv terraform-provider-ct-v0.4.0-linux-amd64/terraform-provider-ct ~/.terraform.d/plugins/terraform-provider-ct_v0.4.0
|
||||
```
|
||||
|
||||
Read [concepts](/architecture/concepts/) to learn about Terraform, modules, and organizing resources. Change to your infrastructure repository (e.g. `infra`).
|
||||
@ -50,11 +50,11 @@ Configure the Azure provider in a `providers.tf` file.
|
||||
|
||||
```tf
|
||||
provider "azurerm" {
|
||||
version = "1.32.1"
|
||||
version = "1.35.0"
|
||||
}
|
||||
|
||||
provider "ct" {
|
||||
version = "0.3.2"
|
||||
version = "0.4.0"
|
||||
}
|
||||
```
|
||||
|
||||
@ -66,7 +66,7 @@ Define a Kubernetes cluster using the module `azure/container-linux/kubernetes`.
|
||||
|
||||
```tf
|
||||
module "ramius" {
|
||||
source = "git::https://github.com/poseidon/typhoon//azure/container-linux/kubernetes?ref=v1.15.3"
|
||||
source = "git::https://github.com/poseidon/typhoon//azure/container-linux/kubernetes?ref=v1.16.2"
|
||||
|
||||
# Azure
|
||||
cluster_name = "ramius"
|
||||
@ -88,7 +88,7 @@ Reference the [variables docs](#variables) or the [variables.tf](https://github.
|
||||
|
||||
## ssh-agent
|
||||
|
||||
Initial bootstrapping requires `bootkube.service` be started on one controller node. Terraform uses `ssh-agent` to automate this step. Add your SSH private key to `ssh-agent`.
|
||||
Initial bootstrapping requires `bootstrap.service` be started on one controller node. Terraform uses `ssh-agent` to automate this step. Add your SSH private key to `ssh-agent`.
|
||||
|
||||
```sh
|
||||
ssh-add ~/.ssh/id_rsa
|
||||
@ -115,9 +115,9 @@ Apply the changes to create the cluster.
|
||||
```sh
|
||||
$ terraform apply
|
||||
...
|
||||
module.azure-ramius.null_resource.bootkube-start: Still creating... (6m50s elapsed)
|
||||
module.azure-ramius.null_resource.bootkube-start: Still creating... (7m0s elapsed)
|
||||
module.azure-ramius.null_resource.bootkube-start: Creation complete after 7m8s (ID: 3961816482286168143)
|
||||
module.azure-ramius.null_resource.bootstrap: Still creating... (6m50s elapsed)
|
||||
module.azure-ramius.null_resource.bootstrap: Still creating... (7m0s elapsed)
|
||||
module.azure-ramius.null_resource.bootstrap: Creation complete after 7m8s (ID: 3961816482286168143)
|
||||
|
||||
Apply complete! Resources: 86 added, 0 changed, 0 destroyed.
|
||||
```
|
||||
@ -131,10 +131,10 @@ In 4-8 minutes, the Kubernetes cluster will be ready.
|
||||
```
|
||||
$ export KUBECONFIG=/home/user/.secrets/clusters/ramius/auth/kubeconfig
|
||||
$ kubectl get nodes
|
||||
NAME STATUS ROLES AGE VERSION
|
||||
ramius-controller-0 Ready controller,master 24m v1.15.3
|
||||
ramius-worker-000001 Ready node 25m v1.15.3
|
||||
ramius-worker-000002 Ready node 24m v1.15.3
|
||||
NAME STATUS ROLES AGE VERSION
|
||||
ramius-controller-0 Ready <none> 24m v1.16.2
|
||||
ramius-worker-000001 Ready <none> 25m v1.16.2
|
||||
ramius-worker-000002 Ready <none> 24m v1.16.2
|
||||
```
|
||||
|
||||
List the pods.
|
||||
@ -144,19 +144,15 @@ $ kubectl get pods --all-namespaces
|
||||
NAMESPACE NAME READY STATUS RESTARTS AGE
|
||||
kube-system coredns-7c6fbb4f4b-b6qzx 1/1 Running 0 26m
|
||||
kube-system coredns-7c6fbb4f4b-j2k3d 1/1 Running 0 26m
|
||||
kube-system flannel-bwf24 2/2 Running 2 26m
|
||||
kube-system flannel-ks5qb 2/2 Running 0 26m
|
||||
kube-system flannel-tq2wg 2/2 Running 0 26m
|
||||
kube-system kube-apiserver-hxgsx 1/1 Running 3 26m
|
||||
kube-system kube-controller-manager-5ff9cd7bb6-b942n 1/1 Running 0 26m
|
||||
kube-system kube-controller-manager-5ff9cd7bb6-bbr6w 1/1 Running 0 26m
|
||||
kube-system calico-node-1m5bf 2/2 Running 0 26m
|
||||
kube-system calico-node-7jmr1 2/2 Running 0 26m
|
||||
kube-system calico-node-bknc8 2/2 Running 0 26m
|
||||
kube-system kube-apiserver-ramius-controller-0 1/1 Running 0 26m
|
||||
kube-system kube-controller-manager-ramius-controller-0 1/1 Running 0 26m
|
||||
kube-system kube-proxy-j4vpq 1/1 Running 0 26m
|
||||
kube-system kube-proxy-jxr5d 1/1 Running 0 26m
|
||||
kube-system kube-proxy-lbdw5 1/1 Running 0 26m
|
||||
kube-system kube-scheduler-5f76d69686-s4fbx 1/1 Running 0 26m
|
||||
kube-system kube-scheduler-5f76d69686-vgdgn 1/1 Running 0 26m
|
||||
kube-system pod-checkpointer-cnqdg 1/1 Running 0 26m
|
||||
kube-system pod-checkpointer-cnqdg-ramius-controller-0 1/1 Running 0 25m
|
||||
kube-system kube-scheduler-ramius-controller-0 1/1 Running 0 26m
|
||||
```
|
||||
|
||||
## Going Further
|
||||
@ -179,7 +175,7 @@ Check the [variables.tf](https://github.com/poseidon/typhoon/blob/master/azure/c
|
||||
| dns_zone | Azure DNS zone | "azure.example.com" |
|
||||
| dns_zone_group | Resource group where the Azure DNS zone resides | "global" |
|
||||
| ssh_authorized_key | SSH public key for user 'core' | "ssh-rsa AAAAB3NZ..." |
|
||||
| asset_dir | Path to a directory where generated assets should be placed (contains secrets) | "/home/user/.secrets/clusters/ramius" |
|
||||
| asset_dir | Absolute path to a directory where generated assets should be placed (contains secrets) | "/home/user/.secrets/clusters/ramius" |
|
||||
|
||||
!!! tip
|
||||
Regions are shown in [docs](https://azure.microsoft.com/en-us/global-infrastructure/regions/) or with `az account list-locations --output table`.
|
||||
@ -199,14 +195,14 @@ resource "azurerm_resource_group" "global" {
|
||||
|
||||
# DNS zone for clusters
|
||||
resource "azurerm_dns_zone" "clusters" {
|
||||
resource_group_name = "${azurerm_resource_group.global.name}"
|
||||
resource_group_name = azurerm_resource_group.global.name
|
||||
|
||||
name = "azure.example.com"
|
||||
zone_type = "Public"
|
||||
}
|
||||
```
|
||||
|
||||
Reference the DNS zone with `"${azurerm_dns_zone.clusters.name}"` and its resource group with `"${azurerm_resource_group.global.name}"`.
|
||||
Reference the DNS zone with `azurerm_dns_zone.clusters.name` and its resource group with `azurerm_resource_group.global.name`.
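
For example, the cluster definition from this tutorial would pass both values (fragment; other `module "ramius"` variables as shown earlier):

```tf
module "ramius" {
  # ... other variables as defined above ...

  dns_zone       = azurerm_dns_zone.clusters.name
  dns_zone_group = azurerm_resource_group.global.name
}
```
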
!!! tip ""
|
||||
If you have an existing domain name with a zone file elsewhere, just delegate a subdomain that can be managed on Azure DNS (e.g. azure.mydomain.com) and [update nameservers](https://docs.microsoft.com/en-us/azure/dns/dns-delegate-domain-azure-dns).
|
||||
@ -217,18 +213,18 @@ Reference the DNS zone with `"${azurerm_dns_zone.clusters.name}"` and its resour
|
||||
|:-----|:------------|:--------|:--------|
|
||||
| controller_count | Number of controllers (i.e. masters) | 1 | 1 |
|
||||
| worker_count | Number of workers | 1 | 3 |
|
||||
| controller_type | Machine type for controllers | "Standard_DS1_v2" | See below |
|
||||
| worker_type | Machine type for workers | "Standard_F1" | See below |
|
||||
| os_image | Channel for a Container Linux derivative | coreos-stable | coreos-stable, coreos-beta, coreos-alpha |
|
||||
| disk_size | Size of the disk in GB | "40" | "100" |
|
||||
| controller_type | Machine type for controllers | "Standard_B2s" | See below |
|
||||
| worker_type | Machine type for workers | "Standard_DS1_v2" | See below |
|
||||
| os_image | Channel for a Container Linux derivative | "coreos-stable" | coreos-stable, coreos-beta, coreos-alpha |
|
||||
| disk_size | Size of the disk in GB | 40 | 100 |
|
||||
| worker_priority | Set priority to Low to use reduced cost surplus capacity, with the tradeoff that instances can be deallocated at any time | Regular | Low |
|
||||
| controller_clc_snippets | Controller Container Linux Config snippets | [] | [example](/advanced/customization/#usage) |
|
||||
| worker_clc_snippets | Worker Container Linux Config snippets | [] | [example](/advanced/customization/#usage) |
|
||||
| networking | Choice of networking provider | "flannel" | "flannel" or "calico" (experimental) |
|
||||
| networking | Choice of networking provider | "calico" | "flannel" or "calico" |
|
||||
| host_cidr | CIDR IPv4 range to assign to instances | "10.0.0.0/16" | "10.0.0.0/20" |
|
||||
| pod_cidr | CIDR IPv4 range to assign to Kubernetes pods | "10.2.0.0/16" | "10.22.0.0/16" |
|
||||
| service_cidr | CIDR IPv4 range to assign to Kubernetes services | "10.3.0.0/16" | "10.3.0.0/24" |
|
||||
| cluster_domain_suffix | FQDN suffix for Kubernetes services answered by coredns. | "cluster.local" | "k8s.example.com" |
|
||||
| worker_node_labels | List of initial worker node labels | [] | ["worker-pool=default"] |
|
||||
|
||||
Check the list of valid [machine types](https://azure.microsoft.com/en-us/pricing/details/virtual-machines/linux/) and their [specs](https://docs.microsoft.com/en-us/azure/virtual-machines/linux/sizes-general). Use `az vm list-skus` to get the identifier.
|
||||
|
||||
@ -236,7 +232,7 @@ Check the list of valid [machine types](https://azure.microsoft.com/en-us/pricin
|
||||
Unlike AWS and GCP, Azure requires its *virtual* networks to have non-overlapping IPv4 CIDRs (yeah, go figure). Instead of each cluster just using `10.0.0.0/16` for instances, each Azure cluster's `host_cidr` must be non-overlapping (e.g. 10.0.0.0/20 for the 1st cluster, 10.0.16.0/20 for the 2nd cluster, etc).
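
For example, a sketch of two clusters with non-overlapping `host_cidr` ranges (the second module name is illustrative):

```tf
module "ramius" {
  # ... 1st cluster ...
  host_cidr = "10.0.0.0/20"
}

module "ramius-two" {
  # ... 2nd cluster ...
  host_cidr = "10.0.16.0/20"
}
```
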
!!! warning
|
||||
Do not choose a `controller_type` smaller than `Standard_DS1_v2`. Smaller instances are not sufficient for running a controller.
|
||||
Do not choose a `controller_type` smaller than `Standard_B2s`. Smaller instances are not sufficient for running a controller.
|
||||
|
||||
#### Low Priority
|
||||
|
||||
|
@ -1,10 +1,10 @@
|
||||
# Bare-Metal
|
||||
|
||||
In this tutorial, we'll network boot and provision a Kubernetes v1.15.3 cluster on bare-metal with Container Linux.
|
||||
In this tutorial, we'll network boot and provision a Kubernetes v1.16.2 cluster on bare-metal with Container Linux.
|
||||
|
||||
First, we'll deploy a [Matchbox](https://github.com/poseidon/matchbox) service and set up a network boot environment. Then, we'll declare a Kubernetes cluster using the Typhoon Terraform module and power on machines. On PXE boot, machines will install Container Linux to disk, reboot into the disk install, and provision themselves as Kubernetes controllers or workers via Ignition.
|
||||
|
||||
Controllers are provisioned to run an `etcd-member` peer and a `kubelet` service. Workers run just a `kubelet` service. A one-time [bootkube](https://github.com/kubernetes-incubator/bootkube) bootstrap schedules the `apiserver`, `scheduler`, `controller-manager`, and `coredns` on controllers and schedules `kube-proxy` and `calico` (or `flannel`) on every node. A generated `kubeconfig` provides `kubectl` access to the cluster.
|
||||
Controller hosts are provisioned to run an `etcd-member` peer and a `kubelet` service. Worker hosts run a `kubelet` service. Controller nodes run `kube-apiserver`, `kube-scheduler`, `kube-controller-manager`, and `coredns` while `kube-proxy` and `calico` (or `flannel`) run on every node. A generated `kubeconfig` provides `kubectl` access to the cluster.
|
||||
|
||||
## Requirements
|
||||
|
||||
@ -111,7 +111,7 @@ Install [Terraform](https://www.terraform.io/downloads.html) v0.12.x on your sys
|
||||
|
||||
```sh
|
||||
$ terraform version
|
||||
Terraform v0.12.6
|
||||
Terraform v0.12.9
|
||||
```
|
||||
|
||||
Add the [terraform-provider-matchbox](https://github.com/poseidon/terraform-provider-matchbox) plugin binary for your system to `~/.terraform.d/plugins/`, noting the final name.
|
||||
@ -125,9 +125,9 @@ mv terraform-provider-matchbox-v0.3.0-linux-amd64/terraform-provider-matchbox ~/
|
||||
Add the [terraform-provider-ct](https://github.com/poseidon/terraform-provider-ct) plugin binary for your system to `~/.terraform.d/plugins/`, noting the final name.
|
||||
|
||||
```sh
|
||||
wget https://github.com/poseidon/terraform-provider-ct/releases/download/v0.3.2/terraform-provider-ct-v0.3.2-linux-amd64.tar.gz
|
||||
tar xzf terraform-provider-ct-v0.3.2-linux-amd64.tar.gz
|
||||
mv terraform-provider-ct-v0.3.2-linux-amd64/terraform-provider-ct ~/.terraform.d/plugins/terraform-provider-ct_v0.3.2
|
||||
wget https://github.com/poseidon/terraform-provider-ct/releases/download/v0.4.0/terraform-provider-ct-v0.4.0-linux-amd64.tar.gz
|
||||
tar xzf terraform-provider-ct-v0.4.0-linux-amd64.tar.gz
|
||||
mv terraform-provider-ct-v0.4.0-linux-amd64/terraform-provider-ct ~/.terraform.d/plugins/terraform-provider-ct_v0.4.0
|
||||
```
|
||||
|
||||
Read [concepts](/architecture/concepts/) to learn about Terraform, modules, and organizing resources. Change to your infrastructure repository (e.g. `infra`).
|
||||
@ -144,13 +144,13 @@ Configure the Matchbox provider to use your Matchbox API endpoint and client cer
|
||||
provider "matchbox" {
|
||||
version = "0.3.0"
|
||||
endpoint = "matchbox.example.com:8081"
|
||||
client_cert = "${file("~/.config/matchbox/client.crt")}"
|
||||
client_key = "${file("~/.config/matchbox/client.key")}"
|
||||
ca = "${file("~/.config/matchbox/ca.crt")}"
|
||||
client_cert = file("~/.config/matchbox/client.crt")
|
||||
client_key = file("~/.config/matchbox/client.key")
|
||||
ca = file("~/.config/matchbox/ca.crt")
|
||||
}
|
||||
|
||||
provider "ct" {
|
||||
version = "0.3.2"
|
||||
version = "0.4.0"
|
||||
}
|
||||
```
|
||||
|
||||
@ -160,13 +160,13 @@ Define a Kubernetes cluster using the module `bare-metal/container-linux/kuberne
|
||||
|
||||
```tf
|
||||
module "bare-metal-mercury" {
|
||||
source = "git::https://github.com/poseidon/typhoon//bare-metal/container-linux/kubernetes?ref=v1.15.3"
|
||||
source = "git::https://github.com/poseidon/typhoon//bare-metal/container-linux/kubernetes?ref=v1.16.2"
|
||||
|
||||
# bare-metal
|
||||
cluster_name = "mercury"
|
||||
matchbox_http_endpoint = "http://matchbox.example.com"
|
||||
os_channel = "coreos-stable"
|
||||
os_version = "1632.3.0"
|
||||
os_version = "2191.5.0"
|
||||
|
||||
# configuration
|
||||
k8s_domain_name = "node1.example.com"
|
||||
@ -174,20 +174,22 @@ module "bare-metal-mercury" {
|
||||
asset_dir = "/home/user/.secrets/clusters/mercury"
|
||||
|
||||
# machines
|
||||
controller_names = ["node1"]
|
||||
controller_macs = ["52:54:00:a1:9c:ae"]
|
||||
controller_domains = ["node1.example.com"]
|
||||
worker_names = [
|
||||
"node2",
|
||||
"node3",
|
||||
]
|
||||
worker_macs = [
|
||||
"52:54:00:b2:2f:86",
|
||||
"52:54:00:c3:61:77",
|
||||
]
|
||||
worker_domains = [
|
||||
"node2.example.com",
|
||||
"node3.example.com",
|
||||
controllers = [{
|
||||
name = "node1"
|
||||
mac = "52:54:00:a1:9c:ae"
|
||||
domain = "node1.example.com"
|
||||
}]
|
||||
workers = [
|
||||
{
|
||||
name = "node2",
|
||||
mac = "52:54:00:b2:2f:86"
|
||||
domain = "node2.example.com"
|
||||
},
|
||||
{
|
||||
name = "node3",
|
||||
mac = "52:54:00:c3:61:77"
|
||||
domain = "node3.example.com"
|
||||
}
|
||||
]
|
||||
|
||||
# set to http only if you cannot chainload to iPXE firmware with https support
|
||||
@ -199,7 +201,7 @@ Reference the [variables docs](#variables) or the [variables.tf](https://github.
|
||||
|
||||
## ssh-agent
|
||||
|
||||
Initial bootstrapping requires `bootkube.service` be started on one controller node. Terraform uses `ssh-agent` to automate this step. Add your SSH private key to `ssh-agent`.
|
||||
Initial bootstrapping requires `bootstrap.service` be started on one controller node. Terraform uses `ssh-agent` to automate this step. Add your SSH private key to `ssh-agent`.
|
||||
|
||||
```sh
|
||||
ssh-add ~/.ssh/id_rsa
|
||||
@ -221,14 +223,12 @@ $ terraform plan
|
||||
Plan: 55 to add, 0 to change, 0 to destroy.
|
||||
```
|
||||
|
||||
Apply the changes. Terraform will generate bootkube assets to `asset_dir` and create Matchbox profiles (e.g. controller, worker) and matching rules via the Matchbox API.
|
||||
Apply the changes. Terraform will generate bootstrap assets to `asset_dir` and create Matchbox profiles (e.g. controller, worker) and matching rules via the Matchbox API.
|
||||
|
||||
```sh
|
||||
$ terraform apply
|
||||
module.bare-metal-mercury.null_resource.copy-kubeconfig.0: Provisioning with 'file'...
|
||||
module.bare-metal-mercury.null_resource.copy-etcd-secrets.0: Provisioning with 'file'...
|
||||
module.bare-metal-mercury.null_resource.copy-kubeconfig.0: Still creating... (10s elapsed)
|
||||
module.bare-metal-mercury.null_resource.copy-etcd-secrets.0: Still creating... (10s elapsed)
|
||||
module.bare-metal-mercury.null_resource.copy-controller-secrets.0: Still creating... (10s elapsed)
|
||||
module.bare-metal-mercury.null_resource.copy-worker-secrets.0: Still creating... (10s elapsed)
|
||||
...
|
||||
```
|
||||
|
||||
@ -250,14 +250,14 @@ Machines will network boot, install Container Linux to disk, reboot into the dis
|
||||
|
||||
### Bootstrap
|
||||
|
||||
Wait for the `bootkube-start` step to finish bootstrapping the Kubernetes control plane. This may take 5-15 minutes depending on your network.
|
||||
Wait for the `bootstrap` step to finish bootstrapping the Kubernetes control plane. This may take 5-15 minutes depending on your network.
|
||||
|
||||
```
|
||||
module.bare-metal-mercury.null_resource.bootkube-start: Still creating... (6m10s elapsed)
|
||||
module.bare-metal-mercury.null_resource.bootkube-start: Still creating... (6m20s elapsed)
|
||||
module.bare-metal-mercury.null_resource.bootkube-start: Still creating... (6m30s elapsed)
|
||||
module.bare-metal-mercury.null_resource.bootkube-start: Still creating... (6m40s elapsed)
|
||||
module.bare-metal-mercury.null_resource.bootkube-start: Creation complete (ID: 5441741360626669024)
|
||||
module.bare-metal-mercury.null_resource.bootstrap: Still creating... (6m10s elapsed)
|
||||
module.bare-metal-mercury.null_resource.bootstrap: Still creating... (6m20s elapsed)
|
||||
module.bare-metal-mercury.null_resource.bootstrap: Still creating... (6m30s elapsed)
|
||||
module.bare-metal-mercury.null_resource.bootstrap: Still creating... (6m40s elapsed)
|
||||
module.bare-metal-mercury.null_resource.bootstrap: Creation complete (ID: 5441741360626669024)
|
||||
|
||||
Apply complete! Resources: 55 added, 0 changed, 0 destroyed.
|
||||
```
|
||||
@ -265,9 +265,9 @@ Apply complete! Resources: 55 added, 0 changed, 0 destroyed.
|
||||
To watch the install to disk (until machines reboot from disk), SSH to port 2222.
|
||||
|
||||
```
|
||||
# before v1.15.3
|
||||
# before v1.16.2
|
||||
$ ssh debug@node1.example.com
|
||||
# after v1.15.3
|
||||
# after v1.16.2
|
||||
$ ssh -p 2222 core@node1.example.com
|
||||
```
|
||||
|
||||
@ -275,13 +275,12 @@ To watch the bootstrap process in detail, SSH to the first controller and journa
|
||||
|
||||
```
|
||||
$ ssh core@node1.example.com
|
||||
$ journalctl -f -u bootkube
|
||||
bootkube[5]: Pod Status: pod-checkpointer Running
|
||||
bootkube[5]: Pod Status: kube-apiserver Running
|
||||
bootkube[5]: Pod Status: kube-scheduler Running
|
||||
bootkube[5]: Pod Status: kube-controller-manager Running
|
||||
bootkube[5]: All self-hosted control plane components successfully started
|
||||
bootkube[5]: Tearing down temporary bootstrap control plane...
|
||||
$ journalctl -f -u bootstrap
|
||||
podman[1750]: The connection to the server cluster.example.com:6443 was refused - did you specify the right host or port?
|
||||
podman[1750]: Waiting for static pod control plane
|
||||
...
|
||||
podman[1750]: serviceaccount/calico-node unchanged
|
||||
systemd[1]: Started Kubernetes control plane.
|
||||
```
|
||||
|
||||
## Verify
|
||||
@ -291,10 +290,10 @@ bootkube[5]: Tearing down temporary bootstrap control plane...
|
||||
```
|
||||
$ export KUBECONFIG=/home/user/.secrets/clusters/mercury/auth/kubeconfig
|
||||
$ kubectl get nodes
|
||||
NAME STATUS ROLES AGE VERSION
|
||||
node1.example.com Ready controller,master 10m v1.15.3
|
||||
node2.example.com Ready node 10m v1.15.3
|
||||
node3.example.com Ready node 10m v1.15.3
|
||||
NAME STATUS ROLES AGE VERSION
|
||||
node1.example.com Ready <none> 10m v1.16.2
|
||||
node2.example.com Ready <none> 10m v1.16.2
|
||||
node3.example.com Ready <none> 10m v1.16.2
|
||||
```
|
||||
|
||||
List the pods.
|
||||
@ -307,16 +306,12 @@ kube-system calico-node-gnjrm 2/2 Running 0
|
||||
kube-system calico-node-llbgt 2/2 Running 0 11m
|
||||
kube-system coredns-1187388186-dj3pd 1/1 Running 0 11m
|
||||
kube-system coredns-1187388186-mx9rt 1/1 Running 0 11m
|
||||
kube-system kube-apiserver-7336w 1/1 Running 0 11m
|
||||
kube-system kube-controller-manager-3271970485-b9chx 1/1 Running 0 11m
|
||||
kube-system kube-controller-manager-3271970485-v30js 1/1 Running 1 11m
|
||||
kube-system kube-apiserver-node1.example.com 1/1 Running 0 11m
|
||||
kube-system kube-controller-manager-node1.example.com 1/1 Running 1 11m
|
||||
kube-system kube-proxy-50sd4 1/1 Running 0 11m
|
||||
kube-system kube-proxy-bczhp 1/1 Running 0 11m
|
||||
kube-system kube-proxy-mp2fw 1/1 Running 0 11m
|
||||
kube-system kube-scheduler-3895335239-fd3l7 1/1 Running 1 11m
|
||||
kube-system kube-scheduler-3895335239-hfjv0 1/1 Running 0 11m
|
||||
kube-system pod-checkpointer-wf65d 1/1 Running 0 11m
|
||||
kube-system pod-checkpointer-wf65d-node1.example.com 1/1 Running 0 11m
|
||||
kube-system kube-scheduler-node1.example.com 1/1 Running 0 11m
|
||||
```
|
||||
|
||||
## Going Further
|
||||
@ -334,19 +329,15 @@ Check the [variables.tf](https://github.com/poseidon/typhoon/blob/master/bare-me
|
||||
|
||||
| Name | Description | Example |
|
||||
|:-----|:------------|:--------|
|
||||
| cluster_name | Unique cluster name | mercury |
|
||||
| matchbox_http_endpoint | Matchbox HTTP read-only endpoint | http://matchbox.example.com:port |
|
||||
| cluster_name | Unique cluster name | "mercury" |
|
||||
| matchbox_http_endpoint | Matchbox HTTP read-only endpoint | "http://matchbox.example.com:port" |
|
||||
| os_channel | Channel for a Container Linux derivative | coreos-stable, coreos-beta, coreos-alpha, flatcar-stable, flatcar-beta, flatcar-alpha, flatcar-edge |
|
||||
| os_version | Version for a Container Linux derivative to PXE and install | 1632.3.0 |
|
||||
| os_version | Version for a Container Linux derivative to PXE and install | "1632.3.0" |
|
||||
| k8s_domain_name | FQDN resolving to the controller(s) nodes. Workers and kubectl will communicate with this endpoint | "myk8s.example.com" |
|
||||
| ssh_authorized_key | SSH public key for user 'core' | "ssh-rsa AAAAB3Nz..." |
|
||||
| asset_dir | Path to a directory where generated assets should be placed (contains secrets) | "/home/user/.secrets/clusters/mercury" |
|
||||
| controller_names | Ordered list of controller short names | ["node1"] |
|
||||
| controller_macs | Ordered list of controller identifying MAC addresses | ["52:54:00:a1:9c:ae"] |
|
||||
| controller_domains | Ordered list of controller FQDNs | ["node1.example.com"] |
|
||||
| worker_names | Ordered list of worker short names | ["node2", "node3"] |
|
||||
| worker_macs | Ordered list of worker identifying MAC addresses | ["52:54:00:b2:2f:86", "52:54:00:c3:61:77"] |
|
||||
| worker_domains | Ordered list of worker FQDNs | ["node2.example.com", "node3.example.com"] |
|
||||
| asset_dir | Absolute path to a directory where generated assets should be placed (contains secrets) | "/home/user/.secrets/clusters/mercury" |
|
||||
| controllers | List of controller machine detail objects (unique name, identifying MAC address, FQDN) | `[{name="node1", mac="52:54:00:a1:9c:ae", domain="node1.example.com"}]` |
|
||||
| workers | List of worker machine detail objects (unique name, identifying MAC address, FQDN) | `[{name="node2", mac="52:54:00:b2:2f:86", domain="node2.example.com"}, {name="node3", mac="52:54:00:c3:61:77", domain="node3.example.com"}]` |
|
||||
|
||||
### Optional
|
||||
|
||||
@ -358,9 +349,8 @@ Check the [variables.tf](https://github.com/poseidon/typhoon/blob/master/bare-me
|
||||
| networking | Choice of networking provider | "calico" | "calico" or "flannel" |
|
||||
| network_mtu | CNI interface MTU (calico-only) | 1480 | - |
|
||||
| clc_snippets | Map from machine names to lists of Container Linux Config snippets | {} | [example](/advanced/customization/#usage) |
|
||||
| network_ip_autodetection_method | Method to detect host IPv4 address (calico-only) | first-found | can-reach=10.0.0.1 |
|
||||
| network_ip_autodetection_method | Method to detect host IPv4 address (calico-only) | "first-found" | "can-reach=10.0.0.1" |
|
||||
| pod_cidr | CIDR IPv4 range to assign to Kubernetes pods | "10.2.0.0/16" | "10.22.0.0/16" |
|
||||
| service_cidr | CIDR IPv4 range to assign to Kubernetes services | "10.3.0.0/16" | "10.3.0.0/24" |
|
||||
| cluster_domain_suffix | FQDN suffix for Kubernetes services answered by coredns. | "cluster.local" | "k8s.example.com" |
|
||||
| kernel_args | Additional kernel args to provide at PXE boot | [] | "kvm-intel.nested=1" |
|
||||
| kernel_args | Additional kernel args to provide at PXE boot | [] | ["kvm-intel.nested=1"] |
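
For example, per-machine Container Linux Config snippets and extra kernel arguments would be added to the `module "bare-metal-mercury"` block with a fragment such as this (the snippet file path is illustrative):

```tf
  clc_snippets = {
    node2 = [file("./snippets/node2-raid.yaml")]  # map from machine name to a list of snippets
  }
  kernel_args = ["kvm-intel.nested=1"]
```
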
@ -1,10 +1,10 @@
|
||||
# Digital Ocean
|
||||
|
||||
In this tutorial, we'll create a Kubernetes v1.15.3 cluster on DigitalOcean with Container Linux.
|
||||
In this tutorial, we'll create a Kubernetes v1.16.2 cluster on DigitalOcean with Container Linux.
|
||||
|
||||
We'll declare a Kubernetes cluster using the Typhoon Terraform module. Then apply the changes to create controller droplets, worker droplets, DNS records, tags, and TLS assets.
|
||||
|
||||
Controllers are provisioned to run an `etcd-member` peer and a `kubelet` service. Workers run just a `kubelet` service. A one-time [bootkube](https://github.com/kubernetes-incubator/bootkube) bootstrap schedules the `apiserver`, `scheduler`, `controller-manager`, and `coredns` on controllers and schedules `kube-proxy` and `flannel` on every node. A generated `kubeconfig` provides `kubectl` access to the cluster.
|
||||
Controller hosts are provisioned to run an `etcd-member` peer and a `kubelet` service. Worker hosts run a `kubelet` service. Controller nodes run `kube-apiserver`, `kube-scheduler`, `kube-controller-manager`, and `coredns`, while `kube-proxy` and `calico` (or `flannel`) run on every node. A generated `kubeconfig` provides `kubectl` access to the cluster.
|
||||
|
||||
## Requirements
|
||||
|
||||
@ -18,15 +18,15 @@ Install [Terraform](https://www.terraform.io/downloads.html) v0.12.x on your sys
|
||||
|
||||
```sh
|
||||
$ terraform version
|
||||
Terraform v0.12.6
|
||||
Terraform v0.12.9
|
||||
```
|
||||
|
||||
Add the [terraform-provider-ct](https://github.com/poseidon/terraform-provider-ct) plugin binary for your system to `~/.terraform.d/plugins/`, noting the final name.
|
||||
|
||||
```sh
|
||||
wget https://github.com/poseidon/terraform-provider-ct/releases/download/v0.3.2/terraform-provider-ct-v0.3.2-linux-amd64.tar.gz
|
||||
tar xzf terraform-provider-ct-v0.3.2-linux-amd64.tar.gz
|
||||
mv terraform-provider-ct-v0.3.2-linux-amd64/terraform-provider-ct ~/.terraform.d/plugins/terraform-provider-ct_v0.3.2
|
||||
wget https://github.com/poseidon/terraform-provider-ct/releases/download/v0.4.0/terraform-provider-ct-v0.4.0-linux-amd64.tar.gz
|
||||
tar xzf terraform-provider-ct-v0.4.0-linux-amd64.tar.gz
|
||||
mv terraform-provider-ct-v0.4.0-linux-amd64/terraform-provider-ct ~/.terraform.d/plugins/terraform-provider-ct_v0.4.0
|
||||
```
|
||||
|
||||
Read [concepts](/architecture/concepts/) to learn about Terraform, modules, and organizing resources. Change to your infrastructure repository (e.g. `infra`).
|
||||
@ -50,12 +50,12 @@ Configure the DigitalOcean provider to use your token in a `providers.tf` file.
|
||||
|
||||
```tf
|
||||
provider "digitalocean" {
|
||||
version = "1.6.0"
|
||||
version = "1.8.0"
|
||||
token = "${chomp(file("~/.config/digital-ocean/token"))}"
|
||||
}
|
||||
|
||||
provider "ct" {
|
||||
version = "0.3.2"
|
||||
version = "0.4.0"
|
||||
}
|
||||
```
|
||||
|
||||
@ -65,7 +65,7 @@ Define a Kubernetes cluster using the module `digital-ocean/container-linux/kube
|
||||
|
||||
```tf
|
||||
module "digital-ocean-nemo" {
|
||||
source = "git::https://github.com/poseidon/typhoon//digital-ocean/container-linux/kubernetes?ref=v1.15.3"
|
||||
source = "git::https://github.com/poseidon/typhoon//digital-ocean/container-linux/kubernetes?ref=v1.16.2"
|
||||
|
||||
# Digital Ocean
|
||||
cluster_name = "nemo"
|
||||
@ -85,7 +85,7 @@ Reference the [variables docs](#variables) or the [variables.tf](https://github.
|
||||
|
||||
## ssh-agent
|
||||
|
||||
Initial bootstrapping requires `bootkube.service` be started on one controller node. Terraform uses `ssh-agent` to automate this step. Add your SSH private key to `ssh-agent`.
|
||||
Initial bootstrapping requires `bootstrap.service` be started on one controller node. Terraform uses `ssh-agent` to automate this step. Add your SSH private key to `ssh-agent`.
|
||||
|
||||
```sh
|
||||
ssh-add ~/.ssh/id_rsa
|
||||
@ -111,11 +111,11 @@ Apply the changes to create the cluster.
|
||||
|
||||
```sh
|
||||
$ terraform apply
|
||||
module.digital-ocean-nemo.null_resource.bootkube-start: Still creating... (30s elapsed)
|
||||
module.digital-ocean-nemo.null_resource.bootkube-start: Provisioning with 'remote-exec'...
|
||||
module.digital-ocean-nemo.null_resource.bootstrap: Still creating... (30s elapsed)
|
||||
module.digital-ocean-nemo.null_resource.bootstrap: Provisioning with 'remote-exec'...
|
||||
...
|
||||
module.digital-ocean-nemo.null_resource.bootkube-start: Still creating... (6m20s elapsed)
|
||||
module.digital-ocean-nemo.null_resource.bootkube-start: Creation complete (ID: 7599298447329218468)
|
||||
module.digital-ocean-nemo.null_resource.bootstrap: Still creating... (6m20s elapsed)
|
||||
module.digital-ocean-nemo.null_resource.bootstrap: Creation complete (ID: 7599298447329218468)
|
||||
|
||||
Apply complete! Resources: 54 added, 0 changed, 0 destroyed.
|
||||
```
|
||||
@ -129,10 +129,10 @@ In 3-6 minutes, the Kubernetes cluster will be ready.
|
||||
```
|
||||
$ export KUBECONFIG=/home/user/.secrets/clusters/nemo/auth/kubeconfig
|
||||
$ kubectl get nodes
|
||||
NAME STATUS ROLES AGE VERSION
|
||||
10.132.110.130 Ready controller,master 10m v1.15.3
|
||||
10.132.115.81 Ready node 10m v1.15.3
|
||||
10.132.124.107 Ready node 10m v1.15.3
|
||||
NAME STATUS ROLES AGE VERSION
|
||||
10.132.110.130 Ready <none> 10m v1.16.2
|
||||
10.132.115.81 Ready <none> 10m v1.16.2
|
||||
10.132.124.107 Ready <none> 10m v1.16.2
|
||||
```
|
||||
|
||||
List the pods.
|
||||
@ -141,19 +141,15 @@ List the pods.
|
||||
NAMESPACE NAME READY STATUS RESTARTS AGE
|
||||
kube-system coredns-1187388186-ld1j7 1/1 Running 0 11m
|
||||
kube-system coredns-1187388186-rdhf7 1/1 Running 0 11m
|
||||
kube-system flannel-1cq1v 2/2 Running 0 11m
|
||||
kube-system flannel-hq9t0 2/2 Running 1 11m
|
||||
kube-system flannel-v0g9w 2/2 Running 0 11m
|
||||
kube-system kube-apiserver-n10qr 1/1 Running 0 11m
|
||||
kube-system kube-controller-manager-3271970485-37gtw 1/1 Running 1 11m
|
||||
kube-system kube-controller-manager-3271970485-p52t5 1/1 Running 0 11m
|
||||
kube-system calico-node-1m5bf 2/2 Running 0 11m
|
||||
kube-system calico-node-7jmr1 2/2 Running 0 11m
|
||||
kube-system calico-node-bknc8 2/2 Running 0 11m
|
||||
kube-system kube-apiserver-10.132.115.81 1/1 Running 0 11m
kube-system kube-controller-manager-10.132.115.81 1/1 Running 0 11m
|
||||
kube-system kube-proxy-6kxjf 1/1 Running 0 11m
|
||||
kube-system kube-proxy-fh3td 1/1 Running 0 11m
|
||||
kube-system kube-proxy-k35rc 1/1 Running 0 11m
|
||||
kube-system kube-scheduler-3895335239-2bc4c 1/1 Running 0 11m
|
||||
kube-system kube-scheduler-3895335239-b7q47 1/1 Running 1 11m
|
||||
kube-system pod-checkpointer-pr1lq 1/1 Running 0 11m
|
||||
kube-system pod-checkpointer-pr1lq-10.132.115.81 1/1 Running 0 10m
|
||||
kube-system kube-scheduler-10.132.115.81 1/1 Running 0 11m
|
||||
```
|
||||
|
||||
## Going Further
|
||||
@ -171,11 +167,11 @@ Check the [variables.tf](https://github.com/poseidon/typhoon/blob/master/digital
|
||||
|
||||
| Name | Description | Example |
|
||||
|:-----|:------------|:--------|
|
||||
| cluster_name | Unique cluster name (prepended to dns_zone) | nemo |
|
||||
| region | Digital Ocean region | nyc1, sfo2, fra1, tor1 |
|
||||
| dns_zone | Digital Ocean domain (i.e. DNS zone) | do.example.com |
|
||||
| cluster_name | Unique cluster name (prepended to dns_zone) | "nemo" |
|
||||
| region | Digital Ocean region | "nyc1", "sfo2", "fra1", "tor1" |
|
||||
| dns_zone | Digital Ocean domain (i.e. DNS zone) | "do.example.com" |
|
||||
| ssh_fingerprints | SSH public key fingerprints | ["d7:9d..."] |
|
||||
| asset_dir | Path to a directory where generated assets should be placed (contains secrets) | /home/user/.secrets/nemo |
|
||||
| asset_dir | Absolute path to a directory where generated assets should be placed (contains secrets) | "/home/user/.secrets/nemo" |
|
||||
|
||||
#### DNS Zone
|
||||
|
||||
@ -218,15 +214,14 @@ Digital Ocean requires the SSH public key be uploaded to your account, so you ma
|
||||
|:-----|:------------|:--------|:--------|
|
||||
| controller_count | Number of controllers (i.e. masters) | 1 | 1 |
|
||||
| worker_count | Number of workers | 1 | 3 |
|
||||
| controller_type | Droplet type for controllers | s-2vcpu-2gb | s-2vcpu-2gb, s-2vcpu-4gb, s-4vcpu-8gb, ... |
|
||||
| worker_type | Droplet type for workers | s-1vcpu-2gb | s-1vcpu-2gb, s-2vcpu-2gb, ... |
|
||||
| controller_type | Droplet type for controllers | "s-2vcpu-2gb" | s-2vcpu-2gb, s-2vcpu-4gb, s-4vcpu-8gb, ... |
|
||||
| worker_type | Droplet type for workers | "s-1vcpu-2gb" | s-1vcpu-2gb, s-2vcpu-2gb, ... |
|
||||
| image | Container Linux image for instances | "coreos-stable" | coreos-stable, coreos-beta, coreos-alpha |
|
||||
| controller_clc_snippets | Controller Container Linux Config snippets | [] | [example](/advanced/customization/) |
|
||||
| worker_clc_snippets | Worker Container Linux Config snippets | [] | [example](/advanced/customization/) |
|
||||
| networking | Choice of networking provider | "flannel" | "flannel" or "calico" (experimental) |
|
||||
| networking | Choice of networking provider | "calico" | "flannel" or "calico" |
|
||||
| pod_cidr | CIDR IPv4 range to assign to Kubernetes pods | "10.2.0.0/16" | "10.22.0.0/16" |
|
||||
| service_cidr | CIDR IPv4 range to assign to Kubernetes services | "10.3.0.0/16" | "10.3.0.0/24" |
|
||||
| cluster_domain_suffix | FQDN suffix for Kubernetes services answered by coredns. | "cluster.local" | "k8s.example.com" |
|
||||
|
||||
Check the list of valid [droplet types](https://developers.digitalocean.com/documentation/changelog/api-v2/new-size-slugs-for-droplet-plan-changes/) or use `doctl compute size list`.
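
For example, droplet sizes from the tables above would be set in the `module "digital-ocean-nemo"` block with a fragment like this (values are illustrative):

```tf
  controller_type = "s-2vcpu-2gb"
  worker_type     = "s-1vcpu-2gb"
  worker_count    = 3
  image           = "coreos-stable"
```
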
@ -1,10 +1,10 @@
|
||||
# Google Cloud
|
||||
|
||||
In this tutorial, we'll create a Kubernetes v1.15.3 cluster on Google Compute Engine with Container Linux.
|
||||
In this tutorial, we'll create a Kubernetes v1.16.2 cluster on Google Compute Engine with Container Linux.
|
||||
|
||||
We'll declare a Kubernetes cluster using the Typhoon Terraform module. Then apply the changes to create a network, firewall rules, health checks, controller instances, worker managed instance group, load balancers, and TLS assets.
|
||||
|
||||
Controllers are provisioned to run an `etcd-member` peer and a `kubelet` service. Workers run just a `kubelet` service. A one-time [bootkube](https://github.com/kubernetes-incubator/bootkube) bootstrap schedules the `apiserver`, `scheduler`, `controller-manager`, and `coredns` on controllers and schedules `kube-proxy` and `calico` (or `flannel`) on every node. A generated `kubeconfig` provides `kubectl` access to the cluster.
|
||||
Controller hosts are provisioned to run an `etcd-member` peer and a `kubelet` service. Worker hosts run a `kubelet` service. Controller nodes run `kube-apiserver`, `kube-scheduler`, `kube-controller-manager`, and `coredns`, while `kube-proxy` and `calico` (or `flannel`) run on every node. A generated `kubeconfig` provides `kubectl` access to the cluster.
|
||||
|
||||
## Requirements
|
||||
|
||||
@ -18,15 +18,15 @@ Install [Terraform](https://www.terraform.io/downloads.html) v0.12.x on your sys
|
||||
|
||||
```sh
|
||||
$ terraform version
|
||||
Terraform v0.12.6
|
||||
Terraform v0.12.9
|
||||
```
|
||||
|
||||
Add the [terraform-provider-ct](https://github.com/poseidon/terraform-provider-ct) plugin binary for your system to `~/.terraform.d/plugins/`, noting the final name.
|
||||
|
||||
```sh
|
||||
wget https://github.com/poseidon/terraform-provider-ct/releases/download/v0.3.2/terraform-provider-ct-v0.3.2-linux-amd64.tar.gz
|
||||
tar xzf terraform-provider-ct-v0.3.2-linux-amd64.tar.gz
|
||||
mv terraform-provider-ct-v0.3.2-linux-amd64/terraform-provider-ct ~/.terraform.d/plugins/terraform-provider-ct_v0.3.2
|
||||
wget https://github.com/poseidon/terraform-provider-ct/releases/download/v0.4.0/terraform-provider-ct-v0.4.0-linux-amd64.tar.gz
|
||||
tar xzf terraform-provider-ct-v0.4.0-linux-amd64.tar.gz
|
||||
mv terraform-provider-ct-v0.4.0-linux-amd64/terraform-provider-ct ~/.terraform.d/plugins/terraform-provider-ct_v0.4.0
|
||||
```
|
||||
|
||||
Read [concepts](/architecture/concepts/) to learn about Terraform, modules, and organizing resources. Change to your infrastructure repository (e.g. `infra`).
|
||||
@ -49,14 +49,14 @@ Configure the Google Cloud provider to use your service account key, project-id,
|
||||
|
||||
```tf
|
||||
provider "google" {
|
||||
version = "2.12.0"
|
||||
version = "2.16.0"
|
||||
project = "project-id"
|
||||
region = "us-central1"
|
||||
credentials = "${file("~/.config/google-cloud/terraform.json")}"
|
||||
credentials = file("~/.config/google-cloud/terraform.json")
|
||||
}
|
||||
|
||||
provider "ct" {
|
||||
version = "0.3.2"
|
||||
version = "0.4.0"
|
||||
}
|
||||
```
|
||||
|
||||
@ -71,7 +71,7 @@ Define a Kubernetes cluster using the module `google-cloud/container-linux/kuber
|
||||
|
||||
```tf
|
||||
module "google-cloud-yavin" {
|
||||
source = "git::https://github.com/poseidon/typhoon//google-cloud/container-linux/kubernetes?ref=v1.15.3"
|
||||
source = "git::https://github.com/poseidon/typhoon//google-cloud/container-linux/kubernetes?ref=v1.16.2"
|
||||
|
||||
# Google Cloud
|
||||
cluster_name = "yavin"
|
||||
@ -92,7 +92,7 @@ Reference the [variables docs](#variables) or the [variables.tf](https://github.
|
||||
|
||||
## ssh-agent
|
||||
|
||||
Initial bootstrapping requires `bootkube.service` be started on one controller node. Terraform uses `ssh-agent` to automate this step. Add your SSH private key to `ssh-agent`.
|
||||
Initial bootstrapping requires `bootstrap.service` be started on one controller node. Terraform uses `ssh-agent` to automate this step. Add your SSH private key to `ssh-agent`.
|
||||
|
||||
```sh
|
||||
ssh-add ~/.ssh/id_rsa
|
||||
@ -118,12 +118,11 @@ Apply the changes to create the cluster.
|
||||
|
||||
```sh
|
||||
$ terraform apply
|
||||
module.google-cloud-yavin.null_resource.bootkube-start: Still creating... (10s elapsed)
|
||||
module.google-cloud-yavin.null_resource.bootstrap: Still creating... (10s elapsed)
|
||||
...
|
||||
|
||||
module.google-cloud-yavin.null_resource.bootkube-start: Still creating... (5m30s elapsed)
|
||||
module.google-cloud-yavin.null_resource.bootkube-start: Still creating... (5m40s elapsed)
|
||||
module.google-cloud-yavin.null_resource.bootkube-start: Creation complete (ID: 5768638456220583358)
|
||||
module.google-cloud-yavin.null_resource.bootstrap: Still creating... (5m30s elapsed)
|
||||
module.google-cloud-yavin.null_resource.bootstrap: Still creating... (5m40s elapsed)
|
||||
module.google-cloud-yavin.null_resource.bootstrap: Creation complete (ID: 5768638456220583358)
|
||||
|
||||
Apply complete! Resources: 64 added, 0 changed, 0 destroyed.
|
||||
```
|
||||
@ -137,10 +136,10 @@ In 4-8 minutes, the Kubernetes cluster will be ready.
|
||||
```
|
||||
$ export KUBECONFIG=/home/user/.secrets/clusters/yavin/auth/kubeconfig
|
||||
$ kubectl get nodes
|
||||
NAME ROLES STATUS AGE VERSION
|
||||
yavin-controller-0.c.example-com.internal controller,master Ready 6m v1.15.3
|
||||
yavin-worker-jrbf.c.example-com.internal node Ready 5m v1.15.3
|
||||
yavin-worker-mzdm.c.example-com.internal node Ready 5m v1.15.3
|
||||
NAME ROLES STATUS AGE VERSION
|
||||
yavin-controller-0.c.example-com.internal <none> Ready 6m v1.16.2
|
||||
yavin-worker-jrbf.c.example-com.internal <none> Ready 5m v1.16.2
|
||||
yavin-worker-mzdm.c.example-com.internal <none> Ready 5m v1.16.2
|
||||
```
|
||||
|
||||
List the pods.
|
||||
@ -153,15 +152,12 @@ kube-system calico-node-d1l5b 2/2 Running 0
|
||||
kube-system calico-node-sp9ps 2/2 Running 0 6m
|
||||
kube-system coredns-1187388186-dkh3o 1/1 Running 0 6m
|
||||
kube-system coredns-1187388186-zj5dl 1/1 Running 0 6m
|
||||
kube-system kube-apiserver-zppls 1/1 Running 0 6m
|
||||
kube-system kube-controller-manager-3271970485-gh9kt 1/1 Running 0 6m
|
||||
kube-system kube-controller-manager-3271970485-h90v8 1/1 Running 1 6m
|
||||
kube-system kube-apiserver-controller-0 1/1 Running 0 6m
|
||||
kube-system kube-controller-manager-controller-0 1/1 Running 0 6m
|
||||
kube-system kube-proxy-117v6 1/1 Running 0 6m
|
||||
kube-system kube-proxy-9886n 1/1 Running 0 6m
|
||||
kube-system kube-proxy-njn47 1/1 Running 0 6m
|
||||
kube-system kube-scheduler-3895335239-5x87r 1/1 Running 0 6m
|
||||
kube-system kube-scheduler-3895335239-bzrrt 1/1 Running 1 6m
|
||||
kube-system pod-checkpointer-l6lrt 1/1 Running 0 6m
|
||||
kube-system kube-scheduler-controller-0 1/1 Running 0 6m
|
||||
```
|
||||
|
||||
## Going Further
|
||||
@ -184,7 +180,7 @@ Check the [variables.tf](https://github.com/poseidon/typhoon/blob/master/google-
|
||||
| dns_zone | Google Cloud DNS zone | "google-cloud.example.com" |
|
||||
| dns_zone_name | Google Cloud DNS zone name | "example-zone" |
|
||||
| ssh_authorized_key | SSH public key for user 'core' | "ssh-rsa AAAAB3NZ..." |
|
||||
| asset_dir | Path to a directory where generated assets should be placed (contains secrets) | "/home/user/.secrets/clusters/yavin" |
|
||||
| asset_dir | Absolute path to a directory where generated assets should be placed (contains secrets) | "/home/user/.secrets/clusters/yavin" |
|
||||
|
||||
Check the list of valid [regions](https://cloud.google.com/compute/docs/regions-zones/regions-zones) and list Container Linux [images](https://cloud.google.com/compute/docs/images) with `gcloud compute images list | grep coreos`.
|
||||
|
||||
@ -221,7 +217,7 @@ resource "google_dns_managed_zone" "zone-for-clusters" {
|
||||
| networking | Choice of networking provider | "calico" | "calico" or "flannel" |
|
||||
| pod_cidr | CIDR IPv4 range to assign to Kubernetes pods | "10.2.0.0/16" | "10.22.0.0/16" |
|
||||
| service_cidr | CIDR IPv4 range to assign to Kubernetes services | "10.3.0.0/16" | "10.3.0.0/24" |
|
||||
| cluster_domain_suffix | FQDN suffix for Kubernetes services answered by coredns. | "cluster.local" | "k8s.example.com" |
|
||||
| worker_node_labels | List of initial worker node labels | [] | ["worker-pool=default"] |
|
||||
|
||||
Check the list of valid [machine types](https://cloud.google.com/compute/docs/machine-types).
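
For example, the networking choice and initial worker labels would appear in the `module "google-cloud-yavin"` block as a fragment like this (values are illustrative):

```tf
  networking         = "calico"                 # or "flannel"
  worker_node_labels = ["worker-pool=default"]
```
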
@@ -3,11 +3,11 @@

!!! danger
    Typhoon for Fedora CoreOS is an early preview! Fedora CoreOS itself is a preview! Expect bugs and design shifts. Please help both projects solve problems. Report Fedora CoreOS bugs to [Fedora](https://github.com/coreos/fedora-coreos-tracker/issues). Report Typhoon issues to Typhoon.

In this tutorial, we'll create a Kubernetes v1.15.3 cluster on AWS with Fedora CoreOS.
In this tutorial, we'll create a Kubernetes v1.16.2 cluster on AWS with Fedora CoreOS.

We'll declare a Kubernetes cluster using the Typhoon Terraform module. Then apply the changes to create a VPC, gateway, subnets, security groups, controller instances, worker auto-scaling group, network load balancer, and TLS assets.

Controllers are provisioned to run an `etcd-member` peer and a `kubelet` service. Workers run just a `kubelet` service. A one-time [bootkube](https://github.com/kubernetes-incubator/bootkube) bootstrap schedules the `apiserver`, `scheduler`, `controller-manager`, and `coredns` on controllers and schedules `kube-proxy` and `calico` (or `flannel`) on every node. A generated `kubeconfig` provides `kubectl` access to the cluster.
Controller hosts are provisioned to run an `etcd-member` peer and a `kubelet` service. Worker hosts run a `kubelet` service. Controller nodes run `kube-apiserver`, `kube-scheduler`, `kube-controller-manager`, and `coredns`, while `kube-proxy` and `calico` (or `flannel`) run on every node. A generated `kubeconfig` provides `kubectl` access to the cluster.

## Requirements

@@ -21,7 +21,7 @@ Install [Terraform](https://www.terraform.io/downloads.html) v0.12.x on your sys

```sh
$ terraform version
Terraform v0.12.6
Terraform v0.12.9
```

Add the [terraform-provider-ct](https://github.com/poseidon/terraform-provider-ct) plugin binary for your system to `~/.terraform.d/plugins/`, noting the final name.

@@ -52,8 +52,8 @@ Configure the AWS provider to use your access key credentials in a `providers.tf

```tf
provider "aws" {
  version = "2.23.0"
  region  = "us-east-1" # MUST be us-east-1 right now!
  version = "2.31.0"
  region  = "eu-central-1"
  shared_credentials_file = "/home/user/.config/aws/credentials"
}

@@ -73,7 +73,7 @@ Define a Kubernetes cluster using the module `aws/fedora-coreos/kubernetes`.

```tf
module "aws-tempest" {
  source = "git::https://github.com/poseidon/typhoon//aws/fedora-coreos/kubernetes?ref=DEVELOPMENT_SHA"
  source = "git::https://github.com/poseidon/typhoon//aws/fedora-coreos/kubernetes?ref=v1.16.2"

  # AWS
  cluster_name = "tempest"

@@ -94,7 +94,7 @@ Reference the [variables docs](#variables) or the [variables.tf](https://github.

## ssh-agent

Initial bootstrapping requires `bootkube.service` be started on one controller node. Terraform uses `ssh-agent` to automate this step. Add your SSH private key to `ssh-agent`.
Initial bootstrapping requires `bootstrap.service` be started on one controller node. Terraform uses `ssh-agent` to automate this step. Add your SSH private key to `ssh-agent`.

```sh
ssh-add ~/.ssh/id_rsa

@@ -121,9 +121,9 @@ Apply the changes to create the cluster.

```sh
$ terraform apply
...
module.aws-tempest.null_resource.bootkube-start: Still creating... (4m50s elapsed)
module.aws-tempest.null_resource.bootkube-start: Still creating... (5m0s elapsed)
module.aws-tempest.null_resource.bootkube-start: Creation complete after 11m8s (ID: 3961816482286168143)
module.aws-tempest.null_resource.bootstrap: Still creating... (4m50s elapsed)
module.aws-tempest.null_resource.bootstrap: Still creating... (5m0s elapsed)
module.aws-tempest.null_resource.bootstrap: Creation complete after 5m8s (ID: 3961816482286168143)

Apply complete! Resources: 98 added, 0 changed, 0 destroyed.
```

@@ -137,32 +137,28 @@ In 4-8 minutes, the Kubernetes cluster will be ready.

```
$ export KUBECONFIG=/home/user/.secrets/clusters/tempest/auth/kubeconfig
$ kubectl get nodes
NAME STATUS ROLES AGE VERSION
ip-10-0-3-155 Ready controller,master 10m v1.15.3
ip-10-0-26-65 Ready node 10m v1.15.3
ip-10-0-41-21 Ready node 10m v1.15.3
NAME STATUS ROLES AGE VERSION
ip-10-0-3-155 Ready <none> 10m v1.16.2
ip-10-0-26-65 Ready <none> 10m v1.16.2
ip-10-0-41-21 Ready <none> 10m v1.16.2
```

List the pods.

```
$ kubectl get pods --all-namespaces
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system calico-node-1m5bf 2/2 Running 0 34m
kube-system calico-node-7jmr1 2/2 Running 0 34m
kube-system calico-node-bknc8 2/2 Running 0 34m
kube-system coredns-1187388186-wx1lg 1/1 Running 0 34m
kube-system coredns-1187388186-qjnvp 1/1 Running 0 34m
kube-system kube-apiserver-4mjbk 1/1 Running 0 34m
kube-system kube-controller-manager-3597210155-j2jbt 1/1 Running 1 34m
kube-system kube-controller-manager-3597210155-j7g7x 1/1 Running 0 34m
kube-system kube-proxy-14wxv 1/1 Running 0 34m
kube-system kube-proxy-9vxh2 1/1 Running 0 34m
kube-system kube-proxy-sbbsh 1/1 Running 0 34m
kube-system kube-scheduler-3359497473-5plhf 1/1 Running 0 34m
kube-system kube-scheduler-3359497473-r7zg7 1/1 Running 1 34m
kube-system pod-checkpointer-4kxtl 1/1 Running 0 34m
kube-system pod-checkpointer-4kxtl-ip-10-0-3-155 1/1 Running 0 33m
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system calico-node-1m5bf 2/2 Running 0 34m
kube-system calico-node-7jmr1 2/2 Running 0 34m
kube-system calico-node-bknc8 2/2 Running 0 34m
kube-system coredns-1187388186-wx1lg 1/1 Running 0 34m
kube-system coredns-1187388186-qjnvp 1/1 Running 0 34m
kube-system kube-apiserver-ip-10-0-3-155 1/1 Running 0 34m
kube-system kube-controller-manager-ip-10-0-3-155 1/1 Running 0 34m
kube-system kube-proxy-14wxv 1/1 Running 0 34m
kube-system kube-proxy-9vxh2 1/1 Running 0 34m
kube-system kube-proxy-sbbsh 1/1 Running 0 34m
kube-system kube-scheduler-ip-10-0-3-155 1/1 Running 1 34m
```

## Going Further

@@ -195,7 +191,7 @@ resource "aws_route53_zone" "zone-for-clusters" {
}
```

Reference the DNS zone id with `"${aws_route53_zone.zone-for-clusters.zone_id}"`.
Reference the DNS zone id with `aws_route53_zone.zone-for-clusters.zone_id`.

!!! tip ""
    If you have an existing domain name with a zone file elsewhere, just delegate a subdomain that can be managed on Route53 (e.g. aws.mydomain.com) and [update nameservers](http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/SOA-NSrecords.html).

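A minimal sketch of how that zone id reference is typically consumed, assuming the AWS module's `dns_zone` and `dns_zone_id` inputs (they are not shown in this excerpt, and the values are placeholders):

```tf
resource "aws_route53_zone" "zone-for-clusters" {
  name = "aws.example.com."
}

module "aws-tempest" {
  source = "git::https://github.com/poseidon/typhoon//aws/fedora-coreos/kubernetes?ref=v1.16.2"

  # AWS
  cluster_name = "tempest"
  dns_zone     = "aws.example.com"
  dns_zone_id  = aws_route53_zone.zone-for-clusters.zone_id
  # ...
}
```
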
@@ -209,11 +205,11 @@ Reference the DNS zone id with `"${aws_route53_zone.zone-for-clusters.zone_id}"`

| controller_type | EC2 instance type for controllers | "t3.small" | See below |
| worker_type | EC2 instance type for workers | "t3.small" | See below |
| os_image | AMI channel for Fedora CoreOS | not yet used | ? |
| disk_size | Size of the EBS volume in GB | "40" | "100" |
| disk_size | Size of the EBS volume in GB | 40 | 100 |
| disk_type | Type of the EBS volume | "gp2" | standard, gp2, io1 |
| disk_iops | IOPS of the EBS volume | "0" (i.e. auto) | "400" |
| worker_target_groups | Target group ARNs to which worker instances should be added | [] | ["${aws_lb_target_group.app.id}"] |
| worker_price | Spot price in USD for workers. Leave as default empty string for regular on-demand instances | "" | "0.10" |
| disk_iops | IOPS of the EBS volume | 0 (i.e. auto) | 400 |
| worker_target_groups | Target group ARNs to which worker instances should be added | [] | [aws_lb_target_group.app.id] |
| worker_price | Spot price in USD for worker instances or 0 to use on-demand instances | 0 | 0.10 |
| controller_snippets | Controller Fedora CoreOS Config snippets | [] | UNSUPPORTED |
| worker_clc_snippets | Worker Fedora CoreOS Config snippets | [] | UNSUPPORTED |
| networking | Choice of networking provider | "calico" | "calico" or "flannel" |

@@ -221,7 +217,7 @@ Reference the DNS zone id with `"${aws_route53_zone.zone-for-clusters.zone_id}"`

| host_cidr | CIDR IPv4 range to assign to EC2 instances | "10.0.0.0/16" | "10.1.0.0/16" |
| pod_cidr | CIDR IPv4 range to assign to Kubernetes pods | "10.2.0.0/16" | "10.22.0.0/16" |
| service_cidr | CIDR IPv4 range to assign to Kubernetes services | "10.3.0.0/16" | "10.3.0.0/24" |
| cluster_domain_suffix | FQDN suffix for Kubernetes services answered by coredns. | "cluster.local" | "k8s.example.com" |
| worker_node_labels | List of initial worker node labels | [] | ["worker-pool=default"] |

Check the list of valid [instance types](https://aws.amazon.com/ec2/instance-types/).

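Illustrative only: a few of the optional variables above expressed in Terraform v0.12 syntax inside the module block from the earlier example. The values are placeholders, and `aws_lb_target_group.app` is a hypothetical resource from your own configuration:

```tf
module "aws-tempest" {
  # ...required variables as in the example above...

  # optional
  worker_count         = 2
  worker_type          = "t3.small"
  disk_size            = 100
  worker_price         = 0.10
  worker_target_groups = [aws_lb_target_group.app.id]
  worker_node_labels   = ["worker-pool=default"]
}
```
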
@@ -3,11 +3,11 @@

!!! danger
    Typhoon for Fedora CoreOS is an early preview! Fedora CoreOS itself is a preview! Expect bugs and design shifts. Please help both projects solve problems. Report Fedora CoreOS bugs to [Fedora](https://github.com/coreos/fedora-coreos-tracker/issues). Report Typhoon issues to Typhoon.

In this tutorial, we'll network boot and provision a Kubernetes v1.15.3 cluster on bare-metal with Fedora CoreOS.
In this tutorial, we'll network boot and provision a Kubernetes v1.16.2 cluster on bare-metal with Fedora CoreOS.

First, we'll deploy a [Matchbox](https://github.com/poseidon/matchbox) service and setup a network boot environment. Then, we'll declare a Kubernetes cluster using the Typhoon Terraform module and power on machines. On PXE boot, machines will install Fedora CoreOS to disk, reboot into the disk install, and provision themselves as Kubernetes controllers or workers via Ignition.

Controllers are provisioned to run an `etcd-member` peer and a `kubelet` service. Workers run just a `kubelet` service. A one-time [bootkube](https://github.com/kubernetes-incubator/bootkube) bootstrap schedules the `apiserver`, `scheduler`, `controller-manager`, and `coredns` on controllers and schedules `kube-proxy` and `calico` (or `flannel`) on every node. A generated `kubeconfig` provides `kubectl` access to the cluster.
Controller hosts are provisioned to run an `etcd-member` peer and a `kubelet` service. Worker hosts run a `kubelet` service. Controller nodes run `kube-apiserver`, `kube-scheduler`, `kube-controller-manager`, and `coredns`, while `kube-proxy` and `calico` (or `flannel`) run on every node. A generated `kubeconfig` provides `kubectl` access to the cluster.

## Requirements

@@ -114,7 +114,7 @@ Install [Terraform](https://www.terraform.io/downloads.html) v0.12.x on your sys

```sh
$ terraform version
Terraform v0.12.6
Terraform v0.12.9
```

Add the [terraform-provider-matchbox](https://github.com/poseidon/terraform-provider-matchbox) plugin binary for your system to `~/.terraform.d/plugins/`, noting the final name.

@@ -147,9 +147,9 @@ Configure the Matchbox provider to use your Matchbox API endpoint and client cer

provider "matchbox" {
  version     = "0.3.0"
  endpoint    = "matchbox.example.com:8081"
  client_cert = "${file("~/.config/matchbox/client.crt")}"
  client_key  = "${file("~/.config/matchbox/client.key")}"
  ca          = "${file("~/.config/matchbox/ca.crt")}"
  client_cert = file("~/.config/matchbox/client.crt")
  client_key  = file("~/.config/matchbox/client.key")
  ca          = file("~/.config/matchbox/ca.crt")
}

provider "ct" {

@@ -163,14 +163,14 @@ Define a Kubernetes cluster using the module `bare-metal/fedora-coreos/kubernete

```tf
module "bare-metal-mercury" {
  source = "git::https://github.com/poseidon/typhoon//bare-metal/fedora-coreos/kubernetes?ref=DEVELOPMENT_SHA"
  source = "git::https://github.com/poseidon/typhoon//bare-metal/fedora-coreos/kubernetes?ref=v1.16.2"

  # bare-metal
  cluster_name           = "mercury"
  matchbox_http_endpoint = "http://matchbox.example.com"
  os_stream              = "testing"
  os_version             = "30.20190716.1"
  cached_install         = false
  os_version             = "30.20191002.0"
  cached_install         = true

  # configuration
  k8s_domain_name = "node1.example.com"

@@ -178,20 +178,22 @@ module "bare-metal-mercury" {
  asset_dir = "/home/user/.secrets/clusters/mercury"

  # machines
  controller_names   = ["node1"]
  controller_macs    = ["52:54:00:a1:9c:ae"]
  controller_domains = ["node1.example.com"]
  worker_names = [
    "node2",
    "node3",
  ]
  worker_macs = [
    "52:54:00:b2:2f:86",
    "52:54:00:c3:61:77",
  ]
  worker_domains = [
    "node2.example.com",
    "node3.example.com",
  controllers = [{
    name   = "node1"
    mac    = "52:54:00:a1:9c:ae"
    domain = "node1.example.com"
  }]
  workers = [
    {
      name   = "node2",
      mac    = "52:54:00:b2:2f:86"
      domain = "node2.example.com"
    },
    {
      name   = "node3",
      mac    = "52:54:00:c3:61:77"
      domain = "node3.example.com"
    }
  ]
}
```

@@ -200,7 +202,7 @@ Reference the [variables docs](#variables) or the [variables.tf](https://github.

## ssh-agent

Initial bootstrapping requires `bootkube.service` be started on one controller node. Terraform uses `ssh-agent` to automate this step. Add your SSH private key to `ssh-agent`.
Initial bootstrapping requires `bootstrap.service` be started on one controller node. Terraform uses `ssh-agent` to automate this step. Add your SSH private key to `ssh-agent`.

```sh
ssh-add ~/.ssh/id_rsa

@@ -222,7 +224,7 @@
$ terraform plan
Plan: 55 to add, 0 to change, 0 to destroy.
```

Apply the changes. Terraform will generate bootkube assets to `asset_dir` and create Matchbox profiles (e.g. controller, worker) and matching rules via the Matchbox API.
Apply the changes. Terraform will generate bootstrap assets to `asset_dir` and create Matchbox profiles (e.g. controller, worker) and matching rules via the Matchbox API.

```sh
$ terraform apply

@@ -251,14 +253,14 @@ Machines will network boot, install Fedora CoreOS to disk, reboot into the disk

### Bootstrap

Wait for the `bootkube-start` step to finish bootstrapping the Kubernetes control plane. This may take 5-15 minutes depending on your network.
Wait for the `bootstrap` step to finish bootstrapping the Kubernetes control plane. This may take 5-15 minutes depending on your network.

```
module.bare-metal-mercury.null_resource.bootkube-start: Still creating... (6m10s elapsed)
module.bare-metal-mercury.null_resource.bootkube-start: Still creating... (6m20s elapsed)
module.bare-metal-mercury.null_resource.bootkube-start: Still creating... (6m30s elapsed)
module.bare-metal-mercury.null_resource.bootkube-start: Still creating... (6m40s elapsed)
module.bare-metal-mercury.null_resource.bootkube-start: Creation complete (ID: 5441741360626669024)
module.bare-metal-mercury.null_resource.bootstrap: Still creating... (6m10s elapsed)
module.bare-metal-mercury.null_resource.bootstrap: Still creating... (6m20s elapsed)
module.bare-metal-mercury.null_resource.bootstrap: Still creating... (6m30s elapsed)
module.bare-metal-mercury.null_resource.bootstrap: Still creating... (6m40s elapsed)
module.bare-metal-mercury.null_resource.bootstrap: Creation complete (ID: 5441741360626669024)

Apply complete! Resources: 55 added, 0 changed, 0 destroyed.
```

@@ -267,13 +269,12 @@ To watch the bootstrap process in detail, SSH to the first controller and journa

```
$ ssh core@node1.example.com
$ journalctl -f -u bootkube
bootkube[5]: Pod Status: pod-checkpointer Running
bootkube[5]: Pod Status: kube-apiserver Running
bootkube[5]: Pod Status: kube-scheduler Running
bootkube[5]: Pod Status: kube-controller-manager Running
bootkube[5]: All self-hosted control plane components successfully started
bootkube[5]: Tearing down temporary bootstrap control plane...
$ journalctl -f -u bootstrap
podman[1750]: The connection to the server cluster.example.com:6443 was refused - did you specify the right host or port?
podman[1750]: Waiting for static pod control plane
...
podman[1750]: serviceaccount/calico-node unchanged
systemd[1]: Started Kubernetes control plane.
```

## Verify

@@ -283,10 +284,10 @@

```
$ export KUBECONFIG=/home/user/.secrets/clusters/mercury/auth/kubeconfig
$ kubectl get nodes
NAME STATUS ROLES AGE VERSION
node1.example.com Ready controller,master 10m v1.15.3
node2.example.com Ready node 10m v1.15.3
node3.example.com Ready node 10m v1.15.3
NAME STATUS ROLES AGE VERSION
node1.example.com Ready <none> 10m v1.16.2
node2.example.com Ready <none> 10m v1.16.2
node3.example.com Ready <none> 10m v1.16.2
```

List the pods.

@@ -299,16 +300,12 @@ kube-system calico-node-gnjrm 2/2 Running 0
kube-system calico-node-llbgt 2/2 Running 0 11m
kube-system coredns-1187388186-dj3pd 1/1 Running 0 11m
kube-system coredns-1187388186-mx9rt 1/1 Running 0 11m
kube-system kube-apiserver-7336w 1/1 Running 0 11m
kube-system kube-controller-manager-3271970485-b9chx 1/1 Running 0 11m
kube-system kube-controller-manager-3271970485-v30js 1/1 Running 1 11m
kube-system kube-apiserver-node1.example.com 1/1 Running 0 11m
kube-system kube-controller-manager-node1.example.com 1/1 Running 1 11m
kube-system kube-proxy-50sd4 1/1 Running 0 11m
kube-system kube-proxy-bczhp 1/1 Running 0 11m
kube-system kube-proxy-mp2fw 1/1 Running 0 11m
kube-system kube-scheduler-3895335239-fd3l7 1/1 Running 1 11m
kube-system kube-scheduler-3895335239-hfjv0 1/1 Running 0 11m
kube-system pod-checkpointer-wf65d 1/1 Running 0 11m
kube-system pod-checkpointer-wf65d-node1.example.com 1/1 Running 0 11m
kube-system kube-scheduler-node1.example.com 1/1 Running 0 11m
```

## Going Further

@@ -323,19 +320,15 @@ Check the [variables.tf](https://github.com/poseidon/typhoon/blob/master/bare-me

| Name | Description | Example |
|:-----|:------------|:--------|
| cluster_name | Unique cluster name | mercury |
| matchbox_http_endpoint | Matchbox HTTP read-only endpoint | http://matchbox.example.com:port |
| os_stream | Fedora CoreOS release stream | testing |
| os_version | Fedora CoreOS version to PXE and install | 30.20190716.1 |
| cluster_name | Unique cluster name | "mercury" |
| matchbox_http_endpoint | Matchbox HTTP read-only endpoint | "http://matchbox.example.com:port" |
| os_stream | Fedora CoreOS release stream | "testing" |
| os_version | Fedora CoreOS version to PXE and install | "30.20190716.1" |
| k8s_domain_name | FQDN resolving to the controller(s) nodes. Workers and kubectl will communicate with this endpoint | "myk8s.example.com" |
| ssh_authorized_key | SSH public key for user 'core' | "ssh-rsa AAAAB3Nz..." |
| asset_dir | Absolute path to a directory where generated assets should be placed (contains secrets) | "/home/user/.secrets/clusters/mercury" |
| controller_names | Ordered list of controller short names | ["node1"] |
| controller_macs | Ordered list of controller identifying MAC addresses | ["52:54:00:a1:9c:ae"] |
| controller_domains | Ordered list of controller FQDNs | ["node1.example.com"] |
| worker_names | Ordered list of worker short names | ["node2", "node3"] |
| worker_macs | Ordered list of worker identifying MAC addresses | ["52:54:00:b2:2f:86", "52:54:00:c3:61:77"] |
| worker_domains | Ordered list of worker FQDNs | ["node2.example.com", "node3.example.com"] |
| controllers | List of controller machine detail objects (unique name, identifying MAC address, FQDN) | `[{name="node1", mac="52:54:00:a1:9c:ae", domain="node1.example.com"}]` |
| workers | List of worker machine detail objects (unique name, identifying MAC address, FQDN) | `[{name="node2", mac="52:54:00:b2:2f:86", domain="node2.example.com"}, {name="node3", mac="52:54:00:c3:61:77", domain="node3.example.com"}]` |

### Optional

@@ -346,9 +339,8 @@

| networking | Choice of networking provider | "calico" | "calico" or "flannel" |
| network_mtu | CNI interface MTU (calico-only) | 1480 | - |
| snippets | Map from machine names to lists of Fedora CoreOS Config snippets | {} | UNSUPPORTED |
| network_ip_autodetection_method | Method to detect host IPv4 address (calico-only) | first-found | can-reach=10.0.0.1 |
| network_ip_autodetection_method | Method to detect host IPv4 address (calico-only) | "first-found" | "can-reach=10.0.0.1" |
| pod_cidr | CIDR IPv4 range to assign to Kubernetes pods | "10.2.0.0/16" | "10.22.0.0/16" |
| service_cidr | CIDR IPv4 range to assign to Kubernetes services | "10.3.0.0/16" | "10.3.0.0/24" |
| cluster_domain_suffix | FQDN suffix for Kubernetes services answered by coredns. | "cluster.local" | "k8s.example.com" |
| kernel_args | Additional kernel args to provide at PXE boot | [] | "kvm-intel.nested=1" |
| kernel_args | Additional kernel args to provide at PXE boot | [] | ["kvm-intel.nested=1"] |

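A hedged sketch of how a few of the optional bare-metal inputs above might be combined in the `bare-metal-mercury` module block; the values and the snippet file path are hypothetical:

```tf
module "bare-metal-mercury" {
  # ...required variables as in the example above...

  # optional
  networking  = "calico"
  network_mtu = 1480
  kernel_args = ["kvm-intel.nested=1"]

  # per-machine Fedora CoreOS Config snippets, keyed by machine name
  snippets = {
    "node2" = [file("./snippets/worker-tuning.yaml")]
  }
}
```
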
@@ -11,7 +11,7 @@ Typhoon distributes upstream Kubernetes, architectural conventions, and cluster

## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>

* Kubernetes v1.15.3 (upstream, via [kubernetes-incubator/bootkube](https://github.com/kubernetes-incubator/bootkube))
* Kubernetes v1.16.2 (upstream)
* Single or multi-master, [Calico](https://www.projectcalico.org/) or [flannel](https://github.com/coreos/flannel) networking
* On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
* Advanced features like [worker pools](advanced/worker-pools/), [preemptible](cl/google-cloud/#preemption) workers, and [snippets](advanced/customization/#container-linux) customization

@@ -47,7 +47,7 @@ Define a Kubernetes cluster by using the Terraform module for your chosen platfo

```tf
module "google-cloud-yavin" {
  source = "git::https://github.com/poseidon/typhoon//google-cloud/container-linux/kubernetes?ref=v1.15.3"
  source = "git::https://github.com/poseidon/typhoon//google-cloud/container-linux/kubernetes?ref=v1.16.2"

  # Google Cloud
  cluster_name = "yavin"

@@ -79,10 +79,10 @@ In 4-8 minutes (varies by platform), the cluster will be ready. This Google Clou

```
$ export KUBECONFIG=/home/user/.secrets/clusters/yavin/auth/kubeconfig
$ kubectl get nodes
NAME ROLES STATUS AGE VERSION
yavin-controller-0.c.example-com.internal controller,master Ready 6m v1.15.3
yavin-worker-jrbf.c.example-com.internal node Ready 5m v1.15.3
yavin-worker-mzdm.c.example-com.internal node Ready 5m v1.15.3
NAME ROLES STATUS AGE VERSION
yavin-controller-0.c.example-com.internal <none> Ready 6m v1.16.2
yavin-worker-jrbf.c.example-com.internal <none> Ready 5m v1.16.2
yavin-worker-mzdm.c.example-com.internal <none> Ready 5m v1.16.2
```

List the pods.

@@ -95,16 +95,12 @@ kube-system calico-node-d1l5b 2/2 Running 0
kube-system calico-node-sp9ps 2/2 Running 0 6m
kube-system coredns-1187388186-dkh3o 1/1 Running 0 6m
kube-system coredns-1187388186-zj5dl 1/1 Running 0 6m
kube-system kube-apiserver-zppls 1/1 Running 0 6m
kube-system kube-controller-manager-3271970485-gh9kt 1/1 Running 0 6m
kube-system kube-controller-manager-3271970485-h90v8 1/1 Running 1 6m
kube-system kube-apiserver-controller-0 1/1 Running 0 6m
kube-system kube-controller-manager-controller-0 1/1 Running 0 6m
kube-system kube-proxy-117v6 1/1 Running 0 6m
kube-system kube-proxy-9886n 1/1 Running 0 6m
kube-system kube-proxy-njn47 1/1 Running 0 6m
kube-system kube-scheduler-3895335239-5x87r 1/1 Running 0 6m
kube-system kube-scheduler-3895335239-bzrrt 1/1 Running 1 6m
kube-system pod-checkpointer-l6lrt 1/1 Running 0 6m
kube-system pod-checkpointer-l6lrt-controller-0 1/1 Running 0 6m
kube-system kube-scheduler-controller-0 1/1 Running 0 6m
```

## Help

@@ -18,7 +18,7 @@ module "google-cloud-yavin" {
}

module "bare-metal-mercury" {
  source = "git::https://github.com/poseidon/typhoon//bare-metal/container-linux/kubernetes?ref=v1.15.3"
  source = "git::https://github.com/poseidon/typhoon//bare-metal/container-linux/kubernetes?ref=v1.16.2"
  ...
}
```

@@ -110,7 +110,7 @@ Apply complete! Resources: 0 added, 0 changed, 55 destroyed.

#### In-place Edits

Typhoon uses a self-hosted Kubernetes control plane which allows certain manifest upgrades to be performed in-place. Components like `apiserver`, `controller-manager`, `scheduler`, `flannel`/`calico`, `coredns`, and `kube-proxy` are run on Kubernetes itself and can be edited via `kubectl`. If you're interested, see the bootkube [upgrade docs](https://github.com/kubernetes-incubator/bootkube/blob/master/Documentation/upgrading.md).
Typhoon uses a static pod Kubernetes control plane which allows certain manifest upgrades to be performed in-place. Components like `kube-apiserver`, `kube-controller-manager`, and `kube-scheduler` are run as static pods. Components `flannel`/`calico`, `coredns`, and `kube-proxy` are scheduled on Kubernetes and can be edited via `kubectl`.

In certain scenarios, in-place edits can be useful for quickly rolling out security patches (e.g. bumping `coredns`) or prioritizing speed over the safety of a proper cluster re-provision and transition.

@@ -279,15 +279,15 @@ Typhoon modules have been adapted for Terraform v0.12. Provider plugins requirem

| Typhoon Release | Terraform version |
|-------------------|---------------------|
| v1.15.3 - ? | v0.12.x |
| v1.10.3 - v1.15.3 | v0.11.x |
| v1.16.2 - ? | v0.12.x |
| v1.10.3 - v1.16.2 | v0.11.x |
| v1.9.2 - v1.10.2 | v0.10.4+ or v0.11.x |
| v1.7.3 - v1.9.1 | v0.10.x |
| v1.6.4 - v1.7.2 | v0.9.x |

### New users

New users can start with Terraform v0.12.x and follow the docs for Typhoon v1.15.3+ without issue.
New users can start with Terraform v0.12.x and follow the docs for Typhoon v1.16.2+ without issue.

### Existing users

@@ -404,7 +404,7 @@ tree .
 └── infraB <- new Terraform v0.12.x configs
```

Define Typhoon clusters in the new config directory using Terraform v0.12 syntax. Follow the Typhoon v1.15.3+ docs (e.g. use `terraform12` in the `infraB` dir). See [AWS](/cl/aws), [Azure](/cl/azure), [Bare-Metal](/cl/bare-metal), [Digital Ocean](/cl/digital-ocean), or [Google-Cloud](/cl/google-cloud)) to create new clusters. Follow the usual [upgrade](/topics/maintenance/#upgrades) process to apply workloads and shift traffic. Later, switch back to the old config directory and deprovision clusters with Terraform v0.11.
Define Typhoon clusters in the new config directory using Terraform v0.12 syntax. Follow the Typhoon v1.16.2+ docs (e.g. use `terraform12` in the `infraB` dir). See [AWS](/cl/aws), [Azure](/cl/azure), [Bare-Metal](/cl/bare-metal), [Digital Ocean](/cl/digital-ocean), or [Google-Cloud](/cl/google-cloud)) to create new clusters. Follow the usual [upgrade](/topics/maintenance/#upgrades) process to apply workloads and shift traffic. Later, switch back to the old config directory and deprovision clusters with Terraform v0.11.

```shell
terraform12 init

@@ -12,7 +12,7 @@ Typhoon aims to be minimal and secure. We're running it ourselves after all.

* Workloads run on worker nodes only, unless they tolerate the master taint
* Kubernetes [Network Policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/) and Calico [NetworkPolicy](https://docs.projectcalico.org/latest/reference/calicoctl/resources/networkpolicy) support [^1]

[^1]: Requires `networking = "calico"`. Calico is the default on AWS, bare-metal, and Google Cloud. Azure and Digital Ocean are limited to `networking = "flannel"`.
[^1]: Requires `networking = "calico"`. Calico is the default on all platforms (AWS, Azure, bare-metal, DigitalOcean, and Google Cloud).

**Hosts**

@@ -11,7 +11,7 @@ Typhoon distributes upstream Kubernetes, architectural conventions, and cluster

## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>

* Kubernetes v1.15.3 (upstream, via [kubernetes-incubator/bootkube](https://github.com/kubernetes-incubator/bootkube))
* Kubernetes v1.16.2 (upstream)
* Single or multi-master, [Calico](https://www.projectcalico.org/) or [flannel](https://github.com/coreos/flannel) networking
* On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
* Advanced features like [worker pools](https://typhoon.psdn.io/advanced/worker-pools/), [preemptible](https://typhoon.psdn.io/cl/google-cloud/#preemption) workers, and [snippets](https://typhoon.psdn.io/advanced/customization/#container-linux) customization

@@ -1,6 +1,6 @@
# Self-hosted Kubernetes assets (kubeconfig, manifests)
module "bootkube" {
  source = "git::https://github.com/poseidon/terraform-render-bootkube.git?ref=248675e7a95660e5c8a65f8b7ce86be89ef55a0e"
# Kubernetes assets (kubeconfig, manifests)
module "bootstrap" {
  source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=0fcc067476fa1463d057fd43760df222b7262b27"

  cluster_name = var.cluster_name
  api_servers  = [format("%s.%s", var.cluster_name, var.dns_zone)]

@@ -7,7 +7,7 @@ systemd:
    - name: 40-etcd-cluster.conf
      contents: |
        [Service]
        Environment="ETCD_IMAGE_TAG=v3.3.15"
        Environment="ETCD_IMAGE_TAG=v3.4.2"
        Environment="ETCD_NAME=${etcd_name}"
        Environment="ETCD_ADVERTISE_CLIENT_URLS=https://${etcd_domain}:2379"
        Environment="ETCD_INITIAL_ADVERTISE_PEER_URLS=https://${etcd_domain}:2380"

@@ -64,11 +64,9 @@ systemd:
          --mount volume=var-log,target=/var/log \
          --hosts-entry=host \
          --insecure-options=image"
        ExecStartPre=/bin/mkdir -p /opt/cni/bin
        ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
        ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
        ExecStartPre=/bin/mkdir -p /etc/kubernetes/checkpoint-secrets
        ExecStartPre=/bin/mkdir -p /etc/kubernetes/inactive-manifests
        ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
        ExecStartPre=/bin/mkdir -p /opt/cni/bin
        ExecStartPre=/bin/mkdir -p /var/lib/cni
        ExecStartPre=/bin/mkdir -p /var/lib/calico
        ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins

@@ -86,8 +84,8 @@
          --kubeconfig=/etc/kubernetes/kubeconfig \
          --lock-file=/var/run/lock/kubelet.lock \
          --network-plugin=cni \
          --node-labels=node-role.kubernetes.io/master \
          --node-labels=node-role.kubernetes.io/controller="true" \
          --node-labels=node.kubernetes.io/master \
          --node-labels=node.kubernetes.io/controller="true" \
          --pod-manifest-path=/etc/kubernetes/manifests \
          --register-with-taints=node-role.kubernetes.io/master=:NoSchedule \
          --read-only-port=0 \

@@ -97,17 +95,28 @@
        RestartSec=10
        [Install]
        WantedBy=multi-user.target
    - name: bootkube.service
    - name: bootstrap.service
      contents: |
        [Unit]
        Description=Bootstrap a Kubernetes cluster
        ConditionPathExists=!/opt/bootkube/init_bootkube.done
        Description=Kubernetes control plane
        ConditionPathExists=!/opt/bootstrap/bootstrap.done
        [Service]
        Type=oneshot
        RemainAfterExit=true
        WorkingDirectory=/opt/bootkube
        ExecStart=/opt/bootkube/bootkube-start
        ExecStartPost=/bin/touch /opt/bootkube/init_bootkube.done
        WorkingDirectory=/opt/bootstrap
        ExecStartPre=-/usr/bin/bash -c 'set -x && [ -n "$(ls /opt/bootstrap/assets/manifests-*/* 2>/dev/null)" ] && mv /opt/bootstrap/assets/manifests-*/* /opt/bootstrap/assets/manifests && rm -rf /opt/bootstrap/assets/manifests-*'
        ExecStart=/usr/bin/rkt run \
            --trust-keys-from-https \
            --volume assets,kind=host,source=/opt/bootstrap/assets \
            --mount volume=assets,target=/assets \
            --volume script,kind=host,source=/opt/bootstrap/apply \
            --mount volume=script,target=/apply \
            --insecure-options=image \
            docker://k8s.gcr.io/hyperkube:v1.16.2 \
            --net=host \
            --dns=host \
            --exec=/apply
        ExecStartPost=/bin/touch /opt/bootstrap/bootstrap.done
        [Install]
        WantedBy=multi-user.target
storage:

@@ -124,37 +133,27 @@ storage:
      contents:
        inline: |
          KUBELET_IMAGE_URL=docker://k8s.gcr.io/hyperkube
          KUBELET_IMAGE_TAG=v1.15.3
          KUBELET_IMAGE_TAG=v1.16.2
    - path: /opt/bootstrap/apply
      filesystem: root
      mode: 0544
      contents:
        inline: |
          #!/bin/bash -e
          export KUBECONFIG=/assets/auth/kubeconfig
          until kubectl version; do
            echo "Waiting for static pod control plane"
            sleep 5
          done
          until kubectl apply -f /assets/manifests -R; do
            echo "Retry applying manifests"
            sleep 5
          done
    - path: /etc/sysctl.d/max-user-watches.conf
      filesystem: root
      contents:
        inline: |
          fs.inotify.max_user_watches=16184
    - path: /opt/bootkube/bootkube-start
      filesystem: root
      mode: 0544
      user:
        id: 500
      group:
        id: 500
      contents:
        inline: |
          #!/bin/bash
          # Wrapper for bootkube start
          set -e
          # Move experimental manifests
          [ -n "$(ls /opt/bootkube/assets/manifests-*/* 2>/dev/null)" ] && mv /opt/bootkube/assets/manifests-*/* /opt/bootkube/assets/manifests && rm -rf /opt/bootkube/assets/manifests-*
          exec /usr/bin/rkt run \
            --trust-keys-from-https \
            --volume assets,kind=host,source=/opt/bootkube/assets \
            --mount volume=assets,target=/assets \
            --volume bootstrap,kind=host,source=/etc/kubernetes \
            --mount volume=bootstrap,target=/etc/kubernetes \
            $${RKT_OPTS} \
            quay.io/coreos/bootkube:v0.14.0 \
            --net=host \
            --dns=host \
            --exec=/bootkube -- start --asset-dir=/assets "$@"
passwd:
  users:
    - name: core

@@ -85,7 +85,7 @@ data "template_file" "controller-configs" {
    etcd_domain = "${var.cluster_name}-etcd${count.index}.${var.dns_zone}"
    # etcd0=https://cluster-etcd0.example.com,etcd1=https://cluster-etcd1.example.com,...
    etcd_initial_cluster = join(",", data.template_file.etcds.*.rendered)
    kubeconfig = indent(10, module.bootkube.kubeconfig-kubelet)
    kubeconfig = indent(10, module.bootstrap.kubeconfig-kubelet)
    ssh_authorized_key = var.ssh_authorized_key
    cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
    cluster_domain_suffix = var.cluster_domain_suffix

@@ -48,6 +48,20 @@ resource "google_compute_firewall" "internal-etcd-metrics" {
  target_tags = ["${var.cluster_name}-controller"]
}

# Allow Prometheus to scrape kube-scheduler and kube-controller-manager metrics
resource "google_compute_firewall" "internal-kube-metrics" {
  name    = "${var.cluster_name}-internal-kube-metrics"
  network = google_compute_network.network.name

  allow {
    protocol = "tcp"
    ports    = [10251, 10252]
  }

  source_tags = ["${var.cluster_name}-worker"]
  target_tags = ["${var.cluster_name}-controller"]
}

resource "google_compute_firewall" "allow-apiserver" {
  name    = "${var.cluster_name}-allow-apiserver"
  network = google_compute_network.network.name

@@ -1,5 +1,5 @@
output "kubeconfig-admin" {
  value = module.bootkube.kubeconfig-admin
  value = module.bootstrap.kubeconfig-admin
}

# Outputs for Kubernetes Ingress

@@ -21,7 +21,7 @@ output "network_name" {
}

output "kubeconfig" {
  value = module.bootkube.kubeconfig-kubelet
  value = module.bootstrap.kubeconfig-kubelet
}

# Outputs for custom firewalling

@@ -1,48 +1,57 @@
# Secure copy etcd TLS assets to controllers.
# Secure copy assets to controllers.
resource "null_resource" "copy-controller-secrets" {
  count = var.controller_count

  depends_on = [
    module.bootstrap,
  ]

  connection {
    type    = "ssh"
    host    = element(local.controllers_ipv4_public, count.index)
    host    = local.controllers_ipv4_public[count.index]
    user    = "core"
    timeout = "15m"
  }

  provisioner "file" {
    content     = module.bootkube.etcd_ca_cert
    content     = module.bootstrap.etcd_ca_cert
    destination = "$HOME/etcd-client-ca.crt"
  }

  provisioner "file" {
    content     = module.bootkube.etcd_client_cert
    content     = module.bootstrap.etcd_client_cert
    destination = "$HOME/etcd-client.crt"
  }

  provisioner "file" {
    content     = module.bootkube.etcd_client_key
    content     = module.bootstrap.etcd_client_key
    destination = "$HOME/etcd-client.key"
  }

  provisioner "file" {
    content     = module.bootkube.etcd_server_cert
    content     = module.bootstrap.etcd_server_cert
    destination = "$HOME/etcd-server.crt"
  }

  provisioner "file" {
    content     = module.bootkube.etcd_server_key
    content     = module.bootstrap.etcd_server_key
    destination = "$HOME/etcd-server.key"
  }

  provisioner "file" {
    content     = module.bootkube.etcd_peer_cert
    content     = module.bootstrap.etcd_peer_cert
    destination = "$HOME/etcd-peer.crt"
  }

  provisioner "file" {
    content     = module.bootkube.etcd_peer_key
    content     = module.bootstrap.etcd_peer_key
    destination = "$HOME/etcd-peer.key"
  }

  provisioner "file" {
    source      = var.asset_dir
    destination = "$HOME/assets"
  }

  provisioner "remote-exec" {
    inline = [

@@ -56,36 +65,34 @@ resource "null_resource" "copy-controller-secrets" {
      "sudo mv etcd-peer.key /etc/ssl/etcd/etcd/peer.key",
      "sudo chown -R etcd:etcd /etc/ssl/etcd",
      "sudo chmod -R 500 /etc/ssl/etcd",
      "sudo mv $HOME/assets /opt/bootstrap/assets",
      "sudo mkdir -p /etc/kubernetes/manifests",
      "sudo mkdir -p /etc/kubernetes/bootstrap-secrets",
      "sudo cp -r /opt/bootstrap/assets/tls/* /etc/kubernetes/bootstrap-secrets/",
      "sudo cp /opt/bootstrap/assets/auth/kubeconfig /etc/kubernetes/bootstrap-secrets/",
      "sudo cp -r /opt/bootstrap/assets/static-manifests/* /etc/kubernetes/manifests/",
    ]
  }
}

# Secure copy bootkube assets to ONE controller and start bootkube to perform
# one-time self-hosted cluster bootstrapping.
resource "null_resource" "bootkube-start" {
# Connect to a controller to perform one-time cluster bootstrap.
resource "null_resource" "bootstrap" {
  depends_on = [
    module.bootkube,
    null_resource.copy-controller-secrets,
    module.workers,
    google_dns_record_set.apiserver,
    null_resource.copy-controller-secrets,
  ]

  connection {
    type    = "ssh"
    host    = element(local.controllers_ipv4_public, 0)
    host    = local.controllers_ipv4_public[0]
    user    = "core"
    timeout = "15m"
  }

  provisioner "file" {
    source      = var.asset_dir
    destination = "$HOME/assets"
  }

  provisioner "remote-exec" {
    inline = [
      "sudo mv $HOME/assets /opt/bootkube",
      "sudo systemctl start bootkube",
      "sudo systemctl start bootstrap",
    ]
  }
}

@@ -23,45 +23,45 @@ variable "dns_zone_name" {
# instances

variable "controller_count" {
  type    = string
  default = "1"
  type        = number
  description = "Number of controllers (i.e. masters)"
  default     = 1
}

variable "worker_count" {
  type    = string
  default = "1"
  type        = number
  description = "Number of workers"
  default     = 1
}

variable "controller_type" {
  type    = string
  default = "n1-standard-1"
  description = "Machine type for controllers (see `gcloud compute machine-types list`)"
  default     = "n1-standard-1"
}

variable "worker_type" {
  type    = string
  default = "n1-standard-1"
  description = "Machine type for controllers (see `gcloud compute machine-types list`)"
  default     = "n1-standard-1"
}

variable "os_image" {
  type    = string
  default = "coreos-stable"
  description = "Container Linux image for compute instances (e.g. coreos-stable)"
  default     = "coreos-stable"
}

variable "disk_size" {
  type    = string
  default = "40"
  type        = number
  description = "Size of the disk in GB"
  default     = 40
}

variable "worker_preemptible" {
  type    = string
  default = "false"
  type        = bool
  description = "If enabled, Compute Engine will terminate workers randomly within 24 hours"
  default     = false
}

variable "controller_clc_snippets" {

@@ -84,48 +84,55 @@ variable "ssh_authorized_key" {
}

variable "asset_dir" {
  description = "Path to a directory where generated assets should be placed (contains secrets)"
  type        = string
  description = "Absolute path to a directory where generated assets should be placed (contains secrets)"
}

variable "networking" {
  description = "Choice of networking provider (flannel or calico)"
  type        = string
  description = "Choice of networking provider (flannel or calico)"
  default     = "calico"
}

variable "pod_cidr" {
  description = "CIDR IPv4 range to assign Kubernetes pods"
  type        = string
  description = "CIDR IPv4 range to assign Kubernetes pods"
  default     = "10.2.0.0/16"
}

variable "service_cidr" {
  type        = string
  description = <<EOD
CIDR IPv4 range to assign Kubernetes services.
The 1st IP will be reserved for kube_apiserver, the 10th IP will be reserved for coredns.
EOD

  type    = string
  default = "10.3.0.0/16"
}

variable "cluster_domain_suffix" {
  description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
  type        = string
  default     = "cluster.local"
}

variable "enable_reporting" {
  type        = string
  type        = bool
  description = "Enable usage or analytics reporting to upstreams (Calico)"
  default     = "false"
  default     = false
}

variable "enable_aggregation" {
  type        = bool
  description = "Enable the Kubernetes Aggregation Layer (defaults to false)"
  type        = string
  default     = "false"
  default     = false
}

variable "worker_node_labels" {
  type        = list(string)
  description = "List of initial worker node labels"
  default     = []
}

# unofficial, undocumented, unsupported

variable "cluster_domain_suffix" {
  type        = string
  description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
  default     = "cluster.local"
}

@@ -13,10 +13,11 @@ module "workers" {
  preemptible = var.worker_preemptible

  # configuration
  kubeconfig            = module.bootkube.kubeconfig-kubelet
  kubeconfig            = module.bootstrap.kubeconfig-kubelet
  ssh_authorized_key    = var.ssh_authorized_key
  service_cidr          = var.service_cidr
  cluster_domain_suffix = var.cluster_domain_suffix
  clc_snippets          = var.worker_clc_snippets
  node_labels           = var.worker_node_labels
}

Some files were not shown because too many files have changed in this diff.