Mirror of https://github.com/puppetmaster/typhoon.git (synced 2025-08-04 23:31:33 +02:00)

Compare commits (30 commits)
Commit SHAs: 0595915a19, e6bc5143aa, e4ac1027c8, 24fc440d83, a6702573a2, 69188af565, d874bdd17d, 5b9dab6659, 5196709fe0, ab72f1ab2d, 5ef4155e08, 15c4b793c3, 36ed53924f, 19de38b30d, 995824fa6d, 1c5ed84fc2, ca7d62720e, 26f8d76755, fdd6882a87, f82266ac8c, 7bcf2d7831, 78bfff0afe, a6de245d8a, 96afa6a531, a407ff72df, f453c54956, 3e34fb075b, 9bfb1c5faf, 99ab81f79c, 8703f2c3c5
CHANGES.md (71 changed lines)
@@ -4,6 +4,77 @@ Notable changes between versions.

## Latest

* Kubernetes [v1.16.2](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.16.md#v1162)
* Update etcd from v3.4.1 to v3.4.2 ([#570](https://github.com/poseidon/typhoon/pull/570))
* Update Calico from v3.9.1 to [v3.9.2](https://docs.projectcalico.org/v3.9/release-notes/)
* Default to using Calico and supporting NetworkPolicy on all platforms

#### Azure

* Change default networking provider from "flannel" to "calico" ([#573](https://github.com/poseidon/typhoon/pull/573)); see the sketch below
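Clusters that prefer the previous behavior can still set the `networking` variable explicitly. A minimal sketch, assuming an Azure cluster module; the module name and elided arguments are illustrative:

```tf
module "azure-ramius" {
  source = "git::https://github.com/poseidon/typhoon//azure/container-linux/kubernetes?ref=v1.16.2"

  # keep the pre-#573 default instead of the new "calico" default
  networking = "flannel"

  # ...other required cluster variables...
}
```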
#### Bare-Metal

* Add `controllers` and `workers` as typed lists of machine detail objects ([#566](https://github.com/poseidon/typhoon/pull/566)); a sketch follows this list
* Define clusters' machines cleanly and with Terraform v0.12 type constraints (**action required**, see PR example)
* Remove `controller_names`, `controller_macs`, and `controller_domains` variables
* Remove `worker_names`, `worker_macs`, and `worker_domains` variables
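A minimal sketch of the new list-of-objects form, assuming the detail fields mirror the removed `*_names`, `*_macs`, and `*_domains` variables; consult the example in [#566](https://github.com/poseidon/typhoon/pull/566) for the authoritative shape:

```tf
# Sketch only: attribute names assumed from the removed per-field variables.
module "bare-metal-mercury" {
  source = "git::https://github.com/poseidon/typhoon//bare-metal/container-linux/kubernetes?ref=v1.16.2"

  controllers = [{
    name   = "node1"
    mac    = "52:54:00:a1:9c:ae"
    domain = "node1.example.com"
  }]

  workers = [
    {
      name   = "node2"
      mac    = "52:54:00:b2:2f:86"
      domain = "node2.example.com"
    },
    {
      name   = "node3"
      mac    = "52:54:00:c3:61:77"
      domain = "node3.example.com"
    },
  ]

  # ...other required cluster variables...
}
```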
#### DigitalOcean

* Change default networking provider from "flannel" to "calico" ([#573](https://github.com/poseidon/typhoon/pull/573))

#### Addons

* Update Grafana from v6.4.1 to [v6.4.2](https://github.com/grafana/grafana/releases/tag/v6.4.2)
* Change CLUO label from "app" to "name"

## v1.16.1

* Kubernetes [v1.16.1](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.16.md#v1161)
* Update etcd from v3.4.0 to [v3.4.1](https://github.com/etcd-io/etcd/releases/tag/v3.4.1)
* Update Calico from v3.8.2 to [v3.9.1](https://docs.projectcalico.org/v3.9/release-notes/)
* Add Terraform v0.12 variable types ([#553](https://github.com/poseidon/typhoon/pull/553), [#557](https://github.com/poseidon/typhoon/pull/557), [#560](https://github.com/poseidon/typhoon/pull/560), [#556](https://github.com/poseidon/typhoon/pull/556), [#562](https://github.com/poseidon/typhoon/pull/562))
* Deprecate `cluster_domain_suffix` variable

#### AWS

* Add `worker_node_labels` variable to set initial worker node labels ([#550](https://github.com/poseidon/typhoon/pull/550)); a sketch follows this list
* Add `node_labels` variable to internal `workers` pool module ([#550](https://github.com/poseidon/typhoon/pull/550))
* For Fedora CoreOS, detect most recent AMI in the region
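A minimal sketch of setting initial worker labels on an AWS cluster, assuming the `worker_node_labels` list-of-strings form from [#550](https://github.com/poseidon/typhoon/pull/550); module name and label values are illustrative:

```tf
module "aws-tempest" {
  source = "git::https://github.com/poseidon/typhoon//aws/container-linux/kubernetes?ref=v1.16.2"

  # illustrative key=value labels applied to workers at kubelet registration
  worker_node_labels = ["pool=default", "tier=general"]

  # ...other required cluster variables...
}
```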
#### Azure

* Promote `networking` provider Calico VXLAN out of experimental (set `networking = "calico"`)
* Add `worker_node_labels` variable to set initial worker node labels ([#550](https://github.com/poseidon/typhoon/pull/550))
* Add `node_labels` variable to internal `workers` pool module ([#550](https://github.com/poseidon/typhoon/pull/550))
* Change `workers` module default `vm_type` to `Standard_DS1_v2` (followup to [#539](https://github.com/poseidon/typhoon/pull/539))

#### Bare-Metal

* For Fedora CoreOS, use new kernel, initrd, and raw paths ([#563](https://github.com/poseidon/typhoon/pull/563))
* Fix Terraform missing comma error ([#549](https://github.com/poseidon/typhoon/pull/549))
* Remove deprecated `container_linux_oem` variable ([#562](https://github.com/poseidon/typhoon/pull/562))

#### DigitalOcean

* Promote `networking` provider Calico VXLAN out of experimental (set `networking = "calico"`)
* Fix Terraform missing comma error ([#549](https://github.com/poseidon/typhoon/pull/549))

#### Google Cloud

* Add `worker_node_labels` variable to set initial worker node labels ([#550](https://github.com/poseidon/typhoon/pull/550))
* Add `node_labels` variable to internal `workers` module ([#550](https://github.com/poseidon/typhoon/pull/550))

#### Addons

* Update Prometheus from v2.12.0 to [v2.13.0](https://github.com/prometheus/prometheus/releases/tag/v2.13.0)
* Fix Prometheus etcd target discovery and scraping ([#561](https://github.com/poseidon/typhoon/pull/561), regressed with Kubernetes v1.16.0)
* Update kube-state-metrics from v1.7.2 to v1.8.0
* Update nginx-ingress from v0.25.1 to [v0.26.1](https://github.com/kubernetes/ingress-nginx/releases/tag/nginx-0.26.1) ([#555](https://github.com/poseidon/typhoon/pull/555))
* Add lifecycle hook to allow draining for up to 5 minutes
* Update Grafana from v6.3.5 to [v6.4.1](https://github.com/grafana/grafana/releases/tag/v6.4.1)

## v1.16.0

* Kubernetes [v1.16.0](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.16.md#v1160) ([#543](https://github.com/poseidon/typhoon/pull/543))
README.md (10 changed lines)
@@ -11,7 +11,7 @@ Typhoon distributes upstream Kubernetes, architectural conventions, and cluster

 ## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>

-* Kubernetes v1.16.0 (upstream)
+* Kubernetes v1.16.2 (upstream)
 * Single or multi-master, [Calico](https://www.projectcalico.org/) or [flannel](https://github.com/coreos/flannel) networking
 * On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
 * Advanced features like [worker pools](https://typhoon.psdn.io/advanced/worker-pools/), [preemptible](https://typhoon.psdn.io/cl/google-cloud/#preemption) workers, and [snippets](https://typhoon.psdn.io/advanced/customization/#container-linux) customization

@@ -48,7 +48,7 @@ Define a Kubernetes cluster by using the Terraform module for your chosen platform

 ```tf
 module "google-cloud-yavin" {
-  source = "git::https://github.com/poseidon/typhoon//google-cloud/container-linux/kubernetes?ref=v1.16.0"
+  source = "git::https://github.com/poseidon/typhoon//google-cloud/container-linux/kubernetes?ref=v1.16.2"

   # Google Cloud
   cluster_name = "yavin"

@@ -82,9 +82,9 @@ In 4-8 minutes (varies by platform), the cluster will be ready. This Google Cloud

 $ export KUBECONFIG=/home/user/.secrets/clusters/yavin/auth/kubeconfig
 $ kubectl get nodes
 NAME                                       ROLES   STATUS  AGE  VERSION
-yavin-controller-0.c.example-com.internal  <none>  Ready   6m   v1.16.0
-yavin-worker-jrbf.c.example-com.internal   <none>  Ready   5m   v1.16.0
-yavin-worker-mzdm.c.example-com.internal   <none>  Ready   5m   v1.16.0
+yavin-controller-0.c.example-com.internal  <none>  Ready   6m   v1.16.2
+yavin-worker-jrbf.c.example-com.internal   <none>  Ready   5m   v1.16.2
+yavin-worker-mzdm.c.example-com.internal   <none>  Ready   5m   v1.16.2
 ```

 List the pods.
@@ -10,11 +10,11 @@ spec:
       maxUnavailable: 1
   selector:
     matchLabels:
-      app: container-linux-update-agent
+      name: container-linux-update-agent
   template:
     metadata:
       labels:
-        app: container-linux-update-agent
+        name: container-linux-update-agent
       annotations:
         seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
     spec:
@@ -7,11 +7,11 @@ spec:
   replicas: 1
   selector:
     matchLabels:
-      app: container-linux-update-operator
+      name: container-linux-update-operator
   template:
     metadata:
       labels:
-        app: container-linux-update-operator
+        name: container-linux-update-operator
       annotations:
         seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
     spec:
@@ -23,7 +23,7 @@ spec:
     spec:
       containers:
         - name: grafana
-          image: docker.io/grafana/grafana:6.3.5
+          image: docker.io/grafana/grafana:6.4.2
          env:
            - name: GF_PATHS_CONFIG
              value: "/etc/grafana/custom.ini"
@@ -22,7 +22,7 @@ spec:
     spec:
       containers:
         - name: nginx-ingress-controller
-          image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.25.1
+          image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.26.1
          args:
            - /nginx-ingress-controller
            - --ingress-class=public

@@ -65,6 +65,11 @@ spec:
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 5
+          lifecycle:
+            preStop:
+              exec:
+                command:
+                  - /wait-shutdown
          securityContext:
            capabilities:
              add:

@@ -73,4 +78,4 @@ spec:
                - ALL
            runAsUser: 33 # www-data
      restartPolicy: Always
-      terminationGracePeriodSeconds: 60
+      terminationGracePeriodSeconds: 300

The same three changes (image 0.25.1 to 0.26.1, the `preStop` `/wait-shutdown` lifecycle hook, and `terminationGracePeriodSeconds` 60 to 300) are repeated in four more nginx-ingress deployment manifests; one of those has the slightly different hunk ranges `@@ -62,6 +62,11 @@` (context `successThreshold: 1`, `failureThreshold: 3`, `timeoutSeconds: 5`) and `@@ -70,5 +75,5 @@`.
@@ -115,7 +115,7 @@ data:
          - role: node
        scheme: http
        relabel_configs:
-          - source_labels: [__meta_kubernetes_node_label_node_role_kubernetes_io_controller]
+          - source_labels: [__meta_kubernetes_node_label_node_kubernetes_io_controller]
            action: keep
            regex: 'true'
          - action: labelmap
@@ -20,7 +20,7 @@ spec:
      serviceAccountName: prometheus
      containers:
        - name: prometheus
-          image: quay.io/prometheus/prometheus:v2.12.0
+          image: quay.io/prometheus/prometheus:v2.13.0
          args:
            - --web.listen-address=0.0.0.0:9090
            - --config.file=/etc/prometheus/prometheus.yaml
@@ -24,7 +24,7 @@ spec:
      serviceAccountName: kube-state-metrics
      containers:
        - name: kube-state-metrics
-          image: quay.io/coreos/kube-state-metrics:v1.7.2
+          image: quay.io/coreos/kube-state-metrics:v1.8.0
          ports:
            - name: metrics
              containerPort: 8080
@@ -11,7 +11,7 @@ Typhoon distributes upstream Kubernetes, architectural conventions, and cluster

 ## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>

-* Kubernetes v1.16.0 (upstream)
+* Kubernetes v1.16.2 (upstream)
 * Single or multi-master, [Calico](https://www.projectcalico.org/) or [flannel](https://github.com/coreos/flannel) networking
 * On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
 * Advanced features like [worker pools](https://typhoon.psdn.io/advanced/worker-pools/), [spot](https://typhoon.psdn.io/cl/aws/#spot) workers, and [snippets](https://typhoon.psdn.io/advanced/customization/#container-linux) customization
@@ -1,6 +1,6 @@
 # Kubernetes assets (kubeconfig, manifests)
 module "bootstrap" {
-  source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=539b725093c8cd94ba46603adb25ac5280562ec8"
+  source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=0fcc067476fa1463d057fd43760df222b7262b27"

   cluster_name = var.cluster_name
   api_servers  = [format("%s.%s", var.cluster_name, var.dns_zone)]
@@ -7,7 +7,7 @@ systemd:
      - name: 40-etcd-cluster.conf
        contents: |
          [Service]
-          Environment="ETCD_IMAGE_TAG=v3.4.0"
+          Environment="ETCD_IMAGE_TAG=v3.4.2"
          Environment="ETCD_NAME=${etcd_name}"
          Environment="ETCD_ADVERTISE_CLIENT_URLS=https://${etcd_domain}:2379"
          Environment="ETCD_INITIAL_ADVERTISE_PEER_URLS=https://${etcd_domain}:2380"

@@ -113,7 +113,7 @@ systemd:
          --volume script,kind=host,source=/opt/bootstrap/apply \
          --mount volume=script,target=/apply \
          --insecure-options=image \
-          docker://k8s.gcr.io/hyperkube:v1.16.0 \
+          docker://k8s.gcr.io/hyperkube:v1.16.2 \
          --net=host \
          --dns=host \
          --exec=/apply

@@ -134,7 +134,7 @@ storage:
      contents:
        inline: |
          KUBELET_IMAGE_URL=docker://k8s.gcr.io/hyperkube
-          KUBELET_IMAGE_TAG=v1.16.0
+          KUBELET_IMAGE_TAG=v1.16.2
    - path: /opt/bootstrap/apply
      filesystem: root
      mode: 0544
@ -18,57 +18,57 @@ variable "dns_zone_id" {
|
|||||||
# instances
|
# instances
|
||||||
|
|
||||||
variable "controller_count" {
|
variable "controller_count" {
|
||||||
type = string
|
type = number
|
||||||
default = "1"
|
|
||||||
description = "Number of controllers (i.e. masters)"
|
description = "Number of controllers (i.e. masters)"
|
||||||
|
default = 1
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "worker_count" {
|
variable "worker_count" {
|
||||||
type = string
|
type = number
|
||||||
default = "1"
|
|
||||||
description = "Number of workers"
|
description = "Number of workers"
|
||||||
|
default = 1
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "controller_type" {
|
variable "controller_type" {
|
||||||
type = string
|
type = string
|
||||||
default = "t3.small"
|
|
||||||
description = "EC2 instance type for controllers"
|
description = "EC2 instance type for controllers"
|
||||||
|
default = "t3.small"
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "worker_type" {
|
variable "worker_type" {
|
||||||
type = string
|
type = string
|
||||||
default = "t3.small"
|
|
||||||
description = "EC2 instance type for workers"
|
description = "EC2 instance type for workers"
|
||||||
|
default = "t3.small"
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "os_image" {
|
variable "os_image" {
|
||||||
type = string
|
type = string
|
||||||
default = "coreos-stable"
|
|
||||||
description = "AMI channel for a Container Linux derivative (coreos-stable, coreos-beta, coreos-alpha, flatcar-stable, flatcar-beta, flatcar-alpha, flatcar-edge)"
|
description = "AMI channel for a Container Linux derivative (coreos-stable, coreos-beta, coreos-alpha, flatcar-stable, flatcar-beta, flatcar-alpha, flatcar-edge)"
|
||||||
|
default = "coreos-stable"
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "disk_size" {
|
variable "disk_size" {
|
||||||
type = string
|
type = number
|
||||||
default = "40"
|
|
||||||
description = "Size of the EBS volume in GB"
|
description = "Size of the EBS volume in GB"
|
||||||
|
default = 40
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "disk_type" {
|
variable "disk_type" {
|
||||||
type = string
|
type = string
|
||||||
default = "gp2"
|
|
||||||
description = "Type of the EBS volume (e.g. standard, gp2, io1)"
|
description = "Type of the EBS volume (e.g. standard, gp2, io1)"
|
||||||
|
default = "gp2"
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "disk_iops" {
|
variable "disk_iops" {
|
||||||
type = string
|
type = number
|
||||||
default = "0"
|
|
||||||
description = "IOPS of the EBS volume (e.g. 100)"
|
description = "IOPS of the EBS volume (e.g. 100)"
|
||||||
|
default = 0
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "worker_price" {
|
variable "worker_price" {
|
||||||
type = string
|
type = number
|
||||||
default = ""
|
description = "Spot price in USD for worker instances or 0 to use on-demand instances"
|
||||||
description = "Spot price in USD for autoscaling group spot instances. Leave as default empty string for autoscaling group to use on-demand instances. Note, switching in-place from spot to on-demand is not possible: https://github.com/terraform-providers/terraform-provider-aws/issues/4320"
|
default = 0
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "worker_target_groups" {
|
variable "worker_target_groups" {
|
||||||
@ -97,60 +97,66 @@ variable "ssh_authorized_key" {
|
|||||||
}
|
}
|
||||||
|
|
||||||
variable "asset_dir" {
|
variable "asset_dir" {
|
||||||
description = "Path to a directory where generated assets should be placed (contains secrets)"
|
|
||||||
type = string
|
type = string
|
||||||
|
description = "Absolute path to a directory where generated assets should be placed (contains secrets)"
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "networking" {
|
variable "networking" {
|
||||||
description = "Choice of networking provider (calico or flannel)"
|
|
||||||
type = string
|
type = string
|
||||||
|
description = "Choice of networking provider (calico or flannel)"
|
||||||
default = "calico"
|
default = "calico"
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "network_mtu" {
|
variable "network_mtu" {
|
||||||
|
type = number
|
||||||
description = "CNI interface MTU (applies to calico only). Use 8981 if using instances types with Jumbo frames."
|
description = "CNI interface MTU (applies to calico only). Use 8981 if using instances types with Jumbo frames."
|
||||||
type = string
|
default = 1480
|
||||||
default = "1480"
|
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "host_cidr" {
|
variable "host_cidr" {
|
||||||
description = "CIDR IPv4 range to assign to EC2 nodes"
|
|
||||||
type = string
|
type = string
|
||||||
|
description = "CIDR IPv4 range to assign to EC2 nodes"
|
||||||
default = "10.0.0.0/16"
|
default = "10.0.0.0/16"
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "pod_cidr" {
|
variable "pod_cidr" {
|
||||||
description = "CIDR IPv4 range to assign Kubernetes pods"
|
|
||||||
type = string
|
type = string
|
||||||
|
description = "CIDR IPv4 range to assign Kubernetes pods"
|
||||||
default = "10.2.0.0/16"
|
default = "10.2.0.0/16"
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "service_cidr" {
|
variable "service_cidr" {
|
||||||
|
type = string
|
||||||
description = <<EOD
|
description = <<EOD
|
||||||
CIDR IPv4 range to assign Kubernetes services.
|
CIDR IPv4 range to assign Kubernetes services.
|
||||||
The 1st IP will be reserved for kube_apiserver, the 10th IP will be reserved for coredns.
|
The 1st IP will be reserved for kube_apiserver, the 10th IP will be reserved for coredns.
|
||||||
EOD
|
EOD
|
||||||
|
|
||||||
|
|
||||||
type = string
|
|
||||||
default = "10.3.0.0/16"
|
default = "10.3.0.0/16"
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "cluster_domain_suffix" {
|
|
||||||
description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
|
|
||||||
type = string
|
|
||||||
default = "cluster.local"
|
|
||||||
}
|
|
||||||
|
|
||||||
variable "enable_reporting" {
|
variable "enable_reporting" {
|
||||||
type = string
|
type = bool
|
||||||
description = "Enable usage or analytics reporting to upstreams (Calico)"
|
description = "Enable usage or analytics reporting to upstreams (Calico)"
|
||||||
default = "false"
|
default = false
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "enable_aggregation" {
|
variable "enable_aggregation" {
|
||||||
|
type = bool
|
||||||
description = "Enable the Kubernetes Aggregation Layer (defaults to false)"
|
description = "Enable the Kubernetes Aggregation Layer (defaults to false)"
|
||||||
type = string
|
default = false
|
||||||
default = "false"
|
}
|
||||||
|
|
||||||
|
variable "worker_node_labels" {
|
||||||
|
type = list(string)
|
||||||
|
description = "List of initial worker node labels"
|
||||||
|
default = []
|
||||||
|
}
|
||||||
|
|
||||||
|
# unofficial, undocumented, unsupported
|
||||||
|
|
||||||
|
variable "cluster_domain_suffix" {
|
||||||
|
type = string
|
||||||
|
description = "Queries for domains with the suffix will be answered by CoreDNS. Default is cluster.local (e.g. foo.default.svc.cluster.local)"
|
||||||
|
default = "cluster.local"
|
||||||
}
|
}
|
||||||
|
|
||||||
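Since these variables are now `number` and `bool` typed, callers pass unquoted values; Terraform still converts quoted values where possible, but the unquoted form is idiomatic. A minimal sketch, with illustrative module name and values:

```tf
module "aws-tempest" {
  source = "git::https://github.com/poseidon/typhoon//aws/container-linux/kubernetes?ref=v1.16.2"

  # Terraform v0.12 typed values: numbers and bools are no longer quoted
  controller_count   = 1
  worker_count       = 2
  disk_size          = 40
  worker_price       = 0      # 0 keeps on-demand workers
  enable_reporting   = false
  enable_aggregation = false

  # ...other required cluster variables...
}
```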
@@ -19,5 +19,6 @@ module "workers" {
   service_cidr          = var.service_cidr
   cluster_domain_suffix = var.cluster_domain_suffix
   clc_snippets          = var.worker_clc_snippets
+  node_labels           = var.worker_node_labels
 }
@@ -61,6 +61,9 @@ systemd:
          --lock-file=/var/run/lock/kubelet.lock \
          --network-plugin=cni \
          --node-labels=node.kubernetes.io/node \
+          %{ for label in split(",", node_labels) }
+          --node-labels=${label} \
+          %{ endfor ~}
          --pod-manifest-path=/etc/kubernetes/manifests \
          --read-only-port=0 \
          --volume-plugin-dir=/var/lib/kubelet/volumeplugins

@@ -95,7 +98,7 @@ storage:
      contents:
        inline: |
          KUBELET_IMAGE_URL=docker://k8s.gcr.io/hyperkube
-          KUBELET_IMAGE_TAG=v1.16.0
+          KUBELET_IMAGE_TAG=v1.16.2
    - path: /etc/sysctl.d/max-user-watches.conf
      filesystem: root
      contents:

@@ -113,7 +116,7 @@ storage:
          --volume config,kind=host,source=/etc/kubernetes \
          --mount volume=config,target=/etc/kubernetes \
          --insecure-options=image \
-          docker://k8s.gcr.io/hyperkube:v1.16.0 \
+          docker://k8s.gcr.io/hyperkube:v1.16.2 \
          --net=host \
          --dns=host \
          --exec=/kubectl -- --kubeconfig=/etc/kubernetes/kubeconfig delete node $(hostname)
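The `%{ for }` / `%{ endfor ~}` lines above are Terraform template directives evaluated when the worker config is rendered, emitting one `--node-labels` flag per comma-separated entry. A minimal standalone sketch of the expansion, assuming two illustrative labels, that can be inspected with `terraform console`:

```tf
locals {
  # comma-joined labels, as passed in by the workers module (illustrative values)
  node_labels = join(",", ["pool=workers", "tier=general"])

  # mirrors the kubelet flag template above: one --node-labels flag per entry
  kubelet_label_flags = <<-EOT
    %{~ for label in split(",", local.node_labels) ~}
    --node-labels=${label} \
    %{~ endfor ~}
  EOT
}

output "kubelet_label_flags" {
  value = local.kubelet_label_flags
}
```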
@ -23,45 +23,45 @@ variable "security_groups" {
|
|||||||
# instances
|
# instances
|
||||||
|
|
||||||
variable "worker_count" {
|
variable "worker_count" {
|
||||||
type = string
|
type = number
|
||||||
default = "1"
|
|
||||||
description = "Number of instances"
|
description = "Number of instances"
|
||||||
|
default = 1
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "instance_type" {
|
variable "instance_type" {
|
||||||
type = string
|
type = string
|
||||||
default = "t3.small"
|
|
||||||
description = "EC2 instance type"
|
description = "EC2 instance type"
|
||||||
|
default = "t3.small"
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "os_image" {
|
variable "os_image" {
|
||||||
type = string
|
type = string
|
||||||
default = "coreos-stable"
|
|
||||||
description = "AMI channel for a Container Linux derivative (coreos-stable, coreos-beta, coreos-alpha, flatcar-stable, flatcar-beta, flatcar-alpha, flatcar-edge)"
|
description = "AMI channel for a Container Linux derivative (coreos-stable, coreos-beta, coreos-alpha, flatcar-stable, flatcar-beta, flatcar-alpha, flatcar-edge)"
|
||||||
|
default = "coreos-stable"
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "disk_size" {
|
variable "disk_size" {
|
||||||
type = string
|
type = number
|
||||||
default = "40"
|
|
||||||
description = "Size of the EBS volume in GB"
|
description = "Size of the EBS volume in GB"
|
||||||
|
default = 40
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "disk_type" {
|
variable "disk_type" {
|
||||||
type = string
|
type = string
|
||||||
default = "gp2"
|
|
||||||
description = "Type of the EBS volume (e.g. standard, gp2, io1)"
|
description = "Type of the EBS volume (e.g. standard, gp2, io1)"
|
||||||
|
default = "gp2"
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "disk_iops" {
|
variable "disk_iops" {
|
||||||
type = string
|
type = number
|
||||||
default = "0"
|
|
||||||
description = "IOPS of the EBS volume (required for io1)"
|
description = "IOPS of the EBS volume (required for io1)"
|
||||||
|
default = 0
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "spot_price" {
|
variable "spot_price" {
|
||||||
type = string
|
type = number
|
||||||
default = ""
|
description = "Spot price in USD for worker instances or 0 to use on-demand instances"
|
||||||
description = "Spot price in USD for autoscaling group spot instances. Leave as default empty string for autoscaling group to use on-demand instances. Note, switching in-place from spot to on-demand is not possible: https://github.com/terraform-providers/terraform-provider-aws/issues/4320"
|
default = 0
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "target_groups" {
|
variable "target_groups" {
|
||||||
@ -89,19 +89,22 @@ variable "ssh_authorized_key" {
|
|||||||
}
|
}
|
||||||
|
|
||||||
variable "service_cidr" {
|
variable "service_cidr" {
|
||||||
|
type = string
|
||||||
description = <<EOD
|
description = <<EOD
|
||||||
CIDR IPv4 range to assign Kubernetes services.
|
CIDR IPv4 range to assign Kubernetes services.
|
||||||
The 1st IP will be reserved for kube_apiserver, the 10th IP will be reserved for coredns.
|
The 1st IP will be reserved for kube_apiserver, the 10th IP will be reserved for coredns.
|
||||||
EOD
|
EOD
|
||||||
|
|
||||||
|
|
||||||
type = string
|
|
||||||
default = "10.3.0.0/16"
|
default = "10.3.0.0/16"
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "cluster_domain_suffix" {
|
variable "cluster_domain_suffix" {
|
||||||
description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
|
|
||||||
type = string
|
type = string
|
||||||
|
description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
|
||||||
default = "cluster.local"
|
default = "cluster.local"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
variable "node_labels" {
|
||||||
|
type = list(string)
|
||||||
|
description = "List of initial node labels"
|
||||||
|
default = []
|
||||||
|
}
|
||||||
|
@@ -46,7 +46,7 @@ resource "aws_autoscaling_group" "workers" {
 resource "aws_launch_configuration" "worker" {
   image_id      = local.ami_id
   instance_type = var.instance_type
-  spot_price    = var.spot_price
+  spot_price    = var.spot_price > 0 ? var.spot_price : null
   enable_monitoring = false

   user_data = data.ct_config.worker-ignition.rendered

@@ -86,6 +86,7 @@ data "template_file" "worker-config" {
     cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
     cluster_domain_suffix  = var.cluster_domain_suffix
     cgroup_driver          = local.flavor == "flatcar" && local.channel == "edge" ? "systemd" : "cgroupfs"
+    node_labels            = join(",", var.node_labels)
   }
 }
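With the number-typed `spot_price`, the conditional passes `null` to the launch configuration when the price is 0, which leaves the workers on plain on-demand capacity. A minimal sketch of requesting spot workers from an internal worker pool; the pool name, price, and elided arguments are illustrative:

```tf
module "tempest-worker-pool" {
  source = "git::https://github.com/poseidon/typhoon//aws/container-linux/kubernetes/workers?ref=v1.16.2"

  # request spot instances at up to $0.02/hour; leave at 0 (the default) for on-demand
  spot_price  = 0.02
  node_labels = ["pool=spot"]

  # ...other required worker pool variables...
}
```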
@@ -11,7 +11,7 @@ Typhoon distributes upstream Kubernetes, architectural conventions, and cluster

 ## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>

-* Kubernetes v1.16.0 (upstream)
+* Kubernetes v1.16.2 (upstream)
 * Single or multi-master, [Calico](https://www.projectcalico.org/) or [flannel](https://github.com/coreos/flannel) networking
 * On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
 * Advanced features like [worker pools](https://typhoon.psdn.io/advanced/worker-pools/), [spot](https://typhoon.psdn.io/cl/aws/#spot) workers, and [snippets](https://typhoon.psdn.io/advanced/customization/#container-linux) customization
@@ -13,9 +13,11 @@ data "aws_ami" "fedora-coreos" {
     values = ["hvm"]
   }

-  // pin on known ok versions as preview matures
   filter {
     name   = "name"
-    values = ["fedora-coreos-30.20190801.0-hvm"]
+    values = ["fedora-coreos-30.*.*-hvm"]
   }
+
+  # try to filter out dev images (AWS filters can't)
+  name_regex = "^fedora-coreos-30.[0-9]*.[0-9]*-hvm*"
 }
@@ -1,6 +1,6 @@
 # Kubernetes assets (kubeconfig, manifests)
 module "bootstrap" {
-  source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=539b725093c8cd94ba46603adb25ac5280562ec8"
+  source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=0fcc067476fa1463d057fd43760df222b7262b27"

   cluster_name = var.cluster_name
   api_servers  = [format("%s.%s", var.cluster_name, var.dns_zone)]
@@ -28,7 +28,7 @@ systemd:
          --network host \
          --volume /var/lib/etcd:/var/lib/etcd:rw,Z \
          --volume /etc/ssl/etcd:/etc/ssl/certs:ro,Z \
-          quay.io/coreos/etcd:v3.4.0
+          quay.io/coreos/etcd:v3.4.2
        ExecStop=/usr/bin/podman stop etcd
        [Install]
        WantedBy=multi-user.target

@@ -80,7 +80,7 @@ systemd:
          --volume /var/run:/var/run \
          --volume /var/run/lock:/var/run/lock:z \
          --volume /opt/cni/bin:/opt/cni/bin:z \
-          k8s.gcr.io/hyperkube:v1.16.0 /hyperkube kubelet \
+          k8s.gcr.io/hyperkube:v1.16.2 /hyperkube kubelet \
          --anonymous-auth=false \
          --authentication-token-webhook \
          --authorization-mode=Webhook \

@@ -121,7 +121,7 @@ systemd:
          --network host \
          --volume /opt/bootstrap/assets:/assets:ro,Z \
          --volume /opt/bootstrap/apply:/apply:ro,Z \
-          k8s.gcr.io/hyperkube:v1.16.0 \
+          k8s.gcr.io/hyperkube:v1.16.2 \
          /apply
        ExecStartPost=/bin/touch /opt/bootstrap/bootstrap.done
        ExecStartPost=-/usr/bin/podman stop bootstrap
@@ -18,57 +18,57 @@ variable "dns_zone_id" {

@@ -97,60 +97,66 @@ variable "ssh_authorized_key" {

The Fedora CoreOS module's variables.tf receives the same changes as the Container Linux variables.tf above: `controller_count`, `worker_count`, `disk_size`, `disk_iops`, `worker_price`, and `network_mtu` become `number`; `enable_reporting` and `enable_aggregation` become `bool`; attributes are reordered to type / description / default; a new `worker_node_labels` variable of type `list(string)` (default `[]`) is added; and `cluster_domain_suffix` moves under a new `# unofficial, undocumented, unsupported` comment with the CoreDNS description. The only wording difference is that `os_image` is described as "AMI channel for Fedora CoreOS (not yet used)".
|
|||||||
service_cidr = var.service_cidr
|
service_cidr = var.service_cidr
|
||||||
cluster_domain_suffix = var.cluster_domain_suffix
|
cluster_domain_suffix = var.cluster_domain_suffix
|
||||||
snippets = var.worker_snippets
|
snippets = var.worker_snippets
|
||||||
|
node_labels = var.worker_node_labels
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -13,9 +13,11 @@ data "aws_ami" "fedora-coreos" {
     values = ["hvm"]
   }

-  // pin on known ok versions as preview matures
   filter {
     name   = "name"
-    values = ["fedora-coreos-30.20190801.0-hvm"]
+    values = ["fedora-coreos-30.*.*-hvm"]
   }
+
+  # try to filter out dev images (AWS filters can't)
+  name_regex = "^fedora-coreos-30.[0-9]*.[0-9]*-hvm*"
 }
@@ -50,7 +50,7 @@ systemd:
          --volume /var/run:/var/run \
          --volume /var/run/lock:/var/run/lock:z \
          --volume /opt/cni/bin:/opt/cni/bin:z \
-          k8s.gcr.io/hyperkube:v1.16.0 /hyperkube kubelet \
+          k8s.gcr.io/hyperkube:v1.16.2 /hyperkube kubelet \
          --anonymous-auth=false \
          --authentication-token-webhook \
          --authorization-mode=Webhook \

@@ -66,6 +66,9 @@ systemd:
          --lock-file=/var/run/lock/kubelet.lock \
          --network-plugin=cni \
          --node-labels=node.kubernetes.io/node \
+          %{ for label in split(",", node_labels) }
+          --node-labels=${label} \
+          %{ endfor ~}
          --pod-manifest-path=/etc/kubernetes/manifests \
          --read-only-port=0 \
          --volume-plugin-dir=/var/lib/kubelet/volumeplugins
@ -23,45 +23,45 @@ variable "security_groups" {
|
|||||||
# instances
|
# instances
|
||||||
|
|
||||||
variable "worker_count" {
|
variable "worker_count" {
|
||||||
type = string
|
type = number
|
||||||
default = "1"
|
|
||||||
description = "Number of instances"
|
description = "Number of instances"
|
||||||
|
default = 1
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "instance_type" {
|
variable "instance_type" {
|
||||||
type = string
|
type = string
|
||||||
default = "t3.small"
|
|
||||||
description = "EC2 instance type"
|
description = "EC2 instance type"
|
||||||
|
default = "t3.small"
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "os_image" {
|
variable "os_image" {
|
||||||
type = string
|
type = string
|
||||||
default = "coreos-stable"
|
|
||||||
description = "AMI channel for Fedora CoreOS (not yet used)"
|
description = "AMI channel for Fedora CoreOS (not yet used)"
|
||||||
|
default = "coreos-stable"
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "disk_size" {
|
variable "disk_size" {
|
||||||
type = string
|
type = number
|
||||||
default = "40"
|
|
||||||
description = "Size of the EBS volume in GB"
|
description = "Size of the EBS volume in GB"
|
||||||
|
default = 40
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "disk_type" {
|
variable "disk_type" {
|
||||||
type = string
|
type = string
|
||||||
default = "gp2"
|
|
||||||
description = "Type of the EBS volume (e.g. standard, gp2, io1)"
|
description = "Type of the EBS volume (e.g. standard, gp2, io1)"
|
||||||
|
default = "gp2"
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "disk_iops" {
|
variable "disk_iops" {
|
||||||
type = string
|
type = number
|
||||||
default = "0"
|
|
||||||
description = "IOPS of the EBS volume (required for io1)"
|
description = "IOPS of the EBS volume (required for io1)"
|
||||||
|
default = 0
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "spot_price" {
|
variable "spot_price" {
|
||||||
type = string
|
type = number
|
||||||
default = ""
|
description = "Spot price in USD for worker instances or 0 to use on-demand instances"
|
||||||
description = "Spot price in USD for autoscaling group spot instances. Leave as default empty string for autoscaling group to use on-demand instances. Note, switching in-place from spot to on-demand is not possible: https://github.com/terraform-providers/terraform-provider-aws/issues/4320"
|
default = 0
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "target_groups" {
|
variable "target_groups" {
|
||||||
@ -89,19 +89,22 @@ variable "ssh_authorized_key" {
|
|||||||
}
|
}
|
||||||
|
|
||||||
variable "service_cidr" {
|
variable "service_cidr" {
|
||||||
|
type = string
|
||||||
description = <<EOD
|
description = <<EOD
|
||||||
CIDR IPv4 range to assign Kubernetes services.
|
CIDR IPv4 range to assign Kubernetes services.
|
||||||
The 1st IP will be reserved for kube_apiserver, the 10th IP will be reserved for coredns.
|
The 1st IP will be reserved for kube_apiserver, the 10th IP will be reserved for coredns.
|
||||||
EOD
|
EOD
|
||||||
|
|
||||||
|
|
||||||
type = string
|
|
||||||
default = "10.3.0.0/16"
|
default = "10.3.0.0/16"
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "cluster_domain_suffix" {
|
variable "cluster_domain_suffix" {
|
||||||
description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
|
|
||||||
type = string
|
type = string
|
||||||
|
description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
|
||||||
default = "cluster.local"
|
default = "cluster.local"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
variable "node_labels" {
|
||||||
|
type = list(string)
|
||||||
|
description = "List of initial node labels"
|
||||||
|
default = []
|
||||||
|
}
|
||||||
|
@@ -46,7 +46,7 @@ resource "aws_autoscaling_group" "workers" {
 resource "aws_launch_configuration" "worker" {
   image_id      = data.aws_ami.fedora-coreos.image_id
   instance_type = var.instance_type
-  spot_price    = var.spot_price
+  spot_price    = var.spot_price > 0 ? var.spot_price : null
   enable_monitoring = false

   user_data = data.ct_config.worker-ignition.rendered

@@ -85,6 +85,7 @@ data "template_file" "worker-config" {
     ssh_authorized_key     = var.ssh_authorized_key
     cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
     cluster_domain_suffix  = var.cluster_domain_suffix
+    node_labels            = join(",", var.node_labels)
   }
 }
@@ -11,7 +11,7 @@ Typhoon distributes upstream Kubernetes, architectural conventions, and cluster

 ## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>

-* Kubernetes v1.16.0 (upstream)
+* Kubernetes v1.16.2 (upstream)
 * Single or multi-master, [Calico](https://www.projectcalico.org/) or [flannel](https://github.com/coreos/flannel) networking
 * On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
 * Advanced features like [worker pools](https://typhoon.psdn.io/advanced/worker-pools/), [low-priority](https://typhoon.psdn.io/cl/azure/#low-priority) workers, and [snippets](https://typhoon.psdn.io/advanced/customization/#container-linux) customization
@@ -1,6 +1,6 @@
 # Kubernetes assets (kubeconfig, manifests)
 module "bootstrap" {
-  source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=539b725093c8cd94ba46603adb25ac5280562ec8"
+  source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=0fcc067476fa1463d057fd43760df222b7262b27"

   cluster_name = var.cluster_name
   api_servers  = [format("%s.%s", var.cluster_name, var.dns_zone)]
@ -7,7 +7,7 @@ systemd:
|
|||||||
- name: 40-etcd-cluster.conf
|
- name: 40-etcd-cluster.conf
|
||||||
contents: |
|
contents: |
|
||||||
[Service]
|
[Service]
|
||||||
Environment="ETCD_IMAGE_TAG=v3.4.0"
|
Environment="ETCD_IMAGE_TAG=v3.4.2"
|
||||||
Environment="ETCD_NAME=${etcd_name}"
|
Environment="ETCD_NAME=${etcd_name}"
|
||||||
Environment="ETCD_ADVERTISE_CLIENT_URLS=https://${etcd_domain}:2379"
|
Environment="ETCD_ADVERTISE_CLIENT_URLS=https://${etcd_domain}:2379"
|
||||||
Environment="ETCD_INITIAL_ADVERTISE_PEER_URLS=https://${etcd_domain}:2380"
|
Environment="ETCD_INITIAL_ADVERTISE_PEER_URLS=https://${etcd_domain}:2380"
|
||||||
@ -111,7 +111,7 @@ systemd:
|
|||||||
--volume script,kind=host,source=/opt/bootstrap/apply \
|
--volume script,kind=host,source=/opt/bootstrap/apply \
|
||||||
--mount volume=script,target=/apply \
|
--mount volume=script,target=/apply \
|
||||||
--insecure-options=image \
|
--insecure-options=image \
|
||||||
docker://k8s.gcr.io/hyperkube:v1.16.0 \
|
docker://k8s.gcr.io/hyperkube:v1.16.2 \
|
||||||
--net=host \
|
--net=host \
|
||||||
--dns=host \
|
--dns=host \
|
||||||
--exec=/apply
|
--exec=/apply
|
||||||
@ -132,7 +132,7 @@ storage:
|
|||||||
contents:
|
contents:
|
||||||
inline: |
|
inline: |
|
||||||
KUBELET_IMAGE_URL=docker://k8s.gcr.io/hyperkube
|
KUBELET_IMAGE_URL=docker://k8s.gcr.io/hyperkube
|
||||||
KUBELET_IMAGE_TAG=v1.16.0
|
KUBELET_IMAGE_TAG=v1.16.2
|
||||||
- path: /opt/bootstrap/apply
|
- path: /opt/bootstrap/apply
|
||||||
filesystem: root
|
filesystem: root
|
||||||
mode: 0544
|
mode: 0544
|
||||||
|
@ -23,27 +23,27 @@ variable "dns_zone_group" {
|
|||||||
# instances
|
# instances
|
||||||
|
|
||||||
variable "controller_count" {
|
variable "controller_count" {
|
||||||
type = string
|
type = number
|
||||||
default = "1"
|
|
||||||
description = "Number of controllers (i.e. masters)"
|
description = "Number of controllers (i.e. masters)"
|
||||||
|
default = 1
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "worker_count" {
|
variable "worker_count" {
|
||||||
type = string
|
type = number
|
||||||
default = "1"
|
|
||||||
description = "Number of workers"
|
description = "Number of workers"
|
||||||
|
default = 1
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "controller_type" {
|
variable "controller_type" {
|
||||||
type = string
|
type = string
|
||||||
default = "Standard_B2s"
|
|
||||||
description = "Machine type for controllers (see `az vm list-skus --location centralus`)"
|
description = "Machine type for controllers (see `az vm list-skus --location centralus`)"
|
||||||
|
default = "Standard_B2s"
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "worker_type" {
|
variable "worker_type" {
|
||||||
type = string
|
type = string
|
||||||
default = "Standard_DS1_v2"
|
|
||||||
description = "Machine type for workers (see `az vm list-skus --location centralus`)"
|
description = "Machine type for workers (see `az vm list-skus --location centralus`)"
|
||||||
|
default = "Standard_DS1_v2"
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "os_image" {
|
variable "os_image" {
|
||||||
@ -53,15 +53,15 @@ variable "os_image" {
|
|||||||
}
|
}
|
||||||
|
|
||||||
variable "disk_size" {
|
variable "disk_size" {
|
||||||
type = string
|
type = number
|
||||||
default = "40"
|
|
||||||
description = "Size of the disk in GB"
|
description = "Size of the disk in GB"
|
||||||
|
default = 40
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "worker_priority" {
|
variable "worker_priority" {
|
||||||
type = string
|
type = string
|
||||||
default = "Regular"
|
|
||||||
description = "Set worker priority to Low to use reduced cost surplus capacity, with the tradeoff that instances can be deallocated at any time."
|
description = "Set worker priority to Low to use reduced cost surplus capacity, with the tradeoff that instances can be deallocated at any time."
|
||||||
|
default = "Regular"
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "controller_clc_snippets" {
|
variable "controller_clc_snippets" {
|
||||||
@ -84,54 +84,60 @@ variable "ssh_authorized_key" {
|
|||||||
}
|
}
|
||||||
|
|
||||||
variable "asset_dir" {
|
variable "asset_dir" {
|
||||||
description = "Path to a directory where generated assets should be placed (contains secrets)"
|
|
||||||
type = string
|
type = string
|
||||||
|
description = "Absolute path to a directory where generated assets should be placed (contains secrets)"
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "networking" {
|
variable "networking" {
|
||||||
description = "Choice of networking provider (flannel or calico)"
|
|
||||||
type = string
|
type = string
|
||||||
default = "flannel"
|
description = "Choice of networking provider (flannel or calico)"
|
||||||
|
default = "calico"
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "host_cidr" {
|
variable "host_cidr" {
|
||||||
description = "CIDR IPv4 range to assign to instances"
|
|
||||||
type = string
|
type = string
|
||||||
|
description = "CIDR IPv4 range to assign to instances"
|
||||||
default = "10.0.0.0/16"
|
default = "10.0.0.0/16"
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "pod_cidr" {
|
variable "pod_cidr" {
|
||||||
description = "CIDR IPv4 range to assign Kubernetes pods"
|
|
||||||
type = string
|
type = string
|
||||||
|
description = "CIDR IPv4 range to assign Kubernetes pods"
|
||||||
default = "10.2.0.0/16"
|
default = "10.2.0.0/16"
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "service_cidr" {
|
variable "service_cidr" {
|
||||||
|
type = string
|
||||||
description = <<EOD
|
description = <<EOD
|
||||||
CIDR IPv4 range to assign Kubernetes services.
|
CIDR IPv4 range to assign Kubernetes services.
|
||||||
The 1st IP will be reserved for kube_apiserver, the 10th IP will be reserved for coredns.
|
The 1st IP will be reserved for kube_apiserver, the 10th IP will be reserved for coredns.
|
||||||
EOD
|
EOD
|
||||||
|
|
||||||
|
|
||||||
type = string
|
|
||||||
default = "10.3.0.0/16"
|
default = "10.3.0.0/16"
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "cluster_domain_suffix" {
|
|
||||||
description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
|
|
||||||
type = string
|
|
||||||
default = "cluster.local"
|
|
||||||
}
|
|
||||||
|
|
||||||
variable "enable_reporting" {
|
variable "enable_reporting" {
|
||||||
type = string
|
type = bool
|
||||||
description = "Enable usage or analytics reporting to upstreams (Calico)"
|
description = "Enable usage or analytics reporting to upstreams (Calico)"
|
||||||
default = "false"
|
default = false
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "enable_aggregation" {
|
variable "enable_aggregation" {
|
||||||
|
type = bool
|
||||||
description = "Enable the Kubernetes Aggregation Layer (defaults to false)"
|
description = "Enable the Kubernetes Aggregation Layer (defaults to false)"
|
||||||
type = string
|
default = false
|
||||||
default = "false"
|
}
|
||||||
|
|
||||||
|
variable "worker_node_labels" {
|
||||||
|
type = list(string)
|
||||||
|
description = "List of initial worker node labels"
|
||||||
|
default = []
|
||||||
|
}
|
||||||
|
|
||||||
|
# unofficial, undocumented, unsupported
|
||||||
|
|
||||||
|
variable "cluster_domain_suffix" {
|
||||||
|
type = string
|
||||||
|
description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
|
||||||
|
default = "cluster.local"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -20,5 +20,6 @@ module "workers" {
|
|||||||
service_cidr = var.service_cidr
|
service_cidr = var.service_cidr
|
||||||
cluster_domain_suffix = var.cluster_domain_suffix
|
cluster_domain_suffix = var.cluster_domain_suffix
|
||||||
clc_snippets = var.worker_clc_snippets
|
clc_snippets = var.worker_clc_snippets
|
||||||
|
node_labels = var.worker_node_labels
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -59,6 +59,9 @@ systemd:
|
|||||||
--lock-file=/var/run/lock/kubelet.lock \
|
--lock-file=/var/run/lock/kubelet.lock \
|
||||||
--network-plugin=cni \
|
--network-plugin=cni \
|
||||||
--node-labels=node.kubernetes.io/node \
|
--node-labels=node.kubernetes.io/node \
|
||||||
|
%{ for label in split(",", node_labels) }
|
||||||
|
--node-labels=${label} \
|
||||||
|
%{ endfor ~}
|
||||||
--pod-manifest-path=/etc/kubernetes/manifests \
|
--pod-manifest-path=/etc/kubernetes/manifests \
|
||||||
--read-only-port=0 \
|
--read-only-port=0 \
|
||||||
--volume-plugin-dir=/var/lib/kubelet/volumeplugins
|
--volume-plugin-dir=/var/lib/kubelet/volumeplugins
|
||||||
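For reference, a sketch of what the `%{ for ... }` directive above renders once a comma-separated `node_labels` string reaches the worker template; the label values are illustrative, not defaults introduced by this change:

    # with node_labels = "pool=foo,tier=bar", the kubelet flags become roughly:
    --node-labels=node.kubernetes.io/node \
    --node-labels=pool=foo \
    --node-labels=tier=bar \
    --pod-manifest-path=/etc/kubernetes/manifests \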
@@ -93,7 +96,7 @@ storage:
       contents:
         inline: |
           KUBELET_IMAGE_URL=docker://k8s.gcr.io/hyperkube
-          KUBELET_IMAGE_TAG=v1.16.0
+          KUBELET_IMAGE_TAG=v1.16.2
     - path: /etc/sysctl.d/max-user-watches.conf
       filesystem: root
       contents:
@@ -111,7 +114,7 @@ storage:
           --volume config,kind=host,source=/etc/kubernetes \
           --mount volume=config,target=/etc/kubernetes \
           --insecure-options=image \
-          docker://k8s.gcr.io/hyperkube:v1.16.0 \
+          docker://k8s.gcr.io/hyperkube:v1.16.2 \
           --net=host \
           --dns=host \
           --exec=/kubectl -- --kubeconfig=/etc/kubernetes/kubeconfig delete node $(hostname | tr '[:upper:]' '[:lower:]')

@@ -33,27 +33,27 @@ variable "backend_address_pool_id" {
 # instances
 
 variable "worker_count" {
-  type        = string
-  default     = "1"
+  type        = number
   description = "Number of instances"
+  default     = 1
 }
 
 variable "vm_type" {
   type        = string
-  default     = "Standard_F1"
   description = "Machine type for instances (see `az vm list-skus --location centralus`)"
+  default     = "Standard_DS1_v2"
 }
 
 variable "os_image" {
   type        = string
-  default     = "coreos-stable"
   description = "Channel for a Container Linux derivative (coreos-stable, coreos-beta, coreos-alpha)"
+  default     = "coreos-stable"
 }
 
 variable "priority" {
   type        = string
-  default     = "Regular"
   description = "Set priority to Low to use reduced cost surplus capacity, with the tradeoff that instances can be evicted at any time."
+  default     = "Regular"
 }
 
 variable "clc_snippets" {
@@ -75,16 +75,22 @@ variable "ssh_authorized_key" {
 }
 
 variable "service_cidr" {
+  type        = string
   description = <<EOD
 CIDR IPv4 range to assign Kubernetes services.
 The 1st IP will be reserved for kube_apiserver, the 10th IP will be reserved for coredns.
 EOD
 
-  type    = string
   default = "10.3.0.0/16"
 }
 
+variable "node_labels" {
+  type        = list(string)
+  description = "List of initial node labels"
+  default     = []
+}
+
+# unofficial, undocumented, unsupported
+
 variable "cluster_domain_suffix" {
   description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
   type        = string

@@ -111,6 +111,7 @@ data "template_file" "worker-config" {
     ssh_authorized_key     = var.ssh_authorized_key
     cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
     cluster_domain_suffix  = var.cluster_domain_suffix
+    node_labels            = join(",", var.node_labels)
   }
 }
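As an aside, the `cidrhost(var.service_cidr, 10)` expression above is what turns the service CIDR into the CoreDNS service IP described in the `service_cidr` heredoc. A quick worked example with the default range (just the arithmetic, not new configuration):

    # cidrhost("10.3.0.0/16", 1)  = 10.3.0.1    (reserved for kube-apiserver)
    # cidrhost("10.3.0.0/16", 10) = 10.3.0.10   (cluster_dns_service_ip, used by CoreDNS)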
@@ -11,7 +11,7 @@ Typhoon distributes upstream Kubernetes, architectural conventions, and cluster
 
 ## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>
 
-* Kubernetes v1.16.0 (upstream)
+* Kubernetes v1.16.2 (upstream)
 * Single or multi-master, [Calico](https://www.projectcalico.org/) or [flannel](https://github.com/coreos/flannel) networking
 * On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
 * Advanced features like [snippets](https://typhoon.psdn.io/advanced/customization/#container-linux) customization

@@ -1,10 +1,10 @@
 # Kubernetes assets (kubeconfig, manifests)
 module "bootstrap" {
-  source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=539b725093c8cd94ba46603adb25ac5280562ec8"
+  source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=0fcc067476fa1463d057fd43760df222b7262b27"
 
   cluster_name = var.cluster_name
   api_servers  = [var.k8s_domain_name]
-  etcd_servers = var.controller_domains
+  etcd_servers = var.controllers.*.domain
   asset_dir    = var.asset_dir
   networking   = var.networking
   network_mtu  = var.network_mtu

@@ -7,7 +7,7 @@ systemd:
       - name: 40-etcd-cluster.conf
         contents: |
           [Service]
-          Environment="ETCD_IMAGE_TAG=v3.4.0"
+          Environment="ETCD_IMAGE_TAG=v3.4.2"
           Environment="ETCD_NAME=${etcd_name}"
           Environment="ETCD_ADVERTISE_CLIENT_URLS=https://${domain_name}:2379"
           Environment="ETCD_INITIAL_ADVERTISE_PEER_URLS=https://${domain_name}:2380"
@@ -126,7 +126,7 @@ systemd:
           --volume script,kind=host,source=/opt/bootstrap/apply \
           --mount volume=script,target=/apply \
           --insecure-options=image \
-          docker://k8s.gcr.io/hyperkube:v1.16.0 \
+          docker://k8s.gcr.io/hyperkube:v1.16.2 \
           --net=host \
           --dns=host \
           --exec=/apply
@@ -141,7 +141,7 @@ storage:
       contents:
         inline: |
           KUBELET_IMAGE_URL=docker://k8s.gcr.io/hyperkube
-          KUBELET_IMAGE_TAG=v1.16.0
+          KUBELET_IMAGE_TAG=v1.16.2
     - path: /etc/hostname
       filesystem: root
       mode: 0644

@@ -35,7 +35,6 @@ storage:
             -d ${install_disk} \
             -C ${os_channel} \
             -V ${os_version} \
-            -o "${container_linux_oem}" \
             ${baseurl_flag} \
             -i ignition.json
           udevadm settle

@@ -91,7 +91,7 @@ storage:
       contents:
         inline: |
           KUBELET_IMAGE_URL=docker://k8s.gcr.io/hyperkube
-          KUBELET_IMAGE_TAG=v1.16.0
+          KUBELET_IMAGE_TAG=v1.16.2
     - path: /etc/hostname
       filesystem: root
       mode: 0644

@@ -1,34 +1,34 @@
 resource "matchbox_group" "install" {
-  count = length(var.controller_names) + length(var.worker_names)
+  count = length(var.controllers) + length(var.workers)
 
-  name = format("install-%s", element(concat(var.controller_names, var.worker_names), count.index))
+  name = format("install-%s", concat(var.controllers.*.name, var.workers.*.name)[count.index])
 
   # pick one of 4 Matchbox profiles (Container Linux or Flatcar, cached or non-cached)
-  profile = local.flavor == "flatcar" ? var.cached_install == "true" ? element(matchbox_profile.cached-flatcar-linux-install.*.name, count.index) : element(matchbox_profile.flatcar-install.*.name, count.index) : var.cached_install == "true" ? element(matchbox_profile.cached-container-linux-install.*.name, count.index) : element(matchbox_profile.container-linux-install.*.name, count.index)
+  profile = local.flavor == "flatcar" ? var.cached_install ? matchbox_profile.cached-flatcar-linux-install.*.name[count.index] : matchbox_profile.flatcar-install.*.name[count.index] : var.cached_install ? matchbox_profile.cached-container-linux-install.*.name[count.index] : matchbox_profile.container-linux-install.*.name[count.index]
 
   selector = {
-    mac = element(concat(var.controller_macs, var.worker_macs), count.index)
+    mac = concat(var.controllers.*.mac, var.workers.*.mac)[count.index]
   }
 }
 
 resource "matchbox_group" "controller" {
-  count   = length(var.controller_names)
-  name    = format("%s-%s", var.cluster_name, element(var.controller_names, count.index))
-  profile = element(matchbox_profile.controllers.*.name, count.index)
+  count   = length(var.controllers)
+  name    = format("%s-%s", var.cluster_name, var.controllers[count.index].name)
+  profile = matchbox_profile.controllers.*.name[count.index]
 
   selector = {
-    mac = element(var.controller_macs, count.index)
+    mac = var.controllers[count.index].mac
     os  = "installed"
   }
 }
 
 resource "matchbox_group" "worker" {
-  count   = length(var.worker_names)
-  name    = format("%s-%s", var.cluster_name, element(var.worker_names, count.index))
-  profile = element(matchbox_profile.workers.*.name, count.index)
+  count   = length(var.workers)
+  name    = format("%s-%s", var.cluster_name, var.workers[count.index].name)
+  profile = matchbox_profile.workers.*.name[count.index]
 
   selector = {
-    mac = element(var.worker_macs, count.index)
+    mac = var.workers[count.index].mac
     os  = "installed"
   }
 }

@@ -1,15 +1,14 @@
 locals {
   # coreos-stable -> coreos flavor, stable channel
   # flatcar-stable -> flatcar flavor, stable channel
-  flavor = element(split("-", var.os_channel), 0)
-
-  channel = element(split("-", var.os_channel), 1)
+  flavor  = split("-", var.os_channel)[0]
+  channel = split("-", var.os_channel)[1]
 }
 
 // Container Linux Install profile (from release.core-os.net)
 resource "matchbox_profile" "container-linux-install" {
-  count = length(var.controller_names) + length(var.worker_names)
-  name  = format("%s-container-linux-install-%s", var.cluster_name, element(concat(var.controller_names, var.worker_names), count.index))
+  count = length(var.controllers) + length(var.workers)
+  name  = format("%s-container-linux-install-%s", var.cluster_name, concat(var.controllers.*.name, var.workers.*.name)[count.index])
 
   kernel = "${var.download_protocol}://${local.channel}.release.core-os.net/amd64-usr/${var.os_version}/coreos_production_pxe.vmlinuz"
 
@@ -26,11 +25,11 @@ resource "matchbox_profile" "container-linux-install" {
     var.kernel_args,
   ])
 
-  container_linux_config = element(data.template_file.container-linux-install-configs.*.rendered, count.index)
+  container_linux_config = data.template_file.container-linux-install-configs.*.rendered[count.index]
 }
 
 data "template_file" "container-linux-install-configs" {
-  count = length(var.controller_names) + length(var.worker_names)
+  count = length(var.controllers) + length(var.workers)
 
   template = file("${path.module}/cl/install.yaml.tmpl")
 
@@ -40,7 +39,6 @@ data "template_file" "container-linux-install-configs" {
     os_version          = var.os_version
     ignition_endpoint   = format("%s/ignition", var.matchbox_http_endpoint)
     install_disk        = var.install_disk
-    container_linux_oem = var.container_linux_oem
     ssh_authorized_key  = var.ssh_authorized_key
     # only cached-container-linux profile adds -b baseurl
    baseurl_flag = ""
@@ -50,8 +48,8 @@ data "template_file" "container-linux-install-configs" {
 // Container Linux Install profile (from matchbox /assets cache)
 // Note: Admin must have downloaded os_version into matchbox assets/coreos.
 resource "matchbox_profile" "cached-container-linux-install" {
-  count = length(var.controller_names) + length(var.worker_names)
-  name  = format("%s-cached-container-linux-install-%s", var.cluster_name, element(concat(var.controller_names, var.worker_names), count.index))
+  count = length(var.controllers) + length(var.workers)
+  name  = format("%s-cached-container-linux-install-%s", var.cluster_name, concat(var.controllers.*.name, var.workers.*.name)[count.index])
 
   kernel = "/assets/coreos/${var.os_version}/coreos_production_pxe.vmlinuz"
 
@@ -68,11 +66,11 @@ resource "matchbox_profile" "cached-container-linux-install" {
     var.kernel_args,
   ])
 
-  container_linux_config = element(data.template_file.cached-container-linux-install-configs.*.rendered, count.index)
+  container_linux_config = data.template_file.cached-container-linux-install-configs.*.rendered[count.index]
 }
 
 data "template_file" "cached-container-linux-install-configs" {
-  count = length(var.controller_names) + length(var.worker_names)
+  count = length(var.controllers) + length(var.workers)
 
   template = file("${path.module}/cl/install.yaml.tmpl")
 
@@ -82,7 +80,6 @@ data "template_file" "cached-container-linux-install-configs" {
     os_version          = var.os_version
     ignition_endpoint   = format("%s/ignition", var.matchbox_http_endpoint)
     install_disk        = var.install_disk
-    container_linux_oem = var.container_linux_oem
     ssh_authorized_key  = var.ssh_authorized_key
     # profile uses -b baseurl to install from matchbox cache
     baseurl_flag = "-b ${var.matchbox_http_endpoint}/assets/${local.flavor}"
@@ -91,8 +88,8 @@ data "template_file" "cached-container-linux-install-configs" {
 
 // Flatcar Linux install profile (from release.flatcar-linux.net)
 resource "matchbox_profile" "flatcar-install" {
-  count = length(var.controller_names) + length(var.worker_names)
-  name  = format("%s-flatcar-install-%s", var.cluster_name, element(concat(var.controller_names, var.worker_names), count.index))
+  count = length(var.controllers) + length(var.workers)
+  name  = format("%s-flatcar-install-%s", var.cluster_name, concat(var.controllers.*.name, var.workers.*.name)[count.index])
 
   kernel = "${var.download_protocol}://${local.channel}.release.flatcar-linux.net/amd64-usr/${var.os_version}/flatcar_production_pxe.vmlinuz"
 
@@ -109,14 +106,14 @@ resource "matchbox_profile" "flatcar-install" {
     var.kernel_args,
   ])
 
-  container_linux_config = element(data.template_file.container-linux-install-configs.*.rendered, count.index)
+  container_linux_config = data.template_file.container-linux-install-configs.*.rendered[count.index]
 }
 
 // Flatcar Linux Install profile (from matchbox /assets cache)
 // Note: Admin must have downloaded os_version into matchbox assets/flatcar.
 resource "matchbox_profile" "cached-flatcar-linux-install" {
-  count = length(var.controller_names) + length(var.worker_names)
-  name  = format("%s-cached-flatcar-linux-install-%s", var.cluster_name, element(concat(var.controller_names, var.worker_names), count.index))
+  count = length(var.controllers) + length(var.workers)
+  name  = format("%s-cached-flatcar-linux-install-%s", var.cluster_name, concat(var.controllers.*.name, var.workers.*.name)[count.index])
 
   kernel = "/assets/flatcar/${var.os_version}/flatcar_production_pxe.vmlinuz"
 
@@ -133,32 +130,32 @@ resource "matchbox_profile" "cached-flatcar-linux-install" {
     var.kernel_args,
   ])
 
-  container_linux_config = element(data.template_file.cached-container-linux-install-configs.*.rendered, count.index)
+  container_linux_config = data.template_file.cached-container-linux-install-configs.*.rendered[count.index]
 }
 
 // Kubernetes Controller profiles
 resource "matchbox_profile" "controllers" {
-  count        = length(var.controller_names)
-  name         = format("%s-controller-%s", var.cluster_name, element(var.controller_names, count.index))
-  raw_ignition = element(data.ct_config.controller-ignitions.*.rendered, count.index)
+  count        = length(var.controllers)
+  name         = format("%s-controller-%s", var.cluster_name, var.controllers.*.name[count.index])
+  raw_ignition = data.ct_config.controller-ignitions.*.rendered[count.index]
 }
 
 data "ct_config" "controller-ignitions" {
-  count        = length(var.controller_names)
-  content      = element(data.template_file.controller-configs.*.rendered, count.index)
+  count        = length(var.controllers)
+  content      = data.template_file.controller-configs.*.rendered[count.index]
   pretty_print = false
-  snippets     = local.clc_map[element(var.controller_names, count.index)]
+  snippets     = local.clc_map[var.controllers.*.name[count.index]]
 }
 
 data "template_file" "controller-configs" {
-  count = length(var.controller_names)
+  count = length(var.controllers)
 
   template = file("${path.module}/cl/controller.yaml.tmpl")
 
   vars = {
-    domain_name          = element(var.controller_domains, count.index)
-    etcd_name            = element(var.controller_names, count.index)
-    etcd_initial_cluster = join(",", formatlist("%s=https://%s:2380", var.controller_names, var.controller_domains))
+    domain_name          = var.controllers.*.domain[count.index]
+    etcd_name            = var.controllers.*.name[count.index]
+    etcd_initial_cluster = join(",", formatlist("%s=https://%s:2380", var.controllers.*.name, var.controllers.*.domain))
     cgroup_driver          = var.os_channel == "flatcar-edge" ? "systemd" : "cgroupfs"
     cluster_dns_service_ip = module.bootstrap.cluster_dns_service_ip
     cluster_domain_suffix  = var.cluster_domain_suffix
@@ -168,25 +165,25 @@ data "template_file" "controller-configs" {
 
 // Kubernetes Worker profiles
 resource "matchbox_profile" "workers" {
-  count        = length(var.worker_names)
-  name         = format("%s-worker-%s", var.cluster_name, element(var.worker_names, count.index))
-  raw_ignition = element(data.ct_config.worker-ignitions.*.rendered, count.index)
+  count        = length(var.workers)
+  name         = format("%s-worker-%s", var.cluster_name, var.workers.*.name[count.index])
+  raw_ignition = data.ct_config.worker-ignitions.*.rendered[count.index]
 }
 
 data "ct_config" "worker-ignitions" {
-  count        = length(var.worker_names)
-  content      = element(data.template_file.worker-configs.*.rendered, count.index)
+  count        = length(var.workers)
+  content      = data.template_file.worker-configs.*.rendered[count.index]
   pretty_print = false
-  snippets     = local.clc_map[element(var.worker_names, count.index)]
+  snippets     = local.clc_map[var.workers.*.name[count.index]]
 }
 
 data "template_file" "worker-configs" {
-  count = length(var.worker_names)
+  count = length(var.workers)
 
   template = file("${path.module}/cl/worker.yaml.tmpl")
 
   vars = {
-    domain_name            = element(var.worker_domains, count.index)
+    domain_name            = var.workers.*.domain[count.index]
     cgroup_driver          = var.os_channel == "flatcar-edge" ? "systemd" : "cgroupfs"
     cluster_dns_service_ip = module.bootstrap.cluster_dns_service_ip
     cluster_domain_suffix  = var.cluster_domain_suffix
@@ -200,7 +197,7 @@ locals {
   # Default Container Linux config snippets map every node names to list("\n") so
   # all lookups succeed
   clc_defaults = zipmap(
-    concat(var.controller_names, var.worker_names),
+    concat(var.controllers.*.name, var.workers.*.name),
     chunklist(data.template_file.clc-default-snippets.*.rendered, 1),
   )
 
@@ -210,7 +207,7 @@ locals {
 
 // Horrible hack to generate a Terraform list of node count length
 data "template_file" "clc-default-snippets" {
-  count    = length(var.controller_names) + length(var.worker_names)
+  count    = length(var.controllers) + length(var.workers)
   template = "\n"
 }

@@ -1,6 +1,6 @@
 # Secure copy assets to controllers. Activates kubelet.service
 resource "null_resource" "copy-controller-secrets" {
-  count = length(var.controller_names)
+  count = length(var.controllers)
 
   # Without depends_on, remote-exec could start and wait for machines before
   # matchbox groups are written, causing a deadlock.
@@ -13,7 +13,7 @@ resource "null_resource" "copy-controller-secrets" {
 
   connection {
     type    = "ssh"
-    host    = var.controller_domains[count.index]
+    host    = var.controllers.*.domain[count.index]
     user    = "core"
     timeout = "60m"
   }
@@ -76,7 +76,7 @@ resource "null_resource" "copy-controller-secrets" {
       "sudo chown -R etcd:etcd /etc/ssl/etcd",
      "sudo chmod -R 500 /etc/ssl/etcd",
      "sudo mv $HOME/assets /opt/bootstrap/assets",
-      "sudo mkdir -p /etc/kubernetes/manifests"
+      "sudo mkdir -p /etc/kubernetes/manifests",
       "sudo mkdir -p /etc/kubernetes/bootstrap-secrets",
      "sudo mv $HOME/kubeconfig /etc/kubernetes/kubeconfig",
       "sudo cp -r /opt/bootstrap/assets/tls/* /etc/kubernetes/bootstrap-secrets/",
@@ -88,7 +88,7 @@ resource "null_resource" "copy-controller-secrets" {
 
 # Secure copy kubeconfig to all workers. Activates kubelet.service
 resource "null_resource" "copy-worker-secrets" {
-  count = length(var.worker_names)
+  count = length(var.workers)
 
   # Without depends_on, remote-exec could start and wait for machines before
   # matchbox groups are written, causing a deadlock.
@@ -100,7 +100,7 @@ resource "null_resource" "copy-worker-secrets" {
 
   connection {
     type    = "ssh"
-    host    = var.worker_domains[count.index]
+    host    = var.workers.*.domain[count.index]
     user    = "core"
     timeout = "60m"
   }
@@ -129,7 +129,7 @@ resource "null_resource" "bootstrap" {
 
   connection {
     type    = "ssh"
-    host    = var.controller_domains[0]
+    host    = var.controllers[0].domain
     user    = "core"
     timeout = "15m"
   }

@@ -21,36 +21,32 @@ variable "os_version" {
 }
 
 # machines
-# Terraform's crude "type system" does not properly support lists of maps so we do this.
 
-variable "controller_names" {
-  type        = list(string)
-  description = "Ordered list of controller names (e.g. [node1])"
-}
-
-variable "controller_macs" {
-  type        = list(string)
-  description = "Ordered list of controller identifying MAC addresses (e.g. [52:54:00:a1:9c:ae])"
-}
-
-variable "controller_domains" {
-  type        = list(string)
-  description = "Ordered list of controller FQDNs (e.g. [node1.example.com])"
-}
-
-variable "worker_names" {
-  type        = list(string)
-  description = "Ordered list of worker names (e.g. [node2, node3])"
-}
-
-variable "worker_macs" {
-  type        = list(string)
-  description = "Ordered list of worker identifying MAC addresses (e.g. [52:54:00:b2:2f:86, 52:54:00:c3:61:77])"
-}
-
-variable "worker_domains" {
-  type        = list(string)
-  description = "Ordered list of worker FQDNs (e.g. [node2.example.com, node3.example.com])"
-}
+variable "controllers" {
+  type = list(object({
+    name   = string
+    mac    = string
+    domain = string
+  }))
+  description = <<EOD
+List of controller machine details (unique name, identifying MAC address, FQDN)
+[{ name = "node1", mac = "52:54:00:a1:9c:ae", domain = "node1.example.com"}]
+EOD
+}
+
+variable "workers" {
+  type = list(object({
+    name   = string
+    mac    = string
+    domain = string
+  }))
+  description = <<EOD
+List of worker machine details (unique name, identifying MAC address, FQDN)
+[
+  { name = "node2", mac = "52:54:00:b2:2f:86", domain = "node2.example.com"},
+  { name = "node3", mac = "52:54:00:c3:61:77", domain = "node3.example.com"}
+]
+EOD
+}
 
 variable "clc_snippets" {
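With machine details now expressed as typed lists of objects, a bare-metal cluster definition passes `controllers` and `workers` directly. A hedged sketch that simply reuses the illustrative names, MAC addresses, and FQDNs from the variable descriptions above (the enclosing module block and its other required arguments are omitted):

    controllers = [
      { name = "node1", mac = "52:54:00:a1:9c:ae", domain = "node1.example.com" },
    ]

    workers = [
      { name = "node2", mac = "52:54:00:b2:2f:86", domain = "node2.example.com" },
      { name = "node3", mac = "52:54:00:c3:61:77", domain = "node3.example.com" },
    ]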
@ -62,8 +58,8 @@ variable "clc_snippets" {
|
|||||||
# configuration
|
# configuration
|
||||||
|
|
||||||
variable "k8s_domain_name" {
|
variable "k8s_domain_name" {
|
||||||
description = "Controller DNS name which resolves to a controller instance. Workers and kubeconfig's will communicate with this endpoint (e.g. cluster.example.com)"
|
|
||||||
type = string
|
type = string
|
||||||
|
description = "Controller DNS name which resolves to a controller instance. Workers and kubeconfig's will communicate with this endpoint (e.g. cluster.example.com)"
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "ssh_authorized_key" {
|
variable "ssh_authorized_key" {
|
||||||
@ -72,63 +68,55 @@ variable "ssh_authorized_key" {
|
|||||||
}
|
}
|
||||||
|
|
||||||
variable "asset_dir" {
|
variable "asset_dir" {
|
||||||
description = "Path to a directory where generated assets should be placed (contains secrets)"
|
|
||||||
type = string
|
type = string
|
||||||
|
description = "Absolute path to a directory where generated assets should be placed (contains secrets)"
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "networking" {
|
variable "networking" {
|
||||||
description = "Choice of networking provider (flannel or calico)"
|
|
||||||
type = string
|
type = string
|
||||||
|
description = "Choice of networking provider (flannel or calico)"
|
||||||
default = "calico"
|
default = "calico"
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "network_mtu" {
|
variable "network_mtu" {
|
||||||
|
type = number
|
||||||
description = "CNI interface MTU (applies to calico only)"
|
description = "CNI interface MTU (applies to calico only)"
|
||||||
type = string
|
default = 1480
|
||||||
default = "1480"
|
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "network_ip_autodetection_method" {
|
variable "network_ip_autodetection_method" {
|
||||||
description = "Method to autodetect the host IPv4 address (applies to calico only)"
|
|
||||||
type = string
|
type = string
|
||||||
|
description = "Method to autodetect the host IPv4 address (applies to calico only)"
|
||||||
default = "first-found"
|
default = "first-found"
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "pod_cidr" {
|
variable "pod_cidr" {
|
||||||
description = "CIDR IPv4 range to assign Kubernetes pods"
|
|
||||||
type = string
|
type = string
|
||||||
|
description = "CIDR IPv4 range to assign Kubernetes pods"
|
||||||
default = "10.2.0.0/16"
|
default = "10.2.0.0/16"
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "service_cidr" {
|
variable "service_cidr" {
|
||||||
|
type = string
|
||||||
description = <<EOD
|
description = <<EOD
|
||||||
CIDR IPv4 range to assign Kubernetes services.
|
CIDR IPv4 range to assign Kubernetes services.
|
||||||
The 1st IP will be reserved for kube_apiserver, the 10th IP will be reserved for coredns.
|
The 1st IP will be reserved for kube_apiserver, the 10th IP will be reserved for coredns.
|
||||||
EOD
|
EOD
|
||||||
|
|
||||||
|
|
||||||
type = string
|
|
||||||
default = "10.3.0.0/16"
|
default = "10.3.0.0/16"
|
||||||
}
|
}
|
||||||
|
|
||||||
# optional
|
# optional
|
||||||
|
|
||||||
variable "cluster_domain_suffix" {
|
|
||||||
description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
|
|
||||||
type = string
|
|
||||||
default = "cluster.local"
|
|
||||||
}
|
|
||||||
|
|
||||||
variable "download_protocol" {
|
variable "download_protocol" {
|
||||||
type = string
|
type = string
|
||||||
default = "https"
|
|
||||||
description = "Protocol iPXE should use to download the kernel and initrd. Defaults to https, which requires iPXE compiled with crypto support. Unused if cached_install is true."
|
description = "Protocol iPXE should use to download the kernel and initrd. Defaults to https, which requires iPXE compiled with crypto support. Unused if cached_install is true."
|
||||||
|
default = "https"
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "cached_install" {
|
variable "cached_install" {
|
||||||
type = string
|
type = bool
|
||||||
default = "false"
|
|
||||||
description = "Whether Container Linux should PXE boot and install from matchbox /assets cache. Note that the admin must have downloaded the os_version into matchbox assets."
|
description = "Whether Container Linux should PXE boot and install from matchbox /assets cache. Note that the admin must have downloaded the os_version into matchbox assets."
|
||||||
|
default = false
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "install_disk" {
|
variable "install_disk" {
|
||||||
@ -137,27 +125,29 @@ variable "install_disk" {
|
|||||||
description = "Disk device to which the install profiles should install Container Linux (e.g. /dev/sda)"
|
description = "Disk device to which the install profiles should install Container Linux (e.g. /dev/sda)"
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "container_linux_oem" {
|
|
||||||
type = string
|
|
||||||
default = ""
|
|
||||||
description = "DEPRECATED: Specify an OEM image id to use as base for the installation (e.g. ami, vmware_raw, xen) or leave blank for the default image"
|
|
||||||
}
|
|
||||||
|
|
||||||
variable "kernel_args" {
|
variable "kernel_args" {
|
||||||
description = "Additional kernel arguments to provide at PXE boot."
|
|
||||||
type = list(string)
|
type = list(string)
|
||||||
|
description = "Additional kernel arguments to provide at PXE boot."
|
||||||
default = []
|
default = []
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "enable_reporting" {
|
variable "enable_reporting" {
|
||||||
type = string
|
type = bool
|
||||||
description = "Enable usage or analytics reporting to upstreams (Calico)"
|
description = "Enable usage or analytics reporting to upstreams (Calico)"
|
||||||
default = "false"
|
default = false
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "enable_aggregation" {
|
variable "enable_aggregation" {
|
||||||
|
type = bool
|
||||||
description = "Enable the Kubernetes Aggregation Layer (defaults to false)"
|
description = "Enable the Kubernetes Aggregation Layer (defaults to false)"
|
||||||
type = string
|
default = false
|
||||||
default = "false"
|
}
|
||||||
|
|
||||||
|
# unofficial, undocumented, unsupported
|
||||||
|
|
||||||
|
variable "cluster_domain_suffix" {
|
||||||
|
type = string
|
||||||
|
description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
|
||||||
|
default = "cluster.local"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -11,7 +11,7 @@ Typhoon distributes upstream Kubernetes, architectural conventions, and cluster
|
|||||||
|
|
||||||
## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>
|
## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>
|
||||||
|
|
||||||
* Kubernetes v1.16.0 (upstream)
|
* Kubernetes v1.16.2 (upstream)
|
||||||
* Single or multi-master, [Calico](https://www.projectcalico.org/) or [flannel](https://github.com/coreos/flannel) networking
|
* Single or multi-master, [Calico](https://www.projectcalico.org/) or [flannel](https://github.com/coreos/flannel) networking
|
||||||
* On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
|
* On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
|
||||||
* Advanced features like [snippets](https://typhoon.psdn.io/advanced/customization/#container-linux) customization
|
* Advanced features like [snippets](https://typhoon.psdn.io/advanced/customization/#container-linux) customization
|
||||||
|
@ -1,10 +1,10 @@
|
|||||||
# Kubernetes assets (kubeconfig, manifests)
|
# Kubernetes assets (kubeconfig, manifests)
|
||||||
module "bootstrap" {
|
module "bootstrap" {
|
||||||
source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=539b725093c8cd94ba46603adb25ac5280562ec8"
|
source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=0fcc067476fa1463d057fd43760df222b7262b27"
|
||||||
|
|
||||||
cluster_name = var.cluster_name
|
cluster_name = var.cluster_name
|
||||||
api_servers = [var.k8s_domain_name]
|
api_servers = [var.k8s_domain_name]
|
||||||
etcd_servers = var.controller_domains
|
etcd_servers = var.controllers.*.domain
|
||||||
asset_dir = var.asset_dir
|
asset_dir = var.asset_dir
|
||||||
networking = var.networking
|
networking = var.networking
|
||||||
network_mtu = var.network_mtu
|
network_mtu = var.network_mtu
|
@ -28,7 +28,7 @@ systemd:
|
|||||||
--network host \
|
--network host \
|
||||||
--volume /var/lib/etcd:/var/lib/etcd:rw,Z \
|
--volume /var/lib/etcd:/var/lib/etcd:rw,Z \
|
||||||
--volume /etc/ssl/etcd:/etc/ssl/certs:ro,Z \
|
--volume /etc/ssl/etcd:/etc/ssl/certs:ro,Z \
|
||||||
quay.io/coreos/etcd:v3.4.0
|
quay.io/coreos/etcd:v3.4.2
|
||||||
ExecStop=/usr/bin/podman stop etcd
|
ExecStop=/usr/bin/podman stop etcd
|
||||||
[Install]
|
[Install]
|
||||||
WantedBy=multi-user.target
|
WantedBy=multi-user.target
|
||||||
@ -81,7 +81,7 @@ systemd:
|
|||||||
--volume /opt/cni/bin:/opt/cni/bin:z \
|
--volume /opt/cni/bin:/opt/cni/bin:z \
|
||||||
--volume /etc/iscsi:/etc/iscsi \
|
--volume /etc/iscsi:/etc/iscsi \
|
||||||
--volume /sbin/iscsiadm:/sbin/iscsiadm \
|
--volume /sbin/iscsiadm:/sbin/iscsiadm \
|
||||||
k8s.gcr.io/hyperkube:v1.16.0 /hyperkube kubelet \
|
k8s.gcr.io/hyperkube:v1.16.2 /hyperkube kubelet \
|
||||||
--anonymous-auth=false \
|
--anonymous-auth=false \
|
||||||
--authentication-token-webhook \
|
--authentication-token-webhook \
|
||||||
--authorization-mode=Webhook \
|
--authorization-mode=Webhook \
|
||||||
@ -132,7 +132,7 @@ systemd:
|
|||||||
--network host \
|
--network host \
|
||||||
--volume /opt/bootstrap/assets:/assets:ro,Z \
|
--volume /opt/bootstrap/assets:/assets:ro,Z \
|
||||||
--volume /opt/bootstrap/apply:/apply:ro,Z \
|
--volume /opt/bootstrap/apply:/apply:ro,Z \
|
||||||
k8s.gcr.io/hyperkube:v1.16.0 \
|
k8s.gcr.io/hyperkube:v1.16.2 \
|
||||||
/apply
|
/apply
|
||||||
ExecStartPost=/bin/touch /opt/bootstrap/bootstrap.done
|
ExecStartPost=/bin/touch /opt/bootstrap/bootstrap.done
|
||||||
ExecStartPost=-/usr/bin/podman stop bootstrap
|
ExecStartPost=-/usr/bin/podman stop bootstrap
|
||||||
|
@ -51,7 +51,7 @@ systemd:
|
|||||||
--volume /opt/cni/bin:/opt/cni/bin:z \
|
--volume /opt/cni/bin:/opt/cni/bin:z \
|
||||||
--volume /etc/iscsi:/etc/iscsi \
|
--volume /etc/iscsi:/etc/iscsi \
|
||||||
--volume /sbin/iscsiadm:/sbin/iscsiadm \
|
--volume /sbin/iscsiadm:/sbin/iscsiadm \
|
||||||
k8s.gcr.io/hyperkube:v1.16.0 /hyperkube kubelet \
|
k8s.gcr.io/hyperkube:v1.16.2 /hyperkube kubelet \
|
||||||
--anonymous-auth=false \
|
--anonymous-auth=false \
|
||||||
--authentication-token-webhook \
|
--authentication-token-webhook \
|
||||||
--authorization-mode=Webhook \
|
--authorization-mode=Webhook \
|
||||||
|
@ -1,22 +1,22 @@
|
|||||||
# Match each controller or worker to a profile
|
# Match each controller or worker to a profile
|
||||||
|
|
||||||
resource "matchbox_group" "controller" {
|
resource "matchbox_group" "controller" {
|
||||||
count = length(var.controller_names)
|
count = length(var.controllers)
|
||||||
name = format("%s-%s", var.cluster_name, var.controller_names[count.index])
|
name = format("%s-%s", var.cluster_name, var.controllers.*.name[count.index])
|
||||||
profile = matchbox_profile.controllers.*.name[count.index]
|
profile = matchbox_profile.controllers.*.name[count.index]
|
||||||
|
|
||||||
selector = {
|
selector = {
|
||||||
mac = var.controller_macs[count.index]
|
mac = var.controllers.*.mac[count.index]
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "matchbox_group" "worker" {
|
resource "matchbox_group" "worker" {
|
||||||
count = length(var.worker_names)
|
count = length(var.workers)
|
||||||
name = format("%s-%s", var.cluster_name, var.worker_names[count.index])
|
+name = format("%s-%s", var.cluster_name, var.workers.*.name[count.index])
 profile = matchbox_profile.workers.*.name[count.index]

 selector = {
-mac = var.worker_macs[count.index]
+mac = var.workers.*.mac[count.index]
 }
 }

@@ -1,36 +1,36 @@
 locals {
-remote_kernel = "https://builds.coreos.fedoraproject.org/prod/streams/${var.os_stream}/builds/${var.os_version}/x86_64/fedora-coreos-${var.os_version}-installer-kernel"
+remote_kernel = "https://builds.coreos.fedoraproject.org/prod/streams/${var.os_stream}/builds/${var.os_version}/x86_64/fedora-coreos-${var.os_version}-installer-kernel-x86_64"
-remote_initrd = "https://builds.coreos.fedoraproject.org/prod/streams/${var.os_stream}/builds/${var.os_version}/x86_64/fedora-coreos-${var.os_version}-installer-initramfs.img"
+remote_initrd = "https://builds.coreos.fedoraproject.org/prod/streams/${var.os_stream}/builds/${var.os_version}/x86_64/fedora-coreos-${var.os_version}-installer-initramfs.x86_64.img"
 remote_args = [
 "ip=dhcp",
 "rd.neednet=1",
 "coreos.inst=yes",
-"coreos.inst.image_url=https://builds.coreos.fedoraproject.org/prod/streams/${var.os_stream}/builds/${var.os_version}/x86_64/fedora-coreos-${var.os_version}-metal.raw.xz",
+"coreos.inst.image_url=https://builds.coreos.fedoraproject.org/prod/streams/${var.os_stream}/builds/${var.os_version}/x86_64/fedora-coreos-${var.os_version}-metal.x86_64.raw.xz",
 "coreos.inst.ignition_url=${var.matchbox_http_endpoint}/ignition?uuid=$${uuid}&mac=$${mac:hexhyp}",
 "coreos.inst.install_dev=${var.install_disk}"
 ]

-cached_kernel = "/assets/fedora-coreos/fedora-coreos-${var.os_version}-installer-kernel"
+cached_kernel = "/assets/fedora-coreos/fedora-coreos-${var.os_version}-installer-kernel-x86_64"
-cached_initrd = "/assets/fedora-coreos/fedora-coreos-${var.os_version}-installer-initramfs.img"
+cached_initrd = "/assets/fedora-coreos/fedora-coreos-${var.os_version}-installer-initramfs.x86_64.img"
 cached_args = [
 "ip=dhcp",
 "rd.neednet=1",
 "coreos.inst=yes",
-"coreos.inst.image_url=${var.matchbox_http_endpoint}/assets/fedora-coreos/fedora-coreos-${var.os_version}-metal.raw.xz",
+"coreos.inst.image_url=${var.matchbox_http_endpoint}/assets/fedora-coreos/fedora-coreos-${var.os_version}-metal.x86_64.raw.xz",
 "coreos.inst.ignition_url=${var.matchbox_http_endpoint}/ignition?uuid=$${uuid}&mac=$${mac:hexhyp}",
 "coreos.inst.install_dev=${var.install_disk}"
 ]

-kernel = var.cached_install == "true" ? local.cached_kernel : local.remote_kernel
+kernel = var.cached_install ? local.cached_kernel : local.remote_kernel
-initrd = var.cached_install == "true" ? local.cached_initrd : local.remote_initrd
+initrd = var.cached_install ? local.cached_initrd : local.remote_initrd
-args = var.cached_install == "true" ? local.cached_args : local.remote_args
+args = var.cached_install ? local.cached_args : local.remote_args
 }


 // Fedora CoreOS controller profile
 resource "matchbox_profile" "controllers" {
-count = length(var.controller_names)
+count = length(var.controllers)
-name = format("%s-controller-%s", var.cluster_name, var.controller_names[count.index])
+name = format("%s-controller-%s", var.cluster_name, var.controllers.*.name[count.index])

 kernel = local.kernel
 initrd = [
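The ternaries above now treat `cached_install` as a Terraform v0.12 bool rather than the string `"true"`. A minimal sketch (hypothetical module block, values illustrative) of opting into the cached install path:

```tf
module "bare-metal-mercury" {
  # ...required settings omitted for brevity...

  # bool, not the string "true"; requires the os_version images to already
  # be downloaded into matchbox's /assets cache
  cached_install = true
}
```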
@@ -42,20 +42,20 @@ resource "matchbox_profile" "controllers" {
 }

 data "ct_config" "controller-ignitions" {
-count = length(var.controller_names)
+count = length(var.controllers)

 content = data.template_file.controller-configs.*.rendered[count.index]
 strict = true
 }

 data "template_file" "controller-configs" {
-count = length(var.controller_names)
+count = length(var.controllers)

 template = file("${path.module}/fcc/controller.yaml")
 vars = {
-domain_name = var.controller_domains[count.index]
+domain_name = var.controllers.*.domain[count.index]
-etcd_name = var.controller_names[count.index]
+etcd_name = var.controllers.*.name[count.index]
-etcd_initial_cluster = join(",", formatlist("%s=https://%s:2380", var.controller_names, var.controller_domains))
+etcd_initial_cluster = join(",", formatlist("%s=https://%s:2380", var.controllers.*.name, var.controllers.*.domain))
 cluster_dns_service_ip = module.bootstrap.cluster_dns_service_ip
 cluster_domain_suffix = var.cluster_domain_suffix
 ssh_authorized_key = var.ssh_authorized_key
@@ -64,8 +64,8 @@ data "template_file" "controller-configs" {

 // Fedora CoreOS worker profile
 resource "matchbox_profile" "workers" {
-count = length(var.worker_names)
+count = length(var.workers)
-name = format("%s-worker-%s", var.cluster_name, var.worker_names[count.index])
+name = format("%s-worker-%s", var.cluster_name, var.workers.*.name[count.index])

 kernel = local.kernel
 initrd = [
@@ -77,18 +77,18 @@ resource "matchbox_profile" "workers" {
 }

 data "ct_config" "worker-ignitions" {
-count = length(var.worker_names)
+count = length(var.workers)

 content = data.template_file.worker-configs.*.rendered[count.index]
 strict = true
 }

 data "template_file" "worker-configs" {
-count = length(var.worker_names)
+count = length(var.workers)

 template = file("${path.module}/fcc/worker.yaml")
 vars = {
-domain_name = var.worker_domains[count.index]
+domain_name = var.workers.*.domain[count.index]
 cluster_dns_service_ip = module.bootstrap.cluster_dns_service_ip
 cluster_domain_suffix = var.cluster_domain_suffix
 ssh_authorized_key = var.ssh_authorized_key
@@ -1,6 +1,6 @@
 # Secure copy assets to controllers. Activates kubelet.service
 resource "null_resource" "copy-controller-secrets" {
-count = length(var.controller_names)
+count = length(var.controllers)

 # Without depends_on, remote-exec could start and wait for machines before
 # matchbox groups are written, causing a deadlock.
@@ -12,7 +12,7 @@ resource "null_resource" "copy-controller-secrets" {

 connection {
 type = "ssh"
-host = var.controller_domains[count.index]
+host = var.controllers.*.domain[count.index]
 user = "core"
 timeout = "60m"
 }
@@ -73,7 +73,7 @@ resource "null_resource" "copy-controller-secrets" {
 "sudo mv etcd-peer.crt /etc/ssl/etcd/etcd/peer.crt",
 "sudo mv etcd-peer.key /etc/ssl/etcd/etcd/peer.key",
 "sudo mv $HOME/assets /opt/bootstrap/assets",
-"sudo mkdir -p /etc/kubernetes/manifests"
+"sudo mkdir -p /etc/kubernetes/manifests",
 "sudo mkdir -p /etc/kubernetes/bootstrap-secrets",
 "sudo mv $HOME/kubeconfig /etc/kubernetes/kubeconfig",
 "sudo cp -r /opt/bootstrap/assets/tls/* /etc/kubernetes/bootstrap-secrets/",
@@ -85,7 +85,7 @@ resource "null_resource" "copy-controller-secrets" {

 # Secure copy kubeconfig to all workers. Activates kubelet.service
 resource "null_resource" "copy-worker-secrets" {
-count = length(var.worker_names)
+count = length(var.workers)

 # Without depends_on, remote-exec could start and wait for machines before
 # matchbox groups are written, causing a deadlock.
@@ -96,7 +96,7 @@ resource "null_resource" "copy-worker-secrets" {

 connection {
 type = "ssh"
-host = var.worker_domains[count.index]
+host = var.workers.*.domain[count.index]
 user = "core"
 timeout = "60m"
 }
@@ -125,7 +125,7 @@ resource "null_resource" "bootstrap" {

 connection {
 type = "ssh"
-host = var.controller_domains[0]
+host = var.controllers[0].domain
 user = "core"
 timeout = "15m"
 }
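The hunks above replace the parallel name/MAC/domain lists with a single `controllers` list of objects, read either by index or with a splat. A small sketch of both access patterns, assuming the object fields defined in this change:

```tf
locals {
  # first controller's FQDN, as used by the bootstrap connection
  bootstrap_host = var.controllers[0].domain

  # all controller FQDNs, as used with count.index in per-node resources
  controller_domains = var.controllers.*.domain
}
```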
@@ -22,36 +22,32 @@ variable "os_version" {
 }

 # machines
-# Terraform's crude "type system" does not properly support lists of maps so we do this.

-variable "controller_names" {
+variable "controllers" {
-type = list(string)
+type = list(object({
-description = "Ordered list of controller names (e.g. [node1])"
+name = string
+mac = string
+domain = string
+}))
+description = <<EOD
+List of controller machine details (unique name, identifying MAC address, FQDN)
+[{ name = "node1", mac = "52:54:00:a1:9c:ae", domain = "node1.example.com"}]
+EOD
 }

-variable "controller_macs" {
+variable "workers" {
-type = list(string)
+type = list(object({
-description = "Ordered list of controller identifying MAC addresses (e.g. [52:54:00:a1:9c:ae])"
+name = string
-}
+mac = string
+domain = string
-variable "controller_domains" {
+}))
-type = list(string)
+description = <<EOD
-description = "Ordered list of controller FQDNs (e.g. [node1.example.com])"
+List of worker machine details (unique name, identifying MAC address, FQDN)
-}
+[
+{ name = "node2", mac = "52:54:00:b2:2f:86", domain = "node2.example.com"},
-variable "worker_names" {
+{ name = "node3", mac = "52:54:00:c3:61:77", domain = "node3.example.com"}
-type = list(string)
+]
-description = "Ordered list of worker names (e.g. [node2, node3])"
+EOD
-}

-variable "worker_macs" {
-type = list(string)
-description = "Ordered list of worker identifying MAC addresses (e.g. [52:54:00:b2:2f:86, 52:54:00:c3:61:77])"
-}

-variable "worker_domains" {
-type = list(string)
-description = "Ordered list of worker FQDNs (e.g. [node2.example.com, node3.example.com])"
 }

 variable "snippets" {
@@ -63,8 +59,8 @@ variable "snippets" {
 # configuration

 variable "k8s_domain_name" {
-description = "Controller DNS name which resolves to a controller instance. Workers and kubeconfig's will communicate with this endpoint (e.g. cluster.example.com)"
 type = string
+description = "Controller DNS name which resolves to a controller instance. Workers and kubeconfig's will communicate with this endpoint (e.g. cluster.example.com)"
 }

 variable "ssh_authorized_key" {
@@ -73,80 +69,80 @@ variable "ssh_authorized_key" {
 }

 variable "asset_dir" {
-description = "Path to a directory where generated assets should be placed (contains secrets)"
 type = string
+description = "Absolute path to a directory where generated assets should be placed (contains secrets)"
 }

 variable "networking" {
-description = "Choice of networking provider (flannel or calico)"
 type = string
+description = "Choice of networking provider (flannel or calico)"
 default = "calico"
 }

 variable "network_mtu" {
+type = number
 description = "CNI interface MTU (applies to calico only)"
-type = string
+default = 1480
-default = "1480"
 }

 variable "network_ip_autodetection_method" {
-description = "Method to autodetect the host IPv4 address (applies to calico only)"
 type = string
+description = "Method to autodetect the host IPv4 address (applies to calico only)"
 default = "first-found"
 }

 variable "pod_cidr" {
-description = "CIDR IPv4 range to assign Kubernetes pods"
 type = string
+description = "CIDR IPv4 range to assign Kubernetes pods"
 default = "10.2.0.0/16"
 }

 variable "service_cidr" {
+type = string
 description = <<EOD
 CIDR IPv4 range to assign Kubernetes services.
 The 1st IP will be reserved for kube_apiserver, the 10th IP will be reserved for coredns.
 EOD


-type = string
 default = "10.3.0.0/16"
 }

 # optional

+variable "cached_install" {
+type = bool
+description = "Whether Fedora CoreOS should PXE boot and install from matchbox /assets cache. Note that the admin must have downloaded the os_version into matchbox assets."
+default = false
+}
+
+variable "install_disk" {
+type = string
+description = "Disk device to install Fedora CoreOS (e.g. sda)"
+default = "sda"
+}
+
+variable "kernel_args" {
+type = list(string)
+description = "Additional kernel arguments to provide at PXE boot."
+default = []
+}
+
+variable "enable_reporting" {
+type = bool
+description = "Enable usage or analytics reporting to upstreams (Calico)"
+default = false
+}
+
+variable "enable_aggregation" {
+type = bool
+description = "Enable the Kubernetes Aggregation Layer (defaults to false)"
+default = false
+}
+
+# unofficial, undocumented, unsupported
+
 variable "cluster_domain_suffix" {
 description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
 type = string
 default = "cluster.local"
 }

-variable "cached_install" {
-type = string
-default = "false"
-description = "Whether Fedora CoreOS should PXE boot and install from matchbox /assets cache. Note that the admin must have downloaded the os_version into matchbox assets."
-}
-
-variable "install_disk" {
-type = string
-default = "sda"
-description = "Disk device to install Fedora CoreOS (e.g. sda)"
-}
-
-variable "kernel_args" {
-description = "Additional kernel arguments to provide at PXE boot."
-type = list(string)
-default = []
-}
-
-variable "enable_reporting" {
-type = string
-description = "Enable usage or analytics reporting to upstreams (Calico)"
-default = "false"
-}
-
-variable "enable_aggregation" {
-description = "Enable the Kubernetes Aggregation Layer (defaults to false)"
-type = string
-default = "false"
-}
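For reference, a short sketch (hypothetical machine values, mirroring the examples in the variable descriptions above) of passing the new typed lists to the module:

```tf
controllers = [{
  name   = "node1"
  mac    = "52:54:00:a1:9c:ae"
  domain = "node1.example.com"
}]

workers = [
  { name = "node2", mac = "52:54:00:b2:2f:86", domain = "node2.example.com" },
  { name = "node3", mac = "52:54:00:c3:61:77", domain = "node3.example.com" },
]
```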
@@ -11,7 +11,7 @@ Typhoon distributes upstream Kubernetes, architectural conventions, and cluster

 ## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>

-* Kubernetes v1.16.0 (upstream)
+* Kubernetes v1.16.2 (upstream)
 * Single or multi-master, [Calico](https://www.projectcalico.org/) or [flannel](https://github.com/coreos/flannel) networking
 * On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
 * Advanced features like [snippets](https://typhoon.psdn.io/advanced/customization/#container-linux) customization
@@ -1,6 +1,6 @@
 # Kubernetes assets (kubeconfig, manifests)
 module "bootstrap" {
-source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=539b725093c8cd94ba46603adb25ac5280562ec8"
+source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=0fcc067476fa1463d057fd43760df222b7262b27"

 cluster_name = var.cluster_name
 api_servers = [format("%s.%s", var.cluster_name, var.dns_zone)]
@@ -7,7 +7,7 @@ systemd:
 - name: 40-etcd-cluster.conf
 contents: |
 [Service]
-Environment="ETCD_IMAGE_TAG=v3.4.0"
+Environment="ETCD_IMAGE_TAG=v3.4.2"
 Environment="ETCD_NAME=${etcd_name}"
 Environment="ETCD_ADVERTISE_CLIENT_URLS=https://${etcd_domain}:2379"
 Environment="ETCD_INITIAL_ADVERTISE_PEER_URLS=https://${etcd_domain}:2380"
@@ -123,7 +123,7 @@ systemd:
 --volume script,kind=host,source=/opt/bootstrap/apply \
 --mount volume=script,target=/apply \
 --insecure-options=image \
-docker://k8s.gcr.io/hyperkube:v1.16.0 \
+docker://k8s.gcr.io/hyperkube:v1.16.2 \
 --net=host \
 --dns=host \
 --exec=/apply
@@ -138,7 +138,7 @@ storage:
 contents:
 inline: |
 KUBELET_IMAGE_URL=docker://k8s.gcr.io/hyperkube
-KUBELET_IMAGE_TAG=v1.16.0
+KUBELET_IMAGE_TAG=v1.16.2
 - path: /opt/bootstrap/apply
 filesystem: root
 mode: 0544
@@ -99,7 +99,7 @@ storage:
 contents:
 inline: |
 KUBELET_IMAGE_URL=docker://k8s.gcr.io/hyperkube
-KUBELET_IMAGE_TAG=v1.16.0
+KUBELET_IMAGE_TAG=v1.16.2
 - path: /etc/sysctl.d/max-user-watches.conf
 filesystem: root
 contents:
@@ -117,7 +117,7 @@ storage:
 --volume config,kind=host,source=/etc/kubernetes \
 --mount volume=config,target=/etc/kubernetes \
 --insecure-options=image \
-docker://k8s.gcr.io/hyperkube:v1.16.0 \
+docker://k8s.gcr.io/hyperkube:v1.16.2 \
 --net=host \
 --dns=host \
 --exec=/kubectl -- --kubeconfig=/etc/kubernetes/kubeconfig delete node $(hostname)
@@ -72,7 +72,7 @@ resource "null_resource" "copy-controller-secrets" {
 "sudo chown -R etcd:etcd /etc/ssl/etcd",
 "sudo chmod -R 500 /etc/ssl/etcd",
 "sudo mv $HOME/assets /opt/bootstrap/assets",
-"sudo mkdir -p /etc/kubernetes/manifests"
+"sudo mkdir -p /etc/kubernetes/manifests",
 "sudo mkdir -p /etc/kubernetes/bootstrap-secrets",
 "sudo mv $HOME/kubeconfig /etc/kubernetes/kubeconfig",
 "sudo cp -r /opt/bootstrap/assets/tls/* /etc/kubernetes/bootstrap-secrets/",
@@ -18,33 +18,33 @@ variable "dns_zone" {
 # instances

 variable "controller_count" {
-type = string
+type = number
-default = "1"
 description = "Number of controllers (i.e. masters)"
+default = 1
 }

 variable "worker_count" {
-type = string
+type = number
-default = "1"
 description = "Number of workers"
+default = 1
 }

 variable "controller_type" {
 type = string
-default = "s-2vcpu-2gb"
 description = "Droplet type for controllers (e.g. s-2vcpu-2gb, s-2vcpu-4gb, s-4vcpu-8gb)."
+default = "s-2vcpu-2gb"
 }

 variable "worker_type" {
 type = string
-default = "s-1vcpu-2gb"
 description = "Droplet type for workers (e.g. s-1vcpu-2gb, s-2vcpu-2gb)"
+default = "s-1vcpu-2gb"
 }

 variable "image" {
 type = string
-default = "coreos-stable"
 description = "Container Linux image for instances (e.g. coreos-stable)"
+default = "coreos-stable"
 }

 variable "controller_clc_snippets" {
@@ -67,48 +67,48 @@ variable "ssh_fingerprints" {
 }

 variable "asset_dir" {
-description = "Path to a directory where generated assets should be placed (contains secrets)"
 type = string
+description = "Absolute path to a directory where generated assets should be placed (contains secrets)"
 }

 variable "networking" {
-description = "Choice of networking provider (flannel or calico)"
 type = string
-default = "flannel"
+description = "Choice of networking provider (flannel or calico)"
+default = "calico"
 }

 variable "pod_cidr" {
-description = "CIDR IPv4 range to assign Kubernetes pods"
 type = string
+description = "CIDR IPv4 range to assign Kubernetes pods"
 default = "10.2.0.0/16"
 }

 variable "service_cidr" {
+type = string
 description = <<EOD
 CIDR IPv4 range to assign Kubernetes services.
 The 1st IP will be reserved for kube_apiserver, the 10th IP will be reserved for coredns.
 EOD


-type = string
 default = "10.3.0.0/16"
 }

-variable "cluster_domain_suffix" {
-description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
-type = string
-default = "cluster.local"
-}

 variable "enable_reporting" {
-type = string
+type = bool
 description = "Enable usage or analytics reporting to upstreams (Calico)"
-default = "false"
+default = false
 }

 variable "enable_aggregation" {
+type = bool
 description = "Enable the Kubernetes Aggregation Layer (defaults to false)"
-type = string
+default = false
-default = "false"
+}

+# unofficial, undocumented, unsupported
+
+variable "cluster_domain_suffix" {
+type = string
+description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
+default = "cluster.local"
 }
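With the Terraform v0.12 types above, counts and feature toggles are plain numbers and booleans rather than quoted strings. A brief sketch (illustrative values only):

```tf
# previously: controller_count = "1", enable_aggregation = "false"
controller_count   = 1
worker_count       = 2
enable_reporting   = false
enable_aggregation = true
```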
@@ -62,11 +62,14 @@ The AWS internal `workers` module supports a number of [variables](https://githu
 |:-----|:------------|:--------|:--------|
 | worker_count | Number of instances | 1 | 3 |
 | instance_type | EC2 instance type | "t3.small" | "t3.medium" |
-| os_image | AMI channel for a Container Linux derivative | coreos-stable | coreos-stable, coreos-beta, coreos-alpha, flatcar-stable, flatcar-beta, flatcar-alpha |
+| os_image | AMI channel for a Container Linux derivative | "coreos-stable" | coreos-stable, coreos-beta, coreos-alpha, flatcar-stable, flatcar-beta, flatcar-alpha |
-| disk_size | Size of the disk in GB | 40 | 100 |
+| disk_size | Size of the EBS volume in GB | 40 | 100 |
-| spot_price | Spot price in USD for workers. Leave as default empty string for regular on-demand instances | "" | "0.10" |
+| disk_type | Type of the EBS volume | "gp2" | standard, gp2, io1 |
+| disk_iops | IOPS of the EBS volume | 0 (i.e. auto) | 400 |
+| spot_price | Spot price in USD for worker instances or 0 to use on-demand instances | 0 | 0.10 |
+| clc_snippets | Container Linux Config snippets | [] | [example](/advanced/customization/#usage) |
 | service_cidr | Must match `service_cidr` of cluster | "10.3.0.0/16" | "10.3.0.0/24" |
-| cluster_domain_suffix | Must match `cluster_domain_suffix` of cluster | "cluster.local" | "k8s.example.com" |
+| node_labels | List of initial node labels | [] | ["worker-pool=foo"] |

 Check the list of valid [instance types](https://aws.amazon.com/ec2/instance-types/) or per-region and per-type [spot prices](https://aws.amazon.com/ec2/spot/pricing/).
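A short sketch (hypothetical pool name, cluster wiring omitted) of setting the spot and label options documented in the table above on an AWS `workers` pool:

```tf
module "tempest-worker-pool" {
  source = "git::https://github.com/poseidon/typhoon//aws/container-linux/kubernetes/workers?ref=v1.16.2"

  # ...cluster wiring (vpc, subnets, security groups, kubeconfig) omitted...

  worker_count  = 2
  instance_type = "t3.medium"
  spot_price    = 0.10                      # 0 (the default) uses on-demand instances
  node_labels   = ["worker-pool=tempest-2"]
}
```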
@@ -76,7 +79,7 @@ Create a cluster following the Azure [tutorial](../cl/azure.md#cluster). Define

 ```tf
 module "ramius-worker-pool" {
-source = "git::https://github.com/poseidon/typhoon//azure/container-linux/kubernetes/workers?ref=v1.16.0"
+source = "git::https://github.com/poseidon/typhoon//azure/container-linux/kubernetes/workers?ref=v1.16.2"

 # Azure
 region = module.azure-ramius.region
@@ -127,12 +130,12 @@ The Azure internal `workers` module supports a number of [variables](https://git
 | Name | Description | Default | Example |
 |:-----|:------------|:--------|:--------|
 | worker_count | Number of instances | 1 | 3 |
-| vm_type | Machine type for instances | "Standard_F1" | See below |
+| vm_type | Machine type for instances | "Standard_DS1_v2" | See below |
-| os_image | Channel for a Container Linux derivative | coreos-stable | coreos-stable, coreos-beta, coreos-alpha |
+| os_image | Channel for a Container Linux derivative | "coreos-stable" | coreos-stable, coreos-beta, coreos-alpha |
-| priority | Set priority to Low to use reduced cost surplus capacity, with the tradeoff that instances can be deallocated at any time | Regular | Low |
+| priority | Set priority to Low to use reduced cost surplus capacity, with the tradeoff that instances can be deallocated at any time | "Regular" | "Low" |
 | clc_snippets | Container Linux Config snippets | [] | [example](/advanced/customization/#usage) |
 | service_cidr | CIDR IPv4 range to assign to Kubernetes services | "10.3.0.0/16" | "10.3.0.0/24" |
-| cluster_domain_suffix | FQDN suffix for Kubernetes services answered by coredns. | "cluster.local" | "k8s.example.com" |
+| node_labels | List of initial node labels | [] | ["worker-pool=foo"] |

 Check the list of valid [machine types](https://azure.microsoft.com/en-us/pricing/details/virtual-machines/linux/) and their [specs](https://docs.microsoft.com/en-us/azure/virtual-machines/linux/sizes-general). Use `az vm list-skus` to get the identifier.

@@ -142,7 +145,7 @@ Create a cluster following the Google Cloud [tutorial](../cl/google-cloud.md#clu

 ```tf
 module "yavin-worker-pool" {
-source = "git::https://github.com/poseidon/typhoon//google-cloud/container-linux/kubernetes/workers?ref=v1.16.0"
+source = "git::https://github.com/poseidon/typhoon//google-cloud/container-linux/kubernetes/workers?ref=v1.16.2"

 # Google Cloud
 region = "europe-west2"
@@ -173,11 +176,11 @@ Verify a managed instance group of workers joins the cluster within a few minute
 ```
 $ kubectl get nodes
 NAME STATUS AGE VERSION
-yavin-controller-0.c.example-com.internal Ready 6m v1.16.0
+yavin-controller-0.c.example-com.internal Ready 6m v1.16.2
-yavin-worker-jrbf.c.example-com.internal Ready 5m v1.16.0
+yavin-worker-jrbf.c.example-com.internal Ready 5m v1.16.2
-yavin-worker-mzdm.c.example-com.internal Ready 5m v1.16.0
+yavin-worker-mzdm.c.example-com.internal Ready 5m v1.16.2
-yavin-16x-worker-jrbf.c.example-com.internal Ready 3m v1.16.0
+yavin-16x-worker-jrbf.c.example-com.internal Ready 3m v1.16.2
-yavin-16x-worker-mzdm.c.example-com.internal Ready 3m v1.16.0
+yavin-16x-worker-mzdm.c.example-com.internal Ready 3m v1.16.2
 ```

 ### Variables
@@ -189,9 +192,9 @@ The Google Cloud internal `workers` module supports a number of [variables](http
 | Name | Description | Example |
 |:-----|:------------|:--------|
 | name | Unique name (distinct from cluster name) | "yavin-16x" |
+| cluster_name | Must be set to `cluster_name` of cluster | "yavin" |
 | region | Region for the worker pool instances. May differ from the cluster's region | "europe-west2" |
 | network | Must be set to `network_name` output by cluster | module.cluster.network_name |
-| cluster_name | Must be set to `cluster_name` of cluster | "yavin" |
 | kubeconfig | Must be set to `kubeconfig` output by cluster | module.cluster.kubeconfig |
 | ssh_authorized_key | SSH public key for user 'core' | "ssh-rsa AAAAB3NZ..." |

@@ -206,8 +209,9 @@ Check the list of regions [docs](https://cloud.google.com/compute/docs/regions-z
 | os_image | Container Linux image for compute instances | "coreos-stable" | "coreos-alpha", "coreos-beta" |
 | disk_size | Size of the disk in GB | 40 | 100 |
 | preemptible | If true, Compute Engine will terminate instances randomly within 24 hours | false | true |
+| clc_snippets | Container Linux Config snippets | [] | [example](/advanced/customization/#usage) |
 | service_cidr | Must match `service_cidr` of cluster | "10.3.0.0/16" | "10.3.0.0/24" |
-| cluster_domain_suffix | Must match `cluster_domain_suffix` of cluster | "cluster.local" | "k8s.example.com" |
+| node_labels | List of initial node labels | [] | ["worker-pool=foo"] |

 Check the list of valid [machine types](https://cloud.google.com/compute/docs/machine-types).
@@ -1,6 +1,6 @@
 # AWS

-In this tutorial, we'll create a Kubernetes v1.16.0 cluster on AWS with Container Linux.
+In this tutorial, we'll create a Kubernetes v1.16.2 cluster on AWS with Container Linux.

 We'll declare a Kubernetes cluster using the Typhoon Terraform module. Then apply the changes to create a VPC, gateway, subnets, security groups, controller instances, worker auto-scaling group, network load balancer, and TLS assets.

@@ -18,7 +18,7 @@ Install [Terraform](https://www.terraform.io/downloads.html) v0.12.x on your sys

 ```sh
 $ terraform version
-Terraform v0.12.7
+Terraform v0.12.9
 ```

 Add the [terraform-provider-ct](https://github.com/poseidon/terraform-provider-ct) plugin binary for your system to `~/.terraform.d/plugins/`, noting the final name.
@@ -49,7 +49,7 @@ Configure the AWS provider to use your access key credentials in a `providers.tf

 ```tf
 provider "aws" {
-version = "2.29.0"
+version = "2.31.0"
 region = "eu-central-1"
 shared_credentials_file = "/home/user/.config/aws/credentials"
 }
@@ -70,7 +70,7 @@ Define a Kubernetes cluster using the module `aws/container-linux/kubernetes`.

 ```tf
 module "tempest" {
-source = "git::https://github.com/poseidon/typhoon//aws/container-linux/kubernetes?ref=v1.16.0"
+source = "git::https://github.com/poseidon/typhoon//aws/container-linux/kubernetes?ref=v1.16.2"

 # AWS
 cluster_name = "tempest"
@@ -135,9 +135,9 @@ In 4-8 minutes, the Kubernetes cluster will be ready.
 $ export KUBECONFIG=/home/user/.secrets/clusters/tempest/auth/kubeconfig
 $ kubectl get nodes
 NAME STATUS ROLES AGE VERSION
-ip-10-0-3-155 Ready <none> 10m v1.16.0
+ip-10-0-3-155 Ready <none> 10m v1.16.2
-ip-10-0-26-65 Ready <none> 10m v1.16.0
+ip-10-0-26-65 Ready <none> 10m v1.16.2
-ip-10-0-41-21 Ready <none> 10m v1.16.0
+ip-10-0-41-21 Ready <none> 10m v1.16.2
 ```

 List the pods.
@@ -177,7 +177,7 @@ Check the [variables.tf](https://github.com/poseidon/typhoon/blob/master/aws/con
 | dns_zone | AWS Route53 DNS zone | "aws.example.com" |
 | dns_zone_id | AWS Route53 DNS zone id | "Z3PAABBCFAKEC0" |
 | ssh_authorized_key | SSH public key for user 'core' | "ssh-rsa AAAAB3NZ..." |
-| asset_dir | Path to a directory where generated assets should be placed (contains secrets) | "/home/user/.secrets/clusters/tempest" |
+| asset_dir | Absolute path to a directory where generated assets should be placed (contains secrets) | "/home/user/.secrets/clusters/tempest" |

 #### DNS Zone

@@ -191,7 +191,7 @@ resource "aws_route53_zone" "zone-for-clusters" {
 }
 ```

-Reference the DNS zone id with `"${aws_route53_zone.zone-for-clusters.zone_id}"`.
+Reference the DNS zone id with `aws_route53_zone.zone-for-clusters.zone_id`.

 !!! tip ""
 If you have an existing domain name with a zone file elsewhere, just delegate a subdomain that can be managed on Route53 (e.g. aws.mydomain.com) and [update nameservers](http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/SOA-NSrecords.html).
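A brief sketch (abbreviated module block, hypothetical zone name) of wiring that zone into the cluster definition using the reference style shown above:

```tf
module "tempest" {
  # ...other settings omitted...

  dns_zone    = "aws.example.com"
  dns_zone_id = aws_route53_zone.zone-for-clusters.zone_id
}
```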
@@ -205,11 +205,11 @@ Reference the DNS zone id with `"${aws_route53_zone.zone-for-clusters.zone_id}"`
 | controller_type | EC2 instance type for controllers | "t3.small" | See below |
 | worker_type | EC2 instance type for workers | "t3.small" | See below |
 | os_image | AMI channel for a Container Linux derivative | coreos-stable | coreos-stable, coreos-beta, coreos-alpha, flatcar-stable, flatcar-beta, flatcar-alpha, flatcar-edge |
-| disk_size | Size of the EBS volume in GB | "40" | "100" |
+| disk_size | Size of the EBS volume in GB | 40 | 100 |
 | disk_type | Type of the EBS volume | "gp2" | standard, gp2, io1 |
-| disk_iops | IOPS of the EBS volume | "0" (i.e. auto) | "400" |
+| disk_iops | IOPS of the EBS volume | 0 (i.e. auto) | 400 |
-| worker_target_groups | Target group ARNs to which worker instances should be added | [] | ["${aws_lb_target_group.app.id}"] |
+| worker_target_groups | Target group ARNs to which worker instances should be added | [] | [aws_lb_target_group.app.id] |
-| worker_price | Spot price in USD for workers. Leave as default empty string for regular on-demand instances | "" | "0.10" |
+| worker_price | Spot price in USD for worker instances or 0 to use on-demand instances | 0/null | 0.10 |
 | controller_clc_snippets | Controller Container Linux Config snippets | [] | [example](/advanced/customization/) |
 | worker_clc_snippets | Worker Container Linux Config snippets | [] | [example](/advanced/customization/) |
 | networking | Choice of networking provider | "calico" | "calico" or "flannel" |
@@ -217,7 +217,7 @@ Reference the DNS zone id with `"${aws_route53_zone.zone-for-clusters.zone_id}"`
 | host_cidr | CIDR IPv4 range to assign to EC2 instances | "10.0.0.0/16" | "10.1.0.0/16" |
 | pod_cidr | CIDR IPv4 range to assign to Kubernetes pods | "10.2.0.0/16" | "10.22.0.0/16" |
 | service_cidr | CIDR IPv4 range to assign to Kubernetes services | "10.3.0.0/16" | "10.3.0.0/24" |
-| cluster_domain_suffix | FQDN suffix for Kubernetes services answered by coredns. | "cluster.local" | "k8s.example.com" |
+| worker_node_labels | List of initial worker node labels | [] | ["worker-pool=default"] |

 Check the list of valid [instance types](https://aws.amazon.com/ec2/instance-types/).
@@ -3,7 +3,7 @@
 !!! danger
 Typhoon for Azure is alpha. For production, use AWS, Google Cloud, or bare-metal. As Azure matures, check [errata](https://github.com/poseidon/typhoon/wiki/Errata) for known shortcomings.

-In this tutorial, we'll create a Kubernetes v1.16.0 cluster on Azure with Container Linux.
+In this tutorial, we'll create a Kubernetes v1.16.2 cluster on Azure with Container Linux.

 We'll declare a Kubernetes cluster using the Typhoon Terraform module. Then apply the changes to create a resource group, virtual network, subnets, security groups, controller availability set, worker scale set, load balancer, and TLS assets.

@@ -21,7 +21,7 @@ Install [Terraform](https://www.terraform.io/downloads.html) v0.12.x on your sys

 ```sh
 $ terraform version
-Terraform v0.12.7
+Terraform v0.12.9
 ```

 Add the [terraform-provider-ct](https://github.com/poseidon/terraform-provider-ct) plugin binary for your system to `~/.terraform.d/plugins/`, noting the final name.
@@ -50,7 +50,7 @@ Configure the Azure provider in a `providers.tf` file.

 ```tf
 provider "azurerm" {
-version = "1.34.0"
+version = "1.35.0"
 }

 provider "ct" {
@@ -66,7 +66,7 @@ Define a Kubernetes cluster using the module `azure/container-linux/kubernetes`.

 ```tf
 module "ramius" {
-source = "git::https://github.com/poseidon/typhoon//azure/container-linux/kubernetes?ref=v1.16.0"
+source = "git::https://github.com/poseidon/typhoon//azure/container-linux/kubernetes?ref=v1.16.2"

 # Azure
 cluster_name = "ramius"
@@ -132,9 +132,9 @@ In 4-8 minutes, the Kubernetes cluster will be ready.
 $ export KUBECONFIG=/home/user/.secrets/clusters/ramius/auth/kubeconfig
 $ kubectl get nodes
 NAME STATUS ROLES AGE VERSION
-ramius-controller-0 Ready <none> 24m v1.16.0
+ramius-controller-0 Ready <none> 24m v1.16.2
-ramius-worker-000001 Ready <none> 25m v1.16.0
+ramius-worker-000001 Ready <none> 25m v1.16.2
-ramius-worker-000002 Ready <none> 24m v1.16.0
+ramius-worker-000002 Ready <none> 24m v1.16.2
 ```

 List the pods.
@@ -144,9 +144,9 @@ $ kubectl get pods --all-namespaces
 NAMESPACE NAME READY STATUS RESTARTS AGE
 kube-system coredns-7c6fbb4f4b-b6qzx 1/1 Running 0 26m
 kube-system coredns-7c6fbb4f4b-j2k3d 1/1 Running 0 26m
-kube-system flannel-bwf24 2/2 Running 0 26m
+kube-system calico-node-1m5bf 2/2 Running 0 26m
-kube-system flannel-ks5qb 2/2 Running 0 26m
+kube-system calico-node-7jmr1 2/2 Running 0 26m
-kube-system flannel-tq2wg 2/2 Running 0 26m
+kube-system calico-node-bknc8 2/2 Running 0 26m
 kube-system kube-apiserver-ramius-controller-0 1/1 Running 0 26m
 kube-system kube-controller-manager-ramius-controller-0 1/1 Running 0 26m
 kube-system kube-proxy-j4vpq 1/1 Running 0 26m
@@ -175,7 +175,7 @@ Check the [variables.tf](https://github.com/poseidon/typhoon/blob/master/azure/c
 | dns_zone | Azure DNS zone | "azure.example.com" |
 | dns_zone_group | Resource group where the Azure DNS zone resides | "global" |
 | ssh_authorized_key | SSH public key for user 'core' | "ssh-rsa AAAAB3NZ..." |
-| asset_dir | Path to a directory where generated assets should be placed (contains secrets) | "/home/user/.secrets/clusters/ramius" |
+| asset_dir | Absolute path to a directory where generated assets should be placed (contains secrets) | "/home/user/.secrets/clusters/ramius" |

 !!! tip
 Regions are shown in [docs](https://azure.microsoft.com/en-us/global-infrastructure/regions/) or with `az account list-locations --output table`.
@@ -195,14 +195,14 @@ resource "azurerm_resource_group" "global" {

 # DNS zone for clusters
 resource "azurerm_dns_zone" "clusters" {
-resource_group_name = "${azurerm_resource_group.global.name}"
+resource_group_name = azurerm_resource_group.global.name

 name = "azure.example.com"
 zone_type = "Public"
 }
 ```

-Reference the DNS zone with `"${azurerm_dns_zone.clusters.name}"` and its resource group with `"${azurerm_resource_group.global.name}"`.
+Reference the DNS zone with `azurerm_dns_zone.clusters.name` and its resource group with `"azurerm_resource_group.global.name`.

 !!! tip ""
 If you have an existing domain name with a zone file elsewhere, just delegate a subdomain that can be managed on Azure DNS (e.g. azure.mydomain.com) and [update nameservers](https://docs.microsoft.com/en-us/azure/dns/dns-delegate-domain-azure-dns).
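A brief sketch (abbreviated module block) of wiring those references into the cluster definition:

```tf
module "ramius" {
  # ...other settings omitted...

  dns_zone       = azurerm_dns_zone.clusters.name
  dns_zone_group = azurerm_resource_group.global.name
}
```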
@ -213,18 +213,18 @@ Reference the DNS zone with `"${azurerm_dns_zone.clusters.name}"` and its resour
|
|||||||
|:-----|:------------|:--------|:--------|
|
|:-----|:------------|:--------|:--------|
|
||||||
| controller_count | Number of controllers (i.e. masters) | 1 | 1 |
|
| controller_count | Number of controllers (i.e. masters) | 1 | 1 |
|
||||||
| worker_count | Number of workers | 1 | 3 |
|
| worker_count | Number of workers | 1 | 3 |
|
||||||
| controller_type | Machine type for controllers | "Standard_DS1_v2" | See below |
|
| controller_type | Machine type for controllers | "Standard_B2s" | See below |
|
||||||
| worker_type | Machine type for workers | "Standard_F1" | See below |
|
| worker_type | Machine type for workers | "Standard_DS1_v2" | See below |
|
||||||
| os_image | Channel for a Container Linux derivative | coreos-stable | coreos-stable, coreos-beta, coreos-alpha |
|
| os_image | Channel for a Container Linux derivative | "coreos-stable" | coreos-stable, coreos-beta, coreos-alpha |
|
||||||
| disk_size | Size of the disk in GB | "40" | "100" |
|
| disk_size | Size of the disk in GB | 40 | 100 |
|
||||||
| worker_priority | Set priority to Low to use reduced cost surplus capacity, with the tradeoff that instances can be deallocated at any time | Regular | Low |
|
| worker_priority | Set priority to Low to use reduced cost surplus capacity, with the tradeoff that instances can be deallocated at any time | Regular | Low |
|
||||||
| controller_clc_snippets | Controller Container Linux Config snippets | [] | [example](/advanced/customization/#usage) |
|
| controller_clc_snippets | Controller Container Linux Config snippets | [] | [example](/advanced/customization/#usage) |
|
||||||
| worker_clc_snippets | Worker Container Linux Config snippets | [] | [example](/advanced/customization/#usage) |
|
| worker_clc_snippets | Worker Container Linux Config snippets | [] | [example](/advanced/customization/#usage) |
|
||||||
| networking | Choice of networking provider | "flannel" | "flannel" or "calico" (experimental) |
|
| networking | Choice of networking provider | "calico" | "flannel" or "calico" |
|
||||||
| host_cidr | CIDR IPv4 range to assign to instances | "10.0.0.0/16" | "10.0.0.0/20" |
|
| host_cidr | CIDR IPv4 range to assign to instances | "10.0.0.0/16" | "10.0.0.0/20" |
|
||||||
| pod_cidr | CIDR IPv4 range to assign to Kubernetes pods | "10.2.0.0/16" | "10.22.0.0/16" |
|
| pod_cidr | CIDR IPv4 range to assign to Kubernetes pods | "10.2.0.0/16" | "10.22.0.0/16" |
|
||||||
| service_cidr | CIDR IPv4 range to assign to Kubernetes services | "10.3.0.0/16" | "10.3.0.0/24" |
|
| service_cidr | CIDR IPv4 range to assign to Kubernetes services | "10.3.0.0/16" | "10.3.0.0/24" |
|
||||||
| cluster_domain_suffix | FQDN suffix for Kubernetes services answered by coredns. | "cluster.local" | "k8s.example.com" |
|
| worker_node_labels | List of initial worker node labels | [] | ["worker-pool=default"] |
|
||||||
|
|
||||||
Check the list of valid [machine types](https://azure.microsoft.com/en-us/pricing/details/virtual-machines/linux/) and their [specs](https://docs.microsoft.com/en-us/azure/virtual-machines/linux/sizes-general). Use `az vm list-skus` to get the identifier.
|
Check the list of valid [machine types](https://azure.microsoft.com/en-us/pricing/details/virtual-machines/linux/) and their [specs](https://docs.microsoft.com/en-us/azure/virtual-machines/linux/sizes-general). Use `az vm list-skus` to get the identifier.
|
||||||
|
|
||||||
@ -232,7 +232,7 @@ Check the list of valid [machine types](https://azure.microsoft.com/en-us/pricin
|
|||||||
Unlike AWS and GCP, Azure requires its *virtual* networks to have non-overlapping IPv4 CIDRs (yeah, go figure). Instead of each cluster just using `10.0.0.0/16` for instances, each Azure cluster's `host_cidr` must be non-overlapping (e.g. 10.0.0.0/20 for the 1st cluster, 10.0.16.0/20 for the 2nd cluster, etc).
|
Unlike AWS and GCP, Azure requires its *virtual* networks to have non-overlapping IPv4 CIDRs (yeah, go figure). Instead of each cluster just using `10.0.0.0/16` for instances, each Azure cluster's `host_cidr` must be non-overlapping (e.g. 10.0.0.0/20 for the 1st cluster, 10.0.16.0/20 for the 2nd cluster, etc).
|
||||||
|
|
||||||
!!! warning
|
!!! warning
|
||||||
Do not choose a `controller_type` smaller than `Standard_DS1_v2`. Smaller instances are not sufficient for running a controller.
|
Do not choose a `controller_type` smaller than `Standard_B2s`. Smaller instances are not sufficient for running a controller.
|
||||||
|
|
||||||
#### Low Priority
|
#### Low Priority
|
||||||
|
|
||||||

@@ -1,6 +1,6 @@
 # Bare-Metal

-In this tutorial, we'll network boot and provision a Kubernetes v1.16.0 cluster on bare-metal with Container Linux.
+In this tutorial, we'll network boot and provision a Kubernetes v1.16.2 cluster on bare-metal with Container Linux.

 First, we'll deploy a [Matchbox](https://github.com/poseidon/matchbox) service and setup a network boot environment. Then, we'll declare a Kubernetes cluster using the Typhoon Terraform module and power on machines. On PXE boot, machines will install Container Linux to disk, reboot into the disk install, and provision themselves as Kubernetes controllers or workers via Ignition.

@@ -111,7 +111,7 @@ Install [Terraform](https://www.terraform.io/downloads.html) v0.12.x on your sys

 ```sh
 $ terraform version
-Terraform v0.12.7
+Terraform v0.12.9
 ```

 Add the [terraform-provider-matchbox](https://github.com/poseidon/terraform-provider-matchbox) plugin binary for your system to `~/.terraform.d/plugins/`, noting the final name.

@@ -144,9 +144,9 @@ Configure the Matchbox provider to use your Matchbox API endpoint and client cer
 provider "matchbox" {
 version = "0.3.0"
 endpoint = "matchbox.example.com:8081"
-client_cert = "${file("~/.config/matchbox/client.crt")}"
-client_key = "${file("~/.config/matchbox/client.key")}"
-ca = "${file("~/.config/matchbox/ca.crt")}"
+client_cert = file("~/.config/matchbox/client.crt")
+client_key = file("~/.config/matchbox/client.key")
+ca = file("~/.config/matchbox/ca.crt")
 }

 provider "ct" {
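The `client_cert`, `client_key`, and `ca` changes above reflect Terraform v0.12 treating function calls as first-class expressions, so the `"${...}"` interpolation wrapping is no longer needed. Assembled from the lines in this hunk, the resulting v0.12-style provider block reads:

```tf
provider "matchbox" {
  version     = "0.3.0"
  endpoint    = "matchbox.example.com:8081"
  client_cert = file("~/.config/matchbox/client.crt")
  client_key  = file("~/.config/matchbox/client.key")
  ca          = file("~/.config/matchbox/ca.crt")
}
```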
@@ -160,13 +160,13 @@ Define a Kubernetes cluster using the module `bare-metal/container-linux/kuberne

 ```tf
 module "bare-metal-mercury" {
-source = "git::https://github.com/poseidon/typhoon//bare-metal/container-linux/kubernetes?ref=v1.16.0"
+source = "git::https://github.com/poseidon/typhoon//bare-metal/container-linux/kubernetes?ref=v1.16.2"

 # bare-metal
 cluster_name = "mercury"
 matchbox_http_endpoint = "http://matchbox.example.com"
 os_channel = "coreos-stable"
-os_version = "1632.3.0"
+os_version = "2191.5.0"

 # configuration
 k8s_domain_name = "node1.example.com"
@@ -174,20 +174,22 @@ module "bare-metal-mercury" {
 asset_dir = "/home/user/.secrets/clusters/mercury"

 # machines
-controller_names = ["node1"]
-controller_macs = ["52:54:00:a1:9c:ae"]
-controller_domains = ["node1.example.com"]
-worker_names = [
-"node2",
-"node3",
-]
-worker_macs = [
-"52:54:00:b2:2f:86",
-"52:54:00:c3:61:77",
-]
-worker_domains = [
-"node2.example.com",
-"node3.example.com",
+controllers = [{
+name = "node1"
+mac = "52:54:00:a1:9c:ae"
+domain = "node1.example.com"
+}]
+workers = [
+{
+name = "node2",
+mac = "52:54:00:b2:2f:86"
+domain = "node2.example.com"
+},
+{
+name = "node3",
+mac = "52:54:00:c3:61:77"
+domain = "node3.example.com"
+}
 ]

 # set to http only if you cannot chainload to iPXE firmware with https support
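The `controllers` and `workers` arguments above are Terraform v0.12 typed lists of machine objects. As a rough sketch of the kind of type constraint behind them (an illustrative approximation, not copied from the module's variables.tf):

```tf
variable "controllers" {
  type = list(object({
    name   = string # unique short name, e.g. "node1"
    mac    = string # identifying MAC address
    domain = string # FQDN, e.g. "node1.example.com"
  }))
  description = "List of controller machine detail objects"
}

variable "workers" {
  type = list(object({
    name   = string
    mac    = string
    domain = string
  }))
  description = "List of worker machine detail objects"
}
```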
@@ -263,9 +265,9 @@ Apply complete! Resources: 55 added, 0 changed, 0 destroyed.
 To watch the install to disk (until machines reboot from disk), SSH to port 2222.

 ```
-# before v1.16.0
+# before v1.16.2
 $ ssh debug@node1.example.com
-# after v1.16.0
+# after v1.16.2
 $ ssh -p 2222 core@node1.example.com
 ```

@@ -289,9 +291,9 @@ systemd[1]: Started Kubernetes control plane.
 $ export KUBECONFIG=/home/user/.secrets/clusters/mercury/auth/kubeconfig
 $ kubectl get nodes
 NAME STATUS ROLES AGE VERSION
-node1.example.com Ready <none> 10m v1.16.0
-node2.example.com Ready <none> 10m v1.16.0
-node3.example.com Ready <none> 10m v1.16.0
+node1.example.com Ready <none> 10m v1.16.2
+node2.example.com Ready <none> 10m v1.16.2
+node3.example.com Ready <none> 10m v1.16.2
 ```

 List the pods.
@@ -327,19 +329,15 @@ Check the [variables.tf](https://github.com/poseidon/typhoon/blob/master/bare-me

 | Name | Description | Example |
 |:-----|:------------|:--------|
-| cluster_name | Unique cluster name | mercury |
-| matchbox_http_endpoint | Matchbox HTTP read-only endpoint | http://matchbox.example.com:port |
+| cluster_name | Unique cluster name | "mercury" |
+| matchbox_http_endpoint | Matchbox HTTP read-only endpoint | "http://matchbox.example.com:port" |
 | os_channel | Channel for a Container Linux derivative | coreos-stable, coreos-beta, coreos-alpha, flatcar-stable, flatcar-beta, flatcar-alpha, flatcar-edge |
-| os_version | Version for a Container Linux derivative to PXE and install | 1632.3.0 |
+| os_version | Version for a Container Linux derivative to PXE and install | "1632.3.0" |
 | k8s_domain_name | FQDN resolving to the controller(s) nodes. Workers and kubectl will communicate with this endpoint | "myk8s.example.com" |
 | ssh_authorized_key | SSH public key for user 'core' | "ssh-rsa AAAAB3Nz..." |
-| asset_dir | Path to a directory where generated assets should be placed (contains secrets) | "/home/user/.secrets/clusters/mercury" |
-| controller_names | Ordered list of controller short names | ["node1"] |
-| controller_macs | Ordered list of controller identifying MAC addresses | ["52:54:00:a1:9c:ae"] |
-| controller_domains | Ordered list of controller FQDNs | ["node1.example.com"] |
-| worker_names | Ordered list of worker short names | ["node2", "node3"] |
-| worker_macs | Ordered list of worker identifying MAC addresses | ["52:54:00:b2:2f:86", "52:54:00:c3:61:77"] |
-| worker_domains | Ordered list of worker FQDNs | ["node2.example.com", "node3.example.com"] |
+| asset_dir | Absolute path to a directory where generated assets should be placed (contains secrets) | "/home/user/.secrets/clusters/mercury" |
+| controllers | List of controller machine detail objects (unique name, identifying MAC address, FQDN) | `[{name="node1", mac="52:54:00:a1:9c:ae", domain="node1.example.com"}]` |
+| workers | List of worker machine detail objects (unique name, identifying MAC address, FQDN) | `[{name="node2", mac="52:54:00:b2:2f:86", domain="node2.example.com"}, {name="node3", mac="52:54:00:c3:61:77", domain="node3.example.com"}]` |

 ### Optional

@@ -351,9 +349,8 @@ Check the [variables.tf](https://github.com/poseidon/typhoon/blob/master/bare-me
 | networking | Choice of networking provider | "calico" | "calico" or "flannel" |
 | network_mtu | CNI interface MTU (calico-only) | 1480 | - |
 | clc_snippets | Map from machine names to lists of Container Linux Config snippets | {} | [example](/advanced/customization/#usage) |
-| network_ip_autodetection_method | Method to detect host IPv4 address (calico-only) | first-found | can-reach=10.0.0.1 |
+| network_ip_autodetection_method | Method to detect host IPv4 address (calico-only) | "first-found" | "can-reach=10.0.0.1" |
 | pod_cidr | CIDR IPv4 range to assign to Kubernetes pods | "10.2.0.0/16" | "10.22.0.0/16" |
 | service_cidr | CIDR IPv4 range to assign to Kubernetes services | "10.3.0.0/16" | "10.3.0.0/24" |
-| cluster_domain_suffix | FQDN suffix for Kubernetes services answered by coredns. | "cluster.local" | "k8s.example.com" |
-| kernel_args | Additional kernel args to provide at PXE boot | [] | "kvm-intel.nested=1" |
+| kernel_args | Additional kernel args to provide at PXE boot | [] | ["kvm-intel.nested=1"] |


@@ -1,6 +1,6 @@
 # Digital Ocean

-In this tutorial, we'll create a Kubernetes v1.16.0 cluster on DigitalOcean with Container Linux.
+In this tutorial, we'll create a Kubernetes v1.16.2 cluster on DigitalOcean with Container Linux.

 We'll declare a Kubernetes cluster using the Typhoon Terraform module. Then apply the changes to create controller droplets, worker droplets, DNS records, tags, and TLS assets.

@@ -18,7 +18,7 @@ Install [Terraform](https://www.terraform.io/downloads.html) v0.12.x on your sys

 ```sh
 $ terraform version
-Terraform v0.12.7
+Terraform v0.12.9
 ```

 Add the [terraform-provider-ct](https://github.com/poseidon/terraform-provider-ct) plugin binary for your system to `~/.terraform.d/plugins/`, noting the final name.

@@ -50,7 +50,7 @@ Configure the DigitalOcean provider to use your token in a `providers.tf` file.

 ```tf
 provider "digitalocean" {
-version = "1.7.0"
+version = "1.8.0"
 token = "${chomp(file("~/.config/digital-ocean/token"))}"
 }

@@ -65,7 +65,7 @@ Define a Kubernetes cluster using the module `digital-ocean/container-linux/kube

 ```tf
 module "digital-ocean-nemo" {
-source = "git::https://github.com/poseidon/typhoon//digital-ocean/container-linux/kubernetes?ref=v1.16.0"
+source = "git::https://github.com/poseidon/typhoon//digital-ocean/container-linux/kubernetes?ref=v1.16.2"

 # Digital Ocean
 cluster_name = "nemo"
@@ -130,9 +130,9 @@ In 3-6 minutes, the Kubernetes cluster will be ready.
 $ export KUBECONFIG=/home/user/.secrets/clusters/nemo/auth/kubeconfig
 $ kubectl get nodes
 NAME STATUS ROLES AGE VERSION
-10.132.110.130 Ready <none> 10m v1.16.0
-10.132.115.81 Ready <none> 10m v1.16.0
-10.132.124.107 Ready <none> 10m v1.16.0
+10.132.110.130 Ready <none> 10m v1.16.2
+10.132.115.81 Ready <none> 10m v1.16.2
+10.132.124.107 Ready <none> 10m v1.16.2
 ```

 List the pods.
@@ -141,9 +141,9 @@ List the pods.
 NAMESPACE NAME READY STATUS RESTARTS AGE
 kube-system coredns-1187388186-ld1j7 1/1 Running 0 11m
 kube-system coredns-1187388186-rdhf7 1/1 Running 0 11m
-kube-system flannel-1cq1v 2/2 Running 0 11m
-kube-system flannel-hq9t0 2/2 Running 0 11m
-kube-system flannel-v0g9w 2/2 Running 0 11m
+kube-system calico-node-1m5bf 2/2 Running 0 11m
+kube-system calico-node-7jmr1 2/2 Running 0 11m
+kube-system calico-node-bknc8 2/2 Running 0 11m
 kube-system kube-apiserver-ip-10.132.115.81 1/1 Running 0 11m
 kube-system kube-controller-manager-ip-10.132.115.81 1/1 Running 0 11m
 kube-system kube-proxy-6kxjf 1/1 Running 0 11m
@@ -167,11 +167,11 @@ Check the [variables.tf](https://github.com/poseidon/typhoon/blob/master/digital

 | Name | Description | Example |
 |:-----|:------------|:--------|
-| cluster_name | Unique cluster name (prepended to dns_zone) | nemo |
-| region | Digital Ocean region | nyc1, sfo2, fra1, tor1 |
-| dns_zone | Digital Ocean domain (i.e. DNS zone) | do.example.com |
+| cluster_name | Unique cluster name (prepended to dns_zone) | "nemo" |
+| region | Digital Ocean region | "nyc1", "sfo2", "fra1", tor1" |
+| dns_zone | Digital Ocean domain (i.e. DNS zone) | "do.example.com" |
 | ssh_fingerprints | SSH public key fingerprints | ["d7:9d..."] |
-| asset_dir | Path to a directory where generated assets should be placed (contains secrets) | /home/user/.secrets/nemo |
+| asset_dir | Absolute path to a directory where generated assets should be placed (contains secrets) | "/home/user/.secrets/nemo" |

 #### DNS Zone

@@ -214,15 +214,14 @@ Digital Ocean requires the SSH public key be uploaded to your account, so you ma
 |:-----|:------------|:--------|:--------|
 | controller_count | Number of controllers (i.e. masters) | 1 | 1 |
 | worker_count | Number of workers | 1 | 3 |
-| controller_type | Droplet type for controllers | s-2vcpu-2gb | s-2vcpu-2gb, s-2vcpu-4gb, s-4vcpu-8gb, ... |
-| worker_type | Droplet type for workers | s-1vcpu-2gb | s-1vcpu-2gb, s-2vcpu-2gb, ... |
+| controller_type | Droplet type for controllers | "s-2vcpu-2gb" | s-2vcpu-2gb, s-2vcpu-4gb, s-4vcpu-8gb, ... |
+| worker_type | Droplet type for workers | "s-1vcpu-2gb" | s-1vcpu-2gb, s-2vcpu-2gb, ... |
 | image | Container Linux image for instances | "coreos-stable" | coreos-stable, coreos-beta, coreos-alpha |
 | controller_clc_snippets | Controller Container Linux Config snippets | [] | [example](/advanced/customization/) |
 | worker_clc_snippets | Worker Container Linux Config snippets | [] | [example](/advanced/customization/) |
-| networking | Choice of networking provider | "flannel" | "flannel" or "calico" (experimental) |
+| networking | Choice of networking provider | "calico" | "flannel" or "calico" |
 | pod_cidr | CIDR IPv4 range to assign to Kubernetes pods | "10.2.0.0/16" | "10.22.0.0/16" |
 | service_cidr | CIDR IPv4 range to assign to Kubernetes services | "10.3.0.0/16" | "10.3.0.0/24" |
-| cluster_domain_suffix | FQDN suffix for Kubernetes services answered by coredns. | "cluster.local" | "k8s.example.com" |

 Check the list of valid [droplet types](https://developers.digitalocean.com/documentation/changelog/api-v2/new-size-slugs-for-droplet-plan-changes/) or use `doctl compute size list`.


@@ -1,6 +1,6 @@
 # Google Cloud

-In this tutorial, we'll create a Kubernetes v1.16.0 cluster on Google Compute Engine with Container Linux.
+In this tutorial, we'll create a Kubernetes v1.16.2 cluster on Google Compute Engine with Container Linux.

 We'll declare a Kubernetes cluster using the Typhoon Terraform module. Then apply the changes to create a network, firewall rules, health checks, controller instances, worker managed instance group, load balancers, and TLS assets.

@@ -18,7 +18,7 @@ Install [Terraform](https://www.terraform.io/downloads.html) v0.12.x on your sys

 ```sh
 $ terraform version
-Terraform v0.12.7
+Terraform v0.12.9
 ```

 Add the [terraform-provider-ct](https://github.com/poseidon/terraform-provider-ct) plugin binary for your system to `~/.terraform.d/plugins/`, noting the final name.

@@ -49,10 +49,10 @@ Configure the Google Cloud provider to use your service account key, project-id,

 ```tf
 provider "google" {
-version = "2.15.0"
+version = "2.16.0"
 project = "project-id"
 region = "us-central1"
-credentials = "${file("~/.config/google-cloud/terraform.json")}"
+credentials = file("~/.config/google-cloud/terraform.json")
 }

 provider "ct" {
@@ -71,7 +71,7 @@ Define a Kubernetes cluster using the module `google-cloud/container-linux/kuber

 ```tf
 module "google-cloud-yavin" {
-source = "git::https://github.com/poseidon/typhoon//google-cloud/container-linux/kubernetes?ref=v1.16.0"
+source = "git::https://github.com/poseidon/typhoon//google-cloud/container-linux/kubernetes?ref=v1.16.2"

 # Google Cloud
 cluster_name = "yavin"
@@ -137,9 +137,9 @@ In 4-8 minutes, the Kubernetes cluster will be ready.
 $ export KUBECONFIG=/home/user/.secrets/clusters/yavin/auth/kubeconfig
 $ kubectl get nodes
 NAME ROLES STATUS AGE VERSION
-yavin-controller-0.c.example-com.internal <none> Ready 6m v1.16.0
-yavin-worker-jrbf.c.example-com.internal <none> Ready 5m v1.16.0
-yavin-worker-mzdm.c.example-com.internal <none> Ready 5m v1.16.0
+yavin-controller-0.c.example-com.internal <none> Ready 6m v1.16.2
+yavin-worker-jrbf.c.example-com.internal <none> Ready 5m v1.16.2
+yavin-worker-mzdm.c.example-com.internal <none> Ready 5m v1.16.2
 ```

 List the pods.
@@ -180,7 +180,7 @@ Check the [variables.tf](https://github.com/poseidon/typhoon/blob/master/google-
 | dns_zone | Google Cloud DNS zone | "google-cloud.example.com" |
 | dns_zone_name | Google Cloud DNS zone name | "example-zone" |
 | ssh_authorized_key | SSH public key for user 'core' | "ssh-rsa AAAAB3NZ..." |
-| asset_dir | Path to a directory where generated assets should be placed (contains secrets) | "/home/user/.secrets/clusters/yavin" |
+| asset_dir | Absolute path to a directory where generated assets should be placed (contains secrets) | "/home/user/.secrets/clusters/yavin" |

 Check the list of valid [regions](https://cloud.google.com/compute/docs/regions-zones/regions-zones) and list Container Linux [images](https://cloud.google.com/compute/docs/images) with `gcloud compute images list | grep coreos`.

@@ -217,7 +217,7 @@ resource "google_dns_managed_zone" "zone-for-clusters" {
 | networking | Choice of networking provider | "calico" | "calico" or "flannel" |
 | pod_cidr | CIDR IPv4 range to assign to Kubernetes pods | "10.2.0.0/16" | "10.22.0.0/16" |
 | service_cidr | CIDR IPv4 range to assign to Kubernetes services | "10.3.0.0/16" | "10.3.0.0/24" |
-| cluster_domain_suffix | FQDN suffix for Kubernetes services answered by coredns. | "cluster.local" | "k8s.example.com" |
+| worker_node_labels | List of initial worker node labels | [] | ["worker-pool=default"] |

 Check the list of valid [machine types](https://cloud.google.com/compute/docs/machine-types).

@@ -3,7 +3,7 @@
 !!! danger
 Typhoon for Fedora CoreOS is an early preview! Fedora CoreOS itself is a preview! Expect bugs and design shifts. Please help both projects solve problems. Report Fedora CoreOS bugs to [Fedora](https://github.com/coreos/fedora-coreos-tracker/issues). Report Typhoon issues to Typhoon.

-In this tutorial, we'll create a Kubernetes v1.16.0 cluster on AWS with Fedora CoreOS.
+In this tutorial, we'll create a Kubernetes v1.16.2 cluster on AWS with Fedora CoreOS.

 We'll declare a Kubernetes cluster using the Typhoon Terraform module. Then apply the changes to create a VPC, gateway, subnets, security groups, controller instances, worker auto-scaling group, network load balancer, and TLS assets.

@@ -21,7 +21,7 @@ Install [Terraform](https://www.terraform.io/downloads.html) v0.12.x on your sys

 ```sh
 $ terraform version
-Terraform v0.12.7
+Terraform v0.12.9
 ```

 Add the [terraform-provider-ct](https://github.com/poseidon/terraform-provider-ct) plugin binary for your system to `~/.terraform.d/plugins/`, noting the final name.

@@ -52,8 +52,8 @@ Configure the AWS provider to use your access key credentials in a `providers.tf

 ```tf
 provider "aws" {
-version = "2.29.0"
-region = "us-east-1" # MUST be us-east-1 right now!
+version = "2.31.0"
+region = "eu-central-1"
 shared_credentials_file = "/home/user/.config/aws/credentials"
 }

@@ -73,7 +73,7 @@ Define a Kubernetes cluster using the module `aws/fedora-coreos/kubernetes`.

 ```tf
 module "aws-tempest" {
-source = "git::https://github.com/poseidon/typhoon//aws/fedora-coreos/kubernetes?ref=DEVELOPMENT_SHA"
+source = "git::https://github.com/poseidon/typhoon//aws/fedora-coreos/kubernetes?ref=v1.16.2"

 # AWS
 cluster_name = "tempest"
@@ -138,9 +138,9 @@ In 4-8 minutes, the Kubernetes cluster will be ready.
 $ export KUBECONFIG=/home/user/.secrets/clusters/tempest/auth/kubeconfig
 $ kubectl get nodes
 NAME STATUS ROLES AGE VERSION
-ip-10-0-3-155 Ready <none> 10m v1.16.0
-ip-10-0-26-65 Ready <none> 10m v1.16.0
-ip-10-0-41-21 Ready <none> 10m v1.16.0
+ip-10-0-3-155 Ready <none> 10m v1.16.2
+ip-10-0-26-65 Ready <none> 10m v1.16.2
+ip-10-0-41-21 Ready <none> 10m v1.16.2
 ```

 List the pods.
@@ -191,7 +191,7 @@ resource "aws_route53_zone" "zone-for-clusters" {
 }
 ```

-Reference the DNS zone id with `"${aws_route53_zone.zone-for-clusters.zone_id}"`.
+Reference the DNS zone id with `aws_route53_zone.zone-for-clusters.zone_id`.
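To make that wiring concrete, here is a minimal sketch of passing the zone id into the cluster module defined earlier. The zone name and `dns_zone` value are illustrative; only the resource and attribute names come from the lines above.

```tf
resource "aws_route53_zone" "zone-for-clusters" {
  name = "aws.example.com."
}

module "aws-tempest" {
  source = "git::https://github.com/poseidon/typhoon//aws/fedora-coreos/kubernetes?ref=v1.16.2"

  # ...other required arguments omitted for brevity...
  dns_zone    = "aws.example.com"
  dns_zone_id = aws_route53_zone.zone-for-clusters.zone_id
}
```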

 !!! tip ""
 If you have an existing domain name with a zone file elsewhere, just delegate a subdomain that can be managed on Route53 (e.g. aws.mydomain.com) and [update nameservers](http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/SOA-NSrecords.html).

@@ -205,11 +205,11 @@ Reference the DNS zone id with `"${aws_route53_zone.zone-for-clusters.zone_id}"`
 | controller_type | EC2 instance type for controllers | "t3.small" | See below |
 | worker_type | EC2 instance type for workers | "t3.small" | See below |
 | os_image | AMI channel for Fedora CoreOS | not yet used | ? |
-| disk_size | Size of the EBS volume in GB | "40" | "100" |
+| disk_size | Size of the EBS volume in GB | 40 | 100 |
 | disk_type | Type of the EBS volume | "gp2" | standard, gp2, io1 |
-| disk_iops | IOPS of the EBS volume | "0" (i.e. auto) | "400" |
-| worker_target_groups | Target group ARNs to which worker instances should be added | [] | ["${aws_lb_target_group.app.id}"] |
-| worker_price | Spot price in USD for workers. Leave as default empty string for regular on-demand instances | "" | "0.10" |
+| disk_iops | IOPS of the EBS volume | 0 (i.e. auto) | 400 |
+| worker_target_groups | Target group ARNs to which worker instances should be added | [] | [aws_lb_target_group.app.id] |
+| worker_price | Spot price in USD for worker instances or 0 to use on-demand instances | 0 | 0.10 |
 | controller_snippets | Controller Fedora CoreOS Config snippets | [] | UNSUPPORTED |
 | worker_clc_snippets | Worker Fedora CoreOS Config snippets | [] | UNSUPPORTED |
 | networking | Choice of networking provider | "calico" | "calico" or "flannel" |
@@ -217,7 +217,7 @@ Reference the DNS zone id with `"${aws_route53_zone.zone-for-clusters.zone_id}"`
 | host_cidr | CIDR IPv4 range to assign to EC2 instances | "10.0.0.0/16" | "10.1.0.0/16" |
 | pod_cidr | CIDR IPv4 range to assign to Kubernetes pods | "10.2.0.0/16" | "10.22.0.0/16" |
 | service_cidr | CIDR IPv4 range to assign to Kubernetes services | "10.3.0.0/16" | "10.3.0.0/24" |
-| cluster_domain_suffix | FQDN suffix for Kubernetes services answered by coredns. | "cluster.local" | "k8s.example.com" |
+| worker_node_labels | List of initial worker node labels | [] | ["worker-pool=default"] |

 Check the list of valid [instance types](https://aws.amazon.com/ec2/instance-types/).

@@ -3,7 +3,7 @@
 !!! danger
 Typhoon for Fedora CoreOS is an early preview! Fedora CoreOS itself is a preview! Expect bugs and design shifts. Please help both projects solve problems. Report Fedora CoreOS bugs to [Fedora](https://github.com/coreos/fedora-coreos-tracker/issues). Report Typhoon issues to Typhoon.

-In this tutorial, we'll network boot and provision a Kubernetes v1.16.0 cluster on bare-metal with Fedora CoreOS.
+In this tutorial, we'll network boot and provision a Kubernetes v1.16.2 cluster on bare-metal with Fedora CoreOS.

 First, we'll deploy a [Matchbox](https://github.com/poseidon/matchbox) service and setup a network boot environment. Then, we'll declare a Kubernetes cluster using the Typhoon Terraform module and power on machines. On PXE boot, machines will install Fedora CoreOS to disk, reboot into the disk install, and provision themselves as Kubernetes controllers or workers via Ignition.

@@ -114,7 +114,7 @@ Install [Terraform](https://www.terraform.io/downloads.html) v0.12.x on your sys

 ```sh
 $ terraform version
-Terraform v0.12.7
+Terraform v0.12.9
 ```

 Add the [terraform-provider-matchbox](https://github.com/poseidon/terraform-provider-matchbox) plugin binary for your system to `~/.terraform.d/plugins/`, noting the final name.

@@ -147,9 +147,9 @@ Configure the Matchbox provider to use your Matchbox API endpoint and client cer
 provider "matchbox" {
 version = "0.3.0"
 endpoint = "matchbox.example.com:8081"
-client_cert = "${file("~/.config/matchbox/client.crt")}"
-client_key = "${file("~/.config/matchbox/client.key")}"
-ca = "${file("~/.config/matchbox/ca.crt")}"
+client_cert = file("~/.config/matchbox/client.crt")
+client_key = file("~/.config/matchbox/client.key")
+ca = file("~/.config/matchbox/ca.crt")
 }

 provider "ct" {
@@ -163,14 +163,14 @@ Define a Kubernetes cluster using the module `bare-metal/fedora-coreos/kubernete

 ```tf
 module "bare-metal-mercury" {
-source = "git::https://github.com/poseidon/typhoon//bare-metal/fedora-coreos/kubernetes?ref=DEVELOPMENT_SHA"
+source = "git::https://github.com/poseidon/typhoon//bare-metal/fedora-coreos/kubernetes?ref=v1.16.2"

 # bare-metal
 cluster_name = "mercury"
 matchbox_http_endpoint = "http://matchbox.example.com"
 os_stream = "testing"
-os_version = "30.20190801.0"
-cached_install = "true"
+os_version = "30.20191002.0"
+cached_install = true

 # configuration
 k8s_domain_name = "node1.example.com"
@@ -178,20 +178,22 @@ module "bare-metal-mercury" {
 asset_dir = "/home/user/.secrets/clusters/mercury"

 # machines
-controller_names = ["node1"]
-controller_macs = ["52:54:00:a1:9c:ae"]
-controller_domains = ["node1.example.com"]
-worker_names = [
-"node2",
-"node3",
-]
-worker_macs = [
-"52:54:00:b2:2f:86",
-"52:54:00:c3:61:77",
-]
-worker_domains = [
-"node2.example.com",
-"node3.example.com",
+controllers = [{
+name = "node1"
+mac = "52:54:00:a1:9c:ae"
+domain = "node1.example.com"
+}]
+workers = [
+{
+name = "node2",
+mac = "52:54:00:b2:2f:86"
+domain = "node2.example.com"
+},
+{
+name = "node3",
+mac = "52:54:00:c3:61:77"
+domain = "node3.example.com"
+}
 ]
 }
 ```
@@ -283,9 +285,9 @@ systemd[1]: Started Kubernetes control plane.
 $ export KUBECONFIG=/home/user/.secrets/clusters/mercury/auth/kubeconfig
 $ kubectl get nodes
 NAME STATUS ROLES AGE VERSION
-node1.example.com Ready <none> 10m v1.16.0
-node2.example.com Ready <none> 10m v1.16.0
-node3.example.com Ready <none> 10m v1.16.0
+node1.example.com Ready <none> 10m v1.16.2
+node2.example.com Ready <none> 10m v1.16.2
+node3.example.com Ready <none> 10m v1.16.2
 ```

 List the pods.
@@ -318,19 +320,15 @@ Check the [variables.tf](https://github.com/poseidon/typhoon/blob/master/bare-me

 | Name | Description | Example |
 |:-----|:------------|:--------|
-| cluster_name | Unique cluster name | mercury |
-| matchbox_http_endpoint | Matchbox HTTP read-only endpoint | http://matchbox.example.com:port |
-| os_stream | Fedora CoreOS release stream | testing |
-| os_version | Fedora CoreOS version to PXE and install | 30.20190716.1 |
+| cluster_name | Unique cluster name | "mercury" |
+| matchbox_http_endpoint | Matchbox HTTP read-only endpoint | "http://matchbox.example.com:port" |
+| os_stream | Fedora CoreOS release stream | "testing" |
+| os_version | Fedora CoreOS version to PXE and install | "30.20190716.1" |
 | k8s_domain_name | FQDN resolving to the controller(s) nodes. Workers and kubectl will communicate with this endpoint | "myk8s.example.com" |
 | ssh_authorized_key | SSH public key for user 'core' | "ssh-rsa AAAAB3Nz..." |
 | asset_dir | Absolute path to a directory where generated assets should be placed (contains secrets) | "/home/user/.secrets/clusters/mercury" |
-| controller_names | Ordered list of controller short names | ["node1"] |
-| controller_macs | Ordered list of controller identifying MAC addresses | ["52:54:00:a1:9c:ae"] |
-| controller_domains | Ordered list of controller FQDNs | ["node1.example.com"] |
-| worker_names | Ordered list of worker short names | ["node2", "node3"] |
-| worker_macs | Ordered list of worker identifying MAC addresses | ["52:54:00:b2:2f:86", "52:54:00:c3:61:77"] |
-| worker_domains | Ordered list of worker FQDNs | ["node2.example.com", "node3.example.com"] |
+| controllers | List of controller machine detail objects (unique name, identifying MAC address, FQDN) | `[{name="node1", mac="52:54:00:a1:9c:ae", domain="node1.example.com"}]` |
+| workers | List of worker machine detail objects (unique name, identifying MAC address, FQDN) | `[{name="node2", mac="52:54:00:b2:2f:86", domain="node2.example.com"}, {name="node3", mac="52:54:00:c3:61:77", domain="node3.example.com"}]` |

 ### Optional

@@ -341,9 +339,8 @@ Check the [variables.tf](https://github.com/poseidon/typhoon/blob/master/bare-me
 | networking | Choice of networking provider | "calico" | "calico" or "flannel" |
 | network_mtu | CNI interface MTU (calico-only) | 1480 | - |
 | snippets | Map from machine names to lists of Fedora CoreOS Config snippets | {} | UNSUPPORTED |
-| network_ip_autodetection_method | Method to detect host IPv4 address (calico-only) | first-found | can-reach=10.0.0.1 |
+| network_ip_autodetection_method | Method to detect host IPv4 address (calico-only) | "first-found" | "can-reach=10.0.0.1" |
 | pod_cidr | CIDR IPv4 range to assign to Kubernetes pods | "10.2.0.0/16" | "10.22.0.0/16" |
 | service_cidr | CIDR IPv4 range to assign to Kubernetes services | "10.3.0.0/16" | "10.3.0.0/24" |
-| cluster_domain_suffix | FQDN suffix for Kubernetes services answered by coredns. | "cluster.local" | "k8s.example.com" |
-| kernel_args | Additional kernel args to provide at PXE boot | [] | "kvm-intel.nested=1" |
+| kernel_args | Additional kernel args to provide at PXE boot | [] | ["kvm-intel.nested=1"] |

@@ -11,7 +11,7 @@ Typhoon distributes upstream Kubernetes, architectural conventions, and cluster

 ## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>

-* Kubernetes v1.16.0 (upstream)
+* Kubernetes v1.16.2 (upstream)
 * Single or multi-master, [Calico](https://www.projectcalico.org/) or [flannel](https://github.com/coreos/flannel) networking
 * On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
 * Advanced features like [worker pools](advanced/worker-pools/), [preemptible](cl/google-cloud/#preemption) workers, and [snippets](advanced/customization/#container-linux) customization
@@ -47,7 +47,7 @@ Define a Kubernetes cluster by using the Terraform module for your chosen platfo

 ```tf
 module "google-cloud-yavin" {
-source = "git::https://github.com/poseidon/typhoon//google-cloud/container-linux/kubernetes?ref=v1.16.0"
+source = "git::https://github.com/poseidon/typhoon//google-cloud/container-linux/kubernetes?ref=v1.16.2"

 # Google Cloud
 cluster_name = "yavin"
@@ -80,9 +80,9 @@ In 4-8 minutes (varies by platform), the cluster will be ready. This Google Clou
 $ export KUBECONFIG=/home/user/.secrets/clusters/yavin/auth/kubeconfig
 $ kubectl get nodes
 NAME ROLES STATUS AGE VERSION
-yavin-controller-0.c.example-com.internal <none> Ready 6m v1.16.0
-yavin-worker-jrbf.c.example-com.internal <none> Ready 5m v1.16.0
-yavin-worker-mzdm.c.example-com.internal <none> Ready 5m v1.16.0
+yavin-controller-0.c.example-com.internal <none> Ready 6m v1.16.2
+yavin-worker-jrbf.c.example-com.internal <none> Ready 5m v1.16.2
+yavin-worker-mzdm.c.example-com.internal <none> Ready 5m v1.16.2
 ```

 List the pods.

@@ -18,7 +18,7 @@ module "google-cloud-yavin" {
 }

 module "bare-metal-mercury" {
-source = "git::https://github.com/poseidon/typhoon//bare-metal/container-linux/kubernetes?ref=v1.16.0"
+source = "git::https://github.com/poseidon/typhoon//bare-metal/container-linux/kubernetes?ref=v1.16.2"
 ...
 }
 ```
@@ -279,15 +279,15 @@ Typhoon modules have been adapted for Terraform v0.12. Provider plugins requirem

 | Typhoon Release | Terraform version |
 |-------------------|---------------------|
-| v1.16.0 - ? | v0.12.x |
-| v1.10.3 - v1.16.0 | v0.11.x |
+| v1.16.2 - ? | v0.12.x |
+| v1.10.3 - v1.16.2 | v0.11.x |
 | v1.9.2 - v1.10.2 | v0.10.4+ or v0.11.x |
 | v1.7.3 - v1.9.1 | v0.10.x |
 | v1.6.4 - v1.7.2 | v0.9.x |

 ### New users

-New users can start with Terraform v0.12.x and follow the docs for Typhoon v1.16.0+ without issue.
+New users can start with Terraform v0.12.x and follow the docs for Typhoon v1.16.2+ without issue.

 ### Existing users

@@ -404,7 +404,7 @@ tree .
 └── infraB <- new Terraform v0.12.x configs
 ```

-Define Typhoon clusters in the new config directory using Terraform v0.12 syntax. Follow the Typhoon v1.16.0+ docs (e.g. use `terraform12` in the `infraB` dir). See [AWS](/cl/aws), [Azure](/cl/azure), [Bare-Metal](/cl/bare-metal), [Digital Ocean](/cl/digital-ocean), or [Google-Cloud](/cl/google-cloud)) to create new clusters. Follow the usual [upgrade](/topics/maintenance/#upgrades) process to apply workloads and shift traffic. Later, switch back to the old config directory and deprovision clusters with Terraform v0.11.
+Define Typhoon clusters in the new config directory using Terraform v0.12 syntax. Follow the Typhoon v1.16.2+ docs (e.g. use `terraform12` in the `infraB` dir). See [AWS](/cl/aws), [Azure](/cl/azure), [Bare-Metal](/cl/bare-metal), [Digital Ocean](/cl/digital-ocean), or [Google-Cloud](/cl/google-cloud)) to create new clusters. Follow the usual [upgrade](/topics/maintenance/#upgrades) process to apply workloads and shift traffic. Later, switch back to the old config directory and deprovision clusters with Terraform v0.11.

 ```shell
 terraform12 init

@@ -12,7 +12,7 @@ Typhoon aims to be minimal and secure. We're running it ourselves after all.
 * Workloads run on worker nodes only, unless they tolerate the master taint
 * Kubernetes [Network Policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/) and Calico [NetworkPolicy](https://docs.projectcalico.org/latest/reference/calicoctl/resources/networkpolicy) support [^1]

-[^1]: Requires `networking = "calico"`. Calico is the default on AWS, bare-metal, and Google Cloud. Azure and Digital Ocean are limited to `networking = "flannel"`.
+[^1]: Requires `networking = "calico"`. Calico is the default on all platforms (AWS, Azure, bare-metal, DigitalOcean, and Google Cloud).

 **Hosts**

@@ -11,7 +11,7 @@ Typhoon distributes upstream Kubernetes, architectural conventions, and cluster

 ## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>

-* Kubernetes v1.16.0 (upstream)
+* Kubernetes v1.16.2 (upstream)
 * Single or multi-master, [Calico](https://www.projectcalico.org/) or [flannel](https://github.com/coreos/flannel) networking
 * On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
 * Advanced features like [worker pools](https://typhoon.psdn.io/advanced/worker-pools/), [preemptible](https://typhoon.psdn.io/cl/google-cloud/#preemption) workers, and [snippets](https://typhoon.psdn.io/advanced/customization/#container-linux) customization

@@ -1,6 +1,6 @@
 # Kubernetes assets (kubeconfig, manifests)
 module "bootstrap" {
-source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=539b725093c8cd94ba46603adb25ac5280562ec8"
+source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=0fcc067476fa1463d057fd43760df222b7262b27"

 cluster_name = var.cluster_name
 api_servers = [format("%s.%s", var.cluster_name, var.dns_zone)]
@@ -7,7 +7,7 @@ systemd:
 - name: 40-etcd-cluster.conf
 contents: |
 [Service]
-Environment="ETCD_IMAGE_TAG=v3.4.0"
+Environment="ETCD_IMAGE_TAG=v3.4.2"
 Environment="ETCD_NAME=${etcd_name}"
 Environment="ETCD_ADVERTISE_CLIENT_URLS=https://${etcd_domain}:2379"
 Environment="ETCD_INITIAL_ADVERTISE_PEER_URLS=https://${etcd_domain}:2380"
@@ -112,7 +112,7 @@ systemd:
 --volume script,kind=host,source=/opt/bootstrap/apply \
 --mount volume=script,target=/apply \
 --insecure-options=image \
-docker://k8s.gcr.io/hyperkube:v1.16.0 \
+docker://k8s.gcr.io/hyperkube:v1.16.2 \
 --net=host \
 --dns=host \
 --exec=/apply
@@ -133,7 +133,7 @@ storage:
 contents:
 inline: |
 KUBELET_IMAGE_URL=docker://k8s.gcr.io/hyperkube
-KUBELET_IMAGE_TAG=v1.16.0
+KUBELET_IMAGE_TAG=v1.16.2
 - path: /opt/bootstrap/apply
 filesystem: root
 mode: 0544
@@ -23,45 +23,45 @@ variable "dns_zone_name" {
 # instances

 variable "controller_count" {
-type = string
-default = "1"
+type = number
 description = "Number of controllers (i.e. masters)"
+default = 1
 }

 variable "worker_count" {
-type = string
-default = "1"
+type = number
 description = "Number of workers"
+default = 1
 }

 variable "controller_type" {
 type = string
-default = "n1-standard-1"
 description = "Machine type for controllers (see `gcloud compute machine-types list`)"
+default = "n1-standard-1"
 }

 variable "worker_type" {
 type = string
-default = "n1-standard-1"
 description = "Machine type for controllers (see `gcloud compute machine-types list`)"
+default = "n1-standard-1"
 }

 variable "os_image" {
 type = string
-default = "coreos-stable"
 description = "Container Linux image for compute instances (e.g. coreos-stable)"
+default = "coreos-stable"
 }

 variable "disk_size" {
-type = string
-default = "40"
+type = number
 description = "Size of the disk in GB"
+default = 40
 }

 variable "worker_preemptible" {
-type = string
-default = "false"
+type = bool
 description = "If enabled, Compute Engine will terminate workers randomly within 24 hours"
+default = false
 }

 variable "controller_clc_snippets" {
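With these typed variables, callers pass numbers and booleans unquoted under Terraform v0.12. A minimal sketch of a module invocation against the variables above (the module name and values are illustrative):

```tf
module "google-cloud-yavin" {
  source = "git::https://github.com/poseidon/typhoon//google-cloud/container-linux/kubernetes?ref=v1.16.2"

  # ...required arguments omitted for brevity...
  controller_count   = 1    # number, not "1"
  worker_count       = 2    # number, not "2"
  disk_size          = 40   # size in GB, as a number
  worker_preemptible = true # bool, not "true"
}
```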
@@ -84,48 +84,55 @@ variable "ssh_authorized_key" {
 }

 variable "asset_dir" {
-description = "Path to a directory where generated assets should be placed (contains secrets)"
 type = string
+description = "Absolute path to a directory where generated assets should be placed (contains secrets)"
 }

 variable "networking" {
-description = "Choice of networking provider (flannel or calico)"
 type = string
+description = "Choice of networking provider (flannel or calico)"
 default = "calico"
 }

 variable "pod_cidr" {
-description = "CIDR IPv4 range to assign Kubernetes pods"
 type = string
+description = "CIDR IPv4 range to assign Kubernetes pods"
 default = "10.2.0.0/16"
 }

 variable "service_cidr" {
+type = string
 description = <<EOD
 CIDR IPv4 range to assign Kubernetes services.
 The 1st IP will be reserved for kube_apiserver, the 10th IP will be reserved for coredns.
 EOD

-type = string
 default = "10.3.0.0/16"
 }

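The reserved-IP convention in the `service_cidr` description can be computed with Terraform's `cidrhost` function. A small illustrative sketch (the local names are hypothetical, not part of the module):

```tf
locals {
  service_cidr = "10.3.0.0/16"

  # 1st IP of the service range: reserved for the kube-apiserver ClusterIP
  kube_apiserver_ip = cidrhost(local.service_cidr, 1) # "10.3.0.1"

  # 10th IP of the service range: reserved for the CoreDNS ClusterIP
  coredns_ip = cidrhost(local.service_cidr, 10) # "10.3.0.10"
}
```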
variable "cluster_domain_suffix" {
|
|
||||||
description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
|
|
||||||
type = string
|
|
||||||
default = "cluster.local"
|
|
||||||
}
|
|
||||||
|
|
||||||
variable "enable_reporting" {
|
variable "enable_reporting" {
|
||||||
type = string
|
type = bool
|
||||||
description = "Enable usage or analytics reporting to upstreams (Calico)"
|
description = "Enable usage or analytics reporting to upstreams (Calico)"
|
||||||
default = "false"
|
default = false
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "enable_aggregation" {
|
variable "enable_aggregation" {
|
||||||
|
type = bool
|
||||||
description = "Enable the Kubernetes Aggregation Layer (defaults to false)"
|
description = "Enable the Kubernetes Aggregation Layer (defaults to false)"
|
||||||
type = string
|
default = false
|
||||||
default = "false"
|
}
|
||||||
|
|
||||||
|
variable "worker_node_labels" {
|
||||||
|
type = list(string)
|
||||||
|
description = "List of initial worker node labels"
|
||||||
|
default = []
|
||||||
|
}
|
||||||
|
|
||||||
|
# unofficial, undocumented, unsupported
|
||||||
|
|
||||||
|
variable "cluster_domain_suffix" {
|
||||||
|
type = string
|
||||||
|
description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
|
||||||
|
default = "cluster.local"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -18,5 +18,6 @@ module "workers" {
 service_cidr = var.service_cidr
 cluster_domain_suffix = var.cluster_domain_suffix
 clc_snippets = var.worker_clc_snippets
+node_labels = var.worker_node_labels
 }

|
@@ -60,6 +60,9 @@ systemd:
 --lock-file=/var/run/lock/kubelet.lock \
 --network-plugin=cni \
 --node-labels=node.kubernetes.io/node \
+%{ for label in split(",", node_labels) }
+--node-labels=${label} \
+%{ endfor ~}
 --pod-manifest-path=/etc/kubernetes/manifests \
 --read-only-port=0 \
 --volume-plugin-dir=/var/lib/kubelet/volumeplugins

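The template receives `node_labels` as a single comma-separated string and expands it into one `--node-labels` flag per entry. A standalone sketch of the same expansion using a plain Terraform `for` expression (the label values are hypothetical, not module defaults):

```hcl
locals {
  # Example of the comma-separated string handed to the template.
  node_labels = "pool=workers,tier=gpu"

  # One kubelet flag per label, mirroring the %{ for } directive above.
  kubelet_label_flags = [
    for label in split(",", local.node_labels) : "--node-labels=${label}"
  ]
  # => ["--node-labels=pool=workers", "--node-labels=tier=gpu"]
}
```
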
@@ -94,7 +97,7 @@ storage:
 contents:
 inline: |
 KUBELET_IMAGE_URL=docker://k8s.gcr.io/hyperkube
-KUBELET_IMAGE_TAG=v1.16.0
+KUBELET_IMAGE_TAG=v1.16.2
 - path: /etc/sysctl.d/max-user-watches.conf
 filesystem: root
 contents:

@@ -112,7 +115,7 @@ storage:
 --volume config,kind=host,source=/etc/kubernetes \
 --mount volume=config,target=/etc/kubernetes \
 --insecure-options=image \
-docker://k8s.gcr.io/hyperkube:v1.16.0 \
+docker://k8s.gcr.io/hyperkube:v1.16.2 \
 --net=host \
 --dns=host \
 --exec=/kubectl -- --kubeconfig=/etc/kubernetes/kubeconfig delete node $(hostname)

|
@ -23,33 +23,39 @@ variable "network" {
|
|||||||
# instances
|
# instances
|
||||||
|
|
||||||
variable "worker_count" {
|
variable "worker_count" {
|
||||||
type = string
|
type = number
|
||||||
default = "1"
|
|
||||||
description = "Number of worker compute instances the instance group should manage"
|
description = "Number of worker compute instances the instance group should manage"
|
||||||
|
default = 1
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "machine_type" {
|
variable "machine_type" {
|
||||||
type = string
|
type = string
|
||||||
default = "n1-standard-1"
|
|
||||||
description = "Machine type for compute instances (e.g. gcloud compute machine-types list)"
|
description = "Machine type for compute instances (e.g. gcloud compute machine-types list)"
|
||||||
|
default = "n1-standard-1"
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "os_image" {
|
variable "os_image" {
|
||||||
type = string
|
type = string
|
||||||
default = "coreos-stable"
|
|
||||||
description = "Container Linux image for compute instanges (e.g. gcloud compute images list)"
|
description = "Container Linux image for compute instanges (e.g. gcloud compute images list)"
|
||||||
|
default = "coreos-stable"
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "disk_size" {
|
variable "disk_size" {
|
||||||
type = string
|
type = number
|
||||||
default = "40"
|
|
||||||
description = "Size of the disk in GB"
|
description = "Size of the disk in GB"
|
||||||
|
default = 40
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "preemptible" {
|
variable "preemptible" {
|
||||||
type = string
|
type = bool
|
||||||
default = "false"
|
|
||||||
description = "If enabled, Compute Engine will terminate instances randomly within 24 hours"
|
description = "If enabled, Compute Engine will terminate instances randomly within 24 hours"
|
||||||
|
default = false
|
||||||
|
}
|
||||||
|
|
||||||
|
variable "clc_snippets" {
|
||||||
|
type = list(string)
|
||||||
|
description = "Container Linux Config snippets"
|
||||||
|
default = []
|
||||||
}
|
}
|
||||||
|
|
||||||
# configuration
|
# configuration
|
||||||
@ -65,30 +71,28 @@ variable "ssh_authorized_key" {
|
|||||||
}
|
}
|
||||||
|
|
||||||
variable "service_cidr" {
|
variable "service_cidr" {
|
||||||
|
type = string
|
||||||
description = <<EOD
|
description = <<EOD
|
||||||
CIDR IPv4 range to assign Kubernetes services.
|
CIDR IPv4 range to assign Kubernetes services.
|
||||||
The 1st IP will be reserved for kube_apiserver, the 10th IP will be reserved for coredns.
|
The 1st IP will be reserved for kube_apiserver, the 10th IP will be reserved for coredns.
|
||||||
EOD
|
EOD
|
||||||
|
|
||||||
|
|
||||||
type = string
|
|
||||||
default = "10.3.0.0/16"
|
default = "10.3.0.0/16"
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "cluster_domain_suffix" {
|
variable "node_labels" {
|
||||||
description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
|
|
||||||
type = string
|
|
||||||
default = "cluster.local"
|
|
||||||
}
|
|
||||||
|
|
||||||
variable "clc_snippets" {
|
|
||||||
type = list(string)
|
type = list(string)
|
||||||
description = "Container Linux Config snippets"
|
description = "List of initial node labels"
|
||||||
default = []
|
default = []
|
||||||
}
|
}
|
||||||
|
|
||||||
# unofficial, undocumented, unsupported, temporary
|
# unofficial, undocumented, unsupported, temporary
|
||||||
|
|
||||||
|
variable "cluster_domain_suffix" {
|
||||||
|
type = string
|
||||||
|
description = "Queries for domains with the suffix will be answered by coredns. Default is cluster.local (e.g. foo.default.svc.cluster.local) "
|
||||||
|
default = "cluster.local"
|
||||||
|
}
|
||||||
|
|
||||||
variable "accelerator_type" {
|
variable "accelerator_type" {
|
||||||
type = string
|
type = string
|
||||||
default = ""
|
default = ""
|
||||||
|
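Because the pool variables are now typed, worker pool callers set numbers, booleans, and label lists directly. A minimal sketch of a workers module call (module name, source, and values are hypothetical; other required inputs such as the network, kubeconfig, and SSH key are omitted):

```hcl
module "extra-workers" {
  source = "..." # internal workers module path elided

  worker_count = 2    # number, no longer the string "2"
  preemptible  = true # bool, no longer the string "true"
  node_labels  = ["pool=extra", "tier=batch"]
}
```
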
@ -82,6 +82,7 @@ data "template_file" "worker-config" {
|
|||||||
ssh_authorized_key = var.ssh_authorized_key
|
ssh_authorized_key = var.ssh_authorized_key
|
||||||
cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
|
cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
|
||||||
cluster_domain_suffix = var.cluster_domain_suffix
|
cluster_domain_suffix = var.cluster_domain_suffix
|
||||||
|
node_labels = join(",", var.node_labels)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
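The label list is flattened with `join(",", ...)` because the `template_file` data source's `vars` accept only primitive values, not lists; the Container Linux Config template then splits the string back apart (see the kubelet hunk above). A small sketch of that hand-off, with hypothetical values:

```hcl
locals {
  node_labels = ["pool=workers", "tier=gpu"]

  # String handed to the template's node_labels var.
  joined = join(",", local.node_labels) # "pool=workers,tier=gpu"
}
```
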
@@ -1,4 +1,4 @@
 mkdocs==1.0.4
-mkdocs-material==4.4.2
+mkdocs-material==4.4.3
 pygments==2.2.0
 pymdown-extensions==5.0.0