diff --git a/CHANGES.md b/CHANGES.md index 334cf1d5..eaedfac9 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -7,8 +7,11 @@ Notable changes between versions. #### Google Cloud * Add required variable `region` (e.g. "us-central1") +* Reduce time to bootstrap a cluster +* Change etcd to run on-host, across controllers (etcd-member.service) * Change worker managed instance group to automatically span zones in a region -* Remove `controller_preemptible` optional variable (breaking) +* Remove support for self-hosted etcd +* Remove `controller_preemptible` optional variable ## v1.8.2 diff --git a/README.md b/README.md index a368a629..534308a0 100644 --- a/README.md +++ b/README.md @@ -79,7 +79,7 @@ In 5-10 minutes (varies by platform), the cluster will be ready. This Google Clo $ KUBECONFIG=/home/user/.secrets/clusters/yavin/auth/kubeconfig $ kubectl get nodes NAME STATUS AGE VERSION -yavin-controller-1682.c.example-com.internal Ready 6m v1.8.2 +yavin-controller-0.c.example-com.internal Ready 6m v1.8.2 yavin-worker-jrbf.c.example-com.internal Ready 5m v1.8.2 yavin-worker-mzdm.c.example-com.internal Ready 5m v1.8.2 ``` @@ -92,13 +92,10 @@ NAMESPACE NAME READY STATUS RESTART kube-system calico-node-1cs8z 2/2 Running 0 6m kube-system calico-node-d1l5b 2/2 Running 0 6m kube-system calico-node-sp9ps 2/2 Running 0 6m -kube-system etcd-operator-3329263108-f443m 1/1 Running 1 6m kube-system kube-apiserver-zppls 1/1 Running 0 6m kube-system kube-controller-manager-3271970485-gh9kt 1/1 Running 0 6m kube-system kube-controller-manager-3271970485-h90v8 1/1 Running 1 6m kube-system kube-dns-1187388186-zj5dl 3/3 Running 0 6m -kube-system kube-etcd-0000 1/1 Running 0 5m -kube-system kube-etcd-network-checkpointer-crznb 1/1 Running 0 6m kube-system kube-proxy-117v6 1/1 Running 0 6m kube-system kube-proxy-9886n 1/1 Running 0 6m kube-system kube-proxy-njn47 1/1 Running 0 6m diff --git a/digital-ocean/container-linux/kubernetes/controllers.tf b/digital-ocean/container-linux/kubernetes/controllers.tf index 9739af22..243168e2 100644 --- a/digital-ocean/container-linux/kubernetes/controllers.tf +++ b/digital-ocean/container-linux/kubernetes/controllers.tf @@ -14,6 +14,7 @@ resource "digitalocean_record" "controllers" { value = "${element(digitalocean_droplet.controllers.*.ipv4_address, count.index)}" } +# Discrete DNS records for each controller's private IPv4 for etcd usage. resource "digitalocean_record" "etcds" { count = "${var.controller_count}" @@ -25,7 +26,7 @@ resource "digitalocean_record" "etcds" { type = "A" ttl = 300 - # IPv4 addresses of controllers + # private IPv4 address for etcd value = "${element(digitalocean_droplet.controllers.*.ipv4_address_private, count.index)}" } diff --git a/docs/aws.md b/docs/aws.md index 976e9db3..68826e18 100644 --- a/docs/aws.md +++ b/docs/aws.md @@ -4,7 +4,7 @@ In this tutorial, we'll create a Kubernetes v1.8.2 cluster on AWS. We'll declare a Kubernetes cluster in Terraform using the Typhoon Terraform module. On apply, a VPC, gateway, subnets, auto-scaling groups of controllers and workers, network load balancers for controllers and workers, and security groups will be created. -Controllers and workers are provisioned to run a `kubelet`. A one-time [bootkube](https://github.com/kubernetes-incubator/bootkube) bootstrap schedules `etcd`, `apiserver`, `scheduler`, `controller-manager`, and `kube-dns` on controllers and runs `kube-proxy` and `flannel` or `calico` on each node. A generated `kubeconfig` provides `kubectl` access to the cluster. 
+Controllers and workers are provisioned to run a `kubelet`. A one-time [bootkube](https://github.com/kubernetes-incubator/bootkube) bootstrap schedules `etcd`, `apiserver`, `scheduler`, `controller-manager`, and `kube-dns` on controllers and runs `kube-proxy` and `calico` or `flannel` on each node. A generated `kubeconfig` provides `kubectl` access to the cluster. !!! warning "Alpha" Typhoon Kubernetes clusters on AWS are marked as "alpha". diff --git a/docs/bare-metal.md b/docs/bare-metal.md index 9a13a049..84cc3cd8 100644 --- a/docs/bare-metal.md +++ b/docs/bare-metal.md @@ -4,7 +4,7 @@ In this tutorial, we'll network boot and provison a Kubernetes v1.8.2 cluster on First, we'll deploy a [Matchbox](https://github.com/coreos/matchbox) service and setup a network boot environment. Then, we'll declare a Kubernetes cluster in Terraform using the Typhoon Terraform module and power on machines. On PXE boot, machines will install Container Linux to disk, reboot into the disk install, and provision themselves as Kubernetes controllers or workers. -Controllers are provisioned as etcd peers and run `etcd-member` (etcd3) and `kubelet`. Workers are provisioned to run a `kubelet`. A one-time [bootkube](https://github.com/kubernetes-incubator/bootkube) bootstrap schedules an `apiserver`, `scheduler`, `controller-manager`, and `kube-dns` on controllers and runs `kube-proxy` and `flannel` or `calico` on each node. A generated `kubeconfig` provides `kubectl` access to the cluster. +Controllers are provisioned as etcd peers and run `etcd-member` (etcd3) and `kubelet`. Workers are provisioned to run a `kubelet`. A one-time [bootkube](https://github.com/kubernetes-incubator/bootkube) bootstrap schedules an `apiserver`, `scheduler`, `controller-manager`, and `kube-dns` on controllers and runs `kube-proxy` and `calico` or `flannel` on each node. A generated `kubeconfig` provides `kubectl` access to the cluster. ## Requirements diff --git a/docs/faq.md b/docs/faq.md index 916f4da3..51f29a9f 100644 --- a/docs/faq.md +++ b/docs/faq.md @@ -8,7 +8,7 @@ Formats rise and evolve. Typhoon may choose to adapt the format over time (with ## Self-hosted etcd -AWS and Google Cloud clusters run etcd as "self-hosted" pods, managed by the [etcd-operator](https://github.com/coreos/etcd-operator). By contrast, Typhoon bare-metal and Digital Ocean run an etcd peer as a systemd `etcd-member.service` on each controller (i.e. on-host). +AWS clusters run etcd as "self-hosted" pods, managed by the [etcd-operator](https://github.com/coreos/etcd-operator). By contrast, Typhoon bare-metal, Digital Ocean, and Google Cloud run an etcd peer as a systemd `etcd-member.service` on each controller (i.e. on-host). In practice, self-hosted etcd has proven to be *ok*, but not ideal. Running the apiserver's etcd atop Kubernetes itself is inherently complex, but works in most cases. It can be opaque to debug if complex edge cases with upstream Kubernetes bugs arise. diff --git a/docs/google-cloud.md b/docs/google-cloud.md index 86a8eae8..ad6f9e18 100644 --- a/docs/google-cloud.md +++ b/docs/google-cloud.md @@ -4,7 +4,7 @@ In this tutorial, we'll create a Kubernetes v1.8.2 cluster on Google Compute Eng We'll declare a Kubernetes cluster in Terraform using the Typhoon Terraform module. On apply, a network, firewall rules, managed instance groups of Kubernetes controllers and workers, network load balancers for controllers and workers, and health checks will be created. -Controllers and workers are provisioned to run a `kubelet`. 
A one-time [bootkube](https://github.com/kubernetes-incubator/bootkube) bootstrap schedules `etcd`, `apiserver`, `scheduler`, `controller-manager`, and `kube-dns` on controllers and runs `kube-proxy` and `flannel` on each node. A generated `kubeconfig` provides `kubectl` access to the cluster. +Controllers and workers are provisioned to run a `kubelet`. A one-time [bootkube](https://github.com/kubernetes-incubator/bootkube) bootstrap schedules an `apiserver`, `scheduler`, `controller-manager`, and `kube-dns` on controllers and runs `kube-proxy` and `calico` or `flannel` on each node. A generated `kubeconfig` provides `kubectl` access to the cluster. ## Requirements @@ -155,7 +155,7 @@ In 5-10 minutes, the Kubernetes cluster will be ready. $ KUBECONFIG=/home/user/.secrets/clusters/yavin/auth/kubeconfig $ kubectl get nodes NAME STATUS AGE VERSION -yavin-controller-1682.c.example-com.internal Ready 6m v1.8.2 +yavin-controller-0.c.example-com.internal Ready 6m v1.8.2 yavin-worker-jrbf.c.example-com.internal Ready 5m v1.8.2 yavin-worker-mzdm.c.example-com.internal Ready 5m v1.8.2 ``` @@ -168,13 +168,10 @@ NAMESPACE NAME READY STATUS RESTART kube-system calico-node-1cs8z 2/2 Running 0 6m kube-system calico-node-d1l5b 2/2 Running 0 6m kube-system calico-node-sp9ps 2/2 Running 0 6m -kube-system etcd-operator-3329263108-f443m 1/1 Running 1 6m kube-system kube-apiserver-zppls 1/1 Running 0 6m kube-system kube-controller-manager-3271970485-gh9kt 1/1 Running 0 6m kube-system kube-controller-manager-3271970485-h90v8 1/1 Running 1 6m kube-system kube-dns-1187388186-zj5dl 3/3 Running 0 6m -kube-system kube-etcd-0000 1/1 Running 0 5m -kube-system kube-etcd-network-checkpointer-crznb 1/1 Running 0 6m kube-system kube-proxy-117v6 1/1 Running 0 6m kube-system kube-proxy-9886n 1/1 Running 0 6m kube-system kube-proxy-njn47 1/1 Running 0 6m diff --git a/docs/index.md b/docs/index.md index 7d880fb8..6896f45e 100644 --- a/docs/index.md +++ b/docs/index.md @@ -78,7 +78,7 @@ In 5-10 minutes (varies by platform), the cluster will be ready. This Google Clo $ KUBECONFIG=/home/user/.secrets/clusters/yavin/auth/kubeconfig $ kubectl get nodes NAME STATUS AGE VERSION -yavin-controller-1682.c.example-com.internal Ready 6m v1.8.2 +yavin-controller-0.c.example-com.internal Ready 6m v1.8.2 yavin-worker-jrbf.c.example-com.internal Ready 5m v1.8.2 yavin-worker-mzdm.c.example-com.internal Ready 5m v1.8.2 ``` @@ -91,13 +91,10 @@ NAMESPACE NAME READY STATUS RESTART kube-system calico-node-1cs8z 2/2 Running 0 6m kube-system calico-node-d1l5b 2/2 Running 0 6m kube-system calico-node-sp9ps 2/2 Running 0 6m -kube-system etcd-operator-3329263108-f443m 1/1 Running 1 6m kube-system kube-apiserver-zppls 1/1 Running 0 6m kube-system kube-controller-manager-3271970485-gh9kt 1/1 Running 0 6m kube-system kube-controller-manager-3271970485-h90v8 1/1 Running 1 6m kube-system kube-dns-1187388186-zj5dl 3/3 Running 0 6m -kube-system kube-etcd-0000 1/1 Running 0 5m -kube-system kube-etcd-network-checkpointer-crznb 1/1 Running 0 6m kube-system kube-proxy-117v6 1/1 Running 0 6m kube-system kube-proxy-9886n 1/1 Running 0 6m kube-system kube-proxy-njn47 1/1 Running 0 6m diff --git a/docs/topics/performance.md b/docs/topics/performance.md index b4153695..cce0dc68 100644 --- a/docs/topics/performance.md +++ b/docs/topics/performance.md @@ -9,7 +9,7 @@ Provisioning times vary based on the platform. 
Sampling the time to create (appl | AWS | 20 min | 8 min 10 sec | | Bare-Metal | 10-14 min | NA | | Digital Ocean | 3 min 30 sec | 20 sec | -| Google Cloud | 6 min 10 sec | 4 min 30 sec | +| Google Cloud | 4 min | 4 min 30 sec | Notes: diff --git a/google-cloud/container-linux/kubernetes/bootkube.tf b/google-cloud/container-linux/kubernetes/bootkube.tf index 8736f828..363e4be2 100644 --- a/google-cloud/container-linux/kubernetes/bootkube.tf +++ b/google-cloud/container-linux/kubernetes/bootkube.tf @@ -2,13 +2,12 @@ module "bootkube" { source = "git::https://github.com/poseidon/terraform-render-bootkube.git?ref=v0.8.1" - cluster_name = "${var.cluster_name}" - api_servers = ["${format("%s.%s", var.cluster_name, var.dns_zone)}"] - etcd_servers = ["http://127.0.0.1:2379"] - asset_dir = "${var.asset_dir}" - networking = "${var.networking}" - network_mtu = 1440 - pod_cidr = "${var.pod_cidr}" - service_cidr = "${var.service_cidr}" - experimental_self_hosted_etcd = "true" + cluster_name = "${var.cluster_name}" + api_servers = ["${format("%s.%s", var.cluster_name, var.dns_zone)}"] + etcd_servers = "${module.controllers.etcd_fqdns}" + asset_dir = "${var.asset_dir}" + networking = "${var.networking}" + network_mtu = 1440 + pod_cidr = "${var.pod_cidr}" + service_cidr = "${var.service_cidr}" } diff --git a/google-cloud/container-linux/kubernetes/controllers/cl/controller.yaml.tmpl b/google-cloud/container-linux/kubernetes/controllers/cl/controller.yaml.tmpl index fe6ebc16..39eb8458 100644 --- a/google-cloud/container-linux/kubernetes/controllers/cl/controller.yaml.tmpl +++ b/google-cloud/container-linux/kubernetes/controllers/cl/controller.yaml.tmpl @@ -1,6 +1,29 @@ --- systemd: units: + - name: etcd-member.service + enable: true + dropins: + - name: 40-etcd-cluster.conf + contents: | + [Service] + Environment="ETCD_IMAGE_TAG=v3.2.0" + Environment="ETCD_NAME=${etcd_name}" + Environment="ETCD_ADVERTISE_CLIENT_URLS=https://${etcd_domain}:2379" + Environment="ETCD_INITIAL_ADVERTISE_PEER_URLS=https://${etcd_domain}:2380" + Environment="ETCD_LISTEN_CLIENT_URLS=https://0.0.0.0:2379" + Environment="ETCD_LISTEN_PEER_URLS=https://0.0.0.0:2380" + Environment="ETCD_INITIAL_CLUSTER=${etcd_initial_cluster}" + Environment="ETCD_STRICT_RECONFIG_CHECK=true" + Environment="ETCD_SSL_DIR=/etc/ssl/etcd" + Environment="ETCD_TRUSTED_CA_FILE=/etc/ssl/certs/etcd/server-ca.crt" + Environment="ETCD_CERT_FILE=/etc/ssl/certs/etcd/server.crt" + Environment="ETCD_KEY_FILE=/etc/ssl/certs/etcd/server.key" + Environment="ETCD_CLIENT_CERT_AUTH=true" + Environment="ETCD_PEER_TRUSTED_CA_FILE=/etc/ssl/certs/etcd/peer-ca.crt" + Environment="ETCD_PEER_CERT_FILE=/etc/ssl/certs/etcd/peer.crt" + Environment="ETCD_PEER_KEY_FILE=/etc/ssl/certs/etcd/peer.key" + Environment="ETCD_PEER_CLIENT_CERT_AUTH=true" - name: docker.service enable: true - name: locksmithd.service diff --git a/google-cloud/container-linux/kubernetes/controllers/controllers.tf b/google-cloud/container-linux/kubernetes/controllers/controllers.tf index f358a028..85bda340 100644 --- a/google-cloud/container-linux/kubernetes/controllers/controllers.tf +++ b/google-cloud/container-linux/kubernetes/controllers/controllers.tf @@ -1,55 +1,38 @@ -# Managed Instance Group -resource "google_compute_instance_group_manager" "controllers" { - name = "${var.cluster_name}-controller-group" - description = "Compute instance group of ${var.cluster_name} controllers" +# Discrete DNS records for each controller's private IPv4 for etcd usage. 
+resource "google_dns_record_set" "etcds" { + count = "${var.count}" - # Instance name prefix for instances in the group - base_instance_name = "${var.cluster_name}-controller" - instance_template = "${google_compute_instance_template.controller.self_link}" - update_strategy = "RESTART" - zone = "${var.zone}" - target_size = "${var.count}" + # DNS Zone name where record should be created + managed_zone = "${var.dns_zone_name}" - # Target pool instances in the group should be added into - target_pools = [ - "${google_compute_target_pool.controllers.self_link}", - ] + # DNS record + name = "${format("%s-etcd%d.%s.", var.cluster_name, count.index, var.dns_zone)}" + type = "A" + ttl = 300 + + # private IPv4 address for etcd + rrdatas = ["${element(google_compute_instance.controllers.*.network_interface.0.address, count.index)}"] } -# Controller Container Linux Config -data "template_file" "controller_config" { - template = "${file("${path.module}/cl/controller.yaml.tmpl")}" +# Controller instances +resource "google_compute_instance" "controllers" { + count = "${var.count}" - vars = { - k8s_dns_service_ip = "${cidrhost(var.service_cidr, 10)}" - k8s_etcd_service_ip = "${cidrhost(var.service_cidr, 15)}" - ssh_authorized_key = "${var.ssh_authorized_key}" - kubeconfig_ca_cert = "${var.kubeconfig_ca_cert}" - kubeconfig_kubelet_cert = "${var.kubeconfig_kubelet_cert}" - kubeconfig_kubelet_key = "${var.kubeconfig_kubelet_key}" - kubeconfig_server = "${var.kubeconfig_server}" - } -} - -data "ct_config" "controller_ign" { - content = "${data.template_file.controller_config.rendered}" - pretty_print = false -} - -resource "google_compute_instance_template" "controller" { - name_prefix = "${var.cluster_name}-controller-" - description = "Controller Instance template" + name = "${var.cluster_name}-controller-${count.index}" + zone = "${var.zone}" machine_type = "${var.machine_type}" metadata { - user-data = "${data.ct_config.controller_ign.rendered}" + user-data = "${element(data.ct_config.controller_ign.*.rendered, count.index)}" } - disk { - auto_delete = true - boot = true - source_image = "${var.os_image}" - disk_size_gb = "${var.disk_size}" + boot_disk { + auto_delete = true + + initialize_params { + image = "${var.os_image}" + size = "${var.disk_size}" + } } network_interface { @@ -60,9 +43,44 @@ resource "google_compute_instance_template" "controller" { } can_ip_forward = true +} - lifecycle { - # To update an Instance Template, Terraform should replace the existing resource - create_before_destroy = true +# Controller Container Linux Config +data "template_file" "controller_config" { + count = "${var.count}" + + template = "${file("${path.module}/cl/controller.yaml.tmpl")}" + + vars = { + # Cannot use cyclic dependencies on controllers or their DNS records + etcd_name = "etcd${count.index}" + etcd_domain = "${var.cluster_name}-etcd${count.index}.${var.dns_zone}" + + # etcd0=https://cluster-etcd0.example.com,etcd1=https://cluster-etcd1.example.com,... 
+ etcd_initial_cluster = "${join(",", formatlist("%s=https://%s:2380", null_resource.repeat.*.triggers.name, null_resource.repeat.*.triggers.domain))}" + + k8s_dns_service_ip = "${cidrhost(var.service_cidr, 10)}" + ssh_authorized_key = "${var.ssh_authorized_key}" + kubeconfig_ca_cert = "${var.kubeconfig_ca_cert}" + kubeconfig_kubelet_cert = "${var.kubeconfig_kubelet_cert}" + kubeconfig_kubelet_key = "${var.kubeconfig_kubelet_key}" + kubeconfig_server = "${var.kubeconfig_server}" } } + +# Horrible hack to generate a Terraform list of a desired length without dependencies. +# Ideal ${repeat("etcd", 3) -> ["etcd", "etcd", "etcd"]} +resource null_resource "repeat" { + count = "${var.count}" + + triggers { + name = "etcd${count.index}" + domain = "${var.cluster_name}-etcd${count.index}.${var.dns_zone}" + } +} + +data "ct_config" "controller_ign" { + count = "${var.count}" + content = "${element(data.template_file.controller_config.*.rendered, count.index)}" + pretty_print = false +} diff --git a/google-cloud/container-linux/kubernetes/controllers/network.tf b/google-cloud/container-linux/kubernetes/controllers/network.tf index 0dbd5d17..97fb3ac5 100644 --- a/google-cloud/container-linux/kubernetes/controllers/network.tf +++ b/google-cloud/container-linux/kubernetes/controllers/network.tf @@ -1,4 +1,9 @@ -# Controller Network Load balancer DNS record +# Static IPv4 address for the Network Load Balancer +resource "google_compute_address" "controllers-ip" { + name = "${var.cluster_name}-controllers-ip" +} + +# DNS record for the Network Load Balancer resource "google_dns_record_set" "controllers" { # DNS Zone name where record should be created managed_zone = "${var.dns_zone_name}" @@ -12,12 +17,7 @@ resource "google_dns_record_set" "controllers" { rrdatas = ["${google_compute_address.controllers-ip.address}"] } -# Static IP for the Network Load Balancer -resource "google_compute_address" "controllers-ip" { - name = "${var.cluster_name}-controllers-ip" -} - -# Network Load Balancer (i.e. forwarding rules) +# Network Load Balancer (i.e. forwarding rule) resource "google_compute_forwarding_rule" "controller-https-rule" { name = "${var.cluster_name}-controller-https-rule" ip_address = "${google_compute_address.controllers-ip.address}" @@ -25,26 +25,23 @@ resource "google_compute_forwarding_rule" "controller-https-rule" { target = "${google_compute_target_pool.controllers.self_link}" } -resource "google_compute_forwarding_rule" "controller-ssh-rule" { - name = "${var.cluster_name}-controller-ssh-rule" - ip_address = "${google_compute_address.controllers-ip.address}" - port_range = "22" - target = "${google_compute_target_pool.controllers.self_link}" -} - -# Network Load Balancer target pool of instances. 
+# Target pool of instances for the controller(s) Network Load Balancer resource "google_compute_target_pool" "controllers" { name = "${var.cluster_name}-controller-pool" + instances = [ + "${google_compute_instance.controllers.*.self_link}", + ] + health_checks = [ - "${google_compute_http_health_check.ingress.name}", + "${google_compute_http_health_check.kubelet.name}", ] session_affinity = "NONE" } # Kubelet HTTP Health Check -resource "google_compute_http_health_check" "ingress" { +resource "google_compute_http_health_check" "kubelet" { name = "${var.cluster_name}-kubelet-health" description = "Health check Kubelet health host port" diff --git a/google-cloud/container-linux/kubernetes/controllers/outputs.tf b/google-cloud/container-linux/kubernetes/controllers/outputs.tf new file mode 100644 index 00000000..7c819d8e --- /dev/null +++ b/google-cloud/container-linux/kubernetes/controllers/outputs.tf @@ -0,0 +1,11 @@ +output "etcd_fqdns" { + value = ["${null_resource.repeat.*.triggers.domain}"] +} + +output "ipv4_public" { + value = ["${google_compute_instance.controllers.*.network_interface.0.access_config.0.assigned_nat_ip}"] +} + +output "ipv4_private" { + value = ["${google_compute_instance.controllers.*.network_interface.0.address}"] +} diff --git a/google-cloud/container-linux/kubernetes/outputs.tf b/google-cloud/container-linux/kubernetes/outputs.tf index 90743b8c..5b4b5eb5 100644 --- a/google-cloud/container-linux/kubernetes/outputs.tf +++ b/google-cloud/container-linux/kubernetes/outputs.tf @@ -1,3 +1,11 @@ +output "controllers_ipv4_public" { + value = ["${module.controllers.ipv4_public}"] +} + +output "controllers_ipv4_private" { + value = ["${module.controllers.ipv4_private}"] +} + output "ingress_static_ip" { value = "${module.workers.ingress_static_ip}" } diff --git a/google-cloud/container-linux/kubernetes/ssh.tf b/google-cloud/container-linux/kubernetes/ssh.tf index 652c493a..9b91ed3e 100644 --- a/google-cloud/container-linux/kubernetes/ssh.tf +++ b/google-cloud/container-linux/kubernetes/ssh.tf @@ -1,12 +1,80 @@ +# Secure copy etcd TLS assets and kubeconfig to controllers. 
Activates kubelet.service +resource "null_resource" "copy-secrets" { + depends_on = ["module.controllers", "module.bootkube"] + count = "${var.controller_count}" + + connection { + type = "ssh" + host = "${element(module.controllers.ipv4_public, count.index)}" + user = "core" + timeout = "15m" + } + + provisioner "file" { + content = "${module.bootkube.kubeconfig}" + destination = "$HOME/kubeconfig" + } + + provisioner "file" { + content = "${module.bootkube.etcd_ca_cert}" + destination = "$HOME/etcd-client-ca.crt" + } + + provisioner "file" { + content = "${module.bootkube.etcd_client_cert}" + destination = "$HOME/etcd-client.crt" + } + + provisioner "file" { + content = "${module.bootkube.etcd_client_key}" + destination = "$HOME/etcd-client.key" + } + + provisioner "file" { + content = "${module.bootkube.etcd_server_cert}" + destination = "$HOME/etcd-server.crt" + } + + provisioner "file" { + content = "${module.bootkube.etcd_server_key}" + destination = "$HOME/etcd-server.key" + } + + provisioner "file" { + content = "${module.bootkube.etcd_peer_cert}" + destination = "$HOME/etcd-peer.crt" + } + + provisioner "file" { + content = "${module.bootkube.etcd_peer_key}" + destination = "$HOME/etcd-peer.key" + } + + provisioner "remote-exec" { + inline = [ + "sudo mkdir -p /etc/ssl/etcd/etcd", + "sudo mv etcd-client* /etc/ssl/etcd/", + "sudo cp /etc/ssl/etcd/etcd-client-ca.crt /etc/ssl/etcd/etcd/server-ca.crt", + "sudo mv etcd-server.crt /etc/ssl/etcd/etcd/server.crt", + "sudo mv etcd-server.key /etc/ssl/etcd/etcd/server.key", + "sudo cp /etc/ssl/etcd/etcd-client-ca.crt /etc/ssl/etcd/etcd/peer-ca.crt", + "sudo mv etcd-peer.crt /etc/ssl/etcd/etcd/peer.crt", + "sudo mv etcd-peer.key /etc/ssl/etcd/etcd/peer.key", + "sudo chown -R etcd:etcd /etc/ssl/etcd", + "sudo chmod -R 500 /etc/ssl/etcd", + "sudo mv /home/core/kubeconfig /etc/kubernetes/kubeconfig", + ] + } +} + # Secure copy bootkube assets to ONE controller and start bootkube to perform # one-time self-hosted cluster bootstrapping. resource "null_resource" "bootkube-start" { depends_on = ["module.controllers", "module.workers", "module.bootkube"] - # TODO: SSH to a controller's IP instead of waiting on DNS resolution connection { type = "ssh" - host = "${format("%s.%s", var.cluster_name, var.dns_zone)}" + host = "${element(module.controllers.ipv4_public, 0)}" user = "core" timeout = "15m" } diff --git a/google-cloud/container-linux/kubernetes/workers/network.tf b/google-cloud/container-linux/kubernetes/workers/network.tf index 7dcd1403..6cb6c4f6 100644 --- a/google-cloud/container-linux/kubernetes/workers/network.tf +++ b/google-cloud/container-linux/kubernetes/workers/network.tf @@ -1,4 +1,4 @@ -# Static IP for the Network Load Balancer +# Static IPv4 address for the Network Load Balancer resource "google_compute_address" "ingress-ip" { name = "${var.cluster_name}-ingress-ip" }
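# Reviewer note (not part of the patch): after `terraform apply`, the on-host etcd can be
# spot-checked via the new outputs, e.g.
#
#   $ terraform output controllers_ipv4_public
#   $ ssh core@<controller-public-ip> 'systemctl is-active etcd-member'
#
# where <controller-public-ip> is a placeholder for one of the listed addresses.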