diff --git a/README.md b/README.md
index f5772df5..98a4f8db 100644
--- a/README.md
+++ b/README.md
@@ -12,7 +12,7 @@ Typhoon distributes upstream Kubernetes, architectural conventions, and cluster
 
 ## Features
 
 * Kubernetes v1.7.5 (upstream, via [kubernetes-incubator/bootkube](https://github.com/kubernetes-incubator/bootkube))
-* Single or multi-master, workloads isolated on workers, [flannel](https://github.com/coreos/flannel) or [Calico](https://www.projectcalico.org/) networking
+* Single or multi-master, workloads isolated on workers, [Calico](https://www.projectcalico.org/) or [flannel](https://github.com/coreos/flannel) networking
 * On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
 * Ready for Ingress, Dashboards, Metrics, and other optional [addons](https://typhoon.psdn.io/addons/overview/)
@@ -22,6 +22,7 @@ Typhoon provides a Terraform Module for each supported operating system and plat
 
 | Platform      | Operating System | Terraform Module |
 |---------------|------------------|------------------|
+| AWS           | Container Linux  | [aws/container-linux/kubernetes](aws/container-linux/kubernetes) |
 | Bare-Metal    | Container Linux  | [bare-metal/container-linux/kubernetes](bare-metal/container-linux/kubernetes) |
 | Digital Ocean | Container Linux  | [digital-ocean/container-linux/kubernetes](digital-ocean/container-linux/kubernetes) |
 | Google Cloud  | Container Linux  | [google-cloud/container-linux/kubernetes](google-cloud/container-linux/kubernetes) |
@@ -30,6 +31,7 @@ Typhoon provides a Terraform Module for each supported operating system and plat
 
 * [Docs](https://typhoon.psdn.io)
 * [Concepts](https://typhoon.psdn.io/concepts/)
+* [AWS](https://typhoon.psdn.io/aws/)
 * [Bare-Metal](https://typhoon.psdn.io/bare-metal/)
 * [Digital Ocean](https://typhoon.psdn.io/digital-ocean/)
 * [Google-Cloud](https://typhoon.psdn.io/google-cloud/)
diff --git a/aws/container-linux/kubernetes/ami.tf b/aws/container-linux/kubernetes/ami.tf
new file mode 100644
index 00000000..03c6c0f6
--- /dev/null
+++ b/aws/container-linux/kubernetes/ami.tf
@@ -0,0 +1,19 @@
+data "aws_ami" "coreos" {
+  most_recent = true
+  owners      = ["595879546273"]
+
+  filter {
+    name   = "architecture"
+    values = ["x86_64"]
+  }
+
+  filter {
+    name   = "virtualization-type"
+    values = ["hvm"]
+  }
+
+  filter {
+    name   = "name"
+    values = ["CoreOS-${var.os_channel}-*"]
+  }
+}
diff --git a/aws/container-linux/kubernetes/bootkube.tf b/aws/container-linux/kubernetes/bootkube.tf
new file mode 100644
index 00000000..81fed934
--- /dev/null
+++ b/aws/container-linux/kubernetes/bootkube.tf
@@ -0,0 +1,14 @@
+# Self-hosted Kubernetes assets (kubeconfig, manifests)
+module "bootkube" {
+  source = "git::https://github.com/poseidon/bootkube-terraform.git?ref=dbfb11c6eafa08f839eac2834ca1aca35dafe965"
+
+  cluster_name = "${var.cluster_name}"
+  api_servers  = ["${format("%s.%s", var.cluster_name, var.dns_zone)}"]
+  etcd_servers = ["http://127.0.0.1:2379"]
+  asset_dir    = "${var.asset_dir}"
+  networking   = "${var.networking}"
+  network_mtu  = "${var.network_mtu}"
+  pod_cidr     = "${var.pod_cidr}"
+  service_cidr = "${var.service_cidr}"
+  experimental_self_hosted_etcd = "true"
+}
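Reviewer note: a minimal usage sketch for the new AWS module, to show how the inputs defined in `variables.tf` come together. The module source path, cluster name, DNS zone, key, and asset directory below are placeholder values, not part of this change.

```hcl
# Hypothetical invocation of the new aws/container-linux/kubernetes module.
module "aws-tempest" {
  source = "git::https://github.com/poseidon/typhoon//aws/container-linux/kubernetes"

  cluster_name = "tempest"

  # AWS Route53 DNS zone and its zone id (example values)
  dns_zone    = "aws.example.com"
  dns_zone_id = "Z3PAABBCFAKEC0"

  ssh_authorized_key = "ssh-rsa AAAAB3NZ..."

  # directory where bootkube writes generated kubeconfigs and manifests
  asset_dir = "/home/user/.secrets/clusters/tempest"

  controller_count = 1
  worker_count     = 2
}
```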
diff --git a/aws/container-linux/kubernetes/cl/controller.yaml.tmpl b/aws/container-linux/kubernetes/cl/controller.yaml.tmpl
new file mode 100644
index 00000000..59e779c3
--- /dev/null
+++ b/aws/container-linux/kubernetes/cl/controller.yaml.tmpl
@@ -0,0 +1,147 @@
+---
+systemd:
+  units:
+    - name: docker.service
+      enable: true
+    - name: locksmithd.service
+      mask: true
+    - name: wait-for-dns.service
+      enable: true
+      contents: |
+        [Unit]
+        Description=Wait for DNS entries
+        Wants=systemd-resolved.service
+        Before=kubelet.service
+        [Service]
+        Type=oneshot
+        RemainAfterExit=true
+        ExecStart=/bin/sh -c 'while ! /usr/bin/grep '^[^#[:space:]]' /etc/resolv.conf > /dev/null; do sleep 1; done'
+        [Install]
+        RequiredBy=kubelet.service
+    - name: kubelet.service
+      enable: true
+      contents: |
+        [Unit]
+        Description=Kubelet via Hyperkube ACI
+        [Service]
+        EnvironmentFile=/etc/kubernetes/kubelet.env
+        Environment="RKT_RUN_ARGS=--uuid-file-save=/var/cache/kubelet-pod.uuid \
+          --volume=resolv,kind=host,source=/etc/resolv.conf \
+          --mount volume=resolv,target=/etc/resolv.conf \
+          --volume var-lib-cni,kind=host,source=/var/lib/cni \
+          --mount volume=var-lib-cni,target=/var/lib/cni \
+          --volume opt-cni-bin,kind=host,source=/opt/cni/bin \
+          --mount volume=opt-cni-bin,target=/opt/cni/bin \
+          --volume var-log,kind=host,source=/var/log \
+          --mount volume=var-log,target=/var/log"
+        ExecStartPre=/bin/mkdir -p /opt/cni/bin
+        ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
+        ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
+        ExecStartPre=/bin/mkdir -p /etc/kubernetes/checkpoint-secrets
+        ExecStartPre=/bin/mkdir -p /etc/kubernetes/inactive-manifests
+        ExecStartPre=/bin/mkdir -p /var/lib/cni
+        ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
+        ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/cache/kubelet-pod.uuid
+        ExecStart=/usr/lib/coreos/kubelet-wrapper \
+          --allow-privileged \
+          --anonymous-auth=false \
+          --client-ca-file=/etc/kubernetes/ca.crt \
+          --cluster_dns=${k8s_dns_service_ip} \
+          --cluster_domain=cluster.local \
+          --cni-conf-dir=/etc/kubernetes/cni/net.d \
+          --exit-on-lock-contention \
+          --kubeconfig=/etc/kubernetes/kubeconfig \
+          --lock-file=/var/run/lock/kubelet.lock \
+          --network-plugin=cni \
+          --node-labels=node-role.kubernetes.io/master \
+          --pod-manifest-path=/etc/kubernetes/manifests \
+          --register-with-taints=node-role.kubernetes.io/master=:NoSchedule \
+          --require-kubeconfig
+        ExecStop=-/usr/bin/rkt stop --uuid-file=/var/cache/kubelet-pod.uuid
+        Restart=always
+        RestartSec=10
+        [Install]
+        WantedBy=multi-user.target
+    - name: bootkube.service
+      contents: |
+        [Unit]
+        Description=Bootstrap a Kubernetes cluster
+        ConditionPathExists=!/opt/bootkube/init_bootkube.done
+        [Service]
+        Type=oneshot
+        RemainAfterExit=true
+        WorkingDirectory=/opt/bootkube
+        ExecStart=/opt/bootkube/bootkube-start
+        ExecStartPost=/bin/touch /opt/bootkube/init_bootkube.done
+        [Install]
+        WantedBy=multi-user.target
+storage:
+  files:
+    - path: /etc/kubernetes/kubeconfig
+      filesystem: root
+      mode: 0644
+      contents:
+        inline: |
+          apiVersion: v1
+          kind: Config
+          clusters:
+          - name: local
+            cluster:
+              server: ${kubeconfig_server}
+              certificate-authority-data: ${kubeconfig_ca_cert}
+          users:
+          - name: kubelet
+            user:
+              client-certificate-data: ${kubeconfig_kubelet_cert}
+              client-key-data: ${kubeconfig_kubelet_key}
+          contexts:
+          - context:
+              cluster: local
+              user: kubelet
+    - path: /etc/kubernetes/kubelet.env
+      filesystem: root
+      mode: 0644
+      contents:
+        inline: |
+          KUBELET_IMAGE_URL=quay.io/coreos/hyperkube
+          KUBELET_IMAGE_TAG=v1.7.5_coreos.0
+    - path: /etc/sysctl.d/max-user-watches.conf
+      filesystem: root
+      contents:
+        inline: |
+          fs.inotify.max_user_watches=16184
+    - path: /opt/bootkube/bootkube-start
+      filesystem: root
+      mode: 0544
+      user:
+        id: 500
+      group:
+        id: 500
+      contents:
+        inline: |
+          #!/bin/bash
+          # Wrapper for bootkube start
+          set -e
+          # Move experimental manifests
+          [ -d /opt/bootkube/assets/manifests-* ] && mv /opt/bootkube/assets/manifests-*/* /opt/bootkube/assets/manifests && rm -rf /opt/bootkube/assets/manifests-*
+          [ -d /opt/bootkube/assets/experimental/manifests ] && mv /opt/bootkube/assets/experimental/manifests/* /opt/bootkube/assets/manifests && rm -r /opt/bootkube/assets/experimental/manifests
+          [ -d /opt/bootkube/assets/experimental/bootstrap-manifests ] && mv /opt/bootkube/assets/experimental/bootstrap-manifests/* /opt/bootkube/assets/bootstrap-manifests && rm -r /opt/bootkube/assets/experimental/bootstrap-manifests
+          BOOTKUBE_ACI="$${BOOTKUBE_ACI:-quay.io/coreos/bootkube}"
+          BOOTKUBE_VERSION="$${BOOTKUBE_VERSION:-v0.6.2}"
+          BOOTKUBE_ASSETS="$${BOOTKUBE_ASSETS:-/opt/bootkube/assets}"
+          exec /usr/bin/rkt run \
+            --trust-keys-from-https \
+            --volume assets,kind=host,source=$${BOOTKUBE_ASSETS} \
+            --mount volume=assets,target=/assets \
+            --volume bootstrap,kind=host,source=/etc/kubernetes \
+            --mount volume=bootstrap,target=/etc/kubernetes \
+            $${RKT_OPTS} \
+            $${BOOTKUBE_ACI}:$${BOOTKUBE_VERSION} \
+            --net=host \
+            --dns=host \
+            --exec=/bootkube -- start --asset-dir=/assets "$@"
+passwd:
+  users:
+    - name: core
+      ssh_authorized_keys:
+        - "${ssh_authorized_key}"
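Reviewer note: the `${...}` placeholders above are filled by Terraform (see `controllers.tf` further down). To sanity-check the template outside the module, it can be rendered standalone with dummy values; this is only a review aid, and every value below is a placeholder.

```hcl
# Render cl/controller.yaml.tmpl with placeholder values for inspection.
data "template_file" "controller_preview" {
  template = "${file("cl/controller.yaml.tmpl")}"

  vars = {
    k8s_dns_service_ip      = "10.3.0.10"
    k8s_etcd_service_ip     = "10.3.0.15"
    ssh_authorized_key      = "ssh-rsa AAAA..."
    kubeconfig_ca_cert      = "LS0t..."
    kubeconfig_kubelet_cert = "LS0t..."
    kubeconfig_kubelet_key  = "LS0t..."
    kubeconfig_server       = "https://tempest.aws.example.com:443"
  }
}

# Transpile the Container Linux Config to Ignition via terraform-provider-ct.
data "ct_config" "controller_preview_ign" {
  content      = "${data.template_file.controller_preview.rendered}"
  pretty_print = true
}

output "controller_ignition_preview" {
  value = "${data.ct_config.controller_preview_ign.rendered}"
}
```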
diff --git a/aws/container-linux/kubernetes/cl/worker.yaml.tmpl b/aws/container-linux/kubernetes/cl/worker.yaml.tmpl
new file mode 100644
index 00000000..f6ab5d50
--- /dev/null
+++ b/aws/container-linux/kubernetes/cl/worker.yaml.tmpl
@@ -0,0 +1,130 @@
+---
+systemd:
+  units:
+    - name: docker.service
+      enable: true
+    - name: locksmithd.service
+      mask: true
+    - name: wait-for-dns.service
+      enable: true
+      contents: |
+        [Unit]
+        Description=Wait for DNS entries
+        Wants=systemd-resolved.service
+        Before=kubelet.service
+        [Service]
+        Type=oneshot
+        RemainAfterExit=true
+        ExecStart=/bin/sh -c 'while ! /usr/bin/grep '^[^#[:space:]]' /etc/resolv.conf > /dev/null; do sleep 1; done'
+        [Install]
+        RequiredBy=kubelet.service
+    - name: kubelet.service
+      enable: true
+      contents: |
+        [Unit]
+        Description=Kubelet via Hyperkube ACI
+        [Service]
+        EnvironmentFile=/etc/kubernetes/kubelet.env
+        Environment="RKT_RUN_ARGS=--uuid-file-save=/var/cache/kubelet-pod.uuid \
+          --volume=resolv,kind=host,source=/etc/resolv.conf \
+          --mount volume=resolv,target=/etc/resolv.conf \
+          --volume var-lib-cni,kind=host,source=/var/lib/cni \
+          --mount volume=var-lib-cni,target=/var/lib/cni \
+          --volume opt-cni-bin,kind=host,source=/opt/cni/bin \
+          --mount volume=opt-cni-bin,target=/opt/cni/bin \
+          --volume var-log,kind=host,source=/var/log \
+          --mount volume=var-log,target=/var/log"
+        ExecStartPre=/bin/mkdir -p /opt/cni/bin
+        ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
+        ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
+        ExecStartPre=/bin/mkdir -p /etc/kubernetes/checkpoint-secrets
+        ExecStartPre=/bin/mkdir -p /etc/kubernetes/inactive-manifests
+        ExecStartPre=/bin/mkdir -p /var/lib/cni
+        ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
+        ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/cache/kubelet-pod.uuid
+        ExecStart=/usr/lib/coreos/kubelet-wrapper \
+          --allow-privileged \
+          --anonymous-auth=false \
+          --client-ca-file=/etc/kubernetes/ca.crt \
+          --cluster_dns=${k8s_dns_service_ip} \
+          --cluster_domain=cluster.local \
+          --cni-conf-dir=/etc/kubernetes/cni/net.d \
+          --exit-on-lock-contention \
+          --kubeconfig=/etc/kubernetes/kubeconfig \
+          --lock-file=/var/run/lock/kubelet.lock \
+          --network-plugin=cni \
+          --node-labels=node-role.kubernetes.io/node \
+          --pod-manifest-path=/etc/kubernetes/manifests \
+          --require-kubeconfig
+        ExecStop=-/usr/bin/rkt stop --uuid-file=/var/cache/kubelet-pod.uuid
+        Restart=always
+        RestartSec=5
+        [Install]
+        WantedBy=multi-user.target
+    - name: delete-node.service
+      enable: true
+      contents: |
+        [Unit]
+        Description=Waiting to delete Kubernetes node on shutdown
+        [Service]
+        Type=oneshot
+        RemainAfterExit=true
+        ExecStart=/bin/true
+        ExecStop=/etc/kubernetes/delete-node
+        [Install]
+        WantedBy=multi-user.target
storage:
+  files:
+    - path: /etc/kubernetes/kubeconfig
+      filesystem: root
+      mode: 0644
+      contents:
+        inline: |
+          apiVersion: v1
+          kind: Config
+          clusters:
+          - name: local
+            cluster:
+              server: ${kubeconfig_server}
+              certificate-authority-data: ${kubeconfig_ca_cert}
+          users:
+          - name: kubelet
+            user:
+              client-certificate-data: ${kubeconfig_kubelet_cert}
+              client-key-data: ${kubeconfig_kubelet_key}
+          contexts:
+          - context:
+              cluster: local
+              user: kubelet
+    - path: /etc/kubernetes/kubelet.env
+      filesystem: root
+      mode: 0644
+      contents:
+        inline: |
+          KUBELET_IMAGE_URL=quay.io/coreos/hyperkube
+          KUBELET_IMAGE_TAG=v1.7.5_coreos.0
+    - path: /etc/sysctl.d/max-user-watches.conf
+      filesystem: root
+      contents:
+        inline: |
+          fs.inotify.max_user_watches=16184
+    - path: /etc/kubernetes/delete-node
+      filesystem: root
+      mode: 0744
+      contents:
+        inline: |
+          #!/bin/bash
+          set -e
+          exec /usr/bin/rkt run \
+            --trust-keys-from-https \
+            --volume config,kind=host,source=/etc/kubernetes \
+            --mount volume=config,target=/etc/kubernetes \
+            quay.io/coreos/hyperkube:v1.7.5_coreos.0 \
+            --net=host \
+            --dns=host \
+            --exec=/kubectl -- --kubeconfig=/etc/kubernetes/kubeconfig delete node $(hostname)
+passwd:
+  users:
+    - name: core
+      ssh_authorized_keys:
+        - "${ssh_authorized_key}"
diff --git a/aws/container-linux/kubernetes/controllers.tf b/aws/container-linux/kubernetes/controllers.tf
new file mode 100644
index 00000000..b6ad2ffd
--- /dev/null
+++ b/aws/container-linux/kubernetes/controllers.tf
@@ -0,0 +1,244 @@
+# Controllers AutoScaling Group
+resource "aws_autoscaling_group" "controllers" {
+  name           = "${var.cluster_name}-controller"
+  load_balancers = ["${aws_elb.controllers.id}"]
+
+  # count
+  desired_capacity = "${var.controller_count}"
+  min_size         = "${var.controller_count}"
+  max_size         = "${var.controller_count}"
+
+  # network
+  vpc_zone_identifier = ["${aws_subnet.public.*.id}"]
+
+  # template
+  launch_configuration = "${aws_launch_configuration.controller.name}"
+
+  lifecycle {
+    # override the default destroy and replace update behavior
+    create_before_destroy = true
+    ignore_changes        = ["image_id"]
+  }
+
+  tags = [{
+    key                 = "Name"
+    value               = "${var.cluster_name}-controller"
+    propagate_at_launch = true
+  }]
+}
+
+# Controller template
+resource "aws_launch_configuration" "controller" {
+  name_prefix   = "${var.cluster_name}-controller-template-"
+  image_id      = "${data.aws_ami.coreos.image_id}"
+  instance_type = "${var.controller_type}"
+
+  user_data = "${data.ct_config.controller_ign.rendered}"
+
+  # storage
+  root_block_device {
+    volume_type = "standard"
+    volume_size = "${var.disk_size}"
+  }
+
+  # network
+  associate_public_ip_address = true
+  security_groups             = ["${aws_security_group.controller.id}"]
+
+  lifecycle {
+    // Override the default destroy and replace update behavior
+    create_before_destroy = true
+  }
+}
+
+# Controller Container Linux Config
+data "template_file" "controller_config" {
+  template = "${file("${path.module}/cl/controller.yaml.tmpl")}"
+
+  vars = {
+    k8s_dns_service_ip      = "${cidrhost(var.service_cidr, 10)}"
+    k8s_etcd_service_ip     = "${cidrhost(var.service_cidr, 15)}"
+    ssh_authorized_key      = "${var.ssh_authorized_key}"
+    kubeconfig_ca_cert      = "${module.bootkube.ca_cert}"
+    kubeconfig_kubelet_cert = "${module.bootkube.kubelet_cert}"
+    kubeconfig_kubelet_key  = "${module.bootkube.kubelet_key}"
+    kubeconfig_server       = "${module.bootkube.server}"
+  }
+}
+
+data "ct_config" "controller_ign" {
+  content      = "${data.template_file.controller_config.rendered}"
+  pretty_print = false
+}
+
+# Security Group (instance firewall)
+
+resource "aws_security_group" "controller" {
+  name        = "${var.cluster_name}-controller"
+  description = "${var.cluster_name} controller security group"
+
+  vpc_id = "${aws_vpc.network.id}"
+
+  tags = "${map("Name", "${var.cluster_name}-controller")}"
+}
+
+resource "aws_security_group_rule" "controller-icmp" {
+  security_group_id = "${aws_security_group.controller.id}"
+
+  type        = "ingress"
+  protocol    = "icmp"
+  from_port   = 0
+  to_port     = 0
+  cidr_blocks = ["0.0.0.0/0"]
+}
+
+resource "aws_security_group_rule" "controller-ssh" {
+  security_group_id = "${aws_security_group.controller.id}"
+
+  type        = "ingress"
+  protocol    = "tcp"
+  from_port   = 22
+  to_port     = 22
+  cidr_blocks = ["0.0.0.0/0"]
+}
+
+resource "aws_security_group_rule" "controller-apiserver" {
+  security_group_id = "${aws_security_group.controller.id}"
+
+  type        = "ingress"
+  protocol    = "tcp"
+  from_port   = 443
+  to_port     = 443
+  cidr_blocks = ["0.0.0.0/0"]
+}
+
+resource "aws_security_group_rule" "controller-etcd" {
+  security_group_id = "${aws_security_group.controller.id}"
+
+  type      = "ingress"
+  protocol  = "tcp"
+  from_port = 2379
+  to_port   = 2380
+  self      = true
+}
+
+resource "aws_security_group_rule" "controller-bootstrap-etcd" {
+  security_group_id = "${aws_security_group.controller.id}"
+
+  type      = "ingress"
+  protocol  = "tcp"
+  from_port = 12379
+  to_port   = 12380
+  self      = true
+}
+
+resource "aws_security_group_rule" "controller-flannel" {
+  security_group_id = "${aws_security_group.controller.id}"
+
+  type                     = "ingress"
+  protocol                 = "udp"
+  from_port                = 8472
+  to_port                  = 8472
+  source_security_group_id = "${aws_security_group.worker.id}"
+}
+
+resource "aws_security_group_rule" "controller-flannel-self" {
+  security_group_id = "${aws_security_group.controller.id}"
+
+  type      = "ingress"
+  protocol  = "udp"
+  from_port = 8472
+  to_port   = 8472
+  self      = true
+}
+
+resource "aws_security_group_rule" "controller-kubelet-read" {
+  security_group_id = "${aws_security_group.controller.id}"
+
+  type                     = "ingress"
+  protocol                 = "tcp"
+  from_port                = 10255
+  to_port                  = 10255
+  source_security_group_id = "${aws_security_group.worker.id}"
+}
+
+resource "aws_security_group_rule" "controller-kubelet-read-self" {
+  security_group_id = "${aws_security_group.controller.id}"
+
+  type      = "ingress"
+  protocol  = "tcp"
+  from_port = 10255
+  to_port   = 10255
+  self      = true
+}
+
+resource "aws_security_group_rule" "controller-bgp" {
+  security_group_id = "${aws_security_group.controller.id}"
+
+  type                     = "ingress"
+  protocol                 = "tcp"
+  from_port                = 179
+  to_port                  = 179
+  source_security_group_id = "${aws_security_group.worker.id}"
+}
+
+resource "aws_security_group_rule" "controller-bgp-self" {
+  security_group_id = "${aws_security_group.controller.id}"
+
+  type      = "ingress"
+  protocol  = "tcp"
+  from_port = 179
+  to_port   = 179
+  self      = true
+}
+
+resource "aws_security_group_rule" "controller-ipip" {
+  security_group_id = "${aws_security_group.controller.id}"
+
+  type                     = "ingress"
+  protocol                 = 4
+  from_port                = 0
+  to_port                  = 0
+  source_security_group_id = "${aws_security_group.worker.id}"
+}
+
+resource "aws_security_group_rule" "controller-ipip-self" {
+  security_group_id = "${aws_security_group.controller.id}"
+
+  type      = "ingress"
+  protocol  = 4
+  from_port = 0
+  to_port   = 0
+  self      = true
+}
+
+resource "aws_security_group_rule" "controller-ipip-legacy" {
+  security_group_id = "${aws_security_group.controller.id}"
+
+  type                     = "ingress"
+  protocol                 = 94
+  from_port                = 0
+  to_port                  = 0
+  source_security_group_id = "${aws_security_group.worker.id}"
+}
+
+resource "aws_security_group_rule" "controller-ipip-legacy-self" {
+  security_group_id = "${aws_security_group.controller.id}"
+
+  type      = "ingress"
+  protocol  = 94
+  from_port = 0
+  to_port   = 0
+  self      = true
+}
+
+resource "aws_security_group_rule" "controller-egress" {
+  security_group_id = "${aws_security_group.controller.id}"
+
+  type             = "egress"
+  protocol         = "-1"
+  from_port        = 0
+  to_port          = 0
+  cidr_blocks      = ["0.0.0.0/0"]
+  ipv6_cidr_blocks = ["::/0"]
+}
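Reviewer note: the `ct_config` data source comes from the third-party terraform-provider-ct plugin, so callers of this module presumably need it registered alongside the AWS provider. A sketch of that wiring, under the assumption of a locally installed plugin binary (path and region are example values):

```hcl
# ~/.terraformrc — register the locally installed terraform-provider-ct plugin
providers {
  ct = "/usr/local/bin/terraform-provider-ct"
}

# providers.tf in the cluster's own Terraform config
provider "aws" {
  region = "us-west-2"   # example region
}
```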
diff --git a/aws/container-linux/kubernetes/elb.tf b/aws/container-linux/kubernetes/elb.tf
new file mode 100644
index 00000000..51f526f5
--- /dev/null
+++ b/aws/container-linux/kubernetes/elb.tf
@@ -0,0 +1,48 @@
+# Controller Network Load Balancer DNS Record
+resource "aws_route53_record" "controllers" {
+  zone_id = "${var.dns_zone_id}"
+
+  name = "${format("%s.%s.", var.cluster_name, var.dns_zone)}"
+  type = "A"
+
+  # AWS recommends their special "alias" records for ELBs
+  alias {
+    name                   = "${aws_elb.controllers.dns_name}"
+    zone_id                = "${aws_elb.controllers.zone_id}"
+    evaluate_target_health = true
+  }
+}
+
+# Controller Network Load Balancer
+resource "aws_elb" "controllers" {
+  name            = "${var.cluster_name}-controllers"
+  subnets         = ["${aws_subnet.public.*.id}"]
+  security_groups = ["${aws_security_group.controller.id}"]
+
+  listener {
+    lb_port           = 22
+    lb_protocol       = "tcp"
+    instance_port     = 22
+    instance_protocol = "tcp"
+  }
+
+  listener {
+    lb_port           = 443
+    lb_protocol       = "tcp"
+    instance_port     = 443
+    instance_protocol = "tcp"
+  }
+
+  # Kubelet HTTP health check
+  health_check {
+    target              = "HTTP:10255/healthz"
+    healthy_threshold   = 2
+    unhealthy_threshold = 4
+    timeout             = 5
+    interval            = 6
+  }
+
+  idle_timeout                = 1800
+  connection_draining         = true
+  connection_draining_timeout = 300
+}
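Reviewer note: the 22/tcp listener lets the `ssh.tf` provisioner reach a controller through the same DNS name that fronts the apiserver. If the module does not already export the endpoint elsewhere, an output along these lines makes it easy to consume from the calling config (output name is hypothetical, not part of this diff):

```hcl
# Sketch: expose the apiserver / SSH endpoint created above.
output "kube_apiserver_fqdn" {
  value = "${aws_route53_record.controllers.fqdn}"
}
```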
"${aws_security_group.controller.id}" + + type = "ingress" + protocol = "tcp" + from_port = 12379 + to_port = 12380 + self = true +} + +resource "aws_security_group_rule" "controller-flannel" { + security_group_id = "${aws_security_group.controller.id}" + + type = "ingress" + protocol = "udp" + from_port = 8472 + to_port = 8472 + source_security_group_id = "${aws_security_group.worker.id}" +} + +resource "aws_security_group_rule" "controller-flannel-self" { + security_group_id = "${aws_security_group.controller.id}" + + type = "ingress" + protocol = "udp" + from_port = 8472 + to_port = 8472 + self = true +} + +resource "aws_security_group_rule" "controller-kubelet-read" { + security_group_id = "${aws_security_group.controller.id}" + + type = "ingress" + protocol = "tcp" + from_port = 10255 + to_port = 10255 + source_security_group_id = "${aws_security_group.worker.id}" +} + +resource "aws_security_group_rule" "controller-kubelet-read-self" { + security_group_id = "${aws_security_group.controller.id}" + + type = "ingress" + protocol = "tcp" + from_port = 10255 + to_port = 10255 + self = true +} + +resource "aws_security_group_rule" "controller-bgp" { + security_group_id = "${aws_security_group.controller.id}" + + type = "ingress" + protocol = "tcp" + from_port = 179 + to_port = 179 + source_security_group_id = "${aws_security_group.worker.id}" +} + +resource "aws_security_group_rule" "controller-bgp-self" { + security_group_id = "${aws_security_group.controller.id}" + + type = "ingress" + protocol = "tcp" + from_port = 179 + to_port = 179 + self = true +} + +resource "aws_security_group_rule" "controller-ipip" { + security_group_id = "${aws_security_group.controller.id}" + + type = "ingress" + protocol = 4 + from_port = 0 + to_port = 0 + source_security_group_id = "${aws_security_group.worker.id}" +} + +resource "aws_security_group_rule" "controller-ipip-self" { + security_group_id = "${aws_security_group.controller.id}" + + type = "ingress" + protocol = 4 + from_port = 0 + to_port = 0 + self = true +} + +resource "aws_security_group_rule" "controller-ipip-legacy" { + security_group_id = "${aws_security_group.controller.id}" + + type = "ingress" + protocol = 94 + from_port = 0 + to_port = 0 + source_security_group_id = "${aws_security_group.worker.id}" +} + +resource "aws_security_group_rule" "controller-ipip-legacy-self" { + security_group_id = "${aws_security_group.controller.id}" + + type = "ingress" + protocol = 94 + from_port = 0 + to_port = 0 + self = true +} + +resource "aws_security_group_rule" "controller-egress" { + security_group_id = "${aws_security_group.controller.id}" + + type = "egress" + protocol = "-1" + from_port = 0 + to_port = 0 + cidr_blocks = ["0.0.0.0/0"] + ipv6_cidr_blocks = ["::/0"] +} diff --git a/aws/container-linux/kubernetes/elb.tf b/aws/container-linux/kubernetes/elb.tf new file mode 100644 index 00000000..51f526f5 --- /dev/null +++ b/aws/container-linux/kubernetes/elb.tf @@ -0,0 +1,48 @@ +# Controller Network Load Balancer DNS Record +resource "aws_route53_record" "controllers" { + zone_id = "${var.dns_zone_id}" + + name = "${format("%s.%s.", var.cluster_name, var.dns_zone)}" + type = "A" + + # AWS recommends their special "alias" records for ELBs + alias { + name = "${aws_elb.controllers.dns_name}" + zone_id = "${aws_elb.controllers.zone_id}" + evaluate_target_health = true + } +} + +# Controller Network Load Balancer +resource "aws_elb" "controllers" { + name = "${var.cluster_name}-controllers" + subnets = ["${aws_subnet.public.*.id}"] + security_groups = 
["${aws_security_group.controller.id}"] + + listener { + lb_port = 22 + lb_protocol = "tcp" + instance_port = 22 + instance_protocol = "tcp" + } + + listener { + lb_port = 443 + lb_protocol = "tcp" + instance_port = 443 + instance_protocol = "tcp" + } + + # Kubelet HTTP health check + health_check { + target = "HTTP:10255/healthz" + healthy_threshold = 2 + unhealthy_threshold = 4 + timeout = 5 + interval = 6 + } + + idle_timeout = 1800 + connection_draining = true + connection_draining_timeout = 300 +} diff --git a/aws/container-linux/kubernetes/ingress.tf b/aws/container-linux/kubernetes/ingress.tf new file mode 100644 index 00000000..480c6d44 --- /dev/null +++ b/aws/container-linux/kubernetes/ingress.tf @@ -0,0 +1,32 @@ +# Ingress Network Load Balancer +resource "aws_elb" "ingress" { + name = "${var.cluster_name}-ingress" + subnets = ["${aws_subnet.public.*.id}"] + security_groups = ["${aws_security_group.worker.id}"] + + listener { + lb_port = 80 + lb_protocol = "tcp" + instance_port = 80 + instance_protocol = "tcp" + } + + listener { + lb_port = 443 + lb_protocol = "tcp" + instance_port = 443 + instance_protocol = "tcp" + } + + # Kubelet HTTP health check + health_check { + target = "HTTP:10254/healthz" + healthy_threshold = 2 + unhealthy_threshold = 4 + timeout = 5 + interval = 6 + } + + connection_draining = true + connection_draining_timeout = 300 +} diff --git a/aws/container-linux/kubernetes/network.tf b/aws/container-linux/kubernetes/network.tf new file mode 100644 index 00000000..1be5073b --- /dev/null +++ b/aws/container-linux/kubernetes/network.tf @@ -0,0 +1,57 @@ +data "aws_availability_zones" "all" {} + +# Network VPC, gateway, and routes + +resource "aws_vpc" "network" { + cidr_block = "${var.host_cidr}" + assign_generated_ipv6_cidr_block = true + enable_dns_support = true + enable_dns_hostnames = true + + tags = "${map("Name", "${var.cluster_name}")}" +} + +resource "aws_internet_gateway" "gateway" { + vpc_id = "${aws_vpc.network.id}" + + tags = "${map("Name", "${var.cluster_name}")}" +} + +resource "aws_route_table" "default" { + vpc_id = "${aws_vpc.network.id}" + + route { + cidr_block = "0.0.0.0/0" + gateway_id = "${aws_internet_gateway.gateway.id}" + } + + route { + ipv6_cidr_block = "::/0" + gateway_id = "${aws_internet_gateway.gateway.id}" + } + + tags = "${map("Name", "${var.cluster_name}")}" +} + +# Subnets (one per availability zone) + +resource "aws_subnet" "public" { + count = "${length(data.aws_availability_zones.all.names)}" + + vpc_id = "${aws_vpc.network.id}" + availability_zone = "${data.aws_availability_zones.all.names[count.index]}" + + cidr_block = "${cidrsubnet(var.host_cidr, 4, count.index)}" + ipv6_cidr_block = "${cidrsubnet(aws_vpc.network.ipv6_cidr_block, 8, count.index)}" + map_public_ip_on_launch = true + assign_ipv6_address_on_creation = true + + tags = "${map("Name", "${var.cluster_name}-public-${count.index}")}" +} + +resource "aws_route_table_association" "public" { + count = "${length(data.aws_availability_zones.all.names)}" + + route_table_id = "${aws_route_table.default.id}" + subnet_id = "${element(aws_subnet.public.*.id, count.index)}" +} diff --git a/aws/container-linux/kubernetes/ssh.tf b/aws/container-linux/kubernetes/ssh.tf new file mode 100644 index 00000000..8b9b89dd --- /dev/null +++ b/aws/container-linux/kubernetes/ssh.tf @@ -0,0 +1,25 @@ +# Secure copy bootkube assets to ONE controller and start bootkube to perform +# one-time self-hosted cluster bootstrapping. 
+resource "null_resource" "bootkube-start" { + depends_on = ["module.bootkube", "aws_autoscaling_group.controllers"] + + # TODO: SSH to a controller's IP instead of waiting on DNS resolution + connection { + type = "ssh" + host = "${aws_route53_record.controllers.fqdn}" + user = "core" + timeout = "15m" + } + + provisioner "file" { + source = "${var.asset_dir}" + destination = "$HOME/assets" + } + + provisioner "remote-exec" { + inline = [ + "sudo mv /home/core/assets /opt/bootkube", + "sudo systemctl start bootkube", + ] + } +} diff --git a/aws/container-linux/kubernetes/variables.tf b/aws/container-linux/kubernetes/variables.tf new file mode 100644 index 00000000..05f17ec7 --- /dev/null +++ b/aws/container-linux/kubernetes/variables.tf @@ -0,0 +1,96 @@ +variable "cluster_name" { + type = "string" + description = "Cluster name" +} + +variable "dns_zone" { + type = "string" + description = "AWS DNS Zone (e.g. aws.dghubble.io)" +} + +variable "dns_zone_id" { + type = "string" + description = "AWS DNS Zone ID (e.g. Z3PAABBCFAKEC0)" +} + +variable "ssh_authorized_key" { + type = "string" + description = "SSH public key for user 'core'" +} + +variable "os_channel" { + type = "string" + default = "stable" + description = "Container Linux AMI channel (stable, beta, alpha)" +} + +variable "disk_size" { + type = "string" + default = "40" + description = "The size of the disk in Gigabytes" +} + +variable "host_cidr" { + description = "CIDR IPv4 range to assign to EC2 nodes" + type = "string" + default = "10.0.0.0/16" +} + +variable "controller_count" { + type = "string" + default = "1" + description = "Number of controllers" +} + +variable "controller_type" { + type = "string" + default = "t2.small" + description = "Controller EC2 instance type" +} + +variable "worker_count" { + type = "string" + default = "1" + description = "Number of workers" +} + +variable "worker_type" { + type = "string" + default = "t2.small" + description = "Worker EC2 instance type" +} + +# bootkube assets + +variable "asset_dir" { + description = "Path to a directory where generated assets should be placed (contains secrets)" + type = "string" +} + +variable "networking" { + description = "Choice of networking provider (calico or flannel)" + type = "string" + default = "calico" +} + +variable "network_mtu" { + description = "CNI interface MTU (applies to calico only). Use 8981 if using instances types with Jumbo frames." + type = "string" + default = "1480" +} + +variable "pod_cidr" { + description = "CIDR IPv4 range to assign Kubernetes pods" + type = "string" + default = "10.2.0.0/16" +} + +variable "service_cidr" { + description = <