Sync fedora-cloud modules with Container Linux

* Update manifests for Kubernetes v1.10.0
* Update etcd from v3.3.2 to v3.3.3
* Add disk_type optional variable on AWS
* Remove redundant kubeconfig copy on AWS
* Distribute etcd secrets only to controllers
* Organize module variables and ssh steps
Dalton Hubble 2018-03-31 13:11:42 -07:00
parent 3610da8b71
commit b80a2eb8a0
17 changed files with 142 additions and 90 deletions
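
For context on the disk_type bullet: the AWS modules now expose an optional disk_type variable (default "gp2") alongside disk_size, so the root EBS volume type is no longer fixed to "standard". A minimal sketch of a cluster definition using it follows; the module name, source path, ref placeholder, and values are illustrative assumptions, not taken from this commit.

module "aws-tempest" {
  # hypothetical source; pin ?ref= to whatever release or commit you actually use
  source = "git::https://github.com/poseidon/typhoon//aws/fedora-cloud/kubernetes?ref=<release>"

  cluster_name       = "tempest"
  dns_zone           = "aws.example.com"
  dns_zone_id        = "Z3PAABBCFAKEC0"
  ssh_authorized_key = "ssh-rsa AAAAB3NZ..."
  asset_dir          = "/home/user/.secrets/clusters/tempest"

  # storage (disk_type is the new optional variable)
  disk_size = "40"
  disk_type = "gp2"
}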

View File

@ -14,6 +14,6 @@ data "aws_ami" "fedora" {
filter {
name = "name"
values = ["Fedora-Cloud-Base-27*-standard-0"]
values = ["Fedora-Cloud-Base-27*-gp2-0"]
}
}

View File

@ -1,6 +1,6 @@
# Self-hosted Kubernetes assets (kubeconfig, manifests)
module "bootkube" {
source = "git::https://github.com/poseidon/terraform-render-bootkube.git?ref=88b361207d42ec3121930a4add6b64ba7cf18360"
source = "git::https://github.com/poseidon/terraform-render-bootkube.git?ref=5f3546b66ffb9946b36e612537bb6a1830ae7746"
cluster_name = "${var.cluster_name}"
api_servers = ["${format("%s.%s", var.cluster_name, var.dns_zone)}"]

View File

@ -7,9 +7,8 @@ yum_repos:
gpgcheck: true
gpgkey: https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
packages:
- vim
- docker
- kubelet
- [docker, 1.13.1]
- [kubelet, 1.10.0]
- nfs-utils
write_files:
- path: /etc/systemd/system/etcd-member.service
@ -30,7 +29,7 @@ write_files:
-v /etc/ssl/etcd:/etc/ssl/certs:ro,Z \
-v /var/lib/etcd:/var/lib/etcd:Z \
--env-file=/etc/etcd/etcd.conf \
quay.io/coreos/etcd:v3.3.2
quay.io/coreos/etcd:v3.3.3
ExecStop=/usr/bin/docker stop etcd-member
Restart=on-failure
RestartSec=10s

View File

@ -28,7 +28,7 @@ resource "aws_instance" "controllers" {
# storage
root_block_device {
volume_type = "standard"
volume_type = "${var.disk_type}"
volume_size = "${var.disk_size}"
}

View File

@ -1,5 +1,5 @@
# Secure copy etcd TLS assets to controllers.
resource "null_resource" "copy-secrets" {
resource "null_resource" "copy-controller-secrets" {
count = "${var.controller_count}"
connection {
@ -61,7 +61,11 @@ resource "null_resource" "copy-secrets" {
# Secure copy bootkube assets to ONE controller and start bootkube to perform
# one-time self-hosted cluster bootstrapping.
resource "null_resource" "bootkube-start" {
depends_on = ["module.bootkube", "null_resource.copy-secrets", "aws_route53_record.apiserver"]
depends_on = [
"null_resource.copy-controller-secrets",
"module.workers",
"aws_route53_record.apiserver",
]
connection {
type = "ssh"
@ -77,7 +81,8 @@ resource "null_resource" "bootkube-start" {
provisioner "remote-exec" {
inline = [
"sudo mv assets /opt/bootkube",
"while [ ! -f /var/lib/cloud/instance/boot-finished ]; do sleep 4; done",
"sudo mv $HOME/assets /opt/bootkube",
"sudo systemctl start bootkube",
]
}

View File

@ -1,11 +1,13 @@
variable "cluster_name" {
type = "string"
description = "Cluster name"
description = "Unique cluster name (prepended to dns_zone)"
}
# AWS
variable "dns_zone" {
type = "string"
description = "AWS DNS Zone (e.g. aws.dghubble.io)"
description = "AWS DNS Zone (e.g. aws.example.com)"
}
variable "dns_zone_id" {
@ -13,33 +15,12 @@ variable "dns_zone_id" {
description = "AWS DNS Zone ID (e.g. Z3PAABBCFAKEC0)"
}
variable "ssh_authorized_key" {
type = "string"
description = "SSH public key for user 'core'"
}
variable "disk_size" {
type = "string"
default = "40"
description = "The size of the disk in Gigabytes"
}
variable "host_cidr" {
description = "CIDR IPv4 range to assign to EC2 nodes"
type = "string"
default = "10.0.0.0/16"
}
# instances
variable "controller_count" {
type = "string"
default = "1"
description = "Number of controllers"
}
variable "controller_type" {
type = "string"
default = "t2.small"
description = "Controller EC2 instance type"
description = "Number of controllers (i.e. masters)"
}
variable "worker_count" {
@ -48,13 +29,36 @@ variable "worker_count" {
description = "Number of workers"
}
variable "controller_type" {
type = "string"
default = "t2.small"
description = "EC2 instance type for controllers"
}
variable "worker_type" {
type = "string"
default = "t2.small"
description = "Worker EC2 instance type"
description = "EC2 instance type for workers"
}
# bootkube assets
variable "disk_size" {
type = "string"
default = "40"
description = "Size of the EBS volume in GB"
}
variable "disk_type" {
type = "string"
default = "gp2"
description = "Type of the EBS volume (e.g. standard, gp2, io1)"
}
# configuration
variable "ssh_authorized_key" {
type = "string"
description = "SSH public key for user 'fedora'"
}
variable "asset_dir" {
description = "Path to a directory where generated assets should be placed (contains secrets)"
@ -73,6 +77,12 @@ variable "network_mtu" {
default = "1480"
}
variable "host_cidr" {
description = "CIDR IPv4 range to assign to EC2 nodes"
type = "string"
default = "10.0.0.0/16"
}
variable "pod_cidr" {
description = "CIDR IPv4 range to assign Kubernetes pods"
type = "string"

View File

@ -14,6 +14,6 @@ data "aws_ami" "fedora" {
filter {
name = "name"
values = ["Fedora-Cloud-Base-27*-standard-0"]
values = ["Fedora-Cloud-Base-27*-gp2-0"]
}
}

View File

@ -7,9 +7,8 @@ yum_repos:
gpgcheck: true
gpgkey: https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
packages:
- vim
- docker
- kubelet
- [docker, 1.13.1]
- [kubelet, 1.10.0]
- nfs-utils
write_files:
- path: /etc/systemd/system/kubelet.service.d/10-typhoon.conf

View File

@ -1,21 +1,23 @@
variable "name" {
type = "string"
description = "Unique name instance group"
description = "Unique name for the worker pool"
}
# AWS
variable "vpc_id" {
type = "string"
description = "ID of the VPC for creating instances"
description = "Must be set to `vpc_id` output by cluster"
}
variable "subnet_ids" {
type = "list"
description = "List of subnet IDs for creating instances"
description = "Must be set to `subnet_ids` output by cluster"
}
variable "security_groups" {
type = "list"
description = "List of security group IDs"
description = "Must be set to `worker_security_groups` output by cluster"
}
# instances
@ -35,19 +37,25 @@ variable "instance_type" {
variable "disk_size" {
type = "string"
default = "40"
description = "Size of the disk in GB"
description = "Size of the EBS volume in GB"
}
variable "disk_type" {
type = "string"
default = "gp2"
description = "Type of the EBS volume (e.g. standard, gp2, io1)"
}
# configuration
variable "kubeconfig" {
type = "string"
description = "Generated Kubelet kubeconfig"
description = "Must be set to `kubeconfig` output by cluster"
}
variable "ssh_authorized_key" {
type = "string"
description = "SSH public key for user 'core'"
description = "SSH public key for user 'fedora'"
}
variable "service_cidr" {

View File

@ -42,7 +42,7 @@ resource "aws_launch_configuration" "worker" {
# storage
root_block_device {
volume_type = "standard"
volume_type = "${var.disk_type}"
volume_size = "${var.disk_size}"
}

View File

@ -1,12 +1,12 @@
# Self-hosted Kubernetes assets (kubeconfig, manifests)
module "bootkube" {
source = "git::https://github.com/poseidon/terraform-render-bootkube.git?ref=88b361207d42ec3121930a4add6b64ba7cf18360"
source = "git::https://github.com/poseidon/terraform-render-bootkube.git?ref=5f3546b66ffb9946b36e612537bb6a1830ae7746"
cluster_name = "${var.cluster_name}"
api_servers = ["${format("%s.%s", var.cluster_name, var.dns_zone)}"]
etcd_servers = "${digitalocean_record.etcds.*.fqdn}"
asset_dir = "${var.asset_dir}"
networking = "${var.networking}"
networking = "flannel"
network_mtu = 1440
pod_cidr = "${var.pod_cidr}"
service_cidr = "${var.service_cidr}"

View File

@ -7,9 +7,8 @@ yum_repos:
gpgcheck: true
gpgkey: https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
packages:
- vim
- docker
- kubelet
- [docker, 1.13.1]
- [kubelet, 1.10.0]
- nfs-utils
write_files:
- path: /etc/systemd/system/cloud-metadata.service
@ -43,7 +42,7 @@ write_files:
-v /etc/ssl/etcd:/etc/ssl/certs:ro,Z \
-v /var/lib/etcd:/var/lib/etcd:Z \
--env-file=/etc/etcd/etcd.conf \
quay.io/coreos/etcd:v3.3.2
quay.io/coreos/etcd:v3.3.3
ExecStop=/usr/bin/docker stop etcd-member
Restart=on-failure
RestartSec=10s

View File

@ -7,9 +7,8 @@ yum_repos:
gpgcheck: true
gpgkey: https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
packages:
- vim
- docker
- kubelet
- [docker, 1.13.1]
- [kubelet, 1.10.0]
- nfs-utils
write_files:
- path: /etc/systemd/system/cloud-metadata.service

View File

@ -45,6 +45,8 @@ resource "digitalocean_droplet" "controllers" {
private_networking = true
user_data = "${element(data.template_file.controller-cloudinit.*.rendered, count.index)}"
ssh_keys = ["${var.ssh_fingerprints}"]
tags = [
"${digitalocean_tag.controllers.id}",
]
@ -67,7 +69,7 @@ data "template_file" "controller-cloudinit" {
etcd_domain = "${var.cluster_name}-etcd${count.index}.${var.dns_zone}"
# etcd0=https://cluster-etcd0.example.com,etcd1=https://cluster-etcd1.example.com,...
etcd_initial_cluster = "${join(",", formatlist("%s=https://%s:2380", null_resource.repeat.*.triggers.name, null_resource.repeat.*.triggers.domain))}"
ssh_authorized_key = "${var.ssh_authorized_key}"
k8s_dns_service_ip = "${cidrhost(var.service_cidr, 10)}"

View File

@ -1,10 +1,10 @@
# Secure copy kubeconfig to all nodes. Activates kubelet.service
resource "null_resource" "copy-secrets" {
count = "${var.controller_count + var.worker_count}"
# Secure copy etcd TLS assets and kubeconfig to controllers. Activates kubelet.service
resource "null_resource" "copy-controller-secrets" {
count = "${var.controller_count}"
connection {
type = "ssh"
host = "${element(concat(digitalocean_droplet.controllers.*.ipv4_address, digitalocean_droplet.workers.*.ipv4_address), count.index)}"
host = "${element(concat(digitalocean_droplet.controllers.*.ipv4_address), count.index)}"
user = "fedora"
timeout = "15m"
}
@ -51,7 +51,6 @@ resource "null_resource" "copy-secrets" {
provisioner "remote-exec" {
inline = [
"while [ ! -f /var/lib/cloud/instance/boot-finished ]; do sleep 4; done",
"sudo mkdir -p /etc/ssl/etcd/etcd",
"sudo mv etcd-client* /etc/ssl/etcd/",
"sudo cp /etc/ssl/etcd/etcd-client-ca.crt /etc/ssl/etcd/etcd/server-ca.crt",
@ -60,7 +59,30 @@ resource "null_resource" "copy-secrets" {
"sudo cp /etc/ssl/etcd/etcd-client-ca.crt /etc/ssl/etcd/etcd/peer-ca.crt",
"sudo mv etcd-peer.crt /etc/ssl/etcd/etcd/peer.crt",
"sudo mv etcd-peer.key /etc/ssl/etcd/etcd/peer.key",
"sudo mv kubeconfig /etc/kubernetes/kubeconfig",
"sudo mv $HOME/kubeconfig /etc/kubernetes/kubeconfig",
]
}
}
# Secure copy kubeconfig to all workers. Activates kubelet.service.
resource "null_resource" "copy-worker-secrets" {
count = "${var.worker_count}"
connection {
type = "ssh"
host = "${element(concat(digitalocean_droplet.workers.*.ipv4_address), count.index)}"
user = "fedora"
timeout = "15m"
}
provisioner "file" {
content = "${module.bootkube.kubeconfig}"
destination = "$HOME/kubeconfig"
}
provisioner "remote-exec" {
inline = [
"sudo mv $HOME/kubeconfig /etc/kubernetes/kubeconfig",
]
}
}
@ -68,7 +90,10 @@ resource "null_resource" "copy-secrets" {
# Secure copy bootkube assets to ONE controller and start bootkube to perform
# one-time self-hosted cluster bootstrapping.
resource "null_resource" "bootkube-start" {
depends_on = ["module.bootkube", "null_resource.copy-secrets"]
depends_on = [
"null_resource.copy-controller-secrets",
"null_resource.copy-worker-secrets",
]
connection {
type = "ssh"
@ -84,7 +109,8 @@ resource "null_resource" "bootkube-start" {
provisioner "remote-exec" {
inline = [
"sudo mv assets /opt/bootkube",
"while [ ! -f /var/lib/cloud/instance/boot-finished ]; do sleep 4; done",
"sudo mv $HOME/assets /opt/bootkube",
"sudo systemctl start bootkube",
]
}

View File

@ -1,8 +1,10 @@
variable "cluster_name" {
type = "string"
description = "Unique cluster name"
description = "Unique cluster name (prepended to dns_zone)"
}
# Digital Ocean
variable "region" {
type = "string"
description = "Digital Ocean region (e.g. nyc1, sfo2, fra1, tor1)"
@ -13,22 +15,12 @@ variable "dns_zone" {
description = "Digital Ocean domain (i.e. DNS zone) (e.g. do.example.com)"
}
variable "image" {
type = "string"
default = "fedora-27-x64"
description = "OS image from which to initialize the disk (e.g. fedora-27-x64)"
}
# instances
variable "controller_count" {
type = "string"
default = "1"
description = "Number of controllers"
}
variable "controller_type" {
type = "string"
default = "s-2vcpu-2gb"
description = "Digital Ocean droplet size (e.g. s-2vcpu-2gb, s-2vcpu-4gb, s-4vcpu-8gb)."
description = "Number of controllers (i.e. masters)"
}
variable "worker_count" {
@ -37,39 +29,50 @@ variable "worker_count" {
description = "Number of workers"
}
variable "controller_type" {
type = "string"
default = "s-2vcpu-2gb"
description = "Droplet type for controllers (e.g. s-2vcpu-2gb, s-2vcpu-4gb, s-4vcpu-8gb)"
}
variable "worker_type" {
type = "string"
default = "s-1vcpu-1gb"
description = "Digital Ocean droplet size (e.g. s-1vcpu-1gb, s-1vcpu-2gb, s-2vcpu-2gb)"
description = "Droplet type for workers (e.g. s-1vcpu-1gb, s-1vcpu-2gb, s-2vcpu-2gb)"
}
variable "image" {
type = "string"
default = "fedora-27-x64"
description = "OS image from which to initialize the disk (e.g. fedora-27-x64)"
}
# configuration
variable "ssh_authorized_key" {
type = "string"
description = "SSH public key for user 'fedora'"
}
# bootkube assets
variable "ssh_fingerprints" {
type = "list"
description = "SSH public key fingerprints. (e.g. see `ssh-add -l -E md5`)"
}
variable "asset_dir" {
description = "Path to a directory where generated assets should be placed (contains secrets)"
type = "string"
}
variable "networking" {
description = "Choice of networking provider (flannel or calico)"
type = "string"
default = "flannel"
}
variable "pod_cidr" {
description = "CIDR IP range to assign Kubernetes pods"
description = "CIDR IPv4 range to assign Kubernetes pods"
type = "string"
default = "10.2.0.0/16"
}
variable "service_cidr" {
description = <<EOD
CIDR IP range to assign Kubernetes services.
CIDR IPv4 range to assign Kubernetes services.
The 1st IP will be reserved for kube_apiserver, the 10th IP will be reserved for kube-dns.
EOD
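
For the reorganized Digital Ocean variables, a minimal sketch of a cluster definition follows; note the networking variable was removed because flannel is now hard-coded in bootkube.tf. The module name, source path, fingerprint, and values are illustrative assumptions, not taken from this commit.

module "digital-ocean-nemo" {
  # hypothetical source path and ref
  source = "git::https://github.com/poseidon/typhoon//digital-ocean/fedora-cloud/kubernetes?ref=<release>"

  cluster_name = "nemo"

  # Digital Ocean
  region   = "nyc1"
  dns_zone = "do.example.com"

  # instances
  image           = "fedora-27-x64"
  controller_type = "s-2vcpu-2gb"
  worker_type     = "s-1vcpu-1gb"

  # configuration
  ssh_authorized_key = "ssh-rsa AAAAB3NZ..."
  ssh_fingerprints   = ["d7:9d:79:aa:..."]  # placeholder; see `ssh-add -l -E md5`
  asset_dir          = "/home/user/.secrets/clusters/nemo"
}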

View File

@ -26,6 +26,8 @@ resource "digitalocean_droplet" "workers" {
private_networking = true
user_data = "${data.template_file.worker-cloudinit.rendered}"
ssh_keys = ["${var.ssh_fingerprints}"]
tags = [
"${digitalocean_tag.workers.id}",
]