mirror of
https://github.com/puppetmaster/typhoon.git
synced 2024-12-30 23:59:34 +01:00
0120b9f38d
* Drop support for `cluster_domain_suffix` customization and always use `cluster.local`. Many components in the Kubernetes ecosystem assume this default suffix, and it's very rare to set a custom value these days
* Clean up a few variables that are seldom used
120 lines
3.0 KiB
HCL
# Workers AutoScaling Group
resource "aws_autoscaling_group" "workers" {
  name = "${var.name}-worker"

  # count
  desired_capacity          = var.worker_count
  min_size                  = var.worker_count
  max_size                  = var.worker_count + 2
  default_cooldown          = 30
  health_check_grace_period = 30

  # network
  vpc_zone_identifier = var.subnet_ids

  # template
  launch_template {
    id      = aws_launch_template.worker.id
    version = aws_launch_template.worker.latest_version
  }

  # target groups to which instances should be added
  target_group_arns = flatten([
    aws_lb_target_group.workers-http.id,
    aws_lb_target_group.workers-https.id,
    var.target_groups,
  ])

  instance_refresh {
    strategy = "Rolling"
    preferences {
      instance_warmup        = 120
      min_healthy_percentage = 90
    }
  }

  lifecycle {
    # override the default destroy and replace update behavior
    create_before_destroy = true
  }

  # Waiting for instance creation delays adding the ASG to state. If instances
  # can't be created (e.g. spot price too low), the ASG will be orphaned.
  # Orphaned ASGs escape cleanup, can't be updated, and keep bidding if spot is
  # used. Disable wait to avoid issues and align with other clouds.
  wait_for_capacity_timeout = "0"

  tag {
    key                 = "Name"
    value               = "${var.name}-worker"
    propagate_at_launch = true
  }
}

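For orientation, a minimal sketch of how a parent configuration might size this pool and attach extra load balancer target groups. The module path, resource names, and values are illustrative assumptions, and only a subset of the variables this module expects is shown.

module "workers" {
  # hypothetical path to this workers module, for illustration only
  source = "./workers"

  name          = "tempest"
  worker_count  = 2                               # desired/min size; max becomes worker_count + 2
  subnet_ids    = aws_subnet.public[*].id         # assumed subnets for vpc_zone_identifier
  target_groups = [aws_lb_target_group.extra.arn] # appended to the built-in HTTP/HTTPS groups
}
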
# Worker template
resource "aws_launch_template" "worker" {
  name_prefix   = "${var.name}-worker"
  image_id      = local.ami_id
  instance_type = var.instance_type
  monitoring {
    enabled = false
  }

  user_data = sensitive(base64encode(data.ct_config.worker.rendered))

  # storage
  ebs_optimized = true
  block_device_mappings {
    device_name = "/dev/xvda"
    ebs {
      volume_type           = var.disk_type
      volume_size           = var.disk_size
      iops                  = var.disk_iops
      encrypted             = true
      delete_on_termination = true
    }
  }

  # network
  vpc_security_group_ids = var.security_groups

  # metadata
  metadata_options {
    http_tokens = "optional"
  }

  # spot
  dynamic "instance_market_options" {
    for_each = var.spot_price > 0 ? [1] : []
    content {
      market_type = "spot"
      spot_options {
        max_price = var.spot_price
      }
    }
  }

  credit_specification {
    cpu_credits = var.cpu_credits
  }

  lifecycle {
    // Override the default destroy and replace update behavior
    create_before_destroy = true
    ignore_changes        = [image_id]
  }
}

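Because the instance_market_options block is rendered only when var.spot_price is greater than zero, here is a small sketch of variable values that would switch workers to spot pricing and unlimited CPU credits. The specific values are assumptions for illustration, not defaults.

# terraform.tfvars (illustrative values)
spot_price  = 0.08        # any value > 0 renders the spot instance_market_options block
cpu_credits = "unlimited" # or "standard"; only relevant for burstable instance types
disk_type   = "gp3"
disk_size   = 30
disk_iops   = 3000
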
# Flatcar Linux worker
data "ct_config" "worker" {
  content = templatefile("${path.module}/butane/worker.yaml", {
    kubeconfig             = indent(10, var.kubeconfig)
    ssh_authorized_key     = var.ssh_authorized_key
    cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
    node_labels            = join(",", var.node_labels)
    node_taints            = join(",", var.node_taints)
  })
  strict   = true
  snippets = var.snippets
}
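The rendered Butane template can be extended through var.snippets, which ct_config merges into the worker config. A minimal sketch of passing one snippet from a parent configuration follows; the snippet content and the exact wiring are assumptions for illustration, not part of this module.

# Hypothetical: a Butane snippet raising an inotify limit on workers
snippets = [
  <<-EOT
  variant: flatcar
  version: 1.0.0
  storage:
    files:
      - path: /etc/sysctl.d/20-inotify.conf
        mode: 0644
        contents:
          inline: |
            fs.inotify.max_user_watches=65536
  EOT
]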