google-cloud: Move controller and worker submodules under kubernetes

Dalton Hubble
2017-09-27 20:50:32 -07:00
parent f7dd959e9c
commit 795428329a
10 changed files with 2 additions and 2 deletions


@@ -0,0 +1,148 @@
---
systemd:
units:
- name: docker.service
enable: true
- name: locksmithd.service
mask: true
- name: wait-for-dns.service
enable: true
contents: |
[Unit]
Description=Wait for DNS entries
Wants=systemd-resolved.service
Before=kubelet.service
[Service]
Type=oneshot
RemainAfterExit=true
ExecStart=/bin/sh -c 'while ! /usr/bin/grep '^[^#[:space:]]' /etc/resolv.conf > /dev/null; do sleep 1; done'
[Install]
RequiredBy=kubelet.service
- name: kubelet.service
enable: true
contents: |
[Unit]
Description=Kubelet via Hyperkube ACI
Wants=rpc-statd.service
[Service]
EnvironmentFile=/etc/kubernetes/kubelet.env
Environment="RKT_RUN_ARGS=--uuid-file-save=/var/cache/kubelet-pod.uuid \
--volume=resolv,kind=host,source=/etc/resolv.conf \
--mount volume=resolv,target=/etc/resolv.conf \
--volume var-lib-cni,kind=host,source=/var/lib/cni \
--mount volume=var-lib-cni,target=/var/lib/cni \
--volume opt-cni-bin,kind=host,source=/opt/cni/bin \
--mount volume=opt-cni-bin,target=/opt/cni/bin \
--volume var-log,kind=host,source=/var/log \
--mount volume=var-log,target=/var/log"
ExecStartPre=/bin/mkdir -p /opt/cni/bin
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
ExecStartPre=/bin/mkdir -p /etc/kubernetes/checkpoint-secrets
ExecStartPre=/bin/mkdir -p /etc/kubernetes/inactive-manifests
ExecStartPre=/bin/mkdir -p /var/lib/cni
ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/cache/kubelet-pod.uuid
ExecStart=/usr/lib/coreos/kubelet-wrapper \
--allow-privileged \
--anonymous-auth=false \
--client-ca-file=/etc/kubernetes/ca.crt \
--cluster_dns=${k8s_dns_service_ip} \
--cluster_domain=cluster.local \
--cni-conf-dir=/etc/kubernetes/cni/net.d \
--exit-on-lock-contention \
--kubeconfig=/etc/kubernetes/kubeconfig \
--lock-file=/var/run/lock/kubelet.lock \
--network-plugin=cni \
--node-labels=node-role.kubernetes.io/master \
--pod-manifest-path=/etc/kubernetes/manifests \
--register-with-taints=node-role.kubernetes.io/master=:NoSchedule \
--require-kubeconfig
ExecStop=-/usr/bin/rkt stop --uuid-file=/var/cache/kubelet-pod.uuid
Restart=always
RestartSec=10
[Install]
WantedBy=multi-user.target
- name: bootkube.service
contents: |
[Unit]
Description=Bootstrap a Kubernetes cluster
ConditionPathExists=!/opt/bootkube/init_bootkube.done
[Service]
Type=oneshot
RemainAfterExit=true
WorkingDirectory=/opt/bootkube
ExecStart=/opt/bootkube/bootkube-start
ExecStartPost=/bin/touch /opt/bootkube/init_bootkube.done
[Install]
WantedBy=multi-user.target
storage:
files:
- path: /etc/kubernetes/kubeconfig
filesystem: root
mode: 0644
contents:
inline: |
apiVersion: v1
kind: Config
clusters:
- name: local
cluster:
server: ${kubeconfig_server}
certificate-authority-data: ${kubeconfig_ca_cert}
users:
- name: kubelet
user:
client-certificate-data: ${kubeconfig_kubelet_cert}
client-key-data: ${kubeconfig_kubelet_key}
contexts:
- context:
cluster: local
user: kubelet
- path: /etc/kubernetes/kubelet.env
filesystem: root
mode: 0644
contents:
inline: |
KUBELET_IMAGE_URL=quay.io/coreos/hyperkube
KUBELET_IMAGE_TAG=v1.7.5_coreos.0
- path: /etc/sysctl.d/max-user-watches.conf
filesystem: root
contents:
inline: |
fs.inotify.max_user_watches=16184
- path: /opt/bootkube/bootkube-start
filesystem: root
mode: 0544
user:
id: 500
group:
id: 500
contents:
inline: |
#!/bin/bash
# Wrapper for bootkube start
set -e
# Move experimental manifests
[ -d /opt/bootkube/assets/manifests-* ] && mv /opt/bootkube/assets/manifests-*/* /opt/bootkube/assets/manifests && rm -rf /opt/bootkube/assets/manifests-*
[ -d /opt/bootkube/assets/experimental/manifests ] && mv /opt/bootkube/assets/experimental/manifests/* /opt/bootkube/assets/manifests && rm -r /opt/bootkube/assets/experimental/manifests
[ -d /opt/bootkube/assets/experimental/bootstrap-manifests ] && mv /opt/bootkube/assets/experimental/bootstrap-manifests/* /opt/bootkube/assets/bootstrap-manifests && rm -r /opt/bootkube/assets/experimental/bootstrap-manifests
BOOTKUBE_ACI="$${BOOTKUBE_ACI:-quay.io/coreos/bootkube}"
BOOTKUBE_VERSION="$${BOOTKUBE_VERSION:-v0.6.2}"
BOOTKUBE_ASSETS="$${BOOTKUBE_ASSETS:-/opt/bootkube/assets}"
exec /usr/bin/rkt run \
--trust-keys-from-https \
--volume assets,kind=host,source=$${BOOTKUBE_ASSETS} \
--mount volume=assets,target=/assets \
--volume bootstrap,kind=host,source=/etc/kubernetes \
--mount volume=bootstrap,target=/etc/kubernetes \
$${RKT_OPTS} \
$${BOOTKUBE_ACI}:$${BOOTKUBE_VERSION} \
--net=host \
--dns=host \
--exec=/bootkube -- start --asset-dir=/assets "$@"
passwd:
users:
- name: core
ssh_authorized_keys:
- "${ssh_authorized_keys}"


@@ -0,0 +1,73 @@
# Managed Instance Group
resource "google_compute_instance_group_manager" "controllers" {
name = "${var.cluster_name}-controller-group"
description = "Compute instance group of ${var.cluster_name} controllers"
# Instance name prefix for instances in the group
base_instance_name = "${var.cluster_name}-controller"
instance_template = "${google_compute_instance_template.controller.self_link}"
update_strategy = "RESTART"
zone = "${var.zone}"
target_size = "${var.count}"
# Target pool instances in the group should be added into
target_pools = [
"${google_compute_target_pool.controllers.self_link}",
]
}
# Controller Container Linux Config
data "template_file" "controller_config" {
template = "${file("${path.module}/cl/controller.yaml.tmpl")}"
vars = {
k8s_dns_service_ip = "${cidrhost(var.service_cidr, 10)}"
k8s_etcd_service_ip = "${cidrhost(var.service_cidr, 15)}"
ssh_authorized_keys = "${var.ssh_authorized_key}"
kubeconfig_ca_cert = "${var.kubeconfig_ca_cert}"
kubeconfig_kubelet_cert = "${var.kubeconfig_kubelet_cert}"
kubeconfig_kubelet_key = "${var.kubeconfig_kubelet_key}"
kubeconfig_server = "${var.kubeconfig_server}"
}
}
data "ct_config" "controller_ign" {
content = "${data.template_file.controller_config.rendered}"
pretty_print = false
}
resource "google_compute_instance_template" "controller" {
name_prefix = "${var.cluster_name}-controller-"
description = "Controller Instance template"
machine_type = "${var.machine_type}"
metadata {
user-data = "${data.ct_config.controller_ign.rendered}"
}
scheduling {
automatic_restart = "${var.preemptible ? false : true}"
preemptible = "${var.preemptible}"
}
disk {
auto_delete = true
boot = true
source_image = "${var.os_image}"
disk_size_gb = "${var.disk_size}"
}
network_interface {
network = "${var.network}"
# Ephemeral external IP
access_config = {}
}
can_ip_forward = true
lifecycle {
# To update an Instance Template, Terraform should replace the existing resource
create_before_destroy = true
}
}
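
For reference, the k8s_dns_service_ip and k8s_etcd_service_ip template vars above are derived from service_cidr with Terraform's cidrhost() interpolation function. A minimal sketch of what those expressions evaluate to with the default 10.3.0.0/16 range (the output names are illustrative, not part of this module):

# Illustrative only; with service_cidr = "10.3.0.0/16":
output "k8s_dns_service_ip_example" {
  value = "${cidrhost("10.3.0.0/16", 10)}" # "10.3.0.10" (kube-dns)
}

output "k8s_etcd_service_ip_example" {
  value = "${cidrhost("10.3.0.0/16", 15)}" # "10.3.0.15" (self-hosted etcd)
}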


@@ -0,0 +1,59 @@
# Controller Network Load balancer DNS record
resource "google_dns_record_set" "controllers" {
# DNS Zone name where record should be created
managed_zone = "${var.dns_zone_name}"
# DNS record
name = "${format("%s.%s.", var.cluster_name, var.dns_zone)}"
type = "A"
ttl = 300
# IPv4 address of controllers' network load balancer
rrdatas = ["${google_compute_address.controllers-ip.address}"]
}
# Static IP for the Network Load Balancer
resource "google_compute_address" "controllers-ip" {
name = "${var.cluster_name}-controllers-ip"
}
# Network Load Balancer (i.e. forwarding rules)
resource "google_compute_forwarding_rule" "controller-https-rule" {
name = "${var.cluster_name}-controller-https-rule"
ip_address = "${google_compute_address.controllers-ip.address}"
port_range = "443"
target = "${google_compute_target_pool.controllers.self_link}"
}
resource "google_compute_forwarding_rule" "controller-ssh-rule" {
name = "${var.cluster_name}-controller-ssh-rule"
ip_address = "${google_compute_address.controllers-ip.address}"
port_range = "22"
target = "${google_compute_target_pool.controllers.self_link}"
}
# Network Load Balancer target pool of instances.
resource "google_compute_target_pool" "controllers" {
name = "${var.cluster_name}-controller-pool"
health_checks = [
"${google_compute_http_health_check.ingress.name}",
]
session_affinity = "NONE"
}
# Kubelet HTTP Health Check
resource "google_compute_http_health_check" "ingress" {
name = "${var.cluster_name}-kubelet-health"
description = "Health check Kubelet health host port"
timeout_sec = 5
check_interval_sec = 5
healthy_threshold = 2
unhealthy_threshold = 4
port = 10255
request_path = "/healthz"
}
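
The DNS record name above is assembled with format(). For example, with cluster_name "example" and dns_zone "dghubble.io" it yields "example.dghubble.io." (the trailing dot makes it a fully-qualified record). An illustrative output, not part of this module:

output "controller_dns_name_example" {
  value = "${format("%s.%s.", "example", "dghubble.io")}" # "example.dghubble.io."
}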


@@ -0,0 +1,98 @@
variable "cluster_name" {
type = "string"
description = "Unique cluster name"
}
variable "ssh_authorized_key" {
type = "string"
description = "SSH public key for logging in as user 'core'"
}
variable "network" {
type = "string"
description = "Name of the network to attach to the compute instance interfaces"
}
variable "dns_zone" {
type = "string"
description = "Google Cloud DNS Zone value to create etcd/k8s subdomains (e.g. dghubble.io)"
}
variable "dns_zone_name" {
type = "string"
description = "Google Cloud DNS Zone name to create etcd/k8s subdomains (e.g. dghubble-io)"
}
# instances
variable "count" {
type = "string"
description = "Number of controller compute instances the instance group should manage"
}
variable "zone" {
type = "string"
description = "Google zone that compute instances in the group should be created in (e.g. gcloud compute zones list)"
}
variable "machine_type" {
type = "string"
description = "Machine type for compute instances (e.g. gcloud compute machine-types list)"
}
variable "os_image" {
type = "string"
description = "OS image from which to initialize the disk (e.g. gcloud compute images list)"
}
variable "disk_size" {
type = "string"
default = "40"
description = "The size of the disk in gigabytes."
}
variable "preemptible" {
type = "string"
default = "false"
description = "If enabled, Compute Engine will terminate instances randomly within 24 hours"
}
// configuration
variable "networking" {
description = "Choice of networking provider (flannel or calico)"
type = "string"
default = "flannel"
}
variable "service_cidr" {
description = <<EOD
CIDR IP range to assign Kubernetes services.
The 1st IP is reserved for kube-apiserver, the 10th for kube-dns, the 15th for self-hosted etcd, and the 200th for the bootstrap self-hosted etcd.
EOD
type = "string"
default = "10.3.0.0/16"
}
// kubeconfig
variable "kubeconfig_ca_cert" {
type = "string"
description = "Generated kubeconfig CA certificate"
}
variable "kubeconfig_kubelet_cert" {
type = "string"
description = "Generated kubeconfig kubelet certificate"
}
variable "kubeconfig_kubelet_key" {
type = "string"
description = "Generated kubeconfig kubelet private key"
}
variable "kubeconfig_server" {
type = "string"
description = "Generated kubeconfig server"
}
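
For context, these variables are passed in by the parent kubernetes module that this commit nests the controllers submodule under. A hypothetical invocation sketch; the source path and every value below are placeholders for illustration, not taken from this commit:

module "controllers" {
  # hypothetical relative path; the submodule now lives under the kubernetes module
  source = "./controllers"

  cluster_name       = "example"
  ssh_authorized_key = "ssh-rsa AAAA..."
  network            = "example-network"
  dns_zone           = "dghubble.io"
  dns_zone_name      = "dghubble-io"

  count        = "1"
  zone         = "us-central1-c"
  machine_type = "n1-standard-1"
  os_image     = "coreos-stable"   # placeholder image name

  service_cidr = "10.3.0.0/16"

  kubeconfig_ca_cert      = "..."  # placeholder generated assets
  kubeconfig_kubelet_cert = "..."
  kubeconfig_kubelet_key  = "..."
  kubeconfig_server       = "https://example.dghubble.io:443"
}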