1 Commit

Author SHA1 Message Date
c7bb64c671 debian recipe 2023-09-28 16:34:53 +02:00
85 changed files with 583 additions and 1845 deletions

View File

@@ -1,97 +0,0 @@
#Flavour kind
build {
name = "kind"
description = <<EOF
This builder builds a QEMU image from the base build output. The goal here is to install ${local.Kind.Name}
with its provisioning.
EOF
source "source.qemu.nuo" {
output_directory = "${var.output_dir}/${var.version}/provisionned/${local.Kind.Name}"
vm_name = "${local.output_name}-${var.version}-${local.Kind.Name}.img"
iso_url = "${var.output_dir}/${var.version}/base/${local.output_name}-${var.version}.img"
iso_checksum = "none"
disk_size = 20480
disk_image = true
boot_command = [ "<enter><enter><wait>" ]
ssh_clear_authorized_keys = true
}
source "source.vmware-vmx.nuo" {
output_directory = "${var.output_dir}/${var.version}/provisionned/vmware/nuo-harbor"
vm_name = "${local.output_name}-${var.version}-nuo-harbor.img"
source_path = "${var.output_dir}/${var.version}/base/${local.output_name}-${var.version}.img.vmx"
boot_command = [ "<enter><enter><wait>" ]
ssh_clear_authorized_keys = true
vmx_data_post = {
"memsize" = "8192",
"numvcpus" = "4",
}
}
// Install templater and bootstraper
provisioner "shell" {
script = "${local.locations.provisionning}/templater-install.sh"
}
// Copy configuration values on the image
provisioner "shell" {
inline = [
"sh -cx 'mkdir -p ${local.builder_config.TemplateDir}'",
"sh -cx 'mkdir -p ${local.builder_config.ValueDir}'"
]
}
// Copy configuration templates to the image
provisioner "file" {
destination = "${local.builder_config.TemplateDir}/"
source = "${local.locations.templates}/conf/${build.name}/"
}
// Copy configuration values on the image
provisioner "file" {
destination = "${local.builder_config.ValueDir}/${build.name}.json"
content = "${jsonencode(local.Kind)}"
}
// Generate default configuration for kind
provisioner "shell" {
max_retries = 3
inline = [ "sh -cx '/usr/local/bin/btr -c ${local.builder_config.ValueDir}/ -t ${local.builder_config.TemplateDir}'" ]
}
// Complete kind install
provisioner "shell" {
expect_disconnect = true
max_retries = 6
script = "${local.locations.provisionning}/${build.name}.sh"
}
// Copy ssh Cadoles keys
provisioner "file" {
destination = "/tmp"
source = "${local.locations.provisionning}/ssh/cadoles/"
}
// Copy CNOUS SSH keys
provisioner "file" {
destination = "/tmp"
source = "${local.locations.provisionning}/ssh/cnous/"
}
provisioner "shell" {
inline = [
"sh -cx 'cat /tmp/*.pub >> /root/.ssh/authorized_keys'",
"sh -cx 'chmod -R 600 /root/.ssh/authorized_keys'"
]
}
provisioner "shell" {
inline = [
"service docker start",
"service containerd start",
"sleep 5",
"kubeadm config images pull" ]
}
}
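# Illustrative usage (assumptions: Packer's usual <build>.<type>.<source>
# target naming and a nuo.pkrvars.hcl variable file, both names hypothetical):
#   packer build -only 'kind.qemu.nuo' -var-file=nuo.pkrvars.hcl .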

View File

@@ -1,41 +0,0 @@
locals {
// Definition of the Kubernetes service (templater compatible)
ServiceKubernetes = {
ConfigFiles = []
Repositories = {}
Packages = {
kubeadm = {
name = "kind"
action = "install"
}
kubectl = {
name = "kubectl"
action = "install"
}
}
Vars = {}
Users = {}
Daemons = {
ntpd = {
name = "ntpd"
type = "auto"
enabled = true
}
local = {
name = "local"
type = "auto"
enabled = true
}
}
}
// Definition of the Kubernetes full configuration (with all the services)
Kind = {
Name = "kind"
Globals = local.Globals
Services = {
Docker = local.ServiceDocker
Kubernetes = local.ServiceKubernetes
}
}
}
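# Illustrative check (path assumed from the templater defaults used in this
# commit): inspect the JSON handed to the templater inside the image with
#   jq '.Services.Kubernetes.Packages' /usr/share/builder/values/kind.json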

View File

@@ -2,7 +2,7 @@
#set -xeo pipefail
# Run the installer
yes | setup-alpine -e -f /root/install.conf
yes | setup-alpine -e -f install.conf
# Copy ssh keys
echo "Copy packer ssh key"

View File

@@ -1,26 +0,0 @@
#cloud-config
ssh_pwauth: True
user: ${user}
password: ${password}
chpasswd:
expire: False
ssh_authorized_keys:
%{ for sk in ssh_keys ~}
- ${sk}
%{ endfor ~}
%{ if write_files != [] ~}
write_files:
%{ for fl in write_files ~}
- path: ${fl.path}
owner: ${fl.owner}:${fl.group}
permissions: 0o${fl.permissions}
defer: true
content: ${fl.content}
%{ endfor ~}
%{ if runcmd != [] ~}
# Work around network interface down after boot
runcmd:
%{ for cmd in runcmd ~}
- ${cmd}
%{ endfor ~}
%{ endif ~}

View File

@@ -0,0 +1,16 @@
name = "debian"
version = "11"
short_version = "11"
code_name = "bullseye"
arch = "amd64"
source_url = "https://cloud.debian.org/images/cloud/bullseye/latest"
iso_cd_checksum = "9ae04227e89047b72970a0d5f1897e2573fd0d4bba3d381086307af604072bad9e33174357fd3c3545a2a2b5b83ce19f3dbb5c352e86d5173b833df59b4a5741"
image_dir_name = "latest"
boot_command = [ "<enter>" ]
# "<enter>",
# "preseed/url=http://{{ .HTTPIP }}:{{ .HTTPPort }}/preseed.cfg<enter>",
# "<wait>",
# "<wait1s>mkdir -p .ssh<enter>",
# "<wait1s>wget http://{{.HTTPIP}}:{{.HTTPPort}}/ssh-packer-pub.key -O .ssh/authorized_keys<enter><wait1s>",
# "<wait1s>chmod 600 .ssh/authorized_keys<enter>",
#]

View File

@@ -10,28 +10,24 @@ locals {
dirs = local.locations
timestamp = regex_replace(timestamp(), "[- TZ:]", "")
output_name = "${var.name}"
source_checksum_url = "file:${var.source_url}/${var.version}/${var.arch}/iso-cd/SHA256SUMS"
source_iso = "${var.source_url}/v${var.short_version}/releases/${var.arch}/alpine-virt-${var.version}-${var.arch}.iso"
source_checksum_url = "file:${var.source_url}/SHA512SUMS"
source_iso = "${var.source_url}/debian-${var.version}-generic-${var.arch}.qcow2"
source_checksum = "${var.iso_cd_checksum}"
ssh_user = "root"
ssh_password = "PbkRc1vup7Wq5n4r"
disk_size = 8000
memory = 512
instance_data = {
"instance-id": "${var.name}"
}
installOpts = {
hostname = var.name
user = "eole"
disk_device = "/dev/vda"
}
installOptsVMWare = {
hostname = var.name
user = "eole"
disk_device = "/dev/sda"
}
installOptsVirtualBox = {
hostname = var.name
user = "eole"
disk_device = "/dev/sda"
}
instance_data = { "instance-id": "${var.name}" }
}

View File

@@ -0,0 +1,62 @@
#Flavour base
build {
name = "base"
description = <<EOF
This builder builds a QEMU image from a Debian cloud image.
EOF
source "vmware-iso.debian" {
output_directory = "${var.output_dir}/${var.version}/base"
vm_name = "${local.output_name}-${var.version}.img"
disk_size = 10240
iso_url = "${local.source_iso}"
iso_checksum = "${var.iso_cd_checksum}"
http_content = {
"/ssh-packer-pub.key" = data.sshkey.install.public_key
"/install.conf" = templatefile("${local.locations.templates}/conf/install/awnsers.pktpl.hcl", local.installOptsVMWare)
}
boot_command = var.boot_command
cd_label = "cidata"
}
source "qemu.debian" {
output_directory = "${var.output_dir}/${var.version}/base"
vm_name = "${local.output_name}-${var.version}.img"
iso_url = "${local.source_iso}"
iso_checksum = "${var.iso_cd_checksum}"
disk_image = true
disk_size = 10240
cd_content = {
"meta-data" = jsonencode(local.instance_data)
"user-data" = templatefile("${path.cwd}/recipes/debian/templates/conf/cloud-init/user-data",
{ user = local.ssh_user,
password = local.ssh_password,
runcmd = var.cloud_init_runcmd })
}
#http_content = {
# "/ssh-packer-pub.key" = data.sshkey.install.public_key
# "/install.conf" = templatefile("${local.locations.templates}/conf/install/awnsers.pktpl.hcl", local.installOpts)
#}
cd_label = "cidata"
boot_command = var.boot_command
}
provisioner "shell" {
script = "${local.locations.provisionning}/${var.name}/${var.name}-${var.short_version}-install.sh"
}
provisioner "shell" {
script = "${local.locations.provisionning}/letsencrypt.sh"
}
post-processor "shell-local" {
inline = [
"/bin/sh ${path.cwd}/post-processors/sparsify.sh ${var.output_dir}/${var.version}/base ${var.image_version}",
]
}
post-processor "manifest" {
keep_input_artifact = true
}
}
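# Illustrative usage (assumptions: the variable file added in this commit is
# named debian-11.pkrvars.hcl and the target follows Packer's
# <build>.<type>.<source> naming):
#   packer init .
#   packer build -only 'base.qemu.debian' -var-file=debian-11.pkrvars.hcl .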

View File

@@ -12,10 +12,6 @@ packer {
source = "github.com/hashicorp/qemu"
version = "~> 1"
}
virtualbox = {
source = "github.com/hashicorp/virtualbox"
version = "~> 1"
}
}
}

View File

@@ -0,0 +1,181 @@
#!/bin/sh
ENV_FILE=${ENV_FILE:-/var/run/one-context/one_env}
LOG_FILE="/var/log/initkubernets.log"
FIRST_BOOT="/var/run/firstboot.flag"
infoLog() {
echo "Info: $@" | tee -a ${LOG_FILE}
}
errorLog() {
echo "Error: $@" | tee -a ${LOG_FILE}
}
waitReadyState() {
local vmID="${1}"
local timeout="${2}"
local tick=0
while true ;do
local ready=$(onegate vm show ${vmID} --json | jq -rc ".VM.USER_TEMPLATE.READY")
if [ "${ready}" = "YES" ];then
return 0
elif [ "${timeout}" -eq "${tick}" ];then
return ${timeout}
else
sleep 1
tick=$((tick+1))
fi
done
}
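# Example (illustrative): block up to 600s until VM 42 publishes READY=YES;
# the function returns 0 on success and the timeout value otherwise.
#   waitReadyState 42 600 || errorLog "leader never became ready"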
returnToken() {
infoLog "Returning tokens"
local caSecretKey="${1}"
local caToken=$(openssl x509 -in /etc/kubernetes/pki/ca.crt -noout -pubkey | openssl rsa -pubin -outform DER 2>/dev/null | sha256sum | cut -d' ' -f1)
local kubeToken=$(kubeadm token list | awk '/authentication,signing.*The default*/ {print $1}')
local masterAddr=$(awk -F '/' '/server/ {print $3}' /etc/kubernetes/admin.conf)
if [ -n "${ONEGATE_ENDPOINT}" ];then
infoLog "Onegate detected"
data="READY=YES"
data="${data} MASTER_ADDR=${masterAddr}"
data="${data} MASTER_TOKEN=${kubeToken}"
data="${data} MASTER_CA_TOKEN=sha256:${caToken}"
data="${data} MASTER_CA_SECRET_KEY=${caSecretKey}"
onegate vm update --data "${data}"
infoLog "Onegate data seted"
else
infoLog "Onegate is not present"
echo "${masterAdd} ${kubeToken} ${caToken}" >> /root/kube.token
infoLog "Tokens are available at /root/kube.token"
fi
}
joinCluster() {
local master="${MASTER_ADDR}"
local token="${MASTER_TOKEN}"
local caToken="${MASTER_CA_TOKEN}"
local caSecretKey="${MASTER_CA_SECRET_KEY}"
local sname="${SERVICE_NAME}"
if [ -n "${ONEGATE_ENDPOINT}" ];then
local masterID
# Split declaration and assignment so the jq exit status is not masked by 'local'
masterID=$(onegate service show --json | jq -c '.SERVICE.roles[] | select(.name == "leader") | .nodes[0].deploy_id')
if [ "${?}" -eq 0 ]; then
waitReadyState ${masterID} 600
if [ "${?}" -ne 0 ];then
errorLog "Master node is node ready after 600s"
return 3
fi
local masterInfo=$(onegate vm show ${masterID} --json | \
jq -cr ".VM.USER_TEMPLATE.MASTER_ADDR, .VM.USER_TEMPLATE.MASTER_TOKEN, .VM.USER_TEMPLATE.MASTER_CA_TOKEN,.VM.USER_TEMPLATE.MASTER_CA_SECRET_KEY, .VM.TEMPLATE.NIC[0].IP")
master=$(echo ${masterInfo} | cut -d " " -f 1)
token=$(echo ${masterInfo} | cut -d " " -f 2)
caToken=$(echo ${masterInfo} | cut -d " " -f 3)
caSecretKey=$(echo ${masterInfo} | cut -d " " -f 4)
masterIP=$(echo ${masterInfo} | cut -d " " -f 5)
sname=$(onegate service show --json | jq -cr ".SERVICE.name")
fi
# Setting dns resolution for cluster
echo "${masterIP} ${sname}" >> /etc/hosts
onegate service show --json | jq -rc '.SERVICE.roles[].nodes[].vm_info.VM | .TEMPLATE.NIC[].IP + " " + .NAME' >> /etc/hosts
fi
if [ -n "${master}" ] & [ -n "${token}" ] & [ -n "${caToken}" ];then
opts="--node-name $(hostname -f)"
opts="${opts} --token ${token}"
opts="${opts} --discovery-token-ca-cert-hash ${caToken}"
if [ -n "${1}" ];then
opts="${opts} --control-plane"
opts="${opts} --certificate-key ${caSecretKey}"
fi
opts="${opts} ${master}"
kubeadm join ${opts} | tee -a "${LOG_FILE}"
else
errorLog "Something is missing, can't join the cluster:"
errorLog " Master addr: [${master}]"
errorLog " Master token: [${token}]"
errorLog " Master CA token: [${caToken}]"
return 3
fi
}
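# Illustrative: for a worker, the assembled command looks roughly like
#   kubeadm join --node-name worker-1 --token <token> \
#     --discovery-token-ca-cert-hash sha256:<hash> leader.example:6443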
getServiceName() {
local sname=$(onegate service show --json | jq -cr ".SERVICE.name")
local tmout=30
local tick=0
while true ;do
if [ -z "${sname}" ];then
sname=$(onegate service show --json | jq -cr ".SERVICE.name")
else
echo ${sname}
return 0
fi
sleep 1
tick=$((tick+1))
if [ ${tmout} -eq ${tick} ];then
hostname -f
return 3
fi
done
}
initLeader() {
sname="$(hostname -f)"
if [ -n "${ONEGATE_ENDPOINT}" ];then
sname=$(getServiceName)
sip=$(onegate vm show --json | jq -rc ".VM.TEMPLATE.NIC[0].IP")
echo "${sip} ${sname} $(hostname -f)" >> /etc/hosts
onegate service show --json | jq -rc '.SERVICE.roles[].nodes[].vm_info.VM | .TEMPLATE.NIC[].IP + " " + .NAME' >> /etc/hosts
fi
caSecretKey=$(date | sha256sum | awk '{print $1}')
infoLog "Kubernetes init started"
kubeadm init --pod-network-cidr=10.244.0.0/16 \
--node-name="${SET_HOSTNAME}" \
--control-plane-endpoint "${sname}:6443" \
--upload-certs --certificate-key "${caSecretKey}" | tee -a "${LOG_FILE}"
infoLog "Kubernetes init ended"
infoLog "Configuring kubectl"
mkdir /root/.kube
ln -s /etc/kubernetes/admin.conf /root/.kube/config
infoLog "kubectl configured"
infoLog "Installing cilium"
sleep 20
kubectl config view --minify -o jsonpath='{.clusters[].name}'
sleep 20
cilium install --helm-set 'cni.binPath=/usr/libexec/cni' --wait | tee -a "${LOG_FILE}"
infoLog "Cilium is installed"
returnToken "${caSecretKey}"
}
initKube() {
if [ "${SERVER_ROLE}" == "leader" ];then
initLeader
elif [ "${SERVER_ROLE}" == "worker" ];then
joinCluster
elif [ "${SERVER_ROLE}" == "master" ];then
joinCluster "${SERVER_ROLE}"
fi
touch ${FIRST_BOOT}
infoLog "Kubernetes cluster init is finished"
}
if [ -f "${ENV_FILE}" ]; then
. "${ENV_FILE}"
fi
if [ -f "${FIRST_BOOT}" ];then
exit 0
else
uuidgen > /etc/machine-id
swapoff -a # Make sure swap is disabled
initKube &
fi
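# Example (assumption): one_env content for a worker joining an existing
# cluster without OneGate:
#   SERVER_ROLE="worker"
#   MASTER_ADDR="leader.example:6443"
#   MASTER_TOKEN="abcdef.0123456789abcdef"
#   MASTER_CA_TOKEN="sha256:<ca-cert-hash>"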

View File

@@ -0,0 +1,3 @@
#!/bin/sh
mount --make-rshared /

View File

@@ -0,0 +1 @@
matchbox

View File

@@ -0,0 +1,13 @@
#!/bin/sh
CONF="/etc/conf.d/jenkins-slave"
if [ -e "/etc/jenkins-slave.conf" ]; then
CONF="/etc/jenkins-slave.conf"
fi
TOTAL_MEMORY=$(awk '/MemTotal/ { printf "%sg", int($2/1024/1024)+1 }' /proc/meminfo)
sed -i "s|^JENKINS_SLAVE_NAME=.*$|JENKINS_SLAVE_NAME='slave-$ETH0_IP'|" "${CONF}"
sed -i "s|^JENKINS_SLAVE_USERNAME=.*$|JENKINS_SLAVE_USERNAME='$JENKINS_SLAVE_USERNAME'|" "${CONF}"
sed -i "s|^JENKINS_SLAVE_PASSWORD=.*$|JENKINS_SLAVE_PASSWORD='$JENKINS_SLAVE_PASSWORD'|" "${CONF}"
sed -i "s|^JENKINS_MASTER_URL=.*$|JENKINS_MASTER_URL='$JENKINS_MASTER_URL'|" "${CONF}"
sed -i "s|^JENKINS_SLAVE_LABELS=.*$|JENKINS_SLAVE_LABELS='docker docker-compose mem-$TOTAL_MEMORY $JENKINS_SLAVE_LABELS'|" "${CONF}"

View File

@@ -0,0 +1,31 @@
#!/usr/bin/env bash
ENV_FILE=${ENV_FILE:-/var/run/one-context/one_env}
# $TOKENTXT is available only through the env. file
# shellcheck disable=SC1090
if [ -f "${ENV_FILE}" ]; then
. "${ENV_FILE}"
fi
###
if [ -n "${GITLAB_URL}" ]; then
if command -v gitlab-runner; then
if [ -n "${GITLAB_SHELL}" ]; then
opts="--shell=${GITLAB_SHELL}"
fi
# shellcheck disable=SC2086
gitlab-runner register \
--non-interactive \
--url="${GITLAB_URL}" \
--registration-token="${GITLAB_TOKEN}" \
--executor="${GITLAB_EXECUTOR}" \
--description="${GITLAB_RUNNER_NAME}" \
--tag-list="${GITLAB_TAG_LIST}" \
--locked=false \
--access-level=not_protected \
--run-untagged=false \
"${opts}"
fi
fi
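# Example (assumption): registration values typically provided through the
# one_env context file:
#   GITLAB_URL="https://gitlab.example.org"
#   GITLAB_TOKEN="<registration-token>"
#   GITLAB_EXECUTOR="shell"
#   GITLAB_TAG_LIST="docker,alpine"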

View File

@@ -0,0 +1,80 @@
#!/usr/bin/env bash
#
# Generate all the configuration files
# Get all the values from the VLS_DIR
# Process each template from the TPL_DIR with this values
#
ENV_FILE=${ENV_FILE:-/var/run/one-context/one_env}
TPL_DIR="/usr/share/builder/templates"
VLS_DIR="/usr/share/builder/values"
CONFIG=""
. "${ENV_FILE}"
BTR="$(command -v btr)"
if [ -z "${BTR}" ]; then
echo "Warning: Nothing to do, the templater is not installed"
exit 0
fi
if [ ! -e "${TPL_DIR}" ]; then
echo "Error: The template dir is missing (${TPL_DIR})"
exit 1
fi
if [ ! -e "${VLS_DIR}" ]; then
echo "Error: The template dir is missing (${VLS_DIR})"
exit 1
fi
jsonQuery() {
local data="${1}"
local query="${2}"
echo "${data}" | jq -cr "${query}"
}
# NAME: @jsonMerge
# AIM: Merge two json structures
# NOTES:
# The last one has the last word:
# if the same key exists in A and B,
# the value from the B structure is kept.
# PARAMS:
# $1: original JSON Structure
# $2: updated JSON Structure
jsonMerge() {
local data="${1}"
local data2="${2}"
echo "${data} ${data2}" | jq -cr -s ".[0] * .[1]"
}
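# Example: the second structure wins on key collisions:
#   jsonMerge '{"a":1,"b":2}' '{"b":3}'   # => {"a":1,"b":3}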
getValues() {
local values=""
for file in $(find ${VLS_DIR} -name "*.json"); do
values="${values}$(cat ${file})"
done
if [ -n "${RAW_CONFIG}" ]; then
values="$(jsonMerge ${values} ${RAW_CONFIG})"
fi
for key in $(echo ${values} | jq -cr '.|keys[]'); do
ukey=${key^^}
if [ -n "${!ukey}" ]; then
values="$(jsonMerge "${values}" "{\"${key}\":\"${!ukey}\"}")"
fi
done
echo ${values}
}
processTemplates() {
${BTR} -t ${TPL_DIR} -c "${1}"
}
VALUES=$(getValues)
echo ${VALUES}
processTemplates "${VALUES}"
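# Example (illustrative): a value file providing {"HarborDomain": "..."} can be
# overridden by exporting HARBORDOMAIN=reg.example.org in ${ENV_FILE}, since
# getValues upper-cases every key before the environment lookup above.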

View File

@@ -0,0 +1,3 @@
{
"instance-id": "iid-local01"
}

View File

@@ -0,0 +1,9 @@
#!/bin/sh
echo "${1}" >/etc/hostname
apt-get update
apt-get -y dist-upgrade
apt-get install wget curl -y
touch /etc/cloud/cloud-init.disabled

View File

@@ -0,0 +1,107 @@
# To see all available options execute this command once the install is done:
# sudo less /var/log/installer/cdebconf/questions.dat
# If you need information about an option use the command below (example for keymap):
# grep -A 4 "keyboard-configuration/xkb-keymap" /var/log/installer/cdebconf/templates.dat
# Use network mirror for package installation
# d-i apt-setup/use_mirror boolean true
# Automatic installation
d-i auto-install/enable boolean true
# "linux-server" is substituted by "linux-image-amd64"
# Possible options : "linux-image-amd64"(default) or "linux-image-rt-amd64"
d-i base-installer/kernel/override-image string linux-server
# Configure hardware clock
d-i clock-setup/utc boolean true
d-i clock-setup/utc-auto boolean true
d-i netcfg/choose_interface select auto
d-i netcfg/use_dhcp boolean true
# d-i console-setup/ask_detect boolean false
# d-i debconf/frontend select noninteractive
# Set OS locale
d-i debian-installer/language string fr
d-i debian-installer/country string FR
d-i debian-installer/locale string fr_FR.UTF-8
# d-i debian-installer/framebuffer boolean false
# Reboot once the install is done
d-i finish-install/reboot_in_progress note
# Bootloader options
d-i grub-installer/only_debian boolean true
d-i grub-installer/with_other_os boolean true
d-i grub-installer/bootdev string /dev/vda
# Set the keyboard layout
d-i keyboard-configuration/xkb-keymap select fr
# Mirror from which packages will be downloaded
d-i mirror/country string manual
d-i mirror/http/directory string /debian
d-i mirror/http/hostname string httpredir.debian.org
# Configure http proxy if needed "http://[[user][:pass]@]host[:port]/"
d-i mirror/http/proxy string
# Disk configuration
d-i partman-efi/non_efi_system boolean true
d-i partman-auto-lvm/guided_size string max
d-i partman-auto/choose_recipe select atomic
d-i partman-auto/method string lvm
d-i partman-lvm/confirm boolean true
d-i partman-lvm/confirm_nooverwrite boolean true
d-i partman-lvm/device_remove_lvm boolean true
d-i partman/choose_partition select finish
d-i partman/confirm boolean true
d-i partman/confirm_nooverwrite boolean true
d-i partman/confirm_write_new_label boolean true
# User configuration
d-i passwd/root-login boolean true
d-i passwd/root-password-crypted password $1$hA6nLFTh$FitTH.KXJWluJN9z7lDjr0
d-i passwd/user-fullname string packer
d-i passwd/user-uid string 1000
d-i passwd/username string packer
d-i passwd/user-password-crypted password $1$hA6nLFTh$FitTH.KXJWluJN9z7lDjr0
# Extra packages to be installed
d-i pkgsel/include string sudo openssh-server build-essential
d-i pkgsel/install-language-support boolean false
d-i pkgsel/update-policy select none
# Whether to upgrade packages after debootstrap
d-i pkgsel/upgrade select full-upgrade
# Set timezone
d-i time/zone string Europe/Paris
# Allow weak user password
d-i user-setup/allow-password-weak boolean true
# Home folder encryption
d-i user-setup/encrypt-home boolean false
# Do not scan additional CDs
apt-cdrom-setup apt-setup/cdrom/set-first boolean false
# Use network mirror
apt-mirror-setup apt-setup/use_mirror boolean true
# Disable popularity contest
popularity-contest popularity-contest/participate boolean false
# Select base install
tasksel tasksel/first multiselect standard, ssh-server
# Setup passwordless sudo for packer user
d-i preseed/late_command string \
echo "packer ALL=(ALL:ALL) NOPASSWD:ALL" > /target/etc/sudoers.d/packer && chmod 0440 /target/etc/sudoers.d/packer

View File

@@ -0,0 +1,12 @@
#!/bin/sh
set -e
ONE_CONTEXT_VERSION="6.4.0"
ONE_CONTEXT_PKG_VERSION="1"
PKG="one-context-${ONE_CONTEXT_VERSION}-r${ONE_CONTEXT_PKG_VERSION}.apk"
PKG_URL="https://github.com/OpenNebula/addon-context-linux/releases/download/v${ONE_CONTEXT_VERSION}/${PKG}"
cd /tmp || exit 3
wget -q --no-check-certificate ${PKG_URL}
apk add --allow-untrusted --no-cache ${PKG}
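# Optional check (illustrative): confirm the package registered with apk
#   apk info -e one-context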

View File

@@ -0,0 +1,21 @@
#!/usr/bin/env bash
ENV_FILE=${ENV_FILE:-/var/run/one-context/one_env}
# $TOKENTXT is available only through the env. file
# shellcheck disable=SC1090
if [ -f "${ENV_FILE}" ]; then
. "${ENV_FILE}"
fi
###
if [ -n "${K3S_ROLE}" ]; then
if [ "${K3S_ROLE}" = "server" ]; then
rc-update add dnsmasq default
service dnsmasq start
rc-update add k3s default
service k3s start
fi
fi

View File

@@ -1,12 +1,13 @@
source qemu "nuo" {
source qemu "debian" {
cpus = 1
memory = "${local.memory}"
accelerator = "kvm"
vnc_bind_address = "0.0.0.0"
headless = true
headless = false
# Serve the `http` directory via HTTP, used for preseeding the Debian installer.
#http_directory = "${path.cwd}/recipes/${var.name}/provisionning/${var.name}/http"
http_port_min = 9990
http_port_max = 9999
@@ -32,7 +33,7 @@ source qemu "nuo" {
boot_wait = "5s"
}
source "vmware-iso" "nuo" {
source "vmware-iso" "debian" {
cpus = 1
disk_type_id = 0
memory = "${local.memory}"
@@ -41,6 +42,7 @@ source "vmware-iso" "nuo" {
headless = true
# Serve the `http` directory via HTTP, used for preseeding the Debian installer.
#http_directory = "${path.cwd}/recipes/${var.name}/provisionning/${var.name}/http"
http_port_min = 9990
http_port_max = 9999
@@ -66,13 +68,14 @@ source "vmware-iso" "nuo" {
boot_wait = "5s"
}
source "vmware-vmx" "nuo" {
source "vmware-vmx" "debian" {
disk_type_id = 0
vnc_bind_address = "0.0.0.0"
headless = true
# Serve the `http` directory via HTTP, used for preseeding the Debian installer.
#http_directory = "${path.cwd}/recipes/${var.name}/provisionning/${var.name}/http"
http_port_min = 9990
http_port_max = 9999
@@ -97,39 +100,3 @@ source "vmware-vmx" "nuo" {
boot_wait = "5s"
}
source "virtualbox-iso" "nuo" {
cpus = 1
memory = "${local.memory}"
vrdp_bind_address = "0.0.0.0"
nic_type = "virtio"
headless = false
# Serve the `http` directory via HTTP, used for preseeding the Debian installer.
http_bind_address = "0.0.0.0"
http_port_min = 9290
http_port_max = 9299
# SSH ports to redirect to the VM being built
#host_port_min = 2222
#host_port_max = 2229
# This user is configured in the preseed file.
ssh_username = "${local.ssh_user}"
ssh_private_key_file = data.sshkey.install.private_key_path
ssh_wait_timeout = "1000s"
ssh_file_transfer_method = "sftp"
shutdown_command = "/sbin/poweroff"
# Builds a compact image
#disk_compression = true
#disk_discard = "unmap"
#skip_compaction = false
#disk_detect_zeroes = "unmap"
format = "ova"
boot_wait = "5s"
}

View File

@@ -0,0 +1,12 @@
#cloud-config
ssh_pwauth: True
user: ${user}
password: ${password}
chpasswd:
expire: False
# Work around network interface down after boot
runcmd:
%{ for cmd in runcmd ~}
- ${cmd}
%{ endfor ~}

View File

@@ -0,0 +1,6 @@
# Configuration file of Harbor
# The IP address or hostname to access admin UI and registry service.
# DO NOT use localhost or 127.0.0.1, because Harbor needs to be accessed by external clients.
hostname: ${Vars.RootlessDocker}

View File

@@ -0,0 +1 @@
harbor

View File

@@ -1,26 +1,26 @@
variable "name" {
type = string
default = "nuo"
default = "debian"
}
variable "version" {
type = string
default = "3.18.0"
default = "11"
}
variable "short_version" {
type = string
default = "3.18"
default = "11"
}
variable "arch" {
type = string
default = "x86_64"
default = "amd6464"
}
variable "output_dir" {
type = string
default = "output/nuo/"
default = "output/debian/"
}
variable "source_url" {
@@ -30,7 +30,7 @@ variable "source_url" {
variable "iso_cd_checksum" {
type = string
default = "sha256:ae6d563d2444665316901fe7091059ac34b8f67ba30f9159f7cef7d2fdc5bf8a"
default = "sha256:9ae04227e89047b72970a0d5f1897e2573fd0d4bba3d381086307af604072bad9e33174357fd3c3545a2a2b5b83ce19f3dbb5c352e86d5173b833df59b4a5741"
}
variable "image_version" {
@@ -52,3 +52,8 @@ variable "boot_command" {
type = list(string)
default = []
}
variable "cloud_init_runcmd" {
type = list(string)
default = [ "uname" ]
}

View File

@@ -1,6 +0,0 @@
name = "nuo"
version = "3.18.2"
short_version = "3.18"
arch = "x86_64"
source_url = "https://dl-cdn.alpinelinux.org/alpine"
iso_cd_checksum = "6bc7ff54f5249bfb67082e1cf261aaa6f307d05f64089d3909e18b2b0481467f"

View File

@@ -1,93 +0,0 @@
#Flavour docker
build {
name = "docker"
description = <<EOF
This builder builds a QEMU image from the base build output. The goal here is to install ${local.Docker.Name}
with its provisioning.
EOF
source "source.vmware-vmx.nuo" {
output_directory = "${var.output_dir}/${var.version}/provisionned/vmware/docker"
vm_name = "${local.output_name}-${var.version}-docker.img"
source_path = "${var.output_dir}/${var.version}/base/${local.output_name}-${var.version}.img.vmx"
boot_command = [ "<enter><enter><wait>" ]
ssh_clear_authorized_keys = true
}
source "source.qemu.nuo" {
output_directory = "${var.output_dir}/${var.version}/provisionned/${local.Docker.Name}"
vm_name = "${local.output_name}-${var.version}-${local.Docker.Name}.img"
iso_url = "${var.output_dir}/${var.version}/base/${local.output_name}-${var.version}.img"
iso_checksum = "none"
disk_size = 20480
disk_image = true
boot_command = [ "<enter><enter><wait>" ]
ssh_clear_authorized_keys = true
}
// Copy ssh Cadoles keys
provisioner "file" {
destination = "/tmp"
source = "${local.locations.provisionning}/ssh/cadoles/"
}
provisioner "shell" {
inline = [
"sh -cx 'cat /tmp/*.pub >> /root/.ssh/authorized_keys'",
"sh -cx 'chmod -R 600 /root/.ssh/authorized_keys'"
]
}
// Install templater and bootstraper
provisioner "shell" {
script = "${local.dirs.provisionning}/templater-install.sh"
}
// Copy configuration values on the image
provisioner "shell" {
inline = [
"sh -cx 'mkdir -p ${local.builder_config.TemplateDir}'",
"sh -cx 'mkdir -p ${local.builder_config.ValueDir}'"
]
}
// Copy configuration templates to the image
provisioner "file" {
destination = "${local.builder_config.TemplateDir}/"
source = "${local.dirs.templates}/conf/${build.name}/"
}
// Copy configuration values on the image
provisioner "file" {
destination = "${local.builder_config.ValueDir}/${build.name}.json"
content = "${jsonencode(local.Docker)}"
}
// Generate default configuration for docker
provisioner "shell" {
inline = [ "sh -cx '/usr/local/bin/btr -c ${local.builder_config.ValueDir}/ -t ${local.builder_config.TemplateDir}'" ]
}
// Install OpenNebula context tool
provisioner "shell" {
script = "${local.dirs.provisionning}/one-context.sh"
}
// Deploy the opennebula context script to manage configuration
provisioner "file" {
destination = "/etc/one-context.d/net-96-templater"
source = "${local.dirs.provisionning}/conf/one-context/net-96-templater"
}
provisioner "shell" {
inline = [ "sh -cx 'chmod +x /etc/one-context.d/net-96-templater'" ]
}
post-processor "shell-local" {
inline = [
"/bin/sh ${local.dirs.post-processors}/sparsify.sh ${var.output_dir}/${var.version}/provisionned/${local.Docker.Name} ${var.image_version}",
//"ruby ${local.dirs.tools}/one-templates -t image -m 640 -T ${local.dirs.templates}/one/image/common.tpl -n ${local.output_name}-${var.version}-${local.Docker.Name} -c '${local.Docker.Name} base image' --image-file ${var.output_dir}/${var.version}/provisionned/${local.Docker.Name}/${local.output_name}-${var.version}-${local.Docker.Name}.img",
//"ruby ${local.dirs.tools}/one-templates -t vm -m 640 -T ${local.dirs.templates}/one/vm/common.xml -n ${local.output_name}-${var.version}-${local.Docker.Name} --image-name ${local.output_name}-${var.version}-${local.Docker.Name}",
]
}
}

View File

@@ -1,136 +0,0 @@
#Flavour ${build.name}
build {
name = "harbor"
description = <<EOF
This builder builds a QEMU image from the base build output. The goal here is to install harbor
with its provisioning.
EOF
source "source.vmware-vmx.nuo" {
output_directory = "${var.output_dir}/${var.version}/provisionned/vmware/nuo-harbor"
vm_name = "${local.output_name}-${var.version}-nuo-harbor.img"
source_path = "${var.output_dir}/${var.version}/base/${local.output_name}-${var.version}.img.vmx"
// BUG https://github.com/hashicorp/packer-plugin-vmware/issues/119
disk_additional_size = [ 81920 ]
//
boot_command = [ "<enter><enter><wait>" ]
ssh_clear_authorized_keys = true
vmx_data = {
"scsi1.pcislotnumber" = "16"
"scsi1.present" = "TRUE"
"scsi1.virtualdev" = "lsilogic"
"scsi1:0.filename" = "disk-1.vmdk"
"scsi1:0.present" = "TRUE"
"scsi1:0.redo" = ""
}
vmx_data_post = {
"memsize" = "4096",
"numvcpus" = "2",
}
}
source "source.qemu.nuo" {
output_directory = "${var.output_dir}/${var.version}/provisionned/${local.Config.Name}"
vm_name = "${local.output_name}-${var.version}-${local.Config.Name}.img"
iso_url = "${var.output_dir}/${var.version}/base/${local.output_name}-${var.version}.img"
iso_checksum = "none"
disk_size = 81920
disk_image = true
boot_command = [ "<enter><enter><wait>" ]
ssh_clear_authorized_keys = true
}
provisioner "shell" {
script = "${local.dirs.provisionning}/tools/additionnal-disk"
environment_vars = [
"PV_DEVICE=/dev/sdb",
"VG_NAME=data",
"LV_NAME=harbor-data",
"LV_MTP=/srv/harbor",
"LV_FS=ext4"
]
}
// Install templater and bootstraper
provisioner "shell" {
script = "${local.dirs.provisionning}/templater-install.sh"
}
// Copy static configurations to /etc
provisioner "file" {
destination = "/etc"
source = "${local.dirs.provisionning}/conf/${build.name}/"
}
// Copy configuration values on the image
provisioner "shell" {
inline = [
"sh -cx 'mkdir -p ${local.builder_config.TemplateDir}'",
"sh -cx 'mkdir -p ${local.builder_config.ValueDir}'"
]
}
// Copy configuration templates to the image
provisioner "file" {
destination = "${local.builder_config.TemplateDir}/"
source = "${local.dirs.templates}/conf/${build.name}/"
}
// Copy Docker configuration templates to the image
provisioner "file" {
destination = "${local.builder_config.TemplateDir}/"
source = "${local.dirs.templates}/conf/docker/"
}
// Copy configuration values on the image
provisioner "file" {
destination = "${local.builder_config.ValueDir}/${build.name}.json"
content = "${jsonencode(local.NuoHarbor)}"
}
provisioner "file" {
destination = "/etc/local.d/templater.start"
source = "${local.locations.provisionning}/conf/common/templater.start"
}
// Copy ssh Cadoles keys
provisioner "file" {
destination = "/tmp"
source = "${local.locations.provisionning}/ssh/cadoles/"
}
// Copy CNOUS SSH keys
provisioner "file" {
destination = "/tmp"
source = "${local.locations.provisionning}/ssh/cnous/"
}
provisioner "shell" {
inline = [
"sh -cx 'cat /tmp/*.pub >> /root/.ssh/authorized_keys'",
"sh -cx 'chmod -R 600 /root/.ssh/authorized_keys'"
]
}
// Generate default configuration for the server
provisioner "shell" {
max_retries = 3
inline = [ "sh -cx '/usr/local/bin/btr -c ${local.builder_config.ValueDir}/ -t ${local.builder_config.TemplateDir}'" ]
}
provisioner "shell" {
environment_vars = [
"HARBOR_SSL_CERT=${local.NuoHarbor.Services.Harbor.Vars.HarborSSLCert}",
"HARBOR_SSL_KEY=${local.NuoHarbor.Services.Harbor.Vars.HarborSSLPrivKey}",
"HARBOR_DOMAIN=${local.NuoHarbor.Services.Harbor.Vars.HarborDomain}"
]
script = "${local.dirs.provisionning}/${build.name}.sh"
}
provisioner "shell" {
inline = [
"chmod +x /etc/local.d/templater.start"
]
}
}

View File

@@ -1,103 +0,0 @@
#Flavour kind
build {
name = "kind"
description = <<EOF
This builder builds a QEMU image from the base build output. The goal here is to install ${local.Kind.Name}
with its provisioning.
EOF
source "source.qemu.nuo" {
output_directory = "${var.output_dir}/${var.version}/provisionned/${local.Kind.Name}"
vm_name = "${local.output_name}-${var.version}-${local.Kind.Name}.img"
iso_url = "${var.output_dir}/${var.version}/base/${local.output_name}-${var.version}.img"
iso_checksum = "none"
disk_size = 20480
disk_image = true
boot_command = [ "<enter><enter><wait>" ]
ssh_clear_authorized_keys = true
}
source "source.vmware-vmx.nuo" {
output_directory = "${var.output_dir}/${var.version}/provisionned/vmware/nuo-kind"
vm_name = "${local.output_name}-${var.version}-nuo-kind.img"
source_path = "${var.output_dir}/${var.version}/base/${local.output_name}-${var.version}.img.vmx"
boot_command = [ "<enter><enter><wait>" ]
ssh_clear_authorized_keys = true
vmx_data_post = {
"memsize" = "8192",
"numvcpus" = "4",
}
}
// Install templater and bootstraper
provisioner "shell" {
script = "${local.locations.provisionning}/templater-install.sh"
}
// Copy configuration values on the image
provisioner "shell" {
inline = [
"sh -cx 'mkdir -p ${local.builder_config.TemplateDir}'",
"sh -cx 'mkdir -p ${local.builder_config.ValueDir}'"
]
}
// Copy configuration templates to the image
provisioner "file" {
destination = "${local.builder_config.TemplateDir}/"
source = "${local.locations.templates}/conf/${build.name}/"
}
// Copy configuration values on the image
provisioner "file" {
destination = "${local.builder_config.ValueDir}/${build.name}.json"
content = "${jsonencode(local.Kind)}"
}
// Generate default configuration for kind
provisioner "shell" {
max_retries = 3
inline = [ "sh -cx '/usr/local/bin/btr -c ${local.builder_config.ValueDir}/ -t ${local.builder_config.TemplateDir}'" ]
}
// Complete kind install
provisioner "shell" {
expect_disconnect = true
max_retries = 6
script = "${local.locations.provisionning}/${build.name}.sh"
}
// Copy ssh Cadoles keys
provisioner "file" {
destination = "/tmp"
source = "${local.locations.provisionning}/ssh/cadoles/"
}
// Copy CNOUS SSH keys
provisioner "file" {
destination = "/tmp"
source = "${local.locations.provisionning}/ssh/cnous/"
}
provisioner "file" {
destination = "/etc/local.d/init${build.name}.start"
source = "${local.locations.provisionning}/conf/${build.name}/init${build.name}.start"
}
provisioner "shell" {
inline = [
"sh -cx 'chmod +x /etc/local.d/init${build.name}.start'",
"sh -cx 'cat /tmp/*.pub >> /root/.ssh/authorized_keys'",
"sh -cx 'chmod -R 600 /root/.ssh/authorized_keys'"
]
}
provisioner "shell" {
inline = [
"service podman start",
//"service containerd start",
//"sleep 10",
//"kind create cluster --config /etc/cluster.yaml ",
"sleep 10"]
}
}

View File

@@ -1,65 +0,0 @@
locals {
// Definition of the Docker service (templater compatible)
ServiceDocker = {
ConfigFiles = [
{
destination = "/etc/rc.conf"
source = "rc.conf.pktpl.hcl"
mode = "600"
owner = "root"
group = "root"
}
]
Repositories = {}
Packages = {
docker = {
name = "docker"
action = "install"
}
docker-compose = {
name = "docker-compose"
action = "install"
}
gpg = {
name = "gpg"
action = "install"
}
}
Daemons = {
docker = {
name = "docker"
type = "auto"
enabled = true
}
cgroups = {
name = "cgroups"
type = "auto"
enabled = true
}
local = {
name = "local"
type = "auto"
enabled = true
}
}
Vars = {
RootlessDocker = "true"
}
Users = {
dock = {
username = "dock"
group = "dock"
home = "/srv/dock"
shell = "/bin/nologin"
}
}
}
Docker = {
Name = "docker"
Globals = local.Globals
Services = {
Docker = local.ServiceDocker
}
}
}

View File

@@ -1,89 +0,0 @@
locals {
ServiceNuoHarbor = {
ConfigFiles = [
{
destination = "/etc/harbor/harbor.yml"
source = "harbor.yml.pktpl.hcl"
mode = "600"
owner = "root"
group = "root"
}
]
Vars = {
AuthEnabled = false
User = "harbor"
Group = "harbor"
HarborHTTPPort = "80"
HarborHTTPSPort = "443"
HarborSSLCert = "/etc/ssl/certs/harbor.crt"
HarborSSLPrivKey = "/etc/ssl/certs/harbor.key"
HarborDomain = "reg.k8s.in.nuonet.fr"
HarborAdminPassword = "ChangeMeAsSoonAsPossible"
HarborDBPassword = "WeNeedToBeAbleToManagePasswords"
NIC = [
{
Name = "eth0"
IP = "192.168.160.10"
Mask = "255.255.254.0"
Gateway = "192.168.160.1"
}
]
DNS = [ "192.168.160.10" ]
Set = { Hostname = "reg.k8s.in.nuonet.fr" }
}
Repositories = {
AlpineEdgeTesting = {
type = "apk"
name = "testing"
url = "http://mirrors.ircam.fr/pub/alpine/edge/testing"
enabled = true
}
}
Packages = {
vmtools = {
name = "open-vm-tools"
action = "install"
},
mkcert = {
name = "mkcert"
action = "install"
},
gpg-agent = {
name = "gpg-agent"
action = "install"
}
ncurses = {
name = "ncurses"
action = "install"
}
}
Daemons = {
vm-tools = {
name = "open-vm-tools"
type = "auto"
enabled = true
}
harbor = {
name = "harbor"
type = "auto"
enabled = true
}
}
Users = {
harbor = {
username = "harbor"
group = "harbor"
home = "/srv/harbor"
shell = "/bin/nologin"
}
}
}
NuoHarbor = {
Name = "nuo-harbor"
Globals = local.Globals
Services = {
Docker = local.ServiceDocker
Harbor = local.ServiceNuoHarbor
}
}
}

View File

@@ -1,132 +0,0 @@
locals {
// Definition of the Kubernetes service (templater compatible)
ServiceKubernetes = {
ConfigFiles = [
{
destination = "/etc/cluster.yaml"
source = "cluster.yaml.pktpl.hcl"
mode = "600"
owner = "root"
group = "root"
},
{
destination = "/etc/rc.conf"
source = "rc.conf.pktpl.hcl"
mode = "600"
owner = "root"
group = "root"
}
]
Repositories = {}
Packages = {
docker = {
name = "docker"
action = "install"
}
docker-compose = {
name = "docker-compose"
action = "install"
}
gpg = {
name = "gpg"
action = "install"
}
kubeadm = {
name = "kind"
action = "install"
}
vmtools = {
name = "open-vm-tools"
action = "install"
}
vmtools-rc = {
name = "open-vm-tools-openrc"
action = "install"
}
}
Vars = {
RootlessDocker = "true"
Cluster = {
Name = "nuo"
IngressReady = true
PodSubNet = "10.110.0.0/16"
ServieSubNet = "10.115.0.0/16"
Version = "1.27.2"
Nodes = [
{
Role = "control-plane"
Ports = [
{
containerPort = 31000
hostPort = 31000
listenAddress = "0.0.0.0"
},
{
containerPort = 80
hostPort = 8080
listenAddress = "0.0.0.0"
},
{
containerPort = 443
hostPort = 8443
listenAddress = "0.0.0.0"
}
]
},
{ Role = "worker" },
{ Role = "worker" },
{ Role = "worker" }
]
}
}
Users = {
dock = {
username = "dock"
group = "dock"
home = "/srv/dock"
shell = "/bin/nologin"
}
}
Daemons = {
vm-tools = {
name = "open-vm-tools"
type = "auto"
enabled = true
}
docker = {
name = "docker"
type = "auto"
enabled = true
}
cgroups = {
name = "cgroups"
type = "auto"
enabled = true
}
local = {
name = "local"
type = "auto"
enabled = true
}
ntpd = {
name = "ntpd"
type = "auto"
enabled = true
}
}
}
// Definition of the Kubernetes full configuration (with all the services)
Kind = {
Name = "kind"
Globals = local.Globals
Services = {
Kubernetes = local.ServiceKubernetes
}
}
}

View File

@@ -1,176 +0,0 @@
locals {
// Definition of the nuo-matchbox service (templater compatible)
ServiceNuoMatchBox = {
ConfigFiles = [
{
destination = "/etc/dnsmasq.d/pxe.conf"
source = "dnsmasq.d/ipxe.conf.pktpl.hcl"
mode = "600"
owner = "root"
group = "root"
},
{
destination = "/etc/dnsmasq-hosts.conf"
source = "dnsmasq.d/dnsmasq-hosts.conf.pktpl.hcl"
mode = "600"
owner = "dnsmasq"
group = "root"
},
{
destination = "/etc/conf.d/matchbox"
source = "conf.d/matchbox.conf.pktpl.hcl"
mode = "600"
owner = "root"
group = "root"
},
{
destination = "/etc/init.d/matchbox"
source = "init.d/matchbox.pktpl.hcl"
mode = "700"
owner = "root"
group = "root"
},
{
destination = "/etc/network/interfaces"
source = "network/interfaces.pktpl.hcl"
mode = "700"
owner = "root"
group = "root"
},
{
destination = "/etc/resolv.conf"
source = "resolv.conf.pktpl.hcl"
mode = "600"
owner = "root"
group = "root"
},
{
destination = "/etc/hostname"
source = "hostname.pktpl.hcl"
mode = "600"
owner = "root"
group = "root"
}
]
Repositories = {
AlpineEdgeTesting = {
type = "apk"
name = "testing"
url = "http://mirrors.ircam.fr/pub/alpine/edge/testing"
enabled = true
}
AlpineEdgeCommunity = {
type = "apk"
name = "testing"
url = "http://mirrors.ircam.fr/pub/alpine/edge/community"
enabled = true
}
}
Packages = {
dnsmasq = {
name = "dnsmasq"
action = "install"
}
terraform = {
name = "terraform"
action = "install"
}
git = {
name = "git"
action = "install"
}
kubectl = {
name = "kubectl"
action = "install"
}
gpg = {
name = "gpg"
action = "install"
}
vmtools = {
name = "open-vm-tools"
action = "install"
}
bash = {
name = "bash"
action = "install"
}
}
Vars = {
PXE = {
DHCPMode = "standalone"
DNSDomain = "k8s.in.nuonet.fr"
ListenInterface = "eth0"
GreetingMessage = "Nuo PXE Boot Server"
DelayTime = "5"
BootingMessage = "Booting from network the Nuo way"
DHCPRangeStart = "192.168.160.20"
DHCPRangeEnd = "192.168.160.60"
DHCPLeaseDuration = "48h"
TFTPRoot = "/var/lib/tftpboot"
}
DNSMasq = {
Hosts = [
{
Name = "reg.k8s.in.nuonet.fr"
IP = "192.168.160.11"
}
]
}
MatchBox = {
Hostname = "mb.k8s.in.nuonet.fr"
HTTPPort = "8080"
gRPCPort = "8081"
LogLevel = "info"
}
NIC = [
{
Name = "eth0"
IP = "192.168.160.10"
Mask = "255.255.254.0"
Gateway = "192.168.160.1"
}
]
DNS = [ "10.253.50.105" ]
Hosts = [
{
Name = "harbor.k8s.in.nuonet.fr"
IP = "192.168.160.11"
}
]
Set = { Hostname = "mb.k8s.in.nuonet.fr" }
}
Users = {}
Daemons = {
vm-tools = {
name = "open-vm-tools"
type = "auto"
enabled = true
}
matchbox = {
name = "matchbox"
type = "auto"
enabled = true
}
dnsmasq = {
name = "dnsmasq"
type = "auto"
enabled = true
}
local = {
name = "local"
type = "auto"
enabled = true
}
}
}
// Definition of the nuo-matchbox full configuration (with all the services)
NuoMatchBox = {
Name = "nuo-matchbox"
Globals = local.Globals
Services = {
NuoMatchBox = local.ServiceNuoMatchBox
}
}
}

View File

@@ -1,136 +0,0 @@
#Flavour base
build {
name = "base"
description = <<EOF
This builder builds a QEMU image from a nuo "virt" CD ISO file.
EOF
source "vmware-iso.nuo" {
output_directory = "${var.output_dir}/${var.version}/base"
vm_name = "${local.output_name}-${var.version}.img"
disk_size = 10240
iso_url = "${local.source_iso}"
iso_checksum = "${var.iso_cd_checksum}"
http_content = {
"/ssh-packer-pub.key" = data.sshkey.install.public_key
"/install.conf" = templatefile("${local.locations.templates}/conf/install/awnsers.pktpl.hcl", local.installOptsVMWare)
}
boot_command = [
"<wait10s>root<enter>",
"<wait1s><enter>",
"<wait1s>setup-interfaces<enter><wait1s><enter><wait1s><enter><wait1s><enter>",
"<wait1s>ifup eth0<enter>",
"<wait1s>mkdir -p .ssh<enter>",
"<wait1s>wget http://{{.HTTPIP}}:{{.HTTPPort}}/ssh-packer-pub.key -O .ssh/authorized_keys<enter><wait1s>",
"<wait1s>chmod 600 .ssh/authorized_keys<enter>",
"<wait1s>wget http://{{.HTTPIP}}:{{.HTTPPort}}/install.conf<enter><wait1s>",
"<wait1s>setup-sshd -c openssh -k .ssh/authorized_keys<enter><wait1s>",
]
}
source "qemu.nuo" {
output_directory = "${var.output_dir}/${var.version}/base"
vm_name = "${local.output_name}-${var.version}.img"
disk_size = 8000
iso_url = "${local.source_iso}"
iso_checksum = "${var.iso_cd_checksum}"
http_content = {
"/ssh-packer-pub.key" = data.sshkey.install.public_key
"/install.conf" = templatefile("${local.locations.templates}/conf/install/awnsers.pktpl.hcl", local.installOpts)
}
boot_command = [
"<wait5s>root<enter>",
"<wait1s><enter>",
"<wait1s>setup-interfaces<enter><wait1s><enter><wait1s><enter><wait1s><enter>",
"<wait1s>ifup eth0<enter>",
"<wait1s>mkdir -p .ssh<enter>",
"<wait1s>wget http://{{.HTTPIP}}:{{.HTTPPort}}/ssh-packer-pub.key -O .ssh/authorized_keys<enter><wait1s>",
"<wait1s>chmod 600 .ssh/authorized_keys<enter>",
"<wait1s>wget http://{{.HTTPIP}}:{{.HTTPPort}}/install.conf<enter><wait1s>",
"<wait1s>setup-sshd -c openssh -k .ssh/authorized_keys<enter><wait1s>",
]
}
source "virtualbox-iso.nuo" {
output_directory = "${var.output_dir}/${var.version}/base"
vm_name = "${local.output_name}-${var.version}.img"
disk_size = 10240
iso_url = "${local.source_iso}"
iso_checksum = "${var.iso_cd_checksum}"
guest_os_type = "Linux_64"
cd_label = "cidata"
cd_content = {
"meta-data" = jsonencode(local.instance_data)
"user-data" = templatefile("${local.locations.templates}/conf/cloud-init/user-data",
{
user = local.ssh_user,
password = local.ssh_password,
root_password = local.ssh_password,
runcmd = []
ssh_keys = [ data.sshkey.install.public_key ]
files = [
{
path = "/root/install.conf"
owner = "root"
group = "root"
permissions = "600"
content = [ "KEYMAPOPTS=\"fr fr\"",
"HOSTNAMEOPTS=\"-n nuo\"",
"INTERFACESOPTS=\"auto lo",
"iface lo inet loopback",
"auto eth0",
"iface eth0 inet dhcp",
" hostname nuo\"",
"DNSOPTS=\"\"",
"TIMEZONEOPTS=\"-z Europe/Paris\"",
"PROXYOPTS=\"none\"",
"APKREPOSOPTS=\"-r -c\"",
"SSHDOPTS=\"-c openssh -k /root/.ssh/authorized_keys\"",
"NTPOPTS=\"-c openntpd\"",
"DISKOPTS=\"-L -m sys /dev/sda\""]
}
]
}
)
}
boot_command = []
}
provisioner "shell" {
pause_before = "1s"
expect_disconnect = true # Because the previous step has rebooted the machine
script = "${local.locations.provisionning}/${var.name}-${var.short_version}-install.sh"
valid_exit_codes = [ 0, 141 ]
}
provisioner "shell" {
pause_before = "1s"
inline = [ "sh -cx 'mkdir -p ${local.builder_config.TemplateDir}'" ]
}
provisioner "shell" {
pause_before = "10s"
script = "${local.locations.provisionning}/${var.name}-${var.short_version}-postinstall.sh"
}
provisioner "shell" {
script = "${local.locations.provisionning}/letsencrypt.sh"
}
provisioner "file" {
destination = "/etc/conf.d/chronyd"
source = "${local.locations.templates}/conf/conf.d/"
}
post-processor "manifest" {
keep_input_artifact = true
}
post-processor "shell-local" {
inline = [
"/bin/sh ${path.cwd}/post-processors/sparsify.sh ${var.output_dir}/${var.version}/base ${var.image_version}"
]
}
}

View File

@@ -1,120 +0,0 @@
#Flavour nuo-matchbox
build {
name = "matchbox"
description = <<EOF
This builder builds a QEMU image from the base build output. The goal here is to install nuo-matchbox
with it's provisionning.
EOF
source "source.vmware-vmx.nuo" {
output_directory = "${var.output_dir}/${var.version}/provisionned/vmware/nuo-matchbox"
vm_name = "${local.output_name}-${var.version}-nuo-matchbox.img"
source_path = "${var.output_dir}/${var.version}/base/${local.output_name}-${var.version}.img.vmx"
boot_command = [ "<enter><enter><wait>" ]
ssh_clear_authorized_keys = true
}
source "source.qemu.nuo" {
output_directory = "${var.output_dir}/${var.version}/provisionned/nuo-matchbox"
vm_name = "${local.output_name}-${var.version}-nuo-matchbox.img"
iso_url = "${var.output_dir}/${var.version}/base/${local.output_name}-${var.version}.img"
iso_checksum = "none"
disk_size = 40960
disk_image = true
boot_command = [ "<enter><enter><wait>" ]
ssh_clear_authorized_keys = true
}
// Install templater and bootstraper
provisioner "shell" {
script = "${local.dirs.provisionning}/templater-install.sh"
}
// Copy configuration values on the image
provisioner "shell" {
inline = [
"sh -cx 'mkdir -p ${local.builder_config.TemplateDir}'",
"sh -cx 'mkdir -p ${local.builder_config.ValueDir}'"
]
}
// Copy configuration templates to the image
provisioner "file" {
destination = "${local.builder_config.TemplateDir}/"
source = "${local.dirs.templates}/conf/${build.name}/"
}
// Copy configuration values on the image
provisioner "file" {
destination = "${local.builder_config.ValueDir}/${build.name}.json"
content = "${jsonencode(local.NuoMatchBox)}"
}
// Copy nuo-matchbox boot provisionning script
provisioner "file" {
destination = "/etc/local.d/initmatchbox.start"
source = "${local.locations.provisionning}/conf/${build.name}/initmatchbox.start"
}
// Copy ssh Cadoles keys
provisioner "file" {
destination = "/tmp"
source = "${local.locations.provisionning}/ssh/cadoles/"
}
// Copy CNOUS SSH keys
provisioner "file" {
destination = "/tmp"
source = "${local.locations.provisionning}/ssh/cnous/"
}
provisioner "shell" {
inline = [
"sh -cx 'cat /tmp/*.pub >> /root/.ssh/authorized_keys'",
"sh -cx 'chmod -R 600 /root/.ssh/authorized_keys'"
]
}
provisioner "file" {
destination = "/etc/local.d/templater.start"
source = "${local.locations.provisionning}/conf/common/templater.start"
}
// Copy tftp provisionning script
provisioner "file" {
destination = "/etc/local.d/inittftp.start"
source = "${local.locations.provisionning}/conf/${build.name}/inittftp.start"
}
// Generate default configuration for kubernetes
provisioner "shell" {
max_retries = 3
inline = [ "sh -cx '/usr/local/bin/btr -c ${local.builder_config.ValueDir}/ -t ${local.builder_config.TemplateDir}'" ]
}
provisioner "file" {
destination = "/tmp/${build.name}.sh"
source = "${local.dirs.provisionning}/${build.name}.sh"
}
provisioner "shell" {
inline = [
"sh -cx 'sh /tmp/${build.name}.sh'"
]
}
provisioner "shell" {
inline = [
"chmod +x /etc/local.d/initmatchbox.start",
"chmod +x /etc/local.d/templater.start",
"chmod +x /etc/local.d/inittftp.start"
]
}
post-processor "shell-local" {
inline = [
"/bin/sh ${path.cwd}/post-processors/sparsify.sh ${var.output_dir}/${var.version}/provisionned/nuo-matchbox ${var.image_version}"
]
}
}

View File

@@ -1,13 +0,0 @@
#!/bin/sh
CLUSTER_NAME="nuo"
if kind get clusters -q | grep -q "${CLUSTER_NAME}"; then
podman start -f name="^${CLUSTER_NAME}"
else
kind create cluster --config /etc/cluster.yaml | tee -a /var/log/kind-init.log
fi
if ! command -v kubectl >/dev/null; then
apk add kubectl --repository=https://dl-cdn.alpinelinux.org/alpine/edge/community
fi

View File

@@ -1,38 +0,0 @@
#!/bin/sh
HARBOR_VERSION="2.8.2"
HARBOR_SOURCE_URL="https://github.com/goharbor/harbor/releases/download/v${HARBOR_VERSION}/"
HARBOR_INSTALLER="harbor-offline-installer-v${HARBOR_VERSION}.tgz"
HARBOR_INSTALLER_ASC="${HARBOR_INSTALLER}.asc"
export TERM=xterm
gpg --keyserver hkps://keyserver.ubuntu.com --receive-keys 644FF454C0B4115C
cd /srv
wget -q ${HARBOR_SOURCE_URL}${HARBOR_INSTALLER}
wget -q ${HARBOR_SOURCE_URL}${HARBOR_INSTALLER_ASC}
gpg -v --keyserver hkps://keyserver.ubuntu.com --verify ${HARBOR_INSTALLER}.asc
if [ $? -ne 0 ]; then
echo "Harbor sources ${HARBOR_SOURCE_URL}${HARBOR_INSTALLER} are corrupt"
exit 3
fi
tar xzvf ${HARBOR_INSTALLER}
if [ ! -f ${HARBOR_SSL_CERT} ];then
mkcert -install
mkcert -cert-file ${HARBOR_SSL_CERT} -key-file ${HARBOR_SSL_KEY} ${HARBOR_DOMAIN}
fi
cd harbor
ln -s /etc/harbor/harbor.yml .
service docker start
sleep 5
./prepare
./install.sh --with-notary --with-trivy
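# Optional check (illustrative): the offline installer starts the stack with
# docker-compose; inspect it from the harbor directory with
#   docker-compose ps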

View File

@@ -1,10 +0,0 @@
#!/bin/sh
# Remove swap
grep -v swap /etc/fstab > temp.fstab
cat temp.fstab > /etc/fstab
rm temp.fstab
swapoff -a
#lvremove -y /dev/vg0/lv_swap
#lvextend -y -r -l +100%FREE /dev/vg0/lv_root

View File

@@ -1,39 +0,0 @@
#!/bin/sh
VERSION=0.10.0
ARCH=amd64
BIN="matchbox"
FILENAME="matchbox-v${VERSION}-linux-${ARCH}.tar.gz"
URL="https://github.com/poseidon/matchbox/releases/download/v${VERSION}/${FILENAME}"
MATCHBOX_DIR="/var/lib/matchbox"
ASSETS_DIR="${MATCHBOX_DIR}/assets/"
TFTP_DIR="/var/lib/tftpboot"
MATCHBOX_USER="matchbox"
FL_VERSIONS="current 3374.2.0"
apk add wget
echo "Downloading matchbox"
cd /tmp
wget -q --show-progress "${URL}"
tar -xzvf "${FILENAME}"
cd ./matchbox-v${VERSION}-linux-${ARCH}
echo "Installing matchbox"
cp ${BIN} /usr/local/bin
echo "Installing get-flatcar"
cp ./scripts/get-flatcar /usr/local/bin
chmod +x /usr/local/bin/get-flatcar
adduser "${MATCHBOX_USER}"
mkdir -p "${ASSETS_DIR}"
mkdir -p "${TFTP_DIR}"
chown -R "${MATCHBOX_USER}:${MATCHBOX_USER}" "${MATCHBOX_DIR}"
chown -R "${MATCHBOX_USER}:${MATCHBOX_USER}" "${ASSETS_DIR}"
chown -R "${MATCHBOX_USER}:${MATCHBOX_USER}" "${ASSETS_DIR}"
ls -lhaR ${ASSETS_DIR}
cp -rp ./scripts/tls /root
exit "${?}"

View File

@@ -1,17 +0,0 @@
#!/bin/sh
#set -xeo pipefail
# Run the installer
setup-alpine -q
#yes | setup-alpine -e -f /root/install.conf
# Copy ssh keys
echo "Copy packer ssh key"
mount /dev/vg0/lv_root /mnt
cp -rp .ssh /mnt/root/
sync
umount /mnt
echo "Rebooting the host after install"
reboot -nf

View File

@@ -1,23 +0,0 @@
#!/bin/sh
set -xeo pipefail
apk add --no-cache wget curl jq haveged ca-certificates rsyslog bash shadow
rc-update add haveged boot
rc-update add rsyslog boot
rc-update add sshd boot
# Generate root password
pass=$(openssl rand -base64 32 | tee -a .secret)
chmod 600 .secret
echo -e "${pass}\n${pass}" | passwd
# Remove expect package
# Prevent logs spamming like "process '/sbin/getty -L 0 ttyS0 vt100' (pid 2516) exited. Scheduling for restart."
# We don't need an access to ttyS0
sed -i 's@^\(ttyS0::respawn.*\)@#\1@' /etc/inittab
usermod --password "$(echo "Cadoles;21" | openssl passwd -1 -stdin)" root
sync

View File

@@ -1,47 +0,0 @@
#alpine-config
user:
name: ${user}
password: ${password}
chpasswd:
expire: False
apk:
repositories:
- base_url: https://mirrors.ircam.fr/pub/alpine/
repos: [ "main", "community" ]
package_update: true
packages:
- tmux
- vim
- openssh-server
- openssh-sftp-server
users:
- name: root
lock-passwd: false
passwd: ${root_password}
ssh_authorized_keys:
%{ for sk in ssh_keys ~}
- ${sk}
%{ endfor ~}
%{ if files != [] ~}
write_files:
%{ for fl in files ~}
- path: ${fl.path}
owner: ${fl.owner}:${fl.group}
permissions: '0${fl.permissions}'
content: |
%{ for li in fl.content ~}
${li}
%{ endfor ~}
%{ endfor ~}
%{ endif ~}
%{ if runcmd != [] ~}
# Work around network interface down after boot
runcmd:
%{ for cmd in runcmd ~}
- ${cmd}
%{ endfor ~}
%{ endif ~}

View File

@@ -1,337 +0,0 @@
# Global OpenRC configuration settings
# ${Vars.RootlessDocker}
# Set to "YES" if you want the rc system to try and start services
# in parallel for a slight speed improvement. When running in parallel we
# prefix the service output with its name as the output will get
# jumbled up.
# WARNING: whilst we have improved parallel, it can still potentially lock
# the boot process. Don't file bugs about this unless you can supply
# patches that fix it without breaking other things!
#rc_parallel="NO"
# Set rc_interactive to "YES" and you'll be able to press the I key during
# boot so you can choose to start specific services. Set to "NO" to disable
# this feature. This feature is automatically disabled if rc_parallel is
# set to YES.
#rc_interactive="YES"
# If we need to drop to a shell, you can specify it here.
# If not specified we use $SHELL, otherwise the one specified in /etc/passwd,
# otherwise /bin/sh
# Linux users could specify /sbin/sulogin
#rc_shell=/bin/sh
# Do we allow any started service in the runlevel to satisfy the dependency
# or do we want all of them regardless of state? For example, if net.eth0
# and net.eth1 are in the default runlevel then with rc_depend_strict="NO"
# both will be started, but services that depend on 'net' will work if either
# one comes up. With rc_depend_strict="YES" we would require them both to
# come up.
#rc_depend_strict="YES"
# rc_hotplug controls which services we allow to be hotplugged.
# A hotplugged service is one started by a dynamic dev manager when a matching
# hardware device is found.
# Hotplugged services appear in the "hotplugged" runlevel.
# If rc_hotplug is set to any value, we compare the name of this service
# to every pattern in the value, from left to right, and we allow the
# service to be hotplugged if it matches a pattern, or if it matches no
# patterns. Patterns can include shell wildcards.
# To disable services from being hotplugged, prefix patterns with "!".
# If rc_hotplug is not set or is empty, all hotplugging is disabled.
# Example - rc_hotplug="net.wlan !net.*"
# This allows net.wlan and any service not matching net.* to be hotplugged.
# Example - rc_hotplug="!net.*"
# This allows services that do not match "net.*" to be hotplugged.
# rc_logger launches a logging daemon to log the entire rc process to
# /var/log/rc.log
# NOTE: Linux systems require the devfs service to be started before
# logging can take place and as such cannot log the sysinit runlevel.
#rc_logger="NO"
# Through rc_log_path you can specify a custom log file.
# The default value is: /var/log/rc.log
#rc_log_path="/var/log/rc.log"
# If you want verbose output for OpenRC, set this to yes. If you want
# verbose output for service foo only, set it to yes in /etc/conf.d/foo.
#rc_verbose=no
# By default we filter the environment for our running scripts. To allow other
# variables through, add them here. Use a * to allow all variables through.
#rc_env_allow="VAR1 VAR2"
# By default we assume that all daemons will start correctly.
# However, some do not - a classic example is that they fork and return 0 AND
# then child barfs on a configuration error. Or the daemon has a bug and the
# child crashes. You can set the number of milliseconds start-stop-daemon
# waits to check that the daemon is still running after starting here.
# The default is 0 - no checking.
#rc_start_wait=100
# rc_nostop is a list of services which will not stop when changing runlevels.
# This still allows the service itself to be stopped when called directly.
#rc_nostop=""
# rc will attempt to start crashed services by default.
# However, it will not stop them by default as that could bring down other
# critical services.
#rc_crashed_stop=NO
#rc_crashed_start=YES
# Set rc_nocolor to yes if you do not want colors displayed in OpenRC
# output.
#rc_nocolor=NO
##############################################################################
# MISC CONFIGURATION VARIABLES
# These variables are shared between many init scripts
# Set unicode to NO to turn off unicode support for keyboards and screens.
#unicode="YES"
# This is how long fuser should wait for a remote server to respond. The
# default is 60 seconds, but it can be adjusted here.
#rc_fuser_timeout=60
# Below is the default list of network fstypes.
#
# afs ceph cifs coda davfs fuse fuse.glusterfs fuse.sshfs gfs glusterfs lustre
# ncpfs nfs nfs4 ocfs2 shfs smbfs
#
# If you would like to add to this list, you can do so by adding your
# own fstypes to the following variable.
#extra_net_fs_list=""
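# For example (hypothetical value), to treat rclone FUSE mounts as network
# filesystems:
#extra_net_fs_list="fuse.rclone"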
##############################################################################
# SERVICE CONFIGURATION VARIABLES
# These variables are documented here, but should be configured in
# /etc/conf.d/foo for service foo and NOT enabled here unless you
# really want them to work on a global basis.
# If your service has characters in its name which are not legal in
# shell variable names and you configure the variables for it in this
# file, those characters should be replaced with underscores in the
# variable names as shown below.
# Some daemons are started and stopped via start-stop-daemon.
# We can set some things on a per service basis, like the nicelevel.
# These need to be exported
#export SSD_NICELEVEL="0"
# Or the ionice level. The format is class[:data] , just like the
# --ionice start-stop-daemon parameter.
#export SSD_IONICELEVEL="0:0"
# Or the OOM score adjustment.
#export SSD_OOM_SCORE_ADJ="0"
# Pass ulimit parameters
# If you are using bash in POSIX mode for your shell, note that the
# ulimit command uses a block size of 512 bytes for the -c and -f
# options
#rc_ulimit="-u 30"
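# For example (hypothetical value), under POSIX-mode bash
# rc_ulimit="-c 2048" would cap core dumps at 2048 x 512-byte blocks,
# i.e. 1 MiB.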
# It's possible to define extra dependencies for services like so
#rc_config="/etc/foo"
#rc_need="openvpn"
#rc_use="net.eth0"
#rc_after="clock"
#rc_before="local"
#rc_provide="!net"
# You can also enable the above commands here for each service. Below is an
# example for service foo.
#rc_foo_config="/etc/foo"
#rc_foo_need="openvpn"
#rc_foo_after="clock"
# Below is an example for service foo-bar. Note that the '-' is illegal
# in a shell variable name, so we convert it to an underscore.
#rc_foo_bar_config="/etc/foo-bar"
#rc_foo_bar_need="openvpn"
#rc_foo_bar_after="clock"
# You can also remove dependencies.
# This is mainly used for saying which services do NOT provide net.
#rc_net_tap0_provide="!net"
# This is the subsystem type.
# It is used to match against keywords set by the keyword call in the
# depend function of service scripts.
#
# It should be set to the value representing the environment this file is
# PRESENTLY in, not the virtualization the environment is capable of.
# If it is commented out, automatic detection will be used.
#
# The list below shows all possible settings as well as the host
# operating systems where they can be used and autodetected.
#
# "" - nothing special
# "docker" - Docker container manager (Linux)
# "jail" - Jail (DragonflyBSD or FreeBSD)
# "lxc" - Linux Containers
# "openvz" - Linux OpenVZ
# "prefix" - Prefix
# "rkt" - CoreOS container management system (Linux)
# "subhurd" - Hurd subhurds (to be checked)
# "systemd-nspawn" - Container created by systemd-nspawn (Linux)
# "uml" - Usermode Linux
# "vserver" - Linux vserver
# "xen0" - Xen0 Domain (Linux and NetBSD)
# "xenU" - XenU Domain (Linux and NetBSD)
#rc_sys=""
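# For example (assumed scenario), when this system runs inside a Docker
# container:
#rc_sys="docker"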
# If you use openrc-init, which is currently only available on Linux,
# this is the default runlevel to activate after "sysinit" and "boot"
# when booting.
#rc_default_runlevel="default"
# On Linux and Hurd, this is the number of ttys allocated for logins.
# It is used in the consolefont, keymaps, numlock and termencoding
# service scripts.
rc_tty_number=12
##############################################################################
# LINUX CGROUPS RESOURCE MANAGEMENT
# This sets the mode used to mount cgroups.
# "hybrid" mounts cgroups version 2 on /sys/fs/cgroup/unified and
# cgroups version 1 on /sys/fs/cgroup.
# "legacy" mounts cgroups version 1 on /sys/fs/cgroup
# "unified" mounts cgroups version 2 on /sys/fs/cgroup
rc_cgroup_mode="hybrid"
# This is a list of controllers which should be enabled for cgroups version 2
# when hybrid mode is being used.
# Controllers listed here will not be available for cgroups version 1.
rc_cgroup_controllers="cpuset cpu io memory hugetlb openrc pids"
# This variable contains the cgroups version 2 settings for your services.
# If this is set in this file, the settings will apply to all services.
# If you want different settings for each service, place the settings in
# /etc/conf.d/foo for service foo.
# The format is to specify the setting and value followed by a newline.
# Multiple settings and values can be specified.
# For example, you would use this to set the maximum memory and maximum
# number of pids for a service.
#rc_cgroup_settings="
#memory.max 10485760
#pids.max max
#"
#
# For more information about the adjustments that can be made with
# cgroups version 2, see Documentation/cgroups-v2.txt in the linux kernel
# source tree.
#rc_cgroup_settings=""
# This switch controls whether or not cgroups version 1 controllers are
# individually mounted under /sys/fs/cgroup in hybrid or legacy mode.
rc_controller_cgroups="YES"
# The following setting turns on the memory.use_hierarchy setting in the
# root memory cgroup for cgroups v1.
# It must be set to yes in this file if you want this functionality.
#rc_cgroup_memory_use_hierarchy="NO"
# The following settings allow you to set up values for the cgroups version 1
# controllers for your services.
# They can be set in this file; however, if you do this, the settings
# will apply to all of your services.
# If you want different settings for each service, place the settings in
# /etc/conf.d/foo for service foo.
# The format is to specify the names of the settings followed by their
# values. Each variable can hold multiple settings.
# For example, you would use this to set the cpu.shares setting in the
# cpu controller to 512 for your service.
# rc_cgroup_cpu="
# cpu.shares 512
# "
#
# For more information about the adjustments that can be made with
# cgroups version 1, see Documentation/cgroups-v1/* in the linux kernel
# source tree.
# Set the blkio controller settings for this service.
#rc_cgroup_blkio=""
# Set the cpu controller settings for this service.
#rc_cgroup_cpu=""
# Add this service to the cpuacct controller (any value means yes).
#rc_cgroup_cpuacct=""
# Set the cpuset controller settings for this service.
#rc_cgroup_cpuset=""
# Set the devices controller settings for this service.
#rc_cgroup_devices=""
# Set the hugetlb controller settings for this service.
#rc_cgroup_hugetlb=""
# Set the memory controller settings for this service.
#rc_cgroup_memory=""
# Set the net_cls controller settings for this service.
#rc_cgroup_net_cls=""
# Set the net_prio controller settings for this service.
#rc_cgroup_net_prio=""
# Set the pids controller settings for this service.
#rc_cgroup_pids=""
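# For example (hypothetical value), to cap a service at 128 processes
# through the cgroups version 1 pids controller:
#rc_cgroup_pids="
#pids.max 128
#"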
# Set this to YES if you want all of the processes in a service's cgroup
# killed when the service is stopped or restarted.
# Be aware that setting this to yes means all of a service's
# child processes will be killed. Keep this in mind if you set this to
# yes here instead of for the individual services in
# /etc/conf.d/<service>.
# To perform this cleanup manually for a stopped service, you can
# execute cgroup_cleanup with /etc/init.d/<service> cgroup_cleanup or
# rc-service <service> cgroup_cleanup.
# If the kernel includes support for cgroup2's cgroup.kill, this is used
# to reliably tear down the cgroup.
# If this fails, the process followed in this cleanup is the following:
# 1. Send stopsig (sigterm if it isn't set) to all processes left in the
# cgroup, immediately followed by sigcont.
# 2. Send sighup to all processes in the cgroup if rc_send_sighup is
# yes.
# 3. Delay for rc_timeout_stopsec seconds.
# 4. Send sigkill to all processes in the cgroup unless disabled by
# setting rc_send_sigkill to no.
# rc_cgroup_cleanup="NO"
# If this is yes, we will send sighup to the processes in the cgroup
# immediately after stopsig and sigcont.
#rc_send_sighup="NO"
# This is the amount of time in seconds that we delay after sending sigcont
# and optionally sighup, before we optionally send sigkill to all
# processes in the cgroup.
# The default is 90 seconds.
#rc_timeout_stopsec="90"
# If this is set to no, we do not send sigkill to all processes in the
# cgroup.
#rc_send_sigkill="YES"
##############################################################################
# SUPERVISE DAEMON CONFIGURATION VARIABLES
# These variables set more reasonable defaults for supervise-daemon(8).
# They may be overridden on a per service basis.
# Wait this number of seconds before restarting a daemon after it crashes.
respawn_delay=2
# Sets the maximum number of times a daemon will be respawned during a respawn
# period. If a daemon dies more than this number of times during a respawn
# period, supervise-daemon(8) will give up trying to respawn it and exit.
# 0 means unlimited.
respawn_max=5
# Sets the length in seconds of a respawn period.
respawn_period=1800
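# Taken together, the defaults above mean: each restart waits 2 seconds,
# and a daemon that dies more than 5 times within one 1800-second
# (30-minute) period is no longer respawned.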

View File

@@ -1,40 +0,0 @@
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
name: ${Vars.Cluster.Name}
networking:
podSubnet: "${Vars.Cluster.PodSubNet}"
serviceSubnet: "${Vars.Cluster.ServieSubNet}"
nodes:
%{ for nd in Vars.Cluster.Nodes }
- role: ${nd.Role}
image: kindest/node:v${Vars.Cluster.Version}
%{ if nd.Role == "control-plane"}
kubeadmConfigPatches:
- |
kind: InitConfiguration
%{ if Vars.Cluster.IngressReady }
nodeRegistration:
kubeletExtraArgs:
node-labels: "ingress-ready=true"
%{ endif }
extraPortMappings:
- containerPort: 31000
hostPort: 31000
listenAddress: "0.0.0.0" # Optional, defaults to "0.0.0.0"
- containerPort: 80
hostPort: 8080
listenAddress: "0.0.0.0" # Optional, defaults to "0.0.0.0"
%{ if Vars.Cluster.IngressReady }
labels:
ingress-ready: true
%{ endif }
%{ endif }
%{ if nd.Role == "worker" }
kubeadmConfigPatches:
- |
kind: JoinConfiguration
nodeRegistration:
kubeletExtraArgs:
system-reserved: memory=2Gi
%{ endif }
%{ endfor ~}
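# Example rendering (a sketch; every value below is hypothetical:
# Name="dev", PodSubNet="10.100.0.0/16", ServieSubNet="10.110.0.0/16",
# Version="1.27.3", IngressReady=false, one control-plane and one worker):
#
#   kind: Cluster
#   apiVersion: kind.x-k8s.io/v1alpha4
#   name: dev
#   networking:
#     podSubnet: "10.100.0.0/16"
#     serviceSubnet: "10.110.0.0/16"
#   nodes:
#   - role: control-plane
#     image: kindest/node:v1.27.3
#     kubeadmConfigPatches:
#     - |
#       kind: InitConfiguration
#     extraPortMappings:
#     - containerPort: 31000
#       hostPort: 31000
#       listenAddress: "0.0.0.0"
#     - containerPort: 80
#       hostPort: 8080
#       listenAddress: "0.0.0.0"
#   - role: worker
#     image: kindest/node:v1.27.3
#     kubeadmConfigPatches:
#     - |
#       kind: JoinConfiguration
#       nodeRegistration:
#         kubeletExtraArgs:
#           system-reserved: memory=2Gi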