Recette de construction de l'image "Quid" basée sur Debian 12 #6
|
@ -0,0 +1,16 @@
|
|||
# Packer variable values for the Debian cloud-image recipe.
# NOTE(review): the change is titled "Debian 12" but these values still
# target Debian 11 (bullseye) — confirm which release is intended before
# merging (the checksum below is release-specific).
name            = "debian"
version         = "11"
short_version   = "11"
code_name       = "bullseye"
arch            = "amd64"
source_url      = "https://cloud.debian.org/images/cloud/bullseye/latest"
iso_cd_checksum = "9ae04227e89047b72970a0d5f1897e2573fd0d4bba3d381086307af604072bad9e33174357fd3c3545a2a2b5b83ce19f3dbb5c352e86d5173b833df59b4a5741"
image_dir_name  = "latest"

# The cloud image boots straight to a prompt; a single <enter> is enough.
boot_command = ["<enter>"]

# Former ISO-installer boot sequence, kept for reference:
# boot_command = [
#   "<enter>",
#   "preseed/url=http://{{ .HTTPIP }}:{{ .HTTPPort }}/preseed.cfg<enter>",
#   "<wait>",
#   "<wait1s>mkdir -p .ssh<enter>",
#   "<wait1s>wget http://{{.HTTPIP}}:{{.HTTPPort}}/ssh-packer-pub.key -O .ssh/authorized_keys<enter><wait1s>",
#   "<wait1s>chmod 600 .ssh/authorized_keys<enter>",
# ]
|
|
@ -0,0 +1,6 @@
|
|||
# Filesystem layout used by the "builder" templating tool inside the image.
locals {
  builder_config = {
    TemplateDir = "/usr/share/builder/templates"
    ValueDir    = "/usr/share/builder/values"
  }
}
|
|
@ -0,0 +1,7 @@
|
|||
# Variables shared by every service template rendered into the image.
locals {
  Globals = {
    Vars = {
      PrometheusPort = "9090"
    }
  }
}
|
|
@ -0,0 +1,33 @@
|
|||
# "timestamp" template function replacement
locals {
  # Well-known directories of the recipe tree.
  locations = {
    recipes         = "${path.cwd}/recipes/${var.name}"
    templates       = "${path.cwd}/recipes/${var.name}/templates"
    provisionning   = "${path.cwd}/recipes/${var.name}/provisionning"
    post-processors = "${path.cwd}/recipes/${var.name}/post-processor"
    tools           = "${path.cwd}/tools"
  }
  dirs = local.locations

  timestamp = regex_replace(timestamp(), "[- TZ:]", "")

  # Redundant "${var.x}"-only interpolations removed (Packer warns on them).
  output_name = var.name

  # Source cloud image and its published checksum list.
  source_checksum_url = "file:${var.source_url}/SHA512SUMS"
  source_iso          = "${var.source_url}/debian-${var.version}-generic-${var.arch}.qcow2"
  source_checksum     = var.iso_cd_checksum

  # Credentials used by Packer to reach the transient build VM.
  # SECURITY(review): this password is committed in clear text; confirm it
  # is rotated or disabled in the final image.
  ssh_user     = "root"
  ssh_password = "PbkRc1vup7Wq5n4r"

  disk_size = 8000
  memory    = 512

  # cloud-init NoCloud meta-data.
  instance_data = {
    "instance-id" = var.name
  }

  # Values injected into the unattended-install template (QEMU/KVM disks).
  installOpts = {
    hostname    = var.name
    user        = "eole"
    disk_device = "/dev/vda"
  }

  # Same values for VMWare builds, where the disk shows up as /dev/sda.
  installOptsVMWare = {
    hostname    = var.name
    user        = "eole"
    disk_device = "/dev/sda"
  }
}
|
|
@ -0,0 +1,62 @@
|
|||
# Flavour base
# Fix: a stray code-review line ("pcaseiro marked this conversation as
# resolved") was embedded in the middle of the vmware-iso source block and
# would have broken HCL parsing; it has been removed.
build {
  name        = "base"
  description = <<EOF
This builder builds a QEMU image from a Debian cloud image.
EOF

  source "vmware-iso.debian" {
    output_directory = "${var.output_dir}/${var.version}/base"
    vm_name          = "${local.output_name}-${var.version}.img"
    disk_size        = 10240
    iso_url          = local.source_iso
    iso_checksum     = var.iso_cd_checksum
    # Served over HTTP to the installer running inside the VM.
    http_content = {
      "/ssh-packer-pub.key" = data.sshkey.install.public_key
      "/install.conf"       = templatefile("${local.locations.templates}/conf/install/awnsers.pktpl.hcl", local.installOptsVMWare)
    }
    boot_command = var.boot_command
    cd_label     = "cidata"
  }

  source "qemu.debian" {
    output_directory = "${var.output_dir}/${var.version}/base"
    vm_name          = "${local.output_name}-${var.version}.img"
    iso_url          = local.source_iso
    iso_checksum     = var.iso_cd_checksum
    disk_image       = true
    disk_size        = 10240
    # cloud-init NoCloud seed ISO.
    cd_content = {
      "meta-data" = jsonencode(local.instance_data)
      "user-data" = templatefile("${path.cwd}/recipes/debian/templates/conf/cloud-init/user-data",
        {
          user     = local.ssh_user,
          password = local.ssh_password,
          runcmd   = var.cloud_init_runcmd
      })
    }
    # http_content = {
    #   "/ssh-packer-pub.key" = data.sshkey.install.public_key
    #   "/install.conf"       = templatefile("${local.locations.templates}/conf/install/awnsers.pktpl.hcl", local.installOpts)
    # }
    cd_label     = "cidata"
    boot_command = var.boot_command
  }

  # Distribution-specific installation steps.
  provisioner "shell" {
    script = "${local.locations.provisionning}/${var.name}/${var.name}-${var.short_version}-install.sh"
  }

  # Install the Let's Encrypt CA chain into the system trust store.
  provisioner "shell" {
    script = "${local.locations.provisionning}/letsencrypt.sh"
  }

  # Shrink the produced disk image.
  post-processor "shell-local" {
    inline = [
      "/bin/sh ${path.cwd}/post-processors/sparsify.sh ${var.output_dir}/${var.version}/base ${var.image_version}",
    ]
  }

  post-processor "manifest" {
    keep_input_artifact = true
  }
}
|
|
@ -0,0 +1,20 @@
|
|||
# Plugins this recipe depends on.
packer {
  required_plugins {
    sshkey = {
      version = ">= 1.0.1"
      source  = "github.com/ivoronin/sshkey"
    }
    vmware = {
      version = ">= 1.0.8"
      source  = "github.com/hashicorp/vmware"
    }
    qemu = {
      source  = "github.com/hashicorp/qemu"
      version = "~> 1"
    }
  }
}

# Ephemeral SSH key pair injected into the VM during the build.
data "sshkey" "install" {
  type = "ed25519"
}
|
|
@ -0,0 +1,31 @@
|
|||
#!/bin/sh
#
# Compress ("sparsify") every *.img disk image found under a work
# directory, renaming each to <name>_<version>.img and leaving a symlink
# with the original name behind so callers keep a stable path.
#
# Usage: sparsify.sh <workdir> <version>
#
# Fixes: removed the unused ${DOMAIN} argument passed to findImages,
# replaced `[ $(which ...) ]` with `command -v` (hoisted out of the
# loop), quoted all expansions, used `rm -f` on a regular file instead
# of `rm -rf`, and made `cd` failures fatal.

if [ "${#}" -ne 2 ]; then
    echo Missing arguments
    exit 2
fi

WORKDIR=${1}
VERSION=${2}

# Print every .img file below the given directory.
findImages() {
    find "${1}" -iname "*.img"
}

# Give the hypervisor a moment to release the image files.
sleep 5

if ! command -v virt-sparsify >/dev/null 2>&1; then
    echo "Sparsify skipped 'virt-sparsify' command is missing"
    exit 0
fi

for imageName in $(findImages "${WORKDIR}"); do
    newName=$(echo "${imageName}" | sed "s/.img/_${VERSION}.img/g")
    virt-sparsify --compress --tmp ./ --format qcow2 "${imageName}" "${newName}"
    if [ "${?}" -eq 0 ]; then
        rm -f "${imageName}"
        cd "${WORKDIR}" || exit 1
        ln -s "$(basename "${newName}")" "$(basename "${imageName}")"
        echo "${newName}" "${imageName}"
        cd - || exit 1
    fi
done
|
|
@ -0,0 +1,104 @@
|
|||
#!/usr/bin/env bash
#
# Generate all the configuration files:
#  - source context variables from ENV_FILE (OpenNebula one-context),
#  - collect values from every JSON file in VLS_DIR,
#  - override per-service values from matching environment variables,
#  - render every template in TPL_DIR against the merged values.

ENV_FILE=${ENV_FILE:-/var/run/one-context/one_env}
TPL_DIR="/usr/share/builder/templates"
VLS_DIR="/usr/share/builder/values"
CONFIG=""

# shellcheck disable=SC1090
if [ -f "${ENV_FILE}" ]; then
    . "${ENV_FILE}"
fi

# The templater is optional: exit quietly when it is not installed.
BTR="$(command -v btr)"
if [ "${?}" -ne 0 ]; then
    echo "Warning: Nothing to do the templater is not installed"
    exit 0
fi

if [ ! -e "${TPL_DIR}" ]; then
    echo "Error: The template dir is missing (${TPL_DIR})"
    exit 1
fi

# Fix: message said "template dir" for the values dir (copy-paste error).
if [ ! -e "${VLS_DIR}" ]; then
    echo "Error: The values dir is missing (${VLS_DIR})"
    exit 1
fi

# jsonQuery <data> <query>: run a jq query over a JSON string.
jsonQuery() {
    local data="${1}"
    local query="${2}"
    echo "${data}" | jq -cr "${query}"
}

# NAME: @jsonMerge
# AIM: Merge two json structures
# NOTES:
# The last one has the last word:
# if you have the same key in A and B
# this keeps the value of the B structure.
# PARAMS:
# $1: original JSON Structure
# $2: updated JSON Structure
jsonMerge() {
    local data="${1}"
    local data2="${2}"

    echo "${data} ${data2}" | jq -cr -s ".[0] * .[1]"
}

# jsonUpdateVal <json> <key> <value>: set <key> to the string <value>.
jsonUpdateVal() {
    local json="${1}"
    local key="${2}"
    local value="${3}"

    echo "${json}" | jq --arg a "${value}" "${key} = \$a"
}

# Build the merged values JSON:
#  - concatenate every *.json file from VLS_DIR,
#  - merge RAW_CONFIG on top when provided,
#  - override each .Services.<svc>.Vars.<key>[.<var>] entry from the
#    matching upper-cased environment variable (e.g. KEY_VAR).
getValues() {

    local values=""

    for file in $(find "${VLS_DIR}" -name "*.json"); do
        values="${values}$(cat "${file}")"
    done

    if [ -n "${RAW_CONFIG}" ]; then
        values="$(jsonMerge "${values}" "${RAW_CONFIG}")"
    fi

    for svc in $(echo "${values}" | jq -cr '.Services|keys[]'); do
        for key in $(echo "${values}" | jq -cr ".Services.${svc}.Vars|keys[]"); do
            ukey=${key^^}
            # Probe whether this entry is itself an object (has sub-keys).
            # Fix: the original escaped the quotes (\"...\"), so jq received
            # the query wrapped in literal quote characters and the probe
            # never tested the actual structure.
            vkeys="$(echo "${values}" | jq -cr ".Services.${svc}.Vars.${key}|keys[]" 2>/dev/null)"
            if [ ${?} -eq 0 ]; then
                for var in $(echo "${values}" | jq -cr ".Services.${svc}.Vars.${key}|keys[]"); do
                    uvar=${var^^}
                    val=$(eval echo "\$${ukey}_${uvar}")
                    if [ -n "${val}" ]; then
                        values=$(jsonUpdateVal "${values}" ".Services.${svc}.Vars.${key}.${var}" "${val}")
                    fi
                done
            else
                values=$(jsonUpdateVal "${values}" ".Services.${svc}.Vars.${key}" "${!ukey}")
            fi
        done
    done
    echo ${values}
}

# Render every template against the values file given as $1.
processTemplates() {
    ${BTR} -t ${TPL_DIR} -c "${1}"
}

VALUES=$(getValues)
file=$(mktemp)
echo "${VALUES}" > "${file}"
processTemplates "${file}"
rm -f "${file}"
|
|
@ -0,0 +1,64 @@
|
|||
#!/sbin/openrc-run

# OpenRC service wrapper around a docker-compose project. The managed
# project is selected via the service-name suffix: a symlink named
# "<base>.<name>" controls ${SUBCFGDIR}/<name>/docker-compose.yml.

: ${SUBCFGDIR:=/srv}
DOCKER_COMPOSE_UP_ARGS=${DOCKER_COMPOSE_UP_ARGS-"--no-build --no-recreate --no-deps"}

SUBSVC="${SVCNAME#*.}"
[ -z "${SUBSVC}" ] && exit 1
: ${SUBCFG:="${SUBCFGDIR}/${SUBSVC}/docker-compose.yml"}
DOCOCMD="/usr/bin/docker-compose"
export COMPOSE_HTTP_TIMEOUT=300

description="Manage docker services defined in ${SUBCFG}"
extra_commands="configtest build"
description_configtest="Check configuration via \"docker-compose -f ${SUBCFG} config\""
description_build="Run \"docker-compose -f ${SUBCFG} build\""

depend() {
    need localmount net docker
    use dns
    after docker
}

# Validate the compose file; used as a guard by build() and start().
configtest() {
    if [ ! -f "${SUBCFG}" ]; then
        eerror "The config file ${SUBCFG} does not exist!"
        return 1
    fi
    if "${DOCOCMD}" -f "${SUBCFG}" config >&/dev/null; then
        einfo "config: ok"
    else
        eerror "config: error"
        return 1
    fi
}

build() {
    configtest || return 1
    ebegin "Building dockerservice ${SUBSVC}"
    "${DOCOCMD}" -f "${SUBCFG}" build
    eend $?
}

start() {
    configtest || return 1
    ebegin "Starting dockerservice ${SUBSVC}"
    # Give the docker daemon a moment to settle after boot.
    sleep 5
    "${DOCOCMD}" -f "${SUBCFG}" up -d ${DOCKER_COMPOSE_UP_ARGS}
    eend $?
}

stop() {
    ebegin "Stopping dockerservice ${SUBSVC}"
    "${DOCOCMD}" -f "${SUBCFG}" stop --timeout=300
    eend $?
}

# Report started when the project has at least one running process.
status() {
    if [ "$("${DOCOCMD}" -f "${SUBCFG}" top | wc -l)" -gt "0" ]; then
        einfo "status: started"
    else
        einfo "status: stopped"
        return 3
    fi
}
|
|
@ -0,0 +1,181 @@
|
|||
#!/bin/sh
#
# First-boot initialisation of a Kubernetes node (leader, master or
# worker), driven by OpenNebula one-context variables and OneGate.
#
# Fixes: '&' used instead of '&&' when testing the join parameters
# (backgrounded the tests instead of AND-ing them), ${masterAdd} typo
# (wrote an empty master address to /root/kube.token), non-POSIX '=='
# inside [ ] under /bin/sh, and a typo in the readiness error message.

ENV_FILE=${ENV_FILE:-/var/run/one-context/one_env}
LOG_FILE="/var/log/initkubernets.log"
FIRST_BOOT="/var/run/firstboot.flag"

infoLog() {
    echo "Info: $@" | tee -a ${LOG_FILE}
}

errorLog() {
    echo "Error: $@" | tee -a ${LOG_FILE}
}

# Poll OneGate until VM ${1} publishes READY=YES, or ${2} seconds pass.
waitReadyState() {
    local vmID="${1}"
    local timeout="${2}"

    local tick=0
    while true; do
        local ready=$(onegate vm show ${vmID} --json | jq -rc ".VM.USER_TEMPLATE.READY")
        if [ "${ready}" = "YES" ]; then
            return 0
        elif [ "${timeout}" -eq "${tick}" ]; then
            return ${timeout}
        else
            sleep 1
            tick=$((tick + 1))
        fi
    done
}

# Publish the cluster join tokens through OneGate when available,
# otherwise drop them in /root/kube.token.
returnToken() {
    infoLog "Returning tokens"
    local caSecretKey="${1}"
    local caToken=$(openssl x509 -in /etc/kubernetes/pki/ca.crt -noout -pubkey | openssl rsa -pubin -outform DER 2>/dev/null | sha256sum | cut -d' ' -f1)
    local kubeToken=$(kubeadm token list | awk '/authentication,signing.*The default*/ {print $1}')
    local masterAddr=$(awk -F '/' '/server/ {print $3}' /etc/kubernetes/admin.conf)

    if [ -n "${ONEGATE_ENDPOINT}" ]; then
        infoLog "Onegate detected"
        data="READY=YES"
        data="${data} MASTER_ADDR=${masterAddr}"
        data="${data} MASTER_TOKEN=${kubeToken}"
        data="${data} MASTER_CA_TOKEN=sha256:${caToken}"
        data="${data} MASTER_CA_SECRET_KEY=${caSecretKey}"
        onegate vm update --data "${data}"
        infoLog "Onegate data seted"
    else
        infoLog "Onegate is not present"
        # Fix: was ${masterAdd} (undefined), losing the address.
        echo "${masterAddr} ${kubeToken} ${caToken}" >> /root/kube.token
        infoLog "Tokens are available at /root/kube.token"
    fi
}

# Join the cluster: as a worker by default, as an additional
# control-plane node when ${1} is non-empty.
joinCluster() {
    local master="${MASTER_ADDR}"
    local token="${MASTER_TOKEN}"
    local caToken="${MASTER_CA_TOKEN}"
    local caSecretKey="${MASTER_CA_SECRET_KEY}"
    local sname="${SERVICE_NAME}"

    if [ -n "${ONEGATE_ENDPOINT}" ]; then
        local masterID=$(onegate service show --json | jq -c '.SERVICE.roles[] | select(.name == "leader") | .nodes[0].deploy_id')
        if [ "${?}" -eq 0 ]; then
            waitReadyState ${masterID} 600
            if [ "${?}" -ne 0 ]; then
                errorLog "Master node is not ready after 600s"
                return 3
            fi
            local masterInfo=$(onegate vm show ${masterID} --json | \
                jq -cr ".VM.USER_TEMPLATE.MASTER_ADDR, .VM.USER_TEMPLATE.MASTER_TOKEN, .VM.USER_TEMPLATE.MASTER_CA_TOKEN,.VM.USER_TEMPLATE.MASTER_CA_SECRET_KEY, .VM.TEMPLATE.NIC[0].IP")
            master=$(echo ${masterInfo} | cut -d " " -f 1)
            token=$(echo ${masterInfo} | cut -d " " -f 2)
            caToken=$(echo ${masterInfo} | cut -d " " -f 3)
            caSecretKey=$(echo ${masterInfo} | cut -d " " -f 4)
            masterIP=$(echo ${masterInfo} | cut -d " " -f 5)
            sname=$(onegate service show --json | jq -cr ".SERVICE.name")
        fi

        # Setting dns resolution for cluster
        echo "${masterIP} ${sname}" >> /etc/hosts
        onegate service show --json | jq -rc '.SERVICE.roles[].nodes[].vm_info.VM | .TEMPLATE.NIC[].IP + " " + .NAME' >> /etc/hosts
    fi
    # Fix: was '[ ... ] & [ ... ] & [ ... ]', which backgrounds the first
    # two tests instead of combining them.
    if [ -n "${master}" ] && [ -n "${token}" ] && [ -n "${caToken}" ]; then
        opts="--node-name $(hostname -f)"
        opts="${opts} --token ${token}"
        opts="${opts} --discovery-token-ca-cert-hash ${caToken}"
        if [ -n "${1}" ]; then
            opts="${opts} --control-plane"
            opts="${opts} --certificate-key ${caSecretKey}"
        fi
        opts="${opts} ${master}"

        kubeadm join ${opts} | tee -a "${LOG_FILE}"
    else
        errorLog "Something is missing, can't join the cluster:"
        errorLog " Master addr: [${master}]"
        errorLog " Master token: [${token}]"
        errorLog " Master CA token: [${caToken}]"
        return 3
    fi
}

# Resolve the OneFlow service name, retrying for up to 30s; falls back
# to the local FQDN on timeout.
getServiceName() {
    local sname=$(onegate service show --json | jq -cr ".SERVICE.name")
    local tmout=30
    local tick=0
    while true; do
        if [ -z "${sname}" ]; then
            sname=$(onegate service show --json | jq -cr ".SERVICE.name")
        else
            echo ${sname}
            return 0
        fi
        sleep 1
        tick=$((tick + 1))
        if [ ${tmout} -eq ${tick} ]; then
            hostname -f
            return 3
        fi
    done
}

# Bootstrap the first control-plane node and install the CNI (cilium).
initLeader() {
    sname="$(hostname -f)"

    if [ -n "${ONEGATE_ENDPOINT}" ]; then
        sname=$(getServiceName)
        sip=$(onegate vm show --json | jq -rc ".VM.TEMPLATE.NIC[0].IP")
        echo "${sip} ${sname} $(hostname -f)" >> /etc/hosts
        onegate service show --json | jq -rc '.SERVICE.roles[].nodes[].vm_info.VM | .TEMPLATE.NIC[].IP + " " + .NAME' >> /etc/hosts
    fi

    caSecretKey=$(date | sha256sum | awk '{print $1}')

    infoLog "Kubernetes init started"
    kubeadm init --pod-network-cidr=10.244.0.0/16 \
        --node-name="${SET_HOSTNAME}" \
        --control-plane-endpoint "${sname}:6443" \
        --upload-certs --certificate-key "${caSecretKey}" | tee -a "${LOG_FILE}"
    infoLog "Kubernetes init ended"

    infoLog "Configuring kubectl"
    mkdir /root/.kube
    ln -s /etc/kubernetes/admin.conf /root/.kube/config
    infoLog "kubectl configured"

    infoLog "Installing cilium"
    sleep 20
    kubectl config view --minify -o jsonpath='{.clusters[].name}'
    sleep 20
    cilium install --helm-set 'cni.binPath=/usr/libexec/cni' --wait | tee -a "${LOG_FILE}"
    infoLog "Cilium is installed"

    returnToken "${caSecretKey}"
}

# Dispatch on SERVER_ROLE (leader / worker / master).
initKube() {
    # Fix: '==' inside [ ] is not POSIX and fails under dash.
    if [ "${SERVER_ROLE}" = "leader" ]; then
        initLeader
    elif [ "${SERVER_ROLE}" = "worker" ]; then
        joinCluster
    elif [ "${SERVER_ROLE}" = "master" ]; then
        joinCluster "${SERVER_ROLE}"
    fi
    touch ${FIRST_BOOT}
    infoLog "Kubernetes cluster init is finished"
}

# shellcheck disable=SC1090
if [ -f "${ENV_FILE}" ]; then
    . "${ENV_FILE}"
fi

# Only run the cluster bootstrap once.
if [ -f "${FIRST_BOOT}" ]; then
    exit 0
else
    uuidgen > /etc/machine-id
    swapoff -a # Make sure swap is disabled
    initKube &
fi
|
|
@ -0,0 +1,3 @@
|
|||
#!/bin/sh

# Re-mount the root filesystem as a shared subtree so that mount events
# propagate into container namespaces.
mount --make-rshared /
|
|
@ -0,0 +1,25 @@
|
|||
#!/bin/sh
#
# Import the Flatcar image-signing key, then download each wanted
# Flatcar release into the matchbox assets directory, retrying each
# download up to 30 times.
#
# Fixes: '[[ ]]' is a bashism and fails under /bin/sh — replaced with
# POSIX '[ ]'; the progress message printed the whole ${FL_VERSIONS}
# list instead of the current ${version}; cd failures are now fatal.

FL_VERSIONS="current 3374.2.0"
MATCHBOX_DIR="/var/lib/matchbox"
ASSETS_DIR="${MATCHBOX_DIR}/assets/"

GPG_FNAME="Flatcar_Image_Signing_Key.asc"
GPG_KEYS_URL="https://www.flatcar.org/security/image-signing-key/"

cd /tmp || exit 1
curl -L -O "${GPG_KEYS_URL}/${GPG_FNAME}"
gpg --import --keyid-format LONG "${GPG_FNAME}"
cd - || exit 1

echo "Provisionning matchbox with flatcar images"
tout=30
for version in ${FL_VERSIONS}; do
    for i in $(seq 1 ${tout}); do
        echo " * ${version} stable image (try ${i})"
        /usr/local/bin/get-flatcar stable "${version}" "${ASSETS_DIR}"
        if [ "${?}" -eq 0 ]; then
            break
        fi
    done
done
|
|
@ -0,0 +1,10 @@
|
|||
#!/bin/sh

# Download the iPXE bootloaders (UEFI and legacy PXE) into the directory
# given as the first argument.

dest="${1}"

ipxeEFISource="http://boot.ipxe.org/ipxe.efi"
kpxeSource="http://boot.ipxe.org/undionly.kpxe"

# Fix: abort when the destination is missing instead of silently
# downloading into the current directory.
cd "${dest}" || exit 1
wget "${ipxeEFISource}"
wget "${kpxeSource}"
|
|
@ -0,0 +1 @@
|
|||
harbor
|
|
@ -0,0 +1 @@
|
|||
matchbox
|
|
@ -0,0 +1,13 @@
|
|||
#!/bin/sh

# Configure the jenkins-slave agent from context environment variables
# by rewriting its configuration file in place.

CONF="/etc/conf.d/jenkins-slave"
if [ -e "/etc/jenkins-slave.conf" ]; then
    CONF="/etc/jenkins-slave.conf"
fi

# Total memory rounded up to the next GiB, e.g. "8g".
# Fix: useless `cat | grep | awk` pipeline collapsed into a single awk.
TOTAL_MEMORY=$(awk '/MemTotal/ { printf "%sg", int($2/1024/1024)+1 }' /proc/meminfo)
sed -i "s|^JENKINS_SLAVE_NAME=.*$|JENKINS_SLAVE_NAME='slave-$ETH0_IP'|" "${CONF}"
sed -i "s|^JENKINS_SLAVE_USERNAME=.*$|JENKINS_SLAVE_USERNAME='$JENKINS_SLAVE_USERNAME'|" "${CONF}"
sed -i "s|^JENKINS_SLAVE_PASSWORD=.*$|JENKINS_SLAVE_PASSWORD='$JENKINS_SLAVE_PASSWORD'|" "${CONF}"
sed -i "s|^JENKINS_MASTER_URL=.*$|JENKINS_MASTER_URL='$JENKINS_MASTER_URL'|" "${CONF}"
sed -i "s|^JENKINS_SLAVE_LABELS=.*$|JENKINS_SLAVE_LABELS='docker docker-compose mem-$TOTAL_MEMORY $JENKINS_SLAVE_LABELS'|" "${CONF}"
|
|
@ -0,0 +1,31 @@
|
|||
#!/usr/bin/env bash

# Register this machine as a GitLab CI runner when GITLAB_URL is set in
# the one-context environment.

ENV_FILE=${ENV_FILE:-/var/run/one-context/one_env}

# $TOKENTXT is available only through the env. file
# shellcheck disable=SC1090
if [ -f "${ENV_FILE}" ]; then
    . "${ENV_FILE}"
fi

###

if [ -n "${GITLAB_URL}" ]; then
    if command -v gitlab-runner; then
        # Fix: initialise opts and pass it unquoted (SC2086 disabled on
        # purpose) so that an unset GITLAB_SHELL does not hand an empty
        # positional argument to gitlab-runner.
        opts=""
        if [ -n "${GITLAB_SHELL}" ]; then
            opts="--shell=${GITLAB_SHELL}"
        fi
        # shellcheck disable=SC2086
        gitlab-runner register \
            --non-interactive \
            --url="${GITLAB_URL}" \
            --registration-token="${GITLAB_TOKEN}" \
            --executor="${GITLAB_EXECUTOR}" \
            --description="${GITLAB_RUNNER_NAME}" \
            --tag-list="${GITLAB_TAG_LIST}" \
            --locked=false \
            --access-level=not_protected \
            --run-untagged=false \
            ${opts}
    fi
fi
|
|
@ -0,0 +1,80 @@
|
|||
#!/usr/bin/env bash
#
# Generate all the configuration files
# Get all the values from the VLS_DIR
# Process each template from the TPL_DIR with this values
#

ENV_FILE=${ENV_FILE:-/var/run/one-context/one_env}
TPL_DIR="/usr/share/builder/templates"
VLS_DIR="/usr/share/builder/values"
CONFIG=""

# Fix: guard the sourcing on file existence, as the sibling templater
# scripts do; sourcing a missing file fails under `set -e` shells.
# shellcheck disable=SC1090
if [ -f "${ENV_FILE}" ]; then
    . "${ENV_FILE}"
fi

# The templater is optional: exit quietly when it is not installed.
BTR="$(command -v btr)"
if [ "${?}" -ne 0 ]; then
    echo "Warning: Nothing to do the templater is not installed"
    exit 0
fi

if [ ! -e "${TPL_DIR}" ]; then
    echo "Error: The template dir is missing (${TPL_DIR})"
    exit 1
fi

# Fix: message said "template dir" for the values dir (copy-paste error).
if [ ! -e "${VLS_DIR}" ]; then
    echo "Error: The values dir is missing (${VLS_DIR})"
    exit 1
fi

# jsonQuery <data> <query>: run a jq query over a JSON string.
jsonQuery() {
    local data="${1}"
    local query="${2}"
    echo "${data}" | jq -cr "${query}"
}

# NAME: @jsonMerge
# AIM: Merge two json structures
# NOTES:
# The last one has the last word:
# if you have the same key in A and B
# this keeps the value of the B structure.
# PARAMS:
# $1: original JSON Structure
# $2: updated JSON Structure
jsonMerge() {
    local data="${1}"
    local data2="${2}"

    echo "${data} ${data2}" | jq -cr -s ".[0] * .[1]"
}

# Build the merged values JSON: concatenate every *.json file from
# VLS_DIR, merge RAW_CONFIG on top, then override each top-level key
# from the matching upper-cased environment variable.
getValues() {

    local values=""

    for file in $(find "${VLS_DIR}" -name "*.json"); do
        values="${values}$(cat "${file}")"
    done

    if [ -n "${RAW_CONFIG}" ]; then
        values="$(jsonMerge "${values}" "${RAW_CONFIG}")"
    fi

    for key in $(echo "${values}" | jq -cr '.|keys[]'); do
        ukey=${key^^}
        if [ -n "${!ukey}" ]; then
            values="$(jsonMerge "${values}" "{\"${key}\":\"${!ukey}\"}")"
        fi
    done
    echo ${values}
}

# Render every template against the values given as $1.
processTemplates() {
    ${BTR} -t ${TPL_DIR} -c "${1}"
}
VALUES=$(getValues)
echo ${VALUES}
processTemplates "${VALUES}"
|
|
@ -0,0 +1,21 @@
|
|||
#!/usr/bin/env bash

# Enable and start the k3s server (plus its dnsmasq dependency) when the
# K3S_ROLE context variable designates this node as a server.

ENV_FILE=${ENV_FILE:-/var/run/one-context/one_env}

# $TOKENTXT is available only through the env. file
# shellcheck disable=SC1090
if [ -f "${ENV_FILE}" ]; then
    . "${ENV_FILE}"
fi

###

# An unset/empty K3S_ROLE never equals "server", so the nested
# non-empty check of the original is folded into a single test.
if [ "${K3S_ROLE}" = "server" ]; then
    rc-update add dnsmasq default
    service dnsmasq start

    rc-update add k3s default
    service k3s start
fi
|
|
@ -0,0 +1,3 @@
|
|||
{
|
||||
"instance-id": "iid-local01"
|
||||
}
|
|
@ -0,0 +1,9 @@
|
|||
#!/bin/sh

# Base provisioning for the Debian image.

# Set the machine hostname from the first argument.
echo "${1}" >/etc/hostname

# Bring the system fully up to date and install the tools needed by
# later provisioning steps.
apt-get update
apt-get -y dist-upgrade
apt-get install wget curl -y

# The image is configured by Packer, not cloud-init: disable cloud-init
# for subsequent boots.
touch /etc/cloud/cloud-init.disabled
|
|
@ -0,0 +1,107 @@
|
|||
# To see all available options execute this command once the install is done:
|
||||
# sudo less /var/log/installer/cdebconf/questions.dat
|
||||
# If you need information about an option use the command below (example for keymap):
|
||||
# grep -A 4 "keyboard-configuration/xkb-keymap" /var/log/installer/cdebconf/templates.dat
|
||||
|
||||
# Use network mirror for package installation
|
||||
# d-i apt-setup/use_mirror boolean true
|
||||
|
||||
# Automatic installation
|
||||
d-i auto-install/enable boolean true
|
||||
|
||||
# "linux-server" is substituted by "linux-image-amd64"
|
||||
# Possible options : "linux-image-amd64"(default) or "linux-image-rt-amd64"
|
||||
d-i base-installer/kernel/override-image string linux-server
|
||||
|
||||
# Configure hardware clock
|
||||
d-i clock-setup/utc boolean true
|
||||
d-i clock-setup/utc-auto boolean true
|
||||
|
||||
d-i netcfg/choose_interface select auto
|
||||
d-i netcfg/use_dhcp boolean true
|
||||
|
||||
# d-i console-setup/ask_detect boolean false
|
||||
|
||||
# d-i debconf/frontend select noninteractive
|
||||
|
||||
# Set OS locale
|
||||
d-i debian-installer/language string fr
|
||||
d-i debian-installer/country string FR
|
||||
d-i debian-installer/locale string fr_FR.UTF-8
|
||||
|
||||
# d-i debian-installer/framebuffer boolean false
|
||||
|
||||
# Reboot once the install is done
|
||||
d-i finish-install/reboot_in_progress note
|
||||
|
||||
# Bootloader options
|
||||
d-i grub-installer/only_debian boolean true
|
||||
d-i grub-installer/with_other_os boolean true
|
||||
d-i grub-installer/bootdev string /dev/vda
|
||||
|
||||
# Set the keyboard layout
|
||||
d-i keyboard-configuration/xkb-keymap select fr
|
||||
|
||||
# Mirror from which packages will be downloaded
|
||||
d-i mirror/country string manual
|
||||
d-i mirror/http/directory string /debian
|
||||
d-i mirror/http/hostname string httpredir.debian.org
|
||||
|
||||
# Configure http proxy if needed "http://[[user][:pass]@]host[:port]/"
|
||||
d-i mirror/http/proxy string
|
||||
|
||||
# Disk configuration
|
||||
d-i partman-efi/non_efi_system boolean true
|
||||
d-i partman-auto-lvm/guided_size string max
|
||||
d-i partman-auto/choose_recipe select atomic
|
||||
d-i partman-auto/method string lvm
|
||||
d-i partman-lvm/confirm boolean true
|
||||
d-i partman-lvm/confirm_nooverwrite boolean true
|
||||
d-i partman-lvm/device_remove_lvm boolean true
|
||||
d-i partman/choose_partition select finish
|
||||
d-i partman/confirm boolean true
|
||||
d-i partman/confirm_nooverwrite boolean true
|
||||
d-i partman/confirm_write_new_label boolean true
|
||||
|
||||
# User configuration
|
||||
d-i passwd/root-login boolean true
|
||||
d-i passwd/root-password-crypted password $1$hA6nLFTh$FitTH.KXJWluJN9z7lDjr0
|
||||
d-i passwd/user-fullname string packer
|
||||
d-i passwd/user-uid string 1000
|
||||
d-i passwd/username string packer
|
||||
d-i passwd/user-password-crypted password $1$hA6nLFTh$FitTH.KXJWluJN9z7lDjr0
|
||||
|
||||
# Extra packages to be installed
|
||||
d-i pkgsel/include string sudo
|
||||
d-i pkgsel/include string openssh-server build-essential
|
||||
|
||||
d-i pkgsel/install-language-support boolean false
|
||||
d-i pkgsel/update-policy select none
|
||||
|
||||
# Whether to upgrade packages after debootstrap
|
||||
d-i pkgsel/upgrade select full-upgrade
|
||||
|
||||
# Set timezone
|
||||
d-i time/zone string Europe/Paris
|
||||
|
||||
# Allow weak user password
|
||||
d-i user-setup/allow-password-weak boolean true
|
||||
|
||||
# Home folder encryption
|
||||
d-i user-setup/encrypt-home boolean false
|
||||
|
||||
# Do not scan additional CDs
|
||||
apt-cdrom-setup apt-setup/cdrom/set-first boolean false
|
||||
|
||||
# Use network mirror
|
||||
apt-mirror-setup apt-setup/use_mirror boolean true
|
||||
|
||||
# Disable popularity contest
|
||||
popularity-contest popularity-contest/participate boolean false
|
||||
|
||||
# Select base install
|
||||
tasksel tasksel/first multiselect standard, ssh-server
|
||||
|
||||
# Setup passwordless sudo for packer user
|
||||
d-i preseed/late_command string \
|
||||
echo "packer ALL=(ALL:ALL) NOPASSWD:ALL" > /target/etc/sudoers.d/packer && chmod 0440 /target/etc/sudoers.d/packer
|
|
@ -0,0 +1,26 @@
|
|||
#!/bin/sh

# Install the Let's Encrypt root and intermediate certificates into the
# system CA trust store.
#
# Fixes: `set -o pipefail` is not POSIX and aborts dash (/bin/sh) —
# dropped (no pipelines here need it); update-ca-certificates only
# scans files ending in .crt, so the downloaded PEMs are now stored
# with a .crt extension (this is what the commented-out openssl
# conversion was for).
set -e

DESTDIR=/usr/local/share/ca-certificates
UPDATE_CERTS_CMD=update-ca-certificates
CERTS="$(cat <<EOF
https://letsencrypt.org/certs/isrgrootx1.pem
https://letsencrypt.org/certs/isrg-root-x2.pem
https://letsencrypt.org/certs/lets-encrypt-r3.pem
https://letsencrypt.org/certs/lets-encrypt-e1.pem
https://letsencrypt.org/certs/lets-encrypt-r4.pem
https://letsencrypt.org/certs/lets-encrypt-e2.pem
EOF
)"

cd "$DESTDIR"

for cert in $CERTS; do
    echo "Downloading '$cert'..."
    filename=$(basename "$cert")
    # Store as .crt so update-ca-certificates picks the file up.
    wget --tries=10 --timeout=30 -O "${filename%.pem}.crt" "$cert"
done

$UPDATE_CERTS_CMD
|
|
@ -0,0 +1,12 @@
|
|||
#!/bin/sh

# Install the OpenNebula one-context package.
#
# NOTE(review): this fetches the *Alpine* (.apk) build of one-context
# and installs it with apk — it cannot work on a Debian image; confirm
# this script is only invoked from Alpine-based recipes (the original
# review thread raised the same concern). Stray code-review residue
# lines that had been pasted after the last command were removed.

set -e

ONE_CONTEXT_VERSION="6.4.0"
ONE_CONTEXT_PKG_VERSION="1"
PKG="one-context-${ONE_CONTEXT_VERSION}-r${ONE_CONTEXT_PKG_VERSION}.apk"
PKG_URL="https://github.com/OpenNebula/addon-context-linux/releases/download/v${ONE_CONTEXT_VERSION}/${PKG}"

cd /tmp || exit 3
wget -q --no-check-certificate "${PKG_URL}"
apk add --allow-untrusted --no-cache "${PKG}"
|
|
@ -0,0 +1,102 @@
|
|||
#!/usr/bin/env bash
#
# Generate all the configuration files:
#  - source context variables from ENV_FILE (OpenNebula one-context),
#  - collect values from every JSON file in VLS_DIR,
#  - override per-service values from matching environment variables,
#  - render every template in TPL_DIR against the merged values.

ENV_FILE=${ENV_FILE:-/var/run/one-context/one_env}
TPL_DIR="/usr/share/builder/templates"
VLS_DIR="/usr/share/builder/values"
CONFIG=""

# Fix: guard the sourcing on file existence, as the sibling templater
# script does; sourcing a missing file aborts some shells.
# shellcheck disable=SC1090
if [ -f "${ENV_FILE}" ]; then
    . "${ENV_FILE}"
fi

# The templater is optional: exit quietly when it is not installed.
BTR="$(command -v btr)"
if [ "${?}" -ne 0 ]; then
    echo "Warning: Nothing to do the templater is not installed"
    exit 0
fi

if [ ! -e "${TPL_DIR}" ]; then
    echo "Error: The template dir is missing (${TPL_DIR})"
    exit 1
fi

# Fix: message said "template dir" for the values dir (copy-paste error).
if [ ! -e "${VLS_DIR}" ]; then
    echo "Error: The values dir is missing (${VLS_DIR})"
    exit 1
fi

# jsonQuery <data> <query>: run a jq query over a JSON string.
jsonQuery() {
    local data="${1}"
    local query="${2}"
    echo "${data}" | jq -cr "${query}"
}

# NAME: @jsonMerge
# AIM: Merge two json structures
# NOTES:
# The last one has the last word:
# if you have the same key in A and B
# this keeps the value of the B structure.
# PARAMS:
# $1: original JSON Structure
# $2: updated JSON Structure
jsonMerge() {
    local data="${1}"
    local data2="${2}"

    echo "${data} ${data2}" | jq -cr -s ".[0] * .[1]"
}

# jsonUpdateVal <json> <key> <value>: set <key> to the string <value>.
jsonUpdateVal() {
    local json="${1}"
    local key="${2}"
    local value="${3}"

    echo "${json}" | jq --arg a "${value}" "${key} = \$a"
}

# Build the merged values JSON:
#  - concatenate every *.json file from VLS_DIR,
#  - merge RAW_CONFIG on top when provided,
#  - override each .Services.<svc>.Vars.<key>[.<var>] entry from the
#    matching upper-cased environment variable (e.g. KEY_VAR).
getValues() {

    local values=""

    for file in $(find "${VLS_DIR}" -name "*.json"); do
        values="${values}$(cat "${file}")"
    done

    if [ -n "${RAW_CONFIG}" ]; then
        values="$(jsonMerge "${values}" "${RAW_CONFIG}")"
    fi

    for svc in $(echo "${values}" | jq -cr '.Services|keys[]'); do
        for key in $(echo "${values}" | jq -cr ".Services.${svc}.Vars|keys[]"); do
            ukey=${key^^}
            # Probe whether this entry is itself an object (has sub-keys).
            # Fix: the original escaped the quotes (\"...\"), so jq received
            # the query wrapped in literal quote characters and the probe
            # never tested the actual structure.
            vkeys="$(echo "${values}" | jq -cr ".Services.${svc}.Vars.${key}|keys[]" 2>/dev/null)"
            if [ ${?} -eq 0 ]; then
                for var in $(echo "${values}" | jq -cr ".Services.${svc}.Vars.${key}|keys[]"); do
                    uvar=${var^^}
                    val=$(eval echo "\$${ukey}_${uvar}")
                    if [ -n "${val}" ]; then
                        values=$(jsonUpdateVal "${values}" ".Services.${svc}.Vars.${key}.${var}" "${val}")
                    fi
                done
            else
                values=$(jsonUpdateVal "${values}" ".Services.${svc}.Vars.${key}" "${!ukey}")
            fi
        done
    done
    echo ${values}
}

# Render every template against the values file given as $1.
processTemplates() {
    ${BTR} -t ${TPL_DIR} -c "${1}"
}

VALUES=$(getValues)
file=$(mktemp)
echo "${VALUES}" > "${file}"
processTemplates "${file}"
rm -f "${file}"
|
|
@ -0,0 +1,21 @@
|
|||
#!/usr/bin/env bash

# Enable and start the k3s server (plus its dnsmasq dependency) when the
# K3S_ROLE context variable designates this node as a server.

ENV_FILE=${ENV_FILE:-/var/run/one-context/one_env}

# $TOKENTXT is available only through the env. file
# shellcheck disable=SC1090
if [ -f "${ENV_FILE}" ]; then
    . "${ENV_FILE}"
fi

###

# An unset/empty K3S_ROLE never equals "server", so the nested
# non-empty check of the original is folded into a single test.
if [ "${K3S_ROLE}" = "server" ]; then
    rc-update add dnsmasq default
    service dnsmasq start

    rc-update add k3s default
    service k3s start
fi
|
|
@ -0,0 +1 @@
|
|||
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDDph3zh6ojSvH44k13z9B6xj+Hargo3uzkxnYv5s5NI4yagNuBXEc3aS++KdocND+FtVfLK+iVE8qHo2bvmpMmVkqU6WU2apN7DfIP0QGLlSGeo+UOZ9hGeEDlgVO4AOnZKZ5kPGBEPZ84JXuE9CmhKfwEVCK8w3B8XQttA8alFl4A4/4F14x2w4njsSLY1H3b0qah7hgYKU5zHIGLg8Lxx+1BxGIF0l5n5m5rqAskRNaF+aYbs0CcWHv49bPK0sJJ0qPV2r2sq8BlzuZFHExnZRIxpsIXdce4Bm4rdlGi7tBmmurLk4OOtDkwvhD0LMaNJf10k6QLSmRUTVzgsYz/dmGxopbMtwwIXkwi014uSZgi8wAuznXx5I4j2TUGPZHOVf+1iw/yaxWlgTVOSoX7ZxyhDgW5cCgZZGNzU5UWe0vUuVTB+hfSMj50/Q6+Vi92/mDMbPhm4nBoVzD5DT15mB+yGyN45Ej61m0JzVUyZexfvVaffEug1/u5dnwilP0WGKr4i2OXxOXtvSdAs5rlZjvppZk6IxRCwXIcPwEFL97ZrQZAxlVS5Nh+ZnlSwTe3zfQhzHj1ao0AdCAHFPUEdoUPJhSb0OjyCvZ9XZ1KCkXhuhuN/3IUhuoWl4soNCeC3KmU/USx1wda438Exj0hM1mTyBZScDPGyD9nw78DGw== Philippe Caseiro
|
|
@ -0,0 +1 @@
|
|||
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDZxr8C81Dm5Zl2AtDzTVa8hFs04aV1z8ANrXYVHnLf7gEG4c1BI9iWbm94gVCQT4IvoKR5oZxjxGnx1a7VaX6h6dt33+p/s2IJiwG+9/DykPnImw+ALTcnMcyrwOYh68jnQIOGkYzK/VaHRzrvFNuoVWIU+FqfN+sW+bLQWi9v/K5oiup83xQBze6kjMEL2PT48bJwT/dQgP5cnTTEYwcOK/Yes1Cmb+VqjAs5B3uiHDoch10fy4b4duuALozPGhgoOfTLqe9Ekbt8PdIhUzGxFCw79W7IBA9vw79tYBy4B2et8Zb9sf+sMmxPINDkouYmfSnU0PjNjida7Tii2IEWbrb/qbSkRNcyIBmpGKz6VnSIvomv4FA9dGkOLYRyvTjAM6Shy5aiGV8F7T9hMxm3zGDjiVseyPVtMdSjM2SCx95uPCH5oSrj8M1OIjC2D+w3DsmTPFvTjA1gmKEYnXfFj82DvO+wDcbb6/DF2qS6y5rNpdnPWDb57iBqKeZISQ5x+h8arV0U3yItHoi7z4Cb51V29pdBE0xgFx5DE5akuPO3RC+BP0CK242HBdb94YXQCfmoQ1dV59mvu0ObAhP4CH/efOqONHXjTG9eurQyJWUr8yYO9DI7HkQHwvYDS7xuEO9yvs7gizm22FOTcxBPc4M/KFhPfnUs7Nyfw6I0Nw== vfebvre@cadoles.com
|
|
@ -0,0 +1 @@
|
|||
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOsoXFfQcqFp6+5QbB1o1ZpjCGeiPMM9aOK2DoZoMM/7 nicolas.melin@cnous.fr
|
|
@ -0,0 +1 @@
|
|||
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCwyKvtyfZibpHNDDsfg7N6PHPnv9AzA2PowGd7iqF6YRv6CgGPnUixWE791bmekr57TR1QwW58aSEPSQMfLBwo0OwZ7GXYbOb9Fdb6WHAUJHSyMNsFvakgjq0g7TERMw3UksiYpUBCLgvWhF5jNjKsXgK3LyMUVqJs9KlUBt6elxy3CWoMYaWVJTQwXqLEbvr7W9F1rb9PQi80vxcSZXgk5XPPZH4vh7oN7GLB5UwaTFRh4lcup0xnV938gSgLxttPg4t5li5cmvXXMgtCrIDj7JPh9Cic+UXo80cV14nOpX23nuu408Veys/4p5tYiYFCg6NnUtW2dJrfyga9W1h6nc/6JaY8aXdoE+pi7lL7XrMvJPQxVYdwA9rPUBSZAIOmZQQx2aKFMsXocyVXQDzLQyg8lAF9gbMkjXH7DluXd+s0OAdijW9VFxhjutojaC76vhH+ZqSq511vdCTuq+6juW/By/pYQRtKiL1jJqfQoC+JU8RmOVOml5ciT7I0OM/0dakdIMYINX1FaRuSYb8wm0k3pKh+PGmMigja5lY7Bv8M89gRRw+8bJ42h5XkR0Jd04Wagd9eFXvaLa9OdarwF5rE2d6NM5Gfr2wJ4XuDMC7C3r/b6U3sZr6CWvQ5URrXS9OLtZG09DtEGIIuMcu0pgqclitVDi06Ffz5dZMnVQ== olivier.perrot@cnous.fr
|
|
@ -0,0 +1,23 @@
|
|||
#!/bin/sh

# Download the Cadoles provisioning tools (templater, bootstraper) from the
# forge and install them into TOOL_DIR.
#   $1: install directory (default /usr/local/bin)
#   $2/$3: owner user/group (currently unused, kept for compatibility)

set -ex

TOOL_DIR="${1:-/usr/local/bin}"
TOOL_USER="${2:-root}"
TOOL_GROUP="${3:-root}"
ATTACHMENT_URL="https://forge.cadoles.com/attachments/"

# installTool NAME URL
# Download URL into ${TOOL_DIR}/NAME and make it executable.
installTool() {
    NAME="${1}"
    URL="${2}"

    # -f: fail on HTTP errors so `set -e` aborts instead of installing an
    # HTML error page as a binary.
    # NOTE(review): -k disables TLS verification for the forge — kept for
    # compatibility with its certificate, but worth revisiting.
    curl -k -f -o "${TOOL_DIR}/${NAME}" "${URL}"
    chmod +x "${TOOL_DIR}/${NAME}"
}

# NOTE(review): `apk` is Alpine-specific; per the review thread this
# script is not yet adapted for the Debian recipe (and there may be no
# packaged templater for Debian).
apk add curl

# Install templater
installTool "tpr" "https://forge.cadoles.com/attachments/242b3cba-8d07-4b89-80ab-7c12253a8524"
# Install bootstraper
installTool "btr" "https://forge.cadoles.com/attachments/e8442b2a-2065-4282-b4a4-648681fa044c"
|
|
@ -0,0 +1,27 @@
|
|||
#!/bin/sh

#
# Quick and dirty script to add disk space
# It creates a new PV (with the additionnal disk),
# a new VG and a new LV with 100% disk space
# The names and devices are provided with env variables:
#  - PV_DEVICE : The /dev/xxx device
#  - VG_NAME: The new vg name
#  - LV_NAME: Then new lv name
#  - LV_MTP: The mount point for the FS created on the LV
#  - LV_FS: The fstype of the new FS
#
# FIX: the test was `[ -e ${PV_DEVICE} ]`; with PV_DEVICE unset that
# collapses to the one-argument form `[ -e ]`, which is TRUE, so the
# script went on to run pvcreate with an empty device. Quoting restores
# the intended existence check (and protects every expansion below).
if [ -e "${PV_DEVICE}" ]; then
    pvcreate "${PV_DEVICE}"
    vgcreate "${VG_NAME}" "${PV_DEVICE}"
    lvcreate -Ay -l 100%FREE -n "${LV_NAME}" "${VG_NAME}"
    mkfs."${LV_FS}" "/dev/${VG_NAME}/${LV_NAME}"
    if [ ! -d "${LV_MTP}" ]; then
        mkdir -p "${LV_MTP}"
    fi
    mount "/dev/${VG_NAME}/${LV_NAME}" "${LV_MTP}"
    # Persist the mount across reboots.
    echo "/dev/${VG_NAME}/${LV_NAME} ${LV_MTP} ${LV_FS} rw,relatime 0 1" >> /etc/fstab
else
    echo "${PV_DEVICE} is missing"
    exit 3
fi
|
|
@ -0,0 +1,102 @@
|
|||
source qemu "debian" {
  cpus             = 1
  memory           = "${local.memory}"
  accelerator      = "kvm"
  vnc_bind_address = "0.0.0.0"

  # FIX(review): must be true so a CI robot can build the image
  # unattended (no display available on the build host).
  headless = true

  # Serve the `http` directory via HTTP, used for preseeding the Debian installer.
  #http_directory = "${path.cwd}/recipes/${var.name}/provisionning/${var.name}/http"
  http_port_min = 9990
  http_port_max = 9999

  # SSH ports to redirect to the VM being built
  host_port_min = 2222
  host_port_max = 2229

  # This user is configured in the preseed file.
  ssh_username         = "${local.ssh_user}"
  ssh_private_key_file = data.sshkey.install.private_key_path
  ssh_wait_timeout     = "1000s"

  shutdown_command = "/sbin/poweroff"

  # Builds a compact image
  disk_compression   = true
  disk_discard       = "unmap"
  skip_compaction    = false
  disk_detect_zeroes = "unmap"

  format = "qcow2"

  boot_wait = "5s"
}
|
||||
|
||||
# VMware ISO builder — mirrors the qemu source, producing an OVA.
source "vmware-iso" "debian" {
  cpus             = 1
  disk_type_id     = 0
  memory           = "${local.memory}"
  vnc_bind_address = "0.0.0.0"
  headless         = true

  # Serve the `http` directory via HTTP, used for preseeding the Debian installer.
  #http_directory = "${path.cwd}/recipes/${var.name}/provisionning/${var.name}/http"
  http_port_min = 9990
  http_port_max = 9999

  # SSH ports to redirect to the VM being built
  #host_port_min = 2222
  #host_port_max = 2229

  # This user is configured in the preseed file.
  ssh_username         = "${local.ssh_user}"
  ssh_private_key_file = data.sshkey.install.private_key_path
  ssh_wait_timeout     = "1000s"

  shutdown_command = "/sbin/poweroff"

  # Builds a compact image
  #disk_compression   = true
  #disk_discard       = "unmap"
  skip_compaction = false
  #disk_detect_zeroes = "unmap"

  format    = "ova"
  boot_wait = "5s"
}
|
||||
|
||||
# VMware VMX builder — rebuilds from an existing VMX, producing an OVA.
source "vmware-vmx" "debian" {
  disk_type_id     = 0
  vnc_bind_address = "0.0.0.0"
  headless         = true

  # Serve the `http` directory via HTTP, used for preseeding the Debian installer.
  #http_directory = "${path.cwd}/recipes/${var.name}/provisionning/${var.name}/http"
  http_port_min = 9990
  http_port_max = 9999

  # SSH ports to redirect to the VM being built
  #host_port_min = 2222
  #host_port_max = 2229

  # This user is configured in the preseed file.
  ssh_username         = "${local.ssh_user}"
  ssh_private_key_file = data.sshkey.install.private_key_path
  ssh_wait_timeout     = "1000s"

  shutdown_command = "/sbin/poweroff"

  # Builds a compact image
  #disk_compression   = true
  #disk_discard       = "unmap"
  skip_compaction = false
  #disk_detect_zeroes = "unmap"

  format    = "ova"
  boot_wait = "5s"
}
|
|
@ -0,0 +1,12 @@
|
|||
#cloud-config
|
||||
ssh_pwauth: True
|
||||
user: ${user}
|
||||
password: ${password}
|
||||
chpasswd:
|
||||
expire: False
|
||||
|
||||
# Work around network interface down after boot
|
||||
runcmd:
|
||||
%{ for cmd in runcmd ~}
|
||||
- ${cmd}
|
||||
%{ endfor ~}
|
|
@ -0,0 +1,6 @@
|
|||
# /etc/conf.d/chronyd
|
||||
CFGFILE="/etc/chrony/chrony.conf"
|
||||
FAST_STARTUP=yes
|
||||
ARGS=""
|
||||
# vrf e.g 'vrf-mgmt'
|
||||
#vrf=""
|
|
@ -0,0 +1,6 @@
|
|||
|
||||
# Configuration file of Harbor
|
||||
|
||||
# The IP address or hostname to access admin UI and registry service.
|
||||
# DO NOT use localhost or 127.0.0.1, because Harbor needs to be accessed by external clients.
|
||||
hostname: ${Vars.RootlessDocker}
|
|
@ -0,0 +1,3 @@
|
|||
%{ if Vars.RootlessDocker }
|
||||
docker:231072:65536
|
||||
%{ endif }
|
|
@ -0,0 +1,265 @@
|
|||
# Configuration file of Harbor
|
||||
|
||||
# The IP address or hostname to access admin UI and registry service.
|
||||
# DO NOT use localhost or 127.0.0.1, because Harbor needs to be accessed by external clients.
|
||||
hostname: ${Vars.HarborDomain}
|
||||
|
||||
# http related config
|
||||
http:
|
||||
# port for http, default is 80. If https enabled, this port will redirect to https port
|
||||
port: ${Vars.HarborHTTPPort}
|
||||
|
||||
# https related config
|
||||
https:
|
||||
# https port for harbor, default is 443
|
||||
port: ${Vars.HarborHTTPSPort}
|
||||
# The path of cert and key files for nginx
|
||||
certificate: ${Vars.HarborSSLCert}
|
||||
private_key: ${Vars.HarborSSLPrivKey}
|
||||
|
||||
# # Uncomment following will enable tls communication between all harbor components
|
||||
# internal_tls:
|
||||
# # set enabled to true means internal tls is enabled
|
||||
# enabled: true
|
||||
# # put your cert and key files on dir
|
||||
# dir: /etc/harbor/tls/internal
|
||||
|
||||
# Uncomment external_url if you want to enable external proxy
|
||||
# And when it enabled the hostname will no longer used
|
||||
# external_url: https://reg.mydomain.com:8433
|
||||
|
||||
# The initial password of Harbor admin
|
||||
# It only works in first time to install harbor
|
||||
# Remember Change the admin password from UI after launching Harbor.
|
||||
harbor_admin_password: ${Vars.HarborAdminPassword}
|
||||
|
||||
# Harbor DB configuration
|
||||
database:
|
||||
# The password for the root user of Harbor DB. Change this before any production use.
|
||||
password: ${Vars.HarborDBPassword}
|
||||
# The maximum number of connections in the idle connection pool. If it <=0, no idle connections are retained.
|
||||
max_idle_conns: 50
|
||||
# The maximum number of open connections to the database. If it <= 0, then there is no limit on the number of open connections.
|
||||
# Note: the default number of connections is 100 for postgres.
|
||||
max_open_conns: 200
|
||||
|
||||
# The default data volume
|
||||
data_volume: /srv/harbor/data
|
||||
|
||||
# Harbor Storage settings by default is using /data dir on local filesystem
|
||||
# Uncomment storage_service setting If you want to using external storage
|
||||
# storage_service:
|
||||
# # ca_bundle is the path to the custom root ca certificate, which will be injected into the truststore
|
||||
# # of registry's and chart repository's containers. This is usually needed when the user hosts a internal storage with self signed certificate.
|
||||
# ca_bundle:
|
||||
|
||||
# # storage backend, default is filesystem, options include filesystem, azure, gcs, s3, swift and oss
|
||||
# # for more info about this configuration please refer https://docs.docker.com/registry/configuration/
|
||||
# filesystem:
|
||||
# maxthreads: 100
|
||||
# # set disable to true when you want to disable registry redirect
|
||||
# redirect:
|
||||
# disabled: false
|
||||
|
||||
# Trivy configuration
|
||||
#
|
||||
# Trivy DB contains vulnerability information from NVD, Red Hat, and many other upstream vulnerability databases.
|
||||
# It is downloaded by Trivy from the GitHub release page https://github.com/aquasecurity/trivy-db/releases and cached
|
||||
# in the local file system. In addition, the database contains the update timestamp so Trivy can detect whether it
|
||||
# should download a newer version from the Internet or use the cached one. Currently, the database is updated every
|
||||
# 12 hours and published as a new release to GitHub.
|
||||
trivy:
|
||||
# ignoreUnfixed The flag to display only fixed vulnerabilities
|
||||
ignore_unfixed: false
|
||||
# skipUpdate The flag to enable or disable Trivy DB downloads from GitHub
|
||||
#
|
||||
# You might want to enable this flag in test or CI/CD environments to avoid GitHub rate limiting issues.
|
||||
# If the flag is enabled you have to download the `trivy-offline.tar.gz` archive manually, extract `trivy.db` and
|
||||
# `metadata.json` files and mount them in the `/home/scanner/.cache/trivy/db` path.
|
||||
skip_update: false
|
||||
#
|
||||
# The offline_scan option prevents Trivy from sending API requests to identify dependencies.
|
||||
# Scanning JAR files and pom.xml may require Internet access for better detection, but this option tries to avoid it.
|
||||
# For example, the offline mode will not try to resolve transitive dependencies in pom.xml when the dependency doesn't
|
||||
# exist in the local repositories. It means a number of detected vulnerabilities might be fewer in offline mode.
|
||||
# It would work if all the dependencies are in local.
|
||||
# This option doesn’t affect DB download. You need to specify "skip-update" as well as "offline-scan" in an air-gapped environment.
|
||||
offline_scan: false
|
||||
#
|
||||
# insecure The flag to skip verifying registry certificate
|
||||
insecure: false
|
||||
# github_token The GitHub access token to download Trivy DB
|
||||
#
|
||||
# Anonymous downloads from GitHub are subject to the limit of 60 requests per hour. Normally such rate limit is enough
|
||||
# for production operations. If, for any reason, it's not enough, you could increase the rate limit to 5000
|
||||
# requests per hour by specifying the GitHub access token. For more details on GitHub rate limiting please consult
|
||||
# https://developer.github.com/v3/#rate-limiting
|
||||
#
|
||||
# You can create a GitHub token by following the instructions in
|
||||
# https://help.github.com/en/github/authenticating-to-github/creating-a-personal-access-token-for-the-command-line
|
||||
#
|
||||
# github_token: xxx
|
||||
|
||||
jobservice:
|
||||
# Maximum number of job workers in job service
|
||||
max_job_workers: 10
|
||||
logger_sweeper_duration: 300
|
||||
|
||||
notification:
|
||||
# Maximum retry count for webhook job
|
||||
webhook_job_max_retry: 10
|
||||
webhook_job_http_client_timeout: 300
|
||||
|
||||
chart:
|
||||
# Change the value of absolute_url to enabled can enable absolute url in chart
|
||||
absolute_url: disabled
|
||||
|
||||
# Log configurations
|
||||
log:
|
||||
# options are debug, info, warning, error, fatal
|
||||
level: info
|
||||
# configs for logs in local storage
|
||||
local:
|
||||
# Log files are rotated log_rotate_count times before being removed. If count is 0, old versions are removed rather than rotated.
|
||||
rotate_count: 50
|
||||
# Log files are rotated only if they grow bigger than log_rotate_size bytes. If size is followed by k, the size is assumed to be in kilobytes.
|
||||
# If the M is used, the size is in megabytes, and if G is used, the size is in gigabytes. So size 100, size 100k, size 100M and size 100G
|
||||
# are all valid.
|
||||
rotate_size: 200M
|
||||
# The directory on your host that store log
|
||||
location: /var/log/harbor
|
||||
|
||||
# Uncomment following lines to enable external syslog endpoint.
|
||||
# external_endpoint:
|
||||
# # protocol used to transmit log to external endpoint, options is tcp or udp
|
||||
# protocol: tcp
|
||||
# # The host of external endpoint
|
||||
# host: localhost
|
||||
# # Port of external endpoint
|
||||
# port: 5140
|
||||
|
||||
#This attribute is for migrator to detect the version of the .cfg file, DO NOT MODIFY!
|
||||
_version: 2.6.0
|
||||
|
||||
# Uncomment external_database if using external database.
|
||||
# external_database:
|
||||
# harbor:
|
||||
# host: harbor_db_host
|
||||
# port: harbor_db_port
|
||||
# db_name: harbor_db_name
|
||||
# username: harbor_db_username
|
||||
# password: harbor_db_password
|
||||
# ssl_mode: disable
|
||||
# max_idle_conns: 2
|
||||
# max_open_conns: 0
|
||||
# notary_signer:
|
||||
# host: notary_signer_db_host
|
||||
# port: notary_signer_db_port
|
||||
# db_name: notary_signer_db_name
|
||||
# username: notary_signer_db_username
|
||||
# password: notary_signer_db_password
|
||||
# ssl_mode: disable
|
||||
# notary_server:
|
||||
# host: notary_server_db_host
|
||||
# port: notary_server_db_port
|
||||
# db_name: notary_server_db_name
|
||||
# username: notary_server_db_username
|
||||
# password: notary_server_db_password
|
||||
# ssl_mode: disable
|
||||
|
||||
# Uncomment external_redis if using external Redis server
|
||||
# external_redis:
|
||||
# # support redis, redis+sentinel
|
||||
# # host for redis: <host_redis>:<port_redis>
|
||||
# # host for redis+sentinel:
|
||||
# # <host_sentinel1>:<port_sentinel1>,<host_sentinel2>:<port_sentinel2>,<host_sentinel3>:<port_sentinel3>
|
||||
# host: redis:6379
|
||||
# password:
|
||||
# # sentinel_master_set must be set to support redis+sentinel
|
||||
# #sentinel_master_set:
|
||||
# # db_index 0 is for core, it's unchangeable
|
||||
# registry_db_index: 1
|
||||
# jobservice_db_index: 2
|
||||
# chartmuseum_db_index: 3
|
||||
# trivy_db_index: 5
|
||||
# idle_timeout_seconds: 30
|
||||
|
||||
# Uncomment uaa for trusting the certificate of uaa instance that is hosted via self-signed cert.
|
||||
# uaa:
|
||||
# ca_file: /path/to/ca
|
||||
|
||||
# Global proxy
|
||||
# Config http proxy for components, e.g. http://my.proxy.com:3128
|
||||
# Components doesn't need to connect to each others via http proxy.
|
||||
# Remove component from `components` array if want disable proxy
|
||||
# for it. If you want use proxy for replication, MUST enable proxy
|
||||
# for core and jobservice, and set `http_proxy` and `https_proxy`.
|
||||
# Add domain to the `no_proxy` field, when you want disable proxy
|
||||
# for some special registry.
|
||||
proxy:
|
||||
http_proxy:
|
||||
https_proxy:
|
||||
no_proxy:
|
||||
components:
|
||||
- core
|
||||
- jobservice
|
||||
- notary
|
||||
- trivy
|
||||
|
||||
metric:
|
||||
enabled: false
|
||||
port: 9090
|
||||
path: /metrics
|
||||
|
||||
# Trace related config
|
||||
# only can enable one trace provider(jaeger or otel) at the same time,
|
||||
# and when using jaeger as provider, can only enable it with agent mode or collector mode.
|
||||
# if using jaeger collector mode, uncomment endpoint and uncomment username, password if needed
|
||||
# if using jaeger agetn mode uncomment agent_host and agent_port
|
||||
# trace:
|
||||
# enabled: true
|
||||
# # set sample_rate to 1 if you wanna sampling 100% of trace data; set 0.5 if you wanna sampling 50% of trace data, and so forth
|
||||
# sample_rate: 1
|
||||
# # # namespace used to differenciate different harbor services
|
||||
# # namespace:
|
||||
# # # attributes is a key value dict contains user defined attributes used to initialize trace provider
|
||||
# # attributes:
|
||||
# # application: harbor
|
||||
# # # jaeger should be 1.26 or newer.
|
||||
# # jaeger:
|
||||
# # endpoint: http://hostname:14268/api/traces
|
||||
# # username:
|
||||
# # password:
|
||||
# # agent_host: hostname
|
||||
# # # export trace data by jaeger.thrift in compact mode
|
||||
# # agent_port: 6831
|
||||
# # otel:
|
||||
# # endpoint: hostname:4318
|
||||
# # url_path: /v1/traces
|
||||
# # compression: false
|
||||
# # insecure: true
|
||||
# # timeout: 10s
|
||||
|
||||
# enable purge _upload directories
|
||||
upload_purging:
|
||||
enabled: true
|
||||
# remove files in _upload directories which exist for a period of time, default is one week.
|
||||
age: 168h
|
||||
# the interval of the purge operations
|
||||
interval: 24h
|
||||
dryrun: false
|
||||
|
||||
# cache layer configurations
|
||||
# If this feature enabled, harbor will cache the resource
|
||||
# `project/project_metadata/repository/artifact/manifest` in the redis
|
||||
# which can especially help to improve the performance of high concurrent
|
||||
# manifest pulling.
|
||||
# NOTICE
|
||||
# If you are deploying Harbor in HA mode, make sure that all the harbor
|
||||
# instances have the same behaviour, all with caching enabled or disabled,
|
||||
# otherwise it can lead to potential data inconsistency.
|
||||
cache:
|
||||
# not enabled by default
|
||||
enabled: false
|
||||
# keep cache for one day by default
|
||||
expire_hours: 24
|
|
@ -0,0 +1,47 @@
|
|||
|
||||
# Example answer file for setup-alpine script
|
||||
# If you don't want to use a certain option, then comment it out
|
||||
|
||||
# Use US layout with US variant
|
||||
KEYMAPOPTS="fr fr"
|
||||
|
||||
# Set hostname to alpine-test
|
||||
HOSTNAMEOPTS="-n ${hostname}"
|
||||
|
||||
# Contents of /etc/network/interfaces
|
||||
INTERFACESOPTS="auto lo
|
||||
iface lo inet loopback
|
||||
|
||||
auto eth0
|
||||
iface eth0 inet dhcp
|
||||
hostname ${hostname}
|
||||
"
|
||||
|
||||
# Search domain of example.com, OpenDNS public nameserver
|
||||
# ex: -d example.com 1.1.1.1"
|
||||
DNSOPTS=""
|
||||
|
||||
# Set timezone to UTC
|
||||
TIMEZONEOPTS="-z Europe/Paris"
|
||||
|
||||
# set http/ftp proxy
|
||||
PROXYOPTS="none"
|
||||
|
||||
# Add a random mirror
|
||||
APKREPOSOPTS="-r -c"
|
||||
|
||||
# Install Openssh
|
||||
SSHDOPTS="-c openssh -k /root/.ssh/authorized_keys"
|
||||
|
||||
# Use openntpd
|
||||
NTPOPTS="-c openntpd"
|
||||
|
||||
# Use /dev/sda as a data disk
|
||||
DISKOPTS="-L -m sys ${disk_device}"
|
||||
|
||||
USEROPTS="-a -g 'netdev' ${user}"
|
||||
|
||||
# Setup in /media/vda1
|
||||
# LBUOPTS="/media/vda1"
|
||||
# APKCACHEOPTS="/media/vda1/cache"
|
||||
|
|
@ -0,0 +1,8 @@
|
|||
# k3s options
|
||||
export PATH="/usr/libexec/cni/:$PATH"
|
||||
K3S_EXEC="server"
|
||||
%{ if Vars.DeployTraefik }
|
||||
K3S_OPTS=""
|
||||
%{ else }
|
||||
K3S_OPTS="--disable traefik"
|
||||
%{ endif }
|
|
@ -0,0 +1 @@
|
|||
command_args="-address 0.0.0.0:${Vars.MatchBox.HTTPPort} -rpc-address 0.0.0.0:${Vars.MatchBox.gRPCPort} -log-level ${Vars.MatchBox.LogLevel}"
|
|
@ -0,0 +1,4 @@
|
|||
${Vars.NIC[0].IP} ${Vars.Set.Hostname}
|
||||
%{ if Vars.MatchBox.Hostname != "" }
|
||||
${Vars.NIC[0].IP} ${Vars.MatchBox.Hostname}
|
||||
%{ endif }
|
|
@ -0,0 +1,60 @@
|
|||
log-queries
|
||||
log-dhcp
|
||||
|
||||
#port=0
|
||||
listen-address=0.0.0.0
|
||||
interface=${Vars.PXE.ListenInterface}
|
||||
no-resolv
|
||||
domain-needed
|
||||
bogus-priv
|
||||
expand-hosts
|
||||
server=${Vars.ETH0.DNS}
|
||||
strict-order
|
||||
addn-hosts=/etc/dnsmasq-hosts.conf
|
||||
domain=${Vars.PXE.DNSDomain}
|
||||
local=/${Vars.PXE.DNSDomain}/
|
||||
localise-queries
|
||||
|
||||
|
||||
%{ if Vars.PXE.DHCPMode == "proxy" }
|
||||
#dhcp-no-override
|
||||
dhcp-range=${Vars.ETH0.IP},proxy
|
||||
%{ else }
|
||||
dhcp-range=${Vars.PXE.DHCPRangeStart},${Vars.PXE.DHCPRangeEnd},${Vars.PXE.DHCPLeaseDuration}
|
||||
dhcp-option=option:router,${Vars.ETH0.GATEWAY}
|
||||
%{ endif }
|
||||
|
||||
dhcp-option=option:dns-server,${Vars.ETH0.IP}
|
||||
dhcp-option=option:domain-name,${Vars.PXE.DNSDomain}
|
||||
|
||||
# TFTP Configuration
|
||||
enable-tftp
|
||||
tftp-root="${Vars.PXE.TFTPRoot}"
|
||||
|
||||
pxe-prompt="${Vars.PXE.GreetingMessage}",${Vars.PXE.DelayTime}
|
||||
|
||||
# Based on logic in https://gist.github.com/robinsmidsrod/4008017
|
||||
# iPXE sends a 175 option, checking suboptions
|
||||
dhcp-match=set:ipxe-http,175,19
|
||||
dhcp-match=set:ipxe-https,175,20
|
||||
dhcp-match=set:ipxe-menu,175,39
|
||||
# pcbios specific
|
||||
dhcp-match=set:ipxe-pxe,175,33
|
||||
dhcp-match=set:ipxe-bzimage,175,24
|
||||
dhcp-match=set:ipxe-iscsi,175,17
|
||||
# efi specific
|
||||
dhcp-match=set:ipxe-efi,175,36
|
||||
# combination
|
||||
# set ipxe-ok tag if we have correct combination
|
||||
# http && menu && iscsi ((pxe && bzimage) || efi)
|
||||
tag-if=set:ipxe-ok,tag:ipxe-http,tag:ipxe-menu,tag:ipxe-iscsi,tag:ipxe-pxe,tag:ipxe-bzimage
|
||||
tag-if=set:ipxe-ok,tag:ipxe-http,tag:ipxe-menu,tag:ipxe-iscsi,tag:ipxe-efi
|
||||
|
||||
|
||||
## Load different PXE boot image depending on client architecture (when running as a proxy DHCP)
|
||||
pxe-service=tag:!ipxe-ok, x86PC, "Legacy boot PXE chainload to iPXE", undionly.kpxe
|
||||
pxe-service=tag:!ipxe-ok, BC_EFI, "UEFI32 boot chainload to iPXE", snponly.efi
|
||||
pxe-service=tag:!ipxe-ok, X86-64_EFI, "UEFI64 boot chainload to iPXE", snponly.efi
|
||||
|
||||
dhcp-userclass=set:ipxe,iPXE
|
||||
dhcp-boot=tag:ipxe-ok,http://${Vars.ETH0.IP}:${Vars.MatchBox.HTTPPort}/boot.ipxe,,${Vars.ETH0.IP}
|
|
@ -0,0 +1,28 @@
|
|||
#!/sbin/openrc-run

# OpenRC service template for matchbox (rendered by the templater; the
# ${Vars.*} placeholders are substituted before install).

name=$RC_SVCNAME
command="/usr/local/bin/$RC_SVCNAME"
command_user="$RC_SVCNAME"
pidfile="/run/$RC_SVCNAME/$RC_SVCNAME.pid"
start_stop_daemon_args="--start -b"
command_args="$command_args"
command_background="yes"

depend() {
    need net
}

# Generate the matchbox TLS material on first start.
start_pre() {
    checkpath --directory --owner $command_user:$command_user --mode 0775 \
        /run/$RC_SVCNAME /var/log/$RC_SVCNAME
    if [ ! -f "/etc/matchbox/server.crt" ]; then
        # FIX: abort when the TLS helper directory is missing instead of
        # running ./cert-gen from whatever the current directory is
        # (unchecked cd, shellcheck SC2164).
        cd /root/tls || return 1
        export SAN="DNS.1:${Vars.MatchBox.Hostname},IP.1:${Vars.ETH0.IP}"
        ./cert-gen
        mkdir -p /etc/matchbox
        cp ca.crt server.crt server.key /etc/matchbox
        chown -R matchbox:matchbox /etc/matchbox
        mkdir -p /root/.matchbox
        cp client.crt client.key ca.crt /root/.matchbox/
    fi
}
|
|
@ -0,0 +1 @@
|
|||
harbor
|
|
@ -0,0 +1 @@
|
|||
command_args="-address 0.0.0.0:${Vars.MatchBox.HTTPPort} -rpc-address 0.0.0.0:${Vars.MatchBox.gRPCPort} -log-level ${Vars.MatchBox.LogLevel}"
|
|
@ -0,0 +1,7 @@
|
|||
${Vars.NIC[0].IP} ${Vars.Set.Hostname}
|
||||
%{ if Vars.MatchBox.Hostname != "" }
|
||||
${Vars.NIC[0].IP} ${Vars.MatchBox.Hostname}
|
||||
%{ endif }
|
||||
%{ for host in Vars.DNSMasq.Hosts }
|
||||
${host.IP} ${host.Name}
|
||||
%{ endfor }
|
|
@ -0,0 +1,60 @@
|
|||
log-queries
|
||||
log-dhcp
|
||||
|
||||
#port=0
|
||||
listen-address=0.0.0.0
|
||||
interface=${Vars.PXE.ListenInterface}
|
||||
no-resolv
|
||||
domain-needed
|
||||
bogus-priv
|
||||
expand-hosts
|
||||
server=${Vars.DNS[0]}
|
||||
strict-order
|
||||
addn-hosts=/etc/dnsmasq-hosts.conf
|
||||
domain=${Vars.PXE.DNSDomain}
|
||||
local=/${Vars.PXE.DNSDomain}/
|
||||
localise-queries
|
||||
|
||||
|
||||
%{ if Vars.PXE.DHCPMode == "proxy" }
|
||||
#dhcp-no-override
|
||||
dhcp-range=${Vars.NIC[0].IP},proxy
|
||||
%{ else }
|
||||
dhcp-range=${Vars.PXE.DHCPRangeStart},${Vars.PXE.DHCPRangeEnd},${Vars.PXE.DHCPLeaseDuration}
|
||||
dhcp-option=option:router,${Vars.NIC[0].Gateway}
|
||||
%{ endif }
|
||||
|
||||
dhcp-option=option:dns-server,${Vars.NIC[0].IP}
|
||||
dhcp-option=option:domain-name,${Vars.PXE.DNSDomain}
|
||||
|
||||
# TFTP Configuration
|
||||
enable-tftp
|
||||
tftp-root="${Vars.PXE.TFTPRoot}"
|
||||
|
||||
pxe-prompt="${Vars.PXE.GreetingMessage}",${Vars.PXE.DelayTime}
|
||||
|
||||
# Based on logic in https://gist.github.com/robinsmidsrod/4008017
|
||||
# iPXE sends a 175 option, checking suboptions
|
||||
dhcp-match=set:ipxe-http,175,19
|
||||
dhcp-match=set:ipxe-https,175,20
|
||||
dhcp-match=set:ipxe-menu,175,39
|
||||
# pcbios specific
|
||||
dhcp-match=set:ipxe-pxe,175,33
|
||||
dhcp-match=set:ipxe-bzimage,175,24
|
||||
dhcp-match=set:ipxe-iscsi,175,17
|
||||
# efi specific
|
||||
dhcp-match=set:ipxe-efi,175,36
|
||||
# combination
|
||||
# set ipxe-ok tag if we have correct combination
|
||||
# http && menu && iscsi ((pxe && bzimage) || efi)
|
||||
tag-if=set:ipxe-ok,tag:ipxe-http,tag:ipxe-menu,tag:ipxe-iscsi,tag:ipxe-pxe,tag:ipxe-bzimage
|
||||
tag-if=set:ipxe-ok,tag:ipxe-http,tag:ipxe-menu,tag:ipxe-iscsi,tag:ipxe-efi
|
||||
|
||||
|
||||
## Load different PXE boot image depending on client architecture (when running as a proxy DHCP)
|
||||
pxe-service=tag:!ipxe-ok, x86PC, "Legacy boot PXE chainload to iPXE", undionly.kpxe
|
||||
pxe-service=tag:!ipxe-ok, BC_EFI, "UEFI32 boot chainload to iPXE", snponly.efi
|
||||
pxe-service=tag:!ipxe-ok, X86-64_EFI, "UEFI64 boot chainload to iPXE", snponly.efi
|
||||
|
||||
dhcp-userclass=set:ipxe,iPXE
|
||||
dhcp-boot=tag:ipxe-ok,http://${Vars.NIC[0].IP}:${Vars.MatchBox.HTTPPort}/boot.ipxe,,${Vars.NIC[0].IP}
|
|
@ -0,0 +1 @@
|
|||
${Vars.Set.Hostname}
|
|
@ -0,0 +1,28 @@
|
|||
#!/sbin/openrc-run

# OpenRC service template for matchbox (NIC[0] variant; the ${Vars.*}
# placeholders are substituted by the templater before install).

name=$RC_SVCNAME
command="/usr/local/bin/$RC_SVCNAME"
command_user="$RC_SVCNAME"
pidfile="/run/$RC_SVCNAME/$RC_SVCNAME.pid"
start_stop_daemon_args="--start -b"
command_args="$command_args"
command_background="yes"

depend() {
    need net
}

# Generate the matchbox TLS material on first start.
start_pre() {
    checkpath --directory --owner $command_user:$command_user --mode 0775 \
        /run/$RC_SVCNAME /var/log/$RC_SVCNAME
    if [ ! -f "/etc/matchbox/server.crt" ]; then
        # FIX: abort when the TLS helper directory is missing instead of
        # running ./cert-gen from whatever the current directory is
        # (unchecked cd, shellcheck SC2164).
        cd /root/tls || return 1
        export SAN="DNS.1:${Vars.MatchBox.Hostname},IP.1:${Vars.NIC[0].IP}"
        ./cert-gen
        mkdir -p /etc/matchbox
        cp ca.crt server.crt server.key /etc/matchbox
        chown -R matchbox:matchbox /etc/matchbox
        mkdir -p /root/.matchbox
        cp client.crt client.key ca.crt /root/.matchbox/
    fi
}
|
|
@ -0,0 +1,9 @@
|
|||
|
||||
%{ for iface in Vars.NIC }
|
||||
auto ${iface.Name}
|
||||
|
||||
iface ${iface.Name} inet static
|
||||
address ${iface.IP}
|
||||
netmask ${iface.Mask}
|
||||
gateway ${iface.Gateway}
|
||||
%{ endfor ~}
|
|
@ -0,0 +1,4 @@
|
|||
|
||||
%{ for dns in Vars.DNS }
|
||||
nameserver ${dns}
|
||||
%{ endfor ~}
|
|
@ -0,0 +1,7 @@
|
|||
NAME = <%= image_name %>
|
||||
PATH = <%= image_source %>
|
||||
TYPE = OS
|
||||
PERSISTENT = No
|
||||
DESCRIPTION = "<%= image_comment %>"
|
||||
DEV_PREFIX = vd
|
||||
FORMAT = qcow2
|
|
@ -0,0 +1,48 @@
|
|||
{
|
||||
"name": "<%= template_name %>",
|
||||
"deployment": "straight",
|
||||
"description": "Cluster Kubernetes (k8s)",
|
||||
"roles": [
|
||||
{
|
||||
"name": "leader",
|
||||
"cardinality": 1,
|
||||
"vm_template": <%= getTemplateByName(oneCli, vm_name).id %>,
|
||||
"shutdown_action": "terminate",
|
||||
"vm_template_contents": "NIC = [\n NAME = \"NIC0\",\n NETWORK_ID = \"$main\",\n RDP = \"YES\" ]\nNIC = [\n NAME = \"NIC1\",\n NETWORK_ID = \"$internal\" ]\n",
|
||||
"elasticity_policies": [],
|
||||
"scheduled_policies": []
|
||||
},
|
||||
{
|
||||
"name": "master",
|
||||
"cardinality": 2,
|
||||
"vm_template": <%= getTemplateByName(oneCli, vm_name).id %>,
|
||||
"shutdown_action": "terminate",
|
||||
"vm_template_contents": "NIC = [\n NAME = \"NIC0\",\n NETWORK_ID = \"$main\",\n RDP = \"YES\" ]\nNIC = [\n NAME = \"NIC1\",\n NETWORK_ID = \"$internal\" ]\n",
|
||||
"elasticity_policies": [],
|
||||
"scheduled_policies": []
|
||||
},
|
||||
{
|
||||
"name": "worker",
|
||||
"cardinality": 4,
|
||||
"vm_template": <%= getTemplateByName(oneCli, vm_name).id %>,
|
||||
"shutdown_action": "terminate",
|
||||
"parents": [
|
||||
"leader"
|
||||
],
|
||||
"vm_template_contents": "NIC = [\n NAME = \"NIC0\",\n NETWORK_ID = \"$main\",\n RDP = \"YES\" ]\nNIC = [\n NAME = \"NIC1\",\n NETWORK_ID = \"$internal\" ]\n",
|
||||
"elasticity_policies": [],
|
||||
"scheduled_policies": []
|
||||
}
|
||||
],
|
||||
"networks": {
|
||||
"main": "M|network|Main network| |id:",
|
||||
"internal": "M|network|Internal network| |id:"
|
||||
},
|
||||
"custom_attrs": {
|
||||
"KUBEAPPS_DNS_NAME": "M|text|DNS Name for kubeapps service| |kubeapps.k3s-eole.local",
|
||||
"INGRESS_PROVIDER": "O|list|Default ingress to install|nginx, traefik, |",
|
||||
"LE_EMAIL": "M|text|Email | |"
|
||||
},
|
||||
"shutdown_action": "terminate",
|
||||
"ready_status_gate": true
|
||||
}
|
|
@ -0,0 +1,33 @@
|
|||
NAME = "<%= template_name %>"
|
||||
CONTEXT = [
|
||||
NETWORK = "YES",
|
||||
REPORT_READY = "YES",
|
||||
SET_HOSTNAME = "$NAME",
|
||||
SSH_PUBLIC_KEY = "$USER[SSH_PUBLIC_KEY]",
|
||||
TOKEN = "YES" ]
|
||||
CPU = "0.2"
|
||||
DESCRIPTION = "Alpine basic image"
|
||||
DISK = [
|
||||
DEV_PREFIX = "vd",
|
||||
DRIVER = "qcow2",
|
||||
IMAGE = "<%= image_name %>",
|
||||
IMAGE_UNAME = "<%= user %>" ]
|
||||
GRAPHICS = [
|
||||
KEYMAP = "fr",
|
||||
LISTEN = "0.0.0.0",
|
||||
TYPE = "VNC" ]
|
||||
HYPERVISOR = "kvm"
|
||||
INPUT = [
|
||||
BUS = "usb",
|
||||
TYPE = "tablet" ]
|
||||
INPUTS_ORDER = ""
|
||||
LOGO = "images/logos/linux.png"
|
||||
MEMORY = "512"
|
||||
MEMORY_UNIT_COST = "MB"
|
||||
NIC_DEFAULT = [
|
||||
MODEL = "virtio" ]
|
||||
OS = [
|
||||
ARCH = "x86_64",
|
||||
BOOT = "",
|
||||
SD_DISK_BUS = "scsi" ]
|
||||
VCPU = "2"
|
|
@ -0,0 +1,32 @@
|
|||
NAME = "<%= template_name %>"
|
||||
CONTEXT = [
|
||||
NETWORK = "YES",
|
||||
REPORT_READY = "YES",
|
||||
SET_HOSTNAME = "$NAME",
|
||||
SSH_PUBLIC_KEY = "$USER[SSH_PUBLIC_KEY]",
|
||||
TOKEN = "YES" ]
|
||||
CPU = "0.2"
|
||||
DESCRIPTION = "K3S Ready VM"
|
||||
DISK = [
|
||||
IMAGE = "<%= image_name %>",
|
||||
IMAGE_UNAME = "<%= user %>",
|
||||
DRIVER = "qcow2" ]
|
||||
GRAPHICS = [
|
||||
KEYMAP = "fr",
|
||||
LISTEN = "0.0.0.0",
|
||||
TYPE = "VNC" ]
|
||||
HYPERVISOR = "kvm"
|
||||
INPUT = [
|
||||
BUS = "usb",
|
||||
TYPE = "tablet" ]
|
||||
INPUTS_ORDER = ""
|
||||
LOGO = "images/logos/alpine.png"
|
||||
MEMORY = "2048"
|
||||
MEMORY_UNIT_COST = "MB"
|
||||
NIC_DEFAULT = [
|
||||
MODEL = "virtio" ]
|
||||
OS = [
|
||||
ARCH = "x86_64",
|
||||
BOOT = "",
|
||||
SD_DISK_BUS = "scsi" ]
|
||||
VCPU = "2"
|
|
@ -0,0 +1,35 @@
|
|||
NAME = "<%= template_name %>"
|
||||
CONTEXT = [
|
||||
NETWORK = "YES",
|
||||
REPORT_READY = "YES",
|
||||
SET_HOSTNAME = "$NAME",
|
||||
SERVER_ROLE = "leader",
|
||||
TOKEN = "YES",
|
||||
SSH_PUBLIC_KEY = "$USER[SSH_PUBLIC_KEY]"
|
||||
]
|
||||
CPU = "0.8"
|
||||
DESCRIPTION = "Kubernetes master or Docker VM (check the name)"
|
||||
DISK = [
|
||||
DEV_PREFIX = "vd",
|
||||
IMAGE = "<%= image_name %>",
|
||||
IMAGE_UNAME = "<%= user %>",
|
||||
DRIVER = "qcow2" ]
|
||||
GRAPHICS = [
|
||||
LISTEN = "0.0.0.0",
|
||||
KEYMAP = "fr",
|
||||
TYPE = "VNC" ]
|
||||
HYPERVISOR = "kvm"
|
||||
INPUT = [
|
||||
BUS = "usb",
|
||||
TYPE = "tablet" ]
|
||||
INPUTS_ORDER = ""
|
||||
LOGO = "images/logos/alpine.png"
|
||||
MEMORY = "2048"
|
||||
MEMORY_UNIT_COST = "MB"
|
||||
NIC_DEFAULT = [
|
||||
MODEL = "virtio" ]
|
||||
OS = [
|
||||
ARCH = "x86_64",
|
||||
BOOT = "",
|
||||
SD_DISK_BUS = "scsi" ]
|
||||
VCPU = "4"
|
|
@ -0,0 +1,42 @@
|
|||
NAME = "<%= template_name %>"
|
||||
CONTEXT = [
|
||||
NETWORK = "YES",
|
||||
REPORT_READY = "YES",
|
||||
SET_HOSTNAME = "$NAME",
|
||||
SERVER_ROLE = "master",
|
||||
MASTER_ADDR = "$MASTER_ADDR",
|
||||
MASTER_TOKEN = "$MASTER_TOKEN",
|
||||
MASTER_CA_TOKEN = "$MASTER_CA_TOKEN",
|
||||
TOKEN = "YES",
|
||||
SSH_PUBLIC_KEY = "$USER[SSH_PUBLIC_KEY]"
|
||||
]
|
||||
CPU = "0.8"
|
||||
DESCRIPTION = "Kubernetes master VM"
|
||||
DISK = [
|
||||
DEV_PREFIX = "vd",
|
||||
IMAGE = "<%= image_name %>",
|
||||
IMAGE_UNAME = "<%= user %>",
|
||||
DRIVER = "qcow2" ]
|
||||
GRAPHICS = [
|
||||
LISTEN = "0.0.0.0",
|
||||
KEYMAP = "fr",
|
||||
TYPE = "VNC" ]
|
||||
HYPERVISOR = "kvm"
|
||||
INPUT = [
|
||||
BUS = "usb",
|
||||
TYPE = "tablet" ]
|
||||
INPUTS_ORDER = ""
|
||||
LOGO = "images/logos/alpine.png"
|
||||
MEMORY = "2048"
|
||||
MEMORY_UNIT_COST = "MB"
|
||||
NIC_DEFAULT = [
|
||||
MODEL = "virtio" ]
|
||||
OS = [
|
||||
ARCH = "x86_64",
|
||||
BOOT = "",
|
||||
SD_DISK_BUS = "scsi" ]
|
||||
USER_INPUTS = [
|
||||
MASTER_ADDR = "O|text|Master address (for workers only)",
|
||||
MASTER_TOKEN = "O|text|Master Token (for workers only)",
|
||||
MASTER_CA_TOKEN = "O|text|Master CA Token (for workers only)" ]
|
||||
VCPU = "4"
|
|
@ -0,0 +1,42 @@
|
|||
NAME = "<%= template_name %>"
|
||||
CONTEXT = [
|
||||
NETWORK = "YES",
|
||||
REPORT_READY = "YES",
|
||||
SET_HOSTNAME = "$NAME",
|
||||
SERVER_ROLE = "worker",
|
||||
MASTER_ADDR = "$MASTER_ADDR",
|
||||
MASTER_TOKEN = "$MASTER_TOKEN",
|
||||
MASTER_CA_TOKEN = "$MASTER_CA_TOKEN",
|
||||
TOKEN = "YES",
|
||||
SSH_PUBLIC_KEY = "$USER[SSH_PUBLIC_KEY]"
|
||||
]
|
||||
CPU = "0.8"
|
||||
DESCRIPTION = "Kubernetes worker VM"
|
||||
DISK = [
|
||||
DEV_PREFIX = "vd",
|
||||
IMAGE = "<%= image_name %>",
|
||||
IMAGE_UNAME = "<%= user %>",
|
||||
DRIVER = "qcow2" ]
|
||||
GRAPHICS = [
|
||||
LISTEN = "0.0.0.0",
|
||||
KEYMAP = "fr",
|
||||
TYPE = "VNC" ]
|
||||
HYPERVISOR = "kvm"
|
||||
INPUT = [
|
||||
BUS = "usb",
|
||||
TYPE = "tablet" ]
|
||||
INPUTS_ORDER = ""
|
||||
LOGO = "images/logos/alpine.png"
|
||||
MEMORY = "4096"
|
||||
MEMORY_UNIT_COST = "MB"
|
||||
NIC_DEFAULT = [
|
||||
MODEL = "virtio" ]
|
||||
OS = [
|
||||
ARCH = "x86_64",
|
||||
BOOT = "",
|
||||
SD_DISK_BUS = "scsi" ]
|
||||
USER_INPUTS = [
|
||||
MASTER_ADDR = "O|text|Master address (for workers only)",
|
||||
MASTER_TOKEN = "O|text|Master Token (for workers only)",
|
||||
MASTER_CA_TOKEN = "O|text|Master CA Token (for workers only)" ]
|
||||
VCPU = "4"
|
|
@ -0,0 +1,47 @@
|
|||
NAME = "<%= template_name %>"
|
||||
CONTEXT = [
|
||||
MATCHBOX_URL = "http://$NAME",
|
||||
NETWORK = "YES",
|
||||
PXE_DHCPLEASEDURATION = "$EDHCPLEASEDURATION",
|
||||
PXE_DHCPMODE = "$ADHCPMODE",
|
||||
PXE_DNSDOMAIN = "$BDNSDOMAIN",
|
||||
PXE_DHCPRANGESTART = "$CDHCPRANGESTART",
|
||||
PXE_DHCPRANGEEND = "$DDHCPRANGEEND",
|
||||
PXE_DHCPLEASEDURATION = "$EDHCPLEASEDURATION",
|
||||
MATCHBOX_HOSTNAME = "$FMATCHBOX_HOSTNAME",
|
||||
REPORT_READY = "YES",
|
||||
SET_HOSTNAME = "$NAME",
|
||||
SSH_PUBLIC_KEY = "$USER[SSH_PUBLIC_KEY]",
|
||||
TOKEN = "YES" ]
|
||||
CPU = "0.2"
|
||||
DESCRIPTION = "Matchbox Ready VM"
|
||||
DISK = [
|
||||
IMAGE = "<%= image_name %>",
|
||||
IMAGE_UNAME = "<%= user %>",
|
||||
DRIVER = "qcow2" ]
|
||||
GRAPHICS = [
|
||||
KEYMAP = "fr",
|
||||
LISTEN = "0.0.0.0",
|
||||
TYPE = "VNC" ]
|
||||
HYPERVISOR = "kvm"
|
||||
INPUT = [
|
||||
BUS = "usb",
|
||||
TYPE = "tablet" ]
|
||||
INPUTS_ORDER = ""
|
||||
LOGO = "images/logos/alpine.png"
|
||||
MEMORY = "2048"
|
||||
MEMORY_UNIT_COST = "MB"
|
||||
NIC_DEFAULT = [
|
||||
MODEL = "virtio" ]
|
||||
OS = [
|
||||
ARCH = "x86_64",
|
||||
BOOT = "",
|
||||
SD_DISK_BUS = "scsi" ]
|
||||
USER_INPUTS = [
|
||||
ADHCPMODE = "M|list|DHCP Mode|proxy,direct|proxy",
|
||||
BDNSDOMAIN = "M|text|Nom de la zone DNS (ex: cadol.es)",
|
||||
CDHCPRANGESTART = "O|text|DNSMASQ DHCP Range First IP",
|
||||
DDHCPRANGEEND = "O|text|DNSMASQ DHCP Range Last IP",
|
||||
EDHCPLEASEDURATION = "M|list|DHCP lease duration|1h,2h,4h,6h,8h,10h,12h,14h,24h|1h",
|
||||
FMATCHBOX_HOSTNAME = "O|text|Matchbox service hostname|mb.cadol.es" ]
|
||||
VCPU = "2"
|
|
@ -0,0 +1,59 @@
|
|||
variable "name" {
|
||||
type = string
|
||||
default = "debian"
|
||||
}
|
||||
|
||||
variable "version" {
|
||||
type = string
|
||||
default = "11"
|
||||
}
|
||||
|
||||
variable "short_version" {
|
||||
type = string
|
||||
default = "11"
|
||||
}
|
||||
|
||||
variable "arch" {
|
||||
type = string
|
||||
default = "amd6464" # FIXME: typo — should be "amd64" (see recipe: arch = "amd64"); breaks the generated source_iso URL
|
||||
}
|
||||
|
||||
variable "output_dir" {
|
||||
type = string
|
||||
default = "output/debian/"
|
||||
}
|
||||
|
||||
variable "source_url" {
|
||||
type = string
|
||||
default = "https://cdimage.debian.org/cdimage/release"
|
||||
}
|
||||
|
||||
variable "iso_cd_checksum" {
|
||||
type = string
|
||||
default = "sha256:9ae04227e89047b72970a0d5f1897e2573fd0d4bba3d381086307af604072bad9e33174357fd3c3545a2a2b5b83ce19f3dbb5c352e86d5173b833df59b4a5741"
|
||||
}
|
||||
|
||||
variable "image_version" {
|
||||
type = string
|
||||
default = "0.0.1"
|
||||
}
|
||||
|
||||
variable "one_user" {
|
||||
type = string
|
||||
default = env("ONE_USER")
|
||||
}
|
||||
|
||||
variable "one_token" {
|
||||
type = string
|
||||
default = env("ONE_TOKEN")
|
||||
}
|
||||
|
||||
variable "boot_command" {
|
||||
type = list(string)
|
||||
default = []
|
||||
}
|
||||
|
||||
variable "cloud_init_runcmd" {
|
||||
type = list(string)
|
||||
default = [ "uname" ]
|
||||
}
|
Loading…
Reference in New Issue
Je ne pense pas que la VM fasse le resize automatique des partitions au boot, du coup peu importe la taille sélectionnée par l'utilisateur lors de l'import de l'OVF, les fs feront toujours 10 Go.
Je vais tester un import sur ma machine pour voir.