Compare commits


3 Commits

Author SHA1 Message Date
vfebvre c7bb64c671 debian recipe 2023-09-28 16:34:53 +02:00
Philippe Caseiro 3e7d5c028b feat(recipes): adding nuo specific recipes 2023-07-18 10:03:03 +02:00
Philippe Caseiro d13e17ee10 First recipes 2023-06-29 16:38:20 +02:00
154 changed files with 6518 additions and 1 deletion

@@ -1,3 +1,81 @@
# vms
Virtual machine image builder (based on EOLE3 builder)
## What do we have here?
### The "build" tool
This is a simple wrapper to help you build and publish images with "packer" and "one-templates".
### The "publisher" tool: "tools/one-templates"
This is a simple script to create and manage all kinds of OpenNebula templates:
* VMs
* Networks
* Images
* Services
With this tool you can publish all the images and the necessary templates to actually use what you build in OpenNebula.
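For example, publishing a freshly built image could look like this (a sketch based on how the Packer post-processors invoke the tool below; names and paths are illustrative):
```
$ ruby ./tools/one-templates -t image -m 640 \
    -T templates/one/image/common.tpl \
    -n alpine-3.16.2-base -c 'alpine base image' \
    --image-file output/alpine/3.16.2/base/alpine-3.16.2.img
```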
## What do you need?
* Packer >= 1.6.0
* virt-sparsify (optional)
* Ruby
* These Ruby gems:
* opennebula
* opennebula-cli
* opennebula-oca
* webrick
* An OpenNebula server or cluster >= 5.6 (with OpenNebula Flow and OneGate enabled)
* An account with the proper ACL on the OpenNebula server/cluster to:
* create/manage images
* create/manage vm templates
* create/manage service templates
* create/manage network templates
## How to use the "build" tool ?
First you need to create the "publisher" "tools/one-templates" configuration.
This configuration is located in "./tools/.one-templates.conf".
Note that you can use a different configuration file setting the TEMPLATER_CONFIG environment file before starting the build script.
```
$ cat ./tools/.one-templates.conf
user: myOpenNebulaUser
token: myVerySecretOpenNebulaTemporaryToken
builder_addr: IP Address of the building machine (often your own local IP address)
endpoint: http://myOpenNebulaServerAddress...
flow_endpoint: http://myOpenNebulaServerAddress/oneflow
datastore_id: TheDataStoreIDForMyImages
```
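For example, to use an alternate configuration file for a single run (hypothetical path):
```
$ TEMPLATER_CONFIG=/tmp/one-templates.ci.conf ./build start alpine 3.16
```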
Once this configuration is done you can use the "build" tool.
The recipes are organised by OS, version, and flavor; one OS contains various versions and flavors.
You can list the available OS and versions with this command:
```
$ ./build list
You can build :
* alpine :
- 3.16
```
To build all the flavors of a version you can run this command:
```
$ ./build start alpine 3.16
```
To build only one flavor for one version you can run this command:
```
$ ./build run alpine 3.16 k3s
```
`!!! Make sure the "base" flavor is built before trying to build another flavor. !!!`
```
$ ./build run alpine 3.16 base
```
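The wrapper also provides an `mrun` action (see the `build` script below) to build several flavors of one version in a single pass:
```
$ ./build mrun alpine 3.16 base k3s matchbox
```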

157
build Executable file
View File

@ -0,0 +1,157 @@
#!/bin/bash
# Simple build wrapper
ACTION=${1}
OS=${2}
VERSION=${3}
RCP_DIR="./recipes"
PACKER=${PACKER:-packer}
BUILDER=${BUILDER:-qemu}
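# Optional: PACKER_OPTS may be set in the environment to pass extra flags to packer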
#
# Init packer
# install plugins
#
initPacker() {
os=${1}
${PACKER} init ${RCP_DIR}/${os}
}
#
# Run the build
# First the "base" image then the provisionned ones
#
run() {
${PACKER} build ${PACKER_OPTS} -var-file="${RCP_DIR}/${OS}/${VERSION}.pkrvars.hcl" -only="base.${BUILDER}.${OS}" "${RCP_DIR}/${OS}/."
${PACKER} build ${PACKER_OPTS} -force -var-file="${RCP_DIR}/${OS}/${VERSION}.pkrvars.hcl" -except="base.${BUILDER}.${OS}" "${RCP_DIR}/${OS}/."
}
#
# Run a specific build
#
run_build() {
target=${4}
${PACKER} build ${PACKER_OPTS} -force \
-var-file="${RCP_DIR}/${OS}/${VERSION}.pkrvars.hcl" \
-only="${target}.${BUILDER}.${OS}" \
"${RCP_DIR}/${OS}/."
}
#
# Run many builds for one OS
#
run_many() {
targets="${@:4}"
only=""
for target in ${targets};do
only="${only}-only=${target}.${BUILDER}.${OS} "
done
${PACKER} build ${PACKER_OPTS} -force \
-var-file="${RCP_DIR}/${OS}/${VERSION}.pkrvars.hcl" \
${only} \
"${RCP_DIR}/${OS}/."
}
#
# List what you can build
#
list() {
echo "You can build : "
for os in "${RCP_DIR}"/*; do
echo " * $(basename "${os}") :"
cd "${os}" || exit 100
for vfile in *.pkrvars.hcl; do
echo " - ${vfile}" | sed 's/\.pkrvars\.hcl$//'
done
cd - >/dev/null 2>&1 || exit 100
done
exit 0
}
#
# Run all builds
#
run_all() {
versions=""
for os in "${RCP_DIR}"/*; do
cd "${os}" || exit 100
for vfile in *.pkrvars.hcl; do
versions="${versions} $(echo "${vfile}" | sed 's/\.auto\.pkrvars\.hcl$//')"
done
OS=$(basename ${os})
cd - >/dev/null 2>&1 || exit 100
for ver in ${versions}; do
VERSION=${ver}
run
done
versions=""
done
}
#
# Start only ONE build
#
start_build() {
if [ -z "${OS}" ]; then
echo "OS Name is missing !"
echo " Supported OS are :"
printf " "
ls ${RCP_DIR}
exit 1
fi
if [ -z "${VERSION}" ]; then
echo "OS Version is missing !"
echo " ex: ./build debian 10"
exit 2
fi
run
}
case "${ACTION}" in
"list")
list
;;
"all")
initPacker "${2}" || exit 1
run_all
exit ${?}
;;
"start")
initPacker "${2}" || exit 1
start_build
;;
"run")
initPacker "${2}" || exit 1
run_build "$@"
;;
"runVMW")
initPacker "${2}" || exit 1
run_build "$@"
;;
"mrun")
initPacker "${2}" || exit 1
run_many "$@"
;;
*)
echo "You need to provide a valid action!"
echo " Supported actions are:"
echo " - start "
echo " - list "
echo " - all"
echo " - run"
exit 1
;;
esac

post-processors/sparsify.sh Executable file
@@ -0,0 +1,31 @@
#!/bin/sh
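# Usage (as invoked by the Packer "shell-local" post-processors):
#   /bin/sh post-processors/sparsify.sh <workdir> <image_version>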
if [ "${#}" -ne 2 ]; then
echo "Missing arguments"
exit 2
fi
WORKDIR=${1}
VERSION=${2}
findImages() {
find ${1} -iname "*.img"
}
sleep 5
for imageName in $(findImages ${WORKDIR}); do
if command -v virt-sparsify >/dev/null 2>&1; then
newName=$(echo $imageName | sed "s/.img/_${VERSION}.img/g")
virt-sparsify --compress --tmp ./ --format qcow2 ${imageName} ${newName}
if [ "${?}" -eq 0 ]; then
rm -rf ${imageName}
cd ${WORKDIR}
ln -s $(basename ${newName}) $(basename ${imageName})
echo ${newName} ${imageName}
cd -
fi
else
echo "Sparsify skipped 'virt-sparsify' command is missing"
fi
done

@@ -0,0 +1,6 @@
name = "alpine"
version = "3.16.2"
short_version = "3.16"
arch = "x86_64"
source_url = "https://dl-cdn.alpinelinux.org/alpine"
iso_cd_checksum = "6c7cb998ec2c8925d5a1239410a4d224b771203f916a18f8015f31169dd767a2"

@@ -0,0 +1,6 @@
name = "alpine"
version = "3.17.0"
short_version = "3.17"
arch = "x86_64"
source_url = "https://dl-cdn.alpinelinux.org/alpine"
iso_cd_checksum = "8d4d53bd34b2045e1e219b87887b0de8d217b6cd4a8b476a077429845a5582ba"

@@ -0,0 +1,6 @@
name = "alpine"
version = "3.18.2"
short_version = "3.18"
arch = "x86_64"
source_url = "https://dl-cdn.alpinelinux.org/alpine"
iso_cd_checksum = "6bc7ff54f5249bfb67082e1cf261aaa6f307d05f64089d3909e18b2b0481467f"

@@ -0,0 +1,39 @@
#Flavour base-onecontext
build {
name = "base-onecontext"
description = <<EOF
This builder builds a QEMU image from the base build output.
The goal here is to install one-context and provide a few basic tools
to be used as an "OpenNebula cloud image" with its provisioning.
EOF
source "source.qemu.alpine" {
output_directory = "${var.output_dir}/${var.version}/provisionned/one-context"
vm_name = "${local.output_name}-${var.version}-one-context.img"
iso_url = "${var.output_dir}/${var.version}/base/${local.output_name}-${var.version}.img"
iso_checksum = "none"
disk_size = 8000
disk_image = true
boot_command = [ "<enter><enter><wait>" ]
ssh_clear_authorized_keys = true
}
provisioner "file" {
destination = "/tmp/one-context.sh"
source = "${local.dirs.provisionning}/one-context.sh"
}
provisioner "shell" {
inline = [
"sh -cx 'sh /tmp/one-context.sh'"
]
}
post-processor "shell-local" {
inline = [
"/bin/sh ${local.dirs.post-processors}/sparsify.sh ${var.output_dir}/${var.version}/provisionned/one-context ${var.image_version}",
"ruby ${local.dirs.tools}/one-templates -t image -m 640 -T ${local.dirs.templates}/one/image/common.tpl -n ${local.output_name}-${var.version}-${build.name} -c '${local.output_name}-${var.version} base image' --image-file ${var.output_dir}/${var.version}/provisionned/one-context/${local.output_name}-${var.version}-one-context.img",
"ruby ${local.dirs.tools}/one-templates -t vm -m 640 -T ${local.dirs.templates}/one/vm/common.xml -n ${local.output_name}-${var.version}-${build.name} --image-name ${local.output_name}-${var.version}-${build.name}"
]
}
}

@@ -0,0 +1,93 @@
#Flavour docker
build {
name = "docker"
description = <<EOF
This builder builds a QEMU image from the base build output. The goal here is to install ${local.Docker.Name}
with its provisioning.
EOF
source "source.vmware-vmx.alpine" {
output_directory = "${var.output_dir}/${var.version}/provisionned/vmware/docker"
vm_name = "${local.output_name}-${var.version}-docker.img"
source_path = "${var.output_dir}/${var.version}/base/${local.output_name}-${var.version}.img.vmx"
boot_command = [ "<enter><enter><wait>" ]
ssh_clear_authorized_keys = true
}
source "source.qemu.alpine" {
output_directory = "${var.output_dir}/${var.version}/provisionned/${local.Docker.Name}"
vm_name = "${local.output_name}-${var.version}-${local.Docker.Name}.img"
iso_url = "${var.output_dir}/${var.version}/base/${local.output_name}-${var.version}.img"
iso_checksum = "none"
disk_size = 20480
disk_image = true
boot_command = [ "<enter><enter><wait>" ]
ssh_clear_authorized_keys = true
}
// Copy ssh Cadoles keys
provisioner "file" {
destination = "/tmp"
source = "${local.locations.provisionning}/ssh/cadoles/"
}
provisioner "shell" {
inline = [
"sh -cx 'cat /tmp/*.pub >> /root/.ssh/authorized_keys'",
"sh -cx 'chmod -R 600 /root/.ssh/authorized_keys'"
]
}
// Install templater and bootstraper
provisioner "shell" {
script = "${local.dirs.provisionning}/templater-install.sh"
}
// Copy configuration values on the image
provisioner "shell" {
inline = [
"sh -cx 'mkdir -p ${local.builder_config.TemplateDir}'",
"sh -cx 'mkdir -p ${local.builder_config.ValueDir}'"
]
}
// Copy configuration templates to the image
provisioner "file" {
destination = "${local.builder_config.TemplateDir}/"
source = "${local.dirs.templates}/conf/${build.name}/"
}
// Copy configuration values on the image
provisioner "file" {
destination = "${local.builder_config.ValueDir}/${build.name}.json"
content = "${jsonencode(local.Docker)}"
}
// Generate default configuration for docker
provisioner "shell" {
inline = [ "sh -cx '/usr/local/bin/btr -c ${local.builder_config.ValueDir}/ -t ${local.builder_config.TemplateDir}'" ]
}
// Install OpenNebula context tool
provisioner "shell" {
script = "${local.dirs.provisionning}/one-context.sh"
}
// Deploy the opennebula context script to manage configuration
provisioner "file" {
destination = "/etc/one-context.d/net-96-templater"
source = "${local.dirs.provisionning}/conf/one-context/net-96-templater"
}
provisioner "shell" {
inline = [ "sh -cx 'chmod +x /etc/one-context.d/net-96-templater'" ]
}
post-processor "shell-local" {
inline = [
"/bin/sh ${local.dirs.post-processors}/sparsify.sh ${var.output_dir}/${var.version}/provisionned/${local.Docker.Name} ${var.image_version}",
//"ruby ${local.dirs.tools}/one-templates -t image -m 640 -T ${local.dirs.templates}/one/image/common.tpl -n ${local.output_name}-${var.version}-${local.Docker.Name} -c '${local.Docker.Name} base image' --image-file ${var.output_dir}/${var.version}/provisionned/${local.Docker.Name}/${local.output_name}-${var.version}-${local.Docker.Name}.img",
//"ruby ${local.dirs.tools}/one-templates -t vm -m 640 -T ${local.dirs.templates}/one/vm/common.xml -n ${local.output_name}-${var.version}-${local.Docker.Name} --image-name ${local.output_name}-${var.version}-${local.Docker.Name}",
]
}
}

@@ -0,0 +1,76 @@
#Flavour emissary
build {
name = "emissary"
description = <<EOF
This builder builds a QEMU image from the base build output. The goal here is to install emissary
with its provisioning.
EOF
source "source.qemu.alpine" {
output_directory = "${var.output_dir}/${var.version}/provisionned/emissary"
vm_name = "${local.output_name}-${var.version}-emissary.img"
iso_url = "${var.output_dir}/${var.version}/base/${local.output_name}-${var.version}.img"
iso_checksum = "none"
disk_size = 10240
disk_image = true
boot_command = [ "<enter><enter><wait>" ]
ssh_clear_authorized_keys = true
}
// Install templater and bootstraper
provisioner "shell" {
script = "${local.dirs.provisionning}/templater-install.sh"
}
// Copy configuration values on the image
provisioner "shell" {
inline = [
"sh -cx 'mkdir -p ${local.builder_config.TemplateDir}'",
"sh -cx 'mkdir -p ${local.builder_config.ValueDir}'"
]
}
// Copy configuration templates to the image
provisioner "file" {
destination = "${local.builder_config.TemplateDir}/"
source = "${local.dirs.templates}/conf/${build.name}/"
}
// Copy configuration values on the image
provisioner "file" {
destination = "${local.builder_config.ValueDir}/${build.name}.json"
content = "${jsonencode(local.emissary)}"
}
// Generate default configuration for kubernetes
provisioner "shell" {
max_retries = 3
inline = [ "sh -cx '/usr/local/bin/btr -c ${local.builder_config.ValueDir}/ -t ${local.builder_config.TemplateDir}'" ]
}
provisioner "file" {
destination = "/tmp/${build.name}.sh"
source = "${local.dirs.provisionning}/${build.name}.sh"
}
provisioner "file" {
destination = "/tmp/one-context.sh"
source = "${local.dirs.provisionning}/one-context.sh"
}
provisioner "shell" {
inline = [
"sh -cx 'sh /tmp/one-context.sh'",
"sh -cx 'sh /tmp/${build.name}.sh'"
]
}
post-processor "shell-local" {
inline = [
"/bin/sh ${path.cwd}/post-processors/sparsify.sh ${var.output_dir}/${var.version}/provisionned/emissary ${var.image_version}",
"ruby ${local.dirs.tools}/one-templates -t image -m 640 -T ${local.dirs.templates}/one/image/common.tpl -n ${local.output_name}-${var.version}-emissary -c 'Emissary base image' --image-file ${var.output_dir}/${var.version}/provisionned/emissary/${local.output_name}-${var.version}-emissary.img",
"ruby ${local.dirs.tools}/one-templates -t vm -m 640 -T ${local.dirs.templates}/one/vm/emissary.xml -n ${local.output_name}-${var.version}-emissary --image-name ${local.output_name}-${var.version}-emissary"
]
}
}

@@ -0,0 +1,97 @@
#Flavour harbor
build {
name = "${local.Harbor.Name}"
description = <<EOF
This builder builds a QEMU image from the base build output. The goal here is to install harbor
with its provisioning.
EOF
source "source.qemu.alpine" {
output_directory = "${var.output_dir}/${var.version}/provisionned/${local.Config.Name}"
vm_name = "${local.output_name}-${var.version}-${local.Config.Name}.img"
iso_url = "${var.output_dir}/${var.version}/base/${local.output_name}-${var.version}.img"
iso_checksum = "none"
disk_size = 40960
disk_image = true
boot_command = [ "<enter><enter><wait>" ]
ssh_clear_authorized_keys = true
}
provisioner "file" {
destination = "/tmp/${build.name}.sh"
source = "${path.cwd}/provisionning/${var.name}/${build.name}.sh"
}
provisioner "file" {
destination = "/tmp/install-${build.name}.sh"
source = "${path.cwd}/provisionning/${build.name}/install.sh"
}
provisioner "file" {
destination = "/tmp/install-templater.sh"
source = "${path.cwd}/provisionning/templater/install.sh"
}
// Install OpenNebula context tool
provisioner "file" {
destination = "/tmp/one-context.sh"
source = "${path.cwd}/provisionning/${var.name}/one-context.sh"
}
// Deploy the opennebula context script to manage configuration
provisioner "file" {
destination = "/tmp/net-96-templater"
source = "${path.cwd}/provisionning/one-context/net-96-templater"
}
provisioner "shell" {
inline = [
"sh -cx 'sh /tmp/one-context.sh'",
"sh -cx 'sh /tmp/${build.name}.sh'",
"sh -cx 'sh /tmp/install-templater.sh'",
"sh -cx 'sh /tmp/install-${build.name}.sh'",
"sh -cx 'cp /tmp/net-96-templater /etc/one-context.d/net-96-templater'",
"sh -cx 'chmod +x /etc/one-context.d/net-96-templater'"
]
}
provisioner "file" {
name = "templater"
destination = "${local.Config.ConfigFiles[0].destination}"
content = templatefile("${path.cwd}/templates/conf/${build.name}/${local.Config.ConfigFiles[0].source}", local.Config)
}
// Create Builder directories on the image.
provisioner "shell" {
inline = [
"sh -cx 'mkdir -p ${local.builder_config.TemplateDir}/${build.name}'",
"sh -cx 'chown ${local.Config.User}:${local.Config.Group} ${local.builder_config.TemplateDir}/${build.name}'",
"sh -cx 'mkdir -p ${local.builder_config.ValueDir}/${build.name}'",
"sh -cx 'chown ${local.Config.User}:${local.Config.Group} ${local.builder_config.ValueDir}/${build.name}'",
"sh -cx 'mkdir -p ${local.Config.StorageRoot}'",
"sh -cx 'chown ${local.Config.User}:${local.Config.Group} ${local.Config.StorageRoot}'" ]
}
// Copy configuration template on the image
provisioner "file" {
destination = "${local.builder_config.TemplateDir}/${build.name}/${local.Config.ConfigFiles[0].source}"
source = "${path.cwd}/templates/conf/${build.name}/${local.Config.ConfigFiles[0].source}"
}
// Copy configuration values on the image
provisioner "file" {
destination = "${local.builder_config.ValueDir}/${build.name}/values.json"
content = "${jsonencode(local.Config)}"
}
post-processor "shell-local" {
name = "publish"
inline = [
"/bin/sh ${path.cwd}/post-processors/sparsify.sh ${var.output_dir}/${var.version}/provisionned/${build.name} ${var.image_version}",
"ruby ${path.cwd}/tools/one-templates -t image -T ${path.cwd}/templates/one/image/common.tpl -n ${local.output_name}-${var.version}-${build.name} -c '${build.name} base image' --image-file ${var.output_dir}/${var.version}/provisionned/${build.name}/${local.output_name}-${var.version}-${build.name}.img",
"ruby ${path.cwd}/tools/one-templates -t vm -T ${path.cwd}/templates/one/vm/${build.name}.xml -n ${local.output_name}-${var.version}-${build.name} --image-name ${local.output_name}-${var.version}-${build.name}",
]
}
}

@@ -0,0 +1,76 @@
#Flavour k3s
build {
name = "k3s"
description = <<EOF
This builder builds a QEMU image from the base build output. The goal here is to install k3s
with its provisioning.
EOF
source "source.qemu.alpine" {
output_directory = "${var.output_dir}/${var.version}/provisionned/k3s"
vm_name = "${local.output_name}-${var.version}-k3s.img"
iso_url = "${var.output_dir}/${var.version}/base/${local.output_name}-${var.version}.img"
iso_checksum = "none"
disk_size = 40960
disk_image = true
boot_command = [ "<enter><enter><wait>" ]
ssh_clear_authorized_keys = true
}
// Install templater and bootstraper
provisioner "shell" {
script = "${local.dirs.provisionning}/templater-install.sh"
}
// Copy configuration values on the image
provisioner "shell" {
inline = [
"sh -cx 'mkdir -p ${local.builder_config.TemplateDir}'",
"sh -cx 'mkdir -p ${local.builder_config.ValueDir}'"
]
}
// Copy configuration templates to the image
provisioner "file" {
destination = "${local.builder_config.TemplateDir}/"
source = "${local.dirs.templates}/conf/${build.name}/"
}
// Copy configuration values on the image
provisioner "file" {
destination = "${local.builder_config.ValueDir}/${build.name}.json"
content = "${jsonencode(local.K3S)}"
}
// Generate default configuration for kubernetes
provisioner "shell" {
max_retries = 3
inline = [ "sh -cx '/usr/local/bin/btr -c ${local.builder_config.ValueDir}/ -t ${local.builder_config.TemplateDir}'" ]
}
provisioner "file" {
destination = "/tmp/${build.name}.sh"
source = "${local.dirs.provisionning}/${build.name}.sh"
}
provisioner "file" {
destination = "/tmp/one-context.sh"
source = "${local.dirs.provisionning}/one-context.sh"
}
provisioner "shell" {
inline = [
"sh -cx 'sh /tmp/one-context.sh'",
"sh -cx 'sh /tmp/${build.name}.sh'"
]
}
post-processor "shell-local" {
inline = [
"/bin/sh ${path.cwd}/post-processors/sparsify.sh ${var.output_dir}/${var.version}/provisionned/k3s ${var.image_version}",
"ruby ${local.dirs.tools}/one-templates -t image -m 640 -T ${local.dirs.templates}/one/image/common.tpl -n ${local.output_name}-${var.version}-k3s -c 'k3s base image' --image-file ${var.output_dir}/${var.version}/provisionned/k3s/${local.output_name}-${var.version}-k3s.img",
"ruby ${local.dirs.tools}/one-templates -t vm -m 640 -T ${local.dirs.templates}/one/vm/k3s.xml -n ${local.output_name}-${var.version}-k3s --image-name ${local.output_name}-${var.version}-k3s"
]
}
}

@@ -0,0 +1,112 @@
#Flavour kubernetes
build {
name = "kubernetes"
description = <<EOF
This builder builds a QEMU image from the base build output. The goal here is to install ${local.Kubernetes.Name}
with its provisioning.
EOF
source "source.qemu.alpine" {
output_directory = "${var.output_dir}/${var.version}/provisionned/${local.Kubernetes.Name}"
vm_name = "${local.output_name}-${var.version}-${local.Kubernetes.Name}.img"
iso_url = "${var.output_dir}/${var.version}/base/${local.output_name}-${var.version}.img"
iso_checksum = "none"
disk_size = 20480
disk_image = true
boot_command = [ "<enter><enter><wait>" ]
ssh_clear_authorized_keys = true
}
// Install templater and bootstraper
provisioner "shell" {
script = "${local.locations.provisionning}/templater-install.sh"
}
// Copy configuration values on the image
provisioner "shell" {
inline = [
"sh -cx 'mkdir -p ${local.builder_config.TemplateDir}'",
"sh -cx 'mkdir -p ${local.builder_config.ValueDir}'"
]
}
// Copy configuration templates to the image
provisioner "file" {
destination = "${local.builder_config.TemplateDir}/"
source = "${local.locations.templates}/conf/${build.name}/"
}
// Copy configuration values on the image
provisioner "file" {
destination = "${local.builder_config.ValueDir}/${build.name}.json"
content = "${jsonencode(local.Kubernetes)}"
}
// Copy Sharemetrics script
provisioner "file" {
destination = "/etc/local.d/sharemetrics.start"
source = "${local.locations.provisionning}/conf/${build.name}/sharemetrics.start"
}
provisioner "file" {
destination = "/etc/local.d/initkubernetes.start"
source = "${local.locations.provisionning}/conf/${build.name}/initkubernetes.start"
}
provisioner "shell" {
inline = [
"chmod +x /etc/local.d/sharemetrics.start",
"chmod +x /etc/local.d/initkubernetes.start"
]
}
// Generate default configuration for kubernetes
provisioner "shell" {
max_retries = 3
inline = [ "sh -cx '/usr/local/bin/btr -c ${local.builder_config.ValueDir}/ -t ${local.builder_config.TemplateDir}'" ]
}
// Complete kubernetes install
provisioner "shell" {
expect_disconnect = true
max_retries = 6
script = "${local.locations.provisionning}/${build.name}.sh"
}
// Install OpenNebula context tool
provisioner "shell" {
script = "${local.locations.provisionning}/one-context.sh"
}
// Deploy the opennebula context script to manage configuration
provisioner "file" {
destination = "/etc/one-context.d/net-96-templater"
source = "${local.locations.provisionning}/conf/one-context/net-96-templater"
}
provisioner "shell" {
inline = [
"chmod +x /etc/one-context.d/net-96-templater"
]
}
provisioner "shell" {
inline = [
"service docker start",
"service containerd start",
"sleep 5",
"kubeadm config images pull" ]
}
post-processor "shell-local" {
inline = [
"/bin/sh ${path.cwd}/post-processors/sparsify.sh ${var.output_dir}/${var.version}/provisionned/${local.Kubernetes.Name} ${var.image_version}",
"ruby ${local.locations.tools}/one-templates -t image -m 640 -T ${local.locations.templates}/one/image/common.tpl -n ${local.output_name}-${var.version}-${local.Kubernetes.Name} -c '${local.Kubernetes.Name} base image' --image-file ${var.output_dir}/${var.version}/provisionned/${local.Kubernetes.Name}/${local.output_name}-${var.version}-${local.Kubernetes.Name}.img",
"ruby ${local.locations.tools}/one-templates -t vm -m 640 -T ${local.locations.templates}/one/vm/kubeleader.xml -n ${local.output_name}-${var.version}-${local.Kubernetes.Name}Leader --image-name ${local.output_name}-${var.version}-${local.Kubernetes.Name}",
"ruby ${local.locations.tools}/one-templates -t vm -m 640 -T ${local.locations.templates}/one/vm/kubemaster.xml -n ${local.output_name}-${var.version}-${local.Kubernetes.Name}Master --image-name ${local.output_name}-${var.version}-${local.Kubernetes.Name}",
"ruby ${local.locations.tools}/one-templates -t vm -m 640 -T ${local.locations.templates}/one/vm/kubeworker.xml -n ${local.output_name}-${var.version}-${local.Kubernetes.Name}Worker --image-name ${local.output_name}-${var.version}-${local.Kubernetes.Name}",
"ruby ${local.locations.tools}/one-templates -t service -m 640 -T ${local.locations.templates}/one/service/${build.name}-cluster.json -n ${build.name}-cluster-${local.output_name}-${var.version} --vm-name ${local.output_name}-${var.version}-${local.Kubernetes.Name}",
]
}
}

@@ -0,0 +1,6 @@
locals {
builder_config = {
TemplateDir = "/usr/share/builder/templates"
ValueDir = "/usr/share/builder/values"
}
}
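// Note: these two paths are the contract with the runtime templater: the
// flavour builds copy templates and values there, then render them with
// "btr -c <ValueDir> -t <TemplateDir>" (see the flavour provisioners).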

@@ -0,0 +1,83 @@
locals {
// Definition of the Docker service (templater compatible)
ServiceDocker = {
ConfigFiles = [
{
destination = "/etc/subuid"
source = "subuid.pktpl.hcl"
mode = "600"
owner = "root"
group = "root"
},
{
destination = "/etc/subgid"
source = "subgid.pktpl.hcl"
mode = "600"
owner = "root"
group = "root"
}
]
Repositories = {
AlpineEdgeTesting = {
type = "apk"
name = "testing"
url = "http://mirrors.ircam.fr/pub/alpine/edge/testing"
enabled = true
}
}
Packages = {
docker = {
name = "docker"
action = "install"
}
docker-rootless-extras = {
name = "docker-rootless-extras"
action = "install"
}
docker-compose = {
name = "docker-compose"
action = "install"
}
gpg = {
name = "gpg"
action = "install"
}
}
Daemons = {
docker = {
name = "docker"
type = "auto"
enabled = true
}
cgroups = {
name = "cgroups"
type = "auto"
enabled = true
}
local = {
name = "local"
type = "auto"
enabled = true
}
}
Vars = {
RootlessDocker = "true"
}
Users = {
dock = {
username = "dock"
group = "dock"
home = "/srv/dock"
shell = "/bin/nologin"
}
}
}
Docker = {
Name = "docker"
Globals = local.Globals
Services = {
Docker = local.ServiceDocker
}
}
}

@@ -0,0 +1,7 @@
locals {
Globals = {
Vars = {
PrometheusPort = "9090"
}
}
}

@@ -0,0 +1,22 @@
locals {
ServiceHarbor = {
ConfigFiles = [
{
destination = "/etc/harbor/harbor.yaml"
source = "habor.yaml.pktpl.hcl"
mod = "600"
}
]
AuthEnabled = false
User = "harbor"
Group = "harbor"
HarborDomain = "reg.cadoles.com"
}
Harbor = {
Name = "harbor"
Globals = local.Globals
Services = {
Harbor = local.ServiceHarbor
}
}
}

@@ -0,0 +1,79 @@
locals {
// Definition of the K3S service (templater compatible)
ServiceK3S = {
ConfigFiles = [
{
destination = "/etc/conf.d/k3s"
source = "k3s.conf.pkr.hcl"
mode = "600"
owner = "root"
group = "root"
}
]
Repositories = {
AlpineEdge = {
type = "apk"
name = "community"
url = "http://mirrors.ircam.fr/pub/alpine/edge/community"
enabled = true
}
AlpineEdgeTesting = {
type = "apk"
name = "testing"
url = "http://mirrors.ircam.fr/pub/alpine/edge/testing"
enabled = true
}
}
Packages = {
kubelet = {
name = "k3s"
action = "install"
}
kubeadm = {
name = "kubeadm"
action = "install"
}
kubectl = {
name = "kubectl"
action = "install"
}
uuidgen = {
name = "uuidgen"
action = "install"
}
}
Vars = {
ServerName = "kube"
ServerRole = "master"
DeployTraefik = false
}
Users = {}
Daemons = {
kubelet = {
name = "k3s"
type = "auto"
enabled = true
}
ntpd = {
name = "ntpd"
type = "auto"
enabled = true
}
local = {
name = "local"
type = "auto"
enabled = true
}
}
}
// Definition of the K3S full configuration (with all the services)
K3S = {
Name = "k3s"
Globals = local.Globals
Services = {
Docker = local.ServiceDocker
K3S = local.ServiceK3S
}
}
}

@@ -0,0 +1,90 @@
locals {
// Definition of the Kubernetes service (templater compatible)
ServiceKubernetes = {
ConfigFiles = []
Repositories = {
AlpineEdge = {
type = "apk"
name = "community"
url = "http://mirrors.ircam.fr/pub/alpine/edge/community"
enabled = true
}
AlpineEdgeTesting = {
type = "apk"
name = "testing"
url = "http://mirrors.ircam.fr/pub/alpine/edge/testing"
enabled = true
}
}
Packages = {
cni-plugin-flannel = {
name = "cni-plugin-flannel"
action = "install"
}
cni-plugins = {
name = "cni-plugins"
action = "install"
}
flannel = {
name = "flannel"
action = "install"
}
flannel-contrib-cni = {
name = "flannel-contrib-cni"
action = "install"
}
cilium = {
name = "cilium-cli"
action = "install"
}
kubelet = {
name = "kubelet"
action = "install"
}
kubeadm = {
name = "kubeadm"
action = "install"
}
kubectl = {
name = "kubectl"
action = "install"
}
uuidgen = {
name = "uuidgen"
action = "install"
}
}
Vars = {
ServerName = "kube"
ServerRole = "master"
}
Users = {}
Daemons = {
kubelet = {
name = "kubelet"
type = "auto"
enabled = true
}
ntpd = {
name = "ntpd"
type = "auto"
enabled = true
}
local = {
name = "local"
type = "auto"
enabled = true
}
}
}
// Definition of the Kubernetes full configuration (with all the services)
Kubernetes = {
Name = "kubernetes"
Globals = local.Globals
Services = {
Docker = local.ServiceDocker
Kubernetes = local.ServiceKubernetes
}
}
}

@@ -0,0 +1,126 @@
locals {
// Definition of the MatchBox service (templater compatible)
ServiceMatchBox = {
ConfigFiles = [
{
destination = "/etc/dnsmasq.d/pxe.conf"
source = "dnsmasq.d/ipxe.conf.pktpl.hcl"
mode = "600"
owner = "root"
group = "root"
},
{
destination = "/etc/dnsmasq-hosts.conf"
source = "dnsmasq.d/dnsmasq-hosts.conf.pktpl.hcl"
mode = "600"
owner = "dnsmasq"
group = "root"
},
{
destination = "/etc/conf.d/matchbox"
source = "conf.d/matchbox.conf.pktpl.hcl"
mode = "600"
owner = "root"
group = "root"
},
{
destination = "/etc/init.d/matchbox"
source = "init.d/matchbox.pktpl.hcl"
mode = "700"
owner = "root"
group = "root"
}
]
Repositories = {
AlpineEdgeTesting = {
type = "apk"
name = "testing"
url = "http://mirrors.ircam.fr/pub/alpine/edge/testing"
enabled = true
}
AlpineEdgeCommunity = {
type = "apk"
name = "testing"
url = "http://mirrors.ircam.fr/pub/alpine/edge/community"
enabled = true
}
}
Packages = {
dnsmasq = {
name = "dnsmasq"
action = "install"
}
terraform = {
name = "terraform"
action = "install"
}
git = {
name = "git"
action = "install"
}
kubectl = {
name = "kubectl"
action = "install"
}
gpg = {
name = "gpg"
action = "install"
}
}
Vars = {
PXE = {
DHCPMode = "proxy"
DNSDomain = "cadoles.com"
ListenInterface = "eth0"
GreetingMessage = "Cadoles PXE Boot Server"
DelayTime = "5"
BootingMessage = "Booting from network the Cadoles way"
DHCPRangeStart = ""
DHCPRangeEnd = ""
DHCPLeaseDuration = "1h"
TFTPRoot = "/var/lib/tftpboot"
}
MatchBox = {
Hostname = "mb.cadoles.com"
HTTPPort = "8080"
gRPCPort = "8081"
LogLevel = "info"
}
ETH0 = {
IP = ""
DNS = ""
GATEWAY = ""
}
Set = {
Hostname = "matchbox"
}
}
Users = {}
Daemons = {
matchbox = {
name = "matchbox"
type = "auto"
enabled = true
}
dnsmasq = {
name = "dnsmasq"
type = "auto"
enabled = true
}
local = {
name = "local"
type = "auto"
enabled = true
}
}
}
// Definition of the MatchBox full configuration (with all the services)
MatchBox = {
Name = "matchbox"
Globals = local.Globals
Services = {
MatchBox = local.ServiceMatchBox
}
}
}

@@ -0,0 +1,89 @@
locals {
ServiceNuoHarbor = {
ConfigFiles = [
{
destination = "/etc/harbor/harbor.yml"
source = "harbor.yml.pktpl.hcl"
mode = "600"
owner = "root"
group = "root"
}
]
Vars = {
AuthEnabled = false
User = "harbor"
Group = "harbor"
HarborHTTPPort = "80"
HarborHTTPSPort = "443"
HarborSSLCert = "/etc/ssl/certs/harbor.crt"
HarborSSLPrivKey = "/etc/ssl/certs/harbor.key"
HarborDomain = "reg.k8s.in.nuonet.fr"
HarborAdminPassword = "ChangeMeAsSoonAsPossible"
HarborDBPassword = "WeNeedToBeAbleToManagePasswords"
NIC = [
{
Name = "eth0"
IP = "192.168.160.10"
Mask = "255.255.254.0"
Gateway = "192.168.160.1"
}
]
DNS = [ "192.168.160.10" ]
Set = { Hostname = "reg.k8s.in.nuonet.fr" }
}
Repositories = {
AlpineEdgeTesting = {
type = "apk"
name = "testing"
url = "http://mirrors.ircam.fr/pub/alpine/edge/testing"
enabled = true
}
}
Packages = {
vmtools = {
name = "open-vm-tools"
action = "install"
}
mkcert = {
name = "mkcert"
action = "install"
}
gpg-agent = {
name = "gpg-agent"
action = "install"
}
ncurses = {
name = "ncurses"
action = "install"
}
}
Daemons = {
vm-tools = {
name = "open-vm-tools"
type = "auto"
enabled = true
}
harbor = {
name = "harbor"
type = "auto"
enabled = true
}
}
Users = {
harbor = {
username = "harbor"
group = "harbor"
home = "/srv/harbor"
shell = "/bin/nologin"
}
}
}
NuoHarbor = {
Name = "nuo-harbor"
Globals = local.Globals
Services = {
Docker = local.ServiceDocker
Harbor = local.ServiceNuoHarbor
}
}
}

@@ -0,0 +1,176 @@
locals {
// Definition of the NuoMatchBox service (templater compatible)
ServiceNuoMatchBox = {
ConfigFiles = [
{
destination = "/etc/dnsmasq.d/pxe.conf"
source = "dnsmasq.d/ipxe.conf.pktpl.hcl"
mode = "600"
owner = "root"
group = "root"
},
{
destination = "/etc/dnsmasq-hosts.conf"
source = "dnsmasq.d/dnsmasq-hosts.conf.pktpl.hcl"
mode = "600"
owner = "dnsmasq"
group = "root"
},
{
destination = "/etc/conf.d/matchbox"
source = "conf.d/matchbox.conf.pktpl.hcl"
mode = "600"
owner = "root"
group = "root"
},
{
destination = "/etc/init.d/matchbox"
source = "init.d/matchbox.pktpl.hcl"
mode = "700"
owner = "root"
group = "root"
},
{
destination = "/etc/network/interfaces"
source = "network/interfaces.pktpl.hcl"
mode = "700"
owner = "root"
group = "root"
},
{
destination = "/etc/resolv.conf"
source = "resolv.conf.pktpl.hcl"
mode = "600"
owner = "root"
group = "root"
},
{
destination = "/etc/hostname"
source = "hostname.pktpl.hcl"
mode = "600"
owner = "root"
group = "root"
}
]
Repositories = {
AlpineEdgeTesting = {
type = "apk"
name = "testing"
url = "http://mirrors.ircam.fr/pub/alpine/edge/testing"
enabled = true
}
AlpineEdgeCommunity = {
type = "apk"
name = "testing"
url = "http://mirrors.ircam.fr/pub/alpine/edge/community"
enabled = true
}
}
Packages = {
dnsmasq = {
name = "dnsmasq"
action = "install"
}
terraform = {
name = "terraform"
action = "install"
}
git = {
name = "git"
action = "install"
}
kubectl = {
name = "kubectl"
action = "install"
}
gpg = {
name = "gpg"
action = "install"
}
vmtools = {
name = "open-vm-tools"
action = "install"
}
bash = {
name = "bash"
action = "install"
}
}
Vars = {
PXE = {
DHCPMode = "standalone"
DNSDomain = "k8s.in.nuonet.fr"
ListenInterface = "eth0"
GreetingMessage = "Nuo PXE Boot Server"
DelayTime = "5"
BootingMessage = "Booting from network the Nuo way"
DHCPRangeStart = "192.168.160.20"
DHCPRangeEnd = "192.168.160.60"
DHCPLeaseDuration = "48h"
TFTPRoot = "/var/lib/tftpboot"
}
DNSMasq = {
Hosts = [
{
Name = "reg.k8s.in.nuonet.fr"
IP = "192.168.160.11"
}
]
}
MatchBox = {
Hostname = "mb.k8s.in.nuonet.fr"
HTTPPort = "8080"
gRPCPort = "8081"
LogLevel = "info"
}
NIC = [
{
Name = "eth0"
IP = "192.168.160.10"
Mask = "255.255.254.0"
Gateway = "192.168.160.1"
}
]
DNS = [ "10.253.50.105" ]
Hosts = [
{
Name = "harbor.k8s.in.nuonet.fr"
IP = "192.168.160.11"
}
]
Set = { Hostname = "mb.k8s.in.nuonet.fr" }
}
Users = {}
Daemons = {
vm-tools = {
name = "open-vm-tools"
type = "auto"
enabled = true
}
matchbox = {
name = "matchbox"
type = "auto"
enabled = true
}
dnsmasq = {
name = "dnsmasq"
type = "auto"
enabled = true
}
local = {
name = "local"
type = "auto"
enabled = true
}
}
}
// Definition of the NuoMatchBox full configuration (with all the services)
NuoMatchBox = {
Name = "nuo-matchbox"
Globals = local.Globals
Services = {
NuoMatchBox = local.ServiceNuoMatchBox
}
}
}

@@ -0,0 +1,30 @@
# "timestamp" template function replacement
locals {
locations = {
recipes = "${path.cwd}/recipes/${var.name}"
templates = "${path.cwd}/recipes/${var.name}/templates"
provisionning = "${path.cwd}/recipes/${var.name}/provisionning"
post-processors = "${path.cwd}/recipes/${var.name}/post-processor"
tools = "${path.cwd}/tools"
}
dirs = local.locations
timestamp = regex_replace(timestamp(), "[- TZ:]", "")
output_name = "${var.name}"
source_checksum_url = "file:${var.source_url}/${var.version}/${var.arch}/iso-cd/SHA256SUMS"
source_iso = "${var.source_url}/v${var.short_version}/releases/${var.arch}/alpine-virt-${var.version}-${var.arch}.iso"
source_checksum = "${var.iso_cd_checksum}"
ssh_user = "root"
ssh_password = "PbkRc1vup7Wq5n4r"
disk_size = 8000
memory = 512
installOpts = {
hostname = var.name
user = "eole"
disk_device = "/dev/vda"
}
installOptsVMWare = {
hostname = var.name
user = "eole"
disk_device = "/dev/sda"
}
}

@@ -0,0 +1,89 @@
#Flavour base
build {
name = "base"
description = <<EOF
This builder builds a QEMU image from an Alpine "virt" CD ISO file.
EOF
source "vmware-iso.alpine" {
output_directory = "${var.output_dir}/${var.version}/base"
vm_name = "${local.output_name}-${var.version}.img"
disk_size = 8000
iso_url = "${local.source_iso}"
iso_checksum = "${var.iso_cd_checksum}"
http_content = {
"/ssh-packer-pub.key" = data.sshkey.install.public_key
"/install.conf" = templatefile("${local.locations.templates}/conf/install/awnsers.pktpl.hcl", local.installOptsVMWare)
}
boot_command = [
"<wait10s>root<enter>",
"<wait1s><enter>",
"<wait1s>setup-interfaces<enter><wait1s><enter><wait1s><enter><wait1s><enter>",
"<wait1s>ifup eth0<enter>",
"<wait1s>mkdir -p .ssh<enter>",
"<wait1s>wget http://{{.HTTPIP}}:{{.HTTPPort}}/ssh-packer-pub.key -O .ssh/authorized_keys<enter><wait1s>",
"<wait1s>chmod 600 .ssh/authorized_keys<enter>",
"<wait1s>wget http://{{.HTTPIP}}:{{.HTTPPort}}/install.conf<enter><wait1s>",
"<wait1s>setup-sshd -c openssh -k .ssh/authorized_keys<enter><wait1s>",
]
}
source "qemu.alpine" {
output_directory = "${var.output_dir}/${var.version}/base"
vm_name = "${local.output_name}-${var.version}.img"
disk_size = 8000
iso_url = "${local.source_iso}"
iso_checksum = "${var.iso_cd_checksum}"
http_content = {
"/ssh-packer-pub.key" = data.sshkey.install.public_key
"/install.conf" = templatefile("${local.locations.templates}/conf/install/awnsers.pktpl.hcl", local.installOpts)
}
boot_command = [
"<wait5s>root<enter>",
"<wait1s><enter>",
"<wait1s>setup-interfaces<enter><wait1s><enter><wait1s><enter><wait1s><enter>",
"<wait1s>ifup eth0<enter>",
"<wait1s>mkdir -p .ssh<enter>",
"<wait1s>wget http://{{.HTTPIP}}:{{.HTTPPort}}/ssh-packer-pub.key -O .ssh/authorized_keys<enter><wait1s>",
"<wait1s>chmod 600 .ssh/authorized_keys<enter>",
"<wait1s>wget http://{{.HTTPIP}}:{{.HTTPPort}}/install.conf<enter><wait1s>",
"<wait1s>setup-sshd -c openssh -k .ssh/authorized_keys<enter><wait1s>",
]
}
provisioner "shell" {
pause_before = "1s"
expect_disconnect = true # Because the previous step has rebooted the machine
script = "${local.locations.provisionning}/${var.name}-${var.short_version}-install.sh"
valid_exit_codes = [ 0, 141 ]
}
provisioner "shell" {
pause_before = "1s"
inline = [ "sh -cx 'mkdir -p ${local.builder_config.TemplateDir}'" ]
}
provisioner "shell" {
pause_before = "10s"
script = "${local.locations.provisionning}/${var.name}-${var.short_version}-postinstall.sh"
}
provisioner "shell" {
script = "${local.locations.provisionning}/letsencrypt.sh"
}
provisioner "file" {
destination = "/etc/conf.d/chronyd"
source = "${local.locations.templates}/conf/conf.d/"
}
post-processor "manifest" {
keep_input_artifact = true
}
post-processor "shell-local" {
inline = [
"/bin/sh ${path.cwd}/post-processors/sparsify.sh ${var.output_dir}/${var.version}/base ${var.image_version}"
]
}
}

@@ -0,0 +1,99 @@
#Flavour matchbox
build {
name = "matchbox"
description = <<EOF
This builder builds a QEMU image from the base build output. The goal here is to install matchbox
with its provisioning.
EOF
source "source.qemu.alpine" {
output_directory = "${var.output_dir}/${var.version}/provisionned/matchbox"
vm_name = "${local.output_name}-${var.version}-matchbox.img"
iso_url = "${var.output_dir}/${var.version}/base/${local.output_name}-${var.version}.img"
iso_checksum = "none"
disk_size = 40960
disk_image = true
boot_command = [ "<enter><enter><wait>" ]
ssh_clear_authorized_keys = true
}
// Install templater and bootstraper
provisioner "shell" {
script = "${local.dirs.provisionning}/templater-install.sh"
}
// Copy configuration values on the image
provisioner "shell" {
inline = [
"sh -cx 'mkdir -p ${local.builder_config.TemplateDir}'",
"sh -cx 'mkdir -p ${local.builder_config.ValueDir}'"
]
}
// Copy configuration templates to the image
provisioner "file" {
destination = "${local.builder_config.TemplateDir}/"
source = "${local.dirs.templates}/conf/${build.name}/"
}
// Copy configuration values on the image
provisioner "file" {
destination = "${local.builder_config.ValueDir}/${build.name}.json"
content = "${jsonencode(local.MatchBox)}"
}
// Copy matchbox boot provisionning script
provisioner "file" {
destination = "/etc/local.d/initmatchbox.start"
source = "${local.locations.provisionning}/conf/${build.name}/initmatchbox.start"
}
// Copy tftp provisionning script
provisioner "file" {
destination = "/etc/local.d/inittftp.start"
source = "${local.locations.provisionning}/conf/${build.name}/inittftp.start"
}
// Generate default configuration for kubernetes
provisioner "shell" {
max_retries = 3
inline = [ "sh -cx '/usr/local/bin/btr -c ${local.builder_config.ValueDir}/ -t ${local.builder_config.TemplateDir}'" ]
}
provisioner "file" {
destination = "/tmp/${build.name}.sh"
source = "${local.dirs.provisionning}/${build.name}.sh"
}
provisioner "file" {
destination = "/tmp/one-context.sh"
source = "${local.dirs.provisionning}/one-context.sh"
}
provisioner "shell" {
inline = [
"sh -cx 'sh /tmp/one-context.sh'",
"sh -cx 'sh /tmp/${build.name}.sh'"
]
}
provisioner "file" {
destination = "/etc/one-context.d/net-96-templater"
source = "${local.dirs.provisionning}/one-context/net-96-templater"
}
provisioner "shell" {
inline = [
"chmod +x /etc/local.d/initmatchbox.start",
"chmod +x /etc/local.d/inittftp.start",
"chmod +x /etc/one-context.d/net-96-templater"
]
}
post-processor "shell-local" {
inline = [
"/bin/sh ${path.cwd}/post-processors/sparsify.sh ${var.output_dir}/${var.version}/provisionned/matchbox ${var.image_version}"
]
}
}

@@ -0,0 +1,136 @@
#Flavour nuo-harbor
build {
name = "nuo-harbor"
description = <<EOF
This builder builds a QEMU image from the base build output. The goal here is to install harbor
with its provisioning.
EOF
source "source.vmware-vmx.alpine" {
output_directory = "${var.output_dir}/${var.version}/provisionned/vmware/nuo-harbor"
vm_name = "${local.output_name}-${var.version}-nuo-harbor.img"
source_path = "${var.output_dir}/${var.version}/base/${local.output_name}-${var.version}.img.vmx"
// BUG https://github.com/hashicorp/packer-plugin-vmware/issues/119
disk_additional_size = [ 81920 ]
//
boot_command = [ "<enter><enter><wait>" ]
ssh_clear_authorized_keys = true
vmx_data = {
"scsi1.pcislotnumber" = "16"
"scsi1.present" = "TRUE"
"scsi1.virtualdev" = "lsilogic"
"scsi1:0.filename" = "disk-1.vmdk"
"scsi1:0.present" = "TRUE"
"scsi1:0.redo" = ""
}
vmx_data_post = {
"memsize" = "4096",
"numvcpus" = "2",
}
}
source "source.qemu.alpine" {
output_directory = "${var.output_dir}/${var.version}/provisionned/${local.Config.Name}"
vm_name = "${local.output_name}-${var.version}-${local.Config.Name}.img"
iso_url = "${var.output_dir}/${var.version}/base/${local.output_name}-${var.version}.img"
iso_checksum = "none"
disk_size = 81920
disk_image = true
boot_command = [ "<enter><enter><wait>" ]
ssh_clear_authorized_keys = true
}
provisioner "shell" {
script = "${local.dirs.provisionning}/tools/additionnal-disk"
environment_vars = [
"PV_DEVICE=/dev/sdb",
"VG_NAME=data",
"LV_NAME=harbor-data",
"LV_MTP=/srv/harbor",
"LV_FS=ext4"
]
}
// Install templater and bootstraper
provisioner "shell" {
script = "${local.dirs.provisionning}/templater-install.sh"
}
// Copy static configurations to /etc
provisioner "file" {
destination = "/etc"
source = "${local.dirs.provisionning}/conf/${build.name}/"
}
// Copy configuration values on the image
provisioner "shell" {
inline = [
"sh -cx 'mkdir -p ${local.builder_config.TemplateDir}'",
"sh -cx 'mkdir -p ${local.builder_config.ValueDir}'"
]
}
// Copy configuration templates to the image
provisioner "file" {
destination = "${local.builder_config.TemplateDir}/"
source = "${local.dirs.templates}/conf/${build.name}/"
}
// Copy Docker configuration templates to the image
provisioner "file" {
destination = "${local.builder_config.TemplateDir}/"
source = "${local.dirs.templates}/conf/docker/"
}
// Copy configuration values on the image
provisioner "file" {
destination = "${local.builder_config.ValueDir}/${build.name}.json"
content = "${jsonencode(local.NuoHarbor)}"
}
provisioner "file" {
destination = "/etc/local.d/templater.start"
source = "${local.locations.provisionning}/conf/common/templater.start"
}
// Copy ssh Cadoles keys
provisioner "file" {
destination = "/tmp"
source = "${local.locations.provisionning}/ssh/cadoles/"
}
// Copy CNOUS SSH keys
provisioner "file" {
destination = "/tmp"
source = "${local.locations.provisionning}/ssh/cnous/"
}
provisioner "shell" {
inline = [
"sh -cx 'cat /tmp/*.pub >> /root/.ssh/authorized_keys'",
"sh -cx 'chmod -R 600 /root/.ssh/authorized_keys'"
]
}
// Generate default configuration for the server
provisioner "shell" {
max_retries = 3
inline = [ "sh -cx '/usr/local/bin/btr -c ${local.builder_config.ValueDir}/ -t ${local.builder_config.TemplateDir}'" ]
}
provisioner "shell" {
environment_vars = [
"HARBOR_SSL_CERT=${local.NuoHarbor.Services.Harbor.Vars.HarborSSLCert}",
"HARBOR_SSL_KEY=${local.NuoHarbor.Services.Harbor.Vars.HarborSSLPrivKey}",
"HARBOR_DOMAIN=${local.NuoHarbor.Services.Harbor.Vars.HarborDomain}"
]
script = "${local.dirs.provisionning}/${build.name}.sh"
}
provisioner "shell" {
inline = [
"chmod +x /etc/local.d/templater.start"
]
}
}

@@ -0,0 +1,120 @@
#Flavour nuo-matchbox
build {
name = "nuo-matchbox"
description = <<EOF
This builder builds a QEMU image from the base build output. The goal here is to install nuo-matchbox
with its provisioning.
EOF
source "source.vmware-vmx.alpine" {
output_directory = "${var.output_dir}/${var.version}/provisionned/vmware/nuo-matchbox"
vm_name = "${local.output_name}-${var.version}-nuo-matchbox.img"
source_path = "${var.output_dir}/${var.version}/base/${local.output_name}-${var.version}.img.vmx"
boot_command = [ "<enter><enter><wait>" ]
ssh_clear_authorized_keys = true
}
source "source.qemu.alpine" {
output_directory = "${var.output_dir}/${var.version}/provisionned/nuo-matchbox"
vm_name = "${local.output_name}-${var.version}-nuo-matchbox.img"
iso_url = "${var.output_dir}/${var.version}/base/${local.output_name}-${var.version}.img"
iso_checksum = "none"
disk_size = 40960
disk_image = true
boot_command = [ "<enter><enter><wait>" ]
ssh_clear_authorized_keys = true
}
// Install templater and bootstraper
provisioner "shell" {
script = "${local.dirs.provisionning}/templater-install.sh"
}
// Copy configuration values on the image
provisioner "shell" {
inline = [
"sh -cx 'mkdir -p ${local.builder_config.TemplateDir}'",
"sh -cx 'mkdir -p ${local.builder_config.ValueDir}'"
]
}
// Copy configuration templates to the image
provisioner "file" {
destination = "${local.builder_config.TemplateDir}/"
source = "${local.dirs.templates}/conf/${build.name}/"
}
// Copy configuration values on the image
provisioner "file" {
destination = "${local.builder_config.ValueDir}/${build.name}.json"
content = "${jsonencode(local.NuoMatchBox)}"
}
// Copy nuo-matchbox boot provisionning script
provisioner "file" {
destination = "/etc/local.d/initmatchbox.start"
source = "${local.locations.provisionning}/conf/${build.name}/initmatchbox.start"
}
// Copy ssh Cadoles keys
provisioner "file" {
destination = "/tmp"
source = "${local.locations.provisionning}/ssh/cadoles/"
}
// Copy CNOUS SSH keys
provisioner "file" {
destination = "/tmp"
source = "${local.locations.provisionning}/ssh/cnous/"
}
provisioner "shell" {
inline = [
"sh -cx 'cat /tmp/*.pub >> /root/.ssh/authorized_keys'",
"sh -cx 'chmod -R 600 /root/.ssh/authorized_keys'"
]
}
provisioner "file" {
destination = "/etc/local.d/templater.start"
source = "${local.locations.provisionning}/conf/common/templater.start"
}
// Copy tftp provisionning script
provisioner "file" {
destination = "/etc/local.d/inittftp.start"
source = "${local.locations.provisionning}/conf/${build.name}/inittftp.start"
}
// Generate default configuration for kubernetes
provisioner "shell" {
max_retries = 3
inline = [ "sh -cx '/usr/local/bin/btr -c ${local.builder_config.ValueDir}/ -t ${local.builder_config.TemplateDir}'" ]
}
provisioner "file" {
destination = "/tmp/${build.name}.sh"
source = "${local.dirs.provisionning}/${build.name}.sh"
}
provisioner "shell" {
inline = [
"sh -cx 'sh /tmp/${build.name}.sh'"
]
}
provisioner "shell" {
inline = [
"chmod +x /etc/local.d/initmatchbox.start",
"chmod +x /etc/local.d/templater.start",
"chmod +x /etc/local.d/inittftp.start"
]
}
post-processor "shell-local" {
inline = [
"/bin/sh ${path.cwd}/post-processors/sparsify.sh ${var.output_dir}/${var.version}/provisionned/nuo-matchbox ${var.image_version}"
]
}
}

@@ -0,0 +1,16 @@
packer {
required_plugins {
sshkey = {
version = ">= 1.0.1"
source = "github.com/ivoronin/sshkey"
}
vmware = {
version = ">= 1.0.8"
source = "github.com/hashicorp/vmware"
}
}
}
data "sshkey" "install" {
type = "ed25519"
}

@@ -0,0 +1,31 @@
#!/bin/sh
if [ "${#}" -ne 2 ]; then
echo "Missing arguments"
exit 2
fi
WORKDIR=${1}
VERSION=${2}
findImages() {
find ${1} -iname "*.img"
}
sleep 5
for imageName in $(findImages ${WORKDIR}); do
if command -v virt-sparsify >/dev/null 2>&1; then
newName=$(echo $imageName | sed "s/.img/_${VERSION}.img/g")
virt-sparsify --compress --tmp ./ --format qcow2 ${imageName} ${newName}
if [ "${?}" -eq 0 ]; then
rm -rf ${imageName}
cd ${WORKDIR}
ln -s $(basename ${newName}) $(basename ${imageName})
echo ${newName} ${imageName}
cd -
fi
else
echo "Sparsify skipped 'virt-sparsify' command is missing"
fi
done

@@ -0,0 +1,16 @@
#!/bin/sh
#set -xeo pipefail
# Run the installer
yes | setup-alpine -e -f install.conf
# Copy ssh keys
echo "Copy packer ssh key"
mount /dev/vg0/lv_root /mnt
cp -rp .ssh /mnt/root/
sync
umount /mnt
echo "Rebooting the host after install"
reboot -nf

@@ -0,0 +1,23 @@
#!/bin/sh
set -xeo pipefail
apk add --no-cache wget curl jq haveged ca-certificates rsyslog bash shadow
rc-update add haveged boot
rc-update add rsyslog boot
rc-update add sshd boot
# Generate root password
pass=$(openssl rand -base64 32 | tee -a .secret)
chmod 600 .secret
echo -e "${pass}\n${pass}" | passwd
# Remove expect package
# Prevent logs spamming like "process '/sbin/getty -L 0 ttyS0 vt100' (pid 2516) exited. Scheduling for restart."
# We don't need access to ttyS0
sed -i 's@^\(ttyS0::respawn.*\)@#\1@' /etc/inittab
usermod --password $( echo "Cadoles;21" | openssl passwd -1 -stdin) root
sync

@@ -0,0 +1 @@
alpine-3.16-install.sh

@@ -0,0 +1 @@
alpine-3.16-postinstall.sh

@@ -0,0 +1 @@
alpine-3.16-install.sh

@@ -0,0 +1 @@
alpine-3.16-postinstall.sh

@@ -0,0 +1,104 @@
#!/usr/bin/env bash
#
# Generate all the configuration files
# Get all the values from the VLS_DIR
# Process each template from the TPL_DIR with this values
#
ENV_FILE=${ENV_FILE:-/var/run/one-context/one_env}
TPL_DIR="/usr/share/builder/templates"
VLS_DIR="/usr/share/builder/values"
CONFIG=""
if [ -f "${ENV_FILE}" ]; then
. ${ENV_FILE}
fi
BTR="$(command -v btr)"
if [ "${?}" -ne 0 ]; then
echo "Warning: Nothing to do the templater is not installed"
exit 0
fi
if [ ! -e "${TPL_DIR}" ]; then
echo "Error: The template dir is missing (${TPL_DIR})"
exit 1
fi
if [ ! -e "${VLS_DIR}" ]; then
echo "Error: The template dir is missing (${VLS_DIR})"
exit 1
fi
jsonQuery() {
local data="${1}"
local query="${2}"
echo "${data}" | jq -cr "${query}"
}
# NAME: @jsonMerge
# AIM: Merge two json structures
# NOTES:
# The last one has the last word:
# if the same key exists in A and B,
# the value from the B structure is kept.
# PARAMS:
# $1: original JSON Structure
# $2: updated JSON Structure
jsonMerge() {
local data="${1}"
local data2="${2}"
echo "${data} ${data2}" | jq -cr -s ".[0] * .[1]"
}
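# Example: jsonMerge '{"a":1,"b":1}' '{"b":2}' prints {"a":1,"b":2}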
jsonUpdateVal() {
local json="${1}"
local key="${2}"
local value="${3}"
echo "${json}" | jq --arg a "${value}" "${key} = \$a"
}
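# getValues merges every *.json file found in VLS_DIR, then lets the
# environment (typically the OpenNebula context) override nested service
# variables: for a value such as .Services.MatchBox.Vars.MatchBox.Hostname,
# an environment variable MATCHBOX_HOSTNAME (key and subkey upper-cased,
# joined with "_") takes precedence when set.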
getValues() {
local values=""
for file in $(find ${VLS_DIR} -name "*.json"); do
values="${values}$(cat ${file})"
done
if [ -n "${RAW_CONFIG}" ]; then
values="$(jsonMerge ${values} ${RAW_CONFIG})"
fi
for svc in $(echo ${values} | jq -cr '.Services|keys[]'); do
for key in $(echo ${values} | jq -cr ".Services.${svc}.Vars|keys[]"); do
ukey=${key^^}
vkeys="$(echo ${values} | jq -cr ".Services.${svc}.Vars.${key}|keys[]" 2>/dev/null)"
if [ ${?} -eq 0 ]; then
for var in $(echo ${values} | jq -cr ".Services.${svc}.Vars.${key}|keys[]"); do
uvar=${var^^}
val=$(eval echo "\$${ukey}_${uvar}")
if [ -n "${val}" ]; then
values=$(jsonUpdateVal "${values}" ".Services.${svc}.Vars.${key}.${var}" "${val}")
fi
done
else
values=$(jsonUpdateVal "${values}" ".Services.${svc}.Vars.${key}" "${!ukey}")
fi
done
done
echo ${values}
}
processTemplates() {
${BTR} -t ${TPL_DIR} -c "${1}"
}
VALUES=$(getValues)
file=$(mktemp)
echo "${VALUES}" > "${file}"
processTemplates "${file}"
rm -rf "${file}"

@@ -0,0 +1,64 @@
#!/sbin/openrc-run
: ${SUBCFGDIR:=/srv}
DOCKER_COMPOSE_UP_ARGS=${DOCKER_COMPOSE_UP_ARGS-"--no-build --no-recreate --no-deps"}
SUBSVC="${SVCNAME#*.}"
[ -z "${SUBSVC}" ] && exit 1
: ${SUBCFG:="${SUBCFGDIR}/${SUBSVC}/docker-compose.yml"}
DOCOCMD="/usr/bin/docker-compose"
export COMPOSE_HTTP_TIMEOUT=300
description="Manage docker services defined in ${SUBCFG}"
extra_commands="configtest build"
description_configtest="Check configuration via \"docker-compose -f ${SUBCFG} config\""
description_build="Run \"docker-compose -f ${SUBCFG} build\""
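# Usage sketch (assumption): create one symlink per compose project, e.g.
#   ln -s <this-script> /etc/init.d/<this-script>.myapp
# so that SVCNAME ends in ".myapp" and SUBCFG resolves to ${SUBCFGDIR}/myapp/docker-compose.yml.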
depend() {
need localmount net docker
use dns
after docker
}
configtest() {
if ! [ -f "${SUBCFG}" ]; then
eerror "The config file ${SUBCFG} does not exist!"
return 1
fi
if "${DOCOCMD}" -f "${SUBCFG}" config >&/dev/null; then
einfo "config: ok"
else
eerror "config: error"
return 1
fi
}
build() {
configtest || return 1
ebegin "Building dockerservice ${SUBSVC}"
"${DOCOCMD}" -f "${SUBCFG}" build
eend $?
}
start() {
configtest || return 1
ebegin "Starting dockerservice ${SUBSVC}"
sleep 5
"${DOCOCMD}" -f "${SUBCFG}" up -d ${DOCKER_COMPOSE_UP_ARGS}
eend $?
}
stop() {
ebegin "Stopping dockerservice ${SUBSVC}"
"${DOCOCMD}" -f "${SUBCFG}" stop --timeout=300
eend $?
}
status() {
if [ "$("${DOCOCMD}" -f "${SUBCFG}" top | wc -l)" -gt "0" ]; then
einfo "status: started"
else
einfo "status: stopped"
return 3
fi
}

@@ -0,0 +1,181 @@
#!/bin/sh
ENV_FILE=${ENV_FILE:-/var/run/one-context/one_env}
LOG_FILE="/var/log/initkubernets.log"
FIRST_BOOT="/var/run/firstboot.flag"
infoLog() {
echo "Info: $@" | tee -a ${LOG_FILE}
}
errorLog() {
echo "Error: $@" | tee -a ${LOG_FILE}
}
waitReadyState() {
local vmID="${1}"
local timeout="${2}"
local tick=0
while true ;do
local ready=$(onegate vm show ${vmID} --json | jq -rc ".VM.USER_TEMPLATE.READY")
if [ "${ready}" = "YES" ];then
return 0
elif [ "${timeout}" -eq "${tick}" ];then
return ${timeout}
else
sleep 1
tick=$((tick+1))
fi
done
}
returnToken() {
infoLog "Returning tokens"
local caSecretKey="${1}"
local caToken=$(openssl x509 -in /etc/kubernetes/pki/ca.crt -noout -pubkey | openssl rsa -pubin -outform DER 2>/dev/null | sha256sum | cut -d' ' -f1)
local kubeToken=$(kubeadm token list | awk '/authentication,signing.*The default*/ {print $1}')
local masterAddr=$(awk -F '/' '/server/ {print $3}' /etc/kubernetes/admin.conf)
if [ -n "${ONEGATE_ENDPOINT}" ];then
infoLog "Onegate detected"
data="READY=YES"
data="${data} MASTER_ADDR=${masterAddr}"
data="${data} MASTER_TOKEN=${kubeToken}"
data="${data} MASTER_CA_TOKEN=sha256:${caToken}"
data="${data} MASTER_CA_SECRET_KEY=${caSecretKey}"
onegate vm update --data "${data}"
infoLog "Onegate data seted"
else
infoLog "Onegate is not present"
echo "${masterAdd} ${kubeToken} ${caToken}" >> /root/kube.token
infoLog "Tokens are available at /root/kube.token"
fi
}
joinCluster() {
local master="${MASTER_ADDR}"
local token="${MASTER_TOKEN}"
local caToken="${MASTER_CA_TOKEN}"
local caSecretKey="${MASTER_CA_SECRET_KEY}"
local sname="${SERVICE_NAME}"
if [ -n "${ONEGATE_ENDPOINT}" ];then
local masterID=$(onegate service show --json | jq -c '.SERVICE.roles[] | select(.name == "leader") | .nodes[0].deploy_id')
if [ "${?}" -eq 0 ]; then
waitReadyState ${masterID} 600
if [ "${?}" -ne 0 ];then
errorLog "Master node is node ready after 600s"
return 3
fi
local masterInfo=$(onegate vm show ${masterID} --json | \
jq -cr ".VM.USER_TEMPLATE.MASTER_ADDR, .VM.USER_TEMPLATE.MASTER_TOKEN, .VM.USER_TEMPLATE.MASTER_CA_TOKEN,.VM.USER_TEMPLATE.MASTER_CA_SECRET_KEY, .VM.TEMPLATE.NIC[0].IP")
master=$(echo ${masterInfo} | cut -d " " -f 1)
token=$(echo ${masterInfo} | cut -d " " -f 2)
caToken=$(echo ${masterInfo} | cut -d " " -f 3)
caSecretKey=$(echo ${masterInfo} | cut -d " " -f 4)
masterIP=$(echo ${masterInfo} | cut -d " " -f 5)
sname=$(onegate service show --json | jq -cr ".SERVICE.name")
fi
# Setting dns resolution for cluster
echo "${masterIP} ${sname}" >> /etc/hosts
onegate service show --json | jq -rc '.SERVICE.roles[].nodes[].vm_info.VM | .TEMPLATE.NIC[].IP + " " + .NAME' >> /etc/hosts
fi
if [ -n "${master}" ] & [ -n "${token}" ] & [ -n "${caToken}" ];then
opts="--node-name $(hostname -f)"
opts="${opts} --token ${token}"
opts="${opts} --discovery-token-ca-cert-hash ${caToken}"
if [ -n "${1}" ];then
opts="${opts} --control-plane"
opts="${opts} --certificate-key ${caSecretKey}"
fi
opts="${opts} ${master}"
kubeadm join ${opts} | tee -a "${LOG_FILE}"
else
errorLog "Something is missing, can't join the cluster:"
errorLog " Master addr: [${master}]"
errorLog " Master token: [${token}]"
errorLog " Master CA token: [${caToken}]"
return 3
fi
}
getServiceName() {
local sname=$(onegate service show --json | jq -cr ".SERVICE.name")
local tmout=30
local tick=0
while true ;do
if [ -z "${sname}" ];then
sname=$(onegate service show --json | jq -cr ".SERVICE.name")
else
echo ${sname}
return 0
fi
sleep 1
tick=$((tick+1))
if [ ${tmout} -eq ${tick} ];then
hostname -f
return 3
fi
done
}
initLeader() {
sname="$(hostname -f)"
if [ -n "${ONEGATE_ENDPOINT}" ];then
sname=$(getServiceName)
sip=$(onegate vm show --json | jq -rc ".VM.TEMPLATE.NIC[0].IP")
echo "${sip} ${sname} $(hostname -f)" >> /etc/hosts
onegate service show --json | jq -rc '.SERVICE.roles[].nodes[].vm_info.VM | .TEMPLATE.NIC[].IP + " " + .NAME' >> /etc/hosts
fi
caSecretKey=$(date | sha256sum | awk '{print $1}')
infoLog "Kubernetes init started"
kubeadm init --pod-network-cidr=10.244.0.0/16 \
--node-name="${SET_HOSTNAME}" \
--control-plane-endpoint "${sname}:6443" \
--upload-certs --certificate-key "${caSecretKey}" | tee -a "${LOG_FILE}"
infoLog "Kubernetes init ended"
infoLog "Configuring kubectl"
mkdir -p /root/.kube
ln -s /etc/kubernetes/admin.conf /root/.kube/config
infoLog "kubectl configured"
infoLog "Installing cilium"
sleep 20
kubectl config view --minify -o jsonpath='{.clusters[].name}'
sleep 20
cilium install --helm-set 'cni.binPath=/usr/libexec/cni' --wait | tee -a "${LOG_FILE}"
infoLog "Cilium is installed"
returnToken "${caSecretKey}"
}
initKube() {
if [ "${SERVER_ROLE}" == "leader" ];then
initLeader
elif [ "${SERVER_ROLE}" == "worker" ];then
joinCluster
elif [ "${SERVER_ROLE}" == "master" ];then
joinCluster "${SERVER_ROLE}"
fi
touch ${FIRST_BOOT}
infoLog "Kubernetes cluster init is finished"
}
if [ -f "${ENV_FILE}" ]; then
. "${ENV_FILE}"
fi
if [ -f "${FIRST_BOOT}" ];then
exit 0
else
uuidgen > /etc/machine-id
swapoff -a # Make sure swap is disabled
initKube &
fi

View File

@ -0,0 +1,3 @@
#!/bin/sh
mount --make-rshared /

View File

@ -0,0 +1,25 @@
#!/bin/sh
FL_VERSIONS="current 3374.2.0"
MATCHBOX_DIR="/var/lib/matchbox"
ASSETS_DIR="${MATCHBOX_DIR}/assets/"
GPG_FNAME="Flatcar_Image_Signing_Key.asc"
GPG_KEYS_URL="https://www.flatcar.org/security/image-signing-key/"
cd /tmp
curl -L -O ${GPG_KEYS_URL}/${GPG_FNAME}
gpg --import --keyid-format LONG ${GPG_FNAME}
cd -
echo "Provisionning matchbox with flatcar images"
tout=30
for version in ${FL_VERSIONS}; do
for i in $(seq 1 ${tout});do
echo " * ${FL_VERSIONS} stable image (try ${i})"
/usr/local/bin/get-flatcar stable ${version} ${ASSETS_DIR}
if [[ "${?}" -eq 0 ]]; then
break
fi
done
done

View File

@ -0,0 +1,10 @@
#!/bin/sh
dest="${1}"
ipxeEFISource="http://boot.ipxe.org/ipxe.efi"
kpxeSource="http://boot.ipxe.org/undionly.kpxe"
cd "${dest}"
wget "${ipxeEFISource}"
wget "${kpxeSource}"

View File

@ -0,0 +1 @@
harbor

View File

@ -0,0 +1 @@
matchbox

View File

@ -0,0 +1,13 @@
#!/bin/sh
CONF="/etc/conf.d/jenkins-slave"
if [ -e "/etc/jenkins-slave.conf" ]; then
CONF="/etc/jenkins-slave.conf"
fi
TOTAL_MEMORY=$(awk '/MemTotal/ { printf "%sg", int($2/1024/1024)+1 }' /proc/meminfo)
sed -i "s|^JENKINS_SLAVE_NAME=.*$|JENKINS_SLAVE_NAME='slave-$ETH0_IP'|" "${CONF}"
sed -i "s|^JENKINS_SLAVE_USERNAME=.*$|JENKINS_SLAVE_USERNAME='$JENKINS_SLAVE_USERNAME'|" "${CONF}"
sed -i "s|^JENKINS_SLAVE_PASSWORD=.*$|JENKINS_SLAVE_PASSWORD='$JENKINS_SLAVE_PASSWORD'|" "${CONF}"
sed -i "s|^JENKINS_MASTER_URL=.*$|JENKINS_MASTER_URL='$JENKINS_MASTER_URL'|" "${CONF}"
sed -i "s|^JENKINS_SLAVE_LABELS=.*$|JENKINS_SLAVE_LABELS='docker docker-compose mem-$TOTAL_MEMORY $JENKINS_SLAVE_LABELS'|" "${CONF}"

View File

@ -0,0 +1,31 @@
#!/usr/bin/env bash
ENV_FILE=${ENV_FILE:-/var/run/one-context/one_env}
# $TOKENTXT is available only through the env. file
# shellcheck disable=SC1090
if [ -f "${ENV_FILE}" ]; then
. "${ENV_FILE}"
fi
###
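# Expected context variables (hypothetical example values), usually defined in
# the VM template CONTEXT section and exported through ${ENV_FILE}:
#   GITLAB_URL=https://gitlab.example.com GITLAB_TOKEN=<registration token>
#   GITLAB_EXECUTOR=shell GITLAB_RUNNER_NAME=runner-01 GITLAB_TAG_LIST=docker,alpine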
if [ -n "${GITLAB_URL}" ]; then
if command -v gitlab-runner; then
if [ -n "${GITLAB_SHELL}" ]; then
opts="--shell=${GITLAB_SHELL}"
fi
# shellcheck disable=SC2086
gitlab-runner register \
--non-interactive \
--url="${GITLAB_URL}" \
--registration-token="${GITLAB_TOKEN}" \
--executor="${GITLAB_EXECUTOR}" \
--description="${GITLAB_RUNNER_NAME}" \
--tag-list="${GITLAB_TAG_LIST}" \
--locked=false \
--access-level=not_protected \
--run-untagged=false \
"${opts}"
fi
fi

View File

@ -0,0 +1,80 @@
#!/usr/bin/env bash
#
# Generate all the configuration files
# Get all the values from the VLS_DIR
# Process each template from the TPL_DIR with this values
#
ENV_FILE=${ENV_FILE:-/var/run/one-context/one_env}
TPL_DIR="/usr/share/builder/templates"
VLS_DIR="/usr/share/builder/values"
CONFIG=""
[ -f "${ENV_FILE}" ] && . "${ENV_FILE}"
BTR="$(command -v btr)"
if [ "${?}" -ne 0 ]; then
echo "Warning: Nothing to do the templater is not installed"
exit 0
fi
if [ ! -e "${TPL_DIR}" ]; then
echo "Error: The template dir is missing (${TPL_DIR})"
exit 1
fi
if [ ! -e "${VLS_DIR}" ]; then
echo "Error: The template dir is missing (${VLS_DIR})"
exit 1
fi
jsonQuery() {
local data="${1}"
local query="${2}"
echo "${data}" | jq -cr "${query}"
}
# NAME: @jsonMerge
# AIM: Merge two json structures
# NOTES:
# The last one has the last word:
# if you have the same key in A and B
# this keeps the value of the B structure.
# PARAMS:
# $1: original JSON Structure
# $2: updated JSON Structure
jsonMerge() {
local data="${1}"
local data2="${2}"
echo "${data} ${data2}" | jq -cr -s ".[0] * .[1]"
}
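# Example (hypothetical values):
#   jsonMerge '{"a":1,"b":2}' '{"b":3}'   # -> {"a":1,"b":3}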
getValues() {
local values=""
for file in $(find ${VLS_DIR} -name "*.json"); do
values="${values}$(cat ${file})"
done
if [ -n "${RAW_CONFIG}" ]; then
values="$(jsonMerge ${values} ${RAW_CONFIG})"
fi
for key in $(echo ${values} | jq -cr '.|keys[]'); do
ukey=${key^^}
if [ -n "${!ukey}" ]; then
values="$(jsonMerge "${values}" "{\"${key}\":\"${!ukey}\"}")"
fi
done
echo ${values}
}
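# Note: any key of the merged JSON can be overridden at boot time by exporting
# the same name in upper case, e.g. a "hostname" key is replaced by the value
# of $HOSTNAME when that variable is set in the context environment.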
processTemplates() {
${BTR} -t ${TPL_DIR} -c "${1}"
}
VALUES=$(getValues)
echo ${VALUES}
processTemplates "${VALUES}"

View File

@ -0,0 +1,21 @@
#!/usr/bin/env bash
ENV_FILE=${ENV_FILE:-/var/run/one-context/one_env}
# $TOKENTXT is available only through the env. file
# shellcheck disable=SC1090
if [ -f "${ENV_FILE}" ]; then
. "${ENV_FILE}"
fi
###
if [ -n "${K3S_ROLE}" ]; then
if [ "${K3S_ROLE}" = "server" ]; then
rc-update add dnsmasq default
service dnsmasq start
rc-update add k3s default
service k3s start
fi
fi

View File

@ -0,0 +1,38 @@
#!/bin/sh
HARBOR_VERSION="2.8.2"
HARBOR_SOURCE_URL="https://github.com/goharbor/harbor/releases/download/v${HARBOR_VERSION}/"
HARBOR_INSTALLER="harbor-offline-installer-v${HARBOR_VERSION}.tgz"
HARBOR_INSTALLER_ASC="${HARBOR_INSTALLER}.asc"
export TERM=xterm
gpg --keyserver hkps://keyserver.ubuntu.com --receive-keys 644FF454C0B4115C
cd /srv
wget -q ${HARBOR_SOURCE_URL}${HARBOR_INSTALLER}
wget -q ${HARBOR_SOURCE_URL}${HARBOR_INSTALLER_ASC}
gpg -v --keyserver hkps://keyserver.ubuntu.com --verify ${HARBOR_INSTALLER}.asc
if [ $? -ne 0 ]; then
echo "Harbor sources ${HARBOR_SOURCE_URL}${HARBOR_INSTALLER} are corrupt"
exit 3
fi
tar xzvf ${HARBOR_INSTALLER}
if [ ! -f ${HARBOR_SSL_CERT} ];then
mkcert -install
mkcert -cert-file ${HARBOR_SSL_CERT} -key-file ${HARBOR_SSL_KEY} ${HARBOR_DOMAIN}
fi
cd harbor
ln -s /etc/harbor/harbor.yml .
service docker start
sleep 5
./prepare
./install.sh --with-notary --with-trivy

View File

@ -0,0 +1,5 @@
#!/bin/sh
echo "export KUBECONFIG=/etc/rancher/k3s/k3s.yaml" >> /root/.profile
exit 0

View File

@ -0,0 +1,18 @@
#!/bin/sh
mount --make-rshared /
modprobe br_netfilter
uuidgen > /etc/machine-id
sysctl -w net.bridge.bridge-nf-call-iptables=1
# Remove swap
sed -i '/swap/d' /etc/fstab
swapoff -a
#lvremove -y /dev/vg0/lv_swap
#lvextend -y -r -l +100%FREE /dev/vg0/lv_root

View File

@ -0,0 +1,26 @@
#!/bin/sh
set -eo pipefail
DESTDIR=/usr/local/share/ca-certificates
UPDATE_CERTS_CMD=update-ca-certificates
CERTS="$(cat <<EOF
https://letsencrypt.org/certs/isrgrootx1.pem
https://letsencrypt.org/certs/isrg-root-x2.pem
https://letsencrypt.org/certs/lets-encrypt-r3.pem
https://letsencrypt.org/certs/lets-encrypt-e1.pem
https://letsencrypt.org/certs/lets-encrypt-r4.pem
https://letsencrypt.org/certs/lets-encrypt-e2.pem
EOF
)"
cd "$DESTDIR"
for cert in $CERTS; do
echo "Downloading '$cert'..."
filename=$(basename "$cert")
wget --tries=10 --timeout=30 -O "$filename" "$cert"
#openssl x509 -in "$filename" -inform PEM -out "$filename.crt"
done
$UPDATE_CERTS_CMD

View File

@ -0,0 +1,39 @@
#!/bin/sh
VERSION=0.10.0
ARCH=amd64
BIN="matchbox"
FILENAME="matchbox-v${VERSION}-linux-${ARCH}.tar.gz"
URL="https://github.com/poseidon/matchbox/releases/download/v${VERSION}/${FILENAME}"
MATCHBOX_DIR="/var/lib/matchbox"
ASSETS_DIR="${MATCHBOX_DIR}/assets/"
TFTP_DIR="/var/lib/tftpboot"
MATCHBOX_USER="matchbox"
FL_VERSIONS="current 3374.2.0"
apk add wget
echo "Downloading matchbox"
cd /tmp
wget -q --show-progress "${URL}"
tar -xzvf "${FILENAME}"
cd ./matchbox-v${VERSION}-linux-${ARCH}
echo "Installing matchbox"
cp ${BIN} /usr/local/bin
echo "Installing get-flatcar"
cp ./scripts/get-flatcar /usr/local/bin
chmod +x /usr/local/bin/get-flatcar
adduser "${MATCHBOX_USER}"
mkdir -p "${ASSETS_DIR}"
mkdir -p "${TFTP_DIR}"
chown -R "${MATCHBOX_USER}:${MATCHBOX_USER}" "${MATCHBOX_DIR}"
chown -R "${MATCHBOX_USER}:${MATCHBOX_USER}" "${ASSETS_DIR}"
chown -R "${MATCHBOX_USER}:${MATCHBOX_USER}" "${ASSETS_DIR}"
ls -lhaR ${ASSETS_DIR}
cp -rp ./scripts/tls /root
exit "${?}"

View File

@ -0,0 +1 @@
harbor.sh

View File

@ -0,0 +1 @@
matchbox.sh

View File

@ -0,0 +1,12 @@
#!/bin/sh
set -e
ONE_CONTEXT_VERSION="6.4.0"
ONE_CONTEXT_PKG_VERSION="1"
PKG="one-context-${ONE_CONTEXT_VERSION}-r${ONE_CONTEXT_PKG_VERSION}.apk"
PKG_URL="https://github.com/OpenNebula/addon-context-linux/releases/download/v${ONE_CONTEXT_VERSION}/${PKG}"
cd /tmp || exit 3
wget -q --no-check-certificate ${PKG_URL}
apk add --allow-untrusted --no-cache ${PKG}

View File

@ -0,0 +1,102 @@
#!/usr/bin/env bash
#
# Generate all the configuration files
# Get all the values from the VLS_DIR
# Process each template from the TPL_DIR with this values
#
ENV_FILE=${ENV_FILE:-/var/run/one-context/one_env}
TPL_DIR="/usr/share/builder/templates"
VLS_DIR="/usr/share/builder/values"
CONFIG=""
[ -f "${ENV_FILE}" ] && . "${ENV_FILE}"
BTR="$(command -v btr)"
if [ "${?}" -ne 0 ]; then
echo "Warning: Nothing to do the templater is not installed"
exit 0
fi
if [ ! -e "${TPL_DIR}" ]; then
echo "Error: The template dir is missing (${TPL_DIR})"
exit 1
fi
if [ ! -e "${VLS_DIR}" ]; then
echo "Error: The template dir is missing (${VLS_DIR})"
exit 1
fi
jsonQuery() {
local data="${1}"
local query="${2}"
echo "${data}" | jq -cr "${query}"
}
# NAME: @jsonMerge
# AIM: Merge two json structures
# NOTES:
# The last one has the last word:
# if you have the same key in A and B
# this keeps the value of the B structure.
# PARAMS:
# $1: original JSON Structure
# $2: updated JSON Structure
jsonMerge() {
local data="${1}"
local data2="${2}"
echo "${data} ${data2}" | jq -cr -s ".[0] * .[1]"
}
jsonUpdateVal() {
local json="${1}"
local key="${2}"
local value="${3}"
echo "${json}" | jq --arg a "${value}" "${key} = \$a"
}
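# Example (hypothetical values); note that jq --arg always injects the new
# value as a JSON string:
#   jsonUpdateVal '{"a":{"b":1}}' '.a.b' 2   # -> {"a":{"b":"2"}}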
getValues() {
local values=""
for file in $(find ${VLS_DIR} -name "*.json"); do
values="${values}$(cat ${file})"
done
if [ -n "${RAW_CONFIG}" ]; then
values="$(jsonMerge ${values} ${RAW_CONFIG})"
fi
for svc in $(echo ${values} | jq -cr '.Services|keys[]'); do
for key in $(echo ${values} | jq -cr ".Services.${svc}.Vars|keys[]"); do
ukey=${key^^}
vkeys="$(echo ${values} | jq -cr \".Services.${svc}.Vars.${key}\|keys[]\")"
if [ ${?} -eq 0 ]; then
for var in $(echo ${values} | jq -cr ".Services.${svc}.Vars.${key}|keys[]"); do
uvar=${var^^}
val=$(eval echo "\$${ukey}_${uvar}")
if [ -n "${val}" ]; then
values=$(jsonUpdateVal "${values}" ".Services.${svc}.Vars.${key}.${var}" "${val}")
fi
done
else
values=$(jsonUpdateVal "${values}" ".Services.${svc}.Vars.${key}" "${!ukey}")
fi
done
done
echo ${values}
}
processTemplates() {
${BTR} -t ${TPL_DIR} -c "${1}"
}
VALUES=$(getValues)
file=$(mktemp)
echo "${VALUES}" > "${file}"
processTemplates "${file}"
rm -rf "${file}"

View File

@ -0,0 +1,21 @@
#!/usr/bin/env bash
ENV_FILE=${ENV_FILE:-/var/run/one-context/one_env}
# $TOKENTXT is available only through the env. file
# shellcheck disable=SC1090
if [ -f "${ENV_FILE}" ]; then
. "${ENV_FILE}"
fi
###
if [ -n "${K3S_ROLE}" ]; then
if [ "${K3S_ROLE}" = "server" ]; then
rc-update add dnsmasq default
service dnsmasq start
rc-update add k3s default
service k3s start
fi
fi

View File

@ -0,0 +1 @@
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDDph3zh6ojSvH44k13z9B6xj+Hargo3uzkxnYv5s5NI4yagNuBXEc3aS++KdocND+FtVfLK+iVE8qHo2bvmpMmVkqU6WU2apN7DfIP0QGLlSGeo+UOZ9hGeEDlgVO4AOnZKZ5kPGBEPZ84JXuE9CmhKfwEVCK8w3B8XQttA8alFl4A4/4F14x2w4njsSLY1H3b0qah7hgYKU5zHIGLg8Lxx+1BxGIF0l5n5m5rqAskRNaF+aYbs0CcWHv49bPK0sJJ0qPV2r2sq8BlzuZFHExnZRIxpsIXdce4Bm4rdlGi7tBmmurLk4OOtDkwvhD0LMaNJf10k6QLSmRUTVzgsYz/dmGxopbMtwwIXkwi014uSZgi8wAuznXx5I4j2TUGPZHOVf+1iw/yaxWlgTVOSoX7ZxyhDgW5cCgZZGNzU5UWe0vUuVTB+hfSMj50/Q6+Vi92/mDMbPhm4nBoVzD5DT15mB+yGyN45Ej61m0JzVUyZexfvVaffEug1/u5dnwilP0WGKr4i2OXxOXtvSdAs5rlZjvppZk6IxRCwXIcPwEFL97ZrQZAxlVS5Nh+ZnlSwTe3zfQhzHj1ao0AdCAHFPUEdoUPJhSb0OjyCvZ9XZ1KCkXhuhuN/3IUhuoWl4soNCeC3KmU/USx1wda438Exj0hM1mTyBZScDPGyD9nw78DGw== Philippe Caseiro

View File

@ -0,0 +1 @@
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDZxr8C81Dm5Zl2AtDzTVa8hFs04aV1z8ANrXYVHnLf7gEG4c1BI9iWbm94gVCQT4IvoKR5oZxjxGnx1a7VaX6h6dt33+p/s2IJiwG+9/DykPnImw+ALTcnMcyrwOYh68jnQIOGkYzK/VaHRzrvFNuoVWIU+FqfN+sW+bLQWi9v/K5oiup83xQBze6kjMEL2PT48bJwT/dQgP5cnTTEYwcOK/Yes1Cmb+VqjAs5B3uiHDoch10fy4b4duuALozPGhgoOfTLqe9Ekbt8PdIhUzGxFCw79W7IBA9vw79tYBy4B2et8Zb9sf+sMmxPINDkouYmfSnU0PjNjida7Tii2IEWbrb/qbSkRNcyIBmpGKz6VnSIvomv4FA9dGkOLYRyvTjAM6Shy5aiGV8F7T9hMxm3zGDjiVseyPVtMdSjM2SCx95uPCH5oSrj8M1OIjC2D+w3DsmTPFvTjA1gmKEYnXfFj82DvO+wDcbb6/DF2qS6y5rNpdnPWDb57iBqKeZISQ5x+h8arV0U3yItHoi7z4Cb51V29pdBE0xgFx5DE5akuPO3RC+BP0CK242HBdb94YXQCfmoQ1dV59mvu0ObAhP4CH/efOqONHXjTG9eurQyJWUr8yYO9DI7HkQHwvYDS7xuEO9yvs7gizm22FOTcxBPc4M/KFhPfnUs7Nyfw6I0Nw== vfebvre@cadoles.com

View File

@ -0,0 +1 @@
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOsoXFfQcqFp6+5QbB1o1ZpjCGeiPMM9aOK2DoZoMM/7 nicolas.melin@cnous.fr

View File

@ -0,0 +1 @@
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCwyKvtyfZibpHNDDsfg7N6PHPnv9AzA2PowGd7iqF6YRv6CgGPnUixWE791bmekr57TR1QwW58aSEPSQMfLBwo0OwZ7GXYbOb9Fdb6WHAUJHSyMNsFvakgjq0g7TERMw3UksiYpUBCLgvWhF5jNjKsXgK3LyMUVqJs9KlUBt6elxy3CWoMYaWVJTQwXqLEbvr7W9F1rb9PQi80vxcSZXgk5XPPZH4vh7oN7GLB5UwaTFRh4lcup0xnV938gSgLxttPg4t5li5cmvXXMgtCrIDj7JPh9Cic+UXo80cV14nOpX23nuu408Veys/4p5tYiYFCg6NnUtW2dJrfyga9W1h6nc/6JaY8aXdoE+pi7lL7XrMvJPQxVYdwA9rPUBSZAIOmZQQx2aKFMsXocyVXQDzLQyg8lAF9gbMkjXH7DluXd+s0OAdijW9VFxhjutojaC76vhH+ZqSq511vdCTuq+6juW/By/pYQRtKiL1jJqfQoC+JU8RmOVOml5ciT7I0OM/0dakdIMYINX1FaRuSYb8wm0k3pKh+PGmMigja5lY7Bv8M89gRRw+8bJ42h5XkR0Jd04Wagd9eFXvaLa9OdarwF5rE2d6NM5Gfr2wJ4XuDMC7C3r/b6U3sZr6CWvQ5URrXS9OLtZG09DtEGIIuMcu0pgqclitVDi06Ffz5dZMnVQ== olivier.perrot@cnous.fr

View File

@ -0,0 +1,23 @@
#!/bin/sh
set -ex
TOOL_DIR="${1:-/usr/local/bin}"
TOOL_USER="${2:-root}"
TOOL_GROUP="${3:-root}"
ATTACHMENT_URL="https://forge.cadoles.com/attachments/"
installTool() {
NAME="${1}"
URL="${2}"
curl -k -o ${TOOL_DIR}/${NAME} ${URL}
chmod +x ${TOOL_DIR}/${NAME}
}
apk add curl
# Install templater
installTool "tpr" "https://forge.cadoles.com/attachments/242b3cba-8d07-4b89-80ab-7c12253a8524"
# Install bootstraper
installTool "btr" "https://forge.cadoles.com/attachments/e8442b2a-2065-4282-b4a4-648681fa044c"

View File

@ -0,0 +1,27 @@
#!/bin/sh
#
# Quick and dirty script to add disk space
# It creates a new PV (with the additionnal disk),
# a new VG and a new LV with 100% disk space
# The names and devices are provided with env variables:
# - PV_DEVICE : The /dev/xxx device
# - VG_NAME: The new vg name
# - LV_NAME: Then new lv name
# - LV_MTP: The mount point for the FS created on the LV
# - LV_FS: The fstype of the new FS
#
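# Example invocation (hypothetical device and names):
#   PV_DEVICE=/dev/vdb VG_NAME=vgdata LV_NAME=lvdata \
#   LV_MTP=/srv/data LV_FS=ext4 sh ./add-disk.sh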
if [ -e ${PV_DEVICE} ]; then
pvcreate ${PV_DEVICE}
vgcreate ${VG_NAME} ${PV_DEVICE}
lvcreate -Ay -l 100%FREE -n ${LV_NAME} ${VG_NAME}
mkfs.${LV_FS} /dev/${VG_NAME}/${LV_NAME}
if [ ! -d ${LV_MTP} ]; then
mkdir -p ${LV_MTP}
fi
mount /dev/${VG_NAME}/${LV_NAME} ${LV_MTP}
echo "/dev/${VG_NAME}/${LV_NAME} ${LV_MTP} ${LV_FS} rw,relatime 0 1" >> /etc/fstab
else
echo "${PV_DEVICE} is missing"
exit 3
fi

View File

@ -0,0 +1,99 @@
source qemu "alpine" {
cpus = 1
memory = "${local.memory}"
accelerator = "kvm"
vnc_bind_address = "0.0.0.0"
headless = true
# Serve the `http` directory via HTTP, used for preseeding the installer.
http_port_min = 9990
http_port_max = 9999
# SSH ports to redirect to the VM being built
host_port_min = 2222
host_port_max = 2229
# This user is configured in the preseed file.
ssh_username = "${local.ssh_user}"
ssh_private_key_file = data.sshkey.install.private_key_path
ssh_wait_timeout = "1000s"
shutdown_command = "/sbin/poweroff"
# Builds a compact image
disk_compression = true
disk_discard = "unmap"
skip_compaction = false
disk_detect_zeroes = "unmap"
format = "qcow2"
boot_wait = "5s"
}
source "vmware-iso" "alpine" {
cpus = 1
disk_type_id = 0
memory = "${local.memory}"
vnc_bind_address = "0.0.0.0"
headless = true
# Serve the `http` directory via HTTP, used for preseeding the installer.
http_port_min = 9990
http_port_max = 9999
# SSH ports to redirect to the VM being built
#host_port_min = 2222
#host_port_max = 2229
# This user is configured in the preseed file.
ssh_username = "${local.ssh_user}"
ssh_private_key_file = data.sshkey.install.private_key_path
ssh_wait_timeout = "1000s"
shutdown_command = "/sbin/poweroff"
# Builds a compact image
#disk_compression = true
#disk_discard = "unmap"
skip_compaction = false
#disk_detect_zeroes = "unmap"
format = "ova"
boot_wait = "5s"
}
source "vmware-vmx" "alpine" {
disk_type_id = 0
vnc_bind_address = "0.0.0.0"
headless = true
# Serve the `http` directory via HTTP, used for preseeding the installer.
http_port_min = 9990
http_port_max = 9999
# SSH ports to redirect to the VM being built
#host_port_min = 2222
#host_port_max = 2229
# This user is configured in the preseed file.
ssh_username = "${local.ssh_user}"
ssh_private_key_file = data.sshkey.install.private_key_path
ssh_wait_timeout = "1000s"
shutdown_command = "/sbin/poweroff"
# Builds a compact image
#disk_compression = true
#disk_discard = "unmap"
skip_compaction = false
#disk_detect_zeroes = "unmap"
format = "ova"
boot_wait = "5s"
}

View File

@ -0,0 +1,6 @@
# /etc/conf.d/chronyd
CFGFILE="/etc/chrony/chrony.conf"
FAST_STARTUP=yes
ARGS=""
# vrf e.g 'vrf-mgmt'
#vrf=""

View File

@ -0,0 +1,6 @@
# Configuration file of Harbor
# The IP address or hostname to access admin UI and registry service.
# DO NOT use localhost or 127.0.0.1, because Harbor needs to be accessed by external clients.
hostname: ${Vars.HarborDomain}

View File

@ -0,0 +1,3 @@
%{ if Vars.RootlessDocker }
docker:231072:65536
%{ endif }

View File

@ -0,0 +1,265 @@
# Configuration file of Harbor
# The IP address or hostname to access admin UI and registry service.
# DO NOT use localhost or 127.0.0.1, because Harbor needs to be accessed by external clients.
hostname: ${Vars.HarborDomain}
# http related config
http:
# port for http, default is 80. If https enabled, this port will redirect to https port
port: ${Vars.HarborHTTPPort}
# https related config
https:
# https port for harbor, default is 443
port: ${Vars.HarborHTTPSPort}
# The path of cert and key files for nginx
certificate: ${Vars.HarborSSLCert}
private_key: ${Vars.HarborSSLPrivKey}
# # Uncomment following will enable tls communication between all harbor components
# internal_tls:
# # set enabled to true means internal tls is enabled
# enabled: true
# # put your cert and key files on dir
# dir: /etc/harbor/tls/internal
# Uncomment external_url if you want to enable external proxy
# And when it is enabled the hostname will no longer be used
# external_url: https://reg.mydomain.com:8433
# The initial password of Harbor admin
# It only works in first time to install harbor
# Remember to change the admin password from the UI after launching Harbor.
harbor_admin_password: ${Vars.HarborAdminPassword}
# Harbor DB configuration
database:
# The password for the root user of Harbor DB. Change this before any production use.
password: ${Vars.HarborDBPassword}
# The maximum number of connections in the idle connection pool. If it <=0, no idle connections are retained.
max_idle_conns: 50
# The maximum number of open connections to the database. If it <= 0, then there is no limit on the number of open connections.
# Note: the default number of connections is 100 for postgres.
max_open_conns: 200
# The default data volume
data_volume: /srv/harbor/data
# Harbor Storage settings by default is using /data dir on local filesystem
# Uncomment storage_service setting If you want to using external storage
# storage_service:
# # ca_bundle is the path to the custom root ca certificate, which will be injected into the truststore
# # of registry's and chart repository's containers. This is usually needed when the user hosts an internal storage with a self-signed certificate.
# ca_bundle:
# # storage backend, default is filesystem, options include filesystem, azure, gcs, s3, swift and oss
# # for more info about this configuration please refer https://docs.docker.com/registry/configuration/
# filesystem:
# maxthreads: 100
# # set disable to true when you want to disable registry redirect
# redirect:
# disabled: false
# Trivy configuration
#
# Trivy DB contains vulnerability information from NVD, Red Hat, and many other upstream vulnerability databases.
# It is downloaded by Trivy from the GitHub release page https://github.com/aquasecurity/trivy-db/releases and cached
# in the local file system. In addition, the database contains the update timestamp so Trivy can detect whether it
# should download a newer version from the Internet or use the cached one. Currently, the database is updated every
# 12 hours and published as a new release to GitHub.
trivy:
# ignoreUnfixed The flag to display only fixed vulnerabilities
ignore_unfixed: false
# skipUpdate The flag to enable or disable Trivy DB downloads from GitHub
#
# You might want to enable this flag in test or CI/CD environments to avoid GitHub rate limiting issues.
# If the flag is enabled you have to download the `trivy-offline.tar.gz` archive manually, extract `trivy.db` and
# `metadata.json` files and mount them in the `/home/scanner/.cache/trivy/db` path.
skip_update: false
#
# The offline_scan option prevents Trivy from sending API requests to identify dependencies.
# Scanning JAR files and pom.xml may require Internet access for better detection, but this option tries to avoid it.
# For example, the offline mode will not try to resolve transitive dependencies in pom.xml when the dependency doesn't
# exist in the local repositories. It means a number of detected vulnerabilities might be fewer in offline mode.
# It would work if all the dependencies are in local.
# This option doesn't affect DB download. You need to specify "skip-update" as well as "offline-scan" in an air-gapped environment.
offline_scan: false
#
# insecure The flag to skip verifying registry certificate
insecure: false
# github_token The GitHub access token to download Trivy DB
#
# Anonymous downloads from GitHub are subject to the limit of 60 requests per hour. Normally such rate limit is enough
# for production operations. If, for any reason, it's not enough, you could increase the rate limit to 5000
# requests per hour by specifying the GitHub access token. For more details on GitHub rate limiting please consult
# https://developer.github.com/v3/#rate-limiting
#
# You can create a GitHub token by following the instructions in
# https://help.github.com/en/github/authenticating-to-github/creating-a-personal-access-token-for-the-command-line
#
# github_token: xxx
jobservice:
# Maximum number of job workers in job service
max_job_workers: 10
logger_sweeper_duration: 300
notification:
# Maximum retry count for webhook job
webhook_job_max_retry: 10
webhook_job_http_client_timeout: 300
chart:
# Change the value of absolute_url to enabled can enable absolute url in chart
absolute_url: disabled
# Log configurations
log:
# options are debug, info, warning, error, fatal
level: info
# configs for logs in local storage
local:
# Log files are rotated log_rotate_count times before being removed. If count is 0, old versions are removed rather than rotated.
rotate_count: 50
# Log files are rotated only if they grow bigger than log_rotate_size bytes. If size is followed by k, the size is assumed to be in kilobytes.
# If the M is used, the size is in megabytes, and if G is used, the size is in gigabytes. So size 100, size 100k, size 100M and size 100G
# are all valid.
rotate_size: 200M
# The directory on your host that store log
location: /var/log/harbor
# Uncomment following lines to enable external syslog endpoint.
# external_endpoint:
# # protocol used to transmit log to external endpoint, options is tcp or udp
# protocol: tcp
# # The host of external endpoint
# host: localhost
# # Port of external endpoint
# port: 5140
#This attribute is for migrator to detect the version of the .cfg file, DO NOT MODIFY!
_version: 2.6.0
# Uncomment external_database if using external database.
# external_database:
# harbor:
# host: harbor_db_host
# port: harbor_db_port
# db_name: harbor_db_name
# username: harbor_db_username
# password: harbor_db_password
# ssl_mode: disable
# max_idle_conns: 2
# max_open_conns: 0
# notary_signer:
# host: notary_signer_db_host
# port: notary_signer_db_port
# db_name: notary_signer_db_name
# username: notary_signer_db_username
# password: notary_signer_db_password
# ssl_mode: disable
# notary_server:
# host: notary_server_db_host
# port: notary_server_db_port
# db_name: notary_server_db_name
# username: notary_server_db_username
# password: notary_server_db_password
# ssl_mode: disable
# Uncomment external_redis if using external Redis server
# external_redis:
# # support redis, redis+sentinel
# # host for redis: <host_redis>:<port_redis>
# # host for redis+sentinel:
# # <host_sentinel1>:<port_sentinel1>,<host_sentinel2>:<port_sentinel2>,<host_sentinel3>:<port_sentinel3>
# host: redis:6379
# password:
# # sentinel_master_set must be set to support redis+sentinel
# #sentinel_master_set:
# # db_index 0 is for core, it's unchangeable
# registry_db_index: 1
# jobservice_db_index: 2
# chartmuseum_db_index: 3
# trivy_db_index: 5
# idle_timeout_seconds: 30
# Uncomment uaa for trusting the certificate of uaa instance that is hosted via self-signed cert.
# uaa:
# ca_file: /path/to/ca
# Global proxy
# Config http proxy for components, e.g. http://my.proxy.com:3128
# Components don't need to connect to each other via http proxy.
# Remove component from `components` array if want disable proxy
# for it. If you want use proxy for replication, MUST enable proxy
# for core and jobservice, and set `http_proxy` and `https_proxy`.
# Add domain to the `no_proxy` field, when you want disable proxy
# for some special registry.
proxy:
http_proxy:
https_proxy:
no_proxy:
components:
- core
- jobservice
- notary
- trivy
metric:
enabled: false
port: 9090
path: /metrics
# Trace related config
# only one trace provider (jaeger or otel) can be enabled at a time,
# and when using jaeger as provider, it can only be enabled in agent mode or collector mode.
# if using jaeger collector mode, uncomment endpoint and uncomment username, password if needed
# if using jaeger agent mode, uncomment agent_host and agent_port
# trace:
# enabled: true
# # set sample_rate to 1 if you wanna sampling 100% of trace data; set 0.5 if you wanna sampling 50% of trace data, and so forth
# sample_rate: 1
# # # namespace used to differentiate different harbor services
# # namespace:
# # # attributes is a key value dict contains user defined attributes used to initialize trace provider
# # attributes:
# # application: harbor
# # # jaeger should be 1.26 or newer.
# # jaeger:
# # endpoint: http://hostname:14268/api/traces
# # username:
# # password:
# # agent_host: hostname
# # # export trace data by jaeger.thrift in compact mode
# # agent_port: 6831
# # otel:
# # endpoint: hostname:4318
# # url_path: /v1/traces
# # compression: false
# # insecure: true
# # timeout: 10s
# enable purge _upload directories
upload_purging:
enabled: true
# remove files in _upload directories which exist for a period of time, default is one week.
age: 168h
# the interval of the purge operations
interval: 24h
dryrun: false
# cache layer configurations
# If this feature enabled, harbor will cache the resource
# `project/project_metadata/repository/artifact/manifest` in the redis
# which can especially help to improve the performance of high concurrent
# manifest pulling.
# NOTICE
# If you are deploying Harbor in HA mode, make sure that all the harbor
# instances have the same behaviour, all with caching enabled or disabled,
# otherwise it can lead to potential data inconsistency.
cache:
# not enabled by default
enabled: false
# keep cache for one day by default
expire_hours: 24

View File

@ -0,0 +1,47 @@
# Example answer file for setup-alpine script
# If you don't want to use a certain option, then comment it out
# Use French layout with French variant
KEYMAPOPTS="fr fr"
# Set the hostname
HOSTNAMEOPTS="-n ${hostname}"
# Contents of /etc/network/interfaces
INTERFACESOPTS="auto lo
iface lo inet loopback
auto eth0
iface eth0 inet dhcp
hostname ${hostname}
"
# Search domain and public nameserver(s)
# ex: -d example.com 1.1.1.1
DNSOPTS=""
# Set timezone to Europe/Paris
TIMEZONEOPTS="-z Europe/Paris"
# set http/ftp proxy
PROXYOPTS="none"
# Pick a random mirror and enable the community repository
APKREPOSOPTS="-r -c"
# Install Openssh
SSHDOPTS="-c openssh -k /root/.ssh/authorized_keys"
# Use openntpd
NTPOPTS="-c openntpd"
# Install in sys mode (with LVM) on ${disk_device}
DISKOPTS="-L -m sys ${disk_device}"
USEROPTS="-a -g 'netdev' ${user}"
# Setup in /media/vda1
# LBUOPTS="/media/vda1"
# APKCACHEOPTS="/media/vda1/cache"

View File

@ -0,0 +1,8 @@
# k3s options
export PATH="/usr/libexec/cni/:$PATH"
K3S_EXEC="server"
%{ if Vars.DeployTraefik }
K3S_OPTS=""
%{ else }
K3S_OPTS="--disable traefik"
%{ endif }

View File

@ -0,0 +1 @@
command_args="-address 0.0.0.0:${Vars.MatchBox.HTTPPort} -rpc-address 0.0.0.0:${Vars.MatchBox.gRPCPort} -log-level ${Vars.MatchBox.LogLevel}"

View File

@ -0,0 +1,4 @@
${Vars.NIC[0].IP} ${Vars.Set.Hostname}
%{ if Vars.MatchBox.Hostname != "" }
${Vars.NIC[0].IP} ${Vars.MatchBox.Hostname}
%{ endif }

View File

@ -0,0 +1,60 @@
log-queries
log-dhcp
#port=0
listen-address=0.0.0.0
interface=${Vars.PXE.ListenInterface}
no-resolv
domain-needed
bogus-priv
expand-hosts
server=${Vars.ETH0.DNS}
strict-order
addn-hosts=/etc/dnsmasq-hosts.conf
domain=${Vars.PXE.DNSDomain}
local=/${Vars.PXE.DNSDomain}/
localise-queries
%{ if Vars.PXE.DHCPMode == "proxy" }
#dhcp-no-override
dhcp-range=${Vars.ETH0.IP},proxy
%{ else }
dhcp-range=${Vars.PXE.DHCPRangeStart},${Vars.PXE.DHCPRangeEnd},${Vars.PXE.DHCPLeaseDuration}
dhcp-option=option:router,${Vars.ETH0.GATEWAY}
%{ endif }
dhcp-option=option:dns-server,${Vars.ETH0.IP}
dhcp-option=option:domain-name,${Vars.PXE.DNSDomain}
# TFTP Configuration
enable-tftp
tftp-root="${Vars.PXE.TFTPRoot}"
pxe-prompt="${Vars.PXE.GreetingMessage}",${Vars.PXE.DelayTime}
# Based on logic in https://gist.github.com/robinsmidsrod/4008017
# iPXE sends a 175 option, checking suboptions
dhcp-match=set:ipxe-http,175,19
dhcp-match=set:ipxe-https,175,20
dhcp-match=set:ipxe-menu,175,39
# pcbios specific
dhcp-match=set:ipxe-pxe,175,33
dhcp-match=set:ipxe-bzimage,175,24
dhcp-match=set:ipxe-iscsi,175,17
# efi specific
dhcp-match=set:ipxe-efi,175,36
# combination
# set ipxe-ok tag if we have correct combination
# http && menu && iscsi ((pxe && bzimage) || efi)
tag-if=set:ipxe-ok,tag:ipxe-http,tag:ipxe-menu,tag:ipxe-iscsi,tag:ipxe-pxe,tag:ipxe-bzimage
tag-if=set:ipxe-ok,tag:ipxe-http,tag:ipxe-menu,tag:ipxe-iscsi,tag:ipxe-efi
## Load different PXE boot image depending on client architecture (when running as a proxy DHCP)
pxe-service=tag:!ipxe-ok, x86PC, "Legacy boot PXE chainload to iPXE", undionly.kpxe
pxe-service=tag:!ipxe-ok, BC_EFI, "UEFI32 boot chainload to iPXE", snponly.efi
pxe-service=tag:!ipxe-ok, X86-64_EFI, "UEFI64 boot chainload to iPXE", snponly.efi
dhcp-userclass=set:ipxe,iPXE
dhcp-boot=tag:ipxe-ok,http://${Vars.ETH0.IP}:${Vars.MatchBox.HTTPPort}/boot.ipxe,,${Vars.ETH0.IP}

View File

@ -0,0 +1,28 @@
#!/sbin/openrc-run
name=$RC_SVCNAME
command="/usr/local/bin/$RC_SVCNAME"
command_user="$RC_SVCNAME"
pidfile="/run/$RC_SVCNAME/$RC_SVCNAME.pid"
start_stop_daemon_args="--start -b"
command_args="$command_args"
command_background="yes"
depend() {
need net
}
start_pre() {
checkpath --directory --owner $command_user:$command_user --mode 0775 \
/run/$RC_SVCNAME /var/log/$RC_SVCNAME
if [ ! -f "/etc/matchbox/server.crt" ]; then
cd /root/tls
export SAN="DNS.1:${Vars.MatchBox.Hostname},IP.1:${Vars.ETH0.IP}"
./cert-gen
mkdir -p /etc/matchbox
cp ca.crt server.crt server.key /etc/matchbox
chown -R matchbox:matchbox /etc/matchbox
mkdir -p /root/.matchbox
cp client.crt client.key ca.crt /root/.matchbox/
fi
}

View File

@ -0,0 +1 @@
harbor

View File

@ -0,0 +1 @@
command_args="-address 0.0.0.0:${Vars.MatchBox.HTTPPort} -rpc-address 0.0.0.0:${Vars.MatchBox.gRPCPort} -log-level ${Vars.MatchBox.LogLevel}"

View File

@ -0,0 +1,7 @@
${Vars.NIC[0].IP} ${Vars.Set.Hostname}
%{ if Vars.MatchBox.Hostname != "" }
${Vars.NIC[0].IP} ${Vars.MatchBox.Hostname}
%{ endif }
%{ for host in Vars.DNSMasq.Hosts }
${host.IP} ${host.Name}
%{ endfor }

View File

@ -0,0 +1,60 @@
log-queries
log-dhcp
#port=0
listen-address=0.0.0.0
interface=${Vars.PXE.ListenInterface}
no-resolv
domain-needed
bogus-priv
expand-hosts
server=${Vars.DNS[0]}
strict-order
addn-hosts=/etc/dnsmasq-hosts.conf
domain=${Vars.PXE.DNSDomain}
local=/${Vars.PXE.DNSDomain}/
localise-queries
%{ if Vars.PXE.DHCPMode == "proxy" }
#dhcp-no-override
dhcp-range=${Vars.NIC[0].IP},proxy
%{ else }
dhcp-range=${Vars.PXE.DHCPRangeStart},${Vars.PXE.DHCPRangeEnd},${Vars.PXE.DHCPLeaseDuration}
dhcp-option=option:router,${Vars.NIC[0].Gateway}
%{ endif }
dhcp-option=option:dns-server,${Vars.NIC[0].IP}
dhcp-option=option:domain-name,${Vars.PXE.DNSDomain}
# TFTP Configuration
enable-tftp
tftp-root="${Vars.PXE.TFTPRoot}"
pxe-prompt="${Vars.PXE.GreetingMessage}",${Vars.PXE.DelayTime}
# Based on logic in https://gist.github.com/robinsmidsrod/4008017
# iPXE sends a 175 option, checking suboptions
dhcp-match=set:ipxe-http,175,19
dhcp-match=set:ipxe-https,175,20
dhcp-match=set:ipxe-menu,175,39
# pcbios specific
dhcp-match=set:ipxe-pxe,175,33
dhcp-match=set:ipxe-bzimage,175,24
dhcp-match=set:ipxe-iscsi,175,17
# efi specific
dhcp-match=set:ipxe-efi,175,36
# combination
# set ipxe-ok tag if we have correct combination
# http && menu && iscsi ((pxe && bzimage) || efi)
tag-if=set:ipxe-ok,tag:ipxe-http,tag:ipxe-menu,tag:ipxe-iscsi,tag:ipxe-pxe,tag:ipxe-bzimage
tag-if=set:ipxe-ok,tag:ipxe-http,tag:ipxe-menu,tag:ipxe-iscsi,tag:ipxe-efi
## Load different PXE boot image depending on client architecture (when running as a proxy DHCP)
pxe-service=tag:!ipxe-ok, x86PC, "Legacy boot PXE chainload to iPXE", undionly.kpxe
pxe-service=tag:!ipxe-ok, BC_EFI, "UEFI32 boot chainload to iPXE", snponly.efi
pxe-service=tag:!ipxe-ok, X86-64_EFI, "UEFI64 boot chainload to iPXE", snponly.efi
dhcp-userclass=set:ipxe,iPXE
dhcp-boot=tag:ipxe-ok,http://${Vars.NIC[0].IP}:${Vars.MatchBox.HTTPPort}/boot.ipxe,,${Vars.NIC[0].IP}

View File

@ -0,0 +1 @@
${Vars.Set.Hostname}

View File

@ -0,0 +1,28 @@
#!/sbin/openrc-run
name=$RC_SVCNAME
command="/usr/local/bin/$RC_SVCNAME"
command_user="$RC_SVCNAME"
pidfile="/run/$RC_SVCNAME/$RC_SVCNAME.pid"
start_stop_daemon_args="--start -b"
command_args="$command_args"
command_background="yes"
depend() {
need net
}
start_pre() {
checkpath --directory --owner $command_user:$command_user --mode 0775 \
/run/$RC_SVCNAME /var/log/$RC_SVCNAME
if [ ! -f "/etc/matchbox/server.crt" ]; then
cd /root/tls
export SAN="DNS.1:${Vars.MatchBox.Hostname},IP.1:${Vars.NIC[0].IP}"
./cert-gen
mkdir -p /etc/matchbox
cp ca.crt server.crt server.key /etc/matchbox
chown -R matchbox:matchbox /etc/matchbox
mkdir -p /root/.matchbox
cp client.crt client.key ca.crt /root/.matchbox/
fi
}

View File

@ -0,0 +1,9 @@
%{ for iface in Vars.NIC }
auto ${iface.Name}
iface ${iface.Name} inet static
address ${iface.IP}
netmask ${iface.Mask}
gateway ${iface.Gateway}
%{ endfor ~}

View File

@ -0,0 +1,4 @@
%{ for dns in Vars.DNS }
nameserver ${dns}
%{ endfor ~}

View File

@ -0,0 +1,7 @@
NAME = <%= image_name %>
PATH = <%= image_source %>
TYPE = OS
PERSISTENT = No
DESCRIPTION = "<%= image_comment %>"
DEV_PREFIX = vd
FORMAT = qcow2

View File

@ -0,0 +1,48 @@
{
"name": "<%= template_name %>",
"deployment": "straight",
"description": "Cluster Kubernetes (k8s)",
"roles": [
{
"name": "leader",
"cardinality": 1,
"vm_template": <%= getTemplateByName(oneCli, vm_name).id %>,
"shutdown_action": "terminate",
"vm_template_contents": "NIC = [\n NAME = \"NIC0\",\n NETWORK_ID = \"$main\",\n RDP = \"YES\" ]\nNIC = [\n NAME = \"NIC1\",\n NETWORK_ID = \"$internal\" ]\n",
"elasticity_policies": [],
"scheduled_policies": []
},
{
"name": "master",
"cardinality": 2,
"vm_template": <%= getTemplateByName(oneCli, vm_name).id %>,
"shutdown_action": "terminate",
"vm_template_contents": "NIC = [\n NAME = \"NIC0\",\n NETWORK_ID = \"$main\",\n RDP = \"YES\" ]\nNIC = [\n NAME = \"NIC1\",\n NETWORK_ID = \"$internal\" ]\n",
"elasticity_policies": [],
"scheduled_policies": []
},
{
"name": "worker",
"cardinality": 4,
"vm_template": <%= getTemplateByName(oneCli, vm_name).id %>,
"shutdown_action": "terminate",
"parents": [
"leader"
],
"vm_template_contents": "NIC = [\n NAME = \"NIC0\",\n NETWORK_ID = \"$main\",\n RDP = \"YES\" ]\nNIC = [\n NAME = \"NIC1\",\n NETWORK_ID = \"$internal\" ]\n",
"elasticity_policies": [],
"scheduled_policies": []
}
],
"networks": {
"main": "M|network|Main network| |id:",
"internal": "M|network|Internal network| |id:"
},
"custom_attrs": {
"KUBEAPPS_DNS_NAME": "M|text|DNS Name for kubeapps service| |kubeapps.k3s-eole.local",
"INGRESS_PROVIDER": "O|list|Default ingress to install|nginx, traefik, |",
"LE_EMAIL": "M|text|Email | |"
},
"shutdown_action": "terminate",
"ready_status_gate": true
}

View File

@ -0,0 +1,33 @@
NAME = "<%= template_name %>"
CONTEXT = [
NETWORK = "YES",
REPORT_READY = "YES",
SET_HOSTNAME = "$NAME",
SSH_PUBLIC_KEY = "$USER[SSH_PUBLIC_KEY]",
TOKEN = "YES" ]
CPU = "0.2"
DESCRIPTION = "Alpine basic image"
DISK = [
DEV_PREFIX = "vd",
DRIVER = "qcow2",
IMAGE = "<%= image_name %>",
IMAGE_UNAME = "<%= user %>" ]
GRAPHICS = [
KEYMAP = "fr",
LISTEN = "0.0.0.0",
TYPE = "VNC" ]
HYPERVISOR = "kvm"
INPUT = [
BUS = "usb",
TYPE = "tablet" ]
INPUTS_ORDER = ""
LOGO = "images/logos/linux.png"
MEMORY = "512"
MEMORY_UNIT_COST = "MB"
NIC_DEFAULT = [
MODEL = "virtio" ]
OS = [
ARCH = "x86_64",
BOOT = "",
SD_DISK_BUS = "scsi" ]
VCPU = "2"

View File

@ -0,0 +1,32 @@
NAME = "<%= template_name %>"
CONTEXT = [
NETWORK = "YES",
REPORT_READY = "YES",
SET_HOSTNAME = "$NAME",
SSH_PUBLIC_KEY = "$USER[SSH_PUBLIC_KEY]",
TOKEN = "YES" ]
CPU = "0.2"
DESCRIPTION = "K3S Ready VM"
DISK = [
IMAGE = "<%= image_name %>",
IMAGE_UNAME = "<%= user %>",
DRIVER = "qcow2" ]
GRAPHICS = [
KEYMAP = "fr",
LISTEN = "0.0.0.0",
TYPE = "VNC" ]
HYPERVISOR = "kvm"
INPUT = [
BUS = "usb",
TYPE = "tablet" ]
INPUTS_ORDER = ""
LOGO = "images/logos/alpine.png"
MEMORY = "2048"
MEMORY_UNIT_COST = "MB"
NIC_DEFAULT = [
MODEL = "virtio" ]
OS = [
ARCH = "x86_64",
BOOT = "",
SD_DISK_BUS = "scsi" ]
VCPU = "2"

View File

@ -0,0 +1,35 @@
NAME = "<%= template_name %>"
CONTEXT = [
NETWORK = "YES",
REPORT_READY = "YES",
SET_HOSTNAME = "$NAME",
SERVER_ROLE = "leader",
TOKEN = "YES",
SSH_PUBLIC_KEY = "$USER[SSH_PUBLIC_KEY]"
]
CPU = "0.8"
DESCRIPTION = "Kubernetes master or Docker VM (check the name)"
DISK = [
DEV_PREFIX = "vd",
IMAGE = "<%= image_name %>",
IMAGE_UNAME = "<%= user %>",
DRIVER = "qcow2" ]
GRAPHICS = [
LISTEN = "0.0.0.0",
KEYMAP = "fr",
TYPE = "VNC" ]
HYPERVISOR = "kvm"
INPUT = [
BUS = "usb",
TYPE = "tablet" ]
INPUTS_ORDER = ""
LOGO = "images/logos/alpine.png"
MEMORY = "2048"
MEMORY_UNIT_COST = "MB"
NIC_DEFAULT = [
MODEL = "virtio" ]
OS = [
ARCH = "x86_64",
BOOT = "",
SD_DISK_BUS = "scsi" ]
VCPU = "4"

View File

@ -0,0 +1,42 @@
NAME = "<%= template_name %>"
CONTEXT = [
NETWORK = "YES",
REPORT_READY = "YES",
SET_HOSTNAME = "$NAME",
SERVER_ROLE = "master",
MASTER_ADDR = "$MASTER_ADDR",
MASTER_TOKEN = "$MASTER_TOKEN",
MASTER_CA_TOKEN = "$MASTER_CA_TOKEN",
TOKEN = "YES",
SSH_PUBLIC_KEY = "$USER[SSH_PUBLIC_KEY]"
]
CPU = "0.8"
DESCRIPTION = "Kubernetes worker VM"
DISK = [
DEV_PREFIX = "vd",
IMAGE = "<%= image_name %>",
IMAGE_UNAME = "<%= user %>",
DRIVER = "qcow2" ]
GRAPHICS = [
LISTEN = "0.0.0.0",
KEYMAP = "fr",
TYPE = "VNC" ]
HYPERVISOR = "kvm"
INPUT = [
BUS = "usb",
TYPE = "tablet" ]
INPUTS_ORDER = ""
LOGO = "images/logos/alpine.png"
MEMORY = "2048"
MEMORY_UNIT_COST = "MB"
NIC_DEFAULT = [
MODEL = "virtio" ]
OS = [
ARCH = "x86_64",
BOOT = "",
SD_DISK_BUS = "scsi" ]
USER_INPUTS = [
MASTER_ADDR = "O|text|Master address (for workers only)",
MASTER_TOKEN = "O|text|Master Token (for workers only)",
MASTER_CA_TOKEN = "O|text|Master CA Token (for workers only)" ]
VCPU = "4"

View File

@ -0,0 +1,42 @@
NAME = "<%= template_name %>"
CONTEXT = [
NETWORK = "YES",
REPORT_READY = "YES",
SET_HOSTNAME = "$NAME",
SERVER_ROLE = "worker",
MASTER_ADDR = "$MASTER_ADDR",
MASTER_TOKEN = "$MASTER_TOKEN",
MASTER_CA_TOKEN = "$MASTER_CA_TOKEN",
TOKEN = "YES",
SSH_PUBLIC_KEY = "$USER[SSH_PUBLIC_KEY]"
]
CPU = "0.8"
DESCRIPTION = "Kubernetes worker VM"
DISK = [
DEV_PREFIX = "vd",
IMAGE = "<%= image_name %>",
IMAGE_UNAME = "<%= user %>",
DRIVER = "qcow2" ]
GRAPHICS = [
LISTEN = "0.0.0.0",
KEYMAP = "fr",
TYPE = "VNC" ]
HYPERVISOR = "kvm"
INPUT = [
BUS = "usb",
TYPE = "tablet" ]
INPUTS_ORDER = ""
LOGO = "images/logos/alpine.png"
MEMORY = "4096"
MEMORY_UNIT_COST = "MB"
NIC_DEFAULT = [
MODEL = "virtio" ]
OS = [
ARCH = "x86_64",
BOOT = "",
SD_DISK_BUS = "scsi" ]
USER_INPUTS = [
MASTER_ADDR = "O|text|Master address (for workers only)",
MASTER_TOKEN = "O|text|Master Token (for workers only)",
MASTER_CA_TOKEN = "O|text|Master CA Token (for workers only)" ]
VCPU = "4"

View File

@ -0,0 +1,47 @@
NAME = "<%= template_name %>"
CONTEXT = [
MATCHBOX_URL = "http://$NAME",
NETWORK = "YES",
PXE_DHCPLEASEDURATION = "$DHCPLEASEDURATION",
PXE_DHCPMODE = "$ADHCPMODE",
PXE_DNSDOMAIN = "$BDNSDOMAIN",
PXE_DHCPRANGESTART = "$CDHCPRANGESTART",
PXE_DHCPRANGEEND = "$DDHCPRANGEEND",
PXE_DHCPLEASEDURATION = "$EDHCPLEASEDURATION",
MATCHBOX_HOSTNAME = "$FMATCHBOX_HOSTNAME",
REPORT_READY = "YES",
SET_HOSTNAME = "$NAME",
SSH_PUBLIC_KEY = "$USER[SSH_PUBLIC_KEY]",
TOKEN = "YES" ]
CPU = "0.2"
DESCRIPTION = "Matchbox Ready VM"
DISK = [
IMAGE = "<%= image_name %>",
IMAGE_UNAME = "<%= user %>",
DRIVER = "qcow2" ]
GRAPHICS = [
KEYMAP = "fr",
LISTEN = "0.0.0.0",
TYPE = "VNC" ]
HYPERVISOR = "kvm"
INPUT = [
BUS = "usb",
TYPE = "tablet" ]
INPUTS_ORDER = ""
LOGO = "images/logos/alpine.png"
MEMORY = "2048"
MEMORY_UNIT_COST = "MB"
NIC_DEFAULT = [
MODEL = "virtio" ]
OS = [
ARCH = "x86_64",
BOOT = "",
SD_DISK_BUS = "scsi" ]
USER_INPUTS = [
ADHCPMODE = "M|list|DHCP Mode|proxy,direct|proxy",
BDNSDOMAIN = "M|text|Nom de la zone DNS (ex: cadol.es)",
CDHCPRANGESTART = "O|text|DNSMASQ DHCP Range First IP",
DDHCPRANGEEND = "O|text|DNSMASQ DHCP Range Last IP",
EDHCPLEASEDURATION = "M|list|DHCP lease duration|1h,2h,4h,6h,8h,10h,12h,14h,24h|1h",
FMATCHBOX_HOSTNAME = "O|text|Matchbox service hostname|mb.cadol.es" ]
VCPU = "2"

View File

@ -0,0 +1,54 @@
variable "name" {
type = string
default = "alpine"
}
variable "version" {
type = string
default = "3.14.2"
}
variable "short_version" {
type = string
default = "3.14"
}
variable "arch" {
type = string
default = "x86_64"
}
variable "output_dir" {
type = string
default = "output/alpine/"
}
variable "source_url" {
type = string
default = "https://cdimage.debian.org/cdimage/release"
}
variable "iso_cd_checksum" {
type = string
default = "sha256:ae6d563d2444665316901fe7091059ac34b8f67ba30f9159f7cef7d2fdc5bf8a"
}
variable "image_version" {
type = string
default = "0.0.1"
}
variable "one_user" {
type = string
default = env("ONE_USER")
}
variable "one_token" {
type = string
default = env("ONE_TOKEN")
}
variable "boot_command" {
type = list(string)
default = []
}

View File

@ -0,0 +1,16 @@
name = "debian"
version = "11"
short_version = "11"
code_name = "bullseye"
arch = "amd64"
source_url = "https://cloud.debian.org/images/cloud/bullseye/latest"
iso_cd_checksum = "9ae04227e89047b72970a0d5f1897e2573fd0d4bba3d381086307af604072bad9e33174357fd3c3545a2a2b5b83ce19f3dbb5c352e86d5173b833df59b4a5741"
image_dir_name= "latest"
boot_command = [ "<enter>" ]
# "<enter>",
# "preseed/url=http://{{ .HTTPIP }}:{{ .HTTPPort }}/preseed.cfg<enter>",
# "<wait>",
# "<wait1s>mkdir -p .ssh<enter>",
# "<wait1s>wget http://{{.HTTPIP}}:{{.HTTPPort}}/ssh-packer-pub.key -O .ssh/authorized_keys<enter><wait1s>",
# "<wait1s>chmod 600 .ssh/authorized_keys<enter>",
#]

View File

@ -0,0 +1,6 @@
locals {
builder_config = {
TemplateDir = "/usr/share/builder/templates"
ValueDir = "/usr/share/builder/values"
}
}

View File

@ -0,0 +1,7 @@
locals {
Globals = {
Vars = {
PrometheusPort = "9090"
}
}
}

View File

@ -0,0 +1,33 @@
# "timestamp" template function replacement
locals {
locations = {
recipes = "${path.cwd}/recipes/${var.name}"
templates = "${path.cwd}/recipes/${var.name}/templates"
provisionning = "${path.cwd}/recipes/${var.name}/provisionning"
post-processors = "${path.cwd}/recipes/${var.name}/post-processor"
tools = "${path.cwd}/tools"
}
dirs = local.locations
timestamp = regex_replace(timestamp(), "[- TZ:]", "")
output_name = "${var.name}"
source_checksum_url = "file:${var.source_url}/SHA512SUMS"
source_iso = "${var.source_url}/debian-${var.version}-generic-${var.arch}.qcow2"
source_checksum = "${var.iso_cd_checksum}"
ssh_user = "root"
ssh_password = "PbkRc1vup7Wq5n4r"
disk_size = 8000
memory = 512
instance_data = {
"instance-id": "${var.name}"
}
installOpts = {
hostname = var.name
user = "eole"
disk_device = "/dev/vda"
}
installOptsVMWare = {
hostname = var.name
user = "eole"
disk_device = "/dev/sda"
}
}

View File

@ -0,0 +1,62 @@
#Flavour base
build {
name = "base"
description = <<EOF
This builder builds a QEMU image from a Debian cloud image.
EOF
source "vmware-iso.debian" {
output_directory = "${var.output_dir}/${var.version}/base"
vm_name = "${local.output_name}-${var.version}.img"
disk_size = 10240
iso_url = "${local.source_iso}"
iso_checksum = "${var.iso_cd_checksum}"
http_content = {
"/ssh-packer-pub.key" = data.sshkey.install.public_key
"/install.conf" = templatefile("${local.locations.templates}/conf/install/awnsers.pktpl.hcl", local.installOptsVMWare)
}
boot_command = var.boot_command
cd_label = "cidata"
}
source "qemu.debian" {
output_directory = "${var.output_dir}/${var.version}/base"
vm_name = "${local.output_name}-${var.version}.img"
iso_url = "${local.source_iso}"
iso_checksum = "${var.iso_cd_checksum}"
disk_image = true
disk_size = 10240
cd_content = {
"meta-data" = jsonencode(local.instance_data)
"user-data" = templatefile("${path.cwd}/recipes/debian/templates/conf/cloud-init/user-data",
{ user = local.ssh_user,
password = local.ssh_password,
runcmd = var.cloud_init_runcmd })
}
#http_content = {
# "/ssh-packer-pub.key" = data.sshkey.install.public_key
# "/install.conf" = templatefile("${local.locations.templates}/conf/install/awnsers.pktpl.hcl", local.installOpts)
#}
cd_label = "cidata"
boot_command = var.boot_command
}
provisioner "shell" {
script = "${local.locations.provisionning}/${var.name}/${var.name}-${var.short_version}-install.sh"
}
provisioner "shell" {
script = "${local.locations.provisionning}/letsencrypt.sh"
}
post-processor "shell-local" {
inline = [
"/bin/sh ${path.cwd}/post-processors/sparsify.sh ${var.output_dir}/${var.version}/base ${var.image_version}",
]
}
post-processor "manifest" {
keep_input_artifact = true
}
}

View File

@ -0,0 +1,20 @@
packer {
required_plugins {
sshkey = {
version = ">= 1.0.1"
source = "github.com/ivoronin/sshkey"
}
vmware = {
version = ">= 1.0.8"
source = "github.com/hashicorp/vmware"
}
qemu = {
source = "github.com/hashicorp/qemu"
version = "~> 1"
}
}
}
data "sshkey" "install" {
type = "ed25519"
}

View File

@ -0,0 +1,31 @@
#!/bin/sh
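# Usage: sparsify.sh <workdir> <version>
# Compresses every *.img found under <workdir> with virt-sparsify and leaves a
# symlink with the original name pointing at the new <name>_<version>.img.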
if [ "${#}" -ne 2 ]; then
echo Missing arguments
exit 2
fi
WORKDIR=${1}
VERSION=${2}
findImages() {
find ${1} -iname "*.img"
}
sleep 5
for imageName in $(findImages ${WORKDIR}); do
if command -v virt-sparsify >/dev/null; then
newName=$(echo $imageName | sed "s/.img/_${VERSION}.img/g")
virt-sparsify --compress --tmp ./ --format qcow2 ${imageName} ${newName}
if [ "${?}" -eq 0 ]; then
rm -rf ${imageName}
cd ${WORKDIR}
ln -s $(basename ${newName}) $(basename ${imageName})
echo ${newName} ${imageName}
cd -
fi
else
echo "Sparsify skipped 'virt-sparsify' command is missing"
fi
done

View File

@ -0,0 +1,104 @@
#!/usr/bin/env bash
#
# Generate all the configuration files
# Get all the values from the VLS_DIR
# Process each template from the TPL_DIR with this values
#
ENV_FILE=${ENV_FILE:-/var/run/one-context/one_env}
TPL_DIR="/usr/share/builder/templates"
VLS_DIR="/usr/share/builder/values"
CONFIG=""
if [ -f "${ENV_FILE}" ]; then
. ${ENV_FILE}
fi
BTR="$(command -v btr)"
if [ "${?}" -ne 0 ]; then
echo "Warning: Nothing to do the templater is not installed"
exit 0
fi
if [ ! -e "${TPL_DIR}" ]; then
echo "Error: The template dir is missing (${TPL_DIR})"
exit 1
fi
if [ ! -e "${VLS_DIR}" ]; then
echo "Error: The template dir is missing (${VLS_DIR})"
exit 1
fi
jsonQuery() {
local data="${1}"
local query="${2}"
echo "${data}" | jq -cr "${query}"
}
# NAME: @jsonMerge
# AIM: Merge two json structures
# NOTES:
# The last one has the last word:
# if you have the same key in A and B
# this keeps the value of the B structure.
# PARAMS:
# $1: original JSON Structure
# $2: updated JSON Structure
jsonMerge() {
local data="${1}"
local data2="${2}"
echo "${data} ${data2}" | jq -cr -s ".[0] * .[1]"
}
jsonUpdateVal() {
local json="${1}"
local key="${2}"
local value="${3}"
echo "${json}" | jq --arg a "${value}" "${key} = \$a"
}
getValues() {
local values=""
for file in $(find ${VLS_DIR} -name "*.json"); do
values="${values}$(cat ${file})"
done
if [ -n "${RAW_CONFIG}" ]; then
values="$(jsonMerge ${values} ${RAW_CONFIG})"
fi
for svc in $(echo ${values} | jq -cr '.Services|keys[]'); do
for key in $(echo ${values} | jq -cr ".Services.${svc}.Vars|keys[]"); do
ukey=${key^^}
vkeys="$(echo ${values} | jq -cr \".Services.${svc}.Vars.${key}\|keys[]\")"
if [ ${?} -eq 0 ]; then
for var in $(echo ${values} | jq -cr ".Services.${svc}.Vars.${key}|keys[]"); do
uvar=${var^^}
val=$(eval echo "\$${ukey}_${uvar}")
if [ -n "${val}" ]; then
values=$(jsonUpdateVal "${values}" ".Services.${svc}.Vars.${key}.${var}" "${val}")
fi
done
else
values=$(jsonUpdateVal "${values}" ".Services.${svc}.Vars.${key}" "${!ukey}")
fi
done
done
echo ${values}
}
processTemplates() {
${BTR} -t ${TPL_DIR} -c "${1}"
}
VALUES=$(getValues)
file=$(mktemp)
echo "${VALUES}" > "${file}"
processTemplates "${file}"
rm -rf "${file}"

View File

@ -0,0 +1,64 @@
#!/sbin/openrc-run
: ${SUBCFGDIR:=/srv}
DOCKER_COMPOSE_UP_ARGS=${DOCKER_COMPOSE_UP_ARGS-"--no-build --no-recreate --no-deps"}
SUBSVC="${SVCNAME#*.}"
[ -z "${SUBSVC}" ] && exit 1
: ${SUBCFG:="${SUBCFGDIR}/${SUBSVC}/docker-compose.yml"}
DOCOCMD="/usr/bin/docker-compose"
export COMPOSE_HTTP_TIMEOUT=300
description="Manage docker services defined in ${SUBCFG}"
extra_commands="configtest build"
description_configtest="Check configuration via \"docker-compose -f ${SUBCFG} config\""
description_build="Run \"docker-compose -f ${SUBCFG} build\""
depend() {
need localmount net docker
use dns
after docker
}
configtest() {
if ! [ -f "${SUBCFG}" ]; then
eerror "The config file ${SUBCFG} does not exist!"
return 1
fi
if "${DOCOCMD}" -f "${SUBCFG}" config >&/dev/null; then
einfo "config: ok"
else
eerror "config: error"
return 1
fi
}
build() {
configtest || return 1
ebegin "Building dockerservice ${SUBSVC}"
"${DOCOCMD}" -f "${SUBCFG}" build
eend $?
}
start() {
configtest || return 1
ebegin "Starting dockerservice ${SUBSVC}"
sleep 5
"${DOCOCMD}" -f "${SUBCFG}" up -d ${DOCKER_COMPOSE_UP_ARGS}
eend $?
}
stop() {
ebegin "Stopping dockerservice ${SUBSVC}"
"${DOCOCMD}" -f "${SUBCFG}" stop --timeout=300
eend $?
}
status() {
if [ "$("${DOCOCMD}" -f "${SUBCFG}" top | wc -l)" -gt "0" ]; then
einfo "status: started"
else
einfo "status: stopped"
return 3
fi
}
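# Hypothetical usage (assuming this script is installed as
# /etc/init.d/dockerservice): one symlink per compose project, named after the
# dot suffix, e.g.:
#   ln -s /etc/init.d/dockerservice /etc/init.d/dockerservice.myapp
#   rc-service dockerservice.myapp start   # uses /srv/myapp/docker-compose.yml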

Some files were not shown because too many files have changed in this diff.