From d13e17ee102ced96d4167933d3c844a479f231bc Mon Sep 17 00:00:00 2001 From: Philippe Caseiro Date: Tue, 6 Dec 2022 15:44:25 +0100 Subject: [PATCH] First recipes --- README.md | 80 ++- build | 150 +++++ post-processors/sparsify.sh | 31 + recipes/alpine/3.16.pkrvars.hcl | 6 + recipes/alpine/3.17.pkrvars.hcl | 6 + recipes/alpine/base-onecontext.pkr.hcl | 39 ++ recipes/alpine/docker.pkr.hcl | 72 ++ recipes/alpine/k3s.pkr.hcl | 76 +++ recipes/alpine/kubernetes.pkr.hcl | 112 ++++ recipes/alpine/locals.builder.pkr.hcl | 6 + recipes/alpine/locals.docker.pkr.hcl | 34 + recipes/alpine/locals.globals.pkr.hcl | 7 + recipes/alpine/locals.k3s.pkr.hcl | 79 +++ recipes/alpine/locals.kubernetes.pkr.hcl | 90 +++ recipes/alpine/locals.matchbox.pkr.hcl | 120 ++++ recipes/alpine/locals.pkr.hcl | 24 + recipes/alpine/main.pkr.hcl | 66 ++ recipes/alpine/matchbox.pkr.hcl | 101 +++ recipes/alpine/plugins.pkr.hcl | 12 + recipes/alpine/post-processor/sparsify.sh | 31 + .../provisionning/alpine-3.16-install.sh | 15 + .../provisionning/alpine-3.16-postinstall.sh | 21 + .../provisionning/alpine-3.17-install.sh | 1 + .../provisionning/alpine-3.17-postinstall.sh | 1 + .../conf/kubernetes/initkubernetes.start | 181 +++++ .../conf/kubernetes/sharemetrics.start | 3 + .../conf/matchbox/initmatchbox.start | 25 + .../conf/matchbox/inittftp.start | 10 + .../conf/one-context/net-90-jenkins-slave | 13 + .../conf/one-context/net-96-gitlab-register | 31 + .../conf/one-context/net-96-templater | 80 +++ .../conf/one-context/net-97-k3s | 21 + recipes/alpine/provisionning/k3s.sh | 5 + recipes/alpine/provisionning/kubernetes.sh | 18 + recipes/alpine/provisionning/letsencrypt.sh | 26 + recipes/alpine/provisionning/matchbox.sh | 38 ++ recipes/alpine/provisionning/one-context.sh | 12 + .../one-context/net-96-templater | 102 +++ .../provisionning/one-context/net-97-k3s | 21 + .../alpine/provisionning/templater-install.sh | 23 + recipes/alpine/sources.pkr.hcl | 33 + recipes/alpine/templates/conf/conf.d/chronyd | 6 + .../templates/conf/install/awnsers.pktpl.hcl | 47 ++ .../templates/conf/k3s/k3s.conf.pkr.hcl | 8 + .../alpine/templates/conf/kubernetes/.flag | 0 .../matchbox/conf.d/matchbox.conf.pktpl.hcl | 1 + .../dnsmasq.d/dnsmasq-hosts.conf.pktpl.hcl | 4 + .../matchbox/dnsmasq.d/ipxe.conf.pktpl.hcl | 60 ++ .../conf/matchbox/init.d/matchbox.pktpl.hcl | 28 + recipes/alpine/templates/one/image/common.tpl | 7 + .../one/service/kubernetes-cluster.json | 48 ++ recipes/alpine/templates/one/vm/common.xml | 33 + recipes/alpine/templates/one/vm/k3s.xml | 32 + .../alpine/templates/one/vm/kubeleader.xml | 35 + .../alpine/templates/one/vm/kubemaster.xml | 42 ++ .../alpine/templates/one/vm/kubeworker.xml | 42 ++ recipes/alpine/templates/one/vm/matchbox.xml | 47 ++ recipes/alpine/variables.pkr.hcl | 54 ++ tools/one-templates | 628 ++++++++++++++++++ 59 files changed, 2943 insertions(+), 1 deletion(-) create mode 100755 build create mode 100755 post-processors/sparsify.sh create mode 100644 recipes/alpine/3.16.pkrvars.hcl create mode 100644 recipes/alpine/3.17.pkrvars.hcl create mode 100644 recipes/alpine/base-onecontext.pkr.hcl create mode 100644 recipes/alpine/docker.pkr.hcl create mode 100644 recipes/alpine/k3s.pkr.hcl create mode 100644 recipes/alpine/kubernetes.pkr.hcl create mode 100644 recipes/alpine/locals.builder.pkr.hcl create mode 100644 recipes/alpine/locals.docker.pkr.hcl create mode 100644 recipes/alpine/locals.globals.pkr.hcl create mode 100644 recipes/alpine/locals.k3s.pkr.hcl create mode 100644 recipes/alpine/locals.kubernetes.pkr.hcl 
create mode 100644 recipes/alpine/locals.matchbox.pkr.hcl create mode 100644 recipes/alpine/locals.pkr.hcl create mode 100644 recipes/alpine/main.pkr.hcl create mode 100644 recipes/alpine/matchbox.pkr.hcl create mode 100644 recipes/alpine/plugins.pkr.hcl create mode 100755 recipes/alpine/post-processor/sparsify.sh create mode 100644 recipes/alpine/provisionning/alpine-3.16-install.sh create mode 100644 recipes/alpine/provisionning/alpine-3.16-postinstall.sh create mode 120000 recipes/alpine/provisionning/alpine-3.17-install.sh create mode 120000 recipes/alpine/provisionning/alpine-3.17-postinstall.sh create mode 100644 recipes/alpine/provisionning/conf/kubernetes/initkubernetes.start create mode 100644 recipes/alpine/provisionning/conf/kubernetes/sharemetrics.start create mode 100644 recipes/alpine/provisionning/conf/matchbox/initmatchbox.start create mode 100644 recipes/alpine/provisionning/conf/matchbox/inittftp.start create mode 100644 recipes/alpine/provisionning/conf/one-context/net-90-jenkins-slave create mode 100644 recipes/alpine/provisionning/conf/one-context/net-96-gitlab-register create mode 100644 recipes/alpine/provisionning/conf/one-context/net-96-templater create mode 100644 recipes/alpine/provisionning/conf/one-context/net-97-k3s create mode 100644 recipes/alpine/provisionning/k3s.sh create mode 100644 recipes/alpine/provisionning/kubernetes.sh create mode 100644 recipes/alpine/provisionning/letsencrypt.sh create mode 100644 recipes/alpine/provisionning/matchbox.sh create mode 100644 recipes/alpine/provisionning/one-context.sh create mode 100644 recipes/alpine/provisionning/one-context/net-96-templater create mode 100644 recipes/alpine/provisionning/one-context/net-97-k3s create mode 100644 recipes/alpine/provisionning/templater-install.sh create mode 100644 recipes/alpine/sources.pkr.hcl create mode 100644 recipes/alpine/templates/conf/conf.d/chronyd create mode 100644 recipes/alpine/templates/conf/install/awnsers.pktpl.hcl create mode 100644 recipes/alpine/templates/conf/k3s/k3s.conf.pkr.hcl create mode 100644 recipes/alpine/templates/conf/kubernetes/.flag create mode 100644 recipes/alpine/templates/conf/matchbox/conf.d/matchbox.conf.pktpl.hcl create mode 100644 recipes/alpine/templates/conf/matchbox/dnsmasq.d/dnsmasq-hosts.conf.pktpl.hcl create mode 100644 recipes/alpine/templates/conf/matchbox/dnsmasq.d/ipxe.conf.pktpl.hcl create mode 100644 recipes/alpine/templates/conf/matchbox/init.d/matchbox.pktpl.hcl create mode 100644 recipes/alpine/templates/one/image/common.tpl create mode 100644 recipes/alpine/templates/one/service/kubernetes-cluster.json create mode 100644 recipes/alpine/templates/one/vm/common.xml create mode 100644 recipes/alpine/templates/one/vm/k3s.xml create mode 100644 recipes/alpine/templates/one/vm/kubeleader.xml create mode 100644 recipes/alpine/templates/one/vm/kubemaster.xml create mode 100644 recipes/alpine/templates/one/vm/kubeworker.xml create mode 100644 recipes/alpine/templates/one/vm/matchbox.xml create mode 100644 recipes/alpine/variables.pkr.hcl create mode 100755 tools/one-templates diff --git a/README.md b/README.md index a915a12..1aaf3ff 100644 --- a/README.md +++ b/README.md @@ -1,3 +1,81 @@ # vms -Virtual machine image builder (based on EOLE3 builder) \ No newline at end of file +Virtual machine image builder (based on EOLE3 builder) + +## What do we have here ? 
+
+### The "build" tool
+
+This is a simple wrapper to help you build and publish images with "packer" and "one-templates".
+
+### The "publisher" tool called "tools/one-templates"
+
+This is a simple script to create and manage all kinds of OpenNebula "Templates":
+* VMs
+* Networks
+* Images
+* Services
+
+With this tool you can publish all the images and the templates necessary to actually use what you build in OpenNebula.
+
+## What do you need?
+
+* Packer >= 1.6.0
+* virt-sparsify (optional)
+* Ruby
+* These Ruby gems:
+  * opennebula
+  * opennebula-cli
+  * opennebula-oca
+  * webrick
+* An OpenNebula server or cluster >= 5.6 (with OpenNebula Flow and OneGate enabled)
+* An account with the proper ACL on the OpenNebula server/cluster to:
+  * create/manage images
+  * create/manage vm templates
+  * create/manage services templates
+  * create/manage network templates
+
+## How to use the "build" tool?
+
+First, you need to create the configuration for the "publisher" tool "tools/one-templates".
+This configuration is located in "./tools/.one-templates.conf".
+Note that you can use a different configuration file by setting the TEMPLATER_CONFIG environment variable before starting the build script.
+
+```
+$ cat ./tools/.one-templates.conf
+user: myOpenNebulaUser
+token: myVerySecretOpenNebulaTemporaryToken
+builder_addr: IP Address of the building machine (often your own local IP address)
+endpoint: http://myOpenNebulaServerAddress...
+flow_endpoint: http://myOpenNebulaServerAddress/oneflow
+datastore_id: TheDataStoreIDForMyImages
+```
+
+Once this configuration is done, you can use the "build" tool.
+
+The recipes are organised by OS, version and flavor; one OS can contain several versions and flavors.
+You can list the available OSes and versions with this command:
+
+```
+$ ./build list
+You can build :
+ * alpine :
+   - 3.16
+```
+To build all the flavors of a version, run this command:
+
+```
+$ ./build start alpine 3.16
+```
+
+To build only one flavor of one version, run this command:
+
+```
+$ ./build run alpine 3.16 k3s
+```
+
+`!!! Make sure the "base" flavor is built before trying to build any other flavor. !!!`
+
+```
+$ ./build run alpine 3.16 base
+```
diff --git a/build b/build
new file mode 100755
index 0000000..7ff618c
--- /dev/null
+++ b/build
@@ -0,0 +1,150 @@
+#!/bin/bash
+
+# Simple build wrapper
+
+ACTION=${1}
+OS=${2}
+VERSION=${3}
+
+RCP_DIR="./recipes"
+PACKER=${PACKER:-packer}
+
+#
+# Init packer
+# Install the required plugins
+#
+initPacker() {
+    os=${1}
+    ${PACKER} init ${RCP_DIR}/${os}
+}
+
+#
+# Run the build
+# First the "base" image, then the provisioned ones
+#
+run() {
+    ${PACKER} build ${PACKER_OPTS} -var-file="${RCP_DIR}/${OS}/${VERSION}.pkrvars.hcl" -only="base.qemu.${OS}" "${RCP_DIR}/${OS}/."
+    ${PACKER} build ${PACKER_OPTS} -force -var-file="${RCP_DIR}/${OS}/${VERSION}.pkrvars.hcl" -except="base.qemu.${OS}" "${RCP_DIR}/${OS}/."
+}
+
+#
+# Run a specific build
+#
+run_build() {
+    target=${4}
+    ${PACKER} build ${PACKER_OPTS} -force \
+        -var-file="${RCP_DIR}/${OS}/${VERSION}.pkrvars.hcl" \
+        -only="${target}.qemu.${OS}" \
+        "${RCP_DIR}/${OS}/."
+}
+
+#
+# Run many builds for one OS
+#
+run_many() {
+    targets="${@:4}"
+    only=""
+    for target in ${targets};do
+        only="${only}-only=${target}.qemu.${OS} "
+    done
+
+    ${PACKER} build ${PACKER_OPTS} -force \
+        -var-file="${RCP_DIR}/${OS}/${VERSION}.pkrvars.hcl" \
+        ${only} \
+        "${RCP_DIR}/${OS}/."
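+    # e.g. "./build mrun alpine 3.16 k3s matchbox" reaches this command with
+    # only='-only=k3s.qemu.alpine -only=matchbox.qemu.alpine ', so both
+    # flavors are built in a single packer run.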
+} + +# +# List what you can build +# +list() { + echo "You can build : " + for os in "${RCP_DIR}"/*; do + echo " * $(basename "${os}") :" + cd "${os}" || exit 100 + for vfile in *.pkrvars.hcl; do + echo " - ${vfile}" | sed 's/\.pkrvars\.hcl$//' + done + cd - >/dev/null 2>&1 || exit 100 + done + exit 0 +} + +# +# Run all builds +# +run_all() { + versions="" + for os in "${RCP_DIR}"/*; do + cd "${os}" || exit 100 + for vfile in *.pkrvars.hcl; do + versions="${versions} $(echo "${vfile}" | sed 's/\.auto\.pkrvars\.hcl$//')" + done + OS=$(basename ${os}) + cd - >/dev/null 2>&1 || exit 100 + for ver in ${versions}; do + VERSION=${ver} + run + done + versions="" + done + set +x +} + +# +# Start only ONE build +# +start_build() { + if [ -z "${OS}" ]; then + echo "OS Name is missing !" + echo " Supported OS are :" + printf " " + ls ${RCP_DIR} + exit 1 + fi + + if [ -z "${VERSION}" ]; then + echo "OS Version is missing !" + echo " ex: ./build debian 10" + exit 2 + fi + run +} + +case "${ACTION}" in + + "list") + list + ;; + + "all") + initPacker "${2}" || exit 1 + run_all + exit ${?} + ;; + + "start") + initPacker "${2}" || exit 1 + start_build + ;; + + "run") + initPacker "${2}" || exit 1 + run_build $@ + ;; + + "mrun") + initPacker "${2}" || exit 1 + run_many $@ + ;; + + *) + echo "You need to provide a valid action!" + echo " Supported actions are:" + echo " - start " + echo " - list " + echo " - all" + echo " - run" + exit 1 + ;; +esac diff --git a/post-processors/sparsify.sh b/post-processors/sparsify.sh new file mode 100755 index 0000000..316265a --- /dev/null +++ b/post-processors/sparsify.sh @@ -0,0 +1,31 @@ +#!/bin/sh + +if [ "${#}" -ne 2 ]; then + echo Missing arguments + exit 2 +fi + +WORKDIR=${1} +VERSION=${2} + +findImages() { + find ${1} -iname "*.img" +} + +sleep 5 + +for imageName in $(findImages ${WORKDIR} ${DOMAIN}); do + if [ $(which virt-sparsify) ]; then + newName=$(echo $imageName | sed "s/.img/_${VERSION}.img/g") + virt-sparsify --compress --tmp ./ --format qcow2 ${imageName} ${newName} + if [ "${?}" -eq 0 ]; then + rm -rf ${imageName} + cd ${WORKDIR} + ln -s $(basename ${newName}) $(basename ${imageName}) + echo ${newName} ${imageName} + cd - + fi + else + echo "Sparsify skipped 'virt-sparsify' command is missing" + fi +done diff --git a/recipes/alpine/3.16.pkrvars.hcl b/recipes/alpine/3.16.pkrvars.hcl new file mode 100644 index 0000000..45e5c01 --- /dev/null +++ b/recipes/alpine/3.16.pkrvars.hcl @@ -0,0 +1,6 @@ +name = "alpine" +version = "3.16.2" +short_version = "3.16" +arch = "x86_64" +source_url = "https://dl-cdn.alpinelinux.org/alpine" +iso_cd_checksum = "6c7cb998ec2c8925d5a1239410a4d224b771203f916a18f8015f31169dd767a2" \ No newline at end of file diff --git a/recipes/alpine/3.17.pkrvars.hcl b/recipes/alpine/3.17.pkrvars.hcl new file mode 100644 index 0000000..20169c1 --- /dev/null +++ b/recipes/alpine/3.17.pkrvars.hcl @@ -0,0 +1,6 @@ +name = "alpine" +version = "3.17.0" +short_version = "3.17" +arch = "x86_64" +source_url = "https://dl-cdn.alpinelinux.org/alpine" +iso_cd_checksum = "8d4d53bd34b2045e1e219b87887b0de8d217b6cd4a8b476a077429845a5582ba" \ No newline at end of file diff --git a/recipes/alpine/base-onecontext.pkr.hcl b/recipes/alpine/base-onecontext.pkr.hcl new file mode 100644 index 0000000..27042e4 --- /dev/null +++ b/recipes/alpine/base-onecontext.pkr.hcl @@ -0,0 +1,39 @@ +#Flavour base-onecontext +build { + name = "base-onecontext" + description = <" ] + ssh_clear_authorized_keys = true + } + + provisioner "file" { + destination = "/tmp/one-context.sh" + 
source = "${local.dirs.provisionning}/one-context.sh" + } + + provisioner "shell" { + inline = [ + "sh -cx 'sh /tmp/one-context.sh'" + ] + } + + post-processor "shell-local" { + inline = [ + "/bin/sh ${local.dirs.post-processors}/sparsify.sh ${var.output_dir}/${var.version}/provisionned/one-context ${var.image_version}", + "ruby ${local.dirs.tools}/one-templates -t image -m 640 -T ${local.dirs.templates}/one/image/common.tpl -n ${local.output_name}-${var.version}-${build.name} -c '${local.output_name}-${var.version} base image' --image-file ${var.output_dir}/${var.version}/provisionned/one-context/${local.output_name}-${var.version}-one-context.img", + "ruby ${local.dirs.tools}/one-templates -t vm -m 640 -T ${local.dirs.templates}/one/vm/common.xml -n ${local.output_name}-${var.version}-${build.name} --image-name ${local.output_name}-${var.version}-${build.name}" + ] + } +} diff --git a/recipes/alpine/docker.pkr.hcl b/recipes/alpine/docker.pkr.hcl new file mode 100644 index 0000000..b6890aa --- /dev/null +++ b/recipes/alpine/docker.pkr.hcl @@ -0,0 +1,72 @@ +#Flavour docker +build { + name = "docker" + description = <" ] + ssh_clear_authorized_keys = true + } + + // Install templater and bootstraper + provisioner "shell" { + script = "${local.dirs.provisionning}/templater-install.sh" + } + + // Copy configuration values on the image + provisioner "shell" { + inline = [ + "sh -cx 'mkdir -p ${local.builder_config.TemplateDir}'", + "sh -cx 'mkdir -p ${local.builder_config.ValueDir}'" + ] + } + + // Copy configuration templates to the image + provisioner "file" { + destination = "${local.builder_config.TemplateDir}/" + source = "${local.dirs.templates}/conf/${build.name}/" + } + + // Copy configuration values on the image + provisioner "file" { + destination = "${local.builder_config.ValueDir}/${build.name}.json" + content = "${jsonencode(local.Docker)}" + } + + // Generate default configuration for docker + provisioner "shell" { + inline = [ "sh -cx '/usr/local/bin/btr -c ${local.builder_config.ValueDir}/ -t ${local.builder_config.TemplateDir}'" ] + } + + // Install OpenNebula context tool + provisioner "shell" { + script = "${local.dirs.provisionning}/one-context.sh" + } + + // Deploy the opennebula context script to manage configuration + provisioner "file" { + destination = "/etc/one-context.d/net-96-templater" + source = "${local.dirs.provisionning}/conf/one-context/net-96-templater" + } + + provisioner "shell" { + inline = [ "sh -cx 'chmod +x /etc/one-context.d/net-96-templater'" ] + } + post-processor "shell-local" { + inline = [ + "/bin/sh ${local.dirs.post-processors}/sparsify.sh ${var.output_dir}/${var.version}/provisionned/${local.Docker.Name} ${var.image_version}", + "ruby ${local.dirs.tools}/one-templates -t image -m 640 -T ${local.dirs.templates}/one/image/common.tpl -n ${local.output_name}-${var.version}-${local.Docker.Name} -c '${local.Docker.Name} base image' --image-file ${var.output_dir}/${var.version}/provisionned/${local.Docker.Name}/${local.output_name}-${var.version}-${local.Docker.Name}.img", + "ruby ${local.dirs.tools}/one-templates -t vm -m 640 -T ${local.dirs.templates}/one/vm/common.xml -n ${local.output_name}-${var.version}-${local.Docker.Name} --image-name ${local.output_name}-${var.version}-${local.Docker.Name}", + ] + } + +} diff --git a/recipes/alpine/k3s.pkr.hcl b/recipes/alpine/k3s.pkr.hcl new file mode 100644 index 0000000..7d82c44 --- /dev/null +++ b/recipes/alpine/k3s.pkr.hcl @@ -0,0 +1,76 @@ +#Flavour k3s +build { + name = "k3s" + description = <" ] + 
ssh_clear_authorized_keys = true + } + + // Install templater and bootstraper + provisioner "shell" { + script = "${local.dirs.provisionning}/templater-install.sh" + } + + // Copy configuration values on the image + provisioner "shell" { + inline = [ + "sh -cx 'mkdir -p ${local.builder_config.TemplateDir}'", + "sh -cx 'mkdir -p ${local.builder_config.ValueDir}'" + ] + } + + // Copy configuration templates to the image + provisioner "file" { + destination = "${local.builder_config.TemplateDir}/" + source = "${local.dirs.templates}/conf/${build.name}/" + } + + // Copy configuration values on the image + provisioner "file" { + destination = "${local.builder_config.ValueDir}/${build.name}.json" + content = "${jsonencode(local.K3S)}" + } + + // Generate default configuration for kubernetes + provisioner "shell" { + max_retries = 3 + inline = [ "sh -cx '/usr/local/bin/btr -c ${local.builder_config.ValueDir}/ -t ${local.builder_config.TemplateDir}'" ] + } + + provisioner "file" { + destination = "/tmp/${build.name}.sh" + source = "${local.dirs.provisionning}/${build.name}.sh" + } + + provisioner "file" { + destination = "/tmp/one-context.sh" + source = "${local.dirs.provisionning}/one-context.sh" + } + + provisioner "shell" { + inline = [ + "sh -cx 'sh /tmp/one-context.sh'", + "sh -cx 'sh /tmp/${build.name}.sh'" + ] + } + + post-processor "shell-local" { + inline = [ + "/bin/sh ${path.cwd}/post-processors/sparsify.sh ${var.output_dir}/${var.version}/provisionned/k3s ${var.image_version}", + "ruby ${local.dirs.tools}/one-templates -t image -m 640 -T ${local.dirs.templates}/one/image/common.tpl -n ${local.output_name}-${var.version}-k3s -c 'k3s base image' --image-file ${var.output_dir}/${var.version}/provisionned/k3s/${local.output_name}-${var.version}-k3s.img", + "ruby ${local.dirs.tools}/one-templates -t vm -m 640 -T ${local.dirs.templates}/one/vm/k3s.xml -n ${local.output_name}-${var.version}-k3s --image-name ${local.output_name}-${var.version}-k3s" + ] + } + +} diff --git a/recipes/alpine/kubernetes.pkr.hcl b/recipes/alpine/kubernetes.pkr.hcl new file mode 100644 index 0000000..e9ba131 --- /dev/null +++ b/recipes/alpine/kubernetes.pkr.hcl @@ -0,0 +1,112 @@ +#Flavour kubernetes +build { + name = "kubernetes" + description = <" ] + ssh_clear_authorized_keys = true + } + + // Install templater and bootstraper + provisioner "shell" { + script = "${local.locations.provisionning}/templater-install.sh" + } + + // Copy configuration values on the image + provisioner "shell" { + inline = [ + "sh -cx 'mkdir -p ${local.builder_config.TemplateDir}'", + "sh -cx 'mkdir -p ${local.builder_config.ValueDir}'" + ] + } + + // Copy configuration templates to the image + provisioner "file" { + destination = "${local.builder_config.TemplateDir}/" + source = "${local.locations.templates}/conf/${build.name}/" + } + + // Copy configuration values on the image + provisioner "file" { + destination = "${local.builder_config.ValueDir}/${build.name}.json" + content = "${jsonencode(local.Kubernetes)}" + } + + // Copy Sharemetrics script + provisioner "file" { + destination = "/etc/local.d/sharemetrics.start" + source = "${local.locations.provisionning}/conf/${build.name}/sharemetrics.start" + } + + provisioner "file" { + destination = "/etc/local.d/initkubernetes.start" + source = "${local.locations.provisionning}/conf/${build.name}/initkubernetes.start" + } + + provisioner "shell" { + inline = [ + "chmod +x /etc/local.d/sharemetrics.start", + "chmod +x /etc/local.d/initkubernetes.start" + ] + } + + // Generate default 
configuration for kubernetes + provisioner "shell" { + max_retries = 3 + inline = [ "sh -cx '/usr/local/bin/btr -c ${local.builder_config.ValueDir}/ -t ${local.builder_config.TemplateDir}'" ] + } + + // Complete kubernetes install + provisioner "shell" { + expect_disconnect = true + max_retries = 6 + script = "${local.locations.provisionning}/${build.name}.sh" + } + + // Install OpenNebula context tool + provisioner "shell" { + script = "${local.locations.provisionning}/one-context.sh" + } + + // Deploy the opennebula context script to manage configuration + provisioner "file" { + destination = "/etc/one-context.d/net-96-templater" + source = "${local.locations.provisionning}/conf/one-context/net-96-templater" + } + + provisioner "shell" { + inline = [ + "chmod +x /etc/one-context.d/net-96-templater" + ] + } + + provisioner "shell" { + inline = [ + "service docker start", + "service containerd start", + "sleep 5", + "kubeadm config images pull" ] + } + + post-processor "shell-local" { + inline = [ + "/bin/sh ${path.cwd}/post-processors/sparsify.sh ${var.output_dir}/${var.version}/provisionned/${local.Kubernetes.Name} ${var.image_version}", + "ruby ${local.locations.tools}/one-templates -t image -m 640 -T ${local.locations.templates}/one/image/common.tpl -n ${local.output_name}-${var.version}-${local.Kubernetes.Name} -c '${local.Kubernetes.Name} base image' --image-file ${var.output_dir}/${var.version}/provisionned/${local.Kubernetes.Name}/${local.output_name}-${var.version}-${local.Kubernetes.Name}.img", + "ruby ${local.locations.tools}/one-templates -t vm -m 640 -T ${local.locations.templates}/one/vm/kubeleader.xml -n ${local.output_name}-${var.version}-${local.Kubernetes.Name}Leader --image-name ${local.output_name}-${var.version}-${local.Kubernetes.Name}", + "ruby ${local.locations.tools}/one-templates -t vm -m 640 -T ${local.locations.templates}/one/vm/kubemaster.xml -n ${local.output_name}-${var.version}-${local.Kubernetes.Name}Master --image-name ${local.output_name}-${var.version}-${local.Kubernetes.Name}", + "ruby ${local.locations.tools}/one-templates -t vm -m 640 -T ${local.locations.templates}/one/vm/kubeworker.xml -n ${local.output_name}-${var.version}-${local.Kubernetes.Name}Worker --image-name ${local.output_name}-${var.version}-${local.Kubernetes.Name}", + "ruby ${local.locations.tools}/one-templates -t service -m 640 -T ${local.locations.templates}/one/service/${build.name}-cluster.json -n ${build.name}-cluster-${local.output_name}-${var.version} --vm-name ${local.output_name}-${var.version}-${local.Kubernetes.Name}", + ] + } + +} diff --git a/recipes/alpine/locals.builder.pkr.hcl b/recipes/alpine/locals.builder.pkr.hcl new file mode 100644 index 0000000..9614d0e --- /dev/null +++ b/recipes/alpine/locals.builder.pkr.hcl @@ -0,0 +1,6 @@ + locals { + builder_config = { + TemplateDir = "/usr/share/builder/templates" + ValueDir = "/usr/share/builder/values" + } + } \ No newline at end of file diff --git a/recipes/alpine/locals.docker.pkr.hcl b/recipes/alpine/locals.docker.pkr.hcl new file mode 100644 index 0000000..4dacc91 --- /dev/null +++ b/recipes/alpine/locals.docker.pkr.hcl @@ -0,0 +1,34 @@ +locals { + // Definition of the Kubernetes service (templater compatible) + ServiceDocker = { + ConfigFiles = [] + Repositories = {} + Packages = { + docker = { + name = "docker" + action = "install" + } + docker-compose = { + name = "docker-compose" + action = "install" + } + } + Daemons = { + docker = { + name = "docker" + type = "auto" + enabled = true + } + } + Vars = {} + Users = 
{} + } + + Docker = { + Name = "docker" + Globals = local.Globals + Services = { + Docker = local.ServiceDocker + } + } +} \ No newline at end of file diff --git a/recipes/alpine/locals.globals.pkr.hcl b/recipes/alpine/locals.globals.pkr.hcl new file mode 100644 index 0000000..f940e19 --- /dev/null +++ b/recipes/alpine/locals.globals.pkr.hcl @@ -0,0 +1,7 @@ +locals { + Globals = { + Vars = { + PrometheusPort = "9090" + } + } +} \ No newline at end of file diff --git a/recipes/alpine/locals.k3s.pkr.hcl b/recipes/alpine/locals.k3s.pkr.hcl new file mode 100644 index 0000000..94c1746 --- /dev/null +++ b/recipes/alpine/locals.k3s.pkr.hcl @@ -0,0 +1,79 @@ +locals { + // Definition of the Kubernetes service (templater compatible) + ServiceK3S = { + ConfigFiles = [ + { + destination = "/etc/conf.d/k3s" + source = "k3s.conf.pkr.hcl" + mode = "600" + owner = "root" + group = "root" + } + ] + Repositories = { + AlpineEdge = { + type = "apk" + name = "community" + url = "http://mirrors.ircam.fr/pub/alpine/edge/community" + enabled = true + } + AlpineEdgeTesting = { + type = "apk" + name = "testing" + url = "http://mirrors.ircam.fr/pub/alpine/edge/testing" + enabled = true + } + } + Packages = { + kubelet = { + name = "k3s" + action = "install" + } + kubeadm = { + name = "kubeadm" + action = "install" + } + kubectl = { + name = "kubectl" + action = "install" + } + uuidgen = { + name = "uuidgen" + action = "install" + } + } + Vars = { + ServerName = "kube" + ServerRole = "master" + DeployTraefik = false + } + Users = {} + Daemons = { + kubelet = { + name = "k3s" + type = "auto" + enabled = true + } + ntpd = { + name = "ntpd" + type = "auto" + enabled = true + } + local = { + name = "local" + type = "auto" + enabled = true + } + } + } + + // Definition of the Kubernetes full configuration (with all the services) + K3S = { + Name = "k3s" + Globals = local.Globals + Services = { + Docker = local.ServiceDocker + K3S = local.ServiceK3S + } + } +} diff --git a/recipes/alpine/locals.kubernetes.pkr.hcl b/recipes/alpine/locals.kubernetes.pkr.hcl new file mode 100644 index 0000000..74b97be --- /dev/null +++ b/recipes/alpine/locals.kubernetes.pkr.hcl @@ -0,0 +1,90 @@ +locals { + // Definition of the Kubernetes service (templater compatible) + ServiceKubernetes = { + ConfigFiles = [] + Repositories = { + AlpineEdge = { + type = "apk" + name = "community" + url = "http://mirrors.ircam.fr/pub/alpine/edge/community" + enabled = true + } + AlpineEdgeTesting = { + type = "apk" + name = "testing" + url = "http://mirrors.ircam.fr/pub/alpine/edge/testing" + enabled = true + } + } + Packages = { + cni-plugin-flannel = { + name = "cni-plugin-flannel" + action = "install" + } + cni-plugins = { + name = "cni-plugins" + action = "install" + } + flannel = { + name = "flannel" + action = "install" + } + flannel-contrib-cni = { + name = "flannel-contrib-cni" + action = "install" + } + cilium = { + name = "cilium-cli" + action = "install" + } + kubelet = { + name = "kubelet" + action = "install" + } + kubeadm = { + name = "kubeadm" + action = "install" + } + kubectl = { + name = "kubectl" + action = "install" + } + uuidgen = { + name = "uuidgen" + action = "install" + } + } + Vars = { + ServerName = "kube" + ServerRole = "master" + } + Users = {} + Daemons = { + kubelet = { + name = "kubelet" + type = "auto" + enabled = true + } + ntpd = { + name = "ntpd" + type = "auto" + enabled = true + } + local = { + name = "local" + type = "auto" + enabled = true + } + } + } + + // Definition of the Kubernetes full configuration (with all the 
services) + Kubernetes = { + Name = "kubernetes" + Globals = local.Globals + Services = { + Docker = local.ServiceDocker + Kubernetes = local.ServiceKubernetes + } + } +} diff --git a/recipes/alpine/locals.matchbox.pkr.hcl b/recipes/alpine/locals.matchbox.pkr.hcl new file mode 100644 index 0000000..dbb67dc --- /dev/null +++ b/recipes/alpine/locals.matchbox.pkr.hcl @@ -0,0 +1,120 @@ +locals { + // Definition of the Kubernetes service (templater compatible) + ServiceMatchBox = { + ConfigFiles = [ + { + destination = "/etc/dnsmasq.d/pxe.conf" + source = "dnsmasq.d/ipxe.conf.pktpl.hcl" + mode = "600" + owner = "root" + group = "root" + }, + { + destination = "/etc/dnsmasq-hosts.conf" + source = "dnsmasq.d/dnsmasq-hosts.conf.pktpl.hcl" + mode = "600" + owner = "dnsmasq" + group = "root" + }, + { + destination = "/etc/conf.d/matchbox" + source = "conf.d/matchbox.conf.pktpl.hcl" + mode = "600" + owner = "root" + group = "root" + }, + { + destination = "/etc/init.d/matchbox" + source = "init.d/matchbox.pktpl.hcl" + mode = "700" + owner = "root" + group = "root" + } + ] + Repositories = { + AlpineEdgeTesting = { + type = "apk" + name = "testing" + url = "http://mirrors.ircam.fr/pub/alpine/edge/testing" + enabled = true + } + } + Packages = { + dnsmasq = { + name = "dnsmasq" + action = "install" + } + terraform = { + name = "terraform" + action = "install" + } + git = { + name = "git" + action = "install" + } + kubectl = { + name = "kubectl" + action = "install" + } + gpg = { + name = "gpg" + action = "install" + } + } + Vars = { + PXE = { + DHCPMode = "proxy" + DNSDomain = "cadoles.com" + ListenInterface = "eth0" + GreetingMessage = "Cadoles PXE Boot Server" + DelayTime = "5" + BootingMessage = "Booting from network the Cadoles way" + DHCPRangeStart = "" + DHCPRangeEnd = "" + DHCPLeaseDuration = "1h" + TFTPRoot = "/var/lib/tftpboot" + } + MatchBox = { + Hostname = "mb.cadoles.com" + HTTPPort = "8080" + gRPCPort = "8081" + LogLevel = "info" + } + ETH0 = { + IP = "" + DNS = "" + GATEWAY = "" + } + Set = { + Hostname = "matchbox" + } + } + Users = {} + Daemons = { + matchbox = { + name = "matchbox" + type = "auto" + enabled = true + } + dnsmasq = { + name = "dnsmasq" + type = "auto" + enabled = true + } + local = { + name = "local" + type = "auto" + enabled = true + } + } + } + + // Definition of the Kubernetes full configuration (with all the services) + MatchBox = { + Name = "matchbox" + Globals = local.Globals + Services = { + MatchBox = local.ServiceMatchBox + } + } +} \ No newline at end of file diff --git a/recipes/alpine/locals.pkr.hcl b/recipes/alpine/locals.pkr.hcl new file mode 100644 index 0000000..8a562ad --- /dev/null +++ b/recipes/alpine/locals.pkr.hcl @@ -0,0 +1,24 @@ +# "timestamp" template function replacement +locals { + locations = { + recipes = "${path.cwd}/recipes/${var.name}" + templates = "${path.cwd}/recipes/${var.name}/templates" + provisionning = "${path.cwd}/recipes/${var.name}/provisionning" + post-processors = "${path.cwd}/recipes/${var.name}/post-processor" + tools = "${path.cwd}/tools" + } + dirs = local.locations + timestamp = regex_replace(timestamp(), "[- TZ:]", "") + output_name = "${var.name}" + source_checksum_url = "file:${var.source_url}/${var.version}/${var.arch}/iso-cd/SHA256SUMS" + source_iso = "${var.source_url}/v${var.short_version}/releases/${var.arch}/alpine-virt-${var.version}-${var.arch}.iso" + source_checksum = "${var.iso_cd_checksum}" + ssh_user = "root" + ssh_password = "PbkRc1vup7Wq5n4r" + disk_size = 8000 + memory = 512 + installOpts = { + hostname 
= var.name + user = "eole" + } +} diff --git a/recipes/alpine/main.pkr.hcl b/recipes/alpine/main.pkr.hcl new file mode 100644 index 0000000..b7ddac4 --- /dev/null +++ b/recipes/alpine/main.pkr.hcl @@ -0,0 +1,66 @@ +#Flavour base +build { + name = "base" + description = <root", + "", + "setup-interfaces", + "ifup eth0", + "mkdir -p .ssh", + "wget http://{{.HTTPIP}}:{{.HTTPPort}}/ssh-packer-pub.key -O .ssh/authorized_keys", + "chmod 600 .ssh/authorized_keys", + "wget http://{{.HTTPIP}}:{{.HTTPPort}}/install.conf", + "setup-sshd -c openssh -k .ssh/authorized_keys", + ] + } + + provisioner "shell" { + pause_before = "1s" + expect_disconnect = true # Because the previous step has rebooted the machine + script = "${local.locations.provisionning}/${var.name}-${var.short_version}-install.sh" + valid_exit_codes = [ 0, 141 ] + } + + provisioner "shell" { + pause_before = "1s" + inline = [ "sh -cx 'mkdir -p ${local.builder_config.TemplateDir}'" ] + } + + provisioner "shell" { + pause_before = "10s" + script = "${local.locations.provisionning}/${var.name}-${var.short_version}-postinstall.sh" + } + + provisioner "shell" { + script = "${local.locations.provisionning}/letsencrypt.sh" + } + + provisioner "file" { + destination = "/etc/conf.d/chronyd" + source = "${local.locations.templates}/conf/conf.d/" + } + + post-processor "manifest" { + keep_input_artifact = true + } + + post-processor "shell-local" { + inline = [ + "/bin/sh ${path.cwd}/post-processors/sparsify.sh ${var.output_dir}/${var.version}/base ${var.image_version}" + ] + } +} diff --git a/recipes/alpine/matchbox.pkr.hcl b/recipes/alpine/matchbox.pkr.hcl new file mode 100644 index 0000000..cece909 --- /dev/null +++ b/recipes/alpine/matchbox.pkr.hcl @@ -0,0 +1,101 @@ +#Flavour matchbox +build { + name = "matchbox" + description = <" ] + ssh_clear_authorized_keys = true + } + + // Install templater and bootstraper + provisioner "shell" { + script = "${local.dirs.provisionning}/templater-install.sh" + } + + // Copy configuration values on the image + provisioner "shell" { + inline = [ + "sh -cx 'mkdir -p ${local.builder_config.TemplateDir}'", + "sh -cx 'mkdir -p ${local.builder_config.ValueDir}'" + ] + } + + // Copy configuration templates to the image + provisioner "file" { + destination = "${local.builder_config.TemplateDir}/" + source = "${local.dirs.templates}/conf/${build.name}/" + } + + // Copy configuration values on the image + provisioner "file" { + destination = "${local.builder_config.ValueDir}/${build.name}.json" + content = "${jsonencode(local.MatchBox)}" + } + + // Copy matchbox boot provisionning script + provisioner "file" { + destination = "/etc/local.d/initmatchbox.start" + source = "${local.locations.provisionning}/conf/${build.name}/initmatchbox.start" + } + + // Copy tftp provisionning script + provisioner "file" { + destination = "/etc/local.d/inittftp.start" + source = "${local.locations.provisionning}/conf/${build.name}/inittftp.start" + } + + // Generate default configuration for kubernetes + provisioner "shell" { + max_retries = 3 + inline = [ "sh -cx '/usr/local/bin/btr -c ${local.builder_config.ValueDir}/ -t ${local.builder_config.TemplateDir}'" ] + } + + provisioner "file" { + destination = "/tmp/${build.name}.sh" + source = "${local.dirs.provisionning}/${build.name}.sh" + } + + provisioner "file" { + destination = "/tmp/one-context.sh" + source = "${local.dirs.provisionning}/one-context.sh" + } + + provisioner "shell" { + inline = [ + "sh -cx 'sh /tmp/one-context.sh'", + "sh -cx 'sh /tmp/${build.name}.sh'" + ] + } + 
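+  // Deploy the opennebula context script to manage configuration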
+ provisioner "file" { + destination = "/etc/one-context.d/net-96-templater" + source = "${local.dirs.provisionning}/one-context/net-96-templater" + } + + provisioner "shell" { + inline = [ + "chmod +x /etc/local.d/initmatchbox.start", + "chmod +x /etc/local.d/inittftp.start", + "chmod +x /etc/one-context.d/net-96-templater" + ] + } + + post-processor "shell-local" { + inline = [ + "/bin/sh ${path.cwd}/post-processors/sparsify.sh ${var.output_dir}/${var.version}/provisionned/matchbox ${var.image_version}", + "ruby ${local.dirs.tools}/one-templates -t image -m 640 -T ${local.dirs.templates}/one/image/common.tpl -n ${local.output_name}-${var.version}-matchbox -c 'Matchbox base image' --image-file ${var.output_dir}/${var.version}/provisionned/matchbox/${local.output_name}-${var.version}-matchbox.img", + "ruby ${local.dirs.tools}/one-templates -t vm -m 640 -T ${local.dirs.templates}/one/vm/matchbox.xml -n ${local.output_name}-${var.version}-matchbox --image-name ${local.output_name}-${var.version}-matchbox" + ] + } + +} diff --git a/recipes/alpine/plugins.pkr.hcl b/recipes/alpine/plugins.pkr.hcl new file mode 100644 index 0000000..676a4b4 --- /dev/null +++ b/recipes/alpine/plugins.pkr.hcl @@ -0,0 +1,12 @@ +packer { + required_plugins { + sshkey = { + version = ">= 1.0.1" + source = "github.com/ivoronin/sshkey" + } + } +} + +data "sshkey" "install" { + type = "ed25519" +} \ No newline at end of file diff --git a/recipes/alpine/post-processor/sparsify.sh b/recipes/alpine/post-processor/sparsify.sh new file mode 100755 index 0000000..316265a --- /dev/null +++ b/recipes/alpine/post-processor/sparsify.sh @@ -0,0 +1,31 @@ +#!/bin/sh + +if [ "${#}" -ne 2 ]; then + echo Missing arguments + exit 2 +fi + +WORKDIR=${1} +VERSION=${2} + +findImages() { + find ${1} -iname "*.img" +} + +sleep 5 + +for imageName in $(findImages ${WORKDIR} ${DOMAIN}); do + if [ $(which virt-sparsify) ]; then + newName=$(echo $imageName | sed "s/.img/_${VERSION}.img/g") + virt-sparsify --compress --tmp ./ --format qcow2 ${imageName} ${newName} + if [ "${?}" -eq 0 ]; then + rm -rf ${imageName} + cd ${WORKDIR} + ln -s $(basename ${newName}) $(basename ${imageName}) + echo ${newName} ${imageName} + cd - + fi + else + echo "Sparsify skipped 'virt-sparsify' command is missing" + fi +done diff --git a/recipes/alpine/provisionning/alpine-3.16-install.sh b/recipes/alpine/provisionning/alpine-3.16-install.sh new file mode 100644 index 0000000..1bdc826 --- /dev/null +++ b/recipes/alpine/provisionning/alpine-3.16-install.sh @@ -0,0 +1,15 @@ +#!/bin/sh +#set -xeo pipefail + +# Run the installer +yes | setup-alpine -e -f install.conf + +# Copy ssh keys +echo "Copy packer ssh key" +mount /dev/vg0/lv_root /mnt +cp -rp .ssh /mnt/root/ +sync +umount /mnt + +echo "Rebooting the host after install" +reboot -nf \ No newline at end of file diff --git a/recipes/alpine/provisionning/alpine-3.16-postinstall.sh b/recipes/alpine/provisionning/alpine-3.16-postinstall.sh new file mode 100644 index 0000000..9c3af99 --- /dev/null +++ b/recipes/alpine/provisionning/alpine-3.16-postinstall.sh @@ -0,0 +1,21 @@ +#!/bin/sh +set -xeo pipefail + +apk add --no-cache wget curl jq haveged ca-certificates rsyslog + +rc-update add haveged boot +rc-update add rsyslog boot +rc-update add sshd boot + +# Generate root password +pass=$(openssl rand -base64 32 | tee -a .secret) +chmod 600 .secret +echo -e "${pass}\n${pass}" | passwd + +# Remove expect package + +# Prevent logs spamming like "process '/sbin/getty -L 0 ttyS0 vt100' (pid 2516) exited. Scheduling for restart." 
+# We don't need an access to ttyS0 +sed -i 's@^\(ttyS0::respawn.*\)@#\1@' /etc/inittab + +sync diff --git a/recipes/alpine/provisionning/alpine-3.17-install.sh b/recipes/alpine/provisionning/alpine-3.17-install.sh new file mode 120000 index 0000000..2d4ac84 --- /dev/null +++ b/recipes/alpine/provisionning/alpine-3.17-install.sh @@ -0,0 +1 @@ +alpine-3.16-install.sh \ No newline at end of file diff --git a/recipes/alpine/provisionning/alpine-3.17-postinstall.sh b/recipes/alpine/provisionning/alpine-3.17-postinstall.sh new file mode 120000 index 0000000..db37049 --- /dev/null +++ b/recipes/alpine/provisionning/alpine-3.17-postinstall.sh @@ -0,0 +1 @@ +alpine-3.16-postinstall.sh \ No newline at end of file diff --git a/recipes/alpine/provisionning/conf/kubernetes/initkubernetes.start b/recipes/alpine/provisionning/conf/kubernetes/initkubernetes.start new file mode 100644 index 0000000..a0e0748 --- /dev/null +++ b/recipes/alpine/provisionning/conf/kubernetes/initkubernetes.start @@ -0,0 +1,181 @@ +#!/bin/sh + +ENV_FILE=${ENV_FILE:-/var/run/one-context/one_env} +LOG_FILE="/var/log/initkubernets.log" +FIRST_BOOT="/var/run/firstboot.flag" + +infoLog() { + echo "Info: $@" | tee -a ${LOG_FILE} +} + +errorLog() { + echo "Error: $@" | tee -a ${LOG_FILE} +} + +waitReadyState() { + local vmID="${1}" + local timeout="${2}" + + local tick=0 + while true ;do + local ready=$(onegate vm show ${vmID} --json | jq -rc ".VM.USER_TEMPLATE.READY") + if [ "${ready}" = "YES" ];then + return 0 + elif [ "${timeout}" -eq "${tick}" ];then + return ${timeout} + else + sleep 1 + tick=$((tick+1)) + fi + done +} + +returnToken() { + infoLog "Returning tokens" + local caSecretKey="${1}" + local caToken=$(openssl x509 -in /etc/kubernetes/pki/ca.crt -noout -pubkey | openssl rsa -pubin -outform DER 2>/dev/null | sha256sum | cut -d' ' -f1) + local kubeToken=$(kubeadm token list | awk '/authentication,signing.*The default*/ {print $1}') + local masterAddr=$(awk -F '/' '/server/ {print $3}' /etc/kubernetes/admin.conf) + + if [ -n "${ONEGATE_ENDPOINT}" ];then + infoLog "Onegate detected" + data="READY=YES" + data="${data} MASTER_ADDR=${masterAddr}" + data="${data} MASTER_TOKEN=${kubeToken}" + data="${data} MASTER_CA_TOKEN=sha256:${caToken}" + data="${data} MASTER_CA_SECRET_KEY=${caSecretKey}" + onegate vm update --data "${data}" + infoLog "Onegate data seted" + else + infoLog "Onegate is not present" + echo "${masterAdd} ${kubeToken} ${caToken}" >> /root/kube.token + infoLog "Tokens are available at /root/kube.token" + fi +} + +joinCluster() { + local master="${MASTER_ADDR}" + local token="${MASTER_TOKEN}" + local caToken="${MASTER_CA_TOKEN}" + local caSecretKey="${MASTER_CA_SECRET_KEY}" + local sname="${SERVICE_NAME}" + + if [ -n "${ONEGATE_ENDPOINT}" ];then + local masterID=$(onegate service show --json | jq -c '.SERVICE.roles[] | select(.name == "leader") | .nodes[0].deploy_id') + if [ "${?}" -eq 0 ]; then + waitReadyState ${masterID} 600 + if [ "${?}" -ne 0 ];then + errorLog "Master node is node ready after 600s" + return 3 + fi + local masterInfo=$(onegate vm show ${masterID} --json | \ + jq -cr ".VM.USER_TEMPLATE.MASTER_ADDR, .VM.USER_TEMPLATE.MASTER_TOKEN, .VM.USER_TEMPLATE.MASTER_CA_TOKEN,.VM.USER_TEMPLATE.MASTER_CA_SECRET_KEY, .VM.TEMPLATE.NIC[0].IP") + master=$(echo ${masterInfo} | cut -d " " -f 1) + token=$(echo ${masterInfo} | cut -d " " -f 2) + caToken=$(echo ${masterInfo} | cut -d " " -f 3) + caSecretKey=$(echo ${masterInfo} | cut -d " " -f 4) + masterIP=$(echo ${masterInfo} | cut -d " " -f 5) + sname=$(onegate 
service show --json | jq -cr ".SERVICE.name") + fi + + # Setting dns resolution for cluster + echo "${masterIP} ${sname}" >> /etc/hosts + onegate service show --json | jq -rc '.SERVICE.roles[].nodes[].vm_info.VM | .TEMPLATE.NIC[].IP + " " + .NAME' >> /etc/hosts + fi + if [ -n "${master}" ] & [ -n "${token}" ] & [ -n "${caToken}" ];then + opts="--node-name $(hostname -f)" + opts="${opts} --token ${token}" + opts="${opts} --discovery-token-ca-cert-hash ${caToken}" + if [ -n "${1}" ];then + opts="${opts} --control-plane" + opts="${opts} --certificate-key ${caSecretKey}" + fi + opts="${opts} ${master}" + + kubeadm join ${opts} | tee -a "${LOG_FILE}" + else + errorLog "Something is missing, can't join the cluster:" + errorLog " Master addr: [${master}]" + errorLog " Master token: [${token}]" + errorLog " Master CA token: [${caToken}]" + return 3 + fi +} + +getServiceName() { + local sname=$(onegate service show --json | jq -cr ".SERVICE.name") + local tmout=30 + local tick=0 + while true ;do + if [ -z "${sname}" ];then + sname=$(onegate service show --json | jq -cr ".SERVICE.name") + else + echo ${sname} + return 0 + fi + sleep 1 + tick=$((tick+1)) + if [ ${tmout} -eq ${tick} ];then + hostname -f + return 3 + fi + done +} + +initLeader() { + sname="$(hostname -f)" + + if [ -n "${ONEGATE_ENDPOINT}" ];then + sname=$(getServiceName) + sip=$(onegate vm show --json | jq -rc ".VM.TEMPLATE.NIC[0].IP") + echo "${sip} ${sname} $(hostname -f)" >> /etc/hosts + onegate service show --json | jq -rc '.SERVICE.roles[].nodes[].vm_info.VM | .TEMPLATE.NIC[].IP + " " + .NAME' >> /etc/hosts + fi + + caSecretKey=$(date | sha256sum | awk '{print $1}') + + infoLog "Kubernetes init started" + kubeadm init --pod-network-cidr=10.244.0.0/16 \ + --node-name="${SET_HOSTNAME}" \ + --control-plane-endpoint "${sname}:6443" \ + --upload-certs --certificate-key "${caSecretKey}" | tee -a "${LOG_FILE}" + infoLog "Kubernetes init ended" + + infoLog "Configuring kubectl" + mkdir /root/.kube + ln -s /etc/kubernetes/admin.conf /root/.kube/config + infoLog "kubectl configured" + + infoLog "Installing cilium" + sleep 20 + kubectl config view --minify -o jsonpath='{.clusters[].name}' + sleep 20 + cilium install --helm-set 'cni.binPath=/usr/libexec/cni' --wait | tee -a "${LOG_FILE}" + infoLog "Cilium is installed" + + returnToken "${caSecretKey}" +} + +initKube() { + if [ "${SERVER_ROLE}" == "leader" ];then + initLeader + elif [ "${SERVER_ROLE}" == "worker" ];then + joinCluster + elif [ "${SERVER_ROLE}" == "master" ];then + joinCluster "${SERVER_ROLE}" + fi + touch ${FIRST_BOOT} + infoLog "Kubernetes cluster init is finished" +} + +if [ -f "${ENV_FILE}" ]; then + . 
"${ENV_FILE}" +fi + +if [ -f "${FIRST_BOOT}" ];then + exit 0 +else + uuidgen > /etc/machine-id + swapoff -a # Make sure swap is disabled + initKube & +fi \ No newline at end of file diff --git a/recipes/alpine/provisionning/conf/kubernetes/sharemetrics.start b/recipes/alpine/provisionning/conf/kubernetes/sharemetrics.start new file mode 100644 index 0000000..6876caf --- /dev/null +++ b/recipes/alpine/provisionning/conf/kubernetes/sharemetrics.start @@ -0,0 +1,3 @@ +#!/bin/sh + +mount --make-rshared / \ No newline at end of file diff --git a/recipes/alpine/provisionning/conf/matchbox/initmatchbox.start b/recipes/alpine/provisionning/conf/matchbox/initmatchbox.start new file mode 100644 index 0000000..9180b96 --- /dev/null +++ b/recipes/alpine/provisionning/conf/matchbox/initmatchbox.start @@ -0,0 +1,25 @@ +#!/bin/sh + +FL_VERSIONS="current 3374.2.0" +MATCHBOX_DIR="/var/lib/matchbox" +ASSETS_DIR="${MATCHBOX_DIR}/assets/" + +GPG_FNAME="Flatcar_Image_Signing_Key.asc" +GPG_KEYS_URL="https://www.flatcar.org/security/image-signing-key/" + +cd /tmp +curl -L -O ${GPG_KEYS_URL}/${GPG_FNAME} +gpg --import --keyid-format LONG ${GPG_FNAME} +cd - + +echo "Provisionning matchbox with flatcar images" +tout=30 +for version in ${FL_VERSIONS}; do + for i in $(seq 1 ${tout});do + echo " * ${FL_VERSIONS} stable image (try ${i})" + /usr/local/bin/get-flatcar stable ${version} ${ASSETS_DIR} + if [[ "${?}" -eq 0 ]]; then + break + fi + done +done diff --git a/recipes/alpine/provisionning/conf/matchbox/inittftp.start b/recipes/alpine/provisionning/conf/matchbox/inittftp.start new file mode 100644 index 0000000..e076de9 --- /dev/null +++ b/recipes/alpine/provisionning/conf/matchbox/inittftp.start @@ -0,0 +1,10 @@ +#!/bin/sh + +dest="${1}" + +ipxeEFISource="http://boot.ipxe.org/ipxe.efi" +kpxeSource="http://boot.ipxe.org/undionly.kpxe" + +cd "${dest}" +wget "${ipxeEFISource}" +wget "${kpxeSource}" \ No newline at end of file diff --git a/recipes/alpine/provisionning/conf/one-context/net-90-jenkins-slave b/recipes/alpine/provisionning/conf/one-context/net-90-jenkins-slave new file mode 100644 index 0000000..2540f0e --- /dev/null +++ b/recipes/alpine/provisionning/conf/one-context/net-90-jenkins-slave @@ -0,0 +1,13 @@ +#!/bin/sh + +CONF="/etc/conf.d/jenkins-slave" +if [ -e "/etc/jenkins-slave.conf" ]; then + CONF="/etc/jenkins-slave.conf" +fi + +TOTAL_MEMORY=$(cat /proc/meminfo | grep MemTotal | awk '{ printf "%sg", int($2/1024/1024)+1 }') +sed -i "s|^JENKINS_SLAVE_NAME=.*$|JENKINS_SLAVE_NAME='slave-$ETH0_IP'|" "${CONF}" +sed -i "s|^JENKINS_SLAVE_USERNAME=.*$|JENKINS_SLAVE_USERNAME='$JENKINS_SLAVE_USERNAME'|" "${CONF}" +sed -i "s|^JENKINS_SLAVE_PASSWORD=.*$|JENKINS_SLAVE_PASSWORD='$JENKINS_SLAVE_PASSWORD'|" "${CONF}" +sed -i "s|^JENKINS_MASTER_URL=.*$|JENKINS_MASTER_URL='$JENKINS_MASTER_URL'|" "${CONF}" +sed -i "s|^JENKINS_SLAVE_LABELS=.*$|JENKINS_SLAVE_LABELS='docker docker-compose mem-$TOTAL_MEMORY $JENKINS_SLAVE_LABELS'|" "${CONF}" diff --git a/recipes/alpine/provisionning/conf/one-context/net-96-gitlab-register b/recipes/alpine/provisionning/conf/one-context/net-96-gitlab-register new file mode 100644 index 0000000..821d654 --- /dev/null +++ b/recipes/alpine/provisionning/conf/one-context/net-96-gitlab-register @@ -0,0 +1,31 @@ +#!/usr/bin/env bash + +ENV_FILE=${ENV_FILE:-/var/run/one-context/one_env} + +# $TOKENTXT is available only through the env. file +# shellcheck disable=SC1090 +if [ -f "${ENV_FILE}" ]; then + . 
"${ENV_FILE}" +fi + +### + +if [ -n "${GITLAB_URL}" ]; then + if command -v gitlab-runner; then + if [ -n "${GITLAB_SHELL}" ]; then + opts="--shell=${GITLAB_SHELL}" + fi + # shellcheck disable=SC2086 + gitlab-runner register \ + --non-interactive \ + --url="${GITLAB_URL}" \ + --registration-token="${GITLAB_TOKEN}" \ + --executor="${GITLAB_EXECUTOR}" \ + --description="${GITLAB_RUNNER_NAME}" \ + --tag-list="${GITLAB_TAG_LIST}" \ + --locked=false \ + --access-level=not_protected \ + --run-untagged=false \ + "${opts}" + fi +fi diff --git a/recipes/alpine/provisionning/conf/one-context/net-96-templater b/recipes/alpine/provisionning/conf/one-context/net-96-templater new file mode 100644 index 0000000..f3f9257 --- /dev/null +++ b/recipes/alpine/provisionning/conf/one-context/net-96-templater @@ -0,0 +1,80 @@ +#!/usr/bin/env bash + +# +# Generate all the configuration files +# Get all the values from the VLS_DIR +# Process each template from the TPL_DIR with this values +# + +ENV_FILE=${ENV_FILE:-/var/run/one-context/one_env} +TPL_DIR="/usr/share/builder/templates" +VLS_DIR="/usr/share/builder/values" +CONFIG="" + +. ${ENV_FILE} + +BTR="$(command -v btr)" +if [ "${?}" -ne 0 ]; then + echo "Warning: Nothing to do the templater is not installed" + exit 0 +fi + +if [ ! -e "${TPL_DIR}" ]; then + echo "Error: The template dir is missing (${TPL_DIR})" + exit 1 +fi + +if [ ! -e "${VLS_DIR}" ]; then + echo "Error: The template dir is missing (${VLS_DIR})" + exit 1 +fi + +jsonQuery() { + local data="${1}" + local query="${2}" + echo "${data}" | jq -cr "${query}" +} + +# NAME: @jsonMerge +# AIM: Merge two json structures +# NOTES: +# The last one has de last word +# if you have the same key in A and B +# this keeps the value of the B structure. +# PARAMS: +# $1: original JSON Structure +# $2: updated JSON Structure +jsonMerge() { + local data="${1}" + local data2="${2}" + + echo "${data} ${data2}" | jq -cr -s ".[0] * .[1]" +} + +getValues() { + + local values="" + + for file in $(find ${VLS_DIR} -name "*.json"); do + values="${values}$(cat ${file})" + done + + if [ -n "${RAW_CONFIG}" ]; then + values="$(jsonMerge ${values} ${RAW_CONFIG})" + fi + + for key in $(echo ${values} | jq -cr '.|keys[]'); do + ukey=${key^^} + if [ -n "${!ukey}" ]; then + values="$(jsonMerge "${values}" "{\"${key}\":\"${!ukey}\"}")" + fi + done + echo ${values} +} + +processTemplates() { + ${BTR} -t ${TPL_DIR} -c "${1}" +} +VALUES=$(getValues) +echo ${VALUES} +processTemplates "${VALUES}" diff --git a/recipes/alpine/provisionning/conf/one-context/net-97-k3s b/recipes/alpine/provisionning/conf/one-context/net-97-k3s new file mode 100644 index 0000000..77bd98a --- /dev/null +++ b/recipes/alpine/provisionning/conf/one-context/net-97-k3s @@ -0,0 +1,21 @@ +#!/usr/bin/env bash + +ENV_FILE=${ENV_FILE:-/var/run/one-context/one_env} + +# $TOKENTXT is available only through the env. file +# shellcheck disable=SC1090 +if [ -f "${ENV_FILE}" ]; then + . 
"${ENV_FILE}" +fi + +### + +if [ -n "${K3S_ROLE}" ]; then + if [ "${K3S_ROLE}" = "server" ]; then + rc-update add dnsmasq default + service dnsmasq start + + rc-update add k3s default + service k3s start + fi +fi diff --git a/recipes/alpine/provisionning/k3s.sh b/recipes/alpine/provisionning/k3s.sh new file mode 100644 index 0000000..62af7f2 --- /dev/null +++ b/recipes/alpine/provisionning/k3s.sh @@ -0,0 +1,5 @@ +#!/bin/sh + +echo "export KUBECONFIG=/etc/rancher/k3s/k3s.yaml" >> /root/.profile + +exit 0 \ No newline at end of file diff --git a/recipes/alpine/provisionning/kubernetes.sh b/recipes/alpine/provisionning/kubernetes.sh new file mode 100644 index 0000000..f37f832 --- /dev/null +++ b/recipes/alpine/provisionning/kubernetes.sh @@ -0,0 +1,18 @@ +#!/bin/sh + +mount --make-rshared / + +modprobe br_netfilter + +uuidgen > /etc/machine-id + +sysctl -w net.bridge.bridge-nf-call-iptables=1 + +# Remove swap +cat /etc/fstab | grep -v swap > temp.fstab +cat temp.fstab > /etc/fstab +rm temp.fstab +swapoff -a + +#lvremove -y /dev/vg0/lv_swap +#lvextend -y -r -l +100%FREE /dev/vg0/lv_root diff --git a/recipes/alpine/provisionning/letsencrypt.sh b/recipes/alpine/provisionning/letsencrypt.sh new file mode 100644 index 0000000..4ae1968 --- /dev/null +++ b/recipes/alpine/provisionning/letsencrypt.sh @@ -0,0 +1,26 @@ +#!/bin/sh + +set -eo pipefail + +DESTDIR=/usr/local/share/ca-certificates +UPDATE_CERTS_CMD=update-ca-certificates +CERTS="$(cat < "${file}" +processTemplates "${file}" +rm -rf "${file}" diff --git a/recipes/alpine/provisionning/one-context/net-97-k3s b/recipes/alpine/provisionning/one-context/net-97-k3s new file mode 100644 index 0000000..77bd98a --- /dev/null +++ b/recipes/alpine/provisionning/one-context/net-97-k3s @@ -0,0 +1,21 @@ +#!/usr/bin/env bash + +ENV_FILE=${ENV_FILE:-/var/run/one-context/one_env} + +# $TOKENTXT is available only through the env. file +# shellcheck disable=SC1090 +if [ -f "${ENV_FILE}" ]; then + . "${ENV_FILE}" +fi + +### + +if [ -n "${K3S_ROLE}" ]; then + if [ "${K3S_ROLE}" = "server" ]; then + rc-update add dnsmasq default + service dnsmasq start + + rc-update add k3s default + service k3s start + fi +fi diff --git a/recipes/alpine/provisionning/templater-install.sh b/recipes/alpine/provisionning/templater-install.sh new file mode 100644 index 0000000..091bbdb --- /dev/null +++ b/recipes/alpine/provisionning/templater-install.sh @@ -0,0 +1,23 @@ +#!/bin/sh + +set -ex + +TOOL_DIR="${1:-/usr/local/bin}" +TOOL_USER="${2:-root}" +TOOL_GROUP="${3:-root}" +ATTACHMENT_URL="https://forge.cadoles.com/attachments/" + +installTool() { + NAME="${1}" + URL="${2}" + + curl -k -o ${TOOL_DIR}/${NAME} ${URL} + chmod +x ${TOOL_DIR}/${NAME} +} + +apk add curl + +# Install templater +installTool "tpr" "https://forge.cadoles.com/attachments/242b3cba-8d07-4b89-80ab-7c12253a8524" +# Install bootstraper +installTool "btr" "https://forge.cadoles.com/attachments/e8442b2a-2065-4282-b4a4-648681fa044c" diff --git a/recipes/alpine/sources.pkr.hcl b/recipes/alpine/sources.pkr.hcl new file mode 100644 index 0000000..4f76ed7 --- /dev/null +++ b/recipes/alpine/sources.pkr.hcl @@ -0,0 +1,33 @@ +source qemu "alpine" { + cpus = 1 + memory = "${local.memory}" + accelerator = "kvm" + vnc_bind_address = "0.0.0.0" + + headless = true + + # Serve the `http` directory via HTTP, used for preseeding the Debian installer. 
+ http_port_min = 9990 + http_port_max = 9999 + + # SSH ports to redirect to the VM being built + host_port_min = 2222 + host_port_max = 2229 + + # This user is configured in the preseed file. + ssh_username = "${local.ssh_user}" + ssh_private_key_file = data.sshkey.install.private_key_path + ssh_wait_timeout = "1000s" + + shutdown_command = "/sbin/poweroff" + + # Builds a compact image + disk_compression = true + disk_discard = "unmap" + skip_compaction = false + disk_detect_zeroes = "unmap" + + format = "qcow2" + + boot_wait = "5s" +} diff --git a/recipes/alpine/templates/conf/conf.d/chronyd b/recipes/alpine/templates/conf/conf.d/chronyd new file mode 100644 index 0000000..e692251 --- /dev/null +++ b/recipes/alpine/templates/conf/conf.d/chronyd @@ -0,0 +1,6 @@ +# /etc/conf.d/chronyd +CFGFILE="/etc/chrony/chrony.conf" +FAST_STARTUP=yes +ARGS="" +# vrf e.g 'vrf-mgmt' +#vrf="" diff --git a/recipes/alpine/templates/conf/install/awnsers.pktpl.hcl b/recipes/alpine/templates/conf/install/awnsers.pktpl.hcl new file mode 100644 index 0000000..1ba7461 --- /dev/null +++ b/recipes/alpine/templates/conf/install/awnsers.pktpl.hcl @@ -0,0 +1,47 @@ + +# Example answer file for setup-alpine script +# If you don't want to use a certain option, then comment it out + +# Use US layout with US variant +KEYMAPOPTS="fr fr" + +# Set hostname to alpine-test +HOSTNAMEOPTS="-n ${hostname}" + +# Contents of /etc/network/interfaces +INTERFACESOPTS="auto lo +iface lo inet loopback + +auto eth0 +iface eth0 inet dhcp + hostname ${hostname} +" + +# Search domain of example.com, OpenDNS public nameserver +# ex: -d example.com 1.1.1.1" +DNSOPTS="" + +# Set timezone to UTC +TIMEZONEOPTS="-z Europe/Paris" + +# set http/ftp proxy +PROXYOPTS="none" + +# Add a random mirror +APKREPOSOPTS="-r -c" + +# Install Openssh +SSHDOPTS="-c openssh -k /root/.ssh/authorized_keys" + +# Use openntpd +NTPOPTS="-c openntpd" + +# Use /dev/sda as a data disk +DISKOPTS="-L -m sys /dev/vda" + +USEROPTS="-a -g 'netdev' ${user}" + +# Setup in /media/vda1 +# LBUOPTS="/media/vda1" +# APKCACHEOPTS="/media/vda1/cache" + diff --git a/recipes/alpine/templates/conf/k3s/k3s.conf.pkr.hcl b/recipes/alpine/templates/conf/k3s/k3s.conf.pkr.hcl new file mode 100644 index 0000000..3fa8501 --- /dev/null +++ b/recipes/alpine/templates/conf/k3s/k3s.conf.pkr.hcl @@ -0,0 +1,8 @@ +# k3s options +export PATH="/usr/libexec/cni/:$PATH" +K3S_EXEC="server" +%{ if Vars.DeployTraefik } +K3S_OPTS="" +%{ else } +K3S_OPTS="--disable traefik" +%{ endif } diff --git a/recipes/alpine/templates/conf/kubernetes/.flag b/recipes/alpine/templates/conf/kubernetes/.flag new file mode 100644 index 0000000..e69de29 diff --git a/recipes/alpine/templates/conf/matchbox/conf.d/matchbox.conf.pktpl.hcl b/recipes/alpine/templates/conf/matchbox/conf.d/matchbox.conf.pktpl.hcl new file mode 100644 index 0000000..b8432f0 --- /dev/null +++ b/recipes/alpine/templates/conf/matchbox/conf.d/matchbox.conf.pktpl.hcl @@ -0,0 +1 @@ +command_args="-address 0.0.0.0:${Vars.MatchBox.HTTPPort} -rpc-address 0.0.0.0:${Vars.MatchBox.gRPCPort} -log-level ${Vars.MatchBox.LogLevel}" \ No newline at end of file diff --git a/recipes/alpine/templates/conf/matchbox/dnsmasq.d/dnsmasq-hosts.conf.pktpl.hcl b/recipes/alpine/templates/conf/matchbox/dnsmasq.d/dnsmasq-hosts.conf.pktpl.hcl new file mode 100644 index 0000000..d99d357 --- /dev/null +++ b/recipes/alpine/templates/conf/matchbox/dnsmasq.d/dnsmasq-hosts.conf.pktpl.hcl @@ -0,0 +1,4 @@ +${Vars.ETH0.IP} ${Vars.Set.Hostname} +%{ if Vars.MatchBox.Hostname != "" } 
+${Vars.ETH0.IP} ${Vars.MatchBox.Hostname} +%{ endif } \ No newline at end of file diff --git a/recipes/alpine/templates/conf/matchbox/dnsmasq.d/ipxe.conf.pktpl.hcl b/recipes/alpine/templates/conf/matchbox/dnsmasq.d/ipxe.conf.pktpl.hcl new file mode 100644 index 0000000..8d08dac --- /dev/null +++ b/recipes/alpine/templates/conf/matchbox/dnsmasq.d/ipxe.conf.pktpl.hcl @@ -0,0 +1,60 @@ +log-queries +log-dhcp + +#port=0 +listen-address=0.0.0.0 +interface=${Vars.PXE.ListenInterface} +no-resolv +domain-needed +bogus-priv +expand-hosts +server=${Vars.ETH0.DNS} +strict-order +addn-hosts=/etc/dnsmasq-hosts.conf +domain=${Vars.PXE.DNSDomain} +local=/${Vars.PXE.DNSDomain}/ +localise-queries + + +%{ if Vars.PXE.DHCPMode == "proxy" } +#dhcp-no-override +dhcp-range=${Vars.ETH0.IP},proxy +%{ else } +dhcp-range=${Vars.PXE.DHCPRangeStart},${Vars.PXE.DHCPRangeEnd},${Vars.PXE.DHCPLeaseDuration} +dhcp-option=option:router,${Vars.ETH0.GATEWAY} +%{ endif } + +dhcp-option=option:dns-server,${Vars.ETH0.IP} +dhcp-option=option:domain-name,${Vars.PXE.DNSDomain} + +# TFTP Configuration +enable-tftp +tftp-root="${Vars.PXE.TFTPRoot}" + +pxe-prompt="${Vars.PXE.GreetingMessage}",${Vars.PXE.DelayTime} + +# Based on logic in https://gist.github.com/robinsmidsrod/4008017 +# iPXE sends a 175 option, checking suboptions +dhcp-match=set:ipxe-http,175,19 +dhcp-match=set:ipxe-https,175,20 +dhcp-match=set:ipxe-menu,175,39 +# pcbios specific +dhcp-match=set:ipxe-pxe,175,33 +dhcp-match=set:ipxe-bzimage,175,24 +dhcp-match=set:ipxe-iscsi,175,17 +# efi specific +dhcp-match=set:ipxe-efi,175,36 +# combination +# set ipxe-ok tag if we have correct combination +# http && menu && iscsi ((pxe && bzimage) || efi) +tag-if=set:ipxe-ok,tag:ipxe-http,tag:ipxe-menu,tag:ipxe-iscsi,tag:ipxe-pxe,tag:ipxe-bzimage +tag-if=set:ipxe-ok,tag:ipxe-http,tag:ipxe-menu,tag:ipxe-iscsi,tag:ipxe-efi + + +## Load different PXE boot image depending on client architecture (when running as a proxy DHCP) +pxe-service=tag:!ipxe-ok, x86PC, "Legacy boot PXE chainload to iPXE", undionly.kpxe +pxe-service=tag:!ipxe-ok, BC_EFI, "UEFI32 boot chainload to iPXE", snponly.efi +pxe-service=tag:!ipxe-ok, X86-64_EFI, "UEFI64 boot chainload to iPXE", snponly.efi + +dhcp-userclass=set:ipxe,iPXE +dhcp-boot=tag:ipxe-ok,http://${Vars.ETH0.IP}:${Vars.MatchBox.HTTPPort}/boot.ipxe,,${Vars.ETH0.IP} diff --git a/recipes/alpine/templates/conf/matchbox/init.d/matchbox.pktpl.hcl b/recipes/alpine/templates/conf/matchbox/init.d/matchbox.pktpl.hcl new file mode 100644 index 0000000..6652098 --- /dev/null +++ b/recipes/alpine/templates/conf/matchbox/init.d/matchbox.pktpl.hcl @@ -0,0 +1,28 @@ +#!/sbin/openrc-run + +name=$RC_SVCNAME +command="/usr/local/bin/$RC_SVCNAME" +command_user="$RC_SVCNAME" +pidfile="/run/$RC_SVCNAME/$RC_SVCNAME.pid" +start_stop_daemon_args="--start -b" +command_args="$command_args" +command_background="yes" + +depend() { + need net +} + +start_pre() { + checkpath --directory --owner $command_user:$command_user --mode 0775 \ + /run/$RC_SVCNAME /var/log/$RC_SVCNAME + if [ ! 
-f "/etc/matchbox/server.crt" ]; then + cd /root/tls + export SAN="DNS.1:${Vars.MatchBox.Hostname},IP.1:${Vars.ETH0.IP}" + ./cert-gen + mkdir -p /etc/matchbox + cp ca.crt server.crt server.key /etc/matchbox + chown -R matchbox:matchbox /etc/matchbox + mkdir -p /root/.matchbox + cp client.crt client.key ca.crt /root/.matchbox/ + fi +} \ No newline at end of file diff --git a/recipes/alpine/templates/one/image/common.tpl b/recipes/alpine/templates/one/image/common.tpl new file mode 100644 index 0000000..d422fb1 --- /dev/null +++ b/recipes/alpine/templates/one/image/common.tpl @@ -0,0 +1,7 @@ +NAME = <%= image_name %> +PATH = <%= image_source %> +TYPE = OS +PERSISTENT = No +DESCRIPTION = "<%= image_comment %>" +DEV_PREFIX = vd +FORMAT = qcow2 \ No newline at end of file diff --git a/recipes/alpine/templates/one/service/kubernetes-cluster.json b/recipes/alpine/templates/one/service/kubernetes-cluster.json new file mode 100644 index 0000000..635b8d2 --- /dev/null +++ b/recipes/alpine/templates/one/service/kubernetes-cluster.json @@ -0,0 +1,48 @@ +{ + "name": "<%= template_name %>", + "deployment": "straight", + "description": "Cluster Kubernetes (k8s)", + "roles": [ + { + "name": "leader", + "cardinality": 1, + "vm_template": <%= getTemplateByName(oneCli, vm_name).id %>, + "shutdown_action": "terminate", + "vm_template_contents": "NIC = [\n NAME = \"NIC0\",\n NETWORK_ID = \"$main\",\n RDP = \"YES\" ]\nNIC = [\n NAME = \"NIC1\",\n NETWORK_ID = \"$internal\" ]\n", + "elasticity_policies": [], + "scheduled_policies": [] + }, + { + "name": "master", + "cardinality": 2, + "vm_template": <%= getTemplateByName(oneCli, vm_name).id %>, + "shutdown_action": "terminate", + "vm_template_contents": "NIC = [\n NAME = \"NIC0\",\n NETWORK_ID = \"$main\",\n RDP = \"YES\" ]\nNIC = [\n NAME = \"NIC1\",\n NETWORK_ID = \"$internal\" ]\n", + "elasticity_policies": [], + "scheduled_policies": [] + }, + { + "name": "worker", + "cardinality": 4, + "vm_template": <%= getTemplateByName(oneCli, vm_name).id %>, + "shutdown_action": "terminate", + "parents": [ + "leader" + ], + "vm_template_contents": "NIC = [\n NAME = \"NIC0\",\n NETWORK_ID = \"$main\",\n RDP = \"YES\" ]\nNIC = [\n NAME = \"NIC1\",\n NETWORK_ID = \"$internal\" ]\n", + "elasticity_policies": [], + "scheduled_policies": [] + } + ], + "networks": { + "main": "M|network|Main network| |id:", + "internal": "M|network|Internal network| |id:" + }, + "custom_attrs": { + "KUBEAPPS_DNS_NAME": "M|text|DNS Name for kubeapps service| |kubeapps.k3s-eole.local", + "INGRESS_PROVIDER": "O|list|Default ingress to install|nginx, traefik, |", + "LE_EMAIL": "M|text|Email | |" + }, + "shutdown_action": "terminate", + "ready_status_gate": true + } diff --git a/recipes/alpine/templates/one/vm/common.xml b/recipes/alpine/templates/one/vm/common.xml new file mode 100644 index 0000000..fdb5be4 --- /dev/null +++ b/recipes/alpine/templates/one/vm/common.xml @@ -0,0 +1,33 @@ +NAME = "<%= template_name %>" +CONTEXT = [ + NETWORK = "YES", + REPORT_READY = "YES", + SET_HOSTNAME = "$NAME", + SSH_PUBLIC_KEY = "$USER[SSH_PUBLIC_KEY]", + TOKEN = "YES" ] +CPU = "0.2" +DESCRIPTION = "Alpine basic image" +DISK = [ + DEV_PREFIX = "vd", + DRIVER = "qcow2", + IMAGE = "<%= image_name %>", + IMAGE_UNAME = "<%= user %>" ] +GRAPHICS = [ + KEYMAP = "fr", + LISTEN = "0.0.0.0", + TYPE = "VNC" ] +HYPERVISOR = "kvm" +INPUT = [ + BUS = "usb", + TYPE = "tablet" ] +INPUTS_ORDER = "" +LOGO = "images/logos/linux.png" +MEMORY = "512" +MEMORY_UNIT_COST = "MB" +NIC_DEFAULT = [ + MODEL = "virtio" ] +OS = [ + ARCH = 
"x86_64", + BOOT = "", + SD_DISK_BUS = "scsi" ] +VCPU = "2" diff --git a/recipes/alpine/templates/one/vm/k3s.xml b/recipes/alpine/templates/one/vm/k3s.xml new file mode 100644 index 0000000..6c515f2 --- /dev/null +++ b/recipes/alpine/templates/one/vm/k3s.xml @@ -0,0 +1,32 @@ +NAME = "<%= template_name %>" +CONTEXT = [ + NETWORK = "YES", + REPORT_READY = "YES", + SET_HOSTNAME = "$NAME", + SSH_PUBLIC_KEY = "$USER[SSH_PUBLIC_KEY]", + TOKEN = "YES" ] +CPU = "0.2" +DESCRIPTION = "K3S Ready VM" +DISK = [ + IMAGE = "<%= image_name %>", + IMAGE_UNAME = "<%= user %>", + DRIVER = "qcow2" ] +GRAPHICS = [ + KEYMAP = "fr", + LISTEN = "0.0.0.0", + TYPE = "VNC" ] +HYPERVISOR = "kvm" +INPUT = [ + BUS = "usb", + TYPE = "tablet" ] +INPUTS_ORDER = "" +LOGO = "images/logos/alpine.png" +MEMORY = "2048" +MEMORY_UNIT_COST = "MB" +NIC_DEFAULT = [ + MODEL = "virtio" ] +OS = [ + ARCH = "x86_64", + BOOT = "", + SD_DISK_BUS = "scsi" ] +VCPU = "2" diff --git a/recipes/alpine/templates/one/vm/kubeleader.xml b/recipes/alpine/templates/one/vm/kubeleader.xml new file mode 100644 index 0000000..c68faa5 --- /dev/null +++ b/recipes/alpine/templates/one/vm/kubeleader.xml @@ -0,0 +1,35 @@ +NAME = "<%= template_name %>" +CONTEXT = [ + NETWORK = "YES", + REPORT_READY = "YES", + SET_HOSTNAME = "$NAME", + SERVER_ROLE = "leader", + TOKEN = "YES", + SSH_PUBLIC_KEY = "$USER[SSH_PUBLIC_KEY]" +] +CPU = "0.8" +DESCRIPTION = "Kubernetes master or Docker VM (check the name)" +DISK = [ + DEV_PREFIX = "vd", + IMAGE = "<%= image_name %>", + IMAGE_UNAME = "<%= user %>", + DRIVER = "qcow2" ] +GRAPHICS = [ + LISTEN = "0.0.0.0", + KEYMAP = "fr", + TYPE = "VNC" ] +HYPERVISOR = "kvm" +INPUT = [ + BUS = "usb", + TYPE = "tablet" ] +INPUTS_ORDER = "" +LOGO = "images/logos/alpine.png" +MEMORY = "2048" +MEMORY_UNIT_COST = "MB" +NIC_DEFAULT = [ + MODEL = "virtio" ] +OS = [ + ARCH = "x86_64", + BOOT = "", + SD_DISK_BUS = "scsi" ] +VCPU = "4" \ No newline at end of file diff --git a/recipes/alpine/templates/one/vm/kubemaster.xml b/recipes/alpine/templates/one/vm/kubemaster.xml new file mode 100644 index 0000000..e0fe33d --- /dev/null +++ b/recipes/alpine/templates/one/vm/kubemaster.xml @@ -0,0 +1,42 @@ +NAME = "<%= template_name %>" +CONTEXT = [ + NETWORK = "YES", + REPORT_READY = "YES", + SET_HOSTNAME = "$NAME", + SERVER_ROLE = "master", + MASTER_ADDR = "$MASTER_ADDR", + MASTER_TOKEN = "$MASTER_TOKEN", + MASTER_CA_TOKEN = "$MASTER_CA_TOKEN", + TOKEN = "YES", + SSH_PUBLIC_KEY = "$USER[SSH_PUBLIC_KEY]" +] +CPU = "0.8" +DESCRIPTION = "Kubernetes worker VM" +DISK = [ + DEV_PREFIX = "vd", + IMAGE = "<%= image_name %>", + IMAGE_UNAME = "<%= user %>", + DRIVER = "qcow2" ] +GRAPHICS = [ + LISTEN = "0.0.0.0", + KEYMAP = "fr", + TYPE = "VNC" ] +HYPERVISOR = "kvm" +INPUT = [ + BUS = "usb", + TYPE = "tablet" ] +INPUTS_ORDER = "" +LOGO = "images/logos/alpine.png" +MEMORY = "2048" +MEMORY_UNIT_COST = "MB" +NIC_DEFAULT = [ + MODEL = "virtio" ] +OS = [ + ARCH = "x86_64", + BOOT = "", + SD_DISK_BUS = "scsi" ] +USER_INPUTS = [ + MASTER_ADDR = "O|text|Master address (for workers only)", + MASTER_TOKEN = "O|text|Master Token (for workers only)", + MASTER_CA_TOKEN = "O|text|Master CA Token (for workers only)" ] +VCPU = "4" \ No newline at end of file diff --git a/recipes/alpine/templates/one/vm/kubeworker.xml b/recipes/alpine/templates/one/vm/kubeworker.xml new file mode 100644 index 0000000..9aa3f0a --- /dev/null +++ b/recipes/alpine/templates/one/vm/kubeworker.xml @@ -0,0 +1,42 @@ +NAME = "<%= template_name %>" +CONTEXT = [ + NETWORK = "YES", + REPORT_READY = "YES", + 
SET_HOSTNAME = "$NAME", + SERVER_ROLE = "worker", + MASTER_ADDR = "$MASTER_ADDR", + MASTER_TOKEN = "$MASTER_TOKEN", + MASTER_CA_TOKEN = "$MASTER_CA_TOKEN", + TOKEN = "YES", + SSH_PUBLIC_KEY = "$USER[SSH_PUBLIC_KEY]" +] +CPU = "0.8" +DESCRIPTION = "Kubernetes worker VM" +DISK = [ + DEV_PREFIX = "vd", + IMAGE = "<%= image_name %>", + IMAGE_UNAME = "<%= user %>", + DRIVER = "qcow2" ] +GRAPHICS = [ + LISTEN = "0.0.0.0", + KEYMAP = "fr", + TYPE = "VNC" ] +HYPERVISOR = "kvm" +INPUT = [ + BUS = "usb", + TYPE = "tablet" ] +INPUTS_ORDER = "" +LOGO = "images/logos/alpine.png" +MEMORY = "4096" +MEMORY_UNIT_COST = "MB" +NIC_DEFAULT = [ + MODEL = "virtio" ] +OS = [ + ARCH = "x86_64", + BOOT = "", + SD_DISK_BUS = "scsi" ] +USER_INPUTS = [ + MASTER_ADDR = "O|text|Master address (for workers only)", + MASTER_TOKEN = "O|text|Master Token (for workers only)", + MASTER_CA_TOKEN = "O|text|Master CA Token (for workers only)" ] +VCPU = "4" \ No newline at end of file diff --git a/recipes/alpine/templates/one/vm/matchbox.xml b/recipes/alpine/templates/one/vm/matchbox.xml new file mode 100644 index 0000000..794ab44 --- /dev/null +++ b/recipes/alpine/templates/one/vm/matchbox.xml @@ -0,0 +1,47 @@ +NAME = "<%= template_name %>" +CONTEXT = [ + MATCHBOX_URL = "http://$NAME", + NETWORK = "YES", + PXE_DHCPLEASEDURATION = "$DHCPLEASEDURATION", + PXE_DHCPMODE = "$ADHCPMODE", + PXE_DNSDOMAIN = "$BDNSDOMAIN", + PXE_DHCPRANGESTART = "$CDHCPRANGESTART", + PXE_DHCPRANGEEND = "$DDHCPRANGEEND", + PXE_DHCPLEASEDURATION = "$EDHCPLEASEDURATION", + MATCHBOX_HOSTNAME = "$FMATCHBOX_HOSTNAME", + REPORT_READY = "YES", + SET_HOSTNAME = "$NAME", + SSH_PUBLIC_KEY = "$USER[SSH_PUBLIC_KEY]", + TOKEN = "YES" ] +CPU = "0.2" +DESCRIPTION = "Matchbox Ready VM" +DISK = [ + IMAGE = "<%= image_name %>", + IMAGE_UNAME = "<%= user %>", + DRIVER = "qcow2" ] +GRAPHICS = [ + KEYMAP = "fr", + LISTEN = "0.0.0.0", + TYPE = "VNC" ] +HYPERVISOR = "kvm" +INPUT = [ + BUS = "usb", + TYPE = "tablet" ] +INPUTS_ORDER = "" +LOGO = "images/logos/alpine.png" +MEMORY = "2048" +MEMORY_UNIT_COST = "MB" +NIC_DEFAULT = [ + MODEL = "virtio" ] +OS = [ + ARCH = "x86_64", + BOOT = "", + SD_DISK_BUS = "scsi" ] +USER_INPUTS = [ + ADHCPMODE = "M|list|DHCP Mode|proxy,direct|proxy", + BDNSDOMAIN = "M|text|Nom de la zone DNS (ex: cadol.es)", + CDHCPRANGESTART = "O|text|DNSMASQ DHCP Range First IP", + DDHCPRANGEEND = "O|text|DNSMASQ DHCP Range Last IP", + EDHCPLEASEDURATION = "M|list|DHCP lease duration|1h,2h,4h,6h,8h,10h,12h,14h,24h|1h", + FMATCHBOX_HOSTNAME = "O|text|Matchbox service hostname|mb.cadol.es" ] +VCPU = "2" diff --git a/recipes/alpine/variables.pkr.hcl b/recipes/alpine/variables.pkr.hcl new file mode 100644 index 0000000..28100fb --- /dev/null +++ b/recipes/alpine/variables.pkr.hcl @@ -0,0 +1,54 @@ +variable "name" { + type = string + default = "alpine" +} + +variable "version" { + type = string + default = "3.14.2" +} + +variable "short_version" { + type = string + default = "3.14" +} + +variable "arch" { + type = string + default = "x86_64" +} + +variable "output_dir" { + type = string + default = "output/alpine/" +} + +variable "source_url" { + type = string + default = "https://cdimage.debian.org/cdimage/release" +} + +variable "iso_cd_checksum" { + type = string + default = "sha256:ae6d563d2444665316901fe7091059ac34b8f67ba30f9159f7cef7d2fdc5bf8a" +} + +variable "image_version" { + type = string + default = "0.0.1" +} + +variable "one_user" { + type = string + default = env("ONE_USER") +} + +variable "one_token" { + type = string + default = env("ONE_TOKEN") +} + 
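+# The per-release var files shipped alongside this one (3.16.pkrvars.hcl,
+# 3.17.pkrvars.hcl) are expected to override these defaults, notably version,
+# short_version, source_url and iso_cd_checksum, since the values above are
+# apparently inherited from a Debian recipe. They are passed to packer with
+# -var-file, for example:
+#   packer build -var-file=3.17.pkrvars.hcl .
+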
+variable "boot_command" { + type = list(string) + default = [] +} diff --git a/tools/one-templates b/tools/one-templates new file mode 100755 index 0000000..4acd96c --- /dev/null +++ b/tools/one-templates @@ -0,0 +1,628 @@ +#!/usr/bin/env ruby + +############################################################################ +# Environment Configuration +############################################################################ +ONE_LOCATION = ENV['ONE_LOCATION'] + +if !ONE_LOCATION + RUBY_LIB_LOCATION = '/usr/lib/one/ruby' + ONEFLOW_LOCATION = '/usr/lib/one/oneflow/lib' + GEMS_LOCATION = '/usr/share/one/gems' +else + RUBY_LIB_LOCATION = ONE_LOCATION + '/lib/ruby' + ONEFLOW_LOCATION = ONE_LOCATION + '/lib/oneflow/lib' + GEMS_LOCATION = ONE_LOCATION + '/share/gems' +end + +warn_level = $VERBOSE +$VERBOSE = nil +if File.directory?(GEMS_LOCATION) + real_gems_path = File.realpath(GEMS_LOCATION) + if !defined?(Gem) || Gem.path != [real_gems_path] + $LOAD_PATH.reject! {|l| l =~ /vendor_ruby/ } + require 'rubygems' + Gem.use_paths(real_gems_path) + end +end +$VERBOSE = warn_level + +$LOAD_PATH << RUBY_LIB_LOCATION + +############################################################################ +# Required libraries +############################################################################ +require 'erb' +require 'yaml' +require 'json' +require 'socket' +require 'webrick' +require 'pathname' +require 'optparse' +require 'opennebula' +require 'opennebula/oneflow_client' + + +def getServiceID(response) + rsp = JSON.parse(response) + return rsp["DOCUMENT"]["ID"] +end + +def chmodService(sv, path, id, mode) + uri = "#{path}/service_template/#{id}/action" + + params = {} + params["octet"] = mode + params["recursive"] = "all" + action = Service.build_json_action('chmod', params) + + resp = sv.post(uri, action) + if CloudClient.is_error?(resp) + raise Exception.new("Service template chmod failed with error : #{resp}") + end +end + +def getServiceTemplateByName(name, owner, sv, path) + resp = sv.get("#{path}/service_template") + if CloudClient.is_error?(resp) + raise Exception.new(resp) + return nil + else + tpls = JSON.parse(resp.body) + end + + if tpls["DOCUMENT_POOL"].size != 0 + tpls["DOCUMENT_POOL"]["DOCUMENT"].each do |doc| + if name == doc["NAME"] and owner == doc["UNAME"] + return doc + end + end + end + return nil +end + +def publishService(sv, path, template, mode, owner) + tpl = JSON.parse(template) + + svr = getServiceTemplateByName(tpl['name'], owner, sv, path) + if ! 
svr + resp = sv.post("#{path}/service_template", template) + if CloudClient.is_error?(resp) + raise Exception.new("Service template creation failed with error : #{resp}") + else + id = getServiceID(resp.body) + begin + chmodService(sv, path, id, mode) + rescue => e + raise e + end + return("created [id: #{id}]") + end + else + # Keep registration_time + if svr['TEMPLATE']['BODY'].key?("registration_time") + tpl["registration_time"] = svr['TEMPLATE']['BODY']['registration_time'] + template = tpl.to_json + end + + resp = sv.put("#{path}/service_template/#{svr["ID"]}", template) + if CloudClient.is_error?(resp) + raise Exception.new("Service template tupdate failed with error : #{resp}") + else + id = getServiceID(resp.body) + begin + chmodService(sv, path, id, mode) + rescue => e + raise e + end + return("updated [id: #{id}]") + end + end + return 0 +end + +def getTemplateByName(cli, name) + tpl_pool = OpenNebula::TemplatePool.new(cli, OpenNebula::Pool::INFO_MINE) + rc = tpl_pool.info + if OpenNebula.is_error?(rc) + puts rc.message + return nil + end + tpl_pool.each do |tpl| + if tpl.name == name + return tpl + end + end + return nil +end + +def publishImage(image_name, image_comment, image_file, external_url, template, mode) + image_source = '' + root = File.expand_path(File.dirname(image_file)) + filename = File.basename(File.expand_path(image_file)) + + # Starting a very simple HTTP server to make the image available for ONE. + http_port = nil + t1 = Thread.new do + server = WEBrick::HTTPServer.new(Port: 0, + DocumentRoot: root, + Logger: WEBrick::Log.new('/dev/null'), + AccessLog: []) + http_port = server.config[:Port] + server.start + end + + # rubocop:disable Metrics/BlockLength + # Image creation and cleanup old ones + t2 = Thread.new do + begin + client = OpenNebula::Client.new(CREDENTIALS, ENDPOINT) + img_pool = OpenNebula::ImagePool.new(client, OpenNebula::Pool::INFO_MINE) + + rc = img_pool.info + raise Exception, rc.message if OpenNebula.is_error?(rc) + + img_pool.each do |image| + if image.name =~ /.*_tbr/ + warn("Trying to delete #{image.name}") + rc = image.delete + end + next unless image.name == image_name + + rc = image.delete + if OpenNebula.is_error?(rc) + rc = image.rename("#{image_name}_#{Time.now.strftime('%Y%m%d-%H%M%S')}_tbr") + raise Exception, rc.message if OpenNebula.is_error?(rc) + end + sleep(5) + end + + image_source = if external_url + # We have a reverse proxy in front of us + "#{external_url}/#{HTTP_ADDR}/#{http_port}/#{filename}" + else + "http://#{HTTP_ADDR}:#{http_port}/#{filename}" + end + + tmpl = if template + ERB.new(template).result(binding) + else + <<~TEMPLATE + NAME = #{image_name} + PATH = #{image_source} + TYPE = OS + PERSISTENT = No + DESCRIPTION = "#{image_comment} (default template)" + DEV_PREFIX = vd + FORMAT = qcow2 + TEMPLATE + end + + xml = OpenNebula::Image.build_xml + img = OpenNebula::Image.new(xml, client) + rc = img.allocate(tmpl, DS_ID) + raise Exception, rc.message if OpenNebula.is_error?(rc) + + tout = 300 + while img.short_state_str != 'rdy' + sleep(1) + img.info + tout -= 1 + break if tout.zero? 
+ end + img.chmod_octet(mode) + warn("\nOneNebula template publication:\n") + warn("\tImage template:\n") + warn("\t Image #{image_name} published") + warn("\t * description: #{image_comment}\n") + warn("\t * source: #{image_source}\n") + warn("\t * file: #{image_file}\n") + warn("\t * mode: #{mode}\n") + rescue Exception => e + warn(e.message) + Thread.kill(t1) + exit(-1) + end + Thread.kill(t1) + end + # rubocop:enable Metrics/BlockLength + + t1.join + t2.join +end + +def publishVM(oneCli, template_name, template, mode) + xml = OpenNebula::Template.build_xml + tpl = nil + + rc = nil + print("\tVM template #{template_name} :",) + tpl = getTemplateByName(oneCli, template_name) + if tpl + rc = tpl.update(template) + print(" update ") + else + tpl = OpenNebula::Template.new(xml, oneCli) + rc = tpl.allocate(template) + print(" create ") + end + + if OpenNebula.is_error?(rc) + puts("[KO]") + STDERR.puts rc.message + exit(-1) + end + print("\n\tSet VM template #{template_name} permission to #{mode}") + tpl.chmod_octet(mode) + puts ("[OK]") + return 0 +end + + +options = {} + +OptionParser.new do |opts| + opts.banner = "Usage: onte-templates [options]" + + opts.on("-cFILE", "--config=FILE", "Configuration file to use (default ./.one-templates.conf)") do |c| + options[:config_file] = c + end + + opts.on("-tTYPE", "--type=TYPE", "Set what do you want to publish (vm for a vm_template, service for a service_template)") do |t| + options[:type] = t + end + + opts.on("-nNAME", "--name=NAME", "Name of the template to publish") do |n| + options[:name] = n + end + + opts.on("-TTEMPLATE", "--template=TEMPLATE", "The template to publish (file or raw template)") do |tp| + options[:template] = tp + end + + opts.on("-dDIRECTORY", "--directory=DIRECTORY", "Template directory") do |d| + options[:directory] = d + end + + opts.on("-uUSER", "--user=USER", "OpenNebula user") do |u| + options[:user] = u + end + + opts.on("-pTOKEN", "--password=TOKEN", "OpenNebula user token or password") do |t| + options[:token] = t + end + + opts.on("-eENDPOINT", "--end-point=ENDPOINT", "OpenNebula cluster API end point") do |e| + options[:endpoint] = e + end + + opts.on("-fFLOWENDPOINT", "--flow-end-point=FLOWENDPOINT", "OneFlow API end point") do |f| + options[:flow_endpoint] = f + end + + opts.on("-mMODE", "--mode=MODE", "Permissions for the template (ex: 644)") do |m| + options[:mode] = m + end + + opts.on("-bBUILDER_ADDR","--builder-addr=BUILDER_ADDR", "Builder IP address") do |b| + options[:builder_addr] = b + end + + opts.on("-xEXTERNAL", "--external-url=EXTERNAL", "External URL (reverse proxy)") do |x| + options[:external_url] = x + end + + opts.on("-sDATASTORE_ID", "--datasore-id=DATASTORE_ID", "Images datastore ID") do |s| + options[:datastore_id] = s + end + + opts.on("-iIMAGE_ROOT", "--image-root=IMAGE_ROOT", "Directory containing the images") do |i| + options[:image_root] = i + end + + opts.on("-cCOMMENT", "--comment=COMMENT", "Image comment/description") do |c| + options[:image_comment] = c + end + + opts.on("-IIMAGE", "--image-file=IMAGE", "Image file do publish") do |img| + options[:image_file] = img + end + + opts.on("-VIMAGE_NAME", "--image-name=IMAGE_NAME", "Image name for vm template") do |img| + options[:image_name] = img + end + + opts.on("-vVM_NAME", "--vm-name=IMAGE_NAME", "VM Template name") do |vm| + options[:vm_name] = vm + end + + opts.on("-h", "--help", "Prints this help") do + puts opts + exit + end +end.parse! 
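+
+# Illustrative invocations (the flags come from the parser above; every name,
+# path and endpoint below is a placeholder, not something shipped in this patch):
+#   one-templates -t image -n alpine-3.17 -I output/alpine/alpine-3.17.qcow2 -m 644
+#   one-templates -t vm -n alpine-k3s -d recipes/alpine/templates/one/vm -V alpine-3.17
+#   one-templates -t service -n kubernetes-cluster -d recipes/alpine/templates/one/service -f http://one.example.org:2474
+# The short flag -c is registered twice (--config and --comment), so use the
+# long forms to set those two options unambiguously.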
+ +config_file = if ENV.has_key?("TEMPLATER_CONFIG") + ENV["TEMPLATER_CONFIG"] + elsif options.key?(:config_file) + options[:config_file] + else + "#{File.dirname(__FILE__)}/.one-templates.conf" + end + +config = if File.readable?(config_file) + YAML.load_file(config_file) + else + {} + end + +# OpenNebula credentials +user = "" +token = "" + +if options.key?(:user) and options.key?(:token) + user = options[:user] + token = options[:token] +elsif ENV.has_key?("ONE_USER") and ENV.has_key?("ONE_TOKEN") + user = ENV["ONE_USER"] + token = ENV["ONE_TOKEN"] +elsif config.key?("user") and config.key?("token") + user = config["user"] + token = config["token"] +elsif File.file?("~/.one/one_auth") + creds = File.read("~/.one/one_auth").chomp.split(':') + user = creds[0] + token = creds[1] +else + raise Exception.new("OpenNebula user or token or both are missing, provide this informations in configuration or in environement") +end + +template_type = if options.key?(:type) + options[:type] + elsif ENV.has_key?("TEMPLATE_TYPE") + ENV["TEMPLATE_TYPE"] + else + raise Exception.new("Publishing type is not defined, use --type or TYPE environement variable.") + end +if (template_type != "service") && (template_type != "vm") && (template_type != 'image') + raise Exception.new("Type #{template_type} not supported. Type has to be 'image', 'vm' or 'service'") +end + +template_dir = "" +if options.key?(:directory) + template_dir = options[:directory] +elsif ENV.has_key?("SERVICE_TEMPLATE_DIR") + template_dir = ENV["SERVICE_TEMPLATE_DIR"] +elsif config.key?("template_dir") + template_dir = config[:template_dir] +else + if template_type == "service" + template_dir = "#{File.dirname(__FILE__)}/../templates/one/service_template" + elsif template_type == "vm" + template_dir = "#{File.dirname(__FILE__)}/../templates/one/vm" + elsif template_type == "image" + template_dir = "#{File.dirname(__FILE__)}/../templates/one/image" + end +end + +template = if options.key?(:template) + if File.readable?(options[:template]) + File.read(options[:template]) + else + options[:template] + end + elsif ENV.has_key?("TEMPLATE") + ENV("TEMPLATE") + else + nil + end + +template_name = if options[:name] + options[:name] + elsif ENV.has_key?("TEMPLATE_NAME") + ENV["TEMPLATE_NAME"] + end + +template_file = nil + +tplExt = "json" +if template_type == "vm" + tplExt = "xml" +elsif template_type == "image" + tplExt = "tpl" +end + + +# XML_RPC endpoint where OpenNebula is listening +end_point = nil +if options[:endpoint] + end_point = options[:endpoint] +elsif ENV.has_key?("ONE_XMLRPC") + end_point = ENV["ONE_XMLRPC"] +elsif config.key?("endpoint") + end_point = config["endpoint"] +end + +flow_endpoint = nil +if template_type == "service" + if options[:flow_endpoint] + flow_end_point = URI.parse(options[:flow_endpoint]) + elsif ENV.has_key?("ONE_FLOW_ENDPOINT") + flow_end_point = URI.parse(ENV["ONE_FLOW_ENDPOINT"]) + elsif config.key?("flow_endpoint") + flow_end_point = URI.parse(config["flow_endpoint"]) + end + if ! flow_end_point + raise Exception.new("OneFlow API endpoint is missing, use --flow-end-point option or ONE_FLOW_ENDPOINT environement variable") + end + + flow_path = flow_end_point.path +end + +if ! 
end_point + raise Exception.new("API endpoint is missing, use --end-point option or ONE_XMLRPC environement variable") +end + + +mode = nil +if options[:mode] + mode = options[:mode] +elsif ENV.has_key?("MODE") + mode = ENV["MODE"] +else + mode = "600" +end + +external_url = if options[:external_url] + options[:external_url] + elsif ENV.key?('EXTERNAL_URL') + ENV['EXTERNAL_URL'] + elsif config.key?("external_url") + config["external_url"] + end + +builder_addr = if options[:builder_addr] + options[:buider_addr] + elsif ENV.key?('BUILDER_ADDR') + ENV['BUILDER_ADDR'] + elsif config.key?("builder_addr") + config["builder_addr"] + else + # Get first IP address + Socket.getifaddrs.detect do |addr_info| + addr_info.name != 'lo' && addr_info.addr && addr_info.addr.ipv4? + end.addr.ip_address + end + +datastore_id = if options[:datastore_id] + options[:datastore_id] + elsif ENV.key?('DATASTORE_ID') + ENV['DATASTORE_ID'].to_i + elsif config.key?("datastore_id") + config["datastore_id"].to_i + else + 1 + end + +image_root = if options[:image_root] + options[:image_root] + elsif ENV.key?('IMAGE_ROOT') + ENV['IMAGE_ROOT'] + elsif config[:image_root] + config['image_root'] + else + "#{File.dirname(__FILE__)}/../output" + end + +image_comment = if options[:image_comment] + options[:image_comment] + elsif ENV.key?('IMAGE_COMMENT') + ENV['IMAGE_COMMENT'] + elsif config[:image_comment] + config['image_comment'] + else + "#{template_name}" + end + +image_file = if options[:image_file] + options[:image_file] + elsif ENV.key?('IMAGE_FILE') + ENV['IMAGE_FILE'] + elsif config.key?(:image_file) + config['image_file'] + else + nil + end + +image_name = if options[:image_name] + options[:image_name] + elsif ENV.key?('IMAGE_NAME') + ENV['IMAGE_NAME'] + elsif config.key?(:image_name) + config[:image_name] + else + nil + end + +vm_name = if options[:vm_name] + options[:vm_name] + elsif ENV.key?('VM_NAME') + ENV['VM_NAME'] + elsif config.key?(:vm_name) + config[:vm_name] + else + nil + end + +CREDENTIALS = "#{user}:#{token}" +ENDPOINT = end_point +DS_ID = datastore_id +HTTP_ADDR = builder_addr + +oneCli = OpenNebula::Client.new(CREDENTIALS, ENDPOINT) + +# Template management +# the template can be an ERB template +# if you provide a template we use it as raw template +# if you provide a file name we read it first +# + +tpl_content = nil +if template + if File.readable?(template) + tpl_content = File.read(template) + else + tpl_content = template + end +else + if template_name + fname = "#{template_dir}/#{template_name}.#{tplExt}" + if File.readable?(fname) + tpl_content = File.read(fname) + elsif template_type != "image" + raise Exception.new("No service or vm named #{template_name}, file #{fname} is missing !") + end + else + raise Exception.new("No template provided, template name is missing, please provide a service name with option --name") + end +end + +# Process the ERB template. 
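+# The templates are plain ERB rendered against the local binding, so they can use
+# the variables resolved above (template_name, image_name, vm_name, user, oneCli)
+# just as the files under templates/one/ do, e.g. "<%= template_name %>" or
+# getTemplateByName(oneCli, vm_name).id.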
+# For the images the template is processed later during publishing +if template_type != "image" + tpl = if File.readable?(tpl_content) + ERB.new(File.read(tpl_content)) + else + ERB.new(tpl_content) + end + template = tpl.result(binding) +end + +if template_type == "service" + sv = Service::Client.new( + :username => user, + :password => token, + :url => flow_end_point.to_s, + :user_agent => 'CLI') + begin + puts("OpenNebula template publication:") + res = publishService(sv, flow_path, template, mode, user) + puts("\tService template #{template_name} #{res}") + rescue => err + puts(err) + end +elsif template_type == "vm" + begin + puts("OpenNebula template publication:") + publishVM(oneCli, template_name, template, mode) + rescue => err + puts(err) + end + +elsif template_type == "image" + if ! image_file + raise Exception.new("No image file provided, use --image-file option or IMAGE_FILE environement variable.") + exit(-1) + end + publishImage(template_name, image_comment, image_file, external_url, template, mode) +end
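
For reference, a minimal `.one-templates.conf` accepted by the script above might look like the sketch below; the key names are the ones the code looks up, and every value is a placeholder. Note that the script mixes string and symbol keys when querying this hash (`config["endpoint"]` but `config[:image_root]`, `config[:template_dir]`); since `YAML.load_file` produces string keys, only string-keyed settings such as the ones below are actually taken from the file, while the symbol-keyed ones fall back to CLI flags, environment variables or built-in defaults.

```yaml
# ./.one-templates.conf, or any path given via --config / TEMPLATER_CONFIG
user: oneadmin
token: changeme
endpoint: "http://one.example.org:2633/RPC2"
flow_endpoint: "http://one.example.org:2474"
external_url: ""             # set when the builder sits behind a reverse proxy
builder_addr: "192.0.2.10"   # address advertised for the temporary image HTTP server
datastore_id: 1
```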