feat/first_recipes #1

Open
pcaseiro wants to merge 10 commits from feat/first-recipes into develop
69 changed files with 3304 additions and 1 deletion
Showing only changes of commit 9f6a5866b1

View File

@ -0,0 +1,97 @@
#Flavour kind
build {
name = "kind"
description = <<EOF
This builder builds a QEMU image from the base build output. The goal here is to install ${local.Kind.Name}
with its provisioning.
EOF
source "source.qemu.nuo" {
output_directory = "${var.output_dir}/${var.version}/provisionned/${local.Kind.Name}"
vm_name = "${local.output_name}-${var.version}-${local.Kind.Name}.img"
iso_url = "${var.output_dir}/${var.version}/base/${local.output_name}-${var.version}.img"
iso_checksum = "none"
disk_size = 20480
disk_image = true
boot_command = [ "<enter><enter><wait>" ]
ssh_clear_authorized_keys = true
}
source "source.vmware-vmx.nuo" {
output_directory = "${var.output_dir}/${var.version}/provisionned/vmware/nuo-kind"
vm_name = "${local.output_name}-${var.version}-nuo-kind.img"
source_path = "${var.output_dir}/${var.version}/base/${local.output_name}-${var.version}.img.vmx"
boot_command = [ "<enter><enter><wait>" ]
ssh_clear_authorized_keys = true
vmx_data_post = {
"memsize" = "8192",
"numvcpus" = "4",
}
}
// Install the templater and the bootstrapper
provisioner "shell" {
script = "${local.locations.provisionning}/templater-install.sh"
}
// Create the configuration directories on the image
provisioner "shell" {
inline = [
"sh -cx 'mkdir -p ${local.builder_config.TemplateDir}'",
"sh -cx 'mkdir -p ${local.builder_config.ValueDir}'"
]
}
// Copy configuration templates to the image
provisioner "file" {
destination = "${local.builder_config.TemplateDir}/"
source = "${local.locations.templates}/conf/${build.name}/"
}
// Copy configuration values on the image
provisioner "file" {
destination = "${local.builder_config.ValueDir}/${build.name}.json"
content = "${jsonencode(local.Kind)}"
}
// Generate default configuration for kind
provisioner "shell" {
max_retries = 3
inline = [ "sh -cx '/usr/local/bin/btr -c ${local.builder_config.ValueDir}/ -t ${local.builder_config.TemplateDir}'" ]
}
// Complete kind install
provisioner "shell" {
expect_disconnect = true
max_retries = 6
script = "${local.locations.provisionning}/${build.name}.sh"
}
// Copy Cadoles SSH keys
provisioner "file" {
destination = "/tmp"
source = "${local.locations.provisionning}/ssh/cadoles/"
}
// Copy CNOUS SSH keys
provisioner "file" {
destination = "/tmp"
source = "${local.locations.provisionning}/ssh/cnous/"
}
provisioner "shell" {
inline = [
"sh -cx 'cat /tmp/*.pub >> /root/.ssh/authorized_keys'",
"sh -cx 'chmod -R 600 /root/.ssh/authorized_keys'"
]
}
provisioner "shell" {
inline = [
"service docker start",
"service containerd start",
"sleep 5",
"kubeadm config images pull" ]
}
}

View File

@ -0,0 +1,41 @@
locals {
// Definition of the Kubernetes service (templater compatible)
ServiceKubernetes = {
ConfigFiles = []
Repositories = {}
Packages = {
kubeadm = {
name = "kind"
action = "install"
}
kubectl = {
name = "kubectl"
action = "install"
}
}
Vars = {}
Users = {}
Daemons = {
ntpd = {
name = "ntpd"
type = "auto"
enabled = true
}
local = {
name = "local"
type = "auto"
enabled = true
}
}
}
// Definition of the full Kind configuration (with all its services)
Kind = {
Name = "kind"
Globals = local.Globals
Services = {
Docker = local.ServiceDocker
Kubernetes = local.ServiceKubernetes
}
}
}

View File

@ -2,7 +2,7 @@
#set -xeo pipefail
# Run the installer
yes | setup-alpine -e -f install.conf
yes | setup-alpine -e -f /root/install.conf
# Copy ssh keys
echo "Copy packer ssh key"

View File

@ -0,0 +1,26 @@
#cloud-config
ssh_pwauth: True
user: ${user}
password: ${password}
chpasswd:
expire: False
ssh_authorized_keys:
%{ for sk in ssh_keys ~}
- ${sk}
%{ endfor ~}
%{ if write_files != [] ~}
write_files:
%{ for fl in write_files ~}
- path: ${fl.path}
owner: ${fl.owner}:${fl.group}
permissions: 0o${fl.permissions}
defer: true
content: ${fl.content}
%{ endfor ~}
%{ endif ~}
%{ if runcmd != [] ~}
# Work around network interface down after boot
runcmd:
%{ for cmd in runcmd ~}
- ${cmd}
%{ endfor ~}
%{ endif ~}
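For reference, a minimal sketch of how this cloud-init template could be rendered with templatefile() — the values below are illustrative placeholders, not the exact call used by the recipes:

templatefile("${local.locations.templates}/conf/cloud-init/user-data", {
  user        = "eole"
  password    = "changeme"                           # placeholder
  ssh_keys    = [data.sshkey.install.public_key]
  write_files = []   # objects with path, owner, group, permissions, content
  runcmd      = []
})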

View File

@ -0,0 +1,6 @@
name = "nuo"
version = "3.18.2"
short_version = "3.18"
arch = "x86_64"
source_url = "https://dl-cdn.alpinelinux.org/alpine"
iso_cd_checksum = "6bc7ff54f5249bfb67082e1cf261aaa6f307d05f64089d3909e18b2b0481467f"

View File

@ -0,0 +1,93 @@
#Flavour docker
build {
name = "docker"
description = <<EOF
This builder builds a QEMU image from the base build output. The goal here is to install ${local.Docker.Name}
with its provisioning.
EOF
source "source.vmware-vmx.nuo" {
output_directory = "${var.output_dir}/${var.version}/provisionned/vmware/docker"
vm_name = "${local.output_name}-${var.version}-docker.img"
source_path = "${var.output_dir}/${var.version}/base/${local.output_name}-${var.version}.img.vmx"
boot_command = [ "<enter><enter><wait>" ]
ssh_clear_authorized_keys = true
}
source "source.qemu.nuo" {
output_directory = "${var.output_dir}/${var.version}/provisionned/${local.Docker.Name}"
vm_name = "${local.output_name}-${var.version}-${local.Docker.Name}.img"
iso_url = "${var.output_dir}/${var.version}/base/${local.output_name}-${var.version}.img"
iso_checksum = "none"
disk_size = 20480
disk_image = true
boot_command = [ "<enter><enter><wait>" ]
ssh_clear_authorized_keys = true
}
// Copy Cadoles SSH keys
provisioner "file" {
destination = "/tmp"
source = "${local.locations.provisionning}/ssh/cadoles/"
}
provisioner "shell" {
inline = [
"sh -cx 'cat /tmp/*.pub >> /root/.ssh/authorized_keys'",
"sh -cx 'chmod -R 600 /root/.ssh/authorized_keys'"
]
}
// Install the templater and the bootstrapper
provisioner "shell" {
script = "${local.dirs.provisionning}/templater-install.sh"
}
// Create the configuration directories on the image
provisioner "shell" {
inline = [
"sh -cx 'mkdir -p ${local.builder_config.TemplateDir}'",
"sh -cx 'mkdir -p ${local.builder_config.ValueDir}'"
]
}
// Copy configuration templates to the image
provisioner "file" {
destination = "${local.builder_config.TemplateDir}/"
source = "${local.dirs.templates}/conf/${build.name}/"
}
// Copy configuration values on the image
provisioner "file" {
destination = "${local.builder_config.ValueDir}/${build.name}.json"
content = "${jsonencode(local.Docker)}"
}
// Generate default configuration for docker
provisioner "shell" {
inline = [ "sh -cx '/usr/local/bin/btr -c ${local.builder_config.ValueDir}/ -t ${local.builder_config.TemplateDir}'" ]
}
// Install OpenNebula context tool
provisioner "shell" {
script = "${local.dirs.provisionning}/one-context.sh"
}
// Deploy the OpenNebula context script to manage configuration
provisioner "file" {
destination = "/etc/one-context.d/net-96-templater"
source = "${local.dirs.provisionning}/conf/one-context/net-96-templater"
}
provisioner "shell" {
inline = [ "sh -cx 'chmod +x /etc/one-context.d/net-96-templater'" ]
}
post-processor "shell-local" {
inline = [
"/bin/sh ${local.dirs.post-processors}/sparsify.sh ${var.output_dir}/${var.version}/provisionned/${local.Docker.Name} ${var.image_version}",
//"ruby ${local.dirs.tools}/one-templates -t image -m 640 -T ${local.dirs.templates}/one/image/common.tpl -n ${local.output_name}-${var.version}-${local.Docker.Name} -c '${local.Docker.Name} base image' --image-file ${var.output_dir}/${var.version}/provisionned/${local.Docker.Name}/${local.output_name}-${var.version}-${local.Docker.Name}.img",
//"ruby ${local.dirs.tools}/one-templates -t vm -m 640 -T ${local.dirs.templates}/one/vm/common.xml -n ${local.output_name}-${var.version}-${local.Docker.Name} --image-name ${local.output_name}-${var.version}-${local.Docker.Name}",
]
}
}

recipes/nuo/harbor.pkr.hcl Normal file
View File

@ -0,0 +1,136 @@
#Flavour harbor
build {
name = "harbor"
description = <<EOF
This builder builds a QEMU image from the base build output. The goal here is to install Harbor
with its provisioning.
EOF
source "source.vmware-vmx.nuo" {
output_directory = "${var.output_dir}/${var.version}/provisionned/vmware/nuo-harbor"
vm_name = "${local.output_name}-${var.version}-nuo-harbor.img"
source_path = "${var.output_dir}/${var.version}/base/${local.output_name}-${var.version}.img.vmx"
// BUG https://github.com/hashicorp/packer-plugin-vmware/issues/119
disk_additional_size = [ 81920 ]
//
boot_command = [ "<enter><enter><wait>" ]
ssh_clear_authorized_keys = true
vmx_data = {
"scsi1.pcislotnumber" = "16"
"scsi1.present" = "TRUE"
"scsi1.virtualdev" = "lsilogic"
"scsi1:0.filename" = "disk-1.vmdk"
"scsi1:0.present" = "TRUE"
"scsi1:0.redo" = ""
}
vmx_data_post = {
"memsize" = "4096",
"numvcpus" = "2",
}
}
source "source.qemu.nuo" {
output_directory = "${var.output_dir}/${var.version}/provisionned/${local.Config.Name}"
vm_name = "${local.output_name}-${var.version}-${local.Config.Name}.img"
iso_url = "${var.output_dir}/${var.version}/base/${local.output_name}-${var.version}.img"
iso_checksum = "none"
disk_size = 81920
disk_image = true
boot_command = [ "<enter><enter><wait>" ]
ssh_clear_authorized_keys = true
}
provisioner "shell" {
script = "${local.dirs.provisionning}/tools/additionnal-disk"
environment_vars = [
"PV_DEVICE=/dev/sdb",
"VG_NAME=data",
"LV_NAME=harbor-data",
"LV_MTP=/srv/harbor",
"LV_FS=ext4"
]
}
// Install the templater and the bootstrapper
provisioner "shell" {
script = "${local.dirs.provisionning}/templater-install.sh"
}
// Copy static configurations to /etc
provisioner "file" {
destination = "/etc"
source = "${local.dirs.provisionning}/conf/${build.name}/"
}
// Create the configuration directories on the image
provisioner "shell" {
inline = [
"sh -cx 'mkdir -p ${local.builder_config.TemplateDir}'",
"sh -cx 'mkdir -p ${local.builder_config.ValueDir}'"
]
}
// Copy configuration templates to the image
provisioner "file" {
destination = "${local.builder_config.TemplateDir}/"
source = "${local.dirs.templates}/conf/${build.name}/"
}
// Copy Docker configuration templates to the image
provisioner "file" {
destination = "${local.builder_config.TemplateDir}/"
source = "${local.dirs.templates}/conf/docker/"
}
// Copy configuration values on the image
provisioner "file" {
destination = "${local.builder_config.ValueDir}/${build.name}.json"
content = "${jsonencode(local.NuoHarbor)}"
}
provisioner "file" {
destination = "/etc/local.d/templater.start"
source = "${local.locations.provisionning}/conf/common/templater.start"
}
// Copy Cadoles SSH keys
provisioner "file" {
destination = "/tmp"
source = "${local.locations.provisionning}/ssh/cadoles/"
}
// Copy CNOUS SSH keys
provisioner "file" {
destination = "/tmp"
source = "${local.locations.provisionning}/ssh/cnous/"
}
provisioner "shell" {
inline = [
"sh -cx 'cat /tmp/*.pub >> /root/.ssh/authorized_keys'",
"sh -cx 'chmod -R 600 /root/.ssh/authorized_keys'"
]
}
// Generate default configuration for the server
provisioner "shell" {
max_retries = 3
inline = [ "sh -cx '/usr/local/bin/btr -c ${local.builder_config.ValueDir}/ -t ${local.builder_config.TemplateDir}'" ]
}
provisioner "shell" {
environment_vars = [
"HARBOR_SSL_CERT=${local.NuoHarbor.Services.Harbor.Vars.HarborSSLCert}",
"HARBOR_SSL_KEY=${local.NuoHarbor.Services.Harbor.Vars.HarborSSLPrivKey}",
"HARBOR_DOMAIN=${local.NuoHarbor.Services.Harbor.Vars.HarborDomain}"
]
script = "${local.dirs.provisionning}/${build.name}.sh"
}
provisioner "shell" {
inline = [
"chmod +x /etc/local.d/templater.start"
]
}
}

recipes/nuo/kind.pkr.hcl Normal file
View File

@ -0,0 +1,103 @@
#Flavour kind
build {
name = "kind"
description = <<EOF
This builder builds a QEMU image from the base build output. The goal here is to install ${local.Kind.Name}
with its provisioning.
EOF
source "source.qemu.nuo" {
output_directory = "${var.output_dir}/${var.version}/provisionned/${local.Kind.Name}"
vm_name = "${local.output_name}-${var.version}-${local.Kind.Name}.img"
iso_url = "${var.output_dir}/${var.version}/base/${local.output_name}-${var.version}.img"
iso_checksum = "none"
disk_size = 20480
disk_image = true
boot_command = [ "<enter><enter><wait>" ]
ssh_clear_authorized_keys = true
}
source "source.vmware-vmx.nuo" {
output_directory = "${var.output_dir}/${var.version}/provisionned/vmware/nuo-kind"
vm_name = "${local.output_name}-${var.version}-nuo-kind.img"
source_path = "${var.output_dir}/${var.version}/base/${local.output_name}-${var.version}.img.vmx"
boot_command = [ "<enter><enter><wait>" ]
ssh_clear_authorized_keys = true
vmx_data_post = {
"memsize" = "8192",
"numvcpus" = "4",
}
}
// Install the templater and the bootstrapper
provisioner "shell" {
script = "${local.locations.provisionning}/templater-install.sh"
}
// Create the configuration directories on the image
provisioner "shell" {
inline = [
"sh -cx 'mkdir -p ${local.builder_config.TemplateDir}'",
"sh -cx 'mkdir -p ${local.builder_config.ValueDir}'"
]
}
// Copy configuration templates to the image
provisioner "file" {
destination = "${local.builder_config.TemplateDir}/"
source = "${local.locations.templates}/conf/${build.name}/"
}
// Copy configuration values on the image
provisioner "file" {
destination = "${local.builder_config.ValueDir}/${build.name}.json"
content = "${jsonencode(local.Kind)}"
}
// Generate default configuration for kind
provisioner "shell" {
max_retries = 3
inline = [ "sh -cx '/usr/local/bin/btr -c ${local.builder_config.ValueDir}/ -t ${local.builder_config.TemplateDir}'" ]
}
// Complete kind install
provisioner "shell" {
expect_disconnect = true
max_retries = 6
script = "${local.locations.provisionning}/${build.name}.sh"
}
// Copy Cadoles SSH keys
provisioner "file" {
destination = "/tmp"
source = "${local.locations.provisionning}/ssh/cadoles/"
}
// Copy CNOUS SSH keys
provisioner "file" {
destination = "/tmp"
source = "${local.locations.provisionning}/ssh/cnous/"
}
provisioner "file" {
destination = "/etc/local.d/init${build.name}.start"
source = "${local.locations.provisionning}/conf/${build.name}/init${build.name}.start"
}
provisioner "shell" {
inline = [
"sh -cx 'chmod +x /etc/local.d/init${build.name}.start'",
"sh -cx 'cat /tmp/*.pub >> /root/.ssh/authorized_keys'",
"sh -cx 'chmod -R 600 /root/.ssh/authorized_keys'"
]
}
provisioner "shell" {
inline = [
"service podman start",
//"service containerd start",
//"sleep 10",
//"kind create cluster --config /etc/cluster.yaml ",
"sleep 10"]
}
}

View File

@ -0,0 +1,6 @@
locals {
builder_config = {
TemplateDir = "/usr/share/builder/templates"
ValueDir = "/usr/share/builder/values"
}
}
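The recipes serialize each service definition into ValueDir with jsonencode(...) (see the file provisioners above). As an illustration only, a trimmed docker.json dropped in /usr/share/builder/values could look like this (fields abridged from the Docker locals below):

{
  "Name": "docker",
  "Globals": { "Vars": { "PrometheusPort": "9090" } },
  "Services": {
    "Docker": {
      "Packages": { "docker": { "name": "docker", "action": "install" } },
      "Vars": { "RootlessDocker": "true" }
    }
  }
}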

View File

@ -0,0 +1,65 @@
locals {
// Definition of the Docker service (templater compatible)
ServiceDocker = {
ConfigFiles = [
{
destination = "/etc/rc.conf"
source = "rc.conf.pktpl.hcl"
mode = "600"
owner = "root"
group = "root"
}
]
Repositories = {}
Packages = {
docker = {
name = "docker"
action = "install"
}
docker-compose = {
name = "docker-compose"
action = "install"
}
gpg = {
name = "gpg"
action = "install"
}
}
Daemons = {
docker = {
name = "docker"
type = "auto"
enabled = true
}
cgroups = {
name = "cgroups"
type = "auto"
enabled = true
}
local = {
name = "local"
type = "auto"
enabled = true
}
}
Vars = {
RootlessDocker = "true"
}
Users = {
dock = {
username = "dock"
group = "dock"
home = "/srv/dock"
shell = "/bin/nologin"
}
}
}
Docker = {
Name = "docker"
Globals = local.Globals
Services = {
Docker = local.ServiceDocker
}
}
}

View File

@ -0,0 +1,7 @@
locals {
Globals = {
Vars = {
PrometheusPort = "9090"
}
}
}

View File

@ -0,0 +1,89 @@
locals {
ServiceNuoHarbor = {
ConfigFiles = [
{
destination = "/etc/harbor/harbor.yml"
source = "harbor.yml.pktpl.hcl"
mode = "600"
owner = "root"
group = "root"
}
]
Vars = {
AuthEnabled = false
User = "harbor"
Group = "harbor"
HarborHTTPPort = "80"
HarborHTTPSPort = "443"
HarborSSLCert = "/etc/ssl/certs/harbor.crt"
HarborSSLPrivKey = "/etc/ssl/certs/harbor.key"
HarborDomain = "reg.k8s.in.nuonet.fr"
HarborAdminPassword = "ChangeMeAsSoonAsPossible"
HarborDBPassword = "WeNeedToBeAbleToManagePasswords"
NIC = [
{
Name = "eth0"
IP = "192.168.160.10"
Mask = "255.255.254.0"
Gateway = "192.168.160.1"
}
]
DNS = [ "192.168.160.10" ]
Set = { Hostname = "reg.k8s.in.nuonet.fr" }
}
Repositories = {
AlpineEdgeTesting = {
type = "apk"
name = "testing"
url = "http://mirrors.ircam.fr/pub/alpine/edge/testing"
enabled = true
}
}
Packages = {
vmtools = {
name = "open-vm-tools"
action = "install"
},
mkcert = {
name = "mkcert"
action = "install"
},
gpg-agent = {
name = "gpg-agent"
action = "install"
}
ncurses = {
name = "ncurses"
action = "install"
}
}
Daemons = {
vm-tools = {
name = "open-vm-tools"
type = "auto"
enabled = true
}
harbor = {
name = "harbor"
type = "auto"
enabled = true
}
}
Users = {
harbor = {
username = "harbor"
group = "harbor"
home = "/srv/harbor"
shell = "/bin/nologin"
}
}
}
NuoHarbor = {
Name = "nuo-harbor"
Globals = local.Globals
Services = {
Docker = local.ServiceDocker
Harbor = local.ServiceNuoHarbor
}
}
}

View File

@ -0,0 +1,132 @@
locals {
// Definition of the Kubernetes service (templater compatible)
ServiceKubernetes = {
ConfigFiles = [
{
destination = "/etc/cluster.yaml"
source = "cluster.yaml.pktpl.hcl"
mode = "600"
owner = "root"
group = "root"
},
{
destination = "/etc/rc.conf"
source = "rc.conf.pktpl.hcl"
mode = "600"
owner = "root"
group = "root"
}
]
Repositories = {}
Packages = {
docker = {
name = "docker"
action = "install"
}
docker-compose = {
name = "docker-compose"
action = "install"
}
gpg = {
name = "gpg"
action = "install"
}
kubeadm = {
name = "kind"
action = "install"
}
vmtools = {
name = "open-vm-tools"
action = "install"
}
vmtools-rc = {
name = "open-vm-tools-openrc"
action = "install"
}
}
Vars = {
RootlessDocker = "true"
Cluster = {
Name = "nuo"
IngressReady = true
PodSubNet = "10.110.0.0/16"
ServieSubNet = "10.115.0.0/16"
Version = "1.27.2"
Nodes = [
{
Role = "control-plane"
Ports = [
{
containerPort = 31000
hostPort = 31000
listenAddress = "0.0.0.0"
},
{
containerPort = 80
hostPort = 8080
listenAddress = "0.0.0.0"
},
{
containerPort = 443
hostPort = 8443
listenAddress = "0.0.0.0"
}
]
},
{ Role = "worker" },
{ Role = "worker" },
{ Role = "worker" }
]
}
}
Users = {
dock = {
username = "dock"
group = "dock"
home = "/srv/dock"
shell = "/bin/nologin"
}
}
Daemons = {
vm-tools = {
name = "open-vm-tools"
type = "auto"
enabled = true
}
docker = {
name = "docker"
type = "auto"
enabled = true
}
cgroups = {
name = "cgroups"
type = "auto"
enabled = true
}
local = {
name = "local"
type = "auto"
enabled = true
}
ntpd = {
name = "ntpd"
type = "auto"
enabled = true
}
}
}
// Definition of the full Kind configuration (with all its services)
Kind = {
Name = "kind"
Globals = local.Globals
Services = {
Kubernetes = local.ServiceKubernetes
}
}
}

View File

@ -0,0 +1,176 @@
locals {
// Definition of the nuo-matchbox service (templater compatible)
ServiceNuoMatchBox = {
ConfigFiles = [
{
destination = "/etc/dnsmasq.d/pxe.conf"
source = "dnsmasq.d/ipxe.conf.pktpl.hcl"
mode = "600"
owner = "root"
group = "root"
},
{
destination = "/etc/dnsmasq-hosts.conf"
source = "dnsmasq.d/dnsmasq-hosts.conf.pktpl.hcl"
mode = "600"
owner = "dnsmasq"
group = "root"
},
{
destination = "/etc/conf.d/matchbox"
source = "conf.d/matchbox.conf.pktpl.hcl"
mode = "600"
owner = "root"
group = "root"
},
{
destination = "/etc/init.d/matchbox"
source = "init.d/matchbox.pktpl.hcl"
mode = "700"
owner = "root"
group = "root"
},
{
destination = "/etc/network/interfaces"
source = "network/interfaces.pktpl.hcl"
mode = "700"
owner = "root"
group = "root"
},
{
destination = "/etc/resolv.conf"
source = "resolv.conf.pktpl.hcl"
mode = "600"
owner = "root"
group = "root"
},
{
destination = "/etc/hostname"
source = "hostname.pktpl.hcl"
mode = "600"
owner = "root"
group = "root"
}
]
Repositories = {
AlpineEdgeTesting = {
type = "apk"
name = "testing"
url = "http://mirrors.ircam.fr/pub/alpine/edge/testing"
enabled = true
}
AlpineEdgeCommunity = {
type = "apk"
name = "testing"
url = "http://mirrors.ircam.fr/pub/alpine/edge/community"
enabled = true
}
}
Packages = {
dnsmasq = {
name = "dnsmasq"
action = "install"
}
terraform = {
name = "terraform"
action = "install"
}
git = {
name = "git"
action = "install"
}
kubectl = {
name = "kubectl"
action = "install"
}
gpg = {
name = "gpg"
action = "install"
}
vmtools = {
name = "open-vm-tools"
action = "install"
}
bash = {
name = "bash"
action = "install"
}
}
Vars = {
PXE = {
DHCPMode = "standalone"
DNSDomain = "k8s.in.nuonet.fr"
ListenInterface = "eth0"
GreetingMessage = "Nuo PXE Boot Server"
DelayTime = "5"
BootingMessage = "Booting from network the Nuo way"
DHCPRangeStart = "192.168.160.20"
DHCPRangeEnd = "192.168.160.60"
DHCPLeaseDuration = "48h"
TFTPRoot = "/var/lib/tftpboot"
}
DNSMasq = {
Hosts = [
{
Name = "reg.k8s.in.nuonet.fr"
IP = "192.168.160.11"
}
]
}
MatchBox = {
Hostname = "mb.k8s.in.nuonet.fr"
HTTPPort = "8080"
gRPCPort = "8081"
LogLevel = "info"
}
NIC = [
{
Name = "eth0"
IP = "192.168.160.10"
Mask = "255.255.254.0"
Gateway = "192.168.160.1"
}
]
DNS = [ "10.253.50.105" ]
Hosts = [
{
Name = "harbor.k8s.in.nuonet.fr"
IP = "192.168.160.11"
}
]
Set = { Hostname = "mb.k8s.in.nuonet.fr" }
}
Users = {}
Daemons = {
vm-tools = {
name = "open-vm-tools"
type = "auto"
enabled = true
}
matchbox = {
name = "matchbox"
type = "auto"
enabled = true
}
dnsmasq = {
name = "dnsmasq"
type = "auto"
enabled = true
}
local = {
name = "local"
type = "auto"
enabled = true
}
}
}
// Definition of the full nuo-matchbox configuration (with all its services)
NuoMatchBox = {
Name = "nuo-matchbox"
Globals = local.Globals
Services = {
NuoMatchBox = local.ServiceNuoMatchBox
}
}
}

View File

@ -0,0 +1,37 @@
# "timestamp" template function replacement
locals {
locations = {
recipes = "${path.cwd}/recipes/${var.name}"
templates = "${path.cwd}/recipes/${var.name}/templates"
provisionning = "${path.cwd}/recipes/${var.name}/provisionning"
post-processors = "${path.cwd}/recipes/${var.name}/post-processor"
tools = "${path.cwd}/tools"
}
dirs = local.locations
timestamp = regex_replace(timestamp(), "[- TZ:]", "")
output_name = "${var.name}"
source_checksum_url = "file:${var.source_url}/${var.version}/${var.arch}/iso-cd/SHA256SUMS"
source_iso = "${var.source_url}/v${var.short_version}/releases/${var.arch}/alpine-virt-${var.version}-${var.arch}.iso"
source_checksum = "${var.iso_cd_checksum}"
ssh_user = "root"
ssh_password = "PbkRc1vup7Wq5n4r"
disk_size = 8000
memory = 512
installOpts = {
hostname = var.name
user = "eole"
disk_device = "/dev/vda"
}
installOptsVMWare = {
hostname = var.name
user = "eole"
disk_device = "/dev/sda"
}
installOptsVirtualBox = {
hostname = var.name
user = "eole"
disk_device = "/dev/sda"
}
instance_data = { "instance-id": "${var.name}" }
}

recipes/nuo/main.pkr.hcl Normal file
View File

@ -0,0 +1,136 @@
#Flavour base
build {
name = "base"
description = <<EOF
This builder builds a QEMU image from a nuo "virt" CD ISO file.
EOF
source "vmware-iso.nuo" {
output_directory = "${var.output_dir}/${var.version}/base"
vm_name = "${local.output_name}-${var.version}.img"
disk_size = 10240
iso_url = "${local.source_iso}"
iso_checksum = "${var.iso_cd_checksum}"
http_content = {
"/ssh-packer-pub.key" = data.sshkey.install.public_key
"/install.conf" = templatefile("${local.locations.templates}/conf/install/awnsers.pktpl.hcl", local.installOptsVMWare)
}
boot_command = [
"<wait10s>root<enter>",
"<wait1s><enter>",
"<wait1s>setup-interfaces<enter><wait1s><enter><wait1s><enter><wait1s><enter>",
"<wait1s>ifup eth0<enter>",
"<wait1s>mkdir -p .ssh<enter>",
"<wait1s>wget http://{{.HTTPIP}}:{{.HTTPPort}}/ssh-packer-pub.key -O .ssh/authorized_keys<enter><wait1s>",
"<wait1s>chmod 600 .ssh/authorized_keys<enter>",
"<wait1s>wget http://{{.HTTPIP}}:{{.HTTPPort}}/install.conf<enter><wait1s>",
"<wait1s>setup-sshd -c openssh -k .ssh/authorized_keys<enter><wait1s>",
]
}
source "qemu.nuo" {
output_directory = "${var.output_dir}/${var.version}/base"
vm_name = "${local.output_name}-${var.version}.img"
disk_size = 8000
iso_url = "${local.source_iso}"
iso_checksum = "${var.iso_cd_checksum}"
http_content = {
"/ssh-packer-pub.key" = data.sshkey.install.public_key
"/install.conf" = templatefile("${local.locations.templates}/conf/install/awnsers.pktpl.hcl", local.installOpts)
}
boot_command = [
"<wait5s>root<enter>",
"<wait1s><enter>",
"<wait1s>setup-interfaces<enter><wait1s><enter><wait1s><enter><wait1s><enter>",
"<wait1s>ifup eth0<enter>",
"<wait1s>mkdir -p .ssh<enter>",
"<wait1s>wget http://{{.HTTPIP}}:{{.HTTPPort}}/ssh-packer-pub.key -O .ssh/authorized_keys<enter><wait1s>",
"<wait1s>chmod 600 .ssh/authorized_keys<enter>",
"<wait1s>wget http://{{.HTTPIP}}:{{.HTTPPort}}/install.conf<enter><wait1s>",
"<wait1s>setup-sshd -c openssh -k .ssh/authorized_keys<enter><wait1s>",
]
}
source "virtualbox-iso.nuo" {
output_directory = "${var.output_dir}/${var.version}/base"
vm_name = "${local.output_name}-${var.version}.img"
disk_size = 10240
iso_url = "${local.source_iso}"
iso_checksum = "${var.iso_cd_checksum}"
guest_os_type = "Linux_64"
cd_label = "cidata"
cd_content = {
"meta-data" = jsonencode(local.instance_data)
"user-data" = templatefile("${local.locations.templates}/conf/cloud-init/user-data",
{
user = local.ssh_user,
password = local.ssh_password,
root_password = local.ssh_password,
runcmd = []
ssh_keys = [ data.sshkey.install.public_key ]
files = [
{
path = "/root/install.conf"
owner = "root"
group = "root"
permissions = "600"
content = [ "KEYMAPOPTS=\"fr fr\"",
"HOSTNAMEOPTS=\"-n nuo\"",
"INTERFACESOPTS=\"auto lo",
"iface lo inet loopback",
"auto eth0",
"iface eth0 inet dhcp",
" hostname nuo\"",
"DNSOPTS=\"\"",
"TIMEZONEOPTS=\"-z Europe/Paris\"",
"PROXYOPTS=\"none\"",
"APKREPOSOPTS=\"-r -c\"",
"SSHDOPTS=\"-c openssh -k /root/.ssh/authorized_keys\"",
"NTPOPTS=\"-c openntpd\"",
"DISKOPTS=\"-L -m sys /dev/sda\""]
}
]
}
)
}
boot_command = []
}
provisioner "shell" {
pause_before = "1s"
expect_disconnect = true # Because the previous step has rebooted the machine
script = "${local.locations.provisionning}/${var.name}-${var.short_version}-install.sh"
valid_exit_codes = [ 0, 141 ]
}
provisioner "shell" {
pause_before = "1s"
inline = [ "sh -cx 'mkdir -p ${local.builder_config.TemplateDir}'" ]
}
provisioner "shell" {
pause_before = "10s"
script = "${local.locations.provisionning}/${var.name}-${var.short_version}-postinstall.sh"
}
provisioner "shell" {
script = "${local.locations.provisionning}/letsencrypt.sh"
}
provisioner "file" {
destination = "/etc/conf.d/chronyd"
source = "${local.locations.templates}/conf/conf.d/"
}
post-processor "manifest" {
keep_input_artifact = true
}
post-processor "shell-local" {
inline = [
"/bin/sh ${path.cwd}/post-processors/sparsify.sh ${var.output_dir}/${var.version}/base ${var.image_version}"
]
}
}

View File

@ -0,0 +1,120 @@
#Flavour nuo-matchbox
build {
name = "matchbox"
description = <<EOF
This builder builds a QEMU image from the base build output. The goal here is to install nuo-matchbox
with its provisioning.
EOF
source "source.vmware-vmx.nuo" {
output_directory = "${var.output_dir}/${var.version}/provisionned/vmware/nuo-matchbox"
vm_name = "${local.output_name}-${var.version}-nuo-matchbox.img"
source_path = "${var.output_dir}/${var.version}/base/${local.output_name}-${var.version}.img.vmx"
boot_command = [ "<enter><enter><wait>" ]
ssh_clear_authorized_keys = true
}
source "source.qemu.nuo" {
output_directory = "${var.output_dir}/${var.version}/provisionned/nuo-matchbox"
vm_name = "${local.output_name}-${var.version}-nuo-matchbox.img"
iso_url = "${var.output_dir}/${var.version}/base/${local.output_name}-${var.version}.img"
iso_checksum = "none"
disk_size = 40960
disk_image = true
boot_command = [ "<enter><enter><wait>" ]
ssh_clear_authorized_keys = true
}
// Install the templater and the bootstrapper
provisioner "shell" {
script = "${local.dirs.provisionning}/templater-install.sh"
}
// Create the configuration directories on the image
provisioner "shell" {
inline = [
"sh -cx 'mkdir -p ${local.builder_config.TemplateDir}'",
"sh -cx 'mkdir -p ${local.builder_config.ValueDir}'"
]
}
// Copy configuration templates to the image
provisioner "file" {
destination = "${local.builder_config.TemplateDir}/"
source = "${local.dirs.templates}/conf/${build.name}/"
}
// Copy configuration values on the image
provisioner "file" {
destination = "${local.builder_config.ValueDir}/${build.name}.json"
content = "${jsonencode(local.NuoMatchBox)}"
}
// Copy the nuo-matchbox boot provisioning script
provisioner "file" {
destination = "/etc/local.d/initmatchbox.start"
source = "${local.locations.provisionning}/conf/${build.name}/initmatchbox.start"
}
// Copy Cadoles SSH keys
provisioner "file" {
destination = "/tmp"
source = "${local.locations.provisionning}/ssh/cadoles/"
}
// Copy CNOUS SSH keys
provisioner "file" {
destination = "/tmp"
source = "${local.locations.provisionning}/ssh/cnous/"
}
provisioner "shell" {
inline = [
"sh -cx 'cat /tmp/*.pub >> /root/.ssh/authorized_keys'",
"sh -cx 'chmod -R 600 /root/.ssh/authorized_keys'"
]
}
provisioner "file" {
destination = "/etc/local.d/templater.start"
source = "${local.locations.provisionning}/conf/common/templater.start"
}
// Copy the TFTP provisioning script
provisioner "file" {
destination = "/etc/local.d/inittftp.start"
source = "${local.locations.provisionning}/conf/${build.name}/inittftp.start"
}
// Generate the default configuration for nuo-matchbox
provisioner "shell" {
max_retries = 3
inline = [ "sh -cx '/usr/local/bin/btr -c ${local.builder_config.ValueDir}/ -t ${local.builder_config.TemplateDir}'" ]
}
provisioner "file" {
destination = "/tmp/${build.name}.sh"
source = "${local.dirs.provisionning}/${build.name}.sh"
}
provisioner "shell" {
inline = [
"sh -cx 'sh /tmp/${build.name}.sh'"
]
}
provisioner "shell" {
inline = [
"chmod +x /etc/local.d/initmatchbox.start",
"chmod +x /etc/local.d/templater.start",
"chmod +x /etc/local.d/inittftp.start"
]
}
post-processor "shell-local" {
inline = [
"/bin/sh ${path.cwd}/post-processors/sparsify.sh ${var.output_dir}/${var.version}/provisionned/nuo-matchbox ${var.image_version}"
]
}
}

View File

@ -0,0 +1,24 @@
packer {
required_plugins {
sshkey = {
version = ">= 1.0.1"
source = "github.com/ivoronin/sshkey"
}
vmware = {
version = ">= 1.0.8"
source = "github.com/hashicorp/vmware"
}
qemu = {
source = "github.com/hashicorp/qemu"
version = "~> 1"
}
virtualbox = {
source = "github.com/hashicorp/virtualbox"
version = "~> 1"
}
}
}
data "sshkey" "install" {
type = "ed25519"
}
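With the plugins declared above, a typical local run would look something like this; the variable file name and -only filter are illustrative and not part of this PR:

packer init recipes/nuo
packer build -var-file=recipes/nuo/nuo.pkrvars.hcl -only='base.qemu.nuo' recipes/nuo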

View File

@ -0,0 +1,31 @@
#!/bin/sh
if [ "${#}" -ne 2 ]; then
echo "Missing arguments: expected WORKDIR and VERSION"
exit 2
fi
WORKDIR=${1}
VERSION=${2}
findImages() {
find ${1} -iname "*.img"
}
sleep 5
for imageName in $(findImages "${WORKDIR}"); do
if [ "$(which virt-sparsify)" ]; then
newName=$(echo $imageName | sed "s/.img/_${VERSION}.img/g")
virt-sparsify --compress --tmp ./ --format qcow2 ${imageName} ${newName}
if [ "${?}" -eq 0 ]; then
rm -rf ${imageName}
cd ${WORKDIR}
ln -s $(basename ${newName}) $(basename ${imageName})
echo ${newName} ${imageName}
cd -
fi
else
echo "Sparsify skipped 'virt-sparsify' command is missing"
fi
done
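Usage sketch: the shell-local post-processors above invoke this script with the image directory and the image version, along these lines (paths illustrative):

/bin/sh post-processors/sparsify.sh output/3.18.2/base 1.0.0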

View File

@ -0,0 +1,104 @@
#!/usr/bin/env bash
#
# Generate all the configuration files
# Get all the values from the VLS_DIR
# Process each template from the TPL_DIR with this values
#
ENV_FILE=${ENV_FILE:-/var/run/one-context/one_env}
TPL_DIR="/usr/share/builder/templates"
VLS_DIR="/usr/share/builder/values"
CONFIG=""
if [ -f "${ENV_FILE}" ]; then
. ${ENV_FILE}
fi
BTR="$(command -v btr)"
if [ "${?}" -ne 0 ]; then
echo "Warning: Nothing to do the templater is not installed"
exit 0
fi
if [ ! -e "${TPL_DIR}" ]; then
echo "Error: The template dir is missing (${TPL_DIR})"
exit 1
fi
if [ ! -e "${VLS_DIR}" ]; then
echo "Error: The template dir is missing (${VLS_DIR})"
exit 1
fi
jsonQuery() {
local data="${1}"
local query="${2}"
echo "${data}" | jq -cr "${query}"
}
# NAME: @jsonMerge
# AIM: Merge two json structures
# NOTES:
# The last one has the last word
# if you have the same key in A and B
# this keeps the value of the B structure.
# PARAMS:
# $1: original JSON Structure
# $2: updated JSON Structure
jsonMerge() {
local data="${1}"
local data2="${2}"
echo "${data} ${data2}" | jq -cr -s ".[0] * .[1]"
}
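# Illustrative example: with jq's "*" merge the right-hand structure wins
# on duplicate keys, e.g. jsonMerge '{"a":1,"b":1}' '{"b":2}' prints {"a":1,"b":2}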
jsonUpdateVal() {
local json="${1}"
local key="${2}"
local value="${3}"
echo "${json}" | jq --arg a "${value}" "${key} = \$a"
}
getValues() {
local values=""
for file in $(find ${VLS_DIR} -name "*.json"); do
values="${values}$(cat ${file})"
done
if [ -n "${RAW_CONFIG}" ]; then
values="$(jsonMerge ${values} ${RAW_CONFIG})"
fi
for svc in $(echo ${values} | jq -cr '.Services|keys[]'); do
for key in $(echo ${values} | jq -cr ".Services.${svc}.Vars|keys[]"); do
ukey=${key^^}
vkeys="$(echo ${values} | jq -cr ".Services.${svc}.Vars.${key}|keys[]" 2>/dev/null)"
if [ ${?} -eq 0 ]; then
for var in $(echo ${values} | jq -cr ".Services.${svc}.Vars.${key}|keys[]"); do
uvar=${var^^}
val=$(eval echo "\$${ukey}_${uvar}")
if [ -n "${val}" ]; then
values=$(jsonUpdateVal "${values}" ".Services.${svc}.Vars.${key}.${var}" "${val}")
fi
done
else
values=$(jsonUpdateVal "${values}" ".Services.${svc}.Vars.${key}" "${!ukey}")
fi
done
done
echo ${values}
}
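# Illustrative example: with Vars.PXE.DNSDomain defined in the values and
# PXE_DNSDOMAIN exported by ${ENV_FILE}, the loop above rewrites
# .Services.<svc>.Vars.PXE.DNSDomain to the exported value (key and
# sub-key are upper-cased and joined with "_").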
processTemplates() {
${BTR} -t ${TPL_DIR} -c "${1}"
}
VALUES=$(getValues)
file=$(mktemp)
echo "${VALUES}" > "${file}"
processTemplates "${file}"
rm -rf "${file}"

View File

@ -0,0 +1,64 @@
#!/sbin/openrc-run
: ${SUBCFGDIR:=/srv}
DOCKER_COMPOSE_UP_ARGS=${DOCKER_COMPOSE_UP_ARGS-"--no-build --no-recreate --no-deps"}
SUBSVC="${SVCNAME#*.}"
[ -z "${SUBSVC}" ] && exit 1
: ${SUBCFG:="${SUBCFGDIR}/${SUBSVC}/docker-compose.yml"}
DOCOCMD="/usr/bin/docker-compose"
export COMPOSE_HTTP_TIMEOUT=300
description="Manage docker services defined in ${SUBCFG}"
extra_commands="configtest build"
description_configtest="Check configuration via \"docker-compose -f ${SUBCFG} config\""
description_build="Run \"docker-compose -f ${SUBCFG} build\""
depend() {
need localmount net docker
use dns
after docker
}
configtest() {
if ! [ -f "${SUBCFG}" ]; then
eerror "The config file ${SUBCFG} does not exist!"
return 1
fi
if "${DOCOCMD}" -f "${SUBCFG}" config >&/dev/null; then
einfo "config: ok"
else
eerror "config: error"
return 1
fi
}
build() {
configtest || return 1
ebegin "Building dockerservice ${SUBSVC}"
"${DOCOCMD}" -f "${SUBCFG}" build
eend $?
}
start() {
configtest || return 1
ebegin "Starting dockerservice ${SUBSVC}"
sleep 5
"${DOCOCMD}" -f "${SUBCFG}" up -d ${DOCKER_COMPOSE_UP_ARGS}
eend $?
}
stop() {
ebegin "Stopping dockerservice ${SUBSVC}"
"${DOCOCMD}" -f "${SUBCFG}" stop --timeout=300
eend $?
}
status() {
if [ "$("${DOCOCMD}" -f "${SUBCFG}" top | wc -l)" -gt "0" ]; then
einfo "status: started"
else
einfo "status: stopped"
return 3
fi
}

View File

@ -0,0 +1,13 @@
#!/bin/sh
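# Boot-time init for the kind cluster: this script is installed as
# /etc/local.d/initkind.start by the kind recipe and run by OpenRC's
# "local" service (enabled in the recipe's Daemons map).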
CLUSTER_NAME="nuo"
if kind get clusters -q | grep -q "${CLUSTER_NAME}"; then
podman start -f name="^${CLUSTER_NAME}"
else
kind create cluster --config /etc/cluster.yaml | tee -a /var/log/kind-init.log
fi
if ! command -v kubectl > /dev/null; then
apk add kubectl --repository=https://dl-cdn.alpinelinux.org/alpine/edge/community
fi

View File

@ -0,0 +1,25 @@
#!/bin/sh
FL_VERSIONS="current 3374.2.0"
MATCHBOX_DIR="/var/lib/matchbox"
ASSETS_DIR="${MATCHBOX_DIR}/assets/"
GPG_FNAME="Flatcar_Image_Signing_Key.asc"
GPG_KEYS_URL="https://www.flatcar.org/security/image-signing-key/"
cd /tmp
curl -L -O ${GPG_KEYS_URL}/${GPG_FNAME}
gpg --import --keyid-format LONG ${GPG_FNAME}
cd -
echo "Provisionning matchbox with flatcar images"
tout=30
for version in ${FL_VERSIONS}; do
for i in $(seq 1 ${tout});do
echo " * ${FL_VERSIONS} stable image (try ${i})"
/usr/local/bin/get-flatcar stable ${version} ${ASSETS_DIR}
if [[ "${?}" -eq 0 ]]; then
break
fi
done
done

View File

@ -0,0 +1,10 @@
#!/bin/sh
dest="${1}"
ipxeEFISource="http://boot.ipxe.org/ipxe.efi"
kpxeSource="http://boot.ipxe.org/undionly.kpxe"
cd "${dest}"
wget "${ipxeEFISource}"
wget "${kpxeSource}"

View File

@ -0,0 +1,38 @@
#!/bin/sh
HARBOR_VERSION="2.8.2"
HARBOR_SOURCE_URL="https://github.com/goharbor/harbor/releases/download/v${HARBOR_VERSION}/"
HARBOR_INSTALLER="harbor-offline-installer-v${HARBOR_VERSION}.tgz"
HARBOR_INSTALLER_ASC="${HARBOR_INSTALLER}.asc"
export TERM=xterm
gpg --keyserver hkps://keyserver.ubuntu.com --receive-keys 644FF454C0B4115C
cd /srv
wget -q ${HARBOR_SOURCE_URL}${HARBOR_INSTALLER}
wget -q ${HARBOR_SOURCE_URL}${HARBOR_INSTALLER_ASC}
gpg -v --keyserver hkps://keyserver.ubuntu.com --verify ${HARBOR_INSTALLER}.asc
if [ $? -ne 0 ]; then
echo "Harbor sources ${HARBOR_SOURCE_URL}${HARBOR_INSTALLER} are corrupt"
exit 3
fi
tar xzvf ${HARBOR_INSTALLER}
if [ ! -f ${HARBOR_SSL_CERT} ];then
mkcert -install
mkcert -cert-file ${HARBOR_SSL_CERT} -key-file ${HARBOR_SSL_KEY} ${HARBOR_DOMAIN}
fi
cd harbor
ln -s /etc/harbor/harbor.yml .
service docker start
sleep 5
./prepare
./install.sh --with-notary --with-trivy

View File

@ -0,0 +1,10 @@
#!/bin/sh
# Remove swap
cat /etc/fstab | grep -v swap > temp.fstab
cat temp.fstab > /etc/fstab
rm temp.fstab
swapoff -a
#lvremove -y /dev/vg0/lv_swap
#lvextend -y -r -l +100%FREE /dev/vg0/lv_root

View File

@ -0,0 +1,26 @@
#!/bin/sh
set -eo pipefail
DESTDIR=/usr/local/share/ca-certificates
UPDATE_CERTS_CMD=update-ca-certificates
CERTS="$(cat <<EOF
https://letsencrypt.org/certs/isrgrootx1.pem
https://letsencrypt.org/certs/isrg-root-x2.pem
https://letsencrypt.org/certs/lets-encrypt-r3.pem
https://letsencrypt.org/certs/lets-encrypt-e1.pem
https://letsencrypt.org/certs/lets-encrypt-r4.pem
https://letsencrypt.org/certs/lets-encrypt-e2.pem
EOF
)"
cd "$DESTDIR"
for cert in $CERTS; do
echo "Downloading '$cert'..."
filename=$(basename "$cert")
wget --tries=10 --timeout=30 -O "$filename" "$cert"
#openssl x509 -in "$filename" -inform PEM -out "$filename.crt"
done
$UPDATE_CERTS_CMD

View File

@ -0,0 +1,39 @@
#!/bin/sh
VERSION=0.10.0
ARCH=amd64
BIN="matchbox"
FILENAME="matchbox-v${VERSION}-linux-${ARCH}.tar.gz"
URL="https://github.com/poseidon/matchbox/releases/download/v${VERSION}/${FILENAME}"
MATCHBOX_DIR="/var/lib/matchbox"
ASSETS_DIR="${MATCHBOX_DIR}/assets/"
TFTP_DIR="/var/lib/tftpboot"
MATCHBOX_USER="matchbox"
FL_VERSIONS="current 3374.2.0"
apk add wget
echo "Downloading matchbox"
cd /tmp
wget -q --show-progress "${URL}"
tar -xzvf "${FILENAME}"
cd ./matchbox-v${VERSION}-linux-${ARCH}
echo "Installing matchbox"
cp ${BIN} /usr/local/bin
echo "Installing get-flatcar"
cp ./scripts/get-flatcar /usr/local/bin
chmod +x /usr/local/bin/get-flatcar
adduser -D "${MATCHBOX_USER}"
mkdir -p "${ASSETS_DIR}"
mkdir -p "${TFTP_DIR}"
chown -R "${MATCHBOX_USER}:${MATCHBOX_USER}" "${MATCHBOX_DIR}"
chown -R "${MATCHBOX_USER}:${MATCHBOX_USER}" "${ASSETS_DIR}"
chown -R "${MATCHBOX_USER}:${MATCHBOX_USER}" "${TFTP_DIR}"
ls -lhaR ${ASSETS_DIR}
cp -rp ./scripts/tls /root
exit "${?}"

View File

@ -0,0 +1,17 @@
#!/bin/sh
#set -xeo pipefail
# Run the installer
setup-alpine -q
#yes | setup-alpine -e -f /root/install.conf
# Copy ssh keys
echo "Copy packer ssh key"
mount /dev/vg0/lv_root /mnt
cp -rp .ssh /mnt/root/
sync
umount /mnt
echo "Rebooting the host after install"
reboot -nf

View File

@ -0,0 +1,23 @@
#!/bin/sh
set -xeo pipefail
apk add --no-cache wget curl jq haveged ca-certificates rsyslog bash shadow
rc-update add haveged boot
rc-update add rsyslog boot
rc-update add sshd boot
# Generate root password
pass=$(openssl rand -base64 32 | tee -a .secret)
chmod 600 .secret
echo -e "${pass}\n${pass}" | passwd
# Remove expect package
# Prevent logs spamming like "process '/sbin/getty -L 0 ttyS0 vt100' (pid 2516) exited. Scheduling for restart."
# We don't need an access to ttyS0
sed -i 's@^\(ttyS0::respawn.*\)@#\1@' /etc/inittab
usermod --password $( echo "Cadoles;21" | openssl passwd -1 -stdin) root
sync

View File

@ -0,0 +1,102 @@
#!/usr/bin/env bash
#
# Generate all the configuration files
# Get all the values from the VLS_DIR
# Process each template from the TPL_DIR with this values
#
ENV_FILE=${ENV_FILE:-/var/run/one-context/one_env}
TPL_DIR="/usr/share/builder/templates"
VLS_DIR="/usr/share/builder/values"
CONFIG=""
if [ -f "${ENV_FILE}" ]; then
. "${ENV_FILE}"
fi
BTR="$(command -v btr)"
if [ "${?}" -ne 0 ]; then
echo "Warning: Nothing to do the templater is not installed"
exit 0
fi
if [ ! -e "${TPL_DIR}" ]; then
echo "Error: The template dir is missing (${TPL_DIR})"
exit 1
fi
if [ ! -e "${VLS_DIR}" ]; then
echo "Error: The template dir is missing (${VLS_DIR})"
exit 1
fi
jsonQuery() {
local data="${1}"
local query="${2}"
echo "${data}" | jq -cr "${query}"
}
# NAME: @jsonMerge
# AIM: Merge two json structures
# NOTES:
# The last one has the last word
# if you have the same key in A and B
# this keeps the value of the B structure.
# PARAMS:
# $1: original JSON Structure
# $2: updated JSON Structure
jsonMerge() {
local data="${1}"
local data2="${2}"
echo "${data} ${data2}" | jq -cr -s ".[0] * .[1]"
}
jsonUpdateVal() {
local json="${1}"
local key="${2}"
local value="${3}"
echo "${json}" | jq --arg a "${value}" "${key} = \$a"
}
getValues() {
local values=""
for file in $(find ${VLS_DIR} -name "*.json"); do
values="${values}$(cat ${file})"
done
if [ -n "${RAW_CONFIG}" ]; then
values="$(jsonMerge ${values} ${RAW_CONFIG})"
fi
for svc in $(echo ${values} | jq -cr '.Services|keys[]'); do
for key in $(echo ${values} | jq -cr ".Services.${svc}.Vars|keys[]"); do
ukey=${key^^}
vkeys="$(echo ${values} | jq -cr ".Services.${svc}.Vars.${key}|keys[]" 2>/dev/null)"
if [ ${?} -eq 0 ]; then
for var in $(echo ${values} | jq -cr ".Services.${svc}.Vars.${key}|keys[]"); do
uvar=${var^^}
val=$(eval echo "\$${ukey}_${uvar}")
if [ -n "${val}" ]; then
values=$(jsonUpdateVal "${values}" ".Services.${svc}.Vars.${key}.${var}" "${val}")
fi
done
else
values=$(jsonUpdateVal "${values}" ".Services.${svc}.Vars.${key}" "${!ukey}")
fi
done
done
echo ${values}
}
processTemplates() {
${BTR} -t ${TPL_DIR} -c "${1}"
}
VALUES=$(getValues)
file=$(mktemp)
echo "${VALUES}" > "${file}"
processTemplates "${file}"
rm -rf "${file}"

View File

@ -0,0 +1,21 @@
#!/usr/bin/env bash
ENV_FILE=${ENV_FILE:-/var/run/one-context/one_env}
# $TOKENTXT is available only through the env. file
# shellcheck disable=SC1090
if [ -f "${ENV_FILE}" ]; then
. "${ENV_FILE}"
fi
###
if [ -n "${K3S_ROLE}" ]; then
if [ "${K3S_ROLE}" = "server" ]; then
rc-update add dnsmasq default
service dnsmasq start
rc-update add k3s default
service k3s start
fi
fi

View File

@ -0,0 +1 @@
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDDph3zh6ojSvH44k13z9B6xj+Hargo3uzkxnYv5s5NI4yagNuBXEc3aS++KdocND+FtVfLK+iVE8qHo2bvmpMmVkqU6WU2apN7DfIP0QGLlSGeo+UOZ9hGeEDlgVO4AOnZKZ5kPGBEPZ84JXuE9CmhKfwEVCK8w3B8XQttA8alFl4A4/4F14x2w4njsSLY1H3b0qah7hgYKU5zHIGLg8Lxx+1BxGIF0l5n5m5rqAskRNaF+aYbs0CcWHv49bPK0sJJ0qPV2r2sq8BlzuZFHExnZRIxpsIXdce4Bm4rdlGi7tBmmurLk4OOtDkwvhD0LMaNJf10k6QLSmRUTVzgsYz/dmGxopbMtwwIXkwi014uSZgi8wAuznXx5I4j2TUGPZHOVf+1iw/yaxWlgTVOSoX7ZxyhDgW5cCgZZGNzU5UWe0vUuVTB+hfSMj50/Q6+Vi92/mDMbPhm4nBoVzD5DT15mB+yGyN45Ej61m0JzVUyZexfvVaffEug1/u5dnwilP0WGKr4i2OXxOXtvSdAs5rlZjvppZk6IxRCwXIcPwEFL97ZrQZAxlVS5Nh+ZnlSwTe3zfQhzHj1ao0AdCAHFPUEdoUPJhSb0OjyCvZ9XZ1KCkXhuhuN/3IUhuoWl4soNCeC3KmU/USx1wda438Exj0hM1mTyBZScDPGyD9nw78DGw== Philippe Caseiro

View File

@ -0,0 +1 @@
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDZxr8C81Dm5Zl2AtDzTVa8hFs04aV1z8ANrXYVHnLf7gEG4c1BI9iWbm94gVCQT4IvoKR5oZxjxGnx1a7VaX6h6dt33+p/s2IJiwG+9/DykPnImw+ALTcnMcyrwOYh68jnQIOGkYzK/VaHRzrvFNuoVWIU+FqfN+sW+bLQWi9v/K5oiup83xQBze6kjMEL2PT48bJwT/dQgP5cnTTEYwcOK/Yes1Cmb+VqjAs5B3uiHDoch10fy4b4duuALozPGhgoOfTLqe9Ekbt8PdIhUzGxFCw79W7IBA9vw79tYBy4B2et8Zb9sf+sMmxPINDkouYmfSnU0PjNjida7Tii2IEWbrb/qbSkRNcyIBmpGKz6VnSIvomv4FA9dGkOLYRyvTjAM6Shy5aiGV8F7T9hMxm3zGDjiVseyPVtMdSjM2SCx95uPCH5oSrj8M1OIjC2D+w3DsmTPFvTjA1gmKEYnXfFj82DvO+wDcbb6/DF2qS6y5rNpdnPWDb57iBqKeZISQ5x+h8arV0U3yItHoi7z4Cb51V29pdBE0xgFx5DE5akuPO3RC+BP0CK242HBdb94YXQCfmoQ1dV59mvu0ObAhP4CH/efOqONHXjTG9eurQyJWUr8yYO9DI7HkQHwvYDS7xuEO9yvs7gizm22FOTcxBPc4M/KFhPfnUs7Nyfw6I0Nw== vfebvre@cadoles.com

View File

@ -0,0 +1 @@
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOsoXFfQcqFp6+5QbB1o1ZpjCGeiPMM9aOK2DoZoMM/7 nicolas.melin@cnous.fr

View File

@ -0,0 +1 @@
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCwyKvtyfZibpHNDDsfg7N6PHPnv9AzA2PowGd7iqF6YRv6CgGPnUixWE791bmekr57TR1QwW58aSEPSQMfLBwo0OwZ7GXYbOb9Fdb6WHAUJHSyMNsFvakgjq0g7TERMw3UksiYpUBCLgvWhF5jNjKsXgK3LyMUVqJs9KlUBt6elxy3CWoMYaWVJTQwXqLEbvr7W9F1rb9PQi80vxcSZXgk5XPPZH4vh7oN7GLB5UwaTFRh4lcup0xnV938gSgLxttPg4t5li5cmvXXMgtCrIDj7JPh9Cic+UXo80cV14nOpX23nuu408Veys/4p5tYiYFCg6NnUtW2dJrfyga9W1h6nc/6JaY8aXdoE+pi7lL7XrMvJPQxVYdwA9rPUBSZAIOmZQQx2aKFMsXocyVXQDzLQyg8lAF9gbMkjXH7DluXd+s0OAdijW9VFxhjutojaC76vhH+ZqSq511vdCTuq+6juW/By/pYQRtKiL1jJqfQoC+JU8RmOVOml5ciT7I0OM/0dakdIMYINX1FaRuSYb8wm0k3pKh+PGmMigja5lY7Bv8M89gRRw+8bJ42h5XkR0Jd04Wagd9eFXvaLa9OdarwF5rE2d6NM5Gfr2wJ4XuDMC7C3r/b6U3sZr6CWvQ5URrXS9OLtZG09DtEGIIuMcu0pgqclitVDi06Ffz5dZMnVQ== olivier.perrot@cnous.fr

View File

@ -0,0 +1,23 @@
#!/bin/sh
set -ex
TOOL_DIR="${1:-/usr/local/bin}"
TOOL_USER="${2:-root}"
TOOL_GROUP="${3:-root}"
ATTACHMENT_URL="https://forge.cadoles.com/attachments/"
installTool() {
NAME="${1}"
URL="${2}"
curl -k -o ${TOOL_DIR}/${NAME} ${URL}
chmod +x ${TOOL_DIR}/${NAME}
}
apk add curl
# Install the templater
installTool "tpr" "${ATTACHMENT_URL}242b3cba-8d07-4b89-80ab-7c12253a8524"
# Install the bootstrapper
installTool "btr" "${ATTACHMENT_URL}e8442b2a-2065-4282-b4a4-648681fa044c"

View File

@ -0,0 +1,27 @@
#!/bin/sh
#
# Quick and dirty script to add disk space
# It creates a new PV (with the additional disk),
# a new VG and a new LV with 100% disk space
# The names and devices are provided with env variables:
# - PV_DEVICE : The /dev/xxx device
# - VG_NAME: The new vg name
# - LV_NAME: The new lv name
# - LV_MTP: The mount point for the FS created on the LV
# - LV_FS: The fstype of the new FS
#
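# Example invocation (mirrors the harbor recipe's environment_vars):
#   PV_DEVICE=/dev/sdb VG_NAME=data LV_NAME=harbor-data \
#   LV_MTP=/srv/harbor LV_FS=ext4 sh additionnal-disk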
if [ -e ${PV_DEVICE} ]; then
pvcreate ${PV_DEVICE}
vgcreate ${VG_NAME} ${PV_DEVICE}
lvcreate -Ay -l 100%FREE -n ${LV_NAME} ${VG_NAME}
mkfs.${LV_FS} /dev/${VG_NAME}/${LV_NAME}
if [ ! -d ${LV_MTP} ]; then
mkdir -p ${LV_MTP}
fi
mount /dev/${VG_NAME}/${LV_NAME} ${LV_MTP}
echo "/dev/${VG_NAME}/${LV_NAME} ${LV_MTP} ${LV_FS} rw,relatime 0 1" >> /etc/fstab
else
echo "${PV_DEVICE} is missing"
exit 3
fi

recipes/nuo/sources.pkr.hcl Normal file
View File

@ -0,0 +1,135 @@
source qemu "nuo" {
cpus = 1
memory = "${local.memory}"
accelerator = "kvm"
vnc_bind_address = "0.0.0.0"
headless = true
# Serve the `http` directory via HTTP; used to hand the Alpine installer its answer file.
http_port_min = 9990
http_port_max = 9999
# SSH ports to redirect to the VM being built
host_port_min = 2222
host_port_max = 2229
# This user is configured in the answer file.
ssh_username = "${local.ssh_user}"
ssh_private_key_file = data.sshkey.install.private_key_path
ssh_wait_timeout = "1000s"
shutdown_command = "/sbin/poweroff"
# Builds a compact image
disk_compression = true
disk_discard = "unmap"
skip_compaction = false
disk_detect_zeroes = "unmap"
format = "qcow2"
boot_wait = "5s"
}
source "vmware-iso" "nuo" {
cpus = 1
disk_type_id = 0
memory = "${local.memory}"
vnc_bind_address = "0.0.0.0"
headless = true
# Serve the `http` directory via HTTP; used to hand the Alpine installer its answer file.
http_port_min = 9990
http_port_max = 9999
# SSH ports to redirect to the VM being built
#host_port_min = 2222
#host_port_max = 2229
# This user is configured in the answer file.
ssh_username = "${local.ssh_user}"
ssh_private_key_file = data.sshkey.install.private_key_path
ssh_wait_timeout = "1000s"
shutdown_command = "/sbin/poweroff"
# Builds a compact image
#disk_compression = true
#disk_discard = "unmap"
skip_compaction = false
#disk_detect_zeroes = "unmap"
format = "ova"
boot_wait = "5s"
}
source "vmware-vmx" "nuo" {
disk_type_id = 0
vnc_bind_address = "0.0.0.0"
headless = true
# Serve the `http` directory via HTTP; used to hand the Alpine installer its answer file.
http_port_min = 9990
http_port_max = 9999
# SSH ports to redirect to the VM being built
#host_port_min = 2222
#host_port_max = 2229
# This user is configured in the answer file.
ssh_username = "${local.ssh_user}"
ssh_private_key_file = data.sshkey.install.private_key_path
ssh_wait_timeout = "1000s"
shutdown_command = "/sbin/poweroff"
# Builds a compact image
#disk_compression = true
#disk_discard = "unmap"
skip_compaction = false
#disk_detect_zeroes = "unmap"
format = "ova"
boot_wait = "5s"
}
source "virtualbox-iso" "nuo" {
cpus = 1
memory = "${local.memory}"
vrdp_bind_address = "0.0.0.0"
nic_type = "virtio"
headless = false
# Serve the `http` directory via HTTP; used to hand the Alpine installer its answer file.
http_bind_address = "0.0.0.0"
http_port_min = 9290
http_port_max = 9299
# SSH ports to redirect to the VM being built
#host_port_min = 2222
#host_port_max = 2229
# This user is configured in the answer file.
ssh_username = "${local.ssh_user}"
ssh_private_key_file = data.sshkey.install.private_key_path
ssh_wait_timeout = "1000s"
ssh_file_transfer_method = "sftp"
shutdown_command = "/sbin/poweroff"
# Builds a compact image
#disk_compression = true
#disk_discard = "unmap"
#skip_compaction = false
#disk_detect_zeroes = "unmap"
format = "ova"
boot_wait = "5s"
}

View File

@ -0,0 +1,47 @@
#alpine-config
user:
name: ${user}
password: ${password}
chpasswd:
expire: False
apk:
repositories:
- base_url: https://mirrors.ircam.fr/pub/alpine/
repos: [ "main", "community" ]
package_update: true
packages:
- tmux
- vim
- openssh-server
- openssh-sftp-server
users:
- name: root
lock-passwd: false
passwd: ${root_password}
ssh_authorized_keys:
%{ for sk in ssh_keys ~}
- ${sk}
%{ endfor ~}
%{ if files != [] ~}
write_files:
%{ for fl in files ~}
- path: ${fl.path}
owner: ${fl.owner}:${fl.group}
permissions: '0${fl.permissions}'
content: |
%{ for li in fl.content ~}
${li}
%{ endfor ~}
%{ endfor ~}
%{ endif ~}
%{ if runcmd != [] ~}
# Work around network interface down after boot
runcmd:
%{ for cmd in runcmd ~}
- ${cmd}
%{ endfor ~}
%{ endif ~}

View File

@ -0,0 +1,6 @@
# /etc/conf.d/chronyd
CFGFILE="/etc/chrony/chrony.conf"
FAST_STARTUP=yes
ARGS=""
# vrf e.g 'vrf-mgmt'
#vrf=""

View File

@ -0,0 +1,337 @@
# Global OpenRC configuration settings
# ${Vars.RootlessDocker}
# Set to "YES" if you want the rc system to try and start services
# in parallel for a slight speed improvement. When running in parallel we
# prefix the service output with its name as the output will get
# jumbled up.
# WARNING: whilst we have improved parallel, it can still potentially lock
# the boot process. Don't file bugs about this unless you can supply
# patches that fix it without breaking other things!
#rc_parallel="NO"
# Set rc_interactive to "YES" and you'll be able to press the I key during
# boot so you can choose to start specific services. Set to "NO" to disable
# this feature. This feature is automatically disabled if rc_parallel is
# set to YES.
#rc_interactive="YES"
# If we need to drop to a shell, you can specify it here.
# If not specified we use $SHELL, otherwise the one specified in /etc/passwd,
# otherwise /bin/sh
# Linux users could specify /sbin/sulogin
#rc_shell=/bin/sh
# Do we allow any started service in the runlevel to satisfy the dependency
# or do we want all of them regardless of state? For example, if net.eth0
# and net.eth1 are in the default runlevel then with rc_depend_strict="NO"
# both will be started, but services that depend on 'net' will work if either
# one comes up. With rc_depend_strict="YES" we would require them both to
# come up.
#rc_depend_strict="YES"
# rc_hotplug controls which services we allow to be hotplugged.
# A hotplugged service is one started by a dynamic dev manager when a matching
# hardware device is found.
# Hotplugged services appear in the "hotplugged" runlevel.
# If rc_hotplug is set to any value, we compare the name of this service
# to every pattern in the value, from left to right, and we allow the
# service to be hotplugged if it matches a pattern, or if it matches no
# patterns. Patterns can include shell wildcards.
# To disable services from being hotplugged, prefix patterns with "!".
#If rc_hotplug is not set or is empty, all hotplugging is disabled.
# Example - rc_hotplug="net.wlan !net.*"
# This allows net.wlan and any service not matching net.* to be hotplugged.
# Example - rc_hotplug="!net.*"
# This allows services that do not match "net.*" to be hotplugged.
# rc_logger launches a logging daemon to log the entire rc process to
# /var/log/rc.log
# NOTE: Linux systems require the devfs service to be started before
# logging can take place and as such cannot log the sysinit runlevel.
#rc_logger="NO"
# Through rc_log_path you can specify a custom log file.
# The default value is: /var/log/rc.log
#rc_log_path="/var/log/rc.log"
# If you want verbose output for OpenRC, set this to yes. If you want
# verbose output for service foo only, set it to yes in /etc/conf.d/foo.
#rc_verbose=no
# By default we filter the environment for our running scripts. To allow other
# variables through, add them here. Use a * to allow all variables through.
#rc_env_allow="VAR1 VAR2"
# By default we assume that all daemons will start correctly.
# However, some do not - a classic example is that they fork and return 0 AND
# then child barfs on a configuration error. Or the daemon has a bug and the
# child crashes. You can set the number of milliseconds start-stop-daemon
# waits to check that the daemon is still running after starting here.
# The default is 0 - no checking.
#rc_start_wait=100
# rc_nostop is a list of services which will not stop when changing runlevels.
# This still allows the service itself to be stopped when called directly.
#rc_nostop=""
# rc will attempt to start crashed services by default.
# However, it will not stop them by default as that could bring down other
# critical services.
#rc_crashed_stop=NO
#rc_crashed_start=YES
# Set rc_nocolor to yes if you do not want colors displayed in OpenRC
# output.
#rc_nocolor=NO
##############################################################################
# MISC CONFIGURATION VARIABLES
# These variables are shared between many init scripts
# Set unicode to NO to turn off unicode support for keyboards and screens.
#unicode="YES"
# This is how long fuser should wait for a remote server to respond. The
# default is 60 seconds, but it can be adjusted here.
#rc_fuser_timeout=60
# Below is the default list of network fstypes.
#
# afs ceph cifs coda davfs fuse fuse.glusterfs fuse.sshfs gfs glusterfs lustre
# ncpfs nfs nfs4 ocfs2 shfs smbfs
#
# If you would like to add to this list, you can do so by adding your
# own fstypes to the following variable.
#extra_net_fs_list=""
##############################################################################
# SERVICE CONFIGURATION VARIABLES
# These variables are documented here, but should be configured in
# /etc/conf.d/foo for service foo and NOT enabled here unless you
# really want them to work on a global basis.
# If your service has characters in its name which are not legal in
# shell variable names and you configure the variables for it in this
# file, those characters should be replaced with underscores in the
# variable names as shown below.
# Some daemons are started and stopped via start-stop-daemon.
# We can set some things on a per service basis, like the nicelevel.
# These need to be exported
#export SSD_NICELEVEL="0"
# Or the ionice level. The format is class[:data] , just like the
# --ionice start-stop-daemon parameter.
#export SSD_IONICELEVEL="0:0"
# Or the OOM score adjustment.
#export SSD_OOM_SCORE_ADJ="0"
# Pass ulimit parameters
# If you are using bash in POSIX mode for your shell, note that the
# ulimit command uses a block size of 512 bytes for the -c and -f
# options
#rc_ulimit="-u 30"
# It's possible to define extra dependencies for services like so
#rc_config="/etc/foo"
#rc_need="openvpn"
#rc_use="net.eth0"
#rc_after="clock"
#rc_before="local"
#rc_provide="!net"
# You can also enable the above commands here for each service. Below is an
# example for service foo.
#rc_foo_config="/etc/foo"
#rc_foo_need="openvpn"
#rc_foo_after="clock"
# Below is an example for service foo-bar. Note that the '-' is illegal
# in a shell variable name, so we convert it to an underscore.
# example for service foo-bar.
#rc_foo_bar_config="/etc/foo-bar"
#rc_foo_bar_need="openvpn"
#rc_foo_bar_after="clock"
# You can also remove dependencies.
# This is mainly used for saying which services do NOT provide net.
#rc_net_tap0_provide="!net"
# This is the subsystem type.
# It is used to match against keywords set by the keyword call in the
# depend function of service scripts.
#
# It should be set to the value representing the environment this file is
# PRESENTLY in, not the virtualization the environment is capable of.
# If it is commented out, automatic detection will be used.
#
# The list below shows all possible settings as well as the host
# operating systems where they can be used and autodetected.
#
# "" - nothing special
# "docker" - Docker container manager (Linux)
# "jail" - Jail (DragonflyBSD or FreeBSD)
# "lxc" - Linux Containers
# "openvz" - Linux OpenVZ
# "prefix" - Prefix
# "rkt" - CoreOS container management system (Linux)
# "subhurd" - Hurd subhurds (to be checked)
# "systemd-nspawn" - Container created by systemd-nspawn (Linux)
# "uml" - Usermode Linux
# "vserver" - Linux vserver
# "xen0" - Xen0 Domain (Linux and NetBSD)
# "xenU" - XenU Domain (Linux and NetBSD)
#rc_sys=""
# if you use openrc-init, which is currently only available on Linux,
# this is the default runlevel to activate after "sysinit" and "boot"
# when booting.
#rc_default_runlevel="default"
# on Linux and Hurd, this is the number of ttys allocated for logins
# It is used in the consolefont, keymaps, numlock and termencoding
# service scripts.
rc_tty_number=12
##############################################################################
# LINUX CGROUPS RESOURCE MANAGEMENT
# This sets the mode used to mount cgroups.
# "hybrid" mounts cgroups version 2 on /sys/fs/cgroup/unified and
# cgroups version 1 on /sys/fs/cgroup.
# "legacy" mounts cgroups version 1 on /sys/fs/cgroup
# "unified" mounts cgroups version 2 on /sys/fs/cgroup
rc_cgroup_mode="hybrid"
# This is a list of controllers which should be enabled for cgroups version 2
# when hybrid mode is being used.
# Controllers listed here will not be available for cgroups version 1.
rc_cgroup_controllers="cpuset cpu io memory hugetlb openrc pids"
# This variable contains the cgroups version 2 settings for your services.
# If this is set in this file, the settings will apply to all services.
# If you want different settings for each service, place the settings in
# /etc/conf.d/foo for service foo.
# The format is to specify the setting and value followed by a newline.
# Multiple settings and values can be specified.
# For example, you would use this to set the maximum memory and maximum
# number of pids for a service.
#rc_cgroup_settings="
#memory.max 10485760
#pids.max max
#"
#
# For more information about the adjustments that can be made with
# cgroups version 2, see Documentation/cgroups-v2.txt in the linux kernel
# source tree.
#rc_cgroup_settings=""
# This switch controls whether or not cgroups version 1 controllers are
# individually mounted under
# /sys/fs/cgroup in hybrid or legacy mode.
rc_controller_cgroups="YES"
# The following setting turns on the memory.use_hierarchy setting in the
# root memory cgroup for cgroups v1.
# It must be set to yes in this file if you want this functionality.
#rc_cgroup_memory_use_hierarchy="NO"
# The following settings allow you to set up values for the cgroups version 1
# controllers for your services.
# They can be set in this file; however, if you do this, the settings
# will apply to all of your services.
# If you want different settings for each service, place the settings in
# /etc/conf.d/foo for service foo.
# The format is to specify the names of the settings followed by their
# values. Each variable can hold multiple settings.
# For example, you would use this to set the cpu.shares setting in the
# cpu controller to 512 for your service.
# rc_cgroup_cpu="
# cpu.shares 512
# "
#
# For more information about the adjustments that can be made with
# cgroups version 1, see Documentation/cgroups-v1/* in the linux kernel
# source tree.
# Set the blkio controller settings for this service.
#rc_cgroup_blkio=""
# Set the cpu controller settings for this service.
#rc_cgroup_cpu=""
# Add this service to the cpuacct controller (any value means yes).
#rc_cgroup_cpuacct=""
# Set the cpuset controller settings for this service.
#rc_cgroup_cpuset=""
# Set the devices controller settings for this service.
#rc_cgroup_devices=""
# Set the hugetlb controller settings for this service.
#rc_cgroup_hugetlb=""
# Set the memory controller settings for this service.
#rc_cgroup_memory=""
# Set the net_cls controller settings for this service.
#rc_cgroup_net_cls=""
# Set the net_prio controller settings for this service.
#rc_cgroup_net_prio=""
# Set the pids controller settings for this service.
#rc_cgroup_pids=""
# Set this to YES if you want all of the processes in a service's cgroup
# killed when the service is stopped or restarted.
# Be aware that setting this to yes means all of a service's
# child processes will be killed. Keep this in mind if you set this to
# yes here instead of for the individual services in
# /etc/conf.d/<service>.
# To perform this cleanup manually for a stopped service, you can
# execute cgroup_cleanup with /etc/init.d/<service> cgroup_cleanup or
# rc-service <service> cgroup_cleanup.
# If the kernel includes support for cgroup2's cgroup.kill, this is used
# to reliably teardown the cgroup.
# If this fails, the process followed in this cleanup is the following:
# 1. send stopsig (sigterm if it isn't set) to all processes left in the
# cgroup immediately followed by sigcont.
# 2. Send sighup to all processes in the cgroup if rc_send_sighup is
# yes.
# 3. delay for rc_timeout_stopsec seconds.
# 4. send sigkill to all processes in the cgroup unless disabled by
# setting rc_send_sigkill to no.
# rc_cgroup_cleanup="NO"
# If this is yes, we will send sighup to the processes in the cgroup
# immediately after stopsig and sigcont.
#rc_send_sighup="NO"
# This is the amount of time in seconds that we delay after sending sigcont
# and optionally sighup, before we optionally send sigkill to all
# processes in the cgroup.
# The default is 90 seconds.
#rc_timeout_stopsec="90"
# If this is set to no, we do not send sigkill to all processes in the
# cgroup.
#rc_send_sigkill="YES"
##############################################################################
# SUPERVISE DAEMON CONFIGURATION VARIABLES
# These variables set more reasonable defaults for supervise-daemon(8).
# They may be overridden on a per-service basis.
# Wait this number of seconds before restarting a daemon after it crashes.
respawn_delay=2
# Sets the maximum number of times a daemon will be respawned during a respawn
# period. If a daemon dies more than this number of times during a respawn
# period, supervise-daemon(8) will give up trying to respawn it and exit.
# 0 means unlimited.
respawn_max=5
# Sets the length in seconds of a respawn period.
respawn_period=1800
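# As an illustrative sketch (the service name "foo" is hypothetical), these
# defaults may be overridden for a single service in /etc/conf.d/foo:
#respawn_delay=5
#respawn_max=0
#respawn_period=3600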

View File

@ -0,0 +1,3 @@
%{ if Vars.RootlessDocker }
docker:231072:65536
%{ endif }

View File

@ -0,0 +1,265 @@
# Configuration file of Harbor
# The IP address or hostname to access admin UI and registry service.
# DO NOT use localhost or 127.0.0.1, because Harbor needs to be accessed by external clients.
hostname: ${Vars.HarborDomain}
# http related config
http:
# port for http, default is 80. If https is enabled, this port redirects to the https port
port: ${Vars.HarborHTTPPort}
# https related config
https:
# https port for harbor, default is 443
port: ${Vars.HarborHTTPSPort}
# The path of cert and key files for nginx
certificate: ${Vars.HarborSSLCert}
private_key: ${Vars.HarborSSLPrivKey}
# # Uncomment the following to enable tls communication between all harbor components
# internal_tls:
# # set enabled to true means internal tls is enabled
# enabled: true
# # put your cert and key files on dir
# dir: /etc/harbor/tls/internal
# Uncomment external_url if you want to enable external proxy
# And when it is enabled, the hostname will no longer be used
# external_url: https://reg.mydomain.com:8433
# The initial password of Harbor admin
# It only works the first time Harbor is installed.
# Remember to change the admin password from the UI after launching Harbor.
harbor_admin_password: ${Vars.HarborAdminPassword}
# Harbor DB configuration
database:
# The password for the root user of Harbor DB. Change this before any production use.
password: ${Vars.HarborDBPassword}
# The maximum number of connections in the idle connection pool. If it <=0, no idle connections are retained.
max_idle_conns: 50
# The maximum number of open connections to the database. If it <= 0, then there is no limit on the number of open connections.
# Note: the default number of connections is 100 for postgres.
max_open_conns: 200
# The default data volume
data_volume: /srv/harbor/data
# Harbor Storage settings by default is using /data dir on local filesystem
# Uncomment the storage_service setting if you want to use external storage
# storage_service:
# # ca_bundle is the path to the custom root ca certificate, which will be injected into the truststore
# # of registry's and chart repository's containers. This is usually needed when the user hosts an internal storage with a self-signed certificate.
# ca_bundle:
# # storage backend, default is filesystem, options include filesystem, azure, gcs, s3, swift and oss
# # for more info about this configuration please refer https://docs.docker.com/registry/configuration/
# filesystem:
# maxthreads: 100
# # set disable to true when you want to disable registry redirect
# redirect:
# disabled: false
# Trivy configuration
#
# Trivy DB contains vulnerability information from NVD, Red Hat, and many other upstream vulnerability databases.
# It is downloaded by Trivy from the GitHub release page https://github.com/aquasecurity/trivy-db/releases and cached
# in the local file system. In addition, the database contains the update timestamp so Trivy can detect whether it
# should download a newer version from the Internet or use the cached one. Currently, the database is updated every
# 12 hours and published as a new release to GitHub.
trivy:
# ignoreUnfixed The flag to display only fixed vulnerabilities
ignore_unfixed: false
# skipUpdate The flag to enable or disable Trivy DB downloads from GitHub
#
# You might want to enable this flag in test or CI/CD environments to avoid GitHub rate limiting issues.
# If the flag is enabled you have to download the `trivy-offline.tar.gz` archive manually, extract `trivy.db` and
# `metadata.json` files and mount them in the `/home/scanner/.cache/trivy/db` path.
skip_update: false
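# As a hedged sketch of that manual preparation (the release tag and target
# paths are illustrative, not authoritative):
#   wget https://github.com/aquasecurity/trivy-db/releases/download/<tag>/trivy-offline.tar.gz
#   tar -xzf trivy-offline.tar.gz trivy.db metadata.json
#   mv trivy.db metadata.json /home/scanner/.cache/trivy/db/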
#
# The offline_scan option prevents Trivy from sending API requests to identify dependencies.
# Scanning JAR files and pom.xml may require Internet access for better detection, but this option tries to avoid it.
# For example, the offline mode will not try to resolve transitive dependencies in pom.xml when the dependency doesn't
# exist in the local repositories. This means fewer vulnerabilities might be
# detected in offline mode. It works as long as all the dependencies are available locally.
# This option doesn't affect DB download. You need to specify "skip-update" as well as "offline-scan" in an air-gapped environment.
offline_scan: false
#
# insecure The flag to skip verifying registry certificate
insecure: false
# github_token The GitHub access token to download Trivy DB
#
# Anonymous downloads from GitHub are subject to the limit of 60 requests per hour. Normally such rate limit is enough
# for production operations. If, for any reason, it's not enough, you could increase the rate limit to 5000
# requests per hour by specifying the GitHub access token. For more details on GitHub rate limiting please consult
# https://developer.github.com/v3/#rate-limiting
#
# You can create a GitHub token by following the instructions in
# https://help.github.com/en/github/authenticating-to-github/creating-a-personal-access-token-for-the-command-line
#
# github_token: xxx
jobservice:
# Maximum number of job workers in job service
max_job_workers: 10
logger_sweeper_duration: 300
notification:
# Maximum retry count for webhook job
webhook_job_max_retry: 10
webhook_job_http_client_timeout: 300
chart:
# Set the value of absolute_url to enabled to enable absolute URLs in chart
absolute_url: disabled
# Log configurations
log:
# options are debug, info, warning, error, fatal
level: info
# configs for logs in local storage
local:
# Log files are rotated log_rotate_count times before being removed. If count is 0, old versions are removed rather than rotated.
rotate_count: 50
# Log files are rotated only if they grow bigger than log_rotate_size bytes. If size is followed by k, the size is assumed to be in kilobytes.
# If the M is used, the size is in megabytes, and if G is used, the size is in gigabytes. So size 100, size 100k, size 100M and size 100G
# are all valid.
rotate_size: 200M
# The directory on your host that stores the logs
location: /var/log/harbor
# Uncomment following lines to enable external syslog endpoint.
# external_endpoint:
# # protocol used to transmit logs to the external endpoint, options are tcp or udp
# protocol: tcp
# # The host of external endpoint
# host: localhost
# # Port of external endpoint
# port: 5140
#This attribute is for migrator to detect the version of the .cfg file, DO NOT MODIFY!
_version: 2.6.0
# Uncomment external_database if using external database.
# external_database:
# harbor:
# host: harbor_db_host
# port: harbor_db_port
# db_name: harbor_db_name
# username: harbor_db_username
# password: harbor_db_password
# ssl_mode: disable
# max_idle_conns: 2
# max_open_conns: 0
# notary_signer:
# host: notary_signer_db_host
# port: notary_signer_db_port
# db_name: notary_signer_db_name
# username: notary_signer_db_username
# password: notary_signer_db_password
# ssl_mode: disable
# notary_server:
# host: notary_server_db_host
# port: notary_server_db_port
# db_name: notary_server_db_name
# username: notary_server_db_username
# password: notary_server_db_password
# ssl_mode: disable
# Uncomment external_redis if using external Redis server
# external_redis:
# # support redis, redis+sentinel
# # host for redis: <host_redis>:<port_redis>
# # host for redis+sentinel:
# # <host_sentinel1>:<port_sentinel1>,<host_sentinel2>:<port_sentinel2>,<host_sentinel3>:<port_sentinel3>
# host: redis:6379
# password:
# # sentinel_master_set must be set to support redis+sentinel
# #sentinel_master_set:
# # db_index 0 is for core, it's unchangeable
# registry_db_index: 1
# jobservice_db_index: 2
# chartmuseum_db_index: 3
# trivy_db_index: 5
# idle_timeout_seconds: 30
# Uncomment uaa for trusting the certificate of uaa instance that is hosted via self-signed cert.
# uaa:
# ca_file: /path/to/ca
# Global proxy
# Config http proxy for components, e.g. http://my.proxy.com:3128
# Components don't need to connect to each other via the http proxy.
# Remove a component from the `components` array if you want to disable
# the proxy for it. If you want to use the proxy for replication, you MUST
# enable the proxy for core and jobservice, and set `http_proxy` and `https_proxy`.
# Add a domain to the `no_proxy` field when you want to disable the proxy
# for some special registry.
proxy:
http_proxy:
https_proxy:
no_proxy:
components:
- core
- jobservice
- notary
- trivy
metric:
enabled: false
port: 9090
path: /metrics
# Trace related config
# Only one trace provider (jaeger or otel) can be enabled at a time,
# and when using jaeger as the provider, it can only be enabled in agent mode or collector mode.
# If using jaeger collector mode, uncomment endpoint and uncomment username, password if needed.
# If using jaeger agent mode, uncomment agent_host and agent_port.
# trace:
# enabled: true
# # set sample_rate to 1 to sample 100% of trace data; set it to 0.5 to sample 50% of trace data, and so forth
# sample_rate: 1
# # # namespace used to differentiate different harbor services
# # namespace:
# # # attributes is a key/value dict containing user-defined attributes used to initialize the trace provider
# # attributes:
# # application: harbor
# # # jaeger should be 1.26 or newer.
# # jaeger:
# # endpoint: http://hostname:14268/api/traces
# # username:
# # password:
# # agent_host: hostname
# # # export trace data by jaeger.thrift in compact mode
# # agent_port: 6831
# # otel:
# # endpoint: hostname:4318
# # url_path: /v1/traces
# # compression: false
# # insecure: true
# # timeout: 10s
# Enable purging of _upload directories
upload_purging:
enabled: true
# remove files in _upload directories that have existed longer than the given age; the default is one week.
age: 168h
# the interval of the purge operations
interval: 24h
dryrun: false
# cache layer configurations
# If this feature is enabled, Harbor caches the resources
# `project/project_metadata/repository/artifact/manifest` in redis,
# which especially helps to improve the performance of highly concurrent
# manifest pulling.
# NOTICE
# If you are deploying Harbor in HA mode, make sure that all the harbor
# instances have the same behaviour, all with caching enabled or disabled,
# otherwise it can lead to potential data inconsistency.
cache:
# not enabled by default
enabled: false
# keep cache for one day by default
expire_hours: 24

View File

@ -0,0 +1,47 @@
# Example answer file for setup-alpine script
# If you don't want to use a certain option, then comment it out
# Use French layout with French variant
KEYMAPOPTS="fr fr"
# Set the hostname from the template variable
HOSTNAMEOPTS="-n ${hostname}"
# Contents of /etc/network/interfaces
INTERFACESOPTS="auto lo
iface lo inet loopback
auto eth0
iface eth0 inet dhcp
hostname ${hostname}
"
# Search domain and public nameserver (left empty here)
# ex: -d example.com 1.1.1.1"
DNSOPTS=""
# Set timezone to Europe/Paris
TIMEZONEOPTS="-z Europe/Paris"
# set http/ftp proxy
PROXYOPTS="none"
# Pick a random mirror and enable the community repository
APKREPOSOPTS="-r -c"
# Install OpenSSH and use the prepared authorized_keys file
SSHDOPTS="-c openssh -k /root/.ssh/authorized_keys"
# Use openntpd
NTPOPTS="-c openntpd"
# Install in sys mode on ${disk_device} (with LVM)
DISKOPTS="-L -m sys ${disk_device}"
USEROPTS="-a -g 'netdev' ${user}"
# Setup in /media/vda1
# LBUOPTS="/media/vda1"
# APKCACHEOPTS="/media/vda1/cache"

View File

@ -0,0 +1,8 @@
# k3s options
export PATH="/usr/libexec/cni/:$PATH"
K3S_EXEC="server"
%{ if Vars.DeployTraefik }
K3S_OPTS=""
%{ else }
K3S_OPTS="--disable traefik"
%{ endif }

View File

@ -0,0 +1,40 @@
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
name: ${Vars.Cluster.Name}
networking:
podSubnet: "${Vars.Cluster.PodSubNet}"
serviceSubnet: "${Vars.Cluster.ServieSubNet}"
nodes:
%{ for nd in Vars.Cluster.Nodes }
- role: ${nd.Role}
image: kindest/node:v${Vars.Cluster.Version}
%{ if nd.Role == "control-plane"}
kubeadmConfigPatches:
- |
kind: InitConfiguration
%{ if Vars.Cluster.IngressReady }
nodeRegistration:
kubeletExtraArgs:
node-labels: "ingress-ready=true"
%{ endif }
extraPortMappings:
- containerPort: 31000
hostPort: 31000
listenAddress: "0.0.0.0" # Optional, defaults to "0.0.0.0"
- containerPort: 80
hostPort: 8080
listenAddress: "0.0.0.0" # Optional, defaults to "0.0.0.0"
%{ if Vars.Cluster.IngressReady }
labels:
ingress-ready: true
%{ endif }
%{ endif }
%{ if nd.Role == "worker" }
kubeadmConfigPatches:
- |
kind: JoinConfiguration
nodeRegistration:
kubeletExtraArgs:
system-reserved: memory=2Gi
%{ endif }
%{ endfor ~}

View File

@ -0,0 +1 @@
command_args="-address 0.0.0.0:${Vars.MatchBox.HTTPPort} -rpc-address 0.0.0.0:${Vars.MatchBox.gRPCPort} -log-level ${Vars.MatchBox.LogLevel}"

View File

@ -0,0 +1,4 @@
${Vars.NIC[0].IP} ${Vars.Set.Hostname}
%{ if Vars.MatchBox.Hostname != "" }
${Vars.NIC[0].IP} ${Vars.MatchBox.Hostname}
%{ endif }

View File

@ -0,0 +1,60 @@
log-queries
log-dhcp
#port=0
listen-address=0.0.0.0
interface=${Vars.PXE.ListenInterface}
no-resolv
domain-needed
bogus-priv
expand-hosts
server=${Vars.ETH0.DNS}
strict-order
addn-hosts=/etc/dnsmasq-hosts.conf
domain=${Vars.PXE.DNSDomain}
local=/${Vars.PXE.DNSDomain}/
localise-queries
%{ if Vars.PXE.DHCPMode == "proxy" }
#dhcp-no-override
dhcp-range=${Vars.ETH0.IP},proxy
%{ else }
dhcp-range=${Vars.PXE.DHCPRangeStart},${Vars.PXE.DHCPRangeEnd},${Vars.PXE.DHCPLeaseDuration}
dhcp-option=option:router,${Vars.ETH0.GATEWAY}
%{ endif }
dhcp-option=option:dns-server,${Vars.ETH0.IP}
dhcp-option=option:domain-name,${Vars.PXE.DNSDomain}
# TFTP Configuration
enable-tftp
tftp-root="${Vars.PXE.TFTPRoot}"
pxe-prompt="${Vars.PXE.GreetingMessage}",${Vars.PXE.DelayTime}
# Based on logic in https://gist.github.com/robinsmidsrod/4008017
# iPXE sends a 175 option, checking suboptions
dhcp-match=set:ipxe-http,175,19
dhcp-match=set:ipxe-https,175,20
dhcp-match=set:ipxe-menu,175,39
# pcbios specific
dhcp-match=set:ipxe-pxe,175,33
dhcp-match=set:ipxe-bzimage,175,24
dhcp-match=set:ipxe-iscsi,175,17
# efi specific
dhcp-match=set:ipxe-efi,175,36
# combination
# set ipxe-ok tag if we have correct combination
# http && menu && iscsi ((pxe && bzimage) || efi)
tag-if=set:ipxe-ok,tag:ipxe-http,tag:ipxe-menu,tag:ipxe-iscsi,tag:ipxe-pxe,tag:ipxe-bzimage
tag-if=set:ipxe-ok,tag:ipxe-http,tag:ipxe-menu,tag:ipxe-iscsi,tag:ipxe-efi
## Load different PXE boot image depending on client architecture (when running as a proxy DHCP)
pxe-service=tag:!ipxe-ok, x86PC, "Legacy boot PXE chainload to iPXE", undionly.kpxe
pxe-service=tag:!ipxe-ok, BC_EFI, "UEFI32 boot chainload to iPXE", snponly.efi
pxe-service=tag:!ipxe-ok, X86-64_EFI, "UEFI64 boot chainload to iPXE", snponly.efi
dhcp-userclass=set:ipxe,iPXE
dhcp-boot=tag:ipxe-ok,http://${Vars.ETH0.IP}:${Vars.MatchBox.HTTPPort}/boot.ipxe,,${Vars.ETH0.IP}
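# Hedged tip: the rendered file can be syntax-checked with dnsmasq's test
# mode before starting the service (path is illustrative):
#   dnsmasq --test --conf-file=/etc/dnsmasq.conf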

View File

@ -0,0 +1,28 @@
#!/sbin/openrc-run
name=$RC_SVCNAME
command="/usr/local/bin/$RC_SVCNAME"
command_user="$RC_SVCNAME"
pidfile="/run/$RC_SVCNAME/$RC_SVCNAME.pid"
start_stop_daemon_args="--start -b"
command_args="$command_args"
command_background="yes"
depend() {
need net
}
start_pre() {
checkpath --directory --owner $command_user:$command_user --mode 0775 \
/run/$RC_SVCNAME /var/log/$RC_SVCNAME
if [ ! -f "/etc/matchbox/server.crt" ]; then
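# First boot only: generate a CA plus server and client certificates
# with the cert-gen helper in /root/tls, then install them for the
# matchbox service (/etc/matchbox) and the local client (/root/.matchbox).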
cd /root/tls
export SAN="DNS.1:${Vars.MatchBox.Hostname},IP.1:${Vars.ETH0.IP}"
./cert-gen
mkdir -p /etc/matchbox
cp ca.crt server.crt server.key /etc/matchbox
chown -R matchbox:matchbox /etc/matchbox
mkdir -p /root/.matchbox
cp client.crt client.key ca.crt /root/.matchbox/
fi
}

View File

@ -0,0 +1 @@
harbor

View File

@ -0,0 +1 @@
command_args="-address 0.0.0.0:${Vars.MatchBox.HTTPPort} -rpc-address 0.0.0.0:${Vars.MatchBox.gRPCPort} -log-level ${Vars.MatchBox.LogLevel}"

View File

@ -0,0 +1,7 @@
${Vars.NIC[0].IP} ${Vars.Set.Hostname}
%{ if Vars.MatchBox.Hostname != "" }
${Vars.NIC[0].IP} ${Vars.MatchBox.Hostname}
%{ endif }
%{ for host in Vars.DNSMasq.Hosts }
${host.IP} ${host.Name}
%{ endfor }

View File

@ -0,0 +1,60 @@
log-queries
log-dhcp
#port=0
listen-address=0.0.0.0
interface=${Vars.PXE.ListenInterface}
no-resolv
domain-needed
bogus-priv
expand-hosts
server=${Vars.DNS[0]}
strict-order
addn-hosts=/etc/dnsmasq-hosts.conf
domain=${Vars.PXE.DNSDomain}
local=/${Vars.PXE.DNSDomain}/
localise-queries
%{ if Vars.PXE.DHCPMode == "proxy" }
#dhcp-no-override
dhcp-range=${Vars.NIC[0].IP},proxy
%{ else }
dhcp-range=${Vars.PXE.DHCPRangeStart},${Vars.PXE.DHCPRangeEnd},${Vars.PXE.DHCPLeaseDuration}
dhcp-option=option:router,${Vars.NIC[0].Gateway}
%{ endif }
dhcp-option=option:dns-server,${Vars.NIC[0].IP}
dhcp-option=option:domain-name,${Vars.PXE.DNSDomain}
# TFTP Configuration
enable-tftp
tftp-root="${Vars.PXE.TFTPRoot}"
pxe-prompt="${Vars.PXE.GreetingMessage}",${Vars.PXE.DelayTime}
# Based on logic in https://gist.github.com/robinsmidsrod/4008017
# iPXE sends a 175 option, checking suboptions
dhcp-match=set:ipxe-http,175,19
dhcp-match=set:ipxe-https,175,20
dhcp-match=set:ipxe-menu,175,39
# pcbios specific
dhcp-match=set:ipxe-pxe,175,33
dhcp-match=set:ipxe-bzimage,175,24
dhcp-match=set:ipxe-iscsi,175,17
# efi specific
dhcp-match=set:ipxe-efi,175,36
# combination
# set ipxe-ok tag if we have correct combination
# http && menu && iscsi ((pxe && bzimage) || efi)
tag-if=set:ipxe-ok,tag:ipxe-http,tag:ipxe-menu,tag:ipxe-iscsi,tag:ipxe-pxe,tag:ipxe-bzimage
tag-if=set:ipxe-ok,tag:ipxe-http,tag:ipxe-menu,tag:ipxe-iscsi,tag:ipxe-efi
## Load different PXE boot image depending on client architecture (when running as a proxy DHCP)
pxe-service=tag:!ipxe-ok, x86PC, "Legacy boot PXE chainload to iPXE", undionly.kpxe
pxe-service=tag:!ipxe-ok, BC_EFI, "UEFI32 boot chainload to iPXE", snponly.efi
pxe-service=tag:!ipxe-ok, X86-64_EFI, "UEFI64 boot chainload to iPXE", snponly.efi
dhcp-userclass=set:ipxe,iPXE
dhcp-boot=tag:ipxe-ok,http://${Vars.NIC[0].IP}:${Vars.MatchBox.HTTPPort}/boot.ipxe,,${Vars.NIC[0].IP}

View File

@ -0,0 +1 @@
${Vars.Set.Hostname}

View File

@ -0,0 +1,28 @@
#!/sbin/openrc-run
name=$RC_SVCNAME
command="/usr/local/bin/$RC_SVCNAME"
command_user="$RC_SVCNAME"
pidfile="/run/$RC_SVCNAME/$RC_SVCNAME.pid"
start_stop_daemon_args="--start -b"
command_args="$command_args"
command_background="yes"
depend() {
need net
}
start_pre() {
checkpath --directory --owner $command_user:$command_user --mode 0775 \
/run/$RC_SVCNAME /var/log/$RC_SVCNAME
if [ ! -f "/etc/matchbox/server.crt" ]; then
cd /root/tls
export SAN="DNS.1:${Vars.MatchBox.Hostname},IP.1:${Vars.NIC[0].IP}"
./cert-gen
mkdir -p /etc/matchbox
cp ca.crt server.crt server.key /etc/matchbox
chown -R matchbox:matchbox /etc/matchbox
mkdir -p /root/.matchbox
cp client.crt client.key ca.crt /root/.matchbox/
fi
}

View File

@ -0,0 +1,9 @@
%{ for iface in Vars.NIC }
auto ${iface.Name}
iface ${iface.Name} inet static
address ${iface.IP}
netmask ${iface.Mask}
gateway ${iface.Gateway}
%{ endfor ~}

View File

@ -0,0 +1,4 @@
%{ for dns in Vars.DNS }
nameserver ${dns}
%{ endfor ~}

View File

@ -0,0 +1,7 @@
NAME = <%= image_name %>
PATH = <%= image_source %>
TYPE = OS
PERSISTENT = No
DESCRIPTION = "<%= image_comment %>"
DEV_PREFIX = vd
FORMAT = qcow2

View File

@ -0,0 +1,48 @@
{
"name": "<%= template_name %>",
"deployment": "straight",
"description": "Cluster Kubernetes (k8s)",
"roles": [
{
"name": "leader",
"cardinality": 1,
"vm_template": <%= getTemplateByName(oneCli, vm_name).id %>,
"shutdown_action": "terminate",
"vm_template_contents": "NIC = [\n NAME = \"NIC0\",\n NETWORK_ID = \"$main\",\n RDP = \"YES\" ]\nNIC = [\n NAME = \"NIC1\",\n NETWORK_ID = \"$internal\" ]\n",
"elasticity_policies": [],
"scheduled_policies": []
},
{
"name": "master",
"cardinality": 2,
"vm_template": <%= getTemplateByName(oneCli, vm_name).id %>,
"shutdown_action": "terminate",
"vm_template_contents": "NIC = [\n NAME = \"NIC0\",\n NETWORK_ID = \"$main\",\n RDP = \"YES\" ]\nNIC = [\n NAME = \"NIC1\",\n NETWORK_ID = \"$internal\" ]\n",
"elasticity_policies": [],
"scheduled_policies": []
},
{
"name": "worker",
"cardinality": 4,
"vm_template": <%= getTemplateByName(oneCli, vm_name).id %>,
"shutdown_action": "terminate",
"parents": [
"leader"
],
"vm_template_contents": "NIC = [\n NAME = \"NIC0\",\n NETWORK_ID = \"$main\",\n RDP = \"YES\" ]\nNIC = [\n NAME = \"NIC1\",\n NETWORK_ID = \"$internal\" ]\n",
"elasticity_policies": [],
"scheduled_policies": []
}
],
"networks": {
"main": "M|network|Main network| |id:",
"internal": "M|network|Internal network| |id:"
},
"custom_attrs": {
"KUBEAPPS_DNS_NAME": "M|text|DNS Name for kubeapps service| |kubeapps.k3s-eole.local",
"INGRESS_PROVIDER": "O|list|Default ingress to install|nginx, traefik, |",
"LE_EMAIL": "M|text|Email | |"
},
"shutdown_action": "terminate",
"ready_status_gate": true
}

View File

@ -0,0 +1,33 @@
NAME = "<%= template_name %>"
CONTEXT = [
NETWORK = "YES",
REPORT_READY = "YES",
SET_HOSTNAME = "$NAME",
SSH_PUBLIC_KEY = "$USER[SSH_PUBLIC_KEY]",
TOKEN = "YES" ]
CPU = "0.2"
DESCRIPTION = "Alpine basic image"
DISK = [
DEV_PREFIX = "vd",
DRIVER = "qcow2",
IMAGE = "<%= image_name %>",
IMAGE_UNAME = "<%= user %>" ]
GRAPHICS = [
KEYMAP = "fr",
LISTEN = "0.0.0.0",
TYPE = "VNC" ]
HYPERVISOR = "kvm"
INPUT = [
BUS = "usb",
TYPE = "tablet" ]
INPUTS_ORDER = ""
LOGO = "images/logos/linux.png"
MEMORY = "512"
MEMORY_UNIT_COST = "MB"
NIC_DEFAULT = [
MODEL = "virtio" ]
OS = [
ARCH = "x86_64",
BOOT = "",
SD_DISK_BUS = "scsi" ]
VCPU = "2"

View File

@ -0,0 +1,32 @@
NAME = "<%= template_name %>"
CONTEXT = [
NETWORK = "YES",
REPORT_READY = "YES",
SET_HOSTNAME = "$NAME",
SSH_PUBLIC_KEY = "$USER[SSH_PUBLIC_KEY]",
TOKEN = "YES" ]
CPU = "0.2"
DESCRIPTION = "K3S Ready VM"
DISK = [
IMAGE = "<%= image_name %>",
IMAGE_UNAME = "<%= user %>",
DRIVER = "qcow2" ]
GRAPHICS = [
KEYMAP = "fr",
LISTEN = "0.0.0.0",
TYPE = "VNC" ]
HYPERVISOR = "kvm"
INPUT = [
BUS = "usb",
TYPE = "tablet" ]
INPUTS_ORDER = ""
LOGO = "images/logos/alpine.png"
MEMORY = "2048"
MEMORY_UNIT_COST = "MB"
NIC_DEFAULT = [
MODEL = "virtio" ]
OS = [
ARCH = "x86_64",
BOOT = "",
SD_DISK_BUS = "scsi" ]
VCPU = "2"

View File

@ -0,0 +1,35 @@
NAME = "<%= template_name %>"
CONTEXT = [
NETWORK = "YES",
REPORT_READY = "YES",
SET_HOSTNAME = "$NAME",
SERVER_ROLE = "leader",
TOKEN = "YES",
SSH_PUBLIC_KEY = "$USER[SSH_PUBLIC_KEY]"
]
CPU = "0.8"
DESCRIPTION = "Kubernetes master or Docker VM (check the name)"
DISK = [
DEV_PREFIX = "vd",
IMAGE = "<%= image_name %>",
IMAGE_UNAME = "<%= user %>",
DRIVER = "qcow2" ]
GRAPHICS = [
LISTEN = "0.0.0.0",
KEYMAP = "fr",
TYPE = "VNC" ]
HYPERVISOR = "kvm"
INPUT = [
BUS = "usb",
TYPE = "tablet" ]
INPUTS_ORDER = ""
LOGO = "images/logos/alpine.png"
MEMORY = "2048"
MEMORY_UNIT_COST = "MB"
NIC_DEFAULT = [
MODEL = "virtio" ]
OS = [
ARCH = "x86_64",
BOOT = "",
SD_DISK_BUS = "scsi" ]
VCPU = "4"

View File

@ -0,0 +1,42 @@
NAME = "<%= template_name %>"
CONTEXT = [
NETWORK = "YES",
REPORT_READY = "YES",
SET_HOSTNAME = "$NAME",
SERVER_ROLE = "master",
MASTER_ADDR = "$MASTER_ADDR",
MASTER_TOKEN = "$MASTER_TOKEN",
MASTER_CA_TOKEN = "$MASTER_CA_TOKEN",
TOKEN = "YES",
SSH_PUBLIC_KEY = "$USER[SSH_PUBLIC_KEY]"
]
CPU = "0.8"
DESCRIPTION = "Kubernetes worker VM"
DISK = [
DEV_PREFIX = "vd",
IMAGE = "<%= image_name %>",
IMAGE_UNAME = "<%= user %>",
DRIVER = "qcow2" ]
GRAPHICS = [
LISTEN = "0.0.0.0",
KEYMAP = "fr",
TYPE = "VNC" ]
HYPERVISOR = "kvm"
INPUT = [
BUS = "usb",
TYPE = "tablet" ]
INPUTS_ORDER = ""
LOGO = "images/logos/alpine.png"
MEMORY = "2048"
MEMORY_UNIT_COST = "MB"
NIC_DEFAULT = [
MODEL = "virtio" ]
OS = [
ARCH = "x86_64",
BOOT = "",
SD_DISK_BUS = "scsi" ]
USER_INPUTS = [
MASTER_ADDR = "O|text|Master address (for workers only)",
MASTER_TOKEN = "O|text|Master Token (for workers only)",
MASTER_CA_TOKEN = "O|text|Master CA Token (for workers only)" ]
VCPU = "4"

View File

@ -0,0 +1,42 @@
NAME = "<%= template_name %>"
CONTEXT = [
NETWORK = "YES",
REPORT_READY = "YES",
SET_HOSTNAME = "$NAME",
SERVER_ROLE = "worker",
MASTER_ADDR = "$MASTER_ADDR",
MASTER_TOKEN = "$MASTER_TOKEN",
MASTER_CA_TOKEN = "$MASTER_CA_TOKEN",
TOKEN = "YES",
SSH_PUBLIC_KEY = "$USER[SSH_PUBLIC_KEY]"
]
CPU = "0.8"
DESCRIPTION = "Kubernetes worker VM"
DISK = [
DEV_PREFIX = "vd",
IMAGE = "<%= image_name %>",
IMAGE_UNAME = "<%= user %>",
DRIVER = "qcow2" ]
GRAPHICS = [
LISTEN = "0.0.0.0",
KEYMAP = "fr",
TYPE = "VNC" ]
HYPERVISOR = "kvm"
INPUT = [
BUS = "usb",
TYPE = "tablet" ]
INPUTS_ORDER = ""
LOGO = "images/logos/alpine.png"
MEMORY = "4096"
MEMORY_UNIT_COST = "MB"
NIC_DEFAULT = [
MODEL = "virtio" ]
OS = [
ARCH = "x86_64",
BOOT = "",
SD_DISK_BUS = "scsi" ]
USER_INPUTS = [
MASTER_ADDR = "O|text|Master address (for workers only)",
MASTER_TOKEN = "O|text|Master Token (for workers only)",
MASTER_CA_TOKEN = "O|text|Master CA Token (for workers only)" ]
VCPU = "4"

View File

@ -0,0 +1,47 @@
NAME = "<%= template_name %>"
CONTEXT = [
MATCHBOX_URL = "http://$NAME",
NETWORK = "YES",
PXE_DHCPLEASEDURATION = "$DHCPLEASEDURATION",
PXE_DHCPMODE = "$ADHCPMODE",
PXE_DNSDOMAIN = "$BDNSDOMAIN",
PXE_DHCPRANGESTART = "$CDHCPRANGESTART",
PXE_DHCPRANGEEND = "$DDHCPRANGEEND",
PXE_DHCPLEASEDURATION = "$EDHCPLEASEDURATION",
MATCHBOX_HOSTNAME = "$FMATCHBOX_HOSTNAME",
REPORT_READY = "YES",
SET_HOSTNAME = "$NAME",
SSH_PUBLIC_KEY = "$USER[SSH_PUBLIC_KEY]",
TOKEN = "YES" ]
CPU = "0.2"
DESCRIPTION = "Matchbox Ready VM"
DISK = [
IMAGE = "<%= image_name %>",
IMAGE_UNAME = "<%= user %>",
DRIVER = "qcow2" ]
GRAPHICS = [
KEYMAP = "fr",
LISTEN = "0.0.0.0",
TYPE = "VNC" ]
HYPERVISOR = "kvm"
INPUT = [
BUS = "usb",
TYPE = "tablet" ]
INPUTS_ORDER = ""
LOGO = "images/logos/alpine.png"
MEMORY = "2048"
MEMORY_UNIT_COST = "MB"
NIC_DEFAULT = [
MODEL = "virtio" ]
OS = [
ARCH = "x86_64",
BOOT = "",
SD_DISK_BUS = "scsi" ]
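# Note: the user-input keys below are prefixed (A, B, C, ...), apparently to
# control the order in which the dialog displays them; the CONTEXT section
# above maps each one back to its PXE_*/MATCHBOX_* variable.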
USER_INPUTS = [
ADHCPMODE = "M|list|DHCP Mode|proxy,direct|proxy",
BDNSDOMAIN = "M|text|Nom de la zone DNS (ex: cadol.es)",
CDHCPRANGESTART = "O|text|DNSMASQ DHCP Range First IP",
DDHCPRANGEEND = "O|text|DNSMASQ DHCP Range Last IP",
EDHCPLEASEDURATION = "M|list|DHCP lease duration|1h,2h,4h,6h,8h,10h,12h,14h,24h|1h",
FMATCHBOX_HOSTNAME = "O|text|Matchbox service hostname|mb.cadol.es" ]
VCPU = "2"

View File

@ -0,0 +1,54 @@
variable "name" {
type = string
default = "nuo"
}
variable "version" {
type = string
default = "3.18.0"
}
variable "short_version" {
type = string
default = "3.18"
}
variable "arch" {
type = string
default = "x86_64"
}
variable "output_dir" {
type = string
default = "output/nuo/"
}
variable "source_url" {
type = string
default = "https://cdimage.debian.org/cdimage/release"
}
variable "iso_cd_checksum" {
type = string
default = "sha256:ae6d563d2444665316901fe7091059ac34b8f67ba30f9159f7cef7d2fdc5bf8a"
}
variable "image_version" {
type = string
default = "0.0.1"
}
variable "one_user" {
type = string
default = env("ONE_USER")
}
variable "one_token" {
type = string
default = env("ONE_TOKEN")
}
variable "boot_command" {
type = list(string)
default = []
}
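// Hedged usage sketch (the values are examples, not defaults enforced here):
//   ONE_USER=oneadmin ONE_TOKEN=secret \
//   packer build -var "version=3.18.1" -var "output_dir=output/custom/" .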