Compare commits

..

No commits in common. "473ff8ea3549a8610f97c5adc489d021a0346482" and "66d290c298e4622b08b0889a4f57420d905d2f4f" have entirely different histories.

37 changed files with 97 additions and 2951 deletions

2
.gitignore vendored
View File

@ -14,4 +14,4 @@
# Dependency directories (remove the comment below to include it)
# vendor/
bin/

View File

@ -2,14 +2,11 @@ LINT_ARGS ?= ./...
DESTDIR ?= "/usr/local"
bin:
GOOS=linux CGO_ENABLED=0 go build -o bin/templater-linux cmd/templater/main.go
GOOS=linux CGO_ENABLED=0 go build -o bin/bootstraper-linux cmd/bootstraper/main.go
GOOS=linux go build -o bin/templater-linux main.go
upx bin/templater-linux
upx bin/bootstraper-linux
install:
cp bin/templater-linux $(DESTDIR)/bin/templater
cp bin/bootstraper-linux $(DESTDIR)/bin/bootstraper
uninstall:
rm $(DESTDIR)/bin/templater

View File

@ -1,48 +0,0 @@
package api
import (
"github.com/gin-gonic/gin"
)
// Template is the JSON payload accepted by the /generate endpoint.
// The field semantics follow the commented-out handler below: Type
// selects the template engine ("go" or "hcl"), Content is the
// template text itself, and Config carries the values to render with.
type Template struct {
// Template engine selector; "go" or "hcl" per the handler below.
Type string
// Raw template source to be rendered.
Content string
// Configuration values, passed to the engine as a byte string.
Config string
}
// Generate is the gin handler bound to POST /generate.
//
// NOTE(review): this is currently a stub — it writes no response body
// and sets no status, so clients receive gin's default behavior. The
// intended implementation is preserved in the comment block below.
// (The original body held a single bare `return` as its last
// statement, which is redundant — staticcheck S1023 — and is removed
// here.)
func Generate(c *gin.Context) {
}
/*
func Generate(c *gin.Context) {
var template Template
err := c.Request.ParseForm()
if err != nil {
c.String(500, err.Error())
}
err = c.ShouldBindJSON(&template)
if err != nil {
c.String(500, err.Error())
return
}
templateType := template.Type
templateFile := template.Content
config := []byte(template.Config)
res := ""
if templateType == "go" {
res = templater.ProcessGoTemplate(templateFile, config)
c.JSON(http.StatusOK, gin.H{"data": res})
} else if templateType == "hcl" {
res = templater.ProcessHCLTemplate(templateFile, config)
c.JSON(http.StatusOK, gin.H{"data": res})
} else {
c.JSON(http.StatusBadRequest, gin.H{"data": "Unkown template type"})
}
}
*/

View File

@ -1,28 +0,0 @@
package main
import (
"forge.cadoles.com/pcaseiro/templatefile/pkg/templater"
"github.com/alexflint/go-arg"
)
// main is the bootstraper entry point: it parses CLI/environment
// options, loads the templater host configuration, and applies the
// declared services to the target root directory. Any failure is
// fatal (panic).
func main() {
	var opts struct {
		Config string `arg:"-c,--config,env:CONFIG" help:"Configuration values file or directory path" default:"./data/config"`
		TemplateDirectory string `arg:"-t,--template-dir,env:TEMPLATE_DIR" help:"Template directory path" default:"./data/templates"`
		RootDirectory string `arg:"-r,--root-dir,env:ROOT_DIR" help:"Generate files with this root instead of /" default:"/"`
		DryRun bool `arg:"-d,--dry-run,env:DRY_RUN" help:"Dry run do not really complete actions" default:"false"`
	}
	arg.MustParse(&opts)

	// Build the host configuration from the config path, template
	// directory and root directory, then apply it (honoring dry-run).
	var cfg templater.TemplaterConfig
	if err := cfg.New(opts.Config, opts.TemplateDirectory, opts.RootDirectory); err != nil {
		panic(err)
	}
	if err := cfg.ManageServices(opts.DryRun); err != nil {
		panic(err)
	}
}

View File

@ -1,76 +0,0 @@
package main
import (
"fmt"
"os"
"forge.cadoles.com/pcaseiro/templatefile/api"
"forge.cadoles.com/pcaseiro/templatefile/pkg/templater"
"github.com/alexflint/go-arg"
"github.com/gin-gonic/gin"
)
// Daemon runs the templater HTTP API: it registers POST /generate on
// a default gin router and serves on all interfaces at the given
// port, blocking until the server exits. The listen/serve error, if
// any, is returned to the caller.
func Daemon(port int) (err error) {
	router := gin.Default()
	router.POST("/generate", api.Generate)
	// listen and serve on 0.0.0.0:<port> (for windows "localhost:<port>")
	return router.Run(fmt.Sprintf("0.0.0.0:%d", port))
}
// main is the templater CLI entry point. With -d it starts the HTTP
// API daemon; otherwise it renders one template file with the given
// configuration and writes the result to stdout or to a file. Any
// failure is fatal (panic).
func main() {
var args struct {
Daemon bool `arg:"-d,--daemon,env:TEMPLATER_DAEMON" default:"false" help:"Enable api server"`
Port int `arg:"-p,--port,env:TEMPLATER_PORT" default:"8080" help:"Listening port for the api server"`
Type string `arg:"-t,--type,env:TEMPLATE_TYPE" default:"hcl" help:"Template type (go/template or hcl)"`
Output string `arg:"-o,--output,env:TEMPLATER_OUTPUT" default:"stdout" help:"Destination of the result (stdout or file path)"`
Config string `arg:"-c,--config,env:TEMPLATE_CONFIG" help:"Configuration values"`
File string `arg:"-f,--template-file,env:TEMPLATE_FILE" help:"Template file path"`
}
arg.MustParse(&args)
if args.Daemon {
// Daemon mode: serve the /generate API; any server error is fatal.
err := Daemon(args.Port)
if err != nil {
panic(err)
}
} else {
var config []byte
templateType := args.Type
templateFile := args.File
output := args.Output
// If -c names an existing file, read the configuration from it;
// otherwise the flag value itself is treated as inline configuration.
if _, err := os.Stat(args.Config); err == nil {
config, err = os.ReadFile(args.Config)
if err != nil {
panic(err)
}
} else {
config = []byte(args.Config)
}
var file templater.ConfigFile
file.Source = templateFile
file.TemplateType = templateType
// NOTE(review): templateFile is stored in file.Source and also passed
// again to ProcessTemplate — confirm against the templater package
// whether both are required.
result, err := file.ProcessTemplate(templateFile, config)
if err != nil {
panic(err)
}
// Emit the rendered result to stdout or to the requested path.
if output == "stdout" {
fmt.Printf("%s", result)
} else {
err := os.WriteFile(output, []byte(result), 0644)
if err != nil {
panic(err)
}
}
}
}

View File

@ -1,66 +0,0 @@
{
"ConfigFiles": [
{
"destination": "/etc/loki/loki-local-config.yaml",
"group": "grafana",
"mode": "600",
"owner": "loki",
"service": "loki",
"source": "loki-local-config.pktpl.hcl"
}
],
"Daemons": {
"Loki": {
"enabled": true,
"name": "loki"
}
},
"Packages": {
"loki": {
"action": "install",
"name": "loki"
},
"nodeExporter": {
"action": "install",
"name": "prometheus-node-exporter"
},
"promtail": {
"action": "install",
"name": "loki-promtail"
}
},
"Repositories": {
"AlpineTesting": {
"enabled": true,
"name": "testing",
"type": "apk",
"url": "http://mirrors.bfsu.edu.cn/alpine/edge/testing"
}
},
"Users": {
"loki": {
"group": "grafana",
"home": "/srv/loki",
"shell": "/bin/nologin",
"username": "loki"
}
},
"Vars": {
"AlertManagerURL": "http://localhost:9092",
"AuthEnabled": false,
"GRPCPort": "9095",
"Group": "grafana",
"HTTPPort": "3099",
"LogLevel": "error",
"ObjectStore": "filesystem",
"S2": {
"APIKey": "",
"APISecretKey": "",
"BucketName": "",
"URL": ""
},
"SharedStore": "filesystem",
"StorageRoot": "/var/loki",
"User": "loki"
}
}

View File

@ -1,204 +0,0 @@
{
"Globals": {
"Vars": {
"PrometheusPort": "9090"
}
},
"Name": "loki-stack",
"Services": {
"Alertmanager": {
"ConfigFiles": [
{
"destination": "/etc/alertmanager/alertmanager.yml",
"group": "prometheus",
"mode": "600",
"owner": "prometheus",
"source": "alertmanager.yml.pktpl.hcl"
}
],
"Daemons": {
"prometheus": {
"enabled": true,
"name": "alertmanager",
"type": "auto"
}
},
"Packages": {
"alertmanager": {
"action": "install",
"name": "alertmanager"
},
"nodeExporter": {
"action": "install",
"name": "prometheus-node-exporter"
}
},
"Users": {
"prometheus": {
"group": "prometheus",
"home": "/var/lib/prometheus",
"shell": "/sbin/nologin",
"username": "prometheus"
}
},
"Vars": {}
},
"Grafana": {
"ConfigFiles": [
{
"destination": "/etc/grafana.ini",
"group": "grafana",
"mode": "600",
"owner": "grafana",
"source": "grafana.ini.pktpl.hcl"
}
],
"Daemons": {
"grafana": {
"enabled": true,
"name": "grafana",
"type": "auto"
}
},
"Packages": {
"grafana": {
"action": "install",
"name": "grafana"
},
"nodeExporter": {
"action": "install",
"name": "prometheus-node-exporter"
}
},
"Users": {
"grafana": {
"group": "grafana",
"home": "/srv/grafana",
"shell": "/bin/nologin",
"username": "grafana"
}
},
"Vars": {
"AppMode": "production",
"DomainName": "www.grafana.local",
"HTTPPort": "80",
"HostName": "grafana.local",
"UserName": "grafana"
}
},
"Loki": {
"ConfigFiles": [
{
"destination": "/etc/loki/loki-local-config.yaml",
"group": "grafana",
"mode": "600",
"owner": "loki",
"service": "loki",
"source": "loki-local-config.pktpl.hcl"
}
],
"Daemons": {
"Loki": {
"enabled": true,
"name": "loki"
}
},
"Packages": {
"loki": {
"action": "install",
"name": "loki"
},
"nodeExporter": {
"action": "install",
"name": "prometheus-node-exporter"
},
"promtail": {
"action": "install",
"name": "loki-promtail"
}
},
"Repositories": {
"AlpineTesting": {
"enabled": true,
"name": "testing",
"type": "apk",
"url": "http://mirrors.bfsu.edu.cn/alpine/edge/testing"
}
},
"Users": {
"loki": {
"group": "grafana",
"home": "/srv/loki",
"shell": "/bin/nologin",
"username": "loki"
}
},
"Vars": {
"AlertManagerURL": "http://localhost:9092",
"AuthEnabled": false,
"GRPCPort": "9095",
"Group": "grafana",
"HTTPPort": "3099",
"LogLevel": "error",
"ObjectStore": "filesystem",
"S2": {
"APIKey": "",
"APISecretKey": "",
"BucketName": "",
"URL": ""
},
"SharedStore": "filesystem",
"StorageRoot": "/var/loki",
"User": "loki"
}
},
"Prometheus": {
"ConfigFiles": [
{
"destination": "/etc/prometheus/prometheus.yml",
"group": "prometheus",
"mode": "600",
"owner": "prometheus",
"source": "prometheus.yml.pktpl.hcl"
}
],
"Daemons": {
"prometheus": {
"enabled": true,
"name": "prometheus",
"type": "auto"
}
},
"Packages": {
"nodeExporter": {
"action": "install",
"name": "prometheus-node-exporter"
},
"prometheus": {
"action": "install",
"name": "prometheus"
}
},
"Users": {
"prometheus": {
"group": "prometheus",
"home": "/var/lib/prometheus",
"shell": "/sbin/nologin",
"username": "prometheus"
}
},
"Vars": {
"Scrapers": [
{
"MetricsPath": "/metrics",
"Name": "Prometheus",
"Scheme": "http",
"Targets": [
"localhost:9001"
]
}
]
}
}
}
}

View File

@ -1,16 +0,0 @@
route:
group_by: ['alertname']
group_wait: 30s
group_interval: 5m
repeat_interval: 1h
receiver: 'web.hook'
receivers:
- name: 'web.hook'
webhook_configs:
- url: 'http://127.0.0.1:5001/'
inhibit_rules:
- source_match:
severity: 'critical'
target_match:
severity: 'warning'
equal: ['alertname', 'dev', 'instance']

View File

@ -1,13 +0,0 @@
### Go template test ###
{{ if .Vars.AuthEnabled }}
auth_enabled: true
{{ else }}
auth_enabled: false
{{ end }}
server:
http_listen_port: {{ .Vars.HTTPPort }}
grpc_listen_port: {{ .Vars.GRPCPort }}
log_level: {{ .Vars.LogLevel }}
### End Go template test ###

View File

@ -1,12 +0,0 @@
### HCL2 Template test ###
%{ if Vars.AuthEnabled ~}
auth_enabled: true
%{ else }
auth_enabled: false
%{ endif }
server:
http_listen_port: ${Vars.HTTPPort}
grpc_listen_port: ${Vars.GRPCPort}
log_level: ${Vars.LogLevel}
### END HCL Template test ###

View File

@ -1,1155 +0,0 @@
##################### Grafana Configuration Example #####################
#
# Everything has defaults so you only need to uncomment things you want to
# change
# possible values : production, development
app_mode = ${Vars.AppMode}
# instance name, defaults to HOSTNAME environment variable value or hostname if HOSTNAME var is empty
instance_name = ${Vars.HostName}
# force migration will run migrations that might cause dataloss
;force_migration = false
#################################### Paths ####################################
[paths]
# Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used)
data = /var/lib/grafana
# Temporary files in `data` directory older than given duration will be removed
temp_data_lifetime = 24h
# Directory where grafana can store logs
logs = /var/log/grafana
# Directory where grafana will automatically scan and look for plugins
plugins = /var/lib/grafana/plugins
# folder that contains provisioning config files that grafana will apply on startup and while running.
provisioning = conf/provisioning
#################################### Server ####################################
[server]
# Protocol (http, https, h2, socket)
;protocol = http
# The ip address to bind to, empty will bind to all interfaces
;http_addr =
# The http port to use
http_port = ${Vars.HTTPPort}
# The public facing domain name used to access grafana from a browser
domain = ${Vars.DomainName}
# Redirect to correct domain if host header does not match domain
# Prevents DNS rebinding attacks
;enforce_domain = false
# The full public facing url you use in browser, used for redirects and emails
# If you use reverse proxy and sub path specify full url (with sub path)
;root_url = %(protocol)s://%(domain)s:%(http_port)s/
# Serve Grafana from subpath specified in `root_url` setting. By default it is set to `false` for compatibility reasons.
;serve_from_sub_path = false
# Log web requests
;router_logging = false
# the path relative working path
;static_root_path = public
# enable gzip
;enable_gzip = false
# https certs & key file
;cert_file =
;cert_key =
# Unix socket path
;socket =
# CDN Url
;cdn_url =
# Sets the maximum time using a duration format (5s/5m/5ms) before timing out read of an incoming request and closing idle connections.
# `0` means there is no timeout for reading the request.
;read_timeout = 0
#################################### Database ####################################
[database]
# You can configure the database connection by specifying type, host, name, user and password
# as separate properties or as on string using the url properties.
# Either "mysql", "postgres" or "sqlite3", it's your choice
type = sqlite3
host = 127.0.0.1:3306
name = grafana
user = ${Vars.UserName}
# If the password contains # or ; you have to wrap it with triple quotes. Ex """#password;"""
;password =
# Use either URL or the previous fields to configure the database
# Example: mysql://user:secret@host:port/database
;url =
# For "postgres" only, either "disable", "require" or "verify-full"
;ssl_mode = disable
# Database drivers may support different transaction isolation levels.
# Currently, only "mysql" driver supports isolation levels.
# If the value is empty - driver's default isolation level is applied.
# For "mysql" use "READ-UNCOMMITTED", "READ-COMMITTED", "REPEATABLE-READ" or "SERIALIZABLE".
;isolation_level =
;ca_cert_path =
;client_key_path =
;client_cert_path =
;server_cert_name =
# For "sqlite3" only, path relative to data_path setting
path = grafana.db
# Max idle conn setting default is 2
;max_idle_conn = 2
# Max conn setting default is 0 (mean not set)
;max_open_conn =
# Connection Max Lifetime default is 14400 (means 14400 seconds or 4 hours)
;conn_max_lifetime = 14400
# Set to true to log the sql calls and execution times.
;log_queries =
# For "sqlite3" only. cache mode setting used for connecting to the database. (private, shared)
;cache_mode = private
# For "mysql" only if lockingMigration feature toggle is set. How many seconds to wait before failing to lock the database for the migrations, default is 0.
;locking_attempt_timeout_sec = 0
################################### Data sources #########################
[datasources]
# Upper limit of data sources that Grafana will return. This limit is a temporary configuration and it will be deprecated when pagination will be introduced on the list data sources API.
;datasource_limit = 5000
#################################### Cache server #############################
[remote_cache]
# Either "redis", "memcached" or "database" default is "database"
;type = database
# cache connectionstring options
# database: will use Grafana primary database.
# redis: config like redis server e.g. `addr=127.0.0.1:6379,pool_size=100,db=0,ssl=false`. Only addr is required. ssl may be 'true', 'false', or 'insecure'.
# memcache: 127.0.0.1:11211
;connstr =
#################################### Data proxy ###########################
[dataproxy]
# This enables data proxy logging, default is false
;logging = false
# How long the data proxy waits to read the headers of the response before timing out, default is 30 seconds.
# This setting also applies to core backend HTTP data sources where query requests use an HTTP client with timeout set.
;timeout = 30
# How long the data proxy waits to establish a TCP connection before timing out, default is 10 seconds.
;dialTimeout = 10
# How many seconds the data proxy waits before sending a keepalive probe request.
;keep_alive_seconds = 30
# How many seconds the data proxy waits for a successful TLS Handshake before timing out.
;tls_handshake_timeout_seconds = 10
# How many seconds the data proxy will wait for a server's first response headers after
# fully writing the request headers if the request has an "Expect: 100-continue"
# header. A value of 0 will result in the body being sent immediately, without
# waiting for the server to approve.
;expect_continue_timeout_seconds = 1
# Optionally limits the total number of connections per host, including connections in the dialing,
# active, and idle states. On limit violation, dials will block.
# A value of zero (0) means no limit.
;max_conns_per_host = 0
# The maximum number of idle connections that Grafana will keep alive.
;max_idle_connections = 100
# How many seconds the data proxy keeps an idle connection open before timing out.
;idle_conn_timeout_seconds = 90
# If enabled and user is not anonymous, data proxy will add X.User header with username into the request, default is false.
;send_user_header = false
# Limit the amount of bytes that will be read/accepted from responses of outgoing HTTP requests.
;response_limit = 0
# Limits the number of rows that Grafana will process from SQL data sources.
;row_limit = 1000000
#################################### Analytics ####################################
[analytics]
# Server reporting, sends usage counters to stats.grafana.org every 24 hours.
# No ip addresses are being tracked, only simple counters to track
# running instances, dashboard and error counts. It is very helpful to us.
# Change this option to false to disable reporting.
;reporting_enabled = true
# The name of the distributor of the Grafana instance. Ex hosted-grafana, grafana-labs
;reporting_distributor = grafana-labs
# Set to false to disable all checks to https://grafana.com
# for new versions of grafana. The check is used
# in some UI views to notify that a grafana update exists.
# This option does not cause any auto updates, nor send any information
# only a GET request to https://raw.githubusercontent.com/grafana/grafana/main/latest.json to get the latest version.
;check_for_updates = true
# Set to false to disable all checks to https://grafana.com
# for new versions of plugins. The check is used
# in some UI views to notify that a plugin update exists.
# This option does not cause any auto updates, nor send any information
# only a GET request to https://grafana.com to get the latest versions.
;check_for_plugin_updates = true
# Google Analytics universal tracking code, only enabled if you specify an id here
;google_analytics_ua_id =
# Google Tag Manager ID, only enabled if you specify an id here
;google_tag_manager_id =
# Rudderstack write key, enabled only if rudderstack_data_plane_url is also set
;rudderstack_write_key =
# Rudderstack data plane url, enabled only if rudderstack_write_key is also set
;rudderstack_data_plane_url =
# Rudderstack SDK url, optional, only valid if rudderstack_write_key and rudderstack_data_plane_url is also set
;rudderstack_sdk_url =
# Rudderstack Config url, optional, used by Rudderstack SDK to fetch source config
;rudderstack_config_url =
# Controls if the UI contains any links to user feedback forms
;feedback_links_enabled = true
#################################### Security ####################################
[security]
# disable creation of admin user on first start of grafana
;disable_initial_admin_creation = false
# default admin user, created on startup
;admin_user = admin
# default admin password, can be changed before first start of grafana, or in profile settings
;admin_password = admin
# used for signing
;secret_key = SW2YcwTIb9zpOOhoPsMm
# current key provider used for envelope encryption, default to static value specified by secret_key
;encryption_provider = secretKey.v1
# list of configured key providers, space separated (Enterprise only): e.g., awskms.v1 azurekv.v1
;available_encryption_providers =
# disable gravatar profile images
;disable_gravatar = false
# data source proxy whitelist (ip_or_domain:port separated by spaces)
;data_source_proxy_whitelist =
# disable protection against brute force login attempts
;disable_brute_force_login_protection = false
# set to true if you host Grafana behind HTTPS. default is false.
;cookie_secure = false
# set cookie SameSite attribute. defaults to `lax`. can be set to "lax", "strict", "none" and "disabled"
;cookie_samesite = lax
# set to true if you want to allow browsers to render Grafana in a <frame>, <iframe>, <embed> or <object>. default is false.
;allow_embedding = false
# Set to true if you want to enable http strict transport security (HSTS) response header.
# HSTS tells browsers that the site should only be accessed using HTTPS.
;strict_transport_security = false
# Sets how long a browser should cache HSTS. Only applied if strict_transport_security is enabled.
;strict_transport_security_max_age_seconds = 86400
# Set to true if to enable HSTS preloading option. Only applied if strict_transport_security is enabled.
;strict_transport_security_preload = false
# Set to true if to enable the HSTS includeSubDomains option. Only applied if strict_transport_security is enabled.
;strict_transport_security_subdomains = false
# Set to true to enable the X-Content-Type-Options response header.
# The X-Content-Type-Options response HTTP header is a marker used by the server to indicate that the MIME types advertised
# in the Content-Type headers should not be changed and be followed.
;x_content_type_options = true
# Set to true to enable the X-XSS-Protection header, which tells browsers to stop pages from loading
# when they detect reflected cross-site scripting (XSS) attacks.
;x_xss_protection = true
# Enable adding the Content-Security-Policy header to your requests.
# CSP allows to control resources the user agent is allowed to load and helps prevent XSS attacks.
;content_security_policy = false
# Set Content Security Policy template used when adding the Content-Security-Policy header to your requests.
# $NONCE in the template includes a random nonce.
# $ROOT_PATH is server.root_url without the protocol.
;content_security_policy_template = """script-src 'self' 'unsafe-eval' 'unsafe-inline' 'strict-dynamic' $NONCE;object-src 'none';font-src 'self';style-src 'self' 'unsafe-inline' blob:;img-src * data:;base-uri 'self';connect-src 'self' grafana.com ws://$ROOT_PATH wss://$ROOT_PATH;manifest-src 'self';media-src 'none';form-action 'self';"""
# Controls if old angular plugins are supported or not. This will be disabled by default in future release
;angular_support_enabled = true
[security.encryption]
# Defines the time-to-live (TTL) for decrypted data encryption keys stored in memory (cache).
# Please note that small values may cause performance issues due to a high frequency decryption operations.
;data_keys_cache_ttl = 15m
# Defines the frequency of data encryption keys cache cleanup interval.
# On every interval, decrypted data encryption keys that reached the TTL are removed from the cache.
;data_keys_cache_cleanup_interval = 1m
#################################### Snapshots ###########################
[snapshots]
# snapshot sharing options
;external_enabled = true
;external_snapshot_url = https://snapshots.raintank.io
;external_snapshot_name = Publish to snapshots.raintank.io
# Set to true to enable this Grafana instance act as an external snapshot server and allow unauthenticated requests for
# creating and deleting snapshots.
;public_mode = false
# remove expired snapshot
;snapshot_remove_expired = true
#################################### Dashboards History ##################
[dashboards]
# Number dashboard versions to keep (per dashboard). Default: 20, Minimum: 1
;versions_to_keep = 20
# Minimum dashboard refresh interval. When set, this will restrict users to set the refresh interval of a dashboard lower than given interval. Per default this is 5 seconds.
# The interval string is a possibly signed sequence of decimal numbers, followed by a unit suffix (ms, s, m, h, d), e.g. 30s or 1m.
;min_refresh_interval = 5s
# Path to the default home dashboard. If this value is empty, then Grafana uses StaticRootPath + "dashboards/home.json"
;default_home_dashboard_path =
#################################### Users ###############################
[users]
# disable user signup / registration
;allow_sign_up = true
# Allow non admin users to create organizations
;allow_org_create = true
# Set to true to automatically assign new users to the default organization (id 1)
;auto_assign_org = true
# Set this value to automatically add new users to the provided organization (if auto_assign_org above is set to true)
;auto_assign_org_id = 1
# Default role new users will be automatically assigned (if disabled above is set to true)
;auto_assign_org_role = Viewer
# Require email validation before sign up completes
;verify_email_enabled = false
# Background text for the user field on the login page
;login_hint = email or username
;password_hint = password
# Default UI theme ("dark" or "light")
;default_theme = dark
# Path to a custom home page. Users are only redirected to this if the default home dashboard is used. It should match a frontend route and contain a leading slash.
; home_page =
# External user management, these options affect the organization users view
;external_manage_link_url =
;external_manage_link_name =
;external_manage_info =
# Viewers can edit/inspect dashboard settings in the browser. But not save the dashboard.
;viewers_can_edit = false
# Editors can administrate dashboard, folders and teams they create
;editors_can_admin = false
# The duration in time a user invitation remains valid before expiring. This setting should be expressed as a duration. Examples: 6h (hours), 2d (days), 1w (week). Default is 24h (24 hours). The minimum supported duration is 15m (15 minutes).
;user_invite_max_lifetime_duration = 24h
# Enter a comma-separated list of users login to hide them in the Grafana UI. These users are shown to Grafana admins and themselves.
; hidden_users =
[auth]
# Login cookie name
;login_cookie_name = grafana_session
# The maximum lifetime (duration) an authenticated user can be inactive before being required to login at next visit. Default is 7 days (7d). This setting should be expressed as a duration, e.g. 5m (minutes), 6h (hours), 10d (days), 2w (weeks), 1M (month). The lifetime resets at each successful token rotation.
;login_maximum_inactive_lifetime_duration =
# The maximum lifetime (duration) an authenticated user can be logged in since login time before being required to login. Default is 30 days (30d). This setting should be expressed as a duration, e.g. 5m (minutes), 6h (hours), 10d (days), 2w (weeks), 1M (month).
;login_maximum_lifetime_duration =
# How often should auth tokens be rotated for authenticated users when being active. The default is each 10 minutes.
;token_rotation_interval_minutes = 10
# Set to true to disable (hide) the login form, useful if you use OAuth, defaults to false
;disable_login_form = false
# Set to true to disable the sign out link in the side menu. Useful if you use auth.proxy or auth.jwt, defaults to false
;disable_signout_menu = false
# URL to redirect the user to after sign out
;signout_redirect_url =
# Set to true to attempt login with OAuth automatically, skipping the login screen.
# This setting is ignored if multiple OAuth providers are configured.
;oauth_auto_login = false
# OAuth state max age cookie duration in seconds. Defaults to 600 seconds.
;oauth_state_cookie_max_age = 600
# Skip forced assignment of OrgID 1 or 'auto_assign_org_id' for social logins
;oauth_skip_org_role_update_sync = false
# limit of api_key seconds to live before expiration
;api_key_max_seconds_to_live = -1
# Set to true to enable SigV4 authentication option for HTTP-based datasources.
;sigv4_auth_enabled = false
# Set to true to enable verbose logging of SigV4 request signing
;sigv4_verbose_logging = false
#################################### Anonymous Auth ######################
[auth.anonymous]
# enable anonymous access
;enabled = false
# specify organization name that should be used for unauthenticated users
;org_name = Main Org.
# specify role for unauthenticated users
;org_role = Viewer
# mask the Grafana version number for unauthenticated users
;hide_version = false
#################################### GitHub Auth ##########################
[auth.github]
;enabled = false
;allow_sign_up = true
;client_id = some_id
;client_secret = some_secret
;scopes = user:email,read:org
;auth_url = https://github.com/login/oauth/authorize
;token_url = https://github.com/login/oauth/access_token
;api_url = https://api.github.com/user
;allowed_domains =
;team_ids =
;allowed_organizations =
#################################### GitLab Auth #########################
[auth.gitlab]
;enabled = false
;allow_sign_up = true
;client_id = some_id
;client_secret = some_secret
;scopes = api
;auth_url = https://gitlab.com/oauth/authorize
;token_url = https://gitlab.com/oauth/token
;api_url = https://gitlab.com/api/v4
;allowed_domains =
;allowed_groups =
#################################### Google Auth ##########################
[auth.google]
;enabled = false
;allow_sign_up = true
;client_id = some_client_id
;client_secret = some_client_secret
;scopes = https://www.googleapis.com/auth/userinfo.profile https://www.googleapis.com/auth/userinfo.email
;auth_url = https://accounts.google.com/o/oauth2/auth
;token_url = https://accounts.google.com/o/oauth2/token
;api_url = https://www.googleapis.com/oauth2/v1/userinfo
;allowed_domains =
;hosted_domain =
#################################### Grafana.com Auth ####################
[auth.grafana_com]
;enabled = false
;allow_sign_up = true
;client_id = some_id
;client_secret = some_secret
;scopes = user:email
;allowed_organizations =
#################################### Azure AD OAuth #######################
[auth.azuread]
;name = Azure AD
;enabled = false
;allow_sign_up = true
;client_id = some_client_id
;client_secret = some_client_secret
;scopes = openid email profile
;auth_url = https://login.microsoftonline.com/<tenant-id>/oauth2/v2.0/authorize
;token_url = https://login.microsoftonline.com/<tenant-id>/oauth2/v2.0/token
;allowed_domains =
;allowed_groups =
;role_attribute_strict = false
#################################### Okta OAuth #######################
[auth.okta]
;name = Okta
;enabled = false
;allow_sign_up = true
;client_id = some_id
;client_secret = some_secret
;scopes = openid profile email groups
;auth_url = https://<tenant-id>.okta.com/oauth2/v1/authorize
;token_url = https://<tenant-id>.okta.com/oauth2/v1/token
;api_url = https://<tenant-id>.okta.com/oauth2/v1/userinfo
;allowed_domains =
;allowed_groups =
;role_attribute_path =
;role_attribute_strict = false
#################################### Generic OAuth ##########################
[auth.generic_oauth]
;enabled = false
;name = OAuth
;allow_sign_up = true
;client_id = some_id
;client_secret = some_secret
;scopes = user:email,read:org
;empty_scopes = false
;email_attribute_name = email:primary
;email_attribute_path =
;login_attribute_path =
;name_attribute_path =
;id_token_attribute_name =
;auth_url = https://foo.bar/login/oauth/authorize
;token_url = https://foo.bar/login/oauth/access_token
;api_url = https://foo.bar/user
;teams_url =
;allowed_domains =
;team_ids =
;allowed_organizations =
;role_attribute_path =
;role_attribute_strict = false
;groups_attribute_path =
;team_ids_attribute_path =
;tls_skip_verify_insecure = false
;tls_client_cert =
;tls_client_key =
;tls_client_ca =
;use_pkce = false
#################################### Basic Auth ##########################
[auth.basic]
;enabled = true
#################################### Auth Proxy ##########################
[auth.proxy]
;enabled = false
;header_name = X-WEBAUTH-USER
;header_property = username
;auto_sign_up = true
;sync_ttl = 60
;whitelist = 192.168.1.1, 192.168.2.1
;headers = Email:X-User-Email, Name:X-User-Name
# Non-ASCII strings in header values are encoded using quoted-printable encoding
;headers_encoded = false
# Read the auth proxy docs for details on what the setting below enables
;enable_login_token = false
#################################### Auth JWT ##########################
[auth.jwt]
;enabled = true
;header_name = X-JWT-Assertion
;email_claim = sub
;username_claim = sub
;jwk_set_url = https://foo.bar/.well-known/jwks.json
;jwk_set_file = /path/to/jwks.json
;cache_ttl = 60m
;expected_claims = {"aud": ["foo", "bar"]}
;key_file = /path/to/key/file
;auto_sign_up = false
#################################### Auth LDAP ##########################
[auth.ldap]
;enabled = false
;config_file = /etc/grafana/ldap.toml
;allow_sign_up = true
# LDAP background sync (Enterprise only)
# At 1 am every day
;sync_cron = "0 0 1 * * *"
;active_sync_enabled = true
#################################### AWS ###########################
[aws]
# Enter a comma-separated list of allowed AWS authentication providers.
# Options are: default (AWS SDK Default), keys (Access && secret key), credentials (Credentials field), ec2_iam_role (EC2 IAM Role)
; allowed_auth_providers = default,keys,credentials
# Allow AWS users to assume a role using temporary security credentials.
# If true, assume role will be enabled for all AWS authentication providers that are specified in aws_auth_providers
; assume_role_enabled = true
#################################### Azure ###############################
[azure]
# Azure cloud environment where Grafana is hosted
# Possible values are AzureCloud, AzureChinaCloud, AzureUSGovernment and AzureGermanCloud
# Default value is AzureCloud (i.e. public cloud)
;cloud = AzureCloud
# Specifies whether Grafana is hosted in an Azure service with Managed Identity configured (e.g. Azure Virtual Machines instance)
# If enabled, the managed identity can be used for authentication of Grafana in Azure services
# Disabled by default, needs to be explicitly enabled
;managed_identity_enabled = false
# Client ID to use for user-assigned managed identity
# Should be set for user-assigned identity and should be empty for system-assigned identity
;managed_identity_client_id =
#################################### SMTP / Emailing ##########################
[smtp]
;enabled = false
;host = localhost:25
;user =
# If the password contains # or ; you have to wrap it with triple quotes. Ex """#password;"""
;password =
;cert_file =
;key_file =
;skip_verify = false
;from_address = admin@grafana.localhost
;from_name = Grafana
# EHLO identity in SMTP dialog (defaults to instance_name)
;ehlo_identity = dashboard.example.com
# SMTP startTLS policy (defaults to 'OpportunisticStartTLS')
;startTLS_policy = NoStartTLS
[emails]
;welcome_email_on_sign_up = false
;templates_pattern = emails/*.html, emails/*.txt
;content_types = text/html
#################################### Logging ##########################
[log]
# Either "console", "file", "syslog". Default is console and file
# Use space to separate multiple modes, e.g. "console file"
;mode = console file
# Either "debug", "info", "warn", "error", "critical", default is "info"
;level = info
# optional settings to set different levels for specific loggers. Ex filters = sqlstore:debug
;filters =
# For "console" mode only
[log.console]
;level =
# log line format, valid options are text, console and json
;format = console
# For "file" mode only
[log.file]
;level =
# log line format, valid options are text, console and json
;format = text
# This enables automated log rotate(switch of following options), default is true
;log_rotate = true
# Max line number of single file, default is 1000000
;max_lines = 1000000
# Max size shift of single file, default is 28 means 1 << 28, 256MB
;max_size_shift = 28
# Segment log daily, default is true
;daily_rotate = true
# Expired days of log file(delete after max days), default is 7
;max_days = 7
[log.syslog]
;level =
# log line format, valid options are text, console and json
;format = text
# Syslog network type and address. This can be udp, tcp, or unix. If left blank, the default unix endpoints will be used.
;network =
;address =
# Syslog facility. user, daemon and local0 through local7 are valid.
;facility =
# Syslog tag. By default, the process' argv[0] is used.
;tag =
[log.frontend]
# Should Sentry javascript agent be initialized
;enabled = false
# Sentry DSN if you want to send events to Sentry.
;sentry_dsn =
# Custom HTTP endpoint to send events captured by the Sentry agent to. Default will log the events to stdout.
;custom_endpoint = /log
# Rate of events to be reported between 0 (none) and 1 (all), float
;sample_rate = 1.0
# Requests per second limit enforced an extended period, for.backend log ingestion endpoint (/log).
;log_endpoint_requests_per_second_limit = 3
# Max requests accepted per short interval of time for.backend log ingestion endpoint (/log).
;log_endpoint_burst_limit = 15
#################################### Usage Quotas ########################
[quota]
; enabled = false
#### set quotas to -1 to make unlimited. ####
# limit number of users per Org.
; org_user = 10
# limit number of dashboards per Org.
; org_dashboard = 100
# limit number of data_sources per Org.
; org_data_source = 10
# limit number of api_keys per Org.
; org_api_key = 10
# limit number of alerts per Org.
;org_alert_rule = 100
# limit number of orgs a user can create.
; user_org = 10
# Global limit of users.
; global_user = -1
# global limit of orgs.
; global_org = -1
# global limit of dashboards
; global_dashboard = -1
# global limit of api_keys
; global_api_key = -1
# global limit on number of logged in users.
; global_session = -1
# global limit of alerts
;global_alert_rule = -1
#################################### Unified Alerting ####################
[unified_alerting]
# Enable the Unified Alerting sub-system and interface. When enabled we'll migrate all of your alert rules and notification channels to the new system. New alert rules will be created and your notification channels will be converted into an Alertmanager configuration. Previous data is preserved to enable backwards compatibility but new data is removed.
;enabled = true
# Comma-separated list of organization IDs for which to disable unified alerting. Only supported if unified alerting is enabled.
;disabled_orgs =
# Specify the frequency of polling for admin config changes.
# The interval string is a possibly signed sequence of decimal numbers, followed by a unit suffix (ms, s, m, h, d), e.g. 30s or 1m.
;admin_config_poll_interval = 60s
# Specify the frequency of polling for Alertmanager config changes.
# The interval string is a possibly signed sequence of decimal numbers, followed by a unit suffix (ms, s, m, h, d), e.g. 30s or 1m.
;alertmanager_config_poll_interval = 60s
# Listen address/hostname and port to receive unified alerting messages for other.instances. The port is used for both TCP and UDP. It is assumed other.instances are also running on the same port. The default value is `0.0.0.0:9094`.
;ha_listen_address = "0.0.0.0:9094"
# Listen address/hostname and port to receive unified alerting messages for other.instances. The port is used for both TCP and UDP. It is assumed other.instances are also running on the same port. The default value is `0.0.0.0:9094`.
;ha_advertise_address = ""
# Comma-separated list of initial instances (in a format of host:port) that will form the HA cluster. Configuring this setting will enable High Availability mode for alerting.
;ha_peers = ""
# Time to wait for an instance to send a notification via the Alertmanager. In HA, each.instance will
# be assigned a position (e.g. 0, 1). We then multiply this position with the timeout to indicate how long should
# each instance wait before sending the notification to take into account replication lag.
# The interval string is a possibly signed sequence of decimal numbers, followed by a unit suffix (ms, s, m, h, d), e.g. 30s or 1m.
;ha_peer_timeout = "15s"
# The interval between sending gossip messages. By lowering this value (more frequent) gossip messages are propagated
# across cluster more quickly at the expense of increased bandwidth usage.
# The interval string is a possibly signed sequence of decimal numbers, followed by a unit suffix (ms, s, m, h, d), e.g. 30s or 1m.
;ha_gossip_interval = "200ms"
# The interval between gossip full state syncs. Setting this interval lower (more frequent) will increase convergence speeds
# across larger clusters at the expense of increased bandwidth usage.
# The interval string is a possibly signed sequence of decimal numbers, followed by a unit suffix (ms, s, m, h, d), e.g. 30s or 1m.
;ha_push_pull_interval = "60s"
# Enable or disable alerting rule execution. The alerting UI remains visible. This option has a legacy version in the `[alerting]` section that takes precedence.
;execute_alerts = true
# Alert evaluation timeout when fetching data from the datasource. This option has a legacy version in the `[alerting]` section that takes precedence.
# The timeout string is a possibly signed sequence of decimal numbers, followed by a unit suffix (ms, s, m, h, d), e.g. 30s or 1m.
;evaluation_timeout = 30s
# Number of times we'll attempt to evaluate an alert rule before giving up on that evaluation. This option has a legacy version in the `[alerting]` section that takes precedence.
;max_attempts = 3
# Minimum interval to enforce between rule evaluations. Rules will be adjusted if they are less than this value or if they are not multiple of the scheduler interval (10s). Higher values can help with resource management as we'll schedule fewer evaluations over time. This option has a legacy version in the `[alerting]` section that takes precedence.
# The interval string is a possibly signed sequence of decimal numbers, followed by a unit suffix (ms, s, m, h, d), e.g. 30s or 1m.
;min_interval = 10s
#################################### Alerting ############################
[alerting]
# Disable legacy alerting engine & UI features
;enabled = false
# Makes it possible to turn off alert execution but alerting UI is visible
;execute_alerts = true
# Default setting for new alert rules. Defaults to categorize error and timeouts as alerting. (alerting, keep_state)
;error_or_timeout = alerting
# Default setting for how Grafana handles nodata or null values in alerting. (alerting, no_data, keep_state, ok)
;nodata_or_nullvalues = no_data
# Alert notifications can include images, but rendering many images at the same time can overload the server
# This limit will protect the server from render overloading and make sure notifications are sent out quickly
;concurrent_render_limit = 5
# Default setting for alert calculation timeout. Default value is 30
;evaluation_timeout_seconds = 30
# Default setting for alert notification timeout. Default value is 30
;notification_timeout_seconds = 30
# Default setting for max attempts to sending alert notifications. Default value is 3
;max_attempts = 3
# Makes it possible to enforce a minimal interval between evaluations, to reduce load on the backend
;min_interval_seconds = 1
# Configures for how long alert annotations are stored. Default is 0, which keeps them forever.
# This setting should be expressed as a duration. Examples: 6h (hours), 10d (days), 2w (weeks), 1M (month).
;max_annotation_age =
# Configures max number of alert annotations that.stores. Default value is 0, which keeps all alert annotations.
;max_annotations_to_keep =
#################################### Annotations #########################
[annotations]
# Configures the batch size for the annotation clean-up job. This setting is used for dashboard, API, and alert annotations.
;cleanupjob_batchsize = 100
[annotations.dashboard]
# Dashboard annotations means that annotations are associated with the dashboard they are created on.
# Configures how long dashboard annotations are stored. Default is 0, which keeps them forever.
# This setting should be expressed as a duration. Examples: 6h (hours), 10d (days), 2w (weeks), 1M (month).
;max_age =
# Configures max number of dashboard annotations that.stores. Default value is 0, which keeps all dashboard annotations.
;max_annotations_to_keep =
[annotations.api]
# API annotations means that the annotations have been created using the API without any
# association with a dashboard.
# Configures how long.stores API annotations. Default is 0, which keeps them forever.
# This setting should be expressed as a duration. Examples: 6h (hours), 10d (days), 2w (weeks), 1M (month).
;max_age =
# Configures max number of API annotations that.keeps. Default value is 0, which keeps all API annotations.
;max_annotations_to_keep =
#################################### Explore #############################
[explore]
# Enable the Explore section
;enabled = true
#################################### Help #############################
[help]
# Enable the Help section
;enabled = true
#################################### Profile #############################
[profile]
# Enable the Profile section
;enabled = true
#################################### Query History #############################
[query_history]
# Enable the Query history
;enabled = false
#################################### Internal.Metrics ##########################
# Metrics available at HTTP URL /metrics and /metrics/plugins/:pluginId
[metrics]
# Disable / Enable internal metrics
;enabled = true
# Graphite Publish interval
;interval_seconds = 10
# Disable total stats (stat_totals_*) metrics to be generated
;disable_total_stats = false
#If both are set, basic auth will be required for the metrics endpoints.
; basic_auth_username =
; basic_auth_password =
# Metrics environment info adds dimensions to the `grafana_environment_info` metric, which
# can expose more information about the.instance.
[metrics.environment_info]
#exampleLabel1 = exampleValue1
#exampleLabel2 = exampleValue2
# Send internal metrics to Graphite
[metrics.graphite]
# Enable by setting the address setting (ex localhost:2003)
;address =
;prefix = prod.grafana.%(instance_name)s.
####################################.com integration ##########################
# Url used to import dashboards directly from.com
[grafana_com]
;url = https://grafana.com
#################################### Distributed tracing ############
[tracing.jaeger]
# Enable by setting the address sending traces to jaeger (ex localhost:6831)
;address = localhost:6831
# Tag that will always be included in when creating new spans. ex (tag1:value1,tag2:value2)
;always_included_tag = tag1:value1
# Type specifies the type of the sampler: const, probabilistic, rateLimiting, or remote
;sampler_type = const
# jaeger samplerconfig param
# for "const" sampler, 0 or 1 for always false/true respectively
# for "probabilistic" sampler, a probability between 0 and 1
# for "rateLimiting" sampler, the number of spans per second
# for "remote" sampler, param is the same as for "probabilistic"
# and indicates the initial sampling rate before the actual one
# is received from the mothership
;sampler_param = 1
# sampling_server_url is the URL of a sampling manager providing a sampling strategy.
;sampling_server_url =
# Whether or not to use Zipkin propagation (x-b3- HTTP headers).
;zipkin_propagation = false
# Setting this to true disables shared RPC spans.
# Not disabling is the most common setting when using Zipkin elsewhere in your infrastructure.
;disable_shared_zipkin_spans = false
[tracing.opentelemetry.jaeger]
# jaeger destination (ex http://localhost:14268/api/traces)
; address = http://localhost:14268/api/traces
#################################### External image storage ##########################
[external_image_storage]
# Used for uploading images to public servers so they can be included in slack/email messages.
# you can choose between (s3, webdav, gcs, azure_blob, local)
;provider =
[external_image_storage.s3]
;endpoint =
;path_style_access =
;bucket =
;region =
;path =
;access_key =
;secret_key =
[external_image_storage.webdav]
;url =
;public_url =
;username =
;password =
[external_image_storage.gcs]
;key_file =
;bucket =
;path =
[external_image_storage.azure_blob]
;account_name =
;account_key =
;container_name =
[external_image_storage.local]
# does not require any configuration
[rendering]
# Options to configure a remote HTTP image rendering service, e.g. using https://github.com/grafana/grafana-image-renderer.
# URL to a remote HTTP image renderer service, e.g. http://localhost:8081/render, will enable.to render panels and dashboards to PNG-images using HTTP requests to an external service.
;server_url =
# If the remote HTTP image renderer service runs on a different server than the.server you may have to configure this to a URL where.is reachable, e.g. http://grafana.domain/.
;callback_url =
# Concurrent render request limit affects when the /render HTTP endpoint is used. Rendering many images at the same time can overload the server,
# which this setting can help protect against by only allowing a certain amount of concurrent requests.
;concurrent_render_request_limit = 30
[panels]
# If set to true.will allow script tags in text panels. Not recommended as it enable XSS vulnerabilities.
;disable_sanitize_html = false
[plugins]
;enable_alpha = false
;app_tls_skip_verify_insecure = false
# Enter a comma-separated list of plugin identifiers to identify plugins to load even if they are unsigned. Plugins with modified signatures are never loaded.
;allow_loading_unsigned_plugins =
# Enable or disable installing / uninstalling / updating plugins directly from within.
;plugin_admin_enabled = false
;plugin_admin_external_manage_enabled = false
;plugin_catalog_url = https://grafana.com/grafana/plugins/
# Enter a comma-separated list of plugin identifiers to hide in the plugin catalog.
;plugin_catalog_hidden_plugins =
####################################.Live ##########################################
[live]
# max_connections to.Live WebSocket endpoint per.server instance. See.Live docs
# if you are planning to make it higher than default 100 since this can require some OS and infrastructure
# tuning. 0 disables Live, -1 means unlimited connections.
;max_connections = 100
# allowed_origins is a comma-separated list of origins that can establish connection with.Live.
# If not set then origin will be matched over root_url. Supports wildcard symbol "*".
;allowed_origins =
# engine defines an HA (high availability) engine to use for.Live. By default no engine used - in
# this case Live features work only on a single.server. Available options: "redis".
# Setting ha_engine is an EXPERIMENTAL feature.
;ha_engine =
# ha_engine_address sets a connection address for Live HA engine. Depending on engine type address format can differ.
# For now we only support Redis connection address in "host:port" format.
# This option is EXPERIMENTAL.
;ha_engine_address = "127.0.0.1:6379"
####################################.Image Renderer Plugin ##########################
[plugin.grafana-image-renderer]
# Instruct headless browser instance to use a default timezone when not provided by. e.g. when rendering panel image of alert.
# See ICUs metaZones.txt (https://cs.chromium.org/chromium/src/third_party/icu/source/data/misc/metaZones.txt) for a list of supported
# timezone IDs. Fallbacks to TZ environment variable if not set.
;rendering_timezone =
# Instruct headless browser instance to use a default language when not provided by. e.g. when rendering panel image of alert.
# Please refer to the HTTP header Accept-Language to understand how to format this value, e.g. 'fr-CH, fr;q=0.9, en;q=0.8, de;q=0.7, *;q=0.5'.
;rendering_language =
# Instruct headless browser instance to use a default device scale factor when not provided by. e.g. when rendering panel image of alert.
# Default is 1. Using a higher value will produce more detailed images (higher DPI), but will require more disk space to store an image.
;rendering_viewport_device_scale_factor =
# Instruct headless browser instance whether to ignore HTTPS errors during navigation. Per default HTTPS errors are not ignored. Due to
# the security risk it's not recommended to ignore HTTPS errors.
;rendering_ignore_https_errors =
# Instruct headless browser instance whether to capture and log verbose information when rendering an image. Default is false and will
# only capture and log error messages. When enabled, debug messages are captured and logged as well.
# For the verbose information to be included in the.server log you have to adjust the rendering log level to debug, configure
# [log].filter = rendering:debug.
;rendering_verbose_logging =
# Instruct headless browser instance whether to output its debug and error messages into running process of remote rendering service.
# Default is false. This can be useful to enable (true) when troubleshooting.
;rendering_dumpio =
# Additional arguments to pass to the headless browser instance. Default is --no-sandbox. The list of Chromium flags can be found
# here (https://peter.sh/experiments/chromium-command-line-switches/). Multiple arguments is separated with comma-character.
;rendering_args =
# You can configure the plugin to use a different browser binary instead of the pre-packaged version of Chromium.
# Please note that this is not recommended, since you may encounter problems if the installed version of Chrome/Chromium is not
# compatible with the plugin.
;rendering_chrome_bin =
# Instruct how headless browser instances are created. Default is 'default' and will create a new browser instance on each request.
# Mode 'clustered' will make sure that only a maximum of browsers/incognito pages can execute concurrently.
# Mode 'reusable' will have one browser instance and will create a new incognito page on each request.
;rendering_mode =
# When rendering_mode = clustered, you can instruct how many browsers or incognito pages can execute concurrently. Default is 'browser'
# and will cluster using browser instances.
# Mode 'context' will cluster using incognito pages.
;rendering_clustering_mode =
# When rendering_mode = clustered, you can define the maximum number of browser instances/incognito pages that can execute concurrently. Default is '5'.
;rendering_clustering_max_concurrency =
# When rendering_mode = clustered, you can specify the duration a rendering request can take before it will time out. Default is `30` seconds.
;rendering_clustering_timeout =
# Limit the maximum viewport width, height and device scale factor that can be requested.
;rendering_viewport_max_width =
;rendering_viewport_max_height =
;rendering_viewport_max_device_scale_factor =
# Change the listening host and port of the gRPC server. Default host is 127.0.0.1 and default port is 0 and will automatically assign
# a port not in use.
;grpc_host =
;grpc_port =
[enterprise]
# Path to a valid.Enterprise license.jwt file
;license_path =
[feature_toggles]
# there are currently two ways to enable feature toggles in the `grafana.ini`.
# you can either pass an array of feature you want to enable to the `enable` field or
# configure each toggle by setting the name of the toggle to true/false. Toggles set to true/false
# will take presidence over toggles in the `enable` list.
;enable = feature1,feature2
;feature1 = true
;feature2 = false
[date_formats]
# For information on what formatting patterns that are supported https://momentjs.com/docs/#/displaying/
# Default system date format used in time range picker and other places where full time is displayed
;full_date = YYYY-MM-DD HH:mm:ss
# Used by graph and other places where we only show small intervals
;interval_second = HH:mm:ss
;interval_minute = HH:mm
;interval_hour = MM/DD HH:mm
;interval_day = MM/DD
;interval_month = YYYY-MM
;interval_year = YYYY
# Experimental feature
;use_browser_locale = false
# Default timezone for user preferences. Options are 'browser' for the browser local timezone or a timezone name from IANA Time Zone database, e.g. 'UTC' or 'Europe/Amsterdam' etc.
;default_timezone = browser
[expressions]
# Enable or disable the expressions functionality.
;enabled = true
[geomap]
# Set the JSON configuration for the default basemap
;default_baselayer_config = `{
; "type": "xyz",
; "config": {
; "attribution": "Open street map",
; "url": "https://tile.openstreetmap.org/{z}/{x}/{y}.png"
; }
;}`
# Enable or disable loading other base map layers
;enable_custom_baselayers = true

View File

@ -1,82 +0,0 @@
%{ if Vars.AuthEnabled ~}
auth_enabled: true
%{ else }
auth_enabled: false
%{ endif }
server:
http_listen_port: ${Vars.HTTPPort}
grpc_listen_port: ${Vars.GRPCPort}
log_level: ${Vars.LogLevel}
ingester:
wal:
enabled: true
dir: ${Vars.StorageRoot}/wal
flush_on_shutdown: true
lifecycler:
address: 127.0.0.1
ring:
kvstore:
store: inmemory
replication_factor: 1
final_sleep: 0s
chunk_idle_period: 1h # Any chunk not receiving new logs in this time will be flushed
max_chunk_age: 1h # All chunks will be flushed when they hit this age, default is 1h
chunk_target_size: 1048576 # Loki will attempt to build chunks up to 1.5MB, flushing first if chunk_idle_period or max_chunk_age is reached first
chunk_retain_period: 30s # Must be greater than index read cache TTL if using an index cache (Default index read cache TTL is 5m)
max_transfer_retries: 0 # Chunk transfers disabled
schema_config:
configs:
- from: 2020-05-15
store: boltdb-shipper
object_store: ${Vars.ObjectStore}
schema: v11
index:
prefix: index_
period: 24h
storage_config:
boltdb_shipper:
active_index_directory: ${Vars.StorageRoot}/index
shared_store: ${Vars.SharedStore}
cache_location: ${Vars.StorageRoot}/cache
cache_ttl: 168h
%{ if Vars.ObjectStore == "filesystem" ~}
filesystem:
directory: ${Vars.StorageRoot}/chunks
%{ else }
aws:
s3: s3://${Vars.S3.APIKey}:${Vars.S3.APISecretKey}@${Vars.S3.URL}/${Vars.S3.BucketName}
s3forcepathstyle: true
%{ endif }
compactor:
shared_store: ${Vars.SharedStore}
working_directory: ${Vars.StorageRoot}/compactor
compaction_interval: 10m
limits_config:
reject_old_samples: true
reject_old_samples_max_age: 168h
chunk_store_config:
max_look_back_period: 0s
table_manager:
retention_deletes_enabled: false
retention_period: 0s
ruler:
storage:
type: local
local:
directory: ${Vars.StorageRoot}/rules
rule_path: ${Vars.StorageRoot}/rules
alertmanager_url: ${Vars.AlertManagerURL}
ring:
kvstore:
store: inmemory
enable_api: true

View File

@ -1,82 +0,0 @@
{{ if .Vars.AuthEnabled }}
auth_enabled: true
{{ else }}
auth_enabled: false
{{ end }}
server:
http_listen_port: {{ .Vars.HTTPPort }}
grpc_listen_port: {{ .Vars.GRPCPort }}
log_level: {{ .Vars.LogLevel }}
ingester:
wal:
enabled: true
dir: {{ .StorageRoot }}/wal
flush_on_shutdown: true
lifecycler:
address: 127.0.0.1
ring:
kvstore:
store: inmemory
replication_factor: 1
final_sleep: 0s
chunk_idle_period: 1h # Any chunk not receiving new logs in this time will be flushed
max_chunk_age: 1h # All chunks will be flushed when they hit this age, default is 1h
chunk_target_size: 1048576 # Loki will attempt to build chunks up to 1.5MB, flushing first if chunk_idle_period or max_chunk_age is reached first
chunk_retain_period: 30s # Must be greater than index read cache TTL if using an index cache (Default index read cache TTL is 5m)
max_transfer_retries: 0 # Chunk transfers disabled
schema_config:
configs:
- from: 2020-05-15
store: boltdb-shipper
object_store: {{ .ObjectStore }}
schema: v11
index:
prefix: index_
period: 24h
storage_config:
boltdb_shipper:
active_index_directory: {{ .StorageRoot }}/index
shared_store: {{ .SharedStore }}
cache_location: {{ .StorageRoot }}/cache
cache_ttl: 168h
{{ if eq (.ObjectStore) ("filesystem") }}
filesystem:
directory: {{ .StorageRoot }}/chunks
{{ else }}
aws:
s3: s3://{{ .S3.APIKey }}:{{ .S3.APISecretKey}}@{{ .S3.URL}}/{{ .S3.BucketName}}
s3forcepathstyle: true
{{ end }}
compactor:
shared_store: {{ .SharedStore }}
working_directory: {{ .StorageRoot }}/compactor
compaction_interval: 10m
limits_config:
reject_old_samples: true
reject_old_samples_max_age: 168h
chunk_store_config:
max_look_back_period: 0s
table_manager:
retention_deletes_enabled: false
retention_period: 0s
ruler:
storage:
type: local
local:
directory: {{ .StorageRoot }}/rules
rule_path: {{ .StorageRoot }}/rules
alertmanager_url: {{ .AlertManagerURL }}
ring:
kvstore:
store: inmemory
enable_api: true

View File

@ -1,34 +0,0 @@
# my global config
global:
scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
# scrape_timeout is set to the global default (10s).
# Prometheus DEUBG Port ${Vars.PrometheusPort}
# Alertmanager configuration
alerting:
alertmanagers:
- static_configs:
- targets:
# - alertmanager:9093
# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
# - "first_rules.yml"
# - "second_rules.yml"
# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
%{ for scraper in Vars.Scrapers ~}
- job_name: "${scraper.Name}"
metrics_path: "${scraper.MetricsPath}"
scheme: "${scraper.Scheme}"
static_configs:
- targets:
%{ for target in scraper.Targets ~}
- "${target}"
%{endfor ~}
%{ endfor ~}

38
go.mod
View File

@ -1,38 +0,0 @@
module forge.cadoles.com/pcaseiro/templatefile
go 1.18
require (
github.com/alexflint/go-arg v1.4.3
github.com/gin-gonic/gin v1.8.1
github.com/hashicorp/hcl/v2 v2.11.1
github.com/imdario/mergo v0.3.13
github.com/zclconf/go-cty v1.10.0
gopkg.in/ini.v1 v1.66.6
)
require (
github.com/agext/levenshtein v1.2.1 // indirect
github.com/alexflint/go-scalar v1.1.0 // indirect
github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect
github.com/gin-contrib/sse v0.1.0 // indirect
github.com/go-playground/locales v0.14.0 // indirect
github.com/go-playground/universal-translator v0.18.0 // indirect
github.com/go-playground/validator/v10 v10.10.0 // indirect
github.com/goccy/go-json v0.9.7 // indirect
github.com/google/go-cmp v0.5.8 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/leodido/go-urn v1.2.1 // indirect
github.com/mattn/go-isatty v0.0.14 // indirect
github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7 // indirect
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/pelletier/go-toml/v2 v2.0.1 // indirect
github.com/ugorji/go/codec v1.2.7 // indirect
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97 // indirect
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 // indirect
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654 // indirect
golang.org/x/text v0.3.6 // indirect
google.golang.org/protobuf v1.28.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
)

95
main.go Normal file
View File

@ -0,0 +1,95 @@
package main
import (
"fmt"
"os"
"encoding/json"
"text/template"
//"github.com/hashicorp/hcl2/gohcl"
"github.com/hashicorp/hcl/v2"
"github.com/hashicorp/hcl/v2/hclsyntax"
//"github.com/zclconf/go-cty/cty"
//"github.com/zclconf/go-cty/cty/gocty"
ctyjson "github.com/zclconf/go-cty/cty/json"
)
// checkErr aborts the program with a panic when e is non-nil.
// A nil error is a no-op.
func checkErr(e error) {
	if e == nil {
		return
	}
	panic(e)
}
// processGoTemplate renders the Go text/template stored in file, using the
// JSON document in config as template data, and writes the result to stdout.
// Any read, decode, parse or execution error aborts the program via checkErr.
func processGoTemplate(file string, config []byte) {
	// Decode the JSON configuration into a generic map so the template can
	// reference its keys (e.g. {{ .Name }}).
	var confData map[string]interface{}
	err := json.Unmarshal(config, &confData)
	checkErr(err)

	// Read the template
	data, err := os.ReadFile(file)
	checkErr(err)

	tpl, err := template.New("conf").Parse(string(data))
	checkErr(err)

	// BUG FIX: execute with the decoded map, not the raw JSON bytes.
	// Passing `config` ([]byte) made every field lookup fail at render time.
	checkErr(tpl.Execute(os.Stdout, confData))
}
// processHCLTemplate renders the HCL template stored in file, using the JSON
// document in config as the variable context, and prints the result to
// stdout. Any read, parse or evaluation problem aborts the program
// (checkErr / panic).
func processHCLTemplate(file string, config []byte) {
	fct, err := os.ReadFile(file)
	checkErr(err)

	// Parse the raw bytes as an HCL template expression.
	expr, diags := hclsyntax.ParseTemplate(fct, file, hcl.Pos{Line: 1, Column: 1})
	if diags.HasErrors() {
		panic(diags.Error())
	}

	// Derive a cty type from the JSON document, then decode the values with
	// it; the top-level JSON keys become the template's variables.
	ctyType, err := ctyjson.ImpliedType(config)
	checkErr(err)
	varsVal, err := ctyjson.Unmarshal(config, ctyType)
	checkErr(err)

	ctx := &hcl.EvalContext{
		Variables: varsVal.AsValueMap(),
	}

	// Reject variable names that could never be referenced from HCL.
	for n := range ctx.Variables {
		if !hclsyntax.ValidIdentifier(n) {
			panic(fmt.Errorf("invalid template variable name %q: must start with a letter, followed by zero or more letters, digits, and underscores", n))
		}
	}

	// Fail early when the template references a variable missing from the
	// provided configuration.
	for _, traversal := range expr.Variables() {
		root := traversal.RootName()
		if _, ok := ctx.Variables[root]; !ok {
			panic(fmt.Errorf("vars map does not contain key %q, referenced at %s", root, traversal[0].SourceRange()))
		}
	}

	// Evaluate the template and print the resulting string.
	val, diags := expr.Value(ctx)
	if diags.HasErrors() {
		panic(diags.Error())
	}
	fmt.Printf("%s", val.AsString())
}
// main dispatches to the template processor matching the requested type.
//
// Usage: <binary> <go|hcl> <template-file> <json-config>
func main() {
	// Validate the argument count up front so a missing argument yields a
	// usage message instead of an index-out-of-range panic.
	if len(os.Args) < 4 {
		panic(fmt.Errorf("usage: %s <go|hcl> <template-file> <json-config>", os.Args[0]))
	}

	// The template to process
	templateType := os.Args[1]
	templateFile := os.Args[2]
	config := []byte(os.Args[3])

	switch templateType {
	case "go":
		processGoTemplate(templateFile, config)
	case "hcl":
		processHCLTemplate(templateFile, config)
	default:
		panic(fmt.Errorf("unsupported template type %q", templateType))
	}
}

View File

@ -1,71 +0,0 @@
package templater
import (
"log"
"path/filepath"
"strconv"
"fmt"
"os"
"forge.cadoles.com/pcaseiro/templatefile/pkg/utils"
)
// ConfigFile describes one configuration file to render from a template
// (Go text/template or HCL) and install on the target system.
type ConfigFile struct {
    Destination  string `form:"destination" json:"destination"` // Where do we write the configuration file
    Source       string `form:"source" json:"source"`           // The template file short name
    TemplateType string `json:"type"`                           // The template file type (hcl or gotemplate)
    Mode         string `form:"mod" json:"mode"`                // The configuration file final permissions (mode), octal string
    Owner        string `json:"owner"`                          // The configuration file owner
    Service      string `json:"service"`                        // Service to restart after configuration generation
    Group        string `json:"group"`                          // The configuration file group owner
    TemplateDir  string                                         // Directory holding template sources; set by Generate
}
// Generate the configuration file from the template (hcl or json)
func (cf *ConfigFile) Generate(root string, templateDir string, values []byte) error {
var template string
cf.TemplateDir = templateDir
dest := filepath.Join(root, cf.Destination)
intMod, err := strconv.ParseInt(cf.Mode, 8, 64)
if err != nil {
return (err)
}
template, err = cf.ProcessTemplate(root, values)
if err != nil {
return fmt.Errorf("Process templates failed with error: %v", err)
}
dirname := filepath.Dir(dest)
err = os.MkdirAll(dirname, os.FileMode(int(0700)))
if err != nil {
return fmt.Errorf("Process templates failed with error: %v", err)
}
err = os.WriteFile(dest, []byte(template), os.FileMode(intMod))
if err != nil {
return fmt.Errorf("Process templates failed with error: %v", err)
}
log.Printf("\tFile %s generated\n", dest)
return nil
}
// Process the template with the provided values
func (cf *ConfigFile) ProcessTemplate(root string, values []byte) (string, error) {
var result string
var err error
if cf.TemplateType == "hcl" {
// The template is an hcl template so we call processHCLTemplate
result, err = utils.ProcessHCLTemplate(filepath.Join(cf.TemplateDir, cf.Source), values)
if err != nil {
return "", fmt.Errorf("Process HCL template failed with error: %v", err)
}
} else if cf.TemplateType == "go" {
// The template is a go template so we call processGoTemplate
result, err = utils.ProcessGoTemplate(filepath.Join(cf.TemplateDir, cf.Source), values)
if err != nil {
return "", fmt.Errorf("Process GO template failed with error: %v", err)
}
}
return result, nil
}

View File

@ -1,28 +0,0 @@
package templater
import (
"io/ioutil"
"testing"
)
// TestProcessTemplate renders the sample Loki Go template and fails the test
// immediately when a fixture cannot be read or rendering errors out.
func TestProcessTemplate(t *testing.T) {
    goFile := ConfigFile{
        Destination:  "/loki-config.test",
        Source:       "loki-local-config.tpl",
        TemplateType: "go",
        Mode:         "700",
        TemplateDir:  "../../data/templates/",
    }

    values, err := ioutil.ReadFile("../../data/config/loki-stack.json")
    if err != nil {
        // Fatal: without the fixture the rest of the test is meaningless
        // (the old code continued with nil values).
        t.Fatal(err)
    }

    data, err := goFile.ProcessTemplate("/tmp/", values)
    if err != nil {
        // t.Fatal(err) instead of t.Errorf(err.Error()): avoids treating the
        // error text as a format string, and stops the test.
        t.Fatal(err)
    }
    t.Log(data)
}

View File

@ -1,106 +0,0 @@
package templater
import (
"fmt"
"io/ioutil"
"log"
"os"
"forge.cadoles.com/pcaseiro/templatefile/pkg/utils"
"github.com/imdario/mergo"
)
// CacheFilePath is where previously resolved global variables are persisted
// between runs (see loadCache).
var CacheFilePath = "/var/cache/templater.db"

// TemplaterConfig is the root configuration: where templates live, where
// generated files are written, and the set of services to manage.
type TemplaterConfig struct {
    Name              string             `json:"Name"`
    TemplateDirectory string             `json:"TemplateDirectory"` // template sources root
    RootDirectory     string             `json:"RootDirectory"`     // prefix for generated files
    Services          map[string]Service `json:"Services"`
    GlobalService     Service            `json:"Globals"` // variables shared by every service
}
// loadCache merges globals persisted in CacheFilePath into tc.GlobalService.
// A missing or unreadable cache is not fatal: a warning is printed and the
// in-memory globals are kept as-is.
func (tc *TemplaterConfig) loadCache() error {
    var cached Service
    if err := Load(CacheFilePath, &cached); err != nil {
        fmt.Printf("Warning: No globals to load\n")
    }
    return mergo.Merge(&tc.GlobalService, cached)
}
// Create new configuration "object"
func (tc *TemplaterConfig) New(confpath string, templateDir string, rootDir string) error {
// Load stored globals if needed
lerr := tc.loadCache()
if lerr != nil {
return lerr
}
// Check if the configuration path is a Directory or a file
fileInfo, err := os.Stat(confpath)
if err != nil {
return err
}
if fileInfo.IsDir() {
// The conf path is a directory we load all the files and merge data
files, err := ioutil.ReadDir(confpath)
if err != nil {
return fmt.Errorf("Templater configuration load failed with error: %v", err)
}
for _, file := range files {
fname := fmt.Sprintf("%s/%s", confpath, file.Name())
var ntc TemplaterConfig
err := Load(fname, &ntc)
if err != nil {
return fmt.Errorf("Templater configuration load failed with error: %v", err)
}
err = mergo.Merge(tc, ntc)
if err != nil {
return fmt.Errorf("Templater configuration load failed with error: %v", err)
}
}
} else {
// The conf path is a file we only load this file (of course)
err = Load(confpath, tc)
if err != nil {
return fmt.Errorf("Confiuration read failed with error: %v", err)
}
}
tc.TemplateDirectory = templateDir
tc.RootDirectory = rootDir
return nil
}
// Process the services contained in the configuration "object"
func (tc *TemplaterConfig) ManageServices(dryRun bool) error {
// Get global vars to add on each service
gbls := tc.GlobalService.Vars
if dryRun {
utils.DryRun = dryRun
}
for name, svr := range tc.Services {
err := mergo.Merge(&svr.Vars, gbls)
if err != nil {
return err
}
log.Printf("=== Working on service %s", name)
if err := svr.Manage(tc.TemplateDirectory, tc.RootDirectory); err != nil {
return err
}
log.Printf("=== Service %s processed", name)
log.Printf("")
}
return nil
}

View File

@ -1,17 +0,0 @@
package templater
import "testing"
// TestManageService loads the sample configuration and runs the service
// reconciliation in dry-run mode, failing fast on any error.
func TestManageService(t *testing.T) {
    var hostConfig TemplaterConfig

    err := hostConfig.New("../../data/config/loki-stack.json", "../../data/templates/", "/tmp/testing")
    if err != nil {
        // t.Fatal(err) instead of t.Errorf(err.Error()): avoids using the
        // error text as a format string and stops before ManageServices
        // runs on a half-loaded config.
        t.Fatal(err)
    }

    err = hostConfig.ManageServices(true)
    if err != nil {
        t.Fatal(err)
    }
}

View File

@ -1,74 +0,0 @@
package templater
import (
"fmt"
"log"
"runtime"
"forge.cadoles.com/pcaseiro/templatefile/pkg/utils"
)
// SystemPackage describes one OS package to install via the distribution's
// package manager.
type SystemPackage struct {
    Name         string `json:"name"`
    Type         string `json:"type"`
    Action       string `json:"action"`
    OS           string `json:"os"`           // runtime.GOOS; auto-detected when empty
    Distribution string `json:"distribution"` // os-release ID_LIKE; auto-detected when empty
}
// SetDistribution fills p.Distribution from the ID_LIKE entry of the
// os-release configuration.
func (p *SystemPackage) SetDistribution() error {
    osConfig, err := utils.ReadOSRelease()
    if err != nil {
        return err
    }
    p.Distribution = osConfig["ID_LIKE"]
    return nil
}
// SetOS records the running operating system (runtime.GOOS) on the package.
// It always succeeds; the error result exists for interface symmetry.
func (p *SystemPackage) SetOS() error {
    p.OS = runtime.GOOS
    return nil
}
// Manage installs the package using the package manager that matches the
// detected distribution family. OS and Distribution are auto-detected when
// left empty in the configuration.
func (p *SystemPackage) Manage() error {
    var pkErr error
    var stdErr []byte

    // Lazily detect OS and distribution when the config did not set them.
    if p.OS == "" {
        if err := p.SetOS(); err != nil {
            return err
        }
    }
    if p.Distribution == "" {
        if err := p.SetDistribution(); err != nil {
            return err
        }
    }

    log.Printf("\tInstalling %s package\n", p.Name)

    // Pick the package manager from the distribution family.
    // (Renamed the switch variable from `os`, which shadowed the os package.)
    switch distro := p.Distribution; distro {
    case "debian", "ubuntu":
        _, stdErr, pkErr = utils.RunSystemCommand("apt", "install", "-y", p.Name)
    case "alpine":
        _, stdErr, pkErr = utils.RunSystemCommand("apk", "add", p.Name)
    case "redhat":
        _, stdErr, pkErr = utils.RunSystemCommand("yum", "install", "-y", p.Name)
    case "arch":
        _, stdErr, pkErr = utils.RunSystemCommand("pacman", "-Suy", p.Name)
    default:
        // BUG FIX: the switch is on the distribution, so report it instead
        // of the (possibly unrelated) OS name.
        pkErr = fmt.Errorf("Unsupported distribution %s [%s]", distro, stdErr)
    }

    if pkErr != nil {
        // Prefer the package manager's stderr when it said anything useful.
        var msg string
        if len(stdErr) != 0 {
            msg = string(stdErr)
        } else {
            msg = pkErr.Error()
        }
        return fmt.Errorf("Package %s, os %s, failed with error: %v", p.Name, p.OS, msg)
    }
    return nil
}

View File

@ -1,51 +0,0 @@
package templater
import (
"bytes"
"encoding/json"
"fmt"
"io"
"os"
"sync"
)
// lock serializes Save and Load so concurrent callers never interleave
// access to the same file.
var lock sync.Mutex

// marshal encodes v as tab-indented JSON and returns a reader over the
// encoded bytes.
func marshal(v interface{}) (io.Reader, error) {
    encoded, err := json.MarshalIndent(v, "", "\t")
    if err != nil {
        return nil, err
    }
    return bytes.NewReader(encoded), nil
}

// unmarshal decodes the JSON document read from r into v.
func unmarshal(r io.Reader, v interface{}) error {
    return json.NewDecoder(r).Decode(v)
}
// Save serializes v as indented JSON into the file at path, creating or
// truncating it. Access is serialized through the package lock.
func Save(path string, v interface{}) error {
    lock.Lock()
    defer lock.Unlock()

    file, err := os.Create(path)
    if err != nil {
        return fmt.Errorf("Saving Templater configuration failed with error : %v", err)
    }
    defer file.Close()

    reader, err := marshal(v)
    if err != nil {
        return err
    }
    _, err = io.Copy(file, reader)
    return err
}
// Load reads the JSON file at path into v. Access is serialized through the
// package lock; a missing file is returned as the os.Open error.
func Load(path string, v interface{}) error {
    lock.Lock()
    defer lock.Unlock()

    file, err := os.Open(path)
    if err != nil {
        return err
    }
    defer file.Close()
    return unmarshal(file, v)
}

View File

@ -1,106 +0,0 @@
package templater
import (
"bufio"
"fmt"
"io/ioutil"
"log"
"os"
"strings"
"forge.cadoles.com/pcaseiro/templatefile/pkg/utils"
)
// APKConfigFile is the Alpine repositories list that APKRepository reads
// and appends to.
var APKConfigFile = "/etc/apk/repositories"

// APKRepository manages one entry of /etc/apk/repositories.
type APKRepository struct {
    Repository
}
// urlIsPresent reports whether the repository URL already appears on some
// line of /etc/apk/repositories.
func (hr *APKRepository) urlIsPresent() (bool, error) {
    f, err := os.Open(APKConfigFile)
    if err != nil {
        return false, err
    }
    defer f.Close()

    // bufio.Scanner splits on newlines by default.
    // (Removed the dead `line` counter the old code incremented but never read.)
    scanner := bufio.NewScanner(f)
    for scanner.Scan() {
        if strings.Contains(scanner.Text(), hr.URL) {
            log.Printf("\tRepository %s already present\n", hr.Name)
            return true, nil
        }
    }
    if err := scanner.Err(); err != nil {
        return false, err
    }
    return false, nil
}
// Add appends the repository URL to /etc/apk/repositories unless it is
// already listed there.
func (hr *APKRepository) Add() error {
    present, err := hr.urlIsPresent()
    if err != nil {
        return err
    }
    if present {
        return nil
    }

    file, err := os.OpenFile(APKConfigFile, os.O_APPEND|os.O_WRONLY, 0644)
    if err != nil {
        return err
    }
    defer file.Close()

    if _, err := file.WriteString(fmt.Sprintf("%s\n", hr.URL)); err != nil {
        return err
    }
    log.Printf("Repository %s added\n", hr.Name)
    return nil
}
// Update refreshes the apk package index.
func (hr *APKRepository) Update() error {
    _, stdErr, err := utils.RunSystemCommand("apk", "update")
    if err != nil {
        return fmt.Errorf("%s [%s]", stdErr, err)
    }
    return nil
}
// FIXME
// Delete is not implemented yet: it only reads /etc/apk/repositories and
// prints each line with a DEBUG prefix — nothing is actually removed.
func (hr *APKRepository) Delete() error {
    fileBytes, err := ioutil.ReadFile(APKConfigFile)
    if err != nil {
        return err
    }
    lines := strings.Split(string(fileBytes), "\n")
    for _, line := range lines {
        fmt.Printf("DEBUG TODO %s", line)
    }
    return nil
}
// Manage reconciles the repository. In dry-run mode nothing happens; an
// enabled repository is added and the apk index refreshed, a disabled one
// is deleted.
func (hr *APKRepository) Manage() error {
    if utils.DryRun {
        return nil
    }
    if !hr.Enabled {
        return hr.Delete()
    }
    if err := hr.Add(); err != nil {
        return err
    }
    log.Println("\tUpdating apk repositories")
    return hr.Update()
}

View File

@ -1,43 +0,0 @@
package templater
import (
"fmt"
"os"
"forge.cadoles.com/pcaseiro/templatefile/pkg/utils"
)
// DebRepository manages an apt (Debian/Ubuntu) package repository entry.
type DebRepository struct {
    Repository
}
// Add writes a one-line apt source entry enabling the repository, e.g.:
//
//	deb http://fr.archive.ubuntu.com/ubuntu/ focal main restricted
func (hr *DebRepository) Add() error {
    data := fmt.Sprintf("deb %s", hr.URL)
    // BUG FIX: apt's drop-in directory is /etc/apt/sources.list.d and
    // entries must be *.list files inside it — the old code tried to write
    // a file at the (misspelled) directory path itself.
    dest := fmt.Sprintf("/etc/apt/sources.list.d/%s.list", hr.Name)
    if err := os.WriteFile(dest, []byte(data), 0600); err != nil {
        return err
    }
    return nil
}
// Update refreshes the apt package index.
func (hr *DebRepository) Update() error {
    _, stdErr, err := utils.RunSystemCommand("apt", "update", "-y")
    if err != nil {
        return fmt.Errorf("%s [%s]", stdErr, err)
    }
    return nil
}
// Delete is not implemented yet (TODO): the repository entry is left in place.
func (hr *DebRepository) Delete() error {
    //TODO
    return nil
}
// Manage adds the repository when it is enabled, otherwise deletes it
// (deletion is currently a no-op).
func (hr *DebRepository) Manage() error {
    if !hr.Enabled {
        return hr.Delete()
    }
    return hr.Add()
}

View File

@ -1,25 +0,0 @@
package templater
// HelmRepository manages a Helm chart repository. All operations are
// currently stubs that succeed without doing anything.
type HelmRepository struct {
    Repository
}

// Add is a stub; no repository is added yet.
func (hr *HelmRepository) Add() error {
    return nil
}

// Update is a stub; no index refresh happens yet.
func (hr *HelmRepository) Update() error {
    return nil
}

// Delete is a stub; no repository is removed yet.
func (hr *HelmRepository) Delete() error {
    return nil
}

// Manage adds the repository when enabled, otherwise deletes it (both are
// currently no-ops).
func (hr *HelmRepository) Manage() error {
    if hr.Enabled {
        return hr.Add()
    } else {
        return hr.Delete()
    }
}

View File

@ -1,17 +0,0 @@
package templater
// PackageRepository is the contract every repository backend (apk, deb,
// helm) implements.
type PackageRepository interface {
    Manage() error
    Update() error
    Add() error
    Delete() error
}

// Repository is the generic repository definition embedded by the concrete
// backends (APKRepository, DebRepository, HelmRepository).
type Repository struct {
    Actions PackageRepository // NOTE(review): appears unused in this file — confirm before relying on it
    Name    string `json:"name"`
    Type    string `json:"type"` // backend selector: "apk", "deb" or "helm"
    URL     string `json:"url"`
    Enabled bool   `json:"enabled"`
}

View File

@ -1,122 +0,0 @@
package templater
import (
"encoding/json"
"fmt"
"log"
"path/filepath"
)
// Service groups everything needed to deploy one service: configuration
// files to render, template variables, daemons, system users, package
// repositories and packages.
type Service struct {
    ConfigFiles []ConfigFile             `json:"ConfigFiles"`
    Vars        map[string]interface{}   `json:"Vars"` // template variables; globals are merged in by ManageServices
    Daemons     map[string]SystemService `json:"Daemons"`
    Users       map[string]SystemUser    `json:"Users"`
    Repos       map[string]Repository    `json:"Repositories"`
    Packages    map[string]SystemPackage `json:"Packages"`
}
// manageRepos reconciles every package repository in repos, dispatching on
// the repository type (helm, apk or deb).
func (s *Service) manageRepos(repos map[string]Repository) error {
    // BUG FIX: iterate over the repos argument — the old code ignored the
    // parameter and always walked s.Repos.
    for _, repo := range repos {
        switch repo.Type {
        case "helm":
            rp := HelmRepository{repo}
            if err := rp.Manage(); err != nil {
                return err
            }
        case "apk":
            rp := APKRepository{repo}
            if err := rp.Manage(); err != nil {
                return err
            }
        case "deb":
            // BUG FIX: carry the repository definition into the wrapper;
            // DebRepository{} lost the URL and the enabled flag.
            rp := DebRepository{repo}
            if err := rp.Manage(); err != nil {
                return err
            }
        }
    }
    return nil
}
// Manage reconciles the whole service in a fixed order: package
// repositories, system users, packages, configuration files, and finally
// daemons. The order is deliberate — e.g. daemons are only handled after
// their configuration files have been generated.
func (s *Service) Manage(templateDir string, rootDir string) error {
    // Manage packages repositories
    log.Print(" Managing package repositories")
    err := s.manageRepos(s.Repos)
    if err != nil {
        return err
    }

    // Create system users
    log.Print(" Managing system users")
    for _, user := range s.Users {
        err := user.Manage()
        if err != nil {
            return err
        }
    }

    // Manage system packages
    log.Print(" Installing packages")
    for _, pack := range s.Packages {
        err := pack.Manage()
        if err != nil {
            return err
        }
        log.Printf("\tPackage %s installed\n", pack.Name)
    }

    // Render every configuration file from its template.
    log.Print(" Generating configuration files\n")
    err = processConfigFiles(s, templateDir, rootDir)
    if err != nil {
        return fmt.Errorf("ProcessingTemplatesFailed with error: %v", err)
    }

    // Enable/start the service daemons.
    log.Print(" Managing services:\n")
    for _, daemon := range s.Daemons {
        err = daemon.Manage()
        if err != nil {
            return fmt.Errorf("Error managing service daemons: %v", err)
        }
    }
    return nil
}
// processConfigFiles renders every configuration file of the service, then
// restarts each service associated with a generated file.
func processConfigFiles(s *Service, templateDir string, rootDir string) error {
    // The whole service definition is exposed to the templates as JSON.
    values, err := json.Marshal(s)
    if err != nil {
        return fmt.Errorf("Error unmarshaling values on template process; %v", err)
    }

    var servicesToRestart []string
    for _, tpl := range s.ConfigFiles {
        // The template engine is chosen from the source file extension.
        fileExt := filepath.Ext(tpl.Source)
        switch fileExt {
        case ".hcl":
            tpl.TemplateType = "hcl"
        case ".tpl":
            tpl.TemplateType = "go"
        default:
            return fmt.Errorf("Unsupported file type %s, templates extensions have to be '.hcl' or '.tpl'", fileExt)
        }

        if err := tpl.Generate(rootDir, templateDir, values); err != nil {
            return fmt.Errorf("Template %s generation failed with error %v", tpl.Source, err)
        }
        if len(tpl.Service) != 0 {
            servicesToRestart = append(servicesToRestart, tpl.Service)
        }
    }

    // BUG FIX: restart every collected service — the previous code returned
    // from inside the loop, so only the first service was ever restarted.
    for _, srv := range servicesToRestart {
        sv := SystemService{
            Name:    srv,
            Enabled: true,
            Type:    "",
            ToStart: true,
        }
        if err := sv.Restart(); err != nil {
            return err
        }
    }
    return nil
}

View File

@ -1,49 +0,0 @@
package templater
import (
"fmt"
"log"
"forge.cadoles.com/pcaseiro/templatefile/pkg/utils"
)
// SystemGroup represents a system group managed via the shadow utilities.
type SystemGroup struct {
    GroupName string `json:"groupname"`
}
// exists reports whether the group is known to the system, based on a
// getent lookup succeeding.
func (sg *SystemGroup) exists() (bool, error) {
    if _, _, err := utils.RunSystemCommand("getent", "group", sg.GroupName); err != nil {
        return false, err
    }
    return true, nil
}
// Manage ensures the system group exists, creating it when missing.
func (sg *SystemGroup) Manage() error {
    if exist, _ := sg.exists(); exist {
        log.Printf("\tGroup %s already exists", sg.GroupName)
        return nil
    }
    return sg.Create()
}
// Create adds the group as a system group (groupadd -r).
func (sg *SystemGroup) Create() error {
    if _, stdErr, err := utils.RunSystemCommand("groupadd", "-r", sg.GroupName); err != nil {
        return fmt.Errorf("Group %s creation failed with error: %s %v", sg.GroupName, stdErr, err)
    }
    return nil
}
// Delete removes the system group.
func (sg *SystemGroup) Delete() error {
    // BUG FIX: groups are removed with groupdel; the old code ran userdel,
    // which deletes user accounts, not groups.
    if _, _, err := utils.RunSystemCommand("groupdel", sg.GroupName); err != nil {
        return err
    }
    return nil
}
// Update is a no-op: group attributes are not managed yet.
func (sg *SystemGroup) Update() error {
    return nil
}

View File

@ -1,132 +0,0 @@
package templater
import (
"fmt"
"log"
"os"
"forge.cadoles.com/pcaseiro/templatefile/pkg/utils"
)
// SystemService represents one init-system daemon (systemd or OpenRC) to
// enable and optionally start.
type SystemService struct {
    Name    string `json:"name"`
    Enabled bool   `json:"enabled"` // enable at boot; false means nothing is done
    Type    string `json:"type"`    // "systemd", "openrc", or ""/"auto" to detect
    ToStart bool   `json:"start"`   // also start the service now
}
// SetType auto-detects the init system: "systemd" when /run/systemd/system
// exists as a directory, "openrc" when /sbin/openrc exists as a regular
// file. If both checks match, the openrc assignment wins because it runs
// last.
func (sys *SystemService) SetType() {
    systemdRunDirectory := "/run/systemd/system"
    openRcBinaryFile := "/sbin/openrc"

    // systemd exposes /run/systemd/system as a directory at runtime.
    fileInfo, err := os.Stat(systemdRunDirectory)
    if err == nil {
        if fileInfo.IsDir() {
            sys.Type = "systemd"
        }
    }

    fileInfo, err = os.Stat(openRcBinaryFile)
    if err == nil {
        // NOTE(review): when /sbin/openrc is a directory we return without
        // setting "openrc" — presumably a sanity check on the binary path;
        // confirm the intent.
        if fileInfo.IsDir() {
            return
        }
        sys.Type = "openrc"
    }
}
// Action starts the service when it is marked to be started; otherwise it
// does nothing.
func (sys *SystemService) Action() error {
    if !sys.ToStart {
        return nil
    }
    return sys.Start()
}
// Manage enables and (optionally) starts the service when it is marked
// enabled; a disabled service only gets a log line.
func (sys *SystemService) Manage() error {
    // Detect the init system when the type is unset or left on "auto".
    if sys.Type == "" || sys.Type == "auto" {
        sys.SetType()
    }

    if !sys.Enabled {
        log.Printf("\nNothing to do for daemon %s\n", sys.Name)
        return nil
    }

    if err := sys.Enable(); err != nil {
        return err
    }
    return sys.Action()
}
// Start launches the service through the detected init system.
func (sys *SystemService) Start() error {
    log.Printf("\tStarting system service : %s\n", sys.Name)
    switch sys.Type {
    case "systemd":
        if _, stdErr, err := utils.RunSystemCommand("systemctl", "start", sys.Name); err != nil {
            return fmt.Errorf("System service %s \n * Start error:\n - %s", sys.Name, stdErr)
        }
    case "openrc":
        // BUG FIX: this is the start path — the message used to say "Enable
        // error" (copy-paste from Enable).
        if _, stdErr, err := utils.RunSystemCommand("service", sys.Name, "start"); err != nil {
            return fmt.Errorf("System service %s \n * Start error:\n - %s", sys.Name, stdErr)
        }
    default:
        return fmt.Errorf("Unsupported service type %s for service %s", sys.Type, sys.Name)
    }
    return nil
}
// Stop halts the service through the detected init system.
func (sys *SystemService) Stop() error {
    log.Printf("\tStopping system service : %s\n", sys.Name)
    switch sys.Type {
    case "systemd":
        if _, stdErr, err := utils.RunSystemCommand("systemctl", "stop", sys.Name); err != nil {
            return fmt.Errorf("System service %s \n * Stop error:\n - %s", sys.Name, stdErr)
        }
    case "openrc":
        // BUG FIX: this is the stop path — the message used to say "Enable
        // error" (copy-paste from Enable).
        if _, stdErr, err := utils.RunSystemCommand("service", sys.Name, "stop"); err != nil {
            return fmt.Errorf("System service %s \n * Stop error:\n - %s", sys.Name, stdErr)
        }
    default:
        return fmt.Errorf("Unsupported service type %s for service %s", sys.Type, sys.Name)
    }
    return nil
}
// Restart stops then starts the service, detecting the init system first
// when the type is unset or "auto".
func (sys *SystemService) Restart() error {
    if sys.Type == "" || sys.Type == "auto" {
        sys.SetType()
    }
    if err := sys.Stop(); err != nil {
        return err
    }
    return sys.Start()
}
// Enable registers the service to start at boot via the detected init
// system (systemctl enable, or rc-update add ... default).
// Switched the if/else chain to a switch for consistency with Start/Stop.
func (sys *SystemService) Enable() error {
    switch sys.Type {
    case "systemd":
        if _, stdErr, err := utils.RunSystemCommand("systemctl", "enable", sys.Name); err != nil {
            return fmt.Errorf("System service %s \n * Enable error:\n - %s", sys.Name, stdErr)
        }
        log.Printf("\tSystemd service %s enabled", sys.Name)
    case "openrc":
        if _, stdErr, err := utils.RunSystemCommand("rc-update", "add", sys.Name, "default"); err != nil {
            return fmt.Errorf("System service %s \n * Enable error:\n - %s", sys.Name, stdErr)
        }
        log.Printf("\tOpenRC service %s enabled", sys.Name)
    default:
        return fmt.Errorf("Unsupported service type %s for service %s", sys.Type, sys.Name)
    }
    return nil
}

View File

@ -1,59 +0,0 @@
package templater
import (
"fmt"
"log"
"forge.cadoles.com/pcaseiro/templatefile/pkg/utils"
)
// SystemUser represents a system account managed via the shadow utilities.
type SystemUser struct {
    UserName string `json:"username"`
    Group    string `json:"group"` // primary group, created when missing
    Home     string `json:"home"`
    Shell    string `json:"shell"`
}
// exists reports whether the user is known to the system, based on a getent
// lookup succeeding.
func (su *SystemUser) exists() (bool, error) {
    if _, _, err := utils.RunSystemCommand("getent", "passwd", su.UserName); err != nil {
        return false, err
    }
    return true, nil
}
// Manage ensures the system user exists, creating it when missing.
func (su *SystemUser) Manage() error {
    if exist, _ := su.exists(); exist {
        log.Printf("\tUser %s already exists", su.UserName)
        return nil
    }
    return su.Create()
}
// Create provisions the user's primary group, then the account itself with
// a home directory (-m), no personal group (-N), and the given group (-g).
func (su *SystemUser) Create() error {
    grp := SystemGroup{GroupName: su.Group}
    if err := grp.Manage(); err != nil {
        return err
    }

    if _, stdErr, err := utils.RunSystemCommand("useradd", "-b", su.Home, "-m", "-N", "-g", su.Group, su.UserName); err != nil {
        return fmt.Errorf("User %s creation failed with error: %s %v", su.UserName, stdErr, err)
    }
    return nil
}
// Delete removes the system user account.
func (su *SystemUser) Delete() error {
    if _, _, err := utils.RunSystemCommand("userdel", su.UserName); err != nil {
        return err
    }
    return nil
}
// Update is a no-op: user attributes are not managed yet.
func (su *SystemUser) Update() error {
    return nil
}

View File

@ -1,42 +0,0 @@
package utils
import (
"bytes"
"fmt"
"os/exec"
"github.com/hashicorp/hcl/v2"
)
// DryRun, when true, makes system-affecting helpers (RunSystemCommand and
// the repository managers that consult it) skip real execution.
var DryRun = false

// CheckErr panics on any non-nil error; used where an error is fatal.
func CheckErr(e error) {
    if e != nil {
        panic(e)
    }
}
// CheckDiags panics when the HCL diagnostics contain at least one error.
func CheckDiags(diag hcl.Diagnostics) {
    if diag.HasErrors() {
        panic(diag.Error())
    }
}
// Execute a system command ...
func RunSystemCommand(name string, arg ...string) ([]byte, []byte, error) {
if DryRun {
stdOut := []byte(fmt.Sprintf("CMD %s\n", name))
stdErr := []byte("STDERR\n")
return stdOut, stdErr, nil
} else {
var stdOut bytes.Buffer
var stdErr bytes.Buffer
cmd := exec.Command(name, arg...)
cmd.Stderr = &stdErr
cmd.Stdout = &stdOut
err := cmd.Run()
return stdOut.Bytes(), stdErr.Bytes(), err
}
}

View File

@ -1,27 +0,0 @@
package utils
import (
"fmt"
ini "gopkg.in/ini.v1"
)
// osReleaseFile is the standard location of the os-release metadata.
var osReleaseFile = "/etc/os-release"

// ReadOSRelease parses /etc/os-release and returns the ID and ID_LIKE
// entries; ID_LIKE falls back to ID when the distribution does not set it.
func ReadOSRelease() (map[string]string, error) {
    cfg, err := ini.Load(osReleaseFile)
    if err != nil {
        return nil, fmt.Errorf("Fail to read file: %v ", err)
    }

    params := make(map[string]string)
    params["ID"] = cfg.Section("").Key("ID").String()
    if idLike := cfg.Section("").Key("ID_LIKE").String(); idLike != "" {
        params["ID_LIKE"] = idLike
    } else {
        params["ID_LIKE"] = params["ID"]
    }
    return params, nil
}

View File

@ -1,89 +0,0 @@
package utils
import (
"bytes"
"encoding/json"
"fmt"
"os"
"text/template"
"github.com/hashicorp/hcl/v2"
"github.com/hashicorp/hcl/v2/hclsyntax"
"github.com/zclconf/go-cty/cty"
ctyjson "github.com/zclconf/go-cty/cty/json"
)
// The actual template processing for Go templates
func ProcessGoTemplate(file string, configValues []byte) (string, error) {
// The JSON configuration
var confData map[string]interface{}
var res bytes.Buffer
err := json.Unmarshal(configValues, &confData)
CheckErr(err)
// Read the template
templateData, err := os.ReadFile(file)
CheckErr(err)
tpl, err := template.New("conf").Parse(string(templateData))
CheckErr(err)
CheckErr(tpl.Execute(&res, confData))
return res.String(), nil
}
// The actual template processing for HCL templates
func ProcessHCLTemplate(file string, config []byte) (string, error) {
fct, err := os.ReadFile(file)
CheckErr(err)
expr, diags := hclsyntax.ParseTemplate(fct, file, hcl.Pos{Line: 0, Column: 1})
CheckDiags(diags)
// Retrieve values from JSON
var varsVal cty.Value
ctyType, err := ctyjson.ImpliedType(config)
if err != nil {
return "", err
/* Maybe one day
cexpr, diags := hclsyntax.ParseExpression(config, "", hcl.Pos{Line: 0, Column: 1})
if diags.HasErrors() {
panic(diags.Error())
}
varsVal, diags = cexpr.Value(&hcl.EvalContext{})
fmt.Println(cexpr.Variables())
checkDiags(diags)
*/
} else {
varsVal, err = ctyjson.Unmarshal(config, ctyType)
CheckErr(err)
}
ctx := &hcl.EvalContext{
Variables: varsVal.AsValueMap(),
}
for n := range ctx.Variables {
if !hclsyntax.ValidIdentifier(n) {
return "", fmt.Errorf("invalid template variable name %q: must start with a letter, followed by zero or more letters, digits, and underscores", n)
}
}
for _, traversal := range expr.Variables() {
root := traversal.RootName()
if _, ok := ctx.Variables[root]; !ok {
return "", fmt.Errorf("vars map does not contain key %q, referenced at %s", root, traversal[0].SourceRange())
}
}
val, diags := expr.Value(ctx)
if diags.HasErrors() {
return "", diags
}
return val.AsString(), nil
}

View File

@ -1,34 +0,0 @@
package utils
import (
"io/ioutil"
"testing"
)
// TestProcessHCLTemplate renders the sample HCL template, failing fast when
// the fixture is missing or rendering errors out.
func TestProcessHCLTemplate(t *testing.T) {
    // load the Full configuration from a file
    values, err := ioutil.ReadFile("../../data/config/go-test-conf.json")
    if err != nil {
        // Fatal: without the fixture the rest of the test is meaningless.
        t.Fatal(err)
    }

    data, err := ProcessHCLTemplate("../../data/templates/go-test-hcl.pktpl.hcl", values)
    if err != nil {
        // t.Fatal(err) instead of t.Errorf(err.Error()): avoids treating the
        // error text as a format string.
        t.Fatal(err)
    }
    t.Logf("%s", data)
}
// TestProcessGoTemplate renders the sample Go template, failing fast when
// the fixture is missing or rendering errors out.
func TestProcessGoTemplate(t *testing.T) {
    // load values from testing json file
    values, err := ioutil.ReadFile("../../data/config/go-test-conf.json")
    if err != nil {
        // Fatal: the old code continued with nil values after t.Error.
        t.Fatal(err)
    }

    data, err := ProcessGoTemplate("../../data/templates/go-test-go.tpl", values)
    if err != nil {
        t.Fatal(err)
    }
    t.Logf("%s", data)
}