Compare commits: v2024.6.28...v2024.9.23

4 commits:

- f37425018b
- 4801974ca3
- bf15732935
- 8317ac5b9a

Makefile (7 changed lines)

@@ -17,7 +17,8 @@ GOTEST_ARGS ?= -short
OPENWRT_DEVICE ?= 192.168.1.1

SIEGE_URLS_FILE ?= misc/siege/urls.txt
SIEGE_CONCURRENCY ?= 100
SIEGE_CONCURRENCY ?= 50
SIEGE_DURATION ?= 1M

data/bootstrap.d/dummy.yml:
    mkdir -p data/bootstrap.d

@@ -114,7 +115,7 @@ grafterm: tools/grafterm/bin/grafterm
siege:
    $(eval TMP := $(shell mktemp))
    cat $(SIEGE_URLS_FILE) | envsubst > $(TMP)
    siege -i -b -c $(SIEGE_CONCURRENCY) -f $(TMP)
    siege -R ./misc/siege/siege.conf -i -b -c $(SIEGE_CONCURRENCY) -t $(SIEGE_DURATION) -f $(TMP)
    rm -rf $(TMP)

tools/gitea-release/bin/gitea-release.sh:

@@ -150,7 +151,7 @@ run-redis:
        -v $(PWD)/data/redis:/data \
        -p 6379:6379 \
        redis:alpine3.17 \
        redis-server --save 60 1 --loglevel warning
        redis-server --save 60 1 --loglevel debug

redis-shell:
    docker exec -it \

@@ -24,6 +24,7 @@
- [(FR) - Add OpenID Connect authentication](./fr/tutorials/add-oidc-authn-layer.md)
- [(FR) - Bootstrapping a Bouncer server via configuration](./fr/tutorials/bootstrapping.md)
- [(FR) - Kubernetes integration](./fr/tutorials/kubernetes-integration.md)
- [(FR) - Profiling](./fr/tutorials/profiling.md)

### Development

@@ -1,10 +1,24 @@
# Studying Bouncer's performance

## In situ

Bouncer's configuration can enable endpoints that generate profile files in the [`pprof`](https://github.com/google/pprof) format. By default, the entry point is `.bouncer/profiling` (enabling and customizing this entry point is done via the [configuration](../../../misc/packaging/common/config.yml)).

**Example:** Visualizing Bouncer's memory usage

```bash
go tool pprof -web http://<bouncer_proxy>/.bouncer/profiling/heap
```

All available profiles are listed at `http://<bouncer_proxy>/.bouncer/profiling`.
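
If the profiling endpoints are protected by the `basicAuth` option introduced in this release (see the configuration changes further down), the profile can be downloaded with credentials first and then inspected locally. This is a minimal sketch using only the Go standard library, with a placeholder proxy address and the `prof`/`iling` credentials from the example configuration:

```go
package main

import (
    "io"
    "log"
    "net/http"
    "os"
)

func main() {
    // Placeholder address and credentials: adjust to your deployment.
    req, err := http.NewRequest(http.MethodGet, "http://bouncer.example.com/.bouncer/profiling/heap", nil)
    if err != nil {
        log.Fatal(err)
    }
    req.SetBasicAuth("prof", "iling")

    resp, err := http.DefaultClient.Do(req)
    if err != nil {
        log.Fatal(err)
    }
    defer resp.Body.Close()

    if resp.StatusCode != http.StatusOK {
        log.Fatalf("unexpected status: %s", resp.Status)
    }

    // Save the profile so it can be opened with: go tool pprof -web heap.prof
    out, err := os.Create("heap.prof")
    if err != nil {
        log.Fatal(err)
    }
    defer out.Close()

    if _, err := io.Copy(out, resp.Body); err != nil {
        log.Fatal(err)
    }
}
```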

## During development

The `./internal` package is dedicated to studying Bouncer's performance. It contains a suite of benchmarks that simulate proxies with different layer configurations in order to identify bottlenecks in request processing.

See the `./internal/bench/testdata/proxies` directory for the different test-case configurations.
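
The repository's benchmarks are not reproduced here; as an illustration only, a self-contained sketch of the general approach (driving requests through a reverse proxy inside a Go benchmark, standard library only) could look like this:

```go
package bench_test

import (
    "net/http"
    "net/http/httptest"
    "net/http/httputil"
    "net/url"
    "testing"
)

// BenchmarkReverseProxy measures request handling through a bare reverse
// proxy; the real suite plugs different layer configurations into Bouncer
// instead of httputil.NewSingleHostReverseProxy.
func BenchmarkReverseProxy(b *testing.B) {
    // Backend standing in for the proxied upstream.
    backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        w.WriteHeader(http.StatusOK)
    }))
    defer backend.Close()

    backendURL, err := url.Parse(backend.URL)
    if err != nil {
        b.Fatal(err)
    }

    proxy := httptest.NewServer(httputil.NewSingleHostReverseProxy(backendURL))
    defer proxy.Close()

    b.ResetTimer()

    for i := 0; i < b.N; i++ {
        res, err := http.Get(proxy.URL)
        if err != nil {
            b.Fatal(err)
        }
        res.Body.Close()
    }
}
```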

## Running the benchmarks
### Running the benchmarks

The simplest way is to use the `make bench` command, which runs all benchmarks sequentially. A specific benchmark can also be run with the following command:

@@ -19,7 +33,7 @@ For example:
go test -bench='BenchmarkProxies/basic-auth' -run='^$' ./internal/bench
```

## Visualizing execution profiles
### Visualizing execution profiles

You can visualize the execution profiles with the following command:

@@ -35,7 +49,7 @@ For example:
go tool pprof -web ./internal/bench/testdata/proxies/basic-auth.prof
```

## Comparing changes
### Comparing changes

```bash
# Run a first benchmark

@@ -27,7 +27,7 @@ func (s *Server) initRepositories(ctx context.Context) error {
}

func (s *Server) initRedisClient(ctx context.Context) error {
    client := setup.NewRedisClient(ctx, s.redisConfig)
    client := setup.NewSharedClient(s.redisConfig)

    s.redisClient = client

@@ -6,6 +6,7 @@ import (
    "log"
    "net"
    "net/http"
    "net/http/pprof"

    "forge.cadoles.com/cadoles/bouncer/internal/auth"
    "forge.cadoles.com/cadoles/bouncer/internal/auth/jwt"

@@ -155,6 +156,34 @@ func (s *Server) run(parentCtx context.Context, addrs chan net.Addr, errs chan e
        })
    }

    if s.serverConfig.Profiling.Enabled {
        profiling := s.serverConfig.Profiling
        logger.Info(ctx, "enabling profiling", logger.F("endpoint", profiling.Endpoint))

        router.Group(func(r chi.Router) {
            if profiling.BasicAuth != nil {
                logger.Info(ctx, "enabling authentication on metrics endpoint")

                r.Use(middleware.BasicAuth(
                    "profiling",
                    profiling.BasicAuth.CredentialsMap(),
                ))
            }

            r.Route(string(profiling.Endpoint), func(r chi.Router) {
                r.HandleFunc("/", pprof.Index)
                r.HandleFunc("/cmdline", pprof.Cmdline)
                r.HandleFunc("/profile", pprof.Profile)
                r.HandleFunc("/symbol", pprof.Symbol)
                r.HandleFunc("/trace", pprof.Trace)
                r.HandleFunc("/{name}", func(w http.ResponseWriter, r *http.Request) {
                    name := chi.URLParam(r, "name")
                    pprof.Handler(name).ServeHTTP(w, r)
                })
            })
        })
    }

    router.Route("/api/v1", func(r chi.Router) {
        r.Group(func(r chi.Router) {
            r.Use(auth.Middleware(

@@ -35,12 +35,15 @@ func RunCommand() *cli.Command {
logger.SetLevel(logger.Level(conf.Logger.Level))

projectVersion := ctx.String("projectVersion")
flushSentry, err := setup.SetupSentry(ctx.Context, conf.Admin.Sentry, projectVersion)
if err != nil {
    return errors.Wrap(err, "could not initialize sentry client")
}

defer flushSentry()
if conf.Proxy.Sentry.DSN != "" {
    flushSentry, err := setup.SetupSentry(ctx.Context, conf.Proxy.Sentry, projectVersion)
    if err != nil {
        return errors.Wrap(err, "could not initialize sentry client")
    }

    defer flushSentry()
}

integrations, err := setup.SetupIntegrations(ctx.Context, conf)
if err != nil {

@@ -30,12 +30,15 @@ func RunCommand() *cli.Command {
logger.SetLevel(logger.Level(conf.Logger.Level))

projectVersion := ctx.String("projectVersion")
flushSentry, err := setup.SetupSentry(ctx.Context, conf.Proxy.Sentry, projectVersion)
if err != nil {
    return errors.Wrap(err, "could not initialize sentry client")
}

defer flushSentry()
if conf.Proxy.Sentry.DSN != "" {
    flushSentry, err := setup.SetupSentry(ctx.Context, conf.Proxy.Sentry, projectVersion)
    if err != nil {
        return errors.Wrap(err, "could not initialize sentry client")
    }

    defer flushSentry()
}

layers, err := setup.GetLayers(ctx.Context, conf)
if err != nil {

@@ -1,20 +1,22 @@
package config

type AdminServerConfig struct {
    HTTP    HTTPConfig    `yaml:"http"`
    CORS    CORSConfig    `yaml:"cors"`
    Auth    AuthConfig    `yaml:"auth"`
    Metrics MetricsConfig `yaml:"metrics"`
    Sentry  SentryConfig  `yaml:"sentry"`
    HTTP      HTTPConfig      `yaml:"http"`
    CORS      CORSConfig      `yaml:"cors"`
    Auth      AuthConfig      `yaml:"auth"`
    Metrics   MetricsConfig   `yaml:"metrics"`
    Profiling ProfilingConfig `yaml:"profiling"`
    Sentry    SentryConfig    `yaml:"sentry"`
}

func NewDefaultAdminServerConfig() AdminServerConfig {
    return AdminServerConfig{
        HTTP:    NewHTTPConfig("127.0.0.1", 8081),
        CORS:    NewDefaultCORSConfig(),
        Auth:    NewDefaultAuthConfig(),
        Metrics: NewDefaultMetricsConfig(),
        Sentry:  NewDefaultSentryConfig(),
        HTTP:      NewHTTPConfig("127.0.0.1", 8081),
        CORS:      NewDefaultCORSConfig(),
        Auth:      NewDefaultAuthConfig(),
        Metrics:   NewDefaultMetricsConfig(),
        Sentry:    NewDefaultSentryConfig(),
        Profiling: NewDefaultProfilingConfig(),
    }
}

internal/config/profiling.go (new file, 15 lines)

@@ -0,0 +1,15 @@
package config

type ProfilingConfig struct {
    Enabled   InterpolatedBool   `yaml:"enabled"`
    Endpoint  InterpolatedString `yaml:"endpoint"`
    BasicAuth *BasicAuthConfig   `yaml:"basicAuth"`
}

func NewDefaultProfilingConfig() ProfilingConfig {
    return ProfilingConfig{
        Enabled:   true,
        Endpoint:  "/.bouncer/profiling",
        BasicAuth: nil,
    }
}

@@ -10,6 +10,7 @@ type ProxyServerConfig struct {
    Debug     InterpolatedBool `yaml:"debug"`
    HTTP      HTTPConfig       `yaml:"http"`
    Metrics   MetricsConfig    `yaml:"metrics"`
    Profiling ProfilingConfig  `yaml:"profiling"`
    Transport TransportConfig  `yaml:"transport"`
    Dial      DialConfig       `yaml:"dial"`
    Sentry    SentryConfig     `yaml:"sentry"`

@@ -27,6 +28,7 @@ func NewDefaultProxyServerConfig() ProxyServerConfig {
        Sentry:    NewDefaultSentryConfig(),
        Cache:     NewDefaultCacheConfig(),
        Templates: NewDefaultTemplatesConfig(),
        Profiling: NewDefaultProfilingConfig(),
    }
}

@@ -15,6 +15,8 @@ type RedisConfig struct {
    WriteTimeout   InterpolatedDuration `yaml:"writeTimeout"`
    DialTimeout    InterpolatedDuration `yaml:"dialTimeout"`
    LockMaxRetries InterpolatedInt      `yaml:"lockMaxRetries"`
    MaxRetries     InterpolatedInt      `yaml:"maxRetries"`
    PingInterval   InterpolatedDuration `yaml:"pingInterval"`
}

func NewDefaultRedisConfig() RedisConfig {

@@ -25,5 +27,7 @@ func NewDefaultRedisConfig() RedisConfig {
        WriteTimeout:   InterpolatedDuration(30 * time.Second),
        DialTimeout:    InterpolatedDuration(30 * time.Second),
        LockMaxRetries: 10,
        MaxRetries:     3,
        PingInterval:   InterpolatedDuration(30 * time.Second),
    }
}

@@ -28,10 +28,10 @@ func NewDefaultSentryConfig() SentryConfig {
        Debug:              false,
        FlushTimeout:       NewInterpolatedDuration(2 * time.Second),
        AttachStacktrace:   true,
        SampleRate:         1,
        SampleRate:         0.2,
        EnableTracing:      true,
        TracesSampleRate:   0.2,
        ProfilesSampleRate: 1,
        ProfilesSampleRate: 0.2,
        IgnoreErrors:       []string{},
        SendDefaultPII:     false,
        ServerName:         "",

@@ -65,7 +65,7 @@ func (q *Queue) Middleware(layer *store.Layer) proxy.Middleware {
            return
        }

        defer q.updateMetrics(ctx, layer.Proxy, layer.Name, options)
        defer q.updateMetrics(layer.Proxy, layer.Name, options)

        cookieName := q.getCookieName(layer.Name)

@@ -217,7 +217,9 @@ func (q *Queue) refreshQueue(ctx context.Context, layerName store.LayerName, kee
    }
}

func (q *Queue) updateMetrics(ctx context.Context, proxyName store.ProxyName, layerName store.LayerName, options *LayerOptions) {
func (q *Queue) updateMetrics(proxyName store.ProxyName, layerName store.LayerName, options *LayerOptions) {
    ctx := context.Background()

    // Update queue capacity metric
    metricQueueCapacity.With(
        prometheus.Labels{

@@ -9,7 +9,7 @@ import (
)

func (s *Server) initRepositories(ctx context.Context) error {
    client := setup.NewRedisClient(ctx, s.redisConfig)
    client := setup.NewSharedClient(s.redisConfig)

    if err := s.initProxyRepository(ctx, client); err != nil {
        return errors.WithStack(err)

@@ -8,6 +8,7 @@ import (
    "net"
    "net/http"
    "net/http/httputil"
    "net/http/pprof"
    "net/url"
    "path/filepath"
    "strconv"

@@ -146,6 +147,34 @@ func (s *Server) run(parentCtx context.Context, addrs chan net.Addr, errs chan e
        })
    }

    if s.serverConfig.Profiling.Enabled {
        profiling := s.serverConfig.Profiling
        logger.Info(ctx, "enabling profiling", logger.F("endpoint", profiling.Endpoint))

        router.Group(func(r chi.Router) {
            if profiling.BasicAuth != nil {
                logger.Info(ctx, "enabling authentication on metrics endpoint")

                r.Use(middleware.BasicAuth(
                    "profiling",
                    profiling.BasicAuth.CredentialsMap(),
                ))
            }

            r.Route(string(profiling.Endpoint), func(r chi.Router) {
                r.HandleFunc("/", pprof.Index)
                r.HandleFunc("/cmdline", pprof.Cmdline)
                r.HandleFunc("/profile", pprof.Profile)
                r.HandleFunc("/symbol", pprof.Symbol)
                r.HandleFunc("/trace", pprof.Trace)
                r.HandleFunc("/{name}", func(w http.ResponseWriter, r *http.Request) {
                    name := chi.URLParam(r, "name")
                    pprof.Handler(name).ServeHTTP(w, r)
                })
            })
        })
    }

    router.Group(func(r chi.Router) {
        r.Use(director.Middleware())

@@ -23,7 +23,7 @@ func init() {
}

func setupAuthnOIDCLayer(conf *config.Config) (director.Layer, error) {
    rdb := newRedisClient(conf.Redis)
    rdb := NewSharedClient(conf.Redis)
    adapter := redis.NewStoreAdapter(rdb)
    store := session.NewStore(adapter)

@@ -27,7 +27,7 @@ func SetupIntegrations(ctx context.Context, conf *config.Config) ([]integration.
}

func setupKubernetesIntegration(ctx context.Context, conf *config.Config) (*kubernetes.Integration, error) {
    client := newRedisClient(conf.Redis)
    client := NewSharedClient(conf.Redis)
    locker := redis.NewLocker(client, 10)

    integration := kubernetes.NewIntegration(

@@ -9,7 +9,7 @@ import (
)

func SetupLocker(ctx context.Context, conf *config.Config) (lock.Locker, error) {
    client := newRedisClient(conf.Redis)
    client := NewSharedClient(conf.Redis)
    locker := redis.NewLocker(client, int(conf.Redis.LockMaxRetries))
    return locker, nil
}

@@ -3,19 +3,11 @@ package setup
import (
    "context"

    "forge.cadoles.com/cadoles/bouncer/internal/config"
    "forge.cadoles.com/cadoles/bouncer/internal/store"
    redisStore "forge.cadoles.com/cadoles/bouncer/internal/store/redis"
    "github.com/redis/go-redis/v9"
)

func NewRedisClient(ctx context.Context, conf config.RedisConfig) redis.UniversalClient {
    return redis.NewUniversalClient(&redis.UniversalOptions{
        Addrs:      conf.Adresses,
        MasterName: string(conf.Master),
    })
}

func NewProxyRepository(ctx context.Context, client redis.UniversalClient) (store.ProxyRepository, error) {
    return redisStore.NewProxyRepository(client, redisStore.DefaultTxMaxAttempts, redisStore.DefaultTxBaseDelay), nil
}

@@ -35,6 +35,6 @@ func setupQueueLayer(conf *config.Config) (director.Layer, error) {
}

func newQueueAdapter(redisConf config.RedisConfig) (queue.Adapter, error) {
    rdb := newRedisClient(redisConf)
    rdb := NewSharedClient(redisConf)
    return queueRedis.NewAdapter(rdb, 2), nil
}

@@ -1,14 +1,38 @@
package setup

import (
    "context"
    "strings"
    "sync"
    "time"

    "forge.cadoles.com/cadoles/bouncer/internal/config"
    "github.com/pkg/errors"
    "github.com/redis/go-redis/v9"
    "gitlab.com/wpetit/goweb/logger"
)

var clients sync.Map

func NewSharedClient(conf config.RedisConfig) redis.UniversalClient {
    key := strings.Join(conf.Adresses, "|") + "|" + string(conf.Master)

    value, exists := clients.Load(key)
    if exists {
        if client, ok := (value).(redis.UniversalClient); ok {
            return client
        }
    }

    client := newRedisClient(conf)

    clients.Store(key, client)

    return client
}

func newRedisClient(conf config.RedisConfig) redis.UniversalClient {
    return redis.NewUniversalClient(&redis.UniversalOptions{
    client := redis.NewUniversalClient(&redis.UniversalOptions{
        Addrs:       conf.Adresses,
        MasterName:  string(conf.Master),
        ReadTimeout: time.Duration(conf.ReadTimeout),

@@ -16,5 +40,33 @@ func newRedisClient(conf config.RedisConfig) redis.UniversalClient {
        DialTimeout:           time.Duration(conf.DialTimeout),
        RouteByLatency:        true,
        ContextTimeoutEnabled: true,
        MaxRetries:            int(conf.MaxRetries),
    })

    go func() {
        ctx := logger.With(context.Background(),
            logger.F("adresses", conf.Adresses),
            logger.F("master", conf.Master),
        )

        timer := time.NewTicker(time.Duration(conf.PingInterval))
        defer timer.Stop()

        connected := true

        for range timer.C {
            if _, err := client.Ping(ctx).Result(); err != nil {
                logger.Error(ctx, "redis disconnected", logger.E(errors.WithStack(err)))
                connected = false
                continue
            }

            if !connected {
                logger.Info(ctx, "redis reconnected")
                connected = true
            }
        }
    }()

    return client
}
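
The hunk above replaces per-call client construction with a process-wide cache keyed by the address list and master name, plus a background ping loop that logs disconnections and reconnections. As an illustration of the caching behaviour only (the `internal/setup` package itself cannot be imported from outside the module), a local helper following the same pattern hands every subsystem the same client:

```go
package main

import (
    "fmt"
    "strings"
    "sync"

    "github.com/redis/go-redis/v9"
)

var clients sync.Map

// sharedClient mirrors the NewSharedClient idea above: one
// redis.UniversalClient per (addresses, master) pair, reused everywhere.
func sharedClient(addrs []string, master string) redis.UniversalClient {
    key := strings.Join(addrs, "|") + "|" + master

    if value, ok := clients.Load(key); ok {
        if client, ok := value.(redis.UniversalClient); ok {
            return client
        }
    }

    client := redis.NewUniversalClient(&redis.UniversalOptions{
        Addrs:      addrs,
        MasterName: master,
    })
    clients.Store(key, client)

    return client
}

func main() {
    a := sharedClient([]string{"127.0.0.1:6379"}, "")
    b := sharedClient([]string{"127.0.0.1:6379"}, "")
    fmt.Println(a == b) // true: both callers reuse the same connection pool
}
```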

@@ -91,12 +91,14 @@ func WithRetry(ctx context.Context, client redis.UniversalClient, key string, fn
                continue
            }

            return err
            return errors.WithStack(err)
        }

        return nil
    }

    logger.Error(ctx, "redis error", logger.E(errors.WithStack(err)))

    return errors.WithStack(redis.TxFailedErr)
}

@@ -49,6 +49,19 @@ admin:
    # Set to null to disable authentication
    basicAuth: null

  # Profiling
  profiling:
    # Enable or disable the profiling endpoints
    enabled: true
    # Route under which the profiling endpoints are exposed
    endpoint: /.bouncer/profiling
    # "Basic auth" authentication on the profiling
    # endpoints
    # Set to null to disable authentication
    basicAuth:
      credentials:
        prof: iling

  # Sentry integration configuration
  # See https://pkg.go.dev/github.com/getsentry/sentry-go?utm_source=godoc#ClientOptions
  sentry:

@@ -59,7 +72,7 @@ admin:
    sampleRate: 1
    enableTracing: true
    tracesSampleRate: 0.2
    profilesSampleRate: 1
    profilesSampleRate: 0.2
    ignoreErrors: []
    sendDefaultPII: false
    serverName: ""

@@ -99,6 +112,19 @@ proxy:
      credentials:
        prom: etheus

  # Profiling
  profiling:
    # Enable or disable the profiling endpoints
    enabled: true
    # Route under which the profiling endpoints are exposed
    endpoint: /.bouncer/profiling
    # "Basic auth" authentication on the profiling
    # endpoints
    # Set to null to disable authentication
    basicAuth:
      credentials:
        prof: iling

  # Local cache configuration
  # for proxy/layer data
  cache:

@@ -164,6 +190,8 @@ redis:
  writeTimeout: 30s
  readTimeout: 30s
  dialTimeout: 30s
  maxRetries: 3
  pingInterval: 30s

# Logging configuration
logger:

misc/siege/siege.conf (new file, 79 lines)

@@ -0,0 +1,79 @@
# Updated by Siege %_VERSION%, %_DATE%
# Copyright 2000-2016 by %_AUTHOR%
#
# Siege configuration file -- edit as necessary
# For more information about configuring and running this program,
# visit: http://www.joedog.org/

#
#
# Verbose mode: With this feature enabled, siege will print the
# result of each transaction to stdout. (Enabled by default)
#
# ex: verbose = true|false
#
verbose = true

#
# Color mode: This option works in conjunction with verbose mode.
# It tells siege whether or not it should display its output in
# color-coded output. (Enabled by default)
#
# ex: color = on | off
#
color = on

#
# Cache revalidation. Siege supports cache revalidation for both ETag
# and Last-modified headers. If a copy is still fresh, the server
# responds with 304. While this feature is required for HTTP/1.1, it
# may not be welcomed for load testing. We allow you to breach the
# protocol and turn off caching
#
# HTTP/1.1 200 0.00 secs: 2326 bytes ==> /apache_pb.gif
# HTTP/1.1 304 0.00 secs: 0 bytes ==> /apache_pb.gif
# HTTP/1.1 304 0.00 secs: 0 bytes ==> /apache_pb.gif
#
# Siege also supports Cache-control headers. Consider this server
# response: Cache-Control: max-age=3
# That tells siege to cache the file for three seconds. While it
# doesn't actually store the file, it will logically grab it from
# its cache. In verbose output, it designates a cached resource
# with (c):
#
# HTTP/1.1 200 0.25 secs: 159 bytes ==> GET /expires/
# HTTP/1.1 200 1.48 secs: 498419 bytes ==> GET /expires/Otter_in_Southwold.jpg
# HTTP/1.1 200 0.24 secs: 159 bytes ==> GET /expires/
# HTTP/1.1 200(C) 0.00 secs: 0 bytes ==> GET /expires/Otter_in_Southwold.jpg
#
# NOTE: with color enabled, cached URLs appear in green
#
# ex: cache = true
#
cache = true

#
# Cookie support: by default siege accepts cookies. This directive is
# available to disable that support. Set cookies to 'false' to refuse
# cookies. Set it to 'true' to accept them. The default value is true.
# If you want to maintain state with the server, then this MUST be set
# to true.
#
# ex: cookies = false
#
cookies = true

#
# Failures: This is the number of total connection failures allowed
# before siege aborts. Connection failures (timeouts, socket failures,
# etc.) are combined with 400 and 500 level errors in the final stats,
# but those errors do not count against the abort total. If you set
# this total to 10, then siege will abort after ten socket timeouts,
# but it will NOT abort after ten 404s. This is designed to prevent a
# run-away mess on an unattended siege.
#
# The default value is 1024
#
# ex: failures = 50
#
failures = -1