Compare commits

...

5 Commits

| SHA1 | Message | CI status | Date |
|------|---------|-----------|------|
| 4801974ca3 | fix(queue): prevent metrics update cancellation on aborted http requests (#39) | All checks were successful (Cadoles/bouncer/pipeline/head: This commit looks good) | 2024-09-23 10:34:24 +02:00 |
| bf15732935 | feat: disable sentry integration when no dsn is defined | All checks were successful (Cadoles/bouncer/pipeline/head: This commit looks good) | 2024-09-23 10:13:04 +02:00 |
| 8317ac5b9a | feat: add configurable profiling endpoints (#38) | | 2024-09-23 10:12:42 +02:00 |
| f35384c0f3 | feat: create profiling package + rewrite profiling tutorial | Some checks reported warnings (Cadoles/bouncer/pipeline/head: This commit was not built) | 2024-06-28 17:44:51 +02:00 |
| c73fe8cca5 | feat(rewriter): pass structured url to ease request rewriting | All checks were successful (Cadoles/bouncer/pipeline/head: This commit looks good) | 2024-06-28 10:46:38 +02:00 |
26 changed files with 680 additions and 238 deletions

.gitignore vendored
View File

@ -10,4 +10,4 @@
/out
.dockerconfigjson
*.prof
proxy.test
*.test

View File

@ -131,7 +131,7 @@ tools/grafterm/bin/grafterm:
GOBIN=$(PWD)/tools/grafterm/bin go install github.com/slok/grafterm/cmd/grafterm@v0.2.0
bench:
go test -bench=. -run '^$$' -count=10 ./...
go test -bench=. -run '^$$' ./internal/bench
tools/benchstat/bin/benchstat:
mkdir -p tools/benchstat/bin

View File

@ -24,6 +24,7 @@
- [(FR) - Add an OpenID Connect authentication layer](./fr/tutorials/add-oidc-authn-layer.md)
- [(FR) - Bootstrapping a Bouncer server through configuration](./fr/tutorials/bootstrapping.md)
- [(FR) - Kubernetes integration](./fr/tutorials/kubernetes-integration.md)
- [(FR) - Profiling](./fr/tutorials/profiling.md)
### Development

View File

@ -66,7 +66,21 @@ The request currently being processed.
{
  method: "string", // HTTP method
  host: "string", // Host name (`Host`) associated with the request
  url: "string", // URL associated with the request
  url: { // URL associated with the request, in structured form
    "scheme": "string", // URL scheme
    "opaque": "string", // Opaque URL data
    "user": { // URL credentials (Basic Auth)
      "username": "",
      "password": ""
    },
    "host": "string", // URL host name (<domain>:<port>)
    "path": "string", // URL path (sanitized form)
    "rawPath": "string", // URL path (raw form)
    "rawQuery": "string", // URL query string (raw form)
    "fragment" : "string", // URL fragment (sanitized form)
    "rawFragment" : "string" // URL fragment (raw form)
  },
  rawUrl: "string", // URL associated with the request (sanitized form)
  proto: "string", // Version of the protocol used
  protoMajor: "int", // Major version number of the protocol used
  protoMinor: "int", // Minor version number of the protocol used
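For instance, a `rewriter` layer rule can now target individual URL components instead of parsing the raw URL string; a minimal sketch of layer options, mirroring the `host-rewriter` bench case added in this changeset:

```yaml
# Rewrite the Host header from the structured URL and tag proxied
# requests with a custom header.
rules:
  request:
    - set_host(request.url.host)
    - set_header("X-Proxied-With", "bouncer")
```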

View File

@ -1,31 +1,68 @@
# Studying Bouncer's performance
1. Run a benchmark of the proxy
```shell
go test -bench=. -run '^$' -count=5 -cpuprofile bench_proxy.prof ./internal/proxy
```
2. Visualize execution times
```shell
go tool pprof -web bench_proxy.prof
```
3. Compare performance from one run to the next
```shell
# Run a first benchmark
go test -bench=. -run '^$' -count=10 ./internal/proxy > bench_before.txt
# Make changes to the sources
# Run a second benchmark
go test -bench=. -run '^$' -count=10 ./internal/proxy > bench_after.txt
# Install the benchstat tool
make tools/benchstat/bin/benchstat
# Compare the reports
tools/benchstat/bin/benchstat bench_before.txt bench_after.txt
```
## In situ
Bouncer's configuration can enable endpoints that generate profile files in the [`pprof`](https://github.com/google/pprof) format. By default, the entry point is `.bouncer/profiling` (enabling and customizing this entry point is done through the [configuration](../../../misc/packaging/common/config.yml)).
**Example:** Visualize Bouncer's memory usage
```bash
go tool pprof -web http://<bouncer_proxy>/.bouncer/profiling/heap
```
All available profiles are listed at `http://<bouncer_proxy>/.bouncer/profiling`.
## In development
The `./internal/bench` package is dedicated to studying Bouncer's performance. It contains a suite of benchmarks that simulate proxies with different layer configurations in order to identify bottlenecks in request processing.
See the `./internal/bench/testdata/proxies` directory for the different case configurations; a minimal sketch of such a case file is shown below.
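For reference, a bench case is a small YAML file describing the proxy to simulate and, optionally, the request to send (see the `fetch` block of the `basic-auth` case further down in this diff); a minimal sketch:

```yaml
# Minimal bench case: a proxy accepting any Host and forwarding to an
# ephemeral test backend (the benchmark spawns one when "to" is empty).
proxy:
  from: ["*"]
  to: ""
```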
### Running the benchmarks
The simplest way is to use the `make bench` command, which runs all the benchmarks sequentially. It is also possible to run a specific benchmark with the following command:
```bash
go test -bench="BenchmarkProxies/$BENCH_CASE" -run='^$' ./internal/bench
```
For example:
```bash
# To run ./internal/bench/testdata/proxies/basic-auth.yml
go test -bench='BenchmarkProxies/basic-auth' -run='^$' ./internal/bench
```
### Visualizing execution profiles
You can visualize execution profiles with the following command:
```shell
go tool pprof -web path/to/file.prof
```
By default, running the benchmarks automatically creates profile files in the `./internal/bench/testdata/proxies` directory.
For example:
```shell
go tool pprof -web ./internal/bench/testdata/proxies/basic-auth.prof
```
### Comparing changes
```bash
# Run a first benchmark
go test -bench="BenchmarkProxies/$BENCH_CASE" -run='^$' ./internal/bench
# Save the profile file
cp ./internal/bench/testdata/proxies/$BENCH_CASE.prof ./internal/bench/testdata/proxies/$BENCH_CASE-prev.prof
# Make changes to the sources
# Run a second benchmark
go test -bench="BenchmarkProxies/$BENCH_CASE" -run='^$' ./internal/bench
# Visualize the difference between the two profiles
go tool pprof -web -base=./internal/bench/testdata/proxies/$BENCH_CASE-prev.prof ./internal/bench/testdata/proxies/$BENCH_CASE.prof
```

View File

@ -6,6 +6,7 @@ import (
"log"
"net"
"net/http"
"net/http/pprof"
"forge.cadoles.com/cadoles/bouncer/internal/auth"
"forge.cadoles.com/cadoles/bouncer/internal/auth/jwt"
@ -155,6 +156,34 @@ func (s *Server) run(parentCtx context.Context, addrs chan net.Addr, errs chan e
})
}
if s.serverConfig.Profiling.Enabled {
profiling := s.serverConfig.Profiling
logger.Info(ctx, "enabling profiling", logger.F("endpoint", profiling.Endpoint))
router.Group(func(r chi.Router) {
if profiling.BasicAuth != nil {
logger.Info(ctx, "enabling authentication on metrics endpoint")
r.Use(middleware.BasicAuth(
"profiling",
profiling.BasicAuth.CredentialsMap(),
))
}
r.Route(string(profiling.Endpoint), func(r chi.Router) {
r.HandleFunc("/", pprof.Index)
r.HandleFunc("/cmdline", pprof.Cmdline)
r.HandleFunc("/profile", pprof.Profile)
r.HandleFunc("/symbol", pprof.Symbol)
r.HandleFunc("/trace", pprof.Trace)
r.HandleFunc("/{name}", func(w http.ResponseWriter, r *http.Request) {
name := chi.URLParam(r, "name")
pprof.Handler(name).ServeHTTP(w, r)
})
})
})
}
router.Route("/api/v1", func(r chi.Router) {
r.Group(func(r chi.Router) {
r.Use(auth.Middleware(

View File

@ -0,0 +1,300 @@
package proxy_test
import (
"context"
"io"
"log"
"net/http"
"net/http/httptest"
"net/http/httputil"
"net/url"
"os"
"path/filepath"
"runtime/pprof"
"strings"
"testing"
"time"
"forge.cadoles.com/Cadoles/go-proxy"
"forge.cadoles.com/cadoles/bouncer/internal/cache/memory"
"forge.cadoles.com/cadoles/bouncer/internal/cache/ttl"
"forge.cadoles.com/cadoles/bouncer/internal/config"
"forge.cadoles.com/cadoles/bouncer/internal/proxy/director"
"forge.cadoles.com/cadoles/bouncer/internal/store"
redisStore "forge.cadoles.com/cadoles/bouncer/internal/store/redis"
"github.com/pkg/errors"
"github.com/redis/go-redis/v9"
"gopkg.in/yaml.v3"
"forge.cadoles.com/cadoles/bouncer/internal/setup"
)
func BenchmarkProxies(b *testing.B) {
proxyFiles, err := filepath.Glob("testdata/proxies/*.yml")
if err != nil {
b.Fatalf("%+v", errors.WithStack(err))
}
for _, f := range proxyFiles {
name := strings.TrimSuffix(filepath.Base(f), filepath.Ext(f))
b.Run(name, func(b *testing.B) {
conf, err := loadProxyBenchConfig(f)
if err != nil {
b.Fatalf("%+v", errors.Wrapf(err, "could notre load bench config"))
}
proxy, backend, err := createProxy(name, conf, b.Logf)
if err != nil {
b.Fatalf("%+v", errors.Wrapf(err, "could not create proxy"))
}
defer proxy.Close()
if backend != nil {
defer backend.Close()
}
client := proxy.Client()
proxyURL, err := url.Parse(proxy.URL)
if err != nil {
b.Fatalf("%+v", errors.Wrapf(err, "could not parse proxy url"))
}
if conf.Fetch.URL.Path != "" {
proxyURL.Path = conf.Fetch.URL.Path
}
if conf.Fetch.URL.RawQuery != "" {
proxyURL.RawQuery = conf.Fetch.URL.RawQuery
}
if conf.Fetch.URL.User.Username != "" || conf.Fetch.URL.User.Password != "" {
proxyURL.User = url.UserPassword(conf.Fetch.URL.User.Username, conf.Fetch.URL.User.Password)
}
rawProxyURL := proxyURL.String()
b.Logf("fetching url '%s'", rawProxyURL)
profile, err := os.Create(filepath.Join("testdata", "proxies", name+".prof"))
if err != nil {
b.Fatalf("%+v", errors.Wrapf(err, "could not create cpu profile"))
}
defer profile.Close()
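// Write a CPU profile covering the whole bench loop next to the case's YAML file, so it can be inspected with `go tool pprof` (see the profiling tutorial).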
if err := pprof.StartCPUProfile(profile); err != nil {
log.Fatal(err)
}
defer pprof.StopCPUProfile()
b.ResetTimer()
for i := 0; i < b.N; i++ {
res, err := client.Get(rawProxyURL)
if err != nil {
b.Errorf("could not fetch proxy url: %+v", errors.WithStack(err))
}
body, err := io.ReadAll(res.Body)
if err != nil {
b.Errorf("could not read response body: %+v", errors.WithStack(err))
}
b.Logf("%s \n %v", res.Status, string(body))
if err := res.Body.Close(); err != nil {
b.Errorf("could not close response body: %+v", errors.WithStack(err))
}
}
})
}
}
type proxyBenchConfig struct {
Proxy config.BootstrapProxyConfig `yaml:"proxy"`
Fetch fetchBenchConfig `yaml:"fetch"`
}
type fetchBenchConfig struct {
URL fetchURLBenchConfig `yaml:"url"`
}
type fetchURLBenchConfig struct {
Path string `yaml:"path"`
RawQuery string `yaml:"rawQuery"`
User fetchURLUserBenchConfig `yaml:"user"`
}
type fetchURLUserBenchConfig struct {
Username string `yaml:"username"`
Password string `yaml:"password"`
}
func loadProxyBenchConfig(filename string) (*proxyBenchConfig, error) {
data, err := os.ReadFile(filename)
if err != nil {
return nil, errors.Wrapf(err, "could not read file '%s'", filename)
}
conf := proxyBenchConfig{}
if err := yaml.Unmarshal(data, &conf); err != nil {
return nil, errors.Wrapf(err, "could not unmarshal config")
}
return &conf, nil
}
func createProxy(name string, conf *proxyBenchConfig, logf func(format string, a ...any)) (*httptest.Server, *httptest.Server, error) {
redisEndpoint := os.Getenv("BOUNCER_BENCH_REDIS_ADDR")
if redisEndpoint == "" {
redisEndpoint = "127.0.0.1:6379"
}
client := redis.NewUniversalClient(&redis.UniversalOptions{
Addrs: []string{redisEndpoint},
})
proxyRepository := redisStore.NewProxyRepository(client, redisStore.DefaultTxMaxAttempts, redisStore.DefaultTxBaseDelay)
layerRepository := redisStore.NewLayerRepository(client, redisStore.DefaultTxMaxAttempts, redisStore.DefaultTxBaseDelay)
var backend *httptest.Server
if conf.Proxy.To == "" {
backend = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
if _, err := w.Write([]byte("Hello, world.")); err != nil {
logf("[ERROR] %+v", errors.WithStack(err))
}
}))
if err := waitFor(backend.URL, 5*time.Second); err != nil {
return nil, nil, errors.WithStack(err)
}
logf("started backend '%s'", backend.URL)
}
ctx := context.Background()
proxyName := store.ProxyName("bench-" + name)
proxies, err := proxyRepository.QueryProxy(ctx)
if err != nil {
return nil, nil, errors.WithStack(err)
}
// Cleanup existing proxies
for _, p := range proxies {
if err := proxyRepository.DeleteProxy(ctx, p.Name); err != nil {
return nil, nil, errors.WithStack(err)
}
}
logf("creating proxy '%s'", proxyName)
to := string(conf.Proxy.To)
if to == "" {
to = backend.URL
}
if _, err := proxyRepository.CreateProxy(ctx, proxyName, to, conf.Proxy.From...); err != nil {
return nil, nil, errors.WithStack(err)
}
if _, err := proxyRepository.UpdateProxy(ctx, proxyName, store.WithProxyUpdateEnabled(true)); err != nil {
return nil, nil, errors.WithStack(err)
}
for layerName, layerConf := range conf.Proxy.Layers {
if err := layerRepository.DeleteLayer(ctx, proxyName, store.LayerName(layerName)); err != nil {
return nil, nil, errors.WithStack(err)
}
_, err := layerRepository.CreateLayer(ctx, proxyName, store.LayerName(layerName), store.LayerType(layerConf.Type), layerConf.Options.Data)
if err != nil {
return nil, nil, errors.WithStack(err)
}
_, err = layerRepository.UpdateLayer(ctx, proxyName, store.LayerName(layerName), store.WithLayerUpdateEnabled(bool(layerConf.Enabled)))
if err != nil {
return nil, nil, errors.WithStack(err)
}
}
layers, err := setup.GetLayers(context.Background(), config.NewDefault())
if err != nil {
return nil, nil, errors.WithStack(err)
}
director := director.New(
proxyRepository, layerRepository,
director.WithLayerCache(
ttl.NewCache(
memory.NewCache[string, []*store.Layer](),
memory.NewCache[string, time.Time](),
30*time.Second,
),
),
director.WithProxyCache(
ttl.NewCache(
memory.NewCache[string, []*store.Proxy](),
memory.NewCache[string, time.Time](),
30*time.Second,
),
),
director.WithLayers(layers...),
)
directorMiddleware := director.Middleware()
handler := proxy.New(
proxy.WithRequestTransformers(
director.RequestTransformer(),
),
proxy.WithResponseTransformers(
director.ResponseTransformer(),
),
proxy.WithReverseProxyFactory(func(ctx context.Context, target *url.URL) *httputil.ReverseProxy {
reverse := httputil.NewSingleHostReverseProxy(target)
reverse.ErrorHandler = func(w http.ResponseWriter, r *http.Request, err error) {
logf("[ERROR] %s", errors.WithStack(err))
}
return reverse
}),
)
server := httptest.NewServer(directorMiddleware(handler))
return server, backend, nil
}
func waitFor(url string, ttl time.Duration) error {
var lastErr error
timeout := time.After(ttl)
for {
select {
case <-timeout:
if lastErr != nil {
return lastErr
}
return errors.New("wait timed out")
default:
res, err := http.Get(url)
if err != nil {
lastErr = errors.WithStack(err)
continue
}
if res.StatusCode >= 200 && res.StatusCode < 400 {
return nil
}
}
}
}

View File

@ -0,0 +1,20 @@
proxy:
from: ["*"]
to: ""
layers:
basic-auth:
type: authn-basic
enabled: true
options:
users:
- username: foo
passwordHash: "$2y$10$ShTc856wMB8PCxyr46qJRO8z06MpV4UejAVRDJ/bixhu0XTGn7Giy"
attributes:
email: foo@bar.com
rules:
- set_header("Remote-User-Attr-Email", user.attrs.email)
fetch:
url:
user:
username: foo
password: bar

View File

@ -0,0 +1,3 @@
proxy:
from: ["*"]
to: ""

View File

@ -0,0 +1,12 @@
proxy:
from: ["*"]
to: ""
layers:
host-rewriter:
type: rewriter
enabled: true
options:
rules:
request:
- set_host(request.url.host)
- set_header("X-Proxied-With", "bouncer")

View File

@ -35,12 +35,15 @@ func RunCommand() *cli.Command {
logger.SetLevel(logger.Level(conf.Logger.Level))
projectVersion := ctx.String("projectVersion")
flushSentry, err := setup.SetupSentry(ctx.Context, conf.Admin.Sentry, projectVersion)
if conf.Proxy.Sentry.DSN != "" {
flushSentry, err := setup.SetupSentry(ctx.Context, conf.Proxy.Sentry, projectVersion)
if err != nil {
return errors.Wrap(err, "could not initialize sentry client")
}
defer flushSentry()
}
integrations, err := setup.SetupIntegrations(ctx.Context, conf)
if err != nil {

View File

@ -30,12 +30,15 @@ func RunCommand() *cli.Command {
logger.SetLevel(logger.Level(conf.Logger.Level))
projectVersion := ctx.String("projectVersion")
if conf.Proxy.Sentry.DSN != "" {
flushSentry, err := setup.SetupSentry(ctx.Context, conf.Proxy.Sentry, projectVersion)
if err != nil {
return errors.Wrap(err, "could not initialize sentry client")
}
defer flushSentry()
}
layers, err := setup.GetLayers(ctx.Context, conf)
if err != nil {

View File

@ -5,6 +5,7 @@ type AdminServerConfig struct {
CORS CORSConfig `yaml:"cors"`
Auth AuthConfig `yaml:"auth"`
Metrics MetricsConfig `yaml:"metrics"`
Profiling ProfilingConfig `yaml:"profiling"`
Sentry SentryConfig `yaml:"sentry"`
}
@ -15,6 +16,7 @@ func NewDefaultAdminServerConfig() AdminServerConfig {
Auth: NewDefaultAuthConfig(),
Metrics: NewDefaultMetricsConfig(),
Sentry: NewDefaultSentryConfig(),
Profiling: NewDefaultProfilingConfig(),
}
}

View File

@ -80,9 +80,22 @@ func loadBootstrapDir(dir string) (map[store.ProxyName]BootstrapProxyConfig, err
proxies := make(map[store.ProxyName]BootstrapProxyConfig)
for _, f := range files {
data, err := os.ReadFile(f)
proxy, err := loadBootstrappedProxyConfig(f)
if err != nil {
return nil, errors.Wrapf(err, "could not read file '%s'", f)
return nil, errors.Wrapf(err, "could not load proxy bootstrap file '%s'", f)
}
name := store.ProxyName(strings.TrimSuffix(filepath.Base(f), filepath.Ext(f)))
proxies[name] = *proxy
}
return proxies, nil
}
func loadBootstrappedProxyConfig(filename string) (*BootstrapProxyConfig, error) {
data, err := os.ReadFile(filename)
if err != nil {
return nil, errors.Wrapf(err, "could not read file '%s'", filename)
}
proxy := BootstrapProxyConfig{}
@ -91,11 +104,7 @@ func loadBootstrapDir(dir string) (map[store.ProxyName]BootstrapProxyConfig, err
return nil, errors.Wrapf(err, "could not unmarshal proxy")
}
name := store.ProxyName(strings.TrimSuffix(filepath.Base(f), filepath.Ext(f)))
proxies[name] = proxy
}
return proxies, nil
return &proxy, nil
}
func overrideProxies(base map[store.ProxyName]BootstrapProxyConfig, proxies map[store.ProxyName]BootstrapProxyConfig) map[store.ProxyName]BootstrapProxyConfig {

View File

@ -127,7 +127,7 @@ func (im *InterpolatedMap) UnmarshalYAML(value *yaml.Node) error {
return nil
}
func (im *InterpolatedMap) interpolateRecursive(data any) (any, error) {
func (im InterpolatedMap) interpolateRecursive(data any) (any, error) {
switch typ := data.(type) {
case map[string]any:
for key, value := range typ {

View File

@ -0,0 +1,15 @@
package config
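// ProfilingConfig controls the optional pprof endpoints exposed by the admin and proxy servers (see misc/packaging/common/config.yml for the matching YAML blocks).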
type ProfilingConfig struct {
Enabled InterpolatedBool `yaml:"enabled"`
Endpoint InterpolatedString `yaml:"endpoint"`
BasicAuth *BasicAuthConfig `yaml:"basicAuth"`
}
func NewDefaultProfilingConfig() ProfilingConfig {
return ProfilingConfig{
Enabled: true,
Endpoint: "/.bouncer/profiling",
BasicAuth: nil,
}
}

View File

@ -10,6 +10,7 @@ type ProxyServerConfig struct {
Debug InterpolatedBool `yaml:"debug"`
HTTP HTTPConfig `yaml:"http"`
Metrics MetricsConfig `yaml:"metrics"`
Profiling ProfilingConfig `yaml:"profiling"`
Transport TransportConfig `yaml:"transport"`
Dial DialConfig `yaml:"dial"`
Sentry SentryConfig `yaml:"sentry"`
@ -27,6 +28,7 @@ func NewDefaultProxyServerConfig() ProxyServerConfig {
Sentry: NewDefaultSentryConfig(),
Cache: NewDefaultCacheConfig(),
Templates: NewDefaultTemplatesConfig(),
Profiling: NewDefaultProfilingConfig(),
}
}

View File

@ -28,10 +28,10 @@ func NewDefaultSentryConfig() SentryConfig {
Debug: false,
FlushTimeout: NewInterpolatedDuration(2 * time.Second),
AttachStacktrace: true,
SampleRate: 1,
SampleRate: 0.2,
EnableTracing: true,
TracesSampleRate: 0.2,
ProfilesSampleRate: 1,
ProfilesSampleRate: 0.2,
IgnoreErrors: []string{},
SendDefaultPII: false,
ServerName: "",

View File

@ -65,7 +65,7 @@ func (q *Queue) Middleware(layer *store.Layer) proxy.Middleware {
return
}
defer q.updateMetrics(ctx, layer.Proxy, layer.Name, options)
defer q.updateMetrics(layer.Proxy, layer.Name, options)
cookieName := q.getCookieName(layer.Name)
@ -217,7 +217,9 @@ func (q *Queue) refreshQueue(ctx context.Context, layerName store.LayerName, kee
}
}
func (q *Queue) updateMetrics(ctx context.Context, proxyName store.ProxyName, layerName store.LayerName, options *LayerOptions) {
func (q *Queue) updateMetrics(proxyName store.ProxyName, layerName store.LayerName, options *LayerOptions) {
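// Use a background context so the deferred metrics update is not cancelled when the incoming request is aborted (#39).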
ctx := context.Background()
// Update queue capacity metric
metricQueueCapacity.With(
prometheus.Labels{

View File

@ -74,7 +74,7 @@ func (l *Layer) ResponseTransformer(layer *store.Layer) proxy.ResponseTransforme
}
}
func New() *Layer {
func New(funcs ...OptionFunc) *Layer {
return &Layer{}
}

View File

@ -0,0 +1,16 @@
package rewriter
type Options struct {
}
type OptionFunc func(opts *Options)
func NewOptions(funcs ...OptionFunc) *Options {
opts := &Options{}
for _, fn := range funcs {
fn(opts)
}
return opts
}

View File

@ -12,9 +12,27 @@ type RequestEnv struct {
Request RequestInfo `expr:"request"`
}
type URLEnv struct {
Scheme string `expr:"scheme"`
Opaque string `expr:"opaque"`
User UserInfoEnv `expr:"user"`
Host string `expr:"host"`
Path string `expr:"path"`
RawPath string `expr:"rawPath"`
RawQuery string `expr:"rawQuery"`
Fragment string `expr:"fragment"`
RawFragment string `expr:"rawFragment"`
}
type UserInfoEnv struct {
Username string `expr:"username"`
Password string `expr:"password"`
}
type RequestInfo struct {
Method string `expr:"method"`
URL string `expr:"url"`
URL URLEnv `expr:"url"`
RawURL string `expr:"rawUrl"`
Proto string `expr:"proto"`
ProtoMajor int `expr:"protoMajor"`
ProtoMinor int `expr:"protoMinor"`
@ -33,10 +51,7 @@ func (l *Layer) applyRequestRules(r *http.Request, options *LayerOptions) error
return nil
}
engine, err := rule.NewEngine[*RequestEnv](
ruleHTTP.WithRequestFuncs(r),
rule.WithRules(options.Rules.Request...),
)
engine, err := l.getRequestRuleEngine(r, options)
if err != nil {
return errors.WithStack(err)
}
@ -44,7 +59,24 @@ func (l *Layer) applyRequestRules(r *http.Request, options *LayerOptions) error
env := &RequestEnv{
Request: RequestInfo{
Method: r.Method,
URL: r.URL.String(),
URL: URLEnv{
Scheme: r.URL.Scheme,
Opaque: r.URL.Opaque,
User: UserInfoEnv{
Username: r.URL.User.Username(),
Password: func() string {
passwd, _ := r.URL.User.Password()
return passwd
}(),
},
Host: r.URL.Host,
Path: r.URL.Path,
RawPath: r.URL.RawPath,
RawQuery: r.URL.RawQuery,
Fragment: r.URL.Fragment,
RawFragment: r.URL.RawFragment,
},
RawURL: r.URL.String(),
Proto: r.Proto,
ProtoMajor: r.ProtoMajor,
ProtoMinor: r.ProtoMinor,
@ -65,6 +97,18 @@ func (l *Layer) applyRequestRules(r *http.Request, options *LayerOptions) error
return nil
}
func (l *Layer) getRequestRuleEngine(r *http.Request, options *LayerOptions) (*rule.Engine[*RequestEnv], error) {
engine, err := rule.NewEngine[*RequestEnv](
rule.WithRules(options.Rules.Request...),
ruleHTTP.WithRequestFuncs(r),
)
if err != nil {
return nil, errors.WithStack(err)
}
return engine, nil
}
type ResponseEnv struct {
Request RequestInfo `expr:"request"`
Response ResponseInfo `expr:"response"`
@ -84,15 +128,12 @@ type ResponseInfo struct {
}
func (l *Layer) applyResponseRules(r *http.Response, options *LayerOptions) error {
rules := options.Rules.Request
rules := options.Rules.Response
if len(rules) == 0 {
return nil
}
engine, err := rule.NewEngine[*ResponseEnv](
rule.WithRules(options.Rules.Response...),
ruleHTTP.WithResponseFuncs(r),
)
engine, err := l.getResponseRuleEngine(r, options)
if err != nil {
return errors.WithStack(err)
}
@ -100,7 +141,24 @@ func (l *Layer) applyResponseRules(r *http.Response, options *LayerOptions) erro
env := &ResponseEnv{
Request: RequestInfo{
Method: r.Request.Method,
URL: r.Request.URL.String(),
URL: URLEnv{
Scheme: r.Request.URL.Scheme,
Opaque: r.Request.URL.Opaque,
User: UserInfoEnv{
Username: r.Request.URL.User.Username(),
Password: func() string {
passwd, _ := r.Request.URL.User.Password()
return passwd
}(),
},
Host: r.Request.URL.Host,
Path: r.Request.URL.Path,
RawPath: r.Request.URL.RawPath,
RawQuery: r.Request.URL.RawQuery,
Fragment: r.Request.URL.Fragment,
RawFragment: r.Request.URL.RawFragment,
},
RawURL: r.Request.URL.String(),
Proto: r.Request.Proto,
ProtoMajor: r.Request.ProtoMajor,
ProtoMinor: r.Request.ProtoMinor,
@ -131,3 +189,15 @@ func (l *Layer) applyResponseRules(r *http.Response, options *LayerOptions) erro
return nil
}
func (l *Layer) getResponseRuleEngine(r *http.Response, options *LayerOptions) (*rule.Engine[*ResponseEnv], error) {
engine, err := rule.NewEngine[*ResponseEnv](
rule.WithRules(options.Rules.Response...),
ruleHTTP.WithResponseFuncs(r),
)
if err != nil {
return nil, errors.WithStack(err)
}
return engine, nil
}

View File

@ -1,156 +0,0 @@
package proxy_test
import (
"context"
"io"
"net/http"
"net/http/httptest"
"net/http/httputil"
"net/url"
"os"
"testing"
"time"
"forge.cadoles.com/Cadoles/go-proxy"
"forge.cadoles.com/cadoles/bouncer/internal/cache/memory"
"forge.cadoles.com/cadoles/bouncer/internal/cache/ttl"
"forge.cadoles.com/cadoles/bouncer/internal/proxy/director"
"forge.cadoles.com/cadoles/bouncer/internal/store"
redisStore "forge.cadoles.com/cadoles/bouncer/internal/store/redis"
"github.com/pkg/errors"
"github.com/redis/go-redis/v9"
)
func BenchmarkProxy(b *testing.B) {
redisEndpoint := os.Getenv("BOUNCER_BENCH_REDIS_ADDR")
if redisEndpoint == "" {
redisEndpoint = "127.0.0.1:6379"
}
client := redis.NewUniversalClient(&redis.UniversalOptions{
Addrs: []string{redisEndpoint},
})
proxyRepository := redisStore.NewProxyRepository(client, redisStore.DefaultTxMaxAttempts, redisStore.DefaultTxBaseDelay)
layerRepository := redisStore.NewLayerRepository(client, redisStore.DefaultTxMaxAttempts, redisStore.DefaultTxBaseDelay)
backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
if _, err := w.Write([]byte("Hello, world.")); err != nil {
b.Logf("[ERROR] %+v", errors.WithStack(err))
}
}))
defer backend.Close()
if err := waitFor(backend.URL, 5*time.Second); err != nil {
b.Fatalf("[FATAL] %+v", errors.WithStack(err))
}
b.Logf("started backend '%s'", backend.URL)
ctx := context.Background()
proxyName := store.ProxyName(b.Name())
b.Logf("creating proxy '%s'", proxyName)
if err := proxyRepository.DeleteProxy(ctx, proxyName); err != nil {
b.Fatalf("[FATAL] %+v", errors.WithStack(err))
}
if _, err := proxyRepository.CreateProxy(ctx, proxyName, backend.URL, "*"); err != nil {
b.Fatalf("[FATAL] %+v", errors.WithStack(err))
}
if _, err := proxyRepository.UpdateProxy(ctx, proxyName, store.WithProxyUpdateEnabled(true)); err != nil {
b.Fatalf("[FATAL] %+v", errors.WithStack(err))
}
director := director.New(
proxyRepository, layerRepository,
director.WithLayerCache(
ttl.NewCache(
memory.NewCache[string, []*store.Layer](),
memory.NewCache[string, time.Time](),
30*time.Second,
),
),
director.WithProxyCache(
ttl.NewCache(
memory.NewCache[string, []*store.Proxy](),
memory.NewCache[string, time.Time](),
30*time.Second,
),
),
)
directorMiddleware := director.Middleware()
handler := proxy.New(
proxy.WithRequestTransformers(
director.RequestTransformer(),
),
proxy.WithResponseTransformers(
director.ResponseTransformer(),
),
proxy.WithReverseProxyFactory(func(ctx context.Context, target *url.URL) *httputil.ReverseProxy {
reverse := httputil.NewSingleHostReverseProxy(target)
reverse.ErrorHandler = func(w http.ResponseWriter, r *http.Request, err error) {
b.Logf("[ERROR] %s", errors.WithStack(err))
}
return reverse
}),
)
server := httptest.NewServer(directorMiddleware(handler))
defer server.Close()
b.Logf("started proxy '%s'", server.URL)
httpClient := server.Client()
b.ResetTimer()
for i := 0; i < b.N; i++ {
res, err := httpClient.Get(server.URL)
if err != nil {
b.Errorf("could not fetch server url: %+v", errors.WithStack(err))
}
body, err := io.ReadAll(res.Body)
if err != nil {
b.Errorf("could not read response body: %+v", errors.WithStack(err))
}
b.Logf("%s - %v", res.Status, string(body))
if err := res.Body.Close(); err != nil {
b.Errorf("could not close response body: %+v", errors.WithStack(err))
}
}
}
func waitFor(url string, ttl time.Duration) error {
var lastErr error
timeout := time.After(ttl)
for {
select {
case <-timeout:
if lastErr != nil {
return lastErr
}
return errors.New("wait timed out")
default:
res, err := http.Get(url)
if err != nil {
lastErr = errors.WithStack(err)
continue
}
if res.StatusCode >= 200 && res.StatusCode < 400 {
return nil
}
}
}
}

View File

@ -8,6 +8,7 @@ import (
"net"
"net/http"
"net/http/httputil"
"net/http/pprof"
"net/url"
"path/filepath"
"strconv"
@ -146,6 +147,34 @@ func (s *Server) run(parentCtx context.Context, addrs chan net.Addr, errs chan e
})
}
if s.serverConfig.Profiling.Enabled {
profiling := s.serverConfig.Profiling
logger.Info(ctx, "enabling profiling", logger.F("endpoint", profiling.Endpoint))
router.Group(func(r chi.Router) {
if profiling.BasicAuth != nil {
logger.Info(ctx, "enabling authentication on metrics endpoint")
r.Use(middleware.BasicAuth(
"profiling",
profiling.BasicAuth.CredentialsMap(),
))
}
r.Route(string(profiling.Endpoint), func(r chi.Router) {
r.HandleFunc("/", pprof.Index)
r.HandleFunc("/cmdline", pprof.Cmdline)
r.HandleFunc("/profile", pprof.Profile)
r.HandleFunc("/symbol", pprof.Symbol)
r.HandleFunc("/trace", pprof.Trace)
r.HandleFunc("/{name}", func(w http.ResponseWriter, r *http.Request) {
name := chi.URLParam(r, "name")
pprof.Handler(name).ServeHTTP(w, r)
})
})
})
}
router.Group(func(r chi.Router) {
r.Use(director.Middleware())

View File

@ -36,7 +36,7 @@ func (r *LayerRepository) CreateLayer(ctx context.Context, proxyName store.Proxy
CreatedAt: wrap(now),
UpdatedAt: wrap(now),
Options: wrap(store.LayerOptions{}),
Options: wrap(options),
}
txf := func(tx *redis.Tx) error {
@ -60,6 +60,11 @@ func (r *LayerRepository) CreateLayer(ctx context.Context, proxyName store.Proxy
return errors.WithStack(err)
}
layerItem, err = r.txGetLayerItem(ctx, tx, proxyName, layerName)
if err != nil {
return errors.WithStack(err)
}
return nil
}
@ -70,16 +75,16 @@ func (r *LayerRepository) CreateLayer(ctx context.Context, proxyName store.Proxy
return &store.Layer{
LayerHeader: store.LayerHeader{
Name: layerName,
Proxy: proxyName,
Type: layerType,
Weight: 0,
Enabled: false,
Name: store.LayerName(layerItem.Name),
Proxy: store.ProxyName(layerItem.Proxy),
Type: store.LayerType(layerItem.Type),
Weight: layerItem.Weight,
Enabled: layerItem.Enabled,
},
CreatedAt: now,
UpdatedAt: now,
Options: store.LayerOptions{},
CreatedAt: layerItem.CreatedAt.Value(),
UpdatedAt: layerItem.UpdatedAt.Value(),
Options: layerItem.Options.Value(),
}, nil
}

View File

@ -49,6 +49,19 @@ admin:
# Set to null to disable authentication
basicAuth: null
# Profiling
profiling:
# Enable or disable the profiling endpoints
enabled: true
# Route where the profiling endpoints are published
endpoint: /.bouncer/profiling
# "Basic auth" authentication on the profiling
# endpoints
# Set to null to disable authentication
basicAuth:
credentials:
prof: iling
# Sentry integration configuration
# See https://pkg.go.dev/github.com/getsentry/sentry-go?utm_source=godoc#ClientOptions
sentry:
@ -59,7 +72,7 @@ admin:
sampleRate: 1
enableTracing: true
tracesSampleRate: 0.2
profilesSampleRate: 1
profilesSampleRate: 0.2
ignoreErrors: []
sendDefaultPII: false
serverName: ""
@ -99,6 +112,19 @@ proxy:
credentials:
prom: etheus
# Profiling
profiling:
# Enable or disable the profiling endpoints
enabled: true
# Route where the profiling endpoints are published
endpoint: /.bouncer/profiling
# "Basic auth" authentication on the profiling
# endpoints
# Set to null to disable authentication
basicAuth:
credentials:
prof: iling
# Local caching configuration
# for proxy/layer data
cache: