Merge pull request 'Set up a local server-side cache to improve request processing times' (#26) from benchmark into develop

Reviewed-on: #26
wpetit 2024-05-28 16:52:34 +02:00
commit d667bb03f5
17 changed files with 498 additions and 45 deletions

.gitignore (2 changes)

@@ -9,3 +9,5 @@
/data
/out
.dockerconfigjson
*.prof
proxy.test


@@ -130,6 +130,13 @@ tools/grafterm/bin/grafterm:
mkdir -p tools/grafterm/bin
GOBIN=$(PWD)/tools/grafterm/bin go install github.com/slok/grafterm/cmd/grafterm@v0.2.0
bench:
go test -bench=. -run '^$$' -count=10 ./...
tools/benchstat/bin/benchstat:
mkdir -p tools/benchstat/bin
GOBIN=$(PWD)/tools/benchstat/bin go install golang.org/x/perf/cmd/benchstat@latest
full-version:
@echo $(FULL_VERSION)


@@ -0,0 +1,31 @@
# Analyzing Bouncer's performance
1. Run a benchmark of the proxy
```shell
go test -bench=. -run '^$' -count=5 -cpuprofile bench_proxy.prof ./internal/proxy
```
2. Visualize execution times
```shell
go tool pprof -web bench_proxy.prof
```
3. Compare performance between runs
```shell
# Run a first benchmark
go test -bench=. -run '^$' -count=10 ./internal/proxy > bench_before.txt
# Make your changes to the sources
# Run a second benchmark
go test -bench=. -run '^$' -count=10 ./internal/proxy > bench_after.txt
# Install the benchstat tool
make tools/benchstat/bin/benchstat
# Compare the reports
tools/benchstat/bin/benchstat bench_before.txt bench_after.txt
```

go.mod (1 change)

@@ -93,6 +93,7 @@ require (
go.opentelemetry.io/otel v1.21.0 // indirect
go.opentelemetry.io/otel/trace v1.21.0 // indirect
golang.org/x/net v0.19.0 // indirect
golang.org/x/sync v0.7.0 // indirect
golang.org/x/text v0.14.0 // indirect
golang.org/x/time v0.3.0 // indirect
google.golang.org/appengine v1.6.8 // indirect

go.sum (4 changes)

@@ -405,8 +405,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE=
golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=

internal/cache/cache.go (new file, 6 lines)

@@ -0,0 +1,6 @@
package cache
type Cache[K comparable, V any] interface {
Get(key K) (V, bool)
Set(key K, value V)
}
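For context, a minimal usage sketch of this interface with the in-memory implementation added below; the standalone `main` wrapper and the key/value choices are illustrative only:

```go
package main

import (
	"fmt"

	"forge.cadoles.com/cadoles/bouncer/internal/cache"
	"forge.cadoles.com/cadoles/bouncer/internal/cache/memory"
)

func main() {
	// Any implementation satisfying cache.Cache[K, V] can back the lookup;
	// here the in-memory cache from this PR is used.
	var c cache.Cache[string, int] = memory.NewCache[string, int]()

	c.Set("hits", 42)

	// Get reports whether the key was present.
	if v, ok := c.Get("hits"); ok {
		fmt.Println(v) // 42
	}
}
```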

internal/cache/memory/cache.go (new file, 34 lines)

@@ -0,0 +1,34 @@
package memory
import (
"sync"
cache "forge.cadoles.com/cadoles/bouncer/internal/cache"
)
type Cache[K comparable, V any] struct {
store *sync.Map
}
// Get implements cache.Cache.
func (c *Cache[K, V]) Get(key K) (V, bool) {
raw, exists := c.store.Load(key)
if !exists {
return *new(V), false
}
return raw.(V), exists
}
// Set implements cache.Cache.
func (c *Cache[K, V]) Set(key K, value V) {
c.store.Store(key, value)
}
func NewCache[K comparable, V any]() *Cache[K, V] {
return &Cache[K, V]{
store: new(sync.Map),
}
}
var _ cache.Cache[string, bool] = &Cache[string, bool]{}

internal/cache/ttl/cache.go (new file, 39 lines)

@@ -0,0 +1,39 @@
package ttl
import (
"time"
cache "forge.cadoles.com/cadoles/bouncer/internal/cache"
)
type Cache[K comparable, V any] struct {
timestamps cache.Cache[K, time.Time]
values cache.Cache[K, V]
ttl time.Duration
}
// Get implements cache.Cache.
func (c *Cache[K, V]) Get(key K) (V, bool) {
timestamp, exists := c.timestamps.Get(key)
if !exists || timestamp.Add(c.ttl).Before(time.Now()) {
return *new(V), false
}
return c.values.Get(key)
}
// Set implements cache.Cache.
func (c *Cache[K, V]) Set(key K, value V) {
c.timestamps.Set(key, time.Now())
c.values.Set(key, value)
}
func NewCache[K comparable, V any](values cache.Cache[K, V], timestamps cache.Cache[K, time.Time], ttl time.Duration) *Cache[K, V] {
return &Cache[K, V]{
values: values,
timestamps: timestamps,
ttl: ttl,
}
}
var _ cache.Cache[string, bool] = &Cache[string, bool]{}
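A minimal sketch of the expiry behaviour this composition provides; the 100 ms TTL and the key/value used here are illustrative, not taken from the PR:

```go
package main

import (
	"fmt"
	"time"

	"forge.cadoles.com/cadoles/bouncer/internal/cache/memory"
	"forge.cadoles.com/cadoles/bouncer/internal/cache/ttl"
)

func main() {
	// The TTL cache composes two caches: one for values, one for the
	// timestamp at which each key was last set.
	c := ttl.NewCache(
		memory.NewCache[string, string](),
		memory.NewCache[string, time.Time](),
		100*time.Millisecond,
	)

	c.Set("greeting", "hello")

	_, ok := c.Get("greeting")
	fmt.Println(ok) // true: still within the TTL window

	time.Sleep(150 * time.Millisecond)

	_, ok = c.Get("greeting")
	fmt.Println(ok) // false: the entry has expired
}
```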

internal/cache/ttl/cache_test.go (new file, 39 lines)

@@ -0,0 +1,39 @@
package ttl
import (
"testing"
"time"
"forge.cadoles.com/cadoles/bouncer/internal/cache/memory"
)
func TestCache(t *testing.T) {
cache := NewCache(
memory.NewCache[string, int](),
memory.NewCache[string, time.Time](),
time.Second,
)
key := "foo"
if _, exists := cache.Get(key); exists {
t.Errorf("cache.Get(\"%s\"): should not exists", key)
}
cache.Set(key, 1)
value, exists := cache.Get(key)
if !exists {
t.Errorf("cache.Get(\"%s\"): should exists", key)
}
if e, g := 1, value; e != g {
t.Errorf("cache.Get(\"%s\"): expected '%v', got '%v'", key, e, g)
}
time.Sleep(time.Second)
if _, exists := cache.Get("foo"); exists {
t.Errorf("cache.Get(\"%s\"): should not exists", key)
}
}


@@ -3,6 +3,7 @@ package proxy
import (
"fmt"
"strings"
"time"
"forge.cadoles.com/cadoles/bouncer/internal/command/common"
"forge.cadoles.com/cadoles/bouncer/internal/proxy"
@@ -45,6 +46,7 @@ func RunCommand() *cli.Command {
proxy.WithServerConfig(conf.Proxy),
proxy.WithRedisConfig(conf.Redis),
proxy.WithDirectorLayers(layers...),
proxy.WithDirectorCacheTTL(time.Duration(conf.Proxy.Cache.TTL)),
)
addrs, srvErrs := srv.Start(ctx.Context)


@@ -12,6 +12,18 @@ type ProxyServerConfig struct {
Transport TransportConfig `yaml:"transport"`
Dial DialConfig `yaml:"dial"`
Sentry SentryConfig `yaml:"sentry"`
Cache CacheConfig `yaml:"cache"`
}
func NewDefaultProxyServerConfig() ProxyServerConfig {
return ProxyServerConfig{
HTTP: NewHTTPConfig("0.0.0.0", 8080),
Metrics: NewDefaultMetricsConfig(),
Transport: NewDefaultTransportConfig(),
Dial: NewDefaultDialConfig(),
Sentry: NewDefaultSentryConfig(),
Cache: NewDefaultCacheConfig(),
}
}
// See https://pkg.go.dev/net/http#Transport
@@ -58,13 +70,22 @@ func (c TransportConfig) AsTransport() *http.Transport {
return httpTransport
}
func NewDefaultProxyServerConfig() ProxyServerConfig {
return ProxyServerConfig{
HTTP: NewHTTPConfig("0.0.0.0", 8080),
Metrics: NewDefaultMetricsConfig(),
Transport: NewDefaultTransportConfig(),
Dial: NewDefaultDialConfig(),
Sentry: NewDefaultSentryConfig(),
func NewDefaultTransportConfig() TransportConfig {
return TransportConfig{
ForceAttemptHTTP2: true,
MaxIdleConns: 100,
MaxIdleConnsPerHost: 100,
MaxConnsPerHost: 100,
IdleConnTimeout: NewInterpolatedDuration(90 * time.Second),
TLSHandshakeTimeout: NewInterpolatedDuration(10 * time.Second),
ExpectContinueTimeout: NewInterpolatedDuration(1 * time.Second),
ResponseHeaderTimeout: NewInterpolatedDuration(10 * time.Second),
DisableCompression: false,
DisableKeepAlives: false,
ReadBufferSize: 4096,
WriteBufferSize: 4096,
MaxResponseHeaderBytes: 0,
InsecureSkipVerify: false,
}
}
@@ -85,21 +106,12 @@ func NewDefaultDialConfig() DialConfig {
}
}
func NewDefaultTransportConfig() TransportConfig {
return TransportConfig{
ForceAttemptHTTP2: true,
MaxIdleConns: 100,
MaxIdleConnsPerHost: 100,
MaxConnsPerHost: 100,
IdleConnTimeout: NewInterpolatedDuration(90 * time.Second),
TLSHandshakeTimeout: NewInterpolatedDuration(10 * time.Second),
ExpectContinueTimeout: NewInterpolatedDuration(1 * time.Second),
ResponseHeaderTimeout: NewInterpolatedDuration(10 * time.Second),
DisableCompression: false,
DisableKeepAlives: false,
ReadBufferSize: 4096,
WriteBufferSize: 4096,
MaxResponseHeaderBytes: 0,
InsecureSkipVerify: false,
type CacheConfig struct {
TTL InterpolatedDuration `yaml:"ttl"`
}
func NewDefaultCacheConfig() CacheConfig {
return CacheConfig{
TTL: *NewInterpolatedDuration(time.Second * 30),
}
}


@@ -7,6 +7,7 @@ import (
"forge.cadoles.com/Cadoles/go-proxy"
"forge.cadoles.com/Cadoles/go-proxy/wildcard"
"forge.cadoles.com/cadoles/bouncer/internal/cache"
"forge.cadoles.com/cadoles/bouncer/internal/store"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
@@ -17,6 +18,9 @@ type Director struct {
proxyRepository store.ProxyRepository
layerRepository store.LayerRepository
layerRegistry *LayerRegistry
proxyCache cache.Cache[string, []*store.Proxy]
layerCache cache.Cache[string, []*store.Layer]
}
func (d *Director) rewriteRequest(r *http.Request) (*http.Request, error) {
@@ -88,7 +92,14 @@ MAIN:
return r, nil
}
const proxiesCacheKey = "proxies"
func (d *Director) getProxies(ctx context.Context) ([]*store.Proxy, error) {
proxies, exists := d.proxyCache.Get(proxiesCacheKey)
if exists {
return proxies, nil
}
headers, err := d.proxyRepository.QueryProxy(ctx, store.WithProxyQueryEnabled(true))
if err != nil {
return nil, errors.WithStack(err)
@@ -96,7 +107,7 @@ func (d *Director) getProxies(ctx context.Context) ([]*store.Proxy, error) {
sort.Sort(store.ByProxyWeight(headers))
proxies := make([]*store.Proxy, 0, len(headers))
proxies = make([]*store.Proxy, 0, len(headers))
for _, h := range headers {
if !h.Enabled {
@@ -111,10 +122,19 @@ func (d *Director) getProxies(ctx context.Context) ([]*store.Proxy, error) {
proxies = append(proxies, proxy)
}
d.proxyCache.Set(proxiesCacheKey, proxies)
return proxies, nil
}
func (d *Director) getLayers(ctx context.Context, proxyName store.ProxyName) ([]*store.Layer, error) {
cacheKey := "layers-" + string(proxyName)
layers, exists := d.layerCache.Get(cacheKey)
if exists {
return layers, nil
}
headers, err := d.layerRepository.QueryLayers(ctx, proxyName, store.WithLayerQueryEnabled(true))
if err != nil {
return nil, errors.WithStack(err)
@@ -122,7 +142,7 @@ func (d *Director) getLayers(ctx context.Context, proxyName store.ProxyName) ([]
sort.Sort(store.ByLayerWeight(headers))
layers := make([]*store.Layer, 0, len(headers))
layers = make([]*store.Layer, 0, len(headers))
for _, h := range headers {
if !h.Enabled {
@@ -137,6 +157,8 @@ func (d *Director) getLayers(ctx context.Context, proxyName store.ProxyName) ([]
layers = append(layers, layer)
}
d.layerCache.Set(cacheKey, layers)
return layers, nil
}
@@ -240,8 +262,16 @@ func (d *Director) Middleware() proxy.Middleware {
}
}
func New(proxyRepository store.ProxyRepository, layerRepository store.LayerRepository, layers ...Layer) *Director {
registry := NewLayerRegistry(layers...)
func New(proxyRepository store.ProxyRepository, layerRepository store.LayerRepository, funcs ...OptionFunc) *Director {
opts := NewOptions(funcs...)
return &Director{proxyRepository, layerRepository, registry}
registry := NewLayerRegistry(opts.Layers...)
return &Director{
proxyRepository: proxyRepository,
layerRepository: layerRepository,
layerRegistry: registry,
proxyCache: opts.ProxyCache,
layerCache: opts.LayerCache,
}
}


@@ -0,0 +1,58 @@
package director
import (
"time"
"forge.cadoles.com/cadoles/bouncer/internal/cache"
"forge.cadoles.com/cadoles/bouncer/internal/cache/memory"
"forge.cadoles.com/cadoles/bouncer/internal/cache/ttl"
"forge.cadoles.com/cadoles/bouncer/internal/store"
)
type Options struct {
Layers []Layer
ProxyCache cache.Cache[string, []*store.Proxy]
LayerCache cache.Cache[string, []*store.Layer]
}
type OptionFunc func(opts *Options)
func NewOptions(funcs ...OptionFunc) *Options {
opts := &Options{
Layers: make([]Layer, 0),
ProxyCache: ttl.NewCache(
memory.NewCache[string, []*store.Proxy](),
memory.NewCache[string, time.Time](),
30*time.Second,
),
LayerCache: ttl.NewCache(
memory.NewCache[string, []*store.Layer](),
memory.NewCache[string, time.Time](),
30*time.Second,
),
}
for _, fn := range funcs {
fn(opts)
}
return opts
}
func WithLayers(layers ...Layer) OptionFunc {
return func(opts *Options) {
opts.Layers = layers
}
}
func WithProxyCache(cache cache.Cache[string, []*store.Proxy]) OptionFunc {
return func(opts *Options) {
opts.ProxyCache = cache
}
}
func WithLayerCache(cache cache.Cache[string, []*store.Layer]) OptionFunc {
return func(opts *Options) {
opts.LayerCache = cache
}
}


@@ -1,23 +1,27 @@
package proxy
import (
"time"
"forge.cadoles.com/cadoles/bouncer/internal/config"
"forge.cadoles.com/cadoles/bouncer/internal/proxy/director"
)
type Option struct {
ServerConfig config.ProxyServerConfig
RedisConfig config.RedisConfig
DirectorLayers []director.Layer
ServerConfig config.ProxyServerConfig
RedisConfig config.RedisConfig
DirectorLayers []director.Layer
DirectorCacheTTL time.Duration
}
type OptionFunc func(*Option)
func defaultOption() *Option {
return &Option{
ServerConfig: config.NewDefaultProxyServerConfig(),
RedisConfig: config.NewDefaultRedisConfig(),
DirectorLayers: make([]director.Layer, 0),
ServerConfig: config.NewDefaultProxyServerConfig(),
RedisConfig: config.NewDefaultRedisConfig(),
DirectorLayers: make([]director.Layer, 0),
DirectorCacheTTL: 30 * time.Second,
}
}
@@ -38,3 +42,9 @@ func WithDirectorLayers(layers ...director.Layer) OptionFunc {
opt.DirectorLayers = layers
}
}
func WithDirectorCacheTTL(ttl time.Duration) OptionFunc {
return func(opt *Option) {
opt.DirectorCacheTTL = ttl
}
}
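As a usage sketch, the new option can be passed when constructing the proxy server; the one-minute TTL here is illustrative, and in the actual command wiring above the value comes from `conf.Proxy.Cache.TTL`:

```go
package main

import (
	"time"

	"forge.cadoles.com/cadoles/bouncer/internal/proxy"
)

func main() {
	// Proxies and layers resolved by the director will be served from the
	// local cache for one minute before Redis is queried again.
	srv := proxy.NewServer(
		proxy.WithDirectorCacheTTL(time.Minute),
	)
	_ = srv
}
```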


@@ -0,0 +1,156 @@
package proxy_test
import (
"context"
"io"
"net/http"
"net/http/httptest"
"net/http/httputil"
"net/url"
"os"
"testing"
"time"
"forge.cadoles.com/Cadoles/go-proxy"
"forge.cadoles.com/cadoles/bouncer/internal/cache/memory"
"forge.cadoles.com/cadoles/bouncer/internal/cache/ttl"
"forge.cadoles.com/cadoles/bouncer/internal/proxy/director"
"forge.cadoles.com/cadoles/bouncer/internal/store"
redisStore "forge.cadoles.com/cadoles/bouncer/internal/store/redis"
"github.com/pkg/errors"
"github.com/redis/go-redis/v9"
)
func BenchmarkProxy(b *testing.B) {
redisEndpoint := os.Getenv("BOUNCER_BENCH_REDIS_ADDR")
if redisEndpoint == "" {
redisEndpoint = "127.0.0.1:6379"
}
client := redis.NewUniversalClient(&redis.UniversalOptions{
Addrs: []string{redisEndpoint},
})
proxyRepository := redisStore.NewProxyRepository(client)
layerRepository := redisStore.NewLayerRepository(client)
backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
if _, err := w.Write([]byte("Hello, world.")); err != nil {
b.Logf("[ERROR] %+v", errors.WithStack(err))
}
}))
defer backend.Close()
if err := waitFor(backend.URL, 5*time.Second); err != nil {
b.Fatalf("[FATAL] %+v", errors.WithStack(err))
}
b.Logf("started backend '%s'", backend.URL)
ctx := context.Background()
proxyName := store.ProxyName(b.Name())
b.Logf("creating proxy '%s'", proxyName)
if err := proxyRepository.DeleteProxy(ctx, proxyName); err != nil {
b.Fatalf("[FATAL] %+v", errors.WithStack(err))
}
if _, err := proxyRepository.CreateProxy(ctx, proxyName, backend.URL, "*"); err != nil {
b.Fatalf("[FATAL] %+v", errors.WithStack(err))
}
if _, err := proxyRepository.UpdateProxy(ctx, proxyName, store.WithProxyUpdateEnabled(true)); err != nil {
b.Fatalf("[FATAL] %+v", errors.WithStack(err))
}
director := director.New(
proxyRepository, layerRepository,
director.WithLayerCache(
ttl.NewCache(
memory.NewCache[string, []*store.Layer](),
memory.NewCache[string, time.Time](),
30*time.Second,
),
),
director.WithProxyCache(
ttl.NewCache(
memory.NewCache[string, []*store.Proxy](),
memory.NewCache[string, time.Time](),
30*time.Second,
),
),
)
directorMiddleware := director.Middleware()
handler := proxy.New(
proxy.WithRequestTransformers(
director.RequestTransformer(),
),
proxy.WithResponseTransformers(
director.ResponseTransformer(),
),
proxy.WithReverseProxyFactory(func(ctx context.Context, target *url.URL) *httputil.ReverseProxy {
reverse := httputil.NewSingleHostReverseProxy(target)
reverse.ErrorHandler = func(w http.ResponseWriter, r *http.Request, err error) {
b.Logf("[ERROR] %s", errors.WithStack(err))
}
return reverse
}),
)
server := httptest.NewServer(directorMiddleware(handler))
defer server.Close()
b.Logf("started proxy '%s'", server.URL)
httpClient := server.Client()
b.ResetTimer()
for i := 0; i < b.N; i++ {
res, err := httpClient.Get(server.URL)
if err != nil {
b.Errorf("could not fetch server url: %+v", errors.WithStack(err))
}
body, err := io.ReadAll(res.Body)
if err != nil {
b.Errorf("could not read response body: %+v", errors.WithStack(err))
}
b.Logf("%s - %v", res.Status, string(body))
if err := res.Body.Close(); err != nil {
b.Errorf("could not close response body: %+v", errors.WithStack(err))
}
}
}
func waitFor(url string, ttl time.Duration) error {
var lastErr error
timeout := time.After(ttl)
for {
select {
case <-timeout:
if lastErr != nil {
return lastErr
}
return errors.New("wait timed out")
default:
res, err := http.Get(url)
if err != nil {
lastErr = errors.WithStack(err)
continue
}
if res.StatusCode >= 200 && res.StatusCode < 400 {
return nil
}
}
}
}


@@ -11,6 +11,8 @@ import (
"time"
"forge.cadoles.com/Cadoles/go-proxy"
"forge.cadoles.com/cadoles/bouncer/internal/cache/memory"
"forge.cadoles.com/cadoles/bouncer/internal/cache/ttl"
bouncerChi "forge.cadoles.com/cadoles/bouncer/internal/chi"
"forge.cadoles.com/cadoles/bouncer/internal/config"
"forge.cadoles.com/cadoles/bouncer/internal/proxy/director"
@@ -25,11 +27,12 @@
)
type Server struct {
serverConfig config.ProxyServerConfig
redisConfig config.RedisConfig
directorLayers []director.Layer
proxyRepository store.ProxyRepository
layerRepository store.LayerRepository
serverConfig config.ProxyServerConfig
redisConfig config.RedisConfig
directorLayers []director.Layer
directorCacheTTL time.Duration
proxyRepository store.ProxyRepository
layerRepository store.LayerRepository
}
func (s *Server) Start(ctx context.Context) (<-chan net.Addr, <-chan error) {
@@ -86,7 +89,21 @@ func (s *Server) run(parentCtx context.Context, addrs chan net.Addr, errs chan e
director := director.New(
s.proxyRepository,
s.layerRepository,
s.directorLayers...,
director.WithLayers(s.directorLayers...),
director.WithLayerCache(
ttl.NewCache(
memory.NewCache[string, []*store.Layer](),
memory.NewCache[string, time.Time](),
s.directorCacheTTL,
),
),
director.WithProxyCache(
ttl.NewCache(
memory.NewCache[string, []*store.Proxy](),
memory.NewCache[string, time.Time](),
s.directorCacheTTL,
),
),
)
if s.serverConfig.HTTP.UseRealIP {
@@ -184,8 +201,9 @@ func NewServer(funcs ...OptionFunc) *Server {
}
return &Server{
serverConfig: opt.ServerConfig,
redisConfig: opt.RedisConfig,
directorLayers: opt.DirectorLayers,
serverConfig: opt.ServerConfig,
redisConfig: opt.RedisConfig,
directorLayers: opt.DirectorLayers,
directorCacheTTL: opt.DirectorCacheTTL,
}
}


@@ -93,6 +93,14 @@ proxy:
credentials:
prom: etheus
# Local cache configuration
# for proxy/layer data
cache:
# Proxies and layers are cached locally for 30s by default.
# If changes are rare, you can increase this value to reduce
# the "pressure" on the Redis server.
ttl: 30s
# HTTP(S) transport configuration
# See https://pkg.go.dev/net/http#Transport
transport: