Compare commits


7 Commits

SHA1        Date                        Message
59f023a7d9  2023-12-05 21:27:43 +01:00  fix: do not use goja.Value outside of loop (ref #22)
753a6c9708  2023-12-05 14:18:22 +01:00  fix: temporarily write blob directly as response body without http.ServeContent
b120e590b6  2023-12-05 14:14:08 +01:00  fix: do not use goja.Value outside of run loop
242bf379a8  2023-12-03 14:26:57 +01:00  feat: rewrite cache blobstore driver parameters parsing
065a9002a0  2023-12-01 15:20:12 +01:00  fix(storage): use missing cache driver options
83a1e89665  2023-12-01 12:22:53 +01:00  feat: use forked version of bigcache to prevent 64bits misalignment problems
                                        (see https://github.com/allegro/bigcache/issues/368 and
                                        https://golang.org/pkg/sync/atomic/#pkg-note-BUG; a sketch of the
                                        alignment issue follows this list)
d9e8aac458  2023-11-30 19:54:00 +01:00  feat(packaging): rotate storage-server log files on alpine
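Background for the bigcache fork, from the two links in the commit above rather than from this diff: on 32-bit platforms (x86-32, ARM, 32-bit MIPS), sync/atomic documents that 64-bit atomic operations panic unless the operand is 64-bit aligned, and only the first word of an allocated struct, array, or slice is guaranteed to be aligned. A minimal illustrative sketch of that rule (not code from this repository):

package main

import (
	"fmt"
	"sync/atomic"
)

// counter keeps its 64-bit field first: the first word of an allocated
// struct is guaranteed to be 64-bit aligned, so atomic operations on it
// are safe even on 32-bit platforms. Placing `hits` after a smaller
// field could trigger "unaligned 64-bit atomic operation" panics there.
type counter struct {
	hits uint64
	name string
}

func main() {
	c := &counter{name: "requests"}
	atomic.AddUint64(&c.hits, 1)
	fmt.Println(c.name, atomic.LoadUint64(&c.hits))
}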
15 changed files with 342 additions and 91 deletions

.gitignore (vendored, 2 changes)

@@ -2,7 +2,7 @@
 /bin
 /.env
 /tools
-*.sqlite
+*.sqlite*
 /.gitea-release
 /.edge
 /data

GoReleaser packaging config (file name not shown in this capture; YAML indentation approximate)

@@ -108,10 +108,17 @@ nfpms:
         file_info:
           mode: 0640
         packager: apk
+      - src: misc/packaging/openrc/storage-server.logrotate.conf
+        dst: /etc/logrotate.d/storage-server
+        packager: apk
       - dst: /var/lib/storage-server
         type: dir
         file_info:
           mode: 0700
         packager: apk
+      - dst: /var/log/storage-server
+        type: dir
+        file_info:
+          mode: 0700
   scripts:
     postinstall: "misc/packaging/common/postinstall-storage-server.sh"

cmd/blobstore-test/main.go (new file, 146 lines)

@@ -0,0 +1,146 @@
package main

import (
	"context"
	"crypto/rand"
	"flag"
	"io"
	mrand "math/rand"
	"runtime"
	"time"

	"forge.cadoles.com/arcad/edge/pkg/storage"
	"forge.cadoles.com/arcad/edge/pkg/storage/driver"
	"github.com/pkg/errors"
	"gitlab.com/wpetit/goweb/logger"

	_ "forge.cadoles.com/arcad/edge/pkg/storage/driver/cache"
	_ "forge.cadoles.com/arcad/edge/pkg/storage/driver/rpc"
	_ "forge.cadoles.com/arcad/edge/pkg/storage/driver/sqlite"
)

var (
	dsn string
)

func init() {
	flag.StringVar(&dsn, "dsn", "cache://./test-cache.sqlite?driver=sqlite&_pragma=foreign_keys(1)&_pragma=journal_mode=wal&bigCacheShards=32&bigCacheHardMaxCacheSize=128&bigCacheMaxEntrySize=125&bigCacheMaxEntriesInWindow=200000", "blobstore dsn")
}

func main() {
	flag.Parse()

	ctx := context.Background()

	logger.SetLevel(logger.LevelDebug)

	blobStore, err := driver.NewBlobStore(dsn)
	if err != nil {
		logger.Fatal(ctx, "could not create blobstore", logger.CapturedE(errors.WithStack(err)))
	}

	bucket, err := blobStore.OpenBucket(ctx, "default")
	if err != nil {
		logger.Fatal(ctx, "could not open bucket", logger.CapturedE(errors.WithStack(err)))
	}

	defer func() {
		if err := bucket.Close(); err != nil {
			logger.Fatal(ctx, "could not close bucket", logger.CapturedE(errors.WithStack(err)))
		}
	}()

	go readRandomBlobs(ctx, bucket)

	for {
		writeRandomBlob(ctx, bucket)
		time.Sleep(1 * time.Second)

		size, err := bucket.Size(ctx)
		if err != nil {
			logger.Fatal(ctx, "could not retrieve bucket size", logger.CapturedE(errors.WithStack(err)))
		}

		logger.Debug(ctx, "bucket stats", logger.F("size", size))
	}
}

func readRandomBlobs(ctx context.Context, bucket storage.BlobBucket) {
	for {
		infos, err := bucket.List(ctx)
		if err != nil {
			logger.Fatal(ctx, "could not list blobs", logger.CapturedE(errors.WithStack(err)))
		}

		total := len(infos)
		if total == 0 {
			logger.Debug(ctx, "no blob yet")

			continue
		}

		blob := infos[mrand.Intn(total)]

		readBlob(ctx, bucket, blob.ID())

		time.Sleep(250 * time.Millisecond)
	}
}

func readBlob(ctx context.Context, bucket storage.BlobBucket, blobID storage.BlobID) {
	ctx = logger.With(ctx, logger.F("blobID", blobID))

	reader, err := bucket.NewReader(ctx, blobID)
	if err != nil {
		logger.Fatal(ctx, "could not create reader", logger.CapturedE(errors.WithStack(err)))
	}

	defer func() {
		if err := reader.Close(); err != nil {
			logger.Fatal(ctx, "could not close reader", logger.CapturedE(errors.WithStack(err)))
		}
	}()

	if _, err := io.ReadAll(reader); err != nil {
		logger.Fatal(ctx, "could not read blob", logger.CapturedE(errors.WithStack(err)))
	}
}

func writeRandomBlob(ctx context.Context, bucket storage.BlobBucket) {
	blobID := storage.NewBlobID()
	buff := make([]byte, 10*1024)

	writer, err := bucket.NewWriter(ctx, blobID)
	if err != nil {
		logger.Fatal(ctx, "could not create writer", logger.CapturedE(errors.WithStack(err)))
	}

	defer func() {
		if err := writer.Close(); err != nil {
			logger.Fatal(ctx, "could not close writer", logger.CapturedE(errors.WithStack(err)))
		}
	}()

	if _, err := rand.Read(buff); err != nil {
		logger.Fatal(ctx, "could not read random data", logger.CapturedE(errors.WithStack(err)))
	}

	if _, err := writer.Write(buff); err != nil {
		logger.Fatal(ctx, "could not write blob", logger.CapturedE(errors.WithStack(err)))
	}

	printMemUsage(ctx)
}

func printMemUsage(ctx context.Context) {
	var m runtime.MemStats

	runtime.ReadMemStats(&m)

	logger.Debug(
		ctx, "memory usage",
		logger.F("alloc", m.Alloc/1024/1024),
		logger.F("totalAlloc", m.TotalAlloc/1024/1024),
		logger.F("sys", m.Sys/1024/1024),
		logger.F("numGC", m.NumGC),
	)
}
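A hypothetical way to exercise the tool locally (the -dsn flag comes from the file above; the value shown is its default, trimmed for brevity):

    go run ./cmd/blobstore-test -dsn "cache://./test-cache.sqlite?driver=sqlite&bigCacheShards=32&bigCacheHardMaxCacheSize=128"

The tool writes one 10 KiB random blob per second, reads a random existing blob every 250 ms, and logs the bucket size plus Go memory statistics, which makes cache growth and memory behaviour visible over time.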

go.mod (2 changes)

@@ -87,3 +87,5 @@ require (
 	modernc.org/strutil v1.1.3 // indirect
 	modernc.org/token v1.0.1 // indirect
 )
+
+replace github.com/allegro/bigcache/v3 v3.1.0 => github.com/Bornholm/bigcache v0.0.0-20231201111725-1ddf51584cad
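With the replace directive in place, `go list -m github.com/allegro/bigcache/v3` should report the module as replaced by the Bornholm fork's pseudo-version, so all existing import paths keep working while the forked code is what actually gets compiled.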

go.sum (4 changes)

@@ -37,6 +37,8 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl
 cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
 cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
+github.com/Bornholm/bigcache v0.0.0-20231201111725-1ddf51584cad h1:PTOf0L/YjiVis5LYzJmi7WqttJ/h/DU6h06aJ24Kpbg=
+github.com/Bornholm/bigcache v0.0.0-20231201111725-1ddf51584cad/go.mod h1:+q+mA6jGsjfsZ2HzhVSk38qDbX2/ZBJ7Yyciv75Ruo0=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
 github.com/GeertJohan/go.incremental v1.0.0/go.mod h1:6fAjUhbVuX1KcMD3c8TEgVUqmo4seqhv0i0kdATSkM0=
@@ -53,8 +55,6 @@ github.com/alecthomas/kong v0.2.1-0.20190708041108-0548c6b1afae/go.mod h1:+inYUS
 github.com/alecthomas/kong-hcl v0.1.8-0.20190615233001-b21fea9723c8/go.mod h1:MRgZdU3vrFd05IQ89AxUZ0aYdF39BYoNFa324SodPCA=
 github.com/alecthomas/repr v0.0.0-20180818092828-117648cd9897 h1:p9Sln00KOTlrYkxI1zYWl1QLnEqAqEARBEYa8FQnQcY=
 github.com/alecthomas/repr v0.0.0-20180818092828-117648cd9897/go.mod h1:xTS7Pm1pD1mvyM075QCDSRqH6qRLXylzS24ZTpRiSzQ=
-github.com/allegro/bigcache/v3 v3.1.0 h1:H2Vp8VOvxcrB91o86fUSVJFqeuz8kpyyB02eH3bSzwk=
-github.com/allegro/bigcache/v3 v3.1.0/go.mod h1:aPyh7jEvrog9zAwx5N7+JUQX5dZTSGpxF1LAR4dr35I=
 github.com/barnybug/go-cast v0.0.0-20201201064555-a87ccbc26692 h1:JW4WZlqyaNWUUahfr7MigeDW6jmtam5cTzzo1lwsFhE=
 github.com/barnybug/go-cast v0.0.0-20201201064555-a87ccbc26692/go.mod h1:Au0ipPuCBA7zsOC61SnyrYetm8VT3vo1UJtwHeYke44=
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=

misc/packaging/openrc/storage-server.logrotate.conf (new file, 9 lines; path inferred from the packaging diff above)

@@ -0,0 +1,9 @@
/var/log/storage-server/storage-server.log {
	missingok
	sharedscripts
	compress
	rotate 7
	postrotate
		/etc/init.d/storage-server restart
	endscript
}
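Once installed, the rule can be checked without rotating anything via logrotate's debug mode: `logrotate -d /etc/logrotate.d/storage-server`. Note that the postrotate step restarts the whole service rather than asking it to reopen its log file; that is presumably acceptable here because supervise-daemon owns the output redirection, so a restart is the simplest way to release the old file handle.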

OpenRC service file for storage-server (path not shown)

@@ -3,7 +3,7 @@
 command="/usr/bin/storage-server"
 command_args="run"
 supervisor=supervise-daemon
-output_log="/var/log/storage-server.log"
+output_log="/var/log/storage-server/storage-server.log"
 error_log="$output_log"

 depend() {

Goja promise helper (path not shown)

@@ -46,11 +46,11 @@ func NewPromiseProxyFrom(rt *goja.Runtime) *PromiseProxy {
 	return NewPromiseProxy(promise, resolve, reject)
 }

-func IsPromise(v goja.Value) (*goja.Promise, bool) {
+func isPromise(v any) (*goja.Promise, bool) {
 	if v == nil {
 		return nil, false
 	}

-	promise, ok := v.Export().(*goja.Promise)
+	promise, ok := v.(*goja.Promise)

 	return promise, ok
 }
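The motivation behind this hunk and the server changes below, per the "do not use goja.Value outside of run loop" commits: a goja.Runtime and its goja.Value instances are not safe for concurrent use, so values must be exported to plain Go values inside the goroutine that owns the runtime before crossing any channel boundary. A minimal sketch of that confinement pattern, assuming a loop-owned runtime (illustrative, not repository code):

package main

import (
	"fmt"

	"github.com/dop251/goja"
)

func main() {
	rt := goja.New()
	results := make(chan any, 1)

	// The runtime is confined to this goroutine; only exported,
	// plain Go values cross the channel boundary.
	go func() {
		v, err := rt.RunString(`1 + 2`)
		if err != nil {
			results <- err
			return
		}

		results <- v.Export()
	}()

	fmt.Println(<-results) // 3 (int64); no goja.Value escaped the loop
}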

App server exec loop (path not shown)

@@ -23,7 +23,7 @@ type Server struct {
 	modules []ServerModule
 }

-func (s *Server) ExecFuncByName(ctx context.Context, funcName string, args ...interface{}) (any, error) {
+func (s *Server) ExecFuncByName(ctx context.Context, funcName string, args ...any) (any, error) {
 	ctx = logger.With(ctx, logger.F("function", funcName), logger.F("args", args))

 	ret, err := s.Exec(ctx, funcName, args...)
@@ -34,9 +34,9 @@ func (s *Server) ExecFuncByName(ctx context.Context, funcName string, args ...in
 	return ret, nil
 }

-func (s *Server) Exec(ctx context.Context, callableOrFuncname any, args ...interface{}) (any, error) {
+func (s *Server) Exec(ctx context.Context, callableOrFuncname any, args ...any) (any, error) {
 	type result struct {
-		value goja.Value
+		value any
 		err   error
 	}
@@ -110,7 +110,7 @@ func (s *Server) Exec(ctx context.Context, callableOrFuncname any, args ...inter
 			}

 			done <- result{
-				value: value,
+				value: value.Export(),
 			}

 			logger.Debug(ctx, "executed callable", logger.F("callable", callableOrFuncname), logger.F("duration", time.Since(start).String()))
@@ -129,20 +129,18 @@ func (s *Server) Exec(ctx context.Context, callableOrFuncname any, args ...inter
 			return nil, errors.WithStack(result.err)
 		}

-		value := result.value
-
-		if promise, ok := IsPromise(value); ok {
-			value = s.waitForPromise(promise)
+		if promise, ok := isPromise(result.value); ok {
+			return s.waitForPromise(promise), nil
 		}

-		return value.Export(), nil
+		return result.value, nil
 	}
 }

-func (s *Server) waitForPromise(promise *goja.Promise) goja.Value {
+func (s *Server) waitForPromise(promise *goja.Promise) any {
 	var (
 		wg    sync.WaitGroup
-		value goja.Value
+		value any
 	)

 	wg.Add(1)
@@ -162,7 +160,7 @@ func (s *Server) waitForPromise(promise *goja.Promise) goja.Value {
 			return
 		}

-		value = promise.Result()
+		value = promise.Result().Export()

 		breakLoop = true
 	})

HTTP blob module (path not shown)

@@ -2,6 +2,7 @@ package blob

 import (
 	"encoding/json"
+	"io"
 	"io/fs"
 	"mime/multipart"
 	"net/http"
@@ -164,7 +165,14 @@ func handleAppDownload(w http.ResponseWriter, r *http.Request) {
 		}
 	}()

-	http.ServeContent(w, r, string(replyMessage.BlobInfo.ID()), replyMessage.BlobInfo.ModTime(), replyMessage.Blob)
+	// TODO Fix usage of ServeContent
+	// http.ServeContent(w, r, string(replyMessage.BlobInfo.ID()), replyMessage.BlobInfo.ModTime(), replyMessage.Blob)
+	w.Header().Add("Content-Type", replyMessage.BlobInfo.ContentType())
+
+	if _, err := io.Copy(w, replyMessage.Blob); err != nil {
+		logger.Error(ctx, "could not write blob", logger.CapturedE(errors.WithStack(err)))
+	}
 }

 type uploadedFile struct {
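For context (a standard-library fact, not something stated in this diff): http.ServeContent takes an io.ReadSeeker because it implements Range requests and modification-time handling, while the blob reply here only guarantees an io.Reader, hence the temporary io.Copy and the TODO. One possible follow-up, sketched under the assumption that blobs are small enough to buffer (the helper name is hypothetical):

package main

import (
	"bytes"
	"io"
	"net/http"
	"time"
)

// serveSeekable buffers an io.Reader into memory so it satisfies the
// io.ReadSeeker contract that http.ServeContent needs for Range
// support. This trades memory for seekability, so it only suits small blobs.
func serveSeekable(w http.ResponseWriter, r *http.Request, name string, modTime time.Time, blob io.Reader) error {
	data, err := io.ReadAll(blob)
	if err != nil {
		return err
	}

	http.ServeContent(w, r, name, modTime, bytes.NewReader(data))

	return nil
}

func main() {
	http.HandleFunc("/blob", func(w http.ResponseWriter, r *http.Request) {
		_ = serveSeekable(w, r, "hello.txt", time.Now(), bytes.NewBufferString("hello"))
	})

	_ = http.ListenAndServe(":8080", nil)
}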

Cache driver blob bucket (path not shown)

@@ -140,7 +140,7 @@ func (b *BlobBucket) clearCache(ctx context.Context, id storage.BlobID) {
 	logger.Debug(ctx, "clearing cache", logger.F("cacheKey", key))

-	if err := b.contentCache.Delete(key); err != nil {
+	if err := b.contentCache.Delete(key); err != nil && !errors.Is(err, bigcache.ErrEntryNotFound) {
 		logger.Error(ctx, "could not clear cache", logger.CapturedE(errors.WithStack(err)))
 	}

Cache driver blob store (path not shown)

@@ -70,10 +70,7 @@ func (s *BlobStore) OpenBucket(ctx context.Context, name string) (storage.BlobBu
 func NewBlobStore(store storage.BlobStore, funcs ...OptionFunc) (*BlobStore, error) {
 	options := NewOptions(funcs...)

-	cacheConfig := bigcache.DefaultConfig(options.CacheTTL)
-	cacheConfig.Logger = &cacheLogger{}
-
-	contentCache, err := bigcache.New(context.Background(), cacheConfig)
+	contentCache, err := bigcache.New(context.Background(), options.BigCache)
 	if err != nil {
 		return nil, errors.WithStack(err)
 	}

Cache driver DSN parsing (path not shown)

@@ -30,65 +30,45 @@ func blobStoreFactory(dsn *url.URL) (storage.BlobStore, error) {
 	blobStoreOptionFuncs := make([]OptionFunc, 0)

-	rawCacheTTL := query.Get("cacheTTL")
-	if rawCacheTTL != "" {
-		query.Del("cacheTTL")
-
-		ttl, err := time.ParseDuration(rawCacheTTL)
-		if err != nil {
-			return nil, errors.Wrap(err, "could not parse url parameter 'cacheTTL'")
+	cacheTTL, err := parseDuration(&query, "cacheTTL")
+	if err != nil {
+		if !errors.Is(err, errNotFound) {
+			return nil, errors.WithStack(err)
 		}

-		blobStoreOptionFuncs = append(blobStoreOptionFuncs, WithCacheTTL(ttl))
+		cacheTTL = time.Hour
 	}

-	rawCacheShards := query.Get("blobCacheShards")
-	if rawCacheShards != "" {
-		query.Del("blobCacheShards")
+	blobStoreOptionFuncs = append(blobStoreOptionFuncs, WithCacheTTL(cacheTTL))

-		cacheShards, err := strconv.ParseInt(rawCacheShards, 10, 32)
-		if err != nil {
-			return nil, errors.Wrap(err, "could not parse url parameter 'blobCacheShards'")
-		}
-
-		blobStoreOptionFuncs = append(blobStoreOptionFuncs, WithBlobCacheShards(int(cacheShards)))
+	cacheConfig, err := parseBigCacheConfig(&query, cacheTTL)
+	if err != nil {
+		return nil, errors.Wrap(err, "could not parse big cache config")
 	}

-	rawBlobCacheMaxMemorySize := query.Get("blobCacheMaxMemorySize")
-	if rawBlobCacheMaxMemorySize != "" {
-		query.Del("blobCacheMaxMemorySize")
+	blobStoreOptionFuncs = append(blobStoreOptionFuncs, WithBigCacheConfig(*cacheConfig))

-		blobCacheMaxMemorySize, err := strconv.ParseInt(rawBlobCacheMaxMemorySize, 10, 32)
-		if err != nil {
-			return nil, errors.Wrap(err, "could not parse url parameter 'blobCacheMaxMemorySize'")
+	blobBucketCacheSize, err := parseInt(&query, "blobBucketCacheSize")
+	if err != nil {
+		if !errors.Is(err, errNotFound) {
+			return nil, errors.WithStack(err)
 		}

-		blobStoreOptionFuncs = append(blobStoreOptionFuncs, WithBlobCacheMaxMemorySize(int(blobCacheMaxMemorySize)))
+		blobBucketCacheSize = 16
 	}

-	rawBlobBucketCacheSize := query.Get("blobBucketCacheSize")
-	if rawBlobBucketCacheSize != "" {
-		query.Del("blobBucketCacheSize")
+	blobStoreOptionFuncs = append(blobStoreOptionFuncs, WithBucketCacheSize(int(blobBucketCacheSize)))

-		blobBucketCacheSize, err := strconv.ParseInt(rawBlobBucketCacheSize, 10, 32)
-		if err != nil {
-			return nil, errors.Wrap(err, "could not parse url parameter 'blobBucketCacheSize'")
-		}
-
-		blobStoreOptionFuncs = append(blobStoreOptionFuncs, WithBucketCacheSize(int(blobBucketCacheSize)))
-	}
-
-	rawBlobInfoCacheSize := query.Get("blobInfoCacheSize")
-	if rawBlobInfoCacheSize != "" {
-		query.Del("blobInfoCacheSize")
-
-		blobInfoCacheSize, err := strconv.ParseInt(rawBlobInfoCacheSize, 10, 32)
-		if err != nil {
-			return nil, errors.Wrap(err, "could not parse url parameter 'blobInfoCacheSize'")
+	bloInfoCacheSize, err := parseInt(&query, "bloInfoCacheSize")
+	if err != nil {
+		if !errors.Is(err, errNotFound) {
+			return nil, errors.WithStack(err)
 		}

-		blobStoreOptionFuncs = append(blobStoreOptionFuncs, WithBlobInfoCacheSize(int(blobInfoCacheSize)))
+		bloInfoCacheSize = 16
 	}

+	blobStoreOptionFuncs = append(blobStoreOptionFuncs, WithBlobInfoCacheSize(int(bloInfoCacheSize)))
+
 	url := &url.URL{
 		Scheme: rawDriver,
@@ -117,3 +97,110 @@ func (l *cacheLogger) Printf(format string, v ...interface{}) {
 }

 var _ bigcache.Logger = &cacheLogger{}
+
+func parseBigCacheConfig(query *url.Values, cacheTTL time.Duration) (*bigcache.Config, error) {
+	config := bigcache.DefaultConfig(cacheTTL)
+	config.Logger = &cacheLogger{}
+
+	hardMaxCacheSize, err := parseInt(query, "bigCacheHardMaxCacheSize")
+	if err != nil {
+		if !errors.Is(err, errNotFound) {
+			return nil, errors.WithStack(err)
+		}
+
+		hardMaxCacheSize = int64(config.HardMaxCacheSize)
+	}
+
+	config.HardMaxCacheSize = int(hardMaxCacheSize)
+
+	maxEntriesInWindow, err := parseInt(query, "bigCacheMaxEntriesInWindow")
+	if err != nil {
+		if !errors.Is(err, errNotFound) {
+			return nil, errors.WithStack(err)
+		}
+
+		maxEntriesInWindow = int64(config.MaxEntriesInWindow)
+	}
+
+	config.MaxEntriesInWindow = int(maxEntriesInWindow)
+
+	shards, err := parseInt(query, "bigCacheShards")
+	if err != nil {
+		if !errors.Is(err, errNotFound) {
+			return nil, errors.WithStack(err)
+		}
+
+		shards = int64(config.Shards)
+	}
+
+	config.Shards = int(shards)
+
+	maxEntrySize, err := parseInt(query, "bigCacheMaxEntrySize")
+	if err != nil {
+		if !errors.Is(err, errNotFound) {
+			return nil, errors.WithStack(err)
+		}
+
+		maxEntrySize = int64(config.MaxEntrySize)
+	}
+
+	config.MaxEntrySize = int(maxEntrySize)
+
+	cleanWindow, err := parseDuration(query, "bigCacheCleanWindow")
+	if err != nil {
+		if !errors.Is(err, errNotFound) {
+			return nil, errors.WithStack(err)
+		}
+
+		cleanWindow = config.CleanWindow
+	}
+
+	config.CleanWindow = cleanWindow
+
+	lifeWindow, err := parseDuration(query, "bigCacheLifeWindow")
+	if err != nil {
+		if !errors.Is(err, errNotFound) {
+			return nil, errors.WithStack(err)
+		}
+
+		lifeWindow = config.LifeWindow
+	}
+
+	config.LifeWindow = lifeWindow
+
+	return &config, nil
+}
+
+var errNotFound = errors.New("not found")
+
+func parseInt(query *url.Values, name string) (int64, error) {
+	rawValue := query.Get(name)
+	if rawValue != "" {
+		query.Del(name)
+
+		value, err := strconv.ParseInt(rawValue, 10, 32)
+		if err != nil {
+			return 0, errors.Wrapf(err, "could not parse url parameter '%s'", name)
+		}
+
+		return value, nil
+	}
+
+	return 0, errors.WithStack(errNotFound)
+}
+
+func parseDuration(query *url.Values, name string) (time.Duration, error) {
+	rawValue := query.Get(name)
+	if rawValue != "" {
+		query.Del(name)
+
+		value, err := time.ParseDuration(rawValue)
+		if err != nil {
+			return 0, errors.Wrapf(err, "could not parse url parameter '%s'", name)
+		}
+
+		return value, nil
+	}
+
+	return 0, errors.WithStack(errNotFound)
+}
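A hedged usage sketch of the rewritten parsing, built on the exported driver.NewBlobStore entry point and blank driver imports shown in cmd/blobstore-test/main.go above (parameter values illustrative; note the size parameter is spelled bloInfoCacheSize in this revision):

package main

import (
	"context"

	"forge.cadoles.com/arcad/edge/pkg/storage/driver"

	_ "forge.cadoles.com/arcad/edge/pkg/storage/driver/cache"
	_ "forge.cadoles.com/arcad/edge/pkg/storage/driver/sqlite"
)

func main() {
	// Every bigCache* parameter is optional: missing ones fall back to
	// bigcache.DefaultConfig(cacheTTL), and cacheTTL itself defaults to
	// one hour when absent.
	dsn := "cache://./blobs.sqlite?driver=sqlite&cacheTTL=30m&bigCacheShards=64&bigCacheHardMaxCacheSize=256"

	blobStore, err := driver.NewBlobStore(dsn)
	if err != nil {
		panic(err)
	}

	if _, err := blobStore.OpenBucket(context.Background(), "default"); err != nil {
		panic(err)
	}
}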

Cache driver options (path not shown)

@@ -1,24 +1,27 @@
 package cache

-import "time"
+import (
+	"time"
+
+	"github.com/allegro/bigcache/v3"
+)

 type Options struct {
-	CacheTTL               time.Duration
-	BlobCacheMaxMemorySize int
-	BlobCacheShards        int
-	BucketCacheSize        int
-	BlobInfoCacheSize      int
+	CacheTTL          time.Duration
+	BigCache          bigcache.Config
+	BucketCacheSize   int
+	BlobInfoCacheSize int
 }

 type OptionFunc func(opts *Options)

 func NewOptions(funcs ...OptionFunc) *Options {
+	defaultTTL := 60 * time.Minute
+
 	opts := &Options{
-		CacheTTL:               60 * time.Minute,
-		BlobCacheMaxMemorySize: 256,
-		BlobCacheShards:        1024,
-		BucketCacheSize:        16,
-		BlobInfoCacheSize:      512,
+		CacheTTL:          defaultTTL,
+		BigCache:          bigcache.DefaultConfig(defaultTTL),
+		BucketCacheSize:   16,
+		BlobInfoCacheSize: 256,
 	}

 	for _, fn := range funcs {
@@ -34,15 +37,9 @@ func WithCacheTTL(ttl time.Duration) OptionFunc {
 	}
 }

-func WithBlobCacheMaxMemorySize(size int) OptionFunc {
+func WithBigCacheConfig(config bigcache.Config) OptionFunc {
 	return func(opts *Options) {
-		opts.BlobCacheMaxMemorySize = size
-	}
-}
-
-func WithBlobCacheShards(shards int) OptionFunc {
-	return func(opts *Options) {
-		opts.BlobCacheShards = shards
+		opts.BigCache = config
 	}
 }
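The new option replaces the two ad-hoc knobs with bigcache's full Config struct. A sketch of programmatic use, assuming the package import path from cmd/blobstore-test/main.go and the NewBlobStore signature shown above (helper name hypothetical):

package main

import (
	"time"

	"github.com/allegro/bigcache/v3"

	"forge.cadoles.com/arcad/edge/pkg/storage"
	"forge.cadoles.com/arcad/edge/pkg/storage/driver/cache"
)

// newTunedCache wraps a backing blob store with the cache driver,
// overriding the bigcache defaults via the new WithBigCacheConfig option.
func newTunedCache(backing storage.BlobStore) (*cache.BlobStore, error) {
	cfg := bigcache.DefaultConfig(30 * time.Minute)
	cfg.HardMaxCacheSize = 128 // expressed in MB, per bigcache's Config

	return cache.NewBlobStore(backing, cache.WithCacheTTL(30*time.Minute), cache.WithBigCacheConfig(cfg))
}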

SQLite driver factories (path not shown)

@@ -20,7 +20,7 @@ func init() {
 func documentStoreFactory(url *url.URL) (storage.DocumentStore, error) {
 	dir := filepath.Dir(url.Host + url.Path)

-	if dir != ":memory:" {
+	if dir != "." {
 		if err := os.MkdirAll(dir, os.FileMode(0750)); err != nil {
 			return nil, errors.WithStack(err)
 		}
@@ -39,7 +39,7 @@ func documentStoreFactory(url *url.URL) (storage.DocumentStore, error) {
 func blobStoreFactory(url *url.URL) (storage.BlobStore, error) {
 	dir := filepath.Dir(url.Host + url.Path)

-	if dir != ":memory:" {
+	if dir != "." {
 		if err := os.MkdirAll(dir, os.FileMode(0750)); err != nil {
 			return nil, errors.WithStack(err)
 		}
@@ -58,7 +58,7 @@ func blobStoreFactory(url *url.URL) (storage.BlobStore, error) {
 func shareStoreFactory(url *url.URL) (share.Store, error) {
 	dir := filepath.Dir(url.Host + url.Path)

-	if dir != ":memory:" {
+	if dir != "." {
 		if err := os.MkdirAll(dir, os.FileMode(0750)); err != nil {
 			return nil, errors.WithStack(err)
 		}
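The likely reasoning behind the `:memory:` to `.` change, inferred from the diff: filepath.Dir never returns the original string when the path has no separator; it returns ".". So the old guard always passed, and MkdirAll ran even for in-memory DSNs, whereas comparing against "." correctly detects "no directory component to create". A quick check:

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// filepath.Dir strips the last path element; with no separator in
	// the input it returns ".", never the input itself.
	fmt.Println(filepath.Dir(":memory:"))         // "."
	fmt.Println(filepath.Dir("data/edge.sqlite")) // "data"
	fmt.Println(filepath.Dir("edge.sqlite"))      // "."
}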