Compare commits: 2023.11.30 ... 2023.12.5

7 commits (author and date columns not captured in this view):

b9c08f647c
59f023a7d9
753a6c9708
b120e590b6
242bf379a8
065a9002a0
83a1e89665
.gitignore (vendored): 2 changes

@@ -2,7 +2,7 @@
 /bin
 /.env
 /tools
-*.sqlite
+*.sqlite*
 /.gitea-release
 /.edge
 /data
cmd/blobstore-test/main.go: new file, 146 additions

@@ -0,0 +1,146 @@
package main

import (
	"context"
	"crypto/rand"
	"flag"
	"io"
	mrand "math/rand"
	"runtime"
	"time"

	"forge.cadoles.com/arcad/edge/pkg/storage"
	"forge.cadoles.com/arcad/edge/pkg/storage/driver"
	"github.com/pkg/errors"
	"gitlab.com/wpetit/goweb/logger"

	_ "forge.cadoles.com/arcad/edge/pkg/storage/driver/cache"
	_ "forge.cadoles.com/arcad/edge/pkg/storage/driver/rpc"
	_ "forge.cadoles.com/arcad/edge/pkg/storage/driver/sqlite"
)

var (
	dsn string
)

func init() {
	flag.StringVar(&dsn, "dsn", "cache://./test-cache.sqlite?driver=sqlite&_pragma=foreign_keys(1)&_pragma=journal_mode=wal&bigCacheShards=32&bigCacheHardMaxCacheSize=128&bigCacheMaxEntrySize=125&bigCacheMaxEntriesInWindow=200000", "blobstore dsn")
}

func main() {
	flag.Parse()

	ctx := context.Background()

	logger.SetLevel(logger.LevelDebug)

	blobStore, err := driver.NewBlobStore(dsn)
	if err != nil {
		logger.Fatal(ctx, "could not create blobstore", logger.CapturedE(errors.WithStack(err)))
	}

	bucket, err := blobStore.OpenBucket(ctx, "default")
	if err != nil {
		logger.Fatal(ctx, "could not open bucket", logger.CapturedE(errors.WithStack(err)))
	}

	defer func() {
		if err := bucket.Close(); err != nil {
			logger.Fatal(ctx, "could not close bucket", logger.CapturedE(errors.WithStack(err)))
		}
	}()

	go readRandomBlobs(ctx, bucket)

	for {
		writeRandomBlob(ctx, bucket)

		time.Sleep(1 * time.Second)

		size, err := bucket.Size(ctx)
		if err != nil {
			logger.Fatal(ctx, "could not retrieve bucket size", logger.CapturedE(errors.WithStack(err)))
		}

		logger.Debug(ctx, "bucket stats", logger.F("size", size))
	}
}

func readRandomBlobs(ctx context.Context, bucket storage.BlobBucket) {
	for {
		infos, err := bucket.List(ctx)
		if err != nil {
			logger.Fatal(ctx, "could not list blobs", logger.CapturedE(errors.WithStack(err)))
		}

		total := len(infos)
		if total == 0 {
			logger.Debug(ctx, "no blob yet")

			continue
		}

		blob := infos[mrand.Intn(total)]

		readBlob(ctx, bucket, blob.ID())

		time.Sleep(250 * time.Millisecond)
	}
}

func readBlob(ctx context.Context, bucket storage.BlobBucket, blobID storage.BlobID) {
	ctx = logger.With(ctx, logger.F("blobID", blobID))

	reader, err := bucket.NewReader(ctx, blobID)
	if err != nil {
		logger.Fatal(ctx, "could not create reader", logger.CapturedE(errors.WithStack(err)))
	}

	defer func() {
		if err := reader.Close(); err != nil {
			logger.Fatal(ctx, "could not close reader", logger.CapturedE(errors.WithStack(err)))
		}
	}()

	if _, err := io.ReadAll(reader); err != nil {
		logger.Fatal(ctx, "could not read blob", logger.CapturedE(errors.WithStack(err)))
	}
}

func writeRandomBlob(ctx context.Context, bucket storage.BlobBucket) {
	blobID := storage.NewBlobID()
	buff := make([]byte, 10*1024)

	writer, err := bucket.NewWriter(ctx, blobID)
	if err != nil {
		logger.Fatal(ctx, "could not create writer", logger.CapturedE(errors.WithStack(err)))
	}

	defer func() {
		if err := writer.Close(); err != nil {
			logger.Fatal(ctx, "could not close writer", logger.CapturedE(errors.WithStack(err)))
		}
	}()

	if _, err := rand.Read(buff); err != nil {
		logger.Fatal(ctx, "could not read random data", logger.CapturedE(errors.WithStack(err)))
	}

	if _, err := writer.Write(buff); err != nil {
		logger.Fatal(ctx, "could not write blob", logger.CapturedE(errors.WithStack(err)))
	}

	printMemUsage(ctx)
}

func printMemUsage(ctx context.Context) {
	var m runtime.MemStats
	runtime.ReadMemStats(&m)
	logger.Debug(
		ctx, "memory usage",
		logger.F("alloc", m.Alloc/1024/1024),
		logger.F("totalAlloc", m.TotalAlloc/1024/1024),
		logger.F("sys", m.Sys/1024/1024),
		logger.F("numGC", m.NumGC),
	)
}
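Note: the tool exercises the new cache driver entirely through its DSN: the scheme selects the cache driver, the driver query parameter names the wrapped store, and the bigCache* parameters (added in pkg/storage/driver/cache/driver.go further down) tune the in-memory layer. A minimal sketch of the same API with a smaller, illustrative configuration (the DSN values here are examples, not defaults):

package main

import (
	"context"

	"forge.cadoles.com/arcad/edge/pkg/storage/driver"

	_ "forge.cadoles.com/arcad/edge/pkg/storage/driver/cache"
	_ "forge.cadoles.com/arcad/edge/pkg/storage/driver/sqlite"
)

func main() {
	// Hypothetical DSN: a sqlite store wrapped by the cache driver,
	// with 8 shards and a 64MB hard cap instead of the test tool's values.
	dsn := "cache://./data.sqlite?driver=sqlite&bigCacheShards=8&bigCacheHardMaxCacheSize=64"

	blobStore, err := driver.NewBlobStore(dsn)
	if err != nil {
		panic(err)
	}

	bucket, err := blobStore.OpenBucket(context.Background(), "default")
	if err != nil {
		panic(err)
	}
	defer bucket.Close()
}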
go.mod: 2 changes

@@ -87,3 +87,5 @@ require (
 	modernc.org/strutil v1.1.3 // indirect
 	modernc.org/token v1.0.1 // indirect
 )
+
+replace github.com/allegro/bigcache/v3 v3.1.0 => github.com/Bornholm/bigcache v0.0.0-20231201111725-1ddf51584cad
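Note: the replace directive reroutes every import of github.com/allegro/bigcache/v3 to the Bornholm fork without touching any import path in the source; only go.mod and go.sum change. The general shape of the pattern, in a hypothetical module:

module example.com/demo // illustrative module path

go 1.21

require github.com/allegro/bigcache/v3 v3.1.0

// All code keeps importing github.com/allegro/bigcache/v3;
// the build fetches the fork instead.
replace github.com/allegro/bigcache/v3 v3.1.0 => github.com/Bornholm/bigcache v0.0.0-20231201111725-1ddf51584cad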
go.sum: 4 changes

@@ -37,6 +37,8 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl
 cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
 cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
+github.com/Bornholm/bigcache v0.0.0-20231201111725-1ddf51584cad h1:PTOf0L/YjiVis5LYzJmi7WqttJ/h/DU6h06aJ24Kpbg=
+github.com/Bornholm/bigcache v0.0.0-20231201111725-1ddf51584cad/go.mod h1:+q+mA6jGsjfsZ2HzhVSk38qDbX2/ZBJ7Yyciv75Ruo0=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
 github.com/GeertJohan/go.incremental v1.0.0/go.mod h1:6fAjUhbVuX1KcMD3c8TEgVUqmo4seqhv0i0kdATSkM0=
@@ -53,8 +55,6 @@ github.com/alecthomas/kong v0.2.1-0.20190708041108-0548c6b1afae/go.mod h1:+inYUS
 github.com/alecthomas/kong-hcl v0.1.8-0.20190615233001-b21fea9723c8/go.mod h1:MRgZdU3vrFd05IQ89AxUZ0aYdF39BYoNFa324SodPCA=
 github.com/alecthomas/repr v0.0.0-20180818092828-117648cd9897 h1:p9Sln00KOTlrYkxI1zYWl1QLnEqAqEARBEYa8FQnQcY=
 github.com/alecthomas/repr v0.0.0-20180818092828-117648cd9897/go.mod h1:xTS7Pm1pD1mvyM075QCDSRqH6qRLXylzS24ZTpRiSzQ=
-github.com/allegro/bigcache/v3 v3.1.0 h1:H2Vp8VOvxcrB91o86fUSVJFqeuz8kpyyB02eH3bSzwk=
-github.com/allegro/bigcache/v3 v3.1.0/go.mod h1:aPyh7jEvrog9zAwx5N7+JUQX5dZTSGpxF1LAR4dr35I=
 github.com/barnybug/go-cast v0.0.0-20201201064555-a87ccbc26692 h1:JW4WZlqyaNWUUahfr7MigeDW6jmtam5cTzzo1lwsFhE=
 github.com/barnybug/go-cast v0.0.0-20201201064555-a87ccbc26692/go.mod h1:Au0ipPuCBA7zsOC61SnyrYetm8VT3vo1UJtwHeYke44=
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
@@ -4,7 +4,7 @@ ARG HTTP_PROXY=
 ARG HTTPS_PROXY=
 ARG http_proxy=
 ARG https_proxy=
-ARG GO_VERSION=1.21.2
+ARG GO_VERSION=1.21.5
 
 # Install dev environment dependencies
 RUN export DEBIAN_FRONTEND=noninteractive &&\
@@ -46,11 +46,11 @@ func NewPromiseProxyFrom(rt *goja.Runtime) *PromiseProxy {
 	return NewPromiseProxy(promise, resolve, reject)
 }
 
-func IsPromise(v goja.Value) (*goja.Promise, bool) {
+func isPromise(v any) (*goja.Promise, bool) {
 	if v == nil {
 		return nil, false
 	}
 
-	promise, ok := v.Export().(*goja.Promise)
+	promise, ok := v.(*goja.Promise)
 	return promise, ok
 }
@@ -23,7 +23,7 @@ type Server struct {
 	modules []ServerModule
 }
 
-func (s *Server) ExecFuncByName(ctx context.Context, funcName string, args ...interface{}) (any, error) {
+func (s *Server) ExecFuncByName(ctx context.Context, funcName string, args ...any) (any, error) {
 	ctx = logger.With(ctx, logger.F("function", funcName), logger.F("args", args))
 
 	ret, err := s.Exec(ctx, funcName, args...)
@@ -34,9 +34,9 @@ func (s *Server) ExecFuncByName(ctx context.Context, funcName string, args ...in
 	return ret, nil
 }
 
-func (s *Server) Exec(ctx context.Context, callableOrFuncname any, args ...interface{}) (any, error) {
+func (s *Server) Exec(ctx context.Context, callableOrFuncname any, args ...any) (any, error) {
 	type result struct {
-		value goja.Value
+		value any
 		err   error
 	}
 
@@ -110,7 +110,7 @@ func (s *Server) Exec(ctx context.Context, callableOrFuncname any, args ...inter
 			}
 
 			done <- result{
-				value: value,
+				value: value.Export(),
 			}
 
 			logger.Debug(ctx, "executed callable", logger.F("callable", callableOrFuncname), logger.F("duration", time.Since(start).String()))
@@ -129,20 +129,18 @@ func (s *Server) Exec(ctx context.Context, callableOrFuncname any, args ...inter
 			return nil, errors.WithStack(result.err)
 		}
 
-		value := result.value
-
-		if promise, ok := IsPromise(value); ok {
-			value = s.waitForPromise(promise)
+		if promise, ok := isPromise(result.value); ok {
+			return s.waitForPromise(promise), nil
 		}
 
-		return value.Export(), nil
+		return result.value, nil
 	}
 }
 
-func (s *Server) waitForPromise(promise *goja.Promise) goja.Value {
+func (s *Server) waitForPromise(promise *goja.Promise) any {
 	var (
 		wg    sync.WaitGroup
-		value goja.Value
+		value any
 	)
 
 	wg.Add(1)
@@ -162,7 +160,7 @@ func (s *Server) waitForPromise(promise *goja.Promise) goja.Value {
 			return
 		}
 
-		value = promise.Result()
+		value = promise.Result().Export()
 
 		breakLoop = true
 	})
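Note: these hunks move every goja.Value conversion inside the goroutine that drives the runtime: results are exported before being sent on the channel, and promise detection (isPromise) now operates on the already-exported any value. A goja runtime and the values it produces are not safe for concurrent use, so this presumably avoids touching a goja.Value from the receiving goroutine. A standalone sketch of the Export boundary, assuming github.com/dop251/goja:

package main

import (
	"fmt"

	"github.com/dop251/goja"
)

func main() {
	rt := goja.New()

	value, err := rt.RunString(`({answer: 42})`)
	if err != nil {
		panic(err)
	}

	// Export converts the runtime-bound goja.Value into a plain Go value
	// (a map[string]interface{} here) that can safely cross goroutines;
	// the goja.Value itself should only be used where the runtime runs.
	exported := value.Export()

	fmt.Printf("%#v\n", exported)
}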
@@ -2,6 +2,7 @@ package blob
 
 import (
 	"encoding/json"
+	"io"
 	"io/fs"
 	"mime/multipart"
 	"net/http"
@@ -164,7 +165,14 @@ func handleAppDownload(w http.ResponseWriter, r *http.Request) {
 		}
 	}()
 
-	http.ServeContent(w, r, string(replyMessage.BlobInfo.ID()), replyMessage.BlobInfo.ModTime(), replyMessage.Blob)
+	// TODO Fix usage of ServeContent
+	// http.ServeContent(w, r, string(replyMessage.BlobInfo.ID()), replyMessage.BlobInfo.ModTime(), replyMessage.Blob)
+
+	w.Header().Add("Content-Type", replyMessage.BlobInfo.ContentType())
+
+	if _, err := io.Copy(w, replyMessage.Blob); err != nil {
+		logger.Error(ctx, "could not write blob", logger.CapturedE(errors.WithStack(err)))
+	}
 }
 
 type uploadedFile struct {
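Note: http.ServeContent requires an io.ReadSeeker to serve range requests, and the blob reply presumably only carries a streaming reader, hence the io.Copy fallback flagged by the TODO; the trade-off is that Range and If-Modified-Since are not handled. A self-contained sketch of the fallback shape (serveBlob and its arguments are illustrative, not this module's API):

package main

import (
	"io"
	"log"
	"net/http"
	"strings"
)

// serveBlob streams a blob without seeking: set the Content-Type up front,
// then copy the reader straight into the response body.
func serveBlob(w http.ResponseWriter, contentType string, blob io.Reader) {
	w.Header().Add("Content-Type", contentType)

	if _, err := io.Copy(w, blob); err != nil {
		log.Printf("could not write blob: %v", err)
	}
}

func main() {
	http.HandleFunc("/blob", func(w http.ResponseWriter, r *http.Request) {
		serveBlob(w, "text/plain", strings.NewReader("hello"))
	})

	log.Fatal(http.ListenAndServe(":8080", nil))
}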
pkg/storage/driver/cache/blob_bucket.go (vendored): 2 changes

@@ -140,7 +140,7 @@ func (b *BlobBucket) clearCache(ctx context.Context, id storage.BlobID) {
 
 	logger.Debug(ctx, "clearing cache", logger.F("cacheKey", key))
 
-	if err := b.contentCache.Delete(key); err != nil {
+	if err := b.contentCache.Delete(key); err != nil && !errors.Is(err, bigcache.ErrEntryNotFound) {
 		logger.Error(ctx, "could not clear cache", logger.CapturedE(errors.WithStack(err)))
 	}
 
pkg/storage/driver/cache/blob_store.go (vendored): 5 changes

@@ -70,10 +70,7 @@ func (s *BlobStore) OpenBucket(ctx context.Context, name string) (storage.BlobBu
 func NewBlobStore(store storage.BlobStore, funcs ...OptionFunc) (*BlobStore, error) {
 	options := NewOptions(funcs...)
 
-	cacheConfig := bigcache.DefaultConfig(options.CacheTTL)
-	cacheConfig.Logger = &cacheLogger{}
-
-	contentCache, err := bigcache.New(context.Background(), cacheConfig)
+	contentCache, err := bigcache.New(context.Background(), options.BigCache)
 	if err != nil {
 		return nil, errors.WithStack(err)
 	}
pkg/storage/driver/cache/driver.go (vendored): 177 changes

@@ -30,65 +30,45 @@ func blobStoreFactory(dsn *url.URL) (storage.BlobStore, error) {
 
 	blobStoreOptionFuncs := make([]OptionFunc, 0)
 
-	rawCacheTTL := query.Get("cacheTTL")
-	if rawCacheTTL != "" {
-		query.Del("cacheTTL")
-
-		ttl, err := time.ParseDuration(rawCacheTTL)
-		if err != nil {
-			return nil, errors.Wrap(err, "could not parse url parameter 'cacheTTL'")
-		}
-
-		blobStoreOptionFuncs = append(blobStoreOptionFuncs, WithCacheTTL(ttl))
-	}
-
-	rawCacheShards := query.Get("blobCacheShards")
-	if rawCacheShards != "" {
-		query.Del("blobCacheShards")
-
-		cacheShards, err := strconv.ParseInt(rawCacheShards, 10, 32)
-		if err != nil {
-			return nil, errors.Wrap(err, "could not parse url parameter 'blobCacheShards'")
-		}
-
-		blobStoreOptionFuncs = append(blobStoreOptionFuncs, WithBlobCacheShards(int(cacheShards)))
-	}
-
-	rawBlobCacheMaxMemorySize := query.Get("blobCacheMaxMemorySize")
-	if rawBlobCacheMaxMemorySize != "" {
-		query.Del("blobCacheMaxMemorySize")
-
-		blobCacheMaxMemorySize, err := strconv.ParseInt(rawBlobCacheMaxMemorySize, 10, 32)
-		if err != nil {
-			return nil, errors.Wrap(err, "could not parse url parameter 'blobCacheMaxMemorySize'")
-		}
-
-		blobStoreOptionFuncs = append(blobStoreOptionFuncs, WithBlobCacheMaxMemorySize(int(blobCacheMaxMemorySize)))
-	}
-
-	rawBlobBucketCacheSize := query.Get("blobBucketCacheSize")
-	if rawBlobBucketCacheSize != "" {
-		query.Del("blobBucketCacheSize")
-
-		blobBucketCacheSize, err := strconv.ParseInt(rawBlobBucketCacheSize, 10, 32)
-		if err != nil {
-			return nil, errors.Wrap(err, "could not parse url parameter 'blobBucketCacheSize'")
-		}
-
-		blobStoreOptionFuncs = append(blobStoreOptionFuncs, WithBucketCacheSize(int(blobBucketCacheSize)))
-	}
-
-	rawBlobInfoCacheSize := query.Get("blobInfoCacheSize")
-	if rawBlobInfoCacheSize != "" {
-		query.Del("blobInfoCacheSize")
-
-		blobInfoCacheSize, err := strconv.ParseInt(rawBlobInfoCacheSize, 10, 32)
-		if err != nil {
-			return nil, errors.Wrap(err, "could not parse url parameter 'blobInfoCacheSize'")
-		}
-
-		blobStoreOptionFuncs = append(blobStoreOptionFuncs, WithBlobInfoCacheSize(int(blobInfoCacheSize)))
-	}
+	cacheTTL, err := parseDuration(&query, "cacheTTL")
+	if err != nil {
+		if !errors.Is(err, errNotFound) {
+			return nil, errors.WithStack(err)
+		}
+
+		cacheTTL = time.Hour
+	}
+
+	blobStoreOptionFuncs = append(blobStoreOptionFuncs, WithCacheTTL(cacheTTL))
+
+	cacheConfig, err := parseBigCacheConfig(&query, cacheTTL)
+	if err != nil {
+		return nil, errors.Wrap(err, "could not parse big cache config")
+	}
+
+	blobStoreOptionFuncs = append(blobStoreOptionFuncs, WithBigCacheConfig(*cacheConfig))
+
+	blobBucketCacheSize, err := parseInt(&query, "blobBucketCacheSize")
+	if err != nil {
+		if !errors.Is(err, errNotFound) {
+			return nil, errors.WithStack(err)
+		}
+
+		blobBucketCacheSize = 16
+	}
+
+	blobStoreOptionFuncs = append(blobStoreOptionFuncs, WithBucketCacheSize(int(blobBucketCacheSize)))
+
+	bloInfoCacheSize, err := parseInt(&query, "bloInfoCacheSize")
+	if err != nil {
+		if !errors.Is(err, errNotFound) {
+			return nil, errors.WithStack(err)
+		}
+
+		bloInfoCacheSize = 16
+	}
+
+	blobStoreOptionFuncs = append(blobStoreOptionFuncs, WithBlobInfoCacheSize(int(bloInfoCacheSize)))
 
 	url := &url.URL{
 		Scheme: rawDriver,
@@ -117,3 +97,110 @@ func (l *cacheLogger) Printf(format string, v ...interface{}) {
 }
 
 var _ bigcache.Logger = &cacheLogger{}
+
+func parseBigCacheConfig(query *url.Values, cacheTTL time.Duration) (*bigcache.Config, error) {
+	config := bigcache.DefaultConfig(cacheTTL)
+	config.Logger = &cacheLogger{}
+
+	hardMaxCacheSize, err := parseInt(query, "bigCacheHardMaxCacheSize")
+	if err != nil {
+		if !errors.Is(err, errNotFound) {
+			return nil, errors.WithStack(err)
+		}
+
+		hardMaxCacheSize = int64(config.HardMaxCacheSize)
+	}
+
+	config.HardMaxCacheSize = int(hardMaxCacheSize)
+
+	maxEntriesInWindow, err := parseInt(query, "bigCacheMaxEntriesInWindow")
+	if err != nil {
+		if !errors.Is(err, errNotFound) {
+			return nil, errors.WithStack(err)
+		}
+
+		maxEntriesInWindow = int64(config.MaxEntriesInWindow)
+	}
+
+	config.MaxEntriesInWindow = int(maxEntriesInWindow)
+
+	shards, err := parseInt(query, "bigCacheShards")
+	if err != nil {
+		if !errors.Is(err, errNotFound) {
+			return nil, errors.WithStack(err)
+		}
+
+		shards = int64(config.Shards)
+	}
+
+	config.Shards = int(shards)
+
+	maxEntrySize, err := parseInt(query, "bigCacheMaxEntrySize")
+	if err != nil {
+		if !errors.Is(err, errNotFound) {
+			return nil, errors.WithStack(err)
+		}
+
+		maxEntrySize = int64(config.MaxEntrySize)
+	}
+
+	config.MaxEntrySize = int(maxEntrySize)
+
+	cleanWindow, err := parseDuration(query, "bigCacheCleanWindow")
+	if err != nil {
+		if !errors.Is(err, errNotFound) {
+			return nil, errors.WithStack(err)
+		}
+
+		cleanWindow = config.CleanWindow
+	}
+
+	config.CleanWindow = cleanWindow
+
+	lifeWindow, err := parseDuration(query, "bigCacheLifeWindow")
+	if err != nil {
+		if !errors.Is(err, errNotFound) {
+			return nil, errors.WithStack(err)
+		}
+
+		lifeWindow = config.LifeWindow
+	}
+
+	config.LifeWindow = lifeWindow
+
+	return &config, nil
+}
+
+var errNotFound = errors.New("not found")
+
+func parseInt(query *url.Values, name string) (int64, error) {
+	rawValue := query.Get(name)
+	if rawValue != "" {
+		query.Del(name)
+
+		value, err := strconv.ParseInt(rawValue, 10, 32)
+		if err != nil {
+			return 0, errors.Wrapf(err, "could not parse url parameter '%s'", name)
+		}
+
+		return value, nil
+	}
+
+	return 0, errors.WithStack(errNotFound)
+}
+
+func parseDuration(query *url.Values, name string) (time.Duration, error) {
+	rawValue := query.Get(name)
+	if rawValue != "" {
+		query.Del(name)
+
+		value, err := time.ParseDuration(rawValue)
+		if err != nil {
+			return 0, errors.Wrapf(err, "could not parse url parameter '%s'", name)
+		}
+
+		return value, nil
+	}
+
+	return 0, errors.WithStack(errNotFound)
+}
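Note: parseInt and parseDuration both Del the parameter after reading it, and the rebuilt URL handed to the wrapped driver carries only the remaining query, so cache-only parameters do not leak downstream. Absent parameters fall back to defaults via the errNotFound sentinel (one-hour TTL, 16-entry bucket and blob-info caches). The last block reads 'bloInfoCacheSize' verbatim as committed, which looks like a typo for 'blobInfoCacheSize'. A quick standalone illustration of the consume-and-forward behavior:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	u, err := url.Parse("cache://./test.sqlite?driver=sqlite&bigCacheShards=32")
	if err != nil {
		panic(err)
	}

	query := u.Query()

	// The helpers read a parameter, then delete it so that only the
	// remaining parameters are forwarded to the wrapped driver.
	shards := query.Get("bigCacheShards")
	query.Del("bigCacheShards")

	fmt.Println(shards)         // 32
	fmt.Println(query.Encode()) // driver=sqlite
}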
pkg/storage/driver/cache/options.go (vendored): 35 changes

@@ -1,24 +1,27 @@
 package cache
 
-import "time"
+import (
+	"time"
+
+	"github.com/allegro/bigcache/v3"
+)
 
 type Options struct {
-	CacheTTL               time.Duration
-	BlobCacheMaxMemorySize int
-	BlobCacheShards        int
-	BucketCacheSize        int
-	BlobInfoCacheSize      int
+	CacheTTL          time.Duration
+	BigCache          bigcache.Config
+	BucketCacheSize   int
+	BlobInfoCacheSize int
 }
 
 type OptionFunc func(opts *Options)
 
 func NewOptions(funcs ...OptionFunc) *Options {
+	defaultTTL := 60 * time.Minute
 	opts := &Options{
-		CacheTTL:               60 * time.Minute,
-		BlobCacheMaxMemorySize: 256,
-		BlobCacheShards:        1024,
-		BucketCacheSize:        16,
-		BlobInfoCacheSize:      512,
+		CacheTTL:          defaultTTL,
+		BigCache:          bigcache.DefaultConfig(defaultTTL),
+		BucketCacheSize:   16,
+		BlobInfoCacheSize: 256,
 	}
 
 	for _, fn := range funcs {
@@ -34,15 +37,9 @@ func WithCacheTTL(ttl time.Duration) OptionFunc {
 	}
 }
 
-func WithBlobCacheMaxMemorySize(size int) OptionFunc {
+func WithBigCacheConfig(config bigcache.Config) OptionFunc {
 	return func(opts *Options) {
-		opts.BlobCacheMaxMemorySize = size
-	}
-}
-
-func WithBlobCacheShards(shards int) OptionFunc {
-	return func(opts *Options) {
-		opts.BlobCacheShards = shards
+		opts.BigCache = config
 	}
 }
 
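Note: the option set collapses the ad-hoc BlobCacheMaxMemorySize/BlobCacheShards fields into a full bigcache.Config, so any bigcache setting can now be injected in one piece. A sketch of direct construction through the new option (assumes the NewBlobStore signature from blob_store.go above and that the returned *BlobStore satisfies storage.BlobStore; the backend store is a placeholder):

package cacheexample

import (
	"time"

	"github.com/allegro/bigcache/v3"

	"forge.cadoles.com/arcad/edge/pkg/storage"
	"forge.cadoles.com/arcad/edge/pkg/storage/driver/cache"
)

// newCachedStore wraps an existing blob store with the cache driver.
func newCachedStore(backend storage.BlobStore) (storage.BlobStore, error) {
	ttl := 30 * time.Minute

	config := bigcache.DefaultConfig(ttl)
	config.Shards = 64
	config.HardMaxCacheSize = 256 // megabytes

	return cache.NewBlobStore(
		backend,
		cache.WithCacheTTL(ttl),
		cache.WithBigCacheConfig(config),
	)
}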
@@ -20,7 +20,7 @@ func init() {
 func documentStoreFactory(url *url.URL) (storage.DocumentStore, error) {
 	dir := filepath.Dir(url.Host + url.Path)
 
-	if dir != ":memory:" {
+	if dir != "." {
 		if err := os.MkdirAll(dir, os.FileMode(0750)); err != nil {
 			return nil, errors.WithStack(err)
 		}
@@ -39,7 +39,7 @@ func documentStoreFactory(url *url.URL) (storage.DocumentStore, error) {
 func blobStoreFactory(url *url.URL) (storage.BlobStore, error) {
 	dir := filepath.Dir(url.Host + url.Path)
 
-	if dir != ":memory:" {
+	if dir != "." {
 		if err := os.MkdirAll(dir, os.FileMode(0750)); err != nil {
 			return nil, errors.WithStack(err)
 		}
@@ -58,7 +58,7 @@ func blobStoreFactory(url *url.URL) (storage.BlobStore, error) {
 func shareStoreFactory(url *url.URL) (share.Store, error) {
 	dir := filepath.Dir(url.Host + url.Path)
 
-	if dir != ":memory:" {
+	if dir != "." {
 		if err := os.MkdirAll(dir, os.FileMode(0750)); err != nil {
 			return nil, errors.WithStack(err)
 		}
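Note: filepath.Dir never returns ":memory:"; it reduces both ":memory:" and bare file names to ".", so the old guard was effectively dead and MkdirAll ran even for in-memory DSNs. Comparing against "." skips directory creation exactly when there is no directory component to create. Quick check:

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	fmt.Println(filepath.Dir(":memory:"))       // .
	fmt.Println(filepath.Dir("test.sqlite"))    // .
	fmt.Println(filepath.Dir("data/db.sqlite")) // data
}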