Compare commits
5 commits: 2023.12.1...2023.12.5

SHA1:
- b9c08f647c
- 59f023a7d9
- 753a6c9708
- b120e590b6
- 242bf379a8
.gitignore (vendored, 2 lines changed)
@@ -2,7 +2,7 @@
 /bin
 /.env
 /tools
-*.sqlite
+*.sqlite*
 /.gitea-release
 /.edge
 /data
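Presumably the pattern was widened from *.sqlite to *.sqlite* so that SQLite side files such as test-cache.sqlite-wal and test-cache.sqlite-shm are ignored too; these appear once journal_mode=wal is used, as in the default DSN of the new blobstore-test command below.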
cmd/blobstore-test/main.go (new file, 146 lines)
@@ -0,0 +1,146 @@
package main

import (
    "context"
    "crypto/rand"
    "flag"
    "io"
    mrand "math/rand"
    "runtime"
    "time"

    "forge.cadoles.com/arcad/edge/pkg/storage"
    "forge.cadoles.com/arcad/edge/pkg/storage/driver"
    "github.com/pkg/errors"
    "gitlab.com/wpetit/goweb/logger"

    _ "forge.cadoles.com/arcad/edge/pkg/storage/driver/cache"
    _ "forge.cadoles.com/arcad/edge/pkg/storage/driver/rpc"
    _ "forge.cadoles.com/arcad/edge/pkg/storage/driver/sqlite"
)

var (
    dsn string
)

func init() {
    flag.StringVar(&dsn, "dsn", "cache://./test-cache.sqlite?driver=sqlite&_pragma=foreign_keys(1)&_pragma=journal_mode=wal&bigCacheShards=32&bigCacheHardMaxCacheSize=128&bigCacheMaxEntrySize=125&bigCacheMaxEntriesInWindow=200000", "blobstore dsn")
}

func main() {
    flag.Parse()

    ctx := context.Background()

    logger.SetLevel(logger.LevelDebug)

    blobStore, err := driver.NewBlobStore(dsn)
    if err != nil {
        logger.Fatal(ctx, "could not create blobstore", logger.CapturedE(errors.WithStack(err)))
    }

    bucket, err := blobStore.OpenBucket(ctx, "default")
    if err != nil {
        logger.Fatal(ctx, "could not open bucket", logger.CapturedE(errors.WithStack(err)))
    }

    defer func() {
        if err := bucket.Close(); err != nil {
            logger.Fatal(ctx, "could not close bucket", logger.CapturedE(errors.WithStack(err)))
        }
    }()

    go readRandomBlobs(ctx, bucket)

    for {
        writeRandomBlob(ctx, bucket)

        time.Sleep(1 * time.Second)

        size, err := bucket.Size(ctx)
        if err != nil {
            logger.Fatal(ctx, "could not retrieve bucket size", logger.CapturedE(errors.WithStack(err)))
        }

        logger.Debug(ctx, "bucket stats", logger.F("size", size))
    }
}

func readRandomBlobs(ctx context.Context, bucket storage.BlobBucket) {
    for {
        infos, err := bucket.List(ctx)
        if err != nil {
            logger.Fatal(ctx, "could not list blobs", logger.CapturedE(errors.WithStack(err)))
        }

        total := len(infos)
        if total == 0 {
            logger.Debug(ctx, "no blob yet")

            continue
        }

        blob := infos[mrand.Intn(total)]

        readBlob(ctx, bucket, blob.ID())

        time.Sleep(250 * time.Millisecond)
    }
}

func readBlob(ctx context.Context, bucket storage.BlobBucket, blobID storage.BlobID) {
    ctx = logger.With(ctx, logger.F("blobID", blobID))

    reader, err := bucket.NewReader(ctx, blobID)
    if err != nil {
        logger.Fatal(ctx, "could not create reader", logger.CapturedE(errors.WithStack(err)))
    }

    defer func() {
        if err := reader.Close(); err != nil {
            logger.Fatal(ctx, "could not close reader", logger.CapturedE(errors.WithStack(err)))
        }
    }()

    if _, err := io.ReadAll(reader); err != nil {
        logger.Fatal(ctx, "could not read blob", logger.CapturedE(errors.WithStack(err)))
    }
}

func writeRandomBlob(ctx context.Context, bucket storage.BlobBucket) {
    blobID := storage.NewBlobID()
    buff := make([]byte, 10*1024)

    writer, err := bucket.NewWriter(ctx, blobID)
    if err != nil {
        logger.Fatal(ctx, "could not create writer", logger.CapturedE(errors.WithStack(err)))
    }

    defer func() {
        if err := writer.Close(); err != nil {
            logger.Fatal(ctx, "could not close writer", logger.CapturedE(errors.WithStack(err)))
        }
    }()

    if _, err := rand.Read(buff); err != nil {
        logger.Fatal(ctx, "could not read random data", logger.CapturedE(errors.WithStack(err)))
    }

    if _, err := writer.Write(buff); err != nil {
        logger.Fatal(ctx, "could not write blob", logger.CapturedE(errors.WithStack(err)))
    }

    printMemUsage(ctx)
}

func printMemUsage(ctx context.Context) {
    var m runtime.MemStats
    runtime.ReadMemStats(&m)
    logger.Debug(
        ctx, "memory usage",
        logger.F("alloc", m.Alloc/1024/1024),
        logger.F("totalAlloc", m.TotalAlloc/1024/1024),
        logger.F("sys", m.Sys/1024/1024),
        logger.F("numGC", m.NumGC),
    )
}
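The new command is a small read/write stress tool for the blobstore drivers: the main loop writes a random 10 KiB blob every second and logs the bucket size, while a goroutine picks a random existing blob every 250 ms and reads it fully; memory statistics (alloc/totalAlloc/sys in MiB, GC count) are logged after each write. It can be pointed at any registered driver via the -dsn flag (e.g. go run ./cmd/blobstore-test -dsn "..."); the default DSN exercises the new bigCache* parameters introduced in this range.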
@@ -4,7 +4,7 @@ ARG HTTP_PROXY=
 ARG HTTPS_PROXY=
 ARG http_proxy=
 ARG https_proxy=
-ARG GO_VERSION=1.21.2
+ARG GO_VERSION=1.21.5
 
 # Install dev environment dependencies
 RUN export DEBIAN_FRONTEND=noninteractive &&\
@@ -46,11 +46,11 @@ func NewPromiseProxyFrom(rt *goja.Runtime) *PromiseProxy {
     return NewPromiseProxy(promise, resolve, reject)
 }
 
-func IsPromise(v goja.Value) (*goja.Promise, bool) {
+func isPromise(v any) (*goja.Promise, bool) {
     if v == nil {
         return nil, false
     }
 
-    promise, ok := v.Export().(*goja.Promise)
+    promise, ok := v.(*goja.Promise)
     return promise, ok
 }
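With values now exported before they leave the event loop (see the server.go hunks below), the check type-asserts on a plain any instead of calling Export() itself. A minimal sketch of the new flow, assuming a hypothetical *goja.Runtime named rt:

    val, err := rt.RunString(`Promise.resolve(42)`)
    if err != nil {
        panic(err)
    }

    // Export() turns the goja.Value into a plain Go value;
    // a promise object exports as *goja.Promise.
    if promise, ok := isPromise(val.Export()); ok {
        _ = promise // e.g. handed to waitForPromise
    }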
@@ -23,7 +23,7 @@ type Server struct {
     modules []ServerModule
 }
 
-func (s *Server) ExecFuncByName(ctx context.Context, funcName string, args ...interface{}) (any, error) {
+func (s *Server) ExecFuncByName(ctx context.Context, funcName string, args ...any) (any, error) {
     ctx = logger.With(ctx, logger.F("function", funcName), logger.F("args", args))
 
     ret, err := s.Exec(ctx, funcName, args...)
@@ -34,9 +34,9 @@ func (s *Server) ExecFuncByName(ctx context.Context, funcName string, args ...interface{}) (any, error) {
     return ret, nil
 }
 
-func (s *Server) Exec(ctx context.Context, callableOrFuncname any, args ...interface{}) (any, error) {
+func (s *Server) Exec(ctx context.Context, callableOrFuncname any, args ...any) (any, error) {
     type result struct {
-        value goja.Value
+        value any
         err   error
     }
 
@@ -110,7 +110,7 @@ func (s *Server) Exec(ctx context.Context, callableOrFuncname any, args ...interface{}) (any, error) {
         }
 
         done <- result{
-            value: value,
+            value: value.Export(),
         }
 
         logger.Debug(ctx, "executed callable", logger.F("callable", callableOrFuncname), logger.F("duration", time.Since(start).String()))
@@ -129,20 +129,18 @@ func (s *Server) Exec(ctx context.Context, callableOrFuncname any, args ...interface{}) (any, error) {
             return nil, errors.WithStack(result.err)
         }
 
-        value := result.value
-
-        if promise, ok := IsPromise(value); ok {
-            value = s.waitForPromise(promise)
+        if promise, ok := isPromise(result.value); ok {
+            return s.waitForPromise(promise), nil
         }
 
-        return value.Export(), nil
+        return result.value, nil
     }
 }
 
-func (s *Server) waitForPromise(promise *goja.Promise) goja.Value {
+func (s *Server) waitForPromise(promise *goja.Promise) any {
     var (
         wg    sync.WaitGroup
-        value goja.Value
+        value any
     )
 
     wg.Add(1)
@@ -162,7 +160,7 @@ func (s *Server) waitForPromise(promise *goja.Promise) goja.Value {
             return
         }
 
-        value = promise.Result()
+        value = promise.Result().Export()
 
         breakLoop = true
     })
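Taken together, these hunks keep goja.Value instances from leaking out of the executor goroutine: Exec exports the callable's return value before sending it over the result channel, waitForPromise exports the promise result the same way, and callers of Exec/ExecFuncByName now receive plain Go values (any) with no Export() call of their own. A promise returned by a script is still awaited first, then its exported result is returned.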
@@ -2,6 +2,7 @@ package blob
 
 import (
     "encoding/json"
+    "io"
     "io/fs"
     "mime/multipart"
     "net/http"
@@ -164,7 +165,14 @@ func handleAppDownload(w http.ResponseWriter, r *http.Request) {
         }
     }()
 
-    http.ServeContent(w, r, string(replyMessage.BlobInfo.ID()), replyMessage.BlobInfo.ModTime(), replyMessage.Blob)
+    // TODO Fix usage of ServeContent
+    // http.ServeContent(w, r, string(replyMessage.BlobInfo.ID()), replyMessage.BlobInfo.ModTime(), replyMessage.Blob)
+
+    w.Header().Add("Content-Type", replyMessage.BlobInfo.ContentType())
+
+    if _, err := io.Copy(w, replyMessage.Blob); err != nil {
+        logger.Error(ctx, "could not write blob", logger.CapturedE(errors.WithStack(err)))
+    }
 }
 
 type uploadedFile struct {
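Presumably ServeContent was disabled because http.ServeContent requires an io.ReadSeeker (it seeks to determine the content size and to honor Range requests), while the blob reader carried by replyMessage is a forward-only stream. The stopgap sets Content-Type from the blob info and streams the body with io.Copy; per the TODO, Range and conditional-request handling is lost until a seekable reader is available.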
pkg/storage/driver/cache/blob_bucket.go (vendored, 2 lines changed)
@@ -140,7 +140,7 @@ func (b *BlobBucket) clearCache(ctx context.Context, id storage.BlobID) {
 
     logger.Debug(ctx, "clearing cache", logger.F("cacheKey", key))
 
-    if err := b.contentCache.Delete(key); err != nil {
+    if err := b.contentCache.Delete(key); err != nil && !errors.Is(err, bigcache.ErrEntryNotFound) {
         logger.Error(ctx, "could not clear cache", logger.CapturedE(errors.WithStack(err)))
     }
 
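Clearing the cache is now idempotent: when the entry was already evicted or never cached, bigcache's Delete returns bigcache.ErrEntryNotFound, which is no longer logged as an error.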
pkg/storage/driver/cache/blob_store.go (vendored, 7 lines changed)
@@ -70,12 +70,7 @@ func (s *BlobStore) OpenBucket(ctx context.Context, name string) (storage.BlobBucket, error) {
 func NewBlobStore(store storage.BlobStore, funcs ...OptionFunc) (*BlobStore, error) {
     options := NewOptions(funcs...)
 
-    cacheConfig := bigcache.DefaultConfig(options.CacheTTL)
-    cacheConfig.Logger = &cacheLogger{}
-    cacheConfig.HardMaxCacheSize = options.BlobCacheMaxMemorySize
-    cacheConfig.Shards = options.BlobCacheShards
-
-    contentCache, err := bigcache.New(context.Background(), cacheConfig)
+    contentCache, err := bigcache.New(context.Background(), options.BigCache)
     if err != nil {
         return nil, errors.WithStack(err)
     }
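NewBlobStore no longer assembles a bigcache.Config from individual option fields; it takes the config carried in Options.BigCache wholesale (see the options.go diff below), so every bigcache knob is reachable by the caller rather than only shards and the memory cap.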
pkg/storage/driver/cache/driver.go (vendored, 177 lines changed)
@@ -30,65 +30,45 @@ func blobStoreFactory(dsn *url.URL) (storage.BlobStore, error) {
 
     blobStoreOptionFuncs := make([]OptionFunc, 0)
 
-    rawCacheTTL := query.Get("cacheTTL")
-    if rawCacheTTL != "" {
-        query.Del("cacheTTL")
-
-        ttl, err := time.ParseDuration(rawCacheTTL)
-        if err != nil {
-            return nil, errors.Wrap(err, "could not parse url parameter 'cacheTTL'")
+    cacheTTL, err := parseDuration(&query, "cacheTTL")
+    if err != nil {
+        if !errors.Is(err, errNotFound) {
+            return nil, errors.WithStack(err)
         }
 
-        blobStoreOptionFuncs = append(blobStoreOptionFuncs, WithCacheTTL(ttl))
+        cacheTTL = time.Hour
     }
 
-    rawCacheShards := query.Get("blobCacheShards")
-    if rawCacheShards != "" {
-        query.Del("blobCacheShards")
+    blobStoreOptionFuncs = append(blobStoreOptionFuncs, WithCacheTTL(cacheTTL))
 
-        cacheShards, err := strconv.ParseInt(rawCacheShards, 10, 32)
-        if err != nil {
-            return nil, errors.Wrap(err, "could not parse url parameter 'blobCacheShards'")
+    cacheConfig, err := parseBigCacheConfig(&query, cacheTTL)
+    if err != nil {
+        return nil, errors.Wrap(err, "could not parse big cache config")
+    }
+
+    blobStoreOptionFuncs = append(blobStoreOptionFuncs, WithBigCacheConfig(*cacheConfig))
+
+    blobBucketCacheSize, err := parseInt(&query, "blobBucketCacheSize")
+    if err != nil {
+        if !errors.Is(err, errNotFound) {
+            return nil, errors.WithStack(err)
         }
 
-        blobStoreOptionFuncs = append(blobStoreOptionFuncs, WithBlobCacheShards(int(cacheShards)))
+        blobBucketCacheSize = 16
     }
 
-    rawBlobCacheMaxMemorySize := query.Get("blobCacheMaxMemorySize")
-    if rawBlobCacheMaxMemorySize != "" {
-        query.Del("blobCacheMaxMemorySize")
+    blobStoreOptionFuncs = append(blobStoreOptionFuncs, WithBucketCacheSize(int(blobBucketCacheSize)))
 
-        blobCacheMaxMemorySize, err := strconv.ParseInt(rawBlobCacheMaxMemorySize, 10, 32)
-        if err != nil {
-            return nil, errors.Wrap(err, "could not parse url parameter 'blobCacheMaxMemorySize'")
+    bloInfoCacheSize, err := parseInt(&query, "bloInfoCacheSize")
+    if err != nil {
+        if !errors.Is(err, errNotFound) {
+            return nil, errors.WithStack(err)
         }
 
-        blobStoreOptionFuncs = append(blobStoreOptionFuncs, WithBlobCacheMaxMemorySize(int(blobCacheMaxMemorySize)))
+        bloInfoCacheSize = 16
     }
 
-    rawBlobBucketCacheSize := query.Get("blobBucketCacheSize")
-    if rawBlobBucketCacheSize != "" {
-        query.Del("blobBucketCacheSize")
-
-        blobBucketCacheSize, err := strconv.ParseInt(rawBlobBucketCacheSize, 10, 32)
-        if err != nil {
-            return nil, errors.Wrap(err, "could not parse url parameter 'blobBucketCacheSize'")
-        }
-
-        blobStoreOptionFuncs = append(blobStoreOptionFuncs, WithBucketCacheSize(int(blobBucketCacheSize)))
-    }
-
-    rawBlobInfoCacheSize := query.Get("blobInfoCacheSize")
-    if rawBlobInfoCacheSize != "" {
-        query.Del("blobInfoCacheSize")
-
-        blobInfoCacheSize, err := strconv.ParseInt(rawBlobInfoCacheSize, 10, 32)
-        if err != nil {
-            return nil, errors.Wrap(err, "could not parse url parameter 'blobInfoCacheSize'")
-        }
-
-        blobStoreOptionFuncs = append(blobStoreOptionFuncs, WithBlobInfoCacheSize(int(blobInfoCacheSize)))
-    }
+    blobStoreOptionFuncs = append(blobStoreOptionFuncs, WithBlobInfoCacheSize(int(bloInfoCacheSize)))
 
     url := &url.URL{
         Scheme: rawDriver,
@@ -117,3 +97,110 @@ func (l *cacheLogger) Printf(format string, v ...interface{}) {
 }
 
 var _ bigcache.Logger = &cacheLogger{}
+
+func parseBigCacheConfig(query *url.Values, cacheTTL time.Duration) (*bigcache.Config, error) {
+    config := bigcache.DefaultConfig(cacheTTL)
+    config.Logger = &cacheLogger{}
+
+    hardMaxCacheSize, err := parseInt(query, "bigCacheHardMaxCacheSize")
+    if err != nil {
+        if !errors.Is(err, errNotFound) {
+            return nil, errors.WithStack(err)
+        }
+
+        hardMaxCacheSize = int64(config.HardMaxCacheSize)
+    }
+
+    config.HardMaxCacheSize = int(hardMaxCacheSize)
+
+    maxEntriesInWindow, err := parseInt(query, "bigCacheMaxEntriesInWindow")
+    if err != nil {
+        if !errors.Is(err, errNotFound) {
+            return nil, errors.WithStack(err)
+        }
+
+        maxEntriesInWindow = int64(config.MaxEntriesInWindow)
+    }
+
+    config.MaxEntriesInWindow = int(maxEntriesInWindow)
+
+    shards, err := parseInt(query, "bigCacheShards")
+    if err != nil {
+        if !errors.Is(err, errNotFound) {
+            return nil, errors.WithStack(err)
+        }
+
+        shards = int64(config.Shards)
+    }
+
+    config.Shards = int(shards)
+
+    maxEntrySize, err := parseInt(query, "bigCacheMaxEntrySize")
+    if err != nil {
+        if !errors.Is(err, errNotFound) {
+            return nil, errors.WithStack(err)
+        }
+
+        maxEntrySize = int64(config.MaxEntrySize)
+    }
+
+    config.MaxEntrySize = int(maxEntrySize)
+
+    cleanWindow, err := parseDuration(query, "bigCacheCleanWindow")
+    if err != nil {
+        if !errors.Is(err, errNotFound) {
+            return nil, errors.WithStack(err)
+        }
+
+        cleanWindow = config.CleanWindow
+    }
+
+    config.CleanWindow = cleanWindow
+
+    lifeWindow, err := parseDuration(query, "bigCacheLifeWindow")
+    if err != nil {
+        if !errors.Is(err, errNotFound) {
+            return nil, errors.WithStack(err)
+        }
+
+        lifeWindow = config.LifeWindow
+    }
+
+    config.LifeWindow = lifeWindow
+
+    return &config, nil
+}
+
+var errNotFound = errors.New("not found")
+
+func parseInt(query *url.Values, name string) (int64, error) {
+    rawValue := query.Get(name)
+    if rawValue != "" {
+        query.Del(name)
+
+        value, err := strconv.ParseInt(rawValue, 10, 32)
+        if err != nil {
+            return 0, errors.Wrapf(err, "could not parse url parameter '%s'", name)
+        }
+
+        return value, nil
+    }
+
+    return 0, errors.WithStack(errNotFound)
+}
+
+func parseDuration(query *url.Values, name string) (time.Duration, error) {
+    rawValue := query.Get(name)
+    if rawValue != "" {
+        query.Del(name)
+
+        value, err := time.ParseDuration(rawValue)
+        if err != nil {
+            return 0, errors.Wrapf(err, "could not parse url parameter '%s'", name)
+        }
+
+        return value, nil
+    }
+
+    return 0, errors.WithStack(errNotFound)
+}
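The parseInt/parseDuration/parseBigCacheConfig helpers make every bigcache knob reachable from the DSN query string, with unset parameters falling back to bigcache defaults. A sketch built from the new test command's default DSN (cacheTTL=1h is an illustrative addition, not part of that default):

    // Parameters consumed by parseBigCacheConfig/parseInt/parseDuration.
    store, err := driver.NewBlobStore(
        "cache://./test-cache.sqlite?driver=sqlite&cacheTTL=1h" +
            "&bigCacheShards=32&bigCacheHardMaxCacheSize=128" +
            "&bigCacheMaxEntrySize=125&bigCacheMaxEntriesInWindow=200000")
    if err != nil {
        // handle error
    }
    _ = store

Note that the new code reads the blob-info size parameter as 'bloInfoCacheSize', which appears to be a typo of the 'blobInfoCacheSize' name the old code used.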
pkg/storage/driver/cache/options.go (vendored, 35 lines changed)
@@ -1,24 +1,27 @@
 package cache
 
-import "time"
+import (
+    "time"
+
+    "github.com/allegro/bigcache/v3"
+)
 
 type Options struct {
-    CacheTTL               time.Duration
-    BlobCacheMaxMemorySize int
-    BlobCacheShards        int
-    BucketCacheSize        int
-    BlobInfoCacheSize      int
+    CacheTTL          time.Duration
+    BigCache          bigcache.Config
+    BucketCacheSize   int
+    BlobInfoCacheSize int
 }
 
 type OptionFunc func(opts *Options)
 
 func NewOptions(funcs ...OptionFunc) *Options {
+    defaultTTL := 60 * time.Minute
     opts := &Options{
-        CacheTTL:               60 * time.Minute,
-        BlobCacheMaxMemorySize: 256,
-        BlobCacheShards:        1024,
-        BucketCacheSize:        16,
-        BlobInfoCacheSize:      512,
+        CacheTTL:          defaultTTL,
+        BigCache:          bigcache.DefaultConfig(defaultTTL),
+        BucketCacheSize:   16,
+        BlobInfoCacheSize: 256,
     }
 
     for _, fn := range funcs {
@@ -34,15 +37,9 @@ func WithCacheTTL(ttl time.Duration) OptionFunc {
     }
 }
 
-func WithBlobCacheMaxMemorySize(size int) OptionFunc {
+func WithBigCacheConfig(config bigcache.Config) OptionFunc {
     return func(opts *Options) {
-        opts.BlobCacheMaxMemorySize = size
-    }
-}
-
-func WithBlobCacheShards(shards int) OptionFunc {
-    return func(opts *Options) {
-        opts.BlobCacheShards = shards
+        opts.BigCache = config
     }
 }
 
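Callers that used the removed WithBlobCacheMaxMemorySize and WithBlobCacheShards options now pass a complete bigcache.Config. A minimal migration sketch (backingStore is a hypothetical storage.BlobStore; the cache package name follows the diff above):

    cfg := bigcache.DefaultConfig(time.Hour)
    cfg.Shards = 32            // replaces WithBlobCacheShards(32)
    cfg.HardMaxCacheSize = 128 // in MB, replaces WithBlobCacheMaxMemorySize(128)

    cachedStore, err := cache.NewBlobStore(backingStore, cache.WithBigCacheConfig(cfg))
    if err != nil {
        // handle error
    }
    _ = cachedStore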
@@ -20,7 +20,7 @@ func init() {
 func documentStoreFactory(url *url.URL) (storage.DocumentStore, error) {
     dir := filepath.Dir(url.Host + url.Path)
 
-    if dir != ":memory:" {
+    if dir != "." {
         if err := os.MkdirAll(dir, os.FileMode(0750)); err != nil {
             return nil, errors.WithStack(err)
         }
@@ -39,7 +39,7 @@ func documentStoreFactory(url *url.URL) (storage.DocumentStore, error) {
 func blobStoreFactory(url *url.URL) (storage.BlobStore, error) {
     dir := filepath.Dir(url.Host + url.Path)
 
-    if dir != ":memory:" {
+    if dir != "." {
         if err := os.MkdirAll(dir, os.FileMode(0750)); err != nil {
             return nil, errors.WithStack(err)
         }
@@ -58,7 +58,7 @@ func blobStoreFactory(url *url.URL) (storage.BlobStore, error) {
 func shareStoreFactory(url *url.URL) (share.Store, error) {
     dir := filepath.Dir(url.Host + url.Path)
 
-    if dir != ":memory:" {
+    if dir != "." {
         if err := os.MkdirAll(dir, os.FileMode(0750)); err != nil {
             return nil, errors.WithStack(err)
         }
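filepath.Dir returns "." for any bare name, including ":memory:", so the old guard could never match and MkdirAll was presumably attempted even for in-memory DSNs; comparing against "." skips directory creation both for in-memory databases and for files in the current directory.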