2023-10-24 22:52:33 +02:00
|
|
|
package cache
|
|
|
|
|
|
|
|
import (
|
|
|
|
"context"
|
|
|
|
"fmt"
|
|
|
|
"io"
|
|
|
|
|
|
|
|
"forge.cadoles.com/arcad/edge/pkg/storage"
|
2023-11-29 11:10:29 +01:00
|
|
|
"github.com/allegro/bigcache/v3"
|
2023-10-24 22:52:33 +02:00
|
|
|
"github.com/pkg/errors"
|
|
|
|
"gitlab.com/wpetit/goweb/logger"
|
|
|
|
)
|
|
|
|
|
|
|
|
// BlobBucket decorates a storage.BlobBucket with a read-through
// in-memory cache (bigcache) for blob content.
type BlobBucket struct {
	// bucket is the wrapped storage backend all operations delegate to.
	bucket storage.BlobBucket
	// cache stores blob content keyed by "<bucketName>-<blobID>"
	// (see getCacheKey).
	cache *bigcache.BigCache
}
|
|
|
|
|
|
|
|
// Close implements storage.BlobBucket.
|
|
|
|
func (b *BlobBucket) Close() error {
|
|
|
|
if err := b.bucket.Close(); err != nil {
|
|
|
|
return errors.WithStack(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Delete implements storage.BlobBucket.
|
|
|
|
func (b *BlobBucket) Delete(ctx context.Context, id storage.BlobID) error {
|
|
|
|
if err := b.bucket.Delete(ctx, id); err != nil {
|
|
|
|
return errors.WithStack(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Get implements storage.BlobBucket.
|
|
|
|
func (b *BlobBucket) Get(ctx context.Context, id storage.BlobID) (storage.BlobInfo, error) {
|
|
|
|
info, err := b.bucket.Get(ctx, id)
|
|
|
|
if err != nil {
|
|
|
|
return nil, errors.WithStack(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
return info, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// List implements storage.BlobBucket.
|
|
|
|
func (b *BlobBucket) List(ctx context.Context) ([]storage.BlobInfo, error) {
|
|
|
|
infos, err := b.bucket.List(ctx)
|
|
|
|
if err != nil {
|
|
|
|
return nil, errors.WithStack(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
return infos, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Name implements storage.BlobBucket.
|
|
|
|
func (b *BlobBucket) Name() string {
|
|
|
|
return b.bucket.Name()
|
|
|
|
}
|
|
|
|
|
|
|
|
// NewReader implements storage.BlobBucket.
|
|
|
|
func (b *BlobBucket) NewReader(ctx context.Context, id storage.BlobID) (io.ReadSeekCloser, error) {
|
|
|
|
if cached, exist := b.inCache(id); exist {
|
2023-11-29 11:10:29 +01:00
|
|
|
logger.Debug(ctx, "found blob in cache", logger.F("cacheKey", b.getCacheKey(id)), logger.F("cacheStats", b.cache.Stats()))
|
2023-10-24 22:52:33 +02:00
|
|
|
return cached, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
reader, err := b.bucket.NewReader(ctx, id)
|
|
|
|
if err != nil {
|
|
|
|
return nil, errors.WithStack(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
return &readCacher{
|
|
|
|
reader: reader,
|
|
|
|
cache: b.cache,
|
|
|
|
key: b.getCacheKey(id),
|
|
|
|
}, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (b *BlobBucket) getCacheKey(id storage.BlobID) string {
|
|
|
|
return fmt.Sprintf("%s-%s", b.Name(), id)
|
|
|
|
}
|
|
|
|
|
|
|
|
func (b *BlobBucket) inCache(id storage.BlobID) (io.ReadSeekCloser, bool) {
|
|
|
|
key := b.getCacheKey(id)
|
2023-11-29 11:10:29 +01:00
|
|
|
data, err := b.cache.Get(key)
|
|
|
|
if err != nil {
|
|
|
|
if errors.Is(err, bigcache.ErrEntryNotFound) {
|
|
|
|
return nil, false
|
|
|
|
}
|
|
|
|
|
|
|
|
logger.Error(context.Background(), "could not retrieve cache value", logger.CapturedE(errors.WithStack(err)))
|
|
|
|
|
2023-10-24 22:52:33 +02:00
|
|
|
return nil, false
|
|
|
|
}
|
|
|
|
|
|
|
|
return &cachedReader{data, 0}, true
|
|
|
|
}
|
|
|
|
|
|
|
|
// NewWriter implements storage.BlobBucket.
|
|
|
|
func (b *BlobBucket) NewWriter(ctx context.Context, id storage.BlobID) (io.WriteCloser, error) {
|
|
|
|
writer, err := b.bucket.NewWriter(ctx, id)
|
|
|
|
if err != nil {
|
|
|
|
return nil, errors.WithStack(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
return writer, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Size implements storage.BlobBucket.
|
|
|
|
func (b *BlobBucket) Size(ctx context.Context) (int64, error) {
|
|
|
|
size, err := b.bucket.Size(ctx)
|
|
|
|
if err != nil {
|
|
|
|
return 0, errors.WithStack(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
return size, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Compile-time check that *BlobBucket satisfies storage.BlobBucket.
var _ storage.BlobBucket = (*BlobBucket)(nil)
|