edge/pkg/storage/driver/cache/lfu/testsuite/test_concurrent.go
William Petit a276b92a03
All checks were successful
arcad/edge/pipeline/head This commit looks good
arcad/edge/pipeline/pr-master This commit looks good
feat: implement lfu based cache strategy
2024-01-10 13:16:52 +01:00

68 lines
1.5 KiB
Go

package testsuite
import (
"fmt"
"sync"
"testing"
"forge.cadoles.com/arcad/edge/pkg/storage/driver/cache/lfu"
"github.com/pkg/errors"
)
// testConcurrent hammers a single LFU cache from many goroutines to
// exercise its internal synchronization, then verifies that after a
// final Evict() the cache respects its configured capacity.
//
// The cache capacity is deliberately set to half the total size of all
// written values so that evictions are guaranteed to occur while
// writers are racing. Failures are reported through t.Errorf; the
// error return is kept for testsuite-signature compatibility and is
// always nil.
func testConcurrent(t *testing.T, store lfu.Store[string, string]) error {
	const value = "foobar"

	totalKeys := 25
	totalSize := len(value) * totalKeys
	// Half the room needed for every key: forces evictions under load.
	capacity := totalSize / 2

	cache := lfu.NewCache[string, string](store,
		lfu.WithCapacity[string, string](capacity),
		lfu.WithLog[string, string](t.Logf),
	)

	var wg sync.WaitGroup
	wg.Add(totalKeys)

	loops := totalKeys * 10

	for i := 0; i < totalKeys; i++ {
		// key is scoped to the loop body, so each goroutine closure
		// captures its own copy — no extra wrapper function needed.
		key := fmt.Sprintf("key%d", i)

		go func() {
			defer wg.Done()

			for j := 0; j < loops; j++ {
				if err := cache.Set(key, value); err != nil {
					t.Errorf("%+v", errors.WithStack(err))
				}
			}
		}()
	}

	wg.Wait()

	t.Logf("cache before final evict [capacity: %d, size: %d, len: %d]", cache.Capacity(), cache.Size(), cache.Len())

	// Evict synchronously so the size/length invariants below hold.
	if err := cache.Evict(); err != nil {
		t.Errorf("%+v", errors.WithStack(err))
	}

	t.Logf("cache after final evict [capacity: %d, size: %d, len: %d]", cache.Capacity(), cache.Size(), cache.Len())

	// At most capacity/len(value) entries can fit within the capacity.
	expectedLength := capacity / len(value)

	if e, g := expectedLength, cache.Len(); e < g {
		t.Errorf("cache.Len(): expected <= %d, got %d", e, g)
	}

	if cache.Size() > capacity {
		t.Errorf("cache.Size(): expected <= %d, got %d", capacity, cache.Size())
	}

	if e, g := expectedLength*len(value), cache.Size(); e < g {
		t.Errorf("cache.Size(): expected <= %d, got %d", e, g)
	}

	return nil
}