package testsuite

import (
	"bytes"
	"context"
	"crypto/rand"
	"fmt"
	"io"
	mrand "math/rand"
	"testing"

	"forge.cadoles.com/arcad/edge/pkg/storage"
	"github.com/pkg/errors"
)
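
// BenchmarkBlobStore runs a common set of benchmarks against the given
// storage.BlobStore implementation. Backend packages are expected to call it
// from one of their own benchmark functions, along the lines of the sketch
// below (the driver package and constructor are hypothetical, not part of
// this suite):
//
//	func BenchmarkMyBlobStore(b *testing.B) {
//		store := mydriver.NewBlobStore("bench.db") // hypothetical constructor
//		testsuite.BenchmarkBlobStore(b, store)
//	}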
func BenchmarkBlobStore(t *testing.B, store storage.BlobStore) {
	t.Run("BlobCreateUpdateReadDelete", func(t *testing.B) {
		for i := 0; i < t.N; i++ {
			bucketName := fmt.Sprintf("bucket-%d", i)

			if err := runBlobCreateUpdateReadDelete(store, bucketName); err != nil {
				t.Fatalf("%+v", errors.WithStack(err))
			}
		}
	})
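
	// Populate the store outside the timed section so that only the random
	// reads themselves are measured.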
	t.Run("BlobRandomRead", func(t *testing.B) {
		t.StopTimer()

		if err := prepareBlobStoreRandomRead(store); err != nil {
			t.Fatalf("%+v", errors.WithStack(err))
		}

		t.ResetTimer()
		t.StartTimer()

		for i := 0; i < t.N; i++ {
			if err := doRandomRead(store); err != nil {
				t.Fatalf("%+v", errors.WithStack(err))
			}
		}
	})
}
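
// runBlobCreateUpdateReadDelete exercises the full life cycle of a single
// blob: open a bucket, write a blob, read it back, delete the blob, then
// delete the bucket itself.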
func runBlobCreateUpdateReadDelete(store storage.BlobStore, bucketName string) error {
	ctx := context.Background()

	bucket, err := store.OpenBucket(ctx, bucketName)
	if err != nil {
		return errors.WithStack(err)
	}

	blobID := storage.NewBlobID()

	writer, err := bucket.NewWriter(ctx, blobID)
	if err != nil {
		return errors.WithStack(err)
	}

	data := []byte("foo")

	if _, err = writer.Write(data); err != nil {
		return errors.WithStack(err)
	}

	if err := writer.Close(); err != nil {
		return errors.WithStack(err)
	}
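
	// Read the blob back in full; the data is only drained into a buffer,
	// as this benchmarks the read path rather than verifying contents.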
	reader, err := bucket.NewReader(ctx, blobID)
	if err != nil {
		return errors.WithStack(err)
	}

	var buf bytes.Buffer

	if _, err = io.Copy(&buf, reader); err != nil {
		return errors.WithStack(err)
	}

	if err := reader.Close(); err != nil {
		return errors.WithStack(err)
	}

	if err := bucket.Delete(ctx, blobID); err != nil {
		return errors.WithStack(err)
	}

	if err := bucket.Close(); err != nil {
		return errors.WithStack(err)
	}

	if err := store.DeleteBucket(ctx, bucketName); err != nil {
		return errors.WithStack(err)
	}

	return nil
}
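
// prepareBlobStoreRandomRead seeds the store for the BlobRandomRead
// benchmark: it creates totalBuckets buckets, each holding totalBlobs blobs
// of increasing size filled with random bytes.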
func prepareBlobStoreRandomRead(store storage.BlobStore) error {
	ctx := context.Background()
	totalBuckets := 128
	totalBlobs := 64

	for i := 0; i < totalBuckets; i++ {
		bucketName := fmt.Sprintf("bucket-%d", i)

		err := func(bucketName string) error {
			bucket, err := store.OpenBucket(ctx, bucketName)
			if err != nil {
				return errors.WithStack(err)
			}

			defer func() {
				if err := bucket.Close(); err != nil {
					panic(errors.WithStack(err))
				}
			}()
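
			// Blob j holds j random bytes, so blob sizes within a bucket
			// range from 0 to totalBlobs-1 bytes.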
			for j := 0; j < totalBlobs; j++ {
				blobID := storage.NewBlobID()

				err = func(blobID storage.BlobID) error {
					writer, err := bucket.NewWriter(ctx, blobID)
					if err != nil {
						return errors.WithStack(err)
					}

					defer func() {
						if err := writer.Close(); err != nil {
							panic(errors.WithStack(err))
						}
					}()

					data := make([]byte, j)

					if _, err := rand.Read(data); err != nil {
						return errors.WithStack(err)
					}

					if _, err = writer.Write(data); err != nil {
						return errors.WithStack(err)
					}

					if err := writer.Close(); err != nil {
						return errors.WithStack(err)
					}

					return nil
				}(blobID)
				if err != nil {
					return errors.WithStack(err)
				}
			}

			return nil
		}(bucketName)
		if err != nil {
			return errors.WithStack(err)
		}
	}

	return nil
}
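
// doRandomRead picks a random bucket, then a random blob inside it, and
// reads that blob from start to end. math/rand is enough here: the selection
// only needs to be cheap and roughly uniform, not cryptographically secure.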
func doRandomRead(store storage.BlobStore) error {
	ctx := context.Background()

	buckets, err := store.ListBuckets(ctx)
	if err != nil {
		return errors.WithStack(err)
	}

	randBucketIndex := mrand.Int31n(int32(len(buckets)))
	bucketName := buckets[randBucketIndex]

	bucket, err := store.OpenBucket(ctx, bucketName)
	if err != nil {
		return errors.WithStack(err)
	}

	defer func() {
		if err := bucket.Close(); err != nil {
			panic(errors.WithStack(err))
		}
	}()

	blobs, err := bucket.List(ctx)
	if err != nil {
		return errors.WithStack(err)
	}

	randBlobIndex := mrand.Int31n(int32(len(blobs)))
	blobInfo := blobs[randBlobIndex]
	blobID := blobInfo.ID()

	reader, err := bucket.NewReader(ctx, blobID)
	if err != nil {
		return errors.WithStack(err)
	}

	var buf bytes.Buffer

	if _, err = io.Copy(&buf, reader); err != nil {
		return errors.WithStack(err)
	}

	if err := reader.Close(); err != nil {
		return errors.WithStack(err)
	}

	return nil
}