feat: initial commit

This commit is contained in:
2023-02-09 12:16:36 +01:00
commit 81dc1adfef
126 changed files with 11551 additions and 0 deletions

66
pkg/storage/blob_store.go Normal file
View File

@ -0,0 +1,66 @@
package storage
import (
"context"
"errors"
"io"
"time"
"github.com/oklog/ulid/v2"
)
// Sentinel errors returned by BlobStore / BlobBucket implementations;
// callers compare against them with errors.Is.
var (
	// ErrBucketClosed is returned when an operation is attempted on a
	// bucket after Close was called.
	ErrBucketClosed = errors.New("bucket closed")
	// ErrBlobNotFound is returned when the requested blob ID does not
	// exist in the bucket.
	ErrBlobNotFound = errors.New("blob not found")
)
// BlobID uniquely identifies a blob within a bucket.
type BlobID string

// NewBlobID generates a fresh, lexicographically sortable blob
// identifier backed by a ULID.
func NewBlobID() BlobID {
	id := ulid.Make()
	return BlobID(id.String())
}
// BlobStore manages named buckets of binary blobs.
type BlobStore interface {
	// OpenBucket opens (creating it lazily if needed) the bucket with
	// the given name.
	OpenBucket(ctx context.Context, name string) (BlobBucket, error)
	// ListBuckets returns the names of all existing buckets.
	ListBuckets(ctx context.Context) ([]string, error)
	// DeleteBucket removes the bucket and all blobs it contains.
	DeleteBucket(ctx context.Context, name string) error
}
// BlobBucket is a handle on a single bucket; Close releases it.
type BlobBucket interface {
	// Name returns the bucket name.
	Name() string
	// Close releases the bucket handle; subsequent operations may fail
	// with ErrBucketClosed.
	Close() error
	// Get returns metadata for the blob with the given ID.
	Get(ctx context.Context, id BlobID) (BlobInfo, error)
	// Delete removes the blob with the given ID.
	Delete(ctx context.Context, id BlobID) error
	// NewReader opens the blob content for reading/seeking.
	NewReader(ctx context.Context, id BlobID) (io.ReadSeekCloser, error)
	// NewWriter opens the blob for writing; content becomes visible on
	// Close of the returned writer.
	NewWriter(ctx context.Context, id BlobID) (io.WriteCloser, error)
	// List returns metadata for every blob in the bucket.
	List(ctx context.Context) ([]BlobInfo, error)
	// Size returns the total size in bytes of all blobs in the bucket.
	Size(ctx context.Context) (int64, error)
}
// BlobInfo exposes the metadata recorded for a stored blob.
type BlobInfo interface {
	// ID returns the blob identifier.
	ID() BlobID
	// Bucket returns the name of the bucket holding the blob.
	Bucket() string
	// ModTime returns the blob's last modification time.
	ModTime() time.Time
	// Size returns the blob size in bytes.
	Size() int64
	// ContentType returns the blob's detected MIME type.
	ContentType() string
}
// BucketListOptions carries optional pagination parameters for bucket
// listing; nil fields mean "no constraint".
type BucketListOptions struct {
	Limit  *int
	Offset *int
}

// BucketListOptionsFunc mutates a BucketListOptions in place
// (functional-option pattern).
type BucketListOptionsFunc func(o *BucketListOptions)
// WithBucketListLimit caps the number of buckets returned by a
// listing operation.
func WithBucketListLimit(limit int) BucketListOptionsFunc {
	capped := limit
	return func(o *BucketListOptions) { o.Limit = &capped }
}
// WithBucketListOffset skips the first offset buckets of a listing
// operation.
func WithBucketListOffset(offset int) BucketListOptionsFunc {
	skip := offset
	return func(o *BucketListOptions) { o.Offset = &skip }
}

View File

@ -0,0 +1,69 @@
package storage
import (
"context"
"errors"
"time"
"forge.cadoles.com/arcad/edge/pkg/storage/filter"
"github.com/oklog/ulid/v2"
)
// ErrDocumentNotFound is returned when the requested document does not
// exist in the collection.
var ErrDocumentNotFound = errors.New("document not found")

// DocumentID uniquely identifies a document within a collection.
type DocumentID string

// Reserved attribute names automatically managed by the store.
const (
	DocumentAttrID        = "_id"
	DocumentAttrCreatedAt = "_createdAt"
	DocumentAttrUpdatedAt = "_updatedAt"
)

// NewDocumentID generates a fresh, lexicographically sortable document
// identifier backed by a ULID.
func NewDocumentID() DocumentID {
	return DocumentID(ulid.Make().String())
}
// Document is a schemaless record; well-known attributes such as the
// identifier live under the reserved DocumentAttr* keys.
type Document map[string]interface{}

// ID returns the document identifier stored under DocumentAttrID.
// The boolean is false when the attribute is missing or is not a
// string.
func (d Document) ID() (DocumentID, bool) {
	rawID, exists := d[DocumentAttrID]
	if !exists {
		return "", false
	}

	// BUG FIX: the assertion result was inverted (`if ok` instead of
	// `if !ok`), so valid string IDs were rejected and non-string
	// values fell through to the success path.
	id, ok := rawID.(string)
	if !ok {
		return "", false
	}

	return DocumentID(id), true
}
// CreatedAt extracts the DocumentAttrCreatedAt attribute via timeAttr.
func (d Document) CreatedAt() (time.Time, bool) {
	return d.timeAttr(DocumentAttrCreatedAt)
}

// UpdatedAt extracts the DocumentAttrUpdatedAt attribute via timeAttr.
func (d Document) UpdatedAt() (time.Time, bool) {
	return d.timeAttr(DocumentAttrUpdatedAt)
}
// timeAttr returns the attribute stored under attr as a time.Time.
// The boolean is false when the attribute is missing or is not a
// time.Time.
func (d Document) timeAttr(attr string) (time.Time, bool) {
	rawTime, exists := d[attr]
	if !exists {
		return time.Time{}, false
	}

	// BUG FIX: the assertion result was inverted (`if ok` instead of
	// `if !ok`), so a genuine time.Time value was reported as absent
	// and a non-time value was returned as valid.
	t, ok := rawTime.(time.Time)
	if !ok {
		return time.Time{}, false
	}

	return t, true
}
// DocumentStore persists schemaless documents grouped in named
// collections.
type DocumentStore interface {
	// Get returns the document with the given ID, or an error wrapping
	// ErrDocumentNotFound.
	Get(ctx context.Context, collection string, id DocumentID) (Document, error)
	// Query returns the documents matching filter, honoring the given
	// query options (limit, offset, ordering).
	Query(ctx context.Context, collection string, filter *filter.Filter, funcs ...QueryOptionFunc) ([]Document, error)
	// Upsert inserts or updates the document and returns the stored
	// version.
	Upsert(ctx context.Context, collection string, document Document) (Document, error)
	// Delete removes the document with the given ID.
	Delete(ctx context.Context, collection string, id DocumentID) error
}

17
pkg/storage/filter/and.go Normal file
View File

@ -0,0 +1,17 @@
package filter
// AndOperator aggregates child operators that must all match.
type AndOperator struct {
	children []Operator
}

// Token implements Operator.
func (o *AndOperator) Token() Token { return TokenAnd }

// Children returns the aggregated operators.
func (o *AndOperator) Children() []Operator {
	return o.children
}

// NewAndOperator builds an AndOperator over the given operators.
func NewAndOperator(ops ...Operator) *AndOperator {
	return &AndOperator{children: ops}
}

17
pkg/storage/filter/eq.go Normal file
View File

@ -0,0 +1,17 @@
package filter
// EqOperator matches documents whose fields equal the given values.
type EqOperator struct {
	fields map[string]interface{}
}

// Token implements Operator.
func (o *EqOperator) Token() Token { return TokenEq }

// Fields returns the field/value pairs compared for equality.
func (o *EqOperator) Fields() map[string]interface{} {
	return o.fields
}

// NewEqOperator builds an EqOperator from the given field/value pairs.
func NewEqOperator(fields map[string]interface{}) *EqOperator {
	return &EqOperator{fields: fields}
}

View File

@ -0,0 +1,13 @@
package filter
import "errors"
// Sentinel errors reported while parsing or transforming filters;
// callers compare against them with errors.Is.
var (
	ErrInvalidFieldOperator       = errors.New("invalid field operator")
	ErrInvalidAggregationOperator = errors.New("invalid aggregation operator")
	ErrInvalidFieldMap            = errors.New("invalid field map")
	ErrUnknownOperator            = errors.New("unknown operator")
	ErrUnexpectedOperator         = errors.New("unexpected operator")
	ErrUnsupportedOperator        = errors.New("unsupported operator")
	ErrInvalidRoot                = errors.New("invalid root")
)

View File

@ -0,0 +1,136 @@
package filter
import (
"github.com/pkg/errors"
)
// Filter wraps the root operator of a parsed filter expression.
type Filter struct {
	root Operator
}

// Root returns the root operator of the expression tree.
func (f *Filter) Root() Operator { return f.root }

// New creates a Filter from an already-built operator tree.
func New(root Operator) *Filter {
	return &Filter{root: root}
}
// NewFrom parses a raw decoded map (e.g. unmarshalled JSON) into a
// Filter. The map must contain exactly one top-level operator.
func NewFrom(raw map[string]interface{}) (*Filter, error) {
	if len(raw) != 1 {
		return nil, errors.WithStack(ErrInvalidRoot)
	}

	root, err := toFieldOperator(raw)
	if err != nil {
		return nil, err
	}

	return &Filter{root: root}, nil
}
// toFieldOperator converts a raw decoded map into an Operator tree.
// Each key is interpreted as an operator token: aggregation tokens
// (and/or/not) recurse through toAggregateOperator, field tokens
// (eq, neq, gt, ...) expect a map of field name -> value. All
// resulting operators are combined under a single AndOperator.
func toFieldOperator(v interface{}) (Operator, error) {
	vv, ok := v.(map[string]interface{})
	if !ok {
		return nil, errors.WithStack(ErrInvalidFieldOperator)
	}

	ops := make([]Operator, 0)

	for rawToken, val := range vv {
		var (
			op  Operator
			err error
		)

		token := Token(rawToken)

		switch {
		case isAggregatorToken(token):
			op, err = toAggregateOperator(token, val)
		case isFieldToken(token):
			fields, ok := val.(map[string]interface{})
			if !ok {
				return nil, errors.WithStack(ErrInvalidFieldMap)
			}

			switch token {
			case TokenEq:
				op = NewEqOperator(fields)
			case TokenNeq:
				op = NewNeqOperator(fields)
			case TokenGt:
				op = NewGtOperator(fields)
			case TokenGte:
				op = NewGteOperator(fields)
			case TokenLt:
				op = NewLtOperator(fields)
			case TokenLte:
				op = NewLteOperator(fields)
			case TokenIn:
				op = NewInOperator(fields)
			case TokenLike:
				op = NewLikeOperator(fields)
			default:
				return nil, errors.Wrapf(ErrUnknownOperator, "unknown operator field '%s'", token)
			}
		default:
			return nil, errors.Wrapf(ErrUnknownOperator, "unknown operator field '%s'", token)
		}

		if err != nil {
			return nil, err
		}

		ops = append(ops, op)
	}

	// NOTE(review): Go map iteration order is random, so the order of
	// the AND children (and therefore of generated SQL placeholders)
	// is not deterministic across runs — confirm callers don't rely
	// on a stable order.
	and := NewAndOperator(ops...)

	return and, nil
}
// toAggregateOperator converts the raw value associated with an
// aggregation token ("and", "or", "not") into the matching Operator.
// The value must be a slice of field-operator maps.
func toAggregateOperator(token Token, v interface{}) (Operator, error) {
	vv, ok := v.([]interface{})
	if !ok {
		return nil, errors.WithStack(ErrInvalidAggregationOperator)
	}

	ops := make([]Operator, 0, len(vv))

	for _, c := range vv {
		op, err := toFieldOperator(c)
		if err != nil {
			return nil, err
		}

		ops = append(ops, op)
	}

	switch token {
	case TokenAnd:
		return NewAndOperator(ops...), nil
	case TokenOr:
		return NewOrOperator(ops...), nil
	case TokenNot:
		return NewNotOperator(ops...), nil
	default:
		// FIX: an unexpected token previously fell through the switch
		// and returned (nil, nil); fail explicitly instead.
		return nil, errors.Wrapf(ErrUnexpectedOperator, "unexpected aggregation operator '%s'", token)
	}
}
// isAggregatorToken reports whether token denotes an aggregation
// operator (and/or/not).
func isAggregatorToken(token Token) bool {
	switch token {
	case TokenAnd, TokenOr, TokenNot:
		return true
	default:
		return false
	}
}
// isFieldToken reports whether token denotes a field comparison
// operator.
func isFieldToken(token Token) bool {
	switch token {
	case TokenEq, TokenNeq,
		TokenGt, TokenGte,
		TokenLt, TokenLte,
		TokenIn, TokenLike:
		return true
	default:
		return false
	}
}

17
pkg/storage/filter/gt.go Normal file
View File

@ -0,0 +1,17 @@
package filter
// GtOperator matches documents whose fields are strictly greater than
// the given values.
type GtOperator struct {
	fields map[string]interface{}
}

// Token implements Operator.
func (o *GtOperator) Token() Token { return TokenGt }

// Fields returns the field/value pairs compared.
func (o *GtOperator) Fields() map[string]interface{} {
	return o.fields
}

// NewGtOperator builds a GtOperator from the given field/value pairs.
func NewGtOperator(fields OperatorFields) *GtOperator {
	return &GtOperator{fields: fields}
}

17
pkg/storage/filter/gte.go Normal file
View File

@ -0,0 +1,17 @@
package filter
// GteOperator matches documents whose fields are greater than or equal
// to the given values.
type GteOperator struct {
	fields OperatorFields
}

// Token implements Operator.
func (o *GteOperator) Token() Token { return TokenGte }

// Fields returns the field/value pairs compared.
func (o *GteOperator) Fields() map[string]interface{} {
	return o.fields
}

// NewGteOperator builds a GteOperator from the given field/value pairs.
func NewGteOperator(fields OperatorFields) *GteOperator {
	return &GteOperator{fields: fields}
}

17
pkg/storage/filter/in.go Normal file
View File

@ -0,0 +1,17 @@
package filter
// InOperator matches documents whose fields are contained in the given
// values.
type InOperator struct {
	fields map[string]interface{}
}

// Token implements Operator.
func (o *InOperator) Token() Token { return TokenIn }

// Fields returns the field/value pairs compared.
func (o *InOperator) Fields() map[string]interface{} {
	return o.fields
}

// NewInOperator builds an InOperator from the given field/value pairs.
func NewInOperator(fields OperatorFields) *InOperator {
	return &InOperator{fields: fields}
}

View File

@ -0,0 +1,17 @@
package filter
// LikeOperator matches documents whose fields match the given
// SQL-LIKE patterns.
type LikeOperator struct {
	fields map[string]interface{}
}

// Token implements Operator.
func (o *LikeOperator) Token() Token { return TokenLike }

// Fields returns the field/pattern pairs compared.
func (o *LikeOperator) Fields() map[string]interface{} {
	return o.fields
}

// NewLikeOperator builds a LikeOperator from the given field/pattern pairs.
func NewLikeOperator(fields OperatorFields) *LikeOperator {
	return &LikeOperator{fields: fields}
}

17
pkg/storage/filter/lt.go Normal file
View File

@ -0,0 +1,17 @@
package filter
// LtOperator matches documents whose fields are strictly less than the
// given values.
type LtOperator struct {
	fields map[string]interface{}
}

// Token implements Operator.
func (o *LtOperator) Token() Token { return TokenLt }

// Fields returns the field/value pairs compared.
func (o *LtOperator) Fields() map[string]interface{} {
	return o.fields
}

// NewLtOperator builds a LtOperator from the given field/value pairs.
func NewLtOperator(fields OperatorFields) *LtOperator {
	return &LtOperator{fields: fields}
}

17
pkg/storage/filter/lte.go Normal file
View File

@ -0,0 +1,17 @@
package filter
// LteOperator matches documents whose fields are less than or equal to
// the given values.
type LteOperator struct {
	fields map[string]interface{}
}

// Token implements Operator.
func (o *LteOperator) Token() Token { return TokenLte }

// Fields returns the field/value pairs compared.
func (o *LteOperator) Fields() map[string]interface{} {
	return o.fields
}

// NewLteOperator builds a LteOperator from the given field/value pairs.
func NewLteOperator(fields OperatorFields) *LteOperator {
	return &LteOperator{fields: fields}
}

17
pkg/storage/filter/neq.go Normal file
View File

@ -0,0 +1,17 @@
package filter
// NeqOperator matches documents whose fields differ from the given
// values.
type NeqOperator struct {
	fields map[string]interface{}
}

// Token implements Operator.
func (o *NeqOperator) Token() Token { return TokenNeq }

// Fields returns the field/value pairs compared for inequality.
func (o *NeqOperator) Fields() map[string]interface{} {
	return o.fields
}

// NewNeqOperator builds a NeqOperator from the given field/value pairs.
func NewNeqOperator(fields map[string]interface{}) *NeqOperator {
	return &NeqOperator{fields: fields}
}

17
pkg/storage/filter/not.go Normal file
View File

@ -0,0 +1,17 @@
package filter
// NotOperator negates the operators it wraps.
type NotOperator struct {
	children []Operator
}

// Token implements Operator.
// BUG FIX: this previously returned TokenOr, which made the SQL layer
// dispatch NOT filters to the OR transform, where the *OrOperator type
// assertion then failed with ErrUnexpectedOperator.
func (o *NotOperator) Token() Token {
	return TokenNot
}

// Children returns the negated operators.
func (o *NotOperator) Children() []Operator {
	return o.children
}

// NewNotOperator builds a NotOperator over the given operators.
func NewNotOperator(ops ...Operator) *NotOperator {
	return &NotOperator{ops}
}

View File

@ -0,0 +1,23 @@
package filter
// Token identifies a filter operator in raw (map) form.
type Token string

// Recognized operator tokens: three aggregators followed by the field
// comparison operators.
const (
	TokenAnd  Token = "and"
	TokenOr   Token = "or"
	TokenNot  Token = "not"
	TokenEq   Token = "eq"
	TokenNeq  Token = "neq"
	TokenGt   Token = "gt"
	TokenGte  Token = "gte"
	TokenLt   Token = "lt"
	TokenLte  Token = "lte"
	TokenIn   Token = "in"
	TokenLike Token = "like"
)

// OperatorFields maps field names to the values they are compared to.
type OperatorFields map[string]interface{}

// Operator is a node of the filter expression tree, identified by its
// Token.
type Operator interface {
	Token() Token
}

17
pkg/storage/filter/or.go Normal file
View File

@ -0,0 +1,17 @@
package filter
// OrOperator aggregates child operators of which at least one must
// match.
type OrOperator struct {
	children []Operator
}

// Token implements Operator.
func (o *OrOperator) Token() Token { return TokenOr }

// Children returns the aggregated operators.
func (o *OrOperator) Children() []Operator {
	return o.children
}

// NewOrOperator builds an OrOperator over the given operators.
func NewOrOperator(ops ...Operator) *OrOperator {
	return &OrOperator{children: ops}
}

View File

@ -0,0 +1,87 @@
package sql
import (
"strings"
"forge.cadoles.com/arcad/edge/pkg/storage/filter"
"github.com/pkg/errors"
)
// aggregatorToSQL renders children as SQL fragments joined by the
// given SQL keyword (e.g. "AND", "OR"), wrapped in parentheses, and
// returns the fragment plus the collected prepared-statement args.
// Zero children (or children that all render empty) yield an empty
// fragment.
func aggregatorToSQL(operator string, opt *Option, children ...filter.Operator) (string, []interface{}, error) {
	args := make([]interface{}, 0)

	if len(children) == 0 {
		return "", args, nil
	}

	// strings.Builder.WriteString is documented to always return a nil
	// error, so the previous per-call error checks were dead code.
	var sb strings.Builder

	sb.WriteString("(")

	for i, c := range children {
		if i != 0 {
			sb.WriteString(" " + operator + " ")
		}

		cSQL, cArgs, err := toSQL(c, opt)
		if err != nil {
			return "", nil, errors.WithStack(err)
		}

		args = append(args, cArgs...)

		sb.WriteString(cSQL)
	}

	sb.WriteString(")")

	result := sb.String()

	// Every child rendered empty: emit nothing rather than "()".
	if result == "()" {
		return "", args, nil
	}

	return result, args, nil
}
// fieldsToSQL renders each field comparison through option.Transform
// and joins the fragments with AND, returning the SQL and the
// transformed argument values.
// NOTE(review): fields is a map, so fragment/placeholder order is not
// deterministic across runs.
func fieldsToSQL(operator string, invert bool, fields map[string]interface{}, option *Option) (string, []interface{}, error) {
	// strings.Builder.WriteString is documented to always return a nil
	// error, so the previous per-call error checks were dead code.
	var sb strings.Builder

	args := make([]interface{}, 0, len(fields))

	i := 0

	for k, v := range fields {
		if i != 0 {
			sb.WriteString(" AND ")
		}

		tr, v, err := option.Transform(operator, invert, k, v, option)
		if err != nil {
			return "", nil, errors.WithStack(err)
		}

		sb.WriteString(tr)

		args = append(args, option.ValueTransform(v))

		i++
	}

	return sb.String(), args, nil
}

View File

@ -0,0 +1,78 @@
package sql
import (
"strconv"
)
// Hook types used to customize SQL generation.
type (
	// PreparedParameterFunc returns the next prepared-statement
	// placeholder (e.g. "$1", "$2", ...).
	PreparedParameterFunc func() string
	// KeyTransformFunc rewrites a field key before it is emitted.
	KeyTransformFunc func(key string) string
	// ValueTransformFunc rewrites an argument value before it is
	// collected.
	ValueTransformFunc func(v interface{}) interface{}
	// TransformFunc renders one field comparison and may rewrite the
	// value; it returns the SQL fragment and the value to bind.
	TransformFunc func(operator string, invert bool, key string, value interface{}, option *Option) (string, interface{}, error)
)

// Option bundles the hooks driving SQL generation.
type Option struct {
	PreparedParameter PreparedParameterFunc
	KeyTransform      KeyTransformFunc
	ValueTransform    ValueTransformFunc
	Transform         TransformFunc
}
// DefaultOption returns an Option preconfigured with sane defaults:
// "$n" placeholders starting at 1, identity key/value transforms, and
// the default field transform.
func DefaultOption() *Option {
	opt := &Option{}

	for _, apply := range []OptionFunc{
		WithPreparedParameter("$", 1),
		WithNoOpKeyTransform(),
		WithNoOpValueTransform(),
		WithDefaultTransform(),
	} {
		apply(opt)
	}

	return opt
}
// OptionFunc mutates an Option in place (functional-option pattern).
type OptionFunc func(*Option)

// WithPreparedParameter installs a placeholder generator producing
// "<prefix><index>", "<prefix><index+1>", ... on successive calls.
// The counter is captured by the closure and shared by every call to
// the installed PreparedParameter, which is what numbers placeholders
// sequentially across a whole query.
func WithPreparedParameter(prefix string, index int) OptionFunc {
	return func(opt *Option) {
		opt.PreparedParameter = func() string {
			param := prefix + strconv.FormatInt(int64(index), 10)
			index++
			return param
		}
	}
}
// WithKeyTransform overrides how field keys are rendered in SQL.
func WithKeyTransform(transform KeyTransformFunc) OptionFunc {
	return func(opt *Option) { opt.KeyTransform = transform }
}

// WithNoOpKeyTransform keeps field keys unchanged.
func WithNoOpKeyTransform() OptionFunc {
	identity := func(key string) string { return key }
	return WithKeyTransform(identity)
}
// WithValueTransform overrides how argument values are rewritten
// before binding.
func WithValueTransform(transform ValueTransformFunc) OptionFunc {
	return func(opt *Option) { opt.ValueTransform = transform }
}

// WithDefaultTransform installs DefaultTransform as the field
// comparison renderer.
func WithDefaultTransform() OptionFunc {
	return func(opt *Option) { opt.Transform = DefaultTransform }
}

// WithNoOpValueTransform keeps argument values unchanged.
func WithNoOpValueTransform() OptionFunc {
	identity := func(value interface{}) interface{} { return value }
	return WithValueTransform(identity)
}

View File

@ -0,0 +1,159 @@
package sql
import (
"forge.cadoles.com/arcad/edge/pkg/storage/filter"
"github.com/pkg/errors"
)
// transformFunc renders one operator node as SQL plus bind arguments.
type transformFunc func(op filter.Operator, option *Option) (string, []interface{}, error)

// transforms dispatches each filter token to its SQL renderer.
var transforms map[filter.Token]transformFunc

func init() {
	// Initialise transforms map
	transforms = map[filter.Token]transformFunc{
		filter.TokenAnd:  transformAndOperator,
		filter.TokenOr:   transformOrOperator,
		filter.TokenNot:  transformNotOperator,
		filter.TokenEq:   transformEqOperator,
		filter.TokenNeq:  transformNeqOperator,
		filter.TokenGt:   transformGtOperator,
		filter.TokenGte:  transformGteOperator,
		filter.TokenLte:  transformLteOperator,
		filter.TokenLt:   transformLtOperator,
		filter.TokenLike: transformLikeOperator,
		filter.TokenIn:   transformInOperator,
	}
}
// ToSQL renders a filter operator tree as a SQL fragment plus its
// prepared-statement arguments, applying the given option functions on
// top of DefaultOption.
func ToSQL(op filter.Operator, funcs ...OptionFunc) (string, []interface{}, error) {
	opt := DefaultOption()

	for _, apply := range funcs {
		apply(opt)
	}

	return toSQL(op, opt)
}
// toSQL dispatches op to the transform registered for its token.
// A nil operator renders as an empty fragment.
func toSQL(op filter.Operator, opt *Option) (string, []interface{}, error) {
	if op == nil {
		return "", nil, nil
	}

	transform, ok := transforms[op.Token()]
	if !ok {
		return "", nil, errors.WithStack(filter.ErrUnsupportedOperator)
	}

	sql, args, err := transform(op, opt)
	if err != nil {
		return "", nil, errors.WithStack(err)
	}

	return sql, args, nil
}
// transformAndOperator renders an AND aggregation.
func transformAndOperator(op filter.Operator, option *Option) (string, []interface{}, error) {
	andOp, ok := op.(*filter.AndOperator)
	if !ok {
		return "", nil, errors.Wrapf(filter.ErrUnexpectedOperator, "expected '%s', got '%s'", filter.TokenAnd, op.Token())
	}

	return aggregatorToSQL("AND", option, andOp.Children()...)
}

// transformOrOperator renders an OR aggregation.
func transformOrOperator(op filter.Operator, option *Option) (string, []interface{}, error) {
	orOp, ok := op.(*filter.OrOperator)
	if !ok {
		return "", nil, errors.Wrapf(filter.ErrUnexpectedOperator, "expected '%s', got '%s'", filter.TokenOr, op.Token())
	}

	return aggregatorToSQL("OR", option, orOp.Children()...)
}

// transformEqOperator renders field equality comparisons.
func transformEqOperator(op filter.Operator, option *Option) (string, []interface{}, error) {
	eqOp, ok := op.(*filter.EqOperator)
	if !ok {
		return "", nil, errors.Wrapf(filter.ErrUnexpectedOperator, "expected '%s', got '%s'", filter.TokenEq, op.Token())
	}

	return fieldsToSQL("=", false, eqOp.Fields(), option)
}

// transformNeqOperator renders field inequality comparisons.
func transformNeqOperator(op filter.Operator, option *Option) (string, []interface{}, error) {
	eqOp, ok := op.(*filter.NeqOperator)
	if !ok {
		return "", nil, errors.Wrapf(filter.ErrUnexpectedOperator, "expected '%s', got '%s'", filter.TokenNeq, op.Token())
	}

	return fieldsToSQL("!=", false, eqOp.Fields(), option)
}
// transformGtOperator renders strict greater-than comparisons.
func transformGtOperator(op filter.Operator, option *Option) (string, []interface{}, error) {
	gtOp, ok := op.(*filter.GtOperator)
	if !ok {
		return "", nil, errors.Wrapf(filter.ErrUnexpectedOperator, "expected '%s', got '%s'", filter.TokenGt, op.Token())
	}

	return fieldsToSQL(">", false, gtOp.Fields(), option)
}

// transformGteOperator renders greater-than-or-equal comparisons.
func transformGteOperator(op filter.Operator, option *Option) (string, []interface{}, error) {
	gteOp, ok := op.(*filter.GteOperator)
	if !ok {
		return "", nil, errors.Wrapf(filter.ErrUnexpectedOperator, "expected '%s', got '%s'", filter.TokenGte, op.Token())
	}

	return fieldsToSQL(">=", false, gteOp.Fields(), option)
}

// transformLtOperator renders strict less-than comparisons.
func transformLtOperator(op filter.Operator, option *Option) (string, []interface{}, error) {
	ltOp, ok := op.(*filter.LtOperator)
	if !ok {
		return "", nil, errors.Wrapf(filter.ErrUnexpectedOperator, "expected '%s', got '%s'", filter.TokenLt, op.Token())
	}

	return fieldsToSQL("<", false, ltOp.Fields(), option)
}

// transformLteOperator renders less-than-or-equal comparisons.
func transformLteOperator(op filter.Operator, option *Option) (string, []interface{}, error) {
	lteOp, ok := op.(*filter.LteOperator)
	if !ok {
		return "", nil, errors.Wrapf(filter.ErrUnexpectedOperator, "expected '%s', got '%s'", filter.TokenLte, op.Token())
	}

	return fieldsToSQL("<=", false, lteOp.Fields(), option)
}
}
// transformInOperator renders IN membership tests. invert=true swaps
// the placeholder and key positions ("<placeholder> IN <key>") — see
// DefaultTransform.
func transformInOperator(op filter.Operator, option *Option) (string, []interface{}, error) {
	inOp, ok := op.(*filter.InOperator)
	if !ok {
		return "", nil, errors.Wrapf(filter.ErrUnexpectedOperator, "expected '%s', got '%s'", filter.TokenIn, op.Token())
	}

	return fieldsToSQL("IN", true, inOp.Fields(), option)
}

// transformLikeOperator renders SQL LIKE pattern matches.
func transformLikeOperator(op filter.Operator, option *Option) (string, []interface{}, error) {
	likeOp, ok := op.(*filter.LikeOperator)
	if !ok {
		return "", nil, errors.Wrapf(filter.ErrUnexpectedOperator, "expected '%s', got '%s'", filter.TokenLike, op.Token())
	}

	return fieldsToSQL("LIKE", false, likeOp.Fields(), option)
}
// transformNotOperator renders a NOT filter: its children are combined
// with AND and the whole parenthesized group is negated.
func transformNotOperator(op filter.Operator, option *Option) (string, []interface{}, error) {
	notOp, ok := op.(*filter.NotOperator)
	if !ok {
		return "", nil, errors.Wrapf(filter.ErrUnexpectedOperator, "expected '%s', got '%s'", filter.TokenNot, op.Token())
	}

	sql, args, err := aggregatorToSQL("AND", option, notOp.Children()...)
	if err != nil {
		return "", nil, errors.WithStack(err)
	}

	// FIX: a NOT with no children used to render as the dangling,
	// syntactically invalid fragment "NOT "; emit nothing instead.
	if sql == "" {
		return "", args, nil
	}

	return "NOT " + sql, args, nil
}

View File

@ -0,0 +1,84 @@
package sql
import (
"encoding/json"
"fmt"
"testing"
"forge.cadoles.com/arcad/edge/pkg/storage/filter"
)
// Convenience aliases for building raw filters in test fixtures.
// NOTE(review): neither alias is referenced in the visible test cases;
// confirm whether they are used elsewhere or can be dropped.
type (
	op   map[string]interface{}
	aggr []interface{}
)

// testCase pairs a raw JSON filter with the SQL and bind arguments it
// is expected to produce.
type testCase struct {
	RawFilter    string
	ExpectedSQL  string
	ExpectedArgs []interface{}
}

var testCases = []testCase{
	{
		RawFilter: `
		{
			"or": [
				{"eq": {"foo": "bar"}},
				{"neq": {"hello": "world"}}
			]
		}
		`,
		ExpectedSQL:  "(((foo = $1) OR (hello != $2)))",
		ExpectedArgs: []interface{}{"bar", "world"},
	},
}
// TestSQLFilter parses each raw JSON filter, renders it to SQL with the
// default options and compares both the SQL string and the bind
// arguments (in both directions, to catch extra and missing args).
func TestSQLFilter(t *testing.T) {
	for i, tc := range testCases {
		// Capture tc per iteration for the subtest closure.
		func(tc testCase) {
			t.Run(fmt.Sprintf("Test case #%d", i), func(t *testing.T) {
				raw := make(map[string]interface{})
				if err := json.Unmarshal([]byte(tc.RawFilter), &raw); err != nil {
					t.Fatal(err)
				}

				query, err := filter.NewFrom(raw)
				if err != nil {
					t.Fatal(err)
				}

				sql, args, err := ToSQL(query.Root())
				if err != nil {
					t.Error(err)
				}

				if e, g := tc.ExpectedSQL, sql; e != g {
					t.Errorf("sql: expected '%s', got '%s'", e, g)
				}

				if args == nil {
					t.Fatal("args should not be nil")
				}

				// Unexpected extra arguments.
				for i, a := range args {
					if i >= len(tc.ExpectedArgs) {
						t.Errorf("args[%d]: expected nil, got '%v'", i, a)

						continue
					}

					if e, g := tc.ExpectedArgs[i], a; e != g {
						t.Errorf("args[%d]: expected '%v', got '%v'", i, e, g)
					}
				}

				// Expected arguments that never showed up.
				for i, a := range tc.ExpectedArgs {
					if i >= len(args) {
						t.Errorf("args[%d]: expected '%v', got nil", i, a)
					}
				}
			})
		}(tc)
	}
}

View File

@ -0,0 +1,45 @@
package sql
import (
"strings"
"github.com/pkg/errors"
)
// DefaultTransform renders a single field comparison.
// In the normal form it produces "<key> <operator> <placeholder>";
// when invert is true (used for IN) it produces
// "<placeholder> <operator> <key>". The key goes through
// option.KeyTransform and the placeholder through
// option.PreparedParameter; the value is returned unchanged as the
// bind argument.
func DefaultTransform(operator string, invert bool, key string, value interface{}, option *Option) (string, interface{}, error) {
	// strings.Builder.WriteString is documented to always return a nil
	// error, so the previous per-call error checks were dead code.
	// The error return is kept for interface compatibility.
	var sb strings.Builder

	if invert {
		sb.WriteString(option.PreparedParameter())
		sb.WriteString(" ")
		sb.WriteString(operator)
		sb.WriteString(" ")
		sb.WriteString(key)
	} else {
		sb.WriteString(option.KeyTransform(key))
		sb.WriteString(" ")
		sb.WriteString(operator)
		sb.WriteString(" " + option.PreparedParameter())
	}

	return sb.String(), value, nil
}

View File

@ -0,0 +1,41 @@
package storage
type OrderDirection string
const (
OrderDirectionAsc OrderDirection = "ASC"
OrderDirectionDesc OrderDirection = "DESC"
)
type QueryOption struct {
Limit *int
Offset *int
OrderBy *string
OrderDirection *OrderDirection
}
type QueryOptionFunc func(o *QueryOption)
func WithLimit(limit int) QueryOptionFunc {
return func(o *QueryOption) {
o.Limit = &limit
}
}
func WithOffset(offset int) QueryOptionFunc {
return func(o *QueryOption) {
o.Offset = &offset
}
}
func WithOrderBy(orderBy string) QueryOptionFunc {
return func(o *QueryOption) {
o.OrderBy = &orderBy
}
}
func WithOrderDirection(direction OrderDirection) QueryOptionFunc {
return func(o *QueryOption) {
o.OrderDirection = &direction
}
}

View File

@ -0,0 +1,424 @@
package sqlite
import (
"bytes"
"context"
"database/sql"
"io"
"sync"
"time"
"forge.cadoles.com/arcad/edge/pkg/storage"
"github.com/gabriel-vasile/mimetype"
"github.com/pkg/errors"
"gitlab.com/wpetit/goweb/logger"
)
// BlobBucket is the SQLite-backed implementation of
// storage.BlobBucket; all blobs live in a single "blobs" table keyed
// by (id, bucket).
type BlobBucket struct {
	name   string
	getDB  getDBFunc
	// closed gates all operations after Close.
	// NOTE(review): the flag is read/written without synchronization —
	// confirm buckets are not shared across goroutines.
	closed bool
}
// Size implements storage.BlobBucket. It sums the recorded sizes of
// all blobs in the bucket; an empty bucket yields 0 (SUM returns NULL,
// mapped through sql.NullInt64's zero value).
func (b *BlobBucket) Size(ctx context.Context) (int64, error) {
	var size int64

	err := b.withTx(ctx, func(tx *sql.Tx) error {
		query := `SELECT SUM(size) FROM blobs WHERE bucket = $1`

		row := tx.QueryRowContext(ctx, query, b.name)

		var nullSize sql.NullInt64

		if err := row.Scan(&nullSize); err != nil {
			return errors.WithStack(err)
		}

		size = nullSize.Int64

		return nil
	})
	if err != nil {
		return 0, errors.WithStack(err)
	}

	return size, nil
}
// Name implements storage.BlobBucket.
func (b *BlobBucket) Name() string {
	return b.name
}

// Close implements storage.BlobBucket. It only flips the closed flag;
// no database resources are held per bucket. Closing twice is
// harmless.
func (b *BlobBucket) Close() error {
	logger.Debug(
		context.Background(), "closing bucket",
		logger.F("alreadyClosed", b.closed),
		logger.F("name", b.name),
	)

	b.closed = true

	return nil
}
// Delete implements storage.BlobBucket. Deleting a nonexistent blob is
// not an error (the DELETE simply affects zero rows).
func (b *BlobBucket) Delete(ctx context.Context, id storage.BlobID) error {
	err := b.withTx(ctx, func(tx *sql.Tx) error {
		query := `DELETE FROM blobs WHERE bucket = $1 AND id = $2`

		if _, err := tx.ExecContext(ctx, query, b.name, id); err != nil {
			return errors.WithStack(err)
		}

		return nil
	})
	if err != nil {
		return errors.WithStack(err)
	}

	return nil
}
// Get implements storage.BlobBucket. It returns the blob's metadata
// (not its data), or an error wrapping storage.ErrBlobNotFound when
// the ID does not exist in this bucket.
func (b *BlobBucket) Get(ctx context.Context, id storage.BlobID) (storage.BlobInfo, error) {
	var blobInfo *BlobInfo

	err := b.withTx(ctx, func(tx *sql.Tx) error {
		query := `SELECT content_type, mod_time, size FROM blobs WHERE bucket = $1 AND id = $2`

		row := tx.QueryRowContext(ctx, query, b.name, id)

		var (
			contentType string
			modTime     time.Time
			size        int64
		)

		if err := row.Scan(&contentType, &modTime, &size); err != nil {
			if errors.Is(err, sql.ErrNoRows) {
				return errors.WithStack(storage.ErrBlobNotFound)
			}

			return errors.WithStack(err)
		}

		blobInfo = &BlobInfo{
			id:          id,
			bucket:      b.name,
			contentType: contentType,
			modTime:     modTime,
			size:        size,
		}

		return nil
	})
	if err != nil {
		return nil, errors.WithStack(err)
	}

	return blobInfo, nil
}
// List implements storage.BlobBucket. It returns metadata for every
// blob stored in this bucket.
func (b *BlobBucket) List(ctx context.Context) ([]storage.BlobInfo, error) {
	var blobs []storage.BlobInfo

	err := b.withTx(ctx, func(tx *sql.Tx) error {
		query := `SELECT id, content_type, mod_time, size FROM blobs WHERE bucket = $1`

		rows, err := tx.QueryContext(ctx, query, b.name)
		if err != nil {
			return errors.WithStack(err)
		}

		// FIX: rows were never closed, leaking the cursor until GC;
		// mirror the deferred close used in BlobStore.ListBuckets.
		defer func() {
			if err := rows.Close(); err != nil {
				logger.Error(ctx, "could not close rows", logger.E(errors.WithStack(err)))
			}
		}()

		blobs = make([]storage.BlobInfo, 0)

		for rows.Next() {
			var (
				blobID      string
				contentType string
				modTime     time.Time
				size        int64
			)

			if err := rows.Scan(&blobID, &contentType, &modTime, &size); err != nil {
				if errors.Is(err, sql.ErrNoRows) {
					return errors.WithStack(storage.ErrBlobNotFound)
				}

				return errors.WithStack(err)
			}

			blobInfo := &BlobInfo{
				id:          storage.BlobID(blobID),
				bucket:      b.name,
				contentType: contentType,
				modTime:     modTime,
				size:        size,
			}

			blobs = append(blobs, blobInfo)
		}

		if err := rows.Err(); err != nil {
			return errors.WithStack(err)
		}

		return nil
	})
	if err != nil {
		return nil, errors.WithStack(err)
	}

	return blobs, nil
}
// NewReader implements storage.BlobBucket. The returned reader loads
// the blob content lazily on first Read/Seek.
func (b *BlobBucket) NewReader(ctx context.Context, id storage.BlobID) (io.ReadSeekCloser, error) {
	if b.closed {
		return nil, errors.WithStack(storage.ErrBucketClosed)
	}

	return &blobReaderCloser{
		id:     id,
		bucket: b.name,
		getDB:  b.getDB,
	}, nil
}

// NewWriter implements storage.BlobBucket. Writes are buffered in
// memory and persisted in one upsert when the writer is closed.
func (b *BlobBucket) NewWriter(ctx context.Context, id storage.BlobID) (io.WriteCloser, error) {
	if b.closed {
		return nil, errors.WithStack(storage.ErrBucketClosed)
	}

	return &blobWriterCloser{
		id:     id,
		bucket: b.name,
		getDB:  b.getDB,
		buf:    bytes.Buffer{},
	}, nil
}
// withTx rejects operations on a closed bucket, then obtains the
// database handle and runs fn inside a transaction via the package
// helper withTx.
func (b *BlobBucket) withTx(ctx context.Context, fn func(tx *sql.Tx) error) error {
	if b.closed {
		return errors.WithStack(storage.ErrBucketClosed)
	}

	db, err := b.getDB(ctx)
	if err != nil {
		return errors.WithStack(err)
	}

	if err := withTx(ctx, db, fn); err != nil {
		return errors.WithStack(err)
	}

	return nil
}
// blobWriterCloser accumulates blob content in memory and flushes it
// to the database in a single transaction on Close.
type blobWriterCloser struct {
	id     storage.BlobID
	bucket string
	getDB  getDBFunc
	// buf holds the whole blob content until Close.
	buf    bytes.Buffer
	closed bool
}

// Write implements io.WriteCloser. It only appends to the in-memory
// buffer; nothing is persisted until Close.
func (wbc *blobWriterCloser) Write(p []byte) (int, error) {
	// NOTE(review): logging the raw payload bytes at debug level may be
	// very verbose for large blobs — confirm this is intended.
	logger.Debug(context.Background(), "writing data to blob", logger.F("data", p))

	n, err := wbc.buf.Write(p)
	if err != nil {
		return n, errors.WithStack(err)
	}

	return n, nil
}
// Close implements io.WriteCloser. It flushes the buffered data to the
// database in a single upsert keyed on (id, bucket), detecting the
// MIME type from the data itself and stamping the current UTC time.
// Closing twice is a no-op.
func (wbc *blobWriterCloser) Close() error {
	ctx := context.Background()

	logger.Debug(
		ctx, "closing writer",
		logger.F("alreadyClosed", wbc.closed),
		logger.F("bucket", wbc.bucket),
		logger.F("blobID", wbc.id),
	)

	if wbc.closed {
		return nil
	}

	err := wbc.withTx(ctx, func(tx *sql.Tx) error {
		query := `
		INSERT INTO blobs (bucket, id, data, content_type, mod_time, size)
		VALUES ($1, $2, $3, $4, $5, $6)
		ON CONFLICT (id, bucket) DO UPDATE SET
		data = $3, content_type = $4, mod_time = $5, size = $6
		`

		data := wbc.buf.Bytes()
		mime := mimetype.Detect(data)
		modTime := time.Now().UTC()

		// Consistency fix: use ExecContext like every other statement
		// in this file instead of the context-less Exec.
		_, err := tx.ExecContext(
			ctx,
			query,
			wbc.bucket,
			wbc.id,
			data,
			mime.String(),
			modTime,
			len(data),
		)
		if err != nil {
			return errors.WithStack(err)
		}

		return nil
	})
	if err != nil {
		return errors.WithStack(err)
	}

	wbc.closed = true

	return nil
}
// withTx rejects writes on a closed writer, then obtains the database
// handle and runs fn inside a transaction via the package helper
// withTx.
func (wbc *blobWriterCloser) withTx(ctx context.Context, fn func(tx *sql.Tx) error) error {
	if wbc.closed {
		return errors.WithStack(io.ErrClosedPipe)
	}

	db, err := wbc.getDB(ctx)
	if err != nil {
		return errors.WithStack(err)
	}

	if err := withTx(ctx, db, fn); err != nil {
		return errors.WithStack(err)
	}

	return nil
}
// blobReaderCloser lazily loads the whole blob into memory on the
// first Read/Seek and then serves it from a bytes.Reader.
type blobReaderCloser struct {
	id     storage.BlobID
	bucket string
	getDB  getDBFunc
	reader bytes.Reader
	// once guards the one-time lazy load performed by loadBlob.
	once   sync.Once
	closed bool
}

// Read implements io.ReadSeekCloser.
// NOTE(review): if the first loadBlob fails, sync.Once never re-runs
// it, so subsequent calls observe a nil err and read from the
// zero-value reader (immediate EOF), masking the original failure —
// confirm whether the load error should be persisted on the struct.
func (brc *blobReaderCloser) Read(p []byte) (int, error) {
	var err error

	brc.once.Do(func() {
		err = brc.loadBlob()
	})
	if err != nil {
		return 0, errors.WithStack(err)
	}

	n, err := brc.reader.Read(p)
	if err != nil {
		if errors.Is(err, io.EOF) {
			// Return io.EOF unwrapped so callers' EOF comparisons work.
			return n, io.EOF
		}

		return n, errors.WithStack(err)
	}

	return n, nil
}
// Seek implements io.ReadSeekCloser. Like Read, it triggers the
// one-time lazy load of the blob content before delegating to the
// in-memory reader.
func (brc *blobReaderCloser) Seek(offset int64, whence int) (int64, error) {
	var err error

	brc.once.Do(func() {
		err = brc.loadBlob()
	})
	if err != nil {
		return 0, errors.WithStack(err)
	}

	n, err := brc.reader.Seek(offset, whence)
	if err != nil {
		return n, errors.WithStack(err)
	}

	return n, nil
}
// loadBlob reads the blob's full data column into the in-memory
// reader. It returns an error wrapping storage.ErrBlobNotFound when
// the blob does not exist.
func (brc *blobReaderCloser) loadBlob() error {
	ctx := context.Background()

	logger.Debug(ctx, "loading blob", logger.F("alreadyClosed", brc.closed))

	err := brc.withTx(ctx, func(tx *sql.Tx) error {
		query := `SELECT data FROM blobs WHERE bucket = $1 AND id = $2`

		// Consistency fix: use QueryRowContext like every other query
		// in this file instead of the context-less QueryRow.
		row := tx.QueryRowContext(ctx, query, brc.bucket, brc.id)

		var data []byte

		if err := row.Scan(&data); err != nil {
			if errors.Is(err, sql.ErrNoRows) {
				return errors.WithStack(storage.ErrBlobNotFound)
			}

			return errors.WithStack(err)
		}

		brc.reader = *bytes.NewReader(data)

		return nil
	})
	if err != nil {
		return errors.WithStack(err)
	}

	return nil
}
// Close implements io.ReadSeekCloser. It only flips the closed flag;
// the in-memory data is left to the garbage collector.
func (brc *blobReaderCloser) Close() error {
	logger.Debug(
		context.Background(), "closing reader",
		logger.F("alreadyClosed", brc.closed),
		logger.F("bucket", brc.bucket),
		logger.F("blobID", brc.id),
	)

	brc.closed = true

	return nil
}

// withTx obtains the database handle and runs fn inside a transaction.
// NOTE(review): unlike the bucket and writer variants, this does not
// check the closed flag, so Read/Seek after Close can still hit the
// database — confirm whether that is intended.
func (brc *blobReaderCloser) withTx(ctx context.Context, fn func(tx *sql.Tx) error) error {
	db, err := brc.getDB(ctx)
	if err != nil {
		return errors.WithStack(err)
	}

	if err := withTx(ctx, db, fn); err != nil {
		return errors.WithStack(err)
	}

	return nil
}

// Compile-time interface conformance checks.
var (
	_ storage.BlobBucket = &BlobBucket{}
	_ storage.BlobInfo   = &BlobInfo{}
	_ io.WriteCloser     = &blobWriterCloser{}
	_ io.ReadSeekCloser  = &blobReaderCloser{}
)

View File

@ -0,0 +1,40 @@
package sqlite
import (
"time"
"forge.cadoles.com/arcad/edge/pkg/storage"
)
// BlobInfo is the SQLite-backed implementation of storage.BlobInfo,
// populated from a row of the blobs table.
type BlobInfo struct {
	id          storage.BlobID
	bucket      string
	contentType string
	modTime     time.Time
	size        int64
}

// Bucket implements storage.BlobInfo
func (i *BlobInfo) Bucket() string {
	return i.bucket
}

// ID implements storage.BlobInfo
func (i *BlobInfo) ID() storage.BlobID {
	return i.id
}

// ContentType implements storage.BlobInfo
func (i *BlobInfo) ContentType() string {
	return i.contentType
}

// ModTime implements storage.BlobInfo
func (i *BlobInfo) ModTime() time.Time {
	return i.modTime
}

// Size implements storage.BlobInfo
func (i *BlobInfo) Size() int64 {
	return i.size
}

View File

@ -0,0 +1,136 @@
package sqlite
import (
"context"
"database/sql"
"forge.cadoles.com/arcad/edge/pkg/storage"
"github.com/pkg/errors"
"gitlab.com/wpetit/goweb/logger"
)
// BlobStore is the SQLite-backed implementation of storage.BlobStore.
// All buckets share one "blobs" table, distinguished by the bucket
// column.
type BlobStore struct {
	getDB getDBFunc
}

// DeleteBucket implements storage.BlobStore. It removes every blob
// belonging to the bucket; deleting a nonexistent bucket is not an
// error.
func (s *BlobStore) DeleteBucket(ctx context.Context, name string) error {
	err := s.withTx(ctx, func(tx *sql.Tx) error {
		query := `DELETE FROM blobs WHERE bucket = $1`

		_, err := tx.ExecContext(ctx, query, name)
		if err != nil {
			return errors.WithStack(err)
		}

		return nil
	})
	if err != nil {
		return errors.WithStack(err)
	}

	return nil
}
// ListBuckets implements storage.BlobStore. It returns the distinct
// bucket names present in the blobs table.
func (s *BlobStore) ListBuckets(ctx context.Context) ([]string, error) {
	buckets := make([]string, 0)

	err := s.withTx(ctx, func(tx *sql.Tx) error {
		// BUG FIX: the blobs table (see ensureBlobTables) has no "name"
		// column; bucket names are stored in the "bucket" column, so
		// the previous `SELECT DISTINCT name` always failed.
		query := `SELECT DISTINCT bucket FROM blobs`

		rows, err := tx.QueryContext(ctx, query)
		if err != nil {
			return errors.WithStack(err)
		}

		defer func() {
			if err := rows.Close(); err != nil {
				logger.Error(ctx, "could not close rows", logger.E(errors.WithStack(err)))
			}
		}()

		for rows.Next() {
			var name string

			if err := rows.Scan(&name); err != nil {
				return errors.WithStack(err)
			}

			buckets = append(buckets, name)
		}

		if err := rows.Err(); err != nil {
			return errors.WithStack(err)
		}

		return nil
	})
	if err != nil {
		return nil, errors.WithStack(err)
	}

	return buckets, nil
}
// OpenBucket implements storage.BlobStore. Buckets are virtual (rows
// sharing a bucket value), so opening never fails and an empty bucket
// needs no creation step.
func (s *BlobStore) OpenBucket(ctx context.Context, name string) (storage.BlobBucket, error) {
	return &BlobBucket{
		name:  name,
		getDB: s.getDB,
	}, nil
}
// ensureBlobTables creates the blobs table if it does not exist.
// It is installed as the initialization hook of the getDB func, so the
// schema is guaranteed before any query runs.
func ensureBlobTables(ctx context.Context, db *sql.DB) error {
	logger.Debug(ctx, "creating blobs table")

	err := withTx(ctx, db, func(tx *sql.Tx) error {
		query := `
		CREATE TABLE IF NOT EXISTS blobs (
			id TEXT,
			bucket TEXT,
			data BLOB,
			content_type TEXT NOT NULL,
			mod_time TIMESTAMP NOT NULL,
			size INTEGER,
			PRIMARY KEY (id, bucket)
		);
		`

		if _, err := tx.ExecContext(ctx, query); err != nil {
			return errors.WithStack(err)
		}

		return nil
	})
	if err != nil {
		return errors.WithStack(err)
	}

	return nil
}
// withTx obtains the store's database handle and runs fn inside a
// transaction via the package helper withTx.
func (s *BlobStore) withTx(ctx context.Context, fn func(tx *sql.Tx) error) error {
	// Idiom fix: the previous `var db *sql.DB` declaration was
	// redundant — the short variable declaration below already
	// introduces db.
	db, err := s.getDB(ctx)
	if err != nil {
		return errors.WithStack(err)
	}

	if err := withTx(ctx, db, fn); err != nil {
		return errors.WithStack(err)
	}

	return nil
}
// NewBlobStore returns a BlobStore backed by the SQLite database at dsn.
// The database is opened lazily on first use.
func NewBlobStore(dsn string) *BlobStore {
	return &BlobStore{newGetDBFunc(dsn, ensureBlobTables)}
}
// NewBlobStoreWithDB returns a BlobStore using an already-open database
// handle. The schema is still initialized lazily on first use.
func NewBlobStoreWithDB(db *sql.DB) *BlobStore {
	return &BlobStore{newGetDBFuncFromDB(db, ensureBlobTables)}
}
// Compile-time check that BlobStore satisfies storage.BlobStore.
var _ storage.BlobStore = &BlobStore{}

View File

@ -0,0 +1,25 @@
package sqlite
import (
"os"
"testing"
"forge.cadoles.com/arcad/edge/pkg/storage/testsuite"
"github.com/pkg/errors"
"gitlab.com/wpetit/goweb/logger"
)
// TestBlobStore runs the shared blob store test suite against a fresh
// SQLite-backed store, removing any database file left by a previous run.
func TestBlobStore(t *testing.T) {
	t.Parallel()

	logger.SetLevel(logger.LevelDebug)

	file := "./testdata/blobstore_test.sqlite"

	err := os.Remove(file)
	if err != nil && !errors.Is(err, os.ErrNotExist) {
		t.Fatalf("%+v", errors.WithStack(err))
	}

	testsuite.TestBlobStore(t, NewBlobStore(file))
}

View File

@ -0,0 +1,340 @@
package sqlite
import (
"context"
"database/sql"
"fmt"
"sync"
"time"
"forge.cadoles.com/arcad/edge/pkg/storage"
"forge.cadoles.com/arcad/edge/pkg/storage/filter"
filterSQL "forge.cadoles.com/arcad/edge/pkg/storage/filter/sql"
"github.com/pkg/errors"
"gitlab.com/wpetit/goweb/logger"
_ "modernc.org/sqlite"
)
// DocumentStore is a storage.DocumentStore implementation backed by SQLite.
// The zero value is not usable; use NewDocumentStore or NewDocumentStoreWithDB.
type DocumentStore struct {
	db       *sql.DB      // database handle; may be set lazily from path
	path     string       // SQLite DSN used when db must be opened lazily
	openOnce sync.Once    // guards one-time schema initialization
	mutex    sync.RWMutex // protects db
}
// Delete implements storage.DocumentStore.
// Deleting a non-existent document is not an error.
func (s *DocumentStore) Delete(ctx context.Context, collection string, id storage.DocumentID) error {
	remove := func(tx *sql.Tx) error {
		query := `
		DELETE FROM documents
		WHERE collection = $1 AND id = $2
		`

		if _, err := tx.ExecContext(ctx, query, collection, string(id)); err != nil {
			return errors.WithStack(err)
		}

		return nil
	}

	if err := s.withTx(ctx, remove); err != nil {
		return errors.WithStack(err)
	}

	return nil
}
// Get implements storage.DocumentStore.
// It returns the document identified by (collection, id), with its reserved
// _id/_createdAt/_updatedAt attributes populated, or
// storage.ErrDocumentNotFound when no such row exists.
func (s *DocumentStore) Get(ctx context.Context, collection string, id storage.DocumentID) (storage.Document, error) {
	var document storage.Document

	fetch := func(tx *sql.Tx) error {
		query := `
		SELECT id, data, created_at, updated_at
		FROM documents
		WHERE collection = $1 AND id = $2
		`

		var (
			data      JSONMap
			createdAt time.Time
			updatedAt time.Time
		)

		row := tx.QueryRowContext(ctx, query, collection, string(id))

		switch err := row.Scan(&id, &data, &createdAt, &updatedAt); {
		case errors.Is(err, sql.ErrNoRows):
			// Map the driver sentinel to the storage-level sentinel.
			return errors.WithStack(storage.ErrDocumentNotFound)
		case err != nil:
			return errors.WithStack(err)
		}

		document = storage.Document(data)
		document[storage.DocumentAttrID] = id
		document[storage.DocumentAttrCreatedAt] = createdAt
		document[storage.DocumentAttrUpdatedAt] = updatedAt

		return nil
	}

	if err := s.withTx(ctx, fetch); err != nil {
		return nil, errors.WithStack(err)
	}

	return document, nil
}
// Query implements storage.DocumentStore.
// It returns every document of the collection matching the given filter,
// with the reserved _id/_createdAt/_updatedAt attributes populated.
// NOTE(review): a nil filter will panic on filter.Root() — callers are
// expected to always pass a filter; confirm against the interface contract.
func (s *DocumentStore) Query(ctx context.Context, collection string, filter *filter.Filter, funcs ...storage.QueryOptionFunc) ([]storage.Document, error) {
	var documents []storage.Document

	err := s.withTx(ctx, func(tx *sql.Tx) error {
		// Translate the filter tree into an SQL criteria. Prepared
		// parameters start at $2 because $1 is the collection below; keys
		// are rewritten to json_extract() calls over the data column.
		criteria, args, err := filterSQL.ToSQL(
			filter.Root(),
			filterSQL.WithPreparedParameter("$", 2),
			filterSQL.WithKeyTransform(func(key string) string {
				return fmt.Sprintf("json_extract(data, '$.%s')", key)
			}),
		)
		if err != nil {
			return errors.WithStack(err)
		}

		query := `
		SELECT id, data, created_at, updated_at
		FROM documents
		WHERE collection = $1 AND (` + criteria + `)
		`

		args = append([]interface{}{collection}, args...)

		logger.Debug(
			ctx, "executing query",
			logger.F("query", query),
			logger.F("args", args),
		)

		rows, err := tx.QueryContext(ctx, query, args...)
		if err != nil {
			return errors.WithStack(err)
		}

		// Log close failures instead of silently discarding them, consistent
		// with the other row-iterating queries of this package.
		defer func() {
			if err := rows.Close(); err != nil {
				logger.Error(ctx, "could not close rows", logger.E(errors.WithStack(err)))
			}
		}()

		documents = make([]storage.Document, 0)

		for rows.Next() {
			var (
				id        storage.DocumentID
				createdAt time.Time
				updatedAt time.Time
				data      JSONMap
			)

			if err := rows.Scan(&id, &data, &createdAt, &updatedAt); err != nil {
				return errors.WithStack(err)
			}

			document := storage.Document(data)
			document[storage.DocumentAttrID] = id
			document[storage.DocumentAttrCreatedAt] = createdAt
			document[storage.DocumentAttrUpdatedAt] = updatedAt

			documents = append(documents, document)
		}

		// Check for errors encountered during iteration.
		if err := rows.Err(); err != nil {
			return errors.WithStack(err)
		}

		return nil
	})
	if err != nil {
		return nil, errors.WithStack(err)
	}

	return documents, nil
}
// Upsert implements storage.DocumentStore.
// It inserts the document or, when a row with the same (id, collection)
// already exists, updates its data and updated_at timestamp. The stored
// document — with its _id/_createdAt/_updatedAt attributes set — is
// returned. The caller's document map is never mutated.
func (s *DocumentStore) Upsert(ctx context.Context, collection string, document storage.Document) (storage.Document, error) {
	var upsertedDocument storage.Document

	err := s.withTx(ctx, func(tx *sql.Tx) error {
		query := `
		INSERT INTO documents (id, collection, data, created_at, updated_at)
		VALUES($1, $2, $3, $4, $4)
		ON CONFLICT (id, collection) DO UPDATE SET
			data = $3, updated_at = $4
		RETURNING "id", "data", "created_at", "updated_at"
		`

		now := time.Now().UTC()

		id, exists := document.ID()
		if !exists || id == "" {
			id = storage.NewDocumentID()
		}

		// Strip the reserved attributes on a copy so the upsert does not
		// mutate the caller's map as a side effect (the original code
		// delete()d the keys in place).
		payload := make(JSONMap, len(document))
		for key, value := range document {
			switch key {
			case storage.DocumentAttrID, storage.DocumentAttrCreatedAt, storage.DocumentAttrUpdatedAt:
				// Reserved attributes are managed by the store itself.
			default:
				payload[key] = value
			}
		}

		args := []any{id, collection, payload, now, now}

		row := tx.QueryRowContext(ctx, query, args...)

		var (
			createdAt time.Time
			updatedAt time.Time
			data      JSONMap
		)

		if err := row.Scan(&id, &data, &createdAt, &updatedAt); err != nil {
			return errors.WithStack(err)
		}

		upsertedDocument = storage.Document(data)
		upsertedDocument[storage.DocumentAttrID] = id
		upsertedDocument[storage.DocumentAttrCreatedAt] = createdAt
		upsertedDocument[storage.DocumentAttrUpdatedAt] = updatedAt

		return nil
	})
	if err != nil {
		return nil, errors.WithStack(err)
	}

	return upsertedDocument, nil
}
// withTx resolves the store database and runs fn inside a transaction on it.
func (s *DocumentStore) withTx(ctx context.Context, fn func(tx *sql.Tx) error) error {
	db, err := s.getDatabase(ctx)
	if err != nil {
		return errors.WithStack(err)
	}

	// errors.WithStack returns nil for a nil error, so this single return
	// covers both the success and failure paths of the transaction.
	return errors.WithStack(withTx(ctx, db, fn))
}
// getDatabase returns the store database handle, opening it lazily from
// s.path on first use and running the schema initialization exactly once.
//
// The previous implementation had two races: a goroutine losing the
// openOnce race could observe s.db still nil and return (nil, nil), and a
// failed sql.Open consumed the Once so the open was never retried. This
// version re-checks under the write lock and only uses openOnce for the
// one-time schema initialization.
func (s *DocumentStore) getDatabase(ctx context.Context) (*sql.DB, error) {
	s.mutex.RLock()
	db := s.db
	s.mutex.RUnlock()

	if db == nil {
		s.mutex.Lock()
		// Re-check under the write lock: another goroutine may have opened
		// the database between our RUnlock and Lock.
		if s.db == nil {
			newDB, err := sql.Open("sqlite", s.path)
			if err != nil {
				s.mutex.Unlock()
				return nil, errors.WithStack(err)
			}

			s.db = newDB
		}
		db = s.db
		s.mutex.Unlock()
	}

	var err error

	// NOTE(review): if ensureTables fails, the Once is still consumed and
	// the error is only reported to the first caller — same behavior as the
	// original code.
	s.openOnce.Do(func() {
		err = errors.WithStack(s.ensureTables(ctx, db))
	})
	if err != nil {
		return nil, errors.WithStack(err)
	}

	return db, nil
}
// ensureTables creates the documents table and its collection index when
// they do not exist yet.
func (s *DocumentStore) ensureTables(ctx context.Context, db *sql.DB) error {
	createSchema := func(tx *sql.Tx) error {
		statements := []string{
			`
		CREATE TABLE IF NOT EXISTS documents (
			id TEXT PRIMARY KEY,
			collection TEXT NOT NULL,
			data TEXT,
			created_at TIMESTAMP NOT NULL,
			updated_at TIMESTAMP NOT NULL,
			UNIQUE(id, collection) ON CONFLICT REPLACE
		);
		`,
			`
		CREATE INDEX IF NOT EXISTS collection_idx ON documents (collection);
		`,
		}

		for _, statement := range statements {
			if _, err := tx.ExecContext(ctx, statement); err != nil {
				return errors.WithStack(err)
			}
		}

		return nil
	}

	if err := withTx(ctx, db, createSchema); err != nil {
		return errors.WithStack(err)
	}

	return nil
}
// NewDocumentStore returns a DocumentStore backed by the SQLite database at
// path. The database is opened lazily on first use.
func NewDocumentStore(path string) *DocumentStore {
	return &DocumentStore{path: path}
}
// NewDocumentStoreWithDB returns a DocumentStore using an already-open
// database handle. The schema is still initialized lazily on first use.
func NewDocumentStoreWithDB(db *sql.DB) *DocumentStore {
	return &DocumentStore{db: db}
}
// Compile-time check that DocumentStore satisfies storage.DocumentStore.
var _ storage.DocumentStore = &DocumentStore{}

View File

@ -0,0 +1,25 @@
package sqlite
import (
"os"
"testing"
"forge.cadoles.com/arcad/edge/pkg/storage/testsuite"
"github.com/pkg/errors"
"gitlab.com/wpetit/goweb/logger"
)
// TestDocumentStore runs the shared document store test suite against a
// fresh SQLite-backed store, removing any database file left by a previous
// run.
func TestDocumentStore(t *testing.T) {
	// t.Parallel()
	logger.SetLevel(logger.LevelDebug)

	file := "./testdata/documentstore_test.sqlite"

	err := os.Remove(file)
	if err != nil && !errors.Is(err, os.ErrNotExist) {
		t.Fatalf("%+v", errors.WithStack(err))
	}

	testsuite.TestDocumentStore(t, NewDocumentStore(file))
}

View File

@ -0,0 +1,42 @@
package sqlite
import (
"database/sql/driver"
"encoding/json"
"github.com/pkg/errors"
)
// JSONMap is a map persisted as a JSON-encoded column value. It implements
// sql.Scanner (pointer receiver) and driver.Valuer (value receiver).
type JSONMap map[string]any
// Scan implements sql.Scanner, decoding a JSON []byte or string column
// value into the map. A nil database value leaves the map untouched; any
// other type is rejected.
func (j *JSONMap) Scan(value interface{}) error {
	if value == nil {
		return nil
	}

	var data []byte

	switch typ := value.(type) {
	case []byte:
		data = typ
	case string:
		data = []byte(typ)
	default:
		return errors.Errorf("unexpected type '%T'", value)
	}

	// Unmarshal into j directly: j is already a *JSONMap, so the previous
	// &j passed a needless **JSONMap indirection to encoding/json.
	if err := json.Unmarshal(data, j); err != nil {
		return errors.WithStack(err)
	}

	return nil
}
// Value implements driver.Valuer by serializing the map to JSON.
func (j JSONMap) Value() (driver.Value, error) {
	raw, err := json.Marshal(j)
	if err != nil {
		return nil, errors.WithStack(err)
	}

	return raw, nil
}

99
pkg/storage/sqlite/sql.go Normal file
View File

@ -0,0 +1,99 @@
package sqlite
import (
"context"
"database/sql"
"sync"
"github.com/pkg/errors"
"gitlab.com/wpetit/goweb/logger"
)
// withTx runs fn inside a transaction on db, committing on success and
// rolling back when fn or the commit fails.
func withTx(ctx context.Context, db *sql.DB, fn func(tx *sql.Tx) error) error {
	// Use BeginTx so the transaction honors the caller's context;
	// db.Begin() ignored the ctx parameter entirely.
	tx, err := db.BeginTx(ctx, nil)
	if err != nil {
		return errors.WithStack(err)
	}

	defer func() {
		// Rollback after a successful Commit returns sql.ErrTxDone, which
		// is expected; any other rollback failure is a programming error.
		if err := tx.Rollback(); err != nil {
			if errors.Is(err, sql.ErrTxDone) {
				return
			}

			panic(errors.WithStack(err))
		}
	}()

	if err := fn(tx); err != nil {
		return errors.WithStack(err)
	}

	if err := tx.Commit(); err != nil {
		return errors.WithStack(err)
	}

	return nil
}
// getDBFunc lazily provides an opened and initialized database handle.
type getDBFunc func(ctx context.Context) (*sql.DB, error)
// newGetDBFunc returns a getDBFunc that lazily opens the SQLite database at
// dsn on first use, runs initFunc on it, and caches the handle for all
// subsequent calls.
func newGetDBFunc(dsn string, initFunc func(ctx context.Context, db *sql.DB) error) getDBFunc {
	var (
		db    *sql.DB
		mutex sync.RWMutex
	)

	return func(ctx context.Context) (*sql.DB, error) {
		mutex.RLock()
		if db != nil {
			defer mutex.RUnlock()
			return db, nil
		}
		mutex.RUnlock()

		mutex.Lock()
		defer mutex.Unlock()

		// Re-check under the write lock: another goroutine may have opened
		// the database between our RUnlock and Lock. Without this check two
		// racing callers would open and initialize the database twice,
		// leaking the first handle.
		if db != nil {
			return db, nil
		}

		logger.Debug(ctx, "opening database", logger.F("dsn", dsn))

		newDB, err := sql.Open("sqlite", dsn)
		if err != nil {
			return nil, errors.WithStack(err)
		}

		logger.Debug(ctx, "initializing database")

		if err = initFunc(ctx, newDB); err != nil {
			return nil, errors.WithStack(err)
		}

		db = newDB

		return db, nil
	}
}
// newGetDBFuncFromDB returns a getDBFunc wrapping an already-open handle;
// initFunc is run at most once, and its error (if any) is returned to every
// caller.
func newGetDBFuncFromDB(db *sql.DB, initFunc func(ctx context.Context, db *sql.DB) error) getDBFunc {
	var (
		initOnce sync.Once
		initErr  error
	)

	return func(ctx context.Context) (*sql.DB, error) {
		initOnce.Do(func() {
			logger.Debug(ctx, "initializing database")
			initErr = initFunc(ctx, db)
		})

		if initErr != nil {
			return nil, errors.WithStack(initErr)
		}

		return db, nil
	}
}

View File

@ -0,0 +1 @@
/*.sqlite

View File

@ -0,0 +1,14 @@
package testsuite
import (
"testing"
"forge.cadoles.com/arcad/edge/pkg/storage"
)
// TestBlobStore runs the blob store conformance suite against store.
func TestBlobStore(t *testing.T, store storage.BlobStore) {
	ops := func(t *testing.T) {
		// t.Parallel()
		testBlobStoreOps(t, store)
	}

	t.Run("Ops", ops)
}

View File

@ -0,0 +1,129 @@
package testsuite
import (
"bytes"
"context"
"io"
"testing"
"forge.cadoles.com/arcad/edge/pkg/storage"
"github.com/pkg/errors"
)
// blobStoreTestCase describes one named scenario run against a
// storage.BlobStore implementation by testBlobStoreOps.
type blobStoreTestCase struct {
	Name string                                                   // subtest name
	Run  func(ctx context.Context, store storage.BlobStore) error // scenario body; a non-nil error fails the subtest
}
// blobStoreTestCases lists the scenarios exercised by testBlobStoreOps.
// Each case uses its own bucket name so the cases can run in parallel
// against a shared store.
var blobStoreTestCases = []blobStoreTestCase{
	{
		Name: "Open new bucket",
		Run: func(ctx context.Context, store storage.BlobStore) error {
			bucket, err := store.OpenBucket(ctx, "open-new-bucket")
			if err != nil {
				return errors.WithStack(err)
			}

			if bucket == nil {
				return errors.New("bucket should not be nil")
			}

			size, err := bucket.Size(ctx)
			if err != nil {
				return errors.WithStack(err)
			}

			if e, g := int64(0), size; e != g {
				return errors.Errorf("bucket size: expected '%v', got '%v'", e, g)
			}

			blobs, err := bucket.List(ctx)
			if err != nil {
				return errors.WithStack(err)
			}

			if e, g := 0, len(blobs); e != g {
				return errors.Errorf("len(blobs): expected '%v', got '%v'", e, g)
			}

			if err := bucket.Close(); err != nil {
				return errors.WithStack(err)
			}

			return nil
		},
	},
	{
		Name: "Create blob",
		Run: func(ctx context.Context, store storage.BlobStore) error {
			bucket, err := store.OpenBucket(ctx, "create-blob")
			if err != nil {
				return errors.WithStack(err)
			}

			blobID := storage.NewBlobID()

			writer, err := bucket.NewWriter(ctx, blobID)
			if err != nil {
				return errors.WithStack(err)
			}

			data := []byte("foo")

			written, err := writer.Write(data)
			if err != nil {
				return errors.WithStack(err)
			}

			if e, g := len(data), written; e != g {
				return errors.Errorf("length of written data: expected '%v', got '%v'", e, g)
			}

			// Return the error instead of panicking so failures are
			// reported through the subtest like every other assertion.
			if err := writer.Close(); err != nil {
				return errors.WithStack(err)
			}

			reader, err := bucket.NewReader(ctx, blobID)
			if err != nil {
				return errors.WithStack(err)
			}

			var buf bytes.Buffer

			written64, err := io.Copy(&buf, reader)
			if err != nil {
				return errors.WithStack(err)
			}

			if e, g := int64(len(data)), written64; e != g {
				return errors.Errorf("length of written data: expected '%v', got '%v'", e, g)
			}

			if err := reader.Close(); err != nil {
				return errors.WithStack(err)
			}

			if err := bucket.Close(); err != nil {
				return errors.WithStack(err)
			}

			return nil
		},
	},
}
// testBlobStoreOps runs every blobStoreTestCase as a parallel subtest
// against the given store.
func testBlobStoreOps(t *testing.T, store storage.BlobStore) {
	for _, tc := range blobStoreTestCases {
		tc := tc // capture the range variable for the parallel closure

		t.Run(tc.Name, func(t *testing.T) {
			t.Parallel()

			if err := tc.Run(context.Background(), store); err != nil {
				t.Errorf("%+v", errors.WithStack(err))
			}
		})
	}
}

View File

@ -0,0 +1,14 @@
package testsuite
import (
"testing"
"forge.cadoles.com/arcad/edge/pkg/storage"
)
// TestDocumentStore runs the document store conformance suite against store.
func TestDocumentStore(t *testing.T, store storage.DocumentStore) {
	query := func(t *testing.T) {
		// t.Parallel()
		testDocumentStoreQuery(t, store)
	}

	t.Run("Query", query)
}

View File

@ -0,0 +1,85 @@
package testsuite
import (
"context"
"testing"
"forge.cadoles.com/arcad/edge/pkg/storage"
"forge.cadoles.com/arcad/edge/pkg/storage/filter"
"github.com/pkg/errors"
)
// documentStoreQueryTestCase describes one Query scenario run by
// testDocumentStoreQuery: optional setup, the query inputs, and a callback
// asserting on the results.
type documentStoreQueryTestCase struct {
	Name              string                                                   // subtest name
	Before            func(ctx context.Context, store storage.DocumentStore) error // optional fixture setup; a non-nil error aborts the subtest
	Collection        string                                                   // collection passed to Query
	Filter            *filter.Filter                                           // filter passed to Query
	QueryOptionsFuncs []storage.QueryOptionFunc                                // extra query options passed to Query
	After             func(t *testing.T, results []storage.Document, err error) // assertions over Query's return values
}
// documentStoreQueryTestCases lists the scenarios exercised by
// testDocumentStoreQuery.
var documentStoreQueryTestCases = []documentStoreQueryTestCase{
	{
		Name: "Simple select",
		Before: func(ctx context.Context, store storage.DocumentStore) error {
			doc1 := storage.Document{
				"attr1": "Foo",
			}

			if _, err := store.Upsert(ctx, "simple_select", doc1); err != nil {
				return errors.WithStack(err)
			}

			doc2 := storage.Document{
				"attr1": "Bar",
			}

			if _, err := store.Upsert(ctx, "simple_select", doc2); err != nil {
				return errors.WithStack(err)
			}

			return nil
		},
		Collection: "simple_select",
		Filter: filter.New(
			filter.NewEqOperator(map[string]interface{}{
				"attr1": "Foo",
			}),
		),
		After: func(t *testing.T, results []storage.Document, err error) {
			if err != nil {
				t.Fatalf("%+v", errors.WithStack(err))
			}

			// Fatalf (not Errorf): indexing results[0] below would panic
			// with an index-out-of-range when the result set is empty.
			if e, g := 1, len(results); e != g {
				t.Fatalf("len(results): expected '%v', got '%v'", e, g)
			}

			if e, g := "Foo", results[0]["attr1"]; e != g {
				t.Errorf("results[0][\"attr1\"]: expected '%v', got '%v'", e, g)
			}
		},
	},
}
// testDocumentStoreQuery runs every documentStoreQueryTestCase as a subtest
// against the given store.
func testDocumentStoreQuery(t *testing.T, store storage.DocumentStore) {
	for _, tc := range documentStoreQueryTestCases {
		tc := tc // capture the range variable for the closure

		t.Run(tc.Name, func(t *testing.T) {
			// t.Parallel()
			ctx := context.Background()

			if tc.Before != nil {
				if err := tc.Before(ctx, store); err != nil {
					t.Fatalf("%+v", errors.WithStack(err))
				}
			}

			documents, err := store.Query(ctx, tc.Collection, tc.Filter, tc.QueryOptionsFuncs...)

			tc.After(t, documents, err)
		})
	}
}