feat: initial commit
69 pkg/storage/document_store.go Normal file
@@ -0,0 +1,69 @@
package storage

import (
    "context"
    "errors"
    "time"

    "forge.cadoles.com/arcad/edge/pkg/storage/filter"
    "github.com/oklog/ulid/v2"
)

var ErrDocumentNotFound = errors.New("document not found")

type DocumentID string

const (
    DocumentAttrID        = "_id"
    DocumentAttrCreatedAt = "_createdAt"
    DocumentAttrUpdatedAt = "_updatedAt"
)

func NewDocumentID() DocumentID {
    return DocumentID(ulid.Make().String())
}

type Document map[string]interface{}

func (d Document) ID() (DocumentID, bool) {
    rawID, exists := d[DocumentAttrID]
    if !exists {
        return "", false
    }

    id, ok := rawID.(string)
    if !ok {
        return "", false
    }

    return DocumentID(id), true
}

func (d Document) CreatedAt() (time.Time, bool) {
    return d.timeAttr(DocumentAttrCreatedAt)
}

func (d Document) UpdatedAt() (time.Time, bool) {
    return d.timeAttr(DocumentAttrUpdatedAt)
}

func (d Document) timeAttr(attr string) (time.Time, bool) {
    rawTime, exists := d[attr]
    if !exists {
        return time.Time{}, false
    }

    t, ok := rawTime.(time.Time)
    if !ok {
        return time.Time{}, false
    }

    return t, true
}

type DocumentStore interface {
    Get(ctx context.Context, collection string, id DocumentID) (Document, error)
    Query(ctx context.Context, collection string, filter *filter.Filter, funcs ...QueryOptionFunc) ([]Document, error)
    Upsert(ctx context.Context, collection string, document Document) (Document, error)
    Delete(ctx context.Context, collection string, id DocumentID) error
}

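A quick sketch of how the Document helpers above behave; the attribute values are illustrative, not part of the commit:

    doc := storage.Document{
        storage.DocumentAttrID: string(storage.NewDocumentID()),
        "title":                "hello",
    }

    if id, ok := doc.ID(); ok {
        fmt.Println("document id:", id)
    }

    if _, ok := doc.CreatedAt(); !ok {
        fmt.Println("no _createdAt yet: the document has not been persisted")
    }
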
17 pkg/storage/filter/and.go Normal file
@@ -0,0 +1,17 @@
package filter

type AndOperator struct {
    children []Operator
}

func (o *AndOperator) Token() Token {
    return TokenAnd
}

func (o *AndOperator) Children() []Operator {
    return o.children
}

func NewAndOperator(ops ...Operator) *AndOperator {
    return &AndOperator{ops}
}

17 pkg/storage/filter/eq.go Normal file
@@ -0,0 +1,17 @@
package filter

type EqOperator struct {
    fields map[string]interface{}
}

func (o *EqOperator) Token() Token {
    return TokenEq
}

func (o *EqOperator) Fields() map[string]interface{} {
    return o.fields
}

func NewEqOperator(fields map[string]interface{}) *EqOperator {
    return &EqOperator{fields}
}

13 pkg/storage/filter/error.go Normal file
@@ -0,0 +1,13 @@
package filter

import "errors"

var (
    ErrInvalidFieldOperator       = errors.New("invalid field operator")
    ErrInvalidAggregationOperator = errors.New("invalid aggregation operator")
    ErrInvalidFieldMap            = errors.New("invalid field map")
    ErrUnknownOperator            = errors.New("unknown operator")
    ErrUnexpectedOperator         = errors.New("unexpected operator")
    ErrUnsupportedOperator        = errors.New("unsupported operator")
    ErrInvalidRoot                = errors.New("invalid root")
)

136 pkg/storage/filter/filter.go Normal file
@@ -0,0 +1,136 @@
package filter

import (
    "github.com/pkg/errors"
)

type Filter struct {
    root Operator
}

func (f *Filter) Root() Operator {
    return f.root
}

func New(root Operator) *Filter {
    return &Filter{root}
}

func NewFrom(raw map[string]interface{}) (*Filter, error) {
    if len(raw) != 1 {
        return nil, errors.WithStack(ErrInvalidRoot)
    }

    op, err := toFieldOperator(raw)
    if err != nil {
        return nil, err
    }

    return &Filter{op}, nil
}

func toFieldOperator(v interface{}) (Operator, error) {
    vv, ok := v.(map[string]interface{})
    if !ok {
        return nil, errors.WithStack(ErrInvalidFieldOperator)
    }

    ops := make([]Operator, 0)

    for rawToken, val := range vv {
        var (
            op  Operator
            err error
        )

        token := Token(rawToken)

        switch {
        case isAggregatorToken(token):
            op, err = toAggregateOperator(token, val)

        case isFieldToken(token):
            fields, ok := val.(map[string]interface{})
            if !ok {
                return nil, errors.WithStack(ErrInvalidFieldMap)
            }

            switch token {
            case TokenEq:
                op = NewEqOperator(fields)
            case TokenNeq:
                op = NewNeqOperator(fields)
            case TokenGt:
                op = NewGtOperator(fields)
            case TokenGte:
                op = NewGteOperator(fields)
            case TokenLt:
                op = NewLtOperator(fields)
            case TokenLte:
                op = NewLteOperator(fields)
            case TokenIn:
                op = NewInOperator(fields)
            case TokenLike:
                op = NewLikeOperator(fields)
            default:
                return nil, errors.Wrapf(ErrUnknownOperator, "unknown operator field '%s'", token)
            }

        default:
            return nil, errors.Wrapf(ErrUnknownOperator, "unknown operator field '%s'", token)
        }

        if err != nil {
            return nil, err
        }

        ops = append(ops, op)
    }

    and := NewAndOperator(ops...)

    return and, nil
}

func toAggregateOperator(token Token, v interface{}) (Operator, error) {
    vv, ok := v.([]interface{})
    if !ok {
        return nil, errors.WithStack(ErrInvalidAggregationOperator)
    }

    ops := make([]Operator, 0)

    for _, c := range vv {
        op, err := toFieldOperator(c)
        if err != nil {
            return nil, err
        }

        ops = append(ops, op)
    }

    var aggregator Operator

    switch token {
    case TokenAnd:
        aggregator = NewAndOperator(ops...)
    case TokenOr:
        aggregator = NewOrOperator(ops...)
    case TokenNot:
        aggregator = NewNotOperator(ops...)
    }

    return aggregator, nil
}

func isAggregatorToken(token Token) bool {
    return token == TokenAnd || token == TokenOr || token == TokenNot
}

func isFieldToken(token Token) bool {
    return token == TokenEq ||
        token == TokenGt || token == TokenGte ||
        token == TokenLt || token == TokenLte ||
        token == TokenNeq || token == TokenIn ||
        token == TokenLike
}

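For reference, a minimal sketch of turning a decoded JSON map into a Filter with NewFrom, and the equivalent programmatic construction; the field names are only examples:

    raw := map[string]interface{}{
        "or": []interface{}{
            map[string]interface{}{"eq": map[string]interface{}{"foo": "bar"}},
            map[string]interface{}{"gt": map[string]interface{}{"count": 10}},
        },
    }

    f, err := filter.NewFrom(raw) // may return ErrInvalidRoot, ErrUnknownOperator, ErrInvalidFieldMap, ...
    if err != nil {
        // handle error
    }

    // Same filter built directly from operators:
    f = filter.New(filter.NewOrOperator(
        filter.NewEqOperator(map[string]interface{}{"foo": "bar"}),
        filter.NewGtOperator(map[string]interface{}{"count": 10}),
    ))

Note that NewFrom wraps whatever it parses in an implicit AndOperator, so the two forms produce equivalent but not structurally identical trees.
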
17 pkg/storage/filter/gt.go Normal file
@@ -0,0 +1,17 @@
package filter

type GtOperator struct {
    fields map[string]interface{}
}

func (o *GtOperator) Token() Token {
    return TokenGt
}

func (o *GtOperator) Fields() map[string]interface{} {
    return o.fields
}

func NewGtOperator(fields OperatorFields) *GtOperator {
    return &GtOperator{fields}
}

17 pkg/storage/filter/gte.go Normal file
@@ -0,0 +1,17 @@
package filter

type GteOperator struct {
    fields OperatorFields
}

func (o *GteOperator) Token() Token {
    return TokenGte
}

func (o *GteOperator) Fields() map[string]interface{} {
    return o.fields
}

func NewGteOperator(fields OperatorFields) *GteOperator {
    return &GteOperator{fields}
}

17 pkg/storage/filter/in.go Normal file
@@ -0,0 +1,17 @@
package filter

type InOperator struct {
    fields map[string]interface{}
}

func (o *InOperator) Token() Token {
    return TokenIn
}

func (o *InOperator) Fields() map[string]interface{} {
    return o.fields
}

func NewInOperator(fields OperatorFields) *InOperator {
    return &InOperator{fields}
}

17 pkg/storage/filter/like.go Normal file
@@ -0,0 +1,17 @@
package filter

type LikeOperator struct {
    fields map[string]interface{}
}

func (o *LikeOperator) Token() Token {
    return TokenLike
}

func (o *LikeOperator) Fields() map[string]interface{} {
    return o.fields
}

func NewLikeOperator(fields OperatorFields) *LikeOperator {
    return &LikeOperator{fields}
}

17 pkg/storage/filter/lt.go Normal file
@@ -0,0 +1,17 @@
package filter

type LtOperator struct {
    fields map[string]interface{}
}

func (o *LtOperator) Token() Token {
    return TokenLt
}

func (o *LtOperator) Fields() map[string]interface{} {
    return o.fields
}

func NewLtOperator(fields OperatorFields) *LtOperator {
    return &LtOperator{fields}
}

17 pkg/storage/filter/lte.go Normal file
@@ -0,0 +1,17 @@
package filter

type LteOperator struct {
    fields map[string]interface{}
}

func (o *LteOperator) Token() Token {
    return TokenLte
}

func (o *LteOperator) Fields() map[string]interface{} {
    return o.fields
}

func NewLteOperator(fields OperatorFields) *LteOperator {
    return &LteOperator{fields}
}

17 pkg/storage/filter/neq.go Normal file
@@ -0,0 +1,17 @@
package filter

type NeqOperator struct {
    fields map[string]interface{}
}

func (o *NeqOperator) Token() Token {
    return TokenNeq
}

func (o *NeqOperator) Fields() map[string]interface{} {
    return o.fields
}

func NewNeqOperator(fields map[string]interface{}) *NeqOperator {
    return &NeqOperator{fields}
}

17 pkg/storage/filter/not.go Normal file
@@ -0,0 +1,17 @@
package filter

type NotOperator struct {
    children []Operator
}

func (o *NotOperator) Token() Token {
    return TokenNot
}

func (o *NotOperator) Children() []Operator {
    return o.children
}

func NewNotOperator(ops ...Operator) *NotOperator {
    return &NotOperator{ops}
}

23 pkg/storage/filter/operator.go Normal file
@@ -0,0 +1,23 @@
package filter

type Token string

const (
    TokenAnd  Token = "and"
    TokenOr   Token = "or"
    TokenNot  Token = "not"
    TokenEq   Token = "eq"
    TokenNeq  Token = "neq"
    TokenGt   Token = "gt"
    TokenGte  Token = "gte"
    TokenLt   Token = "lt"
    TokenLte  Token = "lte"
    TokenIn   Token = "in"
    TokenLike Token = "like"
)

type OperatorFields map[string]interface{}

type Operator interface {
    Token() Token
}

17 pkg/storage/filter/or.go Normal file
@@ -0,0 +1,17 @@
package filter

type OrOperator struct {
    children []Operator
}

func (o *OrOperator) Token() Token {
    return TokenOr
}

func (o *OrOperator) Children() []Operator {
    return o.children
}

func NewOrOperator(ops ...Operator) *OrOperator {
    return &OrOperator{ops}
}

87 pkg/storage/filter/sql/helper.go Normal file
@@ -0,0 +1,87 @@
package sql

import (
    "strings"

    "forge.cadoles.com/arcad/edge/pkg/storage/filter"
    "github.com/pkg/errors"
)

func aggregatorToSQL(operator string, opt *Option, children ...filter.Operator) (string, []interface{}, error) {
    args := make([]interface{}, 0)

    if len(children) == 0 {
        return "", args, nil
    }

    var sb strings.Builder

    if _, err := sb.WriteString("("); err != nil {
        return "", nil, errors.WithStack(err)
    }

    for i, c := range children {
        if i != 0 {
            if _, err := sb.WriteString(" " + operator + " "); err != nil {
                return "", nil, errors.WithStack(err)
            }
        }

        cSQL, cArgs, err := toSQL(c, opt)
        if err != nil {
            return "", nil, errors.WithStack(err)
        }

        args = append(args, cArgs...)

        if _, err := sb.WriteString(cSQL); err != nil {
            return "", nil, errors.WithStack(err)
        }
    }

    if _, err := sb.WriteString(")"); err != nil {
        return "", nil, errors.WithStack(err)
    }

    result := sb.String()
    if result == "()" {
        return "", args, nil
    }

    return result, args, nil
}

func fieldsToSQL(operator string, invert bool, fields map[string]interface{}, option *Option) (string, []interface{}, error) {
    var sb strings.Builder

    args := make([]interface{}, 0)

    i := 0
    for k, v := range fields {
        if i != 0 {
            if _, err := sb.WriteString(" AND "); err != nil {
                return "", nil, errors.WithStack(err)
            }
        }

        var (
            tr  string
            err error
        )

        tr, v, err = option.Transform(operator, invert, k, v, option)
        if err != nil {
            return "", nil, errors.WithStack(err)
        }

        if _, err := sb.WriteString(tr); err != nil {
            return "", nil, errors.WithStack(err)
        }

        args = append(args, option.ValueTransform(v))

        i++
    }

    return sb.String(), args, nil
}

78 pkg/storage/filter/sql/option.go Normal file
@@ -0,0 +1,78 @@
package sql

import (
    "strconv"
)

type (
    PreparedParameterFunc func() string
    KeyTransformFunc      func(key string) string
    ValueTransformFunc    func(v interface{}) interface{}
    TransformFunc         func(operator string, invert bool, key string, value interface{}, option *Option) (string, interface{}, error)
)

type Option struct {
    PreparedParameter PreparedParameterFunc
    KeyTransform      KeyTransformFunc
    ValueTransform    ValueTransformFunc
    Transform         TransformFunc
}

func DefaultOption() *Option {
    opt := &Option{}
    defaults := []OptionFunc{
        WithPreparedParameter("$", 1),
        WithNoOpKeyTransform(),
        WithNoOpValueTransform(),
        WithDefaultTransform(),
    }

    for _, fn := range defaults {
        fn(opt)
    }

    return opt
}

type OptionFunc func(*Option)

func WithPreparedParameter(prefix string, index int) OptionFunc {
    return func(opt *Option) {
        opt.PreparedParameter = func() string {
            param := prefix + strconv.FormatInt(int64(index), 10)
            index++

            return param
        }
    }
}

func WithKeyTransform(transform KeyTransformFunc) OptionFunc {
    return func(opt *Option) {
        opt.KeyTransform = transform
    }
}

func WithNoOpKeyTransform() OptionFunc {
    return WithKeyTransform(func(key string) string {
        return key
    })
}

func WithValueTransform(transform ValueTransformFunc) OptionFunc {
    return func(opt *Option) {
        opt.ValueTransform = transform
    }
}

func WithDefaultTransform() OptionFunc {
    return func(opt *Option) {
        opt.Transform = DefaultTransform
    }
}

func WithNoOpValueTransform() OptionFunc {
    return WithValueTransform(func(value interface{}) interface{} {
        return value
    })
}

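A sketch of how a caller combines these options with ToSQL, using the filterSQL import alias that the SQLite store below uses; the identifier quoting in the key transform is illustrative, not something this commit prescribes:

    // Default option set: $1, $2, ... placeholders, keys and values passed through.
    query, args, err := filterSQL.ToSQL(f.Root())

    // Overridden: ?1, ?2, ... placeholders and quoted column names.
    query, args, err = filterSQL.ToSQL(
        f.Root(),
        filterSQL.WithPreparedParameter("?", 1),
        filterSQL.WithKeyTransform(func(key string) string {
            return `"` + key + `"`
        }),
    )

The SQLite document store later in this commit relies on exactly this mechanism to rewrite keys into json_extract(data, '$.key') expressions.
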
159 pkg/storage/filter/sql/sql.go Normal file
@@ -0,0 +1,159 @@
package sql

import (
    "forge.cadoles.com/arcad/edge/pkg/storage/filter"
    "github.com/pkg/errors"
)

type transformFunc func(op filter.Operator, option *Option) (string, []interface{}, error)

var transforms map[filter.Token]transformFunc

func init() {
    // Initialise transforms map
    transforms = map[filter.Token]transformFunc{
        filter.TokenAnd:  transformAndOperator,
        filter.TokenOr:   transformOrOperator,
        filter.TokenNot:  transformNotOperator,
        filter.TokenEq:   transformEqOperator,
        filter.TokenNeq:  transformNeqOperator,
        filter.TokenGt:   transformGtOperator,
        filter.TokenGte:  transformGteOperator,
        filter.TokenLte:  transformLteOperator,
        filter.TokenLt:   transformLtOperator,
        filter.TokenLike: transformLikeOperator,
        filter.TokenIn:   transformInOperator,
    }
}

func ToSQL(op filter.Operator, funcs ...OptionFunc) (string, []interface{}, error) {
    opt := DefaultOption()

    for _, fn := range funcs {
        fn(opt)
    }

    return toSQL(op, opt)
}

func toSQL(op filter.Operator, opt *Option) (string, []interface{}, error) {
    if op == nil {
        return "", nil, nil
    }

    transform, exists := transforms[op.Token()]
    if !exists {
        return "", nil, errors.WithStack(filter.ErrUnsupportedOperator)
    }

    sql, args, err := transform(op, opt)
    if err != nil {
        return "", nil, errors.WithStack(err)
    }

    return sql, args, nil
}

func transformAndOperator(op filter.Operator, option *Option) (string, []interface{}, error) {
    andOp, ok := op.(*filter.AndOperator)
    if !ok {
        return "", nil, errors.Wrapf(filter.ErrUnexpectedOperator, "expected '%s', got '%s'", filter.TokenAnd, op.Token())
    }

    return aggregatorToSQL("AND", option, andOp.Children()...)
}

func transformOrOperator(op filter.Operator, option *Option) (string, []interface{}, error) {
    orOp, ok := op.(*filter.OrOperator)
    if !ok {
        return "", nil, errors.Wrapf(filter.ErrUnexpectedOperator, "expected '%s', got '%s'", filter.TokenOr, op.Token())
    }

    return aggregatorToSQL("OR", option, orOp.Children()...)
}

func transformEqOperator(op filter.Operator, option *Option) (string, []interface{}, error) {
    eqOp, ok := op.(*filter.EqOperator)
    if !ok {
        return "", nil, errors.Wrapf(filter.ErrUnexpectedOperator, "expected '%s', got '%s'", filter.TokenEq, op.Token())
    }

    return fieldsToSQL("=", false, eqOp.Fields(), option)
}

func transformNeqOperator(op filter.Operator, option *Option) (string, []interface{}, error) {
    eqOp, ok := op.(*filter.NeqOperator)
    if !ok {
        return "", nil, errors.Wrapf(filter.ErrUnexpectedOperator, "expected '%s', got '%s'", filter.TokenNeq, op.Token())
    }

    return fieldsToSQL("!=", false, eqOp.Fields(), option)
}

func transformGtOperator(op filter.Operator, option *Option) (string, []interface{}, error) {
    gtOp, ok := op.(*filter.GtOperator)
    if !ok {
        return "", nil, errors.Wrapf(filter.ErrUnexpectedOperator, "expected '%s', got '%s'", filter.TokenGt, op.Token())
    }

    return fieldsToSQL(">", false, gtOp.Fields(), option)
}

func transformGteOperator(op filter.Operator, option *Option) (string, []interface{}, error) {
    gteOp, ok := op.(*filter.GteOperator)
    if !ok {
        return "", nil, errors.Wrapf(filter.ErrUnexpectedOperator, "expected '%s', got '%s'", filter.TokenGte, op.Token())
    }

    return fieldsToSQL(">=", false, gteOp.Fields(), option)
}

func transformLtOperator(op filter.Operator, option *Option) (string, []interface{}, error) {
    ltOp, ok := op.(*filter.LtOperator)
    if !ok {
        return "", nil, errors.Wrapf(filter.ErrUnexpectedOperator, "expected '%s', got '%s'", filter.TokenLt, op.Token())
    }

    return fieldsToSQL("<", false, ltOp.Fields(), option)
}

func transformLteOperator(op filter.Operator, option *Option) (string, []interface{}, error) {
    lteOp, ok := op.(*filter.LteOperator)
    if !ok {
        return "", nil, errors.Wrapf(filter.ErrUnexpectedOperator, "expected '%s', got '%s'", filter.TokenLte, op.Token())
    }

    return fieldsToSQL("<=", false, lteOp.Fields(), option)
}

func transformInOperator(op filter.Operator, option *Option) (string, []interface{}, error) {
    inOp, ok := op.(*filter.InOperator)
    if !ok {
        return "", nil, errors.Wrapf(filter.ErrUnexpectedOperator, "expected '%s', got '%s'", filter.TokenIn, op.Token())
    }

    return fieldsToSQL("IN", true, inOp.Fields(), option)
}

func transformLikeOperator(op filter.Operator, option *Option) (string, []interface{}, error) {
    likeOp, ok := op.(*filter.LikeOperator)
    if !ok {
        return "", nil, errors.Wrapf(filter.ErrUnexpectedOperator, "expected '%s', got '%s'", filter.TokenLike, op.Token())
    }

    return fieldsToSQL("LIKE", false, likeOp.Fields(), option)
}

func transformNotOperator(op filter.Operator, option *Option) (string, []interface{}, error) {
    notOp, ok := op.(*filter.NotOperator)
    if !ok {
        return "", nil, errors.Wrapf(filter.ErrUnexpectedOperator, "expected '%s', got '%s'", filter.TokenNot, op.Token())
    }

    sql, args, err := aggregatorToSQL("AND", option, notOp.Children()...)
    if err != nil {
        return "", nil, errors.WithStack(err)
    }

    return "NOT " + sql, args, nil
}

84 pkg/storage/filter/sql/sql_test.go Normal file
@@ -0,0 +1,84 @@
package sql

import (
    "encoding/json"
    "fmt"
    "testing"

    "forge.cadoles.com/arcad/edge/pkg/storage/filter"
)

type (
    op   map[string]interface{}
    aggr []interface{}
)

type testCase struct {
    RawFilter    string
    ExpectedSQL  string
    ExpectedArgs []interface{}
}

var testCases = []testCase{
    {
        RawFilter: `
            {
                "or": [
                    {"eq": {"foo": "bar"}},
                    {"neq": {"hello": "world"}}
                ]
            }
        `,
        ExpectedSQL:  "(((foo = $1) OR (hello != $2)))",
        ExpectedArgs: []interface{}{"bar", "world"},
    },
}

func TestSQLFilter(t *testing.T) {
    for i, tc := range testCases {
        func(tc testCase) {
            t.Run(fmt.Sprintf("Test case #%d", i), func(t *testing.T) {
                raw := make(map[string]interface{})
                if err := json.Unmarshal([]byte(tc.RawFilter), &raw); err != nil {
                    t.Fatal(err)
                }

                query, err := filter.NewFrom(raw)
                if err != nil {
                    t.Fatal(err)
                }

                sql, args, err := ToSQL(query.Root())
                if err != nil {
                    t.Error(err)
                }

                if e, g := tc.ExpectedSQL, sql; e != g {
                    t.Errorf("sql: expected '%s', got '%s'", e, g)
                }

                if args == nil {
                    t.Fatal("args should not be nil")
                }

                for i, a := range args {
                    if i >= len(tc.ExpectedArgs) {
                        t.Errorf("args[%d]: expected nil, got '%v'", i, a)

                        continue
                    }

                    if e, g := tc.ExpectedArgs[i], a; e != g {
                        t.Errorf("args[%d]: expected '%v', got '%v'", i, e, g)
                    }
                }

                for i, a := range tc.ExpectedArgs {
                    if i >= len(args) {
                        t.Errorf("args[%d]: expected '%v', got nil", i, a)
                    }
                }
            })
        }(tc)
    }
}

45 pkg/storage/filter/sql/transform.go Normal file
@@ -0,0 +1,45 @@
package sql

import (
    "strings"

    "github.com/pkg/errors"
)

func DefaultTransform(operator string, invert bool, key string, value interface{}, option *Option) (string, interface{}, error) {
    var sb strings.Builder

    if invert {
        if _, err := sb.WriteString(option.PreparedParameter()); err != nil {
            return "", nil, errors.WithStack(err)
        }
    } else {
        if _, err := sb.WriteString(option.KeyTransform(key)); err != nil {
            return "", nil, errors.WithStack(err)
        }
    }

    if _, err := sb.WriteString(" "); err != nil {
        return "", nil, errors.WithStack(err)
    }

    if _, err := sb.WriteString(operator); err != nil {
        return "", nil, errors.WithStack(err)
    }

    if invert {
        if _, err := sb.WriteString(" "); err != nil {
            return "", nil, errors.WithStack(err)
        }

        if _, err := sb.WriteString(key); err != nil {
            return "", nil, errors.WithStack(err)
        }
    } else {
        if _, err := sb.WriteString(" " + option.PreparedParameter()); err != nil {
            return "", nil, errors.WithStack(err)
        }
    }

    return sb.String(), value, nil
}

41 pkg/storage/query_option.go Normal file
@@ -0,0 +1,41 @@
package storage

type OrderDirection string

const (
    OrderDirectionAsc  OrderDirection = "ASC"
    OrderDirectionDesc OrderDirection = "DESC"
)

type QueryOption struct {
    Limit          *int
    Offset         *int
    OrderBy        *string
    OrderDirection *OrderDirection
}

type QueryOptionFunc func(o *QueryOption)

func WithLimit(limit int) QueryOptionFunc {
    return func(o *QueryOption) {
        o.Limit = &limit
    }
}

func WithOffset(offset int) QueryOptionFunc {
    return func(o *QueryOption) {
        o.Offset = &offset
    }
}

func WithOrderBy(orderBy string) QueryOptionFunc {
    return func(o *QueryOption) {
        o.OrderBy = &orderBy
    }
}

func WithOrderDirection(direction OrderDirection) QueryOptionFunc {
    return func(o *QueryOption) {
        o.OrderDirection = &direction
    }
}

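Both sides of the QueryOptionFunc pattern, sketched: how a caller passes options, and how a store implementation would fold them into a QueryOption before building its query (the SQLite store below accepts these functions but does not apply them yet; the collection name is illustrative):

    // Caller side:
    docs, err := store.Query(ctx, "articles", f,
        storage.WithLimit(10),
        storage.WithOffset(20),
        storage.WithOrderBy("title"),
        storage.WithOrderDirection(storage.OrderDirectionAsc),
    )

    // Implementation side:
    opts := &storage.QueryOption{}
    for _, fn := range funcs {
        fn(opts)
    }

    if opts.Limit != nil {
        // e.g. append " LIMIT ?" and *opts.Limit to the generated SQL
    }
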
338 pkg/storage/sqlite/document_store.go Normal file
@@ -0,0 +1,338 @@
package sqlite

import (
    "context"
    "database/sql"
    "fmt"
    "sync"
    "time"

    "forge.cadoles.com/arcad/edge/pkg/storage"
    "forge.cadoles.com/arcad/edge/pkg/storage/filter"
    filterSQL "forge.cadoles.com/arcad/edge/pkg/storage/filter/sql"
    "github.com/pkg/errors"
    "gitlab.com/wpetit/goweb/logger"

    _ "modernc.org/sqlite"
)

type DocumentStore struct {
    db       *sql.DB
    path     string
    openOnce sync.Once
}

// Delete implements storage.DocumentStore
func (s *DocumentStore) Delete(ctx context.Context, collection string, id storage.DocumentID) error {
    err := s.withTx(ctx, func(tx *sql.Tx) error {
        query := `
            DELETE FROM documents
            WHERE collection = $1 AND id = $2
        `

        _, err := tx.ExecContext(ctx, query, collection, string(id))
        if err != nil {
            return errors.WithStack(err)
        }

        return nil
    })
    if err != nil {
        return errors.WithStack(err)
    }

    return nil
}

// Get implements storage.DocumentStore
func (s *DocumentStore) Get(ctx context.Context, collection string, id storage.DocumentID) (storage.Document, error) {
    var document storage.Document

    err := s.withTx(ctx, func(tx *sql.Tx) error {
        query := `
            SELECT id, data, created_at, updated_at
            FROM documents
            WHERE collection = $1 AND id = $2
        `

        row := tx.QueryRowContext(ctx, query, collection, string(id))

        var (
            createdAt time.Time
            updatedAt time.Time
            data      JSONMap
        )

        err := row.Scan(&id, &data, &createdAt, &updatedAt)
        if err != nil {
            if errors.Is(err, sql.ErrNoRows) {
                return errors.WithStack(storage.ErrDocumentNotFound)
            }

            return errors.WithStack(err)
        }

        document = storage.Document(data)

        document[storage.DocumentAttrID] = id
        document[storage.DocumentAttrCreatedAt] = createdAt
        document[storage.DocumentAttrUpdatedAt] = updatedAt

        return nil
    })
    if err != nil {
        return nil, errors.WithStack(err)
    }

    return document, nil
}

// Query implements storage.DocumentStore
func (s *DocumentStore) Query(ctx context.Context, collection string, filter *filter.Filter, funcs ...storage.QueryOptionFunc) ([]storage.Document, error) {
    var documents []storage.Document

    err := s.withTx(ctx, func(tx *sql.Tx) error {
        criteria, args, err := filterSQL.ToSQL(
            filter.Root(),
            filterSQL.WithPreparedParameter("$", 2),
            filterSQL.WithKeyTransform(func(key string) string {
                return fmt.Sprintf("json_extract(data, '$.%s')", key)
            }),
        )
        if err != nil {
            return errors.WithStack(err)
        }

        query := `
            SELECT id, data, created_at, updated_at
            FROM documents
            WHERE collection = $1 AND (` + criteria + `)
        `

        args = append([]interface{}{collection}, args...)

        logger.Debug(
            ctx, "executing query",
            logger.F("query", query),
            logger.F("args", args),
        )

        rows, err := tx.QueryContext(ctx, query, args...)
        if err != nil {
            return errors.WithStack(err)
        }

        defer rows.Close()

        documents = make([]storage.Document, 0)

        for rows.Next() {
            var (
                id        storage.DocumentID
                createdAt time.Time
                updatedAt time.Time
                data      JSONMap
            )

            if err := rows.Scan(&id, &data, &createdAt, &updatedAt); err != nil {
                return errors.WithStack(err)
            }

            document := storage.Document(data)
            document[storage.DocumentAttrID] = id
            document[storage.DocumentAttrCreatedAt] = createdAt
            document[storage.DocumentAttrUpdatedAt] = updatedAt

            documents = append(documents, document)
        }

        return nil
    })
    if err != nil {
        return nil, errors.WithStack(err)
    }

    return documents, nil
}

// Upsert implements storage.DocumentStore
func (s *DocumentStore) Upsert(ctx context.Context, collection string, document storage.Document) (storage.Document, error) {
    var upsertedDocument storage.Document

    err := s.withTx(ctx, func(tx *sql.Tx) error {
        query := `
            INSERT INTO documents (id, collection, data, created_at, updated_at)
            VALUES($1, $2, $3, $4, $4)
            ON CONFLICT (id, collection) DO UPDATE SET
                data = $3, updated_at = $4
            RETURNING "id", "data", "created_at", "updated_at"
        `

        now := time.Now().UTC()

        id, exists := document.ID()
        if !exists || id == "" {
            id = storage.NewDocumentID()
        }

        delete(document, storage.DocumentAttrID)
        delete(document, storage.DocumentAttrCreatedAt)
        delete(document, storage.DocumentAttrUpdatedAt)

        args := []any{id, collection, JSONMap(document), now, now}

        row := tx.QueryRowContext(ctx, query, args...)

        var (
            createdAt time.Time
            updatedAt time.Time
            data      JSONMap
        )

        err := row.Scan(&id, &data, &createdAt, &updatedAt)
        if err != nil {
            return errors.WithStack(err)
        }

        upsertedDocument = storage.Document(data)

        upsertedDocument[storage.DocumentAttrID] = id
        upsertedDocument[storage.DocumentAttrCreatedAt] = createdAt
        upsertedDocument[storage.DocumentAttrUpdatedAt] = updatedAt

        return nil
    })
    if err != nil {
        return nil, errors.WithStack(err)
    }

    return upsertedDocument, nil
}

func (s *DocumentStore) withTx(ctx context.Context, fn func(tx *sql.Tx) error) error {
    var db *sql.DB

    db, err := s.getDatabase(ctx)
    if err != nil {
        return errors.WithStack(err)
    }

    if err := withTx(ctx, db, fn); err != nil {
        return errors.WithStack(err)
    }

    return nil
}

func withTx(ctx context.Context, db *sql.DB, fn func(tx *sql.Tx) error) error {
    var tx *sql.Tx

    tx, err := db.Begin()
    if err != nil {
        return errors.WithStack(err)
    }

    defer func() {
        if err := tx.Rollback(); err != nil {
            if errors.Is(err, sql.ErrTxDone) {
                return
            }

            panic(errors.WithStack(err))
        }
    }()

    if err = fn(tx); err != nil {
        return errors.WithStack(err)
    }

    if err = tx.Commit(); err != nil {
        return errors.WithStack(err)
    }

    return nil
}

func (s *DocumentStore) getDatabase(ctx context.Context) (*sql.DB, error) {
    if s.db != nil {
        return s.db, nil
    }

    var (
        db  *sql.DB
        err error
    )

    s.openOnce.Do(func() {
        db, err = sql.Open("sqlite", s.path)
        if err != nil {
            err = errors.WithStack(err)

            return
        }

        if err = s.ensureTables(ctx, db); err != nil {
            err = errors.WithStack(err)

            return
        }
    })

    if err != nil {
        return nil, errors.WithStack(err)
    }

    s.db = db

    return db, nil
}

func (s *DocumentStore) ensureTables(ctx context.Context, db *sql.DB) error {
    err := withTx(ctx, db, func(tx *sql.Tx) error {
        query := `
            CREATE TABLE IF NOT EXISTS documents (
                id TEXT PRIMARY KEY,
                collection TEXT NOT NULL,
                data TEXT,
                created_at TIMESTAMP NOT NULL,
                updated_at TIMESTAMP NOT NULL,
                UNIQUE(id, collection) ON CONFLICT REPLACE
            );
        `
        if _, err := tx.ExecContext(ctx, query); err != nil {
            return errors.WithStack(err)
        }

        query = `
            CREATE INDEX IF NOT EXISTS collection_idx ON documents (collection);
        `
        if _, err := tx.ExecContext(ctx, query); err != nil {
            return errors.WithStack(err)
        }

        return nil
    })
    if err != nil {
        return errors.WithStack(err)
    }

    return nil
}

func NewDocumentStore(path string) *DocumentStore {
    return &DocumentStore{
        db:       nil,
        path:     path,
        openOnce: sync.Once{},
    }
}

func NewDocumentStoreWithDB(db *sql.DB) *DocumentStore {
    return &DocumentStore{
        db:       db,
        path:     "",
        openOnce: sync.Once{},
    }
}

var _ storage.DocumentStore = &DocumentStore{}

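An end-to-end sketch of the SQLite-backed store; ":memory:" mirrors the test below, the collection and attributes are illustrative, and note that in the current implementation the upserted document carries its identifier as a storage.DocumentID under the _id attribute:

    store := sqlite.NewDocumentStore(":memory:")

    doc, err := store.Upsert(ctx, "articles", storage.Document{"title": "Hello"})
    // doc now also contains the generated _id, _createdAt and _updatedAt attributes.

    matches, err := store.Query(ctx, "articles", filter.New(
        filter.NewEqOperator(map[string]interface{}{"title": "Hello"}),
    ))

    id, _ := doc[storage.DocumentAttrID].(storage.DocumentID)
    err = store.Delete(ctx, "articles", id)
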
16 pkg/storage/sqlite/document_store_test.go Normal file
@@ -0,0 +1,16 @@
package sqlite

import (
    "testing"

    "forge.cadoles.com/arcad/edge/pkg/storage/testsuite"
    "gitlab.com/wpetit/goweb/logger"
)

func TestDocumentStore(t *testing.T) {
    logger.SetLevel(logger.LevelDebug)

    store := NewDocumentStore(":memory:")

    testsuite.TestDocumentStore(t, store)
}

42 pkg/storage/sqlite/json.go Normal file
@@ -0,0 +1,42 @@
package sqlite

import (
    "database/sql/driver"
    "encoding/json"

    "github.com/pkg/errors"
)

type JSONMap map[string]any

func (j *JSONMap) Scan(value interface{}) error {
    if value == nil {
        return nil
    }

    var data []byte

    switch typ := value.(type) {
    case []byte:
        data = typ
    case string:
        data = []byte(typ)
    default:
        return errors.Errorf("unexpected type '%T'", value)
    }

    if err := json.Unmarshal(data, &j); err != nil {
        return errors.WithStack(err)
    }

    return nil
}

func (j JSONMap) Value() (driver.Value, error) {
    data, err := json.Marshal(j)
    if err != nil {
        return nil, errors.WithStack(err)
    }

    return data, nil
}

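A small round-trip sketch for JSONMap; the values are hand-picked for illustration:

    m := JSONMap{"count": 2, "foo": "bar"}

    raw, err := m.Value() // []byte(`{"count":2,"foo":"bar"}`), keys sorted by encoding/json

    var decoded JSONMap
    err = decoded.Scan(raw) // accepts []byte or string; any other driver type is rejected
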
16 pkg/storage/testsuite/document_store.go Normal file
@@ -0,0 +1,16 @@
package testsuite

import (
    "testing"

    "forge.cadoles.com/arcad/edge/pkg/storage"
)

func TestDocumentStore(t *testing.T, store storage.DocumentStore) {
    t.Parallel()

    t.Run("TestDocumentStoreQuery", func(t *testing.T) {
        t.Parallel()
        testDocumentStoreQuery(t, store)
    })
}

85 pkg/storage/testsuite/document_store_query.go Normal file
@@ -0,0 +1,85 @@
package testsuite

import (
    "context"
    "testing"

    "forge.cadoles.com/arcad/edge/pkg/storage"
    "forge.cadoles.com/arcad/edge/pkg/storage/filter"
    "github.com/pkg/errors"
)

type documentStoreQueryTestCase struct {
    Name              string
    Before            func(ctx context.Context, store storage.DocumentStore) error
    Collection        string
    Filter            *filter.Filter
    QueryOptionsFuncs []storage.QueryOptionFunc
    After             func(t *testing.T, results []storage.Document, err error)
}

var documentStoreQueryTestCases = []documentStoreQueryTestCase{
    {
        Name: "Simple select",
        Before: func(ctx context.Context, store storage.DocumentStore) error {
            doc1 := storage.Document{
                "attr1": "Foo",
            }

            if _, err := store.Upsert(ctx, "simple_select", doc1); err != nil {
                return errors.WithStack(err)
            }

            doc2 := storage.Document{
                "attr1": "Bar",
            }

            if _, err := store.Upsert(ctx, "simple_select", doc2); err != nil {
                return errors.WithStack(err)
            }

            return nil
        },
        Collection: "simple_select",
        Filter: filter.New(
            filter.NewEqOperator(map[string]interface{}{
                "attr1": "Foo",
            }),
        ),
        After: func(t *testing.T, results []storage.Document, err error) {
            if err != nil {
                t.Fatalf("%+v", errors.WithStack(err))
            }

            if e, g := 1, len(results); e != g {
                t.Errorf("len(results): expected '%v', got '%v'", e, g)
            }

            if e, g := "Foo", results[0]["attr1"]; e != g {
                t.Errorf("results[0][\"attr1\"]: expected '%v', got '%v'", e, g)
            }
        },
    },
}

func testDocumentStoreQuery(t *testing.T, store storage.DocumentStore) {
    for _, tc := range documentStoreQueryTestCases {
        func(tc documentStoreQueryTestCase) {
            t.Run(tc.Name, func(t *testing.T) {
                t.Parallel()

                ctx := context.Background()

                if tc.Before != nil {
                    if err := tc.Before(ctx, store); err != nil {
                        t.Fatalf("%+v", errors.WithStack(err))
                    }
                }

                documents, err := store.Query(ctx, tc.Collection, tc.Filter, tc.QueryOptionsFuncs...)

                tc.After(t, documents, err)
            })
        }(tc)
    }
}
