2023-02-09 12:16:36 +01:00
|
|
|
package sqlite
|
|
|
|
|
|
|
|
import (
|
|
|
|
"context"
|
|
|
|
"database/sql"
|
|
|
|
"fmt"
|
2023-03-01 12:12:11 +01:00
|
|
|
"math"
|
2023-02-09 12:16:36 +01:00
|
|
|
"time"
|
|
|
|
|
|
|
|
"forge.cadoles.com/arcad/edge/pkg/storage"
|
|
|
|
"forge.cadoles.com/arcad/edge/pkg/storage/filter"
|
|
|
|
filterSQL "forge.cadoles.com/arcad/edge/pkg/storage/filter/sql"
|
|
|
|
"github.com/pkg/errors"
|
|
|
|
"gitlab.com/wpetit/goweb/logger"
|
|
|
|
|
2024-01-11 19:30:30 +01:00
|
|
|
_ "embed"
|
|
|
|
|
2023-02-09 12:16:36 +01:00
|
|
|
_ "modernc.org/sqlite"
|
|
|
|
)
|
|
|
|
|
2024-01-11 19:30:30 +01:00
|
|
|
// documentStoreSchema holds the SQL schema for the document store,
// embedded at build time from document_store.sql.
//
//go:embed document_store.sql
var documentStoreSchema string
|
|
|
|
|
2023-02-09 12:16:36 +01:00
|
|
|
// DocumentStore is a storage.DocumentStore implementation backed by SQLite.
type DocumentStore struct {
	// getDB returns the underlying database handle on demand
	// (see NewGetDBFunc / NewGetDBFuncFromDB).
	getDB GetDBFunc
}
|
|
|
|
|
|
|
|
// Delete implements storage.DocumentStore
|
|
|
|
func (s *DocumentStore) Delete(ctx context.Context, collection string, id storage.DocumentID) error {
|
|
|
|
err := s.withTx(ctx, func(tx *sql.Tx) error {
|
|
|
|
query := `
|
|
|
|
DELETE FROM documents
|
|
|
|
WHERE collection = $1 AND id = $2
|
|
|
|
`
|
|
|
|
|
|
|
|
_, err := tx.ExecContext(ctx, query, collection, string(id))
|
|
|
|
if err != nil {
|
|
|
|
return errors.WithStack(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
return errors.WithStack(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Get implements storage.DocumentStore
|
|
|
|
func (s *DocumentStore) Get(ctx context.Context, collection string, id storage.DocumentID) (storage.Document, error) {
|
|
|
|
var document storage.Document
|
|
|
|
|
|
|
|
err := s.withTx(ctx, func(tx *sql.Tx) error {
|
|
|
|
query := `
|
2024-01-11 19:30:30 +01:00
|
|
|
SELECT id, revision, data, created_at, updated_at
|
2023-02-09 12:16:36 +01:00
|
|
|
FROM documents
|
|
|
|
WHERE collection = $1 AND id = $2
|
|
|
|
`
|
|
|
|
|
|
|
|
row := tx.QueryRowContext(ctx, query, collection, string(id))
|
|
|
|
|
|
|
|
var (
|
|
|
|
createdAt time.Time
|
|
|
|
updatedAt time.Time
|
|
|
|
data JSONMap
|
2024-01-11 19:30:30 +01:00
|
|
|
revision int
|
2023-02-09 12:16:36 +01:00
|
|
|
)
|
|
|
|
|
2024-01-11 19:30:30 +01:00
|
|
|
err := row.Scan(&id, &revision, &data, &createdAt, &updatedAt)
|
2023-02-09 12:16:36 +01:00
|
|
|
if err != nil {
|
|
|
|
if errors.Is(err, sql.ErrNoRows) {
|
|
|
|
return errors.WithStack(storage.ErrDocumentNotFound)
|
|
|
|
}
|
|
|
|
|
|
|
|
return errors.WithStack(err)
|
|
|
|
}
|
|
|
|
|
2023-04-06 14:45:50 +02:00
|
|
|
if err := row.Err(); err != nil {
|
|
|
|
return errors.WithStack(err)
|
|
|
|
}
|
|
|
|
|
2023-02-09 12:16:36 +01:00
|
|
|
document = storage.Document(data)
|
|
|
|
|
|
|
|
document[storage.DocumentAttrID] = id
|
2024-01-11 19:30:30 +01:00
|
|
|
document[storage.DocumentAttrRevision] = revision
|
2023-02-09 12:16:36 +01:00
|
|
|
document[storage.DocumentAttrCreatedAt] = createdAt
|
|
|
|
document[storage.DocumentAttrUpdatedAt] = updatedAt
|
|
|
|
|
|
|
|
return nil
|
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
return nil, errors.WithStack(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
return document, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Query implements storage.DocumentStore. It returns all documents of a
// collection matching the (optional) filter, honoring the OrderBy/
// OrderDirection and Offset/Limit query options. A nil filter matches
// every document in the collection.
func (s *DocumentStore) Query(ctx context.Context, collection string, filter *filter.Filter, funcs ...storage.QueryOptionFunc) ([]storage.Document, error) {
	// Collect query options from the variadic functional options.
	opts := &storage.QueryOptions{}
	for _, fn := range funcs {
		fn(opts)
	}

	var documents []storage.Document

	err := s.withTx(ctx, func(tx *sql.Tx) error {
		// Default criteria matches everything when no filter is given.
		criteria := "1 = 1"
		args := make([]any, 0)

		var err error

		if filter != nil {
			// Translate the filter tree into a SQL fragment. Prepared
			// parameters start at $2 because $1 is reserved for the
			// collection below; document attributes are resolved from
			// the JSON payload via json_extract.
			criteria, args, err = filterSQL.ToSQL(
				filter.Root(),
				filterSQL.WithPreparedParameter("$", 2),
				filterSQL.WithTransform(transformOperator),
				filterSQL.WithKeyTransform(func(key string) string {
					return fmt.Sprintf("json_extract(data, '$.%s')", key)
				}),
			)
			if err != nil {
				return errors.WithStack(err)
			}
		}

		query := `
			SELECT id, revision, data, created_at, updated_at
			FROM documents
			WHERE collection = $1 AND (` + criteria + `)
		`

		// Prepend the collection so it binds to $1, ahead of the
		// filter-generated parameters ($2...).
		args = append([]interface{}{collection}, args...)

		if opts.OrderBy != nil {
			// Ascending order unless explicitly overridden.
			direction := storage.OrderDirectionAsc
			if opts.OrderDirection != nil {
				direction = *opts.OrderDirection
			}

			query, args = withOrderByClause(query, args, *opts.OrderBy, direction)
		}

		if opts.Offset != nil || opts.Limit != nil {
			offset := 0
			if opts.Offset != nil {
				offset = *opts.Offset
			}

			// No explicit limit means "no upper bound".
			limit := math.MaxInt
			if opts.Limit != nil {
				limit = *opts.Limit
			}

			query, args = withLimitOffsetClause(query, args, limit, offset)
		}

		logger.Debug(
			ctx, "executing query",
			logger.F("query", query),
			logger.F("args", args),
		)

		rows, err := tx.QueryContext(ctx, query, args...)
		if err != nil {
			return errors.WithStack(err)
		}

		defer func() {
			if err := rows.Close(); err != nil {
				logger.Error(ctx, "could not close rows", logger.CapturedE(errors.WithStack(err)))
			}
		}()

		// Non-nil empty result by default, even when nothing matches.
		documents = make([]storage.Document, 0)

		for rows.Next() {
			var (
				id        storage.DocumentID
				revision  int
				createdAt time.Time
				updatedAt time.Time
				data      JSONMap
			)

			if err := rows.Scan(&id, &revision, &data, &createdAt, &updatedAt); err != nil {
				return errors.WithStack(err)
			}

			// Expose the reserved attributes on top of the stored data.
			document := storage.Document(data)
			document[storage.DocumentAttrID] = id
			document[storage.DocumentAttrRevision] = revision
			document[storage.DocumentAttrCreatedAt] = createdAt
			document[storage.DocumentAttrUpdatedAt] = updatedAt

			documents = append(documents, document)
		}

		if err := rows.Err(); err != nil {
			return errors.WithStack(err)
		}

		return nil
	})
	if err != nil {
		return nil, errors.WithStack(err)
	}

	return documents, nil
}
|
|
|
|
|
|
|
|
// Upsert implements storage.DocumentStore
|
|
|
|
func (s *DocumentStore) Upsert(ctx context.Context, collection string, document storage.Document) (storage.Document, error) {
|
|
|
|
var upsertedDocument storage.Document
|
|
|
|
|
|
|
|
err := s.withTx(ctx, func(tx *sql.Tx) error {
|
2024-01-11 19:30:30 +01:00
|
|
|
id, exists := document.ID()
|
|
|
|
if !exists || id == "" {
|
|
|
|
id = storage.NewDocumentID()
|
|
|
|
}
|
|
|
|
|
2023-02-09 12:16:36 +01:00
|
|
|
query := `
|
2024-01-11 19:30:30 +01:00
|
|
|
SELECT revision FROM documents WHERE id = $1
|
|
|
|
`
|
|
|
|
|
|
|
|
args := []any{id}
|
|
|
|
|
|
|
|
logger.Debug(
|
|
|
|
ctx, "executing query",
|
|
|
|
logger.F("query", query),
|
|
|
|
logger.F("args", args),
|
|
|
|
)
|
|
|
|
|
|
|
|
row := tx.QueryRowContext(ctx, query, args...)
|
|
|
|
|
|
|
|
var storedRevision int
|
|
|
|
|
|
|
|
if err := row.Scan(&storedRevision); err != nil && !errors.Is(err, sql.ErrNoRows) {
|
|
|
|
return errors.WithStack(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
revision, found := document.Revision()
|
|
|
|
if found && storedRevision != revision {
|
|
|
|
return errors.Wrapf(storage.ErrDocumentRevisionConflict, "document revision '%d' does not match stored '%d'", revision, storedRevision)
|
|
|
|
}
|
|
|
|
|
|
|
|
query = `
|
2023-02-09 12:16:36 +01:00
|
|
|
INSERT INTO documents (id, collection, data, created_at, updated_at)
|
|
|
|
VALUES($1, $2, $3, $4, $4)
|
|
|
|
ON CONFLICT (id, collection) DO UPDATE SET
|
2024-01-11 19:30:30 +01:00
|
|
|
data = $3, updated_at = $4, revision = revision + 1
|
|
|
|
RETURNING "id", "revision", "data", "created_at", "updated_at"
|
2023-02-09 12:16:36 +01:00
|
|
|
`
|
|
|
|
|
|
|
|
now := time.Now().UTC()
|
|
|
|
|
2024-01-11 19:30:30 +01:00
|
|
|
args = []any{id, collection, JSONMap(document), now, now}
|
2023-02-09 12:16:36 +01:00
|
|
|
|
2023-02-24 10:38:48 +01:00
|
|
|
logger.Debug(
|
|
|
|
ctx, "executing query",
|
|
|
|
logger.F("query", query),
|
|
|
|
logger.F("args", args),
|
|
|
|
)
|
|
|
|
|
2024-01-11 19:30:30 +01:00
|
|
|
row = tx.QueryRowContext(ctx, query, args...)
|
2023-02-09 12:16:36 +01:00
|
|
|
|
|
|
|
var (
|
|
|
|
createdAt time.Time
|
|
|
|
updatedAt time.Time
|
|
|
|
data JSONMap
|
|
|
|
)
|
|
|
|
|
2024-01-11 19:30:30 +01:00
|
|
|
if err := row.Scan(&id, &revision, &data, &createdAt, &updatedAt); err != nil {
|
2023-02-09 12:16:36 +01:00
|
|
|
return errors.WithStack(err)
|
|
|
|
}
|
|
|
|
|
2023-04-06 14:45:50 +02:00
|
|
|
if err := row.Err(); err != nil {
|
|
|
|
return errors.WithStack(err)
|
|
|
|
}
|
|
|
|
|
2023-02-09 12:16:36 +01:00
|
|
|
upsertedDocument = storage.Document(data)
|
|
|
|
|
|
|
|
upsertedDocument[storage.DocumentAttrID] = id
|
2024-01-11 19:30:30 +01:00
|
|
|
upsertedDocument[storage.DocumentAttrRevision] = revision
|
2023-02-09 12:16:36 +01:00
|
|
|
upsertedDocument[storage.DocumentAttrCreatedAt] = createdAt
|
|
|
|
upsertedDocument[storage.DocumentAttrUpdatedAt] = updatedAt
|
|
|
|
|
|
|
|
return nil
|
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
return nil, errors.WithStack(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
return upsertedDocument, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (s *DocumentStore) withTx(ctx context.Context, fn func(tx *sql.Tx) error) error {
|
|
|
|
var db *sql.DB
|
|
|
|
|
2023-04-06 14:45:50 +02:00
|
|
|
db, err := s.getDB(ctx)
|
2023-02-09 12:16:36 +01:00
|
|
|
if err != nil {
|
|
|
|
return errors.WithStack(err)
|
|
|
|
}
|
|
|
|
|
2023-10-22 23:18:02 +02:00
|
|
|
if err := WithRetry(ctx, db, sqliteBusyMaxRetry, fn); err != nil {
|
2023-02-09 12:16:36 +01:00
|
|
|
return errors.WithStack(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2024-01-11 19:30:30 +01:00
|
|
|
func migrateSchema(ctx context.Context, db *sql.DB) error {
|
2023-04-13 10:16:48 +02:00
|
|
|
err := WithTx(ctx, db, func(tx *sql.Tx) error {
|
2024-01-11 19:30:30 +01:00
|
|
|
for _, migr := range documentStoreMigrations {
|
|
|
|
if err := migr(ctx, tx); err != nil {
|
|
|
|
return errors.WithStack(err)
|
|
|
|
}
|
2023-02-09 12:16:36 +01:00
|
|
|
}
|
|
|
|
return nil
|
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
return errors.WithStack(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2023-03-01 12:12:11 +01:00
|
|
|
func withOrderByClause(query string, args []any, orderBy string, orderDirection storage.OrderDirection) (string, []any) {
|
|
|
|
direction := "ASC"
|
|
|
|
if orderDirection == storage.OrderDirectionDesc {
|
|
|
|
direction = "DESC"
|
|
|
|
}
|
|
|
|
|
|
|
|
var column string
|
|
|
|
|
|
|
|
switch orderBy {
|
|
|
|
case storage.DocumentAttrID:
|
|
|
|
column = "id"
|
|
|
|
|
|
|
|
case storage.DocumentAttrCreatedAt:
|
|
|
|
column = "created_at"
|
|
|
|
|
|
|
|
case storage.DocumentAttrUpdatedAt:
|
|
|
|
column = "updated_at"
|
|
|
|
|
|
|
|
default:
|
|
|
|
column = fmt.Sprintf("json_extract(data, '$.' || $%d)", len(args)+1)
|
|
|
|
args = append(args, orderBy)
|
|
|
|
}
|
|
|
|
|
2023-03-01 13:06:32 +01:00
|
|
|
query += fmt.Sprintf(` ORDER BY %s %s`, column, direction)
|
2023-03-01 12:12:11 +01:00
|
|
|
|
|
|
|
return query, args
|
|
|
|
}
|
|
|
|
|
|
|
|
// withLimitOffsetClause appends a LIMIT/OFFSET clause to query, binding
// limit and offset as the next two prepared parameters. It returns the
// extended query and argument list.
func withLimitOffsetClause(query string, args []any, limit int, offset int) (string, []any) {
	next := len(args) + 1
	clause := fmt.Sprintf(` LIMIT $%d OFFSET $%d`, next, next+1)

	return query + clause, append(args, limit, offset)
}
|
|
|
|
|
2023-02-09 12:16:36 +01:00
|
|
|
func NewDocumentStore(path string) *DocumentStore {
|
2024-01-11 19:30:30 +01:00
|
|
|
getDB := NewGetDBFunc(path, migrateSchema)
|
2023-04-06 14:45:50 +02:00
|
|
|
|
2023-02-09 12:16:36 +01:00
|
|
|
return &DocumentStore{
|
2023-04-06 14:45:50 +02:00
|
|
|
getDB: getDB,
|
2023-02-09 12:16:36 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func NewDocumentStoreWithDB(db *sql.DB) *DocumentStore {
|
2024-01-11 19:30:30 +01:00
|
|
|
getDB := NewGetDBFuncFromDB(db, migrateSchema)
|
2023-04-06 14:45:50 +02:00
|
|
|
|
2023-02-09 12:16:36 +01:00
|
|
|
return &DocumentStore{
|
2023-04-06 14:45:50 +02:00
|
|
|
getDB: getDB,
|
2023-02-09 12:16:36 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Compile-time check that DocumentStore satisfies storage.DocumentStore.
var _ storage.DocumentStore = &DocumentStore{}
|