Compare commits

..

24 Commits

Author SHA1 Message Date
776dbba5b0 Merge pull request 'feat(storage): add revision number to documents' (#24) from document-revision into master
All checks were successful
arcad/edge/pipeline/head This commit looks good
Reviewed-on: #24
2024-01-12 10:07:46 +01:00
8f9428b3f3 feat(storage): add revision number to documents
Some checks are pending
arcad/edge/pipeline/head Build started...
arcad/edge/pipeline/pr-master Build started...
ref #13
2024-01-12 10:05:47 +01:00
a268759d33 Merge pull request 'Implementation of an LFU-type cache system for the BlobStore' (#23) from lfu-cache into master
All checks were successful
arcad/edge/pipeline/head This commit looks good
Reviewed-on: #23
2024-01-10 13:22:51 +01:00
a276b92a03 feat: implement lfu based cache strategy
All checks were successful
arcad/edge/pipeline/head This commit looks good
arcad/edge/pipeline/pr-master This commit looks good
2024-01-10 13:16:52 +01:00
b9c08f647c feat: use go 1.21.5
All checks were successful
arcad/edge/pipeline/head This commit looks good
2023-12-05 22:40:53 +01:00
59f023a7d9 fix: do not use goja.Value outside of loop
All checks were successful
arcad/edge/pipeline/head This commit looks good
ref #22
2023-12-05 21:27:43 +01:00
753a6c9708 fix: temporarily write blob directly as response body without http.ServeContent
All checks were successful
arcad/edge/pipeline/head This commit looks good
2023-12-05 14:18:22 +01:00
b120e590b6 fix: do not use goja.Value outside of run loop 2023-12-05 14:14:08 +01:00
242bf379a8 feat: rewrite cache blobstore driver parameters parsing
All checks were successful
arcad/edge/pipeline/head This commit looks good
2023-12-03 14:26:57 +01:00
065a9002a0 fix(storage): use missing cache driver options
All checks were successful
arcad/edge/pipeline/head This commit looks good
2023-12-01 15:20:12 +01:00
83a1e89665 feat: use forked version of bigcache to prevent 64bits misalignment problems
All checks were successful
arcad/edge/pipeline/head This commit looks good
See https://github.com/allegro/bigcache/issues/368
See https://golang.org/pkg/sync/atomic/#pkg-note-BUG
2023-12-01 12:22:53 +01:00
d9e8aac458 feat(packaging): rotate storage-server log files on alpine
All checks were successful
arcad/edge/pipeline/head This commit looks good
2023-11-30 19:54:00 +01:00
32f04af138 feat(storage): improve caching in cache driver
All checks were successful
arcad/edge/pipeline/head This commit looks good
ref #20
2023-11-30 19:09:51 +01:00
870db072e0 Merge pull request 'Rewrite of the bus package to avoid deadlocks' (#21) from bus-rewrite into master
All checks were successful
arcad/edge/pipeline/head This commit looks good
Reviewed-on: #21
2023-11-30 15:10:50 +01:00
ad49c1718c feat: rewrite bus to prevent deadlocks
All checks were successful
arcad/edge/pipeline/head This commit looks good
arcad/edge/pipeline/pr-master This commit looks good
2023-11-30 15:02:36 +01:00
f4a7366aad feat(storage): rpc driver client pooling and memory-constrained cache driver
All checks were successful
arcad/edge/pipeline/head This commit looks good
ref #20
2023-11-29 11:10:29 +01:00
02c74b6f8d feat(client): add loader for apps menu
All checks were successful
arcad/edge/pipeline/head This commit looks good
2023-10-25 21:27:41 +02:00
8889694125 feat(cli): add basic bundle info command
All checks were successful
arcad/edge/pipeline/head This commit looks good
2023-10-24 22:52:51 +02:00
6a99409a15 feat(blobstore): add cache driver 2023-10-24 22:52:33 +02:00
2fc590d708 feat(storage): retry sqlite failed transaction when database is busy
All checks were successful
arcad/edge/pipeline/head This commit looks good
2023-10-22 23:18:02 +02:00
6e4bf2f025 feat(storage): remap rpc errors
All checks were successful
arcad/edge/pipeline/head This commit looks good
2023-10-22 23:04:56 +02:00
22a3326be9 feat(lifecycle): execute onInit func asynchronously
All checks were successful
arcad/edge/pipeline/head This commit looks good
2023-10-22 10:47:44 +02:00
0cfb132b65 feat(lifecycle-module): add debug message for onInit() execution
All checks were successful
arcad/edge/pipeline/head This commit looks good
2023-10-21 21:46:51 +02:00
de4ab0d02c fix(bus): prevent double close in event dispatcher
All checks were successful
arcad/edge/pipeline/head This commit looks good
2023-10-21 21:38:34 +02:00
116 changed files with 5042 additions and 1514 deletions

View File

@ -1,4 +1,4 @@
RUN_APP_ARGS=""
#EDGE_DOCUMENTSTORE_DSN="rpc://localhost:3001/documentstore?tenant=local&appId=%APPID%"
#EDGE_BLOBSTORE_DSN="rpc://localhost:3001/blobstore?tenant=local&appId=%APPID%"
#EDGE_BLOBSTORE_DSN="cache://localhost:3001/blobstore?driver=rpc&tenant=local&appId=%APPID%&blobCacheStoreType=fs&blobCacheStoreBaseDir=data/cache/%APPID%&blobCacheSize=64MB"
#EDGE_SHARESTORE_DSN="rpc://localhost:3001/sharestore?tenant=local"

2
.gitignore vendored
View File

@ -2,7 +2,7 @@
/bin
/.env
/tools
*.sqlite
*.sqlite*
/.gitea-release
/.edge
/data

View File

@ -108,10 +108,17 @@ nfpms:
file_info:
mode: 0640
packager: apk
- src: misc/packaging/openrc/storage-server.logrotate.conf
dst: /etc/logrotate.d/storage-server
packager: apk
- dst: /var/lib/storage-server
type: dir
file_info:
mode: 0700
packager: apk
- dst: /var/log/storage-server
type: dir
file_info:
mode: 0700
scripts:
postinstall: "misc/packaging/common/postinstall-storage-server.sh"

146
cmd/blobstore-test/main.go Normal file
View File

@ -0,0 +1,146 @@
package main
import (
"context"
"crypto/rand"
"flag"
"io"
mrand "math/rand"
"runtime"
"time"
"forge.cadoles.com/arcad/edge/pkg/storage"
"forge.cadoles.com/arcad/edge/pkg/storage/driver"
"github.com/pkg/errors"
"gitlab.com/wpetit/goweb/logger"
_ "forge.cadoles.com/arcad/edge/pkg/storage/driver/cache"
_ "forge.cadoles.com/arcad/edge/pkg/storage/driver/rpc"
_ "forge.cadoles.com/arcad/edge/pkg/storage/driver/sqlite"
)
var (
dsn string
)
func init() {
flag.StringVar(&dsn, "dsn", "cache://./test-cache.sqlite?driver=sqlite&_pragma=foreign_keys(1)&_pragma=journal_mode=wal&bigCacheShards=32&bigCacheHardMaxCacheSize=128&bigCacheMaxEntrySize=125&bigCacheMaxEntriesInWindow=200000", "blobstore dsn")
}
func main() {
flag.Parse()
ctx := context.Background()
logger.SetLevel(logger.LevelDebug)
blobStore, err := driver.NewBlobStore(dsn)
if err != nil {
logger.Fatal(ctx, "could not create blobstore", logger.CapturedE(errors.WithStack(err)))
}
bucket, err := blobStore.OpenBucket(ctx, "default")
if err != nil {
logger.Fatal(ctx, "could not open bucket", logger.CapturedE(errors.WithStack(err)))
}
defer func() {
if err := bucket.Close(); err != nil {
logger.Fatal(ctx, "could not close bucket", logger.CapturedE(errors.WithStack(err)))
}
}()
go readRandomBlobs(ctx, bucket)
for {
writeRandomBlob(ctx, bucket)
time.Sleep(1 * time.Second)
size, err := bucket.Size(ctx)
if err != nil {
logger.Fatal(ctx, "could not retrieve bucket size", logger.CapturedE(errors.WithStack(err)))
}
logger.Debug(ctx, "bucket stats", logger.F("size", size))
}
}
func readRandomBlobs(ctx context.Context, bucket storage.BlobBucket) {
for {
infos, err := bucket.List(ctx)
if err != nil {
logger.Fatal(ctx, "could not list blobs", logger.CapturedE(errors.WithStack(err)))
}
total := len(infos)
if total == 0 {
logger.Debug(ctx, "no blob yet")
continue
}
blob := infos[mrand.Intn(total)]
readBlob(ctx, bucket, blob.ID())
time.Sleep(250 * time.Millisecond)
}
}
func readBlob(ctx context.Context, bucket storage.BlobBucket, blobID storage.BlobID) {
ctx = logger.With(ctx, logger.F("blobID", blobID))
reader, err := bucket.NewReader(ctx, blobID)
if err != nil {
logger.Fatal(ctx, "could not create reader", logger.CapturedE(errors.WithStack(err)))
}
defer func() {
if err := reader.Close(); err != nil {
logger.Fatal(ctx, "could not close reader", logger.CapturedE(errors.WithStack(err)))
}
}()
if _, err := io.ReadAll(reader); err != nil {
logger.Fatal(ctx, "could not read blob", logger.CapturedE(errors.WithStack(err)))
}
}
func writeRandomBlob(ctx context.Context, bucket storage.BlobBucket) {
blobID := storage.NewBlobID()
buff := make([]byte, 10*1024)
writer, err := bucket.NewWriter(ctx, blobID)
if err != nil {
logger.Fatal(ctx, "could not create writer", logger.CapturedE(errors.WithStack(err)))
}
defer func() {
if err := writer.Close(); err != nil {
logger.Fatal(ctx, "could not close writer", logger.CapturedE(errors.WithStack(err)))
}
}()
if _, err := rand.Read(buff); err != nil {
logger.Fatal(ctx, "could not read random data", logger.CapturedE(errors.WithStack(err)))
}
if _, err := writer.Write(buff); err != nil {
logger.Fatal(ctx, "could not write blob", logger.CapturedE(errors.WithStack(err)))
}
printMemUsage(ctx)
}
func printMemUsage(ctx context.Context) {
var m runtime.MemStats
runtime.ReadMemStats(&m)
logger.Debug(
ctx, "memory usage",
logger.F("alloc", m.Alloc/1024/1024),
logger.F("totalAlloc", m.TotalAlloc/1024/1024),
logger.F("sys", m.Sys/1024/1024),
logger.F("numGC", m.NumGC),
)
}

View File

@ -0,0 +1,56 @@
package app
import (
"os"
"forge.cadoles.com/arcad/edge/pkg/app"
"forge.cadoles.com/arcad/edge/pkg/bundle"
"github.com/pkg/errors"
"github.com/urfave/cli/v2"
"gopkg.in/yaml.v2"
)
func InfoCommand() *cli.Command {
return &cli.Command{
Name: "info",
Usage: "Print app manifest informations",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "path",
Usage: "use `PATH` as app bundle (zip, zim or directory bundle)",
Aliases: []string{"p"},
Value: "",
Required: true,
},
},
Action: func(ctx *cli.Context) error {
appPath := ctx.String("path")
bundle, err := bundle.FromPath(appPath)
if err != nil {
return errors.Wrap(err, "could not load app bundle")
}
manifest, err := app.LoadManifest(bundle)
if err != nil {
return errors.Wrap(err, "could not load app manifest")
}
if valid, err := manifest.Validate(manifestMetadataValidators...); !valid {
return errors.Wrap(err, "invalid app manifest")
}
encoder := yaml.NewEncoder(os.Stdout)
if err := encoder.Encode(manifest); err != nil {
return errors.Wrap(err, "could not encode manifest")
}
if err := encoder.Close(); err != nil {
return errors.WithStack(err)
}
return nil
},
}
}

View File

@ -12,6 +12,7 @@ func Root() *cli.Command {
RunCommand(),
PackageCommand(),
HashPasswordCommand(),
InfoCommand(),
},
}
}

View File

@ -23,10 +23,11 @@ import (
authModule "forge.cadoles.com/arcad/edge/pkg/module/auth"
authHTTP "forge.cadoles.com/arcad/edge/pkg/module/auth/http"
authModuleMiddleware "forge.cadoles.com/arcad/edge/pkg/module/auth/middleware"
"forge.cadoles.com/arcad/edge/pkg/module/blob"
"forge.cadoles.com/arcad/edge/pkg/module/cast"
"forge.cadoles.com/arcad/edge/pkg/module/fetch"
blobModule "forge.cadoles.com/arcad/edge/pkg/module/blob"
castModule "forge.cadoles.com/arcad/edge/pkg/module/cast"
fetchModule "forge.cadoles.com/arcad/edge/pkg/module/fetch"
netModule "forge.cadoles.com/arcad/edge/pkg/module/net"
rpcModule "forge.cadoles.com/arcad/edge/pkg/module/rpc"
shareModule "forge.cadoles.com/arcad/edge/pkg/module/share"
"forge.cadoles.com/arcad/edge/pkg/storage"
"gitlab.com/wpetit/goweb/logger"
@ -44,10 +45,13 @@ import (
_ "forge.cadoles.com/arcad/edge/pkg/module/auth/http/passwd/argon2id"
_ "forge.cadoles.com/arcad/edge/pkg/module/auth/http/passwd/plain"
// Register storage drivers
"forge.cadoles.com/arcad/edge/pkg/storage/driver"
// Register storage drivers
_ "forge.cadoles.com/arcad/edge/pkg/storage/driver/cache"
_ "forge.cadoles.com/arcad/edge/pkg/storage/driver/rpc"
_ "forge.cadoles.com/arcad/edge/pkg/storage/driver/sqlite"
"forge.cadoles.com/arcad/edge/pkg/storage/share"
)
@ -103,6 +107,11 @@ func RunCommand() *cli.Command {
Usage: "use `FILE` as local accounts",
Value: ".edge/%APPID%/accounts.json",
},
&cli.Int64Flag{
Name: "max-upload-size",
Usage: "use `MAX-UPLOAD-SIZE` as blob max upload size",
Value: 128 << (10 * 2), // 128Mb
},
},
Action: func(ctx *cli.Context) error {
address := ctx.String("address")
@ -114,6 +123,7 @@ func RunCommand() *cli.Command {
documentstoreDSN := ctx.String("documentstore-dsn")
shareStoreDSN := ctx.String("sharestore-dsn")
accountsFile := ctx.String("accounts-file")
maxUploadSize := ctx.Int64("max-upload-size")
logger.SetFormat(logger.Format(logFormat))
logger.SetLevel(logger.Level(logLevel))
@ -159,7 +169,7 @@ func RunCommand() *cli.Command {
appCtx := logger.With(cmdCtx, logger.F("address", address))
if err := runApp(appCtx, path, address, documentstoreDSN, blobstoreDSN, shareStoreDSN, accountsFile, appsRepository); err != nil {
if err := runApp(appCtx, path, address, documentstoreDSN, blobstoreDSN, shareStoreDSN, accountsFile, appsRepository, maxUploadSize); err != nil {
logger.Error(appCtx, "could not run app", logger.CapturedE(errors.WithStack(err)))
}
}(p, port, idx)
@ -172,7 +182,7 @@ func RunCommand() *cli.Command {
}
}
func runApp(ctx context.Context, path, address, documentStoreDSN, blobStoreDSN, shareStoreDSN, accountsFile string, appRepository appModule.Repository) error {
func runApp(ctx context.Context, path, address, documentStoreDSN, blobStoreDSN, shareStoreDSN, accountsFile string, appRepository appModule.Repository, maxUploadSize int64) error {
absPath, err := filepath.Abs(path)
if err != nil {
return errors.Wrapf(err, "could not resolve path '%s'", path)
@ -233,6 +243,8 @@ func runApp(ctx context.Context, path, address, documentStoreDSN, blobStoreDSN,
return jwtutil.NewSymmetricKeySet(dummySecret)
}),
),
blobModule.Mount(maxUploadSize), // 10Mb,
fetchModule.Mount(),
),
appHTTP.WithHTTPMiddlewares(
authModuleMiddleware.AnonymousUser(key, jwa.HS256),
@ -275,18 +287,18 @@ func getServerModules(deps *moduleDeps) []app.ServerModuleFactory {
module.LifecycleModuleFactory(),
module.ContextModuleFactory(),
module.ConsoleModuleFactory(),
cast.CastModuleFactory(),
castModule.CastModuleFactory(),
netModule.ModuleFactory(deps.Bus),
module.RPCModuleFactory(deps.Bus),
rpcModule.ModuleFactory(deps.Bus),
module.StoreModuleFactory(deps.DocumentStore),
blob.ModuleFactory(deps.Bus, deps.BlobStore),
blobModule.ModuleFactory(deps.Bus, deps.BlobStore),
authModule.ModuleFactory(
authModule.WithJWT(func() (jwk.Set, error) {
return jwtutil.NewSymmetricKeySet(dummySecret)
}),
),
appModule.ModuleFactory(deps.AppRepository),
fetch.ModuleFactory(deps.Bus),
fetchModule.ModuleFactory(deps.Bus),
shareModule.ModuleFactory(deps.AppID, deps.ShareStore),
}
}

View File

@ -22,13 +22,15 @@ import (
"github.com/urfave/cli/v2"
// Register storage drivers
_ "forge.cadoles.com/arcad/edge/pkg/storage/driver/cache"
_ "forge.cadoles.com/arcad/edge/pkg/storage/driver/rpc"
_ "forge.cadoles.com/arcad/edge/pkg/storage/driver/sqlite"
"forge.cadoles.com/arcad/edge/cmd/storage-server/command/flag"
"forge.cadoles.com/arcad/edge/pkg/jwtutil"
"forge.cadoles.com/arcad/edge/pkg/storage"
"forge.cadoles.com/arcad/edge/pkg/storage/driver"
_ "forge.cadoles.com/arcad/edge/pkg/storage/driver/rpc"
"forge.cadoles.com/arcad/edge/pkg/storage/driver/rpc/server"
_ "forge.cadoles.com/arcad/edge/pkg/storage/driver/sqlite"
"forge.cadoles.com/arcad/edge/pkg/storage/share"
)
@ -51,17 +53,17 @@ func Run() *cli.Command {
&cli.StringFlag{
Name: "blobstore-dsn-pattern",
EnvVars: []string{"STORAGE_SERVER_BLOBSTORE_DSN_PATTERN"},
Value: fmt.Sprintf("sqlite://data/%%TENANT%%/%%APPID%%/blobstore.sqlite?_pragma=foreign_keys(1)&_pragma=busy_timeout=%d", (60 * time.Second).Milliseconds()),
Value: fmt.Sprintf("sqlite://data/%%TENANT%%/%%APPID%%/blobstore.sqlite?_pragma=foreign_keys(1)&_pragma=busy_timeout=%d&_pragma=journal_mode=wal", (60 * time.Second).Milliseconds()),
},
&cli.StringFlag{
Name: "documentstore-dsn-pattern",
EnvVars: []string{"STORAGE_SERVER_DOCUMENTSTORE_DSN_PATTERN"},
Value: fmt.Sprintf("sqlite://data/%%TENANT%%/%%APPID%%/documentstore.sqlite?_pragma=foreign_keys(1)&_pragma=busy_timeout=%d", (60 * time.Second).Milliseconds()),
Value: fmt.Sprintf("sqlite://data/%%TENANT%%/%%APPID%%/documentstore.sqlite?_pragma=foreign_keys(1)&_pragma=busy_timeout=%d&_pragma=journal_mode=wal", (60 * time.Second).Milliseconds()),
},
&cli.StringFlag{
Name: "sharestore-dsn-pattern",
EnvVars: []string{"STORAGE_SERVER_SHARESTORE_DSN_PATTERN"},
Value: fmt.Sprintf("sqlite://data/%%TENANT%%/sharestore.sqlite?_pragma=foreign_keys(1)&_pragma=busy_timeout=%d", (60 * time.Second).Milliseconds()),
Value: fmt.Sprintf("sqlite://data/%%TENANT%%/sharestore.sqlite?_pragma=foreign_keys(1)&_pragma=busy_timeout=%d&_pragma=journal_mode=wal", (60 * time.Second).Milliseconds()),
},
&cli.StringFlag{
Name: "sentry-dsn",

7
go.mod
View File

@ -6,10 +6,14 @@ require (
github.com/getsentry/sentry-go v0.25.0
github.com/hashicorp/golang-lru/v2 v2.0.7
github.com/hashicorp/mdns v1.0.5
github.com/inhies/go-bytesize v0.0.0-20220417184213-4913239db9cf
github.com/jackc/puddle/v2 v2.2.1
github.com/keegancsmith/rpc v1.3.0
github.com/klauspost/compress v1.16.6
github.com/lestrrat-go/jwx/v2 v2.0.8
github.com/mitchellh/hashstructure/v2 v2.0.2
github.com/ulikunitz/xz v0.5.11
go.uber.org/goleak v1.3.0
modernc.org/sqlite v1.20.4
)
@ -27,6 +31,7 @@ require (
github.com/lestrrat-go/iter v1.0.2 // indirect
github.com/lestrrat-go/option v1.0.0 // indirect
github.com/miekg/dns v1.1.53 // indirect
golang.org/x/sync v0.1.0 // indirect
google.golang.org/genproto v0.0.0-20210226172003-ab064af71705 // indirect
google.golang.org/grpc v1.35.0 // indirect
gopkg.in/go-playground/validator.v9 v9.29.1 // indirect
@ -83,3 +88,5 @@ require (
modernc.org/strutil v1.1.3 // indirect
modernc.org/token v1.0.1 // indirect
)
replace github.com/allegro/bigcache/v3 v3.1.0 => github.com/Bornholm/bigcache v0.0.0-20231201111725-1ddf51584cad

8
go.sum
View File

@ -206,6 +206,10 @@ github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/igm/sockjs-go/v3 v3.0.2 h1:2m0k53w0DBiGozeQUIEPR6snZFmpFpYvVsGnfLPNXbE=
github.com/igm/sockjs-go/v3 v3.0.2/go.mod h1:UqchsOjeagIBFHvd+RZpLaVRbCwGilEC08EDHsD1jYE=
github.com/inhies/go-bytesize v0.0.0-20220417184213-4913239db9cf h1:FtEj8sfIcaaBfAKrE1Cwb61YDtYq9JxChK1c7AKce7s=
github.com/inhies/go-bytesize v0.0.0-20220417184213-4913239db9cf/go.mod h1:yrqSXGoD/4EKfF26AOGzscPOgTTJcyAwM2rpixWT+t4=
github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk=
github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
@ -254,6 +258,8 @@ github.com/miekg/dns v0.0.0-20161006100029-fc4e1e2843d8/go.mod h1:W1PPwlIAgtquWB
github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
github.com/miekg/dns v1.1.53 h1:ZBkuHr5dxHtB1caEOlZTLPo7D3L3TWckgUUs/RHfDxw=
github.com/miekg/dns v1.1.53/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY=
github.com/mitchellh/hashstructure/v2 v2.0.2 h1:vGKWl0YJqUNxE8d+h8f6NJLcCJrgbhC4NcD46KavDd4=
github.com/mitchellh/hashstructure/v2 v2.0.2/go.mod h1:MG3aRVU/N29oo/V/IhBX8GR/zz4kQkprJgF2EVszyDE=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
@ -322,6 +328,8 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.5 h1:dntmOdLpSpHlVqbW5Eay97DelsZHe+55D+xC6i0dDS0=
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=

View File

@ -4,7 +4,7 @@ ARG HTTP_PROXY=
ARG HTTPS_PROXY=
ARG http_proxy=
ARG https_proxy=
ARG GO_VERSION=1.21.2
ARG GO_VERSION=1.21.5
# Install dev environment dependencies
RUN export DEBIAN_FRONTEND=noninteractive &&\

View File

@ -0,0 +1,9 @@
/var/log/storage-server/storage-server.log {
missingok
sharedscripts
compress
rotate 7
postrotate
/etc/init.d/storage-server restart
endscript
}

View File

@ -3,7 +3,7 @@
command="/usr/bin/storage-server"
command_args="run"
supervisor=supervise-daemon
output_log="/var/log/storage-server.log"
output_log="/var/log/storage-server/storage-server.log"
error_log="$output_log"
depend() {

View File

@ -46,7 +46,11 @@ func NewPromiseProxyFrom(rt *goja.Runtime) *PromiseProxy {
return NewPromiseProxy(promise, resolve, reject)
}
func IsPromise(v goja.Value) (*goja.Promise, bool) {
promise, ok := v.Export().(*goja.Promise)
func isPromise(v any) (*goja.Promise, bool) {
if v == nil {
return nil, false
}
promise, ok := v.(*goja.Promise)
return promise, ok
}

View File

@ -4,6 +4,7 @@ import (
"context"
"math/rand"
"sync"
"time"
"github.com/dop251/goja"
"github.com/dop251/goja_nodejs/eventloop"
@ -22,23 +23,7 @@ type Server struct {
modules []ServerModule
}
func (s *Server) Load(name string, src string) error {
var err error
s.loop.RunOnLoop(func(rt *goja.Runtime) {
_, err = rt.RunScript(name, src)
if err != nil {
err = errors.Wrap(err, "could not run js script")
}
})
if err != nil {
return errors.WithStack(err)
}
return nil
}
func (s *Server) ExecFuncByName(ctx context.Context, funcName string, args ...interface{}) (goja.Value, error) {
func (s *Server) ExecFuncByName(ctx context.Context, funcName string, args ...any) (any, error) {
ctx = logger.With(ctx, logger.F("function", funcName), logger.F("args", args))
ret, err := s.Exec(ctx, funcName, args...)
@ -49,16 +34,23 @@ func (s *Server) ExecFuncByName(ctx context.Context, funcName string, args ...in
return ret, nil
}
func (s *Server) Exec(ctx context.Context, callableOrFuncname any, args ...interface{}) (goja.Value, error) {
var (
wg sync.WaitGroup
value goja.Value
func (s *Server) Exec(ctx context.Context, callableOrFuncname any, args ...any) (any, error) {
type result struct {
value any
err error
)
}
wg.Add(1)
done := make(chan result)
defer func() {
// Drain done channel
for range done {
}
}()
s.loop.RunOnLoop(func(rt *goja.Runtime) {
defer close(done)
var callable goja.Callable
switch typ := callableOrFuncname.(type) {
case goja.Callable:
@ -67,7 +59,9 @@ func (s *Server) Exec(ctx context.Context, callableOrFuncname any, args ...inter
case string:
call, ok := goja.AssertFunction(rt.Get(typ))
if !ok {
err = errors.WithStack(ErrFuncDoesNotExist)
done <- result{
err: errors.WithStack(ErrFuncDoesNotExist),
}
return
}
@ -75,28 +69,27 @@ func (s *Server) Exec(ctx context.Context, callableOrFuncname any, args ...inter
callable = call
default:
err = errors.Errorf("callableOrFuncname: expected callable or function name, got '%T'", callableOrFuncname)
done <- result{
err: errors.Errorf("callableOrFuncname: expected callable or function name, got '%T'", callableOrFuncname),
}
return
}
logger.Debug(ctx, "executing callable")
defer wg.Done()
defer func() {
if recovered := recover(); recovered != nil {
revoveredErr, ok := recovered.(error)
if ok {
logger.Error(ctx, "recovered runtime error", logger.CapturedE(errors.WithStack(revoveredErr)))
err = errors.WithStack(ErrUnknownError)
recovered := recover()
if recovered == nil {
return
}
recoveredErr, ok := recovered.(error)
if !ok {
panic(recovered)
}
done <- result{
err: recoveredErr,
}
}()
jsArgs := make([]goja.Value, 0, len(args))
@ -104,25 +97,50 @@ func (s *Server) Exec(ctx context.Context, callableOrFuncname any, args ...inter
jsArgs = append(jsArgs, rt.ToValue(a))
}
value, err = callable(nil, jsArgs...)
logger.Debug(ctx, "executing callable", logger.F("callable", callableOrFuncname))
start := time.Now()
value, err := callable(nil, jsArgs...)
if err != nil {
err = errors.WithStack(err)
done <- result{
err: errors.WithStack(err),
}
return
}
done <- result{
value: value.Export(),
}
logger.Debug(ctx, "executed callable", logger.F("callable", callableOrFuncname), logger.F("duration", time.Since(start).String()))
})
wg.Wait()
if err != nil {
select {
case <-ctx.Done():
if err := ctx.Err(); err != nil {
return nil, errors.WithStack(err)
}
return value, nil
return nil, nil
case result := <-done:
if result.err != nil {
return nil, errors.WithStack(result.err)
}
if promise, ok := isPromise(result.value); ok {
return s.waitForPromise(promise), nil
}
return result.value, nil
}
}
func (s *Server) WaitForPromise(promise *goja.Promise) goja.Value {
func (s *Server) waitForPromise(promise *goja.Promise) any {
var (
wg sync.WaitGroup
value goja.Value
value any
)
wg.Add(1)
@ -142,7 +160,7 @@ func (s *Server) WaitForPromise(promise *goja.Promise) goja.Value {
return
}
value = promise.Result()
value = promise.Result().Export()
breakLoop = true
})
@ -162,20 +180,40 @@ func (s *Server) WaitForPromise(promise *goja.Promise) goja.Value {
return value
}
func (s *Server) Start(ctx context.Context) error {
func (s *Server) Start(ctx context.Context, name string, src string) error {
s.loop.Start()
var err error
done := make(chan error)
s.loop.RunOnLoop(func(rt *goja.Runtime) {
defer close(done)
rt.SetFieldNameMapper(goja.TagFieldNameMapper("goja", true))
rt.SetRandSource(createRandomSource())
if err = s.initModules(ctx, rt); err != nil {
if err := s.loadModules(ctx, rt); err != nil {
err = errors.WithStack(err)
done <- err
return
}
if _, err := rt.RunScript(name, src); err != nil {
done <- errors.Wrap(err, "could not run js script")
return
}
if err := s.initModules(ctx, rt); err != nil {
err = errors.WithStack(err)
done <- err
return
}
done <- nil
})
if err != nil {
if err := <-done; err != nil {
return errors.WithStack(err)
}
@ -186,7 +224,7 @@ func (s *Server) Stop() {
s.loop.Stop()
}
func (s *Server) initModules(ctx context.Context, rt *goja.Runtime) error {
func (s *Server) loadModules(ctx context.Context, rt *goja.Runtime) error {
modules := make([]ServerModule, 0, len(s.factories))
for _, moduleFactory := range s.factories {
@ -200,7 +238,13 @@ func (s *Server) initModules(ctx context.Context, rt *goja.Runtime) error {
modules = append(modules, mod)
}
for _, mod := range modules {
s.modules = modules
return nil
}
func (s *Server) initModules(ctx context.Context, rt *goja.Runtime) error {
for _, mod := range s.modules {
initMod, ok := mod.(InitializableModule)
if !ok {
continue
@ -213,8 +257,6 @@ func (s *Server) initModules(ctx context.Context, rt *goja.Runtime) error {
}
}
s.modules = modules
return nil
}
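
The rewritten pkg/app/server.go above is what carries the "do not use goja.Value outside of run loop" fixes: Exec and ExecFuncByName now export results to plain Go values (any) on the event loop, promises returned by the script are awaited internally, and Start takes over script loading from the removed Load method. The sketch below shows how a caller might drive the new API; it is illustrative only, the factory list, script name and "onInit" function are placeholders, and error handling is trimmed.

package example

import (
	"context"

	"forge.cadoles.com/arcad/edge/pkg/app"
	"github.com/pkg/errors"
)

// runScript sketches the reworked app.Server lifecycle under the assumptions
// stated above: Start(ctx, name, src) loads and runs the main script, and
// ExecFuncByName returns an exported Go value instead of a goja.Value, so the
// result can safely be used outside the event loop.
func runScript(ctx context.Context, src string, factories ...app.ServerModuleFactory) (any, error) {
	srv := app.NewServer(factories...)

	if err := srv.Start(ctx, "server/main.js", src); err != nil {
		return nil, errors.WithStack(err)
	}
	defer srv.Stop()

	// Any promise returned by the script is resolved on the loop before returning.
	result, err := srv.ExecFuncByName(ctx, "onInit")
	if err != nil {
		return nil, errors.WithStack(err)
	}

	return result, nil
}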

View File

@ -3,11 +3,11 @@ package bus
import "context"
type Bus interface {
Subscribe(ctx context.Context, ns MessageNamespace) (<-chan Message, error)
Unsubscribe(ctx context.Context, ns MessageNamespace, ch <-chan Message)
Publish(ctx context.Context, msg Message) error
Request(ctx context.Context, msg Message) (Message, error)
Reply(ctx context.Context, ns MessageNamespace, h RequestHandler) error
Subscribe(ctx context.Context, addr Address) (<-chan Envelope, error)
Unsubscribe(addr Address, ch <-chan Envelope)
Publish(env Envelope) error
Request(ctx context.Context, env Envelope) (Envelope, error)
Reply(ctx context.Context, addr Address, h RequestHandler) chan error
}
type RequestHandler func(msg Message) (Message, error)
type RequestHandler func(env Envelope) (any, error)
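
The interface above replaces namespaced, typed messages with plain addresses and envelopes: Publish and Unsubscribe lose their context parameter, payloads travel as any, and Reply now returns a channel of handler errors instead of a single error. A minimal sketch of both patterns against the in-memory implementation follows; the import path of the memory package and the address value are assumptions.

package main

import (
	"context"
	"fmt"

	"forge.cadoles.com/arcad/edge/pkg/bus"
	"forge.cadoles.com/arcad/edge/pkg/bus/memory" // assumed import path of the in-memory bus
)

func main() {
	ctx := context.Background()
	b := memory.NewBus()

	const addr bus.Address = "example/address" // illustrative address

	// Publish/Subscribe: the address now travels with the envelope itself.
	envelopes, err := b.Subscribe(ctx, addr)
	if err != nil {
		panic(err)
	}
	defer b.Unsubscribe(addr, envelopes) // Unsubscribe no longer takes a context

	if err := b.Publish(bus.NewEnvelope(addr, "hello")); err != nil {
		panic(err)
	}

	env := <-envelopes
	fmt.Println(env.Address(), env.Message()) // payload is exposed as plain any

	// Request/Reply: Reply returns a channel of handler errors instead of blocking.
	errs := b.Reply(ctx, addr, func(env bus.Envelope) (any, error) {
		return env.Message(), nil // echo the request payload
	})
	go func() {
		for err := range errs {
			fmt.Println("reply error:", err)
		}
	}()

	reply, err := b.Request(ctx, bus.NewEnvelope(addr, 42))
	if err != nil {
		panic(err)
	}

	fmt.Println(reply.Message())
}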

32
pkg/bus/envelope.go Normal file
View File

@ -0,0 +1,32 @@
package bus
type Address string
type Envelope interface {
Message() any
Address() Address
}
type BaseEnvelope struct {
msg any
addr Address
}
// Address implements Envelope.
func (e *BaseEnvelope) Address() Address {
return e.addr
}
// Message implements Envelope.
func (e *BaseEnvelope) Message() any {
return e.msg
}
func NewEnvelope(addr Address, msg any) *BaseEnvelope {
return &BaseEnvelope{
addr: addr,
msg: msg,
}
}
var _ Envelope = &BaseEnvelope{}

View File

@ -15,13 +15,13 @@ type Bus struct {
nextRequestID uint64
}
func (b *Bus) Subscribe(ctx context.Context, ns bus.MessageNamespace) (<-chan bus.Message, error) {
func (b *Bus) Subscribe(ctx context.Context, address bus.Address) (<-chan bus.Envelope, error) {
logger.Debug(
ctx, "subscribing to messages",
logger.F("messageNamespace", ns),
ctx, "subscribing",
logger.F("address", address),
)
dispatchers := b.getDispatchers(ns)
dispatchers := b.getDispatchers(address)
disp := newEventDispatcher(b.opt.BufferSize)
go disp.Run(ctx)
@ -31,50 +31,41 @@ func (b *Bus) Subscribe(ctx context.Context, ns bus.MessageNamespace) (<-chan bu
return disp.Out(), nil
}
func (b *Bus) Unsubscribe(ctx context.Context, ns bus.MessageNamespace, ch <-chan bus.Message) {
func (b *Bus) Unsubscribe(address bus.Address, ch <-chan bus.Envelope) {
logger.Debug(
ctx, "unsubscribing from messages",
logger.F("messageNamespace", ns),
context.Background(), "unsubscribing",
logger.F("address", address),
)
dispatchers := b.getDispatchers(ns)
dispatchers := b.getDispatchers(address)
dispatchers.RemoveByOutChannel(ch)
}
func (b *Bus) Publish(ctx context.Context, msg bus.Message) error {
dispatchers := b.getDispatchers(msg.MessageNamespace())
dispatchersList := dispatchers.List()
func (b *Bus) Publish(env bus.Envelope) error {
dispatchers := b.getDispatchers(env.Address())
logger.Debug(
ctx, "publishing message",
logger.F("dispatchers", len(dispatchersList)),
logger.F("messageNamespace", msg.MessageNamespace()),
context.Background(), "publish",
logger.F("address", env.Address()),
)
for _, d := range dispatchersList {
if d.Closed() {
dispatchers.Remove(d)
continue
}
if err := d.In(msg); err != nil {
return errors.WithStack(err)
}
dispatchers.Range(func(d *eventDispatcher) {
if err := d.In(env); err != nil {
logger.Error(context.Background(), "could not publish message", logger.CapturedE(errors.WithStack(err)))
}
})
return nil
}
func (b *Bus) getDispatchers(namespace bus.MessageNamespace) *eventDispatcherSet {
strNamespace := string(namespace)
func (b *Bus) getDispatchers(address bus.Address) *eventDispatcherSet {
rawAddress := string(address)
rawDispatchers, exists := b.dispatchers.Get(strNamespace)
rawDispatchers, exists := b.dispatchers.Get(rawAddress)
dispatchers, ok := rawDispatchers.(*eventDispatcherSet)
if !exists || !ok {
dispatchers = newEventDispatcherSet()
b.dispatchers.Set(strNamespace, dispatchers)
b.dispatchers.Set(rawAddress, dispatchers)
}
return dispatchers

View File

@ -4,13 +4,23 @@ import (
"testing"
busTesting "forge.cadoles.com/arcad/edge/pkg/bus/testing"
"gitlab.com/wpetit/goweb/logger"
"go.uber.org/goleak"
)
func TestMain(m *testing.M) {
goleak.VerifyTestMain(m)
}
func TestMemoryBus(t *testing.T) {
if testing.Short() {
t.Skip("Test disabled when -short flag is set")
}
if testing.Verbose() {
logger.SetLevel(logger.LevelDebug)
}
t.Parallel()
t.Run("PublishSubscribe", func(t *testing.T) {
@ -26,4 +36,11 @@ func TestMemoryBus(t *testing.T) {
b := NewBus()
busTesting.TestRequestReply(t, b)
})
t.Run("CanceledRequestReply", func(t *testing.T) {
t.Parallel()
b := NewBus()
busTesting.TestCanceledRequest(t, b)
})
}

View File

@ -3,7 +3,6 @@ package memory
import (
"context"
"sync"
"time"
"forge.cadoles.com/arcad/edge/pkg/bus"
"github.com/pkg/errors"
@ -30,7 +29,7 @@ func (s *eventDispatcherSet) Remove(d *eventDispatcher) {
delete(s.items, d)
}
func (s *eventDispatcherSet) RemoveByOutChannel(out <-chan bus.Message) {
func (s *eventDispatcherSet) RemoveByOutChannel(out <-chan bus.Envelope) {
s.mutex.Lock()
defer s.mutex.Unlock()
@ -42,17 +41,18 @@ func (s *eventDispatcherSet) RemoveByOutChannel(out <-chan bus.Message) {
}
}
func (s *eventDispatcherSet) List() []*eventDispatcher {
func (s *eventDispatcherSet) Range(fn func(d *eventDispatcher)) {
s.mutex.Lock()
defer s.mutex.Unlock()
dispatchers := make([]*eventDispatcher, 0, len(s.items))
for d := range s.items {
dispatchers = append(dispatchers, d)
if d.Closed() {
s.Remove(d)
continue
}
return dispatchers
fn(d)
}
}
func newEventDispatcherSet() *eventDispatcherSet {
@ -62,8 +62,8 @@ func newEventDispatcherSet() *eventDispatcherSet {
}
type eventDispatcher struct {
in chan bus.Message
out chan bus.Message
in chan bus.Envelope
out chan bus.Envelope
mutex sync.RWMutex
closed bool
}
@ -83,11 +83,15 @@ func (d *eventDispatcher) Close() {
}
func (d *eventDispatcher) close() {
d.closed = true
if d.closed {
return
}
close(d.in)
d.closed = true
}
func (d *eventDispatcher) In(msg bus.Message) (err error) {
func (d *eventDispatcher) In(msg bus.Envelope) (err error) {
d.mutex.RLock()
defer d.mutex.RUnlock()
@ -100,67 +104,52 @@ func (d *eventDispatcher) In(msg bus.Message) (err error) {
return nil
}
func (d *eventDispatcher) Out() <-chan bus.Message {
func (d *eventDispatcher) Out() <-chan bus.Envelope {
return d.out
}
func (d *eventDispatcher) IsOut(out <-chan bus.Message) bool {
func (d *eventDispatcher) IsOut(out <-chan bus.Envelope) bool {
return d.out == out
}
func (d *eventDispatcher) Run(ctx context.Context) {
defer func() {
for {
logger.Debug(ctx, "closing dispatcher, flushing out incoming messages")
close(d.out)
for range d.in {
// Flush all incoming messages
for {
_, ok := <-d.in
if !ok {
return
}
}
}
}()
for {
msg, ok := <-d.in
select {
case <-ctx.Done():
if err := ctx.Err(); !errors.Is(err, context.Canceled) {
logger.Error(
ctx,
"message subscription context canceled",
logger.CapturedE(errors.WithStack(err)),
)
}
return
case msg, ok := <-d.in:
if !ok {
return
}
timeout := time.After(time.Second)
select {
case d.out <- msg:
case <-timeout:
logger.Error(
ctx,
"out message channel timeout",
logger.F("message", msg),
)
return
case <-ctx.Done():
logger.Error(
ctx,
"message subscription context canceled",
logger.F("message", msg),
logger.CapturedE(errors.WithStack(ctx.Err())),
)
return
d.out <- msg
}
}
}
func newEventDispatcher(bufferSize int64) *eventDispatcher {
return &eventDispatcher{
in: make(chan bus.Message, bufferSize),
out: make(chan bus.Message, bufferSize),
in: make(chan bus.Envelope, bufferSize),
out: make(chan bus.Envelope, bufferSize),
closed: false,
}
}

View File

@ -11,57 +11,78 @@ import (
)
const (
MessageNamespaceRequest bus.MessageNamespace = "reqrep/request"
MessageNamespaceReply bus.MessageNamespace = "reqrep/reply"
AddressRequest bus.Address = "bus/memory/request"
AddressReply bus.Address = "bus/memory/reply"
)
type RequestMessage struct {
RequestID uint64
Message bus.Message
ns bus.MessageNamespace
type RequestEnvelope struct {
requestID uint64
wrapped bus.Envelope
}
func (m *RequestMessage) MessageNamespace() bus.MessageNamespace {
return m.ns
func (e *RequestEnvelope) Address() bus.Address {
return getRequestAddress(e.wrapped.Address())
}
type ReplyMessage struct {
RequestID uint64
Message bus.Message
Error error
ns bus.MessageNamespace
func (e *RequestEnvelope) Message() any {
return e.wrapped.Message()
}
func (m *ReplyMessage) MessageNamespace() bus.MessageNamespace {
return m.ns
func (e *RequestEnvelope) RequestID() uint64 {
return e.requestID
}
func (b *Bus) Request(ctx context.Context, msg bus.Message) (bus.Message, error) {
func (e *RequestEnvelope) Unwrap() bus.Envelope {
return e.wrapped
}
type ReplyEnvelope struct {
requestID uint64
wrapped bus.Envelope
err error
}
func (e *ReplyEnvelope) Address() bus.Address {
return getReplyAddress(e.wrapped.Address(), e.requestID)
}
func (e *ReplyEnvelope) Message() any {
return e.wrapped.Message()
}
func (e *ReplyEnvelope) Err() error {
return e.err
}
func (e *ReplyEnvelope) Unwrap() bus.Envelope {
return e.wrapped
}
func (b *Bus) Request(ctx context.Context, env bus.Envelope) (bus.Envelope, error) {
requestID := atomic.AddUint64(&b.nextRequestID, 1)
req := &RequestMessage{
RequestID: requestID,
Message: msg,
ns: msg.MessageNamespace(),
req := &RequestEnvelope{
requestID: requestID,
wrapped: env,
}
replyNamespace := createReplyNamespace(requestID)
replyAddress := getReplyAddress(env.Address(), requestID)
replies, err := b.Subscribe(ctx, replyNamespace)
subCtx, cancel := context.WithCancel(ctx)
defer cancel()
replies, err := b.Subscribe(subCtx, replyAddress)
if err != nil {
return nil, errors.WithStack(err)
}
defer func() {
b.Unsubscribe(ctx, replyNamespace, replies)
b.Unsubscribe(replyAddress, replies)
}()
logger.Debug(ctx, "publishing request", logger.F("request", req))
if err := b.Publish(ctx, req); err != nil {
if err := b.Publish(req); err != nil {
return nil, errors.WithStack(err)
}
@ -70,82 +91,93 @@ func (b *Bus) Request(ctx context.Context, msg bus.Message) (bus.Message, error)
case <-ctx.Done():
return nil, errors.WithStack(ctx.Err())
case msg, ok := <-replies:
case env, ok := <-replies:
if !ok {
return nil, errors.WithStack(bus.ErrNoResponse)
}
reply, ok := msg.(*ReplyMessage)
reply, ok := env.(*ReplyEnvelope)
if !ok {
return nil, errors.WithStack(bus.ErrUnexpectedMessage)
}
if reply.Error != nil {
if err := reply.Err(); err != nil {
return nil, errors.WithStack(err)
}
return reply.Message, nil
return reply.Unwrap(), nil
}
}
}
type RequestHandler func(evt bus.Message) (bus.Message, error)
func (b *Bus) Reply(ctx context.Context, address bus.Address, handler bus.RequestHandler) chan error {
requestAddress := getRequestAddress(address)
func (b *Bus) Reply(ctx context.Context, msgNamespace bus.MessageNamespace, h bus.RequestHandler) error {
requests, err := b.Subscribe(ctx, msgNamespace)
errs := make(chan error)
requests, err := b.Subscribe(ctx, requestAddress)
if err != nil {
return errors.WithStack(err)
go func() {
errs <- errors.WithStack(err)
close(errs)
}()
return errs
}
go func() {
defer func() {
b.Unsubscribe(ctx, msgNamespace, requests)
b.Unsubscribe(requestAddress, requests)
close(errs)
}()
for {
select {
case <-ctx.Done():
return errors.WithStack(ctx.Err())
errs <- errors.WithStack(ctx.Err())
return
case msg, ok := <-requests:
case env, ok := <-requests:
if !ok {
return nil
return
}
request, ok := msg.(*RequestMessage)
request, ok := env.(*RequestEnvelope)
if !ok {
return errors.WithStack(bus.ErrUnexpectedMessage)
errs <- errors.WithStack(bus.ErrUnexpectedMessage)
continue
}
logger.Debug(ctx, "handling request", logger.F("request", request))
msg, err := h(request.Message)
msg, err := handler(request.Unwrap())
reply := &ReplyMessage{
RequestID: request.RequestID,
Message: nil,
Error: nil,
ns: createReplyNamespace(request.RequestID),
reply := &ReplyEnvelope{
requestID: request.RequestID(),
wrapped: bus.NewEnvelope(request.Unwrap().Address(), msg),
}
if err != nil {
reply.Error = errors.WithStack(err)
} else {
reply.Message = msg
reply.err = errors.WithStack(err)
}
logger.Debug(ctx, "publishing reply", logger.F("reply", reply))
if err := b.Publish(ctx, reply); err != nil {
return errors.WithStack(err)
if err := b.Publish(reply); err != nil {
errs <- errors.WithStack(err)
continue
}
}
}
}()
return errs
}
func createReplyNamespace(requestID uint64) bus.MessageNamespace {
return bus.NewMessageNamespace(
MessageNamespaceReply,
bus.MessageNamespace(strconv.FormatUint(requestID, 10)),
)
func getRequestAddress(addr bus.Address) bus.Address {
return AddressRequest + "/" + addr
}
func getReplyAddress(addr bus.Address, requestID uint64) bus.Address {
return AddressReply + "/" + addr + "/" + bus.Address(strconv.FormatUint(requestID, 10))
}

View File

@ -1,33 +0,0 @@
package bus
import (
"strings"
"github.com/pkg/errors"
)
type (
MessageNamespace string
)
type Message interface {
MessageNamespace() MessageNamespace
}
func NewMessageNamespace(namespaces ...MessageNamespace) MessageNamespace {
var sb strings.Builder
for i, ns := range namespaces {
if i != 0 {
if _, err := sb.WriteString(":"); err != nil {
panic(errors.Wrap(err, "could not build new message namespace"))
}
}
if _, err := sb.WriteString(string(ns)); err != nil {
panic(errors.Wrap(err, "could not build new message namespace"))
}
}
return MessageNamespace(sb.String())
}

View File

@ -2,6 +2,7 @@ package testing
import (
"context"
"fmt"
"sync"
"sync/atomic"
"testing"
@ -12,74 +13,52 @@ import (
)
const (
testNamespace bus.MessageNamespace = "testNamespace"
testAddress bus.Address = "testAddress"
)
type testMessage struct{}
func (e *testMessage) MessageNamespace() bus.MessageNamespace {
return testNamespace
}
func TestPublishSubscribe(t *testing.T, b bus.Bus) {
ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
defer cancel()
t.Log("subscribe")
messages, err := b.Subscribe(ctx, testNamespace)
envelopes, err := b.Subscribe(ctx, testAddress)
if err != nil {
t.Fatal(errors.WithStack(err))
}
expectedTotal := 5
var wg sync.WaitGroup
wg.Add(5)
wg.Add(expectedTotal)
go func() {
// 5 events should be received
t.Log("publish 0")
if err := b.Publish(ctx, &testMessage{}); err != nil {
count := expectedTotal
for i := 0; i < count; i++ {
env := bus.NewEnvelope(testAddress, fmt.Sprintf("message %d", i))
if err := b.Publish(env); err != nil {
t.Error(errors.WithStack(err))
}
t.Log("publish 1")
if err := b.Publish(ctx, &testMessage{}); err != nil {
t.Error(errors.WithStack(err))
}
t.Log("publish 2")
if err := b.Publish(ctx, &testMessage{}); err != nil {
t.Error(errors.WithStack(err))
}
t.Log("publish 3")
if err := b.Publish(ctx, &testMessage{}); err != nil {
t.Error(errors.WithStack(err))
}
t.Log("publish 4")
if err := b.Publish(ctx, &testMessage{}); err != nil {
t.Error(errors.WithStack(err))
t.Logf("published %d", i)
}
}()
var count int32 = 0
go func() {
t.Log("range for events")
t.Log("range for received envelopes")
for msg := range messages {
for env := range envelopes {
t.Logf("received msg %d", atomic.LoadInt32(&count))
atomic.AddInt32(&count, 1)
if e, g := testNamespace, msg.MessageNamespace(); e != g {
t.Errorf("evt.MessageNamespace(): expected '%v', got '%v'", e, g)
if e, g := testAddress, env.Address(); e != g {
t.Errorf("env.Address(): expected '%v', got '%v'", e, g)
}
wg.Done()
@ -88,9 +67,9 @@ func TestPublishSubscribe(t *testing.T, b bus.Bus) {
wg.Wait()
b.Unsubscribe(ctx, testNamespace, messages)
b.Unsubscribe(testAddress, envelopes)
if e, g := int32(5), count; e != g {
t.Errorf("message received count: expected '%v', got '%v'", e, g)
if e, g := int32(expectedTotal), count; e != g {
t.Errorf("envelopes received count: expected '%v', got '%v'", e, g)
}
}

View File

@ -11,58 +11,42 @@ import (
)
const (
testTypeReqRes bus.MessageNamespace = "testNamspaceReqRes"
testTypeReqResAddress bus.Address = "testTypeReqResAddress"
)
type testReqResMessage struct {
i int
}
func (m *testReqResMessage) MessageNamespace() bus.MessageNamespace {
return testNamespace
}
func TestRequestReply(t *testing.T, b bus.Bus) {
expectedRoundTrips := 256
timeout := time.Now().Add(time.Duration(expectedRoundTrips) * time.Second)
var (
initWaitGroup sync.WaitGroup
resWaitGroup sync.WaitGroup
)
replyCtx, cancelReply := context.WithDeadline(context.Background(), timeout)
defer cancelReply()
initWaitGroup.Add(1)
var resWaitGroup sync.WaitGroup
go func() {
repondCtx, cancelRespond := context.WithDeadline(context.Background(), timeout)
defer cancelRespond()
initWaitGroup.Done()
err := b.Reply(repondCtx, testNamespace, func(msg bus.Message) (bus.Message, error) {
replyErrs := b.Reply(replyCtx, testTypeReqResAddress, func(env bus.Envelope) (any, error) {
defer resWaitGroup.Done()
req, ok := msg.(*testReqResMessage)
req, ok := env.Message().(int)
if !ok {
return nil, errors.WithStack(bus.ErrUnexpectedMessage)
}
result := &testReqResMessage{req.i}
// Simulate random work
time.Sleep(time.Millisecond * 100)
t.Logf("[RES] sending res #%d", req.i)
t.Logf("[RES] sending res #%d", req)
return result, nil
return req, nil
})
if err != nil {
t.Error(err)
go func() {
for err := range replyErrs {
if !errors.Is(err, context.Canceled) {
t.Errorf("%+v", errors.WithStack(err))
}
}
}()
initWaitGroup.Wait()
var reqWaitGroup sync.WaitGroup
for i := 0; i < expectedRoundTrips; i++ {
@ -75,32 +59,30 @@ func TestRequestReply(t *testing.T, b bus.Bus) {
requestCtx, cancelRequest := context.WithDeadline(context.Background(), timeout)
defer cancelRequest()
req := &testReqResMessage{i}
t.Logf("[REQ] sending req #%d", i)
result, err := b.Request(requestCtx, req)
response, err := b.Request(requestCtx, bus.NewEnvelope(testTypeReqResAddress, i))
if err != nil {
t.Error(err)
}
t.Logf("[REQ] received req #%d reply", i)
if result == nil {
t.Error("result should not be nil")
if response == nil {
t.Error("response should not be nil")
return
}
res, ok := result.(*testReqResMessage)
result, ok := response.Message().(int)
if !ok {
t.Error(errors.WithStack(bus.ErrUnexpectedMessage))
return
}
if e, g := req.i, res.i; e != g {
t.Errorf("res.i: expected '%v', got '%v'", e, g)
if e, g := i, result; e != g {
t.Errorf("response.Message(): expected '%v', got '%v'", e, g)
}
}(i)
}
@ -108,3 +90,77 @@ func TestRequestReply(t *testing.T, b bus.Bus) {
reqWaitGroup.Wait()
resWaitGroup.Wait()
}
func TestCanceledRequest(t *testing.T, b bus.Bus) {
replyCtx, cancelReply := context.WithCancel(context.Background())
defer cancelReply()
errs := b.Reply(replyCtx, testTypeReqResAddress, func(env bus.Envelope) (any, error) {
return env.Message(), nil
})
go func() {
for err := range errs {
if !errors.Is(err, context.Canceled) {
t.Errorf("%+v", errors.WithStack(err))
}
}
}()
var wg sync.WaitGroup
count := 100
wg.Add(count)
for i := 0; i < count; i++ {
go func(i int) {
defer wg.Done()
t.Logf("calling %d", i)
isCanceled := i%2 == 0
var ctx context.Context
if isCanceled {
canceledCtx, cancel := context.WithCancel(context.Background())
cancel()
ctx = canceledCtx
} else {
ctx = context.Background()
}
t.Logf("publishing envelope #%d", i)
reply, err := b.Request(ctx, bus.NewEnvelope(testTypeReqResAddress, int64(i)))
if err != nil {
if errors.Is(err, context.Canceled) && isCanceled {
return
}
if errors.Is(err, bus.ErrNoResponse) && isCanceled {
return
}
t.Errorf("%+v", errors.WithStack(err))
return
}
result, ok := reply.Message().(int64)
if !ok {
t.Errorf("response.Result: expected type '%T', got '%T'", int64(0), reply.Message())
return
}
if e, g := i, int(result); e != g {
t.Errorf("response.Result: expected '%v', got '%v'", e, g)
return
}
}(i)
}
wg.Wait()
}

View File

@ -1,282 +0,0 @@
package http
import (
"encoding/json"
"io"
"io/fs"
"mime/multipart"
"net/http"
"os"
"time"
"forge.cadoles.com/arcad/edge/pkg/bus"
"forge.cadoles.com/arcad/edge/pkg/module"
"forge.cadoles.com/arcad/edge/pkg/module/blob"
"forge.cadoles.com/arcad/edge/pkg/storage"
"github.com/go-chi/chi/v5"
"github.com/pkg/errors"
"gitlab.com/wpetit/goweb/logger"
)
const (
errorCodeForbidden = "forbidden"
errorCodeInternalError = "internal-error"
errorCodeBadRequest = "bad-request"
errorCodeNotFound = "not-found"
)
type uploadResponse struct {
Bucket string `json:"bucket"`
BlobID storage.BlobID `json:"blobId"`
}
func (h *Handler) handleAppUpload(w http.ResponseWriter, r *http.Request) {
h.mutex.RLock()
defer h.mutex.RUnlock()
ctx := r.Context()
r.Body = http.MaxBytesReader(w, r.Body, h.uploadMaxFileSize)
if err := r.ParseMultipartForm(h.uploadMaxFileSize); err != nil {
logger.Error(ctx, "could not parse multipart form", logger.CapturedE(errors.WithStack(err)))
jsonError(w, http.StatusBadRequest, errorCodeBadRequest)
return
}
_, fileHeader, err := r.FormFile("file")
if err != nil {
logger.Error(ctx, "could not read form file", logger.CapturedE(errors.WithStack(err)))
jsonError(w, http.StatusBadRequest, errorCodeBadRequest)
return
}
var metadata map[string]any
rawMetadata := r.Form.Get("metadata")
if rawMetadata != "" {
if err := json.Unmarshal([]byte(rawMetadata), &metadata); err != nil {
logger.Error(ctx, "could not parse metadata", logger.CapturedE(errors.WithStack(err)))
jsonError(w, http.StatusBadRequest, errorCodeBadRequest)
return
}
}
ctx = module.WithContext(ctx, map[module.ContextKey]any{
ContextKeyOriginRequest: r,
})
requestMsg := blob.NewMessageUploadRequest(ctx, fileHeader, metadata)
reply, err := h.bus.Request(ctx, requestMsg)
if err != nil {
logger.Error(ctx, "could not retrieve file", logger.CapturedE(errors.WithStack(err)))
jsonError(w, http.StatusInternalServerError, errorCodeInternalError)
return
}
logger.Debug(ctx, "upload reply", logger.F("reply", reply))
responseMsg, ok := reply.(*blob.MessageUploadResponse)
if !ok {
logger.Error(
ctx, "unexpected upload response message",
logger.F("message", reply),
)
jsonError(w, http.StatusInternalServerError, errorCodeInternalError)
return
}
if !responseMsg.Allow {
jsonError(w, http.StatusForbidden, errorCodeForbidden)
return
}
encoder := json.NewEncoder(w)
res := &uploadResponse{
Bucket: responseMsg.Bucket,
BlobID: responseMsg.BlobID,
}
if err := encoder.Encode(res); err != nil {
panic(errors.Wrap(err, "could not encode upload response"))
}
}
func (h *Handler) handleAppDownload(w http.ResponseWriter, r *http.Request) {
h.mutex.RLock()
defer h.mutex.RUnlock()
bucket := chi.URLParam(r, "bucket")
blobID := chi.URLParam(r, "blobID")
ctx := logger.With(r.Context(), logger.F("blobID", blobID), logger.F("bucket", bucket))
ctx = module.WithContext(ctx, map[module.ContextKey]any{
ContextKeyOriginRequest: r,
})
requestMsg := blob.NewMessageDownloadRequest(ctx, bucket, storage.BlobID(blobID))
reply, err := h.bus.Request(ctx, requestMsg)
if err != nil {
logger.Error(ctx, "could not retrieve file", logger.CapturedE(errors.WithStack(err)))
http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)
return
}
replyMsg, ok := reply.(*blob.MessageDownloadResponse)
if !ok {
logger.Error(
ctx, "unexpected download response message",
logger.CapturedE(errors.WithStack(bus.ErrUnexpectedMessage)),
logger.F("message", reply),
)
jsonError(w, http.StatusInternalServerError, errorCodeInternalError)
return
}
if !replyMsg.Allow {
jsonError(w, http.StatusForbidden, errorCodeForbidden)
return
}
if replyMsg.Blob == nil {
jsonError(w, http.StatusNotFound, errorCodeNotFound)
return
}
defer func() {
if err := replyMsg.Blob.Close(); err != nil {
logger.Error(ctx, "could not close blob", logger.CapturedE(errors.WithStack(err)))
}
}()
http.ServeContent(w, r, string(replyMsg.BlobInfo.ID()), replyMsg.BlobInfo.ModTime(), replyMsg.Blob)
}
func serveFile(w http.ResponseWriter, r *http.Request, fs fs.FS, path string) {
ctx := logger.With(r.Context(), logger.F("path", path))
file, err := fs.Open(path)
if err != nil {
if errors.Is(err, os.ErrNotExist) {
http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)
return
}
logger.Error(ctx, "error while opening fs file", logger.CapturedE(errors.WithStack(err)))
http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)
return
}
defer func() {
if err := file.Close(); err != nil {
logger.Error(ctx, "error while closing fs file", logger.CapturedE(errors.WithStack(err)))
}
}()
info, err := file.Stat()
if err != nil {
logger.Error(ctx, "error while retrieving fs file stat", logger.CapturedE(errors.WithStack(err)))
http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)
return
}
reader, ok := file.(io.ReadSeeker)
if !ok {
return
}
http.ServeContent(w, r, path, info.ModTime(), reader)
}
type jsonErrorResponse struct {
Error jsonErr `json:"error"`
}
type jsonErr struct {
Code string `json:"code"`
}
func jsonError(w http.ResponseWriter, status int, code string) {
w.Header().Add("Content-Type", "application/json")
w.WriteHeader(status)
encoder := json.NewEncoder(w)
response := jsonErrorResponse{
Error: jsonErr{
Code: code,
},
}
if err := encoder.Encode(response); err != nil {
panic(errors.WithStack(err))
}
}
type uploadedFile struct {
multipart.File
header *multipart.FileHeader
modTime time.Time
}
// Stat implements fs.File
func (f *uploadedFile) Stat() (fs.FileInfo, error) {
return &uploadedFileInfo{
header: f.header,
modTime: f.modTime,
}, nil
}
type uploadedFileInfo struct {
header *multipart.FileHeader
modTime time.Time
}
// IsDir implements fs.FileInfo
func (i *uploadedFileInfo) IsDir() bool {
return false
}
// ModTime implements fs.FileInfo
func (i *uploadedFileInfo) ModTime() time.Time {
return i.modTime
}
// Mode implements fs.FileInfo
func (i *uploadedFileInfo) Mode() fs.FileMode {
return os.ModePerm
}
// Name implements fs.FileInfo
func (i *uploadedFileInfo) Name() string {
return i.header.Filename
}
// Size implements fs.FileInfo
func (i *uploadedFileInfo) Size() int64 {
return i.header.Size
}
// Sys implements fs.FileInfo
func (i *uploadedFileInfo) Sys() any {
return nil
}
var (
_ fs.File = &uploadedFile{}
_ fs.FileInfo = &uploadedFileInfo{}
)

View File

@ -7,11 +7,11 @@ import (
)
func (h *Handler) handleSDKClient(w http.ResponseWriter, r *http.Request) {
serveFile(w, r, &sdk.FS, "client/dist/client.js")
ServeFile(w, r, &sdk.FS, "client/dist/client.js")
}
func (h *Handler) handleSDKClientMap(w http.ResponseWriter, r *http.Request) {
serveFile(w, r, &sdk.FS, "client/dist/client.js.map")
ServeFile(w, r, &sdk.FS, "client/dist/client.js.map")
}
func (h *Handler) handleAppFiles(w http.ResponseWriter, r *http.Request) {

75
pkg/http/context.go Normal file
View File

@ -0,0 +1,75 @@
package http
import (
"context"
"net/http"
"forge.cadoles.com/arcad/edge/pkg/bus"
)
type contextKey string
var (
contextKeyBus contextKey = "bus"
contextKeyHTTPRequest contextKey = "httpRequest"
contextKeyHTTPClient contextKey = "httpClient"
contextKeySessionID contextKey = "sessionId"
)
func (h *Handler) contextMiddleware(next http.Handler) http.Handler {
fn := func(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
ctx = WithContextBus(ctx, h.bus)
ctx = WithContextHTTPRequest(ctx, r)
ctx = WithContextHTTPClient(ctx, h.httpClient)
r = r.WithContext(ctx)
next.ServeHTTP(w, r)
}
return http.HandlerFunc(fn)
}
func ContextBus(ctx context.Context) (bus.Bus, bool) {
return contextValue[bus.Bus](ctx, contextKeyBus)
}
func WithContextBus(parent context.Context, bus bus.Bus) context.Context {
return context.WithValue(parent, contextKeyBus, bus)
}
func ContextHTTPRequest(ctx context.Context) (*http.Request, bool) {
return contextValue[*http.Request](ctx, contextKeyHTTPRequest)
}
func WithContextHTTPRequest(parent context.Context, request *http.Request) context.Context {
return context.WithValue(parent, contextKeyHTTPRequest, request)
}
func ContextHTTPClient(ctx context.Context) (*http.Client, bool) {
return contextValue[*http.Client](ctx, contextKeyHTTPClient)
}
func WithContextHTTPClient(parent context.Context, client *http.Client) context.Context {
return context.WithValue(parent, contextKeyHTTPClient, client)
}
func ContextSessionID(ctx context.Context) (string, bool) {
return contextValue[string](ctx, contextKeySessionID)
}
func WithContextSessionID(parent context.Context, sessionID string) context.Context {
return context.WithValue(parent, contextKeySessionID, sessionID)
}
func contextValue[T any](ctx context.Context, key any) (T, bool) {
value, ok := ctx.Value(key).(T)
if !ok {
return *new(T), false
}
return value, true
}
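
The new pkg/http/context.go centralizes the request-scoped values that were previously passed through module context keys: contextMiddleware injects the bus, the originating *http.Request and the shared HTTP client into the request context, and typed accessors retrieve them. A short sketch of a handler registered through the HTTPMounts option (the handler name and response are illustrative, the import path is inferred from the repository layout):

package example

import (
	"net/http"

	edgehttp "forge.cadoles.com/arcad/edge/pkg/http" // assumed import path of this package
)

// handleExample shows how a mounted handler can pick the injected values back
// out of the request context via the typed accessors.
func handleExample(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()

	b, ok := edgehttp.ContextBus(ctx)
	if !ok {
		http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)
		return
	}

	_ = b // publish or request on the bus as needed

	w.WriteHeader(http.StatusNoContent)
}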

30
pkg/http/envelope.go Normal file
View File

@ -0,0 +1,30 @@
package http
import (
"context"
"forge.cadoles.com/arcad/edge/pkg/bus"
)
var (
AddressIncomingMessage bus.Address = "http/incoming-message"
AddressOutgoingMessage bus.Address = "http/outgoing-message"
)
type IncomingMessage struct {
Context context.Context
Payload map[string]any
}
func NewIncomingMessageEnvelope(ctx context.Context, payload map[string]any) bus.Envelope {
return bus.NewEnvelope(AddressIncomingMessage, &IncomingMessage{ctx, payload})
}
type OutgoingMessage struct {
SessionID string
Data any
}
func NewOutgoingMessageEnvelope(sessionID string, data any) bus.Envelope {
return bus.NewEnvelope(AddressOutgoingMessage, &OutgoingMessage{sessionID, data})
}
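
These envelopes are the bridge between the bus and the SockJS layer: incoming client payloads are published on AddressIncomingMessage, while anything published on AddressOutgoingMessage is picked up by the session handler shown further down, filtered on SessionID (an empty SessionID broadcasts to every session) and forwarded to the client as JSON. A sketch of the publishing side, with an assumed import alias and an illustrative call:

package example

import (
	"forge.cadoles.com/arcad/edge/pkg/bus"
	edgehttp "forge.cadoles.com/arcad/edge/pkg/http" // assumed import path of this package
)

// notifySession pushes data to a single SockJS session by publishing an
// outgoing-message envelope on the bus shared with the HTTP handler.
func notifySession(b bus.Bus, sessionID string, data any) error {
	return b.Publish(edgehttp.NewOutgoingMessageEnvelope(sessionID, data))
}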

View File

@ -27,7 +27,6 @@ type Handler struct {
sockjs http.Handler
bus bus.Bus
sockjsOpts sockjs.Options
uploadMaxFileSize int64
server *app.Server
serverModuleFactories []app.ServerModuleFactory
@ -57,10 +56,6 @@ func (h *Handler) Load(ctx context.Context, bdle bundle.Bundle) error {
server := app.NewServer(h.serverModuleFactories...)
if err := server.Load(serverMainScript, string(mainScript)); err != nil {
return errors.WithStack(err)
}
fs := bundle.NewFileSystem("public", bdle)
public := HTML5Fileserver(fs)
sockjs := sockjs.NewHandler(sockJSPathPrefix, h.sockjsOpts, h.handleSockJSSession)
@ -69,7 +64,7 @@ func (h *Handler) Load(ctx context.Context, bdle bundle.Bundle) error {
h.server.Stop()
}
if err := server.Start(ctx); err != nil {
if err := server.Start(ctx, serverMainScript, string(mainScript)); err != nil {
return errors.WithStack(err)
}
@ -90,7 +85,6 @@ func NewHandler(funcs ...HandlerOptionFunc) *Handler {
router := chi.NewRouter()
handler := &Handler{
uploadMaxFileSize: opts.UploadMaxFileSize,
sockjsOpts: opts.SockJS,
router: router,
serverModuleFactories: opts.ServerModuleFactories,
@ -108,15 +102,9 @@ func NewHandler(funcs ...HandlerOptionFunc) *Handler {
r.Get("/client.js.map", handler.handleSDKClientMap)
})
r.Route("/api", func(r chi.Router) {
r.Post("/v1/upload", handler.handleAppUpload)
r.Get("/v1/download/{bucket}/{blobID}", handler.handleAppDownload)
r.Get("/v1/fetch", handler.handleAppFetch)
})
for _, fn := range opts.HTTPMounts {
r.Group(func(r chi.Router) {
r.Use(handler.contextMiddleware)
fn(r)
})
}

View File

@ -15,7 +15,6 @@ type HandlerOptions struct {
Bus bus.Bus
SockJS sockjs.Options
ServerModuleFactories []app.ServerModuleFactory
UploadMaxFileSize int64
HTTPClient *http.Client
HTTPMounts []func(r chi.Router)
HTTPMiddlewares []func(next http.Handler) http.Handler
@ -31,7 +30,6 @@ func defaultHandlerOptions() *HandlerOptions {
Bus: memory.NewBus(),
SockJS: sockjsOptions,
ServerModuleFactories: make([]app.ServerModuleFactory, 0),
UploadMaxFileSize: 10 << (10 * 2), // 10Mb
HTTPClient: &http.Client{
Timeout: time.Second * 30,
},
@ -60,12 +58,6 @@ func WithBus(bus bus.Bus) HandlerOptionFunc {
}
}
func WithUploadMaxFileSize(size int64) HandlerOptionFunc {
return func(opts *HandlerOptions) {
opts.UploadMaxFileSize = size
}
}
func WithHTTPClient(client *http.Client) HandlerOptionFunc {
return func(opts *HandlerOptions) {
opts.HTTPClient = client

View File

@ -5,7 +5,6 @@ import (
"encoding/json"
"net/http"
"forge.cadoles.com/arcad/edge/pkg/module"
"github.com/igm/sockjs-go/v3/sockjs"
"github.com/pkg/errors"
"gitlab.com/wpetit/goweb/logger"
@ -15,11 +14,6 @@ const (
statusChannelClosed = iota
)
const (
ContextKeySessionID module.ContextKey = "sessionId"
ContextKeyOriginRequest module.ContextKey = "originRequest"
)
func (h *Handler) handleSockJS(w http.ResponseWriter, r *http.Request) {
h.mutex.RLock()
defer h.mutex.RUnlock()
@ -42,19 +36,18 @@ func (h *Handler) handleSockJSSession(sess sockjs.Session) {
}
}()
go h.handleServerMessages(ctx, sess)
h.handleClientMessages(ctx, sess)
go h.handleOutgoingMessages(ctx, sess)
h.handleIncomingMessages(ctx, sess)
}
func (h *Handler) handleServerMessages(ctx context.Context, sess sockjs.Session) {
messages, err := h.bus.Subscribe(ctx, module.MessageNamespaceServer)
func (h *Handler) handleOutgoingMessages(ctx context.Context, sess sockjs.Session) {
envelopes, err := h.bus.Subscribe(ctx, AddressOutgoingMessage)
if err != nil {
panic(errors.WithStack(err))
}
defer func() {
// Close messages subscriber
h.bus.Unsubscribe(ctx, module.MessageNamespaceServer, messages)
h.bus.Unsubscribe(AddressOutgoingMessage, envelopes)
logger.Debug(ctx, "unsubscribed")
@ -72,26 +65,22 @@ func (h *Handler) handleServerMessages(ctx context.Context, sess sockjs.Session)
case <-ctx.Done():
return
case msg := <-messages:
serverMessage, ok := msg.(*module.ServerMessage)
case env := <-envelopes:
outgoingMessage, ok := env.Message().(*OutgoingMessage)
if !ok {
logger.Error(
ctx,
"unexpected server message",
logger.F("message", msg),
"unexpected outgoing message",
logger.F("message", env.Message()),
)
continue
}
sessionID := module.ContextValue[string](serverMessage.Context, ContextKeySessionID)
isDest := sessionID == "" || sessionID == sess.ID()
isDest := outgoingMessage.SessionID == "" || outgoingMessage.SessionID == sess.ID()
if !isDest {
continue
}
payload, err := json.Marshal(serverMessage.Data)
payload, err := json.Marshal(outgoingMessage.Data)
if err != nil {
logger.Error(
ctx,
@ -132,7 +121,7 @@ func (h *Handler) handleServerMessages(ctx context.Context, sess sockjs.Session)
}
}
func (h *Handler) handleClientMessages(ctx context.Context, sess sockjs.Session) {
func (h *Handler) handleIncomingMessages(ctx context.Context, sess sockjs.Session) {
for {
select {
case <-ctx.Done():
@ -145,7 +134,7 @@ func (h *Handler) handleClientMessages(ctx context.Context, sess sockjs.Session)
data, err := sess.RecvCtx(ctx)
if err != nil {
if errors.Is(err, sockjs.ErrSessionNotOpen) {
if errors.Is(err, sockjs.ErrSessionNotOpen) || errors.Is(err, context.Canceled) {
break
}
@ -174,7 +163,7 @@ func (h *Handler) handleClientMessages(ctx context.Context, sess sockjs.Session)
switch {
case message.Type == WebsocketMessageTypeMessage:
var payload map[string]interface{}
var payload map[string]any
if err := json.Unmarshal(message.Payload, &payload); err != nil {
logger.Error(
ctx,
@ -186,26 +175,22 @@ func (h *Handler) handleClientMessages(ctx context.Context, sess sockjs.Session)
}
ctx := logger.With(ctx, logger.F("payload", payload))
ctx = module.WithContext(ctx, map[module.ContextKey]any{
ContextKeySessionID: sess.ID(),
ContextKeyOriginRequest: sess.Request(),
})
ctx = WithContextHTTPRequest(ctx, sess.Request())
ctx = WithContextSessionID(ctx, sess.ID())
clientMessage := module.NewClientMessage(ctx, payload)
incomingMessage := NewIncomingMessageEnvelope(ctx, payload)
logger.Debug(ctx, "publishing new client message", logger.F("message", clientMessage))
logger.Debug(ctx, "publishing new incoming message", logger.F("message", incomingMessage))
if err := h.bus.Publish(ctx, clientMessage); err != nil {
if err := h.bus.Publish(incomingMessage); err != nil {
logger.Error(ctx, "could not publish message",
logger.CapturedE(errors.WithStack(err)),
logger.F("message", clientMessage),
logger.F("message", incomingMessage),
)
return
}
logger.Debug(ctx, "new client message published", logger.F("message", clientMessage))
default:
logger.Error(
ctx,

82
pkg/http/util.go Normal file
View File

@ -0,0 +1,82 @@
package http
import (
"encoding/json"
"io"
"io/fs"
"net/http"
"os"
"github.com/pkg/errors"
"gitlab.com/wpetit/goweb/logger"
)
const (
ErrCodeForbidden = "forbidden"
ErrCodeInternalError = "internal-error"
ErrCodeBadRequest = "bad-request"
ErrCodeNotFound = "not-found"
)
type jsonErrorResponse struct {
Error jsonErr `json:"error"`
}
type jsonErr struct {
Code string `json:"code"`
}
func JSONError(w http.ResponseWriter, status int, code string) {
w.Header().Add("Content-Type", "application/json")
w.WriteHeader(status)
encoder := json.NewEncoder(w)
response := jsonErrorResponse{
Error: jsonErr{
Code: code,
},
}
if err := encoder.Encode(response); err != nil {
panic(errors.WithStack(err))
}
}
func ServeFile(w http.ResponseWriter, r *http.Request, fs fs.FS, path string) {
ctx := logger.With(r.Context(), logger.F("path", path))
file, err := fs.Open(path)
if err != nil {
if errors.Is(err, os.ErrNotExist) {
http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)
return
}
logger.Error(ctx, "error while opening fs file", logger.CapturedE(errors.WithStack(err)))
http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)
return
}
defer func() {
if err := file.Close(); err != nil {
logger.Error(ctx, "error while closing fs file", logger.CapturedE(errors.WithStack(err)))
}
}()
info, err := file.Stat()
if err != nil {
logger.Error(ctx, "error while retrieving fs file stat", logger.CapturedE(errors.WithStack(err)))
http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)
return
}
reader, ok := file.(io.ReadSeeker)
if !ok {
return
}
http.ServeContent(w, r, path, info.ModTime(), reader)
}

View File

@ -39,21 +39,17 @@ func TestAppModuleWithMemoryRepository(t *testing.T) {
)),
)
file := "testdata/app.js"
script := "testdata/app.js"
data, err := os.ReadFile(file)
data, err := os.ReadFile(script)
if err != nil {
t.Fatal(err)
}
if err := server.Load(file, string(data)); err != nil {
t.Fatal(err)
ctx := context.Background()
if err := server.Start(ctx, script, string(data)); err != nil {
t.Fatalf("%+v", errors.WithStack(err))
}
defer server.Stop()
ctx := context.Background()
if err := server.Start(ctx); err != nil {
t.Fatalf("%+v", errors.WithStack(err))
}
}

View File

@ -1,10 +1,8 @@
package auth
import (
"net/http"
"forge.cadoles.com/arcad/edge/pkg/app"
edgeHTTP "forge.cadoles.com/arcad/edge/pkg/http"
edgehttp "forge.cadoles.com/arcad/edge/pkg/http"
"forge.cadoles.com/arcad/edge/pkg/jwtutil"
"forge.cadoles.com/arcad/edge/pkg/module/util"
"github.com/dop251/goja"
@ -68,7 +66,7 @@ func (m *Module) getClaim(call goja.FunctionCall, rt *goja.Runtime) goja.Value {
ctx := util.AssertContext(call.Argument(0), rt)
claimName := util.AssertString(call.Argument(1), rt)
req, ok := ctx.Value(edgeHTTP.ContextKeyOriginRequest).(*http.Request)
req, ok := edgehttp.ContextHTTPRequest(ctx)
if !ok {
panic(rt.ToValue(errors.New("could not find http request in context")))
}

View File

@ -2,14 +2,14 @@ package auth
import (
"context"
"io/ioutil"
"net/http"
"os"
"testing"
"time"
"cdr.dev/slog"
"forge.cadoles.com/arcad/edge/pkg/app"
edgeHTTP "forge.cadoles.com/arcad/edge/pkg/http"
edgehttp "forge.cadoles.com/arcad/edge/pkg/http"
"forge.cadoles.com/arcad/edge/pkg/jwtutil"
"forge.cadoles.com/arcad/edge/pkg/module"
"github.com/lestrrat-go/jwx/v2/jwa"
@ -22,7 +22,9 @@ import (
func TestAuthModule(t *testing.T) {
t.Parallel()
if testing.Verbose() {
logger.SetLevel(slog.LevelDebug)
}
key := getDummyKey()
@ -33,17 +35,15 @@ func TestAuthModule(t *testing.T) {
),
)
data, err := ioutil.ReadFile("testdata/auth.js")
script := "testdata/auth.js"
data, err := os.ReadFile(script)
if err != nil {
t.Fatal(err)
}
if err := server.Load("testdata/auth.js", string(data)); err != nil {
t.Fatal(err)
}
ctx := context.Background()
if err := server.Start(ctx); err != nil {
if err := server.Start(ctx, script, string(data)); err != nil {
t.Fatalf("%+v", errors.WithStack(err))
}
@ -71,7 +71,7 @@ func TestAuthModule(t *testing.T) {
req.Header.Add("Authorization", "Bearer "+string(rawToken))
ctx = context.WithValue(context.Background(), edgeHTTP.ContextKeyOriginRequest, req)
ctx = edgehttp.WithContextHTTPRequest(context.Background(), req)
if _, err := server.ExecFuncByName(ctx, "testAuth", ctx); err != nil {
t.Fatalf("%+v", errors.WithStack(err))
@ -81,7 +81,9 @@ func TestAuthModule(t *testing.T) {
func TestAuthAnonymousModule(t *testing.T) {
t.Parallel()
if testing.Verbose() {
logger.SetLevel(slog.LevelDebug)
}
key := getDummyKey()
@ -90,17 +92,15 @@ func TestAuthAnonymousModule(t *testing.T) {
ModuleFactory(WithJWT(getDummyKeySet(key))),
)
data, err := ioutil.ReadFile("testdata/auth_anonymous.js")
script := "testdata/auth_anonymous.js"
data, err := os.ReadFile("testdata/auth_anonymous.js")
if err != nil {
t.Fatal(err)
}
if err := server.Load("testdata/auth_anonymous.js", string(data)); err != nil {
t.Fatal(err)
}
ctx := context.Background()
if err := server.Start(ctx); err != nil {
if err := server.Start(ctx, script, string(data)); err != nil {
t.Fatalf("%+v", errors.WithStack(err))
}
@ -111,7 +111,7 @@ func TestAuthAnonymousModule(t *testing.T) {
t.Fatalf("%+v", errors.WithStack(err))
}
ctx = context.WithValue(context.Background(), edgeHTTP.ContextKeyOriginRequest, req)
ctx = edgehttp.WithContextHTTPRequest(context.Background(), req)
if _, err := server.ExecFuncByName(ctx, "testAuth", ctx); err != nil {
t.Fatalf("%+v", errors.WithStack(err))

View File

@ -1,92 +0,0 @@
package blob
import (
"context"
"io"
"mime/multipart"
"forge.cadoles.com/arcad/edge/pkg/bus"
"forge.cadoles.com/arcad/edge/pkg/storage"
"github.com/oklog/ulid/v2"
)
const (
MessageNamespaceUploadRequest bus.MessageNamespace = "uploadRequest"
MessageNamespaceUploadResponse bus.MessageNamespace = "uploadResponse"
MessageNamespaceDownloadRequest bus.MessageNamespace = "downloadRequest"
MessageNamespaceDownloadResponse bus.MessageNamespace = "downloadResponse"
)
type MessageUploadRequest struct {
Context context.Context
RequestID string
FileHeader *multipart.FileHeader
Metadata map[string]interface{}
}
func (m *MessageUploadRequest) MessageNamespace() bus.MessageNamespace {
return MessageNamespaceUploadRequest
}
func NewMessageUploadRequest(ctx context.Context, fileHeader *multipart.FileHeader, metadata map[string]interface{}) *MessageUploadRequest {
return &MessageUploadRequest{
Context: ctx,
RequestID: ulid.Make().String(),
FileHeader: fileHeader,
Metadata: metadata,
}
}
type MessageUploadResponse struct {
RequestID string
BlobID storage.BlobID
Bucket string
Allow bool
}
func (m *MessageUploadResponse) MessageNamespace() bus.MessageNamespace {
return MessageNamespaceDownloadResponse
}
func NewMessageUploadResponse(requestID string) *MessageUploadResponse {
return &MessageUploadResponse{
RequestID: requestID,
}
}
type MessageDownloadRequest struct {
Context context.Context
RequestID string
Bucket string
BlobID storage.BlobID
}
func (m *MessageDownloadRequest) MessageNamespace() bus.MessageNamespace {
return MessageNamespaceDownloadRequest
}
func NewMessageDownloadRequest(ctx context.Context, bucket string, blobID storage.BlobID) *MessageDownloadRequest {
return &MessageDownloadRequest{
Context: ctx,
RequestID: ulid.Make().String(),
Bucket: bucket,
BlobID: blobID,
}
}
type MessageDownloadResponse struct {
RequestID string
Allow bool
BlobInfo storage.BlobInfo
Blob io.ReadSeekCloser
}
func (m *MessageDownloadResponse) MessageNamespace() bus.MessageNamespace {
return MessageNamespaceDownloadResponse
}
func NewMessageDownloadResponse(requestID string) *MessageDownloadResponse {
return &MessageDownloadResponse{
RequestID: requestID,
}
}

View File

@ -0,0 +1,55 @@
package blob
import (
"context"
"io"
"mime/multipart"
"forge.cadoles.com/arcad/edge/pkg/bus"
"forge.cadoles.com/arcad/edge/pkg/storage"
)
const (
AddressUpload bus.Address = "module/blob/upload"
AddressDownload bus.Address = "module/blob/download"
)
type UploadRequest struct {
Context context.Context
FileHeader *multipart.FileHeader
Metadata map[string]interface{}
}
func NewUploadRequestEnvelope(ctx context.Context, fileHeader *multipart.FileHeader, metadata map[string]interface{}) bus.Envelope {
return bus.NewEnvelope(AddressUpload, &UploadRequest{
Context: ctx,
FileHeader: fileHeader,
Metadata: metadata,
})
}
type UploadResponse struct {
Allow bool
Bucket string
BlobID storage.BlobID
}
type DownloadRequest struct {
Context context.Context
Bucket string
BlobID storage.BlobID
}
func NewDownloadRequestEnvelope(ctx context.Context, bucket string, blobID storage.BlobID) bus.Envelope {
return bus.NewEnvelope(AddressDownload, &DownloadRequest{
Context: ctx,
Bucket: bucket,
BlobID: blobID,
})
}
type DownloadResponse struct {
Allow bool
Blob io.ReadSeekCloser
BlobInfo storage.BlobInfo
}

230
pkg/module/blob/http.go Normal file
View File

@ -0,0 +1,230 @@
package blob
import (
"encoding/json"
"io"
"io/fs"
"mime/multipart"
"net/http"
"os"
"time"
"forge.cadoles.com/arcad/edge/pkg/bus"
edgehttp "forge.cadoles.com/arcad/edge/pkg/http"
"forge.cadoles.com/arcad/edge/pkg/storage"
"github.com/go-chi/chi/v5"
"github.com/pkg/errors"
"gitlab.com/wpetit/goweb/logger"
)
type uploadResponse struct {
Bucket string `json:"bucket"`
BlobID storage.BlobID `json:"blobId"`
}
func Mount(uploadMaxFileSize int64) func(r chi.Router) {
return func(r chi.Router) {
r.Post("/api/v1/upload", getAppUploadHandler(uploadMaxFileSize))
r.Get("/api/v1/download/{bucket}/{blobID}", handleAppDownload)
}
}
func getAppUploadHandler(uploadMaxFileSize int64) func(w http.ResponseWriter, r *http.Request) {
return func(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
r.Body = http.MaxBytesReader(w, r.Body, uploadMaxFileSize)
if err := r.ParseMultipartForm(uploadMaxFileSize); err != nil {
logger.Error(ctx, "could not parse multipart form", logger.CapturedE(errors.WithStack(err)))
edgehttp.JSONError(w, http.StatusBadRequest, edgehttp.ErrCodeBadRequest)
return
}
_, fileHeader, err := r.FormFile("file")
if err != nil {
logger.Error(ctx, "could not read form file", logger.CapturedE(errors.WithStack(err)))
edgehttp.JSONError(w, http.StatusBadRequest, edgehttp.ErrCodeBadRequest)
return
}
var metadata map[string]any
rawMetadata := r.Form.Get("metadata")
if rawMetadata != "" {
if err := json.Unmarshal([]byte(rawMetadata), &metadata); err != nil {
logger.Error(ctx, "could not parse metadata", logger.CapturedE(errors.WithStack(err)))
edgehttp.JSONError(w, http.StatusBadRequest, edgehttp.ErrCodeBadRequest)
return
}
}
requestEnv := NewUploadRequestEnvelope(ctx, fileHeader, metadata)
bus, ok := edgehttp.ContextBus(ctx)
if !ok {
logger.Error(ctx, "could find bus on context")
edgehttp.JSONError(w, http.StatusInternalServerError, edgehttp.ErrCodeInternalError)
return
}
reply, err := bus.Request(ctx, requestEnv)
if err != nil {
logger.Error(ctx, "could not retrieve file", logger.CapturedE(errors.WithStack(err)))
edgehttp.JSONError(w, http.StatusInternalServerError, edgehttp.ErrCodeInternalError)
return
}
logger.Debug(ctx, "upload reply", logger.F("reply", reply))
replyMessage, ok := reply.Message().(*UploadResponse)
if !ok {
logger.Error(
ctx, "unexpected upload response message",
logger.F("message", reply.Message()),
)
edgehttp.JSONError(w, http.StatusInternalServerError, edgehttp.ErrCodeInternalError)
return
}
if !replyMessage.Allow {
edgehttp.JSONError(w, http.StatusForbidden, edgehttp.ErrCodeForbidden)
return
}
encoder := json.NewEncoder(w)
res := &uploadResponse{
Bucket: replyMessage.Bucket,
BlobID: replyMessage.BlobID,
}
if err := encoder.Encode(res); err != nil {
panic(errors.Wrap(err, "could not encode upload response"))
}
}
}
func handleAppDownload(w http.ResponseWriter, r *http.Request) {
bucket := chi.URLParam(r, "bucket")
blobID := chi.URLParam(r, "blobID")
ctx := logger.With(r.Context(), logger.F("blobID", blobID), logger.F("bucket", bucket))
requestMsg := NewDownloadRequestEnvelope(ctx, bucket, storage.BlobID(blobID))
bs, ok := edgehttp.ContextBus(ctx)
if !ok {
logger.Error(ctx, "could find bus on context")
edgehttp.JSONError(w, http.StatusInternalServerError, edgehttp.ErrCodeInternalError)
return
}
reply, err := bs.Request(ctx, requestMsg)
if err != nil {
logger.Error(ctx, "could not retrieve file", logger.CapturedE(errors.WithStack(err)))
http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)
return
}
replyMessage, ok := reply.Message().(*DownloadResponse)
if !ok {
logger.Error(
ctx, "unexpected download response message",
logger.CapturedE(errors.WithStack(bus.ErrUnexpectedMessage)),
logger.F("message", reply),
)
edgehttp.JSONError(w, http.StatusInternalServerError, edgehttp.ErrCodeInternalError)
return
}
if !replyMessage.Allow {
edgehttp.JSONError(w, http.StatusForbidden, edgehttp.ErrCodeForbidden)
return
}
if replyMessage.Blob == nil {
edgehttp.JSONError(w, http.StatusNotFound, edgehttp.ErrCodeNotFound)
return
}
defer func() {
if err := replyMessage.Blob.Close(); err != nil {
logger.Error(ctx, "could not close blob", logger.CapturedE(errors.WithStack(err)))
}
}()
// TODO Fix usage of ServeContent
// http.ServeContent(w, r, string(replyMessage.BlobInfo.ID()), replyMessage.BlobInfo.ModTime(), replyMessage.Blob)
w.Header().Add("Content-Type", replyMessage.BlobInfo.ContentType())
if _, err := io.Copy(w, replyMessage.Blob); err != nil {
logger.Error(ctx, "could not write blob", logger.CapturedE(errors.WithStack(err)))
}
}
type uploadedFile struct {
multipart.File
header *multipart.FileHeader
modTime time.Time
}
// Stat implements fs.File
func (f *uploadedFile) Stat() (fs.FileInfo, error) {
return &uploadedFileInfo{
header: f.header,
modTime: f.modTime,
}, nil
}
type uploadedFileInfo struct {
header *multipart.FileHeader
modTime time.Time
}
// IsDir implements fs.FileInfo
func (i *uploadedFileInfo) IsDir() bool {
return false
}
// ModTime implements fs.FileInfo
func (i *uploadedFileInfo) ModTime() time.Time {
return i.modTime
}
// Mode implements fs.FileInfo
func (i *uploadedFileInfo) Mode() fs.FileMode {
return os.ModePerm
}
// Name implements fs.FileInfo
func (i *uploadedFileInfo) Name() string {
return i.header.Filename
}
// Size implements fs.FileInfo
func (i *uploadedFileInfo) Size() int64 {
return i.header.Size
}
// Sys implements fs.FileInfo
func (i *uploadedFileInfo) Sys() any {
return nil
}
var (
_ fs.File = &uploadedFile{}
_ fs.FileInfo = &uploadedFileInfo{}
)

View File

@ -236,11 +236,10 @@ func (m *Module) getBucketSize(call goja.FunctionCall, rt *goja.Runtime) goja.Va
func (m *Module) handleMessages() {
ctx := context.Background()
go func() {
err := m.bus.Reply(ctx, MessageNamespaceUploadRequest, func(msg bus.Message) (bus.Message, error) {
uploadRequest, ok := msg.(*MessageUploadRequest)
uploadRequestErrs := m.bus.Reply(ctx, AddressUpload, func(env bus.Envelope) (any, error) {
uploadRequest, ok := env.Message().(*UploadRequest)
if !ok {
return nil, errors.Wrapf(bus.ErrUnexpectedMessage, "expected message upload request, got '%T'", msg)
return nil, errors.Wrapf(bus.ErrUnexpectedMessage, "expected message upload request, got '%T'", env.Message())
}
res, err := m.handleUploadRequest(uploadRequest)
@ -254,15 +253,17 @@ func (m *Module) handleMessages() {
return res, nil
})
if err != nil {
panic(errors.WithStack(err))
go func() {
for err := range uploadRequestErrs {
logger.Error(ctx, "error while replying to upload requests", logger.CapturedE(errors.WithStack(err)))
}
}()
err := m.bus.Reply(ctx, MessageNamespaceDownloadRequest, func(msg bus.Message) (bus.Message, error) {
downloadRequest, ok := msg.(*MessageDownloadRequest)
downloadRequestErrs := m.bus.Reply(ctx, AddressDownload, func(env bus.Envelope) (any, error) {
downloadRequest, ok := env.Message().(*DownloadRequest)
if !ok {
return nil, errors.Wrapf(bus.ErrUnexpectedMessage, "expected message download request, got '%T'", msg)
return nil, errors.Wrapf(bus.ErrUnexpectedMessage, "expected message download request, got '%T'", env.Message())
}
res, err := m.handleDownloadRequest(downloadRequest)
@ -274,14 +275,15 @@ func (m *Module) handleMessages() {
return res, nil
})
if err != nil {
panic(errors.WithStack(err))
for err := range downloadRequestErrs {
logger.Fatal(ctx, "error while replying to download requests", logger.CapturedE(errors.WithStack(err)))
}
}
func (m *Module) handleUploadRequest(req *MessageUploadRequest) (*MessageUploadResponse, error) {
func (m *Module) handleUploadRequest(req *UploadRequest) (*UploadResponse, error) {
blobID := storage.NewBlobID()
res := NewMessageUploadResponse(req.RequestID)
res := &UploadResponse{}
ctx := logger.With(req.Context, logger.F("blobID", blobID))
@ -302,11 +304,11 @@ func (m *Module) handleUploadRequest(req *MessageUploadRequest) (*MessageUploadR
return nil, errors.WithStack(err)
}
result, ok := rawResult.Export().(map[string]interface{})
result, ok := rawResult.(map[string]interface{})
if !ok {
return nil, errors.Errorf(
"unexpected onBlobUpload result: expected 'map[string]interface{}', got '%T'",
rawResult.Export(),
rawResult,
)
}
@ -393,8 +395,8 @@ func (m *Module) saveBlob(ctx context.Context, bucketName string, blobID storage
return nil
}
func (m *Module) handleDownloadRequest(req *MessageDownloadRequest) (*MessageDownloadResponse, error) {
res := NewMessageDownloadResponse(req.RequestID)
func (m *Module) handleDownloadRequest(req *DownloadRequest) (*DownloadResponse, error) {
res := &DownloadResponse{}
rawResult, err := m.server.ExecFuncByName(req.Context, "onBlobDownload", req.Context, req.Bucket, req.BlobID)
if err != nil {
@ -407,11 +409,11 @@ func (m *Module) handleDownloadRequest(req *MessageDownloadRequest) (*MessageDow
return nil, errors.WithStack(err)
}
result, ok := rawResult.Export().(map[string]interface{})
result, ok := rawResult.(map[string]interface{})
if !ok {
return nil, errors.Errorf(
"unexpected onBlobDownload result: expected 'map[string]interface{}', got '%T'",
rawResult.Export(),
rawResult,
)
}

View File

@ -17,7 +17,9 @@ import (
func TestBlobModule(t *testing.T) {
t.Parallel()
if testing.Verbose() {
logger.SetLevel(slog.LevelDebug)
}
bus := memory.NewBus()
store := sqlite.NewBlobStore(":memory:?_pragma=foreign_keys(1)&_pragma=busy_timeout=60000")
@ -28,19 +30,17 @@ func TestBlobModule(t *testing.T) {
ModuleFactory(bus, store),
)
data, err := os.ReadFile("testdata/blob.js")
script := "testdata/blob.js"
data, err := os.ReadFile(script)
if err != nil {
t.Fatal(err)
}
if err := server.Load("testdata/blob.js", string(data)); err != nil {
t.Fatal(err)
ctx := context.Background()
if err := server.Start(ctx, script, string(data)); err != nil {
t.Fatalf("%+v", errors.WithStack(err))
}
defer server.Stop()
ctx := context.Background()
if err := server.Start(ctx); err != nil {
t.Fatalf("%+v", errors.WithStack(err))
}
}

View File

@ -21,7 +21,9 @@ func TestCastLoadURL(t *testing.T) {
return
}
if testing.Verbose() {
logger.SetLevel(slog.LevelDebug)
}
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()

View File

@ -2,7 +2,6 @@ package cast
import (
"context"
"io/ioutil"
"os"
"testing"
"time"
@ -24,24 +23,24 @@ func TestCastModule(t *testing.T) {
return
}
if testing.Verbose() {
logger.SetLevel(slog.LevelDebug)
}
server := app.NewServer(
module.ConsoleModuleFactory(),
CastModuleFactory(),
)
data, err := ioutil.ReadFile("testdata/cast.js")
script := "testdata/cast.js"
data, err := os.ReadFile(script)
if err != nil {
t.Fatal(err)
}
if err := server.Load("testdata/cast.js", string(data)); err != nil {
t.Fatal(err)
}
ctx := context.Background()
if err := server.Start(ctx); err != nil {
if err := server.Start(ctx, script, string(data)); err != nil {
t.Fatalf("%+v", errors.WithStack(err))
}
@ -59,24 +58,24 @@ func TestCastModuleRefreshDevices(t *testing.T) {
return
}
if testing.Verbose() {
logger.SetLevel(slog.LevelDebug)
}
server := app.NewServer(
module.ConsoleModuleFactory(),
CastModuleFactory(),
)
data, err := ioutil.ReadFile("testdata/refresh_devices.js")
script := "testdata/refresh_devices.js"
data, err := os.ReadFile(script)
if err != nil {
t.Fatal(err)
}
if err := server.Load("testdata/refresh_devices.js", string(data)); err != nil {
t.Fatal(err)
}
ctx := context.Background()
if err := server.Start(ctx); err != nil {
if err := server.Start(ctx, script, string(data)); err != nil {
t.Fatalf("%+v", errors.WithStack(err))
}
@ -87,12 +86,5 @@ func TestCastModuleRefreshDevices(t *testing.T) {
t.Error(errors.WithStack(err))
}
promise, ok := app.IsPromise(result)
if !ok {
t.Fatal("expected promise")
}
value := server.WaitForPromise(promise)
spew.Dump(value.Export())
spew.Dump(result)
}

View File

@ -0,0 +1,38 @@
package fetch
import (
"context"
"net/url"
"forge.cadoles.com/arcad/edge/pkg/bus"
)
const (
AddressFetchRequest bus.Address = "module/fetch/request"
AddressFetchResponse bus.Address = "module/fetch/response"
)
type FetchRequest struct {
Context context.Context
RequestID string
URL *url.URL
RemoteAddr string
}
func NewFetchRequestEnvelope(ctx context.Context, remoteAddr string, url *url.URL) bus.Envelope {
return bus.NewEnvelope(AddressFetchRequest, &FetchRequest{
Context: ctx,
URL: url,
RemoteAddr: remoteAddr,
})
}
type FetchResponse struct {
Allow bool
}
func NewFetchResponseEnvelope(allow bool) bus.Envelope {
return bus.NewEnvelope(AddressFetchResponse, &FetchResponse{
Allow: allow,
})
}

View File

@ -1,49 +0,0 @@
package fetch
import (
"context"
"net/url"
"forge.cadoles.com/arcad/edge/pkg/bus"
"github.com/oklog/ulid/v2"
)
const (
MessageNamespaceFetchRequest bus.MessageNamespace = "fetchRequest"
MessageNamespaceFetchResponse bus.MessageNamespace = "fetchResponse"
)
type MessageFetchRequest struct {
Context context.Context
RequestID string
URL *url.URL
RemoteAddr string
}
func (m *MessageFetchRequest) MessageNamespace() bus.MessageNamespace {
return MessageNamespaceFetchRequest
}
func NewMessageFetchRequest(ctx context.Context, remoteAddr string, url *url.URL) *MessageFetchRequest {
return &MessageFetchRequest{
Context: ctx,
RequestID: ulid.Make().String(),
RemoteAddr: remoteAddr,
URL: url,
}
}
type MessageFetchResponse struct {
RequestID string
Allow bool
}
func (m *MessageFetchResponse) MessageNamespace() bus.MessageNamespace {
return MessageNamespaceFetchResponse
}
func NewMessageFetchResponse(requestID string) *MessageFetchResponse {
return &MessageFetchResponse{
RequestID: requestID,
}
}

View File

@ -1,60 +1,67 @@
package http
package fetch
import (
"io"
"net/http"
"net/url"
"forge.cadoles.com/arcad/edge/pkg/module"
"forge.cadoles.com/arcad/edge/pkg/module/fetch"
edgehttp "forge.cadoles.com/arcad/edge/pkg/http"
"github.com/go-chi/chi/v5"
"github.com/pkg/errors"
"gitlab.com/wpetit/goweb/logger"
)
func (h *Handler) handleAppFetch(w http.ResponseWriter, r *http.Request) {
h.mutex.RLock()
defer h.mutex.RUnlock()
func Mount() func(r chi.Router) {
return func(r chi.Router) {
r.Get("/api/v1/fetch", handleAppFetch)
}
}
func handleAppFetch(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
ctx = module.WithContext(ctx, map[module.ContextKey]any{
ContextKeyOriginRequest: r,
})
rawURL := r.URL.Query().Get("url")
url, err := url.Parse(rawURL)
if err != nil {
jsonError(w, http.StatusBadRequest, errorCodeBadRequest)
edgehttp.JSONError(w, http.StatusBadRequest, edgehttp.ErrCodeBadRequest)
return
}
requestMsg := fetch.NewMessageFetchRequest(ctx, r.RemoteAddr, url)
requestMsg := NewFetchRequestEnvelope(ctx, r.RemoteAddr, url)
reply, err := h.bus.Request(ctx, requestMsg)
bus, ok := edgehttp.ContextBus(ctx)
if !ok {
logger.Error(ctx, "could find bus on context")
edgehttp.JSONError(w, http.StatusInternalServerError, edgehttp.ErrCodeInternalError)
return
}
reply, err := bus.Request(ctx, requestMsg)
if err != nil {
logger.Error(ctx, "could not retrieve fetch request reply", logger.CapturedE(errors.WithStack(err)))
jsonError(w, http.StatusInternalServerError, errorCodeInternalError)
edgehttp.JSONError(w, http.StatusInternalServerError, edgehttp.ErrCodeInternalError)
return
}
logger.Debug(ctx, "fetch reply", logger.F("reply", reply))
responseMsg, ok := reply.(*fetch.MessageFetchResponse)
responseMsg, ok := reply.Message().(*FetchResponse)
if !ok {
logger.Error(
ctx, "unexpected fetch response message",
logger.F("message", reply),
)
jsonError(w, http.StatusInternalServerError, errorCodeInternalError)
edgehttp.JSONError(w, http.StatusInternalServerError, edgehttp.ErrCodeInternalError)
return
}
if !responseMsg.Allow {
jsonError(w, http.StatusForbidden, errorCodeForbidden)
edgehttp.JSONError(w, http.StatusForbidden, edgehttp.ErrCodeForbidden)
return
}
@ -65,7 +72,7 @@ func (h *Handler) handleAppFetch(w http.ResponseWriter, r *http.Request) {
ctx, "could not create proxy request",
logger.CapturedE(errors.WithStack(err)),
)
jsonError(w, http.StatusInternalServerError, errorCodeInternalError)
edgehttp.JSONError(w, http.StatusInternalServerError, edgehttp.ErrCodeInternalError)
return
}
@ -78,13 +85,21 @@ func (h *Handler) handleAppFetch(w http.ResponseWriter, r *http.Request) {
proxyReq.Header.Add("X-Forwarded-From", r.RemoteAddr)
res, err := h.httpClient.Do(proxyReq)
httpClient, ok := edgehttp.ContextHTTPClient(ctx)
if !ok {
logger.Error(ctx, "could find http client on context")
edgehttp.JSONError(w, http.StatusInternalServerError, edgehttp.ErrCodeInternalError)
return
}
res, err := httpClient.Do(proxyReq)
if err != nil {
logger.Error(
ctx, "could not execute proxy request",
logger.CapturedE(errors.WithStack(err)),
)
jsonError(w, http.StatusInternalServerError, errorCodeInternalError)
edgehttp.JSONError(w, http.StatusInternalServerError, edgehttp.ErrCodeInternalError)
return
}

View File

@ -40,10 +40,10 @@ func (m *Module) get(call goja.FunctionCall, rt *goja.Runtime) goja.Value {
func (m *Module) handleMessages() {
ctx := context.Background()
err := m.bus.Reply(ctx, MessageNamespaceFetchRequest, func(msg bus.Message) (bus.Message, error) {
fetchRequest, ok := msg.(*MessageFetchRequest)
fetchErrs := m.bus.Reply(ctx, AddressFetchRequest, func(env bus.Envelope) (any, error) {
fetchRequest, ok := env.Message().(*FetchRequest)
if !ok {
return nil, errors.Wrapf(bus.ErrUnexpectedMessage, "expected message fetch request, got '%T'", msg)
return nil, errors.Wrapf(bus.ErrUnexpectedMessage, "expected fetch request, got '%T'", env.Message())
}
res, err := m.handleFetchRequest(fetchRequest)
@ -57,13 +57,14 @@ func (m *Module) handleMessages() {
return res, nil
})
if err != nil {
panic(errors.WithStack(err))
for err := range fetchErrs {
logger.Fatal(ctx, "error while replying to fetch requests", logger.CapturedE(errors.WithStack(err)))
}
}
func (m *Module) handleFetchRequest(req *MessageFetchRequest) (*MessageFetchResponse, error) {
res := NewMessageFetchResponse(req.RequestID)
func (m *Module) handleFetchRequest(req *FetchRequest) (*FetchResponse, error) {
res := &FetchResponse{}
ctx := logger.With(
req.Context,
@ -83,11 +84,11 @@ func (m *Module) handleFetchRequest(req *MessageFetchRequest) (*MessageFetchResp
return nil, errors.WithStack(err)
}
result, ok := rawResult.Export().(map[string]interface{})
result, ok := rawResult.(map[string]interface{})
if !ok {
return nil, errors.Errorf(
"unexpected onClientFetch result: expected 'map[string]interface{}', got '%T'",
rawResult.Export(),
rawResult,
)
}

View File

@ -2,8 +2,8 @@ package fetch
import (
"context"
"io/ioutil"
"net/url"
"os"
"testing"
"time"
@ -18,7 +18,9 @@ import (
func TestFetchModule(t *testing.T) {
t.Parallel()
if testing.Verbose() {
logger.SetLevel(slog.LevelDebug)
}
bus := memory.NewBus()
@ -28,22 +30,20 @@ func TestFetchModule(t *testing.T) {
ModuleFactory(bus),
)
data, err := ioutil.ReadFile("testdata/fetch.js")
path := "testdata/fetch.js"
data, err := os.ReadFile(path)
if err != nil {
t.Fatalf("%+v", errors.WithStack(err))
}
if err := server.Load("testdata/fetch.js", string(data)); err != nil {
ctx := context.Background()
if err := server.Start(ctx, path, string(data)); err != nil {
t.Fatalf("%+v", errors.WithStack(err))
}
defer server.Stop()
ctx := context.Background()
if err := server.Start(ctx); err != nil {
t.Fatalf("%+v", errors.WithStack(err))
}
// Wait for module to startup
time.Sleep(1 * time.Second)
@ -53,33 +53,33 @@ func TestFetchModule(t *testing.T) {
remoteAddr := "127.0.0.1"
url, _ := url.Parse("http://example.com")
rawReply, err := bus.Request(ctx, NewMessageFetchRequest(ctx, remoteAddr, url))
reply, err := bus.Request(ctx, NewFetchRequestEnvelope(ctx, remoteAddr, url))
if err != nil {
t.Fatalf("%+v", errors.WithStack(err))
}
reply, ok := rawReply.(*MessageFetchResponse)
response, ok := reply.Message().(*FetchResponse)
if !ok {
t.Fatalf("unexpected reply type '%T'", rawReply)
t.Fatalf("unexpected reply message type '%T'", reply.Message())
}
if e, g := true, reply.Allow; e != g {
if e, g := true, response.Allow; e != g {
t.Errorf("reply.Allow: expected '%v', got '%v'", e, g)
}
url, _ = url.Parse("https://google.com")
rawReply, err = bus.Request(ctx, NewMessageFetchRequest(ctx, remoteAddr, url))
reply, err = bus.Request(ctx, NewFetchRequestEnvelope(ctx, remoteAddr, url))
if err != nil {
t.Fatalf("%+v", errors.WithStack(err))
}
reply, ok = rawReply.(*MessageFetchResponse)
response, ok = reply.Message().(*FetchResponse)
if !ok {
t.Fatalf("unexpected reply type '%T'", rawReply)
t.Fatalf("unexpected reply message type '%T'", reply.Message())
}
if e, g := false, reply.Allow; e != g {
if e, g := false, response.Allow; e != g {
t.Errorf("reply.Allow: expected '%v', got '%v'", e, g)
}
}

View File

@ -5,7 +5,6 @@ import (
"forge.cadoles.com/arcad/edge/pkg/app"
"github.com/dop251/goja"
"github.com/pkg/errors"
"gitlab.com/wpetit/goweb/logger"
)
@ -27,21 +26,20 @@ func (m *LifecycleModule) OnInit(ctx context.Context, rt *goja.Runtime) (err err
}
defer func() {
if recovered := recover(); recovered != nil {
revoveredErr, ok := recovered.(error)
if ok {
logger.Error(ctx, "recovered runtime error", logger.CapturedE(errors.WithStack(revoveredErr)))
err = errors.WithStack(app.ErrUnknownError)
recovered := recover()
if recovered == nil {
return
}
recoveredErr, ok := recovered.(error)
if !ok {
panic(recovered)
}
err = recoveredErr
}()
call(nil)
call(nil, rt.ToValue(ctx))
return nil
}

View File

@ -1,38 +0,0 @@
package module
import (
"context"
"forge.cadoles.com/arcad/edge/pkg/bus"
)
const (
MessageNamespaceClient bus.MessageNamespace = "client"
MessageNamespaceServer bus.MessageNamespace = "server"
)
type ServerMessage struct {
Context context.Context
Data interface{}
}
func (m *ServerMessage) MessageNamespace() bus.MessageNamespace {
return MessageNamespaceServer
}
func NewServerMessage(ctx context.Context, data interface{}) *ServerMessage {
return &ServerMessage{ctx, data}
}
type ClientMessage struct {
Context context.Context
Data map[string]interface{}
}
func (m *ClientMessage) MessageNamespace() bus.MessageNamespace {
return MessageNamespaceClient
}
func NewClientMessage(ctx context.Context, data map[string]interface{}) *ClientMessage {
return &ClientMessage{ctx, data}
}

View File

@ -5,8 +5,7 @@ import (
"forge.cadoles.com/arcad/edge/pkg/app"
"forge.cadoles.com/arcad/edge/pkg/bus"
edgeHTTP "forge.cadoles.com/arcad/edge/pkg/http"
"forge.cadoles.com/arcad/edge/pkg/module"
edgehttp "forge.cadoles.com/arcad/edge/pkg/http"
"forge.cadoles.com/arcad/edge/pkg/module/util"
"github.com/dop251/goja"
"github.com/pkg/errors"
@ -38,10 +37,9 @@ func (m *Module) broadcast(call goja.FunctionCall, rt *goja.Runtime) goja.Value
}
data := call.Argument(0).Export()
ctx := context.Background()
msg := module.NewServerMessage(ctx, data)
if err := m.bus.Publish(ctx, msg); err != nil {
env := edgehttp.NewOutgoingMessageEnvelope("", data)
if err := m.bus.Publish(env); err != nil {
panic(rt.ToValue(errors.WithStack(err)))
}
@ -53,38 +51,36 @@ func (m *Module) send(call goja.FunctionCall, rt *goja.Runtime) goja.Value {
panic(rt.ToValue(errors.New("invalid number of argument")))
}
var ctx context.Context
firstArg := call.Argument(0)
sessionID, ok := firstArg.Export().(string)
if ok {
ctx = module.WithContext(context.Background(), map[module.ContextKey]any{
edgeHTTP.ContextKeySessionID: sessionID,
})
} else {
ctx = util.AssertContext(firstArg, rt)
if !ok {
ctx := util.AssertContext(firstArg, rt)
sessionID, ok = edgehttp.ContextSessionID(ctx)
if !ok {
panic(rt.ToValue(errors.New("could not find session id in context")))
}
}
data := call.Argument(1).Export()
msg := module.NewServerMessage(ctx, data)
if err := m.bus.Publish(ctx, msg); err != nil {
env := edgehttp.NewOutgoingMessageEnvelope(sessionID, data)
if err := m.bus.Publish(env); err != nil {
panic(rt.ToValue(errors.WithStack(err)))
}
return nil
}
func (m *Module) handleClientMessages() {
func (m *Module) handleIncomingMessages() {
ctx := context.Background()
logger.Debug(
ctx,
"subscribing to bus messages",
"subscribing to bus envelopes",
)
clientMessages, err := m.bus.Subscribe(ctx, module.MessageNamespaceClient)
envelopes, err := m.bus.Subscribe(ctx, edgehttp.AddressIncomingMessage)
if err != nil {
panic(errors.WithStack(err))
}
@ -92,16 +88,16 @@ func (m *Module) handleClientMessages() {
defer func() {
logger.Debug(
ctx,
"unsubscribing from bus messages",
"unsubscribing from bus envelopes",
)
m.bus.Unsubscribe(ctx, module.MessageNamespaceClient, clientMessages)
m.bus.Unsubscribe(edgehttp.AddressIncomingMessage, envelopes)
}()
for {
logger.Debug(
ctx,
"waiting for next message",
"waiting for next envelope",
)
select {
case <-ctx.Done():
@ -112,13 +108,13 @@ func (m *Module) handleClientMessages() {
return
case msg := <-clientMessages:
clientMessage, ok := msg.(*module.ClientMessage)
case env := <-envelopes:
incomingMessage, ok := env.Message().(*edgehttp.IncomingMessage)
if !ok {
logger.Error(
logger.Warn(
ctx,
"unexpected message type",
logger.F("message", msg),
logger.F("message", env.Message()),
)
continue
@ -126,11 +122,11 @@ func (m *Module) handleClientMessages() {
logger.Debug(
ctx,
"received client message",
logger.F("message", clientMessage),
"received incoming message",
logger.F("message", incomingMessage),
)
if _, err := m.server.ExecFuncByName(clientMessage.Context, "onClientMessage", clientMessage.Context, clientMessage.Data); err != nil {
if _, err := m.server.ExecFuncByName(incomingMessage.Context, "onClientMessage", incomingMessage.Context, incomingMessage.Payload); err != nil {
if errors.Is(err, app.ErrFuncDoesNotExist) {
continue
}
@ -152,7 +148,7 @@ func ModuleFactory(bus bus.Bus) app.ServerModuleFactory {
bus: bus,
}
go module.handleClientMessages()
go module.handleIncomingMessages()
return module
}

View File

@ -1,278 +0,0 @@
package module
import (
"context"
"fmt"
"sync"
"forge.cadoles.com/arcad/edge/pkg/app"
"forge.cadoles.com/arcad/edge/pkg/bus"
"forge.cadoles.com/arcad/edge/pkg/module/util"
"github.com/dop251/goja"
"github.com/pkg/errors"
"gitlab.com/wpetit/goweb/logger"
)
type RPCRequest struct {
Method string
Params interface{}
ID interface{}
}
type RPCError struct {
Code int `json:"code"`
Message string `json:"message"`
Data interface{} `json:"data"`
}
type RPCResponse struct {
Result interface{}
Error *RPCError
ID interface{}
}
type RPCModule struct {
server *app.Server
bus bus.Bus
callbacks sync.Map
}
func (m *RPCModule) Name() string {
return "rpc"
}
func (m *RPCModule) Export(export *goja.Object) {
if err := export.Set("register", m.register); err != nil {
panic(errors.Wrap(err, "could not set 'register' function"))
}
if err := export.Set("unregister", m.unregister); err != nil {
panic(errors.Wrap(err, "could not set 'unregister' function"))
}
}
func (m *RPCModule) OnInit(ctx context.Context, rt *goja.Runtime) error {
go m.handleMessages(ctx)
return nil
}
func (m *RPCModule) register(call goja.FunctionCall, rt *goja.Runtime) goja.Value {
fnName := util.AssertString(call.Argument(0), rt)
var (
callable goja.Callable
ok bool
)
if len(call.Arguments) > 1 {
callable, ok = goja.AssertFunction(call.Argument(1))
} else {
callable, ok = goja.AssertFunction(rt.Get(fnName))
}
if !ok {
panic(rt.NewTypeError("method should be a valid function"))
}
ctx := context.Background()
logger.Debug(ctx, "registering method", logger.F("method", fnName))
m.callbacks.Store(fnName, callable)
return nil
}
func (m *RPCModule) unregister(call goja.FunctionCall, rt *goja.Runtime) goja.Value {
fnName := util.AssertString(call.Argument(0), rt)
m.callbacks.Delete(fnName)
return nil
}
func (m *RPCModule) handleMessages(ctx context.Context) {
clientMessages, err := m.bus.Subscribe(ctx, MessageNamespaceClient)
if err != nil {
panic(errors.WithStack(err))
}
defer func() {
m.bus.Unsubscribe(ctx, MessageNamespaceClient, clientMessages)
}()
sendRes := func(ctx context.Context, req *RPCRequest, result goja.Value) {
res := &RPCResponse{
ID: req.ID,
Result: result.Export(),
}
logger.Debug(ctx, "sending rpc response", logger.F("response", res))
if err := m.sendResponse(ctx, res); err != nil {
logger.Error(
ctx, "could not send response",
logger.CapturedE(errors.WithStack(err)),
logger.F("response", res),
logger.F("request", req),
)
}
}
for msg := range clientMessages {
go m.handleMessage(ctx, msg, sendRes)
}
}
func (m *RPCModule) handleMessage(ctx context.Context, msg bus.Message, sendRes func(ctx context.Context, req *RPCRequest, result goja.Value)) {
clientMessage, ok := msg.(*ClientMessage)
if !ok {
logger.Warn(ctx, "unexpected bus message", logger.F("message", msg))
return
}
ok, req := m.isRPCRequest(clientMessage)
if !ok {
return
}
logger.Debug(ctx, "received rpc request", logger.F("request", req))
rawCallable, exists := m.callbacks.Load(req.Method)
if !exists {
logger.Debug(ctx, "method not found", logger.F("req", req))
if err := m.sendMethodNotFoundResponse(clientMessage.Context, req); err != nil {
logger.Error(
ctx, "could not send method not found response",
logger.CapturedE(errors.WithStack(err)),
logger.F("request", req),
)
}
return
}
callable, ok := rawCallable.(goja.Callable)
if !ok {
logger.Debug(ctx, "invalid method", logger.F("req", req))
if err := m.sendMethodNotFoundResponse(clientMessage.Context, req); err != nil {
logger.Error(
ctx, "could not send method not found response",
logger.CapturedE(errors.WithStack(err)),
logger.F("request", req),
)
}
return
}
result, err := m.server.Exec(clientMessage.Context, callable, clientMessage.Context, req.Params)
if err != nil {
logger.Error(
ctx, "rpc call error",
logger.CapturedE(errors.WithStack(err)),
logger.F("request", req),
)
if err := m.sendErrorResponse(clientMessage.Context, req, err); err != nil {
logger.Error(
ctx, "could not send error response",
logger.CapturedE(errors.WithStack(err)),
logger.F("originalError", err),
logger.F("request", req),
)
}
return
}
promise, ok := app.IsPromise(result)
if ok {
go func(ctx context.Context, req *RPCRequest, promise *goja.Promise) {
result := m.server.WaitForPromise(promise)
sendRes(ctx, req, result)
}(clientMessage.Context, req, promise)
} else {
sendRes(clientMessage.Context, req, result)
}
}
func (m *RPCModule) sendErrorResponse(ctx context.Context, req *RPCRequest, err error) error {
return m.sendResponse(ctx, &RPCResponse{
ID: req.ID,
Result: nil,
Error: &RPCError{
Code: -32603,
Message: err.Error(),
},
})
}
func (m *RPCModule) sendMethodNotFoundResponse(ctx context.Context, req *RPCRequest) error {
return m.sendResponse(ctx, &RPCResponse{
ID: req.ID,
Result: nil,
Error: &RPCError{
Code: -32601,
Message: fmt.Sprintf("method not found"),
},
})
}
func (m *RPCModule) sendResponse(ctx context.Context, res *RPCResponse) error {
msg := NewServerMessage(ctx, map[string]interface{}{
"jsonrpc": "2.0",
"id": res.ID,
"error": res.Error,
"result": res.Result,
})
if err := m.bus.Publish(ctx, msg); err != nil {
return errors.WithStack(err)
}
return nil
}
func (m *RPCModule) isRPCRequest(msg *ClientMessage) (bool, *RPCRequest) {
jsonRPC, exists := msg.Data["jsonrpc"]
if !exists || jsonRPC != "2.0" {
return false, nil
}
rawMethod, exists := msg.Data["method"]
if !exists {
return false, nil
}
method, ok := rawMethod.(string)
if !ok {
return false, nil
}
id := msg.Data["id"]
params := msg.Data["params"]
return true, &RPCRequest{
ID: id,
Method: method,
Params: params,
}
}
func RPCModuleFactory(bus bus.Bus) app.ServerModuleFactory {
return func(server *app.Server) app.ServerModule {
mod := &RPCModule{
server: server,
bus: bus,
}
return mod
}
}
var _ app.InitializableModule = &RPCModule{}

View File

@ -0,0 +1,21 @@
package rpc
import (
"context"
"forge.cadoles.com/arcad/edge/pkg/bus"
)
const (
Address bus.Address = "module/rpc"
)
type Request struct {
Context context.Context
Method string
Params any
}
func NewRequestEnvelope(ctx context.Context, method string, params any) bus.Envelope {
return bus.NewEnvelope(Address, &Request{ctx, method, params})
}

7
pkg/module/rpc/error.go Normal file
View File

@ -0,0 +1,7 @@
package rpc
import "errors"
var (
ErrMethodNotFound = errors.New("method not found")
)

19
pkg/module/rpc/jsonrpc.go Normal file
View File

@ -0,0 +1,19 @@
package rpc
import "fmt"
type JSONRPCRequest struct {
ID any
Method string
Params any
}
type JSONRPCError struct {
Code int `json:"code"`
Message string `json:"message"`
Data interface{} `json:"data"`
}
func (e *JSONRPCError) Error() string {
return fmt.Sprintf("json-rpc error: %d - %s", e.Code, e.Message)
}

260
pkg/module/rpc/module.go Normal file
View File

@ -0,0 +1,260 @@
package rpc
import (
"context"
"sync"
"forge.cadoles.com/arcad/edge/pkg/app"
"forge.cadoles.com/arcad/edge/pkg/bus"
edgehttp "forge.cadoles.com/arcad/edge/pkg/http"
"forge.cadoles.com/arcad/edge/pkg/module/util"
"github.com/dop251/goja"
"github.com/pkg/errors"
"gitlab.com/wpetit/goweb/logger"
)
type Module struct {
server *app.Server
bus bus.Bus
callbacks sync.Map
}
func (m *Module) Name() string {
return "rpc"
}
func (m *Module) Export(export *goja.Object) {
if err := export.Set("register", m.register); err != nil {
panic(errors.Wrap(err, "could not set 'register' function"))
}
if err := export.Set("unregister", m.unregister); err != nil {
panic(errors.Wrap(err, "could not set 'unregister' function"))
}
}
func (m *Module) OnInit(ctx context.Context, rt *goja.Runtime) error {
requestErrs := m.bus.Reply(ctx, Address, m.handleRequest)
go func() {
for err := range requestErrs {
logger.Error(ctx, "error while replying to rpc requests", logger.CapturedE(errors.WithStack(err)))
}
}()
httpIncomingMessages, err := m.bus.Subscribe(ctx, edgehttp.AddressIncomingMessage)
if err != nil {
return errors.WithStack(err)
}
go m.handleIncomingHTTPMessages(ctx, httpIncomingMessages)
return nil
}
func (m *Module) register(call goja.FunctionCall, rt *goja.Runtime) goja.Value {
fnName := util.AssertString(call.Argument(0), rt)
var (
callable goja.Callable
ok bool
)
if len(call.Arguments) > 1 {
callable, ok = goja.AssertFunction(call.Argument(1))
} else {
callable, ok = goja.AssertFunction(rt.Get(fnName))
}
if !ok {
panic(rt.NewTypeError("method should be a valid function"))
}
ctx := context.Background()
logger.Debug(ctx, "registering method", logger.F("method", fnName))
m.callbacks.Store(fnName, callable)
return nil
}
func (m *Module) unregister(call goja.FunctionCall, rt *goja.Runtime) goja.Value {
fnName := util.AssertString(call.Argument(0), rt)
m.callbacks.Delete(fnName)
return nil
}
func (m *Module) handleRequest(env bus.Envelope) (any, error) {
request, ok := env.Message().(*Request)
if !ok {
logger.Warn(context.Background(), "unexpected bus message", logger.F("message", env.Message()))
return nil, errors.WithStack(bus.ErrUnexpectedMessage)
}
ctx := logger.With(request.Context, logger.F("request", request))
logger.Debug(ctx, "received rpc request")
rawCallable, exists := m.callbacks.Load(request.Method)
if !exists {
logger.Debug(ctx, "method not found")
return nil, errors.WithStack(ErrMethodNotFound)
}
callable, ok := rawCallable.(goja.Callable)
if !ok {
logger.Debug(ctx, "invalid method")
return nil, errors.WithStack(ErrMethodNotFound)
}
result, err := m.server.Exec(ctx, callable, request.Context, request.Params)
if err != nil {
logger.Error(
ctx, "rpc call error",
logger.CapturedE(errors.WithStack(err)),
)
return nil, errors.WithStack(err)
}
return result, nil
}
func (m *Module) handleIncomingHTTPMessages(ctx context.Context, incoming <-chan bus.Envelope) {
defer func() {
m.bus.Unsubscribe(edgehttp.AddressIncomingMessage, incoming)
}()
for env := range incoming {
msg, ok := env.Message().(*edgehttp.IncomingMessage)
if !ok {
logger.Error(ctx, "unexpected incoming http message type", logger.F("message", env.Message()))
continue
}
jsonReq, ok := m.isRPCRequest(msg.Payload)
if !ok {
continue
}
sessionID, ok := edgehttp.ContextSessionID(msg.Context)
if !ok {
logger.Error(ctx, "could not find session id in context")
continue
}
request := NewRequestEnvelope(msg.Context, jsonReq.Method, jsonReq.Params)
requestCtx := logger.With(msg.Context, logger.F("rpcRequestMethod", jsonReq.Method), logger.F("rpcRequestID", jsonReq.ID))
reply, err := m.bus.Request(requestCtx, request)
if err != nil {
err = errors.WithStack(err)
logger.Error(
ctx, "could not execute rpc request",
logger.CapturedE(err),
)
if errors.Is(err, ErrMethodNotFound) {
if err := m.sendMethodNotFoundResponse(sessionID, jsonReq.ID); err != nil {
logger.Error(
ctx, "could not send json rpc error response",
logger.CapturedE(errors.WithStack(err)),
)
}
continue
}
if err := m.sendErrorResponse(sessionID, jsonReq.ID, err); err != nil {
logger.Error(
ctx, "could not send json rpc error response",
logger.CapturedE(errors.WithStack(err)),
)
}
continue
}
if err := m.sendResponse(sessionID, jsonReq.ID, reply.Message(), nil); err != nil {
logger.Error(
ctx, "could not send json rpc result response",
logger.CapturedE(err),
)
}
}
}
func (m *Module) sendErrorResponse(sessionID string, requestID any, err error) error {
return m.sendResponse(sessionID, requestID, nil, &JSONRPCError{
Code: -32603,
Message: err.Error(),
})
}
func (m *Module) sendMethodNotFoundResponse(sessionID string, requestID any) error {
return m.sendResponse(sessionID, requestID, nil, &JSONRPCError{
Code: -32601,
Message: "method not found",
})
}
func (m *Module) sendResponse(sessionID string, requestID any, result any, err error) error {
env := edgehttp.NewOutgoingMessageEnvelope(sessionID, map[string]interface{}{
"jsonrpc": "2.0",
"id": requestID,
"error": err,
"result": result,
})
if err := m.bus.Publish(env); err != nil {
return errors.WithStack(err)
}
return nil
}
func (m *Module) isRPCRequest(payload map[string]any) (*JSONRPCRequest, bool) {
jsonRPC, exists := payload["jsonrpc"]
if !exists || jsonRPC != "2.0" {
return nil, false
}
rawMethod, exists := payload["method"]
if !exists {
return nil, false
}
method, ok := rawMethod.(string)
if !ok {
return nil, false
}
id := payload["id"]
params := payload["params"]
return &JSONRPCRequest{
ID: id,
Method: method,
Params: params,
}, true
}
func ModuleFactory(bus bus.Bus) app.ServerModuleFactory {
return func(server *app.Server) app.ServerModule {
mod := &Module{
server: server,
bus: bus,
}
return mod
}
}
var _ app.InitializableModule = &Module{}

View File

@ -0,0 +1,109 @@
package rpc
import (
"context"
"os"
"sync"
"testing"
"forge.cadoles.com/arcad/edge/pkg/app"
"forge.cadoles.com/arcad/edge/pkg/bus"
"forge.cadoles.com/arcad/edge/pkg/bus/memory"
"forge.cadoles.com/arcad/edge/pkg/module"
"github.com/pkg/errors"
"gitlab.com/wpetit/goweb/logger"
)
func TestServerExecDeadlock(t *testing.T) {
if testing.Verbose() {
logger.SetLevel(logger.LevelDebug)
}
b := memory.NewBus(memory.WithBufferSize(1))
server := app.NewServer(
module.ConsoleModuleFactory(),
ModuleFactory(b),
module.LifecycleModuleFactory(),
)
data, err := os.ReadFile("testdata/deadlock.js")
if err != nil {
t.Fatalf("%+v", errors.WithStack(err))
}
ctx := context.Background()
t.Log("starting server")
if err := server.Start(ctx, "deadlock.js", string(data)); err != nil {
t.Fatalf("%+v", errors.WithStack(err))
}
defer server.Stop()
t.Log("server started")
count := 100
delay := 100
var wg sync.WaitGroup
wg.Add(count)
for i := 0; i < count; i++ {
go func(i int) {
defer wg.Done()
t.Logf("calling %d", i)
isCanceled := i%2 == 0
var ctx context.Context
if isCanceled {
canceledCtx, cancel := context.WithCancel(context.Background())
cancel()
ctx = canceledCtx
} else {
ctx = context.Background()
}
env := NewRequestEnvelope(ctx, "doSomethingLong", map[string]any{
"i": i,
"delay": delay,
})
t.Logf("publishing envelope #%d", i)
reply, err := b.Request(ctx, env)
if err != nil {
if errors.Is(err, context.Canceled) && isCanceled {
return
}
if errors.Is(err, bus.ErrNoResponse) && isCanceled {
return
}
t.Errorf("%+v", errors.WithStack(err))
return
}
result, ok := reply.Message().(int64)
if !ok {
t.Errorf("response.Result: expected type '%T', got '%T'", int64(0), reply.Message())
return
}
if e, g := i, int(result); e != g {
t.Errorf("response.Result: expected '%v', got '%v'", e, g)
return
}
}(i)
}
wg.Wait()
}

14
pkg/module/rpc/testdata/deadlock.js vendored Normal file
View File

@ -0,0 +1,14 @@
function onInit() {
rpc.register("doSomethingLong", doSomethingLong)
}
function doSomethingLong(ctx, params) {
var start = Date.now()
while (true) {
var now = Date.now()
if (now - start >= params.delay) break
}
return params.i;
}

View File

@ -33,18 +33,14 @@ func TestModule(t *testing.T) {
t.Fatalf("%+v", errors.WithStack(err))
}
if err := server.Load("testdata/share.js", string(data)); err != nil {
ctx := context.Background()
if err := server.Start(ctx, "testdata/share.js", string(data)); err != nil {
t.Fatalf("%+v", errors.WithStack(err))
}
ctx := context.Background()
if err := server.Start(ctx); err != nil {
t.Fatalf("%+v", errors.WithStack(err))
}
defer server.Stop()
if _, err := server.ExecFuncByName(context.Background(), "testModule"); err != nil {
t.Fatalf("%+v", errors.WithStack(err))
}
server.Stop()
}

View File

@ -27,18 +27,14 @@ func TestStoreModule(t *testing.T) {
t.Fatalf("%+v", errors.WithStack(err))
}
if err := server.Load("testdata/store.js", string(data)); err != nil {
ctx := context.Background()
if err := server.Start(ctx, "testdata/store.js", string(data)); err != nil {
t.Fatalf("%+v", errors.WithStack(err))
}
ctx := context.Background()
if err := server.Start(ctx); err != nil {
t.Fatalf("%+v", errors.WithStack(err))
}
defer server.Stop()
if _, err := server.ExecFuncByName(context.Background(), "testStore"); err != nil {
t.Fatalf("%+v", errors.WithStack(err))
}
server.Stop()
}

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@ -5,5 +5,6 @@ import LoginIcon from './login.svg';
import HomeIcon from './home.svg';
import LinkIcon from './link.svg';
import LogoutIcon from './logout.svg';
import LoaderIcon from './loader.svg';
export { UserCircleIcon, MenuIcon, CloudIcon, LoginIcon, HomeIcon, LinkIcon, LogoutIcon }
export { LoaderIcon, UserCircleIcon, MenuIcon, CloudIcon, LoginIcon, HomeIcon, LinkIcon, LogoutIcon }

View File

@ -0,0 +1 @@
<svg viewBox="0 0 24 24" xmlns="http://www.w3.org/2000/svg" fill="#000000"><g id="SVGRepo_bgCarrier" stroke-width="0"></g><g id="SVGRepo_tracerCarrier" stroke-linecap="round" stroke-linejoin="round"></g><g id="SVGRepo_iconCarrier"> <g> <path fill="none" d="M0 0h24v24H0z"></path> <path d="M12 2a1 1 0 0 1 1 1v3a1 1 0 0 1-2 0V3a1 1 0 0 1 1-1zm0 15a1 1 0 0 1 1 1v3a1 1 0 0 1-2 0v-3a1 1 0 0 1 1-1zm8.66-10a1 1 0 0 1-.366 1.366l-2.598 1.5a1 1 0 1 1-1-1.732l2.598-1.5A1 1 0 0 1 20.66 7zM7.67 14.5a1 1 0 0 1-.366 1.366l-2.598 1.5a1 1 0 1 1-1-1.732l2.598-1.5a1 1 0 0 1 1.366.366zM20.66 17a1 1 0 0 1-1.366.366l-2.598-1.5a1 1 0 0 1 1-1.732l2.598 1.5A1 1 0 0 1 20.66 17zM7.67 9.5a1 1 0 0 1-1.366.366l-2.598-1.5a1 1 0 1 1 1-1.732l2.598 1.5A1 1 0 0 1 7.67 9.5z"></path> </g> </g></svg>


View File

@ -0,0 +1,57 @@
import { LitElement, html, css } from 'lit';
import { LoaderIcon } from './icons';
export class Loader extends LitElement {
static styles = css`
:host {
display: inline-block;
height: 100%;
width: 100%;
border-bottom: 1px solid rgb(229,231,235);
border-top: 10px solid transparent;
background-color: #fff;
min-height: 50px;
padding: 10px 0;
}
.container {
display: flex;
align-items: center;
flex-direction: column;
justify-content: center;
font-family: Arial, Helvetica Neue, Helvetica, sans-serif;
font-size: 14px;
color: black;
}
.icon {
height: 35px;
animation-duration: 3s;
animation-name: spin;
animation-iteration-count: infinite;
}
@keyframes spin {
from {
transform: rotateZ(0deg);
}
to {
transform: rotateZ(360deg);
}
}
`;
constructor() {
super();
}
render() {
return html`
<div class="container">
<img class="icon" src="${LoaderIcon}" />
Chargement en cours
</div>
`
}
}

View File

@ -49,6 +49,9 @@ export class Menu extends LitElement {
@property()
_profile: Profile
@property()
_loading: boolean = false
static styles = css`
:host {
position: fixed;
@ -95,6 +98,7 @@ export class Menu extends LitElement {
}
_fetchApps() {
this._loading = true;
return fetch(`${BASE_API_URL}/apps`)
.then(res => res.json())
.then(result => {
@ -130,9 +134,14 @@ export class Menu extends LitElement {
return Promise.all(promises);
})
.then((manifests: Manifest[]) => {
+this._loading = false
this._apps = manifests;
})
-.catch(err => console.error(err))
+.catch(err => {
+console.error(err);
+this._loading = false;
+})
}
_fetchProfile() {
@ -158,7 +167,11 @@ export class Menu extends LitElement {
}
_renderApps() {
-const apps = this._apps
+let apps;
+if (this._loading) {
+apps = [ html`<edge-loader></edge-loader>` ]
+} else {
+apps = this._apps
.filter(manifest => this._canAccess(manifest))
.map(manifest => {
const iconUrl = ( ( manifest.url || '') + ( manifest.metadata?.paths?.icon || '' ) ) || LinkIcon;
@ -171,6 +184,7 @@ export class Menu extends LitElement {
</edge-menu-sub-item>
`
});
+}
return html`
<edge-menu-item name='apps' label='Apps' icon-url='${CloudIcon}'>

View File

@ -6,10 +6,12 @@ import { MenuItem as MenuItemElement } from './components/menu-item.js';
import { MenuSubItem as MenuSubItemElement } from './components/menu-sub-item.js';
import { CrossFrameMessenger } from './crossframe-messenger.js';
import { MenuManager } from './menu-manager.js';
import { Loader } from './components/loader';
customElements.define('edge-menu', MenuElement);
customElements.define('edge-menu-item', MenuItemElement);
customElements.define('edge-menu-sub-item', MenuSubItemElement);
customElements.define('edge-loader', Loader);
export const Client = new EdgeClient();
export const Frame = new CrossFrameMessenger();

View File

@ -9,12 +9,16 @@ import (
"github.com/oklog/ulid/v2"
)
-var ErrDocumentNotFound = errors.New("document not found")
+var (
+ErrDocumentNotFound = errors.New("document not found")
+ErrDocumentRevisionConflict = errors.New("document revision conflict")
+)
type DocumentID string
const (
DocumentAttrID = "_id"
+DocumentAttrRevision = "_revision"
DocumentAttrCreatedAt = "_createdAt"
DocumentAttrUpdatedAt = "_updatedAt"
)
@ -44,6 +48,20 @@ func (d Document) ID() (DocumentID, bool) {
return "", false
}
func (d Document) Revision() (int, bool) {
rawRevision, exists := d[DocumentAttrRevision]
if !exists {
return 0, false
}
revision, ok := rawRevision.(int)
if ok {
return revision, true
}
return 0, false
}
func (d Document) CreatedAt() (time.Time, bool) {
return d.timeAttr(DocumentAttrCreatedAt)
}
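A minimal sketch of reading the new revision attribute. Only the ErrDocumentRevisionConflict sentinel and the Revision() accessor are introduced in this hunk; how a store reacts to a stale revision on update is an assumption here.

```go
package main

import (
	"fmt"

	"forge.cadoles.com/arcad/edge/pkg/storage"
)

func main() {
	// Illustrative only: a document carrying the revision attribute.
	doc := storage.Document{
		storage.DocumentAttrRevision: 3,
	}

	if rev, ok := doc.Revision(); ok {
		// A store may compare this value on update and return
		// storage.ErrDocumentRevisionConflict on mismatch (assumed behaviour).
		fmt.Println("current revision:", rev)
	}
}
```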

View File

@ -23,7 +23,7 @@ func NewBlobStore(dsn string) (storage.BlobStore, error) {
factory, exists := blobStoreFactories[url.Scheme]
if !exists {
-return nil, errors.WithStack(ErrSchemeNotRegistered)
+return nil, errors.Wrapf(ErrSchemeNotRegistered, "no driver associated with scheme '%s'", url.Scheme)
}
store, err := factory(url)

235
pkg/storage/driver/cache/blob_bucket.go vendored Normal file
View File

@ -0,0 +1,235 @@
package cache
import (
"context"
"fmt"
"io"
"forge.cadoles.com/arcad/edge/pkg/storage"
"forge.cadoles.com/arcad/edge/pkg/storage/driver/cache/lfu"
"github.com/pkg/errors"
"gitlab.com/wpetit/goweb/logger"
)
type BlobBucket struct {
bucket storage.BlobBucket
blobCache *lfu.Cache[string, []byte]
bucketCache *lfu.Cache[string, storage.BlobBucket]
blobInfoCache *lfu.Cache[string, storage.BlobInfo]
}
// Close implements storage.BlobBucket.
func (b *BlobBucket) Close() error {
// Close only when bucket is evicted from cache
return nil
}
// Delete implements storage.BlobBucket.
func (b *BlobBucket) Delete(ctx context.Context, id storage.BlobID) error {
defer b.clearCache(ctx, id)
if err := b.bucket.Delete(ctx, id); err != nil {
return errors.WithStack(err)
}
return nil
}
// Get implements storage.BlobBucket.
func (b *BlobBucket) Get(ctx context.Context, id storage.BlobID) (storage.BlobInfo, error) {
key := b.getCacheKey(id)
blobInfo, err := b.blobInfoCache.Get(key)
if err != nil && !errors.Is(err, lfu.ErrNotFound) {
logger.Error(
ctx, "could not retrieve blob info from cache",
logger.F("cacheKey", key),
logger.CapturedE(errors.WithStack(err)),
)
}
if blobInfo != nil {
logger.Debug(
ctx, "found blob info in cache",
logger.F("cacheKey", key),
)
return blobInfo, nil
}
info, err := b.bucket.Get(ctx, id)
if err != nil {
if errors.Is(err, storage.ErrBucketClosed) {
b.clearCache(ctx, id)
if err := b.bucketCache.Delete(b.Name()); err != nil && !errors.Is(err, lfu.ErrNotFound) {
logger.Error(
ctx, "could not delete bucket from cache",
logger.F("cacheKey", b.Name()),
logger.CapturedE(errors.WithStack(err)),
)
}
}
return nil, errors.WithStack(err)
}
if err := b.blobInfoCache.Set(key, info); err != nil {
logger.Error(
ctx, "could not set blob info in cache",
logger.F("cacheKey", key),
logger.CapturedE(errors.WithStack(err)),
)
}
return info, nil
}
// List implements storage.BlobBucket.
func (b *BlobBucket) List(ctx context.Context) ([]storage.BlobInfo, error) {
infos, err := b.bucket.List(ctx)
if err != nil {
if errors.Is(err, storage.ErrBucketClosed) {
if err := b.bucketCache.Delete(b.Name()); err != nil && !errors.Is(err, lfu.ErrNotFound) {
logger.Error(
ctx, "could not delete bucket from cache",
logger.F("cacheKey", b.Name()),
logger.CapturedE(errors.WithStack(err)),
)
}
}
return nil, errors.WithStack(err)
}
for _, ifo := range infos {
key := b.getCacheKey(ifo.ID())
if err := b.blobInfoCache.Set(key, ifo); err != nil {
logger.Error(
ctx, "could not set blob info in cache",
logger.F("cacheKey", key),
logger.CapturedE(errors.WithStack(err)),
)
}
}
return infos, nil
}
// Name implements storage.BlobBucket.
func (b *BlobBucket) Name() string {
return b.bucket.Name()
}
// NewReader implements storage.BlobBucket.
func (b *BlobBucket) NewReader(ctx context.Context, id storage.BlobID) (io.ReadSeekCloser, error) {
if cached, exist := b.inContentCache(id); exist {
logger.Debug(
ctx, "found blob content in cache",
logger.F("cacheKey", b.getCacheKey(id)),
)
return cached, nil
}
reader, err := b.bucket.NewReader(ctx, id)
if err != nil {
if errors.Is(err, storage.ErrBucketClosed) {
b.clearCache(ctx, id)
if err := b.bucketCache.Delete(b.Name()); err != nil && !errors.Is(err, lfu.ErrNotFound) {
logger.Error(
ctx, "could not delete bucket from cache",
logger.F("cacheKey", b.Name()),
logger.CapturedE(errors.WithStack(err)),
)
}
}
return nil, errors.WithStack(err)
}
return &readCacher{
reader: reader,
cache: b.blobCache,
key: b.getCacheKey(id),
}, nil
}
func (b *BlobBucket) getCacheKey(id storage.BlobID) string {
return fmt.Sprintf("%s-%s", b.Name(), id)
}
func (b *BlobBucket) inContentCache(id storage.BlobID) (io.ReadSeekCloser, bool) {
key := b.getCacheKey(id)
data, err := b.blobCache.Get(key)
if err != nil {
if errors.Is(err, lfu.ErrNotFound) {
return nil, false
}
logger.Error(context.Background(), "could not retrieve cached value", logger.CapturedE(errors.WithStack(err)))
return nil, false
}
return &cachedReader{data, 0}, true
}
func (b *BlobBucket) clearCache(ctx context.Context, id storage.BlobID) {
key := b.getCacheKey(id)
logger.Debug(ctx, "clearing cache", logger.F("cacheKey", key))
if err := b.blobCache.Delete(key); err != nil && !errors.Is(err, lfu.ErrNotFound) {
logger.Error(ctx, "could not clear cache", logger.F("cacheKey", key), logger.CapturedE(errors.WithStack(err)))
}
if err := b.blobInfoCache.Delete(key); err != nil {
logger.Error(
ctx, "could not delete blob info from cache",
logger.F("cacheKey", key),
logger.CapturedE(errors.WithStack(err)),
)
}
}
// NewWriter implements storage.BlobBucket.
func (b *BlobBucket) NewWriter(ctx context.Context, id storage.BlobID) (io.WriteCloser, error) {
defer b.clearCache(ctx, id)
writer, err := b.bucket.NewWriter(ctx, id)
if err != nil {
if errors.Is(err, storage.ErrBucketClosed) {
if err := b.bucketCache.Delete(b.Name()); err != nil && !errors.Is(err, lfu.ErrNotFound) {
logger.Error(
ctx, "could not delete bucket from cache",
logger.F("cacheKey", b.Name()),
logger.CapturedE(errors.WithStack(err)),
)
}
}
return nil, errors.WithStack(err)
}
return writer, nil
}
// Size implements storage.BlobBucket.
func (b *BlobBucket) Size(ctx context.Context) (int64, error) {
size, err := b.bucket.Size(ctx)
if err != nil {
if errors.Is(err, storage.ErrBucketClosed) {
if err := b.bucketCache.Delete(b.Name()); err != nil && !errors.Is(err, lfu.ErrNotFound) {
logger.Error(
ctx, "could not delete bucket from cache",
logger.F("cacheKey", b.Name()),
logger.CapturedE(errors.WithStack(err)),
)
}
}
return 0, errors.WithStack(err)
}
return size, nil
}
var _ storage.BlobBucket = &BlobBucket{}

118
pkg/storage/driver/cache/blob_store.go vendored Normal file
View File

@ -0,0 +1,118 @@
package cache
import (
"context"
"forge.cadoles.com/arcad/edge/pkg/storage"
"forge.cadoles.com/arcad/edge/pkg/storage/driver/cache/lfu"
"github.com/pkg/errors"
"gitlab.com/wpetit/goweb/logger"
)
type BlobStore struct {
store storage.BlobStore
blobCache *lfu.Cache[string, []byte]
bucketCache *lfu.Cache[string, storage.BlobBucket]
blobInfoCache *lfu.Cache[string, storage.BlobInfo]
}
// DeleteBucket implements storage.BlobStore.
func (s *BlobStore) DeleteBucket(ctx context.Context, name string) error {
if err := s.store.DeleteBucket(ctx, name); err != nil {
return errors.WithStack(err)
}
s.bucketCache.Delete(name)
return nil
}
// ListBuckets implements storage.BlobStore.
func (s *BlobStore) ListBuckets(ctx context.Context) ([]string, error) {
buckets, err := s.store.ListBuckets(ctx)
if err != nil {
return nil, errors.WithStack(err)
}
return buckets, nil
}
// OpenBucket implements storage.BlobStore.
func (s *BlobStore) OpenBucket(ctx context.Context, name string) (storage.BlobBucket, error) {
bucket, err := s.bucketCache.Get(name)
if err == nil {
logger.Debug(ctx, "found bucket in cache", logger.F("name", name))
return &BlobBucket{
bucket: bucket,
blobCache: s.blobCache,
blobInfoCache: s.blobInfoCache,
bucketCache: s.bucketCache,
}, nil
}
if err != nil && !errors.Is(err, lfu.ErrNotFound) {
logger.Error(ctx, "could not retrieve bucket from cache",
logger.F("cacheKey", name),
logger.CapturedE(errors.WithStack(err)),
)
}
bucket, err = s.store.OpenBucket(ctx, name)
if err != nil {
return nil, errors.WithStack(err)
}
if err := s.bucketCache.Set(name, bucket); err != nil {
logger.Error(ctx, "could not set bucket in cache",
logger.F("cacheKey", name),
logger.CapturedE(errors.WithStack(err)),
)
}
return &BlobBucket{
bucket: bucket,
blobCache: s.blobCache,
blobInfoCache: s.blobInfoCache,
bucketCache: s.bucketCache,
}, nil
}
func NewBlobStore(store storage.BlobStore, funcs ...OptionFunc) (*BlobStore, error) {
options := NewOptions(funcs...)
blobCache := lfu.NewCache[string, []byte](
options.BlobCacheStore,
lfu.WithTTL[string, []byte](options.CacheTTL),
lfu.WithCapacity[string, []byte](options.BlobCacheSize),
lfu.WithGetValueSize[string, []byte](func(value []byte) (int, error) {
return len(value), nil
}),
)
blobBucketCache := lfu.NewCache[string, storage.BlobBucket](
options.BlobBucketCacheStore,
lfu.WithCapacity[string, storage.BlobBucket](options.BlobBucketCacheSize),
lfu.WithGetValueSize[string, storage.BlobBucket](func(value storage.BlobBucket) (int, error) {
return 1, nil
}),
)
blobInfoCache := lfu.NewCache[string, storage.BlobInfo](
options.BlobInfoCacheStore,
lfu.WithTTL[string, storage.BlobInfo](options.CacheTTL),
lfu.WithCapacity[string, storage.BlobInfo](options.BlobInfoCacheSize),
lfu.WithGetValueSize[string, storage.BlobInfo](func(value storage.BlobInfo) (int, error) {
return 1, nil
}),
)
return &BlobStore{
store: store,
blobCache: blobCache,
bucketCache: blobBucketCache,
blobInfoCache: blobInfoCache,
}, nil
}
var _ storage.BlobStore = &BlobStore{}
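For reference, a minimal sketch of wrapping an existing blob store with this cache layer; the sqlite backend, DSN and option values are illustrative, not taken from the diff.

```go
package main

import (
	"time"

	"forge.cadoles.com/arcad/edge/pkg/storage/driver/cache"
	"forge.cadoles.com/arcad/edge/pkg/storage/driver/sqlite"
)

func main() {
	// Backend store; the DSN is illustrative.
	backend := sqlite.NewBlobStore("./data/blobstore.sqlite?_pragma=busy_timeout=60000")

	// Wrap it with the LFU-backed cache layer.
	store, err := cache.NewBlobStore(
		backend,
		cache.WithCacheTTL(30*time.Minute),
		cache.WithBlobCacheSize(128e6), // ~128MB of blob content kept in the blob cache
	)
	if err != nil {
		panic(err)
	}

	_ = store // usable anywhere a storage.BlobStore is expected
}
```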

View File

@ -0,0 +1,58 @@
package cache
import (
"context"
"fmt"
"os"
"testing"
"time"
"forge.cadoles.com/arcad/edge/pkg/storage/driver/sqlite"
"forge.cadoles.com/arcad/edge/pkg/storage/testsuite"
"github.com/pkg/errors"
"gitlab.com/wpetit/goweb/logger"
)
func TestBlobStore(t *testing.T) {
t.Parallel()
if testing.Verbose() {
logger.SetLevel(logger.LevelDebug)
}
file := "./testdata/blobstore_test.sqlite"
if err := os.Remove(file); err != nil && !errors.Is(err, os.ErrNotExist) {
t.Fatalf("%+v", errors.WithStack(err))
}
dsn := fmt.Sprintf("%s?_pragma=foreign_keys(1)&_pragma=busy_timeout=%d", file, (60 * time.Second).Milliseconds())
backend := sqlite.NewBlobStore(dsn)
store, err := NewBlobStore(backend)
if err != nil {
t.Fatalf("%+v", errors.WithStack(err))
}
testsuite.TestBlobStore(context.Background(), t, store)
}
func BenchmarkBlobStore(t *testing.B) {
logger.SetLevel(logger.LevelError)
file := "./testdata/blobstore_test.sqlite"
if err := os.Remove(file); err != nil && !errors.Is(err, os.ErrNotExist) {
t.Fatalf("%+v", errors.WithStack(err))
}
dsn := fmt.Sprintf("%s?_pragma=foreign_keys(1)&_pragma=busy_timeout=%d", file, (60 * time.Second).Milliseconds())
backend := sqlite.NewBlobStore(dsn)
store, err := NewBlobStore(backend)
if err != nil {
t.Fatalf("%+v", errors.WithStack(err))
}
testsuite.BenchmarkBlobStore(t, store)
}

264
pkg/storage/driver/cache/driver.go vendored Normal file
View File

@ -0,0 +1,264 @@
package cache
import (
"bytes"
"io"
"net/url"
"strconv"
"time"
"forge.cadoles.com/arcad/edge/pkg/storage"
"forge.cadoles.com/arcad/edge/pkg/storage/driver"
"forge.cadoles.com/arcad/edge/pkg/storage/driver/cache/lfu"
"forge.cadoles.com/arcad/edge/pkg/storage/driver/cache/lfu/fs"
"forge.cadoles.com/arcad/edge/pkg/storage/driver/cache/lfu/memory"
"github.com/inhies/go-bytesize"
"github.com/pkg/errors"
)
func init() {
driver.RegisterBlobStoreFactory("cache", blobStoreFactory)
}
func blobStoreFactory(dsn *url.URL) (storage.BlobStore, error) {
query := dsn.Query()
rawDriver := query.Get("driver")
if rawDriver == "" {
return nil, errors.New("missing required url parameter 'driver'")
}
query.Del("driver")
blobStoreOptionFuncs := make([]OptionFunc, 0)
cacheTTL, err := parseDuration(&query, "cacheTTL")
if err != nil {
if !errors.Is(err, errNotFound) {
return nil, errors.WithStack(err)
}
cacheTTL = time.Hour
}
blobStoreOptionFuncs = append(blobStoreOptionFuncs, WithCacheTTL(cacheTTL))
blobBucketCacheSize, err := parseInt(&query, "blobBucketCacheSize")
if err != nil {
if !errors.Is(err, errNotFound) {
return nil, errors.WithStack(err)
}
blobBucketCacheSize = 16
}
blobStoreOptionFuncs = append(blobStoreOptionFuncs, WithBlobBucketCacheSize(int(blobBucketCacheSize)))
blobBucketCacheStorePrefix := "blobBucketCacheStore"
blobBucketCacheStore, err := parseCacheStore[string, storage.BlobBucket](&query, blobBucketCacheStorePrefix)
if err != nil {
return nil, errors.WithStack(err)
}
blobStoreOptionFuncs = append(blobStoreOptionFuncs, WithBlobBucketCacheStore(blobBucketCacheStore))
bloInfoCacheSize, err := parseInt(&query, "bloInfoCacheSize")
if err != nil {
if !errors.Is(err, errNotFound) {
return nil, errors.WithStack(err)
}
bloInfoCacheSize = 16
}
blobStoreOptionFuncs = append(blobStoreOptionFuncs, WithBlobInfoCacheSize(int(bloInfoCacheSize)))
blobInfoCacheStorePrefix := "blobInfoCacheStore"
blobInfoCacheStore, err := parseCacheStore[string, storage.BlobInfo](&query, blobInfoCacheStorePrefix)
if err != nil {
return nil, errors.WithStack(err)
}
blobStoreOptionFuncs = append(blobStoreOptionFuncs, WithBlobInfoCacheStore(blobInfoCacheStore))
blobCacheSize, err := parseByteSize(&query, "blobCacheSize")
if err != nil {
if !errors.Is(err, errNotFound) {
return nil, errors.WithStack(err)
}
blobCacheSize = 256e+6
}
blobStoreOptionFuncs = append(blobStoreOptionFuncs, WithBlobCacheSize(int(blobCacheSize)))
blobCacheStorePrefix := "blobCacheStore"
blobCacheStore, err := parseCacheStore[string, []byte](
&query, blobCacheStorePrefix,
fs.WithMarshalValue[string, []byte](func(value []byte) (io.Reader, error) {
return bytes.NewBuffer(value), nil
}),
fs.WithUnmarshalValue[string, []byte](func(r io.Reader) ([]byte, error) {
data, err := io.ReadAll(r)
if err != nil {
return nil, errors.WithStack(err)
}
return data, nil
}),
)
if err != nil {
return nil, errors.WithStack(err)
}
blobStoreOptionFuncs = append(blobStoreOptionFuncs, WithBlobCacheStore(blobCacheStore))
url := &url.URL{
Scheme: rawDriver,
Host: dsn.Host,
Path: dsn.Path,
RawQuery: query.Encode(),
}
backend, err := driver.NewBlobStore(url.String())
if err != nil {
return nil, errors.WithStack(err)
}
store, err := NewBlobStore(backend, blobStoreOptionFuncs...)
if err != nil {
return nil, errors.WithStack(err)
}
return store, nil
}
var errNotFound = errors.New("not found")
func parseString(query *url.Values, name string) (string, error) {
value := query.Get(name)
if value != "" {
query.Del(name)
return value, nil
}
return "", errors.WithStack(errNotFound)
}
func parseByteSize(query *url.Values, name string) (bytesize.ByteSize, error) {
rawValue := query.Get(name)
if rawValue != "" {
query.Del(name)
value, err := bytesize.Parse(rawValue)
if err != nil {
return 0, errors.Wrapf(err, "could not parse url parameter '%s'", name)
}
return value, nil
}
return 0, errors.WithStack(errNotFound)
}
func parseInt(query *url.Values, name string) (int64, error) {
rawValue := query.Get(name)
if rawValue != "" {
query.Del(name)
value, err := strconv.ParseInt(rawValue, 10, 32)
if err != nil {
return 0, errors.Wrapf(err, "could not parse url parameter '%s'", name)
}
return value, nil
}
return 0, errors.WithStack(errNotFound)
}
func parseDuration(query *url.Values, name string) (time.Duration, error) {
rawValue := query.Get(name)
if rawValue != "" {
query.Del(name)
value, err := time.ParseDuration(rawValue)
if err != nil {
return 0, errors.Wrapf(err, "could not parse url parameter '%s'", name)
}
return value, nil
}
return 0, errors.WithStack(errNotFound)
}
const (
storeTypeFS string = "fs"
storeTypeMemory string = "memory"
)
func parseCacheStore[K comparable, V any](query *url.Values, prefix string, optionFuncs ...any) (lfu.Store[K, V], error) {
storeTypeParam := prefix + "Type"
storeType, err := parseString(query, storeTypeParam)
if err != nil {
if errors.Is(err, errNotFound) {
storeType = storeTypeMemory
}
}
switch storeType {
case storeTypeFS:
store, err := parseFSCacheStore[K, V](query, prefix, optionFuncs...)
if err != nil {
return nil, errors.WithStack(err)
}
return store, nil
case storeTypeMemory:
store, err := parseMemoryCacheStore[K, V](query, prefix, optionFuncs...)
if err != nil {
return nil, errors.WithStack(err)
}
return store, nil
}
return nil, errors.Errorf("unexpected store type value '%s' for parameter '%s'", storeType, storeTypeParam)
}
func parseFSCacheStore[K comparable, V any](query *url.Values, prefix string, optionFuncs ...any) (*fs.Store[K, V], error) {
baseDirParam := prefix + "BaseDir"
baseDir, err := parseString(query, baseDirParam)
if err != nil {
if errors.Is(err, errNotFound) {
return nil, errors.Wrapf(err, "missing required url parameter '%s'", baseDirParam)
}
return nil, errors.WithStack(err)
}
funcs := make([]fs.OptionsFunc[K, V], 0)
for _, anyFn := range optionFuncs {
fn, ok := anyFn.(fs.OptionsFunc[K, V])
if !ok {
continue
}
funcs = append(funcs, fn)
}
store := fs.NewStore[K, V](baseDir, funcs...)
if err := store.Clear(); err != nil {
return nil, errors.WithStack(err)
}
return store, nil
}
func parseMemoryCacheStore[K comparable, V any](query *url.Values, prefix string, optionFuncs ...any) (*memory.Store[K, V], error) {
return memory.NewStore[K, V](), nil
}
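To make the parameter parsing above concrete, a hypothetical DSN for the cache driver; only the parameter names come from this file, the sqlite scheme registration and every value are assumptions.

```go
package main

import (
	"forge.cadoles.com/arcad/edge/pkg/storage/driver"
	_ "forge.cadoles.com/arcad/edge/pkg/storage/driver/cache"  // registers the "cache" scheme
	_ "forge.cadoles.com/arcad/edge/pkg/storage/driver/sqlite" // assumed to register the "sqlite" scheme
)

func main() {
	// All values are illustrative; parameters consumed by the cache driver are
	// stripped before the remaining query is handed to the wrapped driver.
	dsn := "cache:///var/lib/edge/blobstore.sqlite" +
		"?driver=sqlite" +
		"&cacheTTL=30m" +
		"&blobCacheSize=128MB" +
		"&blobCacheStoreType=fs" +
		"&blobCacheStoreBaseDir=/var/cache/edge/blobs"

	store, err := driver.NewBlobStore(dsn)
	if err != nil {
		panic(err)
	}

	_ = store
}
```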

349
pkg/storage/driver/cache/lfu/cache.go vendored Normal file
View File

@ -0,0 +1,349 @@
package lfu
import (
"slices"
"sync/atomic"
"time"
"github.com/pkg/errors"
)
var (
ErrNotFound = errors.New("not found")
ErrSizeExceedCapacity = errors.New("size exceed capacity")
errExpired = errors.New("expired")
)
type Cache[K comparable, V any] struct {
index *Map[K, *cacheItem[K, V]]
freqs *List[*frequencyItem[K, V]]
size atomic.Int32
capacity int
store Store[K, V]
getValueSize GetValueSizeFunc[V]
sync *Synchronizer[K]
log LogFunc
ttl time.Duration
}
type cacheItem[K any, V any] struct {
key K
size int
time atomic.Int64
frequencyParent *Element[*frequencyItem[K, V]]
}
func (i *cacheItem[K, V]) Expired(ttl time.Duration) bool {
if ttl == 0 {
return false
}
itemTime := time.Unix(i.time.Load(), 0)
// If item has expired, mark it as not found
return itemTime.Add(ttl).Before(time.Now())
}
func (i *cacheItem[K, V]) Refresh() {
i.time.Store(time.Now().Unix())
}
func newCacheItem[K any, V any](key K, size int) *cacheItem[K, V] {
item := &cacheItem[K, V]{
key: key,
size: size,
}
item.time.Store(time.Now().Unix())
return item
}
type frequencyItem[K any, V any] struct {
entries *Map[*cacheItem[K, V], struct{}]
freq int
}
func newFrequencyItem[K any, V any]() *frequencyItem[K, V] {
frequencyItem := &frequencyItem[K, V]{}
frequencyItem.entries = NewMap[*cacheItem[K, V], struct{}]()
return frequencyItem
}
func (c *Cache[K, V]) Set(key K, value V) error {
newItemSize, err := c.getValueSize(value)
if err != nil {
return errors.WithStack(err)
}
c.log("setting '%v' (size: %d)", key, newItemSize)
if newItemSize > int(c.capacity) {
return errors.Wrapf(ErrSizeExceedCapacity, "item size '%d' exceeds cache total capacity of '%v'", newItemSize, c.capacity)
}
var sizeDelta int
err = c.sync.WriteTx(key, func() error {
if err := c.store.Set(key, value); err != nil {
return errors.WithStack(err)
}
item, ok := c.index.Get(key)
if ok {
oldItemSize := item.size
sizeDelta = -int(oldItemSize) + newItemSize
item.Refresh()
} else {
item = newCacheItem[K, V](key, newItemSize)
c.index.Set(key, item)
sizeDelta = newItemSize
}
c.size.Add(int32(sizeDelta))
c.increment(item)
return nil
})
if err != nil {
return errors.WithStack(err)
}
// Eviction, if needed
if err := c.Evict(key); err != nil {
return errors.WithStack(err)
}
return nil
}
func (c *Cache[K, V]) Get(key K) (V, error) {
var value V
err := c.sync.ReadTx(key, func(upgrade func(func())) error {
c.log("getting '%v'", key)
e, ok := c.index.Get(key)
if !ok {
return errors.WithStack(ErrNotFound)
}
if e.Expired(c.ttl) {
return errors.WithStack(errExpired)
}
v, err := c.store.Get(key)
if err != nil {
return errors.WithStack(err)
}
upgrade(func() {
c.increment(e)
})
value = v
return nil
})
if err != nil {
if errors.Is(err, errExpired) {
if err := c.Delete(key); err != nil {
return *new(V), errors.WithStack(err)
}
return *new(V), errors.WithStack(ErrNotFound)
}
return *new(V), errors.WithStack(err)
}
return value, nil
}
func (c *Cache[K, V]) Delete(key K) error {
err := c.sync.WriteTx(key, func() error {
c.log("deleting '%v'", key)
item, exists := c.index.Get(key)
if !exists {
return errors.WithStack(ErrNotFound)
}
if err := c.store.Delete(key); err != nil {
return errors.WithStack(err)
}
c.size.Add(-int32(item.size))
c.remove(item.frequencyParent, item)
c.index.Delete(key)
return nil
})
if err != nil {
return errors.WithStack(err)
}
return nil
}
func (c *Cache[K, V]) Evict(skipped ...K) error {
exceed, delta := c.atCapacity()
if exceed && delta > 0 {
if err := c.evict(delta, skipped...); err != nil {
return errors.WithStack(err)
}
}
return nil
}
func (c *Cache[K, V]) Len() int {
return c.index.Len()
}
func (c *Cache[K, V]) Size() int {
return int(c.size.Load())
}
func (c *Cache[K, V]) Capacity() int {
return c.capacity
}
func (c *Cache[K, V]) increment(item *cacheItem[K, V]) {
currentFrequencyElement := item.frequencyParent
var nextFrequencyAmount int
var nextFrequencyElement *Element[*frequencyItem[K, V]]
if currentFrequencyElement == nil {
nextFrequencyAmount = 1
nextFrequencyElement = c.freqs.First()
} else {
atomicFrequencyItem := c.freqs.Value(currentFrequencyElement)
nextFrequencyAmount = atomicFrequencyItem.freq + 1
nextFrequencyElement = c.freqs.Next(currentFrequencyElement)
}
var nextFrequency *frequencyItem[K, V]
if nextFrequencyElement != nil {
nextFrequency = c.freqs.Value(nextFrequencyElement)
}
if nextFrequencyElement == nil || nextFrequency == nil || nextFrequency.freq != nextFrequencyAmount {
newFrequencyItem := newFrequencyItem[K, V]()
newFrequencyItem.freq = nextFrequencyAmount
if currentFrequencyElement == nil {
nextFrequencyElement = c.freqs.PushFront(newFrequencyItem)
} else {
nextFrequencyElement = c.freqs.InsertValueAfter(newFrequencyItem, currentFrequencyElement)
}
}
item.frequencyParent = nextFrequencyElement
nextFrequency = c.freqs.Value(nextFrequencyElement)
nextFrequency.entries.Set(item, struct{}{})
if currentFrequencyElement != nil {
c.remove(currentFrequencyElement, item)
}
}
func (c *Cache[K, V]) remove(listItem *Element[*frequencyItem[K, V]], item *cacheItem[K, V]) {
entries := c.freqs.Value(listItem).entries
entries.Delete(item)
}
func (c *Cache[K, V]) atCapacity() (bool, int) {
size, capacity := c.Size(), c.Capacity()
c.log("cache stats: %d/%d", size, capacity)
return size >= capacity, size - capacity
}
func (c *Cache[K, V]) evict(total int, skipped ...K) error {
if total == 0 {
return nil
}
frequencyElement := c.freqs.First()
if frequencyElement == nil {
c.log("no frequency element")
return nil
}
for evicted := 0; evicted < total; {
c.log("running eviction: [to_evict:%d, evicted: %d]", total, evicted)
c.log("first frequency element %p", frequencyElement)
frequencyItem := c.freqs.Value(frequencyElement)
if frequencyItem == nil {
return nil
}
entries := frequencyItem.entries
if entries.Len() == 0 {
c.log("no frequency entries")
frequencyElement = c.freqs.Next(frequencyElement)
continue
}
var rangeErr error
entries.Range(func(key, v any) bool {
if evicted >= total {
c.log("evicted enough (%d >= %d), stopping", evicted, total)
return false
}
entry, _ := key.(*cacheItem[K, V])
if slices.Contains(skipped, entry.key) {
c.log("skipping key '%v'", entry.key)
return true
}
if err := c.Delete(entry.key); err != nil {
if errors.Is(err, ErrNotFound) {
c.log("key '%s' not found", entry.key)
// Cleanup obsolete frequency
c.remove(frequencyElement, entry)
return true
}
rangeErr = errors.WithStack(err)
return false
}
c.log("evicted key '%v' (size: %d)", entry.key, entry.size)
evicted += int(entry.size)
return true
})
if rangeErr != nil {
return errors.WithStack(rangeErr)
}
}
return nil
}
func NewCache[K comparable, V any](store Store[K, V], funcs ...OptionsFunc[K, V]) *Cache[K, V] {
opts := DefaultOptions[K, V](funcs...)
cache := &Cache[K, V]{
index: NewMap[K, *cacheItem[K, V]](),
freqs: NewList[*frequencyItem[K, V]](),
capacity: opts.Capacity,
store: store,
getValueSize: opts.GetValueSize,
sync: NewSynchronizer[K](),
log: opts.Log,
ttl: opts.TTL,
}
return cache
}
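A minimal usage sketch of the LFU cache with the in-memory store; the capacity and TTL values are arbitrary.

```go
package main

import (
	"fmt"
	"time"

	"forge.cadoles.com/arcad/edge/pkg/storage/driver/cache/lfu"
	"forge.cadoles.com/arcad/edge/pkg/storage/driver/cache/lfu/memory"
	"github.com/pkg/errors"
)

func main() {
	// Capacity is expressed in whatever unit GetValueSize returns; for []byte
	// values the default implementation returns len(value).
	cache := lfu.NewCache[string, []byte](
		memory.NewStore[string, []byte](),
		lfu.WithCapacity[string, []byte](1024),
		lfu.WithTTL[string, []byte](time.Minute),
	)

	if err := cache.Set("greeting", []byte("hello")); err != nil {
		panic(err)
	}

	value, err := cache.Get("greeting")
	if err != nil && !errors.Is(err, lfu.ErrNotFound) {
		panic(err)
	}

	fmt.Printf("value=%q size=%d/%d\n", value, cache.Size(), cache.Capacity())
}
```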

View File

@ -0,0 +1,37 @@
package fs
import (
"bytes"
"io"
"path/filepath"
"testing"
"forge.cadoles.com/arcad/edge/pkg/storage/driver/cache/lfu"
"forge.cadoles.com/arcad/edge/pkg/storage/driver/cache/lfu/testsuite"
"github.com/pkg/errors"
)
func TestCacheWithFSStore(t *testing.T) {
testsuite.TestCacheWithStore(t, func(testName string) lfu.Store[string, string] {
dir := filepath.Join("testdata", "testsuite", testName)
store := NewStore[string, string](dir,
WithMarshalValue[string, string](func(value string) (io.Reader, error) {
return bytes.NewBuffer([]byte(value)), nil
}),
WithUnmarshalValue[string, string](func(r io.Reader) (string, error) {
data, err := io.ReadAll(r)
if err != nil {
return "", errors.WithStack(err)
}
return string(data), nil
}),
)
if err := store.Clear(); err != nil {
panic(errors.WithStack(err))
}
return store
})
}

19
pkg/storage/driver/cache/lfu/fs/hash.go vendored Normal file
View File

@ -0,0 +1,19 @@
package fs
import (
"strconv"
"github.com/mitchellh/hashstructure/v2"
"github.com/pkg/errors"
)
func DefaultGetPath[K comparable](key K) ([]string, error) {
uintHash, err := hashstructure.Hash(key, hashstructure.FormatV2, nil)
if err != nil {
return nil, errors.WithStack(err)
}
hash := strconv.FormatUint(uintHash, 16)
return []string{hash}, nil
}

View File

@ -0,0 +1,31 @@
package fs
import (
"bytes"
"encoding/gob"
"io"
"github.com/pkg/errors"
)
func DefaultMarshalValue[V any](value V) (io.Reader, error) {
var buf bytes.Buffer
encoder := gob.NewEncoder(&buf)
if err := encoder.Encode(value); err != nil {
return nil, errors.WithStack(err)
}
return &buf, nil
}
func DefaultUnmarshalValue[V any](d io.Reader) (V, error) {
var value V
encoder := gob.NewDecoder(d)
if err := encoder.Decode(&value); err != nil {
return *new(V), errors.WithStack(err)
}
return value, nil
}

View File

@ -0,0 +1,45 @@
package fs
import "io"
type GetPathFunc[K comparable] func(key K) ([]string, error)
type MarshalValueFunc[V any] func(value V) (io.Reader, error)
type UnmarshalValueFunc[V any] func(r io.Reader) (V, error)
type Options[K comparable, V any] struct {
GetPath GetPathFunc[K]
MarshalValue MarshalValueFunc[V]
UnmarshalValue UnmarshalValueFunc[V]
}
type OptionsFunc[K comparable, V any] func(opts *Options[K, V])
func DefaultOptions[K comparable, V any](funcs ...OptionsFunc[K, V]) *Options[K, V] {
opts := &Options[K, V]{
GetPath: DefaultGetPath[K],
MarshalValue: DefaultMarshalValue[V],
UnmarshalValue: DefaultUnmarshalValue[V],
}
for _, fn := range funcs {
fn(opts)
}
return opts
}
func WithGetPath[K comparable, V any](getKeyHash GetPathFunc[K]) OptionsFunc[K, V] {
return func(opts *Options[K, V]) {
opts.GetPath = getKeyHash
}
}
func WithMarshalValue[K comparable, V any](marshalValue MarshalValueFunc[V]) OptionsFunc[K, V] {
return func(opts *Options[K, V]) {
opts.MarshalValue = marshalValue
}
}
func WithUnmarshalValue[K comparable, V any](unmarshalValue UnmarshalValueFunc[V]) OptionsFunc[K, V] {
return func(opts *Options[K, V]) {
opts.UnmarshalValue = unmarshalValue
}
}

165
pkg/storage/driver/cache/lfu/fs/store.go vendored Normal file
View File

@ -0,0 +1,165 @@
package fs
import (
"fmt"
"io"
"os"
"path/filepath"
"forge.cadoles.com/arcad/edge/pkg/storage/driver/cache/lfu"
"github.com/pkg/errors"
)
type Store[K comparable, V any] struct {
baseDir string
getPath GetPathFunc[K]
marshalValue MarshalValueFunc[V]
unmarshalValue UnmarshalValueFunc[V]
}
// Delete implements Store.
func (s *Store[K, V]) Delete(key K) error {
path, err := s.getEntryPath(key)
if err != nil {
return errors.WithStack(err)
}
if err := os.Remove(path); err != nil && !errors.Is(err, os.ErrNotExist) {
return errors.WithStack(err)
}
return nil
}
// Get implements Store.
func (s *Store[K, V]) Get(key K) (V, error) {
path, err := s.getEntryPath(key)
if err != nil {
return *new(V), errors.WithStack(err)
}
value, err := s.readValue(path)
if err != nil {
if errors.Is(err, os.ErrNotExist) {
return *new(V), errors.WithStack(lfu.ErrNotFound)
}
return *new(V), errors.WithStack(err)
}
return value, nil
}
// Set implements Store.
func (s *Store[K, V]) Set(key K, value V) error {
path, err := s.getEntryPath(key)
if err != nil {
return errors.WithStack(err)
}
if err := s.writeValue(path, value); err != nil {
return errors.WithStack(err)
}
return nil
}
func (s *Store[K, V]) Clear() error {
if err := os.RemoveAll(s.baseDir); err != nil {
return errors.WithStack(err)
}
return nil
}
func (s *Store[K, V]) getEntryPath(k K) (string, error) {
path, err := s.getPath(k)
if err != nil {
return "", errors.WithStack(err)
}
path = append([]string{s.baseDir}, path...)
return filepath.Join(path...), nil
}
func (s *Store[K, V]) writeValue(path string, value V) error {
fi, err := os.Stat(path)
if err == nil && !fi.Mode().IsRegular() {
return fmt.Errorf("%s already exists and is not a regular file", path)
}
dir := filepath.Dir(path)
if err := os.MkdirAll(dir, 0750); err != nil {
return errors.WithStack(err)
}
f, err := os.CreateTemp(dir, filepath.Base(path)+".tmp")
if err != nil {
return errors.WithStack(err)
}
tmpName := f.Name()
defer func() {
if err != nil {
f.Close()
os.Remove(tmpName)
}
}()
reader, err := s.marshalValue(value)
if err != nil {
return errors.WithStack(err)
}
if _, err := io.Copy(f, reader); err != nil {
return errors.WithStack(err)
}
if err := f.Sync(); err != nil {
return errors.WithStack(err)
}
if err := f.Close(); err != nil {
return errors.WithStack(err)
}
if err := os.Rename(tmpName, path); err != nil {
return errors.WithStack(err)
}
return nil
}
func (s *Store[K, V]) readValue(path string) (V, error) {
file, err := os.Open(path)
if err != nil {
return *new(V), errors.WithStack(err)
}
defer func() {
if err := file.Close(); err != nil && !errors.Is(err, os.ErrClosed) {
panic(errors.WithStack(err))
}
}()
value, err := s.unmarshalValue(file)
if err != nil {
return *new(V), errors.WithStack(err)
}
return value, nil
}
func NewStore[K comparable, V any](baseDir string, funcs ...OptionsFunc[K, V]) *Store[K, V] {
opts := DefaultOptions[K, V](funcs...)
return &Store[K, V]{
baseDir: baseDir,
getPath: opts.GetPath,
unmarshalValue: opts.UnmarshalValue,
marshalValue: opts.MarshalValue,
}
}
var _ lfu.Store[string, int] = &Store[string, int]{}

View File

@ -0,0 +1,2 @@
*
!.gitignore

203
pkg/storage/driver/cache/lfu/list.go vendored Normal file
View File

@ -0,0 +1,203 @@
package lfu
import (
"sync/atomic"
)
type List[T any] struct {
root *Element[T]
len atomic.Int32
sync *Synchronizer[*Element[T]]
}
func (l *List[T]) First() *Element[T] {
if l.Len() == 0 {
return nil
}
var next *Element[T]
l.sync.ReadTx(l.root, func(upgrade func(func())) error {
next = l.root.next
return nil
})
return next
}
func (l *List[T]) Last() *Element[T] {
if l.Len() == 0 {
return nil
}
var prev *Element[T]
l.sync.ReadTx(l.root, func(upgrade func(func())) error {
prev = l.root.prev
return nil
})
return prev
}
func (l *List[T]) Prev(e *Element[T]) *Element[T] {
var prev *Element[T]
l.sync.ReadTx(e, func(upgrade func(func())) error {
prev = e.prev
return nil
})
return prev
}
func (l *List[T]) Next(e *Element[T]) *Element[T] {
var next *Element[T]
l.sync.ReadTx(e, func(upgrade func(func())) error {
next = e.next
return nil
})
return next
}
func (l *List[T]) Value(e *Element[T]) T {
var value T
l.sync.ReadTx(e, func(upgrade func(func())) error {
value = e.value
return nil
})
return value
}
func (l *List[T]) PushFront(v T) *Element[T] {
return l.InsertValueAfter(v, l.root)
}
func (l *List[T]) PushBack(v T) *Element[T] {
return l.InsertValueAfter(v, l.root)
}
func (l *List[T]) Remove(e *Element[T]) {
l.remove(e)
}
func (l *List[T]) Len() int {
return int(l.len.Load())
}
func (l *List[T]) insertAfter(e *Element[T], at *Element[T]) *Element[T] {
l.sync.ReadTx(e, func(upgrade func(fn func())) error {
var next *Element[T]
l.sync.ReadTx(at, func(upgrade func(func())) error {
next = at.next
return nil
})
upgrade(func() {
e.prev = at
e.next = next
e.list = l
})
if e.prev != nil {
l.sync.WriteTx(e.prev, func() error {
e.prev.next = e
return nil
})
}
if e.next != nil {
l.sync.WriteTx(e.next, func() error {
e.next.prev = e
return nil
})
}
return nil
})
l.len.Add(1)
return e
}
func (l *List[T]) InsertValueAfter(v T, at *Element[T]) *Element[T] {
e := NewElement[T](v)
return l.insertAfter(e, at)
}
func (l *List[T]) remove(e *Element[T]) {
if e == nil || e == l.root {
return
}
l.sync.ReadTx(e, func(upgrade func(fn func())) error {
if e.prev != nil {
if e.prev == e {
upgrade(func() {
e.prev.next = e.next
})
} else {
l.sync.WriteTx(e.prev, func() error {
e.prev.next = e.next
return nil
})
}
}
if e.next != nil {
if e.next == e {
upgrade(func() {
e.next.prev = e.prev
})
} else {
l.sync.WriteTx(e.next, func() error {
e.next.prev = e.prev
return nil
})
}
}
upgrade(func() {
e.next = nil
e.prev = nil
e.list = nil
})
return nil
})
l.sync.Remove(e)
l.len.Add(-1)
}
func NewList[T any]() *List[T] {
root := NewElement(*new(T))
root.next = root
root.prev = root
list := &List[T]{
sync: NewSynchronizer[*Element[T]](),
}
root.list = list
list.root = root
return list
}
type Element[T any] struct {
prev *Element[T]
next *Element[T]
list *List[T]
value T
}
func NewElement[T any](v T) *Element[T] {
element := &Element[T]{
prev: nil,
next: nil,
list: nil,
value: v,
}
return element
}

67
pkg/storage/driver/cache/lfu/map.go vendored Normal file
View File

@ -0,0 +1,67 @@
package lfu
import (
"sync"
"sync/atomic"
)
type Map[K comparable, V any] struct {
size atomic.Int32
inner sync.Map
}
func (m *Map[K, V]) Get(key K) (V, bool) {
raw, exists := m.inner.Load(key)
if !exists {
return *new(V), false
}
value, ok := raw.(V)
if !ok {
return *new(V), false
}
return value, true
}
func (m *Map[K, V]) GetOrSet(key K, defaultValue V) (V, bool) {
raw, loaded := m.inner.LoadOrStore(key, defaultValue)
if !loaded {
m.size.Add(1)
}
value, ok := raw.(V)
if !ok {
return *new(V), loaded
}
return value, loaded
}
func (m *Map[K, V]) Set(key K, value V) {
_, loaded := m.inner.Swap(key, value)
if !loaded {
m.size.Add(1)
}
}
func (m *Map[K, V]) Delete(key K) {
_, existed := m.inner.LoadAndDelete(key)
if existed {
m.size.Add(-1)
}
}
func (m *Map[K, V]) Range(fn func(key, value any) bool) {
m.inner.Range(fn)
}
func (m *Map[K, V]) Len() int {
return int(m.size.Load())
}
func NewMap[K comparable, V any]() *Map[K, V] {
return &Map[K, V]{
inner: sync.Map{},
}
}

View File

@ -0,0 +1,14 @@
package memory
import (
"testing"
"forge.cadoles.com/arcad/edge/pkg/storage/driver/cache/lfu"
"forge.cadoles.com/arcad/edge/pkg/storage/driver/cache/lfu/testsuite"
)
func TestCacheWithMemoryStore(t *testing.T) {
testsuite.TestCacheWithStore(t, func(testName string) lfu.Store[string, string] {
return NewStore[string, string]()
})
}

View File

@ -0,0 +1,40 @@
package memory
import (
"forge.cadoles.com/arcad/edge/pkg/storage/driver/cache/lfu"
"github.com/pkg/errors"
)
type Store[K comparable, V any] struct {
index *lfu.Map[K, V]
}
// Delete implements Store.
func (s *Store[K, V]) Delete(key K) error {
s.index.Delete(key)
return nil
}
// Get implements Store.
func (s *Store[K, V]) Get(key K) (V, error) {
value, exists := s.index.Get(key)
if !exists {
return *new(V), errors.WithStack(lfu.ErrNotFound)
}
return value, nil
}
// Set implements Store.
func (s *Store[K, V]) Set(key K, value V) error {
s.index.Set(key, value)
return nil
}
func NewStore[K comparable, V any]() *Store[K, V] {
return &Store[K, V]{
index: lfu.NewMap[K, V](),
}
}
var _ lfu.Store[string, int] = &Store[string, int]{}

57
pkg/storage/driver/cache/lfu/options.go vendored Normal file
View File

@ -0,0 +1,57 @@
package lfu
import "time"
type GetValueSizeFunc[V any] func(value V) (int, error)
type LogFunc func(format string, values ...any)
func DefaultLogFunc(format string, values ...any) {
}
type Options[K comparable, V any] struct {
GetValueSize GetValueSizeFunc[V]
Capacity int
Log LogFunc
TTL time.Duration
}
type OptionsFunc[K comparable, V any] func(opts *Options[K, V])
func DefaultOptions[K comparable, V any](funcs ...OptionsFunc[K, V]) *Options[K, V] {
opts := &Options[K, V]{
GetValueSize: DefaultGetValueSize[V],
Capacity: 100,
Log: DefaultLogFunc,
TTL: 0,
}
for _, fn := range funcs {
fn(opts)
}
return opts
}
func WithCapacity[K comparable, V any](capacity int) OptionsFunc[K, V] {
return func(opts *Options[K, V]) {
opts.Capacity = capacity
}
}
func WithGetValueSize[K comparable, V any](getValueSize GetValueSizeFunc[V]) OptionsFunc[K, V] {
return func(opts *Options[K, V]) {
opts.GetValueSize = getValueSize
}
}
func WithLog[K comparable, V any](fn LogFunc) OptionsFunc[K, V] {
return func(opts *Options[K, V]) {
opts.Log = fn
}
}
func WithTTL[K comparable, V any](ttl time.Duration) OptionsFunc[K, V] {
return func(opts *Options[K, V]) {
opts.TTL = ttl
}
}

41
pkg/storage/driver/cache/lfu/size.go vendored Normal file
View File

@ -0,0 +1,41 @@
package lfu
import (
"github.com/pkg/errors"
)
type Measurable interface {
Size() (int, error)
}
func DefaultGetValueSize[V any](value V) (int, error) {
switch v := any(value).(type) {
case int:
return v, nil
case int8:
return int(v), nil
case int32:
return int(v), nil
case int64:
return int(v), nil
case float32:
return int(v), nil
case float64:
return int(v), nil
case []byte:
return len(v), nil
case string:
return len(v), nil
}
if measurable, ok := any(value).(Measurable); ok {
size, err := measurable.Size()
if err != nil {
return 0, errors.WithStack(err)
}
return size, nil
}
return 0, errors.Errorf("could not retrieve size of type '%T'", value)
}

7
pkg/storage/driver/cache/lfu/store.go vendored Normal file
View File

@ -0,0 +1,7 @@
package lfu
type Store[K comparable, V any] interface {
Delete(key K) error
Set(key K, value V) error
Get(key K) (V, error)
}

View File

@ -0,0 +1,56 @@
package lfu
import (
"sync"
"github.com/pkg/errors"
)
type Synchronizer[K comparable] struct {
index *Map[K, *sync.RWMutex]
}
func (s *Synchronizer[K]) Remove(key K) {
s.index.Delete(key)
}
func (s *Synchronizer[K]) ReadTx(key K, fn func(upgrade func(fn func())) error) error {
mutex, _ := s.index.GetOrSet(key, &sync.RWMutex{})
mutex.RLock()
defer mutex.RUnlock()
upgrade := func(fn func()) {
mutex.RUnlock()
mutex.Lock()
defer func() {
mutex.Unlock()
mutex.RLock()
}()
fn()
}
if err := fn(upgrade); err != nil {
return errors.WithStack(err)
}
return nil
}
func (s *Synchronizer[K]) WriteTx(key K, fn func() error) error {
mutex, _ := s.index.GetOrSet(key, &sync.RWMutex{})
mutex.Lock()
defer mutex.Unlock()
if err := fn(); err != nil {
return errors.WithStack(err)
}
return nil
}
func NewSynchronizer[K comparable]() *Synchronizer[K] {
return &Synchronizer[K]{
index: NewMap[K, *sync.RWMutex](),
}
}
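An illustrative sketch of the read/upgrade pattern this synchronizer provides: the callback runs under a per-key read lock and can temporarily upgrade to a write lock for a mutation.

```go
package main

import (
	"fmt"

	"forge.cadoles.com/arcad/edge/pkg/storage/driver/cache/lfu"
)

func main() {
	synchronizer := lfu.NewSynchronizer[string]()
	hits := 0

	err := synchronizer.ReadTx("my-key", func(upgrade func(fn func())) error {
		// Read-only work happens here, under the key's read lock.
		current := hits

		// The mutation is wrapped in upgrade(), which swaps the read lock for a
		// write lock and restores the read lock afterwards.
		upgrade(func() {
			hits = current + 1
		})

		return nil
	})
	if err != nil {
		panic(err)
	}

	fmt.Println("hits:", hits)
}
```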

View File

@ -0,0 +1,41 @@
package testsuite
import (
"reflect"
"runtime"
"strings"
"testing"
"forge.cadoles.com/arcad/edge/pkg/storage/driver/cache/lfu"
"github.com/pkg/errors"
)
type StoreFactory func(testName string) lfu.Store[string, string]
type testCase func(t *testing.T, store lfu.Store[string, string]) error
var testCases = []testCase{
testSetGetDelete,
testEviction,
testConcurrent,
testMultipleSet,
testTTL,
}
func TestCacheWithStore(t *testing.T, factory StoreFactory) {
for _, tc := range testCases {
funcName := runtime.FuncForPC(reflect.ValueOf(tc).Pointer()).Name()
funcNameParts := strings.Split(funcName, "/")
testName := funcNameParts[len(funcNameParts)-1]
func(tc testCase) {
t.Run(testName, func(t *testing.T) {
t.Parallel()
store := factory(testName)
if err := tc(t, store); err != nil {
t.Fatalf("%+v", errors.WithStack(err))
}
})
}(tc)
}
}

View File

@ -0,0 +1,67 @@
package testsuite
import (
"fmt"
"sync"
"testing"
"forge.cadoles.com/arcad/edge/pkg/storage/driver/cache/lfu"
"github.com/pkg/errors"
)
func testConcurrent(t *testing.T, store lfu.Store[string, string]) error {
const value = "foobar"
totalKeys := 25
totalSize := len(value) * totalKeys
capacity := totalSize / 2
cache := lfu.NewCache[string, string](store,
lfu.WithCapacity[string, string](capacity),
lfu.WithLog[string, string](t.Logf),
)
var wg sync.WaitGroup
wg.Add(totalKeys)
loops := totalKeys * 10
for i := 0; i < totalKeys; i++ {
key := fmt.Sprintf("key%d", i)
func(key string) {
go func() {
defer wg.Done()
for i := 0; i < loops; i++ {
if err := cache.Set(key, value); err != nil {
t.Errorf("%+v", errors.WithStack(err))
}
}
}()
}(key)
}
wg.Wait()
t.Logf("cache before final evict [capacity: %d, size: %d, len: %d]", cache.Capacity(), cache.Size(), cache.Len())
if err := cache.Evict(); err != nil {
t.Errorf("%+v", errors.WithStack(err))
}
t.Logf("cache after final evict [capacity: %d, size: %d, len: %d]", cache.Capacity(), cache.Size(), cache.Len())
expectedLength := capacity / len(value)
if e, g := expectedLength, cache.Len(); e < g {
t.Errorf("cache.Len(): expected <= %d, got %d", e, g)
}
if cache.Size() > capacity {
t.Errorf("cache.Size(): expected <= %d, got %d", capacity, cache.Size())
}
if e, g := expectedLength*len(value), cache.Size(); e < g {
t.Errorf("cache.Size(): expected <= %d, got %d", e, g)
}
return nil
}

View File

@ -0,0 +1,70 @@
package testsuite
import (
"testing"
"forge.cadoles.com/arcad/edge/pkg/storage/driver/cache/lfu"
"github.com/pkg/errors"
)
func testEviction(t *testing.T, store lfu.Store[string, string]) error {
cache := lfu.NewCache[string, string](store,
lfu.WithCapacity[string, string](10),
lfu.WithLog[string, string](t.Logf),
)
if err := cache.Set("key1", "key1"); err != nil {
return errors.WithStack(err)
}
if err := cache.Set("key2", "key2"); err != nil {
return errors.WithStack(err)
}
// Increment frequency of key2
if _, err := cache.Get("key2"); err != nil {
return errors.WithStack(err)
}
if e, g := 8, cache.Size(); e != g {
t.Errorf("cache.Size(): expected '%v', got '%v'", e, g)
}
if err := cache.Set("key3", "key3"); err != nil {
return errors.WithStack(err)
}
t.Logf("cache [capacity: %d, size: %d, len: %d]", cache.Capacity(), cache.Size(), cache.Len())
_, err := cache.Get("key1")
if err == nil {
t.Errorf("expected 'key1' to be evicted")
}
if !errors.Is(err, lfu.ErrNotFound) {
t.Errorf("expected err to be 'ErrNotFound'")
}
value, err := cache.Get("key2")
if err != nil {
return errors.WithStack(err)
}
if e, g := "key2", value; e < g {
t.Errorf("cache.Get(\"key2\"): expected %v, got %v", e, g)
}
if e, g := cache.Capacity(), cache.Size(); e < g {
t.Errorf("cache.Size(): expected <= %d, got %d", e, g)
}
if e, g := 2, cache.Len(); e != g {
t.Errorf("cache.Len(): expected %d, got %d", e, g)
}
if cache.Size() < 0 {
t.Errorf("cache.Size(): expected value >= 0, got %d", cache.Size())
}
return nil
}

View File

@ -0,0 +1,80 @@
package testsuite
import (
"testing"
"forge.cadoles.com/arcad/edge/pkg/storage/driver/cache/lfu"
"github.com/pkg/errors"
)
func testMultipleSet(t *testing.T, store lfu.Store[string, string]) error {
const (
key = "mykey"
firstValue = "foo"
secondValue = "bar"
thirdValue = "foobar"
)
cache := lfu.NewCache[string, string](store)
if e, g := 0, cache.Size(); e != g {
t.Errorf("cache.Size(): expected '%v', got '%v'", e, g)
}
if err := cache.Set(key, firstValue); err != nil {
return errors.WithStack(err)
}
if e, g := len(firstValue), cache.Size(); e != g {
t.Errorf("cache.Size(): expected '%v', got '%v'", e, g)
}
retrieved, err := cache.Get(key)
if err != nil {
return errors.WithStack(err)
}
if e, g := firstValue, retrieved; e != g {
t.Errorf("cache.Get(key): expected '%v', got '%v'", e, g)
}
if err := cache.Set(key, secondValue); err != nil {
return errors.WithStack(err)
}
if e, g := len(secondValue), cache.Size(); e != g {
t.Errorf("cache.Size(): expected '%v', got '%v'", e, g)
}
retrieved, err = cache.Get(key)
if err != nil {
return errors.WithStack(err)
}
if e, g := secondValue, retrieved; e != g {
t.Errorf("cache.Get(key): expected '%v', got '%v'", e, g)
}
if err := cache.Set(key, thirdValue); err != nil {
return errors.WithStack(err)
}
if e, g := len(thirdValue), cache.Size(); e != g {
t.Errorf("cache.Size(): expected '%v', got '%v'", e, g)
}
retrieved, err = cache.Get(key)
if err != nil {
return errors.WithStack(err)
}
if e, g := thirdValue, retrieved; e != g {
t.Errorf("cache.Get(key): expected '%v', got '%v'", e, g)
}
if e, g := len(thirdValue), cache.Size(); e != g {
t.Errorf("cache.Size(): expected '%v', got '%v'", e, g)
}
return nil
}

View File

@ -0,0 +1,66 @@
package testsuite
import (
"testing"
"forge.cadoles.com/arcad/edge/pkg/storage/driver/cache/lfu"
"github.com/pkg/errors"
)
func testSetGetDelete(t *testing.T, store lfu.Store[string, string]) error {
const (
key = "mykey"
value = "foobar"
)
cache := lfu.NewCache[string, string](store, lfu.WithCapacity[string, string](10))
if e, g := 0, cache.Size(); e != g {
t.Errorf("cache.Size(): expected '%v', got '%v'", e, g)
}
if err := cache.Set(key, value); err != nil {
return errors.WithStack(err)
}
if e, g := len(value), cache.Size(); e != g {
t.Errorf("cache.Size(): expected '%v', got '%v'", e, g)
}
if e, g := 1, cache.Len(); e != g {
t.Errorf("cache.Len(): expected '%v', got '%v'", e, g)
}
retrieved, err := cache.Get(key)
if err != nil {
return errors.WithStack(err)
}
if e, g := value, retrieved; e != g {
t.Errorf("cache.Get(key): expected '%v', got '%v'", e, g)
}
if err := cache.Delete(key); err != nil {
return errors.WithStack(err)
}
if _, err := cache.Get(key); err == nil || !errors.Is(err, lfu.ErrNotFound) {
t.Errorf("cache.Get(key): err should be lfu.ErrNotFound, got '%v'", errors.WithStack(err))
}
if e, g := value, retrieved; e != g {
t.Errorf("cache.Get(key): expected '%v', got '%v'", e, g)
}
if e, g := 0, cache.Size(); e != g {
t.Errorf("cache.Size(): expected '%v', got '%v'", e, g)
}
if e, g := 0, cache.Len(); e != g {
t.Errorf("cache.Len(): expected '%v', got '%v'", e, g)
}
t.Logf("cache [capacity: %d, size: %d, len: %d]", cache.Capacity(), cache.Size(), cache.Len())
return nil
}

View File

@ -0,0 +1,54 @@
package testsuite
import (
"testing"
"time"
"forge.cadoles.com/arcad/edge/pkg/storage/driver/cache/lfu"
"github.com/pkg/errors"
)
func testTTL(t *testing.T, store lfu.Store[string, string]) error {
const (
key = "mykey"
value = "foobar"
)
ttl := time.Second
cache := lfu.NewCache[string, string](store,
lfu.WithTTL[string, string](ttl),
lfu.WithCapacity[string, string](10),
)
if err := cache.Set(key, value); err != nil {
return errors.WithStack(err)
}
retrieved, err := cache.Get(key)
if err != nil {
return errors.WithStack(err)
}
if e, g := value, retrieved; e != g {
t.Errorf("cache.Get(key): expected '%v', got '%v'", e, g)
}
time.Sleep(ttl * 2)
if _, err := cache.Get(key); !errors.Is(err, lfu.ErrNotFound) {
t.Errorf("cache.Get(key): expected err == lfu.ErrNotFound, got '%v'", err)
}
t.Logf("cache [capacity: %d, size: %d, len: %d]", cache.Capacity(), cache.Size(), cache.Len())
if e, g := 0, cache.Size(); e != g {
t.Errorf("cache.Size(): expected '%v', got '%v'", e, g)
}
if e, g := 0, cache.Len(); e != g {
t.Errorf("cache.Len(): expected '%v', got '%v'", e, g)
}
return nil
}

88
pkg/storage/driver/cache/options.go vendored Normal file
View File

@ -0,0 +1,88 @@
package cache
import (
"time"
"forge.cadoles.com/arcad/edge/pkg/storage"
"forge.cadoles.com/arcad/edge/pkg/storage/driver/cache/lfu"
"forge.cadoles.com/arcad/edge/pkg/storage/driver/cache/lfu/memory"
)
type Options struct {
CacheTTL time.Duration
BlobCacheStore lfu.Store[string, []byte]
// Maximum total size of cached data
BlobCacheSize int
BlobInfoCacheStore lfu.Store[string, storage.BlobInfo]
// Maximum number of blob infos
BlobInfoCacheSize int
BlobBucketCacheStore lfu.Store[string, storage.BlobBucket]
// Maximum number of blob buckets
BlobBucketCacheSize int
}
type OptionFunc func(opts *Options)
func NewOptions(funcs ...OptionFunc) *Options {
defaultTTL := 60 * time.Minute
opts := &Options{
CacheTTL: defaultTTL,
BlobCacheStore: memory.NewStore[string, []byte](),
BlobCacheSize: 1e+9, // 1GB
BlobInfoCacheStore: memory.NewStore[string, storage.BlobInfo](),
BlobInfoCacheSize: 256,
BlobBucketCacheStore: memory.NewStore[string, storage.BlobBucket](),
BlobBucketCacheSize: 16,
}
for _, fn := range funcs {
fn(opts)
}
return opts
}
func WithCacheTTL(ttl time.Duration) OptionFunc {
return func(opts *Options) {
opts.CacheTTL = ttl
}
}
func WithBlobBucketCacheSize(size int) OptionFunc {
return func(opts *Options) {
opts.BlobBucketCacheSize = size
}
}
func WithBlobBucketCacheStore(store lfu.Store[string, storage.BlobBucket]) OptionFunc {
return func(opts *Options) {
opts.BlobBucketCacheStore = store
}
}
func WithBlobInfoCacheSize(size int) OptionFunc {
return func(opts *Options) {
opts.BlobInfoCacheSize = size
}
}
func WithBlobInfoCacheStore(store lfu.Store[string, storage.BlobInfo]) OptionFunc {
return func(opts *Options) {
opts.BlobInfoCacheStore = store
}
}
func WithBlobCacheSize(size int) OptionFunc {
return func(opts *Options) {
opts.BlobCacheSize = size
}
}
func WithBlobCacheStore(store lfu.Store[string, []byte]) OptionFunc {
return func(opts *Options) {
opts.BlobCacheStore = store
}
}

131
pkg/storage/driver/cache/reader.go vendored Normal file
View File

@ -0,0 +1,131 @@
package cache
import (
"bytes"
"context"
"fmt"
"io"
"cdr.dev/slog"
"forge.cadoles.com/arcad/edge/pkg/storage/driver/cache/lfu"
"github.com/pkg/errors"
"gitlab.com/wpetit/goweb/logger"
)
type readCacher struct {
reader io.ReadSeekCloser
cache *lfu.Cache[string, []byte]
buf bytes.Buffer
key string
}
// Close implements io.ReadSeekCloser.
func (r *readCacher) Close() error {
if err := r.reader.Close(); err != nil {
return errors.WithStack(err)
}
if err := r.cache.Set(r.key, r.buf.Bytes()); err != nil {
var logErr slog.Field
if errors.Is(err, lfu.ErrSizeExceedCapacity) {
logErr = logger.E(errors.WithStack(err))
} else {
logErr = logger.CapturedE(errors.WithStack(err))
}
logger.Error(context.Background(), "could not cache buffered data",
logger.F("cacheKey", r.key),
logErr,
)
}
r.buf.Reset()
return nil
}
// Read implements io.ReadSeekCloser.
func (r *readCacher) Read(p []byte) (n int, err error) {
length, err := r.reader.Read(p)
if err != nil {
if err == io.EOF {
return length, io.EOF
}
return length, errors.WithStack(err)
}
if length > 0 {
if _, err := r.buf.Write(p[:length]); err != nil {
ctx := logger.With(context.Background(), logger.F("cacheKey", r.key))
logger.Error(ctx, "could not write to buffer", logger.CapturedE(errors.WithStack(err)))
}
}
return length, nil
}
// Seek implements io.ReadSeekCloser.
func (r *readCacher) Seek(offset int64, whence int) (int64, error) {
length, err := r.reader.Seek(offset, whence)
if err != nil {
return length, errors.WithStack(err)
}
return length, nil
}
var _ io.ReadSeekCloser = &readCacher{}
type cachedReader struct {
buffer []byte
offset int64
}
// Read implements io.ReadSeekCloser.
func (r *cachedReader) Read(p []byte) (n int, err error) {
available := len(r.buffer) - int(r.offset)
if available == 0 {
return 0, io.EOF
}
size := len(p)
if size > available {
size = available
}
copy(p, r.buffer[r.offset:r.offset+int64(size)])
r.offset += int64(size)
return size, nil
}
// Close implements io.ReadSeekCloser.
func (r *cachedReader) Close() error {
return nil
}
// Seek implements io.ReadSeekCloser.
func (r *cachedReader) Seek(offset int64, whence int) (int64, error) {
var newOffset int64
switch whence {
case io.SeekStart:
newOffset = offset
case io.SeekCurrent:
newOffset = r.offset + offset
case io.SeekEnd:
newOffset = int64(len(r.buffer)) + offset
default:
return 0, errors.Errorf("unknown seek whence '%d'", whence)
}
if newOffset > int64(len(r.buffer)) || newOffset < 0 {
return 0, fmt.Errorf("invalid offset %d", offset)
}
r.offset = newOffset
return newOffset, nil
}
var _ io.ReadSeekCloser = &cachedReader{}
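Putting the two readers together, a sketch of the read-through flow (the bucket name is made up): the first reader buffers what it reads and fills the blob cache on Close, so a second reader for the same blob can be served from the cache without touching the backend.

```go
package example

import (
	"context"
	"io"

	"forge.cadoles.com/arcad/edge/pkg/storage"
	"github.com/pkg/errors"
)

// readTwice is illustrative only. The first read goes to the backend through a
// readCacher; closing it stores the buffered content, and the second read of
// the same blob is then served by a cachedReader.
func readTwice(ctx context.Context, store storage.BlobStore, id storage.BlobID) ([]byte, error) {
	bucket, err := store.OpenBucket(ctx, "assets")
	if err != nil {
		return nil, errors.WithStack(err)
	}
	defer bucket.Close()

	r1, err := bucket.NewReader(ctx, id)
	if err != nil {
		return nil, errors.WithStack(err)
	}
	if _, err := io.Copy(io.Discard, r1); err != nil {
		return nil, errors.WithStack(err)
	}
	if err := r1.Close(); err != nil { // the blob cache is populated here
		return nil, errors.WithStack(err)
	}

	r2, err := bucket.NewReader(ctx, id)
	if err != nil {
		return nil, errors.WithStack(err)
	}
	defer r2.Close()

	return io.ReadAll(r2)
}
```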

View File

@ -0,0 +1,2 @@
*
!.gitignore

View File

@ -5,7 +5,6 @@ import (
"net/url"
"github.com/keegancsmith/rpc"
"gitlab.com/wpetit/goweb/logger"
"forge.cadoles.com/arcad/edge/pkg/storage"
"forge.cadoles.com/arcad/edge/pkg/storage/driver/rpc/server/blob"
@ -13,7 +12,7 @@ import (
)
type BlobStore struct {
-serverURL *url.URL
+withClient WithClientFunc
}
// DeleteBucket implements storage.BlobStore.
@ -63,7 +62,7 @@ func (s *BlobStore) OpenBucket(ctx context.Context, name string) (storage.BlobBu
func (s *BlobStore) call(ctx context.Context, serviceMethod string, args any, reply any) error {
err := s.withClient(ctx, func(ctx context.Context, client *rpc.Client) error {
if err := client.Call(ctx, serviceMethod, args, reply); err != nil {
-return errors.WithStack(err)
+return errors.WithStack(remapBlobError(err))
}
return nil
@ -75,27 +74,11 @@ func (s *BlobStore) call(ctx context.Context, serviceMethod string, args any, re
return nil
}
-func (s *BlobStore) withClient(ctx context.Context, fn func(ctx context.Context, client *rpc.Client) error) error {
-client, err := rpc.DialHTTPPath("tcp", s.serverURL.Host, s.serverURL.Path+"?"+s.serverURL.RawQuery)
-if err != nil {
-return errors.WithStack(err)
-}
-defer func() {
-if err := client.Close(); err != nil {
-logger.Error(ctx, "could not close rpc client", logger.CapturedE(errors.WithStack(err)))
-}
-}()
-if err := fn(ctx, client); err != nil {
-return errors.WithStack(err)
-}
-return nil
-}
func NewBlobStore(serverURL *url.URL) *BlobStore {
-return &BlobStore{serverURL}
+withClient := WithPooledClient(serverURL)
+return &BlobStore{
+withClient: withClient,
+}
}
var _ storage.BlobStore = &BlobStore{}

View File

@ -0,0 +1,133 @@
package client

import (
    "context"
    "net/url"
    "strconv"
    "sync"
    "time"

    "github.com/jackc/puddle/v2"
    "github.com/keegancsmith/rpc"
    "github.com/pkg/errors"
    "gitlab.com/wpetit/goweb/logger"
)

func NewClientPool(serverURL *url.URL, poolSize int) (*puddle.Pool[*rpc.Client], error) {
    constructor := func(context.Context) (*rpc.Client, error) {
        client, err := rpc.DialHTTPPath("tcp", serverURL.Host, serverURL.Path+"?"+serverURL.RawQuery)
        if err != nil {
            return nil, errors.WithStack(err)
        }
        return client, nil
    }
    destructor := func(client *rpc.Client) {
        if err := client.Close(); err != nil {
            logger.Error(context.Background(), "could not close client", logger.CapturedE(errors.WithStack(err)))
        }
    }
    maxPoolSize := int32(poolSize)
    pool, err := puddle.NewPool(&puddle.Config[*rpc.Client]{Constructor: constructor, Destructor: destructor, MaxSize: maxPoolSize})
    if err != nil {
        return nil, errors.WithStack(err)
    }
    return pool, nil
}
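As an aside, the puddle pool returned by NewClientPool can also be used directly. A minimal sketch, as a hypothetical helper assuming the imports of this file; the stores themselves go through WithPooledClient instead:

// callOnce acquires a pooled RPC client, performs a single call and hands the
// connection back to the pool. Illustration only, not part of this change.
func callOnce(ctx context.Context, pool *puddle.Pool[*rpc.Client], serviceMethod string, args any, reply any) error {
    res, err := pool.Acquire(ctx)
    if err != nil {
        return errors.WithStack(err)
    }
    // Return the client to the pool once the call is done.
    defer res.Release()

    if err := res.Value().Call(ctx, serviceMethod, args, reply); err != nil {
        return errors.WithStack(err)
    }

    return nil
}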
type WithClientFunc func(ctx context.Context, fn func(ctx context.Context, client *rpc.Client) error) error

func WithPooledClient(serverURL *url.URL) WithClientFunc {
    var (
        pool       *puddle.Pool[*rpc.Client]
        createPool sync.Once
    )

    return func(ctx context.Context, fn func(context.Context, *rpc.Client) error) error {
        var err error

        // Lazily create the pool on first use, sized by the clientPoolSize
        // query parameter (default 5).
        createPool.Do(func() {
            rawPoolSize := serverURL.Query().Get("clientPoolSize")
            if rawPoolSize == "" {
                rawPoolSize = "5"
            }
            var poolSize int64
            poolSize, err = strconv.ParseInt(rawPoolSize, 10, 32)
            if err != nil {
                err = errors.Wrap(err, "could not parse clientPoolSize url query parameter")
                return
            }
            pool, err = NewClientPool(serverURL, int(poolSize))
            if err != nil {
                err = errors.WithStack(err)
                return
            }
        })
        if err != nil {
            return errors.WithStack(err)
        }

        attempts := 0
        max := 5

        for {
            if attempts >= max {
                logger.Debug(ctx, "rpc client call retrying failed", logger.F("attempts", attempts))
                // Give up after too many failed attempts.
                return errors.Errorf("rpc client call failed after %d attempts", max)
            }

            clientResource, err := pool.Acquire(ctx)
            if err != nil {
                return errors.WithStack(err)
            }
            client := clientResource.Value()

            if err := fn(ctx, client); err != nil {
                if errors.Is(err, rpc.ErrShutdown) {
                    // The pooled connection is dead: drop it and retry with backoff.
                    clientResource.Destroy()
                    wait := time.Duration(8<<(attempts+1)) * time.Millisecond
                    logger.Warn(
                        ctx, "rpc client connection is shutdown, retrying",
                        logger.F("attempts", attempts),
                        logger.F("max", max),
                        logger.F("delay", wait),
                    )
                    timer := time.NewTimer(wait)
                    select {
                    case <-timer.C:
                        attempts++
                        continue
                    case <-ctx.Done():
                        if err := ctx.Err(); err != nil {
                            return errors.WithStack(err)
                        }
                        return nil
                    }
                }
                clientResource.Release()
                return errors.WithStack(err)
            }

            clientResource.Release()

            return nil
        }
    }
}
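When fn fails with rpc.ErrShutdown, the wrapper destroys the dead connection and retries after a delay of 8<<(attempts+1) milliseconds, i.e. 16, 32, 64, 128 and 256 ms, before giving up after 5 attempts. A hypothetical caller, assuming the imports of this file (the service method name and argument are placeholders), looks like this:

func ping(ctx context.Context, serverURL *url.URL) error {
    withClient := WithPooledClient(serverURL)

    err := withClient(ctx, func(ctx context.Context, client *rpc.Client) error {
        var reply bool
        if err := client.Call(ctx, "Service.Ping", struct{}{}, &reply); err != nil {
            return errors.WithStack(err)
        }
        return nil
    })
    if err != nil {
        return errors.WithStack(err)
    }

    return nil
}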


@ -6,7 +6,6 @@ import (
"github.com/keegancsmith/rpc"
"github.com/pkg/errors"
"gitlab.com/wpetit/goweb/logger"
"forge.cadoles.com/arcad/edge/pkg/storage"
"forge.cadoles.com/arcad/edge/pkg/storage/driver/rpc/server/document"
@ -14,7 +13,7 @@ import (
 )
 type DocumentStore struct {
-    serverURL *url.URL
+    withClient WithClientFunc
 }
 // Delete implements storage.DocumentStore.
@ -96,7 +95,7 @@ func (s *DocumentStore) Upsert(ctx context.Context, collection string, doc stora
 func (s *DocumentStore) call(ctx context.Context, serviceMethod string, args any, reply any) error {
     err := s.withClient(ctx, func(ctx context.Context, client *rpc.Client) error {
         if err := client.Call(ctx, serviceMethod, args, reply); err != nil {
-            return errors.WithStack(err)
+            return errors.WithStack(remapDocumentError(err))
         }
         return nil
@ -108,27 +107,12 @@ func (s *DocumentStore) call(ctx context.Context, serviceMethod string, args any
     return nil
 }
-func (s *DocumentStore) withClient(ctx context.Context, fn func(ctx context.Context, client *rpc.Client) error) error {
-    client, err := rpc.DialHTTPPath("tcp", s.serverURL.Host, s.serverURL.Path+"?"+s.serverURL.RawQuery)
-    if err != nil {
-        return errors.WithStack(err)
-    }
-    defer func() {
-        if err := client.Close(); err != nil {
-            logger.Error(ctx, "could not close rpc client", logger.CapturedE(errors.WithStack(err)))
-        }
-    }()
-    if err := fn(ctx, client); err != nil {
-        return errors.WithStack(err)
-    }
-    return nil
-}
-func NewDocumentStore(url *url.URL) *DocumentStore {
-    return &DocumentStore{url}
+func NewDocumentStore(serverURL *url.URL) *DocumentStore {
+    withClient := WithPooledClient(serverURL)
+    return &DocumentStore{
+        withClient: withClient,
+    }
 }
 var _ storage.DocumentStore = &DocumentStore{}
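The call helper now passes every RPC error through remapDocumentError (defined in the errors file shown further down in this diff), which maps the error text back to the package sentinel values so callers can keep using errors.Is. A small in-package sketch of the effect, assuming fmt in addition to the file's imports; the wrapped error below is fabricated for illustration:

// sketchRemap shows why the remapping matters: the error coming back over the
// wire carries the right message but is a distinct value, so errors.Is only
// matches after remapping.
func sketchRemap() {
    rpcErr := errors.New(storage.ErrDocumentNotFound.Error())

    fmt.Println(errors.Is(rpcErr, storage.ErrDocumentNotFound))                     // false
    fmt.Println(errors.Is(remapDocumentError(rpcErr), storage.ErrDocumentNotFound)) // true
}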


@ -1,10 +1,31 @@
package client

import (
    "forge.cadoles.com/arcad/edge/pkg/storage"
    "forge.cadoles.com/arcad/edge/pkg/storage/share"
    "github.com/pkg/errors"
)

func remapBlobError(err error) error {
    switch errors.Cause(err).Error() {
    case storage.ErrBlobNotFound.Error():
        return storage.ErrBlobNotFound
    case storage.ErrBucketClosed.Error():
        return storage.ErrBucketClosed
    default:
        return err
    }
}

func remapDocumentError(err error) error {
    switch errors.Cause(err).Error() {
    case storage.ErrDocumentNotFound.Error():
        return storage.ErrDocumentNotFound
    default:
        return err
    }
}

func remapShareError(err error) error {
    switch errors.Cause(err).Error() {
    case share.ErrAttributeRequired.Error():

Some files were not shown because too many files have changed in this diff.