fix: prevent bus congestion by flushing out messages

wpetit 2023-04-26 15:53:23 +02:00
parent ba9ae6e391
commit 17808d14c9
3 changed files with 138 additions and 69 deletions


@@ -22,13 +22,13 @@ func (b *Bus) Subscribe(ctx context.Context, ns bus.MessageNamespace) (<-chan bu
     )

     dispatchers := b.getDispatchers(ns)

-    d := newEventDispatcher(b.opt.BufferSize)
+    disp := newEventDispatcher(b.opt.BufferSize)

-    go d.Run()
+    go disp.Run(ctx)

-    dispatchers.Add(d)
+    dispatchers.Add(disp)

-    return d.Out(), nil
+    return disp.Out(), nil
 }

 func (b *Bus) Unsubscribe(ctx context.Context, ns bus.MessageNamespace, ch <-chan bus.Message) {
@@ -52,6 +52,12 @@ func (b *Bus) Publish(ctx context.Context, msg bus.Message) error {
     )

     for _, d := range dispatchersList {
+        if d.Closed() {
+            dispatchers.Remove(d)
+
+            continue
+        }
+
         if err := d.In(msg); err != nil {
             return errors.WithStack(err)
         }
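The Publish change above comes down to one guard: dispatchers that have already been closed are pruned from the set before the message is handed to them, so a dead subscriber can no longer pile up messages or block the publisher. Below is a minimal, standalone Go sketch of that pattern; the dispatcher type, the publish function and the error text are placeholders, not the project's actual API.

package sketch

import "errors"

type dispatcher struct {
    in     chan string
    closed bool
}

func (d *dispatcher) Closed() bool { return d.closed }

// In forwards a message without blocking; a full buffer is reported as an error.
func (d *dispatcher) In(msg string) error {
    select {
    case d.in <- msg:
        return nil
    default:
        return errors.New("dispatcher buffer full")
    }
}

// publish mirrors the loop in the diff: skip and remove closed dispatchers,
// forward the message to the live ones.
func publish(set map[*dispatcher]struct{}, msg string) error {
    for d := range set {
        if d.Closed() {
            delete(set, d)
            continue
        }
        if err := d.In(msg); err != nil {
            return err
        }
    }
    return nil
}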


@@ -1,9 +1,13 @@
 package memory

 import (
+    "context"
     "sync"
+    "time"

     "forge.cadoles.com/arcad/edge/pkg/bus"
+    "github.com/pkg/errors"
+    "gitlab.com/wpetit/goweb/logger"
 )

 type eventDispatcherSet struct {
@@ -18,13 +22,21 @@ func (s *eventDispatcherSet) Add(d *eventDispatcher) {
     s.items[d] = struct{}{}
 }

+func (s *eventDispatcherSet) Remove(d *eventDispatcher) {
+    s.mutex.Lock()
+    defer s.mutex.Unlock()
+
+    d.close()
+    delete(s.items, d)
+}
+
 func (s *eventDispatcherSet) RemoveByOutChannel(out <-chan bus.Message) {
     s.mutex.Lock()
     defer s.mutex.Unlock()

     for d := range s.items {
         if d.IsOut(out) {
-            d.Close()
+            d.close()
             delete(s.items, d)
         }
     }
@@ -56,10 +68,21 @@ type eventDispatcher struct {
     closed bool
 }

+func (d *eventDispatcher) Closed() bool {
+    d.mutex.RLock()
+    defer d.mutex.RUnlock()
+
+    return d.closed
+}
+
 func (d *eventDispatcher) Close() {
     d.mutex.Lock()
     defer d.mutex.Unlock()

+    d.close()
+}
+
+func (d *eventDispatcher) close() {
     d.closed = true
     close(d.in)
 }
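The Closed/Close/close split above is a common Go locking idiom: the exported Closed and Close take the mutex, while the unexported close does the actual work without locking, so Close (which already holds the lock) and callers that manage synchronization themselves can reuse it without locking twice. A small standalone sketch of the idiom, with placeholder names unrelated to the real package:

package sketch

import "sync"

type worker struct {
    mutex sync.RWMutex
    done  bool
}

// Done is the exported, locking accessor.
func (w *worker) Done() bool {
    w.mutex.RLock()
    defer w.mutex.RUnlock()

    return w.done
}

// Finish is the exported entry point: it takes the lock once, then delegates.
func (w *worker) Finish() {
    w.mutex.Lock()
    defer w.mutex.Unlock()

    w.finish()
}

// finish assumes the caller coordinates access; it never locks itself.
func (w *worker) finish() {
    w.done = true
}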
@@ -85,16 +108,52 @@ func (d *eventDispatcher) IsOut(out <-chan bus.Message) bool {
     return d.out == out
 }

-func (d *eventDispatcher) Run() {
+func (d *eventDispatcher) Run(ctx context.Context) {
+    defer func() {
+        logger.Debug(ctx, "closing dispatcher, flushing out incoming messages")
+
+        close(d.out)
+
+        // Flush all incoming messages
+        for {
+            _, ok := <-d.in
+            if !ok {
+                return
+            }
+        }
+    }()
+
     for {
         msg, ok := <-d.in
         if !ok {
-            close(d.out)
-
             return
         }

-        d.out <- msg
+        timeout := time.After(time.Second)
+
+        select {
+        case d.out <- msg:
+        case <-timeout:
+            logger.Error(
+                ctx,
+                "out message channel timeout",
+                logger.F("message", msg),
+            )
+
+            return
+        case <-ctx.Done():
+            logger.Error(
+                ctx,
+                "message subscription context canceled",
+                logger.F("message", msg),
+                logger.E(errors.WithStack(ctx.Err())),
+            )
+
+            return
+        }
     }
 }
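The new Run loop is the heart of the fix: a subscriber that stops reading can no longer block the dispatcher forever, because each send is raced against a one-second timeout and the subscription context, and on exit the dispatcher closes its output and drains its input so senders are never left stuck. A condensed, standalone sketch of the same control flow follows; the channel element type, logging calls and function name are illustrative, not the project's code.

package sketch

import (
    "context"
    "log"
    "time"
)

// run forwards messages from in to out, gives up when the reader is too slow
// or the context is canceled, and drains in on the way out.
func run(ctx context.Context, in <-chan string, out chan<- string) {
    defer func() {
        close(out)
        // Flush remaining incoming messages so producers can finish.
        for range in {
        }
    }()

    for {
        msg, ok := <-in
        if !ok {
            return
        }

        select {
        case out <- msg:
        case <-time.After(time.Second):
            log.Printf("out channel timeout, dropping subscriber (msg=%q)", msg)
            return
        case <-ctx.Done():
            log.Printf("subscription context canceled: %v", ctx.Err())
            return
        }
    }
}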


@@ -123,16 +123,21 @@ func (m *RPCModule) handleMessages() {
     }

     for msg := range clientMessages {
+        go m.handleMessage(ctx, msg, sendRes)
+    }
+}
+
+func (m *RPCModule) handleMessage(ctx context.Context, msg bus.Message, sendRes func(ctx context.Context, req *RPCRequest, result goja.Value)) {
     clientMessage, ok := msg.(*ClientMessage)
     if !ok {
         logger.Warn(ctx, "unexpected bus message", logger.F("message", msg))

-        continue
+        return
     }

     ok, req := m.isRPCRequest(clientMessage)
     if !ok {
-        continue
+        return
     }

     logger.Debug(ctx, "received rpc request", logger.F("request", req))
@@ -149,7 +154,7 @@ func (m *RPCModule) handleMessages() {
             )
         }

-        continue
+        return
     }

     callable, ok := rawCallable.(goja.Callable)
@@ -164,7 +169,7 @@ func (m *RPCModule) handleMessages() {
             )
         }

-        continue
+        return
     }

     result, err := m.server.Exec(clientMessage.Context, callable, clientMessage.Context, req.Params)
@@ -184,7 +189,7 @@ func (m *RPCModule) handleMessages() {
             )
         }

-        continue
+        return
     }

     promise, ok := app.IsPromise(result)
@@ -196,7 +201,6 @@ func (m *RPCModule) handleMessages() {
     } else {
         sendRes(clientMessage.Context, req, result)
     }
-    }
 }

 func (m *RPCModule) sendErrorResponse(ctx context.Context, req *RPCRequest, err error) error {
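The RPC module change attacks the same congestion from the consumer side: the receive loop now only spawns a goroutine per message, so one slow RPC handler no longer stalls the loop that drains the bus, and the per-message logic moves into its own function where the former continue statements become early returns. A rough, standalone sketch of that shape; the message and request types are placeholders, not the module's real API.

package sketch

import (
    "context"
    "log"
)

// handleMessages only dispatches; the heavy lifting runs concurrently.
func handleMessages(ctx context.Context, messages <-chan any) {
    for msg := range messages {
        go handleMessage(ctx, msg)
    }
}

func handleMessage(ctx context.Context, msg any) {
    req, ok := msg.(string) // stand-in for the real client message assertion
    if !ok {
        log.Printf("unexpected message: %#v", msg)
        return
    }

    // ... decode the RPC request, look up and invoke the registered callable,
    // then send the response; each step returns early on failure instead of
    // using continue, since it no longer runs inside the receive loop.
    log.Printf("handling request %q", req)
}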