Fix issue with make install
parent ef50c1957b
commit 1370d24985
@@ -35,3 +35,4 @@ suppressions
 release
 .gofuzz
 *-fuzz.zip
+
Makefile (5 changes)

@@ -77,11 +77,10 @@ clean:
 run: clean
     @go run $(BUILD_FLAGS) main.go $(ARGS)
 
-install:
-    @echo $(GOPATH)
+install: build
+    @mv $(BINARY) $(GOPATH)/bin/$(BINARY)
     @echo "Commit Hash: `git rev-parse HEAD`"
     @echo "Old Hash: `shasum $(GOPATH)/bin/$(BINARY) 2>/dev/null | cut -c -32`"
-    @go install $(BUILD_FLAGS) cmd
     @echo "New Hash:" `shasum $(GOPATH)/bin/$(BINARY) 2>/dev/null | cut -c -32`
 
 uninstall: clean
@@ -10,20 +10,9 @@ import (
     "strings"
     "sync"
 
-    "github.com/cespare/xxhash/v2"
     "github.com/dosco/super-graph/jsn"
 )
 
-// nolint: errcheck
-func mkkey(h *xxhash.Digest, k1 string, k2 string) uint64 {
-    h.WriteString(k1)
-    h.WriteString(k2)
-    v := h.Sum64()
-    h.Reset()
-
-    return v
-}
-
 // nolint: errcheck
 func gqlHash(b string, vars []byte, role string) string {
     b = strings.TrimSpace(b)
@@ -87,6 +87,7 @@ type SuperGraph struct {
     prepared    map[string]*preparedItem
     roles       map[string]*Role
     getRole     *sql.Stmt
+    rmap        map[uint64]*resolvFn
     abacEnabled bool
    anonExists  bool
     qc          *qcode.Compiler

@@ -118,6 +119,10 @@ func NewSuperGraph(conf *Config, db *sql.DB) (*SuperGraph, error) {
         return nil, err
     }
 
+    if err := sg.initResolvers(); err != nil {
+        return nil, err
+    }
+
     if len(conf.SecretKey) != 0 {
         sk := sha256.Sum256([]byte(conf.SecretKey))
         conf.SecretKey = ""
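
Note: the resolvFn type behind the new rmap field is declared in core/resolve.go further down in this diff. A minimal sketch of how one entry is shaped, with invented values (not code from the commit):

package core

import "net/http"

// Sketch only; resolvFn matches the type declared in core/resolve.go below,
// but every value here is made up for illustration.
func exampleRmapEntry() *resolvFn {
    return &resolvFn{
        IDField: []byte("__products_stripe_id"), // marker key injected into the db response
        Path:    [][]byte{[]byte("data")},       // sub-path to keep from the remote JSON
        Fn: func(h http.Header, id []byte) ([]byte, error) {
            // stand-in for the HTTP fetcher that buildFn returns
            return []byte(`{"amount":1499}`), nil
        },
    }
}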

core/core.go (13 changes)

@@ -89,25 +89,28 @@ func (sg *SuperGraph) initCompilers() error {
 
 func (c *scontext) execQuery() ([]byte, error) {
     var data []byte
-    // var st *stmt
+    var st *stmt
     var err error
 
     if c.sg.conf.UseAllowList {
-        data, _, err = c.resolvePreparedSQL()
+        data, st, err = c.resolvePreparedSQL()
         if err != nil {
             return nil, err
         }
 
     } else {
-        data, _, err = c.resolveSQL()
+        data, st, err = c.resolveSQL()
         if err != nil {
             return nil, err
         }
     }
 
-    return data, nil
-    //return execRemoteJoin(st, data, c.req.hdr)
+    if len(data) == 0 || st.skipped == 0 {
+        return data, nil
+    }
+
+    // return c.sg.execRemoteJoin(st, data, c.req.hdr)
+    return c.sg.execRemoteJoin(st, data, nil)
 }
 
 func (c *scontext) resolvePreparedSQL() ([]byte, *stmt, error) {
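
For context, st.skipped is treated as a bitmask over select IDs: a set bit marks a select that has to be resolved remotely rather than by the database, which is why execQuery only falls through to execRemoteJoin when the mask is non-zero. A small self-contained illustration of that check; the mask value is invented, and the bit test mirrors isSkipped from core/remote.go below:

package main

import "fmt"

// isSkipped mirrors the helper defined in core/remote.go in this commit.
func isSkipped(n uint32, pos uint32) bool {
    return (n & (1 << pos)) != 0
}

func main() {
    var skipped uint32 = 1<<2 | 1<<5 // pretend selects 2 and 5 need remote joins
    for id := uint32(0); id < 6; id++ {
        fmt.Printf("select %d resolved remotely: %v\n", id, isSkipped(skipped, id))
    }
}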

core/remote.go (382 changes)

@@ -1,253 +1,249 @@
 package core
 
-// import (
-// 	"bytes"
-// 	"errors"
-// 	"fmt"
-// 	"net/http"
-// 	"sync"
-
-// 	"github.com/cespare/xxhash/v2"
-// 	"github.com/dosco/super-graph/jsn"
-// 	"github.com/dosco/super-graph/core/internal/qcode"
-// )
+import (
+    "bytes"
+    "errors"
+    "fmt"
+    "net/http"
+    "sync"
+
+    "github.com/cespare/xxhash/v2"
+    "github.com/dosco/super-graph/core/internal/qcode"
+    "github.com/dosco/super-graph/jsn"
+)
 
-// func execRemoteJoin(st *stmt, data []byte, hdr http.Header) ([]byte, error) {
-// 	var err error
-
-// 	if len(data) == 0 || st.skipped == 0 {
-// 		return data, nil
-// 	}
-
-// 	sel := st.qc.Selects
-// 	h := xxhash.New()
-
-// 	// fetch the field name used within the db response json
-// 	// that are used to mark insertion points and the mapping between
-// 	// those field names and their select objects
-// 	fids, sfmap := parentFieldIds(h, sel, st.skipped)
-
-// 	// fetch the field values of the marked insertion points
-// 	// these values contain the id to be used with fetching remote data
-// 	from := jsn.Get(data, fids)
-// 	var to []jsn.Field
-
-// 	switch {
-// 	case len(from) == 1:
-// 		to, err = resolveRemote(hdr, h, from[0], sel, sfmap)
-
-// 	case len(from) > 1:
-// 		to, err = resolveRemotes(hdr, h, from, sel, sfmap)
-
-// 	default:
-// 		return nil, errors.New("something wrong no remote ids found in db response")
-// 	}
-
-// 	if err != nil {
-// 		return nil, err
-// 	}
-
-// 	var ob bytes.Buffer
-
-// 	err = jsn.Replace(&ob, data, from, to)
-// 	if err != nil {
-// 		return nil, err
-// 	}
-
-// 	return ob.Bytes(), nil
-// }
+func (sg *SuperGraph) execRemoteJoin(st *stmt, data []byte, hdr http.Header) ([]byte, error) {
+    var err error
+
+    sel := st.qc.Selects
+    h := xxhash.New()
+
+    // fetch the field name used within the db response json
+    // that are used to mark insertion points and the mapping between
+    // those field names and their select objects
+    fids, sfmap := sg.parentFieldIds(h, sel, st.skipped)
+
+    // fetch the field values of the marked insertion points
+    // these values contain the id to be used with fetching remote data
+    from := jsn.Get(data, fids)
+    var to []jsn.Field
+
+    switch {
+    case len(from) == 1:
+        to, err = sg.resolveRemote(hdr, h, from[0], sel, sfmap)
+
+    case len(from) > 1:
+        to, err = sg.resolveRemotes(hdr, h, from, sel, sfmap)
+
+    default:
+        return nil, errors.New("something wrong no remote ids found in db response")
+    }
+
+    if err != nil {
+        return nil, err
+    }
+
+    var ob bytes.Buffer
+
+    err = jsn.Replace(&ob, data, from, to)
+    if err != nil {
+        return nil, err
+    }
+
+    return ob.Bytes(), nil
+}
 
-// func resolveRemote(
-// 	hdr http.Header,
-// 	h *xxhash.Digest,
-// 	field jsn.Field,
-// 	sel []qcode.Select,
-// 	sfmap map[uint64]*qcode.Select) ([]jsn.Field, error) {
-
-// 	// replacement data for the marked insertion points
-// 	// key and value will be replaced by whats below
-// 	toA := [1]jsn.Field{}
-// 	to := toA[:1]
-
-// 	// use the json key to find the related Select object
-// 	k1 := xxhash.Sum64(field.Key)
-
-// 	s, ok := sfmap[k1]
-// 	if !ok {
-// 		return nil, nil
-// 	}
-// 	p := sel[s.ParentID]
-
-// 	// then use the Table nme in the Select and it's parent
-// 	// to find the resolver to use for this relationship
-// 	k2 := mkkey(h, s.Name, p.Name)
-
-// 	r, ok := rmap[k2]
-// 	if !ok {
-// 		return nil, nil
-// 	}
-
-// 	id := jsn.Value(field.Value)
-// 	if len(id) == 0 {
-// 		return nil, nil
-// 	}
-
-// 	//st := time.Now()
-
-// 	b, err := r.Fn(hdr, id)
-// 	if err != nil {
-// 		return nil, err
-// 	}
-
-// 	if len(r.Path) != 0 {
-// 		b = jsn.Strip(b, r.Path)
-// 	}
-
-// 	var ob bytes.Buffer
-
-// 	if len(s.Cols) != 0 {
-// 		err = jsn.Filter(&ob, b, colsToList(s.Cols))
-// 		if err != nil {
-// 			return nil, err
-// 		}
-
-// 	} else {
-// 		ob.WriteString("null")
-// 	}
-
-// 	to[0] = jsn.Field{Key: []byte(s.FieldName), Value: ob.Bytes()}
-// 	return to, nil
-// }
+func (sg *SuperGraph) resolveRemote(
+    hdr http.Header,
+    h *xxhash.Digest,
+    field jsn.Field,
+    sel []qcode.Select,
+    sfmap map[uint64]*qcode.Select) ([]jsn.Field, error) {
+
+    // replacement data for the marked insertion points
+    // key and value will be replaced by whats below
+    toA := [1]jsn.Field{}
+    to := toA[:1]
+
+    // use the json key to find the related Select object
+    k1 := xxhash.Sum64(field.Key)
+
+    s, ok := sfmap[k1]
+    if !ok {
+        return nil, nil
+    }
+    p := sel[s.ParentID]
+
+    // then use the Table nme in the Select and it's parent
+    // to find the resolver to use for this relationship
+    k2 := mkkey(h, s.Name, p.Name)
+
+    r, ok := sg.rmap[k2]
+    if !ok {
+        return nil, nil
+    }
+
+    id := jsn.Value(field.Value)
+    if len(id) == 0 {
+        return nil, nil
+    }
+
+    //st := time.Now()
+
+    b, err := r.Fn(hdr, id)
+    if err != nil {
+        return nil, err
+    }
+
+    if len(r.Path) != 0 {
+        b = jsn.Strip(b, r.Path)
+    }
+
+    var ob bytes.Buffer
+
+    if len(s.Cols) != 0 {
+        err = jsn.Filter(&ob, b, colsToList(s.Cols))
+        if err != nil {
+            return nil, err
+        }
+
+    } else {
+        ob.WriteString("null")
+    }
+
+    to[0] = jsn.Field{Key: []byte(s.FieldName), Value: ob.Bytes()}
+    return to, nil
+}
 
-// func resolveRemotes(
-// 	hdr http.Header,
-// 	h *xxhash.Digest,
-// 	from []jsn.Field,
-// 	sel []qcode.Select,
-// 	sfmap map[uint64]*qcode.Select) ([]jsn.Field, error) {
-
-// 	// replacement data for the marked insertion points
-// 	// key and value will be replaced by whats below
-// 	to := make([]jsn.Field, len(from))
-
-// 	var wg sync.WaitGroup
-// 	wg.Add(len(from))
-
-// 	var cerr error
-
-// 	for i, id := range from {
-
-// 		// use the json key to find the related Select object
-// 		k1 := xxhash.Sum64(id.Key)
-
-// 		s, ok := sfmap[k1]
-// 		if !ok {
-// 			return nil, nil
-// 		}
-// 		p := sel[s.ParentID]
-
-// 		// then use the Table nme in the Select and it's parent
-// 		// to find the resolver to use for this relationship
-// 		k2 := mkkey(h, s.Name, p.Name)
-
-// 		r, ok := rmap[k2]
-// 		if !ok {
-// 			return nil, nil
-// 		}
-
-// 		id := jsn.Value(id.Value)
-// 		if len(id) == 0 {
-// 			return nil, nil
-// 		}
-
-// 		go func(n int, id []byte, s *qcode.Select) {
-// 			defer wg.Done()
-
-// 			//st := time.Now()
-
-// 			b, err := r.Fn(hdr, id)
-// 			if err != nil {
-// 				cerr = fmt.Errorf("%s: %s", s.Name, err)
-// 				return
-// 			}
-
-// 			if len(r.Path) != 0 {
-// 				b = jsn.Strip(b, r.Path)
-// 			}
-
-// 			var ob bytes.Buffer
-
-// 			if len(s.Cols) != 0 {
-// 				err = jsn.Filter(&ob, b, colsToList(s.Cols))
-// 				if err != nil {
-// 					cerr = fmt.Errorf("%s: %s", s.Name, err)
-// 					return
-// 				}
-
-// 			} else {
-// 				ob.WriteString("null")
-// 			}
-
-// 			to[n] = jsn.Field{Key: []byte(s.FieldName), Value: ob.Bytes()}
-// 		}(i, id, s)
-// 	}
-// 	wg.Wait()
-
-// 	return to, cerr
-// }
+func (sg *SuperGraph) resolveRemotes(
+    hdr http.Header,
+    h *xxhash.Digest,
+    from []jsn.Field,
+    sel []qcode.Select,
+    sfmap map[uint64]*qcode.Select) ([]jsn.Field, error) {
+
+    // replacement data for the marked insertion points
+    // key and value will be replaced by whats below
+    to := make([]jsn.Field, len(from))
+
+    var wg sync.WaitGroup
+    wg.Add(len(from))
+
+    var cerr error
+
+    for i, id := range from {
+
+        // use the json key to find the related Select object
+        k1 := xxhash.Sum64(id.Key)
+
+        s, ok := sfmap[k1]
+        if !ok {
+            return nil, nil
+        }
+        p := sel[s.ParentID]
+
+        // then use the Table nme in the Select and it's parent
+        // to find the resolver to use for this relationship
+        k2 := mkkey(h, s.Name, p.Name)
+
+        r, ok := sg.rmap[k2]
+        if !ok {
+            return nil, nil
+        }
+
+        id := jsn.Value(id.Value)
+        if len(id) == 0 {
+            return nil, nil
+        }
+
+        go func(n int, id []byte, s *qcode.Select) {
+            defer wg.Done()
+
+            //st := time.Now()
+
+            b, err := r.Fn(hdr, id)
+            if err != nil {
+                cerr = fmt.Errorf("%s: %s", s.Name, err)
+                return
+            }
+
+            if len(r.Path) != 0 {
+                b = jsn.Strip(b, r.Path)
+            }
+
+            var ob bytes.Buffer
+
+            if len(s.Cols) != 0 {
+                err = jsn.Filter(&ob, b, colsToList(s.Cols))
+                if err != nil {
+                    cerr = fmt.Errorf("%s: %s", s.Name, err)
+                    return
+                }
+
+            } else {
+                ob.WriteString("null")
+            }
+
+            to[n] = jsn.Field{Key: []byte(s.FieldName), Value: ob.Bytes()}
+        }(i, id, s)
+    }
+    wg.Wait()
+
+    return to, cerr
+}
 
-// func parentFieldIds(h *xxhash.Digest, sel []qcode.Select, skipped uint32) (
-// 	[][]byte,
-// 	map[uint64]*qcode.Select) {
-
-// 	c := 0
-// 	for i := range sel {
-// 		s := &sel[i]
-// 		if isSkipped(skipped, uint32(s.ID)) {
-// 			c++
-// 		}
-// 	}
-
-// 	// list of keys (and it's related value) to extract from
-// 	// the db json response
-// 	fm := make([][]byte, c)
-
-// 	// mapping between the above extracted key and a Select
-// 	// object
-// 	sm := make(map[uint64]*qcode.Select, c)
-// 	n := 0
-
-// 	for i := range sel {
-// 		s := &sel[i]
-
-// 		if !isSkipped(skipped, uint32(s.ID)) {
-// 			continue
-// 		}
-
-// 		p := sel[s.ParentID]
-// 		k := mkkey(h, s.Name, p.Name)
-
-// 		if r, ok := rmap[k]; ok {
-// 			fm[n] = r.IDField
-// 			n++
-
-// 			k := xxhash.Sum64(r.IDField)
-// 			sm[k] = s
-// 		}
-// 	}
-
-// 	return fm, sm
-// }
+func (sg *SuperGraph) parentFieldIds(h *xxhash.Digest, sel []qcode.Select, skipped uint32) (
+    [][]byte,
+    map[uint64]*qcode.Select) {
+
+    c := 0
+    for i := range sel {
+        s := &sel[i]
+        if isSkipped(skipped, uint32(s.ID)) {
+            c++
+        }
+    }
+
+    // list of keys (and it's related value) to extract from
+    // the db json response
+    fm := make([][]byte, c)
+
+    // mapping between the above extracted key and a Select
+    // object
+    sm := make(map[uint64]*qcode.Select, c)
+    n := 0
+
+    for i := range sel {
+        s := &sel[i]
+
+        if !isSkipped(skipped, uint32(s.ID)) {
+            continue
+        }
+
+        p := sel[s.ParentID]
+        k := mkkey(h, s.Name, p.Name)
+
+        if r, ok := sg.rmap[k]; ok {
+            fm[n] = r.IDField
+            n++
+
+            k := xxhash.Sum64(r.IDField)
+            sm[k] = s
+        }
+    }
+
+    return fm, sm
+}
 
-// func isSkipped(n uint32, pos uint32) bool {
-// 	return ((n & (1 << pos)) != 0)
-// }
+func isSkipped(n uint32, pos uint32) bool {
+    return ((n & (1 << pos)) != 0)
+}
 
-// func colsToList(cols []qcode.Column) []string {
-// 	var f []string
-
-// 	for i := range cols {
-// 		f = append(f, cols[i].Name)
-// 	}
-// 	return f
-// }
+func colsToList(cols []qcode.Column) []string {
+    var f []string
+
+    for i := range cols {
+        f = append(f, cols[i].Name)
+    }
+    return f
+}
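
To make the insertion-point mechanism above concrete, here is a small, self-contained sketch using the jsn helpers the way they appear in this diff (jsn.Get, jsn.Field, jsn.Replace). The JSON payloads and key names are invented, and the remote fetch is replaced by a literal value:

package main

import (
    "bytes"
    "fmt"

    "github.com/dosco/super-graph/jsn"
)

func main() {
    // Database response containing a marked insertion point (invented data).
    data := []byte(`{"products":[{"id":1,"__products_stripe_id":"ch_123"}]}`)

    // parentFieldIds would normally collect these keys from the skipped selects.
    from := jsn.Get(data, [][]byte{[]byte("__products_stripe_id")})

    // Pretend the remote resolver returned this payload for id "ch_123".
    to := []jsn.Field{{Key: []byte("payments"), Value: []byte(`{"amount":1499}`)}}

    var ob bytes.Buffer
    if err := jsn.Replace(&ob, data, from, to); err != nil {
        panic(err)
    }
    fmt.Println(ob.String()) // the marker field is replaced by the remote payload
}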

core/resolve.go (127 changes)

@@ -6,90 +6,90 @@ import (
     "net/http"
     "strings"
 
+    "github.com/cespare/xxhash/v2"
+    "github.com/dosco/super-graph/core/internal/psql"
     "github.com/dosco/super-graph/jsn"
 )
 
-var (
-    rmap map[uint64]*resolvFn
-)
-
 type resolvFn struct {
     IDField []byte
     Path    [][]byte
     Fn      func(h http.Header, id []byte) ([]byte, error)
 }
 
-// func initResolvers() {
-// 	var err error
-// 	rmap = make(map[uint64]*resolvFn)
-
-// 	for _, t := range conf.Tables {
-// 		err = initRemotes(t)
-// 		if err != nil {
-// 			break
-// 		}
-// 	}
-
-// 	if err != nil {
-// 		errlog.Fatal().Err(err).Msg("failed to initialize resolvers")
-// 	}
-// }
-
-// func initRemotes(t Table) error {
-// 	h := xxhash.New()
-
-// 	for _, r := range t.Remotes {
-// 		// defines the table column to be used as an id in the
-// 		// remote request
-// 		idcol := r.ID
-
-// 		// if no table column specified in the config then
-// 		// use the primary key of the table as the id
-// 		if len(idcol) == 0 {
-// 			pcol, err := pcompile.IDColumn(t.Name)
-// 			if err != nil {
-// 				return err
-// 			}
-// 			idcol = pcol.Key
-// 		}
-// 		idk := fmt.Sprintf("__%s_%s", t.Name, idcol)
-
-// 		// register a relationship between the remote data
-// 		// and the database table
-
-// 		val := &psql.DBRel{Type: psql.RelRemote}
-// 		val.Left.Col = idcol
-// 		val.Right.Col = idk
-
-// 		err := pcompile.AddRelationship(strings.ToLower(r.Name), t.Name, val)
-// 		if err != nil {
-// 			return err
-// 		}
-
-// 		// the function thats called to resolve this remote
-// 		// data request
-// 		fn := buildFn(r)
-
-// 		path := [][]byte{}
-// 		for _, p := range strings.Split(r.Path, ".") {
-// 			path = append(path, []byte(p))
-// 		}
-
-// 		rf := &resolvFn{
-// 			IDField: []byte(idk),
-// 			Path: path,
-// 			Fn: fn,
-// 		}
-
-// 		// index resolver obj by parent and child names
-// 		rmap[mkkey(h, r.Name, t.Name)] = rf
-
-// 		// index resolver obj by IDField
-// 		rmap[xxhash.Sum64(rf.IDField)] = rf
-// 	}
-
-// 	return nil
-// }
+func (sg *SuperGraph) initResolvers() error {
+    var err error
+    sg.rmap = make(map[uint64]*resolvFn)
+
+    for _, t := range sg.conf.Tables {
+        err = sg.initRemotes(t)
+        if err != nil {
+            break
+        }
+    }
+
+    if err != nil {
+        return fmt.Errorf("failed to initialize resolvers: %v", err)
+    }
+
+    return nil
+}
+
+func (sg *SuperGraph) initRemotes(t Table) error {
+    h := xxhash.New()
+
+    for _, r := range t.Remotes {
+        // defines the table column to be used as an id in the
+        // remote request
+        idcol := r.ID
+
+        // if no table column specified in the config then
+        // use the primary key of the table as the id
+        if len(idcol) == 0 {
+            pcol, err := sg.pc.IDColumn(t.Name)
+            if err != nil {
+                return err
+            }
+            idcol = pcol.Key
+        }
+        idk := fmt.Sprintf("__%s_%s", t.Name, idcol)
+
+        // register a relationship between the remote data
+        // and the database table
+
+        val := &psql.DBRel{Type: psql.RelRemote}
+        val.Left.Col = idcol
+        val.Right.Col = idk
+
+        err := sg.pc.AddRelationship(sanitize(r.Name), t.Name, val)
+        if err != nil {
+            return err
+        }
+
+        // the function thats called to resolve this remote
+        // data request
+        fn := buildFn(r)
+
+        path := [][]byte{}
+        for _, p := range strings.Split(r.Path, ".") {
+            path = append(path, []byte(p))
+        }
+
+        rf := &resolvFn{
+            IDField: []byte(idk),
+            Path:    path,
+            Fn:      fn,
+        }
+
+        // index resolver obj by parent and child names
+        sg.rmap[mkkey(h, r.Name, t.Name)] = rf
+
+        // index resolver obj by IDField
+        sg.rmap[xxhash.Sum64(rf.IDField)] = rf
+    }
+
+    return nil
+}
 
 func buildFn(r Remote) func(http.Header, []byte) ([]byte, error) {
     reqURL := strings.Replace(r.URL, "$id", "%s", 1)
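
The initRemotes loop above reads its inputs from the table configuration. A hedged sketch of what such a config might look like, using only the Remote fields this diff actually touches (Name, ID, URL with a $id placeholder, and a dot-separated Path); the concrete values and service are invented:

package main

import "github.com/dosco/super-graph/core"

func main() {
    conf := core.Config{
        Tables: []core.Table{{
            Name: "products",
            Remotes: []core.Remote{{
                Name: "payments",                             // field name spliced into the response
                ID:   "stripe_id",                            // column whose value becomes $id
                URL:  "http://payments.internal/charges/$id", // remote endpoint template
                Path: "data",                                 // keep only this sub-path of the remote JSON
            }},
        }},
    }
    _ = conf // a real program would pass &conf to core.NewSuperGraph together with a *sql.DB
}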

@@ -114,12 +114,9 @@ func buildFn(r Remote) func(http.Header, []byte) ([]byte, error) {
             req.Header.Set(v, hdr.Get(v))
         }
 
-        // logger.Debug().Str("uri", uri).Msg("Remote Join")
-
         res, err := client.Do(req)
         if err != nil {
-            // errlog.Error().Err(err).Msgf("Failed to connect to: %s", uri)
-            return nil, err
+            return nil, fmt.Errorf("failed to connect to '%s': %v", uri, err)
         }
         defer res.Body.Close()
 
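
Putting the buildFn pieces together: the returned closure substitutes the row id into the configured URL, forwards the configured headers, performs the request, and hands the body back to the jsn filtering shown in core/remote.go. A simplified, self-contained sketch of that shape follows; the header pass-through list, the GET method, and the body read are assumptions, not lines from this commit:

package example

import (
    "fmt"
    "io/ioutil"
    "net/http"
    "strings"
)

// buildFetch approximates what buildFn returns: a fetcher bound to one remote.
func buildFetch(urlTemplate string, passHeaders []string) func(http.Header, []byte) ([]byte, error) {
    reqURL := strings.Replace(urlTemplate, "$id", "%s", 1) // same substitution buildFn performs
    client := &http.Client{}

    return func(hdr http.Header, id []byte) ([]byte, error) {
        uri := fmt.Sprintf(reqURL, id)

        req, err := http.NewRequest("GET", uri, nil)
        if err != nil {
            return nil, err
        }
        for _, v := range passHeaders {
            req.Header.Set(v, hdr.Get(v)) // forward the configured headers
        }

        res, err := client.Do(req)
        if err != nil {
            return nil, fmt.Errorf("failed to connect to '%s': %v", uri, err)
        }
        defer res.Body.Close()

        return ioutil.ReadAll(res.Body)
    }
}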

@@ -0,0 +1,15 @@
+package core
+
+import (
+    "github.com/cespare/xxhash/v2"
+)
+
+// nolint: errcheck
+func mkkey(h *xxhash.Digest, k1 string, k2 string) uint64 {
+    h.WriteString(k1)
+    h.WriteString(k2)
+    v := h.Sum64()
+    h.Reset()
+
+    return v
+}
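
This new file collects the mkkey helper that core/remote.go and core/resolve.go share. A short usage sketch, written as if inside package core; the table and remote names are invented:

package core

import "github.com/cespare/xxhash/v2"

// exampleResolverKeys shows how resolvers are keyed: hash the remote's name
// together with its parent table. The digest can be reused across calls
// because mkkey resets it after taking the sum.
func exampleResolverKeys() (uint64, uint64) {
    h := xxhash.New()
    paymentsKey := mkkey(h, "payments", "products")
    avatarKey := mkkey(h, "avatar", "users")
    return paymentsKey, avatarKey
}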

go.mod (1 change)

@@ -6,6 +6,7 @@ require (
     github.com/adjust/gorails v0.0.0-20171013043634-2786ed0c03d3
     github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b
     github.com/brianvoe/gofakeit v3.18.0+incompatible
+    github.com/cespare/xxhash v1.1.0
     github.com/cespare/xxhash/v2 v2.1.0
     github.com/daaku/go.zipexe v1.0.1 // indirect
     github.com/dgrijalva/jwt-go v3.2.0+incompatible