Compare commits

4 Commits

Author | SHA1 | Date
---|---|---
 | 09d6460a13 |
 | 40c99e9ef3 |
 | 75ff5510d4 |
 | 1370d24985 |

.gitignore (vendored, 1 change)

@@ -35,3 +35,4 @@ suppressions
 release
 .gofuzz
 *-fuzz.zip

@@ -7,7 +7,7 @@ rules:
 - name: run
   match: \.go$
   ignore: web|examples|docs|_test\.go$
-  command: go run cmd/main.go serv
+  command: go run main.go serv
 - name: test
   match: _test\.go$
   command: go test -cover {PKG}

@@ -1,7 +1,7 @@
 # stage: 1
 FROM node:10 as react-build
 WORKDIR /web
-COPY /cmd/internal/serv/web/ ./
+COPY /internal/serv/web/ ./
 RUN yarn
 RUN yarn build

@@ -24,8 +24,8 @@ RUN chmod 755 /usr/local/bin/sops
 WORKDIR /app
 COPY . /app

-RUN mkdir -p /app/cmd/internal/serv/web/build
-COPY --from=react-build /web/build/ ./cmd/internal/serv/web/build
+RUN mkdir -p /app/internal/serv/web/build
+COPY --from=react-build /web/build/ ./internal/serv/web/build

 RUN go mod vendor
 RUN make build

@@ -45,7 +45,7 @@ RUN mkdir -p /config
 COPY --from=go-build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
 COPY --from=go-build /app/config/* /config/
 COPY --from=go-build /app/super-graph .
-COPY --from=go-build /app/cmd/scripts/start.sh .
+COPY --from=go-build /app/internal/scripts/start.sh .
 COPY --from=go-build /usr/local/bin/sops .

 RUN chmod +x /super-graph

Makefile (23 changes)

@@ -12,10 +12,10 @@ endif
 export GO111MODULE := on

 # Build-time Go variables
-version = github.com/dosco/super-graph/serv.version
-gitBranch = github.com/dosco/super-graph/serv.gitBranch
-lastCommitSHA = github.com/dosco/super-graph/serv.lastCommitSHA
-lastCommitTime = github.com/dosco/super-graph/serv.lastCommitTime
+version = github.com/dosco/super-graph/internal/serv.version
+gitBranch = github.com/dosco/super-graph/internal/serv.gitBranch
+lastCommitSHA = github.com/dosco/super-graph/internal/serv.lastCommitSHA
+lastCommitTime = github.com/dosco/super-graph/internal/serv.lastCommitTime

 BUILD_FLAGS ?= -ldflags '-s -w -X ${lastCommitSHA}=${BUILD} -X "${lastCommitTime}=${BUILD_DATE}" -X "${version}=${BUILD_VERSION}" -X ${gitBranch}=${BUILD_BRANCH}'

@@ -28,18 +28,18 @@ BIN_DIR := $(GOPATH)/bin
 GORICE := $(BIN_DIR)/rice
 GOLANGCILINT := $(BIN_DIR)/golangci-lint
 GITCHGLOG := $(BIN_DIR)/git-chglog
-WEB_BUILD_DIR := ./cmd/internal/serv/web/build/manifest.json
+WEB_BUILD_DIR := ./internal/serv/web/build/manifest.json

 $(GORICE):
	@GO111MODULE=off go get -u github.com/GeertJohan/go.rice/rice

 $(WEB_BUILD_DIR):
	@echo "First install Yarn and create a build of the web UI then re-run make install"
-	@echo "Run this command: yarn --cwd cmd/internal/serv/web/ build"
+	@echo "Run this command: yarn --cwd internal/serv/web/ build"
	@exit 1

 $(GITCHGLOG):
-	@GO111MODULE=off go get -u github.com/git-chglog/git-chglog/cmd/git-chglog
+	@GO111MODULE=off go get -u github.com/git-chglog/git-chglog/git-chglog

 changelog: $(GITCHGLOG)
	@git-chglog $(ARGS)

@@ -57,7 +57,7 @@ os = $(word 1, $@)

 $(PLATFORMS): lint test
	@mkdir -p release
-	@GOOS=$(os) GOARCH=amd64 go build $(BUILD_FLAGS) -o release/$(BINARY)-$(BUILD_VERSION)-$(os)-amd64 cmd/main.go
+	@GOOS=$(os) GOARCH=amd64 go build $(BUILD_FLAGS) -o release/$(BINARY)-$(BUILD_VERSION)-$(os)-amd64 main.go

 release: windows linux darwin

@@ -69,7 +69,7 @@ gen: $(GORICE) $(WEB_BUILD_DIR)
	@go generate ./...

 $(BINARY): clean
-	@go build $(BUILD_FLAGS) -o $(BINARY) cmd/main.go
+	@go build $(BUILD_FLAGS) -o $(BINARY) main.go

 clean:
	@rm -f $(BINARY)

@@ -77,11 +77,10 @@ clean:
 run: clean
	@go run $(BUILD_FLAGS) main.go $(ARGS)

-install:
-	@echo $(GOPATH)
+install: clean build
	@echo "Commit Hash: `git rev-parse HEAD`"
	@echo "Old Hash: `shasum $(GOPATH)/bin/$(BINARY) 2>/dev/null | cut -c -32`"
-	@go install $(BUILD_FLAGS) cmd
+	@mv $(BINARY) $(GOPATH)/bin/$(BINARY)
	@echo "New Hash:" `shasum $(GOPATH)/bin/$(BINARY) 2>/dev/null | cut -c -32`

 uninstall: clean
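
Note: go's -ldflags -X only takes effect when a string variable with that exact fully-qualified name exists in the target package. A sketch of the declarations the internal/serv package is assumed to carry (that file is not part of this diff; the names are taken from the Makefile variables above):

package serv

// Build metadata, injected at link time by the Makefile's BUILD_FLAGS.
var (
	version        string // -X ".../internal/serv.version=${BUILD_VERSION}"
	gitBranch      string // -X ".../internal/serv.gitBranch=${BUILD_BRANCH}"
	lastCommitSHA  string // -X ".../internal/serv.lastCommitSHA=${BUILD}"
	lastCommitTime string // -X ".../internal/serv.lastCommitTime=${BUILD_DATE}"
)

BuildDetails (further down in this diff) checks len(version) == 0 to detect a binary built without these flags.
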
@@ -87,6 +87,7 @@ type SuperGraph struct {
 	prepared    map[string]*preparedItem
 	roles       map[string]*Role
 	getRole     *sql.Stmt
+	rmap        map[uint64]*resolvFn
 	abacEnabled bool
 	anonExists  bool
 	qc          *qcode.Compiler

@@ -118,6 +119,10 @@ func NewSuperGraph(conf *Config, db *sql.DB) (*SuperGraph, error) {
 		return nil, err
 	}

+	if err := sg.initResolvers(); err != nil {
+		return nil, err
+	}
+
 	if len(conf.SecretKey) != 0 {
 		sk := sha256.Sum256([]byte(conf.SecretKey))
 		conf.SecretKey = ""

core/core.go (13 changes)

@@ -89,25 +89,28 @@ func (sg *SuperGraph) initCompilers() error {

 func (c *scontext) execQuery() ([]byte, error) {
 	var data []byte
-	// var st *stmt
+	var st *stmt
 	var err error

 	if c.sg.conf.UseAllowList {
-		data, _, err = c.resolvePreparedSQL()
+		data, st, err = c.resolvePreparedSQL()
 		if err != nil {
 			return nil, err
 		}

 	} else {
-		data, _, err = c.resolveSQL()
+		data, st, err = c.resolveSQL()
 		if err != nil {
 			return nil, err
 		}
 	}

-	return data, nil
+	if len(data) == 0 || st.skipped == 0 {
+		return data, nil
+	}

-	//return execRemoteJoin(st, data, c.req.hdr)
+	// return c.sg.execRemoteJoin(st, data, c.req.hdr)
+	return c.sg.execRemoteJoin(st, data, nil)
 }

 func (c *scontext) resolvePreparedSQL() ([]byte, *stmt, error) {
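
Note: st.skipped is read here as a bitmask over select IDs: execQuery only hands off to execRemoteJoin when at least one bit is set, and core/remote.go (below) tests individual bits with isSkipped. A runnable illustration of that convention (the mask value is made up):

package main

import "fmt"

// isSkipped mirrors the helper in core/remote.go: bit pos of n marks
// the select with ID pos as skipped, i.e. left for a remote resolver.
func isSkipped(n uint32, pos uint32) bool {
	return (n & (1 << pos)) != 0
}

func main() {
	var skipped uint32 = 1<<2 | 1<<5 // pretend selects 2 and 5 were skipped
	for id := uint32(0); id < 8; id++ {
		fmt.Printf("select %d skipped: %v\n", id, isSkipped(skipped, id))
	}
}
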
@@ -1,12 +1,17 @@
 package qcode

 func GetQType(gql string) QType {
+	ic := false
 	for i := range gql {
 		b := gql[i]
-		if b == '{' {
+		switch {
+		case b == '#':
+			ic = true
+		case b == '\n':
+			ic = false
+		case !ic && b == '{':
 			return QTQuery
-		}
-		if al(b) {
+		case !ic && al(b):
 			switch b {
 			case 'm', 'M':
 				return QTMutation

core/internal/qcode/utils_test.go (new file, 50 lines)

@@ -0,0 +1,50 @@
+package qcode
+
+import "testing"
+
+func TestGetQType(t *testing.T) {
+	type args struct {
+		gql string
+	}
+	type ts struct {
+		name string
+		args args
+		want QType
+	}
+	tests := []ts{
+		ts{
+			name: "query",
+			args: args{gql: " query {"},
+			want: QTQuery,
+		},
+		ts{
+			name: "mutation",
+			args: args{gql: " mutation {"},
+			want: QTMutation,
+		},
+		ts{
+			name: "default query",
+			args: args{gql: " {"},
+			want: QTQuery,
+		},
+		ts{
+			name: "default query with comment",
+			args: args{gql: `# query is good
+		{`},
+			want: QTQuery,
+		},
+		ts{
+			name: "failed query with comment",
+			args: args{gql: `# query is good query {`},
+			want: -1,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := GetQType(tt.args.gql); got != tt.want {
+				t.Errorf("GetQType() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}

core/remote.go (382 changes)

@@ -1,253 +1,249 @@
 package core

-// import (
-// 	"bytes"
-// 	"errors"
-// 	"fmt"
-// 	"net/http"
-// 	"sync"
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"net/http"
+	"sync"

-// 	"github.com/cespare/xxhash/v2"
-// 	"github.com/dosco/super-graph/jsn"
-// 	"github.com/dosco/super-graph/core/internal/qcode"
-// )
+	"github.com/cespare/xxhash/v2"
+	"github.com/dosco/super-graph/core/internal/qcode"
+	"github.com/dosco/super-graph/jsn"
+)

-// func execRemoteJoin(st *stmt, data []byte, hdr http.Header) ([]byte, error) {
-// 	var err error
+func (sg *SuperGraph) execRemoteJoin(st *stmt, data []byte, hdr http.Header) ([]byte, error) {
+	var err error

-// 	if len(data) == 0 || st.skipped == 0 {
-// 		return data, nil
-// 	}
+	sel := st.qc.Selects
+	h := xxhash.New()

-// 	sel := st.qc.Selects
-// 	h := xxhash.New()
+	// fetch the field name used within the db response json
+	// that are used to mark insertion points and the mapping between
+	// those field names and their select objects
+	fids, sfmap := sg.parentFieldIds(h, sel, st.skipped)

-// 	// fetch the field name used within the db response json
-// 	// that are used to mark insertion points and the mapping between
-// 	// those field names and their select objects
-// 	fids, sfmap := parentFieldIds(h, sel, st.skipped)
+	// fetch the field values of the marked insertion points
+	// these values contain the id to be used with fetching remote data
+	from := jsn.Get(data, fids)
+	var to []jsn.Field

-// 	// fetch the field values of the marked insertion points
-// 	// these values contain the id to be used with fetching remote data
-// 	from := jsn.Get(data, fids)
-// 	var to []jsn.Field
+	switch {
+	case len(from) == 1:
+		to, err = sg.resolveRemote(hdr, h, from[0], sel, sfmap)

-// 	switch {
-// 	case len(from) == 1:
-// 		to, err = resolveRemote(hdr, h, from[0], sel, sfmap)
+	case len(from) > 1:
+		to, err = sg.resolveRemotes(hdr, h, from, sel, sfmap)

-// 	case len(from) > 1:
-// 		to, err = resolveRemotes(hdr, h, from, sel, sfmap)
+	default:
+		return nil, errors.New("something wrong no remote ids found in db response")
+	}

-// 	default:
-// 		return nil, errors.New("something wrong no remote ids found in db response")
-// 	}
+	if err != nil {
+		return nil, err
+	}

-// 	if err != nil {
-// 		return nil, err
-// 	}
+	var ob bytes.Buffer

-// 	var ob bytes.Buffer
+	err = jsn.Replace(&ob, data, from, to)
+	if err != nil {
+		return nil, err
+	}

-// 	err = jsn.Replace(&ob, data, from, to)
-// 	if err != nil {
-// 		return nil, err
-// 	}
+	return ob.Bytes(), nil
+}

-// 	return ob.Bytes(), nil
-// }
+func (sg *SuperGraph) resolveRemote(
+	hdr http.Header,
+	h *xxhash.Digest,
+	field jsn.Field,
+	sel []qcode.Select,
+	sfmap map[uint64]*qcode.Select) ([]jsn.Field, error) {

-// func resolveRemote(
-// 	hdr http.Header,
-// 	h *xxhash.Digest,
-// 	field jsn.Field,
-// 	sel []qcode.Select,
-// 	sfmap map[uint64]*qcode.Select) ([]jsn.Field, error) {
+	// replacement data for the marked insertion points
+	// key and value will be replaced by whats below
+	toA := [1]jsn.Field{}
+	to := toA[:1]

-// 	// replacement data for the marked insertion points
-// 	// key and value will be replaced by whats below
-// 	toA := [1]jsn.Field{}
-// 	to := toA[:1]
+	// use the json key to find the related Select object
+	k1 := xxhash.Sum64(field.Key)

-// 	// use the json key to find the related Select object
-// 	k1 := xxhash.Sum64(field.Key)
+	s, ok := sfmap[k1]
+	if !ok {
+		return nil, nil
+	}
+	p := sel[s.ParentID]

-// 	s, ok := sfmap[k1]
-// 	if !ok {
-// 		return nil, nil
-// 	}
-// 	p := sel[s.ParentID]
+	// then use the Table nme in the Select and it's parent
+	// to find the resolver to use for this relationship
+	k2 := mkkey(h, s.Name, p.Name)

-// 	// then use the Table nme in the Select and it's parent
-// 	// to find the resolver to use for this relationship
-// 	k2 := mkkey(h, s.Name, p.Name)
+	r, ok := sg.rmap[k2]
+	if !ok {
+		return nil, nil
+	}

-// 	r, ok := rmap[k2]
-// 	if !ok {
-// 		return nil, nil
-// 	}
+	id := jsn.Value(field.Value)
+	if len(id) == 0 {
+		return nil, nil
+	}

-// 	id := jsn.Value(field.Value)
-// 	if len(id) == 0 {
-// 		return nil, nil
-// 	}
+	//st := time.Now()

-// 	//st := time.Now()
+	b, err := r.Fn(hdr, id)
+	if err != nil {
+		return nil, err
+	}

-// 	b, err := r.Fn(hdr, id)
-// 	if err != nil {
-// 		return nil, err
-// 	}
+	if len(r.Path) != 0 {
+		b = jsn.Strip(b, r.Path)
+	}

-// 	if len(r.Path) != 0 {
-// 		b = jsn.Strip(b, r.Path)
-// 	}
+	var ob bytes.Buffer

-// 	var ob bytes.Buffer
+	if len(s.Cols) != 0 {
+		err = jsn.Filter(&ob, b, colsToList(s.Cols))
+		if err != nil {
+			return nil, err
+		}

-// 	if len(s.Cols) != 0 {
-// 		err = jsn.Filter(&ob, b, colsToList(s.Cols))
-// 		if err != nil {
-// 			return nil, err
-// 		}
+	} else {
+		ob.WriteString("null")
+	}

-// 	} else {
-// 		ob.WriteString("null")
-// 	}
+	to[0] = jsn.Field{Key: []byte(s.FieldName), Value: ob.Bytes()}
+	return to, nil
+}

-// 	to[0] = jsn.Field{Key: []byte(s.FieldName), Value: ob.Bytes()}
-// 	return to, nil
-// }
+func (sg *SuperGraph) resolveRemotes(
+	hdr http.Header,
+	h *xxhash.Digest,
+	from []jsn.Field,
+	sel []qcode.Select,
+	sfmap map[uint64]*qcode.Select) ([]jsn.Field, error) {

-// func resolveRemotes(
-// 	hdr http.Header,
-// 	h *xxhash.Digest,
-// 	from []jsn.Field,
-// 	sel []qcode.Select,
-// 	sfmap map[uint64]*qcode.Select) ([]jsn.Field, error) {
+	// replacement data for the marked insertion points
+	// key and value will be replaced by whats below
+	to := make([]jsn.Field, len(from))

-// 	// replacement data for the marked insertion points
-// 	// key and value will be replaced by whats below
-// 	to := make([]jsn.Field, len(from))
+	var wg sync.WaitGroup
+	wg.Add(len(from))

-// 	var wg sync.WaitGroup
-// 	wg.Add(len(from))
+	var cerr error

-// 	var cerr error
+	for i, id := range from {

-// 	for i, id := range from {
+		// use the json key to find the related Select object
+		k1 := xxhash.Sum64(id.Key)

-// 		// use the json key to find the related Select object
-// 		k1 := xxhash.Sum64(id.Key)
+		s, ok := sfmap[k1]
+		if !ok {
+			return nil, nil
+		}
+		p := sel[s.ParentID]

-// 		s, ok := sfmap[k1]
-// 		if !ok {
-// 			return nil, nil
-// 		}
-// 		p := sel[s.ParentID]
+		// then use the Table nme in the Select and it's parent
+		// to find the resolver to use for this relationship
+		k2 := mkkey(h, s.Name, p.Name)

-// 		// then use the Table nme in the Select and it's parent
-// 		// to find the resolver to use for this relationship
-// 		k2 := mkkey(h, s.Name, p.Name)
+		r, ok := sg.rmap[k2]
+		if !ok {
+			return nil, nil
+		}

-// 		r, ok := rmap[k2]
-// 		if !ok {
-// 			return nil, nil
-// 		}
+		id := jsn.Value(id.Value)
+		if len(id) == 0 {
+			return nil, nil
+		}

-// 		id := jsn.Value(id.Value)
-// 		if len(id) == 0 {
-// 			return nil, nil
-// 		}
+		go func(n int, id []byte, s *qcode.Select) {
+			defer wg.Done()

-// 		go func(n int, id []byte, s *qcode.Select) {
-// 			defer wg.Done()
+			//st := time.Now()

-// 			//st := time.Now()
+			b, err := r.Fn(hdr, id)
+			if err != nil {
+				cerr = fmt.Errorf("%s: %s", s.Name, err)
+				return
+			}

-// 			b, err := r.Fn(hdr, id)
-// 			if err != nil {
-// 				cerr = fmt.Errorf("%s: %s", s.Name, err)
-// 				return
-// 			}
+			if len(r.Path) != 0 {
+				b = jsn.Strip(b, r.Path)
+			}

-// 			if len(r.Path) != 0 {
-// 				b = jsn.Strip(b, r.Path)
-// 			}
+			var ob bytes.Buffer

-// 			var ob bytes.Buffer
+			if len(s.Cols) != 0 {
+				err = jsn.Filter(&ob, b, colsToList(s.Cols))
+				if err != nil {
+					cerr = fmt.Errorf("%s: %s", s.Name, err)
+					return
+				}

-// 			if len(s.Cols) != 0 {
-// 				err = jsn.Filter(&ob, b, colsToList(s.Cols))
-// 				if err != nil {
-// 					cerr = fmt.Errorf("%s: %s", s.Name, err)
-// 					return
-// 				}
+			} else {
+				ob.WriteString("null")
+			}

-// 			} else {
-// 				ob.WriteString("null")
-// 			}
+			to[n] = jsn.Field{Key: []byte(s.FieldName), Value: ob.Bytes()}
+		}(i, id, s)
+	}
+	wg.Wait()

-// 			to[n] = jsn.Field{Key: []byte(s.FieldName), Value: ob.Bytes()}
-// 		}(i, id, s)
-// 	}
-// 	wg.Wait()
+	return to, cerr
+}

-// 	return to, cerr
-// }
+func (sg *SuperGraph) parentFieldIds(h *xxhash.Digest, sel []qcode.Select, skipped uint32) (
+	[][]byte,
+	map[uint64]*qcode.Select) {

-// func parentFieldIds(h *xxhash.Digest, sel []qcode.Select, skipped uint32) (
-// 	[][]byte,
-// 	map[uint64]*qcode.Select) {
+	c := 0
+	for i := range sel {
+		s := &sel[i]
+		if isSkipped(skipped, uint32(s.ID)) {
+			c++
+		}
+	}

-// 	c := 0
-// 	for i := range sel {
-// 		s := &sel[i]
-// 		if isSkipped(skipped, uint32(s.ID)) {
-// 			c++
-// 		}
-// 	}
+	// list of keys (and it's related value) to extract from
+	// the db json response
+	fm := make([][]byte, c)

-// 	// list of keys (and it's related value) to extract from
-// 	// the db json response
-// 	fm := make([][]byte, c)
+	// mapping between the above extracted key and a Select
+	// object
+	sm := make(map[uint64]*qcode.Select, c)
+	n := 0

-// 	// mapping between the above extracted key and a Select
-// 	// object
-// 	sm := make(map[uint64]*qcode.Select, c)
-// 	n := 0
+	for i := range sel {
+		s := &sel[i]

-// 	for i := range sel {
-// 		s := &sel[i]
+		if !isSkipped(skipped, uint32(s.ID)) {
+			continue
+		}

-// 		if !isSkipped(skipped, uint32(s.ID)) {
-// 			continue
-// 		}
+		p := sel[s.ParentID]
+		k := mkkey(h, s.Name, p.Name)

-// 		p := sel[s.ParentID]
-// 		k := mkkey(h, s.Name, p.Name)
+		if r, ok := sg.rmap[k]; ok {
+			fm[n] = r.IDField
+			n++

-// 		if r, ok := rmap[k]; ok {
-// 			fm[n] = r.IDField
-// 			n++
+			k := xxhash.Sum64(r.IDField)
+			sm[k] = s
+		}
+	}

-// 			k := xxhash.Sum64(r.IDField)
-// 			sm[k] = s
-// 		}
-// 	}
+	return fm, sm
+}

-// 	return fm, sm
-// }
+func isSkipped(n uint32, pos uint32) bool {
+	return ((n & (1 << pos)) != 0)
+}

-// func isSkipped(n uint32, pos uint32) bool {
-// 	return ((n & (1 << pos)) != 0)
-// }
+func colsToList(cols []qcode.Column) []string {
+	var f []string

-// func colsToList(cols []qcode.Column) []string {
-// 	var f []string
-
-// 	for i := range cols {
-// 		f = append(f, cols[i].Name)
-// 	}
-// 	return f
-// }
+	for i := range cols {
+		f = append(f, cols[i].Name)
+	}
+	return f
+}

core/resolve.go (127 changes)

@@ -6,90 +6,90 @@ import (
 	"net/http"
 	"strings"

 	"github.com/cespare/xxhash/v2"
 	"github.com/dosco/super-graph/core/internal/psql"
 	"github.com/dosco/super-graph/jsn"
 )

 var (
 	rmap map[uint64]*resolvFn
 )

 type resolvFn struct {
 	IDField []byte
 	Path    [][]byte
 	Fn      func(h http.Header, id []byte) ([]byte, error)
 }

-// func initResolvers() {
-// 	var err error
-// 	rmap = make(map[uint64]*resolvFn)
+func (sg *SuperGraph) initResolvers() error {
+	var err error
+	sg.rmap = make(map[uint64]*resolvFn)

-// 	for _, t := range conf.Tables {
-// 		err = initRemotes(t)
-// 		if err != nil {
-// 			break
-// 		}
-// 	}
+	for _, t := range sg.conf.Tables {
+		err = sg.initRemotes(t)
+		if err != nil {
+			break
+		}
+	}

-// 	if err != nil {
-// 		errlog.Fatal().Err(err).Msg("failed to initialize resolvers")
-// 	}
-// }
+	if err != nil {
+		return fmt.Errorf("failed to initialize resolvers: %v", err)
+	}

-// func initRemotes(t Table) error {
-// 	h := xxhash.New()
+	return nil
+}

-// 	for _, r := range t.Remotes {
-// 		// defines the table column to be used as an id in the
-// 		// remote request
-// 		idcol := r.ID
+func (sg *SuperGraph) initRemotes(t Table) error {
+	h := xxhash.New()

-// 		// if no table column specified in the config then
-// 		// use the primary key of the table as the id
-// 		if len(idcol) == 0 {
-// 			pcol, err := pcompile.IDColumn(t.Name)
-// 			if err != nil {
-// 				return err
-// 			}
-// 			idcol = pcol.Key
-// 		}
-// 		idk := fmt.Sprintf("__%s_%s", t.Name, idcol)
+	for _, r := range t.Remotes {
+		// defines the table column to be used as an id in the
+		// remote request
+		idcol := r.ID

-// 		// register a relationship between the remote data
-// 		// and the database table
+		// if no table column specified in the config then
+		// use the primary key of the table as the id
+		if len(idcol) == 0 {
+			pcol, err := sg.pc.IDColumn(t.Name)
+			if err != nil {
+				return err
+			}
+			idcol = pcol.Key
+		}
+		idk := fmt.Sprintf("__%s_%s", t.Name, idcol)

-// 		val := &psql.DBRel{Type: psql.RelRemote}
-// 		val.Left.Col = idcol
-// 		val.Right.Col = idk
+		// register a relationship between the remote data
+		// and the database table

-// 		err := pcompile.AddRelationship(strings.ToLower(r.Name), t.Name, val)
-// 		if err != nil {
-// 			return err
-// 		}
+		val := &psql.DBRel{Type: psql.RelRemote}
+		val.Left.Col = idcol
+		val.Right.Col = idk

-// 		// the function thats called to resolve this remote
-// 		// data request
-// 		fn := buildFn(r)
+		err := sg.pc.AddRelationship(sanitize(r.Name), t.Name, val)
+		if err != nil {
+			return err
+		}

-// 		path := [][]byte{}
-// 		for _, p := range strings.Split(r.Path, ".") {
-// 			path = append(path, []byte(p))
-// 		}
+		// the function thats called to resolve this remote
+		// data request
+		fn := buildFn(r)

-// 		rf := &resolvFn{
-// 			IDField: []byte(idk),
-// 			Path:    path,
-// 			Fn:      fn,
-// 		}
+		path := [][]byte{}
+		for _, p := range strings.Split(r.Path, ".") {
+			path = append(path, []byte(p))
+		}

-// 		// index resolver obj by parent and child names
-// 		rmap[mkkey(h, r.Name, t.Name)] = rf
+		rf := &resolvFn{
+			IDField: []byte(idk),
+			Path:    path,
+			Fn:      fn,
+		}

-// 		// index resolver obj by IDField
-// 		rmap[xxhash.Sum64(rf.IDField)] = rf
-// 	}
+		// index resolver obj by parent and child names
+		sg.rmap[mkkey(h, r.Name, t.Name)] = rf

-// 	return nil
-// }
+		// index resolver obj by IDField
+		sg.rmap[xxhash.Sum64(rf.IDField)] = rf
+	}

+	return nil
+}

 func buildFn(r Remote) func(http.Header, []byte) ([]byte, error) {
 	reqURL := strings.Replace(r.URL, "$id", "%s", 1)

@@ -114,12 +114,9 @@ func buildFn(r Remote) func(http.Header, []byte) ([]byte, error) {
 			req.Header.Set(v, hdr.Get(v))
 		}

-		// logger.Debug().Str("uri", uri).Msg("Remote Join")
-
 		res, err := client.Do(req)
 		if err != nil {
-			// errlog.Error().Err(err).Msgf("Failed to connect to: %s", uri)
-			return nil, err
+			return nil, fmt.Errorf("failed to connect to '%s': %v", uri, err)
 		}
 		defer res.Body.Close()

core/utils.go (new file, 15 lines)

@@ -0,0 +1,15 @@
+package core
+
+import (
+	"github.com/cespare/xxhash/v2"
+)
+
+// nolint: errcheck
+func mkkey(h *xxhash.Digest, k1 string, k2 string) uint64 {
+	h.WriteString(k1)
+	h.WriteString(k2)
+	v := h.Sum64()
+	h.Reset()
+
+	return v
+}
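
Note: mkkey reuses a single xxhash.Digest to hash a (child name, parent name) pair into the uint64 keys that sg.rmap is indexed by; calling Reset after Sum64 lets one digest serve every key. A small standalone sketch of the round trip (the table and remote names are invented):

package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

// mkkey mirrors core/utils.go: two names in, one map key out.
func mkkey(h *xxhash.Digest, k1 string, k2 string) uint64 {
	h.WriteString(k1)
	h.WriteString(k2)
	v := h.Sum64()
	h.Reset()
	return v
}

func main() {
	h := xxhash.New()
	// resolve.go registers a resolver under (remote name, table name)...
	k := mkkey(h, "payments", "customers")
	// ...and remote.go rebuilds the identical key at query time to find it.
	fmt.Printf("rmap key: %#x, stable: %v\n", k, k == mkkey(h, "payments", "customers"))
}
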
@@ -1790,18 +1790,37 @@ database:
   # Enable this if you need the user id in triggers, etc
   set_user_id: false

-  # Define additional variables here to be used with filters
-  variables:
-    admin_account_id: "5"
+  # database ping timeout is used for db health checking
+  ping_timeout: 1m

-  # Field and table names that you wish to block
-  blocklist:
-    - ar_internal_metadata
-    - schema_migrations
-    - secret
-    - password
-    - encrypted
-    - token
+  # Set up an secure tls encrypted db connection
+  enable_tls: false
+
+  # Required for tls. For example with Google Cloud SQL it's
+  # <gcp-project-id>:<cloud-sql-instance>"
+  # server_name: blah
+
+  # Required for tls. Can be a file path or the contents of the pem file
+  # server_cert: ./server-ca.pem
+
+  # Required for tls. Can be a file path or the contents of the pem file
+  # client_cert: ./client-cert.pem
+
+  # Required for tls. Can be a file path or the contents of the pem file
+  # client_key: ./client-key.pem
+
+  # Define additional variables here to be used with filters
+  variables:
+    admin_account_id: "5"
+
+  # Field and table names that you wish to block
+  blocklist:
+    - ar_internal_metadata
+    - schema_migrations
+    - secret
+    - password
+    - encrypted
+    - token

 # Create custom actions with their own api endpoints
 # For example the below action will be available at /api/v1/actions/refresh_leaderboard_users

go.mod (1 change)

@@ -6,6 +6,7 @@ require (
 	github.com/adjust/gorails v0.0.0-20171013043634-2786ed0c03d3
 	github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b
 	github.com/brianvoe/gofakeit v3.18.0+incompatible
 	github.com/cespare/xxhash v1.1.0
+	github.com/cespare/xxhash/v2 v2.1.0
 	github.com/daaku/go.zipexe v1.0.1 // indirect
 	github.com/dgrijalva/jwt-go v3.2.0+incompatible

@@ -3,8 +3,8 @@ package serv
 import (
 	"time"

-	"github.com/dosco/super-graph/cmd/internal/serv/internal/auth"
 	"github.com/dosco/super-graph/core"
+	"github.com/dosco/super-graph/internal/serv/internal/auth"

 	"github.com/spf13/viper"
 )

@@ -60,6 +60,11 @@ type Serv struct {
 		PoolSize    int32         `mapstructure:"pool_size"`
 		MaxRetries  int           `mapstructure:"max_retries"`
 		PingTimeout time.Duration `mapstructure:"ping_timeout"`
+		EnableTLS   bool          `mapstructure:"enable_tls"`
+		ServerName  string        `mapstructure:"server_name"`
+		ServerCert  string        `mapstructure:"server_cert"`
+		ClientCert  string        `mapstructure:"client_cert"`
+		ClientKey   string        `mapstructure:"client_key"`
 	} `mapstructure:"database"`

 	Actions []Action
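
Note: the five new fields extend the nested database struct that viper decodes via mapstructure tags, so they map one-to-one onto the enable_tls / server_name / server_cert / client_cert / client_key keys shown in the config files elsewhere in this diff. A minimal standalone sketch of that decoding (trimmed to two fields; not code from the repo):

package main

import (
	"fmt"
	"strings"

	"github.com/spf13/viper"
)

type conf struct {
	Database struct {
		EnableTLS  bool   `mapstructure:"enable_tls"`
		ServerName string `mapstructure:"server_name"`
	} `mapstructure:"database"`
}

func main() {
	v := viper.New()
	v.SetConfigType("yaml")
	// same shape as the database: section of the config files
	v.ReadConfig(strings.NewReader("database:\n  enable_tls: true\n  server_name: my-project:my-instance\n"))

	var c conf
	if err := v.Unmarshal(&c); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", c.Database)
}
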
@@ -156,6 +156,20 @@ func cmdVersion(cmd *cobra.Command, args []string) {
 }

 func BuildDetails() string {
+	if len(version) == 0 {
+		return fmt.Sprintf(`
+Super Graph (unknown version)
+For documentation, visit https://supergraph.dev
+
+To build with version information please use the Makefile
+> git clone https://github.com/dosco/super-graph
+> cd super-graph && make install
+
+Licensed under the Apache Public License 2.0
+Copyright 2020, Vikram Rangnekar
+`)
+	}
+
 	return fmt.Sprintf(`
 Super Graph %v
 For documentation, visit https://supergraph.dev

@@ -166,7 +180,7 @@ Branch : %v
 Go version : %v

 Licensed under the Apache Public License 2.0
-Copyright 2020, Vikram Rangnekar.
+Copyright 2020, Vikram Rangnekar
 `,
 	version,
 	lastCommitSHA,

@@ -9,7 +9,7 @@ import (
 	"strings"
 	"time"

-	"github.com/dosco/super-graph/cmd/internal/serv/internal/migrate"
+	"github.com/dosco/super-graph/internal/serv/internal/migrate"
 	"github.com/spf13/cobra"
 )

@@ -55,7 +55,7 @@ func cmdDBReset(cmd *cobra.Command, args []string) {
 func cmdDBCreate(cmd *cobra.Command, args []string) {
 	initConfOnce()

-	db, err := initDB(conf)
+	db, err := initDB(conf, false)
 	if err != nil {
 		log.Fatalf("ERR failed to connect to database: %s", err)
 	}

@@ -74,7 +74,7 @@ func cmdDBCreate(cmd *cobra.Command, args []string) {
 func cmdDBDrop(cmd *cobra.Command, args []string) {
 	initConfOnce()

-	db, err := initDB(conf)
+	db, err := initDB(conf, false)
 	if err != nil {
 		log.Fatalf("ERR failed to connect to database: %s", err)
 	}

@@ -131,7 +131,7 @@ func cmdDBMigrate(cmd *cobra.Command, args []string) {
 	initConfOnce()
 	dest := args[0]

-	conn, err := initDB(conf)
+	conn, err := initDB(conf, true)
 	if err != nil {
 		log.Fatalf("ERR failed to connect to database: %s", err)
 	}

@@ -223,7 +223,7 @@ func cmdDBMigrate(cmd *cobra.Command, args []string) {
 func cmdDBStatus(cmd *cobra.Command, args []string) {
 	initConfOnce()

-	db, err := initDB(conf)
+	db, err := initDB(conf, true)
 	if err != nil {
 		log.Fatalf("ERR failed to connect to database: %s", err)
 	}

@@ -28,7 +28,7 @@ func cmdDBSeed(cmd *cobra.Command, args []string) {

 	conf.Production = false

-	db, err = initDB(conf)
+	db, err = initDB(conf, true)
 	if err != nil {
 		log.Fatalf("ERR failed to connect to database: %s", err)
 	}

@@ -19,7 +19,7 @@ func cmdServ(cmd *cobra.Command, args []string) {

 	initWatcher()

-	db, err = initDB(conf)
+	db, err = initDB(conf, true)
 	if err != nil {
 		fatalInProd(err, "failed to connect to database")
 	}

@@ -8,8 +8,8 @@ import (
 	"net/http"
 	"strings"

-	"github.com/dosco/super-graph/cmd/internal/serv/internal/auth"
 	"github.com/dosco/super-graph/core"
+	"github.com/dosco/super-graph/internal/serv/internal/auth"
 	"github.com/rs/cors"
 	"go.uber.org/zap"
 )

@@ -1,8 +1,14 @@
 package serv

 import (
+	"crypto/tls"
+	"crypto/x509"
 	"database/sql"
+	"errors"
 	"fmt"
+	"io/ioutil"
 	"path"
+	"strings"
 	"time"

 	"github.com/jackc/pgx/v4"

@@ -10,6 +16,10 @@ import (
 	//_ "github.com/jackc/pgx/v4/stdlib"
 )

+const (
+	PEM_SIG = "--BEGIN "
+)
+
 func initConf() (*Config, error) {
 	c, err := ReadInConfig(path.Join(confPath, GetConfigName()))
 	if err != nil {

@@ -79,35 +89,13 @@ func initConf() (*Config, error) {
 	return c, nil
 }

-func initDB(c *Config) (*sql.DB, error) {
+func initDB(c *Config, useDB bool) (*sql.DB, error) {
 	var db *sql.DB
 	var err error

-	// cs := fmt.Sprintf("host=%s port=%d user=%s password=%s dbname=%s",
-	// 	c.DB.Host, c.DB.Port,
-	// 	c.DB.User, c.DB.Password,
-	// 	c.DB.DBName)
-
-	// fmt.Println(">>", cs)
-
-	// for i := 1; i < 10; i++ {
-	// 	db, err = sql.Open("pgx", cs)
-	// 	if err == nil {
-	// 		break
-	// 	}
-	// 	time.Sleep(time.Duration(i*100) * time.Millisecond)
-	// }
-
-	// if err != nil {
-	// 	return nil, err
-	// }
-
-	// return db, nil
-
 	config, _ := pgx.ParseConfig("")
 	config.Host = c.DB.Host
 	config.Port = c.DB.Port
-	config.Database = c.DB.DBName
 	config.User = c.DB.User
 	config.Password = c.DB.Password
 	config.RuntimeParams = map[string]string{

@@ -115,6 +103,63 @@ func initDB(c *Config) (*sql.DB, error) {
 		"search_path": c.DB.Schema,
 	}

+	if useDB {
+		config.Database = c.DB.DBName
+	}
+
+	if c.DB.EnableTLS {
+		if len(c.DB.ServerName) == 0 {
+			return nil, errors.New("server_name is required")
+		}
+		if len(c.DB.ServerCert) == 0 {
+			return nil, errors.New("server_cert is required")
+		}
+		if len(c.DB.ClientCert) == 0 {
+			return nil, errors.New("client_cert is required")
+		}
+		if len(c.DB.ClientKey) == 0 {
+			return nil, errors.New("client_key is required")
+		}
+
+		rootCertPool := x509.NewCertPool()
+		var pem []byte
+		var err error
+
+		if strings.Contains(c.DB.ServerCert, PEM_SIG) {
+			pem = []byte(c.DB.ServerCert)
+		} else {
+			pem, err = ioutil.ReadFile(c.DB.ServerCert)
+		}
+
+		if err != nil {
+			return nil, fmt.Errorf("db tls: %w", err)
+		}
+
+		if ok := rootCertPool.AppendCertsFromPEM(pem); !ok {
+			return nil, errors.New("db tls: failed to append pem")
+		}
+
+		clientCert := make([]tls.Certificate, 0, 1)
+		var certs tls.Certificate
+
+		if strings.Contains(c.DB.ClientCert, PEM_SIG) {
+			certs, err = tls.X509KeyPair([]byte(c.DB.ClientCert), []byte(c.DB.ClientKey))
+		} else {
+			certs, err = tls.LoadX509KeyPair(c.DB.ClientCert, c.DB.ClientKey)
+		}
+
+		if err != nil {
+			return nil, fmt.Errorf("db tls: %w", err)
+		}
+
+		clientCert = append(clientCert, certs)
+		config.TLSConfig = &tls.Config{
+			RootCAs:      rootCertPool,
+			Certificates: clientCert,
+			ServerName:   c.DB.ServerName,
+		}
+	}
+
 	// switch c.LogLevel {
 	// case "debug":
 	// 	config.LogLevel = pgx.LogLevelDebug
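
Note: with this change every TLS-related config value does double duty: if it contains the "--BEGIN " marker (PEM_SIG) it is treated as inline PEM content, otherwise as a path to read from disk. A standalone sketch of just that heuristic (the loadPEM helper is ours; initDB inlines the same logic):

package main

import (
	"fmt"
	"io/ioutil"
	"strings"
)

const PEM_SIG = "--BEGIN "

// loadPEM returns the value itself when it already looks like PEM,
// and otherwise reads it from disk as a file path.
func loadPEM(v string) ([]byte, error) {
	if strings.Contains(v, PEM_SIG) {
		return []byte(v), nil
	}
	return ioutil.ReadFile(v)
}

func main() {
	inline := "-----BEGIN CERTIFICATE-----\nMIIB...\n-----END CERTIFICATE-----"
	pem, err := loadPEM(inline) // no file access: the marker is present
	fmt.Println(err == nil, strings.HasPrefix(string(pem), "-----BEGIN"))
}
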
@@ -9,8 +9,8 @@ import (
 	"strings"

 	"github.com/bradfitz/gomemcache/memcache"
-	"github.com/dosco/super-graph/cmd/internal/serv/internal/rails"
 	"github.com/dosco/super-graph/core"
+	"github.com/dosco/super-graph/internal/serv/internal/rails"
 	"github.com/garyburd/redigo/redis"
 )

@@ -11,7 +11,7 @@ import (

 	rice "github.com/GeertJohan/go.rice"
 	"github.com/NYTimes/gziphandler"
-	"github.com/dosco/super-graph/cmd/internal/serv/internal/auth"
+	"github.com/dosco/super-graph/internal/serv/internal/auth"
 )

 func initWatcher() {

@@ -133,20 +133,36 @@ database:
   # database ping timeout is used for db health checking
   ping_timeout: 1m

-  # Define additional variables here to be used with filters
-  variables:
-    #admin_account_id: "5"
-    admin_account_id: "sql:select id from users where admin = true limit 1"
+  # Set up an secure tls encrypted db connection
+  enable_tls: false
+
+  # Required for tls. For example with Google Cloud SQL it's
+  # <gcp-project-id>:<cloud-sql-instance>"
+  # server_name: blah
+
+  # Required for tls. Can be a file path or the contents of the pem file
+  # server_cert: ./server-ca.pem
+
+  # Required for tls. Can be a file path or the contents of the pem file
+  # client_cert: ./client-cert.pem
+
+  # Required for tls. Can be a file path or the contents of the pem file
+  # client_key: ./client-key.pem
+
+  # Define additional variables here to be used with filters
+  variables:
+    #admin_account_id: "5"
+    admin_account_id: "sql:select id from users where admin = true limit 1"

-  # Field and table names that you wish to block
-  blocklist:
-    - ar_internal_metadata
-    - schema_migrations
-    - secret
-    - password
-    - encrypted
-    - token
+  # Field and table names that you wish to block
+  blocklist:
+    - ar_internal_metadata
+    - schema_migrations
+    - secret
+    - password
+    - encrypted
+    - token

 # Create custom actions with their own api endpoints
 # For example the below action will be available at /api/v1/actions/refresh_leaderboard_users

@@ -77,4 +77,20 @@ database:
   set_user_id: false

   # database ping timeout is used for db health checking
-  ping_timeout: 5m
+  ping_timeout: 5m
+
+  # Set up an secure tls encrypted db connection
+  enable_tls: false
+
+  # Required for tls. For example with Google Cloud SQL it's
+  # <gcp-project-id>:<cloud-sql-instance>"
+  # server_name: blah
+
+  # Required for tls. Can be a file path or the contents of the pem file
+  # server_cert: ./server-ca.pem
+
+  # Required for tls. Can be a file path or the contents of the pem file
+  # client_cert: ./client-cert.pem
+
+  # Required for tls. Can be a file path or the contents of the pem file
+  # client_key: ./client-key.pem

@@ -10,20 +10,9 @@ import (
 	"strings"
 	"sync"

-	"github.com/cespare/xxhash/v2"
 	"github.com/dosco/super-graph/jsn"
 )

-// nolint: errcheck
-func mkkey(h *xxhash.Digest, k1 string, k2 string) uint64 {
-	h.WriteString(k1)
-	h.WriteString(k2)
-	v := h.Sum64()
-	h.Reset()
-
-	return v
-}
-
 // nolint: errcheck
 func gqlHash(b string, vars []byte, role string) string {
 	b = strings.TrimSpace(b)

Some files were not shown because too many files have changed in this diff.