Compare commits

..

60 Commits

SHA1 Message Date
c7557f761f Fix broken build 2020-04-16 01:28:55 -04:00
09d6460a13 Make go get to install work. 2020-04-16 00:26:32 -04:00
40c99e9ef3 Fix issue with missing build variables 2020-04-13 00:50:54 -04:00
75ff5510d4 Fix issue with failing db cmds 2020-04-13 00:43:18 -04:00
1370d24985 Fix issue with make install 2020-04-12 20:35:31 -04:00
ef50c1957b Fix CloudRun connection issue 2020-04-12 10:09:37 -04:00
41ea6ef6f5 Fix readme add library usage 2020-04-11 16:41:10 -04:00
a266517d17 Remove config package 2020-04-11 02:45:06 -04:00
7831d27345 Refactor Super Graph into a library #26 2020-04-10 02:27:43 -04:00
e102da839e Fix issue with Postgres FUNC_MAX_ARGS by moving to row_to_json 2020-04-01 21:25:50 -04:00
68a378c00f Fix issue with prepared statements skipped on error 2020-03-31 01:28:39 -04:00
d96eaf14f4 Fix bugs with escape char handling 2020-03-30 10:03:47 -04:00
01e488b69d Fix for bug blocking anon queries 2020-03-21 20:11:04 -04:00
7a450b16ba Fix issue with detecting many to many relationships 2020-03-18 20:19:56 -04:00
1ad8cbf15b Fix minor parser bug 2020-03-17 23:03:41 -04:00
f69f1c67d5 Fix to remove left over debug log 2020-03-16 01:43:26 -04:00
a172193955 Fix to ensure cursor fields can be defined in the query 2020-03-16 01:40:47 -04:00
81338b6123 Fix issues blocking Apollo client 2020-03-14 01:35:42 -04:00
265b93b203 Fix for encrypted cursor in production mode bug 2020-03-06 21:38:01 +05:30
6c240e21b4 Fix bug related to 'anon' role prepared statements 2020-03-06 15:39:15 +05:30
7930719eaa Add ability to set CORS headers 2020-03-06 09:47:51 +05:30
cc687b1b2b Fix issue with Dockerfile CMD 2020-03-05 09:13:52 +05:30
3033dcf1a9 Fix issue with setting PORT env var 2020-03-04 15:39:53 +05:30
0381982d19 Fix upx version issue in Dockerfile 2020-03-04 12:27:07 +05:30
2b0a798faa Add 'secrets' command to startup script 2020-03-03 19:44:14 +05:30
8b6c562ac1 Add CSV import command to seed javascript 2020-03-03 13:45:47 +05:30
a1fb89b762 Add support for SQL in variables 2020-02-29 10:35:48 +05:30
c82a7bff0d Misprint (#43) 2020-02-24 10:48:50 +05:30
7acf28bb3c Fix issue with upgrading to postgres 12 docker image #36 2020-02-24 02:37:21 +05:30
be5d4e976a Misprint (#41) 2020-02-24 02:04:23 +05:30
d1b884aec6 Misprint (#40) 2020-02-24 02:03:57 +05:30
4be4ce860b Misprint (#39) 2020-02-24 02:03:40 +05:30
dfa4caf540 Misprint (#37) 2020-02-24 02:03:27 +05:30
7763251fb7 fix "Try the demo app" in docs (#38)
* fix "Try the demo app" in docs

* fix "Get Started" setup in docs
2020-02-24 02:02:22 +05:30
51e105699e Fix corrupt json bug in jsn package 2020-02-24 02:00:11 +05:30
90694f8803 Fix spelling in docs (#34) 2020-02-23 15:41:04 +05:30
ad82f5b267 Fix spelling in docs (#35) 2020-02-23 15:40:42 +05:30
99b37a9c50 Fix bug related to new Postgres docker image 2020-02-23 10:28:32 +05:30
7ec1f59224 Fix bug with cursors and multiple order by 2020-02-23 02:28:37 +05:30
d3ecb1d6cc Fix bug with multi root queries 2020-02-21 10:29:37 +05:30
aed4170e8e Fix bug with cursor filters 2020-02-20 22:53:29 +05:30
c33e93ab37 Add support for cursors with multiple order by clauses 2020-02-19 10:22:44 +05:30
3d3e5d9c2b Add Yugabyte to docs 2020-02-12 08:42:53 +05:30
67b4a4d945 Fix issue with cursor as a variable 2020-02-11 11:41:35 +05:30
7413813138 Add pagination using opaque cursors 2020-02-10 12:15:37 +05:30
12007db76e Add support for Yugabyte DB 2020-02-07 11:42:14 +05:30
c85d379fe2 Add ability to add comments to the allow list 2020-02-04 00:20:25 -05:00
62fd1eac55 Add named auth and the all new action endpoints 2020-02-03 01:21:07 -05:00
1a3d74e1ce Fix issues surfaced by the fuzzer 2020-02-02 01:43:09 -05:00
3a4d885987 Fix to ensure only named queries are saved to the allow list 2020-02-01 10:54:19 -05:00
3bd9b199dd Fix bug with connect / disconnect on array relationships 2020-01-31 00:19:38 -05:00
4ffa1483a4 Add ability to treat JSON/JSONB columns as tables 2020-01-28 00:26:53 -05:00
52f3b1c7a2 Add mutation support for connect / disconnect with array relationships 2020-01-26 01:10:54 -05:00
2d466bfb12 Add skip query selectors that require auth in anon role 2020-01-20 23:38:17 -05:00
a0b8907c3c Fix various json parsing and sql generation bugs 2020-01-19 03:12:51 -05:00
8097ca3b8f Fixes example steps (#33) 2020-01-18 16:44:16 -05:00
0e498b0e94 Fix order by with aliases bug 2020-01-17 09:35:14 -05:00
3eb5b83070 Fix invalid update sql bug 2020-01-17 00:48:17 -05:00
e3c94d17d1 Add corrupt query validation 2020-01-16 01:44:19 -05:00
7240b27214 Fix for table alias relationship bug 2020-01-15 23:26:06 -05:00
236 changed files with 10584 additions and 5767 deletions

7
.gitignore vendored
View File

@ -24,14 +24,15 @@
/demo/tmp
.vscode
main
.DS_Store
.swp
.release
main
super-graph
supergraph
*-fuzz.zip
crashers
suppressions
release
release
.gofuzz
*-fuzz.zip

View File

@ -1,24 +1,31 @@
# stage: 1
FROM node:10 as react-build
WORKDIR /web
COPY web/ ./
COPY /internal/serv/web/ ./
RUN yarn
RUN yarn build
# stage: 2
FROM golang:1.13.4-alpine as go-build
FROM golang:1.14-alpine as go-build
RUN apk update && \
apk add --no-cache make && \
apk add --no-cache git && \
apk add --no-cache jq && \
apk add --no-cache upx=3.95-r2
RUN GO111MODULE=off go get -u github.com/rafaelsq/wtc
ARG SOPS_VERSION=3.5.0
ADD https://github.com/mozilla/sops/releases/download/v${SOPS_VERSION}/sops-v${SOPS_VERSION}.linux /usr/local/bin/sops
RUN chmod 755 /usr/local/bin/sops
WORKDIR /app
COPY . /app
RUN mkdir -p /app/web/build
COPY --from=react-build /web/build/ ./web/build/
RUN mkdir -p /app/internal/serv/web/build
COPY --from=react-build /web/build/ ./internal/serv/web/build
RUN go mod vendor
RUN make build
@ -26,6 +33,8 @@ RUN echo "Compressing binary, will take a bit of time..." && \
upx --ultra-brute -qq super-graph && \
upx -t super-graph
# stage: 3
FROM alpine:latest
WORKDIR /
@ -36,10 +45,15 @@ RUN mkdir -p /config
COPY --from=go-build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
COPY --from=go-build /app/config/* /config/
COPY --from=go-build /app/super-graph .
COPY --from=go-build /app/internal/scripts/start.sh .
COPY --from=go-build /usr/local/bin/sops .
RUN chmod +x /super-graph
RUN chmod +x /start.sh
USER nobody
EXPOSE 8080
ENV GO_ENV production
CMD ./super-graph serv
ENTRYPOINT ["./start.sh"]
CMD ["./super-graph", "serv"]

View File

@ -12,10 +12,10 @@ endif
export GO111MODULE := on
# Build-time Go variables
version = github.com/dosco/super-graph/serv.version
gitBranch = github.com/dosco/super-graph/serv.gitBranch
lastCommitSHA = github.com/dosco/super-graph/serv.lastCommitSHA
lastCommitTime = github.com/dosco/super-graph/serv.lastCommitTime
version = github.com/dosco/super-graph/internal/serv.version
gitBranch = github.com/dosco/super-graph/internal/serv.gitBranch
lastCommitSHA = github.com/dosco/super-graph/internal/serv.lastCommitSHA
lastCommitTime = github.com/dosco/super-graph/internal/serv.lastCommitTime
BUILD_FLAGS ?= -ldflags '-s -w -X ${lastCommitSHA}=${BUILD} -X "${lastCommitTime}=${BUILD_DATE}" -X "${version}=${BUILD_VERSION}" -X ${gitBranch}=${BUILD_BRANCH}'
@ -28,18 +28,18 @@ BIN_DIR := $(GOPATH)/bin
GORICE := $(BIN_DIR)/rice
GOLANGCILINT := $(BIN_DIR)/golangci-lint
GITCHGLOG := $(BIN_DIR)/git-chglog
WEB_BUILD_DIR := ./web/build/manifest.json
WEB_BUILD_DIR := ./internal/serv/web/build/manifest.json
$(GORICE):
@GO111MODULE=off go get -u github.com/GeertJohan/go.rice/rice
$(WEB_BUILD_DIR):
@echo "First install Yarn and create a build of the web UI found under ./web"
@echo "Command: cd web && yarn build"
@echo "First install Yarn and create a build of the web UI then re-run make install"
@echo "Run this command: yarn --cwd internal/serv/web/ build"
@exit 1
$(GITCHGLOG):
@GO111MODULE=off go get -u github.com/git-chglog/git-chglog/cmd/git-chglog
@GO111MODULE=off go get -u github.com/git-chglog/git-chglog/git-chglog
changelog: $(GITCHGLOG)
@git-chglog $(ARGS)
@ -57,7 +57,7 @@ os = $(word 1, $@)
$(PLATFORMS): lint test
@mkdir -p release
@GOOS=$(os) GOARCH=amd64 go build $(BUILD_FLAGS) -o release/$(BINARY)-$(BUILD_VERSION)-$(os)-amd64
@GOOS=$(os) GOARCH=amd64 go build $(BUILD_FLAGS) -o release/$(BINARY)-$(BUILD_VERSION)-$(os)-amd64 main.go
release: windows linux darwin
@ -69,7 +69,7 @@ gen: $(GORICE) $(WEB_BUILD_DIR)
@go generate ./...
$(BINARY): clean
@go build $(BUILD_FLAGS) -o $(BINARY)
@go build $(BUILD_FLAGS) -o $(BINARY) main.go
clean:
@rm -f $(BINARY)
@ -77,11 +77,10 @@ clean:
run: clean
@go run $(BUILD_FLAGS) main.go $(ARGS)
install:
@echo $(GOPATH)
install: clean build
@echo "Commit Hash: `git rev-parse HEAD`"
@echo "Old Hash: `shasum $(GOPATH)/bin/$(BINARY) 2>/dev/null | cut -c -32`"
@go install $(BUILD_FLAGS)
@mv $(BINARY) $(GOPATH)/bin/$(BINARY)
@echo "New Hash:" `shasum $(GOPATH)/bin/$(BINARY) 2>/dev/null | cut -c -32`
uninstall: clean

View File

@ -1,26 +1,74 @@
<!-- <a href="https://supergraph.dev"><img src="https://supergraph.dev/hologram.svg" width="100" height="100" align="right" /></a> -->
<img src="docs/.vuepress/public/super-graph.png" width="250" />
<img src="docs/guide/.vuepress/public/super-graph.png" width="250" />
### Build web products faster. Secure high performance GraphQL
![Apache Public License 2.0](https://img.shields.io/github/license/dosco/super-graph.svg)
![Docker build](https://img.shields.io/docker/cloud/build/dosco/super-graph.svg)
![Cloud native](https://img.shields.io/badge/cloud--native-enabled-blue.svg)
[![GoDoc](https://img.shields.io/badge/godoc-reference-5272B4.svg)](https://pkg.go.dev/github.com/dosco/super-graph/core?tab=doc)
![Apache 2.0](https://img.shields.io/github/license/dosco/super-graph.svg?style=flat-square)
![Docker build](https://img.shields.io/docker/cloud/build/dosco/super-graph.svg?style=flat-square)
![Cloud native](https://img.shields.io/badge/cloud--native-enabled-blue.svg?style=flat-square)
[![Discord Chat](https://img.shields.io/discord/628796009539043348.svg)](https://discord.gg/6pSWCTZ)
## What's Super Graph?
## What is Super Graph
Designed to 100x your developer productivity. Super Graph will instantly, and without you writing code, provide you a high performance GraphQL API for your Postgres DB. GraphQL queries are compiled into a single fast SQL query. Super Graph is a Go library and a service; use it in your own code or run it as a separate service.
Designed to 100x your developer productivity. Super Graph will instantly, and without you writing code, provide you a high performance and secure GraphQL API for your Postgres DB. GraphQL queries are translated into a single fast SQL query. No more writing API code; as you develop
your web frontend, just make the query you need and Super Graph will do the rest.
## Using it as a service
Super Graph has a rich feature set like integrating with your existing Ruby on Rails apps, joining your DB with data from remote APIs, role and attribute based access control, support for JWT tokens, built-in DB mutations and seeding, and a lot more.
```console
git clone https://github.com/dosco/super-graph
cd ./super-graph
make install
![GraphQL](docs/.vuepress/public/graphql.png?raw=true "")
super-graph new <app_name>
```
## Using it in your own code
## The story of Super Graph?
```golang
package main
import (
	"context"
	"database/sql"
	"fmt"
	"log"

	"github.com/dosco/super-graph/core"
	_ "github.com/jackc/pgx/v4/stdlib"
)
func main() {
	db, err := sql.Open("pgx", "postgres://postgres:@localhost:5432/example_db")
	if err != nil {
		log.Fatal(err)
	}

	conf, err := core.ReadInConfig("./config/dev.yml")
	if err != nil {
		log.Fatal(err)
	}

	sg, err := core.NewSuperGraph(conf, db)
	if err != nil {
		log.Fatal(err)
	}
	query := `
	query {
		posts {
			id
			title
		}
	}`

	res, err := sg.GraphQL(context.Background(), query, nil)
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(string(res.Data))
}
```
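To pass GraphQL variables, hand `GraphQL` a JSON document as the third argument. A sketch (the variable name and filter shape here are illustrative, following the filter syntax used in the config examples below):

```golang
vars := json.RawMessage(`{"id": 5}`)
res, err := sg.GraphQL(context.Background(),
	`query { posts(where: { id: { eq: $id } }) { id title } }`, vars)
```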
## About Super Graph
After working on several products throughout my career, I find that we spend way too much time building API backends. Most APIs also require constant updating, and this costs real time and money.
@ -37,6 +85,7 @@ This compiler is what sits at the heart of Super Graph with layers of useful fun
- Complex nested queries and mutations
- Auto learns database tables and relationships
- Role and Attribute based access control
- Opaque cursor based efficient pagination
- Full text search and aggregations
- JWT tokens supported (Auth0, etc)
- Join database queries with remote REST APIs
@ -48,16 +97,8 @@ This compiler is what sits at the heart of Super Graph with layers of useful fun
- Fuzz tested for security
- Database migrations tool
- Database seeding tool
- Works with Postgres and YugabyteDB
## Get started
```
git clone https://github.com/dosco/super-graph
cd ./super-graph
make install
super-graph new <app_name>
```
## Documentation

View File

@ -2,7 +2,7 @@ app_name: "Super Graph Development"
host_port: 0.0.0.0:8080
web_ui: true
# debug, info, warn, error, fatal, panic
# debug, error, warn, info, none
log_level: "debug"
# enable or disable http compression (uses gzip)
@ -32,6 +32,19 @@ reload_on_config_change: true
# Path pointing to where the migrations can be found
migrations_path: ./config/migrations
# Secret key for general encryption operations like
# encrypting the cursor data
secret_key: supercalifajalistics
# CORS: A list of origins a cross-domain request can be executed from.
# If the special * value is present in the list, all origins will be allowed.
# An origin may contain a wildcard (*) to replace 0 or more
# characters (i.e.: http://*.domain.com).
cors_allowed_origins: ["*"]
# Debug Cross Origin Resource Sharing requests
cors_debug: true
# Postgres related environment Variables
# SG_DATABASE_HOST
# SG_DATABASE_PORT
@ -89,7 +102,7 @@ database:
port: 5432
dbname: app_development
user: postgres
password: ''
password: postgres
#schema: "public"
#pool_size: 10
@ -167,10 +180,13 @@ roles:
block: false
- name: deals
query:
limit: 3
columns: ["name", "description" ]
aggregation: false
- name: purchases
query:
limit: 3
aggregation: false
- name: user
@ -183,12 +199,10 @@ roles:
query:
limit: 50
filters: ["{ user_id: { eq: $user_id } }"]
columns: ["id", "name", "description", "search_rank", "search_headline_description" ]
disable_functions: false
insert:
filters: ["{ user_id: { eq: $user_id } }"]
columns: ["id", "name", "description" ]
presets:
- user_id: "$user_id"
- created_at: "now"

View File

@ -6,7 +6,7 @@ app_name: "Super Graph Production"
host_port: 0.0.0.0:8080
web_ui: false
# debug, info, warn, error, fatal, panic, disable
# debug, error, warn, info, none
log_level: "info"
# enable or disable http compression (uses gzip)
@ -32,6 +32,10 @@ enable_tracing: true
# Path pointing to where the migrations can be found
# migrations_path: migrations
# Secret key for general encryption operations like
# encrypting the cursor data
# secret_key: supercalifajalistics
# Postgres related environment Variables
# SG_DATABASE_HOST
# SG_DATABASE_PORT
@ -50,7 +54,7 @@ database:
port: 5432
dbname: app_production
user: postgres
password: ''
password: postgres
#pool_size: 10
#max_retries: 0
#log_level: "debug"

View File

@ -11,7 +11,7 @@ for (i = 0; i < user_count; i++) {
var pwd = fake.password()
var data = {
full_name: fake.name(),
avatar: fake.image_url(),
avatar: fake.avatar_url(200),
phone: fake.phone(),
email: fake.email(),
password: pwd,

180
core/api.go Normal file
View File

@ -0,0 +1,180 @@
// Package core provides the primary API to include and use Super Graph with your own code.
// For detailed documentation visit https://supergraph.dev
//
// Example usage:
/*
package main
import (
	"context"
	"database/sql"
	"fmt"
	"log"

	"github.com/dosco/super-graph/core"
	_ "github.com/jackc/pgx/v4/stdlib"
)
func main() {
	db, err := sql.Open("pgx", "postgres://postgres:@localhost:5432/example_db")
	if err != nil {
		log.Fatal(err)
	}

	conf, err := core.ReadInConfig("./config/dev.yml")
	if err != nil {
		log.Fatal(err)
	}

	sg, err := core.NewSuperGraph(conf, db)
	if err != nil {
		log.Fatal(err)
	}
	query := `
	query {
		posts {
			id
			title
		}
	}`

	res, err := sg.GraphQL(context.Background(), query, nil)
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(string(res.Data))
}
*/
package core
import (
"context"
"crypto/sha256"
"database/sql"
"encoding/json"
_log "log"
"os"
"github.com/dosco/super-graph/core/internal/allow"
"github.com/dosco/super-graph/core/internal/crypto"
"github.com/dosco/super-graph/core/internal/psql"
"github.com/dosco/super-graph/core/internal/qcode"
)
type contextkey int
// Constants to set values on the context passed to the NewSuperGraph function
const (
// Name of the authentication provider, e.g. google, github, etc.
UserIDProviderKey contextkey = iota
// User ID value for authenticated users
UserIDKey
// User role if pre-defined
UserRoleKey
)
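// A sketch of attaching these values before calling GraphQL (assumed usage;
// the literal "42" is illustrative):
//
//	ctx := context.WithValue(context.Background(), core.UserIDKey, "42")
//	res, err := sg.GraphQL(ctx, query, nil)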
// SuperGraph struct is an instance of the Super Graph engine; it holds all the required information like
// database schemas, relationships, etc. that the GraphQL-to-SQL compiler needs to do its job.
type SuperGraph struct {
conf *Config
db *sql.DB
log *_log.Logger
schema *psql.DBSchema
allowList *allow.List
encKey [32]byte
prepared map[string]*preparedItem
roles map[string]*Role
getRole *sql.Stmt
rmap map[uint64]*resolvFn
abacEnabled bool
anonExists bool
qc *qcode.Compiler
pc *psql.Compiler
}
// NewSuperGraph creates the SuperGraph struct; this involves querying the database to learn its
// schemas and relationships.
func NewSuperGraph(conf *Config, db *sql.DB) (*SuperGraph, error) {
sg := &SuperGraph{
conf: conf,
db: db,
log: _log.New(os.Stdout, "", 0),
}
if err := sg.initConfig(); err != nil {
return nil, err
}
if err := sg.initCompilers(); err != nil {
return nil, err
}
if err := sg.initAllowList(); err != nil {
return nil, err
}
if err := sg.initPrepared(); err != nil {
return nil, err
}
if err := sg.initResolvers(); err != nil {
return nil, err
}
if len(conf.SecretKey) != 0 {
sk := sha256.Sum256([]byte(conf.SecretKey))
conf.SecretKey = ""
sg.encKey = sk
} else {
sg.encKey = crypto.NewEncryptionKey()
}
return sg, nil
}
// Result struct contains the output of the GraphQL function; this includes the resulting JSON from the
// database query and any error information.
type Result struct {
op qcode.QType
name string
sql string
role string
Error string `json:"message,omitempty"`
Data json.RawMessage `json:"data,omitempty"`
Extensions *extensions `json:"extensions,omitempty"`
}
// GraphQL function is called on the SuperGraph struct to convert the provided GraphQL query into an
// SQL query and execute it on the database. In production mode prepared statements are directly used
// and no query compiling takes place.
//
// In development mode all named queries are saved into a file `allow.list` and in production mode only
// queries from this file can be run.
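//
// A usage sketch (the query and variable are illustrative):
//
//	vars := json.RawMessage(`{"id": 5}`)
//	res, err := sg.GraphQL(ctx, `query { posts(where: { id: { eq: $id } }) { title } }`, vars)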
func (sg *SuperGraph) GraphQL(c context.Context, query string, vars json.RawMessage) (*Result, error) {
ct := scontext{Context: c, sg: sg, query: query, vars: vars}
if len(vars) <= 2 {
ct.vars = nil
}
if keyExists(c, UserIDKey) {
ct.role = "user"
} else {
ct.role = "anon"
}
ct.res.op = qcode.GetQType(query)
ct.res.name = allow.QueryName(query)
data, err := ct.execQuery()
if err != nil {
return &ct.res, err
}
ct.res.Data = json.RawMessage(data)
return &ct.res, nil
}

View File

@ -1,60 +1,73 @@
package serv
package core
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"github.com/dosco/super-graph/jsn"
)
func argMap(ctx context.Context, vars []byte) func(w io.Writer, tag string) (int, error) {
func (c *scontext) argMap() func(w io.Writer, tag string) (int, error) {
return func(w io.Writer, tag string) (int, error) {
switch tag {
case "user_id_provider":
if v := ctx.Value(userIDProviderKey); v != nil {
if v := c.Value(UserIDProviderKey); v != nil {
return io.WriteString(w, v.(string))
}
return 0, errors.New("query requires variable $user_id_provider")
return 0, argErr("user_id_provider")
case "user_id":
if v := ctx.Value(userIDKey); v != nil {
if v := c.Value(UserIDKey); v != nil {
return io.WriteString(w, v.(string))
}
return 0, errors.New("query requires variable $user_id")
return 0, argErr("user_id")
case "user_role":
if v := ctx.Value(userRoleKey); v != nil {
if v := c.Value(UserRoleKey); v != nil {
return io.WriteString(w, v.(string))
}
return 0, errors.New("query requires variable $user_role")
return 0, argErr("user_role")
}
fields := jsn.Get(vars, [][]byte{[]byte(tag)})
fields := jsn.Get(c.vars, [][]byte{[]byte(tag)})
if len(fields) == 0 {
return 0, nil
return 0, argErr(tag)
}
v := fields[0].Value
// Open and close quotes
if len(v) >= 2 && v[0] == '"' && v[len(v)-1] == '"' {
fields[0].Value = v[1 : len(v)-1]
}
if tag == "cursor" {
if bytes.EqualFold(v, []byte("null")) {
return io.WriteString(w, ``)
}
v1, err := c.sg.decrypt(string(fields[0].Value))
if err != nil {
return 0, err
}
return w.Write(v1)
}
return w.Write(escQuote(fields[0].Value))
}
}
func argList(ctx *coreContext, args [][]byte) ([]interface{}, error) {
func (c *scontext) argList(args [][]byte) ([]interface{}, error) {
vars := make([]interface{}, len(args))
var fields map[string]json.RawMessage
var err error
if len(ctx.req.Vars) != 0 {
fields, _, err = jsn.Tree(ctx.req.Vars)
if len(c.vars) != 0 {
fields, _, err = jsn.Tree(c.vars)
if err != nil {
return nil, err
@ -63,27 +76,37 @@ func argList(ctx *coreContext, args [][]byte) ([]interface{}, error) {
for i := range args {
av := args[i]
switch {
case bytes.Equal(av, []byte("user_id")):
if v := ctx.Value(userIDKey); v != nil {
if v := c.Value(UserIDKey); v != nil {
vars[i] = v.(string)
} else {
return nil, errors.New("query requires variable $user_id")
return nil, argErr("user_id")
}
case bytes.Equal(av, []byte("user_id_provider")):
if v := ctx.Value(userIDProviderKey); v != nil {
if v := c.Value(UserIDProviderKey); v != nil {
vars[i] = v.(string)
} else {
return nil, errors.New("query requires variable $user_id_provider")
return nil, argErr("user_id_provider")
}
case bytes.Equal(av, []byte("user_role")):
if v := ctx.Value(userRoleKey); v != nil {
if v := c.Value(UserRoleKey); v != nil {
vars[i] = v.(string)
} else {
return nil, errors.New("query requires variable $user_role")
return nil, argErr("user_role")
}
case bytes.Equal(av, []byte("cursor")):
if v, ok := fields["cursor"]; ok && v[0] == '"' {
v1, err := c.sg.decrypt(string(v[1 : len(v)-1]))
if err != nil {
return nil, err
}
vars[i] = v1
} else {
return nil, argErr("cursor")
}
default:
@ -96,11 +119,12 @@ func argList(ctx *coreContext, args [][]byte) ([]interface{}, error) {
if err := json.Unmarshal(v, &val); err != nil {
return nil, err
}
vars[i] = val
}
} else {
return nil, fmt.Errorf("query requires variable $%s", string(av))
return nil, argErr(string(av))
}
}
}
@ -135,3 +159,7 @@ func escQuote(b []byte) []byte {
}
return buf.Bytes()
}
func argErr(name string) error {
return fmt.Errorf("query requires variable '%s' to be set", name)
}

View File

@ -1,4 +1,4 @@
package serv
package core
import (
"bytes"
@ -7,42 +7,42 @@ import (
"fmt"
"io"
"github.com/dosco/super-graph/psql"
"github.com/dosco/super-graph/qcode"
"github.com/dosco/super-graph/core/internal/psql"
"github.com/dosco/super-graph/core/internal/qcode"
)
type stmt struct {
role *configRole
role *Role
qc *qcode.QCode
skipped uint32
sql string
}
func buildStmt(qt qcode.QType, gql, vars []byte, role string) ([]stmt, error) {
func (sg *SuperGraph) buildStmt(qt qcode.QType, query, vars []byte, role string) ([]stmt, error) {
switch qt {
case qcode.QTMutation:
return buildRoleStmt(gql, vars, role)
return sg.buildRoleStmt(query, vars, role)
case qcode.QTQuery:
if role == "anon" {
return buildRoleStmt(gql, vars, "anon")
return sg.buildRoleStmt(query, vars, "anon")
}
if conf.isABACEnabled() {
return buildMultiStmt(gql, vars)
if sg.abacEnabled {
return sg.buildMultiStmt(query, vars)
}
return buildRoleStmt(gql, vars, "user")
return sg.buildRoleStmt(query, vars, "user")
default:
return nil, fmt.Errorf("unknown query type '%d'", qt)
}
}
func buildRoleStmt(gql, vars []byte, role string) ([]stmt, error) {
ro, ok := conf.roles[role]
func (sg *SuperGraph) buildRoleStmt(query, vars []byte, role string) ([]stmt, error) {
ro, ok := sg.roles[role]
if !ok {
return nil, fmt.Errorf(`roles '%s' not defined in config`, role)
return nil, fmt.Errorf(`roles '%s' not defined in c.sg.config`, role)
}
var vm map[string]json.RawMessage
@ -54,21 +54,15 @@ func buildRoleStmt(gql, vars []byte, role string) ([]stmt, error) {
}
}
qc, err := qcompile.Compile(gql, ro.Name)
qc, err := sg.qc.Compile(query, ro.Name)
if err != nil {
return nil, err
}
// For the 'anon' role in production only compile
// queries for tables defined in the config file.
if conf.Production && ro.Name == "anon" && !hasTablesWithConfig(qc, ro) {
return nil, errors.New("query contains tables with no 'anon' role config")
}
stmts := []stmt{stmt{role: ro, qc: qc}}
w := &bytes.Buffer{}
skipped, err := pcompile.Compile(qc, w, psql.Variables(vm))
skipped, err := sg.pc.Compile(qc, w, psql.Variables(vm))
if err != nil {
return nil, err
}
@ -79,7 +73,7 @@ func buildRoleStmt(gql, vars []byte, role string) ([]stmt, error) {
return stmts, nil
}
func buildMultiStmt(gql, vars []byte) ([]stmt, error) {
func (sg *SuperGraph) buildMultiStmt(query, vars []byte) ([]stmt, error) {
var vm map[string]json.RawMessage
var err error
@ -89,28 +83,29 @@ func buildMultiStmt(gql, vars []byte) ([]stmt, error) {
}
}
if len(conf.RolesQuery) == 0 {
return buildRoleStmt(gql, vars, "user")
if len(sg.conf.RolesQuery) == 0 {
return nil, errors.New("roles_query not defined")
}
stmts := make([]stmt, 0, len(conf.Roles))
stmts := make([]stmt, 0, len(sg.conf.Roles))
w := &bytes.Buffer{}
for i := 0; i < len(conf.Roles); i++ {
role := &conf.Roles[i]
for i := 0; i < len(sg.conf.Roles); i++ {
role := &sg.conf.Roles[i]
// skip anon as it's not included in the combined multi-statement
if role.Name == "anon" {
continue
}
qc, err := qcompile.Compile(gql, role.Name)
qc, err := sg.qc.Compile(query, role.Name)
if err != nil {
return nil, err
}
stmts = append(stmts, stmt{role: role, qc: qc})
skipped, err := pcompile.Compile(qc, w, psql.Variables(vm))
skipped, err := sg.pc.Compile(qc, w, psql.Variables(vm))
if err != nil {
return nil, err
}
@ -121,7 +116,7 @@ func buildMultiStmt(gql, vars []byte) ([]stmt, error) {
w.Reset()
}
sql, err := renderUserQuery(stmts, vm)
sql, err := sg.renderUserQuery(stmts)
if err != nil {
return nil, err
}
@ -131,8 +126,7 @@ func buildMultiStmt(gql, vars []byte) ([]stmt, error) {
}
//nolint: errcheck
func renderUserQuery(
stmts []stmt, vars map[string]json.RawMessage) (string, error) {
func (sg *SuperGraph) renderUserQuery(stmts []stmt) (string, error) {
w := &bytes.Buffer{}
io.WriteString(w, `SELECT "_sg_auth_info"."role", (CASE "_sg_auth_info"."role" `)
@ -150,7 +144,7 @@ func renderUserQuery(
}
io.WriteString(w, `END) FROM (SELECT (CASE WHEN EXISTS (`)
io.WriteString(w, conf.RolesQuery)
io.WriteString(w, sg.conf.RolesQuery)
io.WriteString(w, `) THEN `)
io.WriteString(w, `(SELECT (CASE`)
@ -166,20 +160,21 @@ func renderUserQuery(
}
io.WriteString(w, ` ELSE 'user' END) FROM (`)
io.WriteString(w, conf.RolesQuery)
io.WriteString(w, sg.conf.RolesQuery)
io.WriteString(w, `) AS "_sg_auth_roles_query" LIMIT 1) `)
io.WriteString(w, `ELSE 'anon' END) FROM (VALUES (1)) AS "_sg_auth_filler") AS "_sg_auth_info"(role) LIMIT 1; `)
return w.String(), nil
}
func hasTablesWithConfig(qc *qcode.QCode, role *configRole) bool {
func (sg *SuperGraph) hasTablesWithConfig(qc *qcode.QCode, role *Role) bool {
for _, id := range qc.Roots {
t, err := schema.GetTable(qc.Selects[id].Name)
t, err := sg.schema.GetTable(qc.Selects[id].Name)
if err != nil {
return false
}
if _, ok := role.tablesMap[t.Name]; !ok {
if r := role.GetTable(t.Name); r == nil {
return false
}
}

163
core/config.go Normal file
View File

@ -0,0 +1,163 @@
package core
import (
"fmt"
"path"
"strings"
"github.com/spf13/viper"
)
// Config struct contains the core specific configuration values
type Config struct {
SecretKey string `mapstructure:"secret_key"`
UseAllowList bool `mapstructure:"use_allow_list"`
AllowListFile string `mapstructure:"allow_list_file"`
SetUserID bool `mapstructure:"set_user_id"`
Vars map[string]string `mapstructure:"variables"`
Blocklist []string
Tables []Table
RolesQuery string `mapstructure:"roles_query"`
Roles []Role
Inflections map[string]string
}
// Table struct defines a database table
type Table struct {
Name string
Table string
Blocklist []string
Remotes []Remote
Columns []Column
}
// Column struct defines a database column
type Column struct {
Name string
Type string
ForeignKey string `mapstructure:"related_to"`
}
// Remote struct defines a remote API endpoint
type Remote struct {
Name string
ID string
Path string
URL string
Debug bool
PassHeaders []string `mapstructure:"pass_headers"`
SetHeaders []struct {
Name string
Value string
} `mapstructure:"set_headers"`
}
// Role struct contains role specific access control values for all database tables
type Role struct {
Name string
Match string
Tables []RoleTable
tm map[string]*RoleTable
}
// RoleTable struct contains role specific access control values for a database table
type RoleTable struct {
Name string
Query Query
Insert Insert
Update Update
Delete Delete
}
// Query struct contains access control values for query operations
type Query struct {
Limit int
Filters []string
Columns []string
DisableFunctions bool `mapstructure:"disable_functions"`
Block bool
}
// Insert struct contains access control values for insert operations
type Insert struct {
Filters []string
Columns []string
Presets map[string]string
Block bool
}
// Update struct contains access control values for update operations
type Update struct {
Filters []string
Columns []string
Presets map[string]string
Block bool
}
// Delete struct contains access control values for delete operations
type Delete struct {
Filters []string
Columns []string
Block bool
}
// ReadInConfig function reads in the config file for the environment specified in the GO_ENV
// environment variable. This is the best way to create a new Super Graph config.
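//
// A usage sketch:
//
//	conf, err := core.ReadInConfig("./config/dev.yml")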
func ReadInConfig(configFile string) (*Config, error) {
cpath := path.Dir(configFile)
cfile := path.Base(configFile)
vi := newViper(cpath, cfile)
if err := vi.ReadInConfig(); err != nil {
return nil, err
}
inherits := vi.GetString("inherits")
if len(inherits) != 0 {
vi = newViper(cpath, inherits)
if err := vi.ReadInConfig(); err != nil {
return nil, err
}
if vi.IsSet("inherits") {
return nil, fmt.Errorf("inherited config (%s) cannot itself inherit (%s)",
inherits,
vi.GetString("inherits"))
}
vi.SetConfigName(cfile)
if err := vi.MergeInConfig(); err != nil {
return nil, err
}
}
c := &Config{}
if err := vi.Unmarshal(&c); err != nil {
return nil, fmt.Errorf("failed to decode config, %v", err)
}
if len(c.AllowListFile) == 0 {
c.AllowListFile = path.Join(cpath, "allow.list")
}
return c, nil
}
func newViper(configPath, configFile string) *viper.Viper {
vi := viper.New()
vi.SetEnvPrefix("SG")
vi.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
vi.AutomaticEnv()
vi.SetConfigName(configFile)
vi.AddConfigPath(configPath)
vi.AddConfigPath("./config")
return vi
}

19
core/consts.go Normal file
View File

@ -0,0 +1,19 @@
package core
import (
"context"
"errors"
)
const (
openVar = "{{"
closeVar = "}}"
)
var (
errNotFound = errors.New("not found in prepared statements")
)
func keyExists(ct context.Context, key contextkey) bool {
return ct.Value(key) != nil
}

395
core/core.go Normal file
View File

@ -0,0 +1,395 @@
package core
import (
"bytes"
"context"
"database/sql"
"encoding/json"
"fmt"
"time"
"github.com/dosco/super-graph/core/internal/psql"
"github.com/dosco/super-graph/core/internal/qcode"
"github.com/valyala/fasttemplate"
)
type extensions struct {
Tracing *trace `json:"tracing,omitempty"`
}
type trace struct {
Version int `json:"version"`
StartTime time.Time `json:"startTime"`
EndTime time.Time `json:"endTime"`
Duration time.Duration `json:"duration"`
Execution execution `json:"execution"`
}
type execution struct {
Resolvers []resolver `json:"resolvers"`
}
type resolver struct {
Path []string `json:"path"`
ParentType string `json:"parentType"`
FieldName string `json:"fieldName"`
ReturnType string `json:"returnType"`
StartOffset int `json:"startOffset"`
Duration time.Duration `json:"duration"`
}
type scontext struct {
context.Context
sg *SuperGraph
query string
vars json.RawMessage
role string
res Result
}
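// initCompilers introspects the database schema, applies the table and
// foreign key overrides from the config, and builds the GraphQL (qcode)
// and SQL (psql) compilers.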
func (sg *SuperGraph) initCompilers() error {
di, err := psql.GetDBInfo(sg.db)
if err != nil {
return err
}
if err = addTables(sg.conf, di); err != nil {
return err
}
if err = addForeignKeys(sg.conf, di); err != nil {
return err
}
sg.schema, err = psql.NewDBSchema(di, getDBTableAliases(sg.conf))
if err != nil {
return err
}
sg.qc, err = qcode.NewCompiler(qcode.Config{
Blocklist: sg.conf.Blocklist,
})
if err != nil {
return err
}
if err := addRoles(sg.conf, sg.qc); err != nil {
return err
}
sg.pc = psql.NewCompiler(psql.Config{
Schema: sg.schema,
Vars: sg.conf.Vars,
})
return nil
}
func (c *scontext) execQuery() ([]byte, error) {
var data []byte
var st *stmt
var err error
if c.sg.conf.UseAllowList {
data, st, err = c.resolvePreparedSQL()
if err != nil {
return nil, err
}
} else {
data, st, err = c.resolveSQL()
if err != nil {
return nil, err
}
}
if len(data) == 0 || st.skipped == 0 {
return data, nil
}
// return c.sg.execRemoteJoin(st, data, c.req.hdr)
return c.sg.execRemoteJoin(st, data, nil)
}
func (c *scontext) resolvePreparedSQL() ([]byte, *stmt, error) {
var tx *sql.Tx
var err error
mutation := (c.res.op == qcode.QTMutation)
useRoleQuery := c.sg.abacEnabled && mutation
useTx := useRoleQuery || c.sg.conf.SetUserID
if useTx {
if tx, err = c.sg.db.BeginTx(c, nil); err != nil {
return nil, nil, err
}
defer tx.Rollback() //nolint: errcheck
}
if c.sg.conf.SetUserID {
if err := setLocalUserID(c, tx); err != nil {
return nil, nil, err
}
}
var role string
if useRoleQuery {
if role, err = c.executeRoleQuery(tx); err != nil {
return nil, nil, err
}
} else if v := c.Value(UserRoleKey); v != nil {
role = v.(string)
} else {
role = c.role
}
c.res.role = role
ps, ok := c.sg.prepared[stmtHash(c.res.name, role)]
if !ok {
return nil, nil, errNotFound
}
c.res.sql = ps.st.sql
var root []byte
var row *sql.Row
varsList, err := c.argList(ps.args)
if err != nil {
return nil, nil, err
}
if useTx {
row = tx.Stmt(ps.sd).QueryRow(varsList...)
} else {
row = ps.sd.QueryRow(varsList...)
}
if ps.roleArg {
err = row.Scan(&role, &root)
} else {
err = row.Scan(&root)
}
if err != nil {
return nil, nil, err
}
c.role = role
if useTx {
if err := tx.Commit(); err != nil {
return nil, nil, err
}
}
if root, err = c.sg.encryptCursor(ps.st.qc, root); err != nil {
return nil, nil, err
}
return root, &ps.st, nil
}
func (c *scontext) resolveSQL() ([]byte, *stmt, error) {
var tx *sql.Tx
var err error
mutation := (c.res.op == qcode.QTMutation)
useRoleQuery := c.sg.abacEnabled && mutation
useTx := useRoleQuery || c.sg.conf.SetUserID
if useTx {
if tx, err = c.sg.db.BeginTx(c, nil); err != nil {
return nil, nil, err
}
defer tx.Rollback() //nolint: errcheck
}
if c.sg.conf.SetUserID {
if err := setLocalUserID(c, tx); err != nil {
return nil, nil, err
}
}
if useRoleQuery {
if c.role, err = c.executeRoleQuery(tx); err != nil {
return nil, nil, err
}
} else if v := c.Value(UserRoleKey); v != nil {
c.role = v.(string)
}
stmts, err := c.sg.buildStmt(c.res.op, []byte(c.query), c.vars, c.role)
if err != nil {
return nil, nil, err
}
st := &stmts[0]
t := fasttemplate.New(st.sql, openVar, closeVar)
buf := &bytes.Buffer{}
_, err = t.ExecuteFunc(buf, c.argMap())
if err != nil {
return nil, nil, err
}
finalSQL := buf.String()
// var stime time.Time
// if c.sg.conf.EnableTracing {
// stime = time.Now()
// }
var root []byte
var role string
var row *sql.Row
// defaultRole := c.role
if useTx {
row = tx.QueryRow(finalSQL)
} else {
row = c.sg.db.QueryRow(finalSQL)
}
if len(stmts) > 1 {
err = row.Scan(&role, &root)
} else {
err = row.Scan(&root)
}
c.res.sql = finalSQL
if len(role) == 0 {
c.res.role = c.role
} else {
c.res.role = role
}
if err != nil {
return nil, nil, err
}
if useTx {
if err := tx.Commit(); err != nil {
return nil, nil, err
}
}
if root, err = c.sg.encryptCursor(st.qc, root); err != nil {
return nil, nil, err
}
if c.sg.allowList.IsPersist() {
if err := c.sg.allowList.Set(c.vars, c.query, ""); err != nil {
return nil, nil, err
}
}
if len(stmts) > 1 {
if st = findStmt(role, stmts); st == nil {
return nil, nil, fmt.Errorf("invalid role '%s' returned", role)
}
}
// if c.sg.conf.EnableTracing {
// for _, id := range st.qc.Roots {
// c.addTrace(st.qc.Selects, id, stime)
// }
// }
return root, st, nil
}
func (c *scontext) executeRoleQuery(tx *sql.Tx) (string, error) {
userID := c.Value(UserIDKey)
if userID == nil {
return "anon", nil
}
var role string
row := c.sg.getRole.QueryRow(userID, c.role)
if err := row.Scan(&role); err != nil {
return "", err
}
return role, nil
}
func (r *Result) Operation() string {
return r.op.String()
}
func (r *Result) QueryName() string {
return r.name
}
func (r *Result) Role() string {
return r.role
}
func (r *Result) SQL() string {
return r.sql
}
// func (c *scontext) addTrace(sel []qcode.Select, id int32, st time.Time) {
// et := time.Now()
// du := et.Sub(st)
// if c.res.Extensions == nil {
// c.res.Extensions = &extensions{&trace{
// Version: 1,
// StartTime: st,
// Execution: execution{},
// }}
// }
// c.res.Extensions.Tracing.EndTime = et
// c.res.Extensions.Tracing.Duration = du
// n := 1
// for i := id; i != -1; i = sel[i].ParentID {
// n++
// }
// path := make([]string, n)
// n--
// for i := id; ; i = sel[i].ParentID {
// path[n] = sel[i].Name
// if sel[i].ParentID == -1 {
// break
// }
// n--
// }
// tr := resolver{
// Path: path,
// ParentType: "Query",
// FieldName: sel[id].Name,
// ReturnType: "object",
// StartOffset: 1,
// Duration: du,
// }
// c.res.Extensions.Tracing.Execution.Resolvers =
// append(c.res.Extensions.Tracing.Execution.Resolvers, tr)
// }
func findStmt(role string, stmts []stmt) *stmt {
for i := range stmts {
if stmts[i].role.Name != role {
continue
}
return &stmts[i]
}
return nil
}

View File

@ -1,4 +1,4 @@
package serv
package core
/*

72
core/cursor.go Normal file
View File

@ -0,0 +1,72 @@
package core
import (
"bytes"
"encoding/base64"
"github.com/dosco/super-graph/core/internal/crypto"
"github.com/dosco/super-graph/jsn"
"github.com/dosco/super-graph/core/internal/qcode"
)
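// encryptCursor AES-GCM encrypts and base64-encodes the value of every
// "*_cursor" field in the result JSON so that keyset pagination cursors
// stay opaque to clients.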
func (sg *SuperGraph) encryptCursor(qc *qcode.QCode, data []byte) ([]byte, error) {
var keys [][]byte
for _, s := range qc.Selects {
if s.Paging.Type != qcode.PtOffset {
var buf bytes.Buffer
buf.WriteString(s.FieldName)
buf.WriteString("_cursor")
keys = append(keys, buf.Bytes())
}
}
if len(keys) == 0 {
return data, nil
}
from := jsn.Get(data, keys)
to := make([]jsn.Field, len(from))
for i, f := range from {
to[i].Key = f.Key
if f.Value[0] != '"' || f.Value[len(f.Value)-1] != '"' {
continue
}
var buf bytes.Buffer
if len(f.Value) > 2 {
v, err := crypto.Encrypt(f.Value[1:len(f.Value)-1], &sg.encKey)
if err != nil {
return nil, err
}
buf.WriteByte('"')
buf.WriteString(base64.StdEncoding.EncodeToString(v))
buf.WriteByte('"')
} else {
buf.WriteString(`null`)
}
to[i].Value = buf.Bytes()
}
var buf bytes.Buffer
if err := jsn.Replace(&buf, data, from, to); err != nil {
return nil, err
}
return buf.Bytes(), nil
}
func (sg *SuperGraph) decrypt(data string) ([]byte, error) {
v, err := base64.StdEncoding.DecodeString(data)
if err != nil {
return nil, err
}
return crypto.Decrypt(v, &sg.encKey)
}

15
core/db.go Normal file
View File

@ -0,0 +1,15 @@
package core
import (
"context"
"database/sql"
)
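// setLocalUserID stores the authenticated user ID in the transaction-local
// Postgres setting "user.id" so that database-side policies can read it.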
func setLocalUserID(c context.Context, tx *sql.Tx) error {
var err error
if v := c.Value(UserIDKey); v != nil {
_, err = tx.Exec(`SET LOCAL "user.id" = ?`, v)
}
return err
}

284
core/init.go Normal file
View File

@ -0,0 +1,284 @@
package core
import (
"fmt"
"regexp"
"strings"
"unicode"
"github.com/dosco/super-graph/core/internal/psql"
"github.com/dosco/super-graph/core/internal/qcode"
"github.com/gobuffalo/flect"
)
func (sg *SuperGraph) initConfig() error {
c := sg.conf
for k, v := range c.Inflections {
flect.AddPlural(k, v)
}
// Variables: Validate and sanitize
for k, v := range c.Vars {
c.Vars[k] = sanitizeVars(v)
}
// Tables: Validate and sanitize
tm := make(map[string]struct{})
for i := 0; i < len(c.Tables); i++ {
t := &c.Tables[i]
t.Name = flect.Pluralize(strings.ToLower(t.Name))
if _, ok := tm[t.Name]; ok {
sg.conf.Tables = append(c.Tables[:i], c.Tables[i+1:]...)
sg.log.Printf("WRN duplicate table found: %s", t.Name)
}
tm[t.Name] = struct{}{}
t.Table = flect.Pluralize(strings.ToLower(t.Table))
}
sg.roles = make(map[string]*Role)
for i := 0; i < len(c.Roles); i++ {
role := &c.Roles[i]
role.Name = sanitize(role.Name)
if _, ok := sg.roles[role.Name]; ok {
c.Roles = append(c.Roles[:i], c.Roles[i+1:]...)
sg.log.Printf("WRN duplicate role found: %s", role.Name)
}
role.Match = sanitize(role.Match)
role.tm = make(map[string]*RoleTable)
for n, table := range role.Tables {
role.tm[table.Name] = &role.Tables[n]
}
sg.roles[role.Name] = role
}
// If user role not defined then create it
if _, ok := sg.roles["user"]; !ok {
ur := Role{
Name: "user",
tm: make(map[string]*RoleTable),
}
c.Roles = append(c.Roles, ur)
sg.roles["user"] = &ur
}
// Roles: validate and sanitize
c.RolesQuery = sanitize(c.RolesQuery)
if len(c.RolesQuery) == 0 {
sg.log.Printf("WRN roles_query not defined: attribute based access control disabled")
}
_, userExists := sg.roles["user"]
_, sg.anonExists = sg.roles["anon"]
sg.abacEnabled = userExists && len(c.RolesQuery) != 0
return nil
}
func getDBTableAliases(c *Config) map[string][]string {
m := make(map[string][]string, len(c.Tables))
for i := range c.Tables {
t := c.Tables[i]
if len(t.Table) == 0 || len(t.Columns) != 0 {
continue
}
m[t.Table] = append(m[t.Table], t.Name)
}
return m
}
func addTables(c *Config, di *psql.DBInfo) error {
for _, t := range c.Tables {
if len(t.Table) == 0 || len(t.Columns) == 0 {
continue
}
if err := addTable(di, t.Columns, t); err != nil {
return err
}
}
return nil
}
func addTable(di *psql.DBInfo, cols []Column, t Table) error {
bc, ok := di.GetColumn(t.Table, t.Name)
if !ok {
return fmt.Errorf(
"Column '%s' not found on table '%s'",
t.Name, t.Table)
}
if bc.Type != "json" && bc.Type != "jsonb" {
return fmt.Errorf(
"Column '%s' in table '%s' is of type '%s'. Only JSON or JSONB is valid",
t.Name, t.Table, bc.Type)
}
table := psql.DBTable{
Name: t.Name,
Key: strings.ToLower(t.Name),
Type: bc.Type,
}
columns := make([]psql.DBColumn, 0, len(cols))
for i := range cols {
c := cols[i]
columns = append(columns, psql.DBColumn{
Name: c.Name,
Key: strings.ToLower(c.Name),
Type: c.Type,
})
}
di.AddTable(table, columns)
bc.FKeyTable = t.Name
return nil
}
func addForeignKeys(c *Config, di *psql.DBInfo) error {
for _, t := range c.Tables {
for _, c := range t.Columns {
if len(c.ForeignKey) == 0 {
continue
}
if err := addForeignKey(di, c, t); err != nil {
return err
}
}
}
return nil
}
func addForeignKey(di *psql.DBInfo, c Column, t Table) error {
c1, ok := di.GetColumn(t.Name, c.Name)
if !ok {
return fmt.Errorf(
"Invalid table '%s' or column '%s' in Config",
t.Name, c.Name)
}
v := strings.SplitN(c.ForeignKey, ".", 2)
if len(v) != 2 {
return fmt.Errorf(
"Invalid foreign_key in Config for table '%s' and column '%s",
t.Name, c.Name)
}
fkt, fkc := v[0], v[1]
c2, ok := di.GetColumn(fkt, fkc)
if !ok {
return fmt.Errorf(
"Invalid foreign_key in Config for table '%s' and column '%s",
t.Name, c.Name)
}
c1.FKeyTable = fkt
c1.FKeyColID = []int16{c2.ID}
return nil
}
func addRoles(c *Config, qc *qcode.Compiler) error {
for _, r := range c.Roles {
for _, t := range r.Tables {
if err := addRole(qc, r, t); err != nil {
return err
}
}
}
return nil
}
func addRole(qc *qcode.Compiler, r Role, t RoleTable) error {
blockFilter := []string{"false"}
query := qcode.QueryConfig{
Limit: t.Query.Limit,
Filters: t.Query.Filters,
Columns: t.Query.Columns,
DisableFunctions: t.Query.DisableFunctions,
}
if t.Query.Block {
query.Filters = blockFilter
}
insert := qcode.InsertConfig{
Filters: t.Insert.Filters,
Columns: t.Insert.Columns,
Presets: t.Insert.Presets,
}
if t.Insert.Block {
insert.Filters = blockFilter
}
update := qcode.UpdateConfig{
Filters: t.Update.Filters,
Columns: t.Update.Columns,
Presets: t.Update.Presets,
}
if t.Update.Block {
update.Filters = blockFilter
}
delete := qcode.DeleteConfig{
Filters: t.Delete.Filters,
Columns: t.Delete.Columns,
}
if t.Delete.Block {
delete.Filters = blockFilter
}
return qc.AddRole(r.Name, t.Name, qcode.TRConfig{
Query: query,
Insert: insert,
Update: update,
Delete: delete,
})
}
func (r *Role) GetTable(name string) *RoleTable {
return r.tm[name]
}
func sanitize(value string) string {
return strings.ToLower(strings.TrimSpace(value))
}
var (
varRe1 = regexp.MustCompile(`(?mi)\$([a-zA-Z0-9_.]+)`)
varRe2 = regexp.MustCompile(`\{\{([a-zA-Z0-9_.]+)\}\}`)
)
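// sanitizeVars normalizes whitespace, rewrites $name variables into the
// {{name}} template form, and lowercases each variable reference, e.g.
// "WHERE id = $User_ID" becomes "WHERE id = {{user_id}}".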
func sanitizeVars(s string) string {
s0 := varRe1.ReplaceAllString(s, `{{$1}}`)
s1 := strings.Map(func(r rune) rune {
if unicode.IsSpace(r) {
return ' '
}
return r
}, s0)
return varRe2.ReplaceAllStringFunc(s1, func(m string) string {
return strings.ToLower(m)
})
}

View File

@ -0,0 +1,360 @@
package allow
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"os"
"sort"
"strings"
)
const (
AL_QUERY int = iota + 1
AL_VARS
)
type Item struct {
Name string
key string
Query string
Vars json.RawMessage
Comment string
}
type List struct {
filepath string
saveChan chan Item
}
type Config struct {
CreateIfNotExists bool
Persist bool
}
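// New returns the allow list, looking for the file at the given path, then at
// ./allow.list and ./config/allow.list. With CreateIfNotExists set a missing
// file is not an error, and with Persist set items sent to Set are written
// back to the file.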
func New(filename string, conf Config) (*List, error) {
al := List{}
if len(filename) != 0 {
fp := filename
if _, err := os.Stat(fp); err == nil {
al.filepath = fp
} else if !os.IsNotExist(err) {
return nil, err
}
}
if len(al.filepath) == 0 {
fp := "./allow.list"
if _, err := os.Stat(fp); err == nil {
al.filepath = fp
} else if !os.IsNotExist(err) {
return nil, err
}
}
if len(al.filepath) == 0 {
fp := "./config/allow.list"
if _, err := os.Stat(fp); err == nil {
al.filepath = fp
} else if !os.IsNotExist(err) {
return nil, err
}
}
if len(al.filepath) == 0 {
if !conf.CreateIfNotExists {
return nil, errors.New("allow.list not found")
}
if len(filename) == 0 {
al.filepath = "./config/allow.list"
} else {
al.filepath = filename
}
}
var err error
if conf.Persist {
al.saveChan = make(chan Item)
go func() {
for v := range al.saveChan {
if err = al.save(v); err != nil {
break
}
}
}()
}
if err != nil {
return nil, err
}
return &al, nil
}
func (al *List) IsPersist() bool {
return al.saveChan != nil
}
func (al *List) Set(vars []byte, query, comment string) error {
if al.saveChan == nil {
return errors.New("allow.list is read-only")
}
if len(query) == 0 {
return errors.New("empty query")
}
var q string
for i := 0; i < len(query); i++ {
c := query[i]
if c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z' {
q = query
break
} else if c == '{' {
q = "query " + query
break
}
}
al.saveChan <- Item{
Comment: comment,
Query: q,
Vars: vars,
}
return nil
}
func (al *List) Load() ([]Item, error) {
var list []Item
b, err := ioutil.ReadFile(al.filepath)
if err != nil {
return list, err
}
if len(b) == 0 {
return list, nil
}
var comment bytes.Buffer
var varBytes []byte
itemMap := make(map[string]struct{})
s, e, c := 0, 0, 0
ty := 0
for {
fq := false
if c == 0 && b[e] == '#' {
s = e
for e < len(b) && b[e] != '\n' {
e++
}
if (e - s) > 2 {
comment.Write(b[(s + 1):(e + 1)])
}
}
if e >= len(b) {
break
}
if matchPrefix(b, e, "query") || matchPrefix(b, e, "mutation") {
if c == 0 {
s = e
}
ty = AL_QUERY
} else if matchPrefix(b, e, "variables") {
if c == 0 {
s = e + len("variables") + 1
}
ty = AL_VARS
} else if b[e] == '{' {
c++
} else if b[e] == '}' {
c--
if c == 0 {
if ty == AL_QUERY {
fq = true
} else if ty == AL_VARS {
varBytes = b[s:(e + 1)]
}
ty = 0
}
}
if fq {
query := string(b[s:(e + 1)])
name := QueryName(query)
key := strings.ToLower(name)
if _, ok := itemMap[key]; !ok {
v := Item{
Name: name,
key: key,
Query: query,
Vars: varBytes,
Comment: comment.String(),
}
list = append(list, v)
comment.Reset()
}
varBytes = nil
}
e++
if e >= len(b) {
break
}
}
return list, nil
}
func (al *List) save(item Item) error {
item.Name = QueryName(item.Query)
item.key = strings.ToLower(item.Name)
if len(item.Name) == 0 {
return nil
}
list, err := al.Load()
if err != nil {
return err
}
index := -1
for i, v := range list {
if strings.EqualFold(v.Name, item.Name) {
index = i
break
}
}
if index != -1 {
if len(list[index].Comment) != 0 {
item.Comment = list[index].Comment
}
list[index] = item
} else {
list = append(list, item)
}
f, err := os.Create(al.filepath)
if err != nil {
return err
}
defer f.Close()
sort.Slice(list, func(i, j int) bool {
return strings.Compare(list[i].key, list[j].key) == -1
})
for _, v := range list {
cmtLines := strings.Split(v.Comment, "\n")
i := 0
for _, c := range cmtLines {
if c = strings.TrimSpace(c); len(c) == 0 {
continue
}
_, err := f.WriteString(fmt.Sprintf("# %s\n", c))
if err != nil {
return err
}
i++
}
if i != 0 {
if _, err := f.WriteString("\n"); err != nil {
return err
}
} else {
if _, err := f.WriteString(fmt.Sprintf("# Query named %s\n\n", v.Name)); err != nil {
return err
}
}
if len(v.Vars) != 0 && !bytes.Equal(v.Vars, []byte("{}")) {
vj, err := json.MarshalIndent(v.Vars, "", " ")
if err != nil {
return fmt.Errorf("failed to marshal vars: %v", err)
}
_, err = f.WriteString(fmt.Sprintf("variables %s\n\n", vj))
if err != nil {
return err
}
}
if v.Query[0] == '{' {
_, err = f.WriteString(fmt.Sprintf("query %s\n\n", v.Query))
} else {
_, err = f.WriteString(fmt.Sprintf("%s\n\n", v.Query))
}
if err != nil {
return err
}
}
return nil
}
func matchPrefix(b []byte, i int, s string) bool {
if (len(b) - i) < len(s) {
return false
}
for n := 0; n < len(s); n++ {
if b[(i+n)] != s[n] {
return false
}
}
return true
}
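// QueryName extracts the operation name from a GraphQL query or mutation,
// e.g. QueryName("query getUsers { users { id } }") returns "getUsers";
// anonymous operations return an empty string.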
func QueryName(b string) string {
state, s := 0, 0
for i := 0; i < len(b); i++ {
switch {
case state == 2 && !isValidNameChar(b[i]):
return b[s:i]
case state == 1 && b[i] == '{':
return ""
case state == 1 && isValidNameChar(b[i]):
s = i
state = 2
case i != 0 && b[i] == ' ' && (b[i-1] == 'n' || b[i-1] == 'y'):
state = 1
}
}
return ""
}
func isValidNameChar(c byte) bool {
return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9') || c == '_'
}

View File

@ -0,0 +1,84 @@
package allow
import (
"testing"
)
func TestGQLName1(t *testing.T) {
var q = `
query {
products(
distinct: [price]
where: { id: { and: { greater_or_equals: 20, lt: 28 } } }
) { id name } }`
name := QueryName(q)
if len(name) != 0 {
t.Fatal("Name should be empty, not ", name)
}
}
func TestGQLName2(t *testing.T) {
var q = `
query hakuna_matata
{
products(
distinct: [price]
where: { id: { and: { greater_or_equals: 20, lt: 28 } } }
) {
id
name
}
}`
name := QueryName(q)
if name != "hakuna_matata" {
t.Fatal("Name should be 'hakuna_matata', not ", name)
}
}
func TestGQLName3(t *testing.T) {
var q = `
mutation means{ users { id } }`
// var v2 = ` { products( limit: 30, order_by: { price: desc }, distinct: [ price ] where: { id: { and: { greater_or_equals: 20, lt: 28 } } }) { id name price user { id email } } } `
name := QueryName(q)
if name != "means" {
t.Fatal("Name should be 'means', not ", name)
}
}
func TestGQLName4(t *testing.T) {
var q = `
query no_worries
users {
id
}
}`
name := QueryName(q)
if name != "no_worries" {
t.Fatal("Name should be 'no_worries', not ", name)
}
}
func TestGQLName5(t *testing.T) {
var q = `
{
users {
id
}
}`
name := QueryName(q)
if len(name) != 0 {
t.Fatal("Name should be empty, not ", name)
}
}

View File

@ -0,0 +1,15 @@
package allow
import "testing"
func TestFuzzCrashers(t *testing.T) {
var crashers = []string{
"query",
"q",
"que",
}
for _, f := range crashers {
_ = QueryName(f)
}
}

View File

@ -0,0 +1,80 @@
// cryptopasta - basic cryptography examples
//
// Written in 2015 by George Tankersley <george.tankersley@gmail.com>
//
// To the extent possible under law, the author(s) have dedicated all copyright
// and related and neighboring rights to this software to the public domain
// worldwide. This software is distributed without any warranty.
//
// You should have received a copy of the CC0 Public Domain Dedication along
// with this software. If not, see // <http://creativecommons.org/publicdomain/zero/1.0/>.
// Provides symmetric authenticated encryption using 256-bit AES-GCM with a random nonce.
package crypto
import (
"crypto/aes"
"crypto/cipher"
"crypto/rand"
"errors"
"io"
)
// NewEncryptionKey generates a random 256-bit key for Encrypt() and
// Decrypt(). It panics if the source of randomness fails.
func NewEncryptionKey() [32]byte {
key := [32]byte{}
_, err := io.ReadFull(rand.Reader, key[:])
if err != nil {
panic(err)
}
return key
}
// Encrypt encrypts data using 256-bit AES-GCM. This both hides the content of
// the data and provides a check that it hasn't been altered. Output takes the
// form nonce|ciphertext|tag where '|' indicates concatenation.
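//
// A round-trip sketch (assumed usage):
//
//	key := NewEncryptionKey()
//	ct, err := Encrypt([]byte("secret"), &key)
//	pt, err := Decrypt(ct, &key)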
func Encrypt(plaintext []byte, key *[32]byte) (ciphertext []byte, err error) {
block, err := aes.NewCipher(key[:])
if err != nil {
return nil, err
}
gcm, err := cipher.NewGCM(block)
if err != nil {
return nil, err
}
nonce := make([]byte, gcm.NonceSize())
_, err = io.ReadFull(rand.Reader, nonce)
if err != nil {
return nil, err
}
return gcm.Seal(nonce, nonce, plaintext, nil), nil
}
// Decrypt decrypts data using 256-bit AES-GCM. This both hides the content of
// the data and provides a check that it hasn't been altered. Expects input
// form nonce|ciphertext|tag where '|' indicates concatenation.
func Decrypt(ciphertext []byte, key *[32]byte) (plaintext []byte, err error) {
block, err := aes.NewCipher(key[:])
if err != nil {
return nil, err
}
gcm, err := cipher.NewGCM(block)
if err != nil {
return nil, err
}
if len(ciphertext) < gcm.NonceSize() {
return nil, errors.New("malformed ciphertext")
}
return gcm.Open(nil,
ciphertext[:gcm.NonceSize()],
ciphertext[gcm.NonceSize():],
nil,
)
}

View File

@ -0,0 +1,216 @@
//nolint:errcheck
package psql
import (
"errors"
"io"
"strings"
"github.com/dosco/super-graph/core/internal/qcode"
)
func (c *compilerContext) renderBaseColumns(
sel *qcode.Select,
ti *DBTableInfo,
childCols []*qcode.Column,
skipped uint32) ([]int, bool, error) {
var realColsRendered []int
colcount := (len(sel.Cols) + len(sel.OrderBy) + 1)
colmap := make(map[string]struct{}, colcount)
isSearch := sel.Args["search"] != nil
isCursorPaged := sel.Paging.Type != qcode.PtOffset
isAgg := false
i := 0
for n, col := range sel.Cols {
cn := col.Name
colmap[cn] = struct{}{}
_, isRealCol := ti.ColMap[cn]
if isRealCol {
c.renderComma(i)
realColsRendered = append(realColsRendered, n)
colWithTable(c.w, ti.Name, cn)
} else {
switch {
case isSearch && cn == "search_rank":
if err := c.renderColumnSearchRank(sel, ti, col, i); err != nil {
return nil, false, err
}
case isSearch && strings.HasPrefix(cn, "search_headline_"):
if err := c.renderColumnSearchHeadline(sel, ti, col, i); err != nil {
return nil, false, err
}
case cn == "__typename":
if err := c.renderColumnTypename(sel, ti, col, i); err != nil {
return nil, false, err
}
case strings.HasSuffix(cn, "_cursor"):
continue
default:
if err := c.renderColumnFunction(sel, ti, col, i); err != nil {
return nil, false, err
}
isAgg = true
}
}
i++
}
if isCursorPaged {
if _, ok := colmap[ti.PrimaryCol.Key]; !ok {
colmap[ti.PrimaryCol.Key] = struct{}{}
c.renderComma(i)
colWithTable(c.w, ti.Name, ti.PrimaryCol.Name)
}
i++
}
for _, ob := range sel.OrderBy {
if _, ok := colmap[ob.Col]; ok {
continue
}
colmap[ob.Col] = struct{}{}
c.renderComma(i)
colWithTable(c.w, ti.Name, ob.Col)
i++
}
for _, col := range childCols {
if _, ok := colmap[col.Name]; ok {
continue
}
c.renderComma(i)
colWithTable(c.w, col.Table, col.Name)
i++
}
return realColsRendered, isAgg, nil
}
func (c *compilerContext) renderColumnSearchRank(sel *qcode.Select, ti *DBTableInfo, col qcode.Column, columnsRendered int) error {
if isColumnBlocked(sel, col.Name) {
return nil
}
if ti.TSVCol == nil {
return errors.New("no ts_vector column found")
}
cn := ti.TSVCol.Name
arg := sel.Args["search"]
c.renderComma(columnsRendered)
//fmt.Fprintf(w, `ts_rank("%s"."%s", websearch_to_tsquery('%s')) AS %s`,
//c.sel.Name, cn, arg.Val, col.Name)
io.WriteString(c.w, `ts_rank(`)
colWithTable(c.w, ti.Name, cn)
if c.schema.ver >= 110000 {
io.WriteString(c.w, `, websearch_to_tsquery('{{`)
} else {
io.WriteString(c.w, `, to_tsquery('{{`)
}
io.WriteString(c.w, arg.Val)
io.WriteString(c.w, `}}'))`)
alias(c.w, col.Name)
return nil
}
func (c *compilerContext) renderColumnSearchHeadline(sel *qcode.Select, ti *DBTableInfo, col qcode.Column, columnsRendered int) error {
cn := col.Name[16:]
if isColumnBlocked(sel, cn) {
return nil
}
arg := sel.Args["search"]
c.renderComma(columnsRendered)
//fmt.Fprintf(w, `ts_headline("%s"."%s", websearch_to_tsquery('%s')) AS %s`,
//c.sel.Name, cn, arg.Val, col.Name)
io.WriteString(c.w, `ts_headline(`)
colWithTable(c.w, ti.Name, cn)
if c.schema.ver >= 110000 {
io.WriteString(c.w, `, websearch_to_tsquery('{{`)
} else {
io.WriteString(c.w, `, to_tsquery('{{`)
}
io.WriteString(c.w, arg.Val)
io.WriteString(c.w, `}}'))`)
alias(c.w, col.Name)
return nil
}
func (c *compilerContext) renderColumnTypename(sel *qcode.Select, ti *DBTableInfo, col qcode.Column, columnsRendered int) error {
if isColumnBlocked(sel, col.Name) {
return nil
}
c.renderComma(columnsRendered)
io.WriteString(c.w, `(`)
squoted(c.w, ti.Name)
io.WriteString(c.w, ` :: text)`)
alias(c.w, col.Name)
return nil
}
func (c *compilerContext) renderColumnFunction(sel *qcode.Select, ti *DBTableInfo, col qcode.Column, columnsRendered int) error {
pl := funcPrefixLen(col.Name)
// if pl == 0 {
// //fmt.Fprintf(w, `'%s not defined' AS %s`, cn, col.Name)
// io.WriteString(c.w, `'`)
// io.WriteString(c.w, col.Name)
// io.WriteString(c.w, ` not defined'`)
// alias(c.w, col.Name)
// }
if pl == 0 || !sel.Functions {
return nil
}
cn := col.Name[pl:]
if isColumnBlocked(sel, cn) {
return nil
}
fn := col.Name[:pl-1] // function name without the trailing underscore
c.renderComma(columnsRendered)
//fmt.Fprintf(w, `%s("%s"."%s") AS %s`, fn, c.sel.Name, cn, col.Name)
io.WriteString(c.w, fn)
io.WriteString(c.w, `(`)
colWithTable(c.w, ti.Name, cn)
io.WriteString(c.w, `)`)
alias(c.w, col.Name)
return nil
}
func (c *compilerContext) renderComma(columnsRendered int) {
if columnsRendered != 0 {
io.WriteString(c.w, `, `)
}
}
func isColumnBlocked(sel *qcode.Select, name string) bool {
if len(sel.Allowed) != 0 {
if _, ok := sel.Allowed[name]; !ok {
return true
}
}
return false
}
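
As a quick illustration of the allow-list semantics, a minimal test sketch (assuming qcode.Select.Allowed is a set keyed by column name, as the lookup above implies):

// Hypothetical test, not part of this diff.
func TestIsColumnBlocked(t *testing.T) {
	// With a non-empty allow list only the listed columns pass.
	sel := &qcode.Select{Allowed: map[string]struct{}{"id": {}, "name": {}}}
	if isColumnBlocked(sel, "name") {
		t.Fatal("allowed column should not be blocked")
	}
	if !isColumnBlocked(sel, "email") {
		t.Fatal("unlisted column should be blocked")
	}
	// An empty allow list blocks nothing.
	if isColumnBlocked(&qcode.Select{}, "email") {
		t.Fatal("empty allow list should block nothing")
	}
}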

View File

@ -0,0 +1,54 @@
// +build gofuzz
package psql
import (
"encoding/json"
"github.com/dosco/super-graph/core/internal/qcode"
)
var (
qcompileTest, _ = qcode.NewCompiler(qcode.Config{})
schema = getTestSchema()
vars = NewVariables(map[string]string{
"admin_account_id": "5",
})
pcompileTest = NewCompiler(Config{
Schema: schema,
Vars: vars,
})
)
// FuzzerEntrypoint for Fuzzbuzz
func Fuzz(data []byte) int {
gql := `mutation {
product(insert: $data) {
id
name
user {
id
full_name
email
}
}
}`
qc, err := qcompileTest.Compile([]byte(gql), "user")
if err != nil {
panic("qcompile can't fail")
}
vars := map[string]json.RawMessage{
"data": json.RawMessage(data),
}
_, _, err = pcompileTest.CompileEx(qc, vars)
if err != nil {
return 0
}
return 1
}
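
By go-fuzz convention (which the gofuzz build tag suggests), returning 1 marks the input as interesting for corpus growth and 0 as uninteresting. Assuming the dvyukov/go-fuzz toolchain, a typical local run would be something like:

go-fuzz-build github.com/dosco/super-graph/core/internal/psql
go-fuzz -bin=psql-fuzz.zip -workdir=fuzzdata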

View File

@ -6,8 +6,8 @@ import (
"fmt"
"io"
"github.com/dosco/super-graph/qcode"
"github.com/dosco/super-graph/util"
"github.com/dosco/super-graph/core/internal/qcode"
"github.com/dosco/super-graph/core/internal/util"
)
func (c *compilerContext) renderInsert(qc *qcode.QCode, w io.Writer,
@ -15,7 +15,10 @@ func (c *compilerContext) renderInsert(qc *qcode.QCode, w io.Writer,
insert, ok := vars[qc.ActionVar]
if !ok {
return 0, fmt.Errorf("Variable '%s' not !defined", qc.ActionVar)
return 0, fmt.Errorf("variable '%s' not defined", qc.ActionVar)
}
if len(insert) == 0 {
return 0, fmt.Errorf("variable '%s' is empty", qc.ActionVar)
}
io.WriteString(c.w, `WITH "_sg_input" AS (SELECT '{{`)
@ -147,7 +150,14 @@ func renderNestedInsertRelColumns(w io.Writer, item kvitem, values bool) error {
io.WriteString(w, `, `)
}
if values {
colWithTable(w, v.relCP.Left.Table, v.relCP.Left.Col)
if v._ctype > 0 {
io.WriteString(w, `"_x_`)
io.WriteString(w, v.relCP.Left.Table)
io.WriteString(w, `".`)
quoted(w, v.relCP.Left.Col)
} else {
colWithTable(w, v.relCP.Left.Table, v.relCP.Left.Col)
}
} else {
quoted(w, v.relCP.Right.Col)
}
@ -166,12 +176,18 @@ func renderNestedInsertRelTables(w io.Writer, item kvitem) error {
io.WriteString(w, `, `)
}
} else {
// Render child foreign key columns if child-to-parent
// Render tables needed to set values if child-to-parent
// relationship is one-to-many
for _, v := range item.items {
if v.relCP.Type == RelOneToMany {
quoted(w, v.relCP.Left.Table)
io.WriteString(w, `, `)
if v._ctype > 0 {
io.WriteString(w, `"_x_`)
io.WriteString(w, v.relCP.Left.Table)
io.WriteString(w, `", `)
} else {
quoted(w, v.relCP.Left.Table)
io.WriteString(w, `, `)
}
}
}
}

View File

@ -0,0 +1,271 @@
package psql
import (
"encoding/json"
"testing"
)
func simpleInsert(t *testing.T) {
gql := `mutation {
user(insert: $data) {
id
}
}`
vars := map[string]json.RawMessage{
"data": json.RawMessage(`{"email": "reannagreenholt@orn.com", "full_name": "Flo Barton"}`),
}
compileGQLToPSQL(t, gql, vars, "user")
}
func singleInsert(t *testing.T) {
gql := `mutation {
product(id: $id, insert: $insert) {
id
name
}
}`
vars := map[string]json.RawMessage{
"insert": json.RawMessage(` { "name": "my_name", "price": 6.95, "description": "my_desc", "user_id": 5 }`),
}
compileGQLToPSQL(t, gql, vars, "anon")
}
func bulkInsert(t *testing.T) {
gql := `mutation {
product(name: "test", id: $id, insert: $insert) {
id
name
}
}`
vars := map[string]json.RawMessage{
"insert": json.RawMessage(` [{ "name": "my_name", "description": "my_desc" }]`),
}
compileGQLToPSQL(t, gql, vars, "anon")
}
func simpleInsertWithPresets(t *testing.T) {
gql := `mutation {
product(insert: $data) {
id
}
}`
vars := map[string]json.RawMessage{
"data": json.RawMessage(`{"name": "Tomato", "price": 5.76}`),
}
compileGQLToPSQL(t, gql, vars, "user")
}
func nestedInsertManyToMany(t *testing.T) {
gql := `mutation {
purchase(insert: $data) {
sale_type
quantity
due_date
customer {
id
full_name
email
}
product {
id
name
price
}
}
}`
vars := map[string]json.RawMessage{
"data": json.RawMessage(` {
"sale_type": "bought",
"quantity": 5,
"due_date": "now",
"customer": {
"email": "thedude@rug.com",
"full_name": "The Dude"
},
"product": {
"name": "Apple",
"price": 1.25
}
}
`),
}
compileGQLToPSQL(t, gql, vars, "admin")
}
func nestedInsertOneToMany(t *testing.T) {
gql := `mutation {
user(insert: $data) {
id
full_name
email
product {
id
name
price
}
}
}`
vars := map[string]json.RawMessage{
"data": json.RawMessage(`{
"email": "thedude@rug.com",
"full_name": "The Dude",
"created_at": "now",
"updated_at": "now",
"product": {
"name": "Apple",
"price": 1.25,
"created_at": "now",
"updated_at": "now"
}
}`),
}
compileGQLToPSQL(t, gql, vars, "admin")
}
func nestedInsertOneToOne(t *testing.T) {
gql := `mutation {
product(insert: $data) {
id
name
user {
id
full_name
email
}
}
}`
vars := map[string]json.RawMessage{
"data": json.RawMessage(`{
"name": "Apple",
"price": 1.25,
"created_at": "now",
"updated_at": "now",
"user": {
"hey": {
"now": "what's the matter"
},
"email": "thedude@rug.com",
"full_name": "The Dude",
"created_at": "now",
"updated_at": "now"
}
}`),
}
compileGQLToPSQL(t, gql, vars, "admin")
}
func nestedInsertOneToManyWithConnect(t *testing.T) {
gql := `mutation {
user(insert: $data) {
id
full_name
email
product {
id
name
price
}
}
}`
vars := map[string]json.RawMessage{
"data": json.RawMessage(`{
"email": "thedude@rug.com",
"full_name": "The Dude",
"created_at": "now",
"updated_at": "now",
"product": {
"connect": { "id": 5 }
}
}`),
}
compileGQLToPSQL(t, gql, vars, "admin")
}
func nestedInsertOneToOneWithConnect(t *testing.T) {
gql := `mutation {
product(insert: $data) {
id
name
tags {
id
name
}
user {
id
full_name
email
}
}
}`
vars := map[string]json.RawMessage{
"data": json.RawMessage(`{
"name": "Apple",
"price": 1.25,
"created_at": "now",
"updated_at": "now",
"user": {
"connect": { "id": 5 }
}
}`),
}
compileGQLToPSQL(t, gql, vars, "admin")
}
func nestedInsertOneToOneWithConnectArray(t *testing.T) {
gql := `mutation {
product(insert: $data) {
id
name
user {
id
full_name
email
}
}
}`
vars := map[string]json.RawMessage{
"data": json.RawMessage(`{
"name": "Apple",
"price": 1.25,
"created_at": "now",
"updated_at": "now",
"user": {
"connect": { "id": [1,2] }
}
}`),
}
compileGQLToPSQL(t, gql, vars, "admin")
}
func TestCompileInsert(t *testing.T) {
t.Run("simpleInsert", simpleInsert)
t.Run("singleInsert", singleInsert)
t.Run("bulkInsert", bulkInsert)
t.Run("simpleInsertWithPresets", simpleInsertWithPresets)
t.Run("nestedInsertManyToMany", nestedInsertManyToMany)
t.Run("nestedInsertOneToMany", nestedInsertOneToMany)
t.Run("nestedInsertOneToOne", nestedInsertOneToOne)
t.Run("nestedInsertOneToManyWithConnect", nestedInsertOneToManyWithConnect)
t.Run("nestedInsertOneToOneWithConnect", nestedInsertOneToOneWithConnect)
t.Run("nestedInsertOneToOneWithConnectArray", nestedInsertOneToOneWithConnectArray)
}

View File

@ -8,8 +8,8 @@ import (
"io"
"github.com/dosco/super-graph/jsn"
"github.com/dosco/super-graph/qcode"
"github.com/dosco/super-graph/util"
"github.com/dosco/super-graph/core/internal/qcode"
"github.com/dosco/super-graph/core/internal/util"
)
type itemType int
@ -77,7 +77,7 @@ func (co *Compiler) compileMutation(qc *qcode.QCode, w io.Writer, vars Variables
root.Where = nil
root.Args = nil
return c.compileQuery(qc, w)
return c.compileQuery(qc, w, vars)
}
type kvitem struct {
@ -101,6 +101,9 @@ type renitem struct {
data map[string]json.RawMessage
}
// TODO: Handle cases where a column name matches the child table name
// the child path needs to be excluded in the json sent to insert or update
func (c *compilerContext) handleKVItem(st *util.Stack, item kvitem) error {
var data map[string]json.RawMessage
var array bool
@ -124,9 +127,6 @@ func (c *compilerContext) handleKVItem(st *util.Stack, item kvitem) error {
if v[0] != '{' && v[0] != '[' {
continue
}
if _, ok := item.ti.ColMap[k]; ok {
continue
}
// Get child-to-parent relationship
relCP, err := c.schema.GetRel(k, item.key)
@ -152,13 +152,9 @@ func (c *compilerContext) handleKVItem(st *util.Stack, item kvitem) error {
id++
}
} else {
ti, err := c.schema.GetTable(k)
if err != nil {
return err
}
// Get parent-to-child relationship
relPC, err := c.schema.GetRel(item.key, k)
} else if relPC, err := c.schema.GetRel(item.key, k); err == nil {
ti, err := c.schema.GetTable(k)
if err != nil {
return err
}
@ -277,8 +273,12 @@ func (c *compilerContext) renderUnionStmt(w io.Writer, item renitem) error {
io.WriteString(w, ` SET `)
quoted(w, item.relPC.Right.Col)
io.WriteString(w, ` = `)
// When setting the id of the connected table in a one-to-many setting
// we always overwrite the value including for array columns
colWithTable(w, item.relPC.Left.Table, item.relPC.Left.Col)
io.WriteString(w, `FROM `)
io.WriteString(w, ` FROM `)
quoted(w, item.relPC.Left.Table)
io.WriteString(w, ` WHERE`)
@ -290,7 +290,7 @@ func (c *compilerContext) renderUnionStmt(w io.Writer, item renitem) error {
} else {
io.WriteString(w, ` (`)
}
if err := renderKVItemWhere(w, v); err != nil {
if err := renderWhereFromJSON(w, v, "connect", v.val); err != nil {
return err
}
io.WriteString(w, `)`)
@ -313,7 +313,19 @@ func (c *compilerContext) renderUnionStmt(w io.Writer, item renitem) error {
quoted(w, item.ti.Name)
io.WriteString(w, ` SET `)
quoted(w, item.relPC.Right.Col)
io.WriteString(w, ` = NULL`)
io.WriteString(w, ` = `)
if item.relPC.Right.Array {
io.WriteString(w, ` array_remove(`)
quoted(w, item.relPC.Right.Col)
io.WriteString(w, `, `)
colWithTable(w, item.relPC.Left.Table, item.relPC.Left.Col)
io.WriteString(w, `)`)
} else {
io.WriteString(w, ` NULL`)
}
io.WriteString(w, ` FROM `)
quoted(w, item.relPC.Left.Table)
io.WriteString(w, ` WHERE`)
@ -326,7 +338,7 @@ func (c *compilerContext) renderUnionStmt(w io.Writer, item renitem) error {
} else {
io.WriteString(w, ` (`)
}
if err := renderKVItemWhere(w, v); err != nil {
if err := renderWhereFromJSON(w, v, "disconnect", v.val); err != nil {
return err
}
io.WriteString(w, `)`)
@ -335,10 +347,11 @@ func (c *compilerContext) renderUnionStmt(w io.Writer, item renitem) error {
}
io.WriteString(w, ` RETURNING `)
quoted(w, item.ti.Name)
io.WriteString(w, `.*), `)
io.WriteString(w, `.*)`)
}
if connect && disconnect {
io.WriteString(w, `, `)
quoted(w, item.ti.Name)
io.WriteString(w, ` AS (`)
io.WriteString(w, `SELECT * FROM `)
@ -433,7 +446,10 @@ func (c *compilerContext) renderUpsert(qc *qcode.QCode, w io.Writer,
upsert, ok := vars[qc.ActionVar]
if !ok {
return 0, fmt.Errorf("Variable '%s' not defined", qc.ActionVar)
return 0, fmt.Errorf("variable '%s' not defined", qc.ActionVar)
}
if len(upsert) == 0 {
return 0, fmt.Errorf("variable '%s' is empty", qc.ActionVar)
}
if ti.PrimaryCol == nil {
@ -507,18 +523,33 @@ func (c *compilerContext) renderConnectStmt(qc *qcode.QCode, w io.Writer,
rel := item.relPC
// Render only for parent-to-child relationship of one-to-one
// For this to work the child needs to be found first so its primary key
// can be set in the related column on the parent object.
// E.g. create a product and connect a user to it.
if rel.Type != RelOneToOne {
return nil
}
io.WriteString(w, `, `)
quoted(w, item.ti.Name)
io.WriteString(c.w, ` AS (`)
io.WriteString(w, `, "_x_`)
io.WriteString(c.w, item.ti.Name)
io.WriteString(c.w, `" AS (SELECT `)
io.WriteString(c.w, `SELECT * FROM `)
if rel.Left.Array {
io.WriteString(w, `array_agg(DISTINCT `)
quoted(w, rel.Right.Col)
io.WriteString(w, `) AS `)
quoted(w, rel.Right.Col)
} else {
quoted(w, rel.Right.Col)
}
io.WriteString(c.w, ` FROM "_sg_input" i,`)
quoted(c.w, item.ti.Name)
io.WriteString(c.w, ` WHERE `)
if err := renderKVItemWhere(c.w, item.kvitem); err != nil {
if err := renderWhereFromJSON(c.w, item.kvitem, "connect", item.kvitem.val); err != nil {
return err
}
io.WriteString(c.w, ` LIMIT 1)`)
@ -532,50 +563,105 @@ func (c *compilerContext) renderDisconnectStmt(qc *qcode.QCode, w io.Writer,
rel := item.relPC
// Render only for parent-to-child relationship of one-to-one
// For this to work the child needs to be found first so its
// null value can be set in the related column on the parent object.
// E.g. update a product and disconnect the user from it.
if rel.Type != RelOneToOne {
return nil
}
io.WriteString(w, `, `)
quoted(w, item.ti.Name)
io.WriteString(c.w, ` AS (`)
io.WriteString(w, `, "_x_`)
io.WriteString(c.w, item.ti.Name)
io.WriteString(c.w, `" AS (`)
io.WriteString(c.w, `SELECT * FROM (VALUES(NULL::`)
io.WriteString(w, rel.Right.col.Type)
io.WriteString(c.w, `)) AS LOOKUP(`)
quoted(w, rel.Right.Col)
io.WriteString(c.w, `))`)
if rel.Right.Array {
io.WriteString(c.w, `SELECT `)
quoted(w, rel.Right.Col)
io.WriteString(c.w, ` FROM "_sg_input" i,`)
quoted(c.w, item.ti.Name)
io.WriteString(c.w, ` WHERE `)
if err := renderWhereFromJSON(c.w, item.kvitem, "connect", item.kvitem.val); err != nil {
return err
}
io.WriteString(c.w, ` LIMIT 1))`)
} else {
io.WriteString(c.w, `SELECT * FROM (VALUES(NULL::`)
io.WriteString(w, rel.Right.col.Type)
io.WriteString(c.w, `)) AS LOOKUP(`)
quoted(w, rel.Right.Col)
io.WriteString(c.w, `))`)
}
return nil
}
func renderKVItemWhere(w io.Writer, item kvitem) error {
return renderWhereFromJSON(w, item.ti.Name, item.val)
}
func renderWhereFromJSON(w io.Writer, table string, val []byte) error {
func renderWhereFromJSON(w io.Writer, item kvitem, key string, val []byte) error {
var kv map[string]json.RawMessage
ti := item.ti
if err := json.Unmarshal(val, &kv); err != nil {
return err
}
i := 0
for k, v := range kv {
col, ok := ti.ColMap[k]
if !ok {
continue
}
if i != 0 {
io.WriteString(w, ` AND `)
}
colWithTable(w, table, k)
io.WriteString(w, ` = '`)
switch v[0] {
case '"':
w.Write(v[1 : len(v)-1])
default:
w.Write(v)
if v[0] == '[' {
colWithTable(w, ti.Name, k)
if col.Array {
io.WriteString(w, ` && `)
} else {
io.WriteString(w, ` = `)
}
io.WriteString(w, `ANY((select a::`)
io.WriteString(w, col.Type)
io.WriteString(w, ` AS list from json_array_elements_text(`)
renderPathJSON(w, item, key, k)
io.WriteString(w, `::json) AS a))`)
} else if col.Array {
io.WriteString(w, `(`)
renderPathJSON(w, item, key, k)
io.WriteString(w, `)::`)
io.WriteString(w, col.Type)
io.WriteString(w, ` = ANY(`)
colWithTable(w, ti.Name, k)
io.WriteString(w, `)`)
} else {
colWithTable(w, ti.Name, k)
io.WriteString(w, `= (`)
renderPathJSON(w, item, key, k)
io.WriteString(w, `)::`)
io.WriteString(w, col.Type)
}
io.WriteString(w, `'`)
i++
}
return nil
}
func renderPathJSON(w io.Writer, item kvitem, key1, key2 string) {
io.WriteString(w, `(i.j->`)
joinPath(w, item.path)
io.WriteString(w, `->'`)
io.WriteString(w, key1)
io.WriteString(w, `'->>'`)
io.WriteString(w, key2)
io.WriteString(w, `')`)
}
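
To make the three branches above concrete, a hedged sketch of the SQL they emit, for a hypothetical "users" table with an "id" column of type bigint, item path ["user"] and key "connect" (renderPathJSON supplies the i.j path expression in each case):

-- scalar column, scalar JSON value
"users"."id" = (i.j->'user'->'connect'->>'id')::bigint
-- scalar column, JSON array value (membership test)
"users"."id" = ANY((select a::bigint AS list from json_array_elements_text((i.j->'user'->'connect'->>'id')::json) AS a))
-- array column, scalar JSON value
(i.j->'user'->'connect'->>'id')::bigint = ANY("users"."id")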
func renderCteName(w io.Writer, item kvitem) error {
io.WriteString(w, `"`)
io.WriteString(w, item.ti.Name)
@ -596,12 +682,6 @@ func renderCteNameWithSuffix(w io.Writer, item kvitem, suffix string) error {
return nil
}
func quoted(w io.Writer, identifier string) {
io.WriteString(w, `"`)
io.WriteString(w, identifier)
io.WriteString(w, `"`)
}
func joinPath(w io.Writer, path []string) {
for i := range path {
if i != 0 {

View File

@ -13,20 +13,11 @@ func singleUpsert(t *testing.T) {
}
}`
sql := `WITH "_sg_input" AS (SELECT '{{upsert}}' :: json AS j), "products" AS (INSERT INTO "products" ("name", "description") SELECT "t"."name", "t"."description" FROM "_sg_input" i, json_populate_record(NULL::products, i.j) t RETURNING *) ON CONFLICT (id) DO UPDATE SET name = EXCLUDED.name, description = EXCLUDED.description RETURNING *) SELECT json_object_agg('product', json_0) FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name") AS "json_row_0")) AS "json_0" FROM (SELECT "products"."id", "products"."name" FROM "products" LIMIT ('1') :: integer) AS "products_0" LIMIT ('1') :: integer) AS "sel_0"`
vars := map[string]json.RawMessage{
"upsert": json.RawMessage(` { "name": "my_name", "description": "my_desc" }`),
}
resSQL, err := compileGQLToPSQL(gql, vars, "user")
if err != nil {
t.Fatal(err)
}
if string(resSQL) != sql {
t.Fatal(errNotExpected)
}
compileGQLToPSQL(t, gql, vars, "user")
}
func singleUpsertWhere(t *testing.T) {
@ -37,20 +28,11 @@ func singleUpsertWhere(t *testing.T) {
}
}`
sql := `WITH "_sg_input" AS (SELECT '{{upsert}}' :: json AS j), "products" AS (INSERT INTO "products" ("name", "description") SELECT "t"."name", "t"."description" FROM "_sg_input" i, json_populate_record(NULL::products, i.j) t RETURNING *) ON CONFLICT (id) WHERE (("products"."price") > 3) DO UPDATE SET name = EXCLUDED.name, description = EXCLUDED.description RETURNING *) SELECT json_object_agg('product', json_0) FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name") AS "json_row_0")) AS "json_0" FROM (SELECT "products"."id", "products"."name" FROM "products" LIMIT ('1') :: integer) AS "products_0" LIMIT ('1') :: integer) AS "sel_0"`
vars := map[string]json.RawMessage{
"upsert": json.RawMessage(` { "name": "my_name", "description": "my_desc" }`),
}
resSQL, err := compileGQLToPSQL(gql, vars, "user")
if err != nil {
t.Fatal(err)
}
if string(resSQL) != sql {
t.Fatal(errNotExpected)
}
compileGQLToPSQL(t, gql, vars, "user")
}
func bulkUpsert(t *testing.T) {
@ -61,20 +43,11 @@ func bulkUpsert(t *testing.T) {
}
}`
sql := `WITH "_sg_input" AS (SELECT '{{upsert}}' :: json AS j), "products" AS (INSERT INTO "products" ("name", "description") SELECT "t"."name", "t"."description" FROM "_sg_input" i, json_populate_recordset(NULL::products, i.j) t RETURNING *) ON CONFLICT (id) DO UPDATE SET name = EXCLUDED.name, description = EXCLUDED.description RETURNING *) SELECT json_object_agg('product', json_0) FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name") AS "json_row_0")) AS "json_0" FROM (SELECT "products"."id", "products"."name" FROM "products" LIMIT ('1') :: integer) AS "products_0" LIMIT ('1') :: integer) AS "sel_0"`
vars := map[string]json.RawMessage{
"upsert": json.RawMessage(` [{ "name": "my_name", "description": "my_desc" }]`),
}
resSQL, err := compileGQLToPSQL(gql, vars, "user")
if err != nil {
t.Fatal(err)
}
if string(resSQL) != sql {
t.Fatal(errNotExpected)
}
compileGQLToPSQL(t, gql, vars, "user")
}
func delete(t *testing.T) {
@ -85,20 +58,11 @@ func delete(t *testing.T) {
}
}`
sql := `WITH "products" AS (DELETE FROM "products" WHERE (((("products"."price") > 0) AND (("products"."price") < 8)) AND (("products"."id") IS NOT DISTINCT FROM 1)) RETURNING "products".*)SELECT json_object_agg('product', json_0) FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name") AS "json_row_0")) AS "json_0" FROM (SELECT "products"."id", "products"."name" FROM "products" LIMIT ('1') :: integer) AS "products_0" LIMIT ('1') :: integer) AS "sel_0"`
vars := map[string]json.RawMessage{
"update": json.RawMessage(` { "name": "my_name", "description": "my_desc" }`),
}
resSQL, err := compileGQLToPSQL(gql, vars, "user")
if err != nil {
t.Fatal(err)
}
if string(resSQL) != sql {
t.Fatal(errNotExpected)
}
compileGQLToPSQL(t, gql, vars, "user")
}
// func blockedInsert(t *testing.T) {

View File

@ -0,0 +1,237 @@
package psql
import (
"fmt"
"io/ioutil"
"log"
"os"
"strings"
"testing"
"github.com/dosco/super-graph/core/internal/qcode"
)
const (
errNotExpected = "Generated SQL did not match what was expected"
headerMarker = "=== RUN"
commentMarker = "---"
)
var (
qcompile *qcode.Compiler
pcompile *Compiler
expected map[string][]string
)
func TestMain(m *testing.M) {
var err error
qcompile, err = qcode.NewCompiler(qcode.Config{
Blocklist: []string{
"secret",
"password",
"token",
},
})
if err != nil {
log.Fatal(err)
}
err = qcompile.AddRole("user", "product", qcode.TRConfig{
Query: qcode.QueryConfig{
Columns: []string{"id", "name", "price", "users", "customers"},
Filters: []string{
"{ price: { gt: 0 } }",
"{ price: { lt: 8 } }",
},
},
Insert: qcode.InsertConfig{
Presets: map[string]string{
"user_id": "$user_id",
"created_at": "now",
"updated_at": "now",
},
},
Update: qcode.UpdateConfig{
Filters: []string{"{ user_id: { eq: $user_id } }"},
Presets: map[string]string{"updated_at": "now"},
},
Delete: qcode.DeleteConfig{
Filters: []string{
"{ price: { gt: 0 } }",
"{ price: { lt: 8 } }",
},
},
})
if err != nil {
log.Fatal(err)
}
err = qcompile.AddRole("anon", "product", qcode.TRConfig{
Query: qcode.QueryConfig{
Columns: []string{"id", "name"},
},
})
if err != nil {
log.Fatal(err)
}
err = qcompile.AddRole("anon1", "product", qcode.TRConfig{
Query: qcode.QueryConfig{
Columns: []string{"id", "name", "price"},
DisableFunctions: true,
},
})
if err != nil {
log.Fatal(err)
}
err = qcompile.AddRole("user", "users", qcode.TRConfig{
Query: qcode.QueryConfig{
Columns: []string{"id", "full_name", "avatar", "email", "products"},
},
})
if err != nil {
log.Fatal(err)
}
err = qcompile.AddRole("bad_dude", "users", qcode.TRConfig{
Query: qcode.QueryConfig{
Filters: []string{"false"},
DisableFunctions: true,
},
Insert: qcode.InsertConfig{
Filters: []string{"false"},
},
Update: qcode.UpdateConfig{
Filters: []string{"false"},
},
})
if err != nil {
log.Fatal(err)
}
err = qcompile.AddRole("user", "mes", qcode.TRConfig{
Query: qcode.QueryConfig{
Columns: []string{"id", "full_name", "avatar"},
Filters: []string{
"{ id: { eq: $user_id } }",
},
},
})
if err != nil {
log.Fatal(err)
}
err = qcompile.AddRole("user", "customers", qcode.TRConfig{
Query: qcode.QueryConfig{
Columns: []string{"id", "email", "full_name", "products"},
},
})
if err != nil {
log.Fatal(err)
}
schema := getTestSchema()
vars := NewVariables(map[string]string{
"admin_account_id": "5",
})
pcompile = NewCompiler(Config{
Schema: schema,
Vars: vars,
})
expected = make(map[string][]string)
b, err := ioutil.ReadFile("tests.sql")
if err != nil {
log.Fatal(err)
}
text := string(b)
lines := strings.Split(text, "\n")
var h string
for _, v := range lines {
switch {
case strings.HasPrefix(v, headerMarker):
h = strings.TrimSpace(v[len(headerMarker):])
case strings.HasPrefix(v, commentMarker):
break // comment lines are skipped
default:
v := strings.TrimSpace(v)
if len(v) != 0 {
expected[h] = append(expected[h], v)
}
}
}
os.Exit(m.Run())
}
func compileGQLToPSQL(t *testing.T, gql string, vars Variables, role string) {
generateTestFile := false
if generateTestFile {
var sqlStmts []string
for i := 0; i < 100; i++ {
qc, err := qcompile.Compile([]byte(gql), role)
if err != nil {
t.Fatal(err)
}
_, sqlB, err := pcompile.CompileEx(qc, vars)
if err != nil {
t.Fatal(err)
}
sql := string(sqlB)
match := false
for _, s := range sqlStmts {
if sql == s {
match = true
break
}
}
if !match {
sqlStmts = append(sqlStmts, sql)
fmt.Println(sql)
}
}
return
}
for i := 0; i < 200; i++ {
qc, err := qcompile.Compile([]byte(gql), role)
if err != nil {
t.Fatal(err)
}
_, sqlStmt, err := pcompile.CompileEx(qc, vars)
if err != nil {
t.Fatal(err)
}
failed := true
for _, sql := range expected[t.Name()] {
if string(sqlStmt) == sql {
failed = false
}
}
if failed {
fmt.Println(string(sqlStmt))
t.Fatal(errNotExpected)
}
}
}
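
For reference, a hedged sketch of the tests.sql layout this harness expects: a "=== RUN" header whose trimmed remainder must equal t.Name(), optional "---" comment lines that are skipped, and one or more acceptable SQL statements, one per line (the SQL below is illustrative only):

=== RUN   TestCompileQuery/fetchByID
--- any line starting with the comment marker is ignored
SELECT jsonb_build_object('product', "__sj_0"."json") as "__root" FROM ...

Listing several statements under one header lets a test accept any of the orderings the compiler may legitimately produce, which is why compileGQLToPSQL compiles in a loop and matches against every recorded statement.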

View File

@ -9,14 +9,18 @@ import (
"io"
"strings"
"github.com/dosco/super-graph/qcode"
"github.com/dosco/super-graph/util"
"github.com/dosco/super-graph/core/internal/qcode"
"github.com/dosco/super-graph/core/internal/util"
)
const (
closeBlock = 500
)
var (
ErrAllTablesSkipped = errors.New("all tables skipped. cannot render query")
)
type Variables map[string]json.RawMessage
type Config struct {
@ -30,7 +34,10 @@ type Compiler struct {
}
func NewCompiler(conf Config) *Compiler {
return &Compiler{conf.Schema, conf.Vars}
return &Compiler{
schema: conf.Schema,
vars: conf.Vars,
}
}
func (c *Compiler) AddRelationship(child, parent string, rel *DBRel) error {
@ -65,7 +72,7 @@ func (co *Compiler) CompileEx(qc *qcode.QCode, vars Variables) (uint32, []byte,
func (co *Compiler) Compile(qc *qcode.QCode, w io.Writer, vars Variables) (uint32, error) {
switch qc.Type {
case qcode.QTQuery:
return co.compileQuery(qc, w)
return co.compileQuery(qc, w, vars)
case qcode.QTInsert, qcode.QTUpdate, qcode.QTDelete, qcode.QTUpsert:
return co.compileMutation(qc, w, vars)
}
@ -73,54 +80,38 @@ func (co *Compiler) Compile(qc *qcode.QCode, w io.Writer, vars Variables) (uint3
return 0, fmt.Errorf("Unknown operation type %d", qc.Type)
}
func (co *Compiler) compileQuery(qc *qcode.QCode, w io.Writer) (uint32, error) {
func (co *Compiler) compileQuery(qc *qcode.QCode, w io.Writer, vars Variables) (uint32, error) {
if len(qc.Selects) == 0 {
return 0, errors.New("empty query")
}
c := &compilerContext{w, qc.Selects, co}
multiRoot := (len(qc.Roots) > 1)
st := NewIntStack()
i := 0
if multiRoot {
io.WriteString(c.w, `SELECT row_to_json("json_root") FROM (SELECT `)
for i, id := range qc.Roots {
root := qc.Selects[id]
st.Push(root.ID + closeBlock)
st.Push(root.ID)
if i != 0 {
io.WriteString(c.w, `, `)
}
io.WriteString(c.w, `"sel_`)
int2string(c.w, root.ID)
io.WriteString(c.w, `"."json_`)
int2string(c.w, root.ID)
io.WriteString(c.w, `"`)
alias(c.w, root.FieldName)
io.WriteString(c.w, `SELECT jsonb_build_object(`)
for _, id := range qc.Roots {
root := &qc.Selects[id]
if root.SkipRender || len(root.Cols) == 0 {
continue
}
io.WriteString(c.w, ` FROM `)
} else {
root := qc.Selects[0]
io.WriteString(c.w, `SELECT json_object_agg(`)
io.WriteString(c.w, `'`)
io.WriteString(c.w, root.FieldName)
io.WriteString(c.w, `', `)
io.WriteString(c.w, `json_`)
int2string(c.w, root.ID)
st.Push(root.ID + closeBlock)
st.Push(root.ID)
io.WriteString(c.w, `) FROM `)
if i != 0 {
io.WriteString(c.w, `, `)
}
c.renderRootSelect(root)
i++
}
io.WriteString(c.w, `) as "__root" FROM `)
if i == 0 {
return 0, ErrAllTablesSkipped
}
var ignored uint32
@ -135,8 +126,8 @@ func (co *Compiler) compileQuery(qc *qcode.QCode, w io.Writer) (uint32, error) {
if id < closeBlock {
sel := &c.s[id]
if sel.ParentID == -1 {
io.WriteString(c.w, `(`)
if len(sel.Cols) == 0 {
continue
}
ti, err := c.schema.GetTable(sel.Name)
@ -144,13 +135,17 @@ func (co *Compiler) compileQuery(qc *qcode.QCode, w io.Writer) (uint32, error) {
return 0, err
}
if sel.ParentID != -1 {
if err = c.renderLateralJoin(sel); err != nil {
return 0, err
}
if sel.ParentID == -1 {
io.WriteString(c.w, `(`)
} else {
c.renderLateralJoin(sel)
}
skipped, err := c.renderSelect(sel, ti)
if !ti.Singular {
c.renderPluralSelect(sel, ti)
}
skipped, err := c.renderSelect(sel, ti, vars)
if err != nil {
return 0, err
}
@ -161,6 +156,9 @@ func (co *Compiler) compileQuery(qc *qcode.QCode, w io.Writer) (uint32, error) {
continue
}
child := &c.s[cid]
if child.SkipRender {
continue
}
st.Push(child.ID + closeBlock)
st.Push(child.ID)
@ -174,40 +172,97 @@ func (co *Compiler) compileQuery(qc *qcode.QCode, w io.Writer) (uint32, error) {
return 0, err
}
err = c.renderSelectClose(sel, ti)
if err != nil {
return 0, err
io.WriteString(c.w, `)`)
aliasWithID(c.w, "__sr", sel.ID)
io.WriteString(c.w, `)`)
aliasWithID(c.w, "__sj", sel.ID)
if !ti.Singular {
io.WriteString(c.w, `)`)
aliasWithID(c.w, "__sj", sel.ID)
}
if sel.ParentID != -1 {
if err = c.renderLateralJoinClose(sel); err != nil {
return 0, err
}
} else {
io.WriteString(c.w, `)`)
aliasWithID(c.w, `sel`, sel.ID)
if sel.ParentID == -1 {
if st.Len() != 0 {
io.WriteString(c.w, `, `)
}
} else {
c.renderLateralJoinClose(sel)
}
if len(sel.Args) != 0 {
i := 0
for _, v := range sel.Args {
qcode.FreeNode(v)
qcode.FreeNode(v, 500)
i++
}
}
}
}
if multiRoot {
io.WriteString(c.w, `) AS "json_root"`)
}
return ignored, nil
}
func (c *compilerContext) processChildren(sel *qcode.Select, ti *DBTableInfo) (uint32, []*qcode.Column) {
func (c *compilerContext) renderPluralSelect(sel *qcode.Select, ti *DBTableInfo) error {
io.WriteString(c.w, `SELECT coalesce(jsonb_agg("__sj_`)
int2string(c.w, sel.ID)
io.WriteString(c.w, `"."json"), '[]') as "json"`)
if sel.Paging.Type != qcode.PtOffset {
n := 0
// check whether the primary key is already included in the
// order-by query argument
for _, ob := range sel.OrderBy {
if ob.Col == ti.PrimaryCol.Key {
n = 1
break
}
}
if n == 1 {
n = len(sel.OrderBy)
} else {
n = len(sel.OrderBy) + 1
}
io.WriteString(c.w, `, CONCAT_WS(','`)
for i := 0; i < n; i++ {
io.WriteString(c.w, `, max("__cur_`)
int2string(c.w, int32(i))
io.WriteString(c.w, `")`)
}
io.WriteString(c.w, `) as "cursor"`)
}
io.WriteString(c.w, ` FROM (`)
return nil
}
func (c *compilerContext) renderRootSelect(sel *qcode.Select) error {
io.WriteString(c.w, `'`)
io.WriteString(c.w, sel.FieldName)
io.WriteString(c.w, `', `)
io.WriteString(c.w, `"__sj_`)
int2string(c.w, sel.ID)
io.WriteString(c.w, `"."json"`)
if sel.Paging.Type != qcode.PtOffset {
io.WriteString(c.w, `, '`)
io.WriteString(c.w, sel.FieldName)
io.WriteString(c.w, `_cursor', `)
io.WriteString(c.w, `"__sj_`)
int2string(c.w, sel.ID)
io.WriteString(c.w, `"."cursor"`)
}
return nil
}
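
Read together, these two helpers emit the JSON wrapper; a hedged sketch of their output for a cursor-paged root named products with select ID 0 and one order-by column:

renderPluralSelect:
SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json", CONCAT_WS(',', max("__cur_0")) as "cursor" FROM (

renderRootSelect (as arguments to jsonb_build_object):
'products', "__sj_0"."json", 'products_cursor', "__sj_0"."cursor"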
func (c *compilerContext) initSelect(sel *qcode.Select, ti *DBTableInfo, vars Variables) (uint32, []*qcode.Column, error) {
var skipped uint32
cols := make([]*qcode.Column, 0, len(sel.Cols))
@ -217,168 +272,233 @@ func (c *compilerContext) processChildren(sel *qcode.Select, ti *DBTableInfo) (u
colmap[sel.Cols[i].Name] = struct{}{}
}
for i := range sel.OrderBy {
colmap[sel.OrderBy[i].Col] = struct{}{}
}
if sel.Paging.Type != qcode.PtOffset {
colmap[ti.PrimaryCol.Key] = struct{}{}
addPrimaryKey := true
for _, ob := range sel.OrderBy {
if ob.Col == ti.PrimaryCol.Key {
addPrimaryKey = false
break
}
}
if addPrimaryKey {
ob := &qcode.OrderBy{Col: ti.PrimaryCol.Name, Order: qcode.OrderAsc}
if sel.Paging.Type == qcode.PtBackward {
ob.Order = qcode.OrderDesc
}
sel.OrderBy = append(sel.OrderBy, ob)
}
}
if sel.Paging.Cursor {
c.addSeekPredicate(sel)
}
for _, id := range sel.Children {
child := &c.s[id]
rel, err := c.schema.GetRel(child.Name, ti.Name)
if err != nil {
skipped |= (1 << uint(id))
continue
return 0, nil, err
//skipped |= (1 << uint(id))
//continue
}
switch rel.Type {
case RelOneToOne, RelOneToMany:
if _, ok := colmap[rel.Right.Col]; !ok {
cols = append(cols, &qcode.Column{Table: ti.Name, Name: rel.Right.Col, FieldName: rel.Right.Col})
colmap[rel.Right.Col] = struct{}{}
}
colmap[rel.Right.Col] = struct{}{}
case RelOneToManyThrough:
if _, ok := colmap[rel.Left.Col]; !ok {
cols = append(cols, &qcode.Column{Table: ti.Name, Name: rel.Left.Col, FieldName: rel.Left.Col})
colmap[rel.Left.Col] = struct{}{}
}
case RelEmbedded:
if _, ok := colmap[rel.Left.Col]; !ok {
cols = append(cols, &qcode.Column{Table: ti.Name, Name: rel.Left.Col, FieldName: rel.Left.Col})
colmap[rel.Left.Col] = struct{}{}
}
colmap[rel.Left.Col] = struct{}{}
case RelRemote:
if _, ok := colmap[rel.Left.Col]; !ok {
cols = append(cols, &qcode.Column{Table: ti.Name, Name: rel.Left.Col, FieldName: rel.Right.Col})
colmap[rel.Left.Col] = struct{}{}
skipped |= (1 << uint(id))
}
colmap[rel.Left.Col] = struct{}{}
skipped |= (1 << uint(id))
default:
skipped |= (1 << uint(id))
return 0, nil, fmt.Errorf("unknown relationship %s", rel)
//skipped |= (1 << uint(id))
}
}
return skipped, cols
return skipped, cols, nil
}
func (c *compilerContext) renderSelect(sel *qcode.Select, ti *DBTableInfo) (uint32, error) {
skipped, childCols := c.processChildren(sel, ti)
hasOrder := len(sel.OrderBy) != 0
// This
// (A, B, C) >= (X, Y, Z)
//
// Becomes
// (A > X)
// OR ((A = X) AND (B > Y))
// OR ((A = X) AND (B = Y) AND (C > Z))
// OR ((A = X) AND (B = Y) AND (C = Z))
// SELECT
if !ti.Singular {
//fmt.Fprintf(w, `SELECT coalesce(json_agg("%s"`, c.sel.Name)
io.WriteString(c.w, `SELECT coalesce(json_agg("`)
io.WriteString(c.w, "json_")
int2string(c.w, sel.ID)
io.WriteString(c.w, `"`)
func (c *compilerContext) addSeekPredicate(sel *qcode.Select) error {
var or, and *qcode.Exp
if hasOrder {
err := c.renderOrderBy(sel, ti)
if err != nil {
return skipped, err
obLen := len(sel.OrderBy)
if obLen > 1 {
or = qcode.NewFilter()
or.Op = qcode.OpOr
}
for i := 0; i < obLen; i++ {
if i > 0 {
and = qcode.NewFilter()
and.Op = qcode.OpAnd
}
for n, ob := range sel.OrderBy {
f := qcode.NewFilter()
f.Col = ob.Col
f.Type = qcode.ValRef
f.Table = "__cur"
f.Val = ob.Col
if obLen == 1 {
qcode.AddFilter(sel, f)
return nil
}
switch {
case i > 0 && n != i:
f.Op = qcode.OpEquals
case ob.Order == qcode.OrderDesc:
f.Op = qcode.OpLesserThan
default:
f.Op = qcode.OpGreaterThan
}
if and != nil {
and.Children = append(and.Children, f)
} else {
or.Children = append(or.Children, f)
}
if n == i {
break
}
}
//fmt.Fprintf(w, `), '[]') AS "%s" FROM (`, c.sel.Name)
io.WriteString(c.w, `), '[]')`)
aliasWithID(c.w, "json", sel.ID)
io.WriteString(c.w, ` FROM (`)
if and != nil {
or.Children = append(or.Children, and)
}
}
// ROW-TO-JSON
io.WriteString(c.w, `SELECT `)
qcode.AddFilter(sel, or)
return nil
}
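
Concretely, a hedged example of the filter addSeekPredicate builds: for ORDER BY price DESC, id ASC it adds the equivalent of

(price < "__cur".price) OR ((price = "__cur".price) AND (id > "__cur".id))

i.e. the row comparison of (price, id) against the cursor values from the comment above, expanded clause by clause so each comparison can use an ordinary btree index; the "__cur" references resolve against the cursor CTE rendered by renderCursorCTE.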
if len(sel.DistinctOn) != 0 {
c.renderDistinctOn(sel, ti)
func (c *compilerContext) renderSelect(sel *qcode.Select, ti *DBTableInfo, vars Variables) (uint32, error) {
var rel *DBRel
var err error
if sel.ParentID != -1 {
parent := c.s[sel.ParentID]
rel, err = c.schema.GetRel(ti.Name, parent.Name)
if err != nil {
return 0, err
}
}
io.WriteString(c.w, `row_to_json((`)
//fmt.Fprintf(w, `SELECT "%d" FROM (SELECT `, c.sel.ID)
io.WriteString(c.w, `SELECT "json_row_`)
int2string(c.w, sel.ID)
io.WriteString(c.w, `" FROM (SELECT `)
// Combined column names
c.renderColumns(sel, ti)
c.renderRemoteRelColumns(sel, ti)
err := c.renderJoinedColumns(sel, ti, skipped)
skipped, childCols, err := c.initSelect(sel, ti, vars)
if err != nil {
return skipped, err
return 0, err
}
//fmt.Fprintf(w, `) AS "%d"`, c.sel.ID)
io.WriteString(c.w, `)`)
aliasWithID(c.w, "json_row", sel.ID)
// SELECT
// io.WriteString(c.w, `SELECT jsonb_build_object(`)
// if err := c.renderColumns(sel, ti, skipped); err != nil {
// return 0, err
// }
//fmt.Fprintf(w, `)) AS "%s"`, c.sel.Name)
io.WriteString(c.w, `))`)
aliasWithID(c.w, "json", sel.ID)
// END-ROW-TO-JSON
io.WriteString(c.w, `SELECT to_jsonb("__sr_`)
int2string(c.w, sel.ID)
io.WriteString(c.w, `") `)
if hasOrder {
c.renderOrderByColumns(sel, ti)
if sel.Paging.Type != qcode.PtOffset {
for i := range sel.OrderBy {
io.WriteString(c.w, `- '__cur_`)
int2string(c.w, int32(i))
io.WriteString(c.w, `' `)
}
}
// END-SELECT
io.WriteString(c.w, `AS "json"`)
if sel.Paging.Type != qcode.PtOffset {
for i := range sel.OrderBy {
io.WriteString(c.w, `, "__cur_`)
int2string(c.w, int32(i))
io.WriteString(c.w, `"`)
}
}
io.WriteString(c.w, ` FROM (SELECT `)
if err := c.renderColumns(sel, ti, skipped); err != nil {
return 0, err
}
if sel.Paging.Type != qcode.PtOffset {
for i, ob := range sel.OrderBy {
io.WriteString(c.w, `, LAST_VALUE(`)
colWithTableID(c.w, ti.Name, sel.ID, ob.Col)
io.WriteString(c.w, `) OVER() AS "__cur_`)
int2string(c.w, int32(i))
io.WriteString(c.w, `"`)
}
}
io.WriteString(c.w, ` FROM (`)
// FROM (SELECT .... )
err = c.renderBaseSelect(sel, ti, childCols, skipped)
err = c.renderBaseSelect(sel, ti, rel, childCols, skipped)
if err != nil {
return skipped, err
}
//fmt.Fprintf(w, `) AS "%s_%d"`, c.sel.Name, c.sel.ID)
io.WriteString(c.w, `)`)
aliasWithID(c.w, ti.Name, sel.ID)
// END-FROM
return skipped, nil
}
func (c *compilerContext) renderSelectClose(sel *qcode.Select, ti *DBTableInfo) error {
hasOrder := len(sel.OrderBy) != 0
if hasOrder {
err := c.renderOrderBy(sel, ti)
if err != nil {
return err
}
}
switch {
case ti.Singular:
io.WriteString(c.w, ` LIMIT ('1') :: integer`)
case len(sel.Paging.Limit) != 0:
//fmt.Fprintf(w, ` LIMIT ('%s') :: integer`, c.sel.Paging.Limit)
io.WriteString(c.w, ` LIMIT ('`)
io.WriteString(c.w, sel.Paging.Limit)
io.WriteString(c.w, `') :: integer`)
case sel.Paging.NoLimit:
break
default:
io.WriteString(c.w, ` LIMIT ('20') :: integer`)
}
if len(sel.Paging.Offset) != 0 {
//fmt.Fprintf(w, ` OFFSET ('%s') :: integer`, c.sel.Paging.Offset)
io.WriteString(c.w, `OFFSET ('`)
io.WriteString(c.w, sel.Paging.Offset)
io.WriteString(c.w, `') :: integer`)
}
if !ti.Singular {
//fmt.Fprintf(w, `) AS "json_agg_%d"`, c.sel.ID)
io.WriteString(c.w, `)`)
aliasWithID(c.w, "json_agg", sel.ID)
}
return nil
}
func (c *compilerContext) renderLateralJoin(sel *qcode.Select) error {
io.WriteString(c.w, ` LEFT OUTER JOIN LATERAL (`)
return nil
}
func (c *compilerContext) renderLateralJoinClose(sel *qcode.Select) error {
//fmt.Fprintf(w, `) AS "%s_%d_join" ON ('true')`, c.sel.Name, c.sel.ID)
io.WriteString(c.w, `)`)
aliasWithIDSuffix(c.w, sel.Name, sel.ID, "_join")
// io.WriteString(c.w, `) `)
// aliasWithID(c.w, "__sj", sel.ID)
io.WriteString(c.w, ` ON ('true')`)
return nil
}
@ -418,39 +538,47 @@ func (c *compilerContext) renderJoinByName(table, parent string, id int32) error
return nil
}
func (c *compilerContext) renderColumns(sel *qcode.Select, ti *DBTableInfo) {
func (c *compilerContext) renderColumns(sel *qcode.Select, ti *DBTableInfo, skipped uint32) error {
i := 0
var cn string
for _, col := range sel.Cols {
n := funcPrefixLen(col.Name)
if n != 0 {
if n := funcPrefixLen(col.Name); n != 0 {
if !sel.Functions {
continue
}
if len(sel.Allowed) != 0 {
if _, ok := sel.Allowed[col.Name[n:]]; !ok {
continue
}
}
cn = col.Name[n:]
} else {
if len(sel.Allowed) != 0 {
if _, ok := sel.Allowed[col.Name]; !ok {
continue
}
cn = col.Name
if strings.HasSuffix(cn, "_cursor") {
continue
}
}
if len(sel.Allowed) != 0 {
if _, ok := sel.Allowed[cn]; !ok {
continue
}
}
if i != 0 {
io.WriteString(c.w, ", ")
}
//fmt.Fprintf(w, `"%s_%d"."%s" AS "%s"`,
//c.sel.Name, c.sel.ID, col.Name, col.FieldName)
colWithTableIDAlias(c.w, ti.Name, sel.ID, col.Name, col.FieldName)
colWithTableID(c.w, ti.Name, sel.ID, col.Name)
alias(c.w, col.FieldName)
i++
}
i += c.renderRemoteRelColumns(sel, ti, i)
return c.renderJoinColumns(sel, ti, skipped, i)
}
func (c *compilerContext) renderRemoteRelColumns(sel *qcode.Select, ti *DBTableInfo) {
i := 0
func (c *compilerContext) renderRemoteRelColumns(sel *qcode.Select, ti *DBTableInfo, colsRendered int) int {
i := colsRendered
for _, id := range sel.Children {
child := &c.s[id]
@ -462,195 +590,78 @@ func (c *compilerContext) renderRemoteRelColumns(sel *qcode.Select, ti *DBTableI
if i != 0 || len(sel.Cols) != 0 {
io.WriteString(c.w, ", ")
}
//fmt.Fprintf(w, `"%s_%d"."%s" AS "%s"`,
//c.sel.Name, c.sel.ID, rel.Left.Col, rel.Right.Col)
colWithTableID(c.w, ti.Name, sel.ID, rel.Left.Col)
alias(c.w, rel.Right.Col)
i++
}
return i
}
func (c *compilerContext) renderJoinedColumns(sel *qcode.Select, ti *DBTableInfo, skipped uint32) error {
colsRendered := len(sel.Cols) != 0
func (c *compilerContext) renderJoinColumns(sel *qcode.Select, ti *DBTableInfo, skipped uint32, colsRendered int) error {
// columns previously rendered
i := colsRendered
for _, id := range sel.Children {
skipThis := hasBit(skipped, uint32(id))
if colsRendered && !skipThis {
io.WriteString(c.w, ", ")
}
if skipThis {
if hasBit(skipped, uint32(id)) {
continue
}
childSel := &c.s[id]
//fmt.Fprintf(w, `"%s_%d_join"."%s" AS "%s"`,
//s.Name, s.ID, s.Name, s.FieldName)
//if cti.Singular {
io.WriteString(c.w, `"`)
io.WriteString(c.w, childSel.Name)
io.WriteString(c.w, `_`)
if i != 0 {
io.WriteString(c.w, ", ")
}
if childSel.SkipRender {
io.WriteString(c.w, `NULL`)
alias(c.w, childSel.FieldName)
continue
}
io.WriteString(c.w, `"__sj_`)
int2string(c.w, childSel.ID)
io.WriteString(c.w, `_join"."json_`)
int2string(c.w, childSel.ID)
io.WriteString(c.w, `" AS "`)
io.WriteString(c.w, childSel.FieldName)
io.WriteString(c.w, `"`)
io.WriteString(c.w, `"."json"`)
alias(c.w, childSel.FieldName)
if childSel.Paging.Type != qcode.PtOffset {
io.WriteString(c.w, `, "__sj_`)
int2string(c.w, childSel.ID)
io.WriteString(c.w, `"."cursor" AS "`)
io.WriteString(c.w, childSel.FieldName)
io.WriteString(c.w, `_cursor"`)
}
i++
}
return nil
}
func (c *compilerContext) renderBaseSelect(sel *qcode.Select, ti *DBTableInfo,
func (c *compilerContext) renderBaseSelect(sel *qcode.Select, ti *DBTableInfo, rel *DBRel,
childCols []*qcode.Column, skipped uint32) error {
var groupBy []int
isRoot := sel.ParentID == -1
isRoot := (rel == nil)
isFil := (sel.Where != nil && sel.Where.Op != qcode.OpNop)
isSearch := sel.Args["search"] != nil
isAgg := false
hasOrder := len(sel.OrderBy) != 0
io.WriteString(c.w, ` FROM (SELECT `)
i := 0
for n, col := range sel.Cols {
cn := col.Name
_, isRealCol := ti.ColMap[cn]
if !isRealCol {
if isSearch {
switch {
case cn == "search_rank":
if len(sel.Allowed) != 0 {
if _, ok := sel.Allowed[cn]; !ok {
continue
}
}
if ti.TSVCol == nil {
return errors.New("no ts_vector column found")
}
cn = ti.TSVCol.Name
arg := sel.Args["search"]
if i != 0 {
io.WriteString(c.w, `, `)
}
//fmt.Fprintf(w, `ts_rank("%s"."%s", websearch_to_tsquery('%s')) AS %s`,
//c.sel.Name, cn, arg.Val, col.Name)
io.WriteString(c.w, `ts_rank(`)
colWithTable(c.w, ti.Name, cn)
if c.schema.ver >= 110000 {
io.WriteString(c.w, `, websearch_to_tsquery('`)
} else {
io.WriteString(c.w, `, to_tsquery('`)
}
io.WriteString(c.w, arg.Val)
io.WriteString(c.w, `'))`)
alias(c.w, col.Name)
i++
case strings.HasPrefix(cn, "search_headline_"):
cn1 := cn[16:]
if len(sel.Allowed) != 0 {
if _, ok := sel.Allowed[cn1]; !ok {
continue
}
}
arg := sel.Args["search"]
if i != 0 {
io.WriteString(c.w, `, `)
}
//fmt.Fprintf(w, `ts_headline("%s"."%s", websearch_to_tsquery('%s')) AS %s`,
//c.sel.Name, cn, arg.Val, col.Name)
io.WriteString(c.w, `ts_headline(`)
colWithTable(c.w, ti.Name, cn1)
if c.schema.ver >= 110000 {
io.WriteString(c.w, `, websearch_to_tsquery('`)
} else {
io.WriteString(c.w, `, to_tsquery('`)
}
io.WriteString(c.w, arg.Val)
io.WriteString(c.w, `'))`)
alias(c.w, col.Name)
i++
}
} else {
pl := funcPrefixLen(cn)
if pl == 0 {
if i != 0 {
io.WriteString(c.w, `, `)
}
//fmt.Fprintf(w, `'%s not defined' AS %s`, cn, col.Name)
io.WriteString(c.w, `'`)
io.WriteString(c.w, cn)
io.WriteString(c.w, ` not defined'`)
alias(c.w, col.Name)
i++
} else if sel.Functions {
cn1 := cn[pl:]
if len(sel.Allowed) != 0 {
if _, ok := sel.Allowed[cn1]; !ok {
continue
}
}
if i != 0 {
io.WriteString(c.w, `, `)
}
fn := cn[0 : pl-1]
isAgg = true
//fmt.Fprintf(w, `%s("%s"."%s") AS %s`, fn, c.sel.Name, cn, col.Name)
io.WriteString(c.w, fn)
io.WriteString(c.w, `(`)
colWithTable(c.w, ti.Name, cn1)
io.WriteString(c.w, `)`)
alias(c.w, col.Name)
i++
}
}
} else {
groupBy = append(groupBy, n)
//fmt.Fprintf(w, `"%s"."%s"`, c.sel.Name, cn)
if i != 0 {
io.WriteString(c.w, `, `)
}
colWithTable(c.w, ti.Name, cn)
i++
}
if sel.Paging.Cursor {
c.renderCursorCTE(sel)
}
for _, col := range childCols {
if i != 0 {
io.WriteString(c.w, `, `)
}
io.WriteString(c.w, `SELECT `)
//fmt.Fprintf(w, `"%s"."%s"`, col.Table, col.Name)
colWithTable(c.w, col.Table, col.Name)
i++
if len(sel.DistinctOn) != 0 {
c.renderDistinctOn(sel, ti)
}
realColsRendered, isAgg, err := c.renderBaseColumns(sel, ti, childCols, skipped)
if err != nil {
return err
}
io.WriteString(c.w, ` FROM `)
//fmt.Fprintf(w, ` FROM "%s"`, c.sel.Name)
io.WriteString(c.w, `"`)
io.WriteString(c.w, ti.Name)
io.WriteString(c.w, `"`)
// if tn, ok := c.tmap[sel.Name]; ok {
// //fmt.Fprintf(w, ` FROM "%s" AS "%s"`, tn, c.sel.Name)
// tableWithAlias(c.w, ti.Name, sel.Name)
// } else {
// //fmt.Fprintf(w, ` FROM "%s"`, c.sel.Name)
// io.WriteString(c.w, `"`)
// io.WriteString(c.w, sel.Name)
// io.WriteString(c.w, `"`)
// }
c.renderFrom(sel, ti, rel)
if isRoot && isFil {
io.WriteString(c.w, ` WHERE (`)
@ -666,11 +677,9 @@ func (c *compilerContext) renderBaseSelect(sel *qcode.Select, ti *DBTableInfo,
}
io.WriteString(c.w, ` WHERE (`)
if err := c.renderRelationship(sel, ti); err != nil {
return err
}
if isFil {
io.WriteString(c.w, ` AND `)
if err := c.renderWhere(sel, ti); err != nil {
@ -680,17 +689,19 @@ func (c *compilerContext) renderBaseSelect(sel *qcode.Select, ti *DBTableInfo,
io.WriteString(c.w, `)`)
}
if isAgg {
if len(groupBy) != 0 {
io.WriteString(c.w, ` GROUP BY `)
if isAgg && len(realColsRendered) != 0 {
io.WriteString(c.w, ` GROUP BY `)
for i, id := range groupBy {
if i != 0 {
io.WriteString(c.w, `, `)
}
//fmt.Fprintf(w, `"%s"."%s"`, c.sel.Name, c.sel.Cols[id].Name)
colWithTable(c.w, ti.Name, sel.Cols[id].Name)
}
for i, id := range realColsRendered {
c.renderComma(i)
//fmt.Fprintf(w, `"%s"."%s"`, c.sel.Name, c.sel.Cols[id].Name)
colWithTable(c.w, ti.Name, sel.Cols[id].Name)
}
}
if hasOrder {
if err := c.renderOrderBy(sel, ti); err != nil {
return err
}
}
@ -718,30 +729,64 @@ func (c *compilerContext) renderBaseSelect(sel *qcode.Select, ti *DBTableInfo,
io.WriteString(c.w, `') :: integer`)
}
//fmt.Fprintf(w, `) AS "%s_%d"`, c.sel.Name, c.sel.ID)
io.WriteString(c.w, `)`)
aliasWithID(c.w, ti.Name, sel.ID)
return nil
}
func (c *compilerContext) renderFrom(sel *qcode.Select, ti *DBTableInfo, rel *DBRel) error {
if rel != nil && rel.Type == RelEmbedded {
// jsonb_to_recordset('[{"a":1,"b":[1,2,3],"c":"bar"}, {"a":2,"b":[1,2,3],"c":"bar"}]') as x(a int, b int[], c text);
io.WriteString(c.w, `"`)
io.WriteString(c.w, rel.Left.Table)
io.WriteString(c.w, `", `)
io.WriteString(c.w, ti.Type)
io.WriteString(c.w, `_to_recordset(`)
colWithTable(c.w, rel.Left.Table, rel.Right.Col)
io.WriteString(c.w, `) AS `)
io.WriteString(c.w, `"`)
io.WriteString(c.w, ti.Name)
io.WriteString(c.w, `"`)
io.WriteString(c.w, `(`)
for i, col := range ti.Columns {
if i != 0 {
io.WriteString(c.w, `, `)
}
io.WriteString(c.w, col.Name)
io.WriteString(c.w, ` `)
io.WriteString(c.w, col.Type)
}
io.WriteString(c.w, `)`)
} else {
//fmt.Fprintf(w, ` FROM "%s"`, c.sel.Name)
io.WriteString(c.w, `"`)
io.WriteString(c.w, ti.Name)
io.WriteString(c.w, `"`)
}
if sel.Paging.Cursor {
io.WriteString(c.w, `, "__cur"`)
}
return nil
}
func (c *compilerContext) renderOrderByColumns(sel *qcode.Select, ti *DBTableInfo) {
colsRendered := len(sel.Cols) != 0
for i := range sel.OrderBy {
if colsRendered {
//io.WriteString(w, ", ")
func (c *compilerContext) renderCursorCTE(sel *qcode.Select) error {
io.WriteString(c.w, `WITH "__cur" AS (SELECT `)
for i, ob := range sel.OrderBy {
if i != 0 {
io.WriteString(c.w, `, `)
}
col := sel.OrderBy[i].Col
//fmt.Fprintf(w, `"%s_%d"."%s" AS "%s_%d_%s_ob"`,
//c.sel.Name, c.sel.ID, c,
//c.sel.Name, c.sel.ID, c)
colWithTableID(c.w, ti.Name, sel.ID, col)
io.WriteString(c.w, ` AS `)
tableIDColSuffix(c.w, sel.Name, sel.ID, col, "_ob")
io.WriteString(c.w, `a[`)
int2string(c.w, int32(i+1))
io.WriteString(c.w, `] as `)
quoted(c.w, ob.Col)
}
io.WriteString(c.w, ` FROM string_to_array('{{cursor}}', ',') as a) `)
return nil
}
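
For the same two-column ordering, a hedged sketch of this CTE's output (the {{cursor}} placeholder is bound at request time, and the column names are hypothetical):

WITH "__cur" AS (SELECT a[1] as "price", a[2] as "id" FROM string_to_array('{{cursor}}', ',') as a)

These are the "__cur" columns that the seek predicate and renderFrom refer to.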
func (c *compilerContext) renderRelationship(sel *qcode.Select, ti *DBTableInfo) error {
@ -807,7 +852,13 @@ func (c *compilerContext) renderRelationshipByName(table, parent string, id int3
io.WriteString(c.w, `) = (`)
colWithTable(c.w, rel.Through, rel.Right.Col)
}
case RelEmbedded:
colWithTable(c.w, rel.Left.Table, rel.Left.Col)
io.WriteString(c.w, `) = (`)
colWithTableID(c.w, parent, id, rel.Left.Col)
}
io.WriteString(c.w, `))`)
return nil
@ -858,7 +909,6 @@ func (c *compilerContext) renderExp(ex *qcode.Exp, ti *DBTableInfo, skipNested b
switch val.Op {
case qcode.OpFalse:
st.Push(val.Op)
qcode.FreeExp(val)
case qcode.OpAnd, qcode.OpOr:
st.Push(')')
@ -869,12 +919,12 @@ func (c *compilerContext) renderExp(ex *qcode.Exp, ti *DBTableInfo, skipNested b
}
}
st.Push('(')
qcode.FreeExp(val)
case qcode.OpNot:
//fmt.Printf("1> %s %d %s %s\n", val.Op, len(val.Children), val.Children[0].Op, val.Children[1].Op)
st.Push(val.Children[0])
st.Push(qcode.OpNot)
qcode.FreeExp(val)
default:
if !skipNested && len(val.NestedCols) != 0 {
@ -889,14 +939,13 @@ func (c *compilerContext) renderExp(ex *qcode.Exp, ti *DBTableInfo, skipNested b
if err := c.renderOp(val, ti); err != nil {
return err
}
qcode.FreeExp(val)
}
}
//qcode.FreeExp(val)
default:
return fmt.Errorf("12: unexpected value %v (%t)", intf, intf)
}
}
return nil
@ -963,8 +1012,12 @@ func (c *compilerContext) renderOp(ex *qcode.Exp, ti *DBTableInfo) error {
switch ex.Op {
case qcode.OpEquals:
io.WriteString(c.w, `IS NOT DISTINCT FROM`)
io.WriteString(c.w, `=`)
case qcode.OpNotEquals:
io.WriteString(c.w, `!=`)
case qcode.OpNotDistinct:
io.WriteString(c.w, `IS NOT DISTINCT FROM`)
case qcode.OpDistinct:
io.WriteString(c.w, `IS DISTINCT FROM`)
case qcode.OpGreaterOrEquals:
io.WriteString(c.w, `>=`)
@ -1027,23 +1080,24 @@ func (c *compilerContext) renderOp(ex *qcode.Exp, ti *DBTableInfo) error {
io.WriteString(c.w, `((`)
colWithTable(c.w, ti.Name, ti.TSVCol.Name)
if c.schema.ver >= 110000 {
io.WriteString(c.w, `) @@ websearch_to_tsquery('`)
io.WriteString(c.w, `) @@ websearch_to_tsquery('{{`)
} else {
io.WriteString(c.w, `) @@ to_tsquery('`)
io.WriteString(c.w, `) @@ to_tsquery('{{`)
}
io.WriteString(c.w, ex.Val)
io.WriteString(c.w, `'))`)
io.WriteString(c.w, `}}'))`)
return nil
default:
return fmt.Errorf("[Where] unexpected op code %d", ex.Op)
}
if ex.Type == qcode.ValList {
switch {
case ex.Type == qcode.ValList:
c.renderList(ex)
} else if col == nil {
case col == nil:
return errors.New("no column found for expression value")
} else {
default:
c.renderVal(ex, c.vars, col)
}
@ -1058,31 +1112,20 @@ func (c *compilerContext) renderOrderBy(sel *qcode.Select, ti *DBTableInfo) erro
io.WriteString(c.w, `, `)
}
ob := sel.OrderBy[i]
colWithTable(c.w, ti.Name, ob.Col)
switch ob.Order {
case qcode.OrderAsc:
//fmt.Fprintf(w, `"%s_%d.ob.%s" ASC`, sel.Name, sel.ID, ob.Col)
tableIDColSuffix(c.w, ti.Name, sel.ID, ob.Col, "_ob")
io.WriteString(c.w, ` ASC`)
case qcode.OrderDesc:
//fmt.Fprintf(w, `"%s_%d.ob.%s" DESC`, sel.Name, sel.ID, ob.Col)
tableIDColSuffix(c.w, ti.Name, sel.ID, ob.Col, "_ob")
io.WriteString(c.w, ` DESC`)
case qcode.OrderAscNullsFirst:
//fmt.Fprintf(w, `"%s_%d.ob.%s" ASC NULLS FIRST`, sel.Name, sel.ID, ob.Col)
tableIDColSuffix(c.w, ti.Name, sel.ID, ob.Col, "_ob")
io.WriteString(c.w, ` ASC NULLS FIRST`)
case qcode.OrderDescNullsFirst:
//fmt.Fprintf(w, `%s_%d.ob.%s DESC NULLS FIRST`, sel.Name, sel.ID, ob.Col)
tableIDColSuffix(c.w, ti.Name, sel.ID, ob.Col, "_ob")
io.WriteString(c.w, ` DESC NULLS FIRST`)
case qcode.OrderAscNullsLast:
//fmt.Fprintf(w, `"%s_%d.ob.%s ASC NULLS LAST`, sel.Name, sel.ID, ob.Col)
tableIDColSuffix(c.w, ti.Name, sel.ID, ob.Col, "_ob")
io.WriteString(c.w, ` ASC NULLS LAST`)
case qcode.OrderDescNullsLast:
//fmt.Fprintf(w, `%s_%d.ob.%s DESC NULLS LAST`, sel.Name, sel.ID, ob.Col)
tableIDColSuffix(c.w, ti.Name, sel.ID, ob.Col, "_ob")
io.WriteString(c.w, ` DESC NULLS LAST`)
default:
return fmt.Errorf("13: unexpected value %v", ob.Order)
@ -1097,8 +1140,7 @@ func (c *compilerContext) renderDistinctOn(sel *qcode.Select, ti *DBTableInfo) {
if i != 0 {
io.WriteString(c.w, `, `)
}
//fmt.Fprintf(w, `"%s_%d.ob.%s"`, c.sel.Name, c.sel.ID, c.sel.DistinctOn[i])
tableIDColSuffix(c.w, ti.Name, sel.ID, sel.DistinctOn[i], "_ob")
colWithTable(c.w, ti.Name, sel.DistinctOn[i])
}
io.WriteString(c.w, `) `)
}
@ -1125,32 +1167,30 @@ func (c *compilerContext) renderVal(ex *qcode.Exp, vars map[string]string, col *
io.WriteString(c.w, ` `)
switch ex.Type {
case qcode.ValBool, qcode.ValInt, qcode.ValFloat:
if len(ex.Val) != 0 {
io.WriteString(c.w, ex.Val)
} else {
io.WriteString(c.w, `''`)
}
case qcode.ValStr:
io.WriteString(c.w, `'`)
io.WriteString(c.w, ex.Val)
io.WriteString(c.w, `'`)
case qcode.ValVar:
io.WriteString(c.w, `'`)
if val, ok := vars[ex.Val]; ok {
io.WriteString(c.w, val)
} else {
//fmt.Fprintf(w, `'{{%s}}'`, ex.Val)
io.WriteString(c.w, `{{`)
val, ok := vars[ex.Val]
switch {
case ok && strings.HasPrefix(val, "sql:"):
io.WriteString(c.w, ` (`)
io.WriteString(c.w, val[4:])
io.WriteString(c.w, `)`)
case ok:
squoted(c.w, val)
default:
io.WriteString(c.w, ` '{{`)
io.WriteString(c.w, ex.Val)
io.WriteString(c.w, `}}`)
io.WriteString(c.w, `}}'`)
}
io.WriteString(c.w, `' :: `)
io.WriteString(c.w, col.Type)
case qcode.ValRef:
colWithTable(c.w, ex.Table, ex.Col)
default:
squoted(c.w, ex.Val)
}
//io.WriteString(c.w, `)`)
io.WriteString(c.w, ` :: `)
io.WriteString(c.w, col.Type)
}
func funcPrefixLen(fn string) int {
@ -1200,15 +1240,6 @@ func aliasWithID(w io.Writer, alias string, id int32) {
io.WriteString(w, `"`)
}
func aliasWithIDSuffix(w io.Writer, alias string, id int32, suffix string) {
io.WriteString(w, ` AS "`)
io.WriteString(w, alias)
io.WriteString(w, `_`)
int2string(w, id)
io.WriteString(w, suffix)
io.WriteString(w, `"`)
}
func colWithTable(w io.Writer, table, col string) {
io.WriteString(w, `"`)
io.WriteString(w, table)
@ -1229,27 +1260,16 @@ func colWithTableID(w io.Writer, table string, id int32, col string) {
io.WriteString(w, `"`)
}
func colWithTableIDAlias(w io.Writer, table string, id int32, col, alias string) {
func quoted(w io.Writer, identifier string) {
io.WriteString(w, `"`)
io.WriteString(w, table)
io.WriteString(w, `_`)
int2string(w, id)
io.WriteString(w, `"."`)
io.WriteString(w, col)
io.WriteString(w, `" AS "`)
io.WriteString(w, alias)
io.WriteString(w, identifier)
io.WriteString(w, `"`)
}
func tableIDColSuffix(w io.Writer, table string, id int32, col, suffix string) {
io.WriteString(w, `"`)
io.WriteString(w, table)
io.WriteString(w, `_`)
int2string(w, id)
io.WriteString(w, `_`)
io.WriteString(w, col)
io.WriteString(w, suffix)
io.WriteString(w, `"`)
func squoted(w io.Writer, identifier string) {
io.WriteString(w, `'`)
io.WriteString(w, identifier)
io.WriteString(w, `'`)
}
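
As a one-line illustration of the two helpers: quoted writes a double-quoted SQL identifier, squoted a single-quoted literal.

quoted(w, "users")  // emits "users"
squoted(w, "now")   // emits 'now'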
const charset = "0123456789"

View File

@ -0,0 +1,459 @@
package psql
import (
"bytes"
"encoding/json"
"testing"
)
func withComplexArgs(t *testing.T) {
gql := `query {
proDUcts(
# returns only 30 items
limit: 30,
# starts from item 10, commented out for now
# offset: 10,
# orders the response items by highest price
order_by: { price: desc },
# no duplicate prices returned
distinct: [ price ]
# only items with an id >= 20 and < 28 are returned
where: { id: { and: { greater_or_equals: 20, lt: 28 } } }) {
id
NAME
price
}
}`
compileGQLToPSQL(t, gql, nil, "user")
}
func withWhereAndList(t *testing.T) {
gql := `query {
products(
where: {
and: [
{ not: { id: { is_null: true } } },
{ price: { gt: 10 } },
] } ) {
id
name
price
}
}`
compileGQLToPSQL(t, gql, nil, "user")
}
func withWhereIsNull(t *testing.T) {
gql := `query {
products(
where: {
and: {
not: { id: { is_null: true } },
price: { gt: 10 }
}}) {
id
name
price
}
}`
compileGQLToPSQL(t, gql, nil, "user")
}
func withWhereMultiOr(t *testing.T) {
gql := `query {
products(
where: {
or: {
not: { id: { is_null: true } },
price: { gt: 10 },
price: { lt: 20 }
} }
) {
id
name
price
}
}`
compileGQLToPSQL(t, gql, nil, "user")
}
func fetchByID(t *testing.T) {
gql := `query {
product(id: $id) {
id
name
}
}`
compileGQLToPSQL(t, gql, nil, "user")
}
func searchQuery(t *testing.T) {
gql := `query {
products(search: $query) {
id
name
search_rank
search_headline_description
}
}`
compileGQLToPSQL(t, gql, nil, "admin")
}
func oneToMany(t *testing.T) {
gql := `query {
users {
email
products {
name
price
}
}
}`
compileGQLToPSQL(t, gql, nil, "user")
}
func oneToManyReverse(t *testing.T) {
gql := `query {
products {
name
price
users {
email
}
}
}`
compileGQLToPSQL(t, gql, nil, "user")
}
func oneToManyArray(t *testing.T) {
gql := `
query {
product {
name
price
tags {
id
name
}
}
tags {
name
product {
name
}
}
}`
compileGQLToPSQL(t, gql, nil, "admin")
}
func manyToMany(t *testing.T) {
gql := `query {
products {
name
customers {
email
full_name
}
}
}`
compileGQLToPSQL(t, gql, nil, "user")
}
func manyToManyReverse(t *testing.T) {
gql := `query {
customers {
email
full_name
products {
name
}
}
}`
compileGQLToPSQL(t, gql, nil, "user")
}
func aggFunction(t *testing.T) {
gql := `query {
products {
name
count_price
}
}`
compileGQLToPSQL(t, gql, nil, "user")
}
func aggFunctionBlockedByCol(t *testing.T) {
gql := `query {
products {
name
count_price
}
}`
compileGQLToPSQL(t, gql, nil, "anon")
}
func aggFunctionDisabled(t *testing.T) {
gql := `query {
products {
name
count_price
}
}`
compileGQLToPSQL(t, gql, nil, "anon1")
}
func aggFunctionWithFilter(t *testing.T) {
gql := `query {
products(where: { id: { gt: 10 } }) {
id
max_price
}
}`
compileGQLToPSQL(t, gql, nil, "user")
}
func syntheticTables(t *testing.T) {
gql := `query {
me {
email
}
}`
compileGQLToPSQL(t, gql, nil, "user")
}
func queryWithVariables(t *testing.T) {
gql := `query {
product(id: $PRODUCT_ID, where: { price: { eq: $PRODUCT_PRICE } }) {
id
name
}
}`
compileGQLToPSQL(t, gql, nil, "user")
}
func withWhereOnRelations(t *testing.T) {
gql := `query {
users(where: {
not: {
products: {
price: { gt: 3 }
}
}
}) {
id
email
}
}`
compileGQLToPSQL(t, gql, nil, "user")
}
func multiRoot(t *testing.T) {
gql := `query {
product {
id
name
customer {
email
}
customers {
email
}
}
user {
id
email
}
customer {
id
}
}`
compileGQLToPSQL(t, gql, nil, "user")
}
func withCursor(t *testing.T) {
gql := `query {
Products(
first: 20
after: $cursor
order_by: { price: desc }) {
Name
}
}`
vars := map[string]json.RawMessage{
"cursor": json.RawMessage(`"0,1"`),
}
compileGQLToPSQL(t, gql, vars, "admin")
}
func jsonColumnAsTable(t *testing.T) {
gql := `query {
products {
id
name
tag_count {
count
tags {
name
}
}
}
}`
compileGQLToPSQL(t, gql, nil, "admin")
}
func nullForAuthRequiredInAnon(t *testing.T) {
gql := `query {
products {
id
name
user(where: { id: { eq: $user_id } }) {
id
email
}
}
}`
compileGQLToPSQL(t, gql, nil, "anon")
}
func blockedQuery(t *testing.T) {
gql := `query {
user(id: $id, where: { id: { gt: 3 } }) {
id
full_name
email
}
}`
compileGQLToPSQL(t, gql, nil, "bad_dude")
}
func blockedFunctions(t *testing.T) {
gql := `query {
users {
count_id
email
}
}`
compileGQLToPSQL(t, gql, nil, "bad_dude")
}
func TestCompileQuery(t *testing.T) {
t.Run("withComplexArgs", withComplexArgs)
t.Run("withWhereAndList", withWhereAndList)
t.Run("withWhereIsNull", withWhereIsNull)
t.Run("withWhereMultiOr", withWhereMultiOr)
t.Run("fetchByID", fetchByID)
t.Run("searchQuery", searchQuery)
t.Run("oneToMany", oneToMany)
t.Run("oneToManyReverse", oneToManyReverse)
t.Run("oneToManyArray", oneToManyArray)
t.Run("manyToMany", manyToMany)
t.Run("manyToManyReverse", manyToManyReverse)
t.Run("aggFunction", aggFunction)
t.Run("aggFunctionBlockedByCol", aggFunctionBlockedByCol)
t.Run("aggFunctionDisabled", aggFunctionDisabled)
t.Run("aggFunctionWithFilter", aggFunctionWithFilter)
t.Run("syntheticTables", syntheticTables)
t.Run("queryWithVariables", queryWithVariables)
t.Run("withWhereOnRelations", withWhereOnRelations)
t.Run("multiRoot", multiRoot)
t.Run("jsonColumnAsTable", jsonColumnAsTable)
t.Run("withCursor", withCursor)
t.Run("nullForAuthRequiredInAnon", nullForAuthRequiredInAnon)
t.Run("blockedQuery", blockedQuery)
t.Run("blockedFunctions", blockedFunctions)
}
var benchGQL = []byte(`query {
proDUcts(
# returns only 30 items
limit: 30,
# starts from item 10, commented out for now
# offset: 10,
# orders the response items by highest price
order_by: { price: desc },
# only items with an id >= 20 and < 28 are returned
where: { id: { and: { greater_or_equals: 20, lt: 28 } } }) {
id
NAME
price
user {
full_name
picture : avatar
}
}
}`)
func BenchmarkCompile(b *testing.B) {
w := &bytes.Buffer{}
b.ResetTimer()
b.ReportAllocs()
for n := 0; n < b.N; n++ {
w.Reset()
qc, err := qcompile.Compile(benchGQL, "user")
if err != nil {
b.Fatal(err)
}
_, err = pcompile.Compile(qc, w, nil)
if err != nil {
b.Fatal(err)
}
}
}
func BenchmarkCompileParallel(b *testing.B) {
b.ReportAllocs()
b.RunParallel(func(pb *testing.PB) {
w := &bytes.Buffer{}
for pb.Next() {
w.Reset()
qc, err := qcompile.Compile(benchGQL, "user")
if err != nil {
b.Fatal(err)
}
_, err = pcompile.Compile(qc, w, nil)
if err != nil {
b.Fatal(err)
}
}
})
}

View File

@ -15,6 +15,7 @@ type DBSchema struct {
type DBTableInfo struct {
Name string
Type string
Singular bool
Columns []DBColumn
PrimaryCol *DBColumn
@ -29,6 +30,7 @@ const (
RelOneToOne RelType = iota + 1
RelOneToMany
RelOneToManyThrough
RelEmbedded
RelRemote
)
@ -51,7 +53,6 @@ type DBRel struct {
}
func NewDBSchema(info *DBInfo, aliases map[string][]string) (*DBSchema, error) {
schema := &DBSchema{
t: make(map[string]*DBTableInfo),
rm: make(map[string]map[string]*DBRel),
@ -65,7 +66,14 @@ func NewDBSchema(info *DBInfo, aliases map[string][]string) (*DBSchema, error) {
}
for i, t := range info.Tables {
err := schema.updateRelationships(t, info.Columns[i])
err := schema.firstDegreeRels(t, info.Columns[i])
if err != nil {
return nil, err
}
}
for i, t := range info.Tables {
err := schema.secondDegreeRels(t, info.Columns[i])
if err != nil {
return nil, err
}
@ -83,6 +91,7 @@ func (s *DBSchema) addTable(
singular := flect.Singularize(t.Key)
s.t[singular] = &DBTableInfo{
Name: t.Name,
Type: t.Type,
Singular: true,
Columns: cols,
ColMap: colmap,
@ -92,6 +101,7 @@ func (s *DBSchema) addTable(
plural := flect.Pluralize(t.Key)
s.t[plural] = &DBTableInfo{
Name: t.Name,
Type: t.Type,
Singular: false,
Columns: cols,
ColMap: colmap,
@ -128,28 +138,53 @@ func (s *DBSchema) addTable(
return nil
}
func (s *DBSchema) updateRelationships(t DBTable, cols []DBColumn) error {
jcols := make([]DBColumn, 0, len(cols))
func (s *DBSchema) firstDegreeRels(t DBTable, cols []DBColumn) error {
ct := t.Key
cti, ok := s.t[ct]
if !ok {
return fmt.Errorf("invalid foreign key table '%s'", ct)
}
for _, c := range cols {
if len(c.FKeyTable) == 0 || len(c.FKeyColID) == 0 {
for i := range cols {
c := cols[i]
if len(c.FKeyTable) == 0 {
continue
}
// Foreign key table name
ft := strings.ToLower(c.FKeyTable)
fcid := c.FKeyColID[0]
ti, ok := s.t[ft]
if !ok {
return fmt.Errorf("invalid foreign key table '%s'", ft)
}
// This is an embedded relationship like when a json/jsonb column
// is exposed as a table
if c.Name == c.FKeyTable && len(c.FKeyColID) == 0 {
rel := &DBRel{Type: RelEmbedded}
rel.Left.col = cti.PrimaryCol
rel.Left.Table = cti.Name
rel.Left.Col = cti.PrimaryCol.Name
rel.Right.col = &c
rel.Right.Table = ti.Name
rel.Right.Col = c.Name
if err := s.SetRel(ft, ct, rel); err != nil {
return err
}
continue
}
if len(c.FKeyColID) == 0 {
continue
}
// Foreign key column id
fcid := c.FKeyColID[0]
fc, ok := ti.ColIDMap[fcid]
if !ok {
return fmt.Errorf("invalid foreign key column id '%d' for table '%s'",
@ -188,10 +223,12 @@ func (s *DBSchema) updateRelationships(t DBTable, cols []DBColumn) error {
rel2 = &DBRel{Type: RelOneToMany}
}
rel2.Left.col = fc
rel2.Left.Table = c.FKeyTable
rel2.Left.Col = fc.Name
rel2.Left.Array = fc.Array
rel2.Right.col = &c
rel2.Right.Table = t.Name
rel2.Right.Col = c.Name
rel2.Right.Array = c.Array
@ -199,6 +236,51 @@ func (s *DBSchema) updateRelationships(t DBTable, cols []DBColumn) error {
if err := s.SetRel(ft, ct, rel2); err != nil {
return err
}
}
return nil
}
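To make the embedded case concrete: a json/jsonb column such as products.tag_count, whose name matches a table registered for it, is linked to its parent through the parent's primary key rather than through a foreign-key column. A sketch of the relationship firstDegreeRels records for it (values assume the tag_count fixture used in the tests):

// Sketch only: the DBRel built for an embedded json column.
rel := &DBRel{Type: RelEmbedded}
rel.Left.Table = "products"   // parent table
rel.Left.Col = "id"           // parent's primary key
rel.Right.Table = "tag_count" // synthetic table backed by the json column
rel.Right.Col = "tag_count"   // the json column itself
// stored via s.SetRel("tag_count", "products", rel)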
func (s *DBSchema) secondDegreeRels(t DBTable, cols []DBColumn) error {
jcols := make([]DBColumn, 0, len(cols))
ct := t.Key
cti, ok := s.t[ct]
if !ok {
return fmt.Errorf("invalid foreign key table '%s'", ct)
}
for i := range cols {
c := cols[i]
if len(c.FKeyTable) == 0 {
continue
}
// Foreign key table name
ft := strings.ToLower(c.FKeyTable)
ti, ok := s.t[ft]
if !ok {
return fmt.Errorf("invalid foreign key table '%s'", ft)
}
// This is an embedded relationship like when a json/jsonb column
// is exposed as a table
if c.Name == c.FKeyTable && len(c.FKeyColID) == 0 {
continue
}
if len(c.FKeyColID) == 0 {
continue
}
// Foreign key column id
fcid := c.FKeyColID[0]
if _, ok := ti.ColIDMap[fcid]; !ok {
return fmt.Errorf("invalid foreign key column id '%d' for table '%s'",
fcid, ti.Name)
}
jcols = append(jcols, c)
}
@ -249,9 +331,11 @@ func (s *DBSchema) updateSchemaOTMT(
rel1.Through = ti.Name
rel1.ColT = col2.Name
rel1.Left.col = &col2
rel1.Left.Table = col2.FKeyTable
rel1.Left.Col = fc2.Name
rel1.Right.col = &col1
rel1.Right.Table = ti.Name
rel1.Right.Col = col1.Name
@ -265,9 +349,11 @@ func (s *DBSchema) updateSchemaOTMT(
rel2.Through = ti.Name
rel2.ColT = col1.Name
rel2.Left.col = fc1
rel2.Left.Table = col1.FKeyTable
rel2.Left.Col = fc1.Name
rel2.Right.col = &col2
rel2.Right.Table = ti.Name
rel2.Right.Col = col2.Name
@ -287,6 +373,9 @@ func (s *DBSchema) GetTable(table string) (*DBTableInfo, error) {
}
func (s *DBSchema) SetRel(child, parent string, rel *DBRel) error {
sp := strings.ToLower(flect.Singularize(parent))
pp := strings.ToLower(flect.Pluralize(parent))
sc := strings.ToLower(flect.Singularize(child))
pc := strings.ToLower(flect.Pluralize(child))
@ -298,9 +387,6 @@ func (s *DBSchema) SetRel(child, parent string, rel *DBRel) error {
s.rm[pc] = make(map[string]*DBRel)
}
sp := strings.ToLower(flect.Singularize(parent))
pp := strings.ToLower(flect.Pluralize(parent))
if _, ok := s.rm[sc][sp]; !ok {
s.rm[sc][sp] = rel
}
@ -320,8 +406,21 @@ func (s *DBSchema) SetRel(child, parent string, rel *DBRel) error {
func (s *DBSchema) GetRel(child, parent string) (*DBRel, error) {
rel, ok := s.rm[child][parent]
if !ok {
return nil, fmt.Errorf("unknown relationship '%s' -> '%s'",
child, parent)
// No relationship found so this time fetch the table info
// and try again in case child or parent was an alias
ct, err := s.GetTable(child)
if err != nil {
return nil, err
}
pt, err := s.GetTable(parent)
if err != nil {
return nil, err
}
rel, ok = s.rm[ct.Name][pt.Name]
if !ok {
return nil, fmt.Errorf("unknown relationship '%s' -> '%s'",
child, parent)
}
}
return rel, nil
}
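The retry matters for aliased tables: the first lookup under the alias misses, so both names are resolved through GetTable and the canonical pair is tried before erroring out. A hedged sketch (the "me" alias below is illustrative):

// Sketch: "me" aliased to "users"; rm["me"]["products"] misses,
// so GetRel retries as rm["users"]["products"].
rel, err := schema.GetRel("me", "products")
if err != nil {
	log.Fatal(err)
}
fmt.Println(rel)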

View File

@ -12,11 +12,17 @@ func (rt RelType) String() string {
return "one to many through"
case RelRemote:
return "remote"
case RelEmbedded:
return "embedded"
}
return ""
}
func (re *DBRel) String() string {
if re.Type == RelOneToManyThrough {
return fmt.Sprintf("'%s.%s' --(Through: %s)--> '%s.%s'",
re.Left.Table, re.Left.Col, re.Through, re.Right.Table, re.Right.Col)
}
return fmt.Sprintf("'%s.%s' --(%s)--> '%s.%s'",
re.Left.Table, re.Left.Col, re.Type, re.Right.Table, re.Right.Col)
}
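For example (illustrative renderings based on the field assignments above, not captured output):

fmt.Println(rel1) // e.g. 'customers.id' --(Through: purchases)--> 'purchases.product_id'
fmt.Println(rel2) // e.g. 'users.id' --(one to many)--> 'products.user_id'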

View File

@ -1,13 +1,12 @@
package psql
import (
"context"
"database/sql"
"fmt"
"strconv"
"strings"
"github.com/jackc/pgtype"
"github.com/jackc/pgx/v4/pgxpool"
)
type DBInfo struct {
@ -17,18 +16,11 @@ type DBInfo struct {
colmap map[string]map[string]*DBColumn
}
func GetDBInfo(db *pgxpool.Pool) (*DBInfo, error) {
func GetDBInfo(db *sql.DB) (*DBInfo, error) {
di := &DBInfo{}
dbc, err := db.Acquire(context.Background())
if err != nil {
return nil, fmt.Errorf("error acquiring connection from pool: %w", err)
}
defer dbc.Release()
var version string
err = dbc.QueryRow(context.Background(), `SHOW server_version_num`).Scan(&version)
err := db.QueryRow(`SHOW server_version_num`).Scan(&version)
if err != nil {
return nil, fmt.Errorf("error fetching version: %w", err)
}
@ -38,7 +30,7 @@ func GetDBInfo(db *pgxpool.Pool) (*DBInfo, error) {
return nil, err
}
di.Tables, err = GetTables(dbc)
di.Tables, err = GetTables(db)
if err != nil {
return nil, err
}
@ -46,7 +38,7 @@ func GetDBInfo(db *pgxpool.Pool) (*DBInfo, error) {
di.colmap = make(map[string]map[string]*DBColumn, len(di.Tables))
for i, t := range di.Tables {
cols, err := GetColumns(dbc, "public", t.Name)
cols, err := GetColumns(db, "public", t.Name)
if err != nil {
return nil, err
}
@ -62,6 +54,20 @@ func GetDBInfo(db *pgxpool.Pool) (*DBInfo, error) {
return di, nil
}
func (di *DBInfo) AddTable(t DBTable, cols []DBColumn) {
t.ID = di.Tables[len(di.Tables)-1].ID
di.Tables = append(di.Tables, t)
di.colmap[t.Key] = make(map[string]*DBColumn, len(cols))
for i := range cols {
cols[i].ID = int16(i)
c := &cols[i]
di.colmap[t.Key][c.Key] = c
}
di.Columns = append(di.Columns, cols)
}
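AddTable is the hook for grafting synthetic tables onto the introspected schema, such as json columns exposed as tables. A minimal sketch mirroring the tag_count fixture from the tests (the Key fields are assumed to be the lowercased names, per the colmap usage above):

// Sketch: registering a synthetic table backed by a json column.
di.AddTable(
	DBTable{Name: "tag_count", Key: "tag_count", Type: "json"},
	[]DBColumn{
		{Name: "tag_id", Key: "tag_id", Type: "bigint", FKeyTable: "tags", FKeyColID: []int16{1}},
		{Name: "count", Key: "count", Type: "int"},
	},
)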
func (di *DBInfo) GetColumn(table, column string) (*DBColumn, bool) {
v, ok := di.colmap[strings.ToLower(table)][strings.ToLower(column)]
return v, ok
@ -74,7 +80,7 @@ type DBTable struct {
Type string
}
func GetTables(dbc *pgxpool.Conn) ([]DBTable, error) {
func GetTables(db *sql.DB) ([]DBTable, error) {
sqlStmt := `
SELECT
c.relname as "name",
@ -93,7 +99,7 @@ AND pg_catalog.pg_table_is_visible(c.oid);`
var tables []DBTable
rows, err := dbc.Query(context.Background(), sqlStmt)
rows, err := db.Query(sqlStmt)
if err != nil {
return nil, fmt.Errorf("Error fetching tables: %s", err)
}
@ -128,7 +134,7 @@ type DBColumn struct {
fKeyColID pgtype.Int2Array
}
func GetColumns(dbc *pgxpool.Conn, schema, table string) ([]DBColumn, error) {
func GetColumns(db *sql.DB, schema, table string) ([]DBColumn, error) {
sqlStmt := `
SELECT
f.attnum AS id,
@ -137,6 +143,7 @@ SELECT
pg_catalog.format_type(f.atttypid,f.atttypmod) AS type,
CASE
WHEN f.attndims != 0 THEN true
WHEN right(pg_catalog.format_type(f.atttypid,f.atttypmod), 2) = '[]' THEN true
ELSE false
END AS array,
CASE
@ -152,7 +159,7 @@ SELECT
ELSE ''::text
END AS foreignkey,
CASE
WHEN p.contype = ('f'::char) THEN p.confkey
WHEN p.contype = ('f'::char) THEN p.confkey::int2[]
ELSE ARRAY[]::int2[]
END AS foreignkey_fieldnum
FROM pg_attribute f
@ -161,14 +168,14 @@ FROM pg_attribute f
LEFT JOIN pg_namespace n ON n.oid = c.relnamespace
LEFT JOIN pg_constraint p ON p.conrelid = c.oid AND f.attnum = ANY (p.conkey)
LEFT JOIN pg_class AS g ON p.confrelid = g.oid
WHERE c.relkind = ('r'::char)
WHERE c.relkind IN ('r', 'v', 'm', 'f')
AND n.nspname = $1 -- Replace with Schema name
AND c.relname = $2 -- Replace with table name
AND f.attnum > 0
AND f.attisdropped = false
ORDER BY id;`
rows, err := dbc.Query(context.Background(), sqlStmt, schema, table)
rows, err := db.Query(sqlStmt, schema, table)
if err != nil {
return nil, fmt.Errorf("error fetching columns: %s", err)
}
@ -229,3 +236,13 @@ ORDER BY id;`
return cols, nil
}
// func GetValType(type string) qcode.ValType {
// switch {
// case "bigint", "integer", "smallint", "numeric", "bigserial":
// return qcode.ValInt
// case "double precision", "real":
// return qcode.ValFloat
// case ""
// }
// }
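With pgxpool gone, any *sql.DB handle works here; the pgx stdlib driver is one way to get one. A minimal sketch (the driver choice and DSN are illustrative, not prescribed by this change):

import (
	"database/sql"
	"log"

	_ "github.com/jackc/pgx/v4/stdlib" // registers the "pgx" driver
)

func mustDBInfo() *DBInfo {
	db, err := sql.Open("pgx", "postgres://user:pass@localhost:5432/app")
	if err != nil {
		log.Fatal(err)
	}
	info, err := GetDBInfo(db)
	if err != nil {
		log.Fatal(err)
	}
	return info
}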

View File

@ -2,138 +2,17 @@ package psql
import (
"log"
"os"
"strings"
"testing"
"github.com/dosco/super-graph/qcode"
)
const (
errNotExpected = "Generated SQL did not match what was expected"
)
var (
qcompile *qcode.Compiler
pcompile *Compiler
)
func TestMain(m *testing.M) {
var err error
qcompile, err = qcode.NewCompiler(qcode.Config{
Blocklist: []string{
"secret",
"password",
"token",
},
})
if err != nil {
log.Fatal(err)
}
err = qcompile.AddRole("user", "product", qcode.TRConfig{
Query: qcode.QueryConfig{
Columns: []string{"id", "name", "price", "users", "customers"},
Filters: []string{
"{ price: { gt: 0 } }",
"{ price: { lt: 8 } }",
},
},
Insert: qcode.InsertConfig{
Presets: map[string]string{
"user_id": "$user_id",
"created_at": "now",
"updated_at": "now",
},
},
Update: qcode.UpdateConfig{
Filters: []string{"{ user_id: { eq: $user_id } }"},
Presets: map[string]string{"updated_at": "now"},
},
Delete: qcode.DeleteConfig{
Filters: []string{
"{ price: { gt: 0 } }",
"{ price: { lt: 8 } }",
},
},
})
if err != nil {
log.Fatal(err)
}
err = qcompile.AddRole("anon", "product", qcode.TRConfig{
Query: qcode.QueryConfig{
Columns: []string{"id", "name"},
},
})
if err != nil {
log.Fatal(err)
}
err = qcompile.AddRole("anon1", "product", qcode.TRConfig{
Query: qcode.QueryConfig{
Columns: []string{"id", "name", "price"},
DisableFunctions: true,
},
})
if err != nil {
log.Fatal(err)
}
err = qcompile.AddRole("user", "users", qcode.TRConfig{
Query: qcode.QueryConfig{
Columns: []string{"id", "full_name", "avatar", "email", "products"},
},
})
if err != nil {
log.Fatal(err)
}
err = qcompile.AddRole("bad_dude", "users", qcode.TRConfig{
Query: qcode.QueryConfig{
Filters: []string{"false"},
DisableFunctions: true,
},
Insert: qcode.InsertConfig{
Filters: []string{"false"},
},
Update: qcode.UpdateConfig{
Filters: []string{"false"},
},
})
if err != nil {
log.Fatal(err)
}
err = qcompile.AddRole("user", "mes", qcode.TRConfig{
Query: qcode.QueryConfig{
Columns: []string{"id", "full_name", "avatar"},
Filters: []string{
"{ id: { eq: $user_id } }",
},
},
})
if err != nil {
log.Fatal(err)
}
err = qcompile.AddRole("user", "customers", qcode.TRConfig{
Query: qcode.QueryConfig{
Columns: []string{"id", "email", "full_name", "products"},
},
})
if err != nil {
log.Fatal(err)
}
func getTestSchema() *DBSchema {
tables := []DBTable{
DBTable{Name: "customers", Type: "table"},
DBTable{Name: "users", Type: "table"},
DBTable{Name: "products", Type: "table"},
DBTable{Name: "purchases", Type: "table"},
DBTable{Name: "tags", Type: "table"},
DBTable{Name: "tag_count", Type: "json"},
}
columns := [][]DBColumn{
@ -169,7 +48,8 @@ func TestMain(m *testing.M) {
DBColumn{ID: 6, Name: "created_at", Type: "timestamp without time zone", NotNull: true, PrimaryKey: false, UniqueKey: false},
DBColumn{ID: 7, Name: "updated_at", Type: "timestamp without time zone", NotNull: true, PrimaryKey: false, UniqueKey: false},
DBColumn{ID: 8, Name: "tsv", Type: "tsvector", NotNull: false, PrimaryKey: false, UniqueKey: false},
DBColumn{ID: 9, Name: "tags", Type: "text[]", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "tags", FKeyColID: []int16{3}, Array: true}},
DBColumn{ID: 9, Name: "tags", Type: "text[]", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "tags", FKeyColID: []int16{3}, Array: true},
DBColumn{ID: 9, Name: "tag_count", Type: "json", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "tag_count", FKeyColID: []int16{}}},
[]DBColumn{
DBColumn{ID: 1, Name: "id", Type: "bigint", NotNull: true, PrimaryKey: true, UniqueKey: true},
DBColumn{ID: 2, Name: "customer_id", Type: "bigint", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "customers", FKeyColID: []int16{1}},
@ -182,6 +62,9 @@ func TestMain(m *testing.M) {
DBColumn{ID: 1, Name: "id", Type: "bigint", NotNull: true, PrimaryKey: true, UniqueKey: true},
DBColumn{ID: 2, Name: "name", Type: "text", NotNull: false, PrimaryKey: false, UniqueKey: false},
DBColumn{ID: 3, Name: "slug", Type: "text", NotNull: false, PrimaryKey: false, UniqueKey: false}},
[]DBColumn{
DBColumn{ID: 1, Name: "tag_id", Type: "bigint", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "tags", FKeyColID: []int16{1}},
DBColumn{ID: 2, Name: "count", Type: "int", NotNull: false, PrimaryKey: false, UniqueKey: false}},
}
for i := range tables {
@ -209,36 +92,18 @@ func TestMain(m *testing.M) {
}
for i, t := range tables {
err := schema.updateRelationships(t, columns[i])
err := schema.firstDegreeRels(t, columns[i])
if err != nil {
log.Fatal(err)
}
}
vars := NewVariables(map[string]string{
"admin_account_id": "5",
})
pcompile = NewCompiler(Config{
Schema: schema,
Vars: vars,
})
os.Exit(m.Run())
}
func compileGQLToPSQL(gql string, vars Variables, role string) ([]byte, error) {
qc, err := qcompile.Compile([]byte(gql), role)
if err != nil {
return nil, err
for i, t := range tables {
err := schema.secondDegreeRels(t, columns[i])
if err != nil {
log.Fatal(err)
}
}
_, sqlStmt, err := pcompile.CompileEx(qc, vars)
if err != nil {
return nil, err
}
//fmt.Println(string(sqlStmt))
return sqlStmt, nil
return schema
}
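The reworked compileGQLToPSQL helper itself is not part of this diff; given its (t, gql, vars, role) call sites above, a plausible sketch is (the golden-SQL comparison is an assumption):

// Sketch only: what the new test helper likely does.
func compileGQLToPSQL(t *testing.T, gql string, vars Variables, role string) {
	qc, err := qcompile.Compile([]byte(gql), role)
	if err != nil {
		t.Fatal(err)
	}
	_, sqlStmt, err := pcompile.CompileEx(qc, vars)
	if err != nil {
		t.Fatal(err)
	}
	t.Log(string(sqlStmt)) // the real helper presumably diffs this against recorded output
}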

View File

@ -0,0 +1,151 @@
=== RUN TestCompileInsert
=== RUN TestCompileInsert/simpleInsert
WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "users" AS (INSERT INTO "users" ("full_name", "email") SELECT "t"."full_name", "t"."email" FROM "_sg_input" i, json_populate_record(NULL::users, i.j) t RETURNING *) SELECT jsonb_build_object('user', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "users_0"."id" AS "id" FROM (SELECT "users"."id" FROM "users" LIMIT ('1') :: integer) AS "users_0") AS "__sr_0") AS "__sj_0"
=== RUN TestCompileInsert/singleInsert
WITH "_sg_input" AS (SELECT '{{insert}}' :: json AS j), "products" AS (INSERT INTO "products" ("name", "description", "price", "user_id") SELECT "t"."name", "t"."description", "t"."price", "t"."user_id" FROM "_sg_input" i, json_populate_record(NULL::products, i.j) t RETURNING *) SELECT jsonb_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name" FROM (SELECT "products"."id", "products"."name" FROM "products" LIMIT ('1') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0"
=== RUN TestCompileInsert/bulkInsert
WITH "_sg_input" AS (SELECT '{{insert}}' :: json AS j), "products" AS (INSERT INTO "products" ("name", "description") SELECT "t"."name", "t"."description" FROM "_sg_input" i, json_populate_recordset(NULL::products, i.j) t RETURNING *) SELECT jsonb_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name" FROM (SELECT "products"."id", "products"."name" FROM "products" LIMIT ('1') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0"
=== RUN TestCompileInsert/simpleInsertWithPresets
WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "products" AS (INSERT INTO "products" ("name", "price", "created_at", "updated_at", "user_id") SELECT "t"."name", "t"."price", 'now' :: timestamp without time zone, 'now' :: timestamp without time zone, '{{user_id}}' :: bigint FROM "_sg_input" i, json_populate_record(NULL::products, i.j) t RETURNING *) SELECT jsonb_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "products_0"."id" AS "id" FROM (SELECT "products"."id" FROM "products" LIMIT ('1') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0"
=== RUN TestCompileInsert/nestedInsertManyToMany
WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "products" AS (INSERT INTO "products" ("name", "price") SELECT "t"."name", "t"."price" FROM "_sg_input" i, json_populate_record(NULL::products, i.j->'product') t RETURNING *), "customers" AS (INSERT INTO "customers" ("full_name", "email") SELECT "t"."full_name", "t"."email" FROM "_sg_input" i, json_populate_record(NULL::customers, i.j->'customer') t RETURNING *), "purchases" AS (INSERT INTO "purchases" ("sale_type", "quantity", "due_date", "customer_id", "product_id") SELECT "t"."sale_type", "t"."quantity", "t"."due_date", "customers"."id", "products"."id" FROM "_sg_input" i, "customers", "products", json_populate_record(NULL::purchases, i.j) t RETURNING *) SELECT jsonb_build_object('purchase', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "purchases_0"."sale_type" AS "sale_type", "purchases_0"."quantity" AS "quantity", "purchases_0"."due_date" AS "due_date", "__sj_1"."json" AS "product", "__sj_2"."json" AS "customer" FROM (SELECT "purchases"."sale_type", "purchases"."quantity", "purchases"."due_date", "purchases"."product_id", "purchases"."customer_id" FROM "purchases" LIMIT ('1') :: integer) AS "purchases_0" LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_2") AS "json"FROM (SELECT "customers_2"."id" AS "id", "customers_2"."full_name" AS "full_name", "customers_2"."email" AS "email" FROM (SELECT "customers"."id", "customers"."full_name", "customers"."email" FROM "customers" WHERE ((("customers"."id") = ("purchases_0"."customer_id"))) LIMIT ('1') :: integer) AS "customers_2") AS "__sr_2") AS "__sj_2" ON ('true') LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_1") AS "json"FROM (SELECT "products_1"."id" AS "id", "products_1"."name" AS "name", "products_1"."price" AS "price" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE ((("products"."id") = ("purchases_0"."product_id"))) LIMIT ('1') :: integer) AS "products_1") AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0"
WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "customers" AS (INSERT INTO "customers" ("full_name", "email") SELECT "t"."full_name", "t"."email" FROM "_sg_input" i, json_populate_record(NULL::customers, i.j->'customer') t RETURNING *), "products" AS (INSERT INTO "products" ("name", "price") SELECT "t"."name", "t"."price" FROM "_sg_input" i, json_populate_record(NULL::products, i.j->'product') t RETURNING *), "purchases" AS (INSERT INTO "purchases" ("sale_type", "quantity", "due_date", "product_id", "customer_id") SELECT "t"."sale_type", "t"."quantity", "t"."due_date", "products"."id", "customers"."id" FROM "_sg_input" i, "products", "customers", json_populate_record(NULL::purchases, i.j) t RETURNING *) SELECT jsonb_build_object('purchase', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "purchases_0"."sale_type" AS "sale_type", "purchases_0"."quantity" AS "quantity", "purchases_0"."due_date" AS "due_date", "__sj_1"."json" AS "product", "__sj_2"."json" AS "customer" FROM (SELECT "purchases"."sale_type", "purchases"."quantity", "purchases"."due_date", "purchases"."product_id", "purchases"."customer_id" FROM "purchases" LIMIT ('1') :: integer) AS "purchases_0" LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_2") AS "json"FROM (SELECT "customers_2"."id" AS "id", "customers_2"."full_name" AS "full_name", "customers_2"."email" AS "email" FROM (SELECT "customers"."id", "customers"."full_name", "customers"."email" FROM "customers" WHERE ((("customers"."id") = ("purchases_0"."customer_id"))) LIMIT ('1') :: integer) AS "customers_2") AS "__sr_2") AS "__sj_2" ON ('true') LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_1") AS "json"FROM (SELECT "products_1"."id" AS "id", "products_1"."name" AS "name", "products_1"."price" AS "price" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE ((("products"."id") = ("purchases_0"."product_id"))) LIMIT ('1') :: integer) AS "products_1") AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0"
=== RUN TestCompileInsert/nestedInsertOneToMany
WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "users" AS (INSERT INTO "users" ("full_name", "email", "created_at", "updated_at") SELECT "t"."full_name", "t"."email", "t"."created_at", "t"."updated_at" FROM "_sg_input" i, json_populate_record(NULL::users, i.j) t RETURNING *), "products" AS (INSERT INTO "products" ("name", "price", "created_at", "updated_at", "user_id") SELECT "t"."name", "t"."price", "t"."created_at", "t"."updated_at", "users"."id" FROM "_sg_input" i, "users", json_populate_record(NULL::products, i.j->'product') t RETURNING *) SELECT jsonb_build_object('user', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "users_0"."id" AS "id", "users_0"."full_name" AS "full_name", "users_0"."email" AS "email", "__sj_1"."json" AS "product" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" LIMIT ('1') :: integer) AS "users_0" LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_1") AS "json"FROM (SELECT "products_1"."id" AS "id", "products_1"."name" AS "name", "products_1"."price" AS "price" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE ((("products"."user_id") = ("users_0"."id"))) LIMIT ('1') :: integer) AS "products_1") AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0"
=== RUN TestCompileInsert/nestedInsertOneToOne
WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "users" AS (INSERT INTO "users" ("full_name", "email", "created_at", "updated_at") SELECT "t"."full_name", "t"."email", "t"."created_at", "t"."updated_at" FROM "_sg_input" i, json_populate_record(NULL::users, i.j->'user') t RETURNING *), "products" AS (INSERT INTO "products" ("name", "price", "created_at", "updated_at", "user_id") SELECT "t"."name", "t"."price", "t"."created_at", "t"."updated_at", "users"."id" FROM "_sg_input" i, "users", json_populate_record(NULL::products, i.j) t RETURNING *) SELECT jsonb_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "__sj_1"."json" AS "user" FROM (SELECT "products"."id", "products"."name", "products"."user_id" FROM "products" LIMIT ('1') :: integer) AS "products_0" LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_1") AS "json"FROM (SELECT "users_1"."id" AS "id", "users_1"."full_name" AS "full_name", "users_1"."email" AS "email" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" WHERE ((("users"."id") = ("products_0"."user_id"))) LIMIT ('1') :: integer) AS "users_1") AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0"
=== RUN TestCompileInsert/nestedInsertOneToManyWithConnect
WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "users" AS (INSERT INTO "users" ("full_name", "email", "created_at", "updated_at") SELECT "t"."full_name", "t"."email", "t"."created_at", "t"."updated_at" FROM "_sg_input" i, json_populate_record(NULL::users, i.j) t RETURNING *), "products" AS ( UPDATE "products" SET "user_id" = "users"."id" FROM "users" WHERE ("products"."id"= ((i.j->'product'->'connect'->>'id'))::bigint) RETURNING "products".*) SELECT jsonb_build_object('user', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "users_0"."id" AS "id", "users_0"."full_name" AS "full_name", "users_0"."email" AS "email", "__sj_1"."json" AS "product" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" LIMIT ('1') :: integer) AS "users_0" LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_1") AS "json"FROM (SELECT "products_1"."id" AS "id", "products_1"."name" AS "name", "products_1"."price" AS "price" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE ((("products"."user_id") = ("users_0"."id"))) LIMIT ('1') :: integer) AS "products_1") AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0"
=== RUN TestCompileInsert/nestedInsertOneToOneWithConnect
WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "_x_users" AS (SELECT "id" FROM "_sg_input" i,"users" WHERE "users"."id"= ((i.j->'user'->'connect'->>'id'))::bigint LIMIT 1), "products" AS (INSERT INTO "products" ("name", "price", "created_at", "updated_at", "user_id") SELECT "t"."name", "t"."price", "t"."created_at", "t"."updated_at", "_x_users"."id" FROM "_sg_input" i, "_x_users", json_populate_record(NULL::products, i.j) t RETURNING *) SELECT jsonb_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "__sj_1"."json" AS "user", "__sj_2"."json" AS "tags" FROM (SELECT "products"."id", "products"."name", "products"."user_id", "products"."tags" FROM "products" LIMIT ('1') :: integer) AS "products_0" LEFT OUTER JOIN LATERAL (SELECT coalesce(jsonb_agg("__sj_2"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_2") AS "json"FROM (SELECT "tags_2"."id" AS "id", "tags_2"."name" AS "name" FROM (SELECT "tags"."id", "tags"."name" FROM "tags" WHERE ((("tags"."slug") = any ("products_0"."tags"))) LIMIT ('20') :: integer) AS "tags_2") AS "__sr_2") AS "__sj_2") AS "__sj_2" ON ('true') LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_1") AS "json"FROM (SELECT "users_1"."id" AS "id", "users_1"."full_name" AS "full_name", "users_1"."email" AS "email" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" WHERE ((("users"."id") = ("products_0"."user_id"))) LIMIT ('1') :: integer) AS "users_1") AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0"
=== RUN TestCompileInsert/nestedInsertOneToOneWithConnectArray
WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "_x_users" AS (SELECT "id" FROM "_sg_input" i,"users" WHERE "users"."id" = ANY((select a::bigint AS list from json_array_elements_text((i.j->'user'->'connect'->>'id')::json) AS a)) LIMIT 1), "products" AS (INSERT INTO "products" ("name", "price", "created_at", "updated_at", "user_id") SELECT "t"."name", "t"."price", "t"."created_at", "t"."updated_at", "_x_users"."id" FROM "_sg_input" i, "_x_users", json_populate_record(NULL::products, i.j) t RETURNING *) SELECT jsonb_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "__sj_1"."json" AS "user" FROM (SELECT "products"."id", "products"."name", "products"."user_id" FROM "products" LIMIT ('1') :: integer) AS "products_0" LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_1") AS "json"FROM (SELECT "users_1"."id" AS "id", "users_1"."full_name" AS "full_name", "users_1"."email" AS "email" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" WHERE ((("users"."id") = ("products_0"."user_id"))) LIMIT ('1') :: integer) AS "users_1") AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0"
--- PASS: TestCompileInsert (0.02s)
--- PASS: TestCompileInsert/simpleInsert (0.00s)
--- PASS: TestCompileInsert/singleInsert (0.00s)
--- PASS: TestCompileInsert/bulkInsert (0.00s)
--- PASS: TestCompileInsert/simpleInsertWithPresets (0.00s)
--- PASS: TestCompileInsert/nestedInsertManyToMany (0.00s)
--- PASS: TestCompileInsert/nestedInsertOneToMany (0.00s)
--- PASS: TestCompileInsert/nestedInsertOneToOne (0.00s)
--- PASS: TestCompileInsert/nestedInsertOneToManyWithConnect (0.00s)
--- PASS: TestCompileInsert/nestedInsertOneToOneWithConnect (0.00s)
--- PASS: TestCompileInsert/nestedInsertOneToOneWithConnectArray (0.00s)
=== RUN TestCompileMutate
=== RUN TestCompileMutate/singleUpsert
WITH "_sg_input" AS (SELECT '{{upsert}}' :: json AS j), "products" AS (INSERT INTO "products" ("name", "description") SELECT "t"."name", "t"."description" FROM "_sg_input" i, json_populate_record(NULL::products, i.j) t RETURNING *) ON CONFLICT (id) DO UPDATE SET name = EXCLUDED.name, description = EXCLUDED.description RETURNING *) SELECT jsonb_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name" FROM (SELECT "products"."id", "products"."name" FROM "products" LIMIT ('1') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0"
=== RUN TestCompileMutate/singleUpsertWhere
WITH "_sg_input" AS (SELECT '{{upsert}}' :: json AS j), "products" AS (INSERT INTO "products" ("name", "description") SELECT "t"."name", "t"."description" FROM "_sg_input" i, json_populate_record(NULL::products, i.j) t RETURNING *) ON CONFLICT (id) WHERE (("products"."price") > '3' :: numeric(7,2)) DO UPDATE SET name = EXCLUDED.name, description = EXCLUDED.description RETURNING *) SELECT jsonb_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name" FROM (SELECT "products"."id", "products"."name" FROM "products" LIMIT ('1') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0"
=== RUN TestCompileMutate/bulkUpsert
WITH "_sg_input" AS (SELECT '{{upsert}}' :: json AS j), "products" AS (INSERT INTO "products" ("name", "description") SELECT "t"."name", "t"."description" FROM "_sg_input" i, json_populate_recordset(NULL::products, i.j) t RETURNING *) ON CONFLICT (id) DO UPDATE SET name = EXCLUDED.name, description = EXCLUDED.description RETURNING *) SELECT jsonb_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name" FROM (SELECT "products"."id", "products"."name" FROM "products" LIMIT ('1') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0"
=== RUN TestCompileMutate/delete
WITH "products" AS (DELETE FROM "products" WHERE (((("products"."price") > '0' :: numeric(7,2)) AND (("products"."price") < '8' :: numeric(7,2))) AND (("products"."id") = '1' :: bigint)) RETURNING "products".*) SELECT jsonb_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name" FROM (SELECT "products"."id", "products"."name" FROM "products" LIMIT ('1') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0"
--- PASS: TestCompileMutate (0.01s)
--- PASS: TestCompileMutate/singleUpsert (0.00s)
--- PASS: TestCompileMutate/singleUpsertWhere (0.00s)
--- PASS: TestCompileMutate/bulkUpsert (0.00s)
--- PASS: TestCompileMutate/delete (0.00s)
=== RUN TestCompileQuery
=== RUN TestCompileQuery/withComplexArgs
SELECT jsonb_build_object('products', "__sj_0"."json") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "products_0"."price" AS "price" FROM (SELECT DISTINCT ON ("products"."price") "products"."id", "products"."name", "products"."price" FROM "products" WHERE (((("products"."id") < '28' :: bigint) AND (("products"."id") >= '20' :: bigint) AND ((("products"."price") > '0' :: numeric(7,2)) AND (("products"."price") < '8' :: numeric(7,2))))) ORDER BY "products"."price" DESC LIMIT ('30') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0") AS "__sj_0"
=== RUN TestCompileQuery/withWhereAndList
SELECT jsonb_build_object('products', "__sj_0"."json") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "products_0"."price" AS "price" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE (((("products"."price") > '10' :: numeric(7,2)) AND NOT (("products"."id") IS NULL) AND ((("products"."price") > '0' :: numeric(7,2)) AND (("products"."price") < '8' :: numeric(7,2))))) LIMIT ('20') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0") AS "__sj_0"
=== RUN TestCompileQuery/withWhereIsNull
SELECT jsonb_build_object('products', "__sj_0"."json") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "products_0"."price" AS "price" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE (((("products"."price") > '10' :: numeric(7,2)) AND NOT (("products"."id") IS NULL) AND ((("products"."price") > '0' :: numeric(7,2)) AND (("products"."price") < '8' :: numeric(7,2))))) LIMIT ('20') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0") AS "__sj_0"
=== RUN TestCompileQuery/withWhereMultiOr
SELECT jsonb_build_object('products', "__sj_0"."json") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "products_0"."price" AS "price" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE ((((("products"."price") > '0' :: numeric(7,2)) AND (("products"."price") < '8' :: numeric(7,2))) AND ((("products"."price") < '20' :: numeric(7,2)) OR (("products"."price") > '10' :: numeric(7,2)) OR NOT (("products"."id") IS NULL)))) LIMIT ('20') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0") AS "__sj_0"
=== RUN TestCompileQuery/fetchByID
SELECT jsonb_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name" FROM (SELECT "products"."id", "products"."name" FROM "products" WHERE ((((("products"."price") > '0' :: numeric(7,2)) AND (("products"."price") < '8' :: numeric(7,2))) AND (("products"."id") = '{{id}}' :: bigint))) LIMIT ('1') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0"
=== RUN TestCompileQuery/searchQuery
SELECT jsonb_build_object('products', "__sj_0"."json") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "products_0"."search_rank" AS "search_rank", "products_0"."search_headline_description" AS "search_headline_description" FROM (SELECT "products"."id", "products"."name", ts_rank("products"."tsv", websearch_to_tsquery('{{query}}')) AS "search_rank", ts_headline("products"."description", websearch_to_tsquery('{{query}}')) AS "search_headline_description" FROM "products" WHERE ((("products"."tsv") @@ websearch_to_tsquery('{{query}}'))) LIMIT ('20') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0") AS "__sj_0"
=== RUN TestCompileQuery/oneToMany
SELECT jsonb_build_object('users', "__sj_0"."json") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "users_0"."email" AS "email", "__sj_1"."json" AS "products" FROM (SELECT "users"."email", "users"."id" FROM "users" LIMIT ('20') :: integer) AS "users_0" LEFT OUTER JOIN LATERAL (SELECT coalesce(jsonb_agg("__sj_1"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_1") AS "json"FROM (SELECT "products_1"."name" AS "name", "products_1"."price" AS "price" FROM (SELECT "products"."name", "products"."price" FROM "products" WHERE ((("products"."user_id") = ("users_0"."id")) AND ((("products"."price") > '0' :: numeric(7,2)) AND (("products"."price") < '8' :: numeric(7,2)))) LIMIT ('20') :: integer) AS "products_1") AS "__sr_1") AS "__sj_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0") AS "__sj_0"
=== RUN TestCompileQuery/oneToManyReverse
SELECT jsonb_build_object('products', "__sj_0"."json") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "products_0"."name" AS "name", "products_0"."price" AS "price", "__sj_1"."json" AS "users" FROM (SELECT "products"."name", "products"."price", "products"."user_id" FROM "products" WHERE (((("products"."price") > '0' :: numeric(7,2)) AND (("products"."price") < '8' :: numeric(7,2)))) LIMIT ('20') :: integer) AS "products_0" LEFT OUTER JOIN LATERAL (SELECT coalesce(jsonb_agg("__sj_1"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_1") AS "json"FROM (SELECT "users_1"."email" AS "email" FROM (SELECT "users"."email" FROM "users" WHERE ((("users"."id") = ("products_0"."user_id"))) LIMIT ('20') :: integer) AS "users_1") AS "__sr_1") AS "__sj_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0") AS "__sj_0"
=== RUN TestCompileQuery/oneToManyArray
SELECT jsonb_build_object('tags', "__sj_0"."json", 'product', "__sj_2"."json") as "__root" FROM (SELECT to_jsonb("__sr_2") AS "json"FROM (SELECT "products_2"."name" AS "name", "products_2"."price" AS "price", "__sj_3"."json" AS "tags" FROM (SELECT "products"."name", "products"."price", "products"."tags" FROM "products" LIMIT ('1') :: integer) AS "products_2" LEFT OUTER JOIN LATERAL (SELECT coalesce(jsonb_agg("__sj_3"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_3") AS "json"FROM (SELECT "tags_3"."id" AS "id", "tags_3"."name" AS "name" FROM (SELECT "tags"."id", "tags"."name" FROM "tags" WHERE ((("tags"."slug") = any ("products_2"."tags"))) LIMIT ('20') :: integer) AS "tags_3") AS "__sr_3") AS "__sj_3") AS "__sj_3" ON ('true')) AS "__sr_2") AS "__sj_2", (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "tags_0"."name" AS "name", "__sj_1"."json" AS "product" FROM (SELECT "tags"."name", "tags"."slug" FROM "tags" LIMIT ('20') :: integer) AS "tags_0" LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_1") AS "json"FROM (SELECT "products_1"."name" AS "name" FROM (SELECT "products"."name" FROM "products" WHERE ((("tags_0"."slug") = any ("products"."tags"))) LIMIT ('1') :: integer) AS "products_1") AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0") AS "__sj_0"
=== RUN TestCompileQuery/manyToMany
SELECT jsonb_build_object('products', "__sj_0"."json") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "products_0"."name" AS "name", "__sj_1"."json" AS "customers" FROM (SELECT "products"."name", "products"."id" FROM "products" WHERE (((("products"."price") > '0' :: numeric(7,2)) AND (("products"."price") < '8' :: numeric(7,2)))) LIMIT ('20') :: integer) AS "products_0" LEFT OUTER JOIN LATERAL (SELECT coalesce(jsonb_agg("__sj_1"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_1") AS "json"FROM (SELECT "customers_1"."email" AS "email", "customers_1"."full_name" AS "full_name" FROM (SELECT "customers"."email", "customers"."full_name" FROM "customers" LEFT OUTER JOIN "purchases" ON (("purchases"."product_id") = ("products_0"."id")) WHERE ((("customers"."id") = ("purchases"."customer_id"))) LIMIT ('20') :: integer) AS "customers_1") AS "__sr_1") AS "__sj_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0") AS "__sj_0"
=== RUN TestCompileQuery/manyToManyReverse
SELECT jsonb_build_object('customers', "__sj_0"."json") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "customers_0"."email" AS "email", "customers_0"."full_name" AS "full_name", "__sj_1"."json" AS "products" FROM (SELECT "customers"."email", "customers"."full_name", "customers"."id" FROM "customers" LIMIT ('20') :: integer) AS "customers_0" LEFT OUTER JOIN LATERAL (SELECT coalesce(jsonb_agg("__sj_1"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_1") AS "json"FROM (SELECT "products_1"."name" AS "name" FROM (SELECT "products"."name" FROM "products" LEFT OUTER JOIN "purchases" ON (("purchases"."customer_id") = ("customers_0"."id")) WHERE ((("products"."id") = ("purchases"."product_id")) AND ((("products"."price") > '0' :: numeric(7,2)) AND (("products"."price") < '8' :: numeric(7,2)))) LIMIT ('20') :: integer) AS "products_1") AS "__sr_1") AS "__sj_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0") AS "__sj_0"
=== RUN TestCompileQuery/aggFunction
SELECT jsonb_build_object('products', "__sj_0"."json") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "products_0"."name" AS "name", "products_0"."count_price" AS "count_price" FROM (SELECT "products"."name", count("products"."price") AS "count_price" FROM "products" WHERE (((("products"."price") > '0' :: numeric(7,2)) AND (("products"."price") < '8' :: numeric(7,2)))) GROUP BY "products"."name" LIMIT ('20') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0") AS "__sj_0"
=== RUN TestCompileQuery/aggFunctionBlockedByCol
SELECT jsonb_build_object('products', "__sj_0"."json") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "products_0"."name" AS "name" FROM (SELECT "products"."name" FROM "products" GROUP BY "products"."name" LIMIT ('20') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0") AS "__sj_0"
=== RUN TestCompileQuery/aggFunctionDisabled
SELECT jsonb_build_object('products', "__sj_0"."json") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "products_0"."name" AS "name" FROM (SELECT "products"."name" FROM "products" GROUP BY "products"."name" LIMIT ('20') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0") AS "__sj_0"
=== RUN TestCompileQuery/aggFunctionWithFilter
SELECT jsonb_build_object('products', "__sj_0"."json") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."max_price" AS "max_price" FROM (SELECT "products"."id", max("products"."price") AS "max_price" FROM "products" WHERE ((((("products"."price") > '0' :: numeric(7,2)) AND (("products"."price") < '8' :: numeric(7,2))) AND (("products"."id") > '10' :: bigint))) GROUP BY "products"."id" LIMIT ('20') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0") AS "__sj_0"
=== RUN TestCompileQuery/syntheticTables
SELECT jsonb_build_object('me', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT FROM (SELECT "users"."email" FROM "users" WHERE ((("users"."id") = '{{user_id}}' :: bigint)) LIMIT ('1') :: integer) AS "users_0") AS "__sr_0") AS "__sj_0"
=== RUN TestCompileQuery/queryWithVariables
SELECT jsonb_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name" FROM (SELECT "products"."id", "products"."name" FROM "products" WHERE (((("products"."price") = '{{product_price}}' :: numeric(7,2)) AND (("products"."id") = '{{product_id}}' :: bigint) AND ((("products"."price") > '0' :: numeric(7,2)) AND (("products"."price") < '8' :: numeric(7,2))))) LIMIT ('1') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0"
=== RUN TestCompileQuery/withWhereOnRelations
SELECT jsonb_build_object('users', "__sj_0"."json") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "users_0"."id" AS "id", "users_0"."email" AS "email" FROM (SELECT "users"."id", "users"."email" FROM "users" WHERE (NOT EXISTS (SELECT 1 FROM products WHERE (("products"."user_id") = ("users"."id")) AND ((("products"."price") > '3' :: numeric(7,2))))) LIMIT ('20') :: integer) AS "users_0") AS "__sr_0") AS "__sj_0") AS "__sj_0"
=== RUN TestCompileQuery/multiRoot
SELECT jsonb_build_object('customer', "__sj_0"."json", 'user', "__sj_1"."json", 'product', "__sj_2"."json") as "__root" FROM (SELECT to_jsonb("__sr_2") AS "json"FROM (SELECT "products_2"."id" AS "id", "products_2"."name" AS "name", "__sj_3"."json" AS "customers", "__sj_4"."json" AS "customer" FROM (SELECT "products"."id", "products"."name" FROM "products" WHERE (((("products"."price") > '0' :: numeric(7,2)) AND (("products"."price") < '8' :: numeric(7,2)))) LIMIT ('1') :: integer) AS "products_2" LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_4") AS "json"FROM (SELECT "customers_4"."email" AS "email" FROM (SELECT "customers"."email" FROM "customers" LEFT OUTER JOIN "purchases" ON (("purchases"."product_id") = ("products_2"."id")) WHERE ((("customers"."id") = ("purchases"."customer_id"))) LIMIT ('1') :: integer) AS "customers_4") AS "__sr_4") AS "__sj_4" ON ('true') LEFT OUTER JOIN LATERAL (SELECT coalesce(jsonb_agg("__sj_3"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_3") AS "json"FROM (SELECT "customers_3"."email" AS "email" FROM (SELECT "customers"."email" FROM "customers" LEFT OUTER JOIN "purchases" ON (("purchases"."product_id") = ("products_2"."id")) WHERE ((("customers"."id") = ("purchases"."customer_id"))) LIMIT ('20') :: integer) AS "customers_3") AS "__sr_3") AS "__sj_3") AS "__sj_3" ON ('true')) AS "__sr_2") AS "__sj_2", (SELECT to_jsonb("__sr_1") AS "json"FROM (SELECT "users_1"."id" AS "id", "users_1"."email" AS "email" FROM (SELECT "users"."id", "users"."email" FROM "users" LIMIT ('1') :: integer) AS "users_1") AS "__sr_1") AS "__sj_1", (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "customers_0"."id" AS "id" FROM (SELECT "customers"."id" FROM "customers" LIMIT ('1') :: integer) AS "customers_0") AS "__sr_0") AS "__sj_0"
=== RUN TestCompileQuery/jsonColumnAsTable
SELECT jsonb_build_object('products', "__sj_0"."json") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "__sj_1"."json" AS "tag_count" FROM (SELECT "products"."id", "products"."name" FROM "products" LIMIT ('20') :: integer) AS "products_0" LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_1") AS "json"FROM (SELECT "tag_count_1"."count" AS "count", "__sj_2"."json" AS "tags" FROM (SELECT "tag_count"."count", "tag_count"."tag_id" FROM "products", json_to_recordset("products"."tag_count") AS "tag_count"(tag_id bigint, count int) WHERE ((("products"."id") = ("products_0"."id"))) LIMIT ('1') :: integer) AS "tag_count_1" LEFT OUTER JOIN LATERAL (SELECT coalesce(jsonb_agg("__sj_2"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_2") AS "json"FROM (SELECT "tags_2"."name" AS "name" FROM (SELECT "tags"."name" FROM "tags" WHERE ((("tags"."id") = ("tag_count_1"."tag_id"))) LIMIT ('20') :: integer) AS "tags_2") AS "__sr_2") AS "__sj_2") AS "__sj_2" ON ('true')) AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0") AS "__sj_0"
=== RUN TestCompileQuery/withCursor
SELECT jsonb_build_object('products', "__sj_0"."json", 'products_cursor', "__sj_0"."cursor") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json", CONCAT_WS(',', max("__cur_0"), max("__cur_1")) as "cursor" FROM (SELECT to_jsonb("__sr_0") - '__cur_0' - '__cur_1' AS "json", "__cur_0", "__cur_1"FROM (SELECT "products_0"."name" AS "name", LAST_VALUE("products_0"."price") OVER() AS "__cur_0", LAST_VALUE("products_0"."id") OVER() AS "__cur_1" FROM (WITH "__cur" AS (SELECT a[1] as "price", a[2] as "id" FROM string_to_array('{{cursor}}', ',') as a) SELECT "products"."name", "products"."id", "products"."price" FROM "products", "__cur" WHERE (((("products"."price") < "__cur"."price" :: numeric(7,2)) OR ((("products"."price") = "__cur"."price" :: numeric(7,2)) AND (("products"."id") > "__cur"."id" :: bigint)))) ORDER BY "products"."price" DESC, "products"."id" ASC LIMIT ('20') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0") AS "__sj_0"
=== RUN TestCompileQuery/nullForAuthRequiredInAnon
SELECT jsonb_build_object('products', "__sj_0"."json") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", NULL AS "user" FROM (SELECT "products"."id", "products"."name", "products"."user_id" FROM "products" LIMIT ('20') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0") AS "__sj_0"
=== RUN TestCompileQuery/blockedQuery
SELECT jsonb_build_object('user', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "users_0"."id" AS "id", "users_0"."full_name" AS "full_name", "users_0"."email" AS "email" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" WHERE (false) LIMIT ('1') :: integer) AS "users_0") AS "__sr_0") AS "__sj_0"
=== RUN TestCompileQuery/blockedFunctions
SELECT jsonb_build_object('users', "__sj_0"."json") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "users_0"."email" AS "email" FROM (SELECT , "users"."email" FROM "users" WHERE (false) GROUP BY "users"."email" LIMIT ('20') :: integer) AS "users_0") AS "__sr_0") AS "__sj_0") AS "__sj_0"
--- PASS: TestCompileQuery (0.02s)
--- PASS: TestCompileQuery/withComplexArgs (0.00s)
--- PASS: TestCompileQuery/withWhereAndList (0.00s)
--- PASS: TestCompileQuery/withWhereIsNull (0.00s)
--- PASS: TestCompileQuery/withWhereMultiOr (0.00s)
--- PASS: TestCompileQuery/fetchByID (0.00s)
--- PASS: TestCompileQuery/searchQuery (0.00s)
--- PASS: TestCompileQuery/oneToMany (0.00s)
--- PASS: TestCompileQuery/oneToManyReverse (0.00s)
--- PASS: TestCompileQuery/oneToManyArray (0.00s)
--- PASS: TestCompileQuery/manyToMany (0.00s)
--- PASS: TestCompileQuery/manyToManyReverse (0.00s)
--- PASS: TestCompileQuery/aggFunction (0.00s)
--- PASS: TestCompileQuery/aggFunctionBlockedByCol (0.00s)
--- PASS: TestCompileQuery/aggFunctionDisabled (0.00s)
--- PASS: TestCompileQuery/aggFunctionWithFilter (0.00s)
--- PASS: TestCompileQuery/syntheticTables (0.00s)
--- PASS: TestCompileQuery/queryWithVariables (0.00s)
--- PASS: TestCompileQuery/withWhereOnRelations (0.00s)
--- PASS: TestCompileQuery/multiRoot (0.00s)
--- PASS: TestCompileQuery/jsonColumnAsTable (0.00s)
--- PASS: TestCompileQuery/withCursor (0.00s)
--- PASS: TestCompileQuery/nullForAuthRequiredInAnon (0.00s)
--- PASS: TestCompileQuery/blockedQuery (0.00s)
--- PASS: TestCompileQuery/blockedFunctions (0.00s)
=== RUN TestCompileUpdate
=== RUN TestCompileUpdate/singleUpdate
WITH "_sg_input" AS (SELECT '{{update}}' :: json AS j), "products" AS (UPDATE "products" SET ("name", "description") = (SELECT "t"."name", "t"."description" FROM "_sg_input" i, json_populate_record(NULL::products, i.j) t) WHERE ((("products"."id") = '1' :: bigint) AND (("products"."id") = '{{id}}' :: bigint)) RETURNING "products".*) SELECT jsonb_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name" FROM (SELECT "products"."id", "products"."name" FROM "products" LIMIT ('1') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0"
=== RUN TestCompileUpdate/simpleUpdateWithPresets
WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "products" AS (UPDATE "products" SET ("name", "price", "updated_at") = (SELECT "t"."name", "t"."price", 'now' :: timestamp without time zone FROM "_sg_input" i, json_populate_record(NULL::products, i.j) t) WHERE (("products"."user_id") = '{{user_id}}' :: bigint) RETURNING "products".*) SELECT jsonb_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "products_0"."id" AS "id" FROM (SELECT "products"."id" FROM "products" LIMIT ('1') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0"
=== RUN TestCompileUpdate/nestedUpdateManyToMany
WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "purchases" AS (UPDATE "purchases" SET ("sale_type", "quantity", "due_date") = (SELECT "t"."sale_type", "t"."quantity", "t"."due_date" FROM "_sg_input" i, json_populate_record(NULL::purchases, i.j) t) WHERE (("purchases"."id") = '{{id}}' :: bigint) RETURNING "purchases".*), "products" AS (UPDATE "products" SET ("name", "price") = (SELECT "t"."name", "t"."price" FROM "_sg_input" i, json_populate_record(NULL::products, i.j->'product') t) FROM "purchases" WHERE (("products"."id") = ("purchases"."product_id")) RETURNING "products".*), "customers" AS (UPDATE "customers" SET ("full_name", "email") = (SELECT "t"."full_name", "t"."email" FROM "_sg_input" i, json_populate_record(NULL::customers, i.j->'customer') t) FROM "purchases" WHERE (("customers"."id") = ("purchases"."customer_id")) RETURNING "customers".*) SELECT jsonb_build_object('purchase', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "purchases_0"."sale_type" AS "sale_type", "purchases_0"."quantity" AS "quantity", "purchases_0"."due_date" AS "due_date", "__sj_1"."json" AS "product", "__sj_2"."json" AS "customer" FROM (SELECT "purchases"."sale_type", "purchases"."quantity", "purchases"."due_date", "purchases"."product_id", "purchases"."customer_id" FROM "purchases" LIMIT ('1') :: integer) AS "purchases_0" LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_2") AS "json"FROM (SELECT "customers_2"."id" AS "id", "customers_2"."full_name" AS "full_name", "customers_2"."email" AS "email" FROM (SELECT "customers"."id", "customers"."full_name", "customers"."email" FROM "customers" WHERE ((("customers"."id") = ("purchases_0"."customer_id"))) LIMIT ('1') :: integer) AS "customers_2") AS "__sr_2") AS "__sj_2" ON ('true') LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_1") AS "json"FROM (SELECT "products_1"."id" AS "id", "products_1"."name" AS "name", "products_1"."price" AS "price" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE ((("products"."id") = ("purchases_0"."product_id"))) LIMIT ('1') :: integer) AS "products_1") AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0"
WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "purchases" AS (UPDATE "purchases" SET ("sale_type", "quantity", "due_date") = (SELECT "t"."sale_type", "t"."quantity", "t"."due_date" FROM "_sg_input" i, json_populate_record(NULL::purchases, i.j) t) WHERE (("purchases"."id") = '{{id}}' :: bigint) RETURNING "purchases".*), "customers" AS (UPDATE "customers" SET ("full_name", "email") = (SELECT "t"."full_name", "t"."email" FROM "_sg_input" i, json_populate_record(NULL::customers, i.j->'customer') t) FROM "purchases" WHERE (("customers"."id") = ("purchases"."customer_id")) RETURNING "customers".*), "products" AS (UPDATE "products" SET ("name", "price") = (SELECT "t"."name", "t"."price" FROM "_sg_input" i, json_populate_record(NULL::products, i.j->'product') t) FROM "purchases" WHERE (("products"."id") = ("purchases"."product_id")) RETURNING "products".*) SELECT jsonb_build_object('purchase', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "purchases_0"."sale_type" AS "sale_type", "purchases_0"."quantity" AS "quantity", "purchases_0"."due_date" AS "due_date", "__sj_1"."json" AS "product", "__sj_2"."json" AS "customer" FROM (SELECT "purchases"."sale_type", "purchases"."quantity", "purchases"."due_date", "purchases"."product_id", "purchases"."customer_id" FROM "purchases" LIMIT ('1') :: integer) AS "purchases_0" LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_2") AS "json"FROM (SELECT "customers_2"."id" AS "id", "customers_2"."full_name" AS "full_name", "customers_2"."email" AS "email" FROM (SELECT "customers"."id", "customers"."full_name", "customers"."email" FROM "customers" WHERE ((("customers"."id") = ("purchases_0"."customer_id"))) LIMIT ('1') :: integer) AS "customers_2") AS "__sr_2") AS "__sj_2" ON ('true') LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_1") AS "json"FROM (SELECT "products_1"."id" AS "id", "products_1"."name" AS "name", "products_1"."price" AS "price" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE ((("products"."id") = ("purchases_0"."product_id"))) LIMIT ('1') :: integer) AS "products_1") AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0"
=== RUN TestCompileUpdate/nestedUpdateOneToMany
WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "users" AS (UPDATE "users" SET ("full_name", "email", "created_at", "updated_at") = (SELECT "t"."full_name", "t"."email", "t"."created_at", "t"."updated_at" FROM "_sg_input" i, json_populate_record(NULL::users, i.j) t) WHERE (("users"."id") = '8' :: bigint) RETURNING "users".*), "products" AS (UPDATE "products" SET ("name", "price", "created_at", "updated_at") = (SELECT "t"."name", "t"."price", "t"."created_at", "t"."updated_at" FROM "_sg_input" i, json_populate_record(NULL::products, i.j->'product') t) FROM "users" WHERE (("products"."user_id") = ("users"."id") AND "products"."id"= ((i.j->'product'->'where'->>'id'))::bigint) RETURNING "products".*) SELECT jsonb_build_object('user', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "users_0"."id" AS "id", "users_0"."full_name" AS "full_name", "users_0"."email" AS "email", "__sj_1"."json" AS "product" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" LIMIT ('1') :: integer) AS "users_0" LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_1") AS "json"FROM (SELECT "products_1"."id" AS "id", "products_1"."name" AS "name", "products_1"."price" AS "price" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE ((("products"."user_id") = ("users_0"."id"))) LIMIT ('1') :: integer) AS "products_1") AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0"
=== RUN TestCompileUpdate/nestedUpdateOneToOne
WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "products" AS (UPDATE "products" SET ("name", "price", "created_at", "updated_at") = (SELECT "t"."name", "t"."price", "t"."created_at", "t"."updated_at" FROM "_sg_input" i, json_populate_record(NULL::products, i.j) t) WHERE (("products"."id") = '{{id}}' :: bigint) RETURNING "products".*), "users" AS (UPDATE "users" SET ("email") = (SELECT "t"."email" FROM "_sg_input" i, json_populate_record(NULL::users, i.j->'user') t) FROM "products" WHERE (("users"."id") = ("products"."user_id")) RETURNING "users".*) SELECT jsonb_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "__sj_1"."json" AS "user" FROM (SELECT "products"."id", "products"."name", "products"."user_id" FROM "products" LIMIT ('1') :: integer) AS "products_0" LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_1") AS "json"FROM (SELECT "users_1"."id" AS "id", "users_1"."full_name" AS "full_name", "users_1"."email" AS "email" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" WHERE ((("users"."id") = ("products_0"."user_id"))) LIMIT ('1') :: integer) AS "users_1") AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0"
=== RUN TestCompileUpdate/nestedUpdateOneToManyWithConnect
WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "users" AS (UPDATE "users" SET ("full_name", "email", "created_at", "updated_at") = (SELECT "t"."full_name", "t"."email", "t"."created_at", "t"."updated_at" FROM "_sg_input" i, json_populate_record(NULL::users, i.j) t) WHERE (("users"."id") = '{{id}}' :: bigint) RETURNING "users".*), "products_c" AS ( UPDATE "products" SET "user_id" = "users"."id" FROM "users" WHERE ("products"."id"= ((i.j->'product'->'connect'->>'id'))::bigint) RETURNING "products".*), "products_d" AS ( UPDATE "products" SET "user_id" = NULL FROM "users" WHERE ("products"."id"= ((i.j->'product'->'disconnect'->>'id'))::bigint) RETURNING "products".*), "products" AS (SELECT * FROM "products_c" UNION ALL SELECT * FROM "products_d") SELECT jsonb_build_object('user', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "users_0"."id" AS "id", "users_0"."full_name" AS "full_name", "users_0"."email" AS "email", "__sj_1"."json" AS "product" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" LIMIT ('1') :: integer) AS "users_0" LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_1") AS "json"FROM (SELECT "products_1"."id" AS "id", "products_1"."name" AS "name", "products_1"."price" AS "price" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE ((("products"."user_id") = ("users_0"."id"))) LIMIT ('1') :: integer) AS "products_1") AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0"
=== RUN TestCompileUpdate/nestedUpdateOneToOneWithConnect
WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "_x_users" AS (SELECT "id" FROM "_sg_input" i,"users" WHERE "users"."id"= ((i.j->'user'->'connect'->>'id'))::bigint AND "users"."email"= ((i.j->'user'->'connect'->>'email'))::character varying LIMIT 1), "products" AS (UPDATE "products" SET ("name", "price", "user_id") = (SELECT "t"."name", "t"."price", "_x_users"."id" FROM "_sg_input" i, "_x_users", json_populate_record(NULL::products, i.j) t) WHERE (("products"."id") = '{{product_id}}' :: bigint) RETURNING "products".*) SELECT jsonb_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "__sj_1"."json" AS "user" FROM (SELECT "products"."id", "products"."name", "products"."user_id" FROM "products" LIMIT ('1') :: integer) AS "products_0" LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_1") AS "json"FROM (SELECT "users_1"."id" AS "id", "users_1"."full_name" AS "full_name", "users_1"."email" AS "email" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" WHERE ((("users"."id") = ("products_0"."user_id"))) LIMIT ('1') :: integer) AS "users_1") AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0"
WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "_x_users" AS (SELECT "id" FROM "_sg_input" i,"users" WHERE "users"."email"= ((i.j->'user'->'connect'->>'email'))::character varying AND "users"."id"= ((i.j->'user'->'connect'->>'id'))::bigint LIMIT 1), "products" AS (UPDATE "products" SET ("name", "price", "user_id") = (SELECT "t"."name", "t"."price", "_x_users"."id" FROM "_sg_input" i, "_x_users", json_populate_record(NULL::products, i.j) t) WHERE (("products"."id") = '{{product_id}}' :: bigint) RETURNING "products".*) SELECT jsonb_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "__sj_1"."json" AS "user" FROM (SELECT "products"."id", "products"."name", "products"."user_id" FROM "products" LIMIT ('1') :: integer) AS "products_0" LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_1") AS "json"FROM (SELECT "users_1"."id" AS "id", "users_1"."full_name" AS "full_name", "users_1"."email" AS "email" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" WHERE ((("users"."id") = ("products_0"."user_id"))) LIMIT ('1') :: integer) AS "users_1") AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0"
=== RUN TestCompileUpdate/nestedUpdateOneToOneWithDisconnect
WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "_x_users" AS (SELECT * FROM (VALUES(NULL::bigint)) AS LOOKUP("id")), "products" AS (UPDATE "products" SET ("name", "price", "user_id") = (SELECT "t"."name", "t"."price", "_x_users"."id" FROM "_sg_input" i, "_x_users", json_populate_record(NULL::products, i.j) t) WHERE (("products"."id") = '{{id}}' :: bigint) RETURNING "products".*) SELECT jsonb_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "products_0"."user_id" AS "user_id" FROM (SELECT "products"."id", "products"."name", "products"."user_id" FROM "products" LIMIT ('1') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0"
--- PASS: TestCompileUpdate (0.02s)
--- PASS: TestCompileUpdate/singleUpdate (0.00s)
--- PASS: TestCompileUpdate/simpleUpdateWithPresets (0.00s)
--- PASS: TestCompileUpdate/nestedUpdateManyToMany (0.00s)
--- PASS: TestCompileUpdate/nestedUpdateOneToMany (0.00s)
--- PASS: TestCompileUpdate/nestedUpdateOneToOne (0.00s)
--- PASS: TestCompileUpdate/nestedUpdateOneToManyWithConnect (0.00s)
--- PASS: TestCompileUpdate/nestedUpdateOneToOneWithConnect (0.00s)
--- PASS: TestCompileUpdate/nestedUpdateOneToOneWithDisconnect (0.00s)
PASS
ok github.com/dosco/super-graph/core/internal/psql 0.320s
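Every fixture above has the same shape: one statement in which the "_sg_input" CTE parses the '{{data}}'/'{{update}}' JSON variable, json_populate_record(NULL::&lt;table&gt;, i.j) casts its fields to typed columns, each nested table gets its own UPDATE CTE, and a final SELECT assembles the response with jsonb_build_object/to_jsonb. A minimal sketch of the two-phase pipeline behind this output, using only the qcode API visible in the diffs below (the render step is hypothetical here; it lives in the psql package these tests exercise):

package main

import (
	"log"

	"github.com/dosco/super-graph/core/internal/qcode"
)

func main() {
	// Phase 1: GraphQL -> QCode intermediate representation.
	qcompile, err := qcode.NewCompiler(qcode.Config{})
	if err != nil {
		log.Fatal(err)
	}
	qc, err := qcompile.Compile([]byte(`mutation {
		product(id: $id, update: $update) { id name }
	}`), "user")
	if err != nil {
		log.Fatal(err)
	}

	// Phase 2 (hypothetical call shape): the psql compiler walks the
	// QCode tree and emits the CTE-based SQL shown in the log above.
	_ = qc
}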

View File

@ -6,8 +6,8 @@ import (
"fmt"
"io"
"github.com/dosco/super-graph/qcode"
"github.com/dosco/super-graph/util"
"github.com/dosco/super-graph/core/internal/qcode"
"github.com/dosco/super-graph/core/internal/util"
)
func (c *compilerContext) renderUpdate(qc *qcode.QCode, w io.Writer,
@ -15,7 +15,10 @@ func (c *compilerContext) renderUpdate(qc *qcode.QCode, w io.Writer,
update, ok := vars[qc.ActionVar]
if !ok {
return 0, fmt.Errorf("Variable '%s' not !defined", qc.ActionVar)
return 0, fmt.Errorf("variable '%s' not !defined", qc.ActionVar)
}
if len(update) == 0 {
return 0, fmt.Errorf("variable '%s' is empty", qc.ActionVar)
}
io.WriteString(c.w, `WITH "_sg_input" AS (SELECT '{{`)
@ -125,16 +128,16 @@ func (c *compilerContext) renderUpdateStmt(w io.Writer, qc *qcode.QCode, item re
if item.relPC.Type == RelOneToMany {
if conn, ok := item.data["where"]; ok {
io.WriteString(w, ` AND `)
renderWhereFromJSON(w, item.ti.Name, conn)
renderWhereFromJSON(w, item.kvitem, "where", conn)
} else if conn, ok := item.data["_where"]; ok {
io.WriteString(w, ` AND `)
renderWhereFromJSON(w, item.ti.Name, conn)
renderWhereFromJSON(w, item.kvitem, "_where", conn)
}
}
io.WriteString(w, `)`)
} else {
io.WriteString(w, `WHERE `)
io.WriteString(w, ` WHERE `)
if err := c.renderWhere(&qc.Selects[0], ti); err != nil {
return err
}
@ -165,9 +168,28 @@ func renderNestedUpdateRelColumns(w io.Writer, item kvitem, values bool) error {
for _, v := range item.items {
if v._ctype > 0 && v.relCP.Type == RelOneToMany {
if values {
colWithTable(w, v.relCP.Left.Table, v.relCP.Left.Col)
// if v.relCP.Right.Array {
// io.WriteString(w, `array_diff(`)
// colWithTable(w, v.relCP.Right.Table, v.relCP.Right.Col)
// io.WriteString(w, `, `)
// }
if v._ctype > 0 {
io.WriteString(w, `"_x_`)
io.WriteString(w, v.relCP.Left.Table)
io.WriteString(w, `".`)
quoted(w, v.relCP.Left.Col)
} else {
colWithTable(w, v.relCP.Left.Table, v.relCP.Left.Col)
}
// if v.relCP.Right.Array {
// io.WriteString(w, `)`)
// }
} else {
quoted(w, v.relCP.Right.Col)
}
}
}
@ -176,12 +198,13 @@ func renderNestedUpdateRelColumns(w io.Writer, item kvitem, values bool) error {
}
func renderNestedUpdateRelTables(w io.Writer, item kvitem) error {
// Render child foreign key columns if child-to-parent
// Render tables needed to set values if child-to-parent
// relationship is one-to-many
for _, v := range item.items {
if v._ctype > 0 && v.relCP.Type == RelOneToMany {
quoted(w, v.relCP.Left.Table)
io.WriteString(w, `, `)
io.WriteString(w, `"_x_`)
io.WriteString(w, v.relCP.Left.Table)
io.WriteString(w, `", `)
}
}
@ -199,12 +222,16 @@ func (c *compilerContext) renderDelete(qc *qcode.QCode, w io.Writer,
quoted(c.w, ti.Name)
io.WriteString(c.w, ` WHERE `)
if root.Where == nil {
return 0, errors.New("'where' clause missing in delete mutation")
}
if err := c.renderWhere(root, ti); err != nil {
return 0, err
}
io.WriteString(w, ` RETURNING `)
quoted(w, ti.Name)
io.WriteString(w, `.*)`)
io.WriteString(w, `.*) `)
return 0, nil
}
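With the new guard, an unfiltered delete no longer compiles. A sketch of the contract (hypothetical table and fields; the error string is the one added above):

// compiles: the filter is rendered into the DELETE's WHERE clause
const okGQL = `mutation { product(delete: true, where: { id: { eq: $id } }) { id } }`

// rejected with: 'where' clause missing in delete mutation
const badGQL = `mutation { product(delete: true) { id } }`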

View File

@ -0,0 +1,258 @@
package psql
import (
"encoding/json"
"testing"
)
func singleUpdate(t *testing.T) {
gql := `mutation {
product(id: $id, update: $update, where: { id: { eq: 1 } }) {
id
name
}
}`
vars := map[string]json.RawMessage{
"update": json.RawMessage(` { "name": "my_name", "description": "my_desc" }`),
}
compileGQLToPSQL(t, gql, vars, "anon")
}
func simpleUpdateWithPresets(t *testing.T) {
gql := `mutation {
product(update: $data) {
id
}
}`
vars := map[string]json.RawMessage{
"data": json.RawMessage(`{"name": "Apple", "price": 1.25}`),
}
compileGQLToPSQL(t, gql, vars, "user")
}
func nestedUpdateManyToMany(t *testing.T) {
gql := `mutation {
purchase(update: $data, id: $id) {
sale_type
quantity
due_date
customer {
id
full_name
email
}
product {
id
name
price
}
}
}`
vars := map[string]json.RawMessage{
"data": json.RawMessage(` {
"sale_type": "bought",
"quantity": 5,
"due_date": "now",
"customer": {
"email": "thedude@rug.com",
"full_name": "The Dude"
},
"product": {
"name": "Apple",
"price": 1.25
}
}
`),
}
compileGQLToPSQL(t, gql, vars, "admin")
}
func nestedUpdateOneToMany(t *testing.T) {
gql := `mutation {
user(update: $data, where: { id: { eq: 8 } }) {
id
full_name
email
product {
id
name
price
}
}
}`
vars := map[string]json.RawMessage{
"data": json.RawMessage(`{
"email": "thedude@rug.com",
"full_name": "The Dude",
"created_at": "now",
"updated_at": "now",
"product": {
"where": {
"id": 2
},
"name": "Apple",
"price": 1.25,
"created_at": "now",
"updated_at": "now"
}
}`),
}
compileGQLToPSQL(t, gql, vars, "admin")
}
func nestedUpdateOneToOne(t *testing.T) {
gql := `mutation {
product(update: $data, id: $id) {
id
name
user {
id
full_name
email
}
}
}`
vars := map[string]json.RawMessage{
"data": json.RawMessage(`{
"name": "Apple",
"price": 1.25,
"created_at": "now",
"updated_at": "now",
"user": {
"email": "thedude@rug.com"
}
}`),
}
compileGQLToPSQL(t, gql, vars, "admin")
}
func nestedUpdateOneToManyWithConnect(t *testing.T) {
gql := `mutation {
user(update: $data, id: $id) {
id
full_name
email
product {
id
name
price
}
}
}`
vars := map[string]json.RawMessage{
"data": json.RawMessage(`{
"email": "thedude@rug.com",
"full_name": "The Dude",
"created_at": "now",
"updated_at": "now",
"product": {
"connect": { "id": 7 },
"disconnect": { "id": 8 }
}
}`),
}
compileGQLToPSQL(t, gql, vars, "admin")
}
func nestedUpdateOneToOneWithConnect(t *testing.T) {
gql := `mutation {
product(update: $data, id: $product_id) {
id
name
user {
id
full_name
email
}
}
}`
vars := map[string]json.RawMessage{
"data": json.RawMessage(`{
"name": "Apple",
"price": 1.25,
"user": {
"connect": { "id": 5, "email": "test@test.com" }
}
}`),
}
compileGQLToPSQL(t, gql, vars, "admin")
}
func nestedUpdateOneToOneWithDisconnect(t *testing.T) {
gql := `mutation {
product(update: $data, id: $id) {
id
name
user_id
}
}`
vars := map[string]json.RawMessage{
"data": json.RawMessage(`{
"name": "Apple",
"price": 1.25,
"user": {
"disconnect": { "id": 5 }
}
}`),
}
compileGQLToPSQL(t, gql, vars, "admin")
}
// func nestedUpdateOneToOneWithDisconnectArray(t *testing.T) {
// gql := `mutation {
// product(update: $data, id: 2) {
// id
// name
// user_id
// }
// }`
// sql := `WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "users" AS (SELECT * FROM (VALUES(NULL::bigint)) AS LOOKUP("id")), "products" AS (UPDATE "products" SET ("name", "price", "user_id") = (SELECT "t"."name", "t"."price", "users"."id" FROM "_sg_input" i, "users", json_populate_record(NULL::products, i.j) t) WHERE (("products"."id") = 2) RETURNING "products".*) SELECT json_object_agg('product', json_0) FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "products_0"."user_id" AS "user_id") AS "json_row_0")) AS "json_0" FROM (SELECT "products"."id", "products"."name", "products"."user_id" FROM "products" LIMIT ('1') :: integer) AS "products_0" LIMIT ('1') :: integer) AS "sel_0"`
// vars := map[string]json.RawMessage{
// "data": json.RawMessage(`{
// "name": "Apple",
// "price": 1.25,
// "user": {
// "disconnect": { "id": 5 }
// }
// }`),
// }
// resSQL, err := compileGQLToPSQL(gql, vars, "admin")
// if err != nil {
// t.Fatal(err)
// }
// if string(resSQL) != sql {
// t.Fatal(errNotExpected)
// }
// }
func TestCompileUpdate(t *testing.T) {
t.Run("singleUpdate", singleUpdate)
t.Run("simpleUpdateWithPresets", simpleUpdateWithPresets)
t.Run("nestedUpdateManyToMany", nestedUpdateManyToMany)
t.Run("nestedUpdateOneToMany", nestedUpdateOneToMany)
t.Run("nestedUpdateOneToOne", nestedUpdateOneToOne)
t.Run("nestedUpdateOneToManyWithConnect", nestedUpdateOneToManyWithConnect)
t.Run("nestedUpdateOneToOneWithConnect", nestedUpdateOneToOneWithConnect)
t.Run("nestedUpdateOneToOneWithDisconnect", nestedUpdateOneToOneWithDisconnect)
//t.Run("nestedUpdateOneToOneWithDisconnectArray", nestedUpdateOneToOneWithDisconnectArray)
}

View File

@ -45,6 +45,7 @@ type trval struct {
query struct {
limit string
fil *Exp
filNU bool
cols map[string]struct{}
disable struct {
funcs bool
@ -53,6 +54,7 @@ type trval struct {
insert struct {
fil *Exp
filNU bool
cols map[string]struct{}
psmap map[string]string
pslist []string
@ -60,14 +62,16 @@ type trval struct {
update struct {
fil *Exp
filNU bool
cols map[string]struct{}
psmap map[string]string
pslist []string
}
delete struct {
fil *Exp
cols map[string]struct{}
fil *Exp
filNU bool
cols map[string]struct{}
}
}
@ -88,21 +92,21 @@ func (trv *trval) allowedColumns(qt QType) map[string]struct{} {
return nil
}
func (trv *trval) filter(qt QType) *Exp {
func (trv *trval) filter(qt QType) (*Exp, bool) {
switch qt {
case QTQuery:
return trv.query.fil
return trv.query.fil, trv.query.filNU
case QTInsert:
return trv.insert.fil
return trv.insert.fil, trv.insert.filNU
case QTUpdate:
return trv.update.fil
return trv.update.fil, trv.update.filNU
case QTDelete:
return trv.delete.fil
return trv.delete.fil, trv.delete.filNU
case QTUpsert:
return trv.insert.fil
return trv.insert.fil, trv.insert.filNU
}
return nil
return nil, false
}
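filter() now returns a second value, filNU ("filter needs user"), recording whether the role's filter references the $user_id variable. The qcode changes further down consume it essentially like this, which is what keeps user-scoped tables out of anon-role queries:

fil, nu := trv.filter(qc.Type)
if nu && role == "anon" {
	// a $user_id filter can never match without a logged-in user,
	// so skip rendering the table instead of returning empty rows
	sel.SkipRender = true
}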
func listToMap(list []string) map[string]struct{} {

View File

@ -4,7 +4,11 @@ package qcode
// FuzzerEntrypoint for Fuzzbuzz
func Fuzz(data []byte) int {
GetQType(string(data))
qt := GetQType(string(data))
if qt > QTUpsert {
panic("qt > QTUpsert")
}
qcompile, _ := NewCompiler(Config{})
_, err := qcompile.Compile(data, "user")

View File

@ -31,7 +31,7 @@ type item struct {
_type itemType // The type of this item.
pos Pos // The starting position, in bytes, of this item in the input string.
end Pos // The ending position, in bytes, of this item in the input string.
line uint16 // The line number at the start of this item.
line int16 // The line number at the start of this item.
}
// itemType identifies the type of lex items.
@ -87,7 +87,7 @@ type lexer struct {
width Pos // width of last rune read from input
items []item // array of scanned items
itemsA [50]item
line uint16 // 1+number of newlines seen
line int16 // 1+number of newlines seen
err error
}
@ -137,7 +137,7 @@ func (l *lexer) emit(t itemType) {
l.items = append(l.items, item{t, l.start, l.pos, l.line})
// Some items contain text internally. If so, count their newlines.
switch t {
case itemName:
case itemStringVal:
for i := l.start; i < l.pos; i++ {
if l.input[i] == '\n' {
l.line++
@ -155,11 +155,6 @@ func (l *lexer) emitL(t itemType) {
// ignore skips over the pending input before this point.
func (l *lexer) ignore() {
for i := l.start; i < l.pos; i++ {
if l.input[i] == '\n' {
l.line++
}
}
l.start = l.pos
}
@ -436,7 +431,7 @@ func lowercase(b []byte, s Pos, e Pos) {
}
}
func (i *item) String() string {
func (i item) String() string {
var v string
switch i._type {

View File

@ -6,7 +6,7 @@ import (
"sync"
"unsafe"
"github.com/dosco/super-graph/util"
"github.com/dosco/super-graph/core/internal/util"
)
var (
@ -16,8 +16,8 @@ var (
type parserType int32
const (
maxFields = 100
maxArgs = 10
maxFields = 1200
maxArgs = 25
)
const (
@ -156,12 +156,19 @@ func parseSelectionSet(gql []byte) (*Operation, error) {
return nil, err
}
lexPool.Put(l)
if err != nil {
return nil, err
if p.peek(itemObjClose) {
p.ignore()
} else {
return nil, fmt.Errorf("operation missing closing '}'")
}
if !p.peek(itemEOF) {
p.ignore()
return nil, fmt.Errorf("invalid '%s' found after closing '}'", p.current())
}
lexPool.Put(l)
return op, err
}
@ -184,11 +191,16 @@ func (p *Parser) ignore() {
p.pos = n
}
func (p *Parser) current() string {
item := p.items[p.pos]
return b2s(p.input[item.pos:item.end])
}
func (p *Parser) peek(types ...itemType) bool {
n := p.pos + 1
if p.items[n]._type == itemEOF {
return false
}
// if p.items[n]._type == itemEOF {
// return false
// }
if n >= len(p.items) {
return false
}
@ -230,7 +242,8 @@ func (p *Parser) parseOp() (*Operation, error) {
if p.peek(itemArgsOpen) {
p.ignore()
op.Args, err = p.parseArgs(op.Args)
op.Args, err = p.parseOpParams(op.Args)
if err != nil {
return nil, err
}
@ -292,8 +305,9 @@ func (p *Parser) parseFields(fields []Field) ([]Field, error) {
if st.Len() == 0 {
break
} else {
continue
}
continue
}
if !p.peek(itemName) {
@ -306,6 +320,8 @@ func (p *Parser) parseFields(fields []Field) ([]Field, error) {
f.Args = f.argsA[:0]
f.Children = f.childrenA[:0]
// Parse the inside of the fields () parentheses
// in short, parse the args like id, where, etc.
if err := p.parseField(f); err != nil {
return nil, err
}
@ -318,9 +334,18 @@ func (p *Parser) parseFields(fields []Field) ([]Field, error) {
f.ParentID = -1
}
// After the first opening curly bracket
// come the columns or child fields
if p.peek(itemObjOpen) {
p.ignore()
st.Push(f.ID)
} else if p.peek(itemObjClose) {
if st.Len() == 0 {
break
} else {
continue
}
}
}
@ -354,6 +379,22 @@ func (p *Parser) parseField(f *Field) error {
return nil
}
func (p *Parser) parseOpParams(args []Arg) ([]Arg, error) {
for {
if len(args) >= maxArgs {
return nil, fmt.Errorf("too many args (max %d)", maxArgs)
}
if p.peek(itemArgsClose) {
p.ignore()
break
}
p.next()
}
return args, nil
}
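parseOpParams is what lets named-operation headers parse: everything between the parentheses after the operation name is consumed and thrown away rather than stored (the maxArgs check is purely defensive, since nothing is ever appended to args). For example (hypothetical queries), both of these now get through the parser:

gql1 := `query { products { id } }`

// the ($id: bigint) variable definitions are skipped, not parsed
gql2 := `query getProducts($id: bigint) { products(id: $id) { id } }`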
func (p *Parser) parseArgs(args []Arg) ([]Arg, error) {
var err error
@ -366,6 +407,7 @@ func (p *Parser) parseArgs(args []Arg) ([]Arg, error) {
p.ignore()
break
}
if !p.peek(itemName) {
return nil, errors.New("expecting an argument name")
}
@ -539,6 +581,31 @@ func (t parserType) String() string {
return fmt.Sprintf("<%s>", v)
}
func FreeNode(n *Node) {
// type Frees struct {
// n *Node
// loc int
// }
// var freeList []Frees
// func FreeNode(n *Node, loc int) {
// j := -1
// for i := range freeList {
// if n == freeList[i].n {
// j = i
// break
// }
// }
// if j == -1 {
// nodePool.Put(n)
// freeList = append(freeList, Frees{n, loc})
// } else {
// fmt.Printf(">>>>(%d) RE_FREE %d %p %s %s\n", loc, freeList[j].loc, freeList[j].n, n.Name, n.Type)
// }
// }
func FreeNode(n *Node, loc int) {
nodePool.Put(n)
}

View File

@ -17,13 +17,13 @@ func TestCompile1(t *testing.T) {
}
_, err = qc.Compile([]byte(`
{ product(id: 15) {
query { product(id: 15) {
id
name
} }`), "user")
if err != nil {
t.Fatal(err)
if err == nil {
t.Fatal(errors.New("this should be an error: id must be a variable"))
}
}
@ -39,7 +39,7 @@ func TestCompile2(t *testing.T) {
}
_, err = qc.Compile([]byte(`
query { product(id: 15) {
query { product(id: $id) {
id
name
} }`), "user")
@ -62,7 +62,7 @@ func TestCompile3(t *testing.T) {
_, err = qc.Compile([]byte(`
mutation {
product(id: 15, name: "Test") {
product(id: $test, name: "Test") {
id
name
}
@ -100,6 +100,35 @@ func TestEmptyCompile(t *testing.T) {
}
}
func TestInvalidPostfixCompile(t *testing.T) {
gql := `mutation
updateThread {
thread(update: $data, where: { slug: { eq: $slug } }) {
slug
title
published
createdAt : created_at
totalVotes : cached_votes_total
totalPosts : cached_posts_total
vote : thread_vote(where: { user_id: { eq: $user_id } }) {
id
}
topics {
slug
name
}
}
}
}`
qcompile, _ := NewCompiler(Config{})
_, err := qcompile.Compile([]byte(gql), "anon")
if err == nil {
t.Fatal(errors.New("expecting an error"))
}
}
var gql = []byte(`
products(
# returns only 30 items

View File

@ -7,7 +7,7 @@ import (
"strings"
"sync"
"github.com/dosco/super-graph/util"
"github.com/dosco/super-graph/core/internal/util"
"github.com/gobuffalo/flect"
)
@ -51,6 +51,7 @@ type Select struct {
Allowed map[string]struct{}
PresetMap map[string]string
PresetList []string
SkipRender bool
}
type Column struct {
@ -64,6 +65,7 @@ type Exp struct {
Col string
NestedCols []string
Type ValType
Table string
Val string
ListType ValType
ListVal []string
@ -83,9 +85,19 @@ type OrderBy struct {
Order Order
}
type PagingType int
const (
PtOffset PagingType = iota
PtForward
PtBackward
)
type Paging struct {
Type PagingType
Limit string
Offset string
Cursor bool
NoLimit bool
}
@ -119,6 +131,8 @@ const (
OpEqID
OpTsQuery
OpFalse
OpNotDistinct
OpDistinct
)
type ValType int
@ -131,6 +145,7 @@ const (
ValList
ValVar
ValNone
ValRef
)
type AggregrateOp int
@ -182,12 +197,19 @@ func NewCompiler(c Config) (*Compiler, error) {
return co, nil
}
func NewFilter() *Exp {
ex := expPool.Get().(*Exp)
ex.Reset()
return ex
}
func (com *Compiler) AddRole(role, table string, trc TRConfig) error {
var err error
trv := &trval{}
// query config
trv.query.fil, err = compileFilter(trc.Query.Filters)
trv.query.fil, trv.query.filNU, err = compileFilter(trc.Query.Filters)
if err != nil {
return err
}
@ -198,7 +220,8 @@ func (com *Compiler) AddRole(role, table string, trc TRConfig) error {
trv.query.disable.funcs = trc.Query.DisableFunctions
// insert config
if trv.insert.fil, err = compileFilter(trc.Insert.Filters); err != nil {
trv.insert.fil, trv.insert.filNU, err = compileFilter(trc.Insert.Filters)
if err != nil {
return err
}
trv.insert.cols = listToMap(trc.Insert.Columns)
@ -206,7 +229,8 @@ func (com *Compiler) AddRole(role, table string, trc TRConfig) error {
trv.insert.pslist = mapToList(trv.insert.psmap)
// update config
if trv.update.fil, err = compileFilter(trc.Update.Filters); err != nil {
trv.update.fil, trv.update.filNU, err = compileFilter(trc.Update.Filters)
if err != nil {
return err
}
trv.update.cols = listToMap(trc.Update.Columns)
@ -214,7 +238,8 @@ func (com *Compiler) AddRole(role, table string, trc TRConfig) error {
trv.update.pslist = mapToList(trv.update.psmap)
// delete config
if trv.delete.fil, err = compileFilter(trc.Delete.Filters); err != nil {
trv.delete.fil, trv.delete.filNU, err = compileFilter(trc.Delete.Filters)
if err != nil {
return err
}
trv.delete.cols = listToMap(trc.Delete.Columns)
@ -334,13 +359,13 @@ func (com *Compiler) compileQuery(qc *QCode, op *Operation, role string) error {
s.FieldName = s.Name
}
err := com.compileArgs(qc, s, field.Args)
err := com.compileArgs(qc, s, field.Args, role)
if err != nil {
return err
}
// Order is important addFilters must come after compileArgs
com.addFilters(qc, s, role)
// Order is important AddFilters must come after compileArgs
com.AddFilters(qc, s, role)
if s.ParentID == -1 {
qc.Roots = append(qc.Roots, s.ID)
@ -386,72 +411,82 @@ func (com *Compiler) compileQuery(qc *QCode, op *Operation, role string) error {
return nil
}
func (com *Compiler) addFilters(qc *QCode, sel *Select, role string) {
func (com *Compiler) AddFilters(qc *QCode, sel *Select, role string) {
var fil *Exp
var nu bool
if trv, ok := com.tr[role][sel.Name]; ok {
fil = trv.filter(qc.Type)
} else {
return
fil, nu = trv.filter(qc.Type)
} else if role == "anon" {
// Tables not defined under the anon role will not be rendered
sel.SkipRender = true
}
if fil == nil {
return
}
if nu && role == "anon" {
sel.SkipRender = true
}
switch fil.Op {
case OpNop:
case OpFalse:
sel.Where = fil
default:
if sel.Where != nil {
ow := sel.Where
sel.Where = expPool.Get().(*Exp)
sel.Where.Reset()
sel.Where.Op = OpAnd
sel.Where.Children = sel.Where.childrenA[:2]
sel.Where.Children[0] = fil
sel.Where.Children[1] = ow
} else {
sel.Where = fil
}
AddFilter(sel, fil)
}
}
func (com *Compiler) compileArgs(qc *QCode, sel *Select, args []Arg) error {
func (com *Compiler) compileArgs(qc *QCode, sel *Select, args []Arg, role string) error {
var err error
var ka bool
// don't free this arg: it was either already freed or it will be
// freed later (e.g. in psql)
var df bool
for i := range args {
arg := &args[i]
switch arg.Name {
case "id":
err, ka = com.compileArgID(sel, arg)
err, df = com.compileArgID(sel, arg)
case "search":
err, ka = com.compileArgSearch(sel, arg)
err, df = com.compileArgSearch(sel, arg)
case "where":
err, ka = com.compileArgWhere(sel, arg)
err, df = com.compileArgWhere(sel, arg, role)
case "orderby", "order_by", "order":
err, ka = com.compileArgOrderBy(sel, arg)
err, df = com.compileArgOrderBy(sel, arg)
case "distinct_on", "distinct":
err, ka = com.compileArgDistinctOn(sel, arg)
err, df = com.compileArgDistinctOn(sel, arg)
case "limit":
err, ka = com.compileArgLimit(sel, arg)
err, df = com.compileArgLimit(sel, arg)
case "offset":
err, ka = com.compileArgOffset(sel, arg)
err, df = com.compileArgOffset(sel, arg)
case "first":
err, df = com.compileArgFirstLast(sel, arg, PtForward)
case "last":
err, df = com.compileArgFirstLast(sel, arg, PtBackward)
case "after":
err, df = com.compileArgAfterBefore(sel, arg, PtForward)
case "before":
err, df = com.compileArgAfterBefore(sel, arg, PtBackward)
}
if !ka {
nodePool.Put(arg.Val)
if !df {
FreeNode(arg.Val, 5)
}
if err != nil {
@ -465,7 +500,7 @@ func (com *Compiler) compileArgs(qc *QCode, sel *Select, args []Arg) error {
func (com *Compiler) setMutationType(qc *QCode, args []Arg) error {
setActionVar := func(arg *Arg) error {
if arg.Val.Type != NodeVar {
return fmt.Errorf("value for argument '%s' must be a variable", arg.Name)
return argErr(arg.Name, "variable")
}
qc.ActionVar = arg.Val.Val
return nil
@ -488,7 +523,7 @@ func (com *Compiler) setMutationType(qc *QCode, args []Arg) error {
qc.Type = QTDelete
if arg.Val.Type != NodeBool {
return fmt.Errorf("value for argument '%s' must be a boolean", arg.Name)
return argErr(arg.Name, "boolean")
}
if arg.Val.Val == "false" {
@ -501,19 +536,20 @@ func (com *Compiler) setMutationType(qc *QCode, args []Arg) error {
return nil
}
func (com *Compiler) compileArgObj(st *util.Stack, arg *Arg) (*Exp, error) {
func (com *Compiler) compileArgObj(st *util.Stack, arg *Arg) (*Exp, bool, error) {
if arg.Val.Type != NodeObj {
return nil, fmt.Errorf("expecting an object")
return nil, false, fmt.Errorf("expecting an object")
}
return com.compileArgNode(st, arg.Val, true)
}
func (com *Compiler) compileArgNode(st *util.Stack, node *Node, usePool bool) (*Exp, error) {
func (com *Compiler) compileArgNode(st *util.Stack, node *Node, usePool bool) (*Exp, bool, error) {
var root *Exp
var needsUser bool
if node == nil || len(node.Children) == 0 {
return nil, errors.New("invalid argument value")
return nil, false, errors.New("invalid argument value")
}
pushChild(st, nil, node)
@ -524,9 +560,10 @@ func (com *Compiler) compileArgNode(st *util.Stack, node *Node, usePool bool) (*
}
intf := st.Pop()
node, ok := intf.(*Node)
if !ok || node == nil {
return nil, fmt.Errorf("16: unexpected value %v (%t)", intf, intf)
return nil, needsUser, fmt.Errorf("16: unexpected value %v (%t)", intf, intf)
}
// Objects inside a list
@ -542,13 +579,17 @@ func (com *Compiler) compileArgNode(st *util.Stack, node *Node, usePool bool) (*
ex, err := newExp(st, node, usePool)
if err != nil {
return nil, err
return nil, needsUser, err
}
if ex == nil {
continue
}
if ex.Type == ValVar && ex.Val == "user_id" {
needsUser = true
}
if node.exp == nil {
root = ex
} else {
@ -556,22 +597,26 @@ func (com *Compiler) compileArgNode(st *util.Stack, node *Node, usePool bool) (*
}
}
pushChild(st, nil, node)
if usePool {
st.Push(node)
for {
if st.Len() == 0 {
break
for {
if st.Len() == 0 {
break
}
intf := st.Pop()
node, ok := intf.(*Node)
if !ok || node == nil {
continue
}
for i := range node.Children {
st.Push(node.Children[i])
}
FreeNode(node, 1)
}
intf := st.Pop()
node, _ := intf.(*Node)
for i := range node.Children {
st.Push(node.Children[i])
}
nodePool.Put(node)
}
return root, nil
return root, needsUser, nil
}
func (com *Compiler) compileArgID(sel *Select, arg *Arg) (error, bool) {
@ -583,86 +628,58 @@ func (com *Compiler) compileArgID(sel *Select, arg *Arg) (error, bool) {
return nil, false
}
if arg.Val.Type != NodeVar {
return argErr("id", "variable"), false
}
ex := expPool.Get().(*Exp)
ex.Reset()
ex.Op = OpEqID
ex.Type = ValVar
ex.Val = arg.Val.Val
switch arg.Val.Type {
case NodeStr:
ex.Type = ValStr
case NodeInt:
ex.Type = ValInt
case NodeFloat:
ex.Type = ValFloat
case NodeVar:
ex.Type = ValVar
default:
return fmt.Errorf("expecting a string, int, float or variable"), false
}
sel.Where = ex
return nil, false
}
func (com *Compiler) compileArgSearch(sel *Select, arg *Arg) (error, bool) {
if arg.Val.Type != NodeVar {
return argErr("search", "variable"), false
}
ex := expPool.Get().(*Exp)
ex.Reset()
ex.Op = OpTsQuery
ex.Type = ValVar
ex.Val = arg.Val.Val
if arg.Val.Type == NodeVar {
ex.Type = ValVar
} else {
ex.Type = ValStr
}
if sel.Args == nil {
sel.Args = make(map[string]*Node)
}
sel.Args[arg.Name] = arg.Val
AddFilter(sel, ex)
if sel.Where != nil {
ow := sel.Where
sel.Where = expPool.Get().(*Exp)
sel.Where.Reset()
sel.Where.Op = OpAnd
sel.Where.Children = sel.Where.childrenA[:2]
sel.Where.Children[0] = ex
sel.Where.Children[1] = ow
} else {
sel.Where = ex
}
return nil, true
}
func (com *Compiler) compileArgWhere(sel *Select, arg *Arg) (error, bool) {
func (com *Compiler) compileArgWhere(sel *Select, arg *Arg, role string) (error, bool) {
st := util.NewStack()
var err error
ex, err := com.compileArgObj(st, arg)
ex, nu, err := com.compileArgObj(st, arg)
if err != nil {
return err, false
}
if sel.Where != nil {
ow := sel.Where
sel.Where = expPool.Get().(*Exp)
sel.Where.Reset()
sel.Where.Op = OpAnd
sel.Where.Children = sel.Where.childrenA[:2]
sel.Where.Children[0] = ex
sel.Where.Children[1] = ow
} else {
sel.Where = ex
if nu && role == "anon" {
sel.SkipRender = true
}
AddFilter(sel, ex)
return nil, false
return nil, true
}
func (com *Compiler) compileArgOrderBy(sel *Select, arg *Arg) (error, bool) {
@ -689,16 +706,12 @@ func (com *Compiler) compileArgOrderBy(sel *Select, arg *Arg) (error, bool) {
}
if _, ok := com.bl[node.Name]; ok {
nodePool.Put(node)
FreeNode(node, 2)
continue
}
if node.Type == NodeObj {
for i := range node.Children {
st.Push(node.Children[i])
}
nodePool.Put(node)
continue
if node.Type != NodeStr && node.Type != NodeVar {
return fmt.Errorf("expecting a string or variable"), false
}
ob := &OrderBy{}
@ -722,7 +735,7 @@ func (com *Compiler) compileArgOrderBy(sel *Select, arg *Arg) (error, bool) {
setOrderByColName(ob, node)
sel.OrderBy = append(sel.OrderBy, ob)
nodePool.Put(node)
FreeNode(node, 3)
}
return nil, false
}
@ -744,7 +757,7 @@ func (com *Compiler) compileArgDistinctOn(sel *Select, arg *Arg) (error, bool) {
for i := range node.Children {
sel.DistinctOn = append(sel.DistinctOn, node.Children[i].Val)
nodePool.Put(node.Children[i])
FreeNode(node.Children[i], 5)
}
return nil, false
@ -754,7 +767,7 @@ func (com *Compiler) compileArgLimit(sel *Select, arg *Arg) (error, bool) {
node := arg.Val
if node.Type != NodeInt {
return fmt.Errorf("expecting an integer"), false
return argErr("limit", "number"), false
}
sel.Paging.Limit = node.Val
@ -765,14 +778,39 @@ func (com *Compiler) compileArgLimit(sel *Select, arg *Arg) (error, bool) {
func (com *Compiler) compileArgOffset(sel *Select, arg *Arg) (error, bool) {
node := arg.Val
if node.Type != NodeInt {
return fmt.Errorf("expecting an integer"), false
if node.Type != NodeVar {
return argErr("offset", "variable"), false
}
sel.Paging.Offset = node.Val
return nil, false
}
func (com *Compiler) compileArgFirstLast(sel *Select, arg *Arg, pt PagingType) (error, bool) {
node := arg.Val
if node.Type != NodeInt {
return argErr(arg.Name, "number"), false
}
sel.Paging.Type = pt
sel.Paging.Limit = node.Val
return nil, false
}
func (com *Compiler) compileArgAfterBefore(sel *Select, arg *Arg, pt PagingType) (error, bool) {
node := arg.Val
if node.Type != NodeVar || node.Val != "cursor" {
return fmt.Errorf("value for argument '%s' must be a variable named $cursor", arg.Name), false
}
sel.Paging.Type = pt
sel.Paging.Cursor = true
return nil, false
}
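Together these two handlers implement the new cursor-pagination arguments: first/last pick the paging direction and row count, while after/before must be passed the $cursor variable and flag the query as cursor-driven. A conforming query might look like this (hypothetical fields):

// compiles to Paging{Type: PtForward, Limit: "10", Cursor: true}
gql := `query {
	products(first: 10, after: $cursor) {
		id
		name
	}
}`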
var zeroTrv = &trval{}
func (com *Compiler) getRole(role, field string) *trval {
@ -783,6 +821,27 @@ func (com *Compiler) getRole(role, field string) *trval {
}
}
func AddFilter(sel *Select, fil *Exp) {
if sel.Where != nil {
ow := sel.Where
if sel.Where.Op != OpAnd || !sel.Where.doFree {
sel.Where = expPool.Get().(*Exp)
sel.Where.Reset()
sel.Where.Op = OpAnd
sel.Where.Children = sel.Where.childrenA[:2]
sel.Where.Children[0] = fil
sel.Where.Children[1] = ow
} else {
sel.Where.Children = append(sel.Where.Children, fil)
}
} else {
sel.Where = fil
}
}
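AddFilter centralizes the where-clause merging that was previously inlined at each call site (addFilters, compileArgSearch, compileArgWhere above): the new filter is ANDed in ahead of the existing clause, and a pooled AND node is extended in place rather than wrapped in another AND. In effect:

// before: sel.Where == (price > 0)
// after AddFilter(sel, userFil):
//
//   AND
//    |- userFil        (the filter just added)
//    |- (price > 0)    (the original clause)
//
// a further AddFilter on this pooled AND node appends a third child
// instead of nesting a second AND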
func newExp(st *util.Stack, node *Node, usePool bool) (*Exp, error) {
name := node.Name
if name[0] == '_' {
@ -797,6 +856,7 @@ func newExp(st *util.Stack, node *Node, usePool bool) (*Exp, error) {
} else {
ex = &Exp{doFree: false}
}
ex.Children = ex.childrenA[:0]
switch name {
@ -878,6 +938,12 @@ func newExp(st *util.Stack, node *Node, usePool bool) (*Exp, error) {
case "is_null":
ex.Op = OpIsNull
ex.Val = node.Val
case "null_eq", "ndis", "not_distinct":
ex.Op = OpNotDistinct
ex.Val = node.Val
case "null_neq", "dis", "distinct":
ex.Op = OpDistinct
ex.Val = node.Val
default:
pushChildren(st, node.exp, node)
return nil, nil // skip node
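The two new operators presumably compile down to Postgres's null-safe comparisons, where NULL values compare equal (the actual rendering lives in the psql package; sketched here as a guess from the op names):

// { col: { not_distinct: $v } }  ->  "col" IS NOT DISTINCT FROM ...
// { col: { distinct: $v } }      ->  "col" IS DISTINCT FROM ...
// null_eq / ndis and null_neq / dis are aliases for the same two ops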
@ -973,30 +1039,34 @@ func pushChildren(st *util.Stack, exp *Exp, node *Node) {
func pushChild(st *util.Stack, exp *Exp, node *Node) {
node.Children[0].exp = exp
st.Push(node.Children[0])
}
func compileFilter(filter []string) (*Exp, error) {
func compileFilter(filter []string) (*Exp, bool, error) {
var fl *Exp
var needsUser bool
com := &Compiler{}
st := util.NewStack()
if len(filter) == 0 {
return &Exp{Op: OpNop, doFree: false}, nil
return &Exp{Op: OpNop, doFree: false}, false, nil
}
for i := range filter {
if filter[i] == "false" {
return &Exp{Op: OpFalse, doFree: false}, nil
return &Exp{Op: OpFalse, doFree: false}, false, nil
}
node, err := ParseArgValue(filter[i])
if err != nil {
return nil, err
return nil, false, err
}
f, err := com.compileArgNode(st, node, false)
f, nu, err := com.compileArgNode(st, node, false)
if err != nil {
return nil, err
return nil, false, err
}
if nu {
needsUser = true
}
// TODO: invalid table names in nested where clauses fail silently
@ -1010,7 +1080,7 @@ func compileFilter(filter []string) (*Exp, error) {
fl = &Exp{Op: OpAnd, Children: []*Exp{fl, f}, doFree: false}
}
}
return fl, nil
return fl, needsUser, nil
}
func buildPath(a []string) string {
@ -1101,3 +1171,7 @@ func FreeExp(ex *Exp) {
expPool.Put(ex)
}
}
func argErr(name, ty string) error {
return fmt.Errorf("value for argument '%s' must be a %s", name, ty)
}

View File

@ -0,0 +1,47 @@
package qcode
func GetQType(gql string) QType {
ic := false
for i := range gql {
b := gql[i]
switch {
case b == '#':
ic = true
case b == '\n':
ic = false
case !ic && b == '{':
return QTQuery
case !ic && al(b):
switch b {
case 'm', 'M':
return QTMutation
case 'q', 'Q':
return QTQuery
}
}
}
return -1
}
func al(b byte) bool {
return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || (b >= '0' && b <= '9')
}
func (qt QType) String() string {
switch qt {
case QTQuery:
return "query"
case QTMutation:
return "mutation"
case QTInsert:
return "insert"
case QTUpdate:
return "update"
case QTDelete:
return "delete"
case QTUpsert:
return "upsert"
}
return ""
}

View File

@ -0,0 +1,50 @@
package qcode
import "testing"
func TestGetQType(t *testing.T) {
type args struct {
gql string
}
type ts struct {
name string
args args
want QType
}
tests := []ts{
ts{
name: "query",
args: args{gql: " query {"},
want: QTQuery,
},
ts{
name: "mutation",
args: args{gql: " mutation {"},
want: QTMutation,
},
ts{
name: "default query",
args: args{gql: " {"},
want: QTQuery,
},
ts{
name: "default query with comment",
args: args{gql: `# query is good
{`},
want: QTQuery,
},
ts{
name: "failed query with comment",
args: args{gql: `# query is good query {`},
want: -1,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := GetQType(tt.args.gql); got != tt.want {
t.Errorf("GetQType() = %v, want %v", got, tt.want)
}
})
}
}

277
core/prepare.go Normal file
View File

@ -0,0 +1,277 @@
package core
import (
"bytes"
"context"
"crypto/sha1"
"database/sql"
"encoding/hex"
"fmt"
"io"
"strings"
"github.com/dosco/super-graph/core/internal/allow"
"github.com/dosco/super-graph/core/internal/psql"
"github.com/dosco/super-graph/core/internal/qcode"
"github.com/valyala/fasttemplate"
)
type preparedItem struct {
sd *sql.Stmt
args [][]byte
st stmt
roleArg bool
}
func (sg *SuperGraph) initPrepared() error {
ct := context.Background()
if sg.allowList.IsPersist() {
return nil
}
sg.prepared = make(map[string]*preparedItem)
tx, err := sg.db.BeginTx(ct, nil)
if err != nil {
return err
}
defer tx.Rollback() //nolint: errcheck
if err = sg.prepareRoleStmt(tx); err != nil {
return fmt.Errorf("prepareRoleStmt: %w", err)
}
if err := tx.Commit(); err != nil {
return err
}
success := 0
list, err := sg.allowList.Load()
if err != nil {
return err
}
for _, v := range list {
if len(v.Query) == 0 {
continue
}
err := sg.prepareStmt(v)
if err == nil {
success++
continue
}
// if len(v.Vars) == 0 {
// logger.Warn().Err(err).Msg(v.Query)
// } else {
// logger.Warn().Err(err).Msgf("%s %s", v.Vars, v.Query)
// }
}
// logger.Info().
// Msgf("Registered %d of %d queries from allow.list as prepared statements",
// success, len(list))
return nil
}
func (sg *SuperGraph) prepareStmt(item allow.Item) error {
query := item.Query
qb := []byte(query)
vars := item.Vars
qt := qcode.GetQType(query)
ct := context.Background()
tx, err := sg.db.BeginTx(ct, nil)
if err != nil {
return err
}
defer tx.Rollback() //nolint: errcheck
switch qt {
case qcode.QTQuery:
var stmts1 []stmt
var err error
if sg.abacEnabled {
stmts1, err = sg.buildMultiStmt(qb, vars)
} else {
stmts1, err = sg.buildRoleStmt(qb, vars, "user")
}
if err != nil {
return err
}
//logger.Debug().Msgf("Prepared statement 'query %s' (user)", item.Name)
err = sg.prepare(ct, tx, stmts1, stmtHash(item.Name, "user"))
if err != nil {
return err
}
if sg.anonExists {
// logger.Debug().Msgf("Prepared statement 'query %s' (anon)", item.Name)
stmts2, err := sg.buildRoleStmt(qb, vars, "anon")
if err == psql.ErrAllTablesSkipped {
return nil
}
if err != nil {
return err
}
err = sg.prepare(ct, tx, stmts2, stmtHash(item.Name, "anon"))
if err != nil {
return err
}
}
case qcode.QTMutation:
for _, role := range sg.conf.Roles {
// logger.Debug().Msgf("Prepared statement 'mutation %s' (%s)", item.Name, role.Name)
stmts, err := sg.buildRoleStmt(qb, vars, role.Name)
if err != nil {
// if len(item.Vars) == 0 {
// logger.Warn().Err(err).Msg(item.Query)
// } else {
// logger.Warn().Err(err).Msgf("%s %s", item.Vars, item.Query)
// }
continue
}
err = sg.prepare(ct, tx, stmts, stmtHash(item.Name, role.Name))
if err != nil {
return err
}
}
}
if err := tx.Commit(); err != nil {
return err
}
return nil
}
func (sg *SuperGraph) prepare(ct context.Context, tx *sql.Tx, st []stmt, key string) error {
finalSQL, am := processTemplate(st[0].sql)
sd, err := tx.Prepare(finalSQL)
if err != nil {
return err
}
sg.prepared[key] = &preparedItem{
sd: sd,
args: am,
st: st[0],
roleArg: len(st) > 1,
}
return nil
}
// nolint: errcheck
func (sg *SuperGraph) prepareRoleStmt(tx *sql.Tx) error {
var err error
if !sg.abacEnabled {
return nil
}
w := &bytes.Buffer{}
io.WriteString(w, `SELECT (CASE WHEN EXISTS (`)
io.WriteString(w, sg.conf.RolesQuery)
io.WriteString(w, `) THEN `)
io.WriteString(w, `(SELECT (CASE`)
for _, role := range sg.conf.Roles {
if len(role.Match) == 0 {
continue
}
io.WriteString(w, ` WHEN `)
io.WriteString(w, role.Match)
io.WriteString(w, ` THEN '`)
io.WriteString(w, role.Name)
io.WriteString(w, `'`)
}
io.WriteString(w, ` ELSE {{role}} END) FROM (`)
io.WriteString(w, sg.conf.RolesQuery)
io.WriteString(w, `) AS "_sg_auth_roles_query" LIMIT 1) `)
io.WriteString(w, `ELSE 'anon' END) FROM (VALUES (1)) AS "_sg_auth_filler" LIMIT 1; `)
roleSQL, _ := processTemplate(w.String())
sg.getRole, err = tx.Prepare(roleSQL)
if err != nil {
return err
}
return nil
}
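For reference, the statement assembled above has this overall shape; the role names and match expressions come from configuration, and processTemplate (below) later turns {{role}} into a positional parameter:

// SELECT (CASE WHEN EXISTS (<roles query>) THEN
//   (SELECT (CASE WHEN <match-1> THEN '<role-1>'
//                 WHEN <match-2> THEN '<role-2>'
//                 ELSE $1 END)
//    FROM (<roles query>) AS "_sg_auth_roles_query" LIMIT 1)
// ELSE 'anon' END) FROM (VALUES (1)) AS "_sg_auth_filler" LIMIT 1;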
func processTemplate(tmpl string) (string, [][]byte) {
st := struct {
vmap map[string]int
am [][]byte
i int
}{
vmap: make(map[string]int),
am: make([][]byte, 0, 5),
i: 0,
}
execFunc := func(w io.Writer, tag string) (int, error) {
if n, ok := st.vmap[tag]; ok {
return w.Write([]byte(fmt.Sprintf("$%d", n)))
}
st.am = append(st.am, []byte(tag))
st.i++
st.vmap[tag] = st.i
return w.Write([]byte(fmt.Sprintf("$%d", st.i)))
}
t1 := fasttemplate.New(tmpl, `'{{`, `}}'`)
ts1 := t1.ExecuteFuncString(execFunc)
t2 := fasttemplate.New(ts1, `{{`, `}}`)
ts2 := t2.ExecuteFuncString(execFunc)
return ts2, st.am
}
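processTemplate makes two fasttemplate passes, first over quoted '{{tag}}' occurrences and then over bare {{tag}} ones, giving each distinct tag a stable positional parameter. A worked example (query text hypothetical):

sql, args := processTemplate(
	`SELECT * FROM users WHERE id = '{{id}}' AND org = '{{org}}' AND '{{id}}' = id`)
// sql  == `SELECT * FROM users WHERE id = $1 AND org = $2 AND $1 = id`
// args == [][]byte{[]byte("id"), []byte("org")} -- one entry per distinct tag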
func (sg *SuperGraph) initAllowList() error {
var ac allow.Config
var err error
if len(sg.conf.AllowListFile) == 0 {
sg.conf.UseAllowList = false
sg.log.Printf("WRN allow list disabled no file specified")
}
if sg.conf.UseAllowList {
ac = allow.Config{CreateIfNotExists: true, Persist: true}
}
sg.allowList, err = allow.New(sg.conf.AllowListFile, ac)
if err != nil {
return fmt.Errorf("failed to initialize allow list: %w", err)
}
return nil
}
// nolint: errcheck
func stmtHash(name string, role string) string {
h := sha1.New()
io.WriteString(h, strings.ToLower(name))
io.WriteString(h, role)
return hex.EncodeToString(h.Sum(nil))
}

View File

@ -1,4 +1,4 @@
package serv
package core
import (
"bytes"
@ -8,24 +8,20 @@ import (
"sync"
"github.com/cespare/xxhash/v2"
"github.com/dosco/super-graph/core/internal/qcode"
"github.com/dosco/super-graph/jsn"
"github.com/dosco/super-graph/qcode"
)
func execRemoteJoin(st *stmt, data []byte, hdr http.Header) ([]byte, error) {
func (sg *SuperGraph) execRemoteJoin(st *stmt, data []byte, hdr http.Header) ([]byte, error) {
var err error
if len(data) == 0 || st.skipped == 0 {
return data, nil
}
sel := st.qc.Selects
h := xxhash.New()
// fetch the field name used within the db response json
// that are used to mark insertion points and the mapping between
// those field names and their select objects
fids, sfmap := parentFieldIds(h, sel, st.skipped)
fids, sfmap := sg.parentFieldIds(h, sel, st.skipped)
// fetch the field values of the marked insertion points
// these values contain the id to be used with fetching remote data
@ -34,10 +30,10 @@ func execRemoteJoin(st *stmt, data []byte, hdr http.Header) ([]byte, error) {
switch {
case len(from) == 1:
to, err = resolveRemote(hdr, h, from[0], sel, sfmap)
to, err = sg.resolveRemote(hdr, h, from[0], sel, sfmap)
case len(from) > 1:
to, err = resolveRemotes(hdr, h, from, sel, sfmap)
to, err = sg.resolveRemotes(hdr, h, from, sel, sfmap)
default:
return nil, errors.New("something wrong no remote ids found in db response")
@ -57,7 +53,7 @@ func execRemoteJoin(st *stmt, data []byte, hdr http.Header) ([]byte, error) {
return ob.Bytes(), nil
}
func resolveRemote(
func (sg *SuperGraph) resolveRemote(
hdr http.Header,
h *xxhash.Digest,
field jsn.Field,
@ -82,7 +78,7 @@ func resolveRemote(
// to find the resolver to use for this relationship
k2 := mkkey(h, s.Name, p.Name)
r, ok := rmap[k2]
r, ok := sg.rmap[k2]
if !ok {
return nil, nil
}
@ -119,7 +115,7 @@ func resolveRemote(
return to, nil
}
func resolveRemotes(
func (sg *SuperGraph) resolveRemotes(
hdr http.Header,
h *xxhash.Digest,
from []jsn.Field,
@ -150,7 +146,7 @@ func resolveRemotes(
// to find the resolver to use for this relationship
k2 := mkkey(h, s.Name, p.Name)
r, ok := rmap[k2]
r, ok := sg.rmap[k2]
if !ok {
return nil, nil
}
@ -195,3 +191,59 @@ func resolveRemotes(
return to, cerr
}
func (sg *SuperGraph) parentFieldIds(h *xxhash.Digest, sel []qcode.Select, skipped uint32) (
[][]byte,
map[uint64]*qcode.Select) {
c := 0
for i := range sel {
s := &sel[i]
if isSkipped(skipped, uint32(s.ID)) {
c++
}
}
// list of keys (and their related values) to extract from
// the db json response
fm := make([][]byte, c)
// mapping between the above extracted key and a Select
// object
sm := make(map[uint64]*qcode.Select, c)
n := 0
for i := range sel {
s := &sel[i]
if !isSkipped(skipped, uint32(s.ID)) {
continue
}
p := sel[s.ParentID]
k := mkkey(h, s.Name, p.Name)
if r, ok := sg.rmap[k]; ok {
fm[n] = r.IDField
n++
k := xxhash.Sum64(r.IDField)
sm[k] = s
}
}
return fm, sm
}
func isSkipped(n uint32, pos uint32) bool {
return ((n & (1 << pos)) != 0)
}
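isSkipped reads one bit per select ID out of the skipped bitmask, for example:

// skipped == 0b0110 marks the selects with IDs 1 and 2 as skipped by
// the SQL layer, i.e. left for remote resolution:
//   isSkipped(0b0110, 1) == true
//   isSkipped(0b0110, 0) == false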
func colsToList(cols []qcode.Column) []string {
var f []string
for i := range cols {
f = append(f, cols[i].Name)
}
return f
}

View File

@ -1,19 +1,14 @@
package serv
package core
import (
"fmt"
"io/ioutil"
"net/http"
"net/http/httputil"
"strings"
"github.com/cespare/xxhash/v2"
"github.com/dosco/super-graph/core/internal/psql"
"github.com/dosco/super-graph/jsn"
"github.com/dosco/super-graph/psql"
)
var (
rmap map[uint64]*resolvFn
)
type resolvFn struct {
@ -22,19 +17,25 @@ type resolvFn struct {
Fn func(h http.Header, id []byte) ([]byte, error)
}
func initResolvers() error {
rmap = make(map[uint64]*resolvFn)
func (sg *SuperGraph) initResolvers() error {
var err error
sg.rmap = make(map[uint64]*resolvFn)
for _, t := range conf.Tables {
err := initRemotes(t)
for _, t := range sg.conf.Tables {
err = sg.initRemotes(t)
if err != nil {
return err
break
}
}
if err != nil {
return fmt.Errorf("failed to initialize resolvers: %v", err)
}
return nil
}
func initRemotes(t configTable) error {
func (sg *SuperGraph) initRemotes(t Table) error {
h := xxhash.New()
for _, r := range t.Remotes {
@ -45,7 +46,7 @@ func initRemotes(t configTable) error {
// if no table column specified in the config then
// use the primary key of the table as the id
if len(idcol) == 0 {
pcol, err := pcompile.IDColumn(t.Name)
pcol, err := sg.pc.IDColumn(t.Name)
if err != nil {
return err
}
@ -60,7 +61,7 @@ func initRemotes(t configTable) error {
val.Left.Col = idcol
val.Right.Col = idk
err := pcompile.AddRelationship(strings.ToLower(r.Name), t.Name, val)
err := sg.pc.AddRelationship(sanitize(r.Name), t.Name, val)
if err != nil {
return err
}
@ -81,16 +82,16 @@ func initRemotes(t configTable) error {
}
// index resolver obj by parent and child names
rmap[mkkey(h, r.Name, t.Name)] = rf
sg.rmap[mkkey(h, r.Name, t.Name)] = rf
// index resolver obj by IDField
rmap[xxhash.Sum64(rf.IDField)] = rf
sg.rmap[xxhash.Sum64(rf.IDField)] = rf
}
return nil
}
func buildFn(r configRemote) func(http.Header, []byte) ([]byte, error) {
func buildFn(r Remote) func(http.Header, []byte) ([]byte, error) {
reqURL := strings.Replace(r.URL, "$id", "%s", 1)
client := &http.Client{}
@ -113,28 +114,25 @@ func buildFn(r configRemote) func(http.Header, []byte) ([]byte, error) {
req.Header.Set(v, hdr.Get(v))
}
logger.Debug().Str("uri", uri).Msg("Remote Join")
res, err := client.Do(req)
if err != nil {
errlog.Error().Err(err).Msgf("Failed to connect to: %s", uri)
return nil, err
return nil, fmt.Errorf("failed to connect to '%s': %v", uri, err)
}
defer res.Body.Close()
if r.Debug {
reqDump, err := httputil.DumpRequestOut(req, true)
if err != nil {
return nil, err
}
// reqDump, err := httputil.DumpRequestOut(req, true)
// if err != nil {
// return nil, err
// }
resDump, err := httputil.DumpResponse(res, true)
if err != nil {
return nil, err
}
// resDump, err := httputil.DumpResponse(res, true)
// if err != nil {
// return nil, err
// }
logger.Debug().Msgf("Remote Request Debug:\n%s\n%s",
reqDump, resDump)
// logger.Debug().Msgf("Remote Request Debug:\n%s\n%s",
// reqDump, resDump)
}
if res.StatusCode != 200 {

15
core/utils.go Normal file
View File

@ -0,0 +1,15 @@
package core
import (
"github.com/cespare/xxhash/v2"
)
// nolint: errcheck
func mkkey(h *xxhash.Digest, k1 string, k2 string) uint64 {
h.WriteString(k1)
h.WriteString(k2)
v := h.Sum64()
h.Reset()
return v
}

View File

@ -1,7 +1,10 @@
version: '3.4'
services:
db:
image: postgres
image: postgres:12
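# the newer official postgres images refuse to start without a
# password, hence the pinned tag and explicit credentials below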
environment:
POSTGRES_USER: postgres
POSTGRES_PASSWORD: postgres
ports:
- "5432:5432"

View File

@ -1,267 +0,0 @@
<template>
<div>
<main aria-labelledby="main-title" >
<Navbar />
<div class="container mx-auto">
<div class="flex flex-col md:flex-row justify-between px-10 md:px-20">
<div class="bg-bottom bg-no-repeat bg-cover">
<div class="text-center md:text-left pt-24">
<h1 v-if="data.heroText !== null" class="text-5xl font-bold text-black pb-0 uppercase">
<img src="/super-graph.png" width="250" />
</h1>
<p class="text-4xl text-gray-800 leading-tight mt-1">
Build web products faster. Secure high performance GraphQL
</p>
<NavLink
class="inline-block px-4 py-3 my-8 bg-blue-600 text-blue-100 font-bold rounded"
:item="actionLink"
/>
<a
class="px-4 py-3 my-8 border-2 border-gray-500 text-gray-600 font-bold rounded"
href="https://github.com/dosco/super-graph"
target="_blank"
>Github</a>
</div>
</div>
<div class="py-10 md:p-20">
<img src="/hologram.svg" class="h-64">
</div>
</div>
</div>
<div>
<div
class="flex flex-wrap mx-2 md:mx-20"
v-if="data.features && data.features.length"
>
<div
class="w-2/4 md:w-1/3 shadow"
v-for="(feature, index) in data.features"
:key="index"
>
<div class="p-8">
<h2 class="md:text-xl text-blue-800 font-medium border-0 mb-1">{{ feature.title }}</h2>
<p class="md:text-xl text-gray-700 leading-snug">{{ feature.details }}</p>
</div>
</div>
</div>
</div>
<div class="bg-gray-100 mt-10">
<div class="container mx-auto px-10 md:px-0 py-32">
<div class="pb-8 hidden md:block ">
<img src="arch-basic.svg">
</div>
<h1 class="uppercase font-semibold text-xl text-blue-800 mb-4">
What is {{ data.heroText }}?
</h1>
<div class="text-2xl md:text-3xl">
Super Graph can automatically learn a Postgres database and instantly serve it as a fast and secure GraphQL API. It comes with tools to create a new app and manage its database. You get it all: a very productive developer experience and a highly scalable app backend. It's designed to work well on serverless platforms by Google, AWS, Microsoft, etc. The goal is to save you a ton of time and money so you can focus on your app's core value.
</div>
</div>
</div>
<div class="flex flex-wrap">
<div class="md:w-2/4">
<img src="/graphql.png">
</div>
<div class="md:w-2/4">
<img src="/json.png">
</div>
</div>
<div class="mt-10 py-10 md:py-20">
<div class="container mx-auto px-10 md:px-0">
<h1 class="uppercase font-semibold text-xl text-blue-800 mb-4">
How to use {{ data.heroText }}?
</h1>
<div class="text-2xl md:text-3xl">
<small class="text-sm">Use the below command to download and install Super Graph. You will need Go 1.13 or above</small>
<pre>&#8227; GO111MODULE=on go get -u github.com/dosco/super-graph</pre>
<small class="text-sm">Create a new app and change to its directory</small>
<pre>&#8227; super-graph new blog; cd blog</pre>
<small class="text-sm">Setup the app database and seed it with fake data. Docker compose will start a Postgres database for your app</small>
<pre>&#8227; docker-compose run blog_api ./super-graph db:setup</pre>
<small class="text-sm">And finally launch Super Graph configured for your app</small>
<pre>&#8227; docker-compose up</pre>
</div>
</div>
</div>
<div class="bg-gray-100 mt-10">
<div class="container mx-auto px-10 md:px-0 py-32">
<h1 class="uppercase font-semibold text-xl text-blue-800 mb-4">
The story of {{ data.heroText }}
</h1>
<div class="text-2xl md:text-3xl">
After working on several products over my career I find that we spend way too much time building API backends. Most APIs also require constant updating, and this costs real time and money.<br><br>
It's always the same thing: figure out what the UI needs, then build an endpoint for it. Most API code involves struggling with an ORM to query a database and mangle the data into a shape that the UI expects to see.<br><br>
I didn't want to write this code anymore; I wanted the computer to do it. Enter GraphQL. To me it sounded great, but it still required me to write all the same database query code.<br><br>
Having worked with compilers before, I saw this as a compiler problem. Why not build a compiler that converts GraphQL to highly efficient SQL?<br><br>
This compiler is what sits at the heart of Super Graph, with layers of useful functionality around it like authentication, remote joins, Rails integration, database migrations and everything else needed for you to build production-ready apps with it.
</div>
</div>
</div>
<div class="overflow-hidden bg-indigo-900">
<div class="container mx-auto py-20">
<img src="/super-graph-web-ui.png">
</div>
</div>
<div class="py-10 md:py-20">
<div class="container mx-auto px-10 md:px-0">
<h1 class="uppercase font-semibold text-xl text-blue-800 mb-4">
Try it with a demo Rails app
</h1>
<div class="text-2xl md:text-3xl">
<small class="text-sm">Download the Docker compose config for the demo</small>
<pre>&#8227; curl -L -o demo.yml https://bit.ly/2mq05lW</pre>
<small class="text-sm">Setup the demo database</small>
<pre>&#8227; docker-compose -f demo.yml run rails_app rake db:create db:migrate db:seed</pre>
<small class="text-sm">Run the demo</small>
<pre>&#8227; docker-compose -f demo.yml up</pre>
<small class="text-sm">Signin to the demo app (user1@demo.com / 123456)</small>
<pre>&#8227; open http://localhost:3000</pre>
<small class="text-sm">Try the super graph web ui</small>
<pre>&#8227; open http://localhost:8080</pre>
</div>
</div>
</div>
<div class="border-t py-10">
<div class="block md:hidden w-100">
<iframe src='https://www.youtube.com/embed/MfPL2A-DAJk' frameborder='0' allowfullscreen style="width: 100%; height: 250px;">
</iframe>
</div>
<div class="container mx-auto flex flex-col md:flex-row items-center">
<div class="w-100 md:w-1/2 p-8">
<h1 class="text-2xl font-bold">GraphQL the future of APIs</h1>
<p class="text-xl text-gray-600">Keeping a tight and fast development loop helps you iterate quickly. Leveraging technology like Super Graph focuses your team on building the core product and not reinventing wheels. GraphQL eliminate the dependency on the backend engineering and keeps the things moving fast</p>
</div>
<div class="hidden md:block md:w-1/2">
<style>.embed-container { position: relative; padding-bottom: 56.25%; height: 0; overflow: hidden; max-width: 100%; } .embed-container iframe, .embed-container object, .embed-container embed { position: absolute; top: 0; left: 0; width: 100%; height: 100%; }</style>
<div class="embed-container shadow">
<iframe src='https://www.youtube.com/embed/MfPL2A-DAJk' frameborder='0' allowfullscreen >
</iframe>
</div>
</div>
</div>
</div>
<div class="bg-gray-200 mt-10">
<div class="container mx-auto px-10 md:px-0 py-32">
<h1 class="uppercase font-semibold text-xl text-blue-800 mb-4">
Build Secure Apps
</h1>
<div class="flex flex-col text-2xl md:text-3xl">
<card className="mb-1 p-8">
<template #image><font-awesome-icon icon="portrait" class="text-red-500" /></template>
<template #title>Role Based Access Control</template>
<template #body>Dynamically assign roles like admin, manager or anon to specific users. Generate role-specific queries at runtime. For example, admins can fetch all users while others can only fetch their own user.</template>
</card>
<card className="mb-1 p-8">
<template #image><font-awesome-icon icon="shield-alt" class="text-blue-500" /></template>
<template #title>Prepared Statements</template>
<template #body>An additional layer of protection from a variety of security issues like SQL injection. In production mode all queries are precompiled into prepared statements so only those can be executed. This also significantly speeds up all queries.</template>
</card>
<card className="p-8">
<template #image><font-awesome-icon icon="lock" class="text-green-500"/></template>
<template #title>Fuzz Tested Code</template>
<template #body>Fuzzing is done by complex software that generates massive amounts of random input to detect if code is free of security bugs. Google uses fuzzing to protect everything from their cloud infrastructure to the Chrome browser.</template>
</card>
</div>
</div>
</div>
<div class="">
<div class="container mx-auto px-10 md:px-0 py-32">
<h1 class="uppercase font-semibold text-xl text-blue-800 mb-4">
More Features
</h1>
<div class="flex flex-col md:flex-row text-2xl md:text-3xl">
<card className="mr-0 md:mr-1 mb-1 flex-col w-100 md:w-1/3 items-center">
<template #image><img src="/arch-remote-join.svg" class="h-64"></template>
<template #title>Remote Joins</template>
<template #body>A powerful feature that allows you to query your database and remote REST APIs at the same time. For example, fetch a user from the DB, their tweets from Twitter and their payments from Stripe with a single GraphQL query.</template>
</card>
<card className="mr-0 md:mr-1 mb-1 flex-col w-100 md:w-1/3">
<template #image><img src="/arch-search.svg" class="h-64"></template>
<template #title>Full Text Search</template>
<template #body>Postgres has excellent full-text search built-in. You don't need another expensive service. Super Graph makes it super easy to use with keyword ranking and highlighting also supported.</template>
</card>
<card className="mb-1 flex-col w-100 md:w-1/3">
<template #image><img src="/arch-bulk.svg" class="h-64"></template>
<template #title>Bulk Inserts</template>
<template #body>Efficiently insert, update and delete multiple items with a single query. Upserts are also supported.</template>
</card>
</div>
</div>
</div>
<div
class="mx-auto text-center py-8"
v-if="data.footer"
>
{{ data.footer }}
</div>
</main>
</div>
</template>
<script>
import NavLink from '@theme/components/NavLink.vue'
import Navbar from '@theme/components/Navbar.vue'
import Card from './Card.vue'
import { library } from '@fortawesome/fontawesome-svg-core'
import { faPortrait, faShieldAlt, faLock } from '@fortawesome/free-solid-svg-icons'
import { FontAwesomeIcon } from '@fortawesome/vue-fontawesome'
library.add(faPortrait, faShieldAlt, faLock)
export default {
components: { NavLink, Navbar, FontAwesomeIcon, Card },
computed: {
data () {
return this.$page.frontmatter
},
actionLink () {
return {
link: this.data.actionLink,
text: this.data.actionText
}
}
}
}
</script>

View File

@ -1,5 +1,5 @@
<template>
<div class="shadow bg-white p-4 flex items-start" :class="className">
<div class="shadow p-4 flex items-start" :class="className">
<slot name="image"></slot>
<div class="pl-4">
<h2 class="p-0">

View File

@ -0,0 +1,436 @@
<template>
<div>
<main aria-labelledby="main-title" >
<Navbar />
<div style="height: 3.6rem"></div>
<div class="container mx-auto pt-4">
<div class="text-center">
<div class="text-center text-3xl md:text-4xl text-black leading-tight font-semibold">
Fetch data without code
</div>
<NavLink
class="inline-block px-4 py-3 my-8 bg-blue-600 text-white font-bold rounded"
:item="actionLink"
/>
<a
class="px-4 py-3 my-8 border-2 border-blue-600 text-blue-600 font-bold rounded"
href="https://github.com/dosco/super-graph"
target="_blank"
>Github</a>
</div>
</div>
<div class="container mx-auto mb-8 mt-0 md:mt-20 bg-green-100">
<div class="flex flex-wrap">
<div class="w-100 md:w-1/2 border border-green-500 text-gray-6 00 text-sm md:text-lg p-6">
<div class="text-xl font-bold pb-4">Before, struggle with SQL</div>
<pre>
type User struct {
gorm.Model
Profile Profile
ProfileID int
}
type Profile struct {
gorm.Model
Name string
}
db.Model(&user).
Related(&profile).
Association("Languages").
Where("name in (?)", []string{"test"}).
Joins("left join emails on emails.user_id = users.id")
Find(&users)
and more ...
</pre>
</div>
<div class="w-100 md:w-1/2 border border-l md:border-l-0 border-green-500 text-blue-900 text-sm md:text-lg p-6">
<div class="text-xl font-bold pb-4">With Super Graph, just ask.</div>
<pre>
query {
user(id: 5) {
id
first_name
last_name
picture_url
posts(first: 20, order_by: { score: desc }) {
slug
title
created_at
votes_total
votes { created_at }
author { id name }
tags { id name }
}
posts_cursor
}
}
</pre>
</div>
</div>
</div>
<div class="mt-0 md:mt-20">
<div
class="flex flex-wrap mx-2 md:mx-20"
v-if="data.features && data.features.length"
>
<div
class="w-2/4 md:w-1/3 shadow"
v-for="(feature, index) in data.features"
:key="index"
>
<div class="p-8">
<h2 class="text-lg uppercase border-0">{{ feature.title }}</h2>
<div class="text-xl text-gray-900 leading-snug">{{ feature.details }}</div>
</div>
</div>
</div>
</div>
<div class="pt-0 md:pt-20">
<div class="container mx-auto p-10">
<div class="flex justify-center pb-20">
<img src="arch-basic.svg">
</div>
<div class="text-2xl md:text-3xl">
Super Graph is a library and service that fetches data from any Postgres database using just GraphQL. No more struggling with ORMs and SQL to wrangle data out of the database. No more having to figure out the right joins or making inefficient queries. However complex the GraphQL, Super Graph will always generate just one single efficient SQL query. The goal is to save you time and money so you can focus on your app's core value.
</div>
</div>
</div>
<div class="pt-20">
<div class="container mx-auto px-10 md:px-0">
<h1 class="uppercase font-semibold text-2xl text-blue-800 text-center">
Try Super Graph
</h1>
<h1 class="uppercase font-semibold text-lg text-gray-800">
Deploy as a service using docker
</h1>
<div class="p-4 rounded bg-black text-white">
<pre>$ git clone https://github.com/dosco/super-graph && cd super-graph && make install</pre>
<pre>$ super-graph new blog; cd blog</pre>
<pre>$ docker-compose run blog_api ./super-graph db:setup</pre>
<pre>$ docker-compose up</pre>
</div>
<h1 class="uppercase font-semibold text-lg text-gray-800">
Or use it with your own code
</h1>
<div class="text-md">
<pre class="p-4 rounded bg-black text-white">
package main

import (
	"context"
	"database/sql"
	"fmt"
	"log"

	"github.com/dosco/super-graph/config"
	"github.com/dosco/super-graph/core"
	_ "github.com/jackc/pgx/v4/stdlib"
)

func main() {
	// open a standard database/sql connection using the pgx driver
	db, err := sql.Open("pgx", "postgres://postgres:@localhost:5432/example_db")
	if err != nil {
		log.Fatal(err)
	}

	// load the Super Graph config folder
	conf, err := config.NewConfig("./config")
	if err != nil {
		log.Fatal(err)
	}

	sg, err := core.NewSuperGraph(conf, db)
	if err != nil {
		log.Fatal(err)
	}

	graphqlQuery := `
	query {
		posts {
			id
			title
		}
	}`

	// compile and execute the GraphQL query
	res, err := sg.GraphQL(context.Background(), graphqlQuery, nil)
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(string(res.Data))
}
</pre>
</div>
</div>
</div>
<div class="pt-0 md:pt-20">
<div class="container mx-auto px-10 md:px-0 py-32">
<h1 class="uppercase font-semibold text-xl text-blue-800 mb-4">
The story of {{ data.heroText }}
</h1>
<div class="text-2xl md:text-3xl">
After working on several products over my career, I've found that we spend way too much time building API backends. Most APIs also require constant updating, and this costs real time and money.<br><br>
It's always the same thing: figure out what the UI needs, then build an endpoint for it. Most API code involves struggling with an ORM to query a database and mangle the data into a shape the UI expects to see.<br><br>
I didn't want to write this code anymore; I wanted the computer to do it. Enter GraphQL. To me it sounded great, but it still required me to write all the same database query code.<br><br>
Having worked with compilers before, I saw this as a compiler problem. Why not build a compiler that converts GraphQL to highly efficient SQL?<br><br>
This compiler is what sits at the heart of Super Graph, with layers of useful functionality around it like authentication, remote joins, Rails integration, database migrations and everything else needed for you to build production-ready apps with it.
</div>
</div>
</div>
<div class="overflow-hidden bg-indigo-900">
<div class="container mx-auto py-20">
<img src="/super-graph-web-ui.png">
</div>
</div>
<!--
<div class="py-10 md:py-20">
<div class="container mx-auto px-10 md:px-0">
<h1 class="uppercase font-semibold text-xl text-blue-800 mb-4">
Try it with a demo Rails app
</h1>
<div class="text-2xl md:text-3xl">
<small class="text-sm">Download the Docker compose config for the demo</small>
<pre>&#8227; curl -L -o demo.yml https://bit.ly/2FZS0uw</pre>
<small class="text-sm">Setup the demo database</small>
<pre>&#8227; docker-compose -f demo.yml run rails_app rake db:create db:migrate db:seed</pre>
<small class="text-sm">Run the demo</small>
<pre>&#8227; docker-compose -f demo.yml up</pre>
<small class="text-sm">Signin to the demo app (user1@demo.com / 123456)</small>
<pre>&#8227; open http://localhost:3000</pre>
<small class="text-sm">Try the super graph web ui</small>
<pre>&#8227; open http://localhost:8080</pre>
</div>
</div>
</div>
-->
<div class="pt-0 md:pt-20">
<div class="block md:hidden w-100">
<iframe src='https://www.youtube.com/embed/MfPL2A-DAJk' frameborder='0' allowfullscreen style="width: 100%; height: 250px;">
</iframe>
</div>
<div class="container mx-auto flex flex-col md:flex-row items-center">
<div class="w-100 md:w-1/2 p-8">
<h1 class="text-2xl font-bold">GraphQL the future of APIs</h1>
<p class="text-xl text-gray-600">Keeping a tight and fast development loop helps you iterate quickly. Leveraging technology like Super Graph focuses your team on building the core product and not reinventing wheels. GraphQL eliminate the dependency on the backend engineering and keeps the things moving fast</p>
</div>
<div class="hidden md:block md:w-1/2">
<style>.embed-container { position: relative; padding-bottom: 56.25%; height: 0; overflow: hidden; max-width: 100%; } .embed-container iframe, .embed-container object, .embed-container embed { position: absolute; top: 0; left: 0; width: 100%; height: 100%; }</style>
<div class="embed-container shadow">
<iframe src='https://www.youtube.com/embed/MfPL2A-DAJk' frameborder='0' allowfullscreen >
</iframe>
</div>
</div>
</div>
</div>
<div class="container mx-auto pt-0 md:pt-20">
<div class="flex flex-wrap bg-green-100">
<div class="w-100 md:w-1/2 border border-green-500 text-gray-6 00 text-sm md:text-lg p-6">
<div class="text-xl font-bold pb-4">No more joins joins, json, orms, just use GraphQL. Fetch all the data want in the structure you need.</div>
<pre>
query {
thread {
slug
title
published
createdAt : created_at
totalVotes : cached_votes_total
totalPosts : cached_posts_total
vote : thread_vote(where: { user_id: { eq: $user_id } }) {
created_at
}
topics {
slug
name
}
author : me {
slug
}
posts(first: 1, order_by: { score: desc }) {
slug
body
published
createdAt : created_at
totalVotes : cached_votes_total
totalComments : cached_comments_total
vote {
created_at
}
author : user {
slug
firstName : first_name
lastName : last_name
}
}
posts_cursor
}
}
</pre>
</div>
<div class="w-100 md:w-1/2 border border-l md:border-l-0 border-green-500 text-blue-900 text-sm md:text-lg p-6">
<div class="text-xl font-bold pb-4">Instant results using a single highly optimized SQL. It's just that simple.</div>
<pre>
{
"data": {
"thread": {
"slug": "eveniet-ex-24",
"vote": null,
"posts": [
{
"body": "Dolor laborum harum sed sit est ducimus temporibus velit non nobis repudiandae nobis suscipit commodi voluptatem debitis sed voluptas sequi officia.",
"slug": "illum-in-voluptas-1418",
"vote": null,
"author": {
"slug": "sigurd-kemmer",
"lastName": "Effertz",
"firstName": "Brandt"
},
"createdAt": "2020-04-07T04:22:42.115874+00:00",
"published": true,
"totalVotes": 0,
"totalComments": 2
}
],
"title": "In aut qui deleniti quia dolore quasi porro tenetur voluptatem ut adita alias fugit explicabo.",
"author": null,
"topics": [
{
"name": "CloudRun",
"slug": "cloud-run"
},
{
"name": "Postgres",
"slug": "postgres"
}
],
"createdAt": "2020-04-07T04:22:38.099482+00:00",
"published": true,
"totalPosts": 24,
"totalVotes": 0,
"posts_cursor": "mpeBl6L+QfJHc3cmLkLDj9pOdEZYTt5KQtLsazG3TLITB3hJhg=="
}
}
}
</pre>
</div>
</div>
</div>
<div class="pt-0 md:pt-20">
<div class="container mx-auto px-10 md:px-0 py-32">
<h1 class="uppercase font-semibold text-xl text-blue-800 mb-4">
Build Secure Apps
</h1>
<div class="flex flex-col text-2xl md:text-3xl">
<card className="mb-1 p-8">
<template #image><font-awesome-icon icon="portrait" class="text-red-500" /></template>
<template #title>Role Based Access Control</template>
<template #body>Dynamically assign roles like admin, manager or anon to specific users. Generate role-specific queries at runtime. For example, admins can fetch all users while others can only fetch their own user.</template>
</card>
<card className="mb-1 p-8">
<template #image><font-awesome-icon icon="shield-alt" class="text-blue-500" /></template>
<template #title>Prepared Statements</template>
<template #body>An additional layer of protection from a variety of security issues like SQL injection. In production mode all queries are precompiled into prepared statements so only those can be executed. This also significantly speeds up all queries.</template>
</card>
<card className="p-8">
<template #image><font-awesome-icon icon="lock" class="text-green-500"/></template>
<template #title>Fuzz Tested Code</template>
<template #body>Fuzzing is done by complex software that generates massive amounts of random input to detect if code is free of security bugs. Google uses fuzzing to protect everything from their cloud infrastructure to the Chrome browser.</template>
</card>
</div>
</div>
</div>
<div class="pt-0 md:py -20">
<div class="container mx-auto">
<h1 class="uppercase font-semibold text-xl text-blue-800 mb-4">
More Features
</h1>
<div class="flex flex-col md:flex-row text-2xl md:text-3xl">
<card className="mr-0 md:mr-1 mb-1 flex-col w-100 md:w-1/3 items-center">
<!-- <template #image><img src="/arch-remote-join.svg" class="h-64"></template> -->
<template #title>Remote Joins</template>
<template #body>A powerful feature that allows you to query your database and remote REST APIs at the same time. For example, fetch a user from the DB, their tweets from Twitter and their payments from Stripe with a single GraphQL query.</template>
</card>
<card className="mr-0 md:mr-1 mb-1 flex-col w-100 md:w-1/3">
<!-- <template #image><img src="/arch-search.svg" class="h-64"></template> -->
<template #title>Full Text Search</template>
<template #body>Postgres has excellent full-text search built-in. You don't need another expensive service. Super Graph makes it super easy to use with keyword ranking and highlighting also supported.</template>
</card>
<card className="mb-1 flex-col w-100 md:w-1/3">
<!-- <template #image><img src="/arch-bulk.svg" class="h-64"></template> -->
<template #title>Bulk Inserts</template>
<template #body>Efficiently insert, update and delete multiple items with a single query. Upserts are also supported.</template>
</card>
</div>
</div>
</div>
<div
class="mx-auto text-center py-8"
v-if="data.footer"
>
{{ data.footer }}
</div>
</main>
</div>
</template>
<script>
import NavLink from '@theme/components/NavLink.vue'
import Navbar from '@theme/components/Navbar.vue'
import Card from './Card.vue'
import { library } from '@fortawesome/fontawesome-svg-core'
import { faPortrait, faShieldAlt, faLock } from '@fortawesome/free-solid-svg-icons'
import { FontAwesomeIcon } from '@fortawesome/vue-fontawesome'
library.add(faPortrait, faShieldAlt, faLock)
export default {
components: { NavLink, Navbar, FontAwesomeIcon, Card },
computed: {
data () {
return this.$page.frontmatter
},
actionLink () {
return {
link: this.data.actionLink,
text: this.data.actionText
}
}
}
}
</script>

View File

@ -1,6 +1,6 @@
let ogprefix = 'og: http://ogp.me/ns#'
let title = 'Super Graph'
let description = 'An instant GraphQL API for your app. No code needed.'
let description = 'Fetch data without code'
let color = '#f42525'
module.exports = {

View File

@ -1,6 +1,9 @@
@tailwind base;
@css {
body, .navbar, .navbar .links {
@apply bg-white text-black border-0 !important;
}
h1 {
@apply font-semibold text-3xl border-0 py-4
}

View File

@ -19,8 +19,8 @@ features:
details: Compiles your GraphQL into a fast SQL query in realtime.
- title: Built in GO
details: Go is a language created at Google to build fast and secure web services.
- title: Ruby-on-Rails
details: Can read Rails cookies and supports rails database conventions.
- title: In your own Code
details: Use as a library in your own Go code. Build faster, save time and money.
- title: Serverless
details: Instant startup for scale to zero environments like Google Cloud Run, App Engine, AWS Lambda
- title: Free and Open Source

View File

@ -4,9 +4,9 @@ sidebar: auto
# Guide to Super Graph
Super Graph is a micro-service that instantly and without code gives you a high performance and secure GraphQL API. Your GraphQL queries are auto translated into a single fast SQL query. No more writing API code as you develop your web frontend just make the query you need and Super Graph will do the rest.
Super Graph is a service that instantly and without code gives you a high performance and secure GraphQL API. Your GraphQL queries are auto translated into a single fast SQL query. No more spending weeks or months writing backend API code. Just make the query you need and Super Graph will do the rest.
Super Graph has a rich feature set like integrating with your existing Ruby on Rails apps, joining your DB with data from remote APIs, Role and Attribute based access control, Supoport for JWT tokens, DB migrations, seeding and a lot more.
Super Graph has a rich feature set like integrating with your existing Ruby on Rails apps, joining your DB with data from remote APIs, Role and Attribute based access control, Support for JWT tokens, DB migrations, seeding and a lot more.
## Features
@ -31,14 +31,20 @@ Super Graph has a rich feature set like integrating with your existing Ruby on R
## Try the demo app
```bash
# download the Docker compose config for the demo
curl -L -o demo.yml https://bit.ly/2mq05lW
# clone the repository
git clone https://github.com/dosco/super-graph
# run db in background
docker-compose up -d db
# see logs and wait until DB is really UP
docker-compose logs db
# setup the demo rails app & database and run it
docker-compose -f demo.yml run rails_app rake db:create db:migrate db:seed
docker-compose run rails_app rake db:create db:migrate db:seed
# run the demo
docker-compose -f demo.yml up
docker-compose up
# signin to the demo app (user1@demo.com / 123456)
open http://localhost:3000
@ -47,14 +53,14 @@ open http://localhost:3000
open http://localhost:8080
```
::: warning DEMO REQUIREMENTS
::: tip DEMO REQUIREMENTS
This demo requires `docker`. You can either install it using `brew` or from the docker website [https://docs.docker.com/docker-for-mac/install/](https://docs.docker.com/docker-for-mac/install/)
:::
#### Trying out GraphQL
We currently fully support queries and mutations. Support for `subscriptions` is work in progress. For example the below GraphQL query would fetch two products that belong to the current user where the price is greater than 10.
We fully support queries and mutations. For example the below GraphQL query would fetch two products that belong to the current user where the price is greater than 10.
#### GQL Query
@ -76,32 +82,6 @@ query {
}
```
In another example, the below GraphQL mutation would insert a product into the database. The first part of the example is the variable data and the second half is the GraphQL mutation. For mutations, data always has to be passed as a variable.
```json
{
"data": {
"name": "Art of Computer Programming",
"description": "The Art of Computer Programming (TAOCP) is a comprehensive monograph written by computer scientist Donald Knuth",
"price": 30.5
}
}
```
```graphql
mutation {
product(insert: $data) {
id
name
}
}
```
The above GraphQL query returns the JSON result below. It handles all kinds of complexity without you having to write a line of code.
For example, there is a where clause with a greater-than (`gt`) filter and a limit clause on a child field. The `avatar` field is renamed to `picture`. The `password` field is blocked and not returned. Finally, the relationship between the `users` table and the `products` table is auto-discovered and used.
#### JSON Result
```json
@ -128,19 +108,107 @@ For example there is a while greater than `gt` and a limit clause on a child fie
}
```
#### Try with an authenticated user
::: tip Testing with a user
In development mode you can use the `X-User-ID: 4` header to set a user id so you don't have to worry about cookies etc. This can be set using the *HTTP Headers* tab at the bottom of the web UI.
:::
In development mode you can use the `X-User-ID: 4` header to set a user id so you don't have to worry about cookies etc. This can be set using the *HTTP Headers* tab at the bottom of the web UI you'll see when you visit the above link. You can also directly run queries from the commandline like below.
In another example, the below GraphQL mutation would insert a product into the database. The first part of the example is the variable data and the second half is the GraphQL mutation. For mutations, data always has to be passed as a variable.
#### Querying the GQL endpoint
```json
{
"data": {
"name": "Art of Computer Programming",
"description": "The Art of Computer Programming (TAOCP) is a comprehensive monograph written by computer scientist Donald Knuth",
"price": 30.5
}
}
```
```graphql
mutation {
  product(insert: $data) {
    id
    name
  }
}
```

```bash
# fetch the response json directly from the endpoint using user id 5
curl 'http://localhost:8080/api/v1/graphql' \
  -H 'content-type: application/json' \
  -H 'X-User-ID: 5' \
  --data-binary '{"query":"{ products { name price users { email }}}"}'
```
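If you would rather call the endpoint from code than from curl, here is an equivalent sketch in Go using only the standard library; the URL, `X-User-ID` header and query are taken from the curl example above.

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
)

func main() {
	// Build the same request body the curl example above sends.
	body, _ := json.Marshal(map[string]string{
		"query": "{ products { name price users { email }}}",
	})

	req, err := http.NewRequest("POST", "http://localhost:8080/api/v1/graphql", bytes.NewReader(body))
	if err != nil {
		log.Fatal(err)
	}
	req.Header.Set("Content-Type", "application/json")
	// Development-mode shortcut to act as user 5, as described above.
	req.Header.Set("X-User-ID", "5")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	data, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(data))
}
```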
## Why Super Graph
Let's take a simple example: say you want to fetch 5 products priced over 12 dollars along with the photos of the products and the users that own them. Additionally, also fetch the last 10 of your own purchases along with the name and ID of the product you purchased. This is a common type of query to render a view in, say, an ecommerce app. Let's be honest, it's not very exciting to write and maintain. Keep in mind the data needed will only continue to grow and change as your app evolves. Developers might find that most ORMs will not be able to do all of this in a single SQL query and will require n+1 queries to fetch all the data and assemble it into the right JSON response.
What if I told you Super Graph will fetch all this data with a single SQL query and without you having to write a single line of code? Also, as your app evolves, feel free to evolve the query as you like. In our experience Super Graph saves us hundreds or thousands of man-hours that we can put towards the more exciting parts of our app.
#### GraphQL Query
```graphql
query {
products(limit: 5, where: { price: { gt: 12 } }) {
id
name
description
price
photos {
url
}
user {
id
email
picture : avatar
full_name
}
}
purchases(
limit: 10,
order_by: { created_at: desc } ,
where: { user_id: { eq: $user_id } }
) {
id
created_at
product {
id
name
}
}
}
```
#### JSON Result
```json
"data": {
"products": [
{
"id": 1,
"name": "Oaked Arrogant Bastard Ale",
"description": "Coors lite, European Amber Lager, Perle, 1272 - American Ale II, 38 IBU, 6.4%, 9.7°Blg",
"price": 20,
"photos: [{
"url": "https://www.scienceworld.ca/wp-content/uploads/science-world-beer-flavours.jpg"
}],
"user": {
"id": 1,
"email": "user0@demo.com",
"picture": "https://robohash.org/sitaliquamquaerat.png?size=300x300&set=set1",
"full_name": "Mrs. Wilhemina Hilpert"
}
},
...
]
},
"purchases": [
{
"id": 5,
"created_at": "2020-01-24T05:34:39.880599",
"product": {
"id": 45,
"name": "Brooklyn Black",
}
},
...
]
}
```
## Get Started
@ -154,7 +222,7 @@ You can then add your database schema to the migrations, maybe create some seed
git clone https://github.com/dosco/super-graph && cd super-graph && make install
```
And then create and launch you're new app
And then create and launch your new app
```bash
# create a new app and change to it's directory
@ -224,6 +292,12 @@ for (i = 0; i < 10; i++) {
}
```
If you want to import a lot of data, using a CSV file is the best and fastest option. The `import_csv` command uses the `COPY FROM` Postgres method to load massive amounts of data into tables. The first line of the CSV file must be the header with column names.
```javascript
var post_count = import_csv("posts", "posts.csv")
```
You can generate the following fake data for your seeding purposes. Below is the list of fake data functions supported by the built-in fake data library. For example, `fake.image_url()` will generate a fake image url, `fake.shuffle_strings(['hello', 'world', 'cool'])` will generate a randomly shuffled version of that array of strings, and `fake.rand_string(['hello', 'world', 'cool'])` will return a random string from the array provided.
```
@ -273,12 +347,10 @@ beer_style
beer_yeast
// Cars
vehicle
vehicle_type
car
car_type
car_maker
car_model
fuel_type
transmission_gear_type
// Text
word
@ -364,8 +436,8 @@ hipster_paragraph
hipster_sentence
// File
extension
mine_type
file_extension
file_mine_type
// Numbers
number
@ -389,7 +461,6 @@ mac_address
digit
letter
lexify
rand_string
shuffle_strings
numerify
```
@ -651,8 +722,6 @@ query {
}
```
## Mutations
In GraphQL, mutations are the operation type for when you need to modify data. Super Graph supports `insert`, `update`, `upsert` and `delete` mutations. You can also do complex nested inserts and updates.
When using mutations the data must be passed as variables, since Super Graph compiles the query into a prepared statement in the database for maximum speed. Prepared statements are like functions in your code: when called they accept arguments, and your variables are passed in as those arguments.
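To make this concrete, below is a rough Go sketch built on the library example from the landing page; it assumes `sg.GraphQL` accepts the variables as raw JSON (`json.RawMessage`), since the earlier example passes `nil` for that argument, and it reuses the insert-a-product mutation from above.

```go
package main

import (
	"context"
	"database/sql"
	"encoding/json"
	"fmt"
	"log"

	"github.com/dosco/super-graph/config"
	"github.com/dosco/super-graph/core"
	_ "github.com/jackc/pgx/v4/stdlib"
)

func main() {
	db, err := sql.Open("pgx", "postgres://postgres:@localhost:5432/example_db")
	if err != nil {
		log.Fatal(err)
	}

	conf, err := config.NewConfig("./config")
	if err != nil {
		log.Fatal(err)
	}

	sg, err := core.NewSuperGraph(conf, db)
	if err != nil {
		log.Fatal(err)
	}

	// The mutation data is passed as a variable, per the note above.
	// Assumption: the variables argument is raw JSON.
	vars := json.RawMessage(`{
		"data": { "name": "Art of Computer Programming", "price": 30.5 }
	}`)

	mutation := `
	mutation {
		product(insert: $data) {
			id
			name
		}
	}`

	res, err := sg.GraphQL(context.Background(), mutation, vars)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(res.Data))
}
```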
@ -836,8 +905,6 @@ mutation {
}
```
## Nested Mutations
Often you will need to create or update multiple related items at the same time. This can be done using nested mutations. For example you might need to create a product and assign it to a user, or create a user and their products at the same time. You just have to use simple JSON to define your mutation and Super Graph takes care of the rest.
### Nested Insert
@ -906,7 +973,7 @@ mutation {
}
```
### Nested Updates
### Nested Update
Update a product item first and then assign it to a user
@ -966,9 +1033,116 @@ mutation {
}
```
## Using variables
### Pagination
Variables (`$product_id`) and their values (`"product_id": 5`) can be passed along side the GraphQL query. Using variables makes for better client side code as well as improved server side SQL query caching. The build-in web-ui also supports setting variables. Not having to manipulate your GraphQL query string to insert values into it makes for cleaner
This is a must-have feature of any API. When you want your users to go through a list page by page, or implement some fancy infinite scroll, you're going to need pagination. There are two ways to paginate in Super Graph.
#### Limit-Offset
This is simple enough but also inefficient when working with a large number of total items. Limit restricts the number of items fetched and offset is the point you want to fetch from. The below query will fetch 10 results at a time starting with the 100th item. You will have to keep updating offset (110, 120, 130, etc.) to walk through the results, so make offset a variable.
```graphql
query {
products(limit: 10, offset: 100) {
id
slug
name
}
}
```
#### Cursor
This is a powerful and highly efficient way to paginate through a large number of results. In fact it does not matter how many total results there are; this will always be lightning fast. You can use a cursor to walk forward or backward through the results. If you plan to implement infinite scroll this is the option you should choose.
When going this route the results will contain a cursor value; this is an encrypted string that you don't have to worry about, just pass it back in the next API call and you'll receive the next set of results (a Go sketch of this loop follows the examples below). The cursor value is encrypted since its contents should only matter to Super Graph and not the client. Also, since the primary key is used for this feature, it's possible you might not want to leak its value to clients.
You will need to set this config value to ensure the encrypted cursor data is secure. If not set, a random value is used which will change with each deployment, breaking older cursor values that clients might be using, so it's best to set it.
```yaml
# Secret key for general encryption operations like
# encrypting the cursor data
secret_key: supercalifajalistics
```
Paginating forward through your results
```json
{
"variables": {
"cursor": "MJoTLbQF4l0GuoDsYmCrpjPeaaIlNpfm4uFU4PQ="
}
}
```
```graphql
query {
products(first: 10, after: $cursor) {
slug
name
}
}
```
Paginating backward through your results
```graphql
query {
products(last: 10, before: $cursor) {
slug
name
}
}
```
```json
{
  "data": {
    "products": [
      {
        "slug": "eius-nulla-et-8",
        "name": "Pale Ale"
      },
      {
        "slug": "sapiente-ut-alias-12",
        "name": "Brown Ale"
      }
      ...
    ],
    "products_cursor": "dJwHassm5+d82rGydH2xQnwNxJ1dcj4/cxkh5Cer"
  }
}
```
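Putting the loop together, here is a rough Go sketch of walking forward through all pages using the library API shown earlier. The empty starting cursor and the raw-JSON variables argument are assumptions; the `products_cursor` response field comes from the result above.

```go
package main

import (
	"context"
	"encoding/json"
	"fmt"

	"github.com/dosco/super-graph/core"
)

// fetchAllProducts walks forward through every page of results. It assumes
// sg was created with core.NewSuperGraph as in the earlier example, that
// GraphQL accepts variables as raw JSON, and that an empty cursor fetches
// the first page.
func fetchAllProducts(ctx context.Context, sg *core.SuperGraph) error {
	query := `
	query {
		products(first: 10, after: $cursor) {
			slug
			name
		}
	}`

	cursor := ""
	for {
		vars, err := json.Marshal(map[string]string{"cursor": cursor})
		if err != nil {
			return err
		}

		res, err := sg.GraphQL(ctx, query, vars)
		if err != nil {
			return err
		}
		fmt.Println(string(res.Data))

		// Pull products_cursor out of the response to request the next page.
		var page struct {
			ProductsCursor string `json:"products_cursor"`
		}
		if err := json.Unmarshal(res.Data, &page); err != nil {
			return err
		}
		if page.ProductsCursor == "" {
			return nil // no more pages
		}
		cursor = page.ProductsCursor
	}
}
```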
Nested tables can also have cursors. Requesting multiple cursors is supported in a single request, but when paginating using a cursor only one table is currently supported. To explain this better: you can only use a `before` or `after` argument with a cursor value to paginate a single table in a query.
```graphql
query {
products(last: 10) {
slug
name
customers(last: 5) {
email
full_name
}
}
}
```
Multiple order-by arguments are supported. Super Graph is smart enough to allow cursor-based pagination even when you also need a complex sort order like below.
```graphql
query {
products(
last: 10
before: $cursor
order_by: [ price: desc, total_customers: asc ]) {
slug
name
}
}
```
## Using Variables
Variables (`$product_id`) and their values (`"product_id": 5`) can be passed along side the GraphQL query. Using variables makes for better client side code as well as improved server side SQL query caching. The built-in web-ui also supports setting variables. Not having to manipulate your GraphQL query string to insert values into it makes for cleaner
and better client side code.
```javascript
@ -988,6 +1162,104 @@ fetch('http://localhost:8080/api/v1/graphql', {
.then(res => console.log(res.data));
```
## GraphQL with React
This is a quick simple example using `graphql.js` [https://github.com/f/graphql.js/](https://github.com/f/graphql.js/)
```js
import React, { useState, useEffect } from 'react'
import graphql from 'graphql.js'
// Create a GraphQL client pointing to Super Graph
var graph = graphql("http://localhost:3000/api/v1/graphql", { asJSON: true })
const App = () => {
const [user, setUser] = useState(null)
useEffect(() => {
async function action() {
// Use the GraphQL client to execute a graphQL query
// The second argument to the client are the variables you need to pass
const result = await graph(`{ user { id first_name last_name picture_url } }`)()
setUser(result)
}
action()
}, []);
return (
<div className="App">
<h1>{ JSON.stringify(user) }</h1>
</div>
);
}
export default App;
```
## Advanced Columns
The ability to have `JSON/JSONB` and `Array` columns is often considered among the most useful features of Postgres. There are many cases where using an array or a json column saves space and reduces complexity in your app. The only issue with these columns is really that your SQL queries can get harder to write and maintain.
Super Graph steps in here to help you by supporting these columns right out of the box. It allows you to work with these columns just like you would with tables. Joining data against or modifying array columns using the `connect` or `disconnect` keywords in mutations is fully supported. Another very useful feature is the ability to treat `json` or `binary json (jsonb)` columns as separate tables, even using them in nested queries joining against related tables. To replicate these features on your own would take a lot of complex SQL. Using Super Graph means you don't have to deal with any of this; it just works.
### Array Columns
Configure a relationship between an array column `tag_ids` which contains integer id's for tags and the column `id` in the table `tags`.
```yaml
tables:
- name: posts
columns:
- name: tag_ids
related_to: tags.id
```
```graphql
query {
posts {
title
tags {
name
image
}
}
}
```
### JSON Column
Configure a JSON column called `tag_count` in the table `products` into a separate table. This JSON column contains a json array of objects, each with a tag id and a count of the number of times the tag was used. As a separate table you can nest it into your GraphQL query and treat it like a table, using any of the standard features like `order_by`, `limit`, `where clauses`, etc.
The configuration below tells Super Graph to create a synthetic table called `tag_count` using the column `tag_count` from the `products` table. And that this new table has two columns `tag_id` and `count` of the listed types and with the defined relationships.
```yaml
tables:
- name: tag_count
table: products
columns:
- name: tag_id
type: bigint
related_to: tags.id
- name: count
type: int
```
```graphql
query {
products {
name
tag_counts {
count
tag {
name
}
}
}
}
```
## Full text search
Every app these days needs search. This often means reaching for something heavy like Solr. While this will work, why add complexity to your infrastructure when Postgres has really great
@ -1073,45 +1345,45 @@ class AddSearchColumn < ActiveRecord::Migration[5.1]
end
```
## GraphQL with React
## API Security
This is a quick simple example using `graphql.js` [https://github.com/f/graphql.js/](https://github.com/f/graphql.js/)
One of the most common questions I get asked is: what happens if a user out on the internet sends queries
that we don't want run? For example, how do we stop them from fetching all users or the emails of users? Our answer to this is that it's not an issue, as this cannot happen; let me explain.
```js
import React, { useState, useEffect } from 'react'
import graphql from 'graphql.js'
Super Graph runs in one of two modes, `development` or `production`; this is controlled via the config value `production: false`. When it's false it's running in development mode, and when true, production. In development mode all the **named** queries (including mutations) are saved to the allow list `./config/allow.list`. In production mode, when Super Graph starts, only the queries from this allow list file are registered with the database as [prepared statements](https://stackoverflow.com/questions/8263371/how-can-prepared-statements-protect-from-sql-injection-attacks).
// Create a GraphQL client pointing to Super Graph
var graph = graphql("http://localhost:3000/api/v1/graphql", { asJSON: true })
Prepared statements are designed by databases to be fast and secure. They protect against all kinds of SQL injection attacks and, since they are pre-processed and pre-planned, they are much faster to run than raw SQL queries. Also there's no GraphQL-to-SQL compiling happening in production mode, which makes your queries lightning fast as they are sent directly to the database with almost no overhead.
const App = () => {
const [user, setUser] = useState(null)
useEffect(() => {
async function action() {
// Use the GraphQL client to execute a graphQL query
// The second argument to the client are the variables you need to pass
const result = await graph(`{ user { id first_name last_name picture_url } }`)()
setUser(result)
In short, in production only queries listed in the allow list file `./config/allow.list` can be used; all other queries will be blocked.
::: tip How to think about the allow list?
The allow list file is essentially a list of all your exposed API calls and the data that passes within them. It's very easy to build tooling to do things like parsing this file within your tests to ensure fields like `credit_card_no` are not accidentally leaked (see the sketch below). It's a great way to build compliance tooling and ensure your user data is always safe.
:::
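As a rough illustration of that tooling idea, the sketch below scans `./config/allow.list` for fields that should never be exposed; treating the allow list as plain text to search through is an assumption about its format, and the list of blocked fields is just an example.

```go
package main

import (
	"io/ioutil"
	"log"
	"strings"
)

func main() {
	// Fields that should never appear in any exposed query (illustrative).
	blocked := []string{"credit_card_no", "password", "encrypted"}

	data, err := ioutil.ReadFile("./config/allow.list")
	if err != nil {
		log.Fatal(err)
	}

	// Simple substring scan over the allow list contents.
	for _, field := range blocked {
		if strings.Contains(string(data), field) {
			log.Fatalf("allow list exposes blocked field: %s", field)
		}
	}
	log.Println("allow list clean")
}
```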
This is an example of a named query; `getUserWithProducts` is the name you've given to this query. It can be anything you like but should be unique across all your queries. Only named queries are saved in the allow list in development mode.
```graphql
query getUserWithProducts {
users {
id
name
products {
id
name
price
}
action()
}, []);
return (
<div className="App">
<h1>{ JSON.stringify(user) }</h1>
</div>
);
}
}
export default App;
```
## Authentication
You can only have one type of auth enabled. You can either pick Rails or JWT.
You can only have one type of auth enabled, either Rails or JWT.
### Rails Auth (Devise / Warden)
### Ruby on Rails
Almost all Rails apps use Devise or Warden for authentication. Once the user is
authenticated a session is created with the users ID. The session can either be
@ -1163,7 +1435,7 @@ auth:
max_active: 12000
```
### JWT Token Auth
### JWT Tokens
```yaml
auth:
@ -1177,14 +1449,67 @@ auth:
public_key_type: ecdsa #rsa
```
For JWT tokens we currently support tokens from a provider like Auth0
or if you have a custom solution then we look for the `user_id` in the
`subject` claim of the `id token`. If you pick Auth0 then we derive two variables from the token, `user_id` and `user_id_provider`, to use in your filters.
For JWT tokens we currently support tokens from a provider like Auth0 or if you have a custom solution then we look for the `user_id` in the `subject` claim of the `id token`. If you pick Auth0 then we derive two variables from the token, `user_id` and `user_id_provider`, to use in your filters.
We can get the JWT token either from the `authorization` header, where we expect it to be a `bearer` token, or if `cookie` is specified then we look there.
For validation a `secret` or a public key (ecdsa or rsa) is required. When using public keys they have to be in a PEM format file.
### HTTP Headers
```yaml
header:
name: X-AppEngine-QueueName
exists: true
#value: default
```
Header auth is usually the best option to authenticate requests to the action endpoints. For example, you
might want to use an action to refresh a materialized view every hour and only want a cron service like the Google AppEngine Cron service to make that request; in this case a config similar to the one above will do.
The `exists: true` parameter ensures that only the existence of the header is checked, not its value. The `value` parameter lets you confirm that the value matches the one assigned to the parameter. This helps in the case you are using a shared secret to protect the endpoint.
### Named Auth
```yaml
# You can add additional named auths to use with actions
# In this example actions using this auth can only be
# called from the Google Appengine Cron service that
# sets a special header to all its requests
auths:
- name: from_taskqueue
type: header
header:
name: X-Appengine-Cron
exists: true
```
In addition to the default auth configuration you can create additional named auth configurations to be used
with features like `actions`. For example while your main GraphQL endpoint uses JWT for authentication you may want to use a header value to ensure your actions can only be called by clients having access to a shared secret
or security header.
## Actions
Actions are a very useful feature that is currently a work in progress. For now the best use case for actions is to
refresh database tables like materialized views or to call a database procedure to refresh a cache table, etc. An action creates an HTTP endpoint that anyone can call to have the SQL query executed. The below example will create an endpoint `/api/v1/actions/refresh_leaderboard_users`; any request sent to that endpoint will cause the SQL query to be executed. The `auth_name` points to a named auth that should be used to secure this endpoint. In the future we have big plans to allow your own custom code to run using actions.
```yaml
actions:
- name: refresh_leaderboard_users
sql: REFRESH MATERIALIZED VIEW CONCURRENTLY "leaderboard_users"
auth_name: from_taskqueue
```
#### Using CURL to test a query
```bash
# fetch the response json directly from the endpoint using user id 5
curl 'http://localhost:8080/api/v1/graphql' \
-H 'content-type: application/json' \
-H 'X-User-ID: 5' \
--data-binary '{"query":"{ products { name price users { email }}}"}'
```
## Access Control
It's common for APIs to control what information they return or insert based on the role of the user. In Super Graph we have two primary roles, `user` and `anon`: the first for users where a `user_id` is available, the latter for those where it's not.
@ -1197,7 +1522,6 @@ The `user` role can be divided up into further roles based on attributes in the
Super Graph allows you to create roles dynamically using the `roles_query` and `match` config values.
### Configure RBAC
```yaml
@ -1237,7 +1561,7 @@ roles:
This configuration is relatively simple to follow: the `roles_query` parameter is the query that must be run to help figure out a user's role. This query can be as complex as you like and include joins with other tables.
The individual roles are defined under the `roles` parameter and this includes each table the role has a custom setting for. The role is dynamically matched using the `match` parameter for example in the above case `users.id = 1` means that when the `roles_query` is executed a user with the id `1` willbe assigned the admin role and those that don't match get the `user` role if authenticated successfully or the `anon` role.
The individual roles are defined under the `roles` parameter and this includes each table the role has a custom setting for. The role is dynamically matched using the `match` parameter for example in the above case `users.id = 1` means that when the `roles_query` is executed a user with the id `1` will be assigned the admin role and those that don't match get the `user` role if authenticated successfully or the `anon` role.
## Remote Joins
@ -1334,7 +1658,7 @@ tables:
```
## Configuration files
## Configuration
Configuration files can either be in YAML or JSON; their names are derived from the `GO_ENV` variable. For example `GO_ENV=prod` will cause the `prod.yaml` config file to be used, while `GO_ENV=dev` will use `dev.yaml`. A path to look for the config files in can be specified using the `-path <folder>` command line argument.
@ -1349,7 +1673,7 @@ app_name: "Super Graph Development"
host_port: 0.0.0.0:8080
web_ui: true
# debug, info, warn, error, fatal, panic
# debug, error, warn, info
log_level: "debug"
# enable or disable http compression (uses gzip)
@ -1430,13 +1754,29 @@ auth:
# public_key_file: /secrets/public_key.pem
# public_key_type: ecdsa #rsa
# header:
# name: dnt
# exists: true
# value: localhost:8080
# You can add additional named auths to use with actions
# In this example actions using this auth can only be
# called from the Google Appengine Cron service that
# sets a special header to all its requests
auths:
- name: from_taskqueue
type: header
header:
name: X-Appengine-Cron
exists: true
database:
type: postgres
host: db
port: 5432
dbname: app_development
user: postgres
password: ''
password: postgres
#schema: "public"
#pool_size: 10
@ -1447,18 +1787,48 @@ database:
# Enable this if you need the user id in triggers, etc
set_user_id: false
# Define additional variables here to be used with filters
variables:
admin_account_id: "5"
# database ping timeout is used for db health checking
ping_timeout: 1m
# Set up an secure tls encrypted db connection
enable_tls: false
# Required for tls. For example with Google Cloud SQL it's
# <gcp-project-id>:<cloud-sql-instance>"
# server_name: blah
# Required for tls. Can be a file path or the contents of the pem file
# server_cert: ./server-ca.pem
# Required for tls. Can be a file path or the contents of the pem file
# client_cert: ./client-cert.pem
# Required for tls. Can be a file path or the contents of the pem file
# client_key: ./client-key.pem
# Define additional variables here to be used with filters
variables:
admin_account_id: "5"
# Field and table names that you wish to block
blocklist:
- ar_internal_metadata
- schema_migrations
- secret
- password
- encrypted
- token
# Create custom actions with their own api endpoints
# For example the below action will be available at /api/v1/actions/refresh_leaderboard_users
# A request to this url will execute the configured SQL query
# which in this case refreshes a materialized view in the database.
# The auth_name is from one of the configured auths
actions:
- name: refresh_leaderboard_users
sql: REFRESH MATERIALIZED VIEW CONCURRENTLY "leaderboard_users"
auth_name: from_taskqueue
# Field and table names that you wish to block
blocklist:
- ar_internal_metadata
- schema_migrations
- secret
- password
- encrypted
- token
tables:
- name: customers
@ -1560,9 +1930,74 @@ SG_AUTH_RAILS_REDIS_PASSWORD
SG_AUTH_JWT_PUBLIC_KEY_FILE
```
## YugabyteDB
Yugabyte is an open-source, geo-distributed, cloud-native relational DB that scales horizontally. Super Graph works with Yugabyte right out of the box. If you think your data needs will outgrow Postgres and you don't really want to deal with sharding then Yugabyte is the way to go. Just point Super Graph to your Yugabyte DB and everything will just work, including running migrations, seeding, querying, mutations, etc.
To use Yugabyte in your local development flow just uncomment the following lines in the `docker-compose.yml` file that is part of your Super Graph app. Also remember to comment out the original postgres `db` config.
```yaml
# Postgres DB
# db:
# image: postgres:latest
# ports:
# - "5432:5432"
#Standard config to run a single node of Yugabyte
yb-master:
image: yugabytedb/yugabyte:latest
container_name: yb-master-n1
command: [ "/home/yugabyte/bin/yb-master",
"--fs_data_dirs=/mnt/disk0,/mnt/disk1",
"--master_addresses=yb-master-n1:7100",
"--replication_factor=1",
"--enable_ysql=true"]
ports:
- "7000:7000"
environment:
SERVICE_7000_NAME: yb-master
db:
image: yugabytedb/yugabyte:latest
container_name: yb-tserver-n1
command: [ "/home/yugabyte/bin/yb-tserver",
"--fs_data_dirs=/mnt/disk0,/mnt/disk1",
"--start_pgsql_proxy",
"--tserver_master_addrs=yb-master-n1:7100"]
ports:
- "9042:9042"
- "6379:6379"
- "5433:5433"
- "9000:9000"
environment:
SERVICE_5433_NAME: ysql
SERVICE_9042_NAME: ycql
SERVICE_6379_NAME: yedis
SERVICE_9000_NAME: yb-tserver
depends_on:
- yb-master
# Environment variables to point Super Graph to Yugabyte
# This is required since it uses a different user and port number
yourapp_api:
image: dosco/super-graph:latest
environment:
GO_ENV: "development"
# Uncomment below for Yugabyte DB
SG_DATABASE_PORT: 5433
SG_DATABASE_USER: yugabyte
SG_DATABASE_PASSWORD: yugabyte
volumes:
- ./config:/config
ports:
- "8080:8080"
depends_on:
- db
```
## Developing Super Graph
If you want to build and run Super Graph from code then the below commands will build the web ui and launch Super Graph in developer mode with a watcher to rebuild on code changes. And the demo rails app is also launched to make it essier to test changes.
If you want to build and run Super Graph from code then the below commands will build the web ui and launch Super Graph in developer mode with a watcher to rebuild on code changes. And the demo rails app is also launched to make it easier to test changes.
```bash

Some files were not shown because too many files have changed in this diff.