Add REST API stitching

This commit is contained in:
Vikram Rangnekar 2019-05-12 19:27:26 -04:00
parent 6c9accb628
commit f16e95ef22
40 changed files with 1127 additions and 479 deletions

View File

@ -25,11 +25,17 @@ And so after a lot of coffee and some Avocado toasts __Super Graph was born, a G
- Full text search and Aggregations
- Rails Auth supported (Redis, Memcache, Cookie)
- JWT tokens supported (Auth0, etc)
- Stitching in REST APIs
- Highly optimized and fast Postgres SQL queries
- Configure with a simple config file
- High performance GO codebase
- Tiny docker image and low memory requirements
## Watch some talks
[![Watch the video](https://img.youtube.com/vi/TGq9wJAj78I/hqdefault.jpg)](https://youtu.be/TGq9wJAj78I)
## Documentation
[supergraph.dev](https://supergraph.dev)

View File

@ -60,7 +60,6 @@ auth:
# public_key_file: /secrets/public_key.pem
# public_key_type: ecdsa #rsa
database:
type: postgres
host: db
@ -80,9 +79,9 @@ database:
# Define defaults for the field keys and values below
defaults:
filter: ["{ user_id: { eq: $user_id } }"]
#filter: ["{ user_id: { eq: $user_id } }"]
# Fields and table names that you wish to block
# Field and table names that you wish to block
blacklist:
- ar_internal_metadata
- schema_migrations
@ -91,10 +90,10 @@ database:
- encrypted
- token
fields:
tables:
- name: users
# This filter will overwrite defaults.filter
filter: ["{ id: { eq: $user_id } }"]
# filter: ["{ id: { eq: $user_id } }"]
- name: products
# Multiple filters are AND'd together
@ -108,6 +107,18 @@ database:
# even defaults.filter
filter: none
remotes:
- name: payments
id: stripe_id
path: data
pass_headers:
- cookie
- host
# set_headers:
# - name: authorize
# value: Bearer 1234567890
url: http://rails_app:3000/stripe/$id
- # You can create new fields that have a
# real db table backing them
name: me

View File

@ -79,7 +79,7 @@ database:
defaults:
filter: ["{ user_id: { eq: $user_id } }"]
# Fields and table names that you wish to block
# Field and table names that you wish to block
blacklist:
- ar_internal_metadata
- schema_migrations
@ -88,7 +88,7 @@ database:
- encrypted
- token
fields:
tables:
- name: users
# This filter will overwrite defaults.filter
filter: ["{ id: { eq: $user_id } }"]

2
demo
View File

@ -1,7 +1,7 @@
#!/bin/bash
if [ "$1" == "setup" ]; then
docker-compose -f rails-app/demo.yml run web rake db:create db:migrate db:seed
docker-compose -f rails-app/demo.yml run rails_app rake db:create db:migrate db:seed
elif [ "$1" == "run" ]; then
docker-compose -f rails-app/demo.yml up
else

View File

@ -24,7 +24,7 @@ services:
working_dir: /app
command: fresh -c fresh.conf
web:
rails_app:
build: rails-app/.
command: bash -c "rm -f tmp/pids/server.pid && bundle exec rails s -p 3000 -b '0.0.0.0'"
volumes:

View File

@ -68,6 +68,11 @@ I always liked GraphQL it sounded friendly, but it still required me to write al
And so after a lot of coffee and some Avocado toasts __Super Graph was born, a GraphQL server that just works, is high performance and easy to deploy__. I hope you find it as useful as I do and there's a lot more coming so hit that :star: to stay in the loop.
## Watch some talks
<iframe class="w-full h-full" src="https://www.youtube.com/embed/TGq9wJAj78I" frameborder="0" allow="accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
## Say hello
[twitter.com/dosco](https://twitter.com/dosco)

View File

@ -347,6 +347,75 @@ class AddSearchColumn < ActiveRecord::Migration[5.1]
end
```
## Stitching in REST APIs
It often happens that after fetching some data from the DB we need to call another API to fetch some more data, and have all of this combined into a single JSON response.
For example, you need to list the last 3 payments made by a user. You will first need to look up the user in the database and then call the Stripe API to fetch their last 3 payments. For this to work, your user table in the db needs a `customer_id` column that contains their Stripe customer ID.
Similarly, you might also need to fetch the user's last tweet and include that too. Super Graph can handle this for you using its `API Stitching` feature.
### API Stitching configuration
The configuration is self-explanatory. A `payments` field has been added under the `customers` table. This field is added to the `remotes` subsection that defines fields associated with `customers` that are remote and not real database columns.
The `id` parameter maps a column from the `customers` table to the `$id` variable. In this case it maps `$id` to the `customer_id` column.
```yaml
tables:
- name: customers
remotes:
- name: payments
id: customer_id
path: data
pass_headers:
- cookie
- host
# set_headers:
# - name: authorize
# value: Bearer 1234567890
url: http://rails_app:3000/stripe/$id
```
#### How do I make use of this?
Just include `payments` like you would any other GraphQL selector under the `customers` selector. Super Graph will call the configured API for you and stitch (merge) the JSON the API sends back with the JSON generated from the database query. GraphQL features like aliases and fields all work.
```graphql
query {
customers {
id
email
payments {
customer_id
amount
billing_details
}
}
}
```
And voilà, here is the result. You get all of this advanced and honestly complex querying capability without writing a single line of code.
```json
"data": {
"customers": [
{
"id": 1,
"email": "linseymertz@reilly.co",
"payments": [
{
"customer_id": "cus_YCj3ndB5Mz",
"amount": 100,
"billing_details": {
"address": "1 Infinity Drive",
"zipcode": "94024"
}
},
...
```
## Authentication
You can only have one type of auth enabled. You can either pick Rails or JWT.
@ -515,7 +584,7 @@ database:
defaults:
filter: ["{ user_id: { eq: $user_id } }"]
# Fields and table names that you wish to block
# Field and table names that you wish to block
blacklist:
- ar_internal_metadata
- schema_migrations
@ -524,7 +593,7 @@ database:
- encrypted
- token
fields:
tables:
- name: users
# This filter will overwrite defaults.filter
filter: ["{ id: { eq: $user_id } }"]
@ -587,7 +656,7 @@ brew install yarn
go generate ./...
# do this only the first time, to set up the database
docker-compose run web rake db:create db:migrate
docker-compose run rails_app rake db:create db:migrate
# start super graph in development mode with a change watcher
docker-compose up

View File

@ -3476,6 +3476,11 @@ lodash.uniq@^4.5.0:
version "4.5.0"
resolved "https://registry.yarnpkg.com/lodash.uniq/-/lodash.uniq-4.5.0.tgz#d0225373aeb652adc1bc82e4945339a842754773"
lodash@^4.17.11:
version "4.17.11"
resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.11.tgz#b39ea6229ef607ecd89e2c8df12536891cac9b8d"
integrity sha512-cQKh8igo5QUhZ7lg38DYWAxMvjSAKG0A8wGSVimP07SIUEK2UO+arSRKbRZWtelMtN5V0Hkwh5ryOto/SshYIg==
lodash@^4.17.3, lodash@^4.17.4, lodash@^4.17.5:
version "4.17.10"
resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.10.tgz#1b7793cf7259ea38fb3661d4d38b3260af8ae4e7"
@ -5372,6 +5377,13 @@ table@^4.0.3:
slice-ansi "1.0.0"
string-width "^2.1.1"
tailwindcss-aspect-ratio@^1.0.3:
version "1.0.3"
resolved "https://registry.yarnpkg.com/tailwindcss-aspect-ratio/-/tailwindcss-aspect-ratio-1.0.3.tgz#7aa7cb73ffeeb9f69cedebbfd3980176b14a256f"
integrity sha512-burkG+yxTNp8REWMtFkRzXGdt+8/QR2LMRDHjQ37DV4Y7dk+f/WQtfZYFXXU2GKASrp6WidzrtN2z8OA/jilww==
dependencies:
lodash "^4.17.11"
tapable@^1.0.0:
version "1.0.0"
resolved "https://registry.yarnpkg.com/tapable/-/tapable-1.0.0.tgz#cbb639d9002eed9c6b5975eb20598d7936f1f9f2"

11
go.mod
View File

@ -1,10 +1,12 @@
module github.com/dosco/super-graph
require (
github.com/BurntSushi/toml v0.3.1 // indirect
github.com/Masterminds/semver v1.4.2
github.com/OneOfOne/xxhash v1.2.5 // indirect
github.com/adjust/gorails v0.0.0-20171013043634-2786ed0c03d3
github.com/allegro/bigcache v1.2.0
github.com/bradfitz/gomemcache v0.0.0-20180710155616-bc664df96737
github.com/cespare/xxhash/v2 v2.0.0
github.com/dgrijalva/jwt-go v3.2.0+incompatible
github.com/garyburd/redigo v1.6.0
@ -12,10 +14,15 @@ require (
github.com/gobuffalo/flect v0.1.1
github.com/gorilla/websocket v1.4.0
github.com/jinzhu/inflection v0.0.0-20180308033659-04140366298a // indirect
github.com/labstack/gommon v0.2.8
github.com/mattn/go-colorable v0.1.1 // indirect
github.com/mattn/go-isatty v0.0.7 // indirect
github.com/onsi/ginkgo v1.8.0 // indirect
github.com/onsi/gomega v1.5.0 // indirect
github.com/sirupsen/logrus v1.4.0
github.com/spaolacci/murmur3 v1.1.0 // indirect
github.com/spf13/viper v1.3.1
github.com/valyala/fasttemplate v1.0.1
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a
mellium.im/sasl v0.2.1 // indirect
)

19
go.sum
View File

@ -2,13 +2,16 @@ github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/Masterminds/semver v1.4.2 h1:WBLTQ37jOCzSLtXNdoo8bNM8876KhNqOKvrlGITgsTc=
github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/OneOfOne/xxhash v1.2.5 h1:zl/OfRA6nftbBK9qTohYBJ5xvw6C/oNKizR7cZGl3cI=
github.com/OneOfOne/xxhash v1.2.5/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q=
github.com/adjust/gorails v0.0.0-20171013043634-2786ed0c03d3 h1:+qz9Ga6l6lKw6fgvk5RMV5HQznSLvI8Zxajwdj4FhFg=
github.com/adjust/gorails v0.0.0-20171013043634-2786ed0c03d3/go.mod h1:FlkD11RtgMTYjVuBnb7cxoHmQGqvPpCsr2atC88nl/M=
github.com/allegro/bigcache v1.2.0 h1:qDaE0QoF29wKBb3+pXFrJFy1ihe5OT9OiXhg1t85SxM=
github.com/allegro/bigcache v1.2.0/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/bradfitz/gomemcache v0.0.0-20180710155616-bc664df96737 h1:rRISKWyXfVxvoa702s91Zl5oREZTrR3yv+tXrrX7G/g=
github.com/bradfitz/gomemcache v0.0.0-20180710155616-bc664df96737/go.mod h1:PmM6Mmwb0LSuEubjR8N7PtNe1KxZLtOUHtbeikc5h60=
github.com/cespare/xxhash/v2 v2.0.0 h1:Eb1IiuHmi3FhT12NKfqCQXSXRqc4NTMvgJoREemrSt4=
github.com/cespare/xxhash/v2 v2.0.0/go.mod h1:MaMeaVDXZNmTpkOyhVs3/WfjgobkbQgfrVnrr3DyZL0=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
@ -37,8 +40,15 @@ github.com/jinzhu/inflection v0.0.0-20180308033659-04140366298a h1:eeaG9XMUvRBYX
github.com/jinzhu/inflection v0.0.0-20180308033659-04140366298a/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/labstack/gommon v0.2.8 h1:JvRqmeZcfrHC5u6uVleB4NxxNbzx6gpbJiQknDbKQu0=
github.com/labstack/gommon v0.2.8/go.mod h1:/tj9csK2iPSBvn+3NLM9e52usepMtrd5ilFYA+wQNJ4=
github.com/magiconair/properties v1.8.0 h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDePerRcY=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mattn/go-colorable v0.1.1 h1:G1f5SKeVxmagw/IyvzvtZE4Gybcc4Tr1tf7I8z0XgOg=
github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.7 h1:UvyT9uN+3r7yLEYSlJsbQGdsaB/a0DlgWP3pql6iwOc=
github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
@ -52,6 +62,9 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/sirupsen/logrus v1.4.0 h1:yKenngtzGh+cUSSh6GWbxW2abRqhYUSR/t/6+2QqNvE=
github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8=
@ -85,6 +98,8 @@ golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a h1:1n5lsVfiQW3yfsRGu98756EH1YthsFqr/5mxHduZW2A=
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223 h1:DH4skfRX4EBpamg7iV4ZlCpblAHI6s6TDM39bFZumv8=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=

View File

@ -1,4 +1,4 @@
package ajson
package jsn
import (
"bytes"

View File

@ -1,4 +1,4 @@
package ajson
package jsn
import (
"github.com/cespare/xxhash/v2"

View File

@ -1,4 +1,4 @@
package ajson
package jsn
import (
"bytes"
@ -226,7 +226,6 @@ func TestFilter(t *testing.T) {
t.Error("Does not match expected json")
}
}
func TestStrip(t *testing.T) {
path1 := [][]byte{[]byte("data"), []byte("users")}
value1 := Strip([]byte(input3), path1)

View File

@ -1,4 +1,4 @@
package ajson
package jsn
import (
"bytes"

View File

@ -1,4 +1,4 @@
package ajson
package jsn
import (
"bytes"

View File

@ -5,5 +5,5 @@ import (
)
func main() {
serv.InitAndListen()
serv.Init()
}

View File

@ -1,6 +1,6 @@
goos: darwin
goarch: amd64
pkg: github.com/dosco/super-graph/psql
BenchmarkCompileGQLToSQL-8 30000 38686 ns/op 15110 B/op 262 allocs/op
BenchmarkCompileGQLToSQL-8 50000 39601 ns/op 20165 B/op 263 allocs/op
PASS
ok github.com/dosco/super-graph/psql 1.637s
ok github.com/dosco/super-graph/psql 2.549s

View File

@ -1,16 +1,6 @@
? github.com/dosco/super-graph [no test files]
goos: darwin
goarch: amd64
pkg: github.com/dosco/super-graph/psql
BenchmarkCompileGQLToSQL-8 30000 45507 ns/op 14565 B/op 244 allocs/op
BenchmarkCompileGQLToSQL-8 50000 38882 ns/op 15177 B/op 266 allocs/op
PASS
ok github.com/dosco/super-graph/psql 1.846s
goos: darwin
goarch: amd64
pkg: github.com/dosco/super-graph/qcode
BenchmarkParse-8 2000000000 0.00 ns/op
PASS
ok github.com/dosco/super-graph/qcode 0.008s
PASS
ok github.com/dosco/super-graph/serv 0.017s
? github.com/dosco/super-graph/util [no test files]
ok github.com/dosco/super-graph/psql 2.473s

View File

@ -1,3 +1,3 @@
#!/bin/sh
go test -bench=. -benchmem -cpuprofile cpu.out
go test -bench=. -benchmem -cpuprofile cpu.out -run=XXX
go tool pprof -cum cpu.out

View File

@ -1,3 +1,3 @@
#!/bin/sh
go test -bench=. -benchmem -memprofile mem.out
go test -bench=. -benchmem -memprofile mem.out -run=XXX
go tool pprof -cum mem.out

View File

@ -10,6 +10,10 @@ import (
"github.com/dosco/super-graph/util"
)
const (
empty = ""
)
type Config struct {
Schema *DBSchema
Vars map[string]string
@ -26,18 +30,43 @@ func NewCompiler(conf Config) *Compiler {
return &Compiler{conf.Schema, conf.Vars, conf.TableMap}
}
func (c *Compiler) Compile(w io.Writer, qc *qcode.QCode) error {
st := util.NewStack()
ti, err := c.getTable(qc.Query.Select)
if err != nil {
return err
func (c *Compiler) AddRelationship(key TTKey, val *DBRel) {
c.schema.RelMap[key] = val
}
st.Push(&selectBlockClose{nil, qc.Query.Select})
st.Push(&selectBlock{nil, qc.Query.Select, ti, c})
func (c *Compiler) IDColumn(table string) string {
t, ok := c.schema.Tables[table]
if !ok {
return empty
}
return t.PrimaryCol
}
func (c *Compiler) Compile(qc *qcode.QCode) (uint32, []string, error) {
if len(qc.Query.Selects) == 0 {
return 0, nil, errors.New("empty query")
}
root := &qc.Query.Selects[0]
st := util.NewStack()
ti, err := c.getTable(root)
if err != nil {
return 0, nil, err
}
buf := strings.Builder{}
buf.Grow(2048)
sql := make([]string, 0, 3)
w := io.Writer(&buf)
st.Push(&selectBlockClose{nil, root})
st.Push(&selectBlock{nil, root, qc, ti, c})
fmt.Fprintf(w, `SELECT json_object_agg('%s', %s) FROM (`,
qc.Query.Select.FieldName, qc.Query.Select.Table)
root.FieldName, root.Table)
var ignored uint32
for {
if st.Len() == 0 {
@ -48,37 +77,47 @@ func (c *Compiler) Compile(w io.Writer, qc *qcode.QCode) error {
switch v := intf.(type) {
case *selectBlock:
childCols, childIDs := c.relationshipColumns(v.sel)
v.render(w, c.schema, childCols, childIDs)
for i := range childIDs {
sub := v.sel.Joins[childIDs[i]]
ti, err := c.getTable(sub)
skipped, err := v.render(w)
if err != nil {
return err
return 0, nil, err
}
ignored |= skipped
for _, id := range v.sel.Children {
if hasBit(skipped, id) {
continue
}
child := &qc.Query.Selects[id]
ti, err := c.getTable(child)
if err != nil {
return 0, nil, err
}
st.Push(&joinClose{sub})
st.Push(&selectBlockClose{v.sel, sub})
st.Push(&selectBlock{v.sel, sub, ti, c})
st.Push(&joinOpen{sub})
st.Push(&joinClose{child})
st.Push(&selectBlockClose{v.sel, child})
st.Push(&selectBlock{v.sel, child, qc, ti, c})
st.Push(&joinOpen{child})
}
case *selectBlockClose:
v.render(w)
err = v.render(w)
case *joinOpen:
v.render(w)
err = v.render(w)
case *joinClose:
v.render(w)
err = v.render(w)
}
if err != nil {
return 0, nil, err
}
}
io.WriteString(w, `) AS "done_1337";`)
sql = append(sql, buf.String())
return nil
return ignored, sql, nil
}
func (c *Compiler) getTable(sel *qcode.Select) (*DBTableInfo, error) {
@ -88,50 +127,61 @@ func (c *Compiler) getTable(sel *qcode.Select) (*DBTableInfo, error) {
return c.schema.GetTable(sel.Table)
}
func (c *Compiler) relationshipColumns(parent *qcode.Select) (
cols []*qcode.Column, childIDs []int) {
func (v *selectBlock) processChildren() (uint32, []*qcode.Column) {
var skipped uint32
colmap := make(map[string]struct{}, len(parent.Cols))
for i := range parent.Cols {
colmap[parent.Cols[i].Name] = struct{}{}
cols := make([]*qcode.Column, 0, len(v.sel.Cols))
colmap := make(map[string]struct{}, len(v.sel.Cols))
for i := range v.sel.Cols {
colmap[v.sel.Cols[i].Name] = struct{}{}
}
for i, sub := range parent.Joins {
k := TTKey{sub.Table, parent.Table}
for _, id := range v.sel.Children {
child := &v.qc.Query.Selects[id]
k := TTKey{child.Table, v.sel.Table}
rel, ok := c.schema.RelMap[k]
rel, ok := v.schema.RelMap[k]
if !ok {
skipped |= (1 << uint(id))
continue
}
if rel.Type == RelBelongTo || rel.Type == RelOneToMany {
switch rel.Type {
case RelOneToMany:
fallthrough
case RelBelongTo:
if _, ok := colmap[rel.Col2]; !ok {
cols = append(cols, &qcode.Column{parent.Table, rel.Col2, rel.Col2})
cols = append(cols, &qcode.Column{v.sel.Table, rel.Col2, rel.Col2})
}
childIDs = append(childIDs, i)
}
if rel.Type == RelOneToManyThrough {
case RelOneToManyThrough:
if _, ok := colmap[rel.Col1]; !ok {
cols = append(cols, &qcode.Column{parent.Table, rel.Col1, rel.Col1})
cols = append(cols, &qcode.Column{v.sel.Table, rel.Col1, rel.Col1})
}
childIDs = append(childIDs, i)
case RelRemote:
if _, ok := colmap[rel.Col1]; !ok {
cols = append(cols, &qcode.Column{v.sel.Table, rel.Col1, rel.Col2})
}
skipped |= (1 << uint(id))
default:
skipped |= (1 << uint(id))
}
}
return cols, childIDs
return skipped, cols
}
type selectBlock struct {
parent *qcode.Select
sel *qcode.Select
qc *qcode.QCode
ti *DBTableInfo
*Compiler
}
func (v *selectBlock) render(w io.Writer,
schema *DBSchema, childCols []*qcode.Column, childIDs []int) error {
func (v *selectBlock) render(w io.Writer) (uint32, error) {
skipped, childCols := v.processChildren()
hasOrder := len(v.sel.OrderBy) != 0
// SELECT
@ -141,7 +191,7 @@ func (v *selectBlock) render(w io.Writer,
if hasOrder {
err := renderOrderBy(w, v.sel)
if err != nil {
return err
return skipped, err
}
}
@ -162,9 +212,11 @@ func (v *selectBlock) render(w io.Writer,
// Combined column names
v.renderColumns(w)
err := v.renderJoinedColumns(w, childIDs)
v.renderRemoteRelColumns(w)
err := v.renderJoinedColumns(w, skipped)
if err != nil {
return err
return skipped, err
}
fmt.Fprintf(w, `) AS "sel_%d"`, v.sel.ID)
@ -178,13 +230,13 @@ func (v *selectBlock) render(w io.Writer,
// END-SELECT
// FROM (SELECT .... )
err = v.renderBaseSelect(w, schema, childCols, childIDs)
err = v.renderBaseSelect(w, childCols, skipped)
if err != nil {
return err
return skipped, err
}
// END-FROM
return nil
return skipped, nil
}
type selectBlockClose struct {
@ -233,13 +285,13 @@ type joinClose struct {
}
func (v *joinClose) render(w io.Writer) error {
fmt.Fprintf(w, `) AS "%s_%d.join" ON ('true')`, v.sel.Table, v.sel.ID)
fmt.Fprintf(w, `) AS "%s_%d_join" ON ('true')`, v.sel.Table, v.sel.ID)
return nil
}
func (v *selectBlock) renderJoinTable(w io.Writer, schema *DBSchema, childIDs []int) {
func (v *selectBlock) renderJoinTable(w io.Writer) {
k := TTKey{v.sel.Table, v.parent.Table}
rel, ok := schema.RelMap[k]
rel, ok := v.schema.RelMap[k]
if !ok {
panic(errors.New("no relationship found"))
}
@ -250,40 +302,61 @@ func (v *selectBlock) renderJoinTable(w io.Writer, schema *DBSchema, childIDs []
fmt.Fprintf(w, ` LEFT OUTER JOIN "%s" ON (("%s"."%s") = ("%s_%d"."%s"))`,
rel.Through, rel.Through, rel.ColT, v.parent.Table, v.parent.ID, rel.Col1)
}
func (v *selectBlock) renderColumns(w io.Writer) {
for i, col := range v.sel.Cols {
if i != 0 {
io.WriteString(w, ", ")
}
fmt.Fprintf(w, `"%s_%d"."%s" AS "%s"`,
v.sel.Table, v.sel.ID, col.Name, col.FieldName)
}
}
if i < len(v.sel.Cols)-1 {
func (v *selectBlock) renderRemoteRelColumns(w io.Writer) {
k := TTKey{Table2: v.sel.Table}
i := 0
for _, id := range v.sel.Children {
child := &v.qc.Query.Selects[id]
k.Table1 = child.Table
rel, ok := v.schema.RelMap[k]
if !ok || rel.Type != RelRemote {
continue
}
if i != 0 || len(v.sel.Cols) != 0 {
io.WriteString(w, ", ")
}
fmt.Fprintf(w, `"%s_%d"."%s" AS "%s"`,
v.sel.Table, v.sel.ID, rel.Col1, rel.Col2)
i++
}
}
func (v *selectBlock) renderJoinedColumns(w io.Writer, childIDs []int) error {
if len(v.sel.Cols) != 0 && len(childIDs) != 0 {
func (v *selectBlock) renderJoinedColumns(w io.Writer, skipped uint32) error {
colsRendered := len(v.sel.Cols) != 0
for _, id := range v.sel.Children {
skipThis := hasBit(skipped, id)
if colsRendered && !skipThis {
io.WriteString(w, ", ")
}
if skipThis {
continue
}
s := &v.qc.Query.Selects[id]
for i := range childIDs {
s := v.sel.Joins[childIDs[i]]
fmt.Fprintf(w, `"%s_%d.join"."%s" AS "%s"`,
fmt.Fprintf(w, `"%s_%d_join"."%s" AS "%s"`,
s.Table, s.ID, s.Table, s.FieldName)
if i < len(childIDs)-1 {
io.WriteString(w, ", ")
}
}
return nil
}
func (v *selectBlock) renderBaseSelect(w io.Writer, schema *DBSchema, childCols []*qcode.Column, childIDs []int) error {
func (v *selectBlock) renderBaseSelect(w io.Writer, childCols []*qcode.Column, skipped uint32) error {
var groupBy []int
isRoot := v.parent == nil
@ -337,11 +410,11 @@ func (v *selectBlock) renderBaseSelect(w io.Writer, schema *DBSchema, childCols
}
for i, col := range childCols {
fmt.Fprintf(w, `"%s"."%s"`, col.Table, col.Name)
if i < len(childCols)-1 {
if i != 0 {
io.WriteString(w, ", ")
}
fmt.Fprintf(w, `"%s"."%s"`, col.Table, col.Name)
}
if tn, ok := v.tmap[v.sel.Table]; ok {
@ -359,10 +432,10 @@ func (v *selectBlock) renderBaseSelect(w io.Writer, schema *DBSchema, childCols
}
if !isRoot {
v.renderJoinTable(w, schema, childIDs)
v.renderJoinTable(w)
io.WriteString(w, ` WHERE (`)
v.renderRelationship(w, schema)
v.renderRelationship(w)
if isFil {
io.WriteString(w, ` AND `)
@ -378,11 +451,10 @@ func (v *selectBlock) renderBaseSelect(w io.Writer, schema *DBSchema, childCols
fmt.Fprintf(w, ` GROUP BY `)
for i, id := range groupBy {
fmt.Fprintf(w, `"%s"."%s"`, v.sel.Table, v.sel.Cols[id].Name)
if i < len(groupBy)-1 {
if i != 0 {
io.WriteString(w, ", ")
}
fmt.Fprintf(w, `"%s"."%s"`, v.sel.Table, v.sel.Cols[id].Name)
}
}
}
@ -402,25 +474,23 @@ func (v *selectBlock) renderBaseSelect(w io.Writer, schema *DBSchema, childCols
}
func (v *selectBlock) renderOrderByColumns(w io.Writer) {
if len(v.sel.Cols) != 0 {
colsRendered := len(v.sel.Cols) != 0
for i := range v.sel.OrderBy {
if colsRendered {
io.WriteString(w, ", ")
}
for i := range v.sel.OrderBy {
c := v.sel.OrderBy[i].Col
fmt.Fprintf(w, `"%s_%d"."%s" AS "%s_%d.ob.%s"`,
v.sel.Table, v.sel.ID, c,
v.sel.Table, v.sel.ID, c)
if i < len(v.sel.OrderBy)-1 {
io.WriteString(w, ", ")
}
}
}
func (v *selectBlock) renderRelationship(w io.Writer, schema *DBSchema) {
func (v *selectBlock) renderRelationship(w io.Writer) {
k := TTKey{v.sel.Table, v.parent.Table}
rel, ok := schema.RelMap[k]
rel, ok := v.schema.RelMap[k]
if !ok {
panic(errors.New("no relationship found"))
}
@ -464,7 +534,7 @@ func (v *selectBlock) renderWhere(w io.Writer) error {
case qcode.OpNot:
io.WriteString(w, `NOT `)
default:
return fmt.Errorf("[Where] unexpected value encountered %v", intf)
return fmt.Errorf("11: unexpected value %v (%t)", intf, intf)
}
case *qcode.Exp:
switch val.Op {
@ -562,7 +632,7 @@ func (v *selectBlock) renderWhere(w io.Writer) error {
}
default:
return fmt.Errorf("[Where] unexpected value encountered %v", intf)
return fmt.Errorf("12: unexpected value %v (%t)", intf, intf)
}
}
@ -572,6 +642,9 @@ func (v *selectBlock) renderWhere(w io.Writer) error {
func renderOrderBy(w io.Writer, sel *qcode.Select) error {
io.WriteString(w, ` ORDER BY `)
for i := range sel.OrderBy {
if i != 0 {
io.WriteString(w, ", ")
}
ob := sel.OrderBy[i]
switch ob.Order {
@ -588,10 +661,7 @@ func renderOrderBy(w io.Writer, sel *qcode.Select) error {
case qcode.OrderDescNullsLast:
fmt.Fprintf(w, `%s_%d.ob.%s DESC NULLS LAST`, sel.Table, sel.ID, ob.Col)
default:
return fmt.Errorf("[qcode.Order By] unexpected value encountered %v", ob.Order)
}
if i < len(sel.OrderBy)-1 {
io.WriteString(w, ", ")
return fmt.Errorf("13: unexpected value %v (%t)", ob.Order, ob.Order)
}
}
return nil
@ -600,12 +670,11 @@ func renderOrderBy(w io.Writer, sel *qcode.Select) error {
func (v selectBlock) renderDistinctOn(w io.Writer) {
io.WriteString(w, ` DISTINCT ON (`)
for i := range v.sel.DistinctOn {
fmt.Fprintf(w, `"%s_%d.ob.%s"`,
v.sel.Table, v.sel.ID, v.sel.DistinctOn[i])
if i < len(v.sel.DistinctOn)-1 {
if i != 0 {
io.WriteString(w, ", ")
}
fmt.Fprintf(w, `"%s_%d.ob.%s"`,
v.sel.Table, v.sel.ID, v.sel.DistinctOn[i])
}
io.WriteString(w, `) `)
}
@ -613,16 +682,15 @@ func (v selectBlock) renderDistinctOn(w io.Writer) {
func renderList(w io.Writer, ex *qcode.Exp) {
io.WriteString(w, ` (`)
for i := range ex.ListVal {
if i != 0 {
io.WriteString(w, ", ")
}
switch ex.ListType {
case qcode.ValBool, qcode.ValInt, qcode.ValFloat:
io.WriteString(w, ex.ListVal[i])
case qcode.ValStr:
fmt.Fprintf(w, `'%s'`, ex.ListVal[i])
}
if i < len(ex.ListVal)-1 {
io.WriteString(w, ", ")
}
}
io.WriteString(w, `)`)
}
@ -675,3 +743,8 @@ func funcPrefixLen(fn string) int {
}
return 0
}
func hasBit(n uint32, pos uint16) bool {
val := n & (1 << pos)
return (val > 0)
}

View File

@ -3,7 +3,6 @@ package psql
import (
"log"
"os"
"strings"
"testing"
"github.com/dosco/super-graph/qcode"
@ -22,7 +21,7 @@ func TestMain(m *testing.M) {
var err error
qcompile, err = qcode.NewCompiler(qcode.Config{
Filter: []string{
DefaultFilter: []string{
`{ user_id: { _eq: $user_id } }`,
},
FilterMap: map[string][]string{
@ -129,13 +128,12 @@ func compileGQLToPSQL(gql string) (string, error) {
return "", err
}
var sqlStmt strings.Builder
if err := pcompile.Compile(&sqlStmt, qc); err != nil {
_, sqlStmts, err := pcompile.Compile(qc)
if err != nil {
return "", err
}
return sqlStmt.String(), nil
return sqlStmts[0], nil
}
func withComplexArgs(t *testing.T) {

View File

@ -33,6 +33,7 @@ const (
RelBelongTo RelType = iota + 1
RelOneToMany
RelOneToManyThrough
RelRemote
)
type DBRel struct {

View File

@ -14,7 +14,7 @@ var (
type parserType int16
const (
maxNested = 50
maxFields = 100
parserError parserType = iota
parserEOF
@ -66,17 +66,16 @@ type Operation struct {
Type parserType
Name string
Args []*Arg
Fields []*Field
FieldLen int16
Fields []Field
}
type Field struct {
ID int16
ID uint16
Name string
Alias string
Args []*Arg
Parent *Field
Children []*Field
ParentID uint16
Children []uint16
}
type Arg struct {
@ -206,12 +205,10 @@ func (p *Parser) parseOpByType(ty parserType) (*Operation, error) {
if p.peek(itemObjOpen) {
p.ignore()
n := int16(0)
op.Fields, n, err = p.parseFields()
op.Fields, err = p.parseFields()
if err != nil {
return nil, err
}
op.FieldLen = n
}
if p.peek(itemObjClose) {
@ -241,12 +238,17 @@ func (p *Parser) parseOp() (*Operation, error) {
return nil, errors.New("unknown operation type")
}
func (p *Parser) parseFields() ([]*Field, int16, error) {
var roots []*Field
func (p *Parser) parseFields() ([]Field, error) {
var id uint16
fields := make([]Field, 0, 5)
st := util.NewStack()
i := int16(0)
for {
if id >= maxFields {
return nil, fmt.Errorf("field limit reached (%d)", maxFields)
}
if p.peek(itemObjClose) {
p.ignore()
st.Pop()
@ -257,66 +259,63 @@ func (p *Parser) parseFields() ([]*Field, int16, error) {
continue
}
if i > maxNested {
return nil, 0, errors.New("too many fields")
}
if p.peek(itemName) == false {
return nil, 0, errors.New("expecting an alias or field name")
return nil, errors.New("expecting an alias or field name")
}
field, err := p.parseField()
if err != nil {
return nil, 0, err
f := Field{ID: id}
if err := p.parseField(&f); err != nil {
return nil, err
}
field.ID = i
i++
if st.Len() == 0 {
roots = append(roots, field)
} else {
if f.ID != 0 {
intf := st.Peek()
parent, ok := intf.(*Field)
if !ok || parent == nil {
return nil, 0, fmt.Errorf("unexpected value encountered %v", intf)
pid, ok := intf.(uint16)
if !ok {
return nil, fmt.Errorf("14: unexpected value %v (%t)", intf, intf)
}
field.Parent = parent
parent.Children = append(parent.Children, field)
f.ParentID = pid
fields[pid].Children = append(fields[pid].Children, f.ID)
}
fields = append(fields, f)
id++
if p.peek(itemObjOpen) {
p.ignore()
st.Push(field)
st.Push(f.ID)
}
}
return roots, i, nil
return fields, nil
}
func (p *Parser) parseField() (*Field, error) {
func (p *Parser) parseField(f *Field) error {
var err error
field := &Field{Name: p.next().val}
f.Name = p.next().val
if p.peek(itemColon) {
p.ignore()
if p.peek(itemName) {
field.Alias = field.Name
field.Name = p.next().val
f.Alias = f.Name
f.Name = p.next().val
} else {
return nil, errors.New("expecting an aliased field name")
return errors.New("expecting an aliased field name")
}
}
if p.peek(itemArgsOpen) {
p.ignore()
if field.Args, err = p.parseArgs(); err != nil {
return nil, err
if f.Args, err = p.parseArgs(); err != nil {
return err
}
}
return field, nil
return nil
}
func (p *Parser) parseArgs() ([]*Arg, error) {

View File

@ -2,11 +2,10 @@ package qcode
import (
"errors"
"fmt"
"reflect"
"testing"
)
/*
func compareOp(op1, op2 Operation) error {
if op1.Type != op2.Type {
return errors.New("operator type mismatch")
@ -44,6 +43,7 @@ func compareOp(op1, op2 Operation) error {
return nil
}
*/
func TestCompile(t *testing.T) {
qcompile, _ := NewCompiler(Config{})

View File

@ -9,12 +9,16 @@ import (
"github.com/gobuffalo/flect"
)
const (
maxSelectors = 30
)
type QCode struct {
Query *Query
}
type Query struct {
Select *Select
Selects []Select
}
type Column struct {
@ -24,18 +28,19 @@ type Column struct {
}
type Select struct {
ID int16
ID uint16
ParentID uint16
Args map[string]*Node
AsList bool
Table string
Singular string
FieldName string
Cols []*Column
Cols []Column
Where *Exp
OrderBy []*OrderBy
DistinctOn []string
Paging Paging
Joins []*Select
Children []uint16
}
type Exp struct {
@ -184,7 +189,7 @@ const (
)
type Config struct {
Filter []string
DefaultFilter []string
FilterMap map[string][]string
Blacklist []string
}
@ -202,7 +207,7 @@ func NewCompiler(conf Config) (*Compiler, error) {
bl[strings.ToLower(conf.Blacklist[i])] = struct{}{}
}
fl, err := compileFilter(conf.Filter)
fl, err := compileFilter(conf.DefaultFilter)
if err != nil {
return nil, err
}
@ -246,37 +251,49 @@ func (com *Compiler) CompileQuery(query string) (*QCode, error) {
}
func (com *Compiler) compileQuery(op *Operation) (*Query, error) {
var selRoot *Select
var id, parentID uint16
selects := make([]Select, 0, 5)
st := util.NewStack()
id := int16(0)
fs := make([]*Select, op.FieldLen)
for i := range op.Fields {
st.Push(op.Fields[i])
if len(op.Fields) == 0 {
return nil, errors.New("empty query")
}
st.Push(op.Fields[0].ID)
for {
if st.Len() == 0 {
break
}
intf := st.Pop()
field, ok := intf.(*Field)
if !ok || field == nil {
return nil, fmt.Errorf("unexpected value poped out %v", intf)
if id >= maxSelectors {
return nil, fmt.Errorf("selector limit reached (%d)", maxSelectors)
}
intf := st.Pop()
fid, ok := intf.(uint16)
if !ok {
return nil, fmt.Errorf("15: unexpected value %v (%t)", intf, intf)
}
field := &op.Fields[fid]
fn := strings.ToLower(field.Name)
if _, ok := com.bl[fn]; ok {
continue
}
tn := flect.Pluralize(fn)
s := &Select{
s := Select{
ID: id,
ParentID: parentID,
Table: tn,
Children: make([]uint16, 0, 5),
}
if s.ID != 0 {
p := &selects[s.ParentID]
p.Children = append(p.Children, s.ID)
}
if fn == tn {
@ -299,68 +316,67 @@ func (com *Compiler) compileQuery(op *Operation) (*Query, error) {
s.FieldName = s.Singular
}
id++
fs[field.ID] = s
err := com.compileArgs(s, field.Args)
err := com.compileArgs(&s, field.Args)
if err != nil {
return nil, err
}
for i := range field.Children {
f := field.Children[i]
s.Cols = make([]Column, 0, len(field.Children))
for _, cid := range field.Children {
f := op.Fields[cid]
fn := strings.ToLower(f.Name)
if _, ok := com.bl[fn]; ok {
continue
}
if f.Children == nil {
col := &Column{Name: fn}
if len(f.Children) != 0 {
parentID = s.ID
st.Push(f.ID)
continue
}
col := Column{Name: fn}
if len(f.Alias) != 0 {
col.FieldName = f.Alias
} else {
col.FieldName = f.Name
}
s.Cols = append(s.Cols, col)
} else {
st.Push(f)
}
}
if field.Parent == nil {
selRoot = s
} else {
sp := fs[field.Parent.ID]
sp.Joins = append(sp.Joins, s)
}
selects = append(selects, s)
id++
}
var ok bool
var fil *Exp
if selRoot != nil {
fil, ok = com.fm[selRoot.Table]
}
if id > 0 {
root := &selects[0]
fil, ok = com.fm[root.Table]
if !ok || fil == nil {
fil = com.fl
}
if fil != nil && fil.Op != OpNop {
if selRoot.Where != nil {
selRoot.Where = &Exp{Op: OpAnd, Children: []*Exp{fil, selRoot.Where}}
if root.Where != nil {
ex := &Exp{Op: OpAnd, Children: []*Exp{fil, root.Where}}
root.Where = ex
} else {
selRoot.Where = fil
root.Where = fil
}
}
if selRoot == nil {
} else {
return nil, errors.New("invalid query")
}
return &Query{selRoot}, nil
return &Query{selects[:id]}, nil
}
func (com *Compiler) compileArgs(sel *Select, args []*Arg) error {
@ -379,7 +395,7 @@ func (com *Compiler) compileArgs(sel *Select, args []*Arg) error {
switch an {
case "id":
if sel.ID == int16(0) {
if sel.ID == 0 {
err = com.compileArgID(sel, args[i])
}
case "search":
@ -437,7 +453,7 @@ func (com *Compiler) compileArgNode(val *Node) (*Exp, error) {
intf := st.Pop()
eT, ok := intf.(*expT)
if !ok || eT == nil {
return nil, fmt.Errorf("unexpected value poped out %v", intf)
return nil, fmt.Errorf("16: unexpected value %v (%t)", intf, intf)
}
if len(eT.node.Name) != 0 {
@ -542,7 +558,7 @@ func (com *Compiler) compileArgOrderBy(sel *Select, arg *Arg) error {
node, ok := intf.(*Node)
if !ok || node == nil {
return fmt.Errorf("OrderBy: unexpected value poped out %v", intf)
return fmt.Errorf("17: unexpected value %v (%t)", intf, intf)
}
if _, ok := com.bl[strings.ToLower(node.Name)]; ok {
@ -768,16 +784,17 @@ func setListVal(ex *Exp, node *Node) {
func setWhereColName(ex *Exp, node *Node) {
var list []string
for n := node.Parent; n != nil; n = n.Parent {
if n.Type != nodeObj {
continue
}
if len(n.Name) != 0 {
k := strings.ToLower(n.Name)
if k == "and" || k == "or" || k == "not" ||
k == "_and" || k == "_or" || k == "_not" {
continue
}
if len(k) != 0 {
list = append([]string{k}, list...)
}
}
@ -785,21 +802,22 @@ func setWhereColName(ex *Exp, node *Node) {
ex.Col = list[0]
} else if len(list) > 2 {
ex.Col = strings.Join(list, ".")
ex.Col = buildPath(list)
ex.NestedCol = true
}
}
func setOrderByColName(ob *OrderBy, node *Node) {
var list []string
for n := node; n != nil; n = n.Parent {
if len(n.Name) != 0 {
k := strings.ToLower(n.Name)
if len(k) != 0 {
list = append([]string{k}, list...)
}
}
if len(list) != 0 {
ob.Col = strings.Join(list, ".")
ob.Col = buildPath(list)
}
}
@ -834,3 +852,26 @@ func compileFilter(filter []string) (*Exp, error) {
}
return fl, nil
}
// buildPath joins the column-path segments in a with a "." separator,
// e.g. ["a","b","c"] -> "a.b.c". An empty slice yields "".
//
// The standard library's strings.Join already does exactly the
// manual preallocate-and-append dance the previous version did,
// so use it directly.
func buildPath(a []string) string {
	return strings.Join(a, ".")
}

View File

@ -1,3 +1,2 @@
class ApplicationController < ActionController::Base
before_action :authenticate_user!
end

View File

@ -1,4 +1,5 @@
class ProductsController < ApplicationController
before_action :authenticate_user!
before_action :set_product, only: [:show, :edit, :update, :destroy]
# GET /products

View File

@ -0,0 +1,55 @@
# Mock Stripe endpoint used to demo REST API stitching: returns a
# canned list of charges with "$id" substituted by the requested id.
class StripeController < ApplicationController
  # GET /stripe/1
  # GET /stripe/1.json
  def show
    data = '{ "data": [
      {
        "id": 1,
        "customer_id": "$id",
        "object": "charge",
        "amount": 100,
        "amount_refunded": 0,
        "date": "01/01/2019",
        "application": null,
        "billing_details": {
          "address": "1 Infinity Drive",
          "zipcode": "94024"
        }
      },
      {
        "id": 2,
        "customer_id": "$id",
        "object": "charge",
        "amount": 150,
        "amount_refunded": 0,
        "date": "02/18/2019",
        "billing_details": {
          "address": "1 Infinity Drive",
          "zipcode": "94024"
        }
      },
      {
        "id": 3,
        "customer_id": "$id",
        "object": "charge",
        "amount": 150,
        "amount_refunded": 50,
        "date": "03/21/2019",
        "billing_details": {
          "address": "1 Infinity Drive",
          "zipcode": "94024"
        }
      }
    ],
    "data_type": "charges",
    "total_count": 3,
    "next_cursor": null
    }'

    # Substitute the route id into the canned payload without
    # mutating the template string, then respond with parsed JSON.
    payload = data.gsub("$id", params[:id])
    render json: JSON.parse(payload)
  end
end

View File

@ -4,5 +4,7 @@ Rails.application.routes.draw do
resources :products
# For details on the DSL available within this file, see http://guides.rubyonrails.org/routing.html
get '/stripe/:id', to: 'stripe#show', as: 'stripe'
root to: "products#index"
end

View File

@ -5,6 +5,7 @@ class DeviseCreateCustomers < ActiveRecord::Migration[5.2]
create_table :customers do |t|
t.string :full_name, null: false
t.string :phone
t.string :stripe_id
## Database authenticatable
t.string :email, null: false, default: ""

View File

@ -18,6 +18,7 @@ ActiveRecord::Schema.define(version: 2019_04_05_042247) do
create_table "customers", force: :cascade do |t|
t.string "full_name", null: false
t.string "phone"
t.string "stripe_id"
t.string "email", default: "", null: false
t.string "encrypted_password", default: "", null: false
t.string "reset_password_token"

View File

@ -41,6 +41,7 @@ end
customer_count.times do |i|
customer = Customer.create(
stripe_id: "cus_" + [*('A'..'Z'),*('a'..'z'),*('0'..'9')].shuffle[0,10].join,
full_name: Faker::Name.name,
phone: Faker::PhoneNumber.cell_phone,
email: Faker::Internet.email,

114
serv/config.go Normal file
View File

@ -0,0 +1,114 @@
package serv
// config is the top-level application configuration, decoded from the
// config file via the mapstructure tags below (presumably loaded with
// viper — confirm against the loader in initConf).
type config struct {
	AppName       string `mapstructure:"app_name"`
	Env           string
	HostPort      string `mapstructure:"host_port"`
	WebUI         bool   `mapstructure:"web_ui"`
	DebugLevel    int    `mapstructure:"debug_level"`
	EnableTracing bool   `mapstructure:"enable_tracing"`
	AuthFailBlock string `mapstructure:"auth_fail_block"`

	// Inflections maps custom singular -> plural word forms fed to
	// the pluralizer.
	Inflections map[string]string

	// Auth configures request authentication (Rails session or JWT).
	Auth struct {
		Type   string
		Cookie string
		Header string

		Rails struct {
			Version       string
			SecretKeyBase string `mapstructure:"secret_key_base"`
			URL           string
			Password      string
			MaxIdle       int `mapstructure:"max_idle"`
			MaxActive     int `mapstructure:"max_active"`
			Salt          string
			SignSalt      string `mapstructure:"sign_salt"`
			AuthSalt      string `mapstructure:"auth_salt"`
		}

		JWT struct {
			Provider   string
			Secret     string
			PubKeyFile string `mapstructure:"public_key_file"`
			PubKeyType string `mapstructure:"public_key_type"`
		}
	}

	// DB holds Postgres connection settings plus per-table query
	// configuration.
	DB struct {
		Type       string
		Host       string
		Port       string
		DBName     string
		User       string
		Password   string
		Schema     string
		PoolSize   int    `mapstructure:"pool_size"`
		MaxRetries int    `mapstructure:"max_retries"`
		LogLevel   string `mapstructure:"log_level"`

		// Variables are substituted into compiled SQL templates.
		Variables map[string]string

		// Defaults apply to tables that define no settings of
		// their own.
		Defaults struct {
			Filter    []string
			Blacklist []string
		}

		// Fields is the deprecated name for Tables; kept so older
		// config files keep working (initConf copies it over).
		Fields []configTable
		Tables []configTable
	} `mapstructure:"database"`
}
// configTable holds the per-table query settings: access filters, an
// optional alias to the real database table, blocked field names, and
// any remote REST resolvers stitched into this table's results.
type configTable struct {
	Name      string
	Filter    []string
	Table     string
	Blacklist []string
	Remotes   []configRemote
}
// configRemote describes one remote REST API stitched into a table.
type configRemote struct {
	// Name is the field name the remote data appears under.
	Name string
	// ID is the table column used as the id in the remote request;
	// when empty the table's primary key is used (see initRemotes).
	ID string
	// Path is a dot-separated JSON path stripped from the remote
	// response before stitching.
	Path string
	// URL is the remote endpoint; the literal "$id" is replaced by
	// the row's id value.
	URL string
	// PassHeaders lists request headers forwarded from the incoming
	// client request to the remote.
	PassHeaders []string `mapstructure:"pass_headers"`
	// SetHeaders lists fixed name/value headers added to every
	// remote request.
	SetHeaders []struct {
		Name  string
		Value string
	} `mapstructure:"set_headers"`
}
// getAliasMap returns a map from each configured table name to the
// real database table it aliases. Tables that set no alias (empty
// Table) are omitted.
func (c *config) getAliasMap() map[string]string {
	aliases := make(map[string]string, len(c.DB.Tables))

	for _, tbl := range c.DB.Tables {
		if len(tbl.Table) != 0 {
			aliases[tbl.Name] = tbl.Table
		}
	}
	return aliases
}
// getFilterMap returns the per-table filter lists keyed by table
// name. A first filter entry of "none" maps to an empty list, which
// disables filtering for that table; tables with no filters at all
// are omitted (so defaults apply).
func (c *config) getFilterMap() map[string][]string {
	filters := make(map[string][]string, len(c.DB.Tables))

	for _, tbl := range c.DB.Tables {
		switch {
		case len(tbl.Filter) == 0:
			// no entry: fall back to the default filter
		case tbl.Filter[0] == "none":
			filters[tbl.Name] = []string{}
		default:
			filters[tbl.Name] = tbl.Filter
		}
	}
	return filters
}

View File

@ -1,100 +1,282 @@
package serv
import (
"bytes"
"context"
"crypto/sha1"
"encoding/json"
"fmt"
"io"
"net/http"
"strings"
"time"
"github.com/allegro/bigcache"
"github.com/cespare/xxhash/v2"
"github.com/dosco/super-graph/jsn"
"github.com/dosco/super-graph/qcode"
"github.com/go-pg/pg"
"github.com/valyala/fasttemplate"
)
var (
cache, _ = bigcache.NewBigCache(bigcache.DefaultConfig(24 * time.Hour))
const (
empty = ""
)
func handleReq(ctx context.Context, w io.Writer, req *gqlReq) error {
var key, finalSQL string
var qc *qcode.QCode
// var (
// cache, _ = bigcache.NewBigCache(bigcache.DefaultConfig(24 * time.Hour))
// )
var entry []byte
var err error
cacheEnabled := (conf.EnableTracing == false)
if cacheEnabled {
k := sha1.Sum([]byte(req.Query))
key = string(k[:])
entry, err = cache.Get(key)
type coreContext struct {
req gqlReq
res gqlResp
context.Context
}
if len(entry) == 0 || err == bigcache.ErrEntryNotFound {
qc, err = qcompile.CompileQuery(req.Query)
func (c *coreContext) handleReq(w io.Writer, req *http.Request) error {
var err error
//cacheEnabled := (conf.EnableTracing == false)
qc, err := qcompile.CompileQuery(c.req.Query)
if err != nil {
return err
}
var sqlStmt strings.Builder
vars := varMap(c)
if err := pcompile.Compile(&sqlStmt, qc); err != nil {
data, skipped, err := c.resolveSQL(qc, vars)
if err != nil {
return err
}
t := fasttemplate.New(sqlStmt.String(), openVar, closeVar)
sqlStmt.Reset()
if len(data) == 0 || skipped == 0 {
return c.render(w, data)
}
_, err = t.Execute(&sqlStmt, varMap(ctx, req.Vars))
sel := qc.Query.Selects
h := xxhash.New()
// fetch the field name used within the db response json
// that are used to mark insertion points and the mapping between
// those field names and their select objects
fids, sfmap := parentFieldIds(h, sel, skipped)
// fetch the field values of the marked insertion points
// these values contain the id to be used with fetching remote data
from := jsn.Get(data, fids)
// replacement data for the marked insertion points
// key and value will be replaced by whats below
to := make([]jsn.Field, 0, len(from))
for _, id := range from {
// use the json key to find the related Select object
k1 := xxhash.Sum64(id.Key)
s, ok := sfmap[k1]
if !ok {
continue
}
p := sel[s.ParentID]
		// then use the Table name in the Select and its parent
// to find the resolver to use for this relationship
k2 := mkkey(h, s.Table, p.Table)
r, ok := rmap[k2]
if !ok {
continue
}
id := jsn.Value(id.Value)
if len(id) == 0 {
continue
}
b, err := r.Fn(req, id)
if err != nil {
return err
}
if len(r.Path) != 0 {
b = jsn.Strip(b, r.Path)
}
fils := []string{}
for i := range s.Cols {
fils = append(fils, s.Cols[i].Name)
}
var ob bytes.Buffer
if err = jsn.Filter(&ob, b, fils); err != nil {
return err
}
f := jsn.Field{[]byte(s.FieldName), ob.Bytes()}
to = append(to, f)
}
var ob bytes.Buffer
err = jsn.Replace(&ob, data, from, to)
if err != nil {
return err
}
// if cacheEnabled {
// if err = cache.Set(key, []byte(finalSQL)); err != nil {
// return err
// }
// }
return c.render(w, ob.Bytes())
}
func (c *coreContext) resolveSQL(qc *qcode.QCode, vars variables) (
[]byte, uint32, error) {
//var entry []byte
//var key string
//cacheEnabled := (conf.EnableTracing == false)
// if cacheEnabled {
// k := sha1.Sum([]byte(req.Query))
// key = string(k[:])
// entry, err = cache.Get(key)
// if err != nil && err != bigcache.ErrEntryNotFound {
// return emtpy, err
// }
// if len(entry) != 0 && err == nil {
// return entry, nil
// }
// }
skipped, stmts, err := pcompile.Compile(qc)
if err != nil {
return nil, 0, err
}
t := fasttemplate.New(stmts[0], openVar, closeVar)
var sqlStmt strings.Builder
_, err = t.Execute(&sqlStmt, vars)
if err == errNoUserID &&
authFailBlock == authFailBlockPerQuery &&
authCheck(ctx) == false {
return errUnauthorized
authCheck(c) == false {
return nil, 0, errUnauthorized
}
if err != nil {
return err
return nil, 0, err
}
finalSQL = sqlStmt.String()
} else if err != nil {
return err
} else {
finalSQL = string(entry)
}
finalSQL := sqlStmt.String()
if conf.DebugLevel > 0 {
fmt.Println(finalSQL)
}
st := time.Now()
var root json.RawMessage
_, err = db.Query(pg.Scan(&root), finalSQL)
if err != nil {
return err
}
et := time.Now()
resp := gqlResp{Data: json.RawMessage(root)}
if cacheEnabled {
if err = cache.Set(key, []byte(finalSQL)); err != nil {
return err
}
return nil, 0, err
}
if conf.EnableTracing {
resp.Extensions = &extensions{newTrace(st, et, qc)}
c.res.Extensions = &extensions{newTrace(st, time.Now(), qc)}
}
json.NewEncoder(w).Encode(resp)
return []byte(root), skipped, nil
}
// render stores data as the response's "data" payload and writes the
// whole accumulated response (including any tracing extensions set
// earlier on c.res) to w as JSON.
func (c *coreContext) render(w io.Writer, data []byte) error {
	c.res.Data = json.RawMessage(data)
	return json.NewEncoder(w).Encode(c.res)
}
// parentFieldIds collects, for every select marked in the skipped
// bitmask (selects handled by a remote resolver rather than SQL), the
// synthetic id-field key to look for in the db JSON response. It
// returns that key list plus a map from the xxhash of each key back
// to its Select, so callers can relate extracted values to selects.
func parentFieldIds(h *xxhash.Digest, sel []qcode.Select, skipped uint32) (
	[][]byte,
	map[uint64]*qcode.Select) {

	// first pass: count the skipped selects so both containers
	// below can be sized exactly
	c := 0
	for i := range sel {
		s := &sel[i]
		if isSkipped(skipped, s.ID) {
			c++
		}
	}

	// list of keys (and its related value) to extract from
	// the db json response
	fm := make([][]byte, c)

	// mapping between the above extracted key and a Select
	// object
	sm := make(map[uint64]*qcode.Select, c)
	n := 0

	for i := range sel {
		s := &sel[i]

		if isSkipped(skipped, s.ID) == false {
			continue
		}

		p := sel[s.ParentID]
		// resolvers are indexed by the child/parent table pair
		// (see how rmap is populated)
		k := mkkey(h, s.Table, p.Table)

		if r, ok := rmap[k]; ok {
			fm[n] = r.IDField
			n++

			// inner k shadows the outer one on purpose: this is
			// the hash of the id field, not the table pair
			k := xxhash.Sum64(r.IDField)
			sm[k] = s
		}
	}

	return fm, sm
}
// isSkipped reports whether bit pos is set in the skip bitmask n.
func isSkipped(n uint32, pos uint16) bool {
	return (n>>pos)&1 == 1
}
// authCheck reports whether the request context carries an
// authenticated user id (set by the auth middleware under userIDKey).
func authCheck(ctx *coreContext) bool {
	return (ctx.Value(userIDKey) != nil)
}
// newTrace builds the Apollo-style tracing extension for a query.
// st and et bracket the SQL execution; qc is the compiled query.
// Returns nil when the query has no selects to report on.
func newTrace(st, et time.Time, qc *qcode.QCode) *trace {
	if len(qc.Query.Selects) == 0 {
		return nil
	}

	// BUG FIX: this was et.Sub(et), which is always zero; the
	// intended duration is end minus start.
	du := et.Sub(st)
	sel := qc.Query.Selects[0]

	t := &trace{
		Version:   1,
		StartTime: st,
		EndTime:   et,
		Duration:  du,
		Execution: execution{
			[]resolver{
				resolver{
					Path:        []string{sel.Table},
					ParentType:  "Query",
					FieldName:   sel.Table,
					ReturnType:  "object",
					StartOffset: 1,
					Duration:    du,
				},
			},
		},
	}

	return t
}

View File

@ -65,7 +65,7 @@ type resolver struct {
}
func apiv1Http(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
ctx := &coreContext{Context: r.Context()}
if authFailBlock == authFailBlockAlways && authCheck(ctx) == false {
http.Error(w, "Not authorized", 401)
@ -79,13 +79,12 @@ func apiv1Http(w http.ResponseWriter, r *http.Request) {
return
}
req := &gqlReq{}
if err := json.Unmarshal(b, req); err != nil {
if err := json.Unmarshal(b, &ctx.req); err != nil {
errorResp(w, err)
return
}
if strings.EqualFold(req.OpName, introspectionQuery) {
if strings.EqualFold(ctx.req.OpName, introspectionQuery) {
dat, err := ioutil.ReadFile("test.schema")
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
@ -95,7 +94,7 @@ func apiv1Http(w http.ResponseWriter, r *http.Request) {
return
}
err = handleReq(ctx, w, req)
err = ctx.handleReq(w, r)
if err == errUnauthorized {
http.Error(w, "Not authorized", 401)
@ -105,3 +104,12 @@ func apiv1Http(w http.ResponseWriter, r *http.Request) {
errorResp(w, err)
}
}
// errorResp logs err when debugging is enabled and sends it to the
// client as a 400 response in the GraphQL error envelope (gqlResp).
// The Encode error is deliberately not checked — nothing useful can
// be done if writing the error response itself fails.
func errorResp(w http.ResponseWriter, err error) {
	if conf.DebugLevel > 0 {
		logger.Error(err.Error())
	}

	w.WriteHeader(http.StatusBadRequest)
	json.NewEncoder(w).Encode(gqlResp{Error: err.Error()})
}

114
serv/reso.go Normal file
View File

@ -0,0 +1,114 @@
package serv
import (
	"fmt"
	"io/ioutil"
	"net/http"
	"strings"
	"time"

	"github.com/cespare/xxhash/v2"
	"github.com/dosco/super-graph/psql"
)
var (
	// rmap indexes remote resolvers two ways: by mkkey(remoteName,
	// tableName) and by the xxhash of the resolver's IDField marker
	// (both populated in initRemotes).
	rmap map[uint64]*resolvFn
)

// resolvFn describes how to fetch remote REST data for one table
// relationship and where to find its stitching points.
type resolvFn struct {
	// IDField is the synthetic "__table_idcol" key injected into the
	// db JSON response to mark the insertion point.
	IDField []byte
	// Path is the dot-separated JSON path stripped from the remote
	// response before stitching.
	Path [][]byte
	// Fn performs the remote HTTP request; id is the row's id value.
	Fn func(r *http.Request, id []byte) ([]byte, error)
}
// initResolvers builds the global remote-resolver index (rmap) from
// the remotes declared on each configured table.
func initResolvers() {
	rmap = make(map[uint64]*resolvFn)

	for i := range conf.DB.Tables {
		initRemotes(conf.DB.Tables[i])
	}
}
// initRemotes registers one resolver per remote declared on table t:
// it wires a psql relationship between the table and the remote name,
// then indexes a resolvFn (by parent/child key and by its id field)
// used at request time to stitch the remote JSON into the response.
func initRemotes(t configTable) {
	h := xxhash.New()

	for _, r := range t.Remotes {
		// defines the table column to be used as an id in the
		// remote request
		idcol := r.ID

		// if no table column specified in the config then
		// use the primary key of the table as the id
		if len(idcol) == 0 {
			idcol = pcompile.IDColumn(t.Name)
		}
		idk := fmt.Sprintf("__%s_%s", t.Name, idcol)

		// register a relationship between the remote data
		// and the database table
		key := psql.TTKey{strings.ToLower(r.Name), t.Name}

		val := &psql.DBRel{
			Type: psql.RelRemote,
			Col1: idcol,
			Col2: idk,
		}
		pcompile.AddRelationship(key, val)

		// the function that's called to resolve this remote
		// data request
		fn := buildFn(r)

		// BUG FIX: strings.Split("", ".") returns [""], so an empty
		// configured Path produced a one-element path and made the
		// len(r.Path) != 0 check downstream pass incorrectly. Only
		// split when a path is actually configured.
		var path [][]byte
		if len(r.Path) != 0 {
			for _, p := range strings.Split(r.Path, ".") {
				path = append(path, []byte(p))
			}
		}

		rf := &resolvFn{
			IDField: []byte(idk),
			Path:    path,
			Fn:      fn,
		}

		// index resolver obj by parent and child names
		rmap[mkkey(h, r.Name, t.Name)] = rf

		// index resolver obj by IDField
		rmap[xxhash.Sum64(rf.IDField)] = rf
	}
}
// buildFn returns the fetch function for remote r. The returned
// function GETs r.URL with "$id" replaced by the row id, forwarding
// the configured pass_headers from the incoming client request and
// applying any fixed set_headers, and returns the raw response body.
func buildFn(r configRemote) func(*http.Request, []byte) ([]byte, error) {
	reqURL := strings.Replace(r.URL, "$id", "%s", 1)

	// one client is shared across calls so connections are pooled;
	// the timeout stops a dead remote from hanging requests forever
	client := &http.Client{Timeout: 10 * time.Second}

	fn := func(inReq *http.Request, id []byte) ([]byte, error) {
		req, err := http.NewRequest("GET", fmt.Sprintf(reqURL, id), nil)
		if err != nil {
			return nil, err
		}

		// BUG FIX: the header map used to be built once outside this
		// closure and mutated on every call, racing across concurrent
		// requests and leaking one request's pass-through headers
		// into the next. Build a fresh header set per call.
		h := make(http.Header, len(r.SetHeaders)+len(r.PassHeaders))
		for _, v := range r.SetHeaders {
			h.Set(v.Name, v.Value)
		}
		for _, v := range r.PassHeaders {
			h.Set(v, inReq.Header.Get(v))
		}
		req.Header = h

		res, err := client.Do(req)
		if err != nil {
			return nil, err
		}
		defer res.Body.Close()

		b, err := ioutil.ReadAll(res.Body)
		if err != nil {
			return nil, err
		}
		return b, nil
	}

	return fn
}

View File

@ -1,13 +1,16 @@
package serv
import (
"context"
"errors"
"flag"
"fmt"
"log"
"net/http"
"os"
"os/signal"
"strings"
"time"
"github.com/dosco/super-graph/psql"
"github.com/dosco/super-graph/qcode"
@ -20,6 +23,8 @@ import (
//go:generate esc -o static.go -ignore \\.DS_Store -prefix ../web/build -private -pkg serv ../web/build
const (
serverName = "Super Graph"
authFailBlockAlways = iota + 1
authFailBlockPerQuery
authFailBlockNever
@ -29,74 +34,11 @@ var (
logger *logrus.Logger
conf *config
db *pg.DB
pcompile *psql.Compiler
qcompile *qcode.Compiler
pcompile *psql.Compiler
authFailBlock int
)
type config struct {
AppName string `mapstructure:"app_name"`
Env string
HostPort string `mapstructure:"host_port"`
WebUI bool `mapstructure:"web_ui"`
DebugLevel int `mapstructure:"debug_level"`
EnableTracing bool `mapstructure:"enable_tracing"`
AuthFailBlock string `mapstructure:"auth_fail_block"`
Inflections map[string]string
Auth struct {
Type string
Cookie string
Header string
Rails struct {
Version string
SecretKeyBase string `mapstructure:"secret_key_base"`
URL string
Password string
MaxIdle int `mapstructure:"max_idle"`
MaxActive int `mapstructure:"max_active"`
Salt string
SignSalt string `mapstructure:"sign_salt"`
AuthSalt string `mapstructure:"auth_salt"`
}
JWT struct {
Provider string
Secret string
PubKeyFile string `mapstructure:"public_key_file"`
PubKeyType string `mapstructure:"public_key_type"`
}
}
DB struct {
Type string
Host string
Port string
DBName string
User string
Password string
Schema string
PoolSize int `mapstructure:"pool_size"`
MaxRetries int `mapstructure:"max_retries"`
LogLevel string `mapstructure:"log_level"`
Variables map[string]string
Defaults struct {
Filter []string
Blacklist []string
}
Fields []struct {
Name string
Filter []string
Table string
Blacklist []string
}
} `mapstructure:"database"`
}
func initLog() *logrus.Logger {
log := logrus.New()
log.Formatter = new(logrus.TextFormatter)
@ -153,6 +95,15 @@ func initConf() (*config, error) {
flect.AddPlural(k, v)
}
if len(c.DB.Tables) == 0 {
c.DB.Tables = c.DB.Fields
}
for i := range c.DB.Tables {
t := c.DB.Tables[i]
t.Name = flect.Pluralize(strings.ToLower(t.Name))
}
authFailBlock = getAuthFailBlock(c)
//fmt.Printf("%#v", c)
@ -196,50 +147,31 @@ func initDB(c *config) (*pg.DB, error) {
}
func initCompilers(c *config) (*qcode.Compiler, *psql.Compiler, error) {
cdb := c.DB
fm := make(map[string][]string, len(cdb.Fields))
tmap := make(map[string]string, len(cdb.Fields))
for i := range cdb.Fields {
f := cdb.Fields[i]
name := flect.Pluralize(strings.ToLower(f.Name))
if len(f.Filter) != 0 {
if f.Filter[0] == "none" {
fm[name] = []string{}
} else {
fm[name] = f.Filter
}
}
if len(f.Table) != 0 {
tmap[name] = f.Table
}
}
qc, err := qcode.NewCompiler(qcode.Config{
Filter: cdb.Defaults.Filter,
FilterMap: fm,
Blacklist: cdb.Defaults.Blacklist,
})
schema, err := psql.NewDBSchema(db)
if err != nil {
return nil, nil, err
}
schema, err := psql.NewDBSchema(db)
qc, err := qcode.NewCompiler(qcode.Config{
DefaultFilter: c.DB.Defaults.Filter,
FilterMap: c.getFilterMap(),
Blacklist: c.DB.Defaults.Blacklist,
})
if err != nil {
return nil, nil, err
}
pc := psql.NewCompiler(psql.Config{
Schema: schema,
Vars: cdb.Variables,
TableMap: tmap,
Vars: c.DB.Variables,
TableMap: c.getAliasMap(),
})
return qc, pc, nil
}
func InitAndListen() {
func Init() {
var err error
logger = initLog()
@ -259,16 +191,61 @@ func InitAndListen() {
log.Fatal(err)
}
http.HandleFunc("/api/v1/graphql", withAuth(apiv1Http))
initResolvers()
if conf.WebUI {
http.Handle("/", http.FileServer(_escFS(false)))
startHTTP()
}
fmt.Printf("Super-Graph listening on %s (%s)\n",
conf.HostPort, conf.Env)
func startHTTP() {
srv := &http.Server{
Addr: conf.HostPort,
Handler: routeHandler(),
ReadTimeout: 5 * time.Second,
WriteTimeout: 10 * time.Second,
MaxHeaderBytes: 1 << 20,
}
logger.Fatal(http.ListenAndServe(conf.HostPort, nil))
idleConnsClosed := make(chan struct{})
go func() {
sigint := make(chan os.Signal, 1)
signal.Notify(sigint, os.Interrupt)
<-sigint
if err := srv.Shutdown(context.Background()); err != nil {
log.Printf("http: %v", err)
}
close(idleConnsClosed)
}()
srv.RegisterOnShutdown(func() {
if err := db.Close(); err != nil {
log.Println(err)
}
})
fmt.Printf("%s listening on %s (%s)\n", serverName, conf.HostPort, conf.Env)
if err := srv.ListenAndServe(); err != http.ErrServerClosed {
fmt.Println(err)
}
<-idleConnsClosed
}
// routeHandler assembles the top-level HTTP handler: the GraphQL API
// endpoint (behind auth), the optional embedded web UI, and a wrapper
// that stamps the Server header on every response.
func routeHandler() http.Handler {
	mux := http.NewServeMux()
	mux.Handle("/api/v1/graphql", withAuth(apiv1Http))

	if conf.WebUI {
		mux.Handle("/", http.FileServer(_escFS(false)))
	}

	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Server", serverName)
		mux.ServeHTTP(w, r)
	})
}
func getConfigName() string {

View File

@ -1,44 +1,12 @@
package serv
import (
"context"
"encoding/json"
"net/http"
"time"
import "github.com/cespare/xxhash/v2"
"github.com/dosco/super-graph/qcode"
)
func mkkey(h *xxhash.Digest, k1 string, k2 string) uint64 {
h.WriteString(k1)
h.WriteString(k2)
v := h.Sum64()
h.Reset()
func errorResp(w http.ResponseWriter, err error) {
b, _ := json.Marshal(gqlResp{Error: err.Error()})
http.Error(w, string(b), http.StatusBadRequest)
}
func authCheck(ctx context.Context) bool {
return (ctx.Value(userIDKey) != nil)
}
func newTrace(st, et time.Time, qc *qcode.QCode) *trace {
du := et.Sub(et)
t := &trace{
Version: 1,
StartTime: st,
EndTime: et,
Duration: du,
Execution: execution{
[]resolver{
resolver{
Path: []string{qc.Query.Select.Table},
ParentType: "Query",
FieldName: qc.Query.Select.Table,
ReturnType: "object",
StartOffset: 1,
Duration: du,
},
},
},
}
return t
return v
}

View File

@ -1,15 +1,13 @@
package serv
import (
"context"
"fmt"
"io"
"strconv"
"github.com/valyala/fasttemplate"
)
func varMap(ctx context.Context, vars variables) variables {
func varMap(ctx *coreContext) variables {
userIDFn := func(w io.Writer, _ string) (int, error) {
if v := ctx.Value(userIDKey); v != nil {
return w.Write([]byte(v.(string)))
@ -34,7 +32,8 @@ func varMap(ctx context.Context, vars variables) variables {
"USER_ID_PROVIDER": userIDProviderTag,
}
for k, v := range vars {
for k, v := range ctx.req.Vars {
var buf []byte
if _, ok := vm[k]; ok {
continue
}
@ -42,11 +41,11 @@ func varMap(ctx context.Context, vars variables) variables {
case string:
vm[k] = val
case int:
vm[k] = strconv.Itoa(val)
vm[k] = strconv.AppendInt(buf, int64(val), 10)
case int64:
vm[k] = strconv.FormatInt(val, 64)
vm[k] = strconv.AppendInt(buf, val, 10)
case float64:
vm[k] = fmt.Sprintf("%.0f", val)
vm[k] = strconv.AppendFloat(buf, val, 'f', -1, 64)
}
}
return vm