Compare commits

...

3 Commits

29 changed files with 1069 additions and 695 deletions

337
allow/allow.go Normal file

@ -0,0 +1,337 @@
package allow
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"os"
"path"
"sort"
"strings"
)
const (
AL_QUERY int = iota + 1
AL_VARS
)
type Item struct {
Name string
key string
URI string
Query string
Vars json.RawMessage
}
type List struct {
filepath string
saveChan chan Item
}
type Config struct {
CreateIfNotExists bool
Persist bool
}
func New(cpath string, conf Config) (*List, error) {
al := List{}
if len(cpath) != 0 {
fp := path.Join(cpath, "allow.list")
if _, err := os.Stat(fp); err == nil {
al.filepath = fp
} else if !os.IsNotExist(err) {
return nil, err
}
}
if len(al.filepath) == 0 {
fp := "./allow.list"
if _, err := os.Stat(fp); err == nil {
al.filepath = fp
} else if !os.IsNotExist(err) {
return nil, err
}
}
if len(al.filepath) == 0 {
fp := "./config/allow.list"
if _, err := os.Stat(fp); err == nil {
al.filepath = fp
} else if !os.IsNotExist(err) {
return nil, err
}
}
if len(al.filepath) == 0 {
if !conf.CreateIfNotExists {
return nil, errors.New("allow.list not found")
}
if len(cpath) == 0 {
al.filepath = "./config/allow.list"
} else {
al.filepath = path.Join(cpath, "allow.list")
}
}
var err error
if conf.Persist {
al.saveChan = make(chan Item)
go func() {
for v := range al.saveChan {
if err = al.save(v); err != nil {
break
}
}
}()
}
if err != nil {
return nil, err
}
return &al, nil
}
func (al *List) IsPersist() bool {
return al.saveChan != nil
}
func (al *List) Add(vars []byte, query, uri string) error {
if al.saveChan == nil {
return errors.New("allow.list is read-only")
}
if len(query) == 0 {
return errors.New("empty query")
}
var q string
// A query beginning with a letter already carries an operation keyword;
// one beginning with '{' is anonymous and gets a "query " prefix.
for i := 0; i < len(query); i++ {
c := query[i]
if c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z' {
q = query
break
} else if c == '{' {
q = "query " + query
break
}
}
al.saveChan <- Item{
URI: uri,
Query: q,
Vars: vars,
}
return nil
}
func (al *List) Load() ([]Item, error) {
var list []Item
b, err := ioutil.ReadFile(al.filepath)
if err != nil {
return list, err
}
if len(b) == 0 {
return list, nil
}
var uri string
var varBytes []byte
itemMap := make(map[string]struct{})
s, e, c := 0, 0, 0
ty := 0
for {
fq := false
if c == 0 && b[e] == '#' {
s = e
for e < len(b) && b[e] != '\n' {
e++
}
if (e - s) > 2 {
uri = strings.TrimSpace(string(b[(s + 1):e]))
}
}
if e >= len(b) {
break
}
if matchPrefix(b, e, "query") || matchPrefix(b, e, "mutation") {
if c == 0 {
s = e
}
ty = AL_QUERY
} else if matchPrefix(b, e, "variables") {
if c == 0 {
s = e + len("variables") + 1
}
ty = AL_VARS
} else if b[e] == '{' {
c++
} else if b[e] == '}' {
c--
if c == 0 {
if ty == AL_QUERY {
fq = true
} else if ty == AL_VARS {
varBytes = b[s:(e + 1)]
}
ty = 0
}
}
if fq {
query := string(b[s:(e + 1)])
name := QueryName(query)
key := strings.ToLower(name)
if _, ok := itemMap[key]; !ok {
v := Item{
Name: name,
key: key,
URI: uri,
Query: query,
Vars: varBytes,
}
list = append(list, v)
}
varBytes = nil
}
e++
if e >= len(b) {
break
}
}
return list, nil
}
func (al *List) save(item Item) error {
item.Name = QueryName(item.Query)
item.key = strings.ToLower(item.Name)
if len(item.Name) == 0 {
return nil
}
list, err := al.Load()
if err != nil {
return err
}
index := -1
for i, v := range list {
if strings.EqualFold(v.Name, item.Name) {
index = i
break
}
}
if index != -1 {
list[index] = item
} else {
list = append(list, item)
}
f, err := os.Create(al.filepath)
if err != nil {
return err
}
defer f.Close()
sort.Slice(list, func(i, j int) bool {
return strings.Compare(list[i].key, list[j].key) == -1
})
for _, v := range list {
_, err := f.WriteString(fmt.Sprintf("# %s\n\n", v.URI))
if err != nil {
return err
}
if len(v.Vars) != 0 && !bytes.Equal(v.Vars, []byte("{}")) {
vj, err := json.MarshalIndent(v.Vars, "", " ")
if err != nil {
return fmt.Errorf("failed to marshal vars: %v", err)
}
_, err = f.WriteString(fmt.Sprintf("variables %s\n\n", vj))
if err != nil {
return err
}
}
if v.Query[0] == '{' {
_, err = f.WriteString(fmt.Sprintf("query %s\n\n", v.Query))
} else {
_, err = f.WriteString(fmt.Sprintf("%s\n\n", v.Query))
}
if err != nil {
return err
}
}
return nil
}
func matchPrefix(b []byte, i int, s string) bool {
if (len(b) - i) < len(s) {
return false
}
for n := 0; n < len(s); n++ {
if b[(i+n)] != s[n] {
return false
}
}
return true
}
func QueryName(b string) string {
// states: 0 = scan for the end of the "query"/"mutation" keyword,
// 1 = skip spaces before the name, 2 = read the name itself
state, s := 0, 0
for i := 0; i < len(b); i++ {
switch {
case state == 2 && b[i] == '{':
return b[s:i]
case state == 2 && b[i] == ' ':
return b[s:i]
case state == 1 && b[i] == '{':
// a '{' right after the keyword means the operation is unnamed
return ""
case state == 1 && b[i] != ' ':
s = i
state = 2
case state == 1 && b[i] == ' ':
continue
case i != 0 && b[i] == ' ' && (b[i-1] == 'n' || b[i-1] == 'y'):
// a space preceded by 'n' or 'y' marks the end of "mutation" or "query"
state = 1
}
}
return ""
}
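
For context, here is a minimal usage sketch of the new `allow` package API. The config path, query and URI below are illustrative, not from this diff; note that `Add` persists asynchronously via the save channel, so a `Load` immediately afterwards may not yet reflect it.

```go
package main

import (
	"fmt"
	"log"

	"github.com/dosco/super-graph/allow"
)

func main() {
	// Open or create ./config/allow.list; Persist starts the background saver.
	al, err := allow.New("./config", allow.Config{CreateIfNotExists: true, Persist: true})
	if err != nil {
		log.Fatal(err)
	}

	// Record a named query, its variables and the referring URI.
	vars := []byte(`{"product_id": 5}`)
	query := `query getProduct { products(id: $product_id) { id name } }`
	if err := al.Add(vars, query, "http://localhost:3000/"); err != nil {
		log.Fatal(err)
	}

	// Load parses every item back out of allow.list.
	items, err := al.Load()
	if err != nil {
		log.Fatal(err)
	}
	for _, it := range items {
		fmt.Println(it.Name, it.URI)
	}
}
```

On disk, `save` writes each item as a `#`-prefixed URI comment, an optional `variables` block (indented by one space via `json.MarshalIndent`), and the query itself, separated by blank lines; this is exactly the shape `Load` parses back. A hypothetical entry:

```
# http://localhost:3000/

variables {
 "product_id": 5
}

query getProduct {
  products(id: $product_id) {
    id
    name
  }
}
```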

82
allow/allow_test.go Normal file

@ -0,0 +1,82 @@
package allow
import (
"testing"
)
func TestGQLName1(t *testing.T) {
var q = `
query {
products(
distinct: [price]
where: { id: { and: { greater_or_equals: 20, lt: 28 } } }
) { id name } }`
name := QueryName(q)
if len(name) != 0 {
t.Fatal("Name should be empty, not ", name)
}
}
func TestGQLName2(t *testing.T) {
var q = `
query hakuna_matata {
products(
distinct: [price]
where: { id: { and: { greater_or_equals: 20, lt: 28 } } }
) {
id
name
}
}`
name := QueryName(q)
if name != "hakuna_matata" {
t.Fatal("Name should be 'hakuna_matata', not ", name)
}
}
func TestGQLName3(t *testing.T) {
var q = `
mutation means{ users { id } }`
// var v2 = ` { products( limit: 30, order_by: { price: desc }, distinct: [ price ] where: { id: { and: { greater_or_equals: 20, lt: 28 } } }) { id name price user { id email } } } `
name := QueryName(q)
if name != "means" {
t.Fatal("Name should be 'means', not ", name)
}
}
func TestGQLName4(t *testing.T) {
var q = `
query no_worries
users {
id
}
}`
name := QueryName(q)
if name != "no_worries" {
t.Fatal("Name should be 'no_worries', not ", name)
}
}
func TestGQLName5(t *testing.T) {
var q = `
{
users {
id
}
}`
name := QueryName(q)
if len(name) != 0 {
t.Fatal("Name should be empty, not ", name)
}
}

15
allow/fuzz_test.go Normal file

@ -0,0 +1,15 @@
package allow
import "testing"
func TestFuzzCrashers(t *testing.T) {
var crashers = []string{
"query",
"q",
"que",
}
for _, f := range crashers {
_ = QueryName(f)
}
}


@ -651,8 +651,6 @@ query {
}
```
## Mutations
In GraphQL, mutations are the operation type used when you need to modify data. Super Graph supports `insert`, `update`, `upsert` and `delete` mutations. You can also do complex nested inserts and updates.
When using mutations, the data must be passed as variables since Super Graph compiles the query into a prepared statement in the database for maximum speed. Prepared statements are like functions in your code: when called, they accept arguments, and your variables are passed in as those arguments.
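As a sketch of what this looks like from a client, the mutation text stays fixed while the data rides in `variables` (the endpoint matches the one used elsewhere in these docs; the table and fields are illustrative):

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

func main() {
	// The query string never changes; only the "variables" payload does,
	// so the server can reuse one prepared statement for every call.
	body, err := json.Marshal(map[string]interface{}{
		"query": `mutation { product(insert: $data) { id name } }`,
		"variables": map[string]interface{}{
			"data": map[string]interface{}{"name": "Art of Computer Science", "price": 30.5},
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	res, err := http.Post("http://localhost:8080/api/v1/graphql", "application/json", bytes.NewReader(body))
	if err != nil {
		log.Fatal(err)
	}
	defer res.Body.Close()
	fmt.Println(res.Status)
}
```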
@ -836,8 +834,6 @@ mutation {
}
```
## Nested Mutations
Often you will need to create or update multiple related items at the same time. This can be done using nested mutations. For example, you might need to create a product and assign it to a user, or create a user and their products at the same time. You just have to use simple JSON to define your mutation and Super Graph takes care of the rest.
### Nested Insert
@ -906,7 +902,7 @@ mutation {
}
```
### Nested Updates
### Nested Update
Update a product item first and then assign it to a user
@ -966,7 +962,7 @@ mutation {
}
```
## Using variables
## Using Variables
Variables (`$product_id`) and their values (`"product_id": 5`) can be passed alongside the GraphQL query. Using variables makes for better client-side code as well as improved server-side SQL query caching. The built-in web UI also supports setting variables. Not having to manipulate your GraphQL query string to insert values into it makes for cleaner and better client-side code.
@ -988,6 +984,104 @@ fetch('http://localhost:8080/api/v1/graphql', {
.then(res => console.log(res.data));
```
## GraphQL with React
This is a quick simple example using `graphql.js` [https://github.com/f/graphql.js/](https://github.com/f/graphql.js/)
```js
import React, { useState, useEffect } from 'react'
import graphql from 'graphql.js'
// Create a GraphQL client pointing to Super Graph
var graph = graphql("http://localhost:3000/api/v1/graphql", { asJSON: true })
const App = () => {
const [user, setUser] = useState(null)
useEffect(() => {
async function action() {
// Use the GraphQL client to execute a graphQL query
// The second argument to the client are the variables you need to pass
const result = await graph(`{ user { id first_name last_name picture_url } }`)()
setUser(result)
}
action()
}, []);
return (
<div className="App">
<h1>{ JSON.stringify(user) }</h1>
</div>
);
}
export default App;
```
## Advanced Columns
The ability to have `JSON/JSONB` and `Array` columns is often considered among the most useful features of Postgres. There are many cases where using an array or a JSON column saves space and reduces complexity in your app. The only issue with these columns is that your SQL queries can get harder to write and maintain.
Super Graph steps in here to help you by supporting these columns right out of the box. It allows you to work with these columns just like you would with tables. Joining data against or modifying array columns using the `connect` or `disconnect` keywords in mutations is fully supported. Another very useful feature is the ability to treat `json` or `binary json (jsonb)` columns as separate tables, even using them in nested queries joining against related tables. Replicating these features on your own would take a lot of complex SQL. Using Super Graph means you don't have to deal with any of this; it just works.
### Array Columns
Configure a relationship between an array column `tag_ids` which contains integer id's for tags and the column `id` in the table `tags`.
```yaml
tables:
- name: posts
columns:
- name: tag_ids
related_to: tags.id
```
```graphql
query {
posts {
title
tags {
name
image
}
}
}
```
### JSON Column
Configure a JSON column called `tag_count` in the table `products` into a separate table. This JSON column contains a JSON array of objects, each with a tag id and a count of the number of times the tag was used. As a separate table you can nest it into your GraphQL query and treat it like a table, using any of the standard features like `order_by`, `limit`, `where` clauses, etc.
The configuration below tells Super Graph to create a synthetic table called `tag_count` using the column `tag_count` from the `products` table, and that this new table has two columns, `tag_id` and `count`, of the listed types and with the defined relationships.
```yaml
tables:
- name: tag_count
table: products
columns:
- name: tag_id
type: bigint
related_to: tags.id
- name: count
type: int
```
```graphql
query {
products {
name
tag_counts {
count
tag {
name
}
}
}
}
```
## Full text search
Every app these days needs search. Often this means reaching for something heavy like Solr. While this will work, why add complexity to your infrastructure when Postgres has really great
@ -1073,45 +1167,43 @@ class AddSearchColumn < ActiveRecord::Migration[5.1]
end
```
## GraphQL with React
## API Security
This is a quick simple example using `graphql.js` [https://github.com/f/graphql.js/](https://github.com/f/graphql.js/)
One of the most common questions I get asked is: what happens if a user out on the internet issues queries
that we don't want issued? For example, how do we stop them from fetching all users or the emails of users? Our answer to this is that it's not an issue, as this cannot happen; let me explain.
```js
import React, { useState, useEffect } from 'react'
import graphql from 'graphql.js'
Super Graph runs in one of two modes, `development` or `production`, controlled via the `production` config value: when it's `false` Super Graph runs in development mode, and when `true`, in production. In development mode all the **named** queries (including mutations) you run are saved into the allow list (`./config/allow.list`). In production mode, when Super Graph starts, only the queries from this allow list file are registered with the database as [prepared statements](https://stackoverflow.com/questions/8263371/how-can-prepared-statements-protect-from-sql-injection-attacks). Prepared statements are designed by databases to be fast and secure. They protect against all kinds of SQL injection attacks, and since they are pre-processed and pre-planned they are much faster to run than raw SQL queries. Also, there's no GraphQL-to-SQL compiling happening in production mode, which makes your queries lightning fast as they go directly to the database with almost no overhead.
// Create a GraphQL client pointing to Super Graph
var graph = graphql("http://localhost:3000/api/v1/graphql", { asJSON: true })
In short, in production only queries listed in the allow list file (`./config/allow.list`) can be used; all other queries will be blocked.
const App = () => {
const [user, setUser] = useState(null)
useEffect(() => {
async function action() {
// Use the GraphQL client to execute a graphQL query
// The second argument to the client are the variables you need to pass
const result = await graph(`{ user { id first_name last_name picture_url } }`)()
setUser(result)
::: tip How to think about the allow list?
The allow list file is essentially a list of all your exposed API calls and the data that passes within them, in plain text. It's very easy to build tooling to do things like parsing this file within your tests to ensure fields like `credit_card_no` are not accidentally leaked. It's a great way to build compliance tooling and ensure your user data is always safe.
:::
This is an example of a named query: `getUserWithProducts` is the name you've given to this query. It can be anything you like but should be unique across all your queries. Only named queries are saved in the allow list in development mode; the allow list is not modified in production mode.
```graphql
query getUserWithProducts {
users {
id
name
products {
id
name
price
}
action()
}, []);
return (
<div className="App">
<h1>{ JSON.stringify(user) }</h1>
</div>
);
}
}
```
export default App;
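
To make the prepared-statement point above concrete, here is a generic sketch using Go's standard `database/sql` package (not Super Graph internals; the driver and connection string are assumptions). The SQL text and the argument travel to the database separately, so the argument can never be parsed as SQL.

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/lib/pq" // hypothetical choice of Postgres driver
)

func main() {
	db, err := sql.Open("postgres", "postgres://localhost/app?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// The statement is parsed and planned once; $1 is a typed parameter,
	// never spliced into the SQL text, so injection via the value is impossible.
	stmt, err := db.Prepare(`SELECT id, full_name FROM users WHERE email = $1`)
	if err != nil {
		log.Fatal(err)
	}
	defer stmt.Close()

	var id int
	var name string
	// This hostile-looking input is bound as a plain string value.
	if err := stmt.QueryRow("x'; DROP TABLE users; --").Scan(&id, &name); err != nil {
		log.Fatal(err) // sql.ErrNoRows here: no user has that literal email
	}
	fmt.Println(id, name)
}
```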
## Authentication
You can only have one type of auth enabled. You can either pick Rails or JWT.
### Rails Auth (Devise / Warden)
### Ruby on Rails
Almost all Rails apps use Devise or Warden for authentication. Once the user is
authenticated, a session is created with the user's ID. The session can either be
@ -1197,7 +1289,6 @@ The `user` role can be divided up into further roles based on attributes in the
Super Graph allows you to create roles dynamically using the `roles_query` and `match` config values.
### Configure RBAC
```yaml


@ -5,5 +5,5 @@ import (
)
func main() {
serv.Init()
serv.Cmd()
}


@ -147,7 +147,14 @@ func renderNestedInsertRelColumns(w io.Writer, item kvitem, values bool) error {
io.WriteString(w, `, `)
}
if values {
colWithTable(w, v.relCP.Left.Table, v.relCP.Left.Col)
if v._ctype > 0 {
io.WriteString(w, `"_x_`)
io.WriteString(w, v.relCP.Left.Table)
io.WriteString(w, `".`)
quoted(w, v.relCP.Left.Col)
} else {
colWithTable(w, v.relCP.Left.Table, v.relCP.Left.Col)
}
} else {
quoted(w, v.relCP.Right.Col)
}
@ -166,12 +173,18 @@ func renderNestedInsertRelTables(w io.Writer, item kvitem) error {
io.WriteString(w, `, `)
}
} else {
// Render child foreign key columns if child-to-parent
// Render tables needed to set values if child-to-parent
// relationship is one-to-many
for _, v := range item.items {
if v.relCP.Type == RelOneToMany {
quoted(w, v.relCP.Left.Table)
io.WriteString(w, `, `)
if v._ctype > 0 {
io.WriteString(w, `"_x_`)
io.WriteString(w, v.relCP.Left.Table)
io.WriteString(w, `", `)
} else {
quoted(w, v.relCP.Left.Table)
io.WriteString(w, `, `)
}
}
}
}


@ -290,7 +290,7 @@ func nestedInsertOneToOneWithConnect(t *testing.T) {
}
}`
sql := `WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "users" AS (SELECT "id" FROM "_sg_input" i,"users" WHERE "users"."id"= ((i.j->'user'->'connect'->>'id'))::bigint LIMIT 1), "products" AS (INSERT INTO "products" ("name", "price", "created_at", "updated_at", "user_id") SELECT "t"."name", "t"."price", "t"."created_at", "t"."updated_at", "users"."id" FROM "_sg_input" i, "users", json_populate_record(NULL::products, i.j) t RETURNING *) SELECT json_object_agg('product', json_0) FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "user_1_join"."json_1" AS "user", "tags_2_join"."json_2" AS "tags") AS "json_row_0")) AS "json_0" FROM (SELECT "products"."id", "products"."name", "products"."user_id", "products"."tags" FROM "products" LIMIT ('1') :: integer) AS "products_0" LEFT OUTER JOIN LATERAL (SELECT coalesce(json_agg("json_2"), '[]') AS "json_2" FROM (SELECT row_to_json((SELECT "json_row_2" FROM (SELECT "tags_2"."id" AS "id", "tags_2"."name" AS "name") AS "json_row_2")) AS "json_2" FROM (SELECT "tags"."id", "tags"."name" FROM "tags" WHERE ((("tags"."slug") = any ("products_0"."tags"))) LIMIT ('20') :: integer) AS "tags_2" LIMIT ('20') :: integer) AS "json_agg_2") AS "tags_2_join" ON ('true') LEFT OUTER JOIN LATERAL (SELECT row_to_json((SELECT "json_row_1" FROM (SELECT "users_1"."id" AS "id", "users_1"."full_name" AS "full_name", "users_1"."email" AS "email") AS "json_row_1")) AS "json_1" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" WHERE ((("users"."id") = ("products_0"."user_id"))) LIMIT ('1') :: integer) AS "users_1" LIMIT ('1') :: integer) AS "user_1_join" ON ('true') LIMIT ('1') :: integer) AS "sel_0"`
sql := `WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "_x_users" AS (SELECT "id" FROM "_sg_input" i,"users" WHERE "users"."id"= ((i.j->'user'->'connect'->>'id'))::bigint LIMIT 1), "products" AS (INSERT INTO "products" ("name", "price", "created_at", "updated_at", "user_id") SELECT "t"."name", "t"."price", "t"."created_at", "t"."updated_at", "_x_users"."id" FROM "_sg_input" i, "_x_users", json_populate_record(NULL::products, i.j) t RETURNING *) SELECT json_object_agg('product', json_0) FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "user_1_join"."json_1" AS "user", "tags_2_join"."json_2" AS "tags") AS "json_row_0")) AS "json_0" FROM (SELECT "products"."id", "products"."name", "products"."user_id", "products"."tags" FROM "products" LIMIT ('1') :: integer) AS "products_0" LEFT OUTER JOIN LATERAL (SELECT coalesce(json_agg("json_2"), '[]') AS "json_2" FROM (SELECT row_to_json((SELECT "json_row_2" FROM (SELECT "tags_2"."id" AS "id", "tags_2"."name" AS "name") AS "json_row_2")) AS "json_2" FROM (SELECT "tags"."id", "tags"."name" FROM "tags" WHERE ((("tags"."slug") = any ("products_0"."tags"))) LIMIT ('20') :: integer) AS "tags_2" LIMIT ('20') :: integer) AS "json_agg_2") AS "tags_2_join" ON ('true') LEFT OUTER JOIN LATERAL (SELECT row_to_json((SELECT "json_row_1" FROM (SELECT "users_1"."id" AS "id", "users_1"."full_name" AS "full_name", "users_1"."email" AS "email") AS "json_row_1")) AS "json_1" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" WHERE ((("users"."id") = ("products_0"."user_id"))) LIMIT ('1') :: integer) AS "users_1" LIMIT ('1') :: integer) AS "user_1_join" ON ('true') LIMIT ('1') :: integer) AS "sel_0"`
vars := map[string]json.RawMessage{
"data": json.RawMessage(`{
@ -327,7 +327,7 @@ func nestedInsertOneToOneWithConnectArray(t *testing.T) {
}
}`
sql := `WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "users" AS (SELECT "id" FROM "_sg_input" i,"users" WHERE "users"."id" = ANY((select a::bigint AS list from json_array_elements_text((i.j->'user'->'connect'->>'id')::json) AS a)) LIMIT 1), "products" AS (INSERT INTO "products" ("name", "price", "created_at", "updated_at", "user_id") SELECT "t"."name", "t"."price", "t"."created_at", "t"."updated_at", "users"."id" FROM "_sg_input" i, "users", json_populate_record(NULL::products, i.j) t RETURNING *) SELECT json_object_agg('product', json_0) FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "user_1_join"."json_1" AS "user") AS "json_row_0")) AS "json_0" FROM (SELECT "products"."id", "products"."name", "products"."user_id" FROM "products" LIMIT ('1') :: integer) AS "products_0" LEFT OUTER JOIN LATERAL (SELECT row_to_json((SELECT "json_row_1" FROM (SELECT "users_1"."id" AS "id", "users_1"."full_name" AS "full_name", "users_1"."email" AS "email") AS "json_row_1")) AS "json_1" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" WHERE ((("users"."id") = ("products_0"."user_id"))) LIMIT ('1') :: integer) AS "users_1" LIMIT ('1') :: integer) AS "user_1_join" ON ('true') LIMIT ('1') :: integer) AS "sel_0"`
sql := `WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "_x_users" AS (SELECT "id" FROM "_sg_input" i,"users" WHERE "users"."id" = ANY((select a::bigint AS list from json_array_elements_text((i.j->'user'->'connect'->>'id')::json) AS a)) LIMIT 1), "products" AS (INSERT INTO "products" ("name", "price", "created_at", "updated_at", "user_id") SELECT "t"."name", "t"."price", "t"."created_at", "t"."updated_at", "_x_users"."id" FROM "_sg_input" i, "_x_users", json_populate_record(NULL::products, i.j) t RETURNING *) SELECT json_object_agg('product', json_0) FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "user_1_join"."json_1" AS "user") AS "json_row_0")) AS "json_0" FROM (SELECT "products"."id", "products"."name", "products"."user_id" FROM "products" LIMIT ('1') :: integer) AS "products_0" LEFT OUTER JOIN LATERAL (SELECT row_to_json((SELECT "json_row_1" FROM (SELECT "users_1"."id" AS "id", "users_1"."full_name" AS "full_name", "users_1"."email" AS "email") AS "json_row_1")) AS "json_1" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" WHERE ((("users"."id") = ("products_0"."user_id"))) LIMIT ('1') :: integer) AS "users_1" LIMIT ('1') :: integer) AS "user_1_join" ON ('true') LIMIT ('1') :: integer) AS "sel_0"`
vars := map[string]json.RawMessage{
"data": json.RawMessage(`{


@ -520,13 +520,16 @@ func (c *compilerContext) renderConnectStmt(qc *qcode.QCode, w io.Writer,
rel := item.relPC
// Render only for parent-to-child relationship of one-to-one
// For this to work the child needs to be found first so its primary key
// can be set in the related column on the parent object.
// Eg. Create product and connect a user to it.
if rel.Type != RelOneToOne {
return nil
}
io.WriteString(w, `, `)
quoted(w, item.ti.Name)
io.WriteString(c.w, ` AS (SELECT `)
io.WriteString(w, `, "_x_`)
io.WriteString(c.w, item.ti.Name)
io.WriteString(c.w, `" AS (SELECT `)
if rel.Left.Array {
io.WriteString(w, `array_agg(DISTINCT `)
@ -557,12 +560,15 @@ func (c *compilerContext) renderDisconnectStmt(qc *qcode.QCode, w io.Writer,
rel := item.relPC
// Render only for parent-to-child relationship of one-to-one
// For this to work the child needs to be found first so its
// null value can be set in the related column on the parent object.
// Eg. Update product and disconnect the user from it.
if rel.Type != RelOneToOne {
return nil
}
io.WriteString(w, `, `)
quoted(w, item.ti.Name)
io.WriteString(c.w, ` AS (`)
io.WriteString(w, `, "_x_`)
io.WriteString(c.w, item.ti.Name)
io.WriteString(c.w, `" AS (`)
if rel.Right.Array {
io.WriteString(c.w, `SELECT `)


@ -134,6 +134,7 @@ func TestMain(m *testing.M) {
DBTable{Name: "products", Type: "table"},
DBTable{Name: "purchases", Type: "table"},
DBTable{Name: "tags", Type: "table"},
DBTable{Name: "tag_count", Type: "json"},
}
columns := [][]DBColumn{
@ -169,7 +170,8 @@ func TestMain(m *testing.M) {
DBColumn{ID: 6, Name: "created_at", Type: "timestamp without time zone", NotNull: true, PrimaryKey: false, UniqueKey: false},
DBColumn{ID: 7, Name: "updated_at", Type: "timestamp without time zone", NotNull: true, PrimaryKey: false, UniqueKey: false},
DBColumn{ID: 8, Name: "tsv", Type: "tsvector", NotNull: false, PrimaryKey: false, UniqueKey: false},
DBColumn{ID: 9, Name: "tags", Type: "text[]", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "tags", FKeyColID: []int16{3}, Array: true}},
DBColumn{ID: 9, Name: "tags", Type: "text[]", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "tags", FKeyColID: []int16{3}, Array: true},
DBColumn{ID: 9, Name: "tag_count", Type: "json", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "tag_count", FKeyColID: []int16{}}},
[]DBColumn{
DBColumn{ID: 1, Name: "id", Type: "bigint", NotNull: true, PrimaryKey: true, UniqueKey: true},
DBColumn{ID: 2, Name: "customer_id", Type: "bigint", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "customers", FKeyColID: []int16{1}},
@ -182,6 +184,9 @@ func TestMain(m *testing.M) {
DBColumn{ID: 1, Name: "id", Type: "bigint", NotNull: true, PrimaryKey: true, UniqueKey: true},
DBColumn{ID: 2, Name: "name", Type: "text", NotNull: false, PrimaryKey: false, UniqueKey: false},
DBColumn{ID: 3, Name: "slug", Type: "text", NotNull: false, PrimaryKey: false, UniqueKey: false}},
[]DBColumn{
DBColumn{ID: 1, Name: "tag_id", Type: "bigint", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "tags", FKeyColID: []int16{1}},
DBColumn{ID: 2, Name: "count", Type: "int", NotNull: false, PrimaryKey: false, UniqueKey: false}},
}
for i := range tables {


@ -224,7 +224,7 @@ func (co *Compiler) compileQuery(qc *qcode.QCode, w io.Writer) (uint32, error) {
return ignored, nil
}
func (c *compilerContext) processChildren(sel *qcode.Select, ti *DBTableInfo) (uint32, []*qcode.Column) {
func (c *compilerContext) processChildren(sel *qcode.Select, ti *DBTableInfo) (uint32, []*qcode.Column, error) {
var skipped uint32
cols := make([]*qcode.Column, 0, len(sel.Cols))
@ -243,40 +243,63 @@ func (c *compilerContext) processChildren(sel *qcode.Select, ti *DBTableInfo) (u
rel, err := c.schema.GetRel(child.Name, ti.Name)
if err != nil {
skipped |= (1 << uint(id))
continue
return 0, nil, err
//skipped |= (1 << uint(id))
//continue
}
switch rel.Type {
case RelOneToOne, RelOneToMany:
if _, ok := colmap[rel.Right.Col]; !ok {
cols = append(cols, &qcode.Column{Table: ti.Name, Name: rel.Right.Col, FieldName: rel.Right.Col})
colmap[rel.Right.Col] = struct{}{}
}
colmap[rel.Right.Col] = struct{}{}
case RelOneToManyThrough:
if _, ok := colmap[rel.Left.Col]; !ok {
cols = append(cols, &qcode.Column{Table: ti.Name, Name: rel.Left.Col, FieldName: rel.Left.Col})
colmap[rel.Left.Col] = struct{}{}
}
case RelEmbedded:
if _, ok := colmap[rel.Left.Col]; !ok {
cols = append(cols, &qcode.Column{Table: ti.Name, Name: rel.Left.Col, FieldName: rel.Left.Col})
colmap[rel.Left.Col] = struct{}{}
}
colmap[rel.Left.Col] = struct{}{}
case RelRemote:
if _, ok := colmap[rel.Left.Col]; !ok {
cols = append(cols, &qcode.Column{Table: ti.Name, Name: rel.Left.Col, FieldName: rel.Right.Col})
colmap[rel.Left.Col] = struct{}{}
skipped |= (1 << uint(id))
}
colmap[rel.Left.Col] = struct{}{}
skipped |= (1 << uint(id))
default:
skipped |= (1 << uint(id))
return 0, nil, fmt.Errorf("unknown relationship %s", rel)
//skipped |= (1 << uint(id))
}
}
return skipped, cols
return skipped, cols, nil
}
func (c *compilerContext) renderSelect(sel *qcode.Select, ti *DBTableInfo) (uint32, error) {
skipped, childCols := c.processChildren(sel, ti)
var rel *DBRel
var err error
if sel.ParentID != -1 {
parent := c.s[sel.ParentID]
rel, err = c.schema.GetRel(ti.Name, parent.Name)
if err != nil {
return 0, err
}
}
skipped, childCols, err := c.processChildren(sel, ti)
if err != nil {
return 0, err
}
hasOrder := len(sel.OrderBy) != 0
// SELECT
@ -288,9 +311,8 @@ func (c *compilerContext) renderSelect(sel *qcode.Select, ti *DBTableInfo) (uint
io.WriteString(c.w, `"`)
if hasOrder {
err := c.renderOrderBy(sel, ti)
if err != nil {
return skipped, err
if err := c.renderOrderBy(sel, ti); err != nil {
return 0, err
}
}
@ -319,8 +341,7 @@ func (c *compilerContext) renderSelect(sel *qcode.Select, ti *DBTableInfo) (uint
c.renderRemoteRelColumns(sel, ti)
err := c.renderJoinedColumns(sel, ti, skipped)
if err != nil {
if err = c.renderJoinedColumns(sel, ti, skipped); err != nil {
return skipped, err
}
@ -339,7 +360,7 @@ func (c *compilerContext) renderSelect(sel *qcode.Select, ti *DBTableInfo) (uint
// END-SELECT
// FROM (SELECT .... )
err = c.renderBaseSelect(sel, ti, childCols, skipped)
err = c.renderBaseSelect(sel, ti, rel, childCols, skipped)
if err != nil {
return skipped, err
}
@ -527,11 +548,11 @@ func (c *compilerContext) renderJoinedColumns(sel *qcode.Select, ti *DBTableInfo
return nil
}
func (c *compilerContext) renderBaseSelect(sel *qcode.Select, ti *DBTableInfo,
func (c *compilerContext) renderBaseSelect(sel *qcode.Select, ti *DBTableInfo, rel *DBRel,
childCols []*qcode.Column, skipped uint32) error {
var groupBy []int
isRoot := sel.ParentID == -1
isRoot := (rel == nil)
isFil := (sel.Where != nil && sel.Where.Op != qcode.OpNop)
isSearch := sel.Args["search"] != nil
isAgg := false
@ -682,10 +703,7 @@ func (c *compilerContext) renderBaseSelect(sel *qcode.Select, ti *DBTableInfo,
io.WriteString(c.w, ` FROM `)
//fmt.Fprintf(w, ` FROM "%s"`, c.sel.Name)
io.WriteString(c.w, `"`)
io.WriteString(c.w, ti.Name)
io.WriteString(c.w, `"`)
c.renderFrom(sel, ti, rel)
// if tn, ok := c.tmap[sel.Name]; ok {
// //fmt.Fprintf(w, ` FROM "%s" AS "%s"`, tn, c.sel.Name)
@ -711,11 +729,9 @@ func (c *compilerContext) renderBaseSelect(sel *qcode.Select, ti *DBTableInfo,
}
io.WriteString(c.w, ` WHERE (`)
if err := c.renderRelationship(sel, ti); err != nil {
return err
}
if isFil {
io.WriteString(c.w, ` AND `)
if err := c.renderWhere(sel, ti); err != nil {
@ -770,6 +786,44 @@ func (c *compilerContext) renderBaseSelect(sel *qcode.Select, ti *DBTableInfo,
return nil
}
func (c *compilerContext) renderFrom(sel *qcode.Select, ti *DBTableInfo, rel *DBRel) error {
if rel != nil && rel.Type == RelEmbedded {
// json_to_recordset('[{"a":1,"b":[1,2,3],"c":"bar"}, {"a":2,"b":[1,2,3],"c":"bar"}]') as x(a int, b text, d text);
io.WriteString(c.w, `"`)
io.WriteString(c.w, rel.Left.Table)
io.WriteString(c.w, `", `)
io.WriteString(c.w, ti.Type)
io.WriteString(c.w, `_to_recordset(`)
colWithTable(c.w, rel.Left.Table, rel.Right.Col)
io.WriteString(c.w, `) AS `)
io.WriteString(c.w, `"`)
io.WriteString(c.w, ti.Name)
io.WriteString(c.w, `"`)
io.WriteString(c.w, `(`)
for i, col := range ti.Columns {
if i != 0 {
io.WriteString(c.w, `, `)
}
io.WriteString(c.w, col.Name)
io.WriteString(c.w, ` `)
io.WriteString(c.w, col.Type)
}
io.WriteString(c.w, `)`)
} else {
//fmt.Fprintf(w, ` FROM "%s"`, c.sel.Name)
io.WriteString(c.w, `"`)
io.WriteString(c.w, ti.Name)
io.WriteString(c.w, `"`)
}
return nil
}
func (c *compilerContext) renderOrderByColumns(sel *qcode.Select, ti *DBTableInfo) {
colsRendered := len(sel.Cols) != 0
@ -852,7 +906,13 @@ func (c *compilerContext) renderRelationshipByName(table, parent string, id int3
io.WriteString(c.w, `) = (`)
colWithTable(c.w, rel.Through, rel.Right.Col)
}
case RelEmbedded:
colWithTable(c.w, rel.Left.Table, rel.Left.Col)
io.WriteString(c.w, `) = (`)
colWithTableID(c.w, parent, id, rel.Left.Col)
}
io.WriteString(c.w, `))`)
return nil


@ -463,6 +463,32 @@ func multiRoot(t *testing.T) {
}
}
func jsonColumnAsTable(t *testing.T) {
gql := `query {
products {
id
name
tag_count {
count
tags {
name
}
}
}
}`
sql := `SELECT json_object_agg('products', json_0) FROM (SELECT coalesce(json_agg("json_0"), '[]') AS "json_0" FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "tag_count_1_join"."json_1" AS "tag_count") AS "json_row_0")) AS "json_0" FROM (SELECT "products"."id", "products"."name" FROM "products" LIMIT ('20') :: integer) AS "products_0" LEFT OUTER JOIN LATERAL (SELECT row_to_json((SELECT "json_row_1" FROM (SELECT "tag_count_1"."count" AS "count", "tags_2_join"."json_2" AS "tags") AS "json_row_1")) AS "json_1" FROM (SELECT "tag_count"."count", "tag_count"."tag_id" FROM "products", json_to_recordset("products"."tag_count") AS "tag_count"(tag_id bigint, count int) WHERE ((("products"."id") = ("products_0"."id"))) LIMIT ('1') :: integer) AS "tag_count_1" LEFT OUTER JOIN LATERAL (SELECT coalesce(json_agg("json_2"), '[]') AS "json_2" FROM (SELECT row_to_json((SELECT "json_row_2" FROM (SELECT "tags_2"."name" AS "name") AS "json_row_2")) AS "json_2" FROM (SELECT "tags"."name" FROM "tags" WHERE ((("tags"."id") = ("tag_count_1"."tag_id"))) LIMIT ('20') :: integer) AS "tags_2" LIMIT ('20') :: integer) AS "json_agg_2") AS "tags_2_join" ON ('true') LIMIT ('1') :: integer) AS "tag_count_1_join" ON ('true') LIMIT ('20') :: integer) AS "json_agg_0") AS "sel_0"`
resSQL, err := compileGQLToPSQL(gql, nil, "admin")
if err != nil {
t.Fatal(err)
}
if string(resSQL) != sql {
t.Fatal(errNotExpected)
}
}
func skipUserIDForAnonRole(t *testing.T) {
gql := `query {
products {
@ -548,6 +574,7 @@ func TestCompileQuery(t *testing.T) {
t.Run("queryWithVariables", queryWithVariables)
t.Run("withWhereOnRelations", withWhereOnRelations)
t.Run("multiRoot", multiRoot)
t.Run("jsonColumnAsTable", jsonColumnAsTable)
t.Run("skipUserIDForAnonRole", skipUserIDForAnonRole)
t.Run("blockedQuery", blockedQuery)
t.Run("blockedFunctions", blockedFunctions)


@ -15,6 +15,7 @@ type DBSchema struct {
type DBTableInfo struct {
Name string
Type string
Singular bool
Columns []DBColumn
PrimaryCol *DBColumn
@ -29,6 +30,7 @@ const (
RelOneToOne RelType = iota + 1
RelOneToMany
RelOneToManyThrough
RelEmbedded
RelRemote
)
@ -51,7 +53,6 @@ type DBRel struct {
}
func NewDBSchema(info *DBInfo, aliases map[string][]string) (*DBSchema, error) {
schema := &DBSchema{
t: make(map[string]*DBTableInfo),
rm: make(map[string]map[string]*DBRel),
@ -83,6 +84,7 @@ func (s *DBSchema) addTable(
singular := flect.Singularize(t.Key)
s.t[singular] = &DBTableInfo{
Name: t.Name,
Type: t.Type,
Singular: true,
Columns: cols,
ColMap: colmap,
@ -92,6 +94,7 @@ func (s *DBSchema) addTable(
plural := flect.Pluralize(t.Key)
s.t[plural] = &DBTableInfo{
Name: t.Name,
Type: t.Type,
Singular: false,
Columns: cols,
ColMap: colmap,
@ -139,19 +142,43 @@ func (s *DBSchema) updateRelationships(t DBTable, cols []DBColumn) error {
for i := range cols {
c := cols[i]
if len(c.FKeyTable) == 0 || len(c.FKeyColID) == 0 {
if len(c.FKeyTable) == 0 {
continue
}
// Foreign key column name
ft := strings.ToLower(c.FKeyTable)
fcid := c.FKeyColID[0]
ti, ok := s.t[ft]
if !ok {
return fmt.Errorf("invalid foreign key table '%s'", ft)
}
// This is an embedded relationship like when a json/jsonb column
// is exposed as a table
if c.Name == c.FKeyTable && len(c.FKeyColID) == 0 {
rel := &DBRel{Type: RelEmbedded}
rel.Left.col = cti.PrimaryCol
rel.Left.Table = cti.Name
rel.Left.Col = cti.PrimaryCol.Name
rel.Right.col = &c
rel.Right.Table = ti.Name
rel.Right.Col = c.Name
if err := s.SetRel(ft, ct, rel); err != nil {
return err
}
continue
}
if len(c.FKeyColID) == 0 {
continue
}
// Foreign key column id
fcid := c.FKeyColID[0]
fc, ok := ti.ColIDMap[fcid]
if !ok {
return fmt.Errorf("invalid foreign key column id '%d' for table '%s'",


@ -12,6 +12,8 @@ func (rt RelType) String() string {
return "one to many through"
case RelRemote:
return "remote"
case RelEmbedded:
return "embedded"
}
return ""
}


@ -62,6 +62,20 @@ func GetDBInfo(db *pgxpool.Pool) (*DBInfo, error) {
return di, nil
}
func (di *DBInfo) AddTable(t DBTable, cols []DBColumn) {
t.ID = di.Tables[len(di.Tables)-1].ID
di.Tables = append(di.Tables, t)
di.colmap[t.Key] = make(map[string]*DBColumn, len(cols))
for i := range cols {
cols[i].ID = int16(i)
c := &cols[i]
di.colmap[t.Key][c.Key] = c
}
di.Columns = append(di.Columns, cols)
}
func (di *DBInfo) GetColumn(table, column string) (*DBColumn, bool) {
v, ok := di.colmap[strings.ToLower(table)][strings.ToLower(column)]
return v, ok


@ -165,17 +165,28 @@ func renderNestedUpdateRelColumns(w io.Writer, item kvitem, values bool) error {
for _, v := range item.items {
if v._ctype > 0 && v.relCP.Type == RelOneToMany {
if values {
colWithTable(w, v.relCP.Left.Table, v.relCP.Left.Col)
} else {
if v.relCP.Right.Array {
io.WriteString(w, `array_remove(`)
colWithTable(w, v.relCP.Left.Table, v.relCP.Left.Col)
io.WriteString(w, `, `)
quoted(w, v.relCP.Right.Col)
io.WriteString(w, `)`)
// if v.relCP.Right.Array {
// io.WriteString(w, `array_diff(`)
// colWithTable(w, v.relCP.Right.Table, v.relCP.Right.Col)
// io.WriteString(w, `, `)
// }
if v._ctype > 0 {
io.WriteString(w, `"_x_`)
io.WriteString(w, v.relCP.Left.Table)
io.WriteString(w, `".`)
quoted(w, v.relCP.Left.Col)
} else {
quoted(w, v.relCP.Right.Col)
colWithTable(w, v.relCP.Left.Table, v.relCP.Left.Col)
}
// if v.relCP.Right.Array {
// io.WriteString(w, `)`)
// }
} else {
quoted(w, v.relCP.Right.Col)
}
}
}
@ -184,12 +195,13 @@ func renderNestedUpdateRelColumns(w io.Writer, item kvitem, values bool) error {
}
func renderNestedUpdateRelTables(w io.Writer, item kvitem) error {
// Render child foreign key columns if child-to-parent
// Render tables needed to set values if child-to-parent
// relationship is one-to-many
for _, v := range item.items {
if v._ctype > 0 && v.relCP.Type == RelOneToMany {
quoted(w, v.relCP.Left.Table)
io.WriteString(w, `, `)
io.WriteString(w, `"_x_`)
io.WriteString(w, v.relCP.Left.Table)
io.WriteString(w, `", `)
}
}


@ -238,9 +238,9 @@ func nestedUpdateOneToOneWithConnect(t *testing.T) {
}
}`
sql1 := `WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "users" AS (SELECT "id" FROM "_sg_input" i,"users" WHERE "users"."id"= ((i.j->'user'->'connect'->>'id'))::bigint AND "users"."email"= ((i.j->'user'->'connect'->>'email'))::character varying LIMIT 1), "products" AS (UPDATE "products" SET ("name", "price", "user_id") = (SELECT "t"."name", "t"."price", "users"."id" FROM "_sg_input" i, "users", json_populate_record(NULL::products, i.j) t) WHERE (("products"."id") = 9) RETURNING "products".*) SELECT json_object_agg('product', json_0) FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "user_1_join"."json_1" AS "user") AS "json_row_0")) AS "json_0" FROM (SELECT "products"."id", "products"."name", "products"."user_id" FROM "products" LIMIT ('1') :: integer) AS "products_0" LEFT OUTER JOIN LATERAL (SELECT row_to_json((SELECT "json_row_1" FROM (SELECT "users_1"."id" AS "id", "users_1"."full_name" AS "full_name", "users_1"."email" AS "email") AS "json_row_1")) AS "json_1" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" WHERE ((("users"."id") = ("products_0"."user_id"))) LIMIT ('1') :: integer) AS "users_1" LIMIT ('1') :: integer) AS "user_1_join" ON ('true') LIMIT ('1') :: integer) AS "sel_0"`
sql1 := `WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "_x_users" AS (SELECT "id" FROM "_sg_input" i,"users" WHERE "users"."id"= ((i.j->'user'->'connect'->>'id'))::bigint AND "users"."email"= ((i.j->'user'->'connect'->>'email'))::character varying LIMIT 1), "products" AS (UPDATE "products" SET ("name", "price", "user_id") = (SELECT "t"."name", "t"."price", "_x_users"."id" FROM "_sg_input" i, "_x_users", json_populate_record(NULL::products, i.j) t) WHERE (("products"."id") = 9) RETURNING "products".*) SELECT json_object_agg('product', json_0) FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "user_1_join"."json_1" AS "user") AS "json_row_0")) AS "json_0" FROM (SELECT "products"."id", "products"."name", "products"."user_id" FROM "products" LIMIT ('1') :: integer) AS "products_0" LEFT OUTER JOIN LATERAL (SELECT row_to_json((SELECT "json_row_1" FROM (SELECT "users_1"."id" AS "id", "users_1"."full_name" AS "full_name", "users_1"."email" AS "email") AS "json_row_1")) AS "json_1" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" WHERE ((("users"."id") = ("products_0"."user_id"))) LIMIT ('1') :: integer) AS "users_1" LIMIT ('1') :: integer) AS "user_1_join" ON ('true') LIMIT ('1') :: integer) AS "sel_0"`
sql2 := `WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "users" AS (SELECT "id" FROM "_sg_input" i,"users" WHERE "users"."email"= ((i.j->'user'->'connect'->>'email'))::character varying AND "users"."id"= ((i.j->'user'->'connect'->>'id'))::bigint LIMIT 1), "products" AS (UPDATE "products" SET ("name", "price", "user_id") = (SELECT "t"."name", "t"."price", "users"."id" FROM "_sg_input" i, "users", json_populate_record(NULL::products, i.j) t) WHERE (("products"."id") = 9) RETURNING "products".*) SELECT json_object_agg('product', json_0) FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "user_1_join"."json_1" AS "user") AS "json_row_0")) AS "json_0" FROM (SELECT "products"."id", "products"."name", "products"."user_id" FROM "products" LIMIT ('1') :: integer) AS "products_0" LEFT OUTER JOIN LATERAL (SELECT row_to_json((SELECT "json_row_1" FROM (SELECT "users_1"."id" AS "id", "users_1"."full_name" AS "full_name", "users_1"."email" AS "email") AS "json_row_1")) AS "json_1" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" WHERE ((("users"."id") = ("products_0"."user_id"))) LIMIT ('1') :: integer) AS "users_1" LIMIT ('1') :: integer) AS "user_1_join" ON ('true') LIMIT ('1') :: integer) AS "sel_0"`
sql2 := `WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "_x_users" AS (SELECT "id" FROM "_sg_input" i,"users" WHERE "users"."email"= ((i.j->'user'->'connect'->>'email'))::character varying AND "users"."id"= ((i.j->'user'->'connect'->>'id'))::bigint LIMIT 1), "products" AS (UPDATE "products" SET ("name", "price", "user_id") = (SELECT "t"."name", "t"."price", "_x_users"."id" FROM "_sg_input" i, "_x_users", json_populate_record(NULL::products, i.j) t) WHERE (("products"."id") = 9) RETURNING "products".*) SELECT json_object_agg('product', json_0) FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "user_1_join"."json_1" AS "user") AS "json_row_0")) AS "json_0" FROM (SELECT "products"."id", "products"."name", "products"."user_id" FROM "products" LIMIT ('1') :: integer) AS "products_0" LEFT OUTER JOIN LATERAL (SELECT row_to_json((SELECT "json_row_1" FROM (SELECT "users_1"."id" AS "id", "users_1"."full_name" AS "full_name", "users_1"."email" AS "email") AS "json_row_1")) AS "json_1" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" WHERE ((("users"."id") = ("products_0"."user_id"))) LIMIT ('1') :: integer) AS "users_1" LIMIT ('1') :: integer) AS "user_1_join" ON ('true') LIMIT ('1') :: integer) AS "sel_0"`
vars := map[string]json.RawMessage{
"data": json.RawMessage(`{
@ -273,7 +273,7 @@ func nestedUpdateOneToOneWithDisconnect(t *testing.T) {
}
}`
sql := `WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "users" AS (SELECT * FROM (VALUES(NULL::bigint)) AS LOOKUP("id")), "products" AS (UPDATE "products" SET ("name", "price", "user_id") = (SELECT "t"."name", "t"."price", "users"."id" FROM "_sg_input" i, "users", json_populate_record(NULL::products, i.j) t) WHERE (("products"."id") = 2) RETURNING "products".*) SELECT json_object_agg('product', json_0) FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "products_0"."user_id" AS "user_id") AS "json_row_0")) AS "json_0" FROM (SELECT "products"."id", "products"."name", "products"."user_id" FROM "products" LIMIT ('1') :: integer) AS "products_0" LIMIT ('1') :: integer) AS "sel_0"`
sql := `WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "_x_users" AS (SELECT * FROM (VALUES(NULL::bigint)) AS LOOKUP("id")), "products" AS (UPDATE "products" SET ("name", "price", "user_id") = (SELECT "t"."name", "t"."price", "_x_users"."id" FROM "_sg_input" i, "_x_users", json_populate_record(NULL::products, i.j) t) WHERE (("products"."id") = 2) RETURNING "products".*) SELECT json_object_agg('product', json_0) FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "products_0"."user_id" AS "user_id") AS "json_row_0")) AS "json_0" FROM (SELECT "products"."id", "products"."name", "products"."user_id" FROM "products" LIMIT ('1') :: integer) AS "products_0" LIMIT ('1') :: integer) AS "sel_0"`
vars := map[string]json.RawMessage{
"data": json.RawMessage(`{


@ -1,320 +0,0 @@
package serv
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"log"
"os"
"path"
"sort"
"strings"
)
const (
AL_QUERY int = iota + 1
AL_VARS
)
type allowItem struct {
name string
hash string
uri string
gql string
vars json.RawMessage
}
var _allowList allowList
type allowList struct {
list []*allowItem
index map[string]int
filepath string
saveChan chan *allowItem
active bool
}
func initAllowList(cpath string) {
_allowList = allowList{
index: make(map[string]int),
saveChan: make(chan *allowItem),
active: true,
}
if len(cpath) != 0 {
fp := path.Join(cpath, "allow.list")
if _, err := os.Stat(fp); err == nil {
_allowList.filepath = fp
} else if !os.IsNotExist(err) {
errlog.Fatal().Err(err).Send()
}
}
if len(_allowList.filepath) == 0 {
fp := "./allow.list"
if _, err := os.Stat(fp); err == nil {
_allowList.filepath = fp
} else if !os.IsNotExist(err) {
errlog.Fatal().Err(err).Send()
}
}
if len(_allowList.filepath) == 0 {
fp := "./config/allow.list"
if _, err := os.Stat(fp); err == nil {
_allowList.filepath = fp
} else if !os.IsNotExist(err) {
errlog.Fatal().Err(err).Send()
}
}
if len(_allowList.filepath) == 0 {
if conf.Production {
errlog.Fatal().Msg("allow.list not found")
}
if len(cpath) == 0 {
_allowList.filepath = "./config/allow.list"
} else {
_allowList.filepath = path.Join(cpath, "allow.list")
}
logger.Warn().Msg("allow.list not found")
} else {
_allowList.load()
}
go func() {
for v := range _allowList.saveChan {
_allowList.save(v)
}
}()
}
func (al *allowList) add(req *gqlReq) {
if al.saveChan == nil || len(req.ref) == 0 || len(req.Query) == 0 {
return
}
var query string
for i := 0; i < len(req.Query); i++ {
c := req.Query[i]
if c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z' {
query = req.Query
break
} else if c == '{' {
query = "query " + req.Query
break
}
}
al.saveChan <- &allowItem{
uri: req.ref,
gql: query,
vars: req.Vars,
}
}
func (al *allowList) upsert(query, vars []byte, uri string) {
q := string(query)
hash := gqlHash(q, vars, "")
name := gqlName(q)
var key string
if len(name) != 0 {
key = name
} else {
key = hash
}
if i, ok := al.index[key]; !ok {
al.list = append(al.list, &allowItem{
name: name,
hash: hash,
uri: uri,
gql: q,
vars: vars,
})
al.index[key] = len(al.list) - 1
} else {
item := al.list[i]
item.name = name
item.hash = hash
item.gql = q
item.vars = vars
}
}
func (al *allowList) load() {
b, err := ioutil.ReadFile(al.filepath)
if err != nil {
log.Fatal(err)
}
if len(b) == 0 {
return
}
var uri string
var varBytes []byte
s, e, c := 0, 0, 0
ty := 0
for {
if c == 0 && b[e] == '#' {
s = e
for e < len(b) && b[e] != '\n' {
e++
}
if (e - s) > 2 {
uri = strings.TrimSpace(string(b[(s + 1):e]))
}
}
if e >= len(b) {
break
}
if matchPrefix(b, e, "query") || matchPrefix(b, e, "mutation") {
if c == 0 {
s = e
}
ty = AL_QUERY
} else if matchPrefix(b, e, "variables") {
if c == 0 {
s = e + len("variables") + 1
}
ty = AL_VARS
} else if b[e] == '{' {
c++
} else if b[e] == '}' {
c--
if c == 0 {
if ty == AL_QUERY {
al.upsert(b[s:(e+1)], varBytes, uri)
varBytes = nil
} else if ty == AL_VARS {
varBytes = b[s:(e + 1)]
}
ty = 0
}
}
e++
if e >= len(b) {
break
}
}
}
func (al *allowList) save(item *allowItem) {
var err error
item.hash = gqlHash(item.gql, item.vars, "")
item.name = gqlName(item.gql)
if len(item.name) == 0 {
key := item.hash
if _, ok := al.index[key]; ok {
return
}
al.list = append(al.list, item)
al.index[key] = len(al.list) - 1
} else {
key := item.name
if i, ok := al.index[key]; ok {
if al.list[i].hash == item.hash {
return
}
al.list[i] = item
} else {
al.list = append(al.list, item)
al.index[key] = len(al.list) - 1
}
}
f, err := os.Create(al.filepath)
if err != nil {
logger.Warn().Err(err).Msgf("Failed to write allow list: %s", al.filepath)
return
}
defer f.Close()
keys := []string{}
urlMap := make(map[string][]*allowItem)
for _, v := range al.list {
urlMap[v.uri] = append(urlMap[v.uri], v)
}
for k := range urlMap {
keys = append(keys, k)
}
sort.Strings(keys)
for i := range keys {
k := keys[i]
v := urlMap[k]
if _, err := f.WriteString(fmt.Sprintf("# %s\n\n", k)); err != nil {
logger.Error().Err(err).Send()
return
}
for i := range v {
if len(v[i].vars) != 0 && !bytes.Equal(v[i].vars, []byte("{}")) {
vj, err := json.MarshalIndent(v[i].vars, "", " ")
if err != nil {
logger.Warn().Err(err).Msg("Failed to write allow list 'vars' to file")
continue
}
_, err = f.WriteString(fmt.Sprintf("variables %s\n\n", vj))
if err != nil {
logger.Error().Err(err).Send()
return
}
}
if v[i].gql[0] == '{' {
_, err = f.WriteString(fmt.Sprintf("query %s\n\n", v[i].gql))
} else {
_, err = f.WriteString(fmt.Sprintf("%s\n\n", v[i].gql))
}
if err != nil {
logger.Error().Err(err).Send()
return
}
}
}
}
func matchPrefix(b []byte, i int, s string) bool {
if (len(b) - i) < len(s) {
return false
}
for n := 0; n < len(s); n++ {
if b[(i+n)] != s[n] {
return false
}
}
return true
}


@ -1,15 +1,13 @@
package serv
import (
"context"
"fmt"
"os"
"runtime"
"strings"
"github.com/dosco/super-graph/allow"
"github.com/dosco/super-graph/psql"
"github.com/dosco/super-graph/qcode"
"github.com/jackc/pgx/v4"
"github.com/jackc/pgx/v4/pgxpool"
"github.com/rs/zerolog"
"github.com/spf13/cobra"
@ -31,17 +29,18 @@ var (
)
var (
logger zerolog.Logger // logger for everything but errors
errlog zerolog.Logger // logger for errors includes line numbers
conf *config // parsed config
confPath string // path to the config file
db *pgxpool.Pool // database connection pool
schema *psql.DBSchema // database tables, columns and relationships
qcompile *qcode.Compiler // qcode compiler
pcompile *psql.Compiler // postgres sql compiler
logger zerolog.Logger // logger for everything but errors
errlog zerolog.Logger // logger for errors includes line numbers
conf *config // parsed config
confPath string // path to the config file
db *pgxpool.Pool // database connection pool
schema *psql.DBSchema // database tables, columns and relationships
allowList *allow.List // allow.list contains queries allowed in production
qcompile *qcode.Compiler // qcode compiler
pcompile *psql.Compiler // postgres sql compiler
)
func Init() {
func Cmd() {
initLog()
rootCmd := &cobra.Command{
@ -156,159 +155,6 @@ e.g. db:migrate -+1
}
}
func initLog() {
out := zerolog.ConsoleWriter{Out: os.Stderr}
logger = zerolog.New(out).With().Timestamp().Logger()
errlog = logger.With().Caller().Logger()
}
func initConf() (*config, error) {
vi := newConfig(getConfigName())
if err := vi.ReadInConfig(); err != nil {
return nil, err
}
inherits := vi.GetString("inherits")
if len(inherits) != 0 {
vi = newConfig(inherits)
if err := vi.ReadInConfig(); err != nil {
return nil, err
}
if vi.IsSet("inherits") {
errlog.Fatal().Msgf("inherited config (%s) cannot itself inherit (%s)",
inherits,
vi.GetString("inherits"))
}
vi.SetConfigName(getConfigName())
if err := vi.MergeInConfig(); err != nil {
return nil, err
}
}
c := &config{}
if err := c.Init(vi); err != nil {
return nil, fmt.Errorf("unable to decode config, %v", err)
}
logLevel, err := zerolog.ParseLevel(c.LogLevel)
if err != nil {
errlog.Error().Err(err).Msg("error setting log_level")
}
zerolog.SetGlobalLevel(logLevel)
return c, nil
}
func initDB(c *config, useDB bool) (*pgx.Conn, error) {
config, _ := pgx.ParseConfig("")
config.Host = c.DB.Host
config.Port = c.DB.Port
config.User = c.DB.User
config.Password = c.DB.Password
config.RuntimeParams = map[string]string{
"application_name": c.AppName,
"search_path": c.DB.Schema,
}
if useDB {
config.Database = c.DB.DBName
}
switch c.LogLevel {
case "debug":
config.LogLevel = pgx.LogLevelDebug
case "info":
config.LogLevel = pgx.LogLevelInfo
case "warn":
config.LogLevel = pgx.LogLevelWarn
case "error":
config.LogLevel = pgx.LogLevelError
default:
config.LogLevel = pgx.LogLevelNone
}
config.Logger = NewSQLLogger(logger)
db, err := pgx.ConnectConfig(context.Background(), config)
if err != nil {
return nil, err
}
return db, nil
}
func initDBPool(c *config) (*pgxpool.Pool, error) {
config, _ := pgxpool.ParseConfig("")
config.ConnConfig.Host = c.DB.Host
config.ConnConfig.Port = c.DB.Port
config.ConnConfig.Database = c.DB.DBName
config.ConnConfig.User = c.DB.User
config.ConnConfig.Password = c.DB.Password
config.ConnConfig.RuntimeParams = map[string]string{
"application_name": c.AppName,
"search_path": c.DB.Schema,
}
switch c.LogLevel {
case "debug":
config.ConnConfig.LogLevel = pgx.LogLevelDebug
case "info":
config.ConnConfig.LogLevel = pgx.LogLevelInfo
case "warn":
config.ConnConfig.LogLevel = pgx.LogLevelWarn
case "error":
config.ConnConfig.LogLevel = pgx.LogLevelError
default:
config.ConnConfig.LogLevel = pgx.LogLevelNone
}
config.ConnConfig.Logger = NewSQLLogger(logger)
// if c.DB.MaxRetries != 0 {
// opt.MaxRetries = c.DB.MaxRetries
// }
if c.DB.PoolSize != 0 {
config.MaxConns = conf.DB.PoolSize
}
db, err := pgxpool.ConnectConfig(context.Background(), config)
if err != nil {
return nil, err
}
return db, nil
}
func initCompiler() {
var err error
qcompile, pcompile, err = initCompilers(conf)
if err != nil {
errlog.Fatal().Err(err).Msg("failed to initialize compilers")
}
if err := initResolvers(); err != nil {
errlog.Fatal().Err(err).Msg("failed to initialized resolvers")
}
}
func initConfOnce() {
var err error
if conf == nil {
if conf, err = initConf(); err != nil {
errlog.Fatal().Err(err).Msg("failed to read config")
}
}
}
func cmdVersion(cmd *cobra.Command, args []string) {
fmt.Printf("%s\n", BuildDetails())
}


@ -17,7 +17,7 @@ func cmdServ(cmd *cobra.Command, args []string) {
if err == nil {
initCompiler()
initAllowList(confPath)
initPreparedList()
initPreparedList(confPath)
} else {
fatalInProd(err, "failed to connect to database")
}


@ -87,6 +87,7 @@ type config struct {
type configColumn struct {
Name string
Type string
ForeignKey string `mapstructure:"related_to"`
}
@ -313,7 +314,7 @@ func (c *config) getAliasMap() map[string][]string {
for i := range c.Tables {
t := c.Tables[i]
if len(t.Table) == 0 {
if len(t.Table) == 0 || len(t.Columns) != 0 {
continue
}


@ -8,9 +8,61 @@ import (
"github.com/dosco/super-graph/qcode"
)
func addTables(c *config, di *psql.DBInfo) error {
for _, t := range c.Tables {
if len(t.Table) == 0 || len(t.Columns) == 0 {
continue
}
if err := addTable(di, t.Columns, t); err != nil {
return err
}
}
return nil
}
func addTable(di *psql.DBInfo, cols []configColumn, t configTable) error {
bc, ok := di.GetColumn(t.Table, t.Name)
if !ok {
return fmt.Errorf(
"Column '%s' not found on table '%s'",
t.Name, t.Table)
}
if bc.Type != "json" && bc.Type != "jsonb" {
return fmt.Errorf(
"Column '%s' in table '%s' is of type '%s'. Only JSON or JSONB is valid",
t.Name, t.Table, bc.Type)
}
table := psql.DBTable{
Name: t.Name,
Key: strings.ToLower(t.Name),
Type: bc.Type,
}
columns := make([]psql.DBColumn, 0, len(cols))
for i := range cols {
c := cols[i]
columns = append(columns, psql.DBColumn{
Name: c.Name,
Key: strings.ToLower(c.Name),
Type: c.Type,
})
}
di.AddTable(table, columns)
// Point the source json/jsonb column at the synthetic table; a column whose
// name matches its FKeyTable with no FKeyColID is later treated as an
// embedded relationship.
bc.FKeyTable = t.Name
return nil
}
func addForeignKeys(c *config, di *psql.DBInfo) error {
for _, t := range c.Tables {
for _, c := range t.Columns {
if len(c.ForeignKey) == 0 {
continue
}
if err := addForeignKey(di, c, t); err != nil {
return err
}
@ -23,7 +75,7 @@ func addForeignKey(di *psql.DBInfo, c configColumn, t configTable) error {
c1, ok := di.GetColumn(t.Name, c.Name)
if !ok {
return fmt.Errorf(
"Invalid table '%s' or column '%s in config",
"Invalid table '%s' or column '%s' in config",
t.Name, c.Name)
}


@ -11,6 +11,7 @@ import (
"time"
"github.com/cespare/xxhash/v2"
"github.com/dosco/super-graph/allow"
"github.com/dosco/super-graph/qcode"
"github.com/jackc/pgx/v4"
"github.com/valyala/fasttemplate"
@ -107,7 +108,7 @@ func (c *coreContext) resolvePreparedSQL() ([]byte, *stmt, error) {
}
ps, ok := _preparedList[gqlHash(c.req.Query, c.req.Vars, role)]
ps, ok := _preparedList[stmtHash(allow.QueryName(c.req.Query), role)]
if !ok {
return nil, nil, errUnauthorized
}
@ -240,8 +241,10 @@ func (c *coreContext) resolveSQL() ([]byte, *stmt, error) {
}
}
if !conf.Production {
_allowList.add(&c.req)
if allowList.IsPersist() {
if err := allowList.Add(c.req.Vars, c.req.Query, c.req.ref); err != nil {
return nil, nil, err
}
}
if len(stmts) > 1 {


@ -4,7 +4,7 @@ package serv
func Fuzz(data []byte) int {
gql := string(data)
gqlName(gql)
QueryName(gql)
gqlHash(gql, nil, "")
return 1

View File

@ -10,7 +10,6 @@ func TestFuzzCrashers(t *testing.T) {
}
for _, f := range crashers {
_ = gqlName(f)
gqlHash(f, nil, "")
}
}

179
serv/init.go Normal file
View File

@ -0,0 +1,179 @@
package serv
import (
"context"
"fmt"
"os"
"github.com/dosco/super-graph/allow"
"github.com/jackc/pgx/v4"
"github.com/jackc/pgx/v4/pgxpool"
"github.com/rs/zerolog"
)
func initLog() {
out := zerolog.ConsoleWriter{Out: os.Stderr}
logger = zerolog.New(out).With().Timestamp().Logger()
errlog = logger.With().Caller().Logger()
}
func initConf() (*config, error) {
vi := newConfig(getConfigName())
if err := vi.ReadInConfig(); err != nil {
return nil, err
}
inherits := vi.GetString("inherits")
if len(inherits) != 0 {
vi = newConfig(inherits)
if err := vi.ReadInConfig(); err != nil {
return nil, err
}
if vi.IsSet("inherits") {
errlog.Fatal().Msgf("inherited config (%s) cannot itself inherit (%s)",
inherits,
vi.GetString("inherits"))
}
vi.SetConfigName(getConfigName())
if err := vi.MergeInConfig(); err != nil {
return nil, err
}
}
c := &config{}
if err := c.Init(vi); err != nil {
return nil, fmt.Errorf("unable to decode config, %v", err)
}
logLevel, err := zerolog.ParseLevel(c.LogLevel)
if err != nil {
errlog.Error().Err(err).Msg("error setting log_level")
}
zerolog.SetGlobalLevel(logLevel)
return c, nil
}
func initDB(c *config, useDB bool) (*pgx.Conn, error) {
config, _ := pgx.ParseConfig("")
config.Host = c.DB.Host
config.Port = c.DB.Port
config.User = c.DB.User
config.Password = c.DB.Password
config.RuntimeParams = map[string]string{
"application_name": c.AppName,
"search_path": c.DB.Schema,
}
if useDB {
config.Database = c.DB.DBName
}
switch c.LogLevel {
case "debug":
config.LogLevel = pgx.LogLevelDebug
case "info":
config.LogLevel = pgx.LogLevelInfo
case "warn":
config.LogLevel = pgx.LogLevelWarn
case "error":
config.LogLevel = pgx.LogLevelError
default:
config.LogLevel = pgx.LogLevelNone
}
config.Logger = NewSQLLogger(logger)
db, err := pgx.ConnectConfig(context.Background(), config)
if err != nil {
return nil, err
}
return db, nil
}
func initDBPool(c *config) (*pgxpool.Pool, error) {
config, _ := pgxpool.ParseConfig("")
config.ConnConfig.Host = c.DB.Host
config.ConnConfig.Port = c.DB.Port
config.ConnConfig.Database = c.DB.DBName
config.ConnConfig.User = c.DB.User
config.ConnConfig.Password = c.DB.Password
config.ConnConfig.RuntimeParams = map[string]string{
"application_name": c.AppName,
"search_path": c.DB.Schema,
}
switch c.LogLevel {
case "debug":
config.ConnConfig.LogLevel = pgx.LogLevelDebug
case "info":
config.ConnConfig.LogLevel = pgx.LogLevelInfo
case "warn":
config.ConnConfig.LogLevel = pgx.LogLevelWarn
case "error":
config.ConnConfig.LogLevel = pgx.LogLevelError
default:
config.ConnConfig.LogLevel = pgx.LogLevelNone
}
config.ConnConfig.Logger = NewSQLLogger(logger)
// if c.DB.MaxRetries != 0 {
// opt.MaxRetries = c.DB.MaxRetries
// }
if c.DB.PoolSize != 0 {
config.MaxConns = c.DB.PoolSize
}
db, err := pgxpool.ConnectConfig(context.Background(), config)
if err != nil {
return nil, err
}
return db, nil
}
func initCompiler() {
var err error
qcompile, pcompile, err = initCompilers(conf)
if err != nil {
errlog.Fatal().Err(err).Msg("failed to initialize compilers")
}
if err := initResolvers(); err != nil {
errlog.Fatal().Err(err).Msg("failed to initialized resolvers")
}
}
func initConfOnce() {
var err error
if conf == nil {
if conf, err = initConf(); err != nil {
errlog.Fatal().Err(err).Msg("failed to read config")
}
}
}
func initAllowList(cpath string) {
var ac allow.Config
var err error
if !conf.Production {
ac = allow.Config{CreateIfNotExists: true, Persist: true}
}
allowList, err = allow.New(cpath, ac)
if err != nil {
errlog.Fatal().Err(err).Msg("failed to initialize allow list")
}
}

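initAllowList turns persistence on only outside production: development servers create allow.list on demand and keep appending to it, while production servers treat it as read-only. A minimal sketch of the two modes using the allow package API from this changeset (the paths are illustrative):

```go
// Sketch: the two allow-list modes, as configured by initAllowList.
package main

import (
	"log"

	"github.com/dosco/super-graph/allow"
)

func main() {
	// Development: create allow.list if missing and persist new queries.
	devList, err := allow.New("./config", allow.Config{
		CreateIfNotExists: true,
		Persist:           true,
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("dev persist:", devList.IsPersist()) // true

	// Production: zero config; the file must already exist and is read-only.
	prodList, err := allow.New("./config", allow.Config{})
	if err != nil {
		log.Fatal(err) // "allow.list not found" if the file is absent
	}
	log.Println("prod persist:", prodList.IsPersist()) // false
}
```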
View File

@ -6,6 +6,7 @@ import (
"fmt"
"io"
"github.com/dosco/super-graph/allow"
"github.com/dosco/super-graph/qcode"
"github.com/jackc/pgconn"
"github.com/jackc/pgx/v4"
@ -23,7 +24,10 @@ var (
_preparedList map[string]*preparedItem
)
func initPreparedList() {
func initPreparedList(cpath string) {
if allowList.IsPersist() {
return
}
_preparedList = make(map[string]*preparedItem)
tx, err := db.Begin(context.Background())
@ -43,30 +47,38 @@ func initPreparedList() {
success := 0
for _, v := range _allowList.list {
if len(v.gql) == 0 {
list, err := allowList.Load()
if err != nil {
errlog.Fatal().Err(err).Send()
}
for _, v := range list {
if len(v.Query) == 0 {
continue
}
err := prepareStmt(v.gql, v.vars)
err := prepareStmt(v)
if err == nil {
success++
continue
}
if len(v.vars) == 0 {
logger.Warn().Err(err).Msg(v.gql)
if len(v.Vars) == 0 {
logger.Warn().Err(err).Msg(v.Query)
} else {
logger.Warn().Err(err).Msgf("%s %s", v.vars, v.gql)
logger.Warn().Err(err).Msgf("%s %s", v.Vars, v.Query)
}
}
logger.Info().
Msgf("Registered %d of %d queries from allow.list as prepared statements",
success, len(_allowList.list))
success, len(list))
}
func prepareStmt(gql string, vars []byte) error {
func prepareStmt(item allow.Item) error {
gql := item.Query
vars := item.Vars
qt := qcode.GetQType(gql)
q := []byte(gql)
@ -99,7 +111,7 @@ func prepareStmt(gql string, vars []byte) error {
logger.Debug().Msg("Prepared statement role: user")
err = prepare(tx, stmts1, gqlHash(gql, vars, "user"))
err = prepare(tx, stmts1, stmtHash(item.Name, "user"))
if err != nil {
return err
}
@ -112,7 +124,7 @@ func prepareStmt(gql string, vars []byte) error {
return err
}
err = prepare(tx, stmts2, gqlHash(gql, vars, "anon"))
err = prepare(tx, stmts2, stmtHash(item.Name, "anon"))
if err != nil {
return err
}
@ -127,7 +139,7 @@ func prepareStmt(gql string, vars []byte) error {
return err
}
err = prepare(tx, stmts, gqlHash(gql, vars, role.Name))
err = prepare(tx, stmts, stmtHash(item.Name, role.Name))
if err != nil {
return err
}

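With this change initPreparedList is a no-op while the allow list is persisting (development mode), and in production it loads every allow.Item and prepares it per role. A hedged sketch of that control flow; prepareStmt is stubbed out here, since the real one compiles role-specific SQL:

```go
// Sketch of the production-only prepare flow introduced above.
// prepareStmt is a stand-in for the real serv internals.
package main

import (
	"log"

	"github.com/dosco/super-graph/allow"
)

func buildPreparedList(al *allow.List, prepareStmt func(allow.Item) error) {
	if al.IsPersist() { // development mode: queries are still changing
		return
	}
	list, err := al.Load()
	if err != nil {
		log.Fatal(err)
	}
	success := 0
	for _, v := range list {
		if len(v.Query) == 0 {
			continue
		}
		if err := prepareStmt(v); err != nil {
			log.Printf("skipping %q: %v", v.Name, err)
			continue
		}
		success++
	}
	log.Printf("prepared %d of %d allow-listed queries", success, len(list))
}

func main() {
	al, err := allow.New("./config", allow.Config{})
	if err != nil {
		log.Fatal(err)
	}
	buildPreparedList(al, func(item allow.Item) error { return nil })
}
```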
View File

@ -21,6 +21,10 @@ func initCompilers(c *config) (*qcode.Compiler, *psql.Compiler, error) {
return nil, nil, err
}
if err = addTables(c, di); err != nil {
return nil, nil, err
}
if err = addForeignKeys(c, di); err != nil {
return nil, nil, err
}

View File

@ -22,6 +22,14 @@ func mkkey(h *xxhash.Digest, k1 string, k2 string) uint64 {
return v
}
// nolint: errcheck
func stmtHash(name string, role string) string {
h := sha1.New()
io.WriteString(h, strings.ToLower(name))
io.WriteString(h, role)
return hex.EncodeToString(h.Sum(nil))
}
// nolint: errcheck
func gqlHash(b string, vars []byte, role string) string {
b = strings.TrimSpace(b)
@ -108,30 +116,6 @@ func al(b byte) bool {
return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || (b >= '0' && b <= '9')
}
func gqlName(b string) string {
state, s := 0, 0
for i := 0; i < len(b); i++ {
switch {
case state == 2 && b[i] == '{':
return b[s:i]
case state == 2 && b[i] == ' ':
return b[s:i]
case state == 1 && b[i] == '{':
return ""
case state == 1 && b[i] != ' ':
s = i
state = 2
case state == 1 && b[i] == ' ':
continue
case i != 0 && b[i] == ' ' && (b[i-1] == 'n' || b[i-1] == 'y'):
state = 1
}
}
return ""
}
func findStmt(role string, stmts []stmt) *stmt {
for i := range stmts {
if stmts[i].role.Name != role {

View File

@ -229,80 +229,3 @@ func TestGQLHashWithVars2(t *testing.T) {
t.Fatal("Hashes don't match they should")
}
}
func TestGQLName1(t *testing.T) {
var q = `
query {
products(
distinct: [price]
where: { id: { and: { greater_or_equals: 20, lt: 28 } } }
) { id name } }`
name := gqlName(q)
if len(name) != 0 {
t.Fatal("Name should be empty, not ", name)
}
}
func TestGQLName2(t *testing.T) {
var q = `
query hakuna_matata {
products(
distinct: [price]
where: { id: { and: { greater_or_equals: 20, lt: 28 } } }
) {
id
name
}
}`
name := gqlName(q)
if name != "hakuna_matata" {
t.Fatal("Name should be 'hakuna_matata', not ", name)
}
}
func TestGQLName3(t *testing.T) {
var q = `
mutation means{ users { id } }`
// var v2 = ` { products( limit: 30, order_by: { price: desc }, distinct: [ price ] where: { id: { and: { greater_or_equals: 20, lt: 28 } } }) { id name price user { id email } } } `
name := gqlName(q)
if name != "means" {
t.Fatal("Name should be 'means', not ", name)
}
}
func TestGQLName4(t *testing.T) {
var q = `
query no_worries
users {
id
}
}`
name := gqlName(q)
if name != "no_worries" {
t.Fatal("Name should be 'no_worries', not ", name)
}
}
func TestGQLName5(t *testing.T) {
var q = `
{
users {
id
}
}`
name := gqlName(q)
if len(name) != 0 {
t.Fatal("Name should be empty, not ", name)
}
}
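Although these gqlName tests are deleted rather than ported, the Fuzz change above swaps gqlName for allow.QueryName, which implies the same naming rules. A compact sketch restating the deleted expectations against QueryName; behavioral parity is an assumption here, not something this diff verifies:

```go
// Sketch: naming rules from the deleted tests, checked against
// allow.QueryName (parity with gqlName is assumed, not proven).
package main

import (
	"fmt"

	"github.com/dosco/super-graph/allow"
)

func main() {
	cases := []struct{ query, want string }{
		{`query { products { id name } }`, ""}, // anonymous query
		{`query hakuna_matata { products { id } }`, "hakuna_matata"},
		{`mutation means{ users { id } }`, "means"},
		{`{ users { id } }`, ""}, // shorthand form has no name
	}
	for _, c := range cases {
		if got := allow.QueryName(c.query); got != c.want {
			fmt.Printf("QueryName(%q) = %q, want %q\n", c.query, got, c.want)
		}
	}
}
```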