Compare commits

...

4 Commits

39 changed files with 1493 additions and 952 deletions

.gitignore vendored

@@ -34,4 +34,6 @@ supergraph
*-fuzz.zip
crashers
suppressions
release
+.gofuzz
+*-fuzz.zip

allow/allow.go Normal file

@@ -0,0 +1,337 @@
package allow

import (
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
	"io/ioutil"
	"os"
	"path"
	"sort"
	"strings"
)

const (
	AL_QUERY int = iota + 1
	AL_VARS
)

// Item is a single allow-list entry: a named query, the URI it was
// first seen on, and the JSON variables that accompanied it.
type Item struct {
	Name  string
	key   string
	URI   string
	Query string
	Vars  json.RawMessage
}

type List struct {
	filepath string
	saveChan chan Item
}

type Config struct {
	CreateIfNotExists bool
	Persist           bool
}

// New locates the allow.list file, checking cpath, ./allow.list and
// ./config/allow.list in that order (or prepares to create it when
// CreateIfNotExists is set). With Persist enabled it also starts the
// goroutine that writes queued items to disk.
func New(cpath string, conf Config) (*List, error) {
	al := List{}

	if len(cpath) != 0 {
		fp := path.Join(cpath, "allow.list")

		if _, err := os.Stat(fp); err == nil {
			al.filepath = fp
		} else if !os.IsNotExist(err) {
			return nil, err
		}
	}

	if len(al.filepath) == 0 {
		fp := "./allow.list"

		if _, err := os.Stat(fp); err == nil {
			al.filepath = fp
		} else if !os.IsNotExist(err) {
			return nil, err
		}
	}

	if len(al.filepath) == 0 {
		fp := "./config/allow.list"

		if _, err := os.Stat(fp); err == nil {
			al.filepath = fp
		} else if !os.IsNotExist(err) {
			return nil, err
		}
	}

	if len(al.filepath) == 0 {
		if !conf.CreateIfNotExists {
			return nil, errors.New("allow.list not found")
		}

		if len(cpath) == 0 {
			al.filepath = "./config/allow.list"
		} else {
			al.filepath = path.Join(cpath, "allow.list")
		}
	}

	var err error

	if conf.Persist {
		al.saveChan = make(chan Item)

		go func() {
			for v := range al.saveChan {
				if err = al.save(v); err != nil {
					break
				}
			}
		}()
	}

	if err != nil {
		return nil, err
	}

	return &al, nil
}

// IsPersist reports whether items added to the list are saved to disk.
func (al *List) IsPersist() bool {
	return al.saveChan != nil
}

// Add queues a query and its variables to be saved to the allow list.
// Anonymous queries that start with '{' get the implicit `query`
// keyword prefixed.
func (al *List) Add(vars []byte, query, uri string) error {
	if al.saveChan == nil {
		return errors.New("allow.list is read-only")
	}

	if len(query) == 0 {
		return errors.New("empty query")
	}

	var q string

	for i := 0; i < len(query); i++ {
		c := query[i]
		if c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z' {
			q = query
			break
		} else if c == '{' {
			q = "query " + query
			break
		}
	}

	al.saveChan <- Item{
		URI:   uri,
		Query: q,
		Vars:  vars,
	}

	return nil
}

// Load reads and parses the allow.list file. Comment lines (`# ...`)
// set the URI for the entries that follow, a `variables` block captures
// the JSON variables, and each `query`/`mutation` block becomes an Item
// (deduplicated by lower-cased name).
func (al *List) Load() ([]Item, error) {
	var list []Item

	b, err := ioutil.ReadFile(al.filepath)
	if err != nil {
		return list, err
	}

	if len(b) == 0 {
		return list, nil
	}

	var uri string
	var varBytes []byte

	itemMap := make(map[string]struct{})

	s, e, c := 0, 0, 0
	ty := 0

	for {
		fq := false

		if c == 0 && b[e] == '#' {
			s = e
			for e < len(b) && b[e] != '\n' {
				e++
			}
			if (e - s) > 2 {
				uri = strings.TrimSpace(string(b[(s + 1):e]))
			}
		}

		if e >= len(b) {
			break
		}

		if matchPrefix(b, e, "query") || matchPrefix(b, e, "mutation") {
			if c == 0 {
				s = e
			}
			ty = AL_QUERY
		} else if matchPrefix(b, e, "variables") {
			if c == 0 {
				s = e + len("variables") + 1
			}
			ty = AL_VARS
		} else if b[e] == '{' {
			c++
		} else if b[e] == '}' {
			c--

			if c == 0 {
				if ty == AL_QUERY {
					fq = true
				} else if ty == AL_VARS {
					varBytes = b[s:(e + 1)]
				}
				ty = 0
			}
		}

		if fq {
			query := string(b[s:(e + 1)])
			name := QueryName(query)
			key := strings.ToLower(name)

			if _, ok := itemMap[key]; !ok {
				v := Item{
					Name:  name,
					key:   key,
					URI:   uri,
					Query: query,
					Vars:  varBytes,
				}
				list = append(list, v)
			}

			varBytes = nil
		}

		e++
		if e >= len(b) {
			break
		}
	}

	return list, nil
}

// save upserts the item into the on-disk list, keyed by query name,
// then rewrites the whole file sorted by that key.
func (al *List) save(item Item) error {
	item.Name = QueryName(item.Query)
	item.key = strings.ToLower(item.Name)

	if len(item.Name) == 0 {
		return nil
	}

	list, err := al.Load()
	if err != nil {
		return err
	}

	index := -1

	for i, v := range list {
		if strings.EqualFold(v.Name, item.Name) {
			index = i
			break
		}
	}

	if index != -1 {
		list[index] = item
	} else {
		list = append(list, item)
	}

	f, err := os.Create(al.filepath)
	if err != nil {
		return err
	}
	defer f.Close()

	sort.Slice(list, func(i, j int) bool {
		return strings.Compare(list[i].key, list[j].key) == -1
	})

	for _, v := range list {
		_, err := f.WriteString(fmt.Sprintf("# %s\n\n", v.URI))
		if err != nil {
			return err
		}

		if len(v.Vars) != 0 && !bytes.Equal(v.Vars, []byte("{}")) {
			vj, err := json.MarshalIndent(v.Vars, "", " ")
			if err != nil {
				return fmt.Errorf("failed to marshal vars: %v", err)
			}

			_, err = f.WriteString(fmt.Sprintf("variables %s\n\n", vj))
			if err != nil {
				return err
			}
		}

		if v.Query[0] == '{' {
			_, err = f.WriteString(fmt.Sprintf("query %s\n\n", v.Query))
		} else {
			_, err = f.WriteString(fmt.Sprintf("%s\n\n", v.Query))
		}
		if err != nil {
			return err
		}
	}

	return nil
}

func matchPrefix(b []byte, i int, s string) bool {
	if (len(b) - i) < len(s) {
		return false
	}
	for n := 0; n < len(s); n++ {
		if b[(i+n)] != s[n] {
			return false
		}
	}
	return true
}

// QueryName returns the operation name that follows the `query` or
// `mutation` keyword, or "" for anonymous operations.
func QueryName(b string) string {
	state, s := 0, 0

	for i := 0; i < len(b); i++ {
		switch {
		case state == 2 && b[i] == '{':
			return b[s:i]
		case state == 2 && b[i] == ' ':
			return b[s:i]
		case state == 1 && b[i] == '{':
			return ""
		case state == 1 && b[i] != ' ':
			s = i
			state = 2
		case state == 1 && b[i] == ' ':
			continue
		case i != 0 && b[i] == ' ' && (b[i-1] == 'n' || b[i-1] == 'y'):
			state = 1
		}
	}

	return ""
}
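For reference, the on-disk format that `Load` parses and `save` emits is a plain text file: a `#` comment line carrying the source URI, an optional `variables` block holding the JSON variables, then the named query itself, each separated by blank lines. A small illustrative entry (the URI, names and fields here are made up, not taken from the PR):

```
# http://localhost:8080/api/v1/graphql

variables {
  "data": {
    "email": "user@example.com"
  }
}

mutation createUser {
  user(insert: $data) {
    id
  }
}
```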

allow/allow_test.go Normal file

@@ -0,0 +1,82 @@
package allow

import (
	"testing"
)

func TestGQLName1(t *testing.T) {
	var q = `
	query {
		products(
			distinct: [price]
			where: { id: { and: { greater_or_equals: 20, lt: 28 } } }
		) { id name } }`

	name := QueryName(q)

	if len(name) != 0 {
		t.Fatal("Name should be empty, not ", name)
	}
}

func TestGQLName2(t *testing.T) {
	var q = `
	query hakuna_matata {
		products(
			distinct: [price]
			where: { id: { and: { greater_or_equals: 20, lt: 28 } } }
		) {
			id
			name
		}
	}`

	name := QueryName(q)

	if name != "hakuna_matata" {
		t.Fatal("Name should be 'hakuna_matata', not ", name)
	}
}

func TestGQLName3(t *testing.T) {
	var q = `
	mutation means{ users { id } }`

	// var v2 = ` { products( limit: 30, order_by: { price: desc }, distinct: [ price ] where: { id: { and: { greater_or_equals: 20, lt: 28 } } }) { id name price user { id email } } } `

	name := QueryName(q)

	if name != "means" {
		t.Fatal("Name should be 'means', not ", name)
	}
}

func TestGQLName4(t *testing.T) {
	var q = `
	query no_worries {
		users {
			id
		}
	}`

	name := QueryName(q)

	if name != "no_worries" {
		t.Fatal("Name should be 'no_worries', not ", name)
	}
}

func TestGQLName5(t *testing.T) {
	var q = `
	{
		users {
			id
		}
	}`

	name := QueryName(q)

	if len(name) != 0 {
		t.Fatal("Name should be empty, not ", name)
	}
}

allow/fuzz_test.go Normal file

@@ -0,0 +1,15 @@
package allow

import "testing"

func TestFuzzCrashers(t *testing.T) {
	var crashers = []string{
		"query",
		"q",
		"que",
	}

	for _, f := range crashers {
		_ = QueryName(f)
	}
}


@@ -167,10 +167,13 @@ roles:
        block: false
    - name: deals
      query:
        limit: 3
-        columns: ["name", "description" ]
+        aggregation: false
+    - name: purchases
+      query:
+        limit: 3
        aggregation: false
    - name: user

@@ -183,12 +186,10 @@ roles:
      query:
        limit: 50
        filters: ["{ user_id: { eq: $user_id } }"]
-        columns: ["id", "name", "description", "search_rank", "search_headline_description" ]
        disable_functions: false
      insert:
        filters: ["{ user_id: { eq: $user_id } }"]
-        columns: ["id", "name", "description" ]
        presets:
          - user_id: "$user_id"
          - created_at: "now"


@@ -4,9 +4,9 @@ sidebar: auto

# Guide to Super Graph

-Super Graph is a micro-service that instantly and without code gives you a high performance and secure GraphQL API. Your GraphQL queries are auto translated into a single fast SQL query. No more writing API code as you develop your web frontend just make the query you need and Super Graph will do the rest.
+Super Graph is a service that instantly and without code gives you a high performance and secure GraphQL API. Your GraphQL queries are auto translated into a single fast SQL query. No more spending weeks or months writing backend API code. Just make the query you need and Super Graph will do the rest.

-Super Graph has a rich feature set like integrating with your existing Ruby on Rails apps, joining your DB with data from remote APIs, Role and Attribute based access control, Supoport for JWT tokens, DB migrations, seeding and a lot more.
+Super Graph has a rich feature set like integrating with your existing Ruby on Rails apps, joining your DB with data from remote APIs, Role and Attribute based access control, Support for JWT tokens, DB migrations, seeding and a lot more.

## Features
@ -47,14 +47,14 @@ open http://localhost:3000
open http://localhost:8080 open http://localhost:8080
``` ```
::: warning DEMO REQUIREMENTS ::: tip DEMO REQUIREMENTS
This demo requires `docker` you can either install it using `brew` or from the This demo requires `docker` you can either install it using `brew` or from the
docker website [https://docs.docker.com/docker-for-mac/install/](https://docs.docker.com/docker-for-mac/install/) docker website [https://docs.docker.com/docker-for-mac/install/](https://docs.docker.com/docker-for-mac/install/)
::: :::
#### Trying out GraphQL #### Trying out GraphQL
We currently fully support queries and mutations. Support for `subscriptions` is work in progress. For example the below GraphQL query would fetch two products that belong to the current user where the price is greater than 10. We fully support queries and mutations. For example the below GraphQL query would fetch two products that belong to the current user where the price is greater than 10.
#### GQL Query #### GQL Query
@@ -76,32 +76,6 @@ query {
  }
}
```

-In another example the below GraphQL mutation would insert a product into the database. The first part of the below example is the variable data and the second half is the GraphQL mutation. For mutations data has to always ben passed as a variable.
-
-```json
-{
-  "data": {
-    "name": "Art of Computer Programming",
-    "description": "The Art of Computer Programming (TAOCP) is a comprehensive monograph written by computer scientist Donald Knuth",
-    "price": 30.5
-  }
-}
-```
-
-```graphql
-mutation {
-  product(insert: $data) {
-    id
-    name
-  }
-}
-```
-
-The above GraphQL query returns the JSON result below. It handles all
-kinds of complexity without you having to writing a line of code.
-For example there is a while greater than `gt` and a limit clause on a child field. And the `avatar` field is renamed to `picture`. The `password` field is blocked and not returned. Finally the relationship between the `users` table and the `products` table is auto discovered and used.

#### JSON Result
```json ```json
@ -128,19 +102,107 @@ For example there is a while greater than `gt` and a limit clause on a child fie
} }
``` ```
#### Try with an authenticated user ::: tip Testing with a user
In development mode you can use the `X-User-ID: 4` header to set a user id so you don't have to worries about cookies etc. This can be set using the *HTTP Headers* tab at the bottom of the web UI.
:::
In development mode you can use the `X-User-ID: 4` header to set a user id so you don't have to worries about cookies etc. This can be set using the *HTTP Headers* tab at the bottom of the web UI you'll see when you visit the above link. You can also directly run queries from the commandline like below. In another example the below GraphQL mutation would insert a product into the database. The first part of the below example is the variable data and the second half is the GraphQL mutation. For mutations data has to always ben passed as a variable.
#### Querying the GQL endpoint ```json
{
"data": {
"name": "Art of Computer Programming",
"description": "The Art of Computer Programming (TAOCP) is a comprehensive monograph written by computer scientist Donald Knuth",
"price": 30.5
}
}
```
```bash ```graphql
mutation {
product(insert: $data) {
id
name
}
}
```
# fetch the response json directly from the endpoint using user id 5 ## Why Super Graph
curl 'http://localhost:8080/api/v1/graphql' \
-H 'content-type: application/json' \ Let's take a simple example say you want to fetch 5 products priced over 12 dollars along with the photos of the products and the users that owns them. Additionally also fetch the last 10 of your own purchases along with the name and ID of the product you purchased. This is a common type of query to render a view in say an ecommerce app. Lets be honest it's not very exciting write and maintain. Keep in mind the data needed will only continue to grow and change as your app evolves. Developers might find that most ORMs will not be able to do all of this in a single SQL query and will require n+1 queries to fetch all the data and assembly it into the right JSON response.
-H 'X-User-ID: 5' \
--data-binary '{"query":"{ products { name price users { email }}}"}' What if I told you Super Graph will fetch all this data with a single SQL query and without you having to write a single line of code. Also as your app evolves feel free to evolve the query as you like. In our experience Super Graph saves us hundreds or thousands of man hours that we can put towards the more exciting parts of our app.
#### GraphQL Query
```graphql
query {
products(limit 5, where: { price: { gt: 12 } }) {
id
name
description
price
photos {
url
}
user {
id
email
picture : avatar
full_name
}
}
purchases(
limit 10,
order_by: { created_at: desc } ,
where: { user_id: { eq: $user_id } }
) {
id
created_at
product {
id
name
}
}
}
```
#### JSON Result
```json
"data": {
"products": [
{
"id": 1,
"name": "Oaked Arrogant Bastard Ale",
"description": "Coors lite, European Amber Lager, Perle, 1272 - American Ale II, 38 IBU, 6.4%, 9.7°Blg",
"price": 20,
"photos: [{
"url": "https://www.scienceworld.ca/wp-content/uploads/science-world-beer-flavours.jpg"
}],
"user": {
"id": 1,
"email": "user0@demo.com",
"picture": "https://robohash.org/sitaliquamquaerat.png?size=300x300&set=set1",
"full_name": "Mrs. Wilhemina Hilpert"
}
},
...
]
},
"purchases": [
{
"id": 5,
"created_at": "2020-01-24T05:34:39.880599",
"product": {
"id": 45,
"name": "Brooklyn Black",
}
},
...
]
}
``` ```
## Get Started
@@ -651,8 +713,6 @@ query {
}
```

-## Mutations
-
In GraphQL, mutations are the operation type used when you need to modify data. Super Graph supports `insert`, `update`, `upsert` and `delete` mutations. You can also do complex nested inserts and updates.

When using mutations the data must be passed as variables since Super Graph compiles the query into a prepared statement in the database for maximum speed. Prepared statements are like functions in your code: when called they accept arguments and your variables are passed in as those arguments.
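To make the prepared-statement point concrete, here is roughly what that mechanism looks like at the SQL level in Postgres (a hand-written sketch; the statements Super Graph actually registers are the compiled queries from the allow list):

```sql
-- Defined once: the query is parsed and planned up front,
-- with typed placeholders instead of inlined values.
PREPARE products_by_user(bigint) AS
  SELECT id, name FROM products WHERE user_id = $1;

-- Executed many times: only the argument values travel, so there is
-- no repeated parsing and no SQL injection through the variable.
EXECUTE products_by_user(5);
```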
@@ -836,8 +896,6 @@ mutation {
}
```

-## Nested Mutations
-
Often you will need to create or update multiple related items at the same time. This can be done using nested mutations. For example you might need to create a product and assign it to a user, or create a user and his products at the same time. You just have to use simple json to define your mutation and Super Graph takes care of the rest.
### Nested Insert

@@ -988,6 +1046,40 @@ fetch('http://localhost:8080/api/v1/graphql', {
.then(res => console.log(res.data));
```
+## GraphQL with React
+
+This is a quick simple example using `graphql.js` [https://github.com/f/graphql.js/](https://github.com/f/graphql.js/)
+
+```js
+import React, { useState, useEffect } from 'react'
+import graphql from 'graphql.js'
+
+// Create a GraphQL client pointing to Super Graph
+var graph = graphql("http://localhost:3000/api/v1/graphql", { asJSON: true })
+
+const App = () => {
+  const [user, setUser] = useState(null)
+
+  useEffect(() => {
+    async function action() {
+      // Use the GraphQL client to execute a graphQL query
+      // The second argument to the client are the variables you need to pass
+      const result = await graph(`{ user { id first_name last_name picture_url } }`)()
+      setUser(result)
+    }
+    action()
+  }, []);
+
+  return (
+    <div className="App">
+      <h1>{ JSON.stringify(user) }</h1>
+    </div>
+  );
+}
+
+export default App;
+```
## Advanced Columns

The ability to have `JSON/JSONB` and `Array` columns is often considered among the most useful features of Postgres. There are many cases where using an array or a json column saves space and reduces complexity in your app. The only real issue with these columns is that your SQL queries can get harder to write and maintain.
@@ -1137,45 +1229,45 @@ class AddSearchColumn < ActiveRecord::Migration[5.1]
end
```

-## GraphQL with React
-
-This is a quick simple example using `graphql.js` [https://github.com/f/graphql.js/](https://github.com/f/graphql.js/)
-
-```js
-import React, { useState, useEffect } from 'react'
-import graphql from 'graphql.js'
-
-// Create a GraphQL client pointing to Super Graph
-var graph = graphql("http://localhost:3000/api/v1/graphql", { asJSON: true })
-
-const App = () => {
-  const [user, setUser] = useState(null)
-
-  useEffect(() => {
-    async function action() {
-      // Use the GraphQL client to execute a graphQL query
-      // The second argument to the client are the variables you need to pass
-      const result = await graph(`{ user { id first_name last_name picture_url } }`)()
-      setUser(result)
-    }
-    action()
-  }, []);
-
-  return (
-    <div className="App">
-      <h1>{ JSON.stringify(user) }</h1>
-    </div>
-  );
-}
-
-export default App;
-```
+## API Security
+
+One of the most common questions I get asked is: what happens if a user out on the internet sends queries that we don't want run? For example, how do we stop him from fetching all users or the emails of users? Our answer to this is that it is not an issue, as this cannot happen; let me explain.
+
+Super Graph runs in one of two modes, `development` or `production`; this is controlled via the config value `production: false`. When it's false it's running in development mode, and when true, production. In development mode all the **named** queries (including mutations) are saved to the allow list `./config/allow.list`. In production mode, when Super Graph starts, only the queries from this allow list file are registered with the database as [prepared statements](https://stackoverflow.com/questions/8263371/how-can-prepared-statements-protect-from-sql-injection-attacks).
+
+Prepared statements are designed by databases to be fast and secure. They protect against all kinds of sql injection attacks and since they are pre-processed and pre-planned they are much faster to run than raw sql queries. Also there's no GraphQL to SQL compiling happening in production mode, which makes your queries lightning fast as they are directly sent to the database with almost no overhead.
+
+In short, in production only queries listed in the allow list file `./config/allow.list` can be used; all other queries will be blocked.
+
+::: tip How to think about the allow list?
+The allow list file is essentially a list of all your exposed API calls and the data that passes within them. It's very easy to build tooling to do things like parsing this file within your tests to ensure fields like `credit_card_no` are not accidentally leaked. It's a great way to build compliance tooling and ensure your user data is always safe.
+:::
+
+This is an example of a named query; `getUserWithProducts` is the name you've given to this query. It can be anything you like but should be unique across all your queries. Only named queries are saved in the allow list in development mode.
+
+```graphql
+query getUserWithProducts {
+  users {
+    id
+    name
+    products {
+      id
+      name
+      price
+    }
+  }
+}
+```
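As the tip above suggests, the allow list is easy to audit mechanically. A minimal sketch of such a compliance check using this PR's `allow` package (the `credit_card_no` field and the config path are illustrative, not from the PR):

```go
package allow_test

import (
	"strings"
	"testing"

	"github.com/dosco/super-graph/allow"
)

// Fails the build if any whitelisted query ever selects a sensitive field.
func TestAllowListLeaksNoSensitiveFields(t *testing.T) {
	al, err := allow.New("./config", allow.Config{})
	if err != nil {
		t.Fatal(err)
	}

	items, err := al.Load()
	if err != nil {
		t.Fatal(err)
	}

	for _, it := range items {
		if strings.Contains(it.Query, "credit_card_no") {
			t.Errorf("query %q exposes credit_card_no", it.Name)
		}
	}
}
```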
## Authentication

-You can only have one type of auth enabled. You can either pick Rails or JWT.
+You can only have one type of auth enabled: either Rails or JWT.

-### Rails Auth (Devise / Warden)
+### Ruby on Rails

Almost all Rails apps use Devise or Warden for authentication. Once the user is
authenticated a session is created with the user's ID. The session can either be
@@ -1227,7 +1319,7 @@ auth:
  max_active: 12000
```

-### JWT Token Auth
+### JWT Tokens

```yaml
auth:

@@ -1241,14 +1333,67 @@ auth:
  public_key_type: ecdsa #rsa
```

-For JWT tokens we currently support tokens from a provider like Auth0
-or if you have a custom solution then we look for the `user_id` in the
-`subject` claim of of the `id token`. If you pick Auth0 then we derive two variables from the token `user_id` and `user_id_provider` for to use in your filters.
+For JWT tokens we currently support tokens from a provider like Auth0, or if you have a custom solution then we look for the `user_id` in the `subject` claim of the `id token`. If you pick Auth0 then we derive two variables from the token, `user_id` and `user_id_provider`, to use in your filters.

We can get the JWT token either from the `authorization` header, where we expect it to be a `bearer` token, or if `cookie` is specified then we look there.

For validation a `secret` or a public key (ecdsa or rsa) is required. When using public keys they have to be in a PEM format file.
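If you are generating your own key pair, standard `openssl` commands produce PEM files of the required form (shown for ecdsa; the curve choice is an assumption, use whatever your token issuer supports):

```bash
# private key, used by whatever service signs the tokens
openssl ecparam -name prime256v1 -genkey -noout -out private_key.pem

# matching public key in PEM format, for the public_key_file setting
openssl ec -in private_key.pem -pubout -out public_key.pem
```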
+### HTTP Headers
+
+```yaml
+header:
+  name: X-AppEngine-QueueName
+  exists: true
+  #value: default
+```
+
+Header auth is usually the best option to authenticate requests to the action endpoints. For example you might want to use an action to refresh a materialized view every hour and only want a cron service like the Google AppEngine Cron service to make that request; in this case a config similar to the one above will do.
+
+The `exists: true` parameter ensures that only the existence of the header is checked, not its value. The `value` parameter lets you confirm that the value matches the one assigned to the parameter. This helps in the case you are using a shared secret to protect the endpoint.
+
+### Named Auth
+
+```yaml
+# You can add additional named auths to use with actions
+# In this example actions using this auth can only be
+# called from the Google Appengine Cron service that
+# sets a special header to all its requests
+auths:
+  - name: from_taskqueue
+    type: header
+    header:
+      name: X-Appengine-Cron
+      exists: true
+```
+
+In addition to the default auth configuration you can create additional named auth configurations to be used with features like `actions`. For example while your main GraphQL endpoint uses JWT for authentication you may want to use a header value to ensure your actions can only be called by clients having access to a shared secret or security header.
+
+## Actions
+
+Actions is a very useful feature that is currently work in progress. For now the best use case for actions is to refresh database tables like materialized views or call a database procedure to refresh a cache table, etc. An action creates an http endpoint that anyone can call to have the SQL query executed. The below example will create an endpoint `/api/v1/actions/refresh_leaderboard_users`; any request sent to that endpoint will cause the sql query to be executed. The `auth_name` points to a named auth that should be used to secure this endpoint. In future we have big plans to allow your own custom code to run using actions.
+
+```yaml
+actions:
+  - name: refresh_leaderboard_users
+    sql: REFRESH MATERIALIZED VIEW CONCURRENTLY "leaderboard_users"
+    auth_name: from_taskqueue
+```
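A cron service would then hit the endpoint with nothing but the configured header; since the named auth above only checks `exists: true`, the header value itself doesn't matter. A sketch (the HTTP method is an assumption, check how the action routes are registered):

```bash
curl -X POST 'http://localhost:8080/api/v1/actions/refresh_leaderboard_users' \
  -H 'X-Appengine-Cron: true'
```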
+#### Using CURL to test a query
+
+```bash
+# fetch the response json directly from the endpoint using user id 5
+curl 'http://localhost:8080/api/v1/graphql' \
+  -H 'content-type: application/json' \
+  -H 'X-User-ID: 5' \
+  --data-binary '{"query":"{ products { name price users { email }}}"}'
+```
## Access Control

It's common for APIs to control what information they return or insert based on the role of the user. In Super Graph we have two primary roles, `user` and `anon`: the first for users where a `user_id` is available, the latter for users where it's not.

@@ -1261,7 +1406,6 @@ The `user` role can be divided up into further roles based on attributes in the

Super Graph allows you to create roles dynamically using the `roles_query` and `match` config values.

### Configure RBAC

```yaml
@@ -1494,6 +1638,22 @@ auth:
  # public_key_file: /secrets/public_key.pem
  # public_key_type: ecdsa #rsa

+  # header:
+  #   name: dnt
+  #   exists: true
+  #   value: localhost:8080
+
+# You can add additional named auths to use with actions
+# In this example actions using this auth can only be
+# called from the Google Appengine Cron service that
+# sets a special header to all its requests
+auths:
+  - name: from_taskqueue
+    type: header
+    header:
+      name: X-Appengine-Cron
+      exists: true
+
database:
  type: postgres
  host: db
@@ -1524,6 +1684,17 @@ database:
    - encrypted
    - token

+# Create custom actions with their own api endpoints
+# For example the below action will be available at /api/v1/actions/refresh_leaderboard_users
+# A request to this url will execute the configured SQL query
+# which in this case refreshes a materialized view in the database.
+# The auth_name is from one of the configured auths
+actions:
+  - name: refresh_leaderboard_users
+    sql: REFRESH MATERIALIZED VIEW CONCURRENTLY "leaderboard_users"
+    auth_name: from_taskqueue
+
tables:
  - name: customers
    remotes:

go.mod

@@ -12,6 +12,7 @@ require (
	github.com/dgrijalva/jwt-go v3.2.0+incompatible
	github.com/dlclark/regexp2 v1.2.0 // indirect
	github.com/dop251/goja v0.0.0-20190912223329-aa89e6a4c733
+	github.com/dvyukov/go-fuzz v0.0.0-20191206100749-a378175e205c // indirect
	github.com/fsnotify/fsnotify v1.4.7
	github.com/garyburd/redigo v1.6.0
	github.com/go-sourcemap/sourcemap v2.1.2+incompatible // indirect

go.sum

@@ -54,6 +54,8 @@ github.com/dlclark/regexp2 v1.2.0 h1:8sAhBGEM0dRWogWqWyQeIJnxjWO6oIjl8FKqREDsGfk
github.com/dlclark/regexp2 v1.2.0/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc=
github.com/dop251/goja v0.0.0-20190912223329-aa89e6a4c733 h1:cyNc40Dx5YNEO94idePU8rhVd3dn+sd04Arh0kDBAaw=
github.com/dop251/goja v0.0.0-20190912223329-aa89e6a4c733/go.mod h1:Mw6PkjjMXWbTj+nnj4s3QPXq1jaT0s5pC0iFD4+BOAA=
+github.com/dvyukov/go-fuzz v0.0.0-20191206100749-a378175e205c h1:/bXaeEuNG6V0HeyEGw11DYLW5BGsOPlcVRIXbHNUWSo=
+github.com/dvyukov/go-fuzz v0.0.0-20191206100749-a378175e205c/go.mod h1:11Gm+ccJnvAhCNLlf5+cS9KjtbaD5I5zaZpFMsTHWTw=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/garyburd/redigo v1.6.0 h1:0VruCpn7yAIIu7pWVClQC8wxCJEcG3nyzpMSHKi1PQc=


@@ -5,5 +5,5 @@ import (
)

func main() {
-	serv.Init()
+	serv.Cmd()
}

psql/fuzz.go Normal file

@@ -0,0 +1,54 @@
// +build gofuzz

package psql

import (
	"encoding/json"

	"github.com/dosco/super-graph/qcode"
)

var (
	qcompileTest, _ = qcode.NewCompiler(qcode.Config{})

	schema = getTestSchema()

	vars = NewVariables(map[string]string{
		"admin_account_id": "5",
	})

	pcompileTest = NewCompiler(Config{
		Schema: schema,
		Vars:   vars,
	})
)

// FuzzerEntrypoint for Fuzzbuzz
func Fuzz(data []byte) int {
	gql := `mutation {
		product(insert: $data) {
			id
			name
			user {
				id
				full_name
				email
			}
		}
	}`

	qc, err := qcompileTest.Compile([]byte(gql), "user")
	if err != nil {
		panic("qcompile can't fail")
	}

	vars := map[string]json.RawMessage{
		"data": json.RawMessage(data),
	}

	_, _, err = pcompileTest.CompileEx(qc, vars)
	if err != nil {
		return 0
	}

	return 1
}
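The `gofuzz` build tag, the new `dvyukov/go-fuzz` dependency and the `.gitignore` entries (`*-fuzz.zip`, `crashers`, `suppressions`) all point at the standard go-fuzz workflow, which looks roughly like this (package path per this repo; commands per go-fuzz's documented usage):

```bash
# install the fuzzing toolchain
go get github.com/dvyukov/go-fuzz/go-fuzz github.com/dvyukov/go-fuzz/go-fuzz-build

# build an instrumented archive of the psql package (honors the gofuzz build tag)
go-fuzz-build github.com/dosco/super-graph/psql

# run the fuzzer; crashing inputs are written to ./crashers
go-fuzz -bin=psql-fuzz.zip
```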


@@ -15,7 +15,10 @@ func (c *compilerContext) renderInsert(qc *qcode.QCode, w io.Writer,
	insert, ok := vars[qc.ActionVar]
	if !ok {
-		return 0, fmt.Errorf("Variable '%s' not !defined", qc.ActionVar)
+		return 0, fmt.Errorf("variable '%s' not defined", qc.ActionVar)
+	}
+
+	if len(insert) == 0 {
+		return 0, fmt.Errorf("variable '%s' is empty", qc.ActionVar)
	}

	io.WriteString(c.w, `WITH "_sg_input" AS (SELECT '{{`)
@@ -147,7 +150,14 @@ func renderNestedInsertRelColumns(w io.Writer, item kvitem, values bool) error {
			io.WriteString(w, `, `)
		}
		if values {
-			colWithTable(w, v.relCP.Left.Table, v.relCP.Left.Col)
+			if v._ctype > 0 {
+				io.WriteString(w, `"_x_`)
+				io.WriteString(w, v.relCP.Left.Table)
+				io.WriteString(w, `".`)
+				quoted(w, v.relCP.Left.Col)
+			} else {
+				colWithTable(w, v.relCP.Left.Table, v.relCP.Left.Col)
+			}
		} else {
			quoted(w, v.relCP.Right.Col)
		}
@@ -166,12 +176,18 @@ func renderNestedInsertRelTables(w io.Writer, item kvitem) error {
			io.WriteString(w, `, `)
		}
	} else {
-		// Render child foreign key columns if child-to-parent
+		// Render tables needed to set values if child-to-parent
		// relationship is one-to-many
		for _, v := range item.items {
			if v.relCP.Type == RelOneToMany {
-				quoted(w, v.relCP.Left.Table)
-				io.WriteString(w, `, `)
+				if v._ctype > 0 {
+					io.WriteString(w, `"_x_`)
+					io.WriteString(w, v.relCP.Left.Table)
+					io.WriteString(w, `", `)
+				} else {
+					quoted(w, v.relCP.Left.Table)
+					io.WriteString(w, `, `)
+				}
			}
		}
	}


@@ -290,7 +290,7 @@ func nestedInsertOneToOneWithConnect(t *testing.T) {
		}
	}`

-	sql := `WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "users" AS (SELECT "id" FROM "_sg_input" i,"users" WHERE "users"."id"= ((i.j->'user'->'connect'->>'id'))::bigint LIMIT 1), "products" AS (INSERT INTO "products" ("name", "price", "created_at", "updated_at", "user_id") SELECT "t"."name", "t"."price", "t"."created_at", "t"."updated_at", "users"."id" FROM "_sg_input" i, "users", json_populate_record(NULL::products, i.j) t RETURNING *) SELECT json_object_agg('product', json_0) FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "user_1_join"."json_1" AS "user", "tags_2_join"."json_2" AS "tags") AS "json_row_0")) AS "json_0" FROM (SELECT "products"."id", "products"."name", "products"."user_id", "products"."tags" FROM "products" LIMIT ('1') :: integer) AS "products_0" LEFT OUTER JOIN LATERAL (SELECT coalesce(json_agg("json_2"), '[]') AS "json_2" FROM (SELECT row_to_json((SELECT "json_row_2" FROM (SELECT "tags_2"."id" AS "id", "tags_2"."name" AS "name") AS "json_row_2")) AS "json_2" FROM (SELECT "tags"."id", "tags"."name" FROM "tags" WHERE ((("tags"."slug") = any ("products_0"."tags"))) LIMIT ('20') :: integer) AS "tags_2" LIMIT ('20') :: integer) AS "json_agg_2") AS "tags_2_join" ON ('true') LEFT OUTER JOIN LATERAL (SELECT row_to_json((SELECT "json_row_1" FROM (SELECT "users_1"."id" AS "id", "users_1"."full_name" AS "full_name", "users_1"."email" AS "email") AS "json_row_1")) AS "json_1" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" WHERE ((("users"."id") = ("products_0"."user_id"))) LIMIT ('1') :: integer) AS "users_1" LIMIT ('1') :: integer) AS "user_1_join" ON ('true') LIMIT ('1') :: integer) AS "sel_0"`
+	sql := `WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "_x_users" AS (SELECT "id" FROM "_sg_input" i,"users" WHERE "users"."id"= ((i.j->'user'->'connect'->>'id'))::bigint LIMIT 1), "products" AS (INSERT INTO "products" ("name", "price", "created_at", "updated_at", "user_id") SELECT "t"."name", "t"."price", "t"."created_at", "t"."updated_at", "_x_users"."id" FROM "_sg_input" i, "_x_users", json_populate_record(NULL::products, i.j) t RETURNING *) SELECT json_object_agg('product', json_0) FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "user_1_join"."json_1" AS "user", "tags_2_join"."json_2" AS "tags") AS "json_row_0")) AS "json_0" FROM (SELECT "products"."id", "products"."name", "products"."user_id", "products"."tags" FROM "products" LIMIT ('1') :: integer) AS "products_0" LEFT OUTER JOIN LATERAL (SELECT coalesce(json_agg("json_2"), '[]') AS "json_2" FROM (SELECT row_to_json((SELECT "json_row_2" FROM (SELECT "tags_2"."id" AS "id", "tags_2"."name" AS "name") AS "json_row_2")) AS "json_2" FROM (SELECT "tags"."id", "tags"."name" FROM "tags" WHERE ((("tags"."slug") = any ("products_0"."tags"))) LIMIT ('20') :: integer) AS "tags_2" LIMIT ('20') :: integer) AS "json_agg_2") AS "tags_2_join" ON ('true') LEFT OUTER JOIN LATERAL (SELECT row_to_json((SELECT "json_row_1" FROM (SELECT "users_1"."id" AS "id", "users_1"."full_name" AS "full_name", "users_1"."email" AS "email") AS "json_row_1")) AS "json_1" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" WHERE ((("users"."id") = ("products_0"."user_id"))) LIMIT ('1') :: integer) AS "users_1" LIMIT ('1') :: integer) AS "user_1_join" ON ('true') LIMIT ('1') :: integer) AS "sel_0"`

	vars := map[string]json.RawMessage{
		"data": json.RawMessage(`{

@@ -327,7 +327,7 @@ func nestedInsertOneToOneWithConnectArray(t *testing.T) {
		}
	}`

-	sql := `WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "users" AS (SELECT "id" FROM "_sg_input" i,"users" WHERE "users"."id" = ANY((select a::bigint AS list from json_array_elements_text((i.j->'user'->'connect'->>'id')::json) AS a)) LIMIT 1), "products" AS (INSERT INTO "products" ("name", "price", "created_at", "updated_at", "user_id") SELECT "t"."name", "t"."price", "t"."created_at", "t"."updated_at", "users"."id" FROM "_sg_input" i, "users", json_populate_record(NULL::products, i.j) t RETURNING *) SELECT json_object_agg('product', json_0) FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "user_1_join"."json_1" AS "user") AS "json_row_0")) AS "json_0" FROM (SELECT "products"."id", "products"."name", "products"."user_id" FROM "products" LIMIT ('1') :: integer) AS "products_0" LEFT OUTER JOIN LATERAL (SELECT row_to_json((SELECT "json_row_1" FROM (SELECT "users_1"."id" AS "id", "users_1"."full_name" AS "full_name", "users_1"."email" AS "email") AS "json_row_1")) AS "json_1" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" WHERE ((("users"."id") = ("products_0"."user_id"))) LIMIT ('1') :: integer) AS "users_1" LIMIT ('1') :: integer) AS "user_1_join" ON ('true') LIMIT ('1') :: integer) AS "sel_0"`
+	sql := `WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "_x_users" AS (SELECT "id" FROM "_sg_input" i,"users" WHERE "users"."id" = ANY((select a::bigint AS list from json_array_elements_text((i.j->'user'->'connect'->>'id')::json) AS a)) LIMIT 1), "products" AS (INSERT INTO "products" ("name", "price", "created_at", "updated_at", "user_id") SELECT "t"."name", "t"."price", "t"."created_at", "t"."updated_at", "_x_users"."id" FROM "_sg_input" i, "_x_users", json_populate_record(NULL::products, i.j) t RETURNING *) SELECT json_object_agg('product', json_0) FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "user_1_join"."json_1" AS "user") AS "json_row_0")) AS "json_0" FROM (SELECT "products"."id", "products"."name", "products"."user_id" FROM "products" LIMIT ('1') :: integer) AS "products_0" LEFT OUTER JOIN LATERAL (SELECT row_to_json((SELECT "json_row_1" FROM (SELECT "users_1"."id" AS "id", "users_1"."full_name" AS "full_name", "users_1"."email" AS "email") AS "json_row_1")) AS "json_1" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" WHERE ((("users"."id") = ("products_0"."user_id"))) LIMIT ('1') :: integer) AS "users_1" LIMIT ('1') :: integer) AS "user_1_join" ON ('true') LIMIT ('1') :: integer) AS "sel_0"`

	vars := map[string]json.RawMessage{
		"data": json.RawMessage(`{


@@ -446,7 +446,10 @@ func (c *compilerContext) renderUpsert(qc *qcode.QCode, w io.Writer,
	upsert, ok := vars[qc.ActionVar]
	if !ok {
-		return 0, fmt.Errorf("Variable '%s' not defined", qc.ActionVar)
+		return 0, fmt.Errorf("variable '%s' not defined", qc.ActionVar)
+	}
+
+	if len(upsert) == 0 {
+		return 0, fmt.Errorf("variable '%s' is empty", qc.ActionVar)
	}

	if ti.PrimaryCol == nil {
@@ -520,13 +523,16 @@ func (c *compilerContext) renderConnectStmt(qc *qcode.QCode, w io.Writer,
	rel := item.relPC

	// Render only for parent-to-child relationship of one-to-one
+	// For this to work the child needs to be found first so its primary key
+	// can be set in the related column on the parent object.
+	// Eg. Create product and connect a user to it.
	if rel.Type != RelOneToOne {
		return nil
	}

-	io.WriteString(w, `, `)
-	quoted(w, item.ti.Name)
-	io.WriteString(c.w, ` AS (SELECT `)
+	io.WriteString(w, `, "_x_`)
+	io.WriteString(c.w, item.ti.Name)
+	io.WriteString(c.w, `" AS (SELECT `)

	if rel.Left.Array {
		io.WriteString(w, `array_agg(DISTINCT `)
@@ -557,12 +563,15 @@ func (c *compilerContext) renderDisconnectStmt(qc *qcode.QCode, w io.Writer,
	rel := item.relPC

	// Render only for parent-to-child relationship of one-to-one
+	// For this to work the child needs to be found first so its
+	// null value can be set in the related column on the parent object.
+	// Eg. Update product and disconnect the user from it.
	if rel.Type != RelOneToOne {
		return nil
	}

-	io.WriteString(w, `, `)
-	quoted(w, item.ti.Name)
-	io.WriteString(c.w, ` AS (`)
+	io.WriteString(w, `, "_x_`)
+	io.WriteString(c.w, item.ti.Name)
+	io.WriteString(c.w, `" AS (`)

	if rel.Right.Array {
		io.WriteString(c.w, `SELECT `)
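For context on the `_x_` prefix introduced here: in Postgres a WITH clause named after a real table shadows that table for the rest of the statement, so a connect/disconnect CTE called `"users"` would collide both with the actual table and with any sibling CTE a nested insert into `users` generates. That motivation is my reading of the change, but the shadowing itself is standard SQL behavior; a minimal illustration with hypothetical tables:

```sql
-- The CTE "users" hides the real users table for the rest of the statement:
WITH "users" AS (SELECT id FROM users WHERE id = 5)
INSERT INTO products (name, user_id)
SELECT 'widget', "users".id FROM "users";

-- Prefixing keeps the real table (and any CTE named after it) addressable:
WITH "_x_users" AS (SELECT id FROM users WHERE id = 5)
INSERT INTO products (name, user_id)
SELECT 'widget', "_x_users".id FROM "_x_users";
```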


@@ -3,7 +3,6 @@ package psql
import (
	"log"
	"os"
-	"strings"
	"testing"

	"github.com/dosco/super-graph/qcode"

@@ -128,97 +127,7 @@ func TestMain(m *testing.M) {
		log.Fatal(err)
	}

-	tables := []DBTable{
+	schema := getTestSchema()
DBTable{Name: "customers", Type: "table"},
DBTable{Name: "users", Type: "table"},
DBTable{Name: "products", Type: "table"},
DBTable{Name: "purchases", Type: "table"},
DBTable{Name: "tags", Type: "table"},
DBTable{Name: "tag_count", Type: "json"},
}
columns := [][]DBColumn{
[]DBColumn{
DBColumn{ID: 1, Name: "id", Type: "bigint", NotNull: true, PrimaryKey: true, UniqueKey: true},
DBColumn{ID: 2, Name: "full_name", Type: "character varying", NotNull: true, PrimaryKey: false, UniqueKey: false},
DBColumn{ID: 3, Name: "phone", Type: "character varying", NotNull: false, PrimaryKey: false, UniqueKey: false},
DBColumn{ID: 4, Name: "email", Type: "character varying", NotNull: true, PrimaryKey: false, UniqueKey: false},
DBColumn{ID: 5, Name: "encrypted_password", Type: "character varying", NotNull: true, PrimaryKey: false, UniqueKey: false},
DBColumn{ID: 6, Name: "reset_password_token", Type: "character varying", NotNull: false, PrimaryKey: false, UniqueKey: false},
DBColumn{ID: 7, Name: "reset_password_sent_at", Type: "timestamp without time zone", NotNull: false, PrimaryKey: false, UniqueKey: false},
DBColumn{ID: 8, Name: "remember_created_at", Type: "timestamp without time zone", NotNull: false, PrimaryKey: false, UniqueKey: false},
DBColumn{ID: 9, Name: "created_at", Type: "timestamp without time zone", NotNull: true, PrimaryKey: false, UniqueKey: false},
DBColumn{ID: 10, Name: "updated_at", Type: "timestamp without time zone", NotNull: true, PrimaryKey: false, UniqueKey: false}},
[]DBColumn{
DBColumn{ID: 1, Name: "id", Type: "bigint", NotNull: true, PrimaryKey: true, UniqueKey: true},
DBColumn{ID: 2, Name: "full_name", Type: "character varying", NotNull: true, PrimaryKey: false, UniqueKey: false},
DBColumn{ID: 3, Name: "phone", Type: "character varying", NotNull: false, PrimaryKey: false, UniqueKey: false},
DBColumn{ID: 4, Name: "avatar", Type: "character varying", NotNull: false, PrimaryKey: false, UniqueKey: false},
DBColumn{ID: 5, Name: "email", Type: "character varying", NotNull: true, PrimaryKey: false, UniqueKey: false},
DBColumn{ID: 6, Name: "encrypted_password", Type: "character varying", NotNull: true, PrimaryKey: false, UniqueKey: false},
DBColumn{ID: 7, Name: "reset_password_token", Type: "character varying", NotNull: false, PrimaryKey: false, UniqueKey: false},
DBColumn{ID: 8, Name: "reset_password_sent_at", Type: "timestamp without time zone", NotNull: false, PrimaryKey: false, UniqueKey: false},
DBColumn{ID: 9, Name: "remember_created_at", Type: "timestamp without time zone", NotNull: false, PrimaryKey: false, UniqueKey: false},
DBColumn{ID: 10, Name: "created_at", Type: "timestamp without time zone", NotNull: true, PrimaryKey: false, UniqueKey: false},
DBColumn{ID: 11, Name: "updated_at", Type: "timestamp without time zone", NotNull: true, PrimaryKey: false, UniqueKey: false}},
[]DBColumn{
DBColumn{ID: 1, Name: "id", Type: "bigint", NotNull: true, PrimaryKey: true, UniqueKey: true},
DBColumn{ID: 2, Name: "name", Type: "character varying", NotNull: false, PrimaryKey: false, UniqueKey: false},
DBColumn{ID: 3, Name: "description", Type: "text", NotNull: false, PrimaryKey: false, UniqueKey: false},
DBColumn{ID: 4, Name: "price", Type: "numeric(7,2)", NotNull: false, PrimaryKey: false, UniqueKey: false},
DBColumn{ID: 5, Name: "user_id", Type: "bigint", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "users", FKeyColID: []int16{1}},
DBColumn{ID: 6, Name: "created_at", Type: "timestamp without time zone", NotNull: true, PrimaryKey: false, UniqueKey: false},
DBColumn{ID: 7, Name: "updated_at", Type: "timestamp without time zone", NotNull: true, PrimaryKey: false, UniqueKey: false},
DBColumn{ID: 8, Name: "tsv", Type: "tsvector", NotNull: false, PrimaryKey: false, UniqueKey: false},
DBColumn{ID: 9, Name: "tags", Type: "text[]", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "tags", FKeyColID: []int16{3}, Array: true},
DBColumn{ID: 9, Name: "tag_count", Type: "json", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "tag_count", FKeyColID: []int16{}}},
[]DBColumn{
DBColumn{ID: 1, Name: "id", Type: "bigint", NotNull: true, PrimaryKey: true, UniqueKey: true},
DBColumn{ID: 2, Name: "customer_id", Type: "bigint", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "customers", FKeyColID: []int16{1}},
DBColumn{ID: 3, Name: "product_id", Type: "bigint", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "products", FKeyColID: []int16{1}},
DBColumn{ID: 4, Name: "sale_type", Type: "character varying", NotNull: false, PrimaryKey: false, UniqueKey: false},
DBColumn{ID: 5, Name: "quantity", Type: "integer", NotNull: false, PrimaryKey: false, UniqueKey: false},
DBColumn{ID: 6, Name: "due_date", Type: "timestamp without time zone", NotNull: false, PrimaryKey: false, UniqueKey: false},
DBColumn{ID: 7, Name: "returned", Type: "timestamp without time zone", NotNull: false, PrimaryKey: false, UniqueKey: false}},
[]DBColumn{
DBColumn{ID: 1, Name: "id", Type: "bigint", NotNull: true, PrimaryKey: true, UniqueKey: true},
DBColumn{ID: 2, Name: "name", Type: "text", NotNull: false, PrimaryKey: false, UniqueKey: false},
DBColumn{ID: 3, Name: "slug", Type: "text", NotNull: false, PrimaryKey: false, UniqueKey: false}},
[]DBColumn{
DBColumn{ID: 1, Name: "tag_id", Type: "bigint", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "tags", FKeyColID: []int16{1}},
DBColumn{ID: 2, Name: "count", Type: "int", NotNull: false, PrimaryKey: false, UniqueKey: false}},
}
for i := range tables {
tables[i].Key = strings.ToLower(tables[i].Name)
for n := range columns[i] {
columns[i][n].Key = strings.ToLower(columns[i][n].Name)
}
}
schema := &DBSchema{
ver: 110000,
t: make(map[string]*DBTableInfo),
rm: make(map[string]map[string]*DBRel),
}
aliases := map[string][]string{
"users": []string{"mes"},
}
for i, t := range tables {
err := schema.addTable(t, columns[i], aliases)
if err != nil {
log.Fatal(err)
}
}
for i, t := range tables {
err := schema.updateRelationships(t, columns[i])
if err != nil {
log.Fatal(err)
}
}
	vars := NewVariables(map[string]string{
		"admin_account_id": "5",


@@ -825,13 +825,11 @@ func (c *compilerContext) renderFrom(sel *qcode.Select, ti *DBTableInfo, rel *DB
}

func (c *compilerContext) renderOrderByColumns(sel *qcode.Select, ti *DBTableInfo) {
-	colsRendered := len(sel.Cols) != 0
+	//colsRendered := len(sel.Cols) != 0

	for i := range sel.OrderBy {
-		if colsRendered {
-			//io.WriteString(w, ", ")
-			io.WriteString(c.w, `, `)
-		}
+		//io.WriteString(w, ", ")
+		io.WriteString(c.w, `, `)

		col := sel.OrderBy[i].Col
		//fmt.Fprintf(w, `"%s_%d"."%s" AS "%s_%d_%s_ob"`,

@@ -151,6 +151,7 @@ SELECT
  pg_catalog.format_type(f.atttypid,f.atttypmod) AS type,
  CASE
    WHEN f.attndims != 0 THEN true
+    WHEN right(pg_catalog.format_type(f.atttypid,f.atttypmod), 2) = '[]' THEN true
    ELSE false
  END AS array,
  CASE

@@ -175,7 +176,7 @@ FROM pg_attribute f
  LEFT JOIN pg_namespace n ON n.oid = c.relnamespace
  LEFT JOIN pg_constraint p ON p.conrelid = c.oid AND f.attnum = ANY (p.conkey)
  LEFT JOIN pg_class AS g ON p.confrelid = g.oid
-WHERE c.relkind = ('r'::char)
+WHERE c.relkind IN ('r', 'v', 'm', 'f')
  AND n.nspname = $1 -- Replace with Schema name
  AND c.relname = $2 -- Replace with table name
  AND f.attnum > 0
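For reference, `relkind` in `pg_catalog.pg_class` is a one-letter code: 'r' ordinary table, 'v' view, 'm' materialized view, 'f' foreign table. The widened filter therefore lets column introspection work against views and materialized views, not just plain tables. You can see what it now matches with:

```sql
-- List every relation the broadened introspection query would consider
SELECT n.nspname, c.relname, c.relkind
FROM pg_catalog.pg_class c
JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
WHERE c.relkind IN ('r', 'v', 'm', 'f');
```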

psql/test_schema.go Normal file

@@ -0,0 +1,102 @@
package psql

import (
	"log"
	"strings"
)

// getTestSchema builds the in-memory DBSchema fixture shared by the
// compiler tests and the fuzzer.
func getTestSchema() *DBSchema {
	tables := []DBTable{
		DBTable{Name: "customers", Type: "table"},
		DBTable{Name: "users", Type: "table"},
		DBTable{Name: "products", Type: "table"},
		DBTable{Name: "purchases", Type: "table"},
		DBTable{Name: "tags", Type: "table"},
		DBTable{Name: "tag_count", Type: "json"},
	}

	columns := [][]DBColumn{
		[]DBColumn{
			DBColumn{ID: 1, Name: "id", Type: "bigint", NotNull: true, PrimaryKey: true, UniqueKey: true},
			DBColumn{ID: 2, Name: "full_name", Type: "character varying", NotNull: true, PrimaryKey: false, UniqueKey: false},
			DBColumn{ID: 3, Name: "phone", Type: "character varying", NotNull: false, PrimaryKey: false, UniqueKey: false},
			DBColumn{ID: 4, Name: "email", Type: "character varying", NotNull: true, PrimaryKey: false, UniqueKey: false},
			DBColumn{ID: 5, Name: "encrypted_password", Type: "character varying", NotNull: true, PrimaryKey: false, UniqueKey: false},
			DBColumn{ID: 6, Name: "reset_password_token", Type: "character varying", NotNull: false, PrimaryKey: false, UniqueKey: false},
			DBColumn{ID: 7, Name: "reset_password_sent_at", Type: "timestamp without time zone", NotNull: false, PrimaryKey: false, UniqueKey: false},
			DBColumn{ID: 8, Name: "remember_created_at", Type: "timestamp without time zone", NotNull: false, PrimaryKey: false, UniqueKey: false},
			DBColumn{ID: 9, Name: "created_at", Type: "timestamp without time zone", NotNull: true, PrimaryKey: false, UniqueKey: false},
			DBColumn{ID: 10, Name: "updated_at", Type: "timestamp without time zone", NotNull: true, PrimaryKey: false, UniqueKey: false}},
		[]DBColumn{
			DBColumn{ID: 1, Name: "id", Type: "bigint", NotNull: true, PrimaryKey: true, UniqueKey: true},
			DBColumn{ID: 2, Name: "full_name", Type: "character varying", NotNull: true, PrimaryKey: false, UniqueKey: false},
			DBColumn{ID: 3, Name: "phone", Type: "character varying", NotNull: false, PrimaryKey: false, UniqueKey: false},
			DBColumn{ID: 4, Name: "avatar", Type: "character varying", NotNull: false, PrimaryKey: false, UniqueKey: false},
			DBColumn{ID: 5, Name: "email", Type: "character varying", NotNull: true, PrimaryKey: false, UniqueKey: false},
			DBColumn{ID: 6, Name: "encrypted_password", Type: "character varying", NotNull: true, PrimaryKey: false, UniqueKey: false},
			DBColumn{ID: 7, Name: "reset_password_token", Type: "character varying", NotNull: false, PrimaryKey: false, UniqueKey: false},
			DBColumn{ID: 8, Name: "reset_password_sent_at", Type: "timestamp without time zone", NotNull: false, PrimaryKey: false, UniqueKey: false},
			DBColumn{ID: 9, Name: "remember_created_at", Type: "timestamp without time zone", NotNull: false, PrimaryKey: false, UniqueKey: false},
			DBColumn{ID: 10, Name: "created_at", Type: "timestamp without time zone", NotNull: true, PrimaryKey: false, UniqueKey: false},
			DBColumn{ID: 11, Name: "updated_at", Type: "timestamp without time zone", NotNull: true, PrimaryKey: false, UniqueKey: false}},
		[]DBColumn{
			DBColumn{ID: 1, Name: "id", Type: "bigint", NotNull: true, PrimaryKey: true, UniqueKey: true},
			DBColumn{ID: 2, Name: "name", Type: "character varying", NotNull: false, PrimaryKey: false, UniqueKey: false},
			DBColumn{ID: 3, Name: "description", Type: "text", NotNull: false, PrimaryKey: false, UniqueKey: false},
			DBColumn{ID: 4, Name: "price", Type: "numeric(7,2)", NotNull: false, PrimaryKey: false, UniqueKey: false},
			DBColumn{ID: 5, Name: "user_id", Type: "bigint", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "users", FKeyColID: []int16{1}},
			DBColumn{ID: 6, Name: "created_at", Type: "timestamp without time zone", NotNull: true, PrimaryKey: false, UniqueKey: false},
			DBColumn{ID: 7, Name: "updated_at", Type: "timestamp without time zone", NotNull: true, PrimaryKey: false, UniqueKey: false},
			DBColumn{ID: 8, Name: "tsv", Type: "tsvector", NotNull: false, PrimaryKey: false, UniqueKey: false},
			DBColumn{ID: 9, Name: "tags", Type: "text[]", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "tags", FKeyColID: []int16{3}, Array: true},
			DBColumn{ID: 9, Name: "tag_count", Type: "json", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "tag_count", FKeyColID: []int16{}}},
		[]DBColumn{
			DBColumn{ID: 1, Name: "id", Type: "bigint", NotNull: true, PrimaryKey: true, UniqueKey: true},
			DBColumn{ID: 2, Name: "customer_id", Type: "bigint", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "customers", FKeyColID: []int16{1}},
			DBColumn{ID: 3, Name: "product_id", Type: "bigint", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "products", FKeyColID: []int16{1}},
			DBColumn{ID: 4, Name: "sale_type", Type: "character varying", NotNull: false, PrimaryKey: false, UniqueKey: false},
			DBColumn{ID: 5, Name: "quantity", Type: "integer", NotNull: false, PrimaryKey: false, UniqueKey: false},
			DBColumn{ID: 6, Name: "due_date", Type: "timestamp without time zone", NotNull: false, PrimaryKey: false, UniqueKey: false},
			DBColumn{ID: 7, Name: "returned", Type: "timestamp without time zone", NotNull: false, PrimaryKey: false, UniqueKey: false}},
		[]DBColumn{
			DBColumn{ID: 1, Name: "id", Type: "bigint", NotNull: true, PrimaryKey: true, UniqueKey: true},
			DBColumn{ID: 2, Name: "name", Type: "text", NotNull: false, PrimaryKey: false, UniqueKey: false},
			DBColumn{ID: 3, Name: "slug", Type: "text", NotNull: false, PrimaryKey: false, UniqueKey: false}},
		[]DBColumn{
			DBColumn{ID: 1, Name: "tag_id", Type: "bigint", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "tags", FKeyColID: []int16{1}},
			DBColumn{ID: 2, Name: "count", Type: "int", NotNull: false, PrimaryKey: false, UniqueKey: false}},
	}

	for i := range tables {
		tables[i].Key = strings.ToLower(tables[i].Name)
		for n := range columns[i] {
			columns[i][n].Key = strings.ToLower(columns[i][n].Name)
		}
	}

	schema := &DBSchema{
		ver: 110000,
		t:   make(map[string]*DBTableInfo),
		rm:  make(map[string]map[string]*DBRel),
	}

	aliases := map[string][]string{
		"users": []string{"mes"},
	}

	for i, t := range tables {
		err := schema.addTable(t, columns[i], aliases)
		if err != nil {
			log.Fatal(err)
		}
	}

	for i, t := range tables {
		err := schema.updateRelationships(t, columns[i])
		if err != nil {
			log.Fatal(err)
		}
	}

	return schema
}


@@ -15,7 +15,10 @@ func (c *compilerContext) renderUpdate(qc *qcode.QCode, w io.Writer,
	update, ok := vars[qc.ActionVar]
	if !ok {
-		return 0, fmt.Errorf("Variable '%s' not !defined", qc.ActionVar)
+		return 0, fmt.Errorf("variable '%s' not !defined", qc.ActionVar)
+	}
+
+	if len(update) == 0 {
+		return 0, fmt.Errorf("variable '%s' is empty", qc.ActionVar)
	}

	io.WriteString(c.w, `WITH "_sg_input" AS (SELECT '{{`)
@@ -165,17 +168,28 @@ func renderNestedUpdateRelColumns(w io.Writer, item kvitem, values bool) error {
	for _, v := range item.items {
		if v._ctype > 0 && v.relCP.Type == RelOneToMany {
			if values {
-				colWithTable(w, v.relCP.Left.Table, v.relCP.Left.Col)
-			} else {
-				if v.relCP.Right.Array {
-					io.WriteString(w, `array_remove(`)
-					colWithTable(w, v.relCP.Left.Table, v.relCP.Left.Col)
-					io.WriteString(w, `, `)
-					quoted(w, v.relCP.Right.Col)
-					io.WriteString(w, `)`)
-				} else {
-					quoted(w, v.relCP.Right.Col)
-				}
+				// if v.relCP.Right.Array {
+				// 	io.WriteString(w, `array_diff(`)
+				// 	colWithTable(w, v.relCP.Right.Table, v.relCP.Right.Col)
+				// 	io.WriteString(w, `, `)
+				// }
+
+				if v._ctype > 0 {
+					io.WriteString(w, `"_x_`)
+					io.WriteString(w, v.relCP.Left.Table)
+					io.WriteString(w, `".`)
+					quoted(w, v.relCP.Left.Col)
+				} else {
+					colWithTable(w, v.relCP.Left.Table, v.relCP.Left.Col)
+				}
+
+				// if v.relCP.Right.Array {
+				// 	io.WriteString(w, `)`)
+				// }
+			} else {
+				quoted(w, v.relCP.Right.Col)
			}
		}
	}
@@ -184,12 +198,13 @@ func renderNestedUpdateRelColumns(w io.Writer, item kvitem, values bool) error {
}

func renderNestedUpdateRelTables(w io.Writer, item kvitem) error {
-	// Render child foreign key columns if child-to-parent
+	// Render tables needed to set values if child-to-parent
	// relationship is one-to-many
	for _, v := range item.items {
		if v._ctype > 0 && v.relCP.Type == RelOneToMany {
-			quoted(w, v.relCP.Left.Table)
-			io.WriteString(w, `, `)
+			io.WriteString(w, `"_x_`)
+			io.WriteString(w, v.relCP.Left.Table)
+			io.WriteString(w, `", `)
		}
	}

View File

@@ -238,9 +238,9 @@ func nestedUpdateOneToOneWithConnect(t *testing.T) {
		}
	}`

-	sql1 := `WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "users" AS (SELECT "id" FROM "_sg_input" i,"users" WHERE "users"."id"= ((i.j->'user'->'connect'->>'id'))::bigint AND "users"."email"= ((i.j->'user'->'connect'->>'email'))::character varying LIMIT 1), "products" AS (UPDATE "products" SET ("name", "price", "user_id") = (SELECT "t"."name", "t"."price", "users"."id" FROM "_sg_input" i, "users", json_populate_record(NULL::products, i.j) t) WHERE (("products"."id") = 9) RETURNING "products".*) SELECT json_object_agg('product', json_0) FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "user_1_join"."json_1" AS "user") AS "json_row_0")) AS "json_0" FROM (SELECT "products"."id", "products"."name", "products"."user_id" FROM "products" LIMIT ('1') :: integer) AS "products_0" LEFT OUTER JOIN LATERAL (SELECT row_to_json((SELECT "json_row_1" FROM (SELECT "users_1"."id" AS "id", "users_1"."full_name" AS "full_name", "users_1"."email" AS "email") AS "json_row_1")) AS "json_1" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" WHERE ((("users"."id") = ("products_0"."user_id"))) LIMIT ('1') :: integer) AS "users_1" LIMIT ('1') :: integer) AS "user_1_join" ON ('true') LIMIT ('1') :: integer) AS "sel_0"`
-	sql2 := `WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "users" AS (SELECT "id" FROM "_sg_input" i,"users" WHERE "users"."email"= ((i.j->'user'->'connect'->>'email'))::character varying AND "users"."id"= ((i.j->'user'->'connect'->>'id'))::bigint LIMIT 1), "products" AS (UPDATE "products" SET ("name", "price", "user_id") = (SELECT "t"."name", "t"."price", "users"."id" FROM "_sg_input" i, "users", json_populate_record(NULL::products, i.j) t) WHERE (("products"."id") = 9) RETURNING "products".*) SELECT json_object_agg('product', json_0) FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "user_1_join"."json_1" AS "user") AS "json_row_0")) AS "json_0" FROM (SELECT "products"."id", "products"."name", "products"."user_id" FROM "products" LIMIT ('1') :: integer) AS "products_0" LEFT OUTER JOIN LATERAL (SELECT row_to_json((SELECT "json_row_1" FROM (SELECT "users_1"."id" AS "id", "users_1"."full_name" AS "full_name", "users_1"."email" AS "email") AS "json_row_1")) AS "json_1" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" WHERE ((("users"."id") = ("products_0"."user_id"))) LIMIT ('1') :: integer) AS "users_1" LIMIT ('1') :: integer) AS "user_1_join" ON ('true') LIMIT ('1') :: integer) AS "sel_0"`
+	sql1 := `WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "_x_users" AS (SELECT "id" FROM "_sg_input" i,"users" WHERE "users"."id"= ((i.j->'user'->'connect'->>'id'))::bigint AND "users"."email"= ((i.j->'user'->'connect'->>'email'))::character varying LIMIT 1), "products" AS (UPDATE "products" SET ("name", "price", "user_id") = (SELECT "t"."name", "t"."price", "_x_users"."id" FROM "_sg_input" i, "_x_users", json_populate_record(NULL::products, i.j) t) WHERE (("products"."id") = 9) RETURNING "products".*) SELECT json_object_agg('product', json_0) FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "user_1_join"."json_1" AS "user") AS "json_row_0")) AS "json_0" FROM (SELECT "products"."id", "products"."name", "products"."user_id" FROM "products" LIMIT ('1') :: integer) AS "products_0" LEFT OUTER JOIN LATERAL (SELECT row_to_json((SELECT "json_row_1" FROM (SELECT "users_1"."id" AS "id", "users_1"."full_name" AS "full_name", "users_1"."email" AS "email") AS "json_row_1")) AS "json_1" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" WHERE ((("users"."id") = ("products_0"."user_id"))) LIMIT ('1') :: integer) AS "users_1" LIMIT ('1') :: integer) AS "user_1_join" ON ('true') LIMIT ('1') :: integer) AS "sel_0"`
+	sql2 := `WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "_x_users" AS (SELECT "id" FROM "_sg_input" i,"users" WHERE "users"."email"= ((i.j->'user'->'connect'->>'email'))::character varying AND "users"."id"= ((i.j->'user'->'connect'->>'id'))::bigint LIMIT 1), "products" AS (UPDATE "products" SET ("name", "price", "user_id") = (SELECT "t"."name", "t"."price", "_x_users"."id" FROM "_sg_input" i, "_x_users", json_populate_record(NULL::products, i.j) t) WHERE (("products"."id") = 9) RETURNING "products".*) SELECT json_object_agg('product', json_0) FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "user_1_join"."json_1" AS "user") AS "json_row_0")) AS "json_0" FROM (SELECT "products"."id", "products"."name", "products"."user_id" FROM "products" LIMIT ('1') :: integer) AS "products_0" LEFT OUTER JOIN LATERAL (SELECT row_to_json((SELECT "json_row_1" FROM (SELECT "users_1"."id" AS "id", "users_1"."full_name" AS "full_name", "users_1"."email" AS "email") AS "json_row_1")) AS "json_1" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" WHERE ((("users"."id") = ("products_0"."user_id"))) LIMIT ('1') :: integer) AS "users_1" LIMIT ('1') :: integer) AS "user_1_join" ON ('true') LIMIT ('1') :: integer) AS "sel_0"`

	vars := map[string]json.RawMessage{
		"data": json.RawMessage(`{

@@ -273,7 +273,7 @@ func nestedUpdateOneToOneWithDisconnect(t *testing.T) {
		}
	}`

-	sql := `WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "users" AS (SELECT * FROM (VALUES(NULL::bigint)) AS LOOKUP("id")), "products" AS (UPDATE "products" SET ("name", "price", "user_id") = (SELECT "t"."name", "t"."price", "users"."id" FROM "_sg_input" i, "users", json_populate_record(NULL::products, i.j) t) WHERE (("products"."id") = 2) RETURNING "products".*) SELECT json_object_agg('product', json_0) FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "products_0"."user_id" AS "user_id") AS "json_row_0")) AS "json_0" FROM (SELECT "products"."id", "products"."name", "products"."user_id" FROM "products" LIMIT ('1') :: integer) AS "products_0" LIMIT ('1') :: integer) AS "sel_0"`
+	sql := `WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "_x_users" AS (SELECT * FROM (VALUES(NULL::bigint)) AS LOOKUP("id")), "products" AS (UPDATE "products" SET ("name", "price", "user_id") = (SELECT "t"."name", "t"."price", "_x_users"."id" FROM "_sg_input" i, "_x_users", json_populate_record(NULL::products, i.j) t) WHERE (("products"."id") = 2) RETURNING "products".*) SELECT json_object_agg('product', json_0) FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "products_0"."user_id" AS "user_id") AS "json_row_0")) AS "json_0" FROM (SELECT "products"."id", "products"."name", "products"."user_id" FROM "products" LIMIT ('1') :: integer) AS "products_0" LIMIT ('1') :: integer) AS "sel_0"`

	vars := map[string]json.RawMessage{
		"data": json.RawMessage(`{

View File

@@ -4,7 +4,11 @@ package qcode
// FuzzerEntrypoint for Fuzzbuzz
func Fuzz(data []byte) int {
-	GetQType(string(data))
+	qt := GetQType(string(data))
+	if qt > QTUpsert {
+		panic("qt > QTUpsert")
+	}

	qcompile, _ := NewCompiler(Config{})
	_, err := qcompile.Compile(data, "user")

41
serv/actions.go Normal file
View File

@@ -0,0 +1,41 @@
package serv
import (
"fmt"
"net/http"
)
type actionFn func(w http.ResponseWriter, r *http.Request) error
func newAction(a configAction) (http.Handler, error) {
var fn actionFn
var err error
if len(a.SQL) != 0 {
fn, err = newSQLAction(a)
} else {
return nil, fmt.Errorf("invalid config for action '%s'", a.Name)
}
if err != nil {
return nil, err
}
httpFn := func(w http.ResponseWriter, r *http.Request) {
if err := fn(w, r); err != nil {
errlog.Error().Err(err).Send()
errorResp(w, err)
}
}
return http.HandlerFunc(httpFn), nil
}
func newSQLAction(a configAction) (actionFn, error) {
fn := func(w http.ResponseWriter, r *http.Request) error {
_, err := db.Exec(r.Context(), a.SQL)
return err
}
return fn, nil
}
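
For context, a minimal self-contained sketch of the pattern this new file follows: an action maps a configured name and SQL string to an HTTP handler that runs the SQL on each request. The `action` type, the database/sql stand-in for the pgx pool, and the DSN below are illustrative assumptions, not part of this commit:

package main

import (
	"database/sql"
	"fmt"
	"log"
	"net/http"

	_ "github.com/lib/pq" // assumed Postgres driver, for the sketch only
)

// action mirrors the shape of configAction: a name plus the SQL it runs.
type action struct {
	Name string
	SQL  string
}

// newSQLAction returns a handler that executes the action's SQL on each
// request, in the spirit of newAction/newSQLAction above.
func newSQLAction(db *sql.DB, a action) (http.Handler, error) {
	if len(a.SQL) == 0 {
		return nil, fmt.Errorf("invalid config for action '%s'", a.Name)
	}
	fn := func(w http.ResponseWriter, r *http.Request) {
		if _, err := db.ExecContext(r.Context(), a.SQL); err != nil {
			log.Println(err)
			http.Error(w, err.Error(), http.StatusInternalServerError)
		}
	}
	return http.HandlerFunc(fn), nil
}

func main() {
	// Assumed connection string; replace with your own.
	db, err := sql.Open("postgres", "postgres://localhost/app?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	h, err := newSQLAction(db, action{
		Name: "refresh_leaderboard_users",
		SQL:  `REFRESH MATERIALIZED VIEW CONCURRENTLY "leaderboard_users"`,
	})
	if err != nil {
		log.Fatal(err)
	}
	http.Handle("/api/v1/actions/refresh_leaderboard_users", h)
	log.Fatal(http.ListenAndServe(":8080", nil))
}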

View File

@@ -1,320 +0,0 @@
package serv
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"log"
"os"
"path"
"sort"
"strings"
)
const (
AL_QUERY int = iota + 1
AL_VARS
)
type allowItem struct {
name string
hash string
uri string
gql string
vars json.RawMessage
}
var _allowList allowList
type allowList struct {
list []*allowItem
index map[string]int
filepath string
saveChan chan *allowItem
active bool
}
func initAllowList(cpath string) {
_allowList = allowList{
index: make(map[string]int),
saveChan: make(chan *allowItem),
active: true,
}
if len(cpath) != 0 {
fp := path.Join(cpath, "allow.list")
if _, err := os.Stat(fp); err == nil {
_allowList.filepath = fp
} else if !os.IsNotExist(err) {
errlog.Fatal().Err(err).Send()
}
}
if len(_allowList.filepath) == 0 {
fp := "./allow.list"
if _, err := os.Stat(fp); err == nil {
_allowList.filepath = fp
} else if !os.IsNotExist(err) {
errlog.Fatal().Err(err).Send()
}
}
if len(_allowList.filepath) == 0 {
fp := "./config/allow.list"
if _, err := os.Stat(fp); err == nil {
_allowList.filepath = fp
} else if !os.IsNotExist(err) {
errlog.Fatal().Err(err).Send()
}
}
if len(_allowList.filepath) == 0 {
if conf.Production {
errlog.Fatal().Msg("allow.list not found")
}
if len(cpath) == 0 {
_allowList.filepath = "./config/allow.list"
} else {
_allowList.filepath = path.Join(cpath, "allow.list")
}
logger.Warn().Msg("allow.list not found")
} else {
_allowList.load()
}
go func() {
for v := range _allowList.saveChan {
_allowList.save(v)
}
}()
}
func (al *allowList) add(req *gqlReq) {
if al.saveChan == nil || len(req.ref) == 0 || len(req.Query) == 0 {
return
}
var query string
for i := 0; i < len(req.Query); i++ {
c := req.Query[i]
if c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z' {
query = req.Query
break
} else if c == '{' {
query = "query " + req.Query
break
}
}
al.saveChan <- &allowItem{
uri: req.ref,
gql: query,
vars: req.Vars,
}
}
func (al *allowList) upsert(query, vars []byte, uri string) {
q := string(query)
hash := gqlHash(q, vars, "")
name := gqlName(q)
var key string
if len(name) != 0 {
key = name
} else {
key = hash
}
if i, ok := al.index[key]; !ok {
al.list = append(al.list, &allowItem{
name: name,
hash: hash,
uri: uri,
gql: q,
vars: vars,
})
al.index[key] = len(al.list) - 1
} else {
item := al.list[i]
item.name = name
item.hash = hash
item.gql = q
item.vars = vars
}
}
func (al *allowList) load() {
b, err := ioutil.ReadFile(al.filepath)
if err != nil {
log.Fatal(err)
}
if len(b) == 0 {
return
}
var uri string
var varBytes []byte
s, e, c := 0, 0, 0
ty := 0
for {
if c == 0 && b[e] == '#' {
s = e
for e < len(b) && b[e] != '\n' {
e++
}
if (e - s) > 2 {
uri = strings.TrimSpace(string(b[(s + 1):e]))
}
}
if e >= len(b) {
break
}
if matchPrefix(b, e, "query") || matchPrefix(b, e, "mutation") {
if c == 0 {
s = e
}
ty = AL_QUERY
} else if matchPrefix(b, e, "variables") {
if c == 0 {
s = e + len("variables") + 1
}
ty = AL_VARS
} else if b[e] == '{' {
c++
} else if b[e] == '}' {
c--
if c == 0 {
if ty == AL_QUERY {
al.upsert(b[s:(e+1)], varBytes, uri)
varBytes = nil
} else if ty == AL_VARS {
varBytes = b[s:(e + 1)]
}
ty = 0
}
}
e++
if e >= len(b) {
break
}
}
}
func (al *allowList) save(item *allowItem) {
var err error
item.hash = gqlHash(item.gql, item.vars, "")
item.name = gqlName(item.gql)
if len(item.name) == 0 {
key := item.hash
if _, ok := al.index[key]; ok {
return
}
al.list = append(al.list, item)
al.index[key] = len(al.list) - 1
} else {
key := item.name
if i, ok := al.index[key]; ok {
if al.list[i].hash == item.hash {
return
}
al.list[i] = item
} else {
al.list = append(al.list, item)
al.index[key] = len(al.list) - 1
}
}
f, err := os.Create(al.filepath)
if err != nil {
logger.Warn().Err(err).Msgf("Failed to write allow list: %s", al.filepath)
return
}
defer f.Close()
keys := []string{}
urlMap := make(map[string][]*allowItem)
for _, v := range al.list {
urlMap[v.uri] = append(urlMap[v.uri], v)
}
for k := range urlMap {
keys = append(keys, k)
}
sort.Strings(keys)
for i := range keys {
k := keys[i]
v := urlMap[k]
if _, err := f.WriteString(fmt.Sprintf("# %s\n\n", k)); err != nil {
logger.Error().Err(err).Send()
return
}
for i := range v {
if len(v[i].vars) != 0 && !bytes.Equal(v[i].vars, []byte("{}")) {
vj, err := json.MarshalIndent(v[i].vars, "", " ")
if err != nil {
logger.Warn().Err(err).Msg("Failed to write allow list 'vars' to file")
continue
}
_, err = f.WriteString(fmt.Sprintf("variables %s\n\n", vj))
if err != nil {
logger.Error().Err(err).Send()
return
}
}
if v[i].gql[0] == '{' {
_, err = f.WriteString(fmt.Sprintf("query %s\n\n", v[i].gql))
} else {
_, err = f.WriteString(fmt.Sprintf("%s\n\n", v[i].gql))
}
if err != nil {
logger.Error().Err(err).Send()
return
}
}
}
}
func matchPrefix(b []byte, i int, s string) bool {
if (len(b) - i) < len(s) {
return false
}
for n := 0; n < len(s); n++ {
if b[(i+n)] != s[n] {
return false
}
}
return true
}

View File

@@ -3,7 +3,6 @@ package serv
import (
	"context"
	"net/http"
-	"strings"
)

type ctxkey int

@@ -14,7 +13,7 @@
	userRoleKey
)

-func headerAuth(next http.Handler) http.HandlerFunc {
+func headerAuth(authc configAuth, next http.Handler) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		ctx := r.Context()

@@ -37,28 +36,53 @@ func headerAuth(next http.Handler) http.HandlerFunc {
	}
}

-func withAuth(next http.Handler) http.Handler {
-	at := conf.Auth.Type
-	ru := conf.Auth.Rails.URL
+func headerHandler(authc configAuth, next http.Handler) http.HandlerFunc {
+	hdr := authc.Header

-	if conf.Auth.CredsInHeader {
-		next = headerAuth(next)
+	if len(hdr.Name) == 0 {
+		errlog.Fatal().Str("auth", authc.Name).Msg("no header.name defined")
	}

-	switch at {
+	if !hdr.Exists && len(hdr.Value) == 0 {
+		errlog.Fatal().Str("auth", authc.Name).Msg("no header.value defined")
+	}
+
+	return func(w http.ResponseWriter, r *http.Request) {
+		var fo1 bool
+		value := r.Header.Get(hdr.Name)
+
+		switch {
+		case hdr.Exists:
+			fo1 = (len(value) == 0)
+
+		default:
+			fo1 = (value != hdr.Value)
+		}
+
+		if fo1 {
+			http.Error(w, "401 unauthorized", http.StatusUnauthorized)
+			return
+		}
+
+		next.ServeHTTP(w, r)
+	}
+}
+
+func withAuth(next http.Handler, authc configAuth) http.Handler {
+	if authc.CredsInHeader {
+		next = headerAuth(authc, next)
+	}
+
+	switch authc.Type {
	case "rails":
-		if strings.HasPrefix(ru, "memcache:") {
-			return railsMemcacheHandler(next)
-		}
-		if strings.HasPrefix(ru, "redis:") {
-			return railsRedisHandler(next)
-		}
-		return railsCookieHandler(next)
+		return railsHandler(authc, next)

	case "jwt":
-		return jwtHandler(next)
+		return jwtHandler(authc, next)
+
+	case "header":
+		return headerHandler(authc, next)
	}

	return next

View File

@@ -14,18 +14,18 @@
	jwtAuth0 int = iota + 1
)

-func jwtHandler(next http.Handler) http.HandlerFunc {
+func jwtHandler(authc configAuth, next http.Handler) http.HandlerFunc {
	var key interface{}
	var jwtProvider int

-	cookie := conf.Auth.Cookie
+	cookie := authc.Cookie

-	if conf.Auth.JWT.Provider == "auth0" {
+	if authc.JWT.Provider == "auth0" {
		jwtProvider = jwtAuth0
	}

-	secret := conf.Auth.JWT.Secret
-	publicKeyFile := conf.Auth.JWT.PubKeyFile
+	secret := authc.JWT.Secret
+	publicKeyFile := authc.JWT.PubKeyFile

	switch {
	case len(secret) != 0:

@@ -37,7 +37,7 @@ func jwtHandler(next http.Handler) http.HandlerFunc {
		errlog.Fatal().Err(err).Send()
	}

-	switch conf.Auth.JWT.PubKeyType {
+	switch authc.JWT.PubKeyType {
	case "ecdsa":
		key, err = jwt.ParseECPublicKeyFromPEM(kd)

View File

@@ -6,32 +6,47 @@ import (
	"fmt"
	"net/http"
	"net/url"
+	"strings"

	"github.com/bradfitz/gomemcache/memcache"
	"github.com/dosco/super-graph/rails"
	"github.com/garyburd/redigo/redis"
)

-func railsRedisHandler(next http.Handler) http.HandlerFunc {
-	cookie := conf.Auth.Cookie
+func railsHandler(authc configAuth, next http.Handler) http.HandlerFunc {
+	ru := authc.Rails.URL
+
+	if strings.HasPrefix(ru, "memcache:") {
+		return railsMemcacheHandler(authc, next)
+	}
+
+	if strings.HasPrefix(ru, "redis:") {
+		return railsRedisHandler(authc, next)
+	}
+
+	return railsCookieHandler(authc, next)
+}
+
+func railsRedisHandler(authc configAuth, next http.Handler) http.HandlerFunc {
+	cookie := authc.Cookie
	if len(cookie) == 0 {
		errlog.Fatal().Msg("no auth.cookie defined")
	}

-	if len(conf.Auth.Rails.URL) == 0 {
+	if len(authc.Rails.URL) == 0 {
		errlog.Fatal().Msg("no auth.rails.url defined")
	}

	rp := &redis.Pool{
-		MaxIdle:   conf.Auth.Rails.MaxIdle,
-		MaxActive: conf.Auth.Rails.MaxActive,
+		MaxIdle:   authc.Rails.MaxIdle,
+		MaxActive: authc.Rails.MaxActive,
		Dial: func() (redis.Conn, error) {
-			c, err := redis.DialURL(conf.Auth.Rails.URL)
+			c, err := redis.DialURL(authc.Rails.URL)
			if err != nil {
				errlog.Fatal().Err(err).Send()
			}

-			pwd := conf.Auth.Rails.Password
+			pwd := authc.Rails.Password
			if len(pwd) != 0 {
				if _, err := c.Do("AUTH", pwd); err != nil {
					errlog.Fatal().Err(err).Send()

@@ -66,17 +81,17 @@ func railsRedisHandler(next http.Handler) http.HandlerFunc {
	}
}

-func railsMemcacheHandler(next http.Handler) http.HandlerFunc {
-	cookie := conf.Auth.Cookie
+func railsMemcacheHandler(authc configAuth, next http.Handler) http.HandlerFunc {
+	cookie := authc.Cookie
	if len(cookie) == 0 {
		errlog.Fatal().Msg("no auth.cookie defined")
	}

-	if len(conf.Auth.Rails.URL) == 0 {
+	if len(authc.Rails.URL) == 0 {
		errlog.Fatal().Msg("no auth.rails.url defined")
	}

-	rURL, err := url.Parse(conf.Auth.Rails.URL)
+	rURL, err := url.Parse(authc.Rails.URL)
	if err != nil {
		errlog.Fatal().Err(err).Send()
	}

@@ -108,13 +123,13 @@ func railsMemcacheHandler(next http.Handler) http.HandlerFunc {
	}
}

-func railsCookieHandler(next http.Handler) http.HandlerFunc {
-	cookie := conf.Auth.Cookie
+func railsCookieHandler(authc configAuth, next http.Handler) http.HandlerFunc {
+	cookie := authc.Cookie
	if len(cookie) == 0 {
		errlog.Fatal().Msg("no auth.cookie defined")
	}

-	ra, err := railsAuth(conf)
+	ra, err := railsAuth(authc)
	if err != nil {
		errlog.Fatal().Err(err).Send()
	}

@@ -139,13 +154,13 @@ func railsCookieHandler(next http.Handler) http.HandlerFunc {
	}
}

-func railsAuth(c *config) (*rails.Auth, error) {
-	secret := c.Auth.Rails.SecretKeyBase
+func railsAuth(authc configAuth) (*rails.Auth, error) {
+	secret := authc.Rails.SecretKeyBase
	if len(secret) == 0 {
		return nil, errors.New("no auth.rails.secret_key_base defined")
	}

-	version := c.Auth.Rails.Version
+	version := authc.Rails.Version
	if len(version) == 0 {
		return nil, errors.New("no auth.rails.version defined")
	}

@@ -155,16 +170,16 @@ func railsAuth(c *config) (*rails.Auth, error) {
		return nil, err
	}

-	if len(c.Auth.Rails.Salt) != 0 {
-		ra.Salt = c.Auth.Rails.Salt
+	if len(authc.Rails.Salt) != 0 {
+		ra.Salt = authc.Rails.Salt
	}

-	if len(conf.Auth.Rails.SignSalt) != 0 {
-		ra.SignSalt = c.Auth.Rails.SignSalt
+	if len(authc.Rails.SignSalt) != 0 {
+		ra.SignSalt = authc.Rails.SignSalt
	}

-	if len(conf.Auth.Rails.AuthSalt) != 0 {
-		ra.AuthSalt = c.Auth.Rails.AuthSalt
+	if len(authc.Rails.AuthSalt) != 0 {
+		ra.AuthSalt = authc.Rails.AuthSalt
	}

	return ra, nil

View File

@@ -1,15 +1,13 @@
package serv

import (
-	"context"
	"fmt"
-	"os"
	"runtime"
	"strings"

+	"github.com/dosco/super-graph/allow"
	"github.com/dosco/super-graph/psql"
	"github.com/dosco/super-graph/qcode"
-	"github.com/jackc/pgx/v4"
	"github.com/jackc/pgx/v4/pgxpool"
	"github.com/rs/zerolog"
	"github.com/spf13/cobra"

@@ -31,17 +29,18 @@
)

var (
	logger    zerolog.Logger  // logger for everything but errors
	errlog    zerolog.Logger  // logger for errors includes line numbers
	conf      *config         // parsed config
	confPath  string          // path to the config file
	db        *pgxpool.Pool   // database connection pool
	schema    *psql.DBSchema  // database tables, columns and relationships
+	allowList *allow.List     // allow.list contains queries allowed in production
	qcompile  *qcode.Compiler // qcode compiler
	pcompile  *psql.Compiler  // postgres sql compiler
)

-func Init() {
+func Cmd() {
	initLog()

	rootCmd := &cobra.Command{

@@ -156,159 +155,6 @@ e.g. db:migrate -+1
	}
}
func initLog() {
out := zerolog.ConsoleWriter{Out: os.Stderr}
logger = zerolog.New(out).With().Timestamp().Logger()
errlog = logger.With().Caller().Logger()
}
func initConf() (*config, error) {
vi := newConfig(getConfigName())
if err := vi.ReadInConfig(); err != nil {
return nil, err
}
inherits := vi.GetString("inherits")
if len(inherits) != 0 {
vi = newConfig(inherits)
if err := vi.ReadInConfig(); err != nil {
return nil, err
}
if vi.IsSet("inherits") {
errlog.Fatal().Msgf("inherited config (%s) cannot itself inherit (%s)",
inherits,
vi.GetString("inherits"))
}
vi.SetConfigName(getConfigName())
if err := vi.MergeInConfig(); err != nil {
return nil, err
}
}
c := &config{}
if err := c.Init(vi); err != nil {
return nil, fmt.Errorf("unable to decode config, %v", err)
}
logLevel, err := zerolog.ParseLevel(c.LogLevel)
if err != nil {
errlog.Error().Err(err).Msg("error setting log_level")
}
zerolog.SetGlobalLevel(logLevel)
return c, nil
}
func initDB(c *config, useDB bool) (*pgx.Conn, error) {
config, _ := pgx.ParseConfig("")
config.Host = c.DB.Host
config.Port = c.DB.Port
config.User = c.DB.User
config.Password = c.DB.Password
config.RuntimeParams = map[string]string{
"application_name": c.AppName,
"search_path": c.DB.Schema,
}
if useDB {
config.Database = c.DB.DBName
}
switch c.LogLevel {
case "debug":
config.LogLevel = pgx.LogLevelDebug
case "info":
config.LogLevel = pgx.LogLevelInfo
case "warn":
config.LogLevel = pgx.LogLevelWarn
case "error":
config.LogLevel = pgx.LogLevelError
default:
config.LogLevel = pgx.LogLevelNone
}
config.Logger = NewSQLLogger(logger)
db, err := pgx.ConnectConfig(context.Background(), config)
if err != nil {
return nil, err
}
return db, nil
}
func initDBPool(c *config) (*pgxpool.Pool, error) {
config, _ := pgxpool.ParseConfig("")
config.ConnConfig.Host = c.DB.Host
config.ConnConfig.Port = c.DB.Port
config.ConnConfig.Database = c.DB.DBName
config.ConnConfig.User = c.DB.User
config.ConnConfig.Password = c.DB.Password
config.ConnConfig.RuntimeParams = map[string]string{
"application_name": c.AppName,
"search_path": c.DB.Schema,
}
switch c.LogLevel {
case "debug":
config.ConnConfig.LogLevel = pgx.LogLevelDebug
case "info":
config.ConnConfig.LogLevel = pgx.LogLevelInfo
case "warn":
config.ConnConfig.LogLevel = pgx.LogLevelWarn
case "error":
config.ConnConfig.LogLevel = pgx.LogLevelError
default:
config.ConnConfig.LogLevel = pgx.LogLevelNone
}
config.ConnConfig.Logger = NewSQLLogger(logger)
// if c.DB.MaxRetries != 0 {
// opt.MaxRetries = c.DB.MaxRetries
// }
if c.DB.PoolSize != 0 {
config.MaxConns = conf.DB.PoolSize
}
db, err := pgxpool.ConnectConfig(context.Background(), config)
if err != nil {
return nil, err
}
return db, nil
}
func initCompiler() {
var err error
qcompile, pcompile, err = initCompilers(conf)
if err != nil {
errlog.Fatal().Err(err).Msg("failed to initialize compilers")
}
if err := initResolvers(); err != nil {
errlog.Fatal().Err(err).Msg("failed to initialized resolvers")
}
}
func initConfOnce() {
var err error
if conf == nil {
if conf, err = initConf(); err != nil {
errlog.Fatal().Err(err).Msg("failed to read config")
}
}
}
func cmdVersion(cmd *cobra.Command, args []string) {
	fmt.Printf("%s\n", BuildDetails())
}

View File

@@ -311,3 +311,13 @@ func getMigrationVars() map[string]interface{} {
		"env": strings.ToLower(os.Getenv("GO_ENV")),
	}
}
func initConfOnce() {
var err error
if conf == nil {
if conf, err = initConf(); err != nil {
errlog.Fatal().Err(err).Msg("failed to read config")
}
}
}

View File

@@ -7,23 +7,22 @@ import (
func cmdServ(cmd *cobra.Command, args []string) {
	var err error

+	initWatcher(confPath)
+
	if conf, err = initConf(); err != nil {
		fatalInProd(err, "failed to read config")
	}

-	if conf != nil {
-		db, err = initDBPool(conf)
+	db, err = initDBPool(conf)

-		if err == nil {
-			initCompiler()
-			initAllowList(confPath)
-			initPreparedList()
-		} else {
-			fatalInProd(err, "failed to connect to database")
-		}
+	if err != nil {
+		fatalInProd(err, "failed to connect to database")
	}

-	initWatcher(confPath)
+	initCompiler()
+	initResolvers()
+	initAllowList(confPath)
+	initPreparedList(confPath)

	startHTTP()
}

View File

@@ -33,30 +33,8 @@ type config struct {
	Inflections map[string]string

-	Auth struct {
-		Type          string
-		Cookie        string
-		CredsInHeader bool `mapstructure:"creds_in_header"`
-
-		Rails struct {
-			Version       string
-			SecretKeyBase string `mapstructure:"secret_key_base"`
-			URL           string
-			Password      string
-			MaxIdle       int `mapstructure:"max_idle"`
-			MaxActive     int `mapstructure:"max_active"`
-			Salt          string
-			SignSalt      string `mapstructure:"sign_salt"`
-			AuthSalt      string `mapstructure:"auth_salt"`
-		}
-
-		JWT struct {
-			Provider   string
-			Secret     string
-			PubKeyFile string `mapstructure:"public_key_file"`
-			PubKeyType string `mapstructure:"public_key_type"`
-		}
-	}
+	Auth  configAuth
+	Auths []configAuth

	DB struct {
		Type string

@@ -77,6 +55,8 @@ type config struct {
		Tables []configTable
	} `mapstructure:"database"`

+	Actions []configAction
+
	Tables []configTable

	RolesQuery string `mapstructure:"roles_query"`
@@ -85,6 +65,38 @@ type config struct {
	abacEnabled bool
}
type configAuth struct {
Name string
Type string
Cookie string
CredsInHeader bool `mapstructure:"creds_in_header"`
Rails struct {
Version string
SecretKeyBase string `mapstructure:"secret_key_base"`
URL string
Password string
MaxIdle int `mapstructure:"max_idle"`
MaxActive int `mapstructure:"max_active"`
Salt string
SignSalt string `mapstructure:"sign_salt"`
AuthSalt string `mapstructure:"auth_salt"`
}
JWT struct {
Provider string
Secret string
PubKeyFile string `mapstructure:"public_key_file"`
PubKeyType string `mapstructure:"public_key_type"`
}
Header struct {
Name string
Value string
Exists bool
}
}
type configColumn struct {
	Name string
	Type string
@@ -156,6 +168,12 @@ type configRole struct {
	tablesMap map[string]*configRoleTable
}
type configAction struct {
Name string
SQL string
AuthName string `mapstructure:"auth_name"`
}
func newConfig(name string) *viper.Viper {
	vi := viper.New()
@@ -283,26 +301,48 @@ func (c *config) Init(vi *viper.Viper) error {
func (c *config) validate() {
	rm := make(map[string]struct{})

-	for i := range c.Roles {
-		name := c.Roles[i].Name
+	for _, v := range c.Roles {
+		name := strings.ToLower(v.Name)
		if _, ok := rm[name]; ok {
-			errlog.Fatal().Msgf("duplicate config for role '%s'", c.Roles[i].Name)
+			errlog.Fatal().Msgf("duplicate config for role '%s'", v.Name)
		}
		rm[name] = struct{}{}
	}

	tm := make(map[string]struct{})

-	for i := range c.Tables {
-		name := c.Tables[i].Name
+	for _, v := range c.Tables {
+		name := strings.ToLower(v.Name)
		if _, ok := tm[name]; ok {
-			errlog.Fatal().Msgf("duplicate config for table '%s'", c.Tables[i].Name)
+			errlog.Fatal().Msgf("duplicate config for table '%s'", v.Name)
		}
		tm[name] = struct{}{}
	}
am := make(map[string]struct{})
for _, v := range c.Auths {
name := strings.ToLower(v.Name)
if _, ok := am[name]; ok {
errlog.Fatal().Msgf("duplicate config for auth '%s'", v.Name)
}
am[name] = struct{}{}
}
for _, v := range c.Actions {
if len(v.AuthName) == 0 {
continue
}
authName := strings.ToLower(v.AuthName)
if _, ok := am[authName]; !ok {
errlog.Fatal().Msgf("invalid auth_name for action '%s'", v.Name)
}
}
	if len(c.RolesQuery) == 0 {
		logger.Warn().Msgf("no 'roles_query' defined.")
	}
@@ -349,3 +389,31 @@ func sanitize(s string) string {
		return strings.ToLower(m)
	})
}
func getConfigName() string {
if len(os.Getenv("GO_ENV")) == 0 {
return "dev"
}
ge := strings.ToLower(os.Getenv("GO_ENV"))
switch {
case strings.HasPrefix(ge, "pro"):
return "prod"
case strings.HasPrefix(ge, "sta"):
return "stage"
case strings.HasPrefix(ge, "tes"):
return "test"
case strings.HasPrefix(ge, "dev"):
return "dev"
}
return ge
}
func isDev() bool {
return strings.HasPrefix(os.Getenv("GO_ENV"), "dev")
}

View File

@@ -11,6 +11,7 @@ import (
	"time"

	"github.com/cespare/xxhash/v2"
+	"github.com/dosco/super-graph/allow"
	"github.com/dosco/super-graph/qcode"
	"github.com/jackc/pgx/v4"
	"github.com/valyala/fasttemplate"

@@ -107,7 +108,7 @@ func (c *coreContext) resolvePreparedSQL() ([]byte, *stmt, error) {
	}

-	ps, ok := _preparedList[gqlHash(c.req.Query, c.req.Vars, role)]
+	ps, ok := _preparedList[stmtHash(allow.QueryName(c.req.Query), role)]
	if !ok {
		return nil, nil, errUnauthorized
	}

@@ -240,8 +241,10 @@ func (c *coreContext) resolveSQL() ([]byte, *stmt, error) {
		}
	}

-	if !conf.Production {
-		_allowList.add(&c.req)
+	if allowList.IsPersist() {
+		if err := allowList.Add(c.req.Vars, c.req.Query, c.req.ref); err != nil {
+			return nil, nil, err
+		}
	}

	if len(stmts) > 1 {

View File

@@ -4,7 +4,7 @@ package serv
func Fuzz(data []byte) int {
	gql := string(data)

-	gqlName(gql)
+	QueryName(gql)
	gqlHash(gql, nil, "")

	return 1

View File

@@ -10,7 +10,6 @@ func TestFuzzCrashers(t *testing.T) {
	}

	for _, f := range crashers {
-		_ = gqlName(f)
		gqlHash(f, nil, "")
	}
}

165
serv/init.go Normal file
View File

@@ -0,0 +1,165 @@
package serv
import (
"context"
"fmt"
"os"
"github.com/dosco/super-graph/allow"
"github.com/jackc/pgx/v4"
"github.com/jackc/pgx/v4/pgxpool"
"github.com/rs/zerolog"
)
func initLog() {
out := zerolog.ConsoleWriter{Out: os.Stderr}
logger = zerolog.New(out).With().Timestamp().Logger()
errlog = logger.With().Caller().Logger()
}
func initConf() (*config, error) {
vi := newConfig(getConfigName())
if err := vi.ReadInConfig(); err != nil {
return nil, err
}
inherits := vi.GetString("inherits")
if len(inherits) != 0 {
vi = newConfig(inherits)
if err := vi.ReadInConfig(); err != nil {
return nil, err
}
if vi.IsSet("inherits") {
errlog.Fatal().Msgf("inherited config (%s) cannot itself inherit (%s)",
inherits,
vi.GetString("inherits"))
}
vi.SetConfigName(getConfigName())
if err := vi.MergeInConfig(); err != nil {
return nil, err
}
}
c := &config{}
if err := c.Init(vi); err != nil {
return nil, fmt.Errorf("unable to decode config, %v", err)
}
logLevel, err := zerolog.ParseLevel(c.LogLevel)
if err != nil {
errlog.Error().Err(err).Msg("error setting log_level")
}
zerolog.SetGlobalLevel(logLevel)
return c, nil
}
func initDB(c *config, useDB bool) (*pgx.Conn, error) {
config, _ := pgx.ParseConfig("")
config.Host = c.DB.Host
config.Port = c.DB.Port
config.User = c.DB.User
config.Password = c.DB.Password
config.RuntimeParams = map[string]string{
"application_name": c.AppName,
"search_path": c.DB.Schema,
}
if useDB {
config.Database = c.DB.DBName
}
switch c.LogLevel {
case "debug":
config.LogLevel = pgx.LogLevelDebug
case "info":
config.LogLevel = pgx.LogLevelInfo
case "warn":
config.LogLevel = pgx.LogLevelWarn
case "error":
config.LogLevel = pgx.LogLevelError
default:
config.LogLevel = pgx.LogLevelNone
}
config.Logger = NewSQLLogger(logger)
db, err := pgx.ConnectConfig(context.Background(), config)
if err != nil {
return nil, err
}
return db, nil
}
func initDBPool(c *config) (*pgxpool.Pool, error) {
config, _ := pgxpool.ParseConfig("")
config.ConnConfig.Host = c.DB.Host
config.ConnConfig.Port = c.DB.Port
config.ConnConfig.Database = c.DB.DBName
config.ConnConfig.User = c.DB.User
config.ConnConfig.Password = c.DB.Password
config.ConnConfig.RuntimeParams = map[string]string{
"application_name": c.AppName,
"search_path": c.DB.Schema,
}
switch c.LogLevel {
case "debug":
config.ConnConfig.LogLevel = pgx.LogLevelDebug
case "info":
config.ConnConfig.LogLevel = pgx.LogLevelInfo
case "warn":
config.ConnConfig.LogLevel = pgx.LogLevelWarn
case "error":
config.ConnConfig.LogLevel = pgx.LogLevelError
default:
config.ConnConfig.LogLevel = pgx.LogLevelNone
}
config.ConnConfig.Logger = NewSQLLogger(logger)
// if c.DB.MaxRetries != 0 {
// opt.MaxRetries = c.DB.MaxRetries
// }
if c.DB.PoolSize != 0 {
config.MaxConns = conf.DB.PoolSize
}
db, err := pgxpool.ConnectConfig(context.Background(), config)
if err != nil {
return nil, err
}
return db, nil
}
func initCompiler() {
var err error
qcompile, pcompile, err = initCompilers(conf)
if err != nil {
errlog.Fatal().Err(err).Msg("failed to initialize compilers")
}
}
func initAllowList(cpath string) {
var ac allow.Config
var err error
if !conf.Production {
ac = allow.Config{CreateIfNotExists: true, Persist: true}
}
allowList, err = allow.New(cpath, ac)
if err != nil {
errlog.Fatal().Err(err).Msg("failed to initialize allow list")
}
}

View File

@@ -6,6 +6,7 @@ import (
	"fmt"
	"io"

+	"github.com/dosco/super-graph/allow"
	"github.com/dosco/super-graph/qcode"
	"github.com/jackc/pgconn"
	"github.com/jackc/pgx/v4"

@@ -23,7 +24,10 @@ var (
	_preparedList map[string]*preparedItem
)

-func initPreparedList() {
+func initPreparedList(cpath string) {
+	if allowList.IsPersist() {
+		return
+	}
	_preparedList = make(map[string]*preparedItem)

	tx, err := db.Begin(context.Background())

@@ -43,30 +47,38 @@ func initPreparedList() {
	success := 0

-	for _, v := range _allowList.list {
-		if len(v.gql) == 0 {
+	list, err := allowList.Load()
+	if err != nil {
+		errlog.Fatal().Err(err).Send()
+	}
+
+	for _, v := range list {
+		if len(v.Query) == 0 {
			continue
		}

-		err := prepareStmt(v.gql, v.vars)
+		err := prepareStmt(v)
		if err == nil {
			success++
			continue
		}

-		if len(v.vars) == 0 {
-			logger.Warn().Err(err).Msg(v.gql)
+		if len(v.Vars) == 0 {
+			logger.Warn().Err(err).Msg(v.Query)
		} else {
-			logger.Warn().Err(err).Msgf("%s %s", v.vars, v.gql)
+			logger.Warn().Err(err).Msgf("%s %s", v.Vars, v.Query)
		}
	}

	logger.Info().
		Msgf("Registered %d of %d queries from allow.list as prepared statements",
-			success, len(_allowList.list))
+			success, len(list))
}

-func prepareStmt(gql string, vars []byte) error {
+func prepareStmt(item allow.Item) error {
+	gql := item.Query
+	vars := item.Vars
+
	qt := qcode.GetQType(gql)
	q := []byte(gql)

@@ -99,7 +111,7 @@ func prepareStmt(gql string, vars []byte) error {
	logger.Debug().Msg("Prepared statement role: user")

-	err = prepare(tx, stmts1, gqlHash(gql, vars, "user"))
+	err = prepare(tx, stmts1, stmtHash(item.Name, "user"))
	if err != nil {
		return err
	}

@@ -112,7 +124,7 @@ func prepareStmt(gql string, vars []byte) error {
		return err
	}

-	err = prepare(tx, stmts2, gqlHash(gql, vars, "anon"))
+	err = prepare(tx, stmts2, stmtHash(item.Name, "anon"))
	if err != nil {
		return err
	}

@@ -127,7 +139,7 @@ func prepareStmt(gql string, vars []byte) error {
			return err
		}

-		err = prepare(tx, stmts, gqlHash(gql, vars, role.Name))
+		err = prepare(tx, stmts, stmtHash(item.Name, role.Name))
		if err != nil {
			return err
		}

View File

@@ -22,16 +22,20 @@ type resolvFn struct {
	Fn func(h http.Header, id []byte) ([]byte, error)
}

-func initResolvers() error {
+func initResolvers() {
+	var err error
	rmap = make(map[uint64]*resolvFn)

	for _, t := range conf.Tables {
-		err := initRemotes(t)
+		err = initRemotes(t)
		if err != nil {
-			return err
+			break
		}
	}

-	return nil
+	if err != nil {
+		errlog.Fatal().Err(err).Msg("failed to initialize resolvers")
+	}
}

func initRemotes(t configTable) error {

View File

@@ -101,9 +101,14 @@ func startHTTP() {
		hostPort = defaultHP
	}

+	routes, err := routeHandler()
+	if err != nil {
+		errlog.Fatal().Err(err).Send()
+	}
+
	srv := &http.Server{
		Addr:           hostPort,
-		Handler:        routeHandler(),
+		Handler:        routes,
		ReadTimeout:    5 * time.Second,
		WriteTimeout:   10 * time.Second,
		MaxHeaderBytes: 1 << 20,
@@ -140,59 +145,74 @@ func startHTTP() {
	<-idleConnsClosed
}

-func routeHandler() http.Handler {
-	var apiH http.Handler
-
-	if conf != nil && conf.HTTPGZip {
-		gzipH := gziphandler.MustNewGzipLevelHandler(6)
-		apiH = gzipH(http.HandlerFunc(apiV1))
-	} else {
-		apiH = http.HandlerFunc(apiV1)
-	}
-
+func routeHandler() (http.Handler, error) {
	mux := http.NewServeMux()

-	if conf != nil {
-		mux.HandleFunc("/health", health)
-		mux.Handle("/api/v1/graphql", withAuth(apiH))
-
-		if conf.WebUI {
-			mux.Handle("/", http.FileServer(rice.MustFindBox("../web/build").HTTPBox()))
-		}
+	if conf == nil {
+		return mux, nil
	}

+	routes := map[string]http.Handler{
+		"/health":         http.HandlerFunc(health),
+		"/api/v1/graphql": withAuth(http.HandlerFunc(apiV1), conf.Auth),
+	}
+
+	if err := setActionRoutes(routes); err != nil {
+		return nil, err
+	}
+
+	if conf.WebUI {
+		routes["/"] = http.FileServer(rice.MustFindBox("../web/build").HTTPBox())
+	}
+
+	if conf.HTTPGZip {
+		gz := gziphandler.MustNewGzipLevelHandler(6)
+		for k, v := range routes {
+			routes[k] = gz(v)
+		}
+	}
+
+	for k, v := range routes {
+		mux.Handle(k, v)
+	}
+
	fn := func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Server", serverName)
		mux.ServeHTTP(w, r)
	}

-	return http.HandlerFunc(fn)
+	return http.HandlerFunc(fn), nil
}

-func getConfigName() string {
-	if len(os.Getenv("GO_ENV")) == 0 {
-		return "dev"
-	}
-
-	ge := strings.ToLower(os.Getenv("GO_ENV"))
-
-	switch {
-	case strings.HasPrefix(ge, "pro"):
-		return "prod"
-
-	case strings.HasPrefix(ge, "sta"):
-		return "stage"
-
-	case strings.HasPrefix(ge, "tes"):
-		return "test"
-
-	case strings.HasPrefix(ge, "dev"):
-		return "dev"
-	}
-
-	return ge
-}
-
-func isDev() bool {
-	return strings.HasPrefix(os.Getenv("GO_ENV"), "dev")
-}
+func setActionRoutes(routes map[string]http.Handler) error {
+	var err error
+
+	for _, a := range conf.Actions {
+		var fn http.Handler
+
+		fn, err = newAction(a)
+		if err != nil {
+			break
+		}
+
+		p := fmt.Sprintf("/api/v1/actions/%s", strings.ToLower(a.Name))
+
+		if authc, ok := findAuth(a.AuthName); ok {
+			routes[p] = withAuth(fn, authc)
+		} else {
+			routes[p] = fn
+		}
+	}
+	return nil
+}
+
+func findAuth(name string) (configAuth, bool) {
+	var authc configAuth
+
+	for _, a := range conf.Auths {
+		if strings.EqualFold(a.Name, name) {
+			return a, true
+		}
+	}
+
+	return authc, false
+}

View File

@@ -7,6 +7,7 @@ import (
	"io"
	"sort"
	"strings"
+	"sync"

	"github.com/cespare/xxhash/v2"
	"github.com/dosco/super-graph/jsn"

@@ -22,6 +23,14 @@ func mkkey(h *xxhash.Digest, k1 string, k2 string) uint64 {
	return v
}
// nolint: errcheck
func stmtHash(name string, role string) string {
h := sha1.New()
io.WriteString(h, strings.ToLower(name))
io.WriteString(h, role)
return hex.EncodeToString(h.Sum(nil))
}
// nolint: errcheck
func gqlHash(b string, vars []byte, role string) string {
	b = strings.TrimSpace(b)
@@ -108,30 +117,6 @@ func al(b byte) bool {
	return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || (b >= '0' && b <= '9')
}
func gqlName(b string) string {
state, s := 0, 0
for i := 0; i < len(b); i++ {
switch {
case state == 2 && b[i] == '{':
return b[s:i]
case state == 2 && b[i] == ' ':
return b[s:i]
case state == 1 && b[i] == '{':
return ""
case state == 1 && b[i] != ' ':
s = i
state = 2
case state == 1 && b[i] == ' ':
continue
case i != 0 && b[i] == ' ' && (b[i-1] == 'n' || b[i-1] == 'y'):
state = 1
}
}
return ""
}
func findStmt(role string, stmts []stmt) *stmt {
	for i := range stmts {
		if stmts[i].role.Name != role {
@@ -143,9 +128,14 @@ func findStmt(role string, stmts []stmt) *stmt {
}

func fatalInProd(err error, msg string) {
-	if isDev() {
-		errlog.Error().Err(err).Msg(msg)
-	} else {
+	var wg sync.WaitGroup
+
+	if !isDev() {
		errlog.Fatal().Err(err).Msg(msg)
	}
+
+	errlog.Error().Err(err).Msg(msg)
+
+	wg.Add(1)
+	wg.Wait()
}

View File

@@ -229,80 +229,3 @@ func TestGQLHashWithVars2(t *testing.T) {
		t.Fatal("Hashes don't match they should")
	}
}
func TestGQLName1(t *testing.T) {
var q = `
query {
products(
distinct: [price]
where: { id: { and: { greater_or_equals: 20, lt: 28 } } }
) { id name } }`
name := gqlName(q)
if len(name) != 0 {
t.Fatal("Name should be empty, not ", name)
}
}
func TestGQLName2(t *testing.T) {
var q = `
query hakuna_matata {
products(
distinct: [price]
where: { id: { and: { greater_or_equals: 20, lt: 28 } } }
) {
id
name
}
}`
name := gqlName(q)
if name != "hakuna_matata" {
t.Fatal("Name should be 'hakuna_matata', not ", name)
}
}
func TestGQLName3(t *testing.T) {
var q = `
mutation means{ users { id } }`
// var v2 = ` { products( limit: 30, order_by: { price: desc }, distinct: [ price ] where: { id: { and: { greater_or_equals: 20, lt: 28 } } }) { id name price user { id email } } } `
name := gqlName(q)
if name != "means" {
t.Fatal("Name should be 'means', not ", name)
}
}
func TestGQLName4(t *testing.T) {
var q = `
query no_worries
users {
id
}
}`
name := gqlName(q)
if name != "no_worries" {
t.Fatal("Name should be 'no_worries', not ", name)
}
}
func TestGQLName5(t *testing.T) {
var q = `
{
users {
id
}
}`
name := gqlName(q)
if len(name) != 0 {
t.Fatal("Name should be empty, not ", name)
}
}

View File

@@ -49,7 +49,7 @@ migrations_path: ./config/migrations
#   sheep: sheep

auth:
-  # Can be 'rails' or 'jwt'
+  # Can be 'rails', 'jwt' or 'header'
  type: rails
  cookie: _{% app_name_slug %}_session
@@ -83,6 +83,22 @@ auth:
  # public_key_file: /secrets/public_key.pem
  # public_key_type: ecdsa #rsa
# header:
# name: dnt
# exists: true
# value: localhost:8080
# You can add additional named auths to use with actions
# In this example actions using this auth can only be
# called from the Google Appengine Cron service that
# sets a special header on all its requests
auths:
- name: from_taskqueue
type: header
header:
name: X-Appengine-Cron
exists: true
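
# For illustration only (not part of this commit; assumes the server
# listens on localhost:8080). With exists: true, the header's value is
# ignored and only its presence is checked:
#   curl -H 'X-Appengine-Cron: true' localhost:8080/api/v1/actions/refresh_leaderboard_users   # passes this auth
#   curl localhost:8080/api/v1/actions/refresh_leaderboard_users                               # rejected with a 401
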
database:
  type: postgres
  host: db

@@ -116,6 +132,16 @@ database:
    - encrypted
    - token
# Create custom actions with their own api endpoints
# For example, the action below will be available at /api/v1/actions/refresh_leaderboard_users
# A request to this URL will execute the configured SQL query
# which in this case refreshes a materialized view in the database.
# The auth_name is from one of the configured auths
actions:
- name: refresh_leaderboard_users
sql: REFRESH MATERIALIZED VIEW CONCURRENTLY "leaderboard_users"
auth_name: from_taskqueue
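
# For illustration only: per setActionRoutes earlier in this diff, the route
# is derived by lowercasing the action name, i.e.
#   p := fmt.Sprintf("/api/v1/actions/%s", strings.ToLower(a.Name))
# which for the action above yields /api/v1/actions/refresh_leaderboard_users.
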
tables:
  - name: customers
    remotes:
@@ -137,6 +163,7 @@ tables:
      name: me
      table: users

roles_query: "SELECT * FROM users WHERE id = $user_id"

roles:
@@ -168,20 +195,16 @@ roles:
    query:
      limit: 50
      filters: ["{ user_id: { eq: $user_id } }"]
-      columns: ["id", "name", "description" ]
      disable_functions: false

    insert:
      filters: ["{ user_id: { eq: $user_id } }"]
-      columns: ["id", "name", "description" ]
      presets:
-        - user_id: "$user_id"
        - created_at: "now"

    update:
      filters: ["{ user_id: { eq: $user_id } }"]
-      columns:
-        - id
-        - name
      presets:
        - updated_at: "now"