Compare commits

5 Commits

22 changed files with 1717 additions and 419 deletions

.gitignore

@@ -29,10 +29,10 @@
 .release
 main
 super-graph
+supergraph
 *-fuzz.zip
 crashers
 suppressions
 release
 .gofuzz
 *-fuzz.zip

Dockerfile

@@ -5,6 +5,8 @@ COPY /cmd/internal/serv/web/ ./
 RUN yarn
 RUN yarn build

 # stage: 2
 FROM golang:1.14-alpine as go-build
 RUN apk update && \
@@ -31,6 +33,8 @@ RUN echo "Compressing binary, will take a bit of time..." && \
 	upx --ultra-brute -qq super-graph && \
 	upx -t super-graph

 # stage: 3
 FROM alpine:latest
 WORKDIR /

Makefile

@@ -12,10 +12,10 @@ endif
 export GO111MODULE := on

 # Build-time Go variables
-version = github.com/dosco/super-graph/serv.version
-gitBranch = github.com/dosco/super-graph/serv.gitBranch
-lastCommitSHA = github.com/dosco/super-graph/serv.lastCommitSHA
-lastCommitTime = github.com/dosco/super-graph/serv.lastCommitTime
+version = github.com/dosco/super-graph/cmd/internal/serv.version
+gitBranch = github.com/dosco/super-graph/cmd/internal/serv.gitBranch
+lastCommitSHA = github.com/dosco/super-graph/cmd/internal/serv.lastCommitSHA
+lastCommitTime = github.com/dosco/super-graph/cmd/internal/serv.lastCommitTime

 BUILD_FLAGS ?= -ldflags '-s -w -X ${lastCommitSHA}=${BUILD} -X "${lastCommitTime}=${BUILD_DATE}" -X "${version}=${BUILD_VERSION}" -X ${gitBranch}=${BUILD_BRANCH}'
@@ -77,11 +77,10 @@ clean:
 run: clean
 	@go run $(BUILD_FLAGS) main.go $(ARGS)

-install:
-	@echo $(GOPATH)
+install: clean build
 	@echo "Commit Hash: `git rev-parse HEAD`"
 	@echo "Old Hash: `shasum $(GOPATH)/bin/$(BINARY) 2>/dev/null | cut -c -32`"
-	@go install $(BUILD_FLAGS) cmd
+	@mv $(BINARY) $(GOPATH)/bin/$(BINARY)
 	@echo "New Hash:" `shasum $(GOPATH)/bin/$(BINARY) 2>/dev/null | cut -c -32`

 uninstall: clean
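The `-X` entries in `BUILD_FLAGS` above stamp these package variables at link time, which is why the import paths had to move from `serv` to `cmd/internal/serv`. A minimal sketch of the mechanism, assuming a string variable named `version` in that package (the file name and version value below are illustrative):

```golang
// cmd/internal/serv/version.go (illustrative file name)
package serv

import "fmt"

// populated by the linker, e.g.:
//   go build -ldflags '-X "github.com/dosco/super-graph/cmd/internal/serv.version=v0.13"'
// if the -X flag is omitted the string is simply empty
var version string

// printVersion shows the value injected at build time.
func printVersion() {
	fmt.Println("version:", version)
}
```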

README.md

@@ -1,26 +1,74 @@
-<!-- <a href="https://supergraph.dev"><img src="https://supergraph.dev/hologram.svg" width="100" height="100" align="right" /></a> -->
-<img src="docs/.vuepress/public/super-graph.png" width="250" />
+<img src="docs/guide/.vuepress/public/super-graph.png" width="250" />

 ### Build web products faster. Secure high performance GraphQL

-![Apache Public License 2.0](https://img.shields.io/github/license/dosco/super-graph.svg)
-![Docker build](https://img.shields.io/docker/cloud/build/dosco/super-graph.svg)
-![Cloud native](https://img.shields.io/badge/cloud--native-enabled-blue.svg)
+[![GoDoc](https://img.shields.io/badge/godoc-reference-5272B4.svg)](https://pkg.go.dev/github.com/dosco/super-graph/core?tab=doc)
+![Apache 2.0](https://img.shields.io/github/license/dosco/super-graph.svg?style=flat-square)
+![Docker build](https://img.shields.io/docker/cloud/build/dosco/super-graph.svg?style=flat-square)
+![Cloud native](https://img.shields.io/badge/cloud--native-enabled-blue.svg?style=flat-square)
 [![Discord Chat](https://img.shields.io/discord/628796009539043348.svg)](https://discord.gg/6pSWCTZ)

-## What is Super Graph
+## What's Super Graph?

-Is designed to 100x your developer productivity. Super Graph will instantly and without you writing code provide you a high performance and secure GraphQL API for Postgres DB. GraphQL queries are translated into a single fast SQL query. No more writing API code as you develop your web frontend just make the query you need and Super Graph will do the rest.
-
-Super Graph has a rich feature set like integrating with your existing Ruby on Rails apps, joining your DB with data from remote APIs, role and attribute based access control, support for JWT tokens, built-in DB mutations and seeding, and a lot more.
-
-![GraphQL](docs/.vuepress/public/graphql.png?raw=true "")
-
-## The story of Super Graph?
+Designed to 100x your developer productivity. Super Graph will instantly, and without you writing any code, provide a high performance GraphQL API for your Postgres DB. GraphQL queries are compiled into a single fast SQL query. Super Graph is a Go library and a service; use it in your own code or run it as a separate service.
+
+## Using it as a service
+
+```console
+git clone https://github.com/dosco/super-graph
+cd ./super-graph
+make install
+
+super-graph new <app_name>
+```
+
+## Using it in your own code
+
+```golang
+package main
+
+import (
+	"context"
+	"database/sql"
+	"fmt"
+	"log"
+
+	"github.com/dosco/super-graph/core"
+	_ "github.com/jackc/pgx/v4/stdlib"
+)
+
+func main() {
+	db, err := sql.Open("pgx", "postgres://postgres:@localhost:5432/example_db")
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	conf, err := core.ReadInConfig("./config/dev.yml")
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	sg, err := core.NewSuperGraph(conf, db)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	query := `
+	query {
+		posts {
+			id
+			title
+		}
+	}`
+
+	res, err := sg.GraphQL(context.Background(), query, nil)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	fmt.Println(string(res.Data))
+}
+```
+
+## About Super Graph

 After working on several products throughout my career, I've found that we spend way too much time building API backends. Most APIs also require constant updating, which costs real time and money.
@@ -37,6 +85,7 @@ This compiler is what sits at the heart of Super Graph with layers of useful fun
 - Complex nested queries and mutations
 - Auto learns database tables and relationships
 - Role and Attribute based access control
+- Opaque cursor based efficient pagination
 - Full text search and aggregations
 - JWT tokens supported (Auth0, etc)
 - Join database queries with remote REST APIs
@@ -50,15 +99,6 @@ This compiler is what sits at the heart of Super Graph with layers of useful fun
 - Database seeding tool
 - Works with Postgres and YugabyteDB

-## Get started
-
-```
-git clone https://github.com/dosco/super-graph
-cd ./super-graph
-make install
-
-super-graph new <app_name>
-```
-
 ## Documentation


@@ -55,7 +55,7 @@ func cmdDBReset(cmd *cobra.Command, args []string) {

 func cmdDBCreate(cmd *cobra.Command, args []string) {
 	initConfOnce()

-	db, err := initDB(conf)
+	db, err := initDB(conf, false)
 	if err != nil {
 		log.Fatalf("ERR failed to connect to database: %s", err)
 	}
@@ -74,7 +74,7 @@ func cmdDBCreate(cmd *cobra.Command, args []string) {

 func cmdDBDrop(cmd *cobra.Command, args []string) {
 	initConfOnce()

-	db, err := initDB(conf)
+	db, err := initDB(conf, false)
 	if err != nil {
 		log.Fatalf("ERR failed to connect to database: %s", err)
 	}
@@ -131,7 +131,7 @@ func cmdDBMigrate(cmd *cobra.Command, args []string) {
 	initConfOnce()
 	dest := args[0]

-	conn, err := initDB(conf)
+	conn, err := initDB(conf, true)
 	if err != nil {
 		log.Fatalf("ERR failed to connect to database: %s", err)
 	}
@@ -223,7 +223,7 @@ func cmdDBMigrate(cmd *cobra.Command, args []string) {

 func cmdDBStatus(cmd *cobra.Command, args []string) {
 	initConfOnce()

-	db, err := initDB(conf)
+	db, err := initDB(conf, true)
 	if err != nil {
 		log.Fatalf("ERR failed to connect to database: %s", err)
 	}


@@ -28,7 +28,7 @@ func cmdDBSeed(cmd *cobra.Command, args []string) {
 	conf.Production = false

-	db, err = initDB(conf)
+	db, err = initDB(conf, true)
 	if err != nil {
 		log.Fatalf("ERR failed to connect to database: %s", err)
 	}


@@ -19,7 +19,7 @@ func cmdServ(cmd *cobra.Command, args []string) {
 	initWatcher()

-	db, err = initDB(conf)
+	db, err = initDB(conf, true)
 	if err != nil {
 		fatalInProd(err, "failed to connect to database")
 	}


@@ -2,11 +2,12 @@ package serv

 import (
 	"database/sql"
-	"fmt"
 	"path"
 	"time"

-	_ "github.com/jackc/pgx/v4/stdlib"
+	"github.com/jackc/pgx/v4"
+	"github.com/jackc/pgx/v4/stdlib"
+	//_ "github.com/jackc/pgx/v4/stdlib"
 )

 func initConf() (*Config, error) {
@@ -78,67 +79,19 @@ func initConf() (*Config, error) {
 	return c, nil
 }

-func initDB(c *Config) (*sql.DB, error) {
+func initDB(c *Config, useDB bool) (*sql.DB, error) {
 	var db *sql.DB
 	var err error

-	cs := fmt.Sprintf("postgres://%s:%s@%s:%d/%s",
-		c.DB.User, c.DB.Password,
-		c.DB.Host, c.DB.Port, c.DB.DBName)
-
-	for i := 1; i < 10; i++ {
-		db, err = sql.Open("pgx", cs)
-		if err == nil {
-			break
-		}
-		time.Sleep(time.Duration(i*100) * time.Millisecond)
-	}
-
-	if err != nil {
-		return nil, err
-	}
-
-	return db, nil
-
-	// config, _ := pgxpool.ParseConfig("")
-	// config.ConnConfig.Host = c.DB.Host
-	// config.ConnConfig.Port = c.DB.Port
-	// config.ConnConfig.Database = c.DB.DBName
-	// config.ConnConfig.User = c.DB.User
-	// config.ConnConfig.Password = c.DB.Password
-	// config.ConnConfig.RuntimeParams = map[string]string{
-	// 	"application_name": c.AppName,
-	// 	"search_path":      c.DB.Schema,
-	// }
-
-	// switch c.LogLevel {
-	// case "debug":
-	// 	config.ConnConfig.LogLevel = pgx.LogLevelDebug
-	// case "info":
-	// 	config.ConnConfig.LogLevel = pgx.LogLevelInfo
-	// case "warn":
-	// 	config.ConnConfig.LogLevel = pgx.LogLevelWarn
-	// case "error":
-	// 	config.ConnConfig.LogLevel = pgx.LogLevelError
-	// default:
-	// 	config.ConnConfig.LogLevel = pgx.LogLevelNone
-	// }
-
-	// config.ConnConfig.Logger = NewSQLLogger(logger)
-
-	// // if c.DB.MaxRetries != 0 {
-	// // 	opt.MaxRetries = c.DB.MaxRetries
-	// // }
-
-	// if c.DB.PoolSize != 0 {
-	// 	config.MaxConns = conf.DB.PoolSize
-	// }
-
-	// var db *pgxpool.Pool
-	// var err error
+	// cs := fmt.Sprintf("host=%s port=%d user=%s password=%s dbname=%s",
+	// 	c.DB.Host, c.DB.Port,
+	// 	c.DB.User, c.DB.Password,
+	// 	c.DB.DBName)
+
+	// fmt.Println(">>", cs)

 	// for i := 1; i < 10; i++ {
-	// 	db, err = pgxpool.ConnectConfig(context.Background(), config)
+	// 	db, err = sql.Open("pgx", cs)
 	// 	if err == nil {
 	// 		break
 	// 	}
@@ -150,4 +103,55 @@ func initDB(c *Config) (*sql.DB, error) {
 	// }

 	// return db, nil
+
+	config, _ := pgx.ParseConfig("")
+	config.Host = c.DB.Host
+	config.Port = c.DB.Port
+	config.User = c.DB.User
+	config.Password = c.DB.Password
+	config.RuntimeParams = map[string]string{
+		"application_name": c.AppName,
+		"search_path":      c.DB.Schema,
+	}
+
+	if useDB {
+		config.Database = c.DB.DBName
+	}
+
+	// switch c.LogLevel {
+	// case "debug":
+	// 	config.LogLevel = pgx.LogLevelDebug
+	// case "info":
+	// 	config.LogLevel = pgx.LogLevelInfo
+	// case "warn":
+	// 	config.LogLevel = pgx.LogLevelWarn
+	// case "error":
+	// 	config.LogLevel = pgx.LogLevelError
+	// default:
+	// 	config.LogLevel = pgx.LogLevelNone
+	// }
+
+	//config.Logger = NewSQLLogger(logger)
+
+	// if c.DB.MaxRetries != 0 {
+	// 	opt.MaxRetries = c.DB.MaxRetries
+	// }
+
+	// if c.DB.PoolSize != 0 {
+	// 	config.MaxConns = conf.DB.PoolSize
+	// }
+
+	for i := 1; i < 10; i++ {
+		db = stdlib.OpenDB(*config)
+		if db != nil {
+			break
+		}
+		time.Sleep(time.Duration(i*100) * time.Millisecond)
+	}
+
+	if err != nil {
+		return nil, err
+	}
+
+	return db, nil
 }
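For context on the change above: instead of formatting a DSN string for `sql.Open`, the config is now built field by field with `pgx.ParseConfig`, and `stdlib.OpenDB` wraps the resulting `*pgx.ConnConfig` in a standard `*sql.DB`. Leaving `Database` unset (the `useDB == false` path) connects to the server itself, which is what `db:create` and `db:drop` need before the database exists. A compact sketch, assuming pgx v4; host and credentials are placeholders:

```golang
package main

import (
	"log"

	"github.com/jackc/pgx/v4"
	"github.com/jackc/pgx/v4/stdlib"
)

func main() {
	// start from an empty connection string and set fields directly
	config, err := pgx.ParseConfig("")
	if err != nil {
		log.Fatal(err)
	}
	config.Host = "localhost"
	config.Port = 5432
	config.User = "postgres"
	config.Password = "postgres"
	// config.Database left empty on purpose, mirroring useDB == false

	db := stdlib.OpenDB(*config) // *sql.DB backed by the pgx driver
	defer db.Close()

	if err := db.Ping(); err != nil {
		log.Fatal(err)
	}
}
```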


@@ -10,20 +10,9 @@ import (
 	"strings"
 	"sync"

-	"github.com/cespare/xxhash/v2"
 	"github.com/dosco/super-graph/jsn"
 )

-// nolint: errcheck
-func mkkey(h *xxhash.Digest, k1 string, k2 string) uint64 {
-	h.WriteString(k1)
-	h.WriteString(k2)
-	v := h.Sum64()
-	h.Reset()
-	return v
-}
-
 // nolint: errcheck
 func gqlHash(b string, vars []byte, role string) string {
 	b = strings.TrimSpace(b)

config/allow.list (new file)

@@ -0,0 +1,755 @@
# http://localhost:8080/
variables {
"data": [
{
"name": "Protect Ya Neck",
"created_at": "now",
"updated_at": "now"
},
{
"name": "Enter the Wu-Tang",
"created_at": "now",
"updated_at": "now"
}
]
}
mutation {
products(insert: $data) {
id
name
}
}
variables {
"update": {
"name": "Wu-Tang",
"description": "No description needed"
},
"product_id": 1
}
mutation {
products(id: $product_id, update: $update) {
id
name
description
}
}
query {
users {
id
email
picture: avatar
products(limit: 2, where: {price: {gt: 10}}) {
id
name
description
}
}
}
variables {
"data": [
{
"name": "Gumbo1",
"created_at": "now",
"updated_at": "now"
},
{
"name": "Gumbo2",
"created_at": "now",
"updated_at": "now"
}
]
}
mutation {
products(id: 199, delete: true) {
id
name
}
}
query {
products {
id
name
user {
email
}
}
}
variables {
"data": {
"product_id": 5
}
}
mutation {
products(id: $product_id, delete: true) {
id
name
}
}
query {
products {
id
name
price
users {
email
}
}
}
variables {
"data": {
"email": "gfk@myspace.com",
"full_name": "Ghostface Killah",
"created_at": "now",
"updated_at": "now"
}
}
mutation {
user(insert: $data) {
id
}
}
variables {
"update": {
"name": "Helloo",
"description": "World \u003c\u003e"
},
"user": 123
}
mutation {
products(id: 5, update: $update) {
id
name
description
}
}
variables {
"data": {
"name": "WOOO",
"price": 50.5
}
}
mutation {
products(insert: $data) {
id
name
}
}
query getProducts {
products {
id
name
price
description
}
}
query {
deals {
id
name
price
}
}
variables {
"beer": "smoke"
}
query beerSearch {
products(search: $beer) {
id
name
search_rank
search_headline_description
}
}
query {
user {
id
full_name
}
}
variables {
"data": {
"email": "goo1@rug.com",
"full_name": "The Dude",
"created_at": "now",
"updated_at": "now",
"product": {
"name": "Apple",
"price": 1.25,
"created_at": "now",
"updated_at": "now"
}
}
}
mutation {
user(insert: $data) {
id
full_name
email
product {
id
name
price
}
}
}
variables {
"data": {
"email": "goo12@rug.com",
"full_name": "The Dude",
"created_at": "now",
"updated_at": "now",
"product": [
{
"name": "Banana 1",
"price": 1.1,
"created_at": "now",
"updated_at": "now"
},
{
"name": "Banana 2",
"price": 2.2,
"created_at": "now",
"updated_at": "now"
}
]
}
}
mutation {
user(insert: $data) {
id
full_name
email
products {
id
name
price
}
}
}
variables {
"data": {
"name": "Banana 3",
"price": 1.1,
"created_at": "now",
"updated_at": "now",
"user": {
"email": "a2@a.com",
"full_name": "The Dude",
"created_at": "now",
"updated_at": "now"
}
}
}
mutation {
products(insert: $data) {
id
name
price
user {
id
full_name
email
}
}
}
variables {
"update": {
"name": "my_name",
"description": "my_desc"
}
}
mutation {
product(id: 15, update: $update, where: {id: {eq: 1}}) {
id
name
}
}
variables {
"update": {
"name": "my_name",
"description": "my_desc"
}
}
mutation {
product(update: $update, where: {id: {eq: 1}}) {
id
name
}
}
variables {
"update": {
"name": "my_name 2",
"description": "my_desc 2"
}
}
mutation {
product(update: $update, where: {id: {eq: 1}}) {
id
name
description
}
}
variables {
"data": {
"sale_type": "tuutuu",
"quantity": 5,
"due_date": "now",
"customer": {
"email": "thedude1@rug.com",
"full_name": "The Dude"
},
"product": {
"name": "Apple",
"price": 1.25
}
}
}
mutation {
purchase(update: $data, id: 5) {
sale_type
quantity
due_date
customer {
id
full_name
email
}
product {
id
name
price
}
}
}
variables {
"data": {
"email": "thedude@rug.com",
"full_name": "The Dude",
"created_at": "now",
"updated_at": "now",
"product": {
"where": {
"id": 2
},
"name": "Apple",
"price": 1.25,
"created_at": "now",
"updated_at": "now"
}
}
}
mutation {
user(update: $data, where: {id: {eq: 8}}) {
id
full_name
email
product {
id
name
price
}
}
}
variables {
"data": {
"email": "thedude@rug.com",
"full_name": "The Dude",
"created_at": "now",
"updated_at": "now",
"product": {
"where": {
"id": 2
},
"name": "Apple",
"price": 1.25,
"created_at": "now",
"updated_at": "now"
}
}
}
query {
user(where: {id: {eq: 8}}) {
id
product {
id
name
price
}
}
}
variables {
"data": {
"name": "Apple",
"price": 1.25,
"created_at": "now",
"updated_at": "now",
"user": {
"email": "thedude@rug.com"
}
}
}
query {
user {
email
}
}
variables {
"data": {
"name": "Apple",
"price": 1.25,
"created_at": "now",
"updated_at": "now",
"user": {
"email": "booboo@demo.com"
}
}
}
mutation {
product(update: $data, id: 6) {
id
name
user {
id
full_name
email
}
}
}
variables {
"data": {
"name": "Apple",
"price": 1.25,
"created_at": "now",
"updated_at": "now",
"user": {
"email": "booboo@demo.com"
}
}
}
query {
product(id: 6) {
id
name
user {
id
full_name
email
}
}
}
variables {
"data": {
"email": "thedude123@rug.com",
"full_name": "The Dude",
"created_at": "now",
"updated_at": "now",
"product": {
"connect": {
"id": 7
},
"disconnect": {
"id": 8
}
}
}
}
mutation {
user(update: $data, id: 6) {
id
full_name
email
product {
id
name
price
}
}
}
variables {
"data": {
"name": "Apple",
"price": 1.25,
"created_at": "now",
"updated_at": "now",
"user": {
"connect": {
"id": 5,
"email": "test@test.com"
}
}
}
}
mutation {
product(update: $data, id: 9) {
id
name
user {
id
full_name
email
}
}
}
variables {
"data": {
"email": "thed44ude@rug.com",
"full_name": "The Dude",
"created_at": "now",
"updated_at": "now",
"product": {
"connect": {
"id": 5
}
}
}
}
mutation {
user(insert: $data) {
id
full_name
email
product {
id
name
price
}
}
}
variables {
"data": {
"name": "Apple",
"price": 1.25,
"created_at": "now",
"updated_at": "now",
"user": {
"connect": {
"id": 5
}
}
}
}
mutation {
product(insert: $data) {
id
name
user {
id
full_name
email
}
}
}
variables {
"data": [
{
"name": "Apple",
"price": 1.25,
"created_at": "now",
"updated_at": "now",
"user": {
"connect": {
"id": 6
}
}
},
{
"name": "Coconut",
"price": 2.25,
"created_at": "now",
"updated_at": "now",
"user": {
"connect": {
"id": 3
}
}
}
]
}
mutation {
products(insert: $data) {
id
name
user {
id
full_name
email
}
}
}
variables {
"data": [
{
"name": "Apple",
"price": 1.25,
"created_at": "now",
"updated_at": "now"
},
{
"name": "Coconut",
"price": 2.25,
"created_at": "now",
"updated_at": "now"
}
]
}
mutation {
products(insert: $data) {
id
name
user {
id
full_name
email
}
}
}
variables {
"data": {
"name": "Apple",
"price": 1.25,
"user": {
"connect": {
"id": 5,
"email": "test@test.com"
}
}
}
}
mutation {
product(update: $data, id: 9) {
id
name
user {
id
full_name
email
}
}
}
variables {
"data": {
"name": "Apple",
"price": 1.25,
"user": {
"connect": {
"id": 5
}
}
}
}
mutation {
product(update: $data, id: 9) {
id
name
user {
id
full_name
email
}
}
}
variables {
"data": {
"name": "Apple",
"price": 1.25,
"user": {
"disconnect": {
"id": 5
}
}
}
}
mutation {
product(update: $data, id: 9) {
id
name
user_id
}
}
variables {
"data": {
"name": "Apple",
"price": 1.25,
"user": {
"disconnect": {
"id": 5
}
}
}
}
mutation {
product(update: $data, id: 2) {
id
name
user_id
}
}

config/dev.yml (new file)

@@ -0,0 +1,226 @@
app_name: "Super Graph Development"
host_port: 0.0.0.0:8080
web_ui: true
# debug, error, warn, info, none
log_level: "debug"
# enable or disable http compression (uses gzip)
http_compress: true
# When production mode is 'true' only queries
# from the allow list are permitted.
# When it's 'false' all queries are saved to
# the allow list in ./config/allow.list
production: false
# Throw a 401 on auth failure for queries that need auth
auth_fail_block: false
# Latency tracing for database queries and remote joins
# the resulting latency information is returned with the
# response
enable_tracing: true
# Watch the config folder and reload Super Graph
# with the new configs when a change is detected
reload_on_config_change: true
# File that points to the database seeding script
# seed_file: seed.js
# Path pointing to where the migrations can be found
migrations_path: ./config/migrations
# Secret key for general encryption operations like
# encrypting the cursor data
secret_key: supercalifajalistics
# CORS: A list of origins a cross-domain request can be executed from.
# If the special * value is present in the list, all origins will be allowed.
# An origin may contain a wildcard (*) to replace 0 or more
# characters (i.e.: http://*.domain.com).
cors_allowed_origins: ["*"]
# Debug Cross Origin Resource Sharing requests
cors_debug: true
# Postgres related environment Variables
# SG_DATABASE_HOST
# SG_DATABASE_PORT
# SG_DATABASE_USER
# SG_DATABASE_PASSWORD
# Auth related environment Variables
# SG_AUTH_RAILS_COOKIE_SECRET_KEY_BASE
# SG_AUTH_RAILS_REDIS_URL
# SG_AUTH_RAILS_REDIS_PASSWORD
# SG_AUTH_JWT_PUBLIC_KEY_FILE
# inflections:
# person: people
# sheep: sheep
auth:
# Can be 'rails' or 'jwt'
type: rails
cookie: _app_session
# Comment this out if you want to disable setting
# the user_id via a header for testing.
# Disable in production
creds_in_header: true
rails:
# Rails version; this is used for reading the
# various cookie formats.
version: 5.2
# Found in 'Rails.application.config.secret_key_base'
secret_key_base: 0a248500a64c01184edb4d7ad3a805488f8097ac761b76aaa6c17c01dcb7af03a2f18ba61b2868134b9c7b79a122bc0dadff4367414a2d173297bfea92be5566
# Remote cookie store. (memcache or redis)
# url: redis://redis:6379
# password: ""
# max_idle: 80
# max_active: 12000
# In most cases you don't need these
# salt: "encrypted cookie"
# sign_salt: "signed encrypted cookie"
# auth_salt: "authenticated encrypted cookie"
# jwt:
# provider: auth0
# secret: abc335bfcfdb04e50db5bb0a4d67ab9
# public_key_file: /secrets/public_key.pem
# public_key_type: ecdsa #rsa
database:
type: postgres
host: db
port: 5432
dbname: app_development
user: postgres
password: postgres
#schema: "public"
#pool_size: 10
#max_retries: 0
#log_level: "debug"
# Set session variable "user.id" to the user id
# Enable this if you need the user id in triggers, etc
set_user_id: false
# database ping timeout is used for db health checking
ping_timeout: 1m
# Define additional variables here to be used with filters
variables:
admin_account_id: "5"
# Field and table names that you wish to block
blocklist:
- ar_internal_metadata
- schema_migrations
- secret
- password
- encrypted
- token
tables:
- name: customers
remotes:
- name: payments
id: stripe_id
url: http://rails_app:3000/stripe/$id
path: data
# debug: true
pass_headers:
- cookie
set_headers:
- name: Host
value: 0.0.0.0
# - name: Authorization
# value: Bearer <stripe_api_key>
- # You can create new fields that have a
# real db table backing them
name: me
table: users
- name: deals
table: products
- name: users
columns:
- name: email
related_to: products.name
roles_query: "SELECT * FROM users WHERE id = $user_id"
roles:
- name: anon
tables:
- name: products
query:
limit: 10
columns: ["id", "name", "description" ]
aggregation: false
insert:
block: false
update:
block: false
delete:
block: false
- name: deals
query:
limit: 3
aggregation: false
- name: purchases
query:
limit: 3
aggregation: false
- name: user
tables:
- name: users
query:
filters: ["{ id: { _eq: $user_id } }"]
- name: products
query:
limit: 50
filters: ["{ user_id: { eq: $user_id } }"]
disable_functions: false
insert:
filters: ["{ user_id: { eq: $user_id } }"]
presets:
- user_id: "$user_id"
- created_at: "now"
- updated_at: "now"
update:
filters: ["{ user_id: { eq: $user_id } }"]
columns:
- id
- name
presets:
- updated_at: "now"
delete:
block: true
- name: admin
match: id = 1000
tables:
- name: users
filters: []
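The `roles_query` and the `user` role's `$user_id` filters above only take effect once a user id is attached to the request. A minimal sketch of how a caller might supply it through the core library, assuming the `core.UserIDKey` context key described in the project README; the connection string and query are placeholders:

```golang
package main

import (
	"context"
	"database/sql"
	"fmt"
	"log"

	"github.com/dosco/super-graph/core"
	_ "github.com/jackc/pgx/v4/stdlib"
)

func main() {
	db, err := sql.Open("pgx", "postgres://postgres:@localhost:5432/app_development")
	if err != nil {
		log.Fatal(err)
	}

	conf, err := core.ReadInConfig("./config/dev.yml")
	if err != nil {
		log.Fatal(err)
	}

	sg, err := core.NewSuperGraph(conf, db)
	if err != nil {
		log.Fatal(err)
	}

	// attach the authenticated user's id; the 'user' role above then
	// filters products with { user_id: { eq: $user_id } } and applies
	// the insert/update presets
	ctx := context.WithValue(context.Background(), core.UserIDKey, 5)

	res, err := sg.GraphQL(ctx, `query { products { id name } }`, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(res.Data))
}
```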

config/prod.yml (new file)

@@ -0,0 +1,67 @@
# Inherit config from this other config file
# so I only need to overwrite some values
inherits: dev
app_name: "Super Graph Production"
host_port: 0.0.0.0:8080
web_ui: false
# debug, error, warn, info, none
log_level: "info"
# enable or disable http compression (uses gzip)
http_compress: true
# When production mode is 'true' only queries
# from the allow list are permitted.
# When it's 'false' all queries are saved to
# the allow list in ./config/allow.list
production: true
# Throw a 401 on auth failure for queries that need auth
auth_fail_block: true
# Latency tracing for database queries and remote joins
# the resulting latency information is returned with the
# response
enable_tracing: true
# File that points to the database seeding script
# seed_file: seed.js
# Path pointing to where the migrations can be found
# migrations_path: migrations
# Secret key for general encryption operations like
# encrypting the cursor data
# secret_key: supercalifajalistics
# Postgres related environment Variables
# SG_DATABASE_HOST
# SG_DATABASE_PORT
# SG_DATABASE_USER
# SG_DATABASE_PASSWORD
# Auth related environment Variables
# SG_AUTH_RAILS_COOKIE_SECRET_KEY_BASE
# SG_AUTH_RAILS_REDIS_URL
# SG_AUTH_RAILS_REDIS_PASSWORD
# SG_AUTH_JWT_PUBLIC_KEY_FILE
database:
type: postgres
host: db
port: 5432
dbname: app_production
user: postgres
password: postgres
#pool_size: 10
#max_retries: 0
#log_level: "debug"
# Set session variable "user.id" to the user id
# Enable this if you need the user id in triggers, etc
set_user_id: false
# database ping timeout is used for db health checking
ping_timeout: 5m

config/seed.js (new file)

@@ -0,0 +1,116 @@
var user_count = 10
customer_count = 100
product_count = 50
purchase_count = 100
var users = []
customers = []
products = []
for (i = 0; i < user_count; i++) {
var pwd = fake.password()
var data = {
full_name: fake.name(),
avatar: fake.avatar_url(200),
phone: fake.phone(),
email: fake.email(),
password: pwd,
password_confirmation: pwd,
created_at: "now",
updated_at: "now"
}
var res = graphql(" \
mutation { \
user(insert: $data) { \
id \
} \
}", { data: data })
users.push(res.user)
}
for (i = 0; i < product_count; i++) {
var n = Math.floor(Math.random() * users.length)
var user = users[n]
var desc = [
fake.beer_style(),
fake.beer_hop(),
fake.beer_yeast(),
fake.beer_ibu(),
fake.beer_alcohol(),
fake.beer_blg(),
].join(", ")
var data = {
name: fake.beer_name(),
description: desc,
price: fake.price()
//user_id: user.id,
//created_at: "now",
//updated_at: "now"
}
var res = graphql(" \
mutation { \
product(insert: $data) { \
id \
} \
}", { data: data }, {
user_id: 5
})
products.push(res.product)
}
for (i = 0; i < customer_count; i++) {
var pwd = fake.password()
var data = {
stripe_id: "CUS-" + fake.uuid(),
full_name: fake.name(),
phone: fake.phone(),
email: fake.email(),
password: pwd,
password_confirmation: pwd,
created_at: "now",
updated_at: "now"
}
var res = graphql(" \
mutation { \
customer(insert: $data) { \
id \
} \
}", { data: data })
customers.push(res.customer)
}
for (i = 0; i < purchase_count; i++) {
var sale_type = fake.rand_string(["rented", "bought"])
if (sale_type === "rented") {
var due_date = fake.date()
var returned = fake.date()
}
var data = {
customer_id: customers[Math.floor(Math.random() * customer_count)].id,
product_id: products[Math.floor(Math.random() * product_count)].id,
sale_type: sale_type,
quantity: Math.floor(Math.random() * 10),
due_date: due_date,
returned: returned,
created_at: "now",
updated_at: "now"
}
var res = graphql(" \
mutation { \
purchase(insert: $data) { \
id \
} \
}", { data: data })
console.log(res)
}


@@ -87,6 +87,7 @@ type SuperGraph struct {
 	prepared    map[string]*preparedItem
 	roles       map[string]*Role
 	getRole     *sql.Stmt
+	rmap        map[uint64]*resolvFn
 	abacEnabled bool
 	anonExists  bool
 	qc          *qcode.Compiler
@@ -118,6 +119,10 @@ func NewSuperGraph(conf *Config, db *sql.DB) (*SuperGraph, error) {
 		return nil, err
 	}

+	if err := sg.initResolvers(); err != nil {
+		return nil, err
+	}
+
 	if len(conf.SecretKey) != 0 {
 		sk := sha256.Sum256([]byte(conf.SecretKey))
 		conf.SecretKey = ""


@@ -89,25 +89,28 @@ func (sg *SuperGraph) initCompilers() error {

 func (c *scontext) execQuery() ([]byte, error) {
 	var data []byte
-	// var st *stmt
+	var st *stmt
 	var err error

 	if c.sg.conf.UseAllowList {
-		data, _, err = c.resolvePreparedSQL()
+		data, st, err = c.resolvePreparedSQL()
 		if err != nil {
 			return nil, err
 		}
 	} else {
-		data, _, err = c.resolveSQL()
+		data, st, err = c.resolveSQL()
 		if err != nil {
 			return nil, err
 		}
 	}

-	return data, nil
+	if len(data) == 0 || st.skipped == 0 {
+		return data, nil
+	}

-	//return execRemoteJoin(st, data, c.req.hdr)
+	// return c.sg.execRemoteJoin(st, data, c.req.hdr)
+	return c.sg.execRemoteJoin(st, data, nil)
 }

 func (c *scontext) resolvePreparedSQL() ([]byte, *stmt, error) {
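The new `st.skipped == 0` early return above keys off a bitmask: each select that the SQL compiler skipped (because it must be satisfied by a remote join) has the bit at its select ID set, and `isSkipped` (shown later in this diff) tests it. A small self-contained illustration of that convention:

```golang
package main

import "fmt"

// isSkipped reports whether the select with the given ID was skipped in
// the SQL pass, i.e. its bit is set in the skipped bitmask (as in this diff)
func isSkipped(n uint32, pos uint32) bool {
	return (n & (1 << pos)) != 0
}

func main() {
	var skipped uint32 = 1 << 2 // select ID 2 needs a remote join

	for id := uint32(0); id < 4; id++ {
		fmt.Printf("select %d skipped: %v\n", id, isSkipped(skipped, id))
	}
	// skipped == 0 means no remote joins, so execQuery can return early
}
```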


@@ -1,253 +1,249 @@
 package core

-// import (
-// 	"bytes"
-// 	"errors"
-// 	"fmt"
-// 	"net/http"
-// 	"sync"
-
-// 	"github.com/cespare/xxhash/v2"
-// 	"github.com/dosco/super-graph/jsn"
-// 	"github.com/dosco/super-graph/core/internal/qcode"
-// )
-
-// func execRemoteJoin(st *stmt, data []byte, hdr http.Header) ([]byte, error) {
-// 	var err error
-
-// 	if len(data) == 0 || st.skipped == 0 {
-// 		return data, nil
-// 	}
-
-// 	sel := st.qc.Selects
-// 	h := xxhash.New()
-
-// 	// fetch the field name used within the db response json
-// 	// that are used to mark insertion points and the mapping between
-// 	// those field names and their select objects
-// 	fids, sfmap := parentFieldIds(h, sel, st.skipped)
-
-// 	// fetch the field values of the marked insertion points
-// 	// these values contain the id to be used with fetching remote data
-// 	from := jsn.Get(data, fids)
-// 	var to []jsn.Field
-
-// 	switch {
-// 	case len(from) == 1:
-// 		to, err = resolveRemote(hdr, h, from[0], sel, sfmap)
-
-// 	case len(from) > 1:
-// 		to, err = resolveRemotes(hdr, h, from, sel, sfmap)
-
-// 	default:
-// 		return nil, errors.New("something wrong no remote ids found in db response")
-// 	}
-
-// 	if err != nil {
-// 		return nil, err
-// 	}
-
-// 	var ob bytes.Buffer
-
-// 	err = jsn.Replace(&ob, data, from, to)
-// 	if err != nil {
-// 		return nil, err
-// 	}
-
-// 	return ob.Bytes(), nil
-// }
-
-// func resolveRemote(
-// 	hdr http.Header,
-// 	h *xxhash.Digest,
-// 	field jsn.Field,
-// 	sel []qcode.Select,
-// 	sfmap map[uint64]*qcode.Select) ([]jsn.Field, error) {
-
-// 	// replacement data for the marked insertion points
-// 	// key and value will be replaced by whats below
-// 	toA := [1]jsn.Field{}
-// 	to := toA[:1]
-
-// 	// use the json key to find the related Select object
-// 	k1 := xxhash.Sum64(field.Key)
-
-// 	s, ok := sfmap[k1]
-// 	if !ok {
-// 		return nil, nil
-// 	}
-// 	p := sel[s.ParentID]
-
-// 	// then use the Table nme in the Select and it's parent
-// 	// to find the resolver to use for this relationship
-// 	k2 := mkkey(h, s.Name, p.Name)
-
-// 	r, ok := rmap[k2]
-// 	if !ok {
-// 		return nil, nil
-// 	}
-
-// 	id := jsn.Value(field.Value)
-// 	if len(id) == 0 {
-// 		return nil, nil
-// 	}
-
-// 	//st := time.Now()
-
-// 	b, err := r.Fn(hdr, id)
-// 	if err != nil {
-// 		return nil, err
-// 	}
-
-// 	if len(r.Path) != 0 {
-// 		b = jsn.Strip(b, r.Path)
-// 	}
-
-// 	var ob bytes.Buffer
-
-// 	if len(s.Cols) != 0 {
-// 		err = jsn.Filter(&ob, b, colsToList(s.Cols))
-// 		if err != nil {
-// 			return nil, err
-// 		}
-// 	} else {
-// 		ob.WriteString("null")
-// 	}
-
-// 	to[0] = jsn.Field{Key: []byte(s.FieldName), Value: ob.Bytes()}
-// 	return to, nil
-// }
-
-// func resolveRemotes(
-// 	hdr http.Header,
-// 	h *xxhash.Digest,
-// 	from []jsn.Field,
-// 	sel []qcode.Select,
-// 	sfmap map[uint64]*qcode.Select) ([]jsn.Field, error) {
-
-// 	// replacement data for the marked insertion points
-// 	// key and value will be replaced by whats below
-// 	to := make([]jsn.Field, len(from))
-
-// 	var wg sync.WaitGroup
-// 	wg.Add(len(from))
-
-// 	var cerr error
-
-// 	for i, id := range from {
-
-// 		// use the json key to find the related Select object
-// 		k1 := xxhash.Sum64(id.Key)
-
-// 		s, ok := sfmap[k1]
-// 		if !ok {
-// 			return nil, nil
-// 		}
-// 		p := sel[s.ParentID]
-
-// 		// then use the Table nme in the Select and it's parent
-// 		// to find the resolver to use for this relationship
-// 		k2 := mkkey(h, s.Name, p.Name)
-
-// 		r, ok := rmap[k2]
-// 		if !ok {
-// 			return nil, nil
-// 		}
-
-// 		id := jsn.Value(id.Value)
-// 		if len(id) == 0 {
-// 			return nil, nil
-// 		}
-
-// 		go func(n int, id []byte, s *qcode.Select) {
-// 			defer wg.Done()
-
-// 			//st := time.Now()
-
-// 			b, err := r.Fn(hdr, id)
-// 			if err != nil {
-// 				cerr = fmt.Errorf("%s: %s", s.Name, err)
-// 				return
-// 			}
-
-// 			if len(r.Path) != 0 {
-// 				b = jsn.Strip(b, r.Path)
-// 			}
-
-// 			var ob bytes.Buffer
-
-// 			if len(s.Cols) != 0 {
-// 				err = jsn.Filter(&ob, b, colsToList(s.Cols))
-// 				if err != nil {
-// 					cerr = fmt.Errorf("%s: %s", s.Name, err)
-// 					return
-// 				}
-// 			} else {
-// 				ob.WriteString("null")
-// 			}
-
-// 			to[n] = jsn.Field{Key: []byte(s.FieldName), Value: ob.Bytes()}
-// 		}(i, id, s)
-// 	}
-// 	wg.Wait()
-
-// 	return to, cerr
-// }
-
-// func parentFieldIds(h *xxhash.Digest, sel []qcode.Select, skipped uint32) (
-// 	[][]byte,
-// 	map[uint64]*qcode.Select) {
-
-// 	c := 0
-// 	for i := range sel {
-// 		s := &sel[i]
-// 		if isSkipped(skipped, uint32(s.ID)) {
-// 			c++
-// 		}
-// 	}
-
-// 	// list of keys (and it's related value) to extract from
-// 	// the db json response
-// 	fm := make([][]byte, c)
-
-// 	// mapping between the above extracted key and a Select
-// 	// object
-// 	sm := make(map[uint64]*qcode.Select, c)
-// 	n := 0
-
-// 	for i := range sel {
-// 		s := &sel[i]
-
-// 		if !isSkipped(skipped, uint32(s.ID)) {
-// 			continue
-// 		}
-
-// 		p := sel[s.ParentID]
-// 		k := mkkey(h, s.Name, p.Name)
-
-// 		if r, ok := rmap[k]; ok {
-// 			fm[n] = r.IDField
-// 			n++
-
-// 			k := xxhash.Sum64(r.IDField)
-// 			sm[k] = s
-// 		}
-// 	}
-
-// 	return fm, sm
-// }
-
-// func isSkipped(n uint32, pos uint32) bool {
-// 	return ((n & (1 << pos)) != 0)
-// }
-
-// func colsToList(cols []qcode.Column) []string {
-// 	var f []string
-
-// 	for i := range cols {
-// 		f = append(f, cols[i].Name)
-// 	}
-// 	return f
-// }
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"net/http"
+	"sync"
+
+	"github.com/cespare/xxhash/v2"
+	"github.com/dosco/super-graph/core/internal/qcode"
+	"github.com/dosco/super-graph/jsn"
+)
+
+func (sg *SuperGraph) execRemoteJoin(st *stmt, data []byte, hdr http.Header) ([]byte, error) {
+	var err error
+
+	sel := st.qc.Selects
+	h := xxhash.New()
+
+	// fetch the field name used within the db response json
+	// that are used to mark insertion points and the mapping between
+	// those field names and their select objects
+	fids, sfmap := sg.parentFieldIds(h, sel, st.skipped)
+
+	// fetch the field values of the marked insertion points
+	// these values contain the id to be used with fetching remote data
+	from := jsn.Get(data, fids)
+	var to []jsn.Field
+
+	switch {
+	case len(from) == 1:
+		to, err = sg.resolveRemote(hdr, h, from[0], sel, sfmap)
+
+	case len(from) > 1:
+		to, err = sg.resolveRemotes(hdr, h, from, sel, sfmap)
+
+	default:
+		return nil, errors.New("something went wrong, no remote ids found in db response")
+	}
+
+	if err != nil {
+		return nil, err
+	}
+
+	var ob bytes.Buffer
+
+	err = jsn.Replace(&ob, data, from, to)
+	if err != nil {
+		return nil, err
+	}
+
+	return ob.Bytes(), nil
+}
+
+func (sg *SuperGraph) resolveRemote(
+	hdr http.Header,
+	h *xxhash.Digest,
+	field jsn.Field,
+	sel []qcode.Select,
+	sfmap map[uint64]*qcode.Select) ([]jsn.Field, error) {
+
+	// replacement data for the marked insertion points
+	// key and value will be replaced by what's below
+	toA := [1]jsn.Field{}
+	to := toA[:1]
+
+	// use the json key to find the related Select object
+	k1 := xxhash.Sum64(field.Key)
+
+	s, ok := sfmap[k1]
+	if !ok {
+		return nil, nil
+	}
+	p := sel[s.ParentID]
+
+	// then use the Table name in the Select and its parent
+	// to find the resolver to use for this relationship
+	k2 := mkkey(h, s.Name, p.Name)
+
+	r, ok := sg.rmap[k2]
+	if !ok {
+		return nil, nil
+	}
+
+	id := jsn.Value(field.Value)
+	if len(id) == 0 {
+		return nil, nil
+	}
+
+	//st := time.Now()
+
+	b, err := r.Fn(hdr, id)
+	if err != nil {
+		return nil, err
+	}
+
+	if len(r.Path) != 0 {
+		b = jsn.Strip(b, r.Path)
+	}
+
+	var ob bytes.Buffer
+
+	if len(s.Cols) != 0 {
+		err = jsn.Filter(&ob, b, colsToList(s.Cols))
+		if err != nil {
+			return nil, err
+		}
+	} else {
+		ob.WriteString("null")
+	}
+
+	to[0] = jsn.Field{Key: []byte(s.FieldName), Value: ob.Bytes()}
+	return to, nil
+}
+
+func (sg *SuperGraph) resolveRemotes(
+	hdr http.Header,
+	h *xxhash.Digest,
+	from []jsn.Field,
+	sel []qcode.Select,
+	sfmap map[uint64]*qcode.Select) ([]jsn.Field, error) {
+
+	// replacement data for the marked insertion points
+	// key and value will be replaced by what's below
+	to := make([]jsn.Field, len(from))
+
+	var wg sync.WaitGroup
+	wg.Add(len(from))
+
+	var cerr error
+
+	for i, id := range from {
+
+		// use the json key to find the related Select object
+		k1 := xxhash.Sum64(id.Key)
+
+		s, ok := sfmap[k1]
+		if !ok {
+			return nil, nil
+		}
+		p := sel[s.ParentID]
+
+		// then use the Table name in the Select and its parent
+		// to find the resolver to use for this relationship
+		k2 := mkkey(h, s.Name, p.Name)
+
+		r, ok := sg.rmap[k2]
+		if !ok {
+			return nil, nil
+		}
+
+		id := jsn.Value(id.Value)
+		if len(id) == 0 {
+			return nil, nil
+		}
+
+		go func(n int, id []byte, s *qcode.Select) {
+			defer wg.Done()
+
+			//st := time.Now()
+
+			b, err := r.Fn(hdr, id)
+			if err != nil {
+				cerr = fmt.Errorf("%s: %s", s.Name, err)
+				return
+			}
+
+			if len(r.Path) != 0 {
+				b = jsn.Strip(b, r.Path)
+			}
+
+			var ob bytes.Buffer
+
+			if len(s.Cols) != 0 {
+				err = jsn.Filter(&ob, b, colsToList(s.Cols))
+				if err != nil {
+					cerr = fmt.Errorf("%s: %s", s.Name, err)
+					return
+				}
+			} else {
+				ob.WriteString("null")
+			}
+
+			to[n] = jsn.Field{Key: []byte(s.FieldName), Value: ob.Bytes()}
+		}(i, id, s)
+	}
+	wg.Wait()
+
+	return to, cerr
+}
+
+func (sg *SuperGraph) parentFieldIds(h *xxhash.Digest, sel []qcode.Select, skipped uint32) (
+	[][]byte,
+	map[uint64]*qcode.Select) {
+
+	c := 0
+	for i := range sel {
+		s := &sel[i]
+		if isSkipped(skipped, uint32(s.ID)) {
+			c++
+		}
+	}
+
+	// list of keys (and its related value) to extract from
+	// the db json response
+	fm := make([][]byte, c)
+
+	// mapping between the above extracted key and a Select
+	// object
+	sm := make(map[uint64]*qcode.Select, c)
+	n := 0
+
+	for i := range sel {
+		s := &sel[i]
+
+		if !isSkipped(skipped, uint32(s.ID)) {
+			continue
+		}
+
+		p := sel[s.ParentID]
+		k := mkkey(h, s.Name, p.Name)
+
+		if r, ok := sg.rmap[k]; ok {
+			fm[n] = r.IDField
+			n++
+
+			k := xxhash.Sum64(r.IDField)
+			sm[k] = s
+		}
+	}
+
+	return fm, sm
+}
+
+func isSkipped(n uint32, pos uint32) bool {
+	return ((n & (1 << pos)) != 0)
+}
+
+func colsToList(cols []qcode.Column) []string {
+	var f []string
+
+	for i := range cols {
+		f = append(f, cols[i].Name)
+	}
+	return f
+}


@@ -6,90 +6,90 @@ import (
 	"net/http"
 	"strings"

+	"github.com/cespare/xxhash/v2"
+	"github.com/dosco/super-graph/core/internal/psql"
 	"github.com/dosco/super-graph/jsn"
 )

-var (
-	rmap map[uint64]*resolvFn
-)
-
 type resolvFn struct {
 	IDField []byte
 	Path    [][]byte
 	Fn      func(h http.Header, id []byte) ([]byte, error)
 }

-// func initResolvers() {
-// 	var err error
-// 	rmap = make(map[uint64]*resolvFn)
-
-// 	for _, t := range conf.Tables {
-// 		err = initRemotes(t)
-// 		if err != nil {
-// 			break
-// 		}
-// 	}
-
-// 	if err != nil {
-// 		errlog.Fatal().Err(err).Msg("failed to initialize resolvers")
-// 	}
-// }
-
-// func initRemotes(t Table) error {
-// 	h := xxhash.New()
-
-// 	for _, r := range t.Remotes {
-// 		// defines the table column to be used as an id in the
-// 		// remote request
-// 		idcol := r.ID
-
-// 		// if no table column specified in the config then
-// 		// use the primary key of the table as the id
-// 		if len(idcol) == 0 {
-// 			pcol, err := pcompile.IDColumn(t.Name)
-// 			if err != nil {
-// 				return err
-// 			}
-// 			idcol = pcol.Key
-// 		}
-// 		idk := fmt.Sprintf("__%s_%s", t.Name, idcol)
-
-// 		// register a relationship between the remote data
-// 		// and the database table
-
-// 		val := &psql.DBRel{Type: psql.RelRemote}
-// 		val.Left.Col = idcol
-// 		val.Right.Col = idk
-
-// 		err := pcompile.AddRelationship(strings.ToLower(r.Name), t.Name, val)
-// 		if err != nil {
-// 			return err
-// 		}
-
-// 		// the function thats called to resolve this remote
-// 		// data request
-// 		fn := buildFn(r)
-
-// 		path := [][]byte{}
-// 		for _, p := range strings.Split(r.Path, ".") {
-// 			path = append(path, []byte(p))
-// 		}
-
-// 		rf := &resolvFn{
-// 			IDField: []byte(idk),
-// 			Path:    path,
-// 			Fn:      fn,
-// 		}
-
-// 		// index resolver obj by parent and child names
-// 		rmap[mkkey(h, r.Name, t.Name)] = rf
-
-// 		// index resolver obj by IDField
-// 		rmap[xxhash.Sum64(rf.IDField)] = rf
-// 	}
-
-// 	return nil
-// }
+func (sg *SuperGraph) initResolvers() error {
+	var err error
+	sg.rmap = make(map[uint64]*resolvFn)
+
+	for _, t := range sg.conf.Tables {
+		err = sg.initRemotes(t)
+		if err != nil {
+			break
+		}
+	}
+
+	if err != nil {
+		return fmt.Errorf("failed to initialize resolvers: %v", err)
+	}
+
+	return nil
+}
+
+func (sg *SuperGraph) initRemotes(t Table) error {
+	h := xxhash.New()
+
+	for _, r := range t.Remotes {
+		// defines the table column to be used as an id in the
+		// remote request
+		idcol := r.ID
+
+		// if no table column specified in the config then
+		// use the primary key of the table as the id
+		if len(idcol) == 0 {
+			pcol, err := sg.pc.IDColumn(t.Name)
+			if err != nil {
+				return err
+			}
+			idcol = pcol.Key
+		}
+		idk := fmt.Sprintf("__%s_%s", t.Name, idcol)
+
+		// register a relationship between the remote data
+		// and the database table
+
+		val := &psql.DBRel{Type: psql.RelRemote}
+		val.Left.Col = idcol
+		val.Right.Col = idk
+
+		err := sg.pc.AddRelationship(sanitize(r.Name), t.Name, val)
+		if err != nil {
+			return err
+		}
+
+		// the function that's called to resolve this remote
+		// data request
+		fn := buildFn(r)
+
+		path := [][]byte{}
+		for _, p := range strings.Split(r.Path, ".") {
+			path = append(path, []byte(p))
+		}
+
+		rf := &resolvFn{
+			IDField: []byte(idk),
+			Path:    path,
+			Fn:      fn,
+		}
+
+		// index resolver obj by parent and child names
+		sg.rmap[mkkey(h, r.Name, t.Name)] = rf
+
+		// index resolver obj by IDField
+		sg.rmap[xxhash.Sum64(rf.IDField)] = rf
+	}
+
+	return nil
+}
@@ -114,12 +114,9 @@ func buildFn(r Remote) func(http.Header, []byte) ([]byte, error) {
 		req.Header.Set(v, hdr.Get(v))
 	}

-	// logger.Debug().Str("uri", uri).Msg("Remote Join")
-
 	res, err := client.Do(req)
 	if err != nil {
-		// errlog.Error().Err(err).Msgf("Failed to connect to: %s", uri)
-		return nil, err
+		return nil, fmt.Errorf("failed to connect to '%s': %v", uri, err)
 	}
 	defer res.Body.Close()

core/utils.go (new file)

@@ -0,0 +1,15 @@
package core
import (
"github.com/cespare/xxhash/v2"
)
// nolint: errcheck
func mkkey(h *xxhash.Digest, k1 string, k2 string) uint64 {
h.WriteString(k1)
h.WriteString(k2)
v := h.Sum64()
h.Reset()
return v
}
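A short usage sketch: `mkkey` hashes a (child, parent) name pair into the `uint64` key used to index `sg.rmap`, so the same pair always finds the same resolver. The names below are the remote/table pair from the example config; the standalone `main` wrapper is illustrative:

```golang
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

// same helper as core/utils.go above
func mkkey(h *xxhash.Digest, k1 string, k2 string) uint64 {
	h.WriteString(k1)
	h.WriteString(k2)
	v := h.Sum64()
	h.Reset()
	return v
}

func main() {
	h := xxhash.New()
	k1 := mkkey(h, "payments", "customers") // remote name + table name
	k2 := mkkey(h, "payments", "customers")
	fmt.Println(k1 == k2) // true: a deterministic key for the resolver map
}
```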


@@ -1,5 +1,5 @@
 <template>
-  <div class="shadow bg-white p-4 flex items-start" :class="className">
+  <div class="shadow p-4 flex items-start" :class="className">
     <slot name="image"></slot>
     <div class="pl-4">
       <h2 class="p-0">


@@ -2,33 +2,33 @@
 <div>
 <main aria-labelledby="main-title" >
 <Navbar />
+<div style="height: 3.6rem"></div>

-<div class="container mx-auto mt-24">
+<div class="container mx-auto pt-4">
 <div class="text-center">
-<div class="text-center text-4xl text-gray-800 leading-tight">
+<div class="text-center text-3xl md:text-4xl text-black leading-tight font-semibold">
 Fetch data without code
 </div>

 <NavLink
-class="inline-block px-4 py-3 my-8 bg-blue-600 text-blue-100 font-bold rounded"
+class="inline-block px-4 py-3 my-8 bg-blue-600 text-white font-bold rounded"
 :item="actionLink"
 />

 <a
-class="px-4 py-3 my-8 border-2 border-gray-500 text-gray-600 font-bold rounded"
+class="px-4 py-3 my-8 border-2 border-blue-600 text-blue-600 font-bold rounded"
 href="https://github.com/dosco/super-graph"
 target="_blank"
 >Github</a>
 </div>
 </div>

-<div class="container mx-auto mb-8">
-<div class="flex flex-wrap">
-<div class="w-100 md:w-1/2 bg-indigo-300 text-indigo-800 text-lg p-4">
-<div class="text-center text-2xl font-bold pb-2">Before, struggle with SQL</div>
-<pre>
+<div class="container mx-auto mb-8 mt-0 md:mt-20 bg-green-100">
+<div class="flex flex-wrap">
+<div class="w-100 md:w-1/2 border border-green-500 text-gray-600 text-sm md:text-lg p-6">
+<div class="text-xl font-bold pb-4">Before, struggle with SQL</div>
+<pre>
 type User struct {
 gorm.Model
 Profile Profile
@@ -50,8 +50,9 @@ db.Model(&user).
 and more ...
 </pre>
 </div>
-<div class="w-100 md:w-1/2 bg-green-300 text-black text-lg p-4">
-<div class="text-center text-2xl font-bold pb-2">With Super Graph, just ask.</div>
+
+<div class="w-100 md:w-1/2 border border-l md:border-l-0 border-green-500 text-blue-900 text-sm md:text-lg p-6">
+<div class="text-xl font-bold pb-4">With Super Graph, just ask.</div>
 <pre>
 query {
 user(id: 5) {
@@ -59,26 +60,24 @@ query {
 first_name
 last_name
 picture_url
-}
-posts(first: 20, order_by: { score: desc }) {
-slug
-title
-created_at
-cached_votes_total
-vote(where: { user_id: { eq: $user_id } }) {
-id
-}
-author { id name }
-tags { id name }
-}
-posts_cursor
+posts(first: 20, order_by: { score: desc }) {
+slug
+title
+created_at
+votes_total
+votes { created_at }
+author { id name }
+tags { id name }
+}
+posts_cursor
+}
 }
 </pre>
 </div>
 </div>
 </div>

-<div>
+<div class="mt-0 md:mt-20">
 <div
 class="flex flex-wrap mx-2 md:mx-20"
 v-if="data.features && data.features.length"
@@ -89,24 +88,21 @@ query {
 :key="index"
 >
 <div class="p-8">
-<h2 class="md:text-xl text-blue-800 font-medium border-0 mb-1">{{ feature.title }}</h2>
-<p class="md:text-xl text-gray-700 leading-snug">{{ feature.details }}</p>
+<h2 class="text-lg uppercase border-0">{{ feature.title }}</h2>
+<div class="text-xl text-gray-900 leading-snug">{{ feature.details }}</div>
 </div>
 </div>
 </div>
 </div>

-<div class="bg-gray-100 mt-10">
-<div class="container mx-auto px-10 md:px-0 py-32">
-<div class="pb-8 hidden md:flex justify-center">
+<div class="pt-0 md:pt-20">
+<div class="container mx-auto p-10">
+<div class="flex justify-center pb-20">
 <img src="arch-basic.svg">
 </div>
-<h1 class="uppercase font-semibold text-xl text-blue-800 text-center mb-4">
-What is Super Graph?
-</h1>
 <div class="text-2xl md:text-3xl">
 Super Graph is a library and service that fetches data from any Postgres database using just GraphQL. No more struggling with ORMs and SQL to wrangle data out of the database. No more having to figure out the right joins or making inefficient queries. However complex the GraphQL, Super Graph will always generate just one single efficient SQL query. The goal is to save you time and money so you can focus on your app's core value.
 </div>
@@ -114,17 +110,7 @@ query {
 </div>

-<div class="container mx-auto flex flex-wrap">
-<div class="md:w-1/2">
-<img src="/graphql.png">
-</div>
-<div class="md:w-1/2">
-<img src="/json.png">
-</div>
-</div>
-
-<div class="mt-10 py-10 md:py-20">
+<div class="pt-20">
 <div class="container mx-auto px-10 md:px-0">
 <h1 class="uppercase font-semibold text-2xl text-blue-800 text-center">
 Try Super Graph
@@ -133,20 +119,18 @@ query {
 <h1 class="uppercase font-semibold text-lg text-gray-800">
 Deploy as a service using docker
 </h1>
-<div class="bg-gray-800 text-indigo-300 p-4 rounded">
+<div class="p-4 rounded bg-black text-white">
 <pre>$ git clone https://github.com/dosco/super-graph && cd super-graph && make install</pre>
 <pre>$ super-graph new blog; cd blog</pre>
 <pre>$ docker-compose run blog_api ./super-graph db:setup</pre>
 <pre>$ docker-compose up</pre>
 </div>

-<div class="border-t mt-4 pb-4"></div>
-
 <h1 class="uppercase font-semibold text-lg text-gray-800">
 Or use it with your own code
 </h1>
 <div class="text-md">
-<pre class="bg-gray-800 text-indigo-300 p-4 rounded">
+<pre class="p-4 rounded bg-black text-white">
 package main

 import (
@@ -194,7 +178,7 @@ func main() {
 </div>
 </div>

-<div class="bg-gray-100 mt-10">
+<div class="pt-0 md:pt-20">
 <div class="container mx-auto px-10 md:px-0 py-32">
 <h1 class="uppercase font-semibold text-xl text-blue-800 mb-4">
 The story of {{ data.heroText }}
@@ -245,7 +229,7 @@ func main() {
 </div>
 -->

-<div class="border-t py-10">
+<div class="pt-0 md:pt-20">
 <div class="block md:hidden w-100">
 <iframe src='https://www.youtube.com/embed/MfPL2A-DAJk' frameborder='0' allowfullscreen style="width: 100%; height: 250px;">
 </iframe>
@@ -267,7 +251,101 @@ func main() {
 </div>
 </div>

-<div class="bg-gray-200 mt-10">
+<div class="container mx-auto pt-0 md:pt-20">
+<div class="flex flex-wrap bg-green-100">
+<div class="w-100 md:w-1/2 border border-green-500 text-gray-600 text-sm md:text-lg p-6">
+<div class="text-xl font-bold pb-4">No more joins, json or ORMs, just use GraphQL. Fetch all the data you want in the structure you need.</div>
+<pre>
+query {
+  thread {
+    slug
+    title
+    published
+    createdAt : created_at
+    totalVotes : cached_votes_total
+    totalPosts : cached_posts_total
+    vote : thread_vote(where: { user_id: { eq: $user_id } }) {
+      created_at
+    }
+    topics {
+      slug
+      name
+    }
+    author : me {
+      slug
+    }
+    posts(first: 1, order_by: { score: desc }) {
+      slug
+      body
+      published
+      createdAt : created_at
+      totalVotes : cached_votes_total
+      totalComments : cached_comments_total
+      vote {
+        created_at
+      }
+      author : user {
+        slug
+        firstName : first_name
+        lastName : last_name
+      }
+    }
+    posts_cursor
+  }
+}
+</pre>
+</div>
+<div class="w-100 md:w-1/2 border border-l md:border-l-0 border-green-500 text-blue-900 text-sm md:text-lg p-6">
+<div class="text-xl font-bold pb-4">Instant results using a single highly optimized SQL. It's just that simple.</div>
+<pre>
+{
+  "data": {
+    "thread": {
+      "slug": "eveniet-ex-24",
+      "vote": null,
+      "posts": [
+        {
+          "body": "Dolor laborum harum sed sit est ducimus temporibus velit non nobis repudiandae nobis suscipit commodi voluptatem debitis sed voluptas sequi officia.",
+          "slug": "illum-in-voluptas-1418",
+          "vote": null,
+          "author": {
+            "slug": "sigurd-kemmer",
+            "lastName": "Effertz",
+            "firstName": "Brandt"
+          },
+          "createdAt": "2020-04-07T04:22:42.115874+00:00",
+          "published": true,
+          "totalVotes": 0,
+          "totalComments": 2
+        }
+      ],
+      "title": "In aut qui deleniti quia dolore quasi porro tenetur voluptatem ut adita alias fugit explicabo.",
+      "author": null,
+      "topics": [
+        {
+          "name": "CloudRun",
+          "slug": "cloud-run"
+        },
+        {
+          "name": "Postgres",
+          "slug": "postgres"
+        }
+      ],
+      "createdAt": "2020-04-07T04:22:38.099482+00:00",
+      "published": true,
+      "totalPosts": 24,
+      "totalVotes": 0,
+      "posts_cursor": "mpeBl6L+QfJHc3cmLkLDj9pOdEZYTt5KQtLsazG3TLITB3hJhg=="
+    }
+  }
+}
+</pre>
+</div>
+</div>
+</div>
+
+<div class="pt-0 md:pt-20">
 <div class="container mx-auto px-10 md:px-0 py-32">
 <h1 class="uppercase font-semibold text-xl text-blue-800 mb-4">
 Build Secure Apps
@@ -292,8 +370,8 @@ func main() {
 </div>
 </div>

-<div class="">
-<div class="container mx-auto px-10 md:px-0 py-32">
+<div class="pt-0 md:py-20">
+<div class="container mx-auto">
 <h1 class="uppercase font-semibold text-xl text-blue-800 mb-4">
 More Features
 </h1>


@@ -1,6 +1,9 @@
 @tailwind base;

 @css {
+	body, .navbar, .navbar .links {
+		@apply bg-white text-black border-0 !important;
+	}

 	h1 {
 		@apply font-semibold text-3xl border-0 py-4
 	}

go.mod

@@ -6,6 +6,7 @@ require (
 	github.com/adjust/gorails v0.0.0-20171013043634-2786ed0c03d3
 	github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b
 	github.com/brianvoe/gofakeit v3.18.0+incompatible
+	github.com/cespare/xxhash v1.1.0
 	github.com/cespare/xxhash/v2 v2.1.0
 	github.com/daaku/go.zipexe v1.0.1 // indirect
 	github.com/dgrijalva/jwt-go v3.2.0+incompatible