Compare commits


5 Commits

22 changed files with 1717 additions and 419 deletions

.gitignore vendored (2 changed lines)

@ -29,10 +29,10 @@
.release
main
super-graph
supergraph
*-fuzz.zip
crashers
suppressions
release
.gofuzz
*-fuzz.zip


@ -5,6 +5,8 @@ COPY /cmd/internal/serv/web/ ./
RUN yarn
RUN yarn build
# stage: 2
FROM golang:1.14-alpine as go-build
RUN apk update && \
@ -31,6 +33,8 @@ RUN echo "Compressing binary, will take a bit of time..." && \
upx --ultra-brute -qq super-graph && \
upx -t super-graph
# stage: 3
FROM alpine:latest
WORKDIR /


@ -12,10 +12,10 @@ endif
export GO111MODULE := on
# Build-time Go variables
version = github.com/dosco/super-graph/serv.version
gitBranch = github.com/dosco/super-graph/serv.gitBranch
lastCommitSHA = github.com/dosco/super-graph/serv.lastCommitSHA
lastCommitTime = github.com/dosco/super-graph/serv.lastCommitTime
version = github.com/dosco/super-graph/cmd/internal/serv.version
gitBranch = github.com/dosco/super-graph/cmd/internal/serv.gitBranch
lastCommitSHA = github.com/dosco/super-graph/cmd/internal/serv.lastCommitSHA
lastCommitTime = github.com/dosco/super-graph/cmd/internal/serv.lastCommitTime
BUILD_FLAGS ?= -ldflags '-s -w -X ${lastCommitSHA}=${BUILD} -X "${lastCommitTime}=${BUILD_DATE}" -X "${version}=${BUILD_VERSION}" -X ${gitBranch}=${BUILD_BRANCH}'
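For context, `-X` can only set package-level string variables that already exist at the named import path, so the `cmd/internal/serv` package presumably declares them along these lines (a sketch; that file itself isn't part of this diff):

```go
package serv

// Populated at build time via -ldflags "-X <path>=<value>"; the fully
// qualified variable names must match those used in the Makefile.
var (
	version        string
	gitBranch      string
	lastCommitSHA  string
	lastCommitTime string
)
```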
@ -77,11 +77,10 @@ clean:
run: clean
@go run $(BUILD_FLAGS) main.go $(ARGS)
install:
@echo $(GOPATH)
install: clean build
@echo "Commit Hash: `git rev-parse HEAD`"
@echo "Old Hash: `shasum $(GOPATH)/bin/$(BINARY) 2>/dev/null | cut -c -32`"
@go install $(BUILD_FLAGS) cmd
@mv $(BINARY) $(GOPATH)/bin/$(BINARY)
@echo "New Hash:" `shasum $(GOPATH)/bin/$(BINARY) 2>/dev/null | cut -c -32`
uninstall: clean


@ -1,26 +1,74 @@
<!-- <a href="https://supergraph.dev"><img src="https://supergraph.dev/hologram.svg" width="100" height="100" align="right" /></a> -->
<img src="docs/.vuepress/public/super-graph.png" width="250" />
<img src="docs/guide/.vuepress/public/super-graph.png" width="250" />
### Build web products faster. Secure high performance GraphQL
![Apache Public License 2.0](https://img.shields.io/github/license/dosco/super-graph.svg)
![Docker build](https://img.shields.io/docker/cloud/build/dosco/super-graph.svg)
![Cloud native](https://img.shields.io/badge/cloud--native-enabled-blue.svg)
[![GoDoc](https://img.shields.io/badge/godoc-reference-5272B4.svg)](https://pkg.go.dev/github.com/dosco/super-graph/core?tab=doc)
![Apache 2.0](https://img.shields.io/github/license/dosco/super-graph.svg?style=flat-square)
![Docker build](https://img.shields.io/docker/cloud/build/dosco/super-graph.svg?style=flat-square)
![Cloud native](https://img.shields.io/badge/cloud--native-enabled-blue.svg?style=flat-square)
[![Discord Chat](https://img.shields.io/discord/628796009539043348.svg)](https://discord.gg/6pSWCTZ)
## What's Super Graph?
## What is Super Graph
Designed to 100x your developer productivity. Super Graph will instantly, and without you writing code, provide you a high performance GraphQL API for your Postgres DB. GraphQL queries are compiled into a single fast SQL query. Super Graph is a Go library and a service; use it in your own code or run it as a separate service.
Super Graph is designed to 100x your developer productivity. It will instantly, and without you writing code, provide a high performance and secure GraphQL API for your Postgres DB. GraphQL queries are translated into a single fast SQL query. No more writing API code; as you develop
your web frontend, just make the query you need and Super Graph will do the rest.
## Using it as a service
Super Graph has a rich feature set like integrating with your existing Ruby on Rails apps, joining your DB with data from remote APIs, role and attribute based access control, support for JWT tokens, built-in DB mutations and seeding, and a lot more.
```console
git clone https://github.com/dosco/super-graph
cd ./super-graph
make install
super-graph new <app_name>
```
![GraphQL](docs/.vuepress/public/graphql.png?raw=true "")
## Using it in your own code
## The story of Super Graph?
```golang
package main

import (
	"context"
	"database/sql"
	"fmt"
	"log"

	"github.com/dosco/super-graph/core"
	_ "github.com/jackc/pgx/v4/stdlib"
)

func main() {
	db, err := sql.Open("pgx", "postgres://postgres:@localhost:5432/example_db")
	if err != nil {
		log.Fatal(err)
	}

	conf, err := core.ReadInConfig("./config/dev.yml")
	if err != nil {
		log.Fatal(err)
	}

	sg, err := core.NewSuperGraph(conf, db)
	if err != nil {
		log.Fatal(err)
	}

	query := `
	query {
		posts {
			id
			title
		}
	}`

	res, err := sg.GraphQL(context.Background(), query, nil)
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(string(res.Data))
}
```
## About Super Graph
After working on several products over my career, I've found that we spend way too much time building API backends. Most APIs also require constant updating, which costs real time and money.
@ -37,6 +85,7 @@ This compiler is what sits at the heart of Super Graph with layers of useful fun
- Complex nested queries and mutations
- Auto learns database tables and relationships
- Role and Attribute based access control
- Efficient opaque cursor-based pagination (see the sketch after this list)
- Full text search and aggregations
- JWT tokens supported (Auth0, etc)
- Join database queries with remote REST APIs
@ -50,15 +99,6 @@ This compiler is what sits at the heart of Super Graph with layers of useful fun
- Database seeding tool
- Works with Postgres and YugabyteDB
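Referenced from the pagination bullet above: a sketch of how an opaque-cursor query could look through the Go API shown earlier. The `first` argument and the `posts_cursor` field follow the homepage example later in this diff; how the cursor is passed back in is an assumption here.

```go
// Assumes sg (*core.SuperGraph) from the README example above.
query := `
query {
	posts(first: 20) {
		id
		title
	}
	posts_cursor
}`

res, err := sg.GraphQL(context.Background(), query, nil)
if err != nil {
	log.Fatal(err)
}
// res.Data includes an opaque "posts_cursor" value that can be sent
// back with the next request to fetch the following page.
fmt.Println(string(res.Data))
```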
## Get started
```
git clone https://github.com/dosco/super-graph
cd ./super-graph
make install
super-graph new <app_name>
```
## Documentation


@ -55,7 +55,7 @@ func cmdDBReset(cmd *cobra.Command, args []string) {
func cmdDBCreate(cmd *cobra.Command, args []string) {
initConfOnce()
db, err := initDB(conf)
db, err := initDB(conf, false)
if err != nil {
log.Fatalf("ERR failed to connect to database: %s", err)
}
@ -74,7 +74,7 @@ func cmdDBCreate(cmd *cobra.Command, args []string) {
func cmdDBDrop(cmd *cobra.Command, args []string) {
initConfOnce()
db, err := initDB(conf)
db, err := initDB(conf, false)
if err != nil {
log.Fatalf("ERR failed to connect to database: %s", err)
}
@ -131,7 +131,7 @@ func cmdDBMigrate(cmd *cobra.Command, args []string) {
initConfOnce()
dest := args[0]
conn, err := initDB(conf)
conn, err := initDB(conf, true)
if err != nil {
log.Fatalf("ERR failed to connect to database: %s", err)
}
@ -223,7 +223,7 @@ func cmdDBMigrate(cmd *cobra.Command, args []string) {
func cmdDBStatus(cmd *cobra.Command, args []string) {
initConfOnce()
db, err := initDB(conf)
db, err := initDB(conf, true)
if err != nil {
log.Fatalf("ERR failed to connect to database: %s", err)
}
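The new boolean explains itself in the call sites above: `db:create` and `db:drop` pass `false` so the connection doesn't select the target database (which may not exist yet), while `migrate` and `status` pass `true`. A hypothetical sketch of the create step that follows in `cmdDBCreate` (the actual statement sits outside this hunk):

```go
// Connected with useDB=false, i.e. to the server's default database,
// so the app database can be created before it exists.
stmt := fmt.Sprintf(`CREATE DATABASE "%s"`, conf.DB.DBName)
if _, err := db.Exec(stmt); err != nil {
	log.Fatalf("ERR failed to create database: %s", err)
}
```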


@ -28,7 +28,7 @@ func cmdDBSeed(cmd *cobra.Command, args []string) {
conf.Production = false
db, err = initDB(conf)
db, err = initDB(conf, true)
if err != nil {
log.Fatalf("ERR failed to connect to database: %s", err)
}


@ -19,7 +19,7 @@ func cmdServ(cmd *cobra.Command, args []string) {
initWatcher()
db, err = initDB(conf)
db, err = initDB(conf, true)
if err != nil {
fatalInProd(err, "failed to connect to database")
}


@ -2,11 +2,12 @@ package serv
import (
"database/sql"
"fmt"
"path"
"time"
_ "github.com/jackc/pgx/v4/stdlib"
"github.com/jackc/pgx/v4"
"github.com/jackc/pgx/v4/stdlib"
//_ "github.com/jackc/pgx/v4/stdlib"
)
func initConf() (*Config, error) {
@ -78,67 +79,19 @@ func initConf() (*Config, error) {
return c, nil
}
func initDB(c *Config) (*sql.DB, error) {
func initDB(c *Config, useDB bool) (*sql.DB, error) {
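// useDB controls whether the connection selects c.DB.DBName up front;
// db:create and db:drop pass false since the database may not exist yet.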
var db *sql.DB
var err error
cs := fmt.Sprintf("postgres://%s:%s@%s:%d/%s",
c.DB.User, c.DB.Password,
c.DB.Host, c.DB.Port, c.DB.DBName)
// cs := fmt.Sprintf("host=%s port=%d user=%s password=%s dbname=%s",
// c.DB.Host, c.DB.Port,
// c.DB.User, c.DB.Password,
// c.DB.DBName)
for i := 1; i < 10; i++ {
db, err = sql.Open("pgx", cs)
if err == nil {
break
}
time.Sleep(time.Duration(i*100) * time.Millisecond)
}
if err != nil {
return nil, err
}
return db, nil
// config, _ := pgxpool.ParseConfig("")
// config.ConnConfig.Host = c.DB.Host
// config.ConnConfig.Port = c.DB.Port
// config.ConnConfig.Database = c.DB.DBName
// config.ConnConfig.User = c.DB.User
// config.ConnConfig.Password = c.DB.Password
// config.ConnConfig.RuntimeParams = map[string]string{
// "application_name": c.AppName,
// "search_path": c.DB.Schema,
// }
// switch c.LogLevel {
// case "debug":
// config.ConnConfig.LogLevel = pgx.LogLevelDebug
// case "info":
// config.ConnConfig.LogLevel = pgx.LogLevelInfo
// case "warn":
// config.ConnConfig.LogLevel = pgx.LogLevelWarn
// case "error":
// config.ConnConfig.LogLevel = pgx.LogLevelError
// default:
// config.ConnConfig.LogLevel = pgx.LogLevelNone
// }
// config.ConnConfig.Logger = NewSQLLogger(logger)
// // if c.DB.MaxRetries != 0 {
// // opt.MaxRetries = c.DB.MaxRetries
// // }
// if c.DB.PoolSize != 0 {
// config.MaxConns = conf.DB.PoolSize
// }
// var db *pgxpool.Pool
// var err error
// fmt.Println(">>", cs)
// for i := 1; i < 10; i++ {
// db, err = pgxpool.ConnectConfig(context.Background(), config)
// db, err = sql.Open("pgx", cs)
// if err == nil {
// break
// }
@ -150,4 +103,55 @@ func initDB(c *Config) (*sql.DB, error) {
// }
// return db, nil
config, _ := pgx.ParseConfig("")
config.Host = c.DB.Host
config.Port = c.DB.Port
config.User = c.DB.User
config.Password = c.DB.Password
config.RuntimeParams = map[string]string{
"application_name": c.AppName,
"search_path": c.DB.Schema,
}
if useDB {
config.Database = c.DB.DBName
}
// switch c.LogLevel {
// case "debug":
// config.LogLevel = pgx.LogLevelDebug
// case "info":
// config.LogLevel = pgx.LogLevelInfo
// case "warn":
// config.LogLevel = pgx.LogLevelWarn
// case "error":
// config.LogLevel = pgx.LogLevelError
// default:
// config.LogLevel = pgx.LogLevelNone
// }
//config.Logger = NewSQLLogger(logger)
// if c.DB.MaxRetries != 0 {
// opt.MaxRetries = c.DB.MaxRetries
// }
// if c.DB.PoolSize != 0 {
// config.MaxConns = conf.DB.PoolSize
// }
for i := 1; i < 10; i++ {
db = stdlib.OpenDB(*config)
if db != nil {
break
}
time.Sleep(time.Duration(i*100) * time.Millisecond)
}
if err != nil {
return nil, err
}
return db, nil
}
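Worth noting (an observation, not part of this diff): `stdlib.OpenDB` constructs the handle lazily and doesn't dial, so checking the returned value says nothing about connectivity; a ping with a timeout is the reliable readiness check, e.g.:

```go
// Sketch: verify the database is actually reachable before returning.
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
if err := db.PingContext(ctx); err != nil {
	return nil, err
}
```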


@ -10,20 +10,9 @@ import (
"strings"
"sync"
"github.com/cespare/xxhash/v2"
"github.com/dosco/super-graph/jsn"
)
// nolint: errcheck
func mkkey(h *xxhash.Digest, k1 string, k2 string) uint64 {
h.WriteString(k1)
h.WriteString(k2)
v := h.Sum64()
h.Reset()
return v
}
// nolint: errcheck
func gqlHash(b string, vars []byte, role string) string {
b = strings.TrimSpace(b)

config/allow.list (new file, 755 lines)

@ -0,0 +1,755 @@
# http://localhost:8080/
variables {
"data": [
{
"name": "Protect Ya Neck",
"created_at": "now",
"updated_at": "now"
},
{
"name": "Enter the Wu-Tang",
"created_at": "now",
"updated_at": "now"
}
]
}
mutation {
products(insert: $data) {
id
name
}
}
variables {
"update": {
"name": "Wu-Tang",
"description": "No description needed"
},
"product_id": 1
}
mutation {
products(id: $product_id, update: $update) {
id
name
description
}
}
query {
users {
id
email
picture: avatar
products(limit: 2, where: {price: {gt: 10}}) {
id
name
description
}
}
}
variables {
"data": [
{
"name": "Gumbo1",
"created_at": "now",
"updated_at": "now"
},
{
"name": "Gumbo2",
"created_at": "now",
"updated_at": "now"
}
]
}
mutation {
products(id: 199, delete: true) {
id
name
}
}
query {
products {
id
name
user {
email
}
}
}
variables {
"data": {
"product_id": 5
}
}
mutation {
products(id: $product_id, delete: true) {
id
name
}
}
query {
products {
id
name
price
users {
email
}
}
}
variables {
"data": {
"email": "gfk@myspace.com",
"full_name": "Ghostface Killah",
"created_at": "now",
"updated_at": "now"
}
}
mutation {
user(insert: $data) {
id
}
}
variables {
"update": {
"name": "Helloo",
"description": "World \u003c\u003e"
},
"user": 123
}
mutation {
products(id: 5, update: $update) {
id
name
description
}
}
variables {
"data": {
"name": "WOOO",
"price": 50.5
}
}
mutation {
products(insert: $data) {
id
name
}
}
query getProducts {
products {
id
name
price
description
}
}
query {
deals {
id
name
price
}
}
variables {
"beer": "smoke"
}
query beerSearch {
products(search: $beer) {
id
name
search_rank
search_headline_description
}
}
query {
user {
id
full_name
}
}
variables {
"data": {
"email": "goo1@rug.com",
"full_name": "The Dude",
"created_at": "now",
"updated_at": "now",
"product": {
"name": "Apple",
"price": 1.25,
"created_at": "now",
"updated_at": "now"
}
}
}
mutation {
user(insert: $data) {
id
full_name
email
product {
id
name
price
}
}
}
variables {
"data": {
"email": "goo12@rug.com",
"full_name": "The Dude",
"created_at": "now",
"updated_at": "now",
"product": [
{
"name": "Banana 1",
"price": 1.1,
"created_at": "now",
"updated_at": "now"
},
{
"name": "Banana 2",
"price": 2.2,
"created_at": "now",
"updated_at": "now"
}
]
}
}
mutation {
user(insert: $data) {
id
full_name
email
products {
id
name
price
}
}
}
variables {
"data": {
"name": "Banana 3",
"price": 1.1,
"created_at": "now",
"updated_at": "now",
"user": {
"email": "a2@a.com",
"full_name": "The Dude",
"created_at": "now",
"updated_at": "now"
}
}
}
mutation {
products(insert: $data) {
id
name
price
user {
id
full_name
email
}
}
}
variables {
"update": {
"name": "my_name",
"description": "my_desc"
}
}
mutation {
product(id: 15, update: $update, where: {id: {eq: 1}}) {
id
name
}
}
variables {
"update": {
"name": "my_name",
"description": "my_desc"
}
}
mutation {
product(update: $update, where: {id: {eq: 1}}) {
id
name
}
}
variables {
"update": {
"name": "my_name 2",
"description": "my_desc 2"
}
}
mutation {
product(update: $update, where: {id: {eq: 1}}) {
id
name
description
}
}
variables {
"data": {
"sale_type": "tuutuu",
"quantity": 5,
"due_date": "now",
"customer": {
"email": "thedude1@rug.com",
"full_name": "The Dude"
},
"product": {
"name": "Apple",
"price": 1.25
}
}
}
mutation {
purchase(update: $data, id: 5) {
sale_type
quantity
due_date
customer {
id
full_name
email
}
product {
id
name
price
}
}
}
variables {
"data": {
"email": "thedude@rug.com",
"full_name": "The Dude",
"created_at": "now",
"updated_at": "now",
"product": {
"where": {
"id": 2
},
"name": "Apple",
"price": 1.25,
"created_at": "now",
"updated_at": "now"
}
}
}
mutation {
user(update: $data, where: {id: {eq: 8}}) {
id
full_name
email
product {
id
name
price
}
}
}
variables {
"data": {
"email": "thedude@rug.com",
"full_name": "The Dude",
"created_at": "now",
"updated_at": "now",
"product": {
"where": {
"id": 2
},
"name": "Apple",
"price": 1.25,
"created_at": "now",
"updated_at": "now"
}
}
}
query {
user(where: {id: {eq: 8}}) {
id
product {
id
name
price
}
}
}
variables {
"data": {
"name": "Apple",
"price": 1.25,
"created_at": "now",
"updated_at": "now",
"user": {
"email": "thedude@rug.com"
}
}
}
query {
user {
email
}
}
variables {
"data": {
"name": "Apple",
"price": 1.25,
"created_at": "now",
"updated_at": "now",
"user": {
"email": "booboo@demo.com"
}
}
}
mutation {
product(update: $data, id: 6) {
id
name
user {
id
full_name
email
}
}
}
variables {
"data": {
"name": "Apple",
"price": 1.25,
"created_at": "now",
"updated_at": "now",
"user": {
"email": "booboo@demo.com"
}
}
}
query {
product(id: 6) {
id
name
user {
id
full_name
email
}
}
}
variables {
"data": {
"email": "thedude123@rug.com",
"full_name": "The Dude",
"created_at": "now",
"updated_at": "now",
"product": {
"connect": {
"id": 7
},
"disconnect": {
"id": 8
}
}
}
}
mutation {
user(update: $data, id: 6) {
id
full_name
email
product {
id
name
price
}
}
}
variables {
"data": {
"name": "Apple",
"price": 1.25,
"created_at": "now",
"updated_at": "now",
"user": {
"connect": {
"id": 5,
"email": "test@test.com"
}
}
}
}
mutation {
product(update: $data, id: 9) {
id
name
user {
id
full_name
email
}
}
}
variables {
"data": {
"email": "thed44ude@rug.com",
"full_name": "The Dude",
"created_at": "now",
"updated_at": "now",
"product": {
"connect": {
"id": 5
}
}
}
}
mutation {
user(insert: $data) {
id
full_name
email
product {
id
name
price
}
}
}
variables {
"data": {
"name": "Apple",
"price": 1.25,
"created_at": "now",
"updated_at": "now",
"user": {
"connect": {
"id": 5
}
}
}
}
mutation {
product(insert: $data) {
id
name
user {
id
full_name
email
}
}
}
variables {
"data": [
{
"name": "Apple",
"price": 1.25,
"created_at": "now",
"updated_at": "now",
"user": {
"connect": {
"id": 6
}
}
},
{
"name": "Coconut",
"price": 2.25,
"created_at": "now",
"updated_at": "now",
"user": {
"connect": {
"id": 3
}
}
}
]
}
mutation {
products(insert: $data) {
id
name
user {
id
full_name
email
}
}
}
variables {
"data": [
{
"name": "Apple",
"price": 1.25,
"created_at": "now",
"updated_at": "now"
},
{
"name": "Coconut",
"price": 2.25,
"created_at": "now",
"updated_at": "now"
}
]
}
mutation {
products(insert: $data) {
id
name
user {
id
full_name
email
}
}
}
variables {
"data": {
"name": "Apple",
"price": 1.25,
"user": {
"connect": {
"id": 5,
"email": "test@test.com"
}
}
}
}
mutation {
product(update: $data, id: 9) {
id
name
user {
id
full_name
email
}
}
}
variables {
"data": {
"name": "Apple",
"price": 1.25,
"user": {
"connect": {
"id": 5
}
}
}
}
mutation {
product(update: $data, id: 9) {
id
name
user {
id
full_name
email
}
}
}
variables {
"data": {
"name": "Apple",
"price": 1.25,
"user": {
"disconnect": {
"id": 5
}
}
}
}
mutation {
product(update: $data, id: 9) {
id
name
user_id
}
}
variables {
"data": {
"name": "Apple",
"price": 1.25,
"user": {
"disconnect": {
"id": 5
}
}
}
}
mutation {
product(update: $data, id: 2) {
id
name
user_id
}
}

config/dev.yml (new file, 226 lines)

@ -0,0 +1,226 @@
app_name: "Super Graph Development"
host_port: 0.0.0.0:8080
web_ui: true
# debug, error, warn, info, none
log_level: "debug"
# enable or disable http compression (uses gzip)
http_compress: true
# When production mode is 'true' only queries
# from the allow list are permitted.
# When it's 'false' all queries are saved to the
# allow list in ./config/allow.list
production: false
# Throw a 401 on auth failure for queries that need auth
auth_fail_block: false
# Latency tracing for database queries and remote joins
# the resulting latency information is returned with the
# response
enable_tracing: true
# Watch the config folder and reload Super Graph
# with the new configs when a change is detected
reload_on_config_change: true
# File that points to the database seeding script
# seed_file: seed.js
# Path pointing to where the migrations can be found
migrations_path: ./config/migrations
# Secret key for general encryption operations like
# encrypting the cursor data
secret_key: supercalifajalistics
# CORS: A list of origins a cross-domain request can be executed from.
# If the special * value is present in the list, all origins will be allowed.
# An origin may contain a wildcard (*) to replace 0 or more
# characters (e.g. http://*.domain.com).
cors_allowed_origins: ["*"]
# Debug Cross Origin Resource Sharing requests
cors_debug: true
# Postgres related environment Variables
# SG_DATABASE_HOST
# SG_DATABASE_PORT
# SG_DATABASE_USER
# SG_DATABASE_PASSWORD
# Auth related environment Variables
# SG_AUTH_RAILS_COOKIE_SECRET_KEY_BASE
# SG_AUTH_RAILS_REDIS_URL
# SG_AUTH_RAILS_REDIS_PASSWORD
# SG_AUTH_JWT_PUBLIC_KEY_FILE
# inflections:
# person: people
# sheep: sheep
auth:
# Can be 'rails' or 'jwt'
type: rails
cookie: _app_session
# Comment this out if you want to disable setting
# the user_id via a header for testing.
# Disable in production
creds_in_header: true
rails:
# Rails version; this is used for reading the
# various cookie formats.
version: 5.2
# Found in 'Rails.application.config.secret_key_base'
secret_key_base: 0a248500a64c01184edb4d7ad3a805488f8097ac761b76aaa6c17c01dcb7af03a2f18ba61b2868134b9c7b79a122bc0dadff4367414a2d173297bfea92be5566
# Remote cookie store. (memcache or redis)
# url: redis://redis:6379
# password: ""
# max_idle: 80
# max_active: 12000
# In most cases you don't need these
# salt: "encrypted cookie"
# sign_salt: "signed encrypted cookie"
# auth_salt: "authenticated encrypted cookie"
# jwt:
# provider: auth0
# secret: abc335bfcfdb04e50db5bb0a4d67ab9
# public_key_file: /secrets/public_key.pem
# public_key_type: ecdsa #rsa
database:
type: postgres
host: db
port: 5432
dbname: app_development
user: postgres
password: postgres
#schema: "public"
#pool_size: 10
#max_retries: 0
#log_level: "debug"
# Set session variable "user.id" to the user id
# Enable this if you need the user id in triggers, etc
set_user_id: false
# database ping timeout is used for db health checking
ping_timeout: 1m
# Define additional variables here to be used with filters
variables:
admin_account_id: "5"
# Field and table names that you wish to block
blocklist:
- ar_internal_metadata
- schema_migrations
- secret
- password
- encrypted
- token
tables:
- name: customers
remotes:
- name: payments
id: stripe_id
url: http://rails_app:3000/stripe/$id
path: data
# debug: true
pass_headers:
- cookie
set_headers:
- name: Host
value: 0.0.0.0
# - name: Authorization
# value: Bearer <stripe_api_key>
- # You can create new fields that have a
# real db table backing them
name: me
table: users
- name: deals
table: products
- name: users
columns:
- name: email
related_to: products.name
roles_query: "SELECT * FROM users WHERE id = $user_id"
roles:
- name: anon
tables:
- name: products
query:
limit: 10
columns: ["id", "name", "description" ]
aggregation: false
insert:
block: false
update:
block: false
delete:
block: false
- name: deals
query:
limit: 3
aggregation: false
- name: purchases
query:
limit: 3
aggregation: false
- name: user
tables:
- name: users
query:
filters: ["{ id: { _eq: $user_id } }"]
- name: products
query:
limit: 50
filters: ["{ user_id: { eq: $user_id } }"]
disable_functions: false
insert:
filters: ["{ user_id: { eq: $user_id } }"]
presets:
- user_id: "$user_id"
- created_at: "now"
- updated_at: "now"
update:
filters: ["{ user_id: { eq: $user_id } }"]
columns:
- id
- name
presets:
- updated_at: "now"
delete:
block: true
- name: admin
match: id = 1000
tables:
- name: users
filters: []

config/prod.yml (new file, 67 lines)

@ -0,0 +1,67 @@
# Inherit config from this other config file
# so I only need to overwrite some values
inherits: dev
app_name: "Super Graph Production"
host_port: 0.0.0.0:8080
web_ui: false
# debug, error, warn, info, none
log_level: "info"
# enable or disable http compression (uses gzip)
http_compress: true
# When production mode is 'true' only queries
# from the allow list are permitted.
# When it's 'false' all queries are saved to the
# allow list in ./config/allow.list
production: true
# Throw a 401 on auth failure for queries that need auth
auth_fail_block: true
# Latency tracing for database queries and remote joins
# the resulting latency information is returned with the
# response
enable_tracing: true
# File that points to the database seeding script
# seed_file: seed.js
# Path pointing to where the migrations can be found
# migrations_path: migrations
# Secret key for general encryption operations like
# encrypting the cursor data
# secret_key: supercalifajalistics
# Postgres related environment Variables
# SG_DATABASE_HOST
# SG_DATABASE_PORT
# SG_DATABASE_USER
# SG_DATABASE_PASSWORD
# Auth related environment Variables
# SG_AUTH_RAILS_COOKIE_SECRET_KEY_BASE
# SG_AUTH_RAILS_REDIS_URL
# SG_AUTH_RAILS_REDIS_PASSWORD
# SG_AUTH_JWT_PUBLIC_KEY_FILE
database:
type: postgres
host: db
port: 5432
dbname: app_production
user: postgres
password: postgres
#pool_size: 10
#max_retries: 0
#log_level: "debug"
# Set session variable "user.id" to the user id
# Enable this if you need the user id in triggers, etc
set_user_id: false
# database ping timeout is used for db health checking
ping_timeout: 5m

config/seed.js (new file, 116 lines)

@ -0,0 +1,116 @@
var user_count = 10
customer_count = 100
product_count = 50
purchase_count = 100
var users = []
customers = []
products = []
for (i = 0; i < user_count; i++) {
var pwd = fake.password()
var data = {
full_name: fake.name(),
avatar: fake.avatar_url(200),
phone: fake.phone(),
email: fake.email(),
password: pwd,
password_confirmation: pwd,
created_at: "now",
updated_at: "now"
}
var res = graphql(" \
mutation { \
user(insert: $data) { \
id \
} \
}", { data: data })
users.push(res.user)
}
for (i = 0; i < product_count; i++) {
var n = Math.floor(Math.random() * users.length)
var user = users[n]
var desc = [
fake.beer_style(),
fake.beer_hop(),
fake.beer_yeast(),
fake.beer_ibu(),
fake.beer_alcohol(),
fake.beer_blg(),
].join(", ")
var data = {
name: fake.beer_name(),
description: desc,
price: fake.price()
//user_id: user.id,
//created_at: "now",
//updated_at: "now"
}
var res = graphql(" \
mutation { \
product(insert: $data) { \
id \
} \
}", { data: data }, {
user_id: 5
})
products.push(res.product)
}
for (i = 0; i < customer_count; i++) {
var pwd = fake.password()
var data = {
stripe_id: "CUS-" + fake.uuid(),
full_name: fake.name(),
phone: fake.phone(),
email: fake.email(),
password: pwd,
password_confirmation: pwd,
created_at: "now",
updated_at: "now"
}
var res = graphql(" \
mutation { \
customer(insert: $data) { \
id \
} \
}", { data: data })
customers.push(res.customer)
}
for (i = 0; i < purchase_count; i++) {
var sale_type = fake.rand_string(["rented", "bought"])
// only rentals get due/returned dates; keep them null for purchases
var due_date = null
var returned = null
if (sale_type === "rented") {
due_date = fake.date()
returned = fake.date()
}
var data = {
customer_id: customers[Math.floor(Math.random() * customer_count)].id,
product_id: products[Math.floor(Math.random() * product_count)].id,
sale_type: sale_type,
quantity: Math.floor(Math.random() * 10),
due_date: due_date,
returned: returned,
created_at: "now",
updated_at: "now"
}
var res = graphql(" \
mutation { \
purchase(insert: $data) { \
id \
} \
}", { data: data })
console.log(res)
}


@ -87,6 +87,7 @@ type SuperGraph struct {
prepared map[string]*preparedItem
roles map[string]*Role
getRole *sql.Stmt
rmap map[uint64]*resolvFn
abacEnabled bool
anonExists bool
qc *qcode.Compiler
@ -118,6 +119,10 @@ func NewSuperGraph(conf *Config, db *sql.DB) (*SuperGraph, error) {
return nil, err
}
if err := sg.initResolvers(); err != nil {
return nil, err
}
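// Hash the configured secret into a fixed-length key and clear the
// plaintext so it isn't retained in the config afterwards.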
if len(conf.SecretKey) != 0 {
sk := sha256.Sum256([]byte(conf.SecretKey))
conf.SecretKey = ""


@ -89,25 +89,28 @@ func (sg *SuperGraph) initCompilers() error {
func (c *scontext) execQuery() ([]byte, error) {
var data []byte
// var st *stmt
var st *stmt
var err error
if c.sg.conf.UseAllowList {
data, _, err = c.resolvePreparedSQL()
data, st, err = c.resolvePreparedSQL()
if err != nil {
return nil, err
}
} else {
data, _, err = c.resolveSQL()
data, st, err = c.resolveSQL()
if err != nil {
return nil, err
}
}
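// If nothing was skipped there are no remote joins to perform and the
// SQL result can be returned as-is.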
if len(data) == 0 || st.skipped == 0 {
return data, nil
}
//return execRemoteJoin(st, data, c.req.hdr)
// return c.sg.execRemoteJoin(st, data, c.req.hdr)
return c.sg.execRemoteJoin(st, data, nil)
}
func (c *scontext) resolvePreparedSQL() ([]byte, *stmt, error) {


@ -1,253 +1,249 @@
package core
// import (
// "bytes"
// "errors"
// "fmt"
// "net/http"
// "sync"
import (
"bytes"
"errors"
"fmt"
"net/http"
"sync"
// "github.com/cespare/xxhash/v2"
// "github.com/dosco/super-graph/jsn"
// "github.com/dosco/super-graph/core/internal/qcode"
// )
"github.com/cespare/xxhash/v2"
"github.com/dosco/super-graph/core/internal/qcode"
"github.com/dosco/super-graph/jsn"
)
// func execRemoteJoin(st *stmt, data []byte, hdr http.Header) ([]byte, error) {
// var err error
func (sg *SuperGraph) execRemoteJoin(st *stmt, data []byte, hdr http.Header) ([]byte, error) {
var err error
// if len(data) == 0 || st.skipped == 0 {
// return data, nil
// }
sel := st.qc.Selects
h := xxhash.New()
// sel := st.qc.Selects
// h := xxhash.New()
// fetch the field names used within the db response json
// that mark insertion points, and the mapping between
// those field names and their select objects
fids, sfmap := sg.parentFieldIds(h, sel, st.skipped)
// // fetch the field name used within the db response json
// // that are used to mark insertion points and the mapping between
// // those field names and their select objects
// fids, sfmap := parentFieldIds(h, sel, st.skipped)
// fetch the field values of the marked insertion points
// these values contain the id to be used with fetching remote data
from := jsn.Get(data, fids)
var to []jsn.Field
// // fetch the field values of the marked insertion points
// // these values contain the id to be used with fetching remote data
// from := jsn.Get(data, fids)
// var to []jsn.Field
switch {
case len(from) == 1:
to, err = sg.resolveRemote(hdr, h, from[0], sel, sfmap)
// switch {
// case len(from) == 1:
// to, err = resolveRemote(hdr, h, from[0], sel, sfmap)
case len(from) > 1:
to, err = sg.resolveRemotes(hdr, h, from, sel, sfmap)
// case len(from) > 1:
// to, err = resolveRemotes(hdr, h, from, sel, sfmap)
default:
return nil, errors.New("no remote ids found in db response")
}
// default:
// return nil, errors.New("something wrong no remote ids found in db response")
// }
if err != nil {
return nil, err
}
// if err != nil {
// return nil, err
// }
var ob bytes.Buffer
// var ob bytes.Buffer
err = jsn.Replace(&ob, data, from, to)
if err != nil {
return nil, err
}
// err = jsn.Replace(&ob, data, from, to)
// if err != nil {
// return nil, err
// }
return ob.Bytes(), nil
}
// return ob.Bytes(), nil
// }
func (sg *SuperGraph) resolveRemote(
hdr http.Header,
h *xxhash.Digest,
field jsn.Field,
sel []qcode.Select,
sfmap map[uint64]*qcode.Select) ([]jsn.Field, error) {
// func resolveRemote(
// hdr http.Header,
// h *xxhash.Digest,
// field jsn.Field,
// sel []qcode.Select,
// sfmap map[uint64]*qcode.Select) ([]jsn.Field, error) {
// replacement data for the marked insertion points
// key and value will be replaced by what's below
toA := [1]jsn.Field{}
to := toA[:1]
// // replacement data for the marked insertion points
// // key and value will be replaced by whats below
// toA := [1]jsn.Field{}
// to := toA[:1]
// use the json key to find the related Select object
k1 := xxhash.Sum64(field.Key)
// // use the json key to find the related Select object
// k1 := xxhash.Sum64(field.Key)
s, ok := sfmap[k1]
if !ok {
return nil, nil
}
p := sel[s.ParentID]
// s, ok := sfmap[k1]
// if !ok {
// return nil, nil
// }
// p := sel[s.ParentID]
// then use the Table name in the Select and its parent
// to find the resolver to use for this relationship
k2 := mkkey(h, s.Name, p.Name)
// // then use the Table nme in the Select and it's parent
// // to find the resolver to use for this relationship
// k2 := mkkey(h, s.Name, p.Name)
r, ok := sg.rmap[k2]
if !ok {
return nil, nil
}
// r, ok := rmap[k2]
// if !ok {
// return nil, nil
// }
id := jsn.Value(field.Value)
if len(id) == 0 {
return nil, nil
}
// id := jsn.Value(field.Value)
// if len(id) == 0 {
// return nil, nil
// }
//st := time.Now()
// //st := time.Now()
b, err := r.Fn(hdr, id)
if err != nil {
return nil, err
}
// b, err := r.Fn(hdr, id)
// if err != nil {
// return nil, err
// }
if len(r.Path) != 0 {
b = jsn.Strip(b, r.Path)
}
// if len(r.Path) != 0 {
// b = jsn.Strip(b, r.Path)
// }
var ob bytes.Buffer
// var ob bytes.Buffer
if len(s.Cols) != 0 {
err = jsn.Filter(&ob, b, colsToList(s.Cols))
if err != nil {
return nil, err
}
// if len(s.Cols) != 0 {
// err = jsn.Filter(&ob, b, colsToList(s.Cols))
// if err != nil {
// return nil, err
// }
} else {
ob.WriteString("null")
}
// } else {
// ob.WriteString("null")
// }
to[0] = jsn.Field{Key: []byte(s.FieldName), Value: ob.Bytes()}
return to, nil
}
// to[0] = jsn.Field{Key: []byte(s.FieldName), Value: ob.Bytes()}
// return to, nil
// }
func (sg *SuperGraph) resolveRemotes(
hdr http.Header,
h *xxhash.Digest,
from []jsn.Field,
sel []qcode.Select,
sfmap map[uint64]*qcode.Select) ([]jsn.Field, error) {
// func resolveRemotes(
// hdr http.Header,
// h *xxhash.Digest,
// from []jsn.Field,
// sel []qcode.Select,
// sfmap map[uint64]*qcode.Select) ([]jsn.Field, error) {
// replacement data for the marked insertion points
// key and value will be replaced by what's below
to := make([]jsn.Field, len(from))
// // replacement data for the marked insertion points
// // key and value will be replaced by whats below
// to := make([]jsn.Field, len(from))
var wg sync.WaitGroup
wg.Add(len(from))
// var wg sync.WaitGroup
// wg.Add(len(from))
var cerr error
// var cerr error
for i, id := range from {
// for i, id := range from {
// use the json key to find the related Select object
k1 := xxhash.Sum64(id.Key)
// // use the json key to find the related Select object
// k1 := xxhash.Sum64(id.Key)
s, ok := sfmap[k1]
if !ok {
return nil, nil
}
p := sel[s.ParentID]
// s, ok := sfmap[k1]
// if !ok {
// return nil, nil
// }
// p := sel[s.ParentID]
// then use the Table name in the Select and its parent
// to find the resolver to use for this relationship
k2 := mkkey(h, s.Name, p.Name)
// // then use the Table nme in the Select and it's parent
// // to find the resolver to use for this relationship
// k2 := mkkey(h, s.Name, p.Name)
r, ok := sg.rmap[k2]
if !ok {
return nil, nil
}
// r, ok := rmap[k2]
// if !ok {
// return nil, nil
// }
id := jsn.Value(id.Value)
if len(id) == 0 {
return nil, nil
}
// id := jsn.Value(id.Value)
// if len(id) == 0 {
// return nil, nil
// }
go func(n int, id []byte, s *qcode.Select) {
defer wg.Done()
// go func(n int, id []byte, s *qcode.Select) {
// defer wg.Done()
//st := time.Now()
// //st := time.Now()
b, err := r.Fn(hdr, id)
if err != nil {
cerr = fmt.Errorf("%s: %s", s.Name, err)
return
}
// b, err := r.Fn(hdr, id)
// if err != nil {
// cerr = fmt.Errorf("%s: %s", s.Name, err)
// return
// }
if len(r.Path) != 0 {
b = jsn.Strip(b, r.Path)
}
// if len(r.Path) != 0 {
// b = jsn.Strip(b, r.Path)
// }
var ob bytes.Buffer
// var ob bytes.Buffer
if len(s.Cols) != 0 {
err = jsn.Filter(&ob, b, colsToList(s.Cols))
if err != nil {
cerr = fmt.Errorf("%s: %s", s.Name, err)
return
}
// if len(s.Cols) != 0 {
// err = jsn.Filter(&ob, b, colsToList(s.Cols))
// if err != nil {
// cerr = fmt.Errorf("%s: %s", s.Name, err)
// return
// }
} else {
ob.WriteString("null")
}
// } else {
// ob.WriteString("null")
// }
to[n] = jsn.Field{Key: []byte(s.FieldName), Value: ob.Bytes()}
}(i, id, s)
}
wg.Wait()
// to[n] = jsn.Field{Key: []byte(s.FieldName), Value: ob.Bytes()}
// }(i, id, s)
// }
// wg.Wait()
return to, cerr
}
// return to, cerr
// }
func (sg *SuperGraph) parentFieldIds(h *xxhash.Digest, sel []qcode.Select, skipped uint32) (
[][]byte,
map[uint64]*qcode.Select) {
// func parentFieldIds(h *xxhash.Digest, sel []qcode.Select, skipped uint32) (
// [][]byte,
// map[uint64]*qcode.Select) {
c := 0
for i := range sel {
s := &sel[i]
if isSkipped(skipped, uint32(s.ID)) {
c++
}
}
// c := 0
// for i := range sel {
// s := &sel[i]
// if isSkipped(skipped, uint32(s.ID)) {
// c++
// }
// }
// list of keys (and their related values) to extract from
// the db json response
fm := make([][]byte, c)
// // list of keys (and it's related value) to extract from
// // the db json response
// fm := make([][]byte, c)
// mapping between the above extracted key and a Select
// object
sm := make(map[uint64]*qcode.Select, c)
n := 0
// // mapping between the above extracted key and a Select
// // object
// sm := make(map[uint64]*qcode.Select, c)
// n := 0
for i := range sel {
s := &sel[i]
// for i := range sel {
// s := &sel[i]
if !isSkipped(skipped, uint32(s.ID)) {
continue
}
// if !isSkipped(skipped, uint32(s.ID)) {
// continue
// }
p := sel[s.ParentID]
k := mkkey(h, s.Name, p.Name)
// p := sel[s.ParentID]
// k := mkkey(h, s.Name, p.Name)
if r, ok := sg.rmap[k]; ok {
fm[n] = r.IDField
n++
// if r, ok := rmap[k]; ok {
// fm[n] = r.IDField
// n++
k := xxhash.Sum64(r.IDField)
sm[k] = s
}
}
// k := xxhash.Sum64(r.IDField)
// sm[k] = s
// }
// }
return fm, sm
}
// return fm, sm
// }
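// isSkipped reports whether the select with ID pos is flagged in the
// bitmask n (one bit per select).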
func isSkipped(n uint32, pos uint32) bool {
return ((n & (1 << pos)) != 0)
}
// func isSkipped(n uint32, pos uint32) bool {
// return ((n & (1 << pos)) != 0)
// }
func colsToList(cols []qcode.Column) []string {
var f []string
// func colsToList(cols []qcode.Column) []string {
// var f []string
// for i := range cols {
// f = append(f, cols[i].Name)
// }
// return f
// }
for i := range cols {
f = append(f, cols[i].Name)
}
return f
}


@ -6,90 +6,90 @@ import (
"net/http"
"strings"
"github.com/cespare/xxhash/v2"
"github.com/dosco/super-graph/core/internal/psql"
"github.com/dosco/super-graph/jsn"
)
var (
rmap map[uint64]*resolvFn
)
type resolvFn struct {
IDField []byte
Path [][]byte
Fn func(h http.Header, id []byte) ([]byte, error)
}
// func initResolvers() {
// var err error
// rmap = make(map[uint64]*resolvFn)
func (sg *SuperGraph) initResolvers() error {
var err error
sg.rmap = make(map[uint64]*resolvFn)
// for _, t := range conf.Tables {
// err = initRemotes(t)
// if err != nil {
// break
// }
// }
for _, t := range sg.conf.Tables {
err = sg.initRemotes(t)
if err != nil {
break
}
}
// if err != nil {
// errlog.Fatal().Err(err).Msg("failed to initialize resolvers")
// }
// }
if err != nil {
return fmt.Errorf("failed to initialize resolvers: %v", err)
}
// func initRemotes(t Table) error {
// h := xxhash.New()
return nil
}
// for _, r := range t.Remotes {
// // defines the table column to be used as an id in the
// // remote request
// idcol := r.ID
func (sg *SuperGraph) initRemotes(t Table) error {
h := xxhash.New()
// // if no table column specified in the config then
// // use the primary key of the table as the id
// if len(idcol) == 0 {
// pcol, err := pcompile.IDColumn(t.Name)
// if err != nil {
// return err
// }
// idcol = pcol.Key
// }
// idk := fmt.Sprintf("__%s_%s", t.Name, idcol)
for _, r := range t.Remotes {
// defines the table column to be used as an id in the
// remote request
idcol := r.ID
// // register a relationship between the remote data
// // and the database table
// if no table column specified in the config then
// use the primary key of the table as the id
if len(idcol) == 0 {
pcol, err := sg.pc.IDColumn(t.Name)
if err != nil {
return err
}
idcol = pcol.Key
}
idk := fmt.Sprintf("__%s_%s", t.Name, idcol)
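// idk is a synthetic field name (e.g. "__customers_stripe_id") used to
// mark insertion points for this remote in the db JSON response.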
// val := &psql.DBRel{Type: psql.RelRemote}
// val.Left.Col = idcol
// val.Right.Col = idk
// register a relationship between the remote data
// and the database table
// err := pcompile.AddRelationship(strings.ToLower(r.Name), t.Name, val)
// if err != nil {
// return err
// }
val := &psql.DBRel{Type: psql.RelRemote}
val.Left.Col = idcol
val.Right.Col = idk
// // the function thats called to resolve this remote
// // data request
// fn := buildFn(r)
err := sg.pc.AddRelationship(sanitize(r.Name), t.Name, val)
if err != nil {
return err
}
// path := [][]byte{}
// for _, p := range strings.Split(r.Path, ".") {
// path = append(path, []byte(p))
// }
// the function that's called to resolve this remote
// data request
fn := buildFn(r)
// rf := &resolvFn{
// IDField: []byte(idk),
// Path: path,
// Fn: fn,
// }
path := [][]byte{}
for _, p := range strings.Split(r.Path, ".") {
path = append(path, []byte(p))
}
// // index resolver obj by parent and child names
// rmap[mkkey(h, r.Name, t.Name)] = rf
rf := &resolvFn{
IDField: []byte(idk),
Path: path,
Fn: fn,
}
// // index resolver obj by IDField
// rmap[xxhash.Sum64(rf.IDField)] = rf
// }
// index resolver obj by parent and child names
sg.rmap[mkkey(h, r.Name, t.Name)] = rf
// return nil
// }
// index resolver obj by IDField
sg.rmap[xxhash.Sum64(rf.IDField)] = rf
}
return nil
}
func buildFn(r Remote) func(http.Header, []byte) ([]byte, error) {
reqURL := strings.Replace(r.URL, "$id", "%s", 1)
@ -114,12 +114,9 @@ func buildFn(r Remote) func(http.Header, []byte) ([]byte, error) {
req.Header.Set(v, hdr.Get(v))
}
// logger.Debug().Str("uri", uri).Msg("Remote Join")
res, err := client.Do(req)
if err != nil {
// errlog.Error().Err(err).Msgf("Failed to connect to: %s", uri)
return nil, err
return nil, fmt.Errorf("failed to connect to '%s': %v", uri, err)
}
defer res.Body.Close()

core/utils.go (new file, 15 lines)

@ -0,0 +1,15 @@
package core
import (
"github.com/cespare/xxhash/v2"
)
// nolint: errcheck
func mkkey(h *xxhash.Digest, k1 string, k2 string) uint64 {
h.WriteString(k1)
h.WriteString(k2)
v := h.Sum64()
h.Reset()
return v
}
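A self-contained usage sketch: resolvers are indexed by hashing a (remote name, parent table) pair, as `initRemotes` above does. The table names here are taken from the dev.yml example earlier in this diff.

```go
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

// mkkey, copied from core/utils.go so the sketch compiles on its own.
func mkkey(h *xxhash.Digest, k1, k2 string) uint64 {
	h.WriteString(k1)
	h.WriteString(k2)
	v := h.Sum64()
	h.Reset()
	return v
}

func main() {
	h := xxhash.New()
	// e.g. the "payments" remote joined to the "customers" table
	fmt.Println(mkkey(h, "payments", "customers"))
}
```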


@ -1,5 +1,5 @@
<template>
<div class="shadow bg-white p-4 flex items-start" :class="className">
<div class="shadow p-4 flex items-start" :class="className">
<slot name="image"></slot>
<div class="pl-4">
<h2 class="p-0">


@ -2,33 +2,33 @@
<div>
<main aria-labelledby="main-title" >
<Navbar />
<div style="height: 3.6rem"></div>
<div class="container mx-auto mt-24">
<div class="container mx-auto pt-4">
<div class="text-center">
<div class="text-center text-4xl text-gray-800 leading-tight">
<div class="text-center text-3xl md:text-4xl text-black leading-tight font-semibold">
Fetch data without code
</div>
<NavLink
class="inline-block px-4 py-3 my-8 bg-blue-600 text-blue-100 font-bold rounded"
class="inline-block px-4 py-3 my-8 bg-blue-600 text-white font-bold rounded"
:item="actionLink"
/>
<a
class="px-4 py-3 my-8 border-2 border-gray-500 text-gray-600 font-bold rounded"
class="px-4 py-3 my-8 border-2 border-blue-600 text-blue-600 font-bold rounded"
href="https://github.com/dosco/super-graph"
target="_blank"
>Github</a>
</div>
</div>
<div class="container mx-auto mb-8">
<div class="container mx-auto mb-8 mt-0 md:mt-20 bg-green-100">
<div class="flex flex-wrap">
<div class="w-100 md:w-1/2 bg-indigo-300 text-indigo-800 text-lg p-4">
<div class="text-center text-2xl font-bold pb-2">Before, struggle with SQL</div>
<div class="w-100 md:w-1/2 border border-green-500 text-gray-6 00 text-sm md:text-lg p-6">
<div class="text-xl font-bold pb-4">Before, struggle with SQL</div>
<pre>
type User struct {
gorm.Model
Profile Profile
@ -50,8 +50,9 @@ db.Model(&user).
and more ...
</pre>
</div>
<div class="w-100 md:w-1/2 bg-green-300 text-black text-lg p-4">
<div class="text-center text-2xl font-bold pb-2">With Super Graph, just ask.</div>
<div class="w-100 md:w-1/2 border border-l md:border-l-0 border-green-500 text-blue-900 text-sm md:text-lg p-6">
<div class="text-xl font-bold pb-4">With Super Graph, just ask.</div>
<pre>
query {
user(id: 5) {
@ -59,26 +60,24 @@ query {
first_name
last_name
picture_url
}
posts(first: 20, order_by: { score: desc }) {
slug
title
created_at
cached_votes_total
vote(where: { user_id: { eq: $user_id } }) {
id
}
votes_total
votes { created_at }
author { id name }
tags { id name }
}
posts_cursor
}
}
</pre>
</div>
</div>
</div>
<div>
<div class="mt-0 md:mt-20">
<div
class="flex flex-wrap mx-2 md:mx-20"
v-if="data.features && data.features.length"
@ -89,24 +88,21 @@ query {
:key="index"
>
<div class="p-8">
<h2 class="md:text-xl text-blue-800 font-medium border-0 mb-1">{{ feature.title }}</h2>
<p class="md:text-xl text-gray-700 leading-snug">{{ feature.details }}</p>
<h2 class="text-lg uppercase border-0">{{ feature.title }}</h2>
<div class="text-xl text-gray-900 leading-snug">{{ feature.details }}</div>
</div>
</div>
</div>
</div>
<div class="bg-gray-100 mt-10">
<div class="container mx-auto px-10 md:px-0 py-32">
<div class="pt-0 md:pt-20">
<div class="container mx-auto p-10">
<div class="pb-8 hidden md:flex justify-center">
<div class="flex justify-center pb-20">
<img src="arch-basic.svg">
</div>
<h1 class="uppercase font-semibold text-xl text-blue-800 text-center mb-4">
What is Super Graph?
</h1>
<div class="text-2xl md:text-3xl">
Super Graph is a library and service that fetches data from any Postgres database using just GraphQL. No more struggling with ORMs and SQL to wrangle data out of the database. No more having to figure out the right joins or making inefficient queries. However complex the GraphQL, Super Graph will always generate just one single efficient SQL query. The goal is to save you time and money so you can focus on your app's core value.
</div>
@ -114,17 +110,7 @@ query {
</div>
<div class="container mx-auto flex flex-wrap">
<div class="md:w-1/2">
<img src="/graphql.png">
</div>
<div class="md:w-1/2">
<img src="/json.png">
</div>
</div>
<div class="mt-10 py-10 md:py-20">
<div class="pt-20">
<div class="container mx-auto px-10 md:px-0">
<h1 class="uppercase font-semibold text-2xl text-blue-800 text-center">
Try Super Graph
@ -133,20 +119,18 @@ query {
<h1 class="uppercase font-semibold text-lg text-gray-800">
Deploy as a service using docker
</h1>
<div class="bg-gray-800 text-indigo-300 p-4 rounded">
<div class="p-4 rounded bg-black text-white">
<pre>$ git clone https://github.com/dosco/super-graph && cd super-graph && make install</pre>
<pre>$ super-graph new blog; cd blog</pre>
<pre>$ docker-compose run blog_api ./super-graph db:setup</pre>
<pre>$ docker-compose up</pre>
</div>
<div class="border-t mt-4 pb-4"></div>
<h1 class="uppercase font-semibold text-lg text-gray-800">
Or use it with your own code
</h1>
<div class="text-md">
<pre class="bg-gray-800 text-indigo-300 p-4 rounded">
<pre class="p-4 rounded bg-black text-white">
package main
import (
@ -194,7 +178,7 @@ func main() {
</div>
</div>
<div class="bg-gray-100 mt-10">
<div class="pt-0 md:pt-20">
<div class="container mx-auto px-10 md:px-0 py-32">
<h1 class="uppercase font-semibold text-xl text-blue-800 mb-4">
The story of {{ data.heroText }}
@ -245,7 +229,7 @@ func main() {
</div>
-->
<div class="border-t py-10">
<div class="pt-0 md:pt-20">
<div class="block md:hidden w-100">
<iframe src='https://www.youtube.com/embed/MfPL2A-DAJk' frameborder='0' allowfullscreen style="width: 100%; height: 250px;">
</iframe>
@ -267,7 +251,101 @@ func main() {
</div>
</div>
<div class="bg-gray-200 mt-10">
<div class="container mx-auto pt-0 md:pt-20">
<div class="flex flex-wrap bg-green-100">
<div class="w-100 md:w-1/2 border border-green-500 text-gray-6 00 text-sm md:text-lg p-6">
<div class="text-xl font-bold pb-4">No more joins joins, json, orms, just use GraphQL. Fetch all the data want in the structure you need.</div>
<pre>
query {
thread {
slug
title
published
createdAt : created_at
totalVotes : cached_votes_total
totalPosts : cached_posts_total
vote : thread_vote(where: { user_id: { eq: $user_id } }) {
created_at
}
topics {
slug
name
}
author : me {
slug
}
posts(first: 1, order_by: { score: desc }) {
slug
body
published
createdAt : created_at
totalVotes : cached_votes_total
totalComments : cached_comments_total
vote {
created_at
}
author : user {
slug
firstName : first_name
lastName : last_name
}
}
posts_cursor
}
}
</pre>
</div>
<div class="w-100 md:w-1/2 border border-l md:border-l-0 border-green-500 text-blue-900 text-sm md:text-lg p-6">
<div class="text-xl font-bold pb-4">Instant results using a single highly optimized SQL. It's just that simple.</div>
<pre>
{
"data": {
"thread": {
"slug": "eveniet-ex-24",
"vote": null,
"posts": [
{
"body": "Dolor laborum harum sed sit est ducimus temporibus velit non nobis repudiandae nobis suscipit commodi voluptatem debitis sed voluptas sequi officia.",
"slug": "illum-in-voluptas-1418",
"vote": null,
"author": {
"slug": "sigurd-kemmer",
"lastName": "Effertz",
"firstName": "Brandt"
},
"createdAt": "2020-04-07T04:22:42.115874+00:00",
"published": true,
"totalVotes": 0,
"totalComments": 2
}
],
"title": "In aut qui deleniti quia dolore quasi porro tenetur voluptatem ut adita alias fugit explicabo.",
"author": null,
"topics": [
{
"name": "CloudRun",
"slug": "cloud-run"
},
{
"name": "Postgres",
"slug": "postgres"
}
],
"createdAt": "2020-04-07T04:22:38.099482+00:00",
"published": true,
"totalPosts": 24,
"totalVotes": 0,
"posts_cursor": "mpeBl6L+QfJHc3cmLkLDj9pOdEZYTt5KQtLsazG3TLITB3hJhg=="
}
}
}
</pre>
</div>
</div>
</div>
<div class="pt-0 md:pt-20">
<div class="container mx-auto px-10 md:px-0 py-32">
<h1 class="uppercase font-semibold text-xl text-blue-800 mb-4">
Build Secure Apps
@ -292,8 +370,8 @@ func main() {
</div>
</div>
<div class="">
<div class="container mx-auto px-10 md:px-0 py-32">
<div class="pt-0 md:py -20">
<div class="container mx-auto">
<h1 class="uppercase font-semibold text-xl text-blue-800 mb-4">
More Features
</h1>


@ -1,6 +1,9 @@
@tailwind base;
@css {
body, .navbar, .navbar .links {
@apply bg-white text-black border-0 !important;
}
h1 {
@apply font-semibold text-3xl border-0 py-4
}

go.mod (1 changed line)

@ -6,6 +6,7 @@ require (
github.com/adjust/gorails v0.0.0-20171013043634-2786ed0c03d3
github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b
github.com/brianvoe/gofakeit v3.18.0+incompatible
github.com/cespare/xxhash v1.1.0
github.com/cespare/xxhash/v2 v2.1.0
github.com/daaku/go.zipexe v1.0.1 // indirect
github.com/dgrijalva/jwt-go v3.2.0+incompatible