Refactor Super Graph into a library #26

Vikram Rangnekar
2020-04-10 02:27:43 -04:00
parent e102da839e
commit 7831d27345
200 changed files with 3590 additions and 4447 deletions

cmd/internal/serv/.gitignore
@@ -0,0 +1 @@
static.go

@@ -0,0 +1,42 @@
package serv
import (
"fmt"
"net/http"
"github.com/dosco/super-graph/config"
)
type actionFn func(w http.ResponseWriter, r *http.Request) error
func newAction(a *config.Action) (http.Handler, error) {
var fn actionFn
var err error
if len(a.SQL) != 0 {
fn, err = newSQLAction(a)
} else {
return nil, fmt.Errorf("invalid config for action '%s'", a.Name)
}
if err != nil {
return nil, err
}
httpFn := func(w http.ResponseWriter, r *http.Request) {
if err := fn(w, r); err != nil {
renderErr(w, err, nil)
}
}
return http.HandlerFunc(httpFn), nil
}
func newSQLAction(a *config.Action) (actionFn, error) {
fn := func(w http.ResponseWriter, r *http.Request) error {
_, err := db.ExecContext(r.Context(), a.SQL)
return err
}
return fn, nil
}
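
For context, a minimal wiring sketch (not part of this diff; the route path and helper name are assumptions): each config-defined action becomes a handler via newAction and is mounted on a mux.

// Hypothetical wiring; newAction and config.Action are from the code above.
func mountActions(mux *http.ServeMux, actions []config.Action) error {
	for i := range actions {
		h, err := newAction(&actions[i])
		if err != nil {
			return err
		}
		mux.Handle("/api/v1/actions/"+actions[i].Name, h)
	}
	return nil
}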

cmd/internal/serv/cmd.go
@@ -0,0 +1,178 @@
package serv
import (
"database/sql"
"fmt"
_log "log"
"os"
"runtime"
"strings"
"github.com/dosco/super-graph/config"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"go.uber.org/zap"
)
//go:generate rice embed-go
const (
serverName = "Super Graph"
)
var (
// These variables are set using -ldflags
version string
gitBranch string
lastCommitSHA string
lastCommitTime string
)
var (
log *_log.Logger // logger
zlog *zap.Logger // fast logger
conf *config.Config // parsed config
confPath string // path to the config file
db *sql.DB // database connection pool
secretKey [32]byte // encryption key
)
func Cmd() {
log = _log.New(os.Stdout, "", 0)
zlog = zap.NewExample()
rootCmd := &cobra.Command{
Use: "super-graph",
Short: BuildDetails(),
}
rootCmd.AddCommand(&cobra.Command{
Use: "serv",
Short: "Run the super-graph service",
Run: cmdServ,
})
rootCmd.AddCommand(&cobra.Command{
Use: "db:create",
Short: "Create database",
Run: cmdDBCreate,
})
rootCmd.AddCommand(&cobra.Command{
Use: "db:drop",
Short: "Drop database",
Run: cmdDBDrop,
})
rootCmd.AddCommand(&cobra.Command{
Use: "db:seed",
Short: "Run the seed script to seed the database",
Run: cmdDBSeed,
})
rootCmd.AddCommand(&cobra.Command{
Use: "db:migrate",
Short: "Migrate the database",
Long: `Migrate the database to destination migration version.
Destination migration version can be one of the following value types:
Migrate to the most recent migration.
e.g. db:migrate up
Rollback the most recent migration.
e.g. db:migrate down
Migrate to a specific migration.
e.g. db:migrate 42
Migrate forward N steps.
e.g. db:migrate +3
Migrate backward N steps.
e.g. db:migrate -2
Redo previous N steps (migrate backward N steps then forward N steps).
e.g. db:migrate -+1
`,
Run: cmdDBMigrate,
})
rootCmd.AddCommand(&cobra.Command{
Use: "db:status",
Short: "Print current migration status",
Run: cmdDBStatus,
})
rootCmd.AddCommand(&cobra.Command{
Use: "db:new NAME",
Short: "Generate a new migration",
Long: "Generate a new migration with the next sequence number and provided name",
Run: cmdDBNew,
})
rootCmd.AddCommand(&cobra.Command{
Use: "db:setup",
Short: "Setup database",
Long: "This command will create, migrate and seed the database",
Run: cmdDBSetup,
})
rootCmd.AddCommand(&cobra.Command{
Use: "db:reset",
Short: "Reset database",
Long: "This command will drop, create, migrate and seed the database (won't run in production)",
Run: cmdDBReset,
})
rootCmd.AddCommand(&cobra.Command{
Use: "new APP-NAME",
Short: "Create a new application",
Long: "Generate all the required files to start on a new Super Graph app",
Run: cmdNew,
})
rootCmd.AddCommand(&cobra.Command{
Use: fmt.Sprintf("conf:dump [%s]", strings.Join(viper.SupportedExts, "|")),
Short: "Dump config to file",
Long: "Dump current config to a file in the selected format",
Run: cmdConfDump,
})
rootCmd.AddCommand(&cobra.Command{
Use: "version",
Short: "Super Graph binary version information",
Run: cmdVersion,
})
rootCmd.PersistentFlags().StringVar(&confPath,
"path", "./config", "path to config files")
if err := rootCmd.Execute(); err != nil {
log.Fatalf("ERR %s", err)
}
}
func cmdVersion(cmd *cobra.Command, args []string) {
fmt.Printf("%s\n", BuildDetails())
}
func BuildDetails() string {
return fmt.Sprintf(`
Super Graph %v
For documentation, visit https://supergraph.dev
Commit SHA-1 : %v
Commit timestamp : %v
Branch : %v
Go version : %v
Licensed under the Apache Public License 2.0
Copyright 2020, Vikram Rangnekar.
`,
version,
lastCommitSHA,
lastCommitTime,
gitBranch,
runtime.Version())
}

@@ -0,0 +1,29 @@
package serv
import (
"fmt"
"os"
"github.com/dosco/super-graph/config"
"github.com/spf13/cobra"
)
func cmdConfDump(cmd *cobra.Command, args []string) {
if len(args) != 1 {
cmd.Help() //nolint: errcheck
os.Exit(1)
}
fname := fmt.Sprintf("%s.%s", config.GetConfigName(), args[0])
conf, err := initConf()
if err != nil {
log.Fatalf("ERR failed to read config: %s", err)
}
if err := conf.WriteConfigAs(fname); err != nil {
log.Fatalf("ERR failed to write config: %s", err)
}
log.Printf("INF config dumped to ./%s", fname)
}

@@ -0,0 +1,317 @@
package serv
import (
"fmt"
"os"
"path"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/dosco/super-graph/cmd/internal/serv/internal/migrate"
"github.com/spf13/cobra"
)
var newMigrationText = `-- Write your migrate up statements here
---- create above / drop below ----
-- Write your migrate down statements here. If this migration is irreversible
-- Then delete the separator line above.
`
func cmdDBSetup(cmd *cobra.Command, args []string) {
initConfOnce()
cmdDBCreate(cmd, []string{})
cmdDBMigrate(cmd, []string{"up"})
sfile := path.Join(conf.ConfigPathUsed(), conf.SeedFile)
_, err := os.Stat(sfile)
if err == nil {
cmdDBSeed(cmd, []string{})
return
}
if !os.IsNotExist(err) {
log.Fatalf("ERR unable to check if '%s' exists: %s", sfile, err)
}
log.Printf("WRN failed to read seed file '%s'", sfile)
}
func cmdDBReset(cmd *cobra.Command, args []string) {
initConfOnce()
if conf.Production {
log.Fatalln("ERR db:reset does not work in production")
}
cmdDBDrop(cmd, []string{})
cmdDBSetup(cmd, []string{})
}
func cmdDBCreate(cmd *cobra.Command, args []string) {
initConfOnce()
db, err := initDB(conf)
if err != nil {
log.Fatalf("ERR failed to connect to database: %s", err)
}
defer db.Close()
sql := fmt.Sprintf(`CREATE DATABASE "%s"`, conf.DB.DBName)
_, err = db.Exec(sql)
if err != nil {
log.Fatalf("ERR failed to create database: %s", err)
}
log.Printf("INF created database '%s'", conf.DB.DBName)
}
func cmdDBDrop(cmd *cobra.Command, args []string) {
initConfOnce()
db, err := initDB(conf)
if err != nil {
log.Fatalf("ERR failed to connect to database: %s", err)
}
defer db.Close()
sql := fmt.Sprintf(`DROP DATABASE IF EXISTS "%s"`, conf.DB.DBName)
_, err = db.Exec(sql)
if err != nil {
log.Fatalf("ERR failed to drop database: %s", err)
}
log.Printf("INF dropped database '%s'", conf.DB.DBName)
}
func cmdDBNew(cmd *cobra.Command, args []string) {
if len(args) != 1 {
cmd.Help() //nolint: errcheck
os.Exit(1)
}
initConfOnce()
name := args[0]
m, err := migrate.FindMigrations(conf.MigrationsPath)
if err != nil {
log.Fatalf("ERR error loading migrations: %s", err)
}
mname := fmt.Sprintf("%d_%s.sql", len(m), name)
// Write new migration
mpath := filepath.Join(conf.MigrationsPath, mname)
mfile, err := os.OpenFile(mpath, os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0666)
if err != nil {
log.Fatalf("ERR %s", err)
}
defer mfile.Close()
_, err = mfile.WriteString(newMigrationText)
if err != nil {
log.Fatalf("ERR %s", err)
}
log.Printf("INR created migration '%s'", mpath)
}
func cmdDBMigrate(cmd *cobra.Command, args []string) {
if len(args) == 0 {
cmd.Help() //nolint: errcheck
os.Exit(1)
}
initConfOnce()
dest := args[0]
conn, err := initDB(conf)
if err != nil {
log.Fatalf("ERR failed to connect to database: %s", err)
}
defer conn.Close()
m, err := migrate.NewMigrator(conn, "schema_version")
if err != nil {
log.Fatalf("ERR failed to initializing migrator: %s", err)
}
m.Data = getMigrationVars()
err = m.LoadMigrations(path.Join(conf.ConfigPathUsed(), conf.MigrationsPath))
if err != nil {
log.Fatalf("ERR failed to load migrations: %s", err)
}
if len(m.Migrations) == 0 {
log.Fatalf("ERR no migrations found")
}
m.OnStart = func(sequence int32, name, direction, sql string) {
log.Printf("INF %s executing %s %s\n%s\n\n",
time.Now().Format("2006-01-02 15:04:05"), name, direction, sql)
}
var currentVersion int32
currentVersion, err = m.GetCurrentVersion()
if err != nil {
fmt.Fprintf(os.Stderr, "Unable to get current version:\n %v\n", err)
os.Exit(1)
}
mustParseDestination := func(d string) int32 {
var n int64
n, err = strconv.ParseInt(d, 10, 32)
if err != nil {
log.Fatalf("ERR invalid destination: %s", err)
}
return int32(n)
}
if dest == "up" {
err = m.Migrate()
} else if dest == "down" {
err = m.MigrateTo(currentVersion - 1)
} else if len(dest) >= 3 && dest[0:2] == "-+" {
err = m.MigrateTo(currentVersion - mustParseDestination(dest[2:]))
if err == nil {
err = m.MigrateTo(currentVersion)
}
} else if len(dest) >= 2 && dest[0] == '-' {
err = m.MigrateTo(currentVersion - mustParseDestination(dest[1:]))
} else if len(dest) >= 2 && dest[0] == '+' {
err = m.MigrateTo(currentVersion + mustParseDestination(dest[1:]))
} else if len(dest) > 0 && dest[0] >= '0' && dest[0] <= '9' {
// bare version number, e.g. db:migrate 42 (matches the help text above)
err = m.MigrateTo(mustParseDestination(dest))
} else {
cmd.Help() //nolint: errcheck
os.Exit(1)
}
if err != nil {
log.Fatalf("ERR %s", err)
// if err, ok := err.(m.MigrationPgError); ok {
// if err.Detail != "" {
// log.Fatalf("ERR %s", err.Detail)
// }
// if err.Position != 0 {
// ele, err := ExtractErrorLine(err.Sql, int(err.Position))
// if err != nil {
// log.Fatalf("ERR %s", err)
// }
// log.Fatalf("INF line %d, %s%s", ele.LineNum, ele.Text)
// }
// }
}
log.Println("INF migration done")
}
func cmdDBStatus(cmd *cobra.Command, args []string) {
initConfOnce()
db, err := initDB(conf)
if err != nil {
log.Fatalf("ERR failed to connect to database: %s", err)
}
defer db.Close()
m, err := migrate.NewMigrator(db, "schema_version")
if err != nil {
log.Fatalf("ERR failed to initialize migrator: %s", err)
}
m.Data = getMigrationVars()
err = m.LoadMigrations(conf.MigrationsPath)
if err != nil {
log.Fatalf("ERR failed to load migrations: %s", err)
}
if len(m.Migrations) == 0 {
log.Fatalf("ERR no migrations found")
}
mver, err := m.GetCurrentVersion()
if err != nil {
log.Fatalf("ERR failed to retrieve migration: %s", err)
}
var status string
behindCount := len(m.Migrations) - int(mver)
if behindCount == 0 {
status = "up to date"
} else {
status = "migration(s) pending"
}
log.Printf("INF status: %s, version: %d of %d, host: %s, database: %s",
status, mver, len(m.Migrations), conf.DB.Host, conf.DB.DBName)
}
type ErrorLineExtract struct {
LineNum int // Line number starting with 1
ColumnNum int // Column number starting with 1
Text string // Text of the line without a new line character.
}
// ExtractErrorLine takes source and character position extracts the line
// number, column number, and the line of text.
//
// The first character is position 1.
func ExtractErrorLine(source string, position int) (ErrorLineExtract, error) {
ele := ErrorLineExtract{LineNum: 1}
if position > len(source) {
return ele, fmt.Errorf("position (%d) is greater than source length (%d)", position, len(source))
}
lines := strings.SplitAfter(source, "\n")
for _, ele.Text = range lines {
if position-len(ele.Text) < 1 {
ele.ColumnNum = position
break
}
ele.LineNum += 1
position -= len(ele.Text)
}
ele.Text = strings.TrimSuffix(ele.Text, "\n")
return ele, nil
}
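
A worked example of ExtractErrorLine, to sanity-check the 1-based position math:

// "select 1;\n" is 10 characters long, so position 15 lands on line 2, column 5.
ele, err := ExtractErrorLine("select 1;\nselect broken;", 15)
// err == nil, ele.LineNum == 2, ele.ColumnNum == 5, ele.Text == "select broken;"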
func getMigrationVars() map[string]interface{} {
return map[string]interface{}{
"app_name": strings.Title(conf.AppName),
"app_name_slug": strings.ToLower(strings.Replace(conf.AppName, " ", "_", -1)),
"env": strings.ToLower(os.Getenv("GO_ENV")),
}
}
func initConfOnce() {
var err error
if conf != nil {
return
}
conf, err = initConf()
if err != nil {
log.Fatalf("ERR failed to read config: %s", err)
}
}

@@ -0,0 +1,150 @@
package serv
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"strings"
rice "github.com/GeertJohan/go.rice"
"github.com/spf13/cobra"
"github.com/valyala/fasttemplate"
)
func cmdNew(cmd *cobra.Command, args []string) {
if len(args) != 1 {
cmd.Help() //nolint: errcheck
os.Exit(1)
}
tmpl := newTempl(map[string]string{
"app_name": strings.Title(strings.Join(args, " ")),
"app_name_slug": strings.ToLower(strings.Join(args, "_")),
})
// Create app folder and add relevant files
name := args[0]
appPath := path.Join("./", name)
ifNotExists(appPath, func(p string) error {
return os.Mkdir(p, os.ModePerm)
})
ifNotExists(path.Join(appPath, "Dockerfile"), func(p string) error {
if v, err := tmpl.get("Dockerfile"); err == nil {
return ioutil.WriteFile(p, v, 0644)
} else {
return err
}
})
ifNotExists(path.Join(appPath, "docker-compose.yml"), func(p string) error {
if v, err := tmpl.get("docker-compose.yml"); err == nil {
return ioutil.WriteFile(p, v, 0644)
} else {
return err
}
})
// Create app config folder and add relevant files
appConfigPath := path.Join(appPath, "config")
ifNotExists(appConfigPath, func(p string) error {
return os.Mkdir(p, os.ModePerm)
})
ifNotExists(path.Join(appConfigPath, "dev.yml"), func(p string) error {
if v, err := tmpl.get("dev.yml"); err == nil {
return ioutil.WriteFile(p, v, 0644)
} else {
return err
}
})
ifNotExists(path.Join(appConfigPath, "prod.yml"), func(p string) error {
if v, err := tmpl.get("prod.yml"); err == nil {
return ioutil.WriteFile(p, v, 0644)
} else {
return err
}
})
ifNotExists(path.Join(appConfigPath, "seed.js"), func(p string) error {
if v, err := tmpl.get("seed.js"); err == nil {
return ioutil.WriteFile(p, v, 0644)
} else {
return err
}
})
// Create app migrations folder and add relevant files
appMigrationsPath := path.Join(appConfigPath, "migrations")
ifNotExists(appMigrationsPath, func(p string) error {
return os.Mkdir(p, os.ModePerm)
})
ifNotExists(path.Join(appMigrationsPath, "0_init.sql"), func(p string) error {
if v, err := tmpl.get("0_init.sql"); err == nil {
return ioutil.WriteFile(p, v, 0644)
} else {
return err
}
})
log.Printf("INR app '%s' initialized", name)
}
type Templ struct {
*rice.Box
data map[string]string
}
func newTempl(data map[string]string) *Templ {
return &Templ{rice.MustFindBox("./tmpl"), data}
}
func (t *Templ) get(name string) ([]byte, error) {
v := t.MustString(name)
b := bytes.Buffer{}
tmpl := fasttemplate.New(v, "{%", "%}")
_, err := tmpl.ExecuteFunc(&b, func(w io.Writer, tag string) (int, error) {
if val, ok := t.data[strings.TrimSpace(tag)]; ok {
return w.Write([]byte(val))
}
return 0, fmt.Errorf("unknown template variable '%s'", tag)
})
if err != nil {
return nil, err
}
return b.Bytes(), nil
}
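
Templates use fasttemplate's {% %} delimiters over files in the rice box; a minimal sketch, assuming a hypothetical boxed file greeting.txt containing `Hello {% app_name %}!` (note MustString panics if the file is missing from the box):

t := newTempl(map[string]string{"app_name": "Blog", "app_name_slug": "blog"})
out, err := t.get("greeting.txt") // hypothetical file in the ./tmpl box
// err == nil, string(out) == "Hello Blog!"; an unknown tag returns an error instead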
func ifNotExists(filePath string, doFn func(string) error) {
_, err := os.Stat(filePath)
if err == nil {
log.Printf("ERR create skipped '%s' exists", filePath)
return
}
if !os.IsNotExist(err) {
log.Fatalf("ERR unable to check if '%s' exists", filePath)
}
err = doFn(filePath)
if err != nil {
log.Fatalf("ERR unable to create '%s'", filePath)
}
log.Printf("INR created '%s'", filePath)
}

@@ -0,0 +1,413 @@
package serv
import (
"context"
"encoding/csv"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"math/rand"
"os"
"path"
"strconv"
"strings"
"github.com/brianvoe/gofakeit"
"github.com/dop251/goja"
"github.com/dosco/super-graph/core"
"github.com/spf13/cobra"
)
func cmdDBSeed(cmd *cobra.Command, args []string) {
var err error
if conf, err = initConf(); err != nil {
log.Fatalf("ERR failed to read config: %s", err)
}
conf.Production = false
db, err = initDB(conf)
if err != nil {
log.Fatalf("ERR failed to connect to database: %s", err)
}
sfile := path.Join(conf.ConfigPathUsed(), conf.SeedFile)
b, err := ioutil.ReadFile(sfile)
if err != nil {
log.Fatalf("ERR failed to read seed file %s: %s", sfile, err)
}
sg, err = core.NewSuperGraph(conf, db)
if err != nil {
log.Fatalf("ERR failed to initialize Super Graph: %s", err)
}
graphQLFn := func(query string, data interface{}, opt map[string]string) map[string]interface{} {
return graphQLFunc(sg, query, data, opt)
}
vm := goja.New()
vm.Set("graphql", graphQLFn)
//vm.Set("import_csv", importCSV)
console := vm.NewObject()
console.Set("log", logFunc) //nolint: errcheck
vm.Set("console", console)
fake := vm.NewObject()
setFakeFuncs(fake)
vm.Set("fake", fake)
_, err = vm.RunScript("seed.js", string(b))
if err != nil {
log.Fatalf("ERR failed to execute script: %s", err)
}
log.Println("INF seed script done")
}
// func runFunc(call goja.FunctionCall) {
func graphQLFunc(sg *core.SuperGraph, query string, data interface{}, opt map[string]string) map[string]interface{} {
ct := context.Background()
if v, ok := opt["user_id"]; ok && len(v) != 0 {
ct = context.WithValue(ct, core.UserIDKey, v)
}
// var role string
// if v, ok := opt["role"]; ok && len(v) != 0 {
// role = v
// } else {
// role = "user"
// }
var vars []byte
var err error
if vars, err = json.Marshal(data); err != nil {
log.Fatalf("ERR %s", err)
}
res, err := sg.GraphQL(ct, query, vars)
if err != nil {
log.Fatalf("ERR %s", err)
}
val := make(map[string]interface{})
if err = json.Unmarshal(res.Data, &val); err != nil {
log.Fatalf("ERR %s", err)
}
return val
}
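
Seed scripts call this through the vm binding as graphql(query, data, opt); the Go-side equivalent, with a hypothetical mutation and the user_id option that sets core.UserIDKey on the context:

// Hypothetical mutation; the schema and variable names are assumptions.
vars := map[string]interface{}{"email": "jane@example.com"}
res := graphQLFunc(sg, `mutation { user(insert: { email: $email }) { id } }`, vars,
	map[string]string{"user_id": "1"})
fmt.Println(res["user"])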
type csvSource struct {
rows [][]string
i int
}
func NewCSVSource(filename string) (*csvSource, error) {
f, err := os.Open(filename)
if err != nil {
return nil, err
}
defer f.Close()
r := csv.NewReader(f)
rows, err := r.ReadAll()
if err != nil {
return nil, err
}
return &csvSource{rows: rows}, nil
}
func (c *csvSource) Next() bool {
return c.i < len(c.rows)
}
func (c *csvSource) Values() ([]interface{}, error) {
var vals []interface{}
var err error
for _, v := range c.rows[c.i] {
switch {
case len(v) == 0:
vals = append(vals, "")
case isDigit(v):
var n int
if n, err = strconv.Atoi(v); err == nil {
vals = append(vals, n)
}
case strings.EqualFold(v, "true") || strings.EqualFold(v, "false"):
var b bool
if b, err = strconv.ParseBool(v); err == nil {
vals = append(vals, b)
}
default:
vals = append(vals, v)
}
if err != nil {
return nil, fmt.Errorf("%w (line no %d)", err, c.i)
}
}
c.i++
return vals, nil
}
func isDigit(v string) bool {
for i := range v {
if v[i] < '0' || v[i] > '9' {
return false
}
}
return true
}
func (c *csvSource) Err() error {
return nil
}
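
csvSource feeds the (currently commented-out) COPY-based importer; a sketch of iterating it directly, with a hypothetical file name:

src, err := NewCSVSource("users.csv") // hypothetical file
if err != nil {
	log.Fatalf("ERR %s", err)
}
for src.Next() {
	vals, err := src.Values() // digit-only fields become int, true/false become bool
	if err != nil {
		log.Fatalf("ERR %s", err)
	}
	fmt.Println(vals)
}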
// func importCSV(table, filename string) int64 {
// if filename[0] != '/' {
// filename = path.Join(conf.ConfigPathUsed(), filename)
// }
// s, err := NewCSVSource(filename)
// if err != nil {
// log.Fatalf("ERR %s", err)
// }
// var cols []string
// colval, _ := s.Values()
// for _, c := range colval {
// cols = append(cols, c.(string))
// }
// n, err := db.Exec(fmt.Sprintf("COPY %s FROM STDIN WITH "),
// cols,
// s)
// if err != nil {
// err = fmt.Errorf("%w (line no %d)", err, s.i)
// log.Fatalf("ERR %s", err)
// }
// return n
// }
//nolint: errcheck
func logFunc(args ...interface{}) {
for _, arg := range args {
if _, ok := arg.(map[string]interface{}); ok {
j, err := json.MarshalIndent(arg, "", " ")
if err != nil {
continue
}
os.Stdout.Write(j)
} else {
io.WriteString(os.Stdout, fmt.Sprintf("%v", arg))
}
io.WriteString(os.Stdout, "\n")
}
}
func avatarURL(size int) string {
if size == 0 {
size = 200
}
return fmt.Sprintf("https://i.pravatar.cc/%d?%d", size, rand.Intn(5000))
}
func imageURL(width int, height int) string {
return fmt.Sprintf("https://picsum.photos/%d/%d?%d", width, height, rand.Intn(5000))
}
//nolint: errcheck
func setFakeFuncs(f *goja.Object) {
gofakeit.Seed(0)
// Person
f.Set("person", gofakeit.Person)
f.Set("name", gofakeit.Name)
f.Set("name_prefix", gofakeit.NamePrefix)
f.Set("name_suffix", gofakeit.NameSuffix)
f.Set("first_name", gofakeit.FirstName)
f.Set("last_name", gofakeit.LastName)
f.Set("gender", gofakeit.Gender)
f.Set("ssn", gofakeit.SSN)
f.Set("contact", gofakeit.Contact)
f.Set("email", gofakeit.Email)
f.Set("phone", gofakeit.Phone)
f.Set("phone_formatted", gofakeit.PhoneFormatted)
f.Set("username", gofakeit.Username)
f.Set("password", gofakeit.Password)
// Address
f.Set("address", gofakeit.Address)
f.Set("city", gofakeit.City)
f.Set("country", gofakeit.Country)
f.Set("country_abr", gofakeit.CountryAbr)
f.Set("state", gofakeit.State)
f.Set("state_abr", gofakeit.StateAbr)
f.Set("status_code", gofakeit.StatusCode)
f.Set("street", gofakeit.Street)
f.Set("street_name", gofakeit.StreetName)
f.Set("street_number", gofakeit.StreetNumber)
f.Set("street_prefix", gofakeit.StreetPrefix)
f.Set("street_suffix", gofakeit.StreetSuffix)
f.Set("zip", gofakeit.Zip)
f.Set("latitude", gofakeit.Latitude)
f.Set("latitude_in_range", gofakeit.LatitudeInRange)
f.Set("longitude", gofakeit.Longitude)
f.Set("longitude_in_range", gofakeit.LongitudeInRange)
// Beer
f.Set("beer_alcohol", gofakeit.BeerAlcohol)
f.Set("beer_hop", gofakeit.BeerHop)
f.Set("beer_ibu", gofakeit.BeerIbu)
f.Set("beer_blg", gofakeit.BeerBlg)
f.Set("beer_malt", gofakeit.BeerMalt)
f.Set("beer_name", gofakeit.BeerName)
f.Set("beer_style", gofakeit.BeerStyle)
f.Set("beer_yeast", gofakeit.BeerYeast)
// Cars
f.Set("vehicle", gofakeit.Vehicle)
f.Set("vehicle_type", gofakeit.VehicleType)
f.Set("car_maker", gofakeit.CarMaker)
f.Set("car_model", gofakeit.CarModel)
f.Set("fuel_type", gofakeit.FuelType)
f.Set("transmission_gear_type", gofakeit.TransmissionGearType)
// Text
f.Set("word", gofakeit.Word)
f.Set("sentence", gofakeit.Sentence)
f.Set("paragraph", gofakeit.Paragraph)
f.Set("question", gofakeit.Question)
f.Set("quote", gofakeit.Quote)
// Misc
f.Set("generate", gofakeit.Generate)
f.Set("boolean", gofakeit.Bool)
f.Set("uuid", gofakeit.UUID)
// Colors
f.Set("color", gofakeit.Color)
f.Set("hex_color", gofakeit.HexColor)
f.Set("rgb_color", gofakeit.RGBColor)
f.Set("safe_color", gofakeit.SafeColor)
// Internet
f.Set("url", gofakeit.URL)
f.Set("image_url", imageURL)
f.Set("avatar_url", avatarURL)
f.Set("domain_name", gofakeit.DomainName)
f.Set("domain_suffix", gofakeit.DomainSuffix)
f.Set("ipv4_address", gofakeit.IPv4Address)
f.Set("ipv6_address", gofakeit.IPv6Address)
f.Set("simple_status_code", gofakeit.SimpleStatusCode)
f.Set("http_method", gofakeit.HTTPMethod)
f.Set("user_agent", gofakeit.UserAgent)
f.Set("user_agent_firefox", gofakeit.FirefoxUserAgent)
f.Set("user_agent_chrome", gofakeit.ChromeUserAgent)
f.Set("user_agent_opera", gofakeit.OperaUserAgent)
f.Set("user_agent_safari", gofakeit.SafariUserAgent)
// Date / Time
f.Set("date", gofakeit.Date)
f.Set("date_range", gofakeit.DateRange)
f.Set("nano_second", gofakeit.NanoSecond)
f.Set("second", gofakeit.Second)
f.Set("minute", gofakeit.Minute)
f.Set("hour", gofakeit.Hour)
f.Set("month", gofakeit.Month)
f.Set("day", gofakeit.Day)
f.Set("weekday", gofakeit.WeekDay)
f.Set("year", gofakeit.Year)
f.Set("timezone", gofakeit.TimeZone)
f.Set("timezone_abv", gofakeit.TimeZoneAbv)
f.Set("timezone_full", gofakeit.TimeZoneFull)
f.Set("timezone_offset", gofakeit.TimeZoneOffset)
// Payment
f.Set("price", gofakeit.Price)
f.Set("credit_card", gofakeit.CreditCard)
f.Set("credit_card_cvv", gofakeit.CreditCardCvv)
f.Set("credit_card_number", gofakeit.CreditCardNumber)
f.Set("credit_card_number_luhn", gofakeit.CreditCardNumberLuhn)
f.Set("credit_card_type", gofakeit.CreditCardType)
f.Set("currency", gofakeit.Currency)
f.Set("currency_long", gofakeit.CurrencyLong)
f.Set("currency_short", gofakeit.CurrencyShort)
// Company
f.Set("bs", gofakeit.BS)
f.Set("buzzword", gofakeit.BuzzWord)
f.Set("company", gofakeit.Company)
f.Set("company_suffix", gofakeit.CompanySuffix)
f.Set("job", gofakeit.Job)
f.Set("job_description", gofakeit.JobDescriptor)
f.Set("job_level", gofakeit.JobLevel)
f.Set("job_title", gofakeit.JobTitle)
// Hacker
f.Set("hacker_abbreviation", gofakeit.HackerAbbreviation)
f.Set("hacker_adjective", gofakeit.HackerAdjective)
f.Set("hacker_ingverb", gofakeit.HackerIngverb)
f.Set("hacker_noun", gofakeit.HackerNoun)
f.Set("hacker_phrase", gofakeit.HackerPhrase)
f.Set("hacker_verb", gofakeit.HackerVerb)
//Hipster
f.Set("hipster_word", gofakeit.HipsterWord)
f.Set("hipster_paragraph", gofakeit.HipsterParagraph)
f.Set("hipster_sentence", gofakeit.HipsterSentence)
//Languages
//f.Set("language", gofakeit.Language)
//f.Set("language_abbreviation", gofakeit.LanguageAbbreviation)
//f.Set("language_abbreviation", gofakeit.LanguageAbbreviation)
// File
f.Set("extension", gofakeit.Extension)
f.Set("mine_type", gofakeit.MimeType)
// Numbers
f.Set("number", gofakeit.Number)
f.Set("numerify", gofakeit.Numerify)
f.Set("int8", gofakeit.Int8)
f.Set("int16", gofakeit.Int16)
f.Set("int32", gofakeit.Int32)
f.Set("int64", gofakeit.Int64)
f.Set("uint8", gofakeit.Uint8)
f.Set("uint16", gofakeit.Uint16)
f.Set("uint32", gofakeit.Uint32)
f.Set("uint64", gofakeit.Uint64)
f.Set("float32", gofakeit.Float32)
f.Set("float32_range", gofakeit.Float32Range)
f.Set("float64", gofakeit.Float64)
f.Set("float64_range", gofakeit.Float64Range)
f.Set("shuffle_ints", gofakeit.ShuffleInts)
f.Set("mac_address", gofakeit.MacAddress)
// String
f.Set("digit", gofakeit.Digit)
f.Set("letter", gofakeit.Letter)
f.Set("lexify", gofakeit.Lexify)
f.Set("rand_string", gofakeit.RandString)
f.Set("shuffle_strings", gofakeit.ShuffleStrings)
f.Set("numerify", gofakeit.Numerify)
//f.Set("programming_language", gofakeit.ProgrammingLanguage)
}

@@ -0,0 +1,37 @@
package serv
import (
"github.com/dosco/super-graph/core"
"github.com/spf13/cobra"
)
var (
sg *core.SuperGraph
)
func cmdServ(cmd *cobra.Command, args []string) {
var err error
conf, err = initConf()
if err != nil {
fatalInProd(err, "failed to read config")
}
initWatcher()
db, err = initDB(conf)
if err != nil {
fatalInProd(err, "failed to connect to database")
}
// if conf != nil && db != nil {
// initResolvers()
// }
sg, err = core.NewSuperGraph(conf, db)
if err != nil {
fatalInProd(err, "failed to initialize Super Graph")
}
startHTTP()
}

@@ -0,0 +1,7 @@
package serv
// func (c *coreContext) handleReq(w io.Writer, req *http.Request) error {
// return nil
// }

@@ -0,0 +1,21 @@
query {
products(
# returns only 30 items
limit: 30,
# starts from item 10, commented out for now
# offset: 10,
# orders the response items by highest price
order_by: { price: desc },
# no duplicate prices returned
distinct: [ price ]
# only items with an id >= 20 and < 28 are returned
where: { id: { and: { greater_or_equals: 20, lt: 28 } } }) {
id
name
price
}
}

cmd/internal/serv/fuzz.go
@@ -0,0 +1,11 @@
// +build gofuzz
package serv
func Fuzz(data []byte) int {
gql := string(data)
QueryName(gql)
gqlHash(gql, nil, "")
return 1
}

@@ -0,0 +1,15 @@
package serv
import "testing"
func TestFuzzCrashers(t *testing.T) {
var crashers = []string{
"query",
"q",
"que",
}
for _, f := range crashers {
gqlHash(f, nil, "")
}
}

@@ -0,0 +1,25 @@
package serv
import (
"context"
"net/http"
)
var healthyResponse = []byte("All's Well")
func health(w http.ResponseWriter, _ *http.Request) {
ct, cancel := context.WithTimeout(context.Background(), conf.DB.PingTimeout)
defer cancel()
if err := db.PingContext(ct); err != nil {
log.Printf("ERR error pinging database: %s", err)
w.WriteHeader(http.StatusInternalServerError)
return
}
if _, err := w.Write(healthyResponse); err != nil {
log.Printf("ERR error writing healthy response: %s", err)
w.WriteHeader(http.StatusInternalServerError)
return
}
}

cmd/internal/serv/http.go
@@ -0,0 +1,125 @@
package serv
import (
"encoding/json"
"errors"
"io"
"io/ioutil"
"net/http"
"strings"
"github.com/dosco/super-graph/cmd/internal/serv/internal/auth"
"github.com/dosco/super-graph/config"
"github.com/dosco/super-graph/core"
"github.com/rs/cors"
"go.uber.org/zap"
)
const (
maxReadBytes = 100000 // 100 KB
introspectionQuery = "IntrospectionQuery"
)
var (
errUnauthorized = errors.New("not authorized")
)
type gqlReq struct {
OpName string `json:"operationName"`
Query string `json:"query"`
Vars json.RawMessage `json:"variables"`
}
type errorResp struct {
Error error `json:"error"`
}
func apiV1Handler() http.Handler {
h, err := auth.WithAuth(http.HandlerFunc(apiV1), &conf.Auth)
if err != nil {
log.Fatalf("ERR %s", err)
}
if len(conf.AllowedOrigins) != 0 {
c := cors.New(cors.Options{
AllowedOrigins: conf.AllowedOrigins,
AllowCredentials: true,
Debug: conf.DebugCORS,
})
h = c.Handler(h)
}
return h
}
func apiV1(w http.ResponseWriter, r *http.Request) {
ct := r.Context()
//nolint: errcheck
if conf.AuthFailBlock && !auth.IsAuth(ct) {
renderErr(w, errUnauthorized, nil)
return
}
b, err := ioutil.ReadAll(io.LimitReader(r.Body, maxReadBytes))
if err != nil {
renderErr(w, err, nil)
return
}
defer r.Body.Close()
req := gqlReq{}
err = json.Unmarshal(b, &req)
if err != nil {
renderErr(w, err, nil)
return
}
if strings.EqualFold(req.OpName, introspectionQuery) {
introspect(w)
return
}
res, err := sg.GraphQL(ct, req.Query, req.Vars)
if conf.LogLevel() >= config.LogLevelDebug {
log.Printf("DBG query:\n%s\nsql:\n%s", req.Query, res.SQL())
}
if err != nil {
renderErr(w, err, res)
return
}
json.NewEncoder(w).Encode(res)
if conf.LogLevel() >= config.LogLevelInfo {
zlog.Info("success",
zap.String("op", res.Operation()),
zap.String("name", res.QueryName()),
zap.String("role", res.Role()),
)
}
}
//nolint: errcheck
func renderErr(w http.ResponseWriter, err error, res *core.Result) {
if err == errUnauthorized {
w.WriteHeader(http.StatusUnauthorized)
}
json.NewEncoder(w).Encode(&errorResp{err})
if conf.LogLevel() >= config.LogLevelError {
if res != nil {
zlog.Error(err.Error(),
zap.String("op", res.Operation()),
zap.String("name", res.QueryName()),
zap.String("role", res.Role()),
)
} else {
zlog.Error(err.Error())
}
}
}
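
For reference, a sketch of the request shape apiV1 expects; the mount path and port are assumptions, since the route registration is not part of this diff:

body := strings.NewReader(`{"operationName":"","query":"query { users { id } }","variables":null}`)
resp, err := http.Post("http://localhost:8080/api/v1/graphql", "application/json", body)
if err != nil {
	log.Fatalf("ERR %s", err)
}
defer resp.Body.Close()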

cmd/internal/serv/init.go
@@ -0,0 +1,88 @@
package serv
import (
"database/sql"
"fmt"
"time"
"github.com/dosco/super-graph/config"
_ "github.com/jackc/pgx/v4/stdlib"
)
func initConf() (*config.Config, error) {
return config.NewConfigWithLogger(confPath, log)
}
func initDB(c *config.Config) (*sql.DB, error) {
var db *sql.DB
var err error
cs := fmt.Sprintf("postgres://%s:%s@%s:%d/%s",
c.DB.User, c.DB.Password,
c.DB.Host, c.DB.Port, c.DB.DBName)
for i := 1; i < 10; i++ {
db, err = sql.Open("pgx", cs)
if err == nil {
break
}
time.Sleep(time.Duration(i*100) * time.Millisecond)
}
if err != nil {
return nil, err
}
return db, nil
// config, _ := pgxpool.ParseConfig("")
// config.ConnConfig.Host = c.DB.Host
// config.ConnConfig.Port = c.DB.Port
// config.ConnConfig.Database = c.DB.DBName
// config.ConnConfig.User = c.DB.User
// config.ConnConfig.Password = c.DB.Password
// config.ConnConfig.RuntimeParams = map[string]string{
// "application_name": c.AppName,
// "search_path": c.DB.Schema,
// }
// switch c.LogLevel {
// case "debug":
// config.ConnConfig.LogLevel = pgx.LogLevelDebug
// case "info":
// config.ConnConfig.LogLevel = pgx.LogLevelInfo
// case "warn":
// config.ConnConfig.LogLevel = pgx.LogLevelWarn
// case "error":
// config.ConnConfig.LogLevel = pgx.LogLevelError
// default:
// config.ConnConfig.LogLevel = pgx.LogLevelNone
// }
// config.ConnConfig.Logger = NewSQLLogger(logger)
// // if c.DB.MaxRetries != 0 {
// // opt.MaxRetries = c.DB.MaxRetries
// // }
// if c.DB.PoolSize != 0 {
// config.MaxConns = conf.DB.PoolSize
// }
// var db *pgxpool.Pool
// var err error
// for i := 1; i < 10; i++ {
// db, err = pgxpool.ConnectConfig(context.Background(), config)
// if err == nil {
// break
// }
// time.Sleep(time.Duration(i*100) * time.Millisecond)
// }
// if err != nil {
// return nil, err
// }
// return db, nil
}

@@ -0,0 +1,95 @@
package auth
import (
"context"
"fmt"
"net/http"
"github.com/dosco/super-graph/config"
"github.com/dosco/super-graph/core"
)
func SimpleHandler(ac *config.Auth, next http.Handler) (http.HandlerFunc, error) {
return func(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
userIDProvider := r.Header.Get("X-User-ID-Provider")
if len(userIDProvider) != 0 {
ctx = context.WithValue(ctx, core.UserIDProviderKey, userIDProvider)
}
userID := r.Header.Get("X-User-ID")
if len(userID) != 0 {
ctx = context.WithValue(ctx, core.UserIDKey, userID)
}
userRole := r.Header.Get("X-User-Role")
if len(userRole) != 0 {
ctx = context.WithValue(ctx, core.UserRoleKey, userRole)
}
next.ServeHTTP(w, r.WithContext(ctx))
}, nil
}
func HeaderHandler(ac *config.Auth, next http.Handler) (http.HandlerFunc, error) {
hdr := ac.Header
if len(hdr.Name) == 0 {
return nil, fmt.Errorf("auth '%s': no header.name defined", ac.Name)
}
if !hdr.Exists && len(hdr.Value) == 0 {
return nil, fmt.Errorf("auth '%s': no header.value defined", ac.Name)
}
return func(w http.ResponseWriter, r *http.Request) {
var fo1 bool
value := r.Header.Get(hdr.Name)
switch {
case hdr.Exists:
fo1 = (len(value) == 0)
default:
fo1 = (value != hdr.Value)
}
if fo1 {
http.Error(w, "401 unauthorized", http.StatusUnauthorized)
return
}
next.ServeHTTP(w, r)
}, nil
}
func WithAuth(next http.Handler, ac *config.Auth) (http.Handler, error) {
var err error
if ac.CredsInHeader {
next, err = SimpleHandler(ac, next)
}
if err != nil {
return nil, err
}
switch ac.Type {
case "rails":
return RailsHandler(ac, next)
case "jwt":
return JwtHandler(ac, next)
case "header":
return HeaderHandler(ac, next)
}
return next, nil
}
func IsAuth(ct context.Context) bool {
return ct.Value(core.UserIDKey) != nil
}
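
A sketch of exercising the header-credentials path with net/http/httptest (the downstream handler here is hypothetical):

h, err := SimpleHandler(&config.Auth{}, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
	fmt.Fprintln(w, IsAuth(r.Context())) // prints true when X-User-ID was set
}))
if err != nil {
	panic(err)
}
req := httptest.NewRequest("GET", "/", nil)
req.Header.Set("X-User-ID", "42")
req.Header.Set("X-User-Role", "admin")
h.ServeHTTP(httptest.NewRecorder(), req)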

@@ -0,0 +1,106 @@
package auth
import (
"context"
"io/ioutil"
"net/http"
"strings"
jwt "github.com/dgrijalva/jwt-go"
"github.com/dosco/super-graph/config"
"github.com/dosco/super-graph/core"
)
const (
authHeader = "Authorization"
jwtAuth0 int = iota + 1
)
func JwtHandler(ac *config.Auth, next http.Handler) (http.HandlerFunc, error) {
var key interface{}
var jwtProvider int
cookie := ac.Cookie
if ac.JWT.Provider == "auth0" {
jwtProvider = jwtAuth0
}
secret := ac.JWT.Secret
publicKeyFile := ac.JWT.PubKeyFile
switch {
case len(secret) != 0:
key = []byte(secret)
case len(publicKeyFile) != 0:
kd, err := ioutil.ReadFile(publicKeyFile)
if err != nil {
return nil, err
}
switch ac.JWT.PubKeyType {
case "ecdsa":
key, err = jwt.ParseECPublicKeyFromPEM(kd)
case "rsa":
key, err = jwt.ParseRSAPublicKeyFromPEM(kd)
default:
key, err = jwt.ParseECPublicKeyFromPEM(kd)
}
if err != nil {
return nil, err
}
}
return func(w http.ResponseWriter, r *http.Request) {
var tok string
if len(cookie) != 0 {
ck, err := r.Cookie(cookie)
if err != nil {
next.ServeHTTP(w, r)
return
}
tok = ck.Value
} else {
ah := r.Header.Get(authHeader)
if len(ah) < 10 {
next.ServeHTTP(w, r)
return
}
tok = ah[7:]
}
token, err := jwt.ParseWithClaims(tok, &jwt.StandardClaims{}, func(token *jwt.Token) (interface{}, error) {
return key, nil
})
if err != nil {
next.ServeHTTP(w, r)
return
}
if claims, ok := token.Claims.(*jwt.StandardClaims); ok {
ctx := r.Context()
if jwtProvider == jwtAuth0 {
sub := strings.Split(claims.Subject, "|")
if len(sub) == 2 { // auth0 subjects have the "provider|id" form
ctx = context.WithValue(ctx, core.UserIDProviderKey, sub[0])
ctx = context.WithValue(ctx, core.UserIDKey, sub[1])
}
} else {
ctx = context.WithValue(ctx, core.UserIDKey, claims.Subject)
}
next.ServeHTTP(w, r.WithContext(ctx))
return
}
next.ServeHTTP(w, r)
}, nil
}
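
A token this handler would accept can be minted with the same jwt-go library; a minimal sketch assuming the HS256 shared-secret configuration:

claims := &jwt.StandardClaims{Subject: "auth0|user123"} // "provider|id" form for the auth0 provider
token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
signed, err := token.SignedString([]byte("my-jwt-secret")) // hypothetical secret
if err != nil {
	panic(err)
}
// send as: Authorization: Bearer <signed>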

@@ -0,0 +1,191 @@
package auth
import (
"context"
"errors"
"fmt"
"net/http"
"net/url"
"strings"
"github.com/bradfitz/gomemcache/memcache"
"github.com/dosco/super-graph/config"
"github.com/dosco/super-graph/core"
"github.com/dosco/super-graph/cmd/internal/serv/internal/rails"
"github.com/garyburd/redigo/redis"
)
func RailsHandler(ac *config.Auth, next http.Handler) (http.HandlerFunc, error) {
ru := ac.Rails.URL
if strings.HasPrefix(ru, "memcache:") {
return RailsMemcacheHandler(ac, next)
}
if strings.HasPrefix(ru, "redis:") {
return RailsRedisHandler(ac, next)
}
return RailsCookieHandler(ac, next)
}
func RailsRedisHandler(ac *config.Auth, next http.Handler) (http.HandlerFunc, error) {
cookie := ac.Cookie
if len(cookie) == 0 {
return nil, fmt.Errorf("no auth.cookie defined")
}
if len(ac.Rails.URL) == 0 {
return nil, fmt.Errorf("no auth.rails.url defined")
}
rp := &redis.Pool{
MaxIdle: ac.Rails.MaxIdle,
MaxActive: ac.Rails.MaxActive,
Dial: func() (redis.Conn, error) {
c, err := redis.DialURL(ac.Rails.URL)
if err != nil {
return nil, err
}
pwd := ac.Rails.Password
if len(pwd) != 0 {
if _, err := c.Do("AUTH", pwd); err != nil {
return nil, err
}
}
return c, nil
},
}
return func(w http.ResponseWriter, r *http.Request) {
ck, err := r.Cookie(cookie)
if err != nil {
next.ServeHTTP(w, r)
return
}
key := fmt.Sprintf("session:%s", ck.Value)
sessionData, err := redis.Bytes(rp.Get().Do("GET", key))
if err != nil {
next.ServeHTTP(w, r)
return
}
userID, err := rails.ParseCookie(string(sessionData))
if err != nil {
next.ServeHTTP(w, r)
return
}
ctx := context.WithValue(r.Context(), core.UserIDKey, userID)
next.ServeHTTP(w, r.WithContext(ctx))
}, nil
}
func RailsMemcacheHandler(ac *config.Auth, next http.Handler) (http.HandlerFunc, error) {
cookie := ac.Cookie
if len(cookie) == 0 {
return nil, fmt.Errorf("no auth.cookie defined")
}
if len(ac.Rails.URL) == 0 {
return nil, fmt.Errorf("no auth.rails.url defined")
}
rURL, err := url.Parse(ac.Rails.URL)
if err != nil {
return nil, err
}
mc := memcache.New(rURL.Host)
return func(w http.ResponseWriter, r *http.Request) {
ck, err := r.Cookie(cookie)
if err != nil {
next.ServeHTTP(w, r)
return
}
key := fmt.Sprintf("session:%s", ck.Value)
item, err := mc.Get(key)
if err != nil {
next.ServeHTTP(w, r)
return
}
userID, err := rails.ParseCookie(string(item.Value))
if err != nil {
next.ServeHTTP(w, r)
return
}
ctx := context.WithValue(r.Context(), core.UserIDKey, userID)
next.ServeHTTP(w, r.WithContext(ctx))
}, nil
}
func RailsCookieHandler(ac *config.Auth, next http.Handler) (http.HandlerFunc, error) {
cookie := ac.Cookie
if len(cookie) == 0 {
return nil, fmt.Errorf("no auth.cookie defined")
}
ra, err := railsAuth(ac)
if err != nil {
return nil, err
}
return func(w http.ResponseWriter, r *http.Request) {
ck, err := r.Cookie(cookie)
if err != nil || len(ck.Value) == 0 {
// logger.Warn().Err(err).Msg("rails cookie missing")
next.ServeHTTP(w, r)
return
}
userID, err := ra.ParseCookie(ck.Value)
if err != nil {
// logger.Warn().Err(err).Msg("failed to parse rails cookie")
next.ServeHTTP(w, r)
return
}
ctx := context.WithValue(r.Context(), core.UserIDKey, userID)
next.ServeHTTP(w, r.WithContext(ctx))
}, nil
}
func railsAuth(ac *config.Auth) (*rails.Auth, error) {
secret := ac.Rails.SecretKeyBase
if len(secret) == 0 {
return nil, errors.New("no auth.rails.secret_key_base defined")
}
version := ac.Rails.Version
if len(version) == 0 {
return nil, errors.New("no auth.rails.version defined")
}
ra, err := rails.NewAuth(version, secret)
if err != nil {
return nil, err
}
if len(ac.Rails.Salt) != 0 {
ra.Salt = ac.Rails.Salt
}
if len(ac.Rails.SignSalt) != 0 {
ra.SignSalt = ac.Rails.SignSalt
}
if len(ac.Rails.AuthSalt) != 0 {
ra.AuthSalt = ac.Rails.AuthSalt
}
return ra, nil
}

@@ -0,0 +1,370 @@
package migrate
import (
"bytes"
"context"
"database/sql"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"regexp"
"strconv"
"strings"
"text/template"
"github.com/pkg/errors"
)
var migrationPattern = regexp.MustCompile(`\A(\d+)_[^\.]+\.sql\z`)
var ErrNoFwMigration = errors.Errorf("no sql in forward migration step")
type BadVersionError string
func (e BadVersionError) Error() string {
return string(e)
}
type IrreversibleMigrationError struct {
m *Migration
}
func (e IrreversibleMigrationError) Error() string {
return fmt.Sprintf("Irreversible migration: %d - %s", e.m.Sequence, e.m.Name)
}
type NoMigrationsFoundError struct {
Path string
}
func (e NoMigrationsFoundError) Error() string {
return fmt.Sprintf("No migrations found at %s", e.Path)
}
type MigrationPgError struct {
Sql string
Error error
}
type Migration struct {
Sequence int32
Name string
UpSQL string
DownSQL string
}
type MigratorOptions struct {
// DisableTx causes the Migrator not to run migrations in a transaction.
DisableTx bool
// MigratorFS is the interface used for collecting the migrations.
MigratorFS MigratorFS
}
type Migrator struct {
db *sql.DB
versionTable string
options *MigratorOptions
Migrations []*Migration
OnStart func(int32, string, string, string) // OnStart is called when a migration is run with the sequence, name, direction, and SQL
Data map[string]interface{} // Data available to use in migrations
}
func NewMigrator(db *sql.DB, versionTable string) (m *Migrator, err error) {
return NewMigratorEx(db, versionTable, &MigratorOptions{MigratorFS: defaultMigratorFS{}})
}
func NewMigratorEx(db *sql.DB, versionTable string, opts *MigratorOptions) (m *Migrator, err error) {
m = &Migrator{db: db, versionTable: versionTable, options: opts}
err = m.ensureSchemaVersionTableExists()
m.Migrations = make([]*Migration, 0)
m.Data = make(map[string]interface{})
return
}
type MigratorFS interface {
ReadDir(dirname string) ([]os.FileInfo, error)
ReadFile(filename string) ([]byte, error)
Glob(pattern string) (matches []string, err error)
}
type defaultMigratorFS struct{}
func (defaultMigratorFS) ReadDir(dirname string) ([]os.FileInfo, error) {
return ioutil.ReadDir(dirname)
}
func (defaultMigratorFS) ReadFile(filename string) ([]byte, error) {
return ioutil.ReadFile(filename)
}
func (defaultMigratorFS) Glob(pattern string) ([]string, error) {
return filepath.Glob(pattern)
}
func FindMigrationsEx(path string, fs MigratorFS) ([]string, error) {
path = strings.TrimRight(path, string(filepath.Separator))
fileInfos, err := fs.ReadDir(path)
if err != nil {
return nil, err
}
paths := make([]string, 0, len(fileInfos))
for _, fi := range fileInfos {
if fi.IsDir() {
continue
}
matches := migrationPattern.FindStringSubmatch(fi.Name())
if len(matches) != 2 {
continue
}
n, err := strconv.ParseInt(matches[1], 10, 32)
if err != nil {
// The regexp already validated that the prefix is all digits so this *should* never fail
return nil, err
}
mcount := len(paths)
if n < int64(mcount) {
return nil, fmt.Errorf("Duplicate migration %d", n)
}
if int64(mcount) < n {
return nil, fmt.Errorf("Missing migration %d", mcount)
}
paths = append(paths, filepath.Join(path, fi.Name()))
}
return paths, nil
}
func FindMigrations(path string) ([]string, error) {
return FindMigrationsEx(path, defaultMigratorFS{})
}
func (m *Migrator) LoadMigrations(path string) error {
path = strings.TrimRight(path, string(filepath.Separator))
mainTmpl := template.New("main")
sharedPaths, err := m.options.MigratorFS.Glob(filepath.Join(path, "*", "*.sql"))
if err != nil {
return err
}
for _, p := range sharedPaths {
body, err := m.options.MigratorFS.ReadFile(p)
if err != nil {
return err
}
name := strings.Replace(p, path+string(filepath.Separator), "", 1)
_, err = mainTmpl.New(name).Parse(string(body))
if err != nil {
return err
}
}
paths, err := FindMigrationsEx(path, m.options.MigratorFS)
if err != nil {
return err
}
if len(paths) == 0 {
return NoMigrationsFoundError{Path: path}
}
for _, p := range paths {
body, err := m.options.MigratorFS.ReadFile(p)
if err != nil {
return err
}
pieces := strings.SplitN(string(body), "---- create above / drop below ----", 2)
var upSQL, downSQL string
upSQL = strings.TrimSpace(pieces[0])
upSQL, err = m.evalMigration(mainTmpl.New(filepath.Base(p)+" up"), upSQL)
if err != nil {
return err
}
// Make sure there is SQL in the forward migration step.
containsSQL := false
for _, v := range strings.Split(upSQL, "\n") {
// Only account for regular single line comment, empty line and space/comment combination
cleanString := strings.TrimSpace(v)
if len(cleanString) != 0 &&
!strings.HasPrefix(cleanString, "--") {
containsSQL = true
break
}
}
if !containsSQL {
return ErrNoFwMigration
}
if len(pieces) == 2 {
downSQL = strings.TrimSpace(pieces[1])
downSQL, err = m.evalMigration(mainTmpl.New(filepath.Base(p)+" down"), downSQL)
if err != nil {
return err
}
}
m.AppendMigration(filepath.Base(p), upSQL, downSQL)
}
return nil
}
func (m *Migrator) evalMigration(tmpl *template.Template, sql string) (string, error) {
tmpl, err := tmpl.Parse(sql)
if err != nil {
return "", err
}
var buf bytes.Buffer
err = tmpl.Execute(&buf, m.Data)
if err != nil {
return "", err
}
return buf.String(), nil
}
func (m *Migrator) AppendMigration(name, upSQL, downSQL string) {
m.Migrations = append(
m.Migrations,
&Migration{
Sequence: int32(len(m.Migrations)) + 1,
Name: name,
UpSQL: upSQL,
DownSQL: downSQL,
})
}
// Migrate runs pending migrations
// It calls m.OnStart when it begins a migration
func (m *Migrator) Migrate() error {
return m.MigrateTo(int32(len(m.Migrations)))
}
// MigrateTo migrates to targetVersion
func (m *Migrator) MigrateTo(targetVersion int32) (err error) {
// Lock to ensure multiple migrations cannot occur simultaneously
lockNum := int64(9628173550095224) // arbitrary random number
if _, lockErr := m.db.Exec("select pg_try_advisory_lock($1)", lockNum); lockErr != nil {
return lockErr
}
defer func() {
_, unlockErr := m.db.Exec("select pg_advisory_unlock($1)", lockNum)
if err == nil && unlockErr != nil {
err = unlockErr
}
}()
currentVersion, err := m.GetCurrentVersion()
if err != nil {
return err
}
if targetVersion < 0 || int32(len(m.Migrations)) < targetVersion {
errMsg := fmt.Sprintf("destination version %d is outside the valid versions of 0 to %d", targetVersion, len(m.Migrations))
return BadVersionError(errMsg)
}
if currentVersion < 0 || int32(len(m.Migrations)) < currentVersion {
errMsg := fmt.Sprintf("current version %d is outside the valid versions of 0 to %d", currentVersion, len(m.Migrations))
return BadVersionError(errMsg)
}
var direction int32
if currentVersion < targetVersion {
direction = 1
} else {
direction = -1
}
for currentVersion != targetVersion {
var current *Migration
var sql, directionName string
var sequence int32
if direction == 1 {
current = m.Migrations[currentVersion]
sequence = current.Sequence
sql = current.UpSQL
directionName = "up"
} else {
current = m.Migrations[currentVersion-1]
sequence = current.Sequence - 1
sql = current.DownSQL
directionName = "down"
if current.DownSQL == "" {
return IrreversibleMigrationError{m: current}
}
}
ctx := context.Background()
tx, err := m.db.BeginTx(ctx, nil)
if err != nil {
return err
}
defer tx.Rollback() //nolint: errcheck
// Fire on start callback
if m.OnStart != nil {
m.OnStart(current.Sequence, current.Name, directionName, sql)
}
// Execute the migration
_, err = tx.Exec(sql)
if err != nil {
// if err, ok := err.(pgx.PgError); ok {
// return MigrationPgError{Sql: sql, PgError: err}
// }
return err
}
// Reset all database connection settings. Important to do before updating version as search_path may have been changed.
// if _, err := tx.Exec(ctx, "reset all"); err != nil {
// return err
// }
// Add one to the version
_, err = tx.Exec("update "+m.versionTable+" set version=$1", sequence)
if err != nil {
return err
}
err = tx.Commit()
if err != nil {
return err
}
currentVersion = currentVersion + direction
}
return nil
}
func (m *Migrator) GetCurrentVersion() (v int32, err error) {
err = m.db.QueryRow("select version from " + m.versionTable).Scan(&v)
return v, err
}
func (m *Migrator) ensureSchemaVersionTableExists() (err error) {
_, err = m.db.Exec(fmt.Sprintf(`
create table if not exists %s(version int4 not null);
insert into %s(version)
select 0
where 0=(select count(*) from %s);
`, m.versionTable, m.versionTable, m.versionTable))
return err
}
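
From the caller's side (for example cmdDBMigrate above), the migrator is used roughly like this; the DSN and migrations path are hypothetical:

db, err := sql.Open("pgx", "postgres://postgres:postgres@localhost:5432/app_dev")
if err != nil {
	panic(err)
}
m, err := migrate.NewMigrator(db, "schema_version")
if err != nil {
	panic(err)
}
if err := m.LoadMigrations("./config/migrations"); err != nil {
	panic(err)
}
if err := m.Migrate(); err != nil { // runs all pending up migrations
	panic(err)
}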

@@ -0,0 +1,352 @@
package migrate_test
/*
import (
. "gopkg.in/check.v1"
)
type MigrateSuite struct {
conn *pgx.Conn
}
func Test(t *testing.T) { TestingT(t) }
var _ = Suite(&MigrateSuite{})
var versionTable string = "schema_version_non_default"
func (s *MigrateSuite) SetUpTest(c *C) {
var err error
s.conn, err = pgx.Connect(*defaultConnectionParameters)
c.Assert(err, IsNil)
s.cleanupSampleMigrator(c)
}
func (s *MigrateSuite) currentVersion(c *C) int32 {
var n int32
err := s.conn.QueryRow("select version from " + versionTable).Scan(&n)
c.Assert(err, IsNil)
return n
}
func (s *MigrateSuite) Exec(c *C, sql string, arguments ...interface{}) pgx.CommandTag {
commandTag, err := s.conn.Exec(sql, arguments...)
c.Assert(err, IsNil)
return commandTag
}
func (s *MigrateSuite) tableExists(c *C, tableName string) bool {
var exists bool
err := s.conn.QueryRow(
"select exists(select 1 from information_schema.tables where table_catalog=$1 and table_name=$2)",
defaultConnectionParameters.Database,
tableName,
).Scan(&exists)
c.Assert(err, IsNil)
return exists
}
func (s *MigrateSuite) createEmptyMigrator(c *C) *migrate.Migrator {
var err error
m, err := migrate.NewMigrator(s.conn, versionTable)
c.Assert(err, IsNil)
return m
}
func (s *MigrateSuite) createSampleMigrator(c *C) *migrate.Migrator {
m := s.createEmptyMigrator(c)
m.AppendMigration("Create t1", "create table t1(id serial);", "drop table t1;")
m.AppendMigration("Create t2", "create table t2(id serial);", "drop table t2;")
m.AppendMigration("Create t3", "create table t3(id serial);", "drop table t3;")
return m
}
func (s *MigrateSuite) cleanupSampleMigrator(c *C) {
tables := []string{versionTable, "t1", "t2", "t3"}
for _, table := range tables {
s.Exec(c, "drop table if exists "+table)
}
}
func (s *MigrateSuite) TestNewMigrator(c *C) {
var m *migrate.Migrator
var err error
// Initial run
m, err = migrate.NewMigrator(s.conn, versionTable)
c.Assert(err, IsNil)
// Creates version table
schemaVersionExists := s.tableExists(c, versionTable)
c.Assert(schemaVersionExists, Equals, true)
// Succeeds when version table is already created
m, err = migrate.NewMigrator(s.conn, versionTable)
c.Assert(err, IsNil)
initialVersion, err := m.GetCurrentVersion()
c.Assert(err, IsNil)
c.Assert(initialVersion, Equals, int32(0))
}
func (s *MigrateSuite) TestAppendMigration(c *C) {
m := s.createEmptyMigrator(c)
name := "Create t"
upSQL := "create t..."
downSQL := "drop t..."
m.AppendMigration(name, upSQL, downSQL)
c.Assert(len(m.Migrations), Equals, 1)
c.Assert(m.Migrations[0].Name, Equals, name)
c.Assert(m.Migrations[0].UpSQL, Equals, upSQL)
c.Assert(m.Migrations[0].DownSQL, Equals, downSQL)
}
func (s *MigrateSuite) TestLoadMigrationsMissingDirectory(c *C) {
m := s.createEmptyMigrator(c)
err := m.LoadMigrations("testdata/missing")
c.Assert(err, ErrorMatches, "open testdata/missing: no such file or directory")
}
func (s *MigrateSuite) TestLoadMigrationsEmptyDirectory(c *C) {
m := s.createEmptyMigrator(c)
err := m.LoadMigrations("testdata/empty")
c.Assert(err, ErrorMatches, "No migrations found at testdata/empty")
}
func (s *MigrateSuite) TestFindMigrationsWithGaps(c *C) {
_, err := migrate.FindMigrations("testdata/gap")
c.Assert(err, ErrorMatches, "Missing migration 2")
}
func (s *MigrateSuite) TestFindMigrationsWithDuplicate(c *C) {
_, err := migrate.FindMigrations("testdata/duplicate")
c.Assert(err, ErrorMatches, "Duplicate migration 2")
}
func (s *MigrateSuite) TestLoadMigrations(c *C) {
m := s.createEmptyMigrator(c)
m.Data = map[string]interface{}{"prefix": "foo"}
err := m.LoadMigrations("testdata/sample")
c.Assert(err, IsNil)
c.Assert(m.Migrations, HasLen, 5)
c.Check(m.Migrations[0].Name, Equals, "001_create_t1.sql")
c.Check(m.Migrations[0].UpSQL, Equals, `create table t1(
id serial primary key
);`)
c.Check(m.Migrations[0].DownSQL, Equals, "drop table t1;")
c.Check(m.Migrations[1].Name, Equals, "002_create_t2.sql")
c.Check(m.Migrations[1].UpSQL, Equals, `create table t2(
id serial primary key
);`)
c.Check(m.Migrations[1].DownSQL, Equals, "drop table t2;")
c.Check(m.Migrations[2].Name, Equals, "003_irreversible.sql")
c.Check(m.Migrations[2].UpSQL, Equals, "drop table t2;")
c.Check(m.Migrations[2].DownSQL, Equals, "")
c.Check(m.Migrations[3].Name, Equals, "004_data_interpolation.sql")
c.Check(m.Migrations[3].UpSQL, Equals, "create table foo_bar(id serial primary key);")
c.Check(m.Migrations[3].DownSQL, Equals, "drop table foo_bar;")
}
func (s *MigrateSuite) TestLoadMigrationsNoForward(c *C) {
var err error
m, err := migrate.NewMigrator(s.conn, versionTable)
c.Assert(err, IsNil)
m.Data = map[string]interface{}{"prefix": "foo"}
err = m.LoadMigrations("testdata/noforward")
c.Assert(err, Equals, migrate.ErrNoFwMigration)
}
func (s *MigrateSuite) TestMigrate(c *C) {
m := s.createSampleMigrator(c)
err := m.Migrate()
c.Assert(err, IsNil)
currentVersion := s.currentVersion(c)
c.Assert(currentVersion, Equals, int32(3))
}
func (s *MigrateSuite) TestMigrateToLifeCycle(c *C) {
m := s.createSampleMigrator(c)
var onStartCallUpCount int
var onStartCallDownCount int
m.OnStart = func(_ int32, _, direction, _ string) {
switch direction {
case "up":
onStartCallUpCount++
case "down":
onStartCallDownCount++
default:
c.Fatalf("Unexpected direction: %s", direction)
}
}
// Migrate from 0 up to 1
err := m.MigrateTo(1)
c.Assert(err, IsNil)
currentVersion := s.currentVersion(c)
c.Assert(currentVersion, Equals, int32(1))
c.Assert(s.tableExists(c, "t1"), Equals, true)
c.Assert(s.tableExists(c, "t2"), Equals, false)
c.Assert(s.tableExists(c, "t3"), Equals, false)
c.Assert(onStartCallUpCount, Equals, 1)
c.Assert(onStartCallDownCount, Equals, 0)
// Migrate from 1 up to 3
err = m.MigrateTo(3)
c.Assert(err, IsNil)
currentVersion = s.currentVersion(c)
c.Assert(currentVersion, Equals, int32(3))
c.Assert(s.tableExists(c, "t1"), Equals, true)
c.Assert(s.tableExists(c, "t2"), Equals, true)
c.Assert(s.tableExists(c, "t3"), Equals, true)
c.Assert(onStartCallUpCount, Equals, 3)
c.Assert(onStartCallDownCount, Equals, 0)
// Migrate from 3 to 3 is no-op
err = m.MigrateTo(3)
c.Assert(err, IsNil)
currentVersion = s.currentVersion(c)
c.Assert(s.tableExists(c, "t1"), Equals, true)
c.Assert(s.tableExists(c, "t2"), Equals, true)
c.Assert(s.tableExists(c, "t3"), Equals, true)
c.Assert(onStartCallUpCount, Equals, 3)
c.Assert(onStartCallDownCount, Equals, 0)
// Migrate from 3 down to 1
err = m.MigrateTo(1)
c.Assert(err, IsNil)
currentVersion = s.currentVersion(c)
c.Assert(currentVersion, Equals, int32(1))
c.Assert(s.tableExists(c, "t1"), Equals, true)
c.Assert(s.tableExists(c, "t2"), Equals, false)
c.Assert(s.tableExists(c, "t3"), Equals, false)
c.Assert(onStartCallUpCount, Equals, 3)
c.Assert(onStartCallDownCount, Equals, 2)
// Migrate from 1 down to 0
err = m.MigrateTo(0)
c.Assert(err, IsNil)
currentVersion = s.currentVersion(c)
c.Assert(currentVersion, Equals, int32(0))
c.Assert(s.tableExists(c, "t1"), Equals, false)
c.Assert(s.tableExists(c, "t2"), Equals, false)
c.Assert(s.tableExists(c, "t3"), Equals, false)
c.Assert(onStartCallUpCount, Equals, 3)
c.Assert(onStartCallDownCount, Equals, 3)
// Migrate back up to 3
err = m.MigrateTo(3)
c.Assert(err, IsNil)
currentVersion = s.currentVersion(c)
c.Assert(currentVersion, Equals, int32(3))
c.Assert(s.tableExists(c, "t1"), Equals, true)
c.Assert(s.tableExists(c, "t2"), Equals, true)
c.Assert(s.tableExists(c, "t3"), Equals, true)
c.Assert(onStartCallUpCount, Equals, 6)
c.Assert(onStartCallDownCount, Equals, 3)
}
func (s *MigrateSuite) TestMigrateToBoundaries(c *C) {
m := s.createSampleMigrator(c)
// Migrate to -1 is error
err := m.MigrateTo(-1)
c.Assert(err, ErrorMatches, "destination version -1 is outside the valid versions of 0 to 3")
// Migrate past end is error
err = m.MigrateTo(int32(len(m.Migrations)) + 1)
c.Assert(err, ErrorMatches, "destination version 4 is outside the valid versions of 0 to 3")
// When schema version says it is negative
s.Exec(c, "update "+versionTable+" set version=-1")
err = m.MigrateTo(int32(1))
c.Assert(err, ErrorMatches, "current version -1 is outside the valid versions of 0 to 3")
// When schema version says it is past the last migration
s.Exec(c, "update "+versionTable+" set version=4")
err = m.MigrateTo(int32(1))
c.Assert(err, ErrorMatches, "current version 4 is outside the valid versions of 0 to 3")
}
func (s *MigrateSuite) TestMigrateToIrreversible(c *C) {
m := s.createEmptyMigrator(c)
m.AppendMigration("Foo", "drop table if exists t3", "")
err := m.MigrateTo(1)
c.Assert(err, IsNil)
err = m.MigrateTo(0)
c.Assert(err, ErrorMatches, "Irreversible migration: 1 - Foo")
}
func (s *MigrateSuite) TestMigrateToDisableTx(c *C) {
m, err := migrate.NewMigratorEx(s.conn, versionTable, &migrate.MigratorOptions{DisableTx: true})
c.Assert(err, IsNil)
m.AppendMigration("Create t1", "create table t1(id serial);", "drop table t1;")
m.AppendMigration("Create t2", "create table t2(id serial);", "drop table t2;")
m.AppendMigration("Create t3", "create table t3(id serial);", "drop table t3;")
tx, err := s.conn.Begin()
c.Assert(err, IsNil)
err = m.MigrateTo(3)
c.Assert(err, IsNil)
currentVersion := s.currentVersion(c)
c.Assert(currentVersion, Equals, int32(3))
c.Assert(s.tableExists(c, "t1"), Equals, true)
c.Assert(s.tableExists(c, "t2"), Equals, true)
c.Assert(s.tableExists(c, "t3"), Equals, true)
err = tx.Rollback()
c.Assert(err, IsNil)
currentVersion = s.currentVersion(c)
c.Assert(currentVersion, Equals, int32(0))
c.Assert(s.tableExists(c, "t1"), Equals, false)
c.Assert(s.tableExists(c, "t2"), Equals, false)
c.Assert(s.tableExists(c, "t3"), Equals, false)
}
func Example_OnStartMigrationProgressLogging() {
conn, err := pgx.Connect(*defaultConnectionParameters)
if err != nil {
fmt.Printf("Unable to establish connection: %v", err)
return
}
// Clear any previous runs
if _, err = conn.Exec("drop table if exists schema_version"); err != nil {
fmt.Printf("Unable to drop schema_version table: %v", err)
return
}
var m *migrate.Migrator
m, err = migrate.NewMigrator(conn, "schema_version")
if err != nil {
fmt.Printf("Unable to create migrator: %v", err)
return
}
m.OnStart = func(_ int32, name, direction, _ string) {
fmt.Printf("Migrating %s: %s", direction, name)
}
m.AppendMigration("create a table", "create temporary table foo(id serial primary key)", "")
if err = m.Migrate(); err != nil {
fmt.Printf("Unexpected failure migrating: %v", err)
return
}
// Output:
// Migrating up: create a table
}
*/
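For orientation, a minimal, self-contained sketch of the MigrateTo flow the commented-out tests above exercise. The import paths are assumptions based on this commit's layout; the calls (pgx.Connect, NewMigrator, AppendMigration, MigrateTo) mirror the tests, and version N means "the first N migrations are applied", so 0 is a clean schema.

package main

import (
"fmt"

"github.com/dosco/super-graph/cmd/internal/serv/internal/migrate" // assumed path
"github.com/jackc/pgx"
)

func main() {
conn, err := pgx.Connect(pgx.ConnConfig{Host: "localhost", User: "postgres", Database: "app_development"})
if err != nil {
fmt.Println("connect:", err)
return
}
defer conn.Close()
m, err := migrate.NewMigrator(conn, "schema_version")
if err != nil {
fmt.Println("migrator:", err)
return
}
// Each migration carries its up SQL and, optionally, its down SQL.
m.AppendMigration("Create t1", "create table t1(id serial);", "drop table t1;")
if err := m.MigrateTo(1); err != nil { // run the up SQL
fmt.Println("up:", err)
return
}
if err := m.MigrateTo(0); err != nil { // run the down SQL
fmt.Println("down:", err)
}
}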

View File

@ -0,0 +1,7 @@
create table t1(
id serial primary key
);
---- create above / drop below ----
drop table t1;

View File

@ -0,0 +1,7 @@
create table t2(
id serial primary key
);
---- create above / drop below ----
drop table t2;

View File

@ -0,0 +1,7 @@
create table duplicate(
id serial primary key
);
---- create above / drop below ----
drop table duplicate;

View File

@ -0,0 +1,7 @@
create table t1(
id serial primary key
);
---- create above / drop below ----
drop table t1;

View File

@ -0,0 +1 @@
drop table t2;

View File

@ -0,0 +1,7 @@
-- no SQL here
-- nor here, just all comments.
-- comment with space before
---- create above / drop below ----
drop table t1;

View File

@ -0,0 +1,7 @@
create table t1(
id serial primary key
);
---- create above / drop below ----
drop table t1;

View File

@ -0,0 +1,7 @@
create table t2(
id serial primary key
);
---- create above / drop below ----
drop table t2;

View File

@ -0,0 +1 @@
drop table t2;

View File

@ -0,0 +1,5 @@
create table {{.prefix}}_bar(id serial primary key);
---- create above / drop below ----
drop table {{.prefix}}_bar;

View File

@ -0,0 +1,5 @@
{{ template "shared/v1_001.sql" . }}
---- create above / drop below ----
drop view {{.prefix}}v1;

View File

@ -0,0 +1 @@
create view {{.prefix}}v1 as select * from t1;

View File

@ -0,0 +1 @@
-- This file should be ignored because it does not start with a number.

View File

@ -0,0 +1,175 @@
package rails
import (
"encoding/json"
"errors"
"fmt"
"strconv"
"strings"
"github.com/adjust/gorails/marshal"
)
const (
salt = "encrypted cookie"
signSalt = "signed encrypted cookie"
authSalt = "authenticated encrypted cookie"
railsCipher = "aes-256-cbc"
railsCipher52 = "aes-256-gcm"
)
var (
errSessionData = errors.New("error decoding session data")
)
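// Auth holds the settings needed to decrypt a Rails session cookie:
// the cipher picked from the Rails version, plus the secret_key_base
// and the salts Rails derives its keys with.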
type Auth struct {
Cipher string
Secret string
Salt string
SignSalt string
AuthSalt string
}
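// NewAuth returns an Auth for the given Rails version string; versions
// 5.2 and later get AES-256-GCM cookies, older ones AES-256-CBC.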
func NewAuth(version, secret string) (*Auth, error) {
ra := &Auth{
Secret: secret,
Salt: salt,
SignSalt: signSalt,
AuthSalt: authSalt,
}
var v1, v2 int
var err error
sv := strings.Split(version, ".")
if len(sv) >= 2 {
if v1, err = strconv.Atoi(sv[0]); err != nil {
return nil, err
}
if v2, err = strconv.Atoi(sv[1]); err != nil {
return nil, err
}
}
if v1 > 5 || (v1 == 5 && v2 >= 2) {
ra.Cipher = railsCipher52
} else {
ra.Cipher = railsCipher
}
return ra, nil
}
func (ra *Auth) ParseCookie(cookie string) (userID string, err error) {
var dcookie []byte
switch ra.Cipher {
case railsCipher:
dcookie, err = parseCookie(cookie, ra.Secret, ra.Salt, ra.SignSalt)
case railsCipher52:
dcookie, err = parseCookie52(cookie, ra.Secret, ra.AuthSalt)
default:
err = fmt.Errorf("unknown rails cookie cipher '%s'", ra.Cipher)
}
if err != nil {
return
}
if len(dcookie) == 0 || dcookie[0] != '{' {
userID, err = getUserId4(dcookie)
} else {
userID, err = getUserId(dcookie)
}
return
}
func ParseCookie(cookie string) (string, error) {
if len(cookie) == 0 || cookie[0] != '{' {
return getUserId4([]byte(cookie))
}
return getUserId([]byte(cookie))
}
func getUserId(data []byte) (userID string, err error) {
var sessionData map[string]interface{}
err = json.Unmarshal(data, &sessionData)
if err != nil {
return
}
userKey, ok := sessionData["warden.user.user.key"]
if !ok {
err = errors.New("key 'warden.user.user.key' not found in session data")
return
}
items, ok := userKey.([]interface{})
if !ok {
err = errSessionData
return
}
if len(items) != 2 {
err = errSessionData
return
}
uids, ok := items[0].([]interface{})
if !ok {
err = errSessionData
return
}
uid, ok := uids[0].(float64)
if !ok {
err = errSessionData
return
}
userID = fmt.Sprintf("%d", int64(uid))
return
}
func getUserId4(data []byte) (userID string, err error) {
sessionData, err := marshal.CreateMarshalledObject(data).GetAsMap()
if err != nil {
return
}
wardenData, ok := sessionData["warden.user.user.key"]
if !ok {
err = errSessionData
return
}
wardenUserKey, err := wardenData.GetAsArray()
if err != nil {
return
}
if len(wardenUserKey) < 1 {
err = errSessionData
return
}
userData, err := wardenUserKey[0].GetAsArray()
if err != nil {
return
}
if len(userData) < 1 {
err = errSessionData
return
}
uid, err := userData[0].GetAsInteger()
if err != nil {
return
}
userID = fmt.Sprintf("%d", uid)
return
}
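A small sketch of how this package is driven; the import path is assumed from this commit's layout, and the secret and cookie values are placeholders (the cookie would come from the Rails session cookie on the incoming request).

package main

import (
"fmt"

"github.com/dosco/super-graph/cmd/internal/serv/internal/auth/rails" // assumed path
)

func main() {
secretKeyBase := "<Rails.application.config.secret_key_base>" // placeholder
cookie := "<url-escaped session cookie value>"                // placeholder
ra, err := rails.NewAuth("5.2", secretKeyBase) // selects AES-256-GCM
if err != nil {
fmt.Println("auth:", err)
return
}
userID, err := ra.ParseCookie(cookie)
if err != nil {
fmt.Println("parse:", err)
return
}
fmt.Println("user id:", userID)
}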

View File

@ -0,0 +1,79 @@
package rails
import (
"testing"
)
func TestRailsEncryptedSession1(t *testing.T) {
cookie := "dDdjMW5jYUNYaFpBT1BSdFgwQkk4ZWNlT214L1FnM0pyZzZ1d21nSnVTTm9zS0ljN000S1JmT3cxcTNtRld2Ny0tQUFBQUFBQUFBQUFBQUFBQUFBQUFBQT09--75d8323b0f0e41cf4d5aabee1b229b1be76b83b6"
secret := "development_secret"
ra := Auth{
Cipher: railsCipher,
Secret: secret,
Salt: salt,
SignSalt: signSalt,
}
userID, err := ra.ParseCookie(cookie)
if err != nil {
t.Error(err)
return
}
if userID != "1" {
t.Errorf("Expecting userID 1 got %s", userID)
}
}
func TestRailsEncryptedSession52(t *testing.T) {
cookie :=
"fZy1lt%2FIuXh2cpQgy3wWjbvabh1AqJX%2Bt6qO4D95DOZIpDhMyK2HqPFeNoaBtrXCUa9%2BDQuvbs1GX6tuccEAp14QPLNhm0PPJS5U1pRHqPLWaqT%2BBPYP%2BY9bo677komm9CPuOCOqBKf7rv3%2F4ptLmVO7iefB%2FP2ZlkV1848Johv5q%2B5PGyMxII2BEQnBdS3Petw6lRu741Bquc8z9VofC3t4%2F%2BLxVz%2BvBbTg--VL0MorYITXB8Dj3W--0yr0sr6pRU%2FwlYMQ%2BpEifA%3D%3D"
secret := "0a248500a64c01184edb4d7ad3a805488f8097ac761b76aaa6c17c01dcb7af03a2f18ba61b2868134b9c7b79a122bc0dadff4367414a2d173297bfea92be5566"
ra := Auth{
Cipher: railsCipher52,
Secret: secret,
AuthSalt: authSalt,
}
userID, err := ra.ParseCookie(cookie)
if err != nil {
t.Error(err)
return
}
if userID != "2" {
t.Errorf("Expecting userID 2 got %s", userID)
}
}
func TestRailsJsonSession(t *testing.T) {
sessionData := `{"warden.user.user.key":[[1],"secret"]}`
userID, err := getUserId([]byte(sessionData))
if err != nil {
t.Error(err)
return
}
if userID != "1" {
t.Errorf("Expecting userID 1 got %s", userID)
}
}
func TestRailsMarshaledSession(t *testing.T) {
sessionData := "\x04\b{\bI\"\x15member_return_to\x06:\x06ETI\"\x06/\x06;\x00TI\"\x19warden.user.user.key\x06;\x00T[\a[\x06i\aI\"\"$2a$11$6SgXdvO9hld82kQAvpEY3e\x06;\x00TI\"\x10_csrf_token\x06;\x00FI\"17lqwj1UsTTgbXBQKH4ipCNW32uLusvfSPds1txppMec=\x06;\x00F"
userID, err := getUserId4([]byte(sessionData))
if err != nil {
t.Error(err)
return
}
if userID != "2" {
t.Errorf("Expecting userID 2 got %s", userID)
}
}

View File

@ -0,0 +1,62 @@
package rails
import (
"crypto/aes"
"crypto/cipher"
"crypto/sha1"
"encoding/base64"
"net/url"
"strings"
"github.com/adjust/gorails/session"
"golang.org/x/crypto/pbkdf2"
)
func parseCookie(cookie, secretKeyBase, salt, signSalt string) ([]byte, error) {
return session.DecryptSignedCookie(
cookie,
secretKeyBase,
salt,
signSalt)
}
// {"session_id":"a71d6ffcd4ed5572ea2097f569eb95ef","warden.user.user.key":[[2],"$2a$11$q9Br7m4wJxQvF11hAHvTZO"],"_csrf_token":"HsYgrD2YBaWAabOYceN0hluNRnGuz49XiplmMPt43aY="}
func parseCookie52(cookie, secretKeyBase, authSalt string) ([]byte, error) {
ecookie, err := url.QueryUnescape(cookie)
if err != nil {
return nil, err
}
vectors := strings.Split(ecookie, "--")
if len(vectors) != 3 {
return nil, errSessionData
}
body, err := base64.RawStdEncoding.DecodeString(vectors[0])
if err != nil {
return nil, err
}
iv, err := base64.RawStdEncoding.DecodeString(vectors[1])
if err != nil {
return nil, err
}
tag, err := base64.StdEncoding.DecodeString(vectors[2])
if err != nil {
return nil, err
}
key := pbkdf2.Key([]byte(secretKeyBase), []byte(authSalt),
1000, 32, sha1.New)
c, err := aes.NewCipher(key)
if err != nil {
return nil, err
}
gcm, err := cipher.NewGCM(c)
if err != nil {
return nil, err
}
return gcm.Open(nil, iv, append(body, tag...), nil)
}
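The GCM path above, as one self-contained round trip: derive a 32-byte key from secret_key_base with PBKDF2 (1000 iterations, SHA-1, the authenticated-cookie salt) and open an AES-256-GCM box. The sketch seals a value first so it verifies end to end; a real cookie supplies the body, IV and tag segments instead, and the secret here is a placeholder.

package main

import (
"crypto/aes"
"crypto/cipher"
"crypto/sha1"
"fmt"

"golang.org/x/crypto/pbkdf2"
)

func main() {
key := pbkdf2.Key([]byte("secret_key_base"), []byte("authenticated encrypted cookie"), 1000, 32, sha1.New)
block, err := aes.NewCipher(key)
if err != nil {
fmt.Println(err)
return
}
gcm, err := cipher.NewGCM(block)
if err != nil {
fmt.Println(err)
return
}
iv := make([]byte, gcm.NonceSize()) // zero nonce; acceptable for a one-off demo only
sealed := gcm.Seal(nil, iv, []byte(`{"session_id":"abc"}`), nil) // ciphertext||tag
plain, err := gcm.Open(nil, iv, sealed, nil) // mirrors gcm.Open(nil, iv, append(body, tag...), nil)
if err != nil {
fmt.Println("open:", err)
return
}
fmt.Printf("%s\n", plain) // {"session_id":"abc"}
}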

View File

@ -0,0 +1,37 @@
package serv
import "net/http"
//nolint: errcheck
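// introspect serves a canned, minimal introspection result (plus sample
// tracing extensions) so GraphQL clients probing the schema get a
// well-formed reply.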
func introspect(w http.ResponseWriter) {
w.Header().Set("Content-Type", "application/json")
w.Write([]byte(`{
"data": {
"__schema": {
"queryType": {
"name": "Query"
},
"mutationType": null,
"subscriptionType": null
}
},
"extensions":{
"tracing":{
"version":1,
"startTime":"2019-06-04T19:53:31.093Z",
"endTime":"2019-06-04T19:53:31.108Z",
"duration":15219720,
"execution": {
"resolvers": [{
"path": ["__schema"],
"parentType": "Query",
"fieldName": "__schema",
"returnType": "__Schema!",
"startOffset": 50950,
"duration": 17187
}]
}
}
}
}`))
}

206
cmd/internal/serv/reload.go Normal file
View File

@ -0,0 +1,206 @@
// Package reload offers lightweight automatic reloading of running processes.
//
// After initialisation with reload.Do() any changes to the binary will
// restart the process.
//
// Example:
//
// go func() {
// err := reload.Do(log.Printf)
// if err != nil {
// panic(err)
// }
// }()
//
// A list of additional directories to watch can be added:
//
// go func() {
// err := reload.Do(log.Printf, reload.Dir("tpl", reloadTpl))
// if err != nil {
// panic(err)
// }
// }()
//
// Note that this package won't prevent race conditions (e.g. when assigning to
// a global templates variable). You'll need to use sync.RWMutex yourself.
package serv
import (
"fmt"
"os"
"path/filepath"
"runtime"
"strings"
"syscall"
"time"
"github.com/fsnotify/fsnotify"
"github.com/pkg/errors"
)
var binSelf string
type dir struct {
path string
cb func()
}
// Dir is an additional directory to watch for changes. Directories are watched
// non-recursively.
//
// The second argument is the callback to run when the directory changes.
// Use reload.ReExec() to restart the process.
func Dir(path string, cb func()) dir { return dir{path, cb} } // nolint: golint
// Do reloads the current process when its binary changes.
//
// The log function is used to display an informational startup message and
// errors. It works well with e.g. the standard log package or Logrus.
//
// Only initialisation errors are returned. Once initialised, errors are
// printed via the log function rather than returned.
func Do(log func(string, ...interface{}), additional ...dir) error {
watcher, err := fsnotify.NewWatcher()
if err != nil {
return errors.Wrap(err, "cannot setup watcher")
}
defer watcher.Close() // nolint: errcheck
binSelf, err = self()
if err != nil {
return err
}
// Watch the directory, because a recompile renames the existing
// file (rather than rewriting it), so we won't get events for that.
dirs := make([]string, len(additional)+1)
dirs[0] = filepath.Dir(binSelf)
for i := range additional {
path, err := filepath.Abs(additional[i].path)
if err != nil {
return errors.Wrapf(err, "cannot get absolute path to %q",
additional[i].path)
}
s, err := os.Stat(path)
if err != nil {
return errors.Wrap(err, "os.Stat")
}
if !s.IsDir() {
return errors.Errorf("not a directory: %q; can only watch directories",
additional[i].path)
}
additional[i].path = path
dirs[i+1] = path
}
done := make(chan bool)
go func() {
for {
select {
case err := <-watcher.Errors:
// Standard logger doesn't have anything other than Print,
// Panic, and Fatal :-/ Printf() is probably best.
log("reload error: %v", err)
case event := <-watcher.Events:
// Ensure that we use the correct events, as they are not uniform across
// platforms. See https://github.com/fsnotify/fsnotify/issues/74
if conf != nil && strings.HasSuffix(event.Name, "/allow.list") {
continue
}
if conf != nil && conf.Production {
continue
}
log("INF Reloading, file changed detected: %s", event)
var trigger bool
switch runtime.GOOS {
case "darwin", "freebsd", "openbsd", "netbsd", "dragonfly":
trigger = event.Op&fsnotify.Create == fsnotify.Create
case "linux":
trigger = event.Op&fsnotify.Write == fsnotify.Write
default:
trigger = event.Op&fsnotify.Create == fsnotify.Create
log("reload: untested GOOS %q; this package may not work correctly", runtime.GOOS)
}
if !trigger {
continue
}
if event.Name == binSelf {
// Wait for writes to finish.
time.Sleep(100 * time.Millisecond)
ReExec()
}
for _, a := range additional {
if strings.HasPrefix(event.Name, a.path) {
time.Sleep(100 * time.Millisecond)
a.cb()
}
}
}
}
}()
for _, d := range dirs {
if err := watcher.Add(d); err != nil {
return errors.Wrapf(err, "cannot add %q to watcher", d)
}
}
add := ""
if len(additional) > 0 {
reldirs := make([]string, len(dirs)-1)
for i := range dirs[1:] {
reldirs[i] = relpath(dirs[i+1])
}
add = fmt.Sprintf(" (additional dirs: %s)", strings.Join(reldirs, ", "))
}
log("restarting %q when it changes%s", relpath(binSelf), add)
<-done
return nil
}
// ReExec replaces the current process with a new copy of itself.
func ReExec() {
err := syscall.Exec(binSelf, append([]string{binSelf}, os.Args[1:]...), os.Environ())
if err != nil {
log.Fatalf("ERR cannot restart: %s", err)
}
}
// self returns the path to the currently running executable.
func self() (string, error) {
bin := os.Args[0]
if !filepath.IsAbs(bin) {
var err error
bin, err = os.Executable()
if err != nil {
return "", errors.Wrapf(err,
"cannot get path to binary %q (launch with absolute path): %v",
os.Args[0], err)
}
}
return bin, nil
}
// relpath returns p relative to the current working directory, when possible.
func relpath(p string) string {
cwd, err := os.Getwd()
if err != nil {
return p
}
if strings.HasPrefix(p, cwd) {
return "./" + strings.TrimLeft(p[len(cwd):], "/")
}
return p
}

File diff suppressed because one or more lines are too long

180
cmd/internal/serv/serv.go Normal file
View File

@ -0,0 +1,180 @@
package serv
import (
"context"
"fmt"
"net/http"
"os"
"os/signal"
"strings"
"time"
rice "github.com/GeertJohan/go.rice"
"github.com/NYTimes/gziphandler"
"github.com/dosco/super-graph/cmd/internal/serv/internal/auth"
"github.com/dosco/super-graph/config"
)
func initWatcher() {
if conf == nil || !conf.WatchAndReload {
return
}
cpath := conf.ConfigPathUsed()
var d dir
if len(cpath) == 0 || cpath == "./" {
d = Dir("./config", ReExec)
} else {
d = Dir(cpath, ReExec)
}
go func() {
err := Do(log.Printf, d)
if err != nil {
log.Fatalf("ERR %s", err)
}
}()
}
func startHTTP() {
var hostPort string
var appName string
defaultHP := "0.0.0.0:8080"
env := os.Getenv("GO_ENV")
if conf != nil {
appName = conf.AppName
hp := strings.SplitN(conf.HostPort, ":", 2)
if len(hp) == 2 {
if len(conf.Host) != 0 {
hp[0] = conf.Host
}
if len(conf.Port) != 0 {
hp[1] = conf.Port
}
hostPort = fmt.Sprintf("%s:%s", hp[0], hp[1])
}
}
if len(hostPort) == 0 {
hostPort = defaultHP
}
routes, err := routeHandler()
if err != nil {
log.Fatalf("ERR %s", err)
}
srv := &http.Server{
Addr: hostPort,
Handler: routes,
ReadTimeout: 5 * time.Second,
WriteTimeout: 10 * time.Second,
MaxHeaderBytes: 1 << 20,
}
idleConnsClosed := make(chan struct{})
go func() {
sigint := make(chan os.Signal, 1)
signal.Notify(sigint, os.Interrupt)
<-sigint
log.Println("INF shutdown signal received")
if err := srv.Shutdown(context.Background()); err != nil {
log.Fatalln("ERR graceful shutdown failed:", err)
}
close(idleConnsClosed)
}()
srv.RegisterOnShutdown(func() {
db.Close()
})
log.Printf("INF version: %s, git-branch: %s, host-port: %s, app-name: %s, env: %s\n",
version, gitBranch, hostPort, appName, env)
log.Printf("INF %s started\n", serverName)
if err := srv.ListenAndServe(); err != http.ErrServerClosed {
log.Fatalln("ERR server failed:", err)
}
<-idleConnsClosed
}
func routeHandler() (http.Handler, error) {
mux := http.NewServeMux()
if conf == nil {
return mux, nil
}
routes := map[string]http.Handler{
"/health": http.HandlerFunc(health),
"/api/v1/graphql": apiV1Handler(),
}
if err := setActionRoutes(routes); err != nil {
return nil, err
}
if conf.WebUI {
routes["/"] = http.FileServer(rice.MustFindBox("./web/build").HTTPBox())
}
if conf.HTTPGZip {
gz := gziphandler.MustNewGzipLevelHandler(6)
for k, v := range routes {
routes[k] = gz(v)
}
}
for k, v := range routes {
mux.Handle(k, v)
}
fn := func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Server", serverName)
mux.ServeHTTP(w, r)
}
return http.HandlerFunc(fn), nil
}
func setActionRoutes(routes map[string]http.Handler) error {
var err error
for _, a := range conf.Actions {
a := a // copy the loop variable; the action handler keeps this pointer
var fn http.Handler
fn, err = newAction(&a)
if err != nil {
break
}
p := fmt.Sprintf("/api/v1/actions/%s", strings.ToLower(a.Name))
if ac := findAuth(a.AuthName); ac != nil {
routes[p], err = auth.WithAuth(fn, ac)
} else {
routes[p] = fn
}
if err != nil {
return err
}
}
return nil
}
func findAuth(name string) *config.Auth {
for _, a := range conf.Auths {
if strings.EqualFold(a.Name, name) {
return &a
}
}
return nil
}

View File

@ -0,0 +1,43 @@
package serv
// import (
// "context"
// "github.com/jackc/pgx/v4"
// "github.com/rs/zerolog"
// )
// type Logger struct {
// logger zerolog.Logger
// }
// // NewSQLLogger accepts a zerolog.Logger as input and returns a new custom pgx
// // logging facade as output.
// func NewSQLLogger(logger zerolog.Logger) *Logger {
// return &Logger{
// logger: logger.With().Logger(),
// }
// }
// func (pl *Logger) Log(ctx context.Context, level pgx.LogLevel, msg string, data map[string]interface{}) {
// var zlevel zerolog.Level
// switch level {
// case pgx.LogLevelNone:
// zlevel = zerolog.NoLevel
// case pgx.LogLevelError:
// zlevel = zerolog.ErrorLevel
// case pgx.LogLevelWarn:
// zlevel = zerolog.WarnLevel
// case pgx.LogLevelDebug, pgx.LogLevelInfo:
// zlevel = zerolog.DebugLevel
// default:
// zlevel = zerolog.DebugLevel
// }
// if sql, ok := data["sql"]; ok {
// delete(data, "sql")
// pl.logger.WithLevel(zlevel).Fields(data).Msg(sql.(string))
// } else {
// pl.logger.WithLevel(zlevel).Fields(data).Msg(msg)
// }
// }

View File

@ -0,0 +1,17 @@
-- Write your migrate up statements here
CREATE TABLE public.users (
id bigint GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
full_name text,
email text UNIQUE NOT NULL CHECK (length(email) < 255),
created_at timestamptz NOT NULL DEFAULT NOW(),
updated_at timestamptz NOT NULL DEFAULT NOW()
);
---- create above / drop below ----
-- Write your migrate down statements here. If this migration is irreversible
-- then delete the separator line above.
DROP TABLE public.users;

View File

@ -0,0 +1,4 @@
FROM dosco/super-graph:latest
WORKDIR /
COPY config/* /config/

View File

@ -0,0 +1,222 @@
app_name: "{% app_name %} Development"
host_port: 0.0.0.0:8080
web_ui: true
# debug, error, warn, info
log_level: "info"
# enable or disable http compression (uses gzip)
http_compress: true
# When production mode is 'true' only queries
# from the allow list are permitted.
# When it's 'false' all queries are saved to
# the allow list in ./config/allow.list
production: false
# Throw a 401 on auth failure for queries that need auth
auth_fail_block: false
# Latency tracing for database queries and remote joins
# the resulting latency information is returned with the
# response
enable_tracing: true
# Watch the config folder and reload Super Graph
# with the new configs when a change is detected
reload_on_config_change: true
# File that points to the database seeding script
# seed_file: seed.js
# Path pointing to where the migrations can be found
# this must be a relative path under the config path
migrations_path: ./migrations
# Secret key for general encryption operations like
# encrypting the cursor data
secret_key: supercalifajalistics
# CORS: A list of origins a cross-domain request can be executed from.
# If the special * value is present in the list, all origins will be allowed.
# An origin may contain a wildcard (*) to replace 0 or more
# characters (i.e.: http://*.domain.com).
cors_allowed_origins: ["*"]
# Debug Cross Origin Resource Sharing requests
cors_debug: false
# Postgres related environment Variables
# SG_DATABASE_HOST
# SG_DATABASE_PORT
# SG_DATABASE_USER
# SG_DATABASE_PASSWORD
# Auth related environment Variables
# SG_AUTH_RAILS_COOKIE_SECRET_KEY_BASE
# SG_AUTH_RAILS_REDIS_URL
# SG_AUTH_RAILS_REDIS_PASSWORD
# SG_AUTH_JWT_PUBLIC_KEY_FILE
# inflections:
# person: people
# sheep: sheep
auth:
# Can be 'rails', 'jwt' or 'header'
type: rails
cookie: _{% app_name_slug %}_session
# Comment this out if you want to disable setting
# the user_id via a header for testing.
# Disable in production
creds_in_header: true
rails:
# Rails version; this is used for reading the
# various cookie formats.
version: 5.2
# Found in 'Rails.application.config.secret_key_base'
secret_key_base: 0a248500a64c01184edb4d7ad3a805488f8097ac761b76aaa6c17c01dcb7af03a2f18ba61b2868134b9c7b79a122bc0dadff4367414a2d173297bfea92be5566
# Remote cookie store. (memcache or redis)
# url: redis://redis:6379
# password: ""
# max_idle: 80
# max_active: 12000
# In most cases you don't need these
# salt: "encrypted cookie"
# sign_salt: "signed encrypted cookie"
# auth_salt: "authenticated encrypted cookie"
# jwt:
# provider: auth0
# secret: abc335bfcfdb04e50db5bb0a4d67ab9
# public_key_file: /secrets/public_key.pem
# public_key_type: ecdsa #rsa
# header:
# name: dnt
# exists: true
# value: localhost:8080
# You can add additional named auths to use with actions
# In this example actions using this auth can only be
# called from the Google Appengine Cron service that
# sets a special header on all its requests
auths:
- name: from_taskqueue
type: header
header:
name: X-Appengine-Cron
exists: true
database:
type: postgres
host: db
port: 5432
dbname: {% app_name_slug %}_development
user: postgres
password: postgres
#schema: "public"
#pool_size: 10
#max_retries: 0
#log_level: "debug"
# Set session variable "user.id" to the user id
# Enable this if you need the user id in triggers, etc
set_user_id: false
# database ping timeout is used for db health checking
ping_timeout: 1m
# Define additional variables here to be used with filters
variables:
#admin_account_id: "5"
admin_account_id: "sql:select id from users where admin = true limit 1"
# Field and table names that you wish to block
blocklist:
- ar_internal_metadata
- schema_migrations
- secret
- password
- encrypted
- token
# Create custom actions with their own api endpoints
# For example the action below will be available at /api/v1/actions/refresh_leaderboard_users
# A request to this url will execute the configured SQL query
# which in this case refreshes a materialized view in the database
# (a request sketch follows this file).
# The auth_name is from one of the configured auths
actions:
- name: refresh_leaderboard_users
sql: REFRESH MATERIALIZED VIEW CONCURRENTLY "leaderboard_users"
auth_name: from_taskqueue
tables:
- name: customers
remotes:
- name: payments
id: stripe_id
url: http://rails_app:3000/stripe/$id
path: data
# debug: true
pass_headers:
- cookie
set_headers:
- name: Host
value: 0.0.0.0
# - name: Authorization
# value: Bearer <stripe_api_key>
- # You can create new fields that have a
# real db table backing them
name: me
table: users
#roles_query: "SELECT * FROM users WHERE id = $user_id"
roles:
- name: anon
tables:
- name: users
query:
limit: 10
- name: user
tables:
- name: users
query:
filters: ["{ id: { _eq: $user_id } }"]
- name: products
query:
limit: 50
filters: ["{ user_id: { eq: $user_id } }"]
disable_functions: false
insert:
filters: ["{ user_id: { eq: $user_id } }"]
presets:
- user_id: "$user_id"
- created_at: "now"
update:
filters: ["{ user_id: { eq: $user_id } }"]
presets:
- updated_at: "now"
delete:
block: true
# - name: admin
# match: id = 1000
# tables:
# - name: users
# filters: []
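A hypothetical invocation of the refresh_leaderboard_users action configured above; the host is a placeholder and, per the from_taskqueue auth, only the presence of the X-Appengine-Cron header matters.

package main

import (
"fmt"
"net/http"
)

func main() {
req, err := http.NewRequest("GET", "http://localhost:8080/api/v1/actions/refresh_leaderboard_users", nil)
if err != nil {
fmt.Println(err)
return
}
req.Header.Set("X-Appengine-Cron", "true") // existence is all the auth checks
res, err := http.DefaultClient.Do(req)
if err != nil {
fmt.Println(err)
return
}
defer res.Body.Close()
fmt.Println(res.Status)
}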

View File

@ -0,0 +1,59 @@
version: '3.4'
services:
# Postgres DB
db:
image: postgres:12
environment:
POSTGRES_USER: postgres
POSTGRES_PASSWORD: postgres
ports:
- "5432:5432"
# Yugabyte DB
# yb-master:
# image: yugabytedb/yugabyte:latest
# container_name: yb-master-n1
# command: [ "/home/yugabyte/bin/yb-master",
# "--fs_data_dirs=/mnt/disk0,/mnt/disk1",
# "--master_addresses=yb-master-n1:7100",
# "--replication_factor=1",
# "--enable_ysql=true"]
# ports:
# - "7000:7000"
# environment:
# SERVICE_7000_NAME: yb-master
# db:
# image: yugabytedb/yugabyte:latest
# container_name: yb-tserver-n1
# command: [ "/home/yugabyte/bin/yb-tserver",
# "--fs_data_dirs=/mnt/disk0,/mnt/disk1",
# "--start_pgsql_proxy",
# "--tserver_master_addrs=yb-master-n1:7100"]
# ports:
# - "9042:9042"
# - "6379:6379"
# - "5433:5433"
# - "9000:9000"
# environment:
# SERVICE_5433_NAME: ysql
# SERVICE_9042_NAME: ycql
# SERVICE_6379_NAME: yedis
# SERVICE_9000_NAME: yb-tserver
# depends_on:
# - yb-master
{% app_name_slug %}_api:
image: dosco/super-graph:latest
environment:
GO_ENV: "development"
# Uncomment below for Yugabyte DB
# SG_DATABASE_PORT: 5433
# SG_DATABASE_USER: yugabyte
# SG_DATABASE_PASSWORD: yugabyte
volumes:
- ./config:/config
ports:
- "8080:8080"
depends_on:
- db

View File

@ -0,0 +1,80 @@
# Inherit config from this other config file
# so I only need to overwrite some values
inherits: dev
app_name: "{% app_name %} Production"
host_port: 0.0.0.0:8080
web_ui: false
# debug, error, warn, info
log_level: "warn"
# enable or disable http compression (uses gzip)
http_compress: true
# When production mode is 'true' only queries
# from the allow list are permitted.
# When it's 'false' all queries are saved to
# the allow list in ./config/allow.list
production: true
# Throw a 401 on auth failure for queries that need auth
auth_fail_block: true
# Latency tracing for database queries and remote joins
# the resulting latency information is returned with the
# response
enable_tracing: false
# Watch the config folder and reload Super Graph
# with the new configs when a change is detected
reload_on_config_change: false
# File that points to the database seeding script
# seed_file: seed.js
# Path pointing to where the migrations can be found
# migrations_path: migrations
# Secret key for general encryption operations like
# encrypting the cursor data
# secret_key: supercalifajalistics
# CORS: A list of origins a cross-domain request can be executed from.
# If the special * value is present in the list, all origins will be allowed.
# An origin may contain a wildcard (*) to replace 0 or more
# characters (i.e.: http://*.domain.com).
# cors_allowed_origins: ["*"]
# Debug Cross Origin Resource Sharing requests
# cors_debug: false
# Postgres related environment Variables
# SG_DATABASE_HOST
# SG_DATABASE_PORT
# SG_DATABASE_USER
# SG_DATABASE_PASSWORD
# Auth related environment Variables
# SG_AUTH_RAILS_COOKIE_SECRET_KEY_BASE
# SG_AUTH_RAILS_REDIS_URL
# SG_AUTH_RAILS_REDIS_PASSWORD
# SG_AUTH_JWT_PUBLIC_KEY_FILE
database:
type: postgres
host: db
port: 5432
dbname: {% app_name_slug %}_production
user: postgres
password: postgres
#pool_size: 10
#max_retries: 0
#log_level: "debug"
# Set session variable "user.id" to the user id
# Enable this if you need the user id in triggers, etc
set_user_id: false
# database ping timeout is used for db health checking
ping_timeout: 5m

View File

@ -0,0 +1,19 @@
// Example script to seed database
var users = [];
for (var i = 0; i < 10; i++) {
var data = {
full_name: fake.name(),
email: fake.email()
}
var res = graphql(" \
mutation { \
user(insert: $data) { \
id \
} \
}", { data: data })
users.push(res.user)
}

128
cmd/internal/serv/utils.go Normal file
View File

@ -0,0 +1,128 @@
package serv
import (
"bytes"
"crypto/sha1"
"encoding/hex"
"io"
"os"
"sort"
"strings"
"sync"
"github.com/cespare/xxhash/v2"
"github.com/dosco/super-graph/jsn"
)
// nolint: errcheck
func mkkey(h *xxhash.Digest, k1 string, k2 string) uint64 {
h.WriteString(k1)
h.WriteString(k2)
v := h.Sum64()
h.Reset()
return v
}
// nolint: errcheck
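// gqlHash returns a SHA-1 of the query normalized for whitespace and
// case, mixed with the sorted variable keys and the role, so
// formatting-equivalent queries hash identically (see the tests below).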
func gqlHash(b string, vars []byte, role string) string {
b = strings.TrimSpace(b)
h := sha1.New()
query := "query"
s, e := 0, 0
space := []byte{' '}
starting := true
var b0, b1 byte
if len(b) == 0 {
return ""
}
for {
if starting && b[e] == 'q' {
n := 0
se := e
for e < len(b) && n < len(query) && b[e] == query[n] {
n++
e++
}
if n != len(query) {
io.WriteString(h, strings.ToLower(b[se:e]))
}
}
if e >= len(b) {
break
}
if ws(b[e]) {
for e < len(b) && ws(b[e]) {
e++
}
if e < len(b) {
b1 = b[e]
}
if al(b0) && al(b1) {
h.Write(space)
}
} else {
starting = false
s = e
for e < len(b) && !ws(b[e]) {
e++
}
if e != 0 {
b0 = b[(e - 1)]
}
io.WriteString(h, strings.ToLower(b[s:e]))
}
if e >= len(b) {
break
}
}
if len(role) != 0 {
io.WriteString(h, role)
}
if len(vars) == 0 {
return hex.EncodeToString(h.Sum(nil))
}
fields := jsn.Keys([]byte(vars))
sort.Slice(fields, func(i, j int) bool {
return bytes.Compare(fields[i], fields[j]) == -1
})
for i := range fields {
h.Write(fields[i])
}
return hex.EncodeToString(h.Sum(nil))
}
func ws(b byte) bool {
return b == ' ' || b == '\n' || b == '\t' || b == ','
}
func al(b byte) bool {
return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || (b >= '0' && b <= '9')
}
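// fatalInProd exits on err in production; in development it logs the
// error and blocks forever so the process stays up (e.g. so the reload
// watcher can re-exec it once the problem is fixed).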
func fatalInProd(err error, msg string) {
var wg sync.WaitGroup
if !isDev() {
log.Fatalf("ERR %s: %s", msg, err)
}
log.Printf("ERR %s: %s", msg, err)
wg.Add(1)
wg.Wait()
}
func isDev() bool {
return strings.HasPrefix(os.Getenv("GO_ENV"), "dev")
}

View File

@ -0,0 +1,231 @@
package serv
import (
"strings"
"testing"
)
func TestGQLHash1(t *testing.T) {
var v1 = `
products(
limit: 30,
where: { id: { AND: { greater_or_equals: 20, lt: 28 } } }) {
id
name
price
}`
var v2 = `
products
(limit: 30, where: { id: { AND: { greater_or_equals: 20, lt: 28 } } }) {
id
name
price
} `
h1 := gqlHash(v1, nil, "")
h2 := gqlHash(v2, nil, "")
if strings.Compare(h1, h2) != 0 {
t.Fatal("Hashes don't match they should")
}
}
func TestGQLHash2(t *testing.T) {
var v1 = `
{
products(
limit: 30
order_by: { price: desc }
distinct: [price]
where: { id: { and: { greater_or_equals: 20, lt: 28 } } }
) {
id
name
price
user {
id
email
}
}
}`
var v2 = ` { products( limit: 30, order_by: { price: desc }, distinct: [ price ] where: { id: { and: { greater_or_equals: 20, lt: 28 } } }) { id name price user { id email } } } `
h1 := gqlHash(v1, nil, "")
h2 := gqlHash(v2, nil, "")
if strings.Compare(h1, h2) != 0 {
t.Fatal("Hashes don't match they should")
}
}
func TestGQLHash3(t *testing.T) {
var v1 = `users {
id
email
picture: avatar
products(limit: 2, where: {price: {gt: 10}}) {
id
name
description
}
}`
var v2 = `
users {
id
email
picture: avatar
products(limit: 2, where: {price: {gt: 10}}) {
id
name
description
}
}
`
h1 := gqlHash(v1, nil, "")
h2 := gqlHash(v2, nil, "")
if strings.Compare(h1, h2) != 0 {
t.Fatal("Hashes don't match they should")
}
}
func TestGQLHash4(t *testing.T) {
var v1 = `
query {
products(
limit: 30
order_by: { price: desc }
distinct: [price]
where: { id: { and: { greater_or_equals: 20, lt: 28 } } }
) {
id
name
price
user {
id
email
}
}
}`
var v2 = ` { products( limit: 30, order_by: { price: desc }, distinct: [ price ] where: { id: { and: { greater_or_equals: 20, lt: 28 } } }) { id name price user { id email } } } `
h1 := gqlHash(v1, nil, "")
h2 := gqlHash(v2, nil, "")
if strings.Compare(h1, h2) != 0 {
t.Fatal("Hashes don't match they should")
}
}
func TestGQLHashWithVars1(t *testing.T) {
var q1 = `
products(
limit: 30,
where: { id: { AND: { greater_or_equals: 20, lt: 28 } } }) {
id
name
price
}`
var v1 = `
{
"insert": {
"name": "Hello",
"description": "World",
"created_at": "now",
"updated_at": "now",
"test": { "type2": "b", "type1": "a" }
},
"user": 123
}`
var q2 = `
products
(limit: 30, where: { id: { AND: { greater_or_equals: 20, lt: 28 } } }) {
id
name
price
} `
var v2 = `{
"insert": {
"created_at": "now",
"test": { "type1": "a", "type2": "b" },
"name": "Hello",
"updated_at": "now",
"description": "World"
},
"user": 123
}`
h1 := gqlHash(q1, []byte(v1), "user")
h2 := gqlHash(q2, []byte(v2), "user")
if strings.Compare(h1, h2) != 0 {
t.Fatal("Hashes don't match they should")
}
}
func TestGQLHashWithVars2(t *testing.T) {
var q1 = `
products(
limit: 30,
where: { id: { AND: { greater_or_equals: 20, lt: 28 } } }) {
id
name
price
}`
var v1 = `
{
"insert": [{
"name": "Hello",
"description": "World",
"created_at": "now",
"updated_at": "now",
"test": { "type2": "b", "type1": "a" }
},
{
"name": "Hello",
"description": "World",
"created_at": "now",
"updated_at": "now",
"test": { "type2": "b", "type1": "a" }
}],
"user": 123
}`
var q2 = `
products
(limit: 30, where: { id: { AND: { greater_or_equals: 20, lt: 28 } } }) {
id
name
price
} `
var v2 = `{
"insert": {
"created_at": "now",
"test": { "type1": "a", "type2": "b" },
"name": "Hello",
"updated_at": "now",
"description": "World"
},
"user": 123
}`
h1 := gqlHash(q1, []byte(v1), "user")
h2 := gqlHash(q2, []byte(v2), "user")
if strings.Compare(h1, h2) != 0 {
t.Fatal("Hashes don't match they should")
}
}

26
cmd/internal/serv/web/.gitignore vendored Executable file
View File

@ -0,0 +1,26 @@
# See https://help.github.com/articles/ignoring-files/ for more about ignoring files.
# dependencies
/node_modules
# testing
/coverage
# production
/build
# development
/src/components/dataviz/core/*.js.map
# misc
.DS_Store
.env.local
.env.development.local
.env.test.local
.env.production.local
npm-debug.log*
yarn-debug.log*
yarn-error.log*
.vscode

View File

@ -0,0 +1,35 @@
{
"name": "web",
"version": "0.1.0",
"private": true,
"dependencies": {
"@apollographql/graphql-playground-react": "^1.7.31",
"apollo-link-ws": "^1.0.8",
"graphql": "^14.1.1",
"react": "^16.11.0",
"react-dom": "^16.11.0",
"react-scripts": "3.2.0",
"subscriptions-transport-ws": "^0.9.14"
},
"scripts": {
"start": "PORT=3001 react-scripts start",
"build": "react-scripts build",
"test": "react-scripts test",
"eject": "react-scripts eject"
},
"proxy": "http://localhost:8080",
"eslintConfig": {
"extends": "react-app"
},
"browserslist": [
">0.2%",
"not dead",
"not ie <= 11",
"not op_mini all"
],
"resolutions": {
"apollo-link-ws": "^1.0.8",
"graphql": "^14.1.1",
"subscriptions-transport-ws": "^0.9.14"
}
}

Binary file not shown.


View File

@ -0,0 +1,43 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8" />
<link rel="shortcut icon" href="%PUBLIC_URL%/favicon.ico" />
<meta
name="viewport"
content="width=device-width, initial-scale=1, shrink-to-fit=no"
/>
<meta name="theme-color" content="#000000" />
<!--
manifest.json provides metadata used when your web app is installed on a
user's mobile device or desktop. See https://developers.google.com/web/fundamentals/web-app-manifest/
-->
<link rel="manifest" href="%PUBLIC_URL%/manifest.json" />
<link href="https://fonts.googleapis.com/css?family=Open+Sans:300,400,600,700|Source+Code+Pro:400,700" rel="stylesheet">
<!--
Notice the use of %PUBLIC_URL% in the tags above.
It will be replaced with the URL of the `public` folder during the build.
Only files inside the `public` folder can be referenced from the HTML.
Unlike "/favicon.ico" or "favicon.ico", "%PUBLIC_URL%/favicon.ico" will
work correctly both with client-side routing and a non-root public URL.
Learn how to configure a non-root public URL by running `npm run build`.
-->
<title>Super Graph - GraphQL API for Rails</title>
</head>
<body>
<noscript>You need to enable JavaScript to run this app.</noscript>
<div id="root"></div>
<!--
This HTML file is a template.
If you open it directly in the browser, you will see an empty page.
You can add webfonts, meta tags, or analytics to this file.
The build step will place the bundled scripts into the <body> tag.
To begin the development, run `npm start` or `yarn start`.
To create a production bundle, use `npm run build` or `yarn build`.
-->
</body>
</html>

View File

@ -0,0 +1,15 @@
{
"short_name": "Super Graph",
"name": "Super Graph - GraphQL API for Rails",
"icons": [
{
"src": "favicon.ico",
"sizes": "64x64 32x32 24x24 16x16",
"type": "image/x-icon"
}
],
"start_url": ".",
"display": "standalone",
"theme_color": "#000000",
"background_color": "#ffffff"
}

View File

@ -0,0 +1,67 @@
import React, { Component } from 'react'
import { Provider } from 'react-redux'
import { Playground, store } from '@apollographql/graphql-playground-react'
import './index.css'
const fetch = window.fetch
window.fetch = function(input, init) {
// Always send cookies with playground requests, even when no init
// object is passed.
return Promise.resolve(fetch.call(global, input, { ...init, credentials: 'include' }))
}
class App extends Component {
render() {
return (
<div>
<header style={{
background: '#09141b',
color: '#03a9f4',
letterSpacing: '0.15rem',
height: '65px',
display: 'flex',
alignItems: 'center'
}}
>
<h3 style={{
textDecoration: 'none',
margin: '0px',
fontSize: '18px',
}}
>
<span style={{
textTransform: 'uppercase',
marginLeft: '20px',
paddingRight: '10px',
borderRight: '1px solid #fff'
}}>
Super Graph
</span>
<span style={{
fontSize: '16px',
marginLeft: '10px',
color: '#fff'
}}>
Instant GraphQL</span>
</h3>
</header>
<Provider store={store}>
<Playground
endpoint="/api/v1/graphql"
settings="{
'schema.polling.enable': false,
'request.credentials': 'include',
'general.betaUpdates': true,
'editor.reuseHeaders': true,
'editor.theme': 'dark'
}"
/>
</Provider>
</div>
);
}
}
export default App;

View File

@ -0,0 +1,9 @@
import React from 'react';
import ReactDOM from 'react-dom';
import App from './App';
it('renders without crashing', () => {
const div = document.createElement('div');
ReactDOM.render(<App />, div);
ReactDOM.unmountComponentAtNode(div);
});

View File

@ -0,0 +1,19 @@
body {
margin: 0;
padding: 0;
font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", "Roboto", "Oxygen",
"Ubuntu", "Cantarell", "Fira Sans", "Droid Sans", "Helvetica Neue",
sans-serif;
-webkit-font-smoothing: antialiased;
-moz-osx-font-smoothing: grayscale;
background-color: #0f202d;
}
code {
font-family: source-code-pro, Menlo, Monaco, Consolas, "Courier New",
monospace;
}
.playground > div:nth-child(2) {
height: calc(100vh - 131px);
}

View File

@ -0,0 +1,11 @@
import React from 'react';
import ReactDOM from 'react-dom';
import App from './App';
//import * as serviceWorker from './serviceWorker';
ReactDOM.render(<App />, document.getElementById('root'));
// If you want your app to work offline and load faster, you can change
// unregister() to register() below. Note this comes with some pitfalls.
// Learn more about service workers: http://bit.ly/CRA-PWA
//serviceWorker.unregister();

View File

@ -0,0 +1,7 @@
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 841.9 595.3">
<g fill="#61DAFB">
<path d="M666.3 296.5c0-32.5-40.7-63.3-103.1-82.4 14.4-63.6 8-114.2-20.2-130.4-6.5-3.8-14.1-5.6-22.4-5.6v22.3c4.6 0 8.3.9 11.4 2.6 13.6 7.8 19.5 37.5 14.9 75.7-1.1 9.4-2.9 19.3-5.1 29.4-19.6-4.8-41-8.5-63.5-10.9-13.5-18.5-27.5-35.3-41.6-50 32.6-30.3 63.2-46.9 84-46.9V78c-27.5 0-63.5 19.6-99.9 53.6-36.4-33.8-72.4-53.2-99.9-53.2v22.3c20.7 0 51.4 16.5 84 46.6-14 14.7-28 31.4-41.3 49.9-22.6 2.4-44 6.1-63.6 11-2.3-10-4-19.7-5.2-29-4.7-38.2 1.1-67.9 14.6-75.8 3-1.8 6.9-2.6 11.5-2.6V78.5c-8.4 0-16 1.8-22.6 5.6-28.1 16.2-34.4 66.7-19.9 130.1-62.2 19.2-102.7 49.9-102.7 82.3 0 32.5 40.7 63.3 103.1 82.4-14.4 63.6-8 114.2 20.2 130.4 6.5 3.8 14.1 5.6 22.5 5.6 27.5 0 63.5-19.6 99.9-53.6 36.4 33.8 72.4 53.2 99.9 53.2 8.4 0 16-1.8 22.6-5.6 28.1-16.2 34.4-66.7 19.9-130.1 62-19.1 102.5-49.9 102.5-82.3zm-130.2-66.7c-3.7 12.9-8.3 26.2-13.5 39.5-4.1-8-8.4-16-13.1-24-4.6-8-9.5-15.8-14.4-23.4 14.2 2.1 27.9 4.7 41 7.9zm-45.8 106.5c-7.8 13.5-15.8 26.3-24.1 38.2-14.9 1.3-30 2-45.2 2-15.1 0-30.2-.7-45-1.9-8.3-11.9-16.4-24.6-24.2-38-7.6-13.1-14.5-26.4-20.8-39.8 6.2-13.4 13.2-26.8 20.7-39.9 7.8-13.5 15.8-26.3 24.1-38.2 14.9-1.3 30-2 45.2-2 15.1 0 30.2.7 45 1.9 8.3 11.9 16.4 24.6 24.2 38 7.6 13.1 14.5 26.4 20.8 39.8-6.3 13.4-13.2 26.8-20.7 39.9zm32.3-13c5.4 13.4 10 26.8 13.8 39.8-13.1 3.2-26.9 5.9-41.2 8 4.9-7.7 9.8-15.6 14.4-23.7 4.6-8 8.9-16.1 13-24.1zM421.2 430c-9.3-9.6-18.6-20.3-27.8-32 9 .4 18.2.7 27.5.7 9.4 0 18.7-.2 27.8-.7-9 11.7-18.3 22.4-27.5 32zm-74.4-58.9c-14.2-2.1-27.9-4.7-41-7.9 3.7-12.9 8.3-26.2 13.5-39.5 4.1 8 8.4 16 13.1 24 4.7 8 9.5 15.8 14.4 23.4zM420.7 163c9.3 9.6 18.6 20.3 27.8 32-9-.4-18.2-.7-27.5-.7-9.4 0-18.7.2-27.8.7 9-11.7 18.3-22.4 27.5-32zm-74 58.9c-4.9 7.7-9.8 15.6-14.4 23.7-4.6 8-8.9 16-13 24-5.4-13.4-10-26.8-13.8-39.8 13.1-3.1 26.9-5.8 41.2-7.9zm-90.5 125.2c-35.4-15.1-58.3-34.9-58.3-50.6 0-15.7 22.9-35.6 58.3-50.6 8.6-3.7 18-7 27.7-10.1 5.7 19.6 13.2 40 22.5 60.9-9.2 20.8-16.6 41.1-22.2 60.6-9.9-3.1-19.3-6.5-28-10.2zM310 490c-13.6-7.8-19.5-37.5-14.9-75.7 1.1-9.4 2.9-19.3 5.1-29.4 19.6 4.8 41 8.5 63.5 10.9 13.5 18.5 27.5 35.3 41.6 50-32.6 30.3-63.2 46.9-84 46.9-4.5-.1-8.3-1-11.3-2.7zm237.2-76.2c4.7 38.2-1.1 67.9-14.6 75.8-3 1.8-6.9 2.6-11.5 2.6-20.7 0-51.4-16.5-84-46.6 14-14.7 28-31.4 41.3-49.9 22.6-2.4 44-6.1 63.6-11 2.3 10.1 4.1 19.8 5.2 29.1zm38.5-66.7c-8.6 3.7-18 7-27.7 10.1-5.7-19.6-13.2-40-22.5-60.9 9.2-20.8 16.6-41.1 22.2-60.6 9.9 3.1 19.3 6.5 28.1 10.2 35.4 15.1 58.3 34.9 58.3 50.6-.1 15.7-23 35.6-58.4 50.6zM320.8 78.4z"/>
<circle cx="420.9" cy="296.5" r="45.7"/>
<path d="M520.5 78.1z"/>
</g>
</svg>


View File

@ -0,0 +1,135 @@
// This optional code is used to register a service worker.
// register() is not called by default.
// This lets the app load faster on subsequent visits in production, and gives
// it offline capabilities. However, it also means that developers (and users)
// will only see deployed updates on subsequent visits to a page, after all the
// existing tabs open on the page have been closed, since previously cached
// resources are updated in the background.
// To learn more about the benefits of this model and instructions on how to
// opt-in, read http://bit.ly/CRA-PWA
const isLocalhost = Boolean(
window.location.hostname === 'localhost' ||
// [::1] is the IPv6 localhost address.
window.location.hostname === '[::1]' ||
// 127.0.0.1/8 is considered localhost for IPv4.
window.location.hostname.match(
/^127(?:\.(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}$/
)
);
export function register(config) {
if (process.env.NODE_ENV === 'production' && 'serviceWorker' in navigator) {
// The URL constructor is available in all browsers that support SW.
const publicUrl = new URL(process.env.PUBLIC_URL, window.location.href);
if (publicUrl.origin !== window.location.origin) {
// Our service worker won't work if PUBLIC_URL is on a different origin
// from what our page is served on. This might happen if a CDN is used to
// serve assets; see https://github.com/facebook/create-react-app/issues/2374
return;
}
window.addEventListener('load', () => {
const swUrl = `${process.env.PUBLIC_URL}/service-worker.js`;
if (isLocalhost) {
// This is running on localhost. Let's check if a service worker still exists or not.
checkValidServiceWorker(swUrl, config);
// Add some additional logging to localhost, pointing developers to the
// service worker/PWA documentation.
navigator.serviceWorker.ready.then(() => {
console.log(
'This web app is being served cache-first by a service ' +
'worker. To learn more, visit http://bit.ly/CRA-PWA'
);
});
} else {
// Is not localhost. Just register service worker
registerValidSW(swUrl, config);
}
});
}
}
function registerValidSW(swUrl, config) {
navigator.serviceWorker
.register(swUrl)
.then(registration => {
registration.onupdatefound = () => {
const installingWorker = registration.installing;
if (installingWorker == null) {
return;
}
installingWorker.onstatechange = () => {
if (installingWorker.state === 'installed') {
if (navigator.serviceWorker.controller) {
// At this point, the updated precached content has been fetched,
// but the previous service worker will still serve the older
// content until all client tabs are closed.
console.log(
'New content is available and will be used when all ' +
'tabs for this page are closed. See http://bit.ly/CRA-PWA.'
);
// Execute callback
if (config && config.onUpdate) {
config.onUpdate(registration);
}
} else {
// At this point, everything has been precached.
// It's the perfect time to display a
// "Content is cached for offline use." message.
console.log('Content is cached for offline use.');
// Execute callback
if (config && config.onSuccess) {
config.onSuccess(registration);
}
}
}
};
};
})
.catch(error => {
console.error('Error during service worker registration:', error);
});
}
function checkValidServiceWorker(swUrl, config) {
// Check if the service worker can be found. If it can't, reload the page.
fetch(swUrl)
.then(response => {
// Ensure service worker exists, and that we really are getting a JS file.
const contentType = response.headers.get('content-type');
if (
response.status === 404 ||
(contentType != null && contentType.indexOf('javascript') === -1)
) {
// No service worker found. Probably a different app. Reload the page.
navigator.serviceWorker.ready.then(registration => {
registration.unregister().then(() => {
window.location.reload();
});
});
} else {
// Service worker found. Proceed as normal.
registerValidSW(swUrl, config);
}
})
.catch(() => {
console.log(
'No internet connection found. App is running in offline mode.'
);
});
}
export function unregister() {
if ('serviceWorker' in navigator) {
navigator.serviceWorker.ready.then(registration => {
registration.unregister();
});
}
}

File diff suppressed because it is too large

33
cmd/internal/serv/ws.go Normal file
View File

@ -0,0 +1,33 @@
package serv
/*
func apiv1Ws(w http.ResponseWriter, r *http.Request) {
c, err := upgrader.Upgrade(w, r, nil)
if err != nil {
fmt.Println(err)
return
}
defer c.Close()
for {
mt, message, err := c.ReadMessage()
if err != nil {
fmt.Println("read:", err)
break
}
fmt.Printf("recv: %s", message)
err = c.WriteMessage(mt, message)
if err != nil {
fmt.Println("write:", err)
break
}
}
}
func serve(w http.ResponseWriter, r *http.Request) {
// if websocket.IsWebSocketUpgrade(r) {
// apiv1Ws(w, r)
// return
// }
apiv1Http(w, r)
}
*/