2019-09-20 06:19:11 +02:00
|
|
|
package serv
|
|
|
|
|
|
|
|
import (
|
2019-09-26 06:35:31 +02:00
|
|
|
"context"
|
2019-09-20 06:19:11 +02:00
|
|
|
"fmt"
|
|
|
|
"os"
|
|
|
|
"strings"
|
|
|
|
|
|
|
|
"github.com/dosco/super-graph/psql"
|
|
|
|
"github.com/dosco/super-graph/qcode"
|
|
|
|
"github.com/gobuffalo/flect"
|
2019-09-26 06:35:31 +02:00
|
|
|
"github.com/jackc/pgx/v4"
|
|
|
|
"github.com/jackc/pgx/v4/pgxpool"
|
2019-09-20 06:19:11 +02:00
|
|
|
"github.com/rs/zerolog"
|
2019-09-26 06:35:31 +02:00
|
|
|
"github.com/spf13/cobra"
|
2019-09-20 06:19:11 +02:00
|
|
|
"github.com/spf13/viper"
|
|
|
|
)
|
|
|
|
|
2019-09-27 08:30:07 +02:00
|
|
|
//go:generate rice embed-go
|
|
|
|
|
2019-09-20 06:19:11 +02:00
|
|
|
const (
	// serverName is the product name reported by the service.
	serverName = "Super Graph"

	// Auth-fail blocking modes, compared against the authFailBlock
	// global. NOTE: iota here is 1 (serverName is the first spec in
	// this block), so these values are 2, 3 and 4 — only their
	// distinctness matters, but keep that in mind before reordering.
	authFailBlockAlways = iota + 1
	authFailBlockPerQuery
	authFailBlockNever
)
|
|
|
|
|
|
|
|
var (
	// logger is the process-wide structured logger; assigned by Init
	// via initLog before any command runs.
	logger *zerolog.Logger

	// conf holds the parsed configuration shared by the commands.
	// NOTE(review): not assigned within this view — presumably set by
	// a cmd* handler after initConf; verify against callers.
	conf *config

	// confPath is the config directory, bound to the --path flag in Init.
	confPath string

	// db is the shared Postgres connection pool (see initDBPool).
	db *pgxpool.Pool

	// qcompile / pcompile are the GraphQL-to-qcode and qcode-to-SQL
	// compilers, assigned by initCompiler.
	qcompile *qcode.Compiler

	pcompile *psql.Compiler

	// authFailBlock holds one of the authFailBlock* constants,
	// derived from config in initConf via getAuthFailBlock.
	authFailBlock int
)
|
|
|
|
|
2019-09-26 06:35:31 +02:00
|
|
|
func Init() {
|
2019-09-28 17:34:03 +02:00
|
|
|
logger = initLog()
|
|
|
|
|
|
|
|
rootCmd := &cobra.Command{
|
2019-09-26 06:35:31 +02:00
|
|
|
Use: "super-graph",
|
|
|
|
Short: "An instant high-performance GraphQL API. No code needed. https://supergraph.dev",
|
|
|
|
}
|
|
|
|
|
2019-09-28 17:34:03 +02:00
|
|
|
rootCmd.AddCommand(&cobra.Command{
|
2019-09-26 06:35:31 +02:00
|
|
|
Use: "serv",
|
|
|
|
Short: "Run the super-graph service",
|
|
|
|
Run: cmdServ,
|
2019-09-28 17:34:03 +02:00
|
|
|
})
|
|
|
|
|
|
|
|
rootCmd.AddCommand(&cobra.Command{
|
|
|
|
Use: "db:create",
|
|
|
|
Short: "Create database",
|
|
|
|
Run: cmdDBCreate,
|
|
|
|
})
|
|
|
|
|
|
|
|
rootCmd.AddCommand(&cobra.Command{
|
|
|
|
Use: "db:drop",
|
|
|
|
Short: "Drop database",
|
|
|
|
Run: cmdDBDrop,
|
|
|
|
})
|
|
|
|
|
|
|
|
rootCmd.AddCommand(&cobra.Command{
|
|
|
|
Use: "db:seed",
|
|
|
|
Short: "Run the seed script to seed the database",
|
|
|
|
Run: cmdDBSeed,
|
|
|
|
})
|
2019-09-26 06:35:31 +02:00
|
|
|
|
2019-09-28 17:34:03 +02:00
|
|
|
rootCmd.AddCommand(&cobra.Command{
|
|
|
|
Use: "db:migrate",
|
2019-09-26 06:35:31 +02:00
|
|
|
Short: "Migrate the database",
|
|
|
|
Long: `Migrate the database to destination migration version.
|
|
|
|
|
|
|
|
Destination migration version can be one of the following value types:
|
|
|
|
|
2019-09-28 17:34:03 +02:00
|
|
|
Migrate to the most recent migration.
|
2019-09-29 02:46:55 +02:00
|
|
|
e.g. db:migrate up
|
2019-09-28 17:34:03 +02:00
|
|
|
|
|
|
|
Rollback the most recent migration.
|
2019-09-29 02:46:55 +02:00
|
|
|
e.g. db:migrate down
|
2019-09-28 17:34:03 +02:00
|
|
|
|
2019-09-26 06:35:31 +02:00
|
|
|
Migrate to a specific migration.
|
2019-09-29 02:46:55 +02:00
|
|
|
e.g. db:migrate 42
|
2019-09-26 06:35:31 +02:00
|
|
|
|
|
|
|
Migrate forward N steps.
|
2019-09-29 02:46:55 +02:00
|
|
|
e.g. db:migrate +3
|
2019-09-26 06:35:31 +02:00
|
|
|
|
|
|
|
Migrate backward N steps.
|
2019-09-29 02:46:55 +02:00
|
|
|
e.g. db:migrate -2
|
2019-09-26 06:35:31 +02:00
|
|
|
|
|
|
|
Redo previous N steps (migrate backward N steps then forward N steps).
|
2019-09-29 02:46:55 +02:00
|
|
|
e.g. db:migrate -+1
|
2019-09-26 06:35:31 +02:00
|
|
|
`,
|
2019-09-28 17:34:03 +02:00
|
|
|
Run: cmdDBMigrate,
|
|
|
|
})
|
2019-09-26 06:35:31 +02:00
|
|
|
|
2019-09-28 17:34:03 +02:00
|
|
|
rootCmd.AddCommand(&cobra.Command{
|
|
|
|
Use: "db:status",
|
2019-09-26 06:35:31 +02:00
|
|
|
Short: "Print current migration status",
|
2019-09-28 17:34:03 +02:00
|
|
|
Run: cmdDBStatus,
|
|
|
|
})
|
2019-09-26 06:35:31 +02:00
|
|
|
|
2019-09-28 17:34:03 +02:00
|
|
|
rootCmd.AddCommand(&cobra.Command{
|
|
|
|
Use: "db:new NAME",
|
2019-09-26 06:35:31 +02:00
|
|
|
Short: "Generate a new migration",
|
|
|
|
Long: "Generate a new migration with the next sequence number and provided name",
|
2019-09-28 17:34:03 +02:00
|
|
|
Run: cmdDBNew,
|
|
|
|
})
|
|
|
|
|
|
|
|
rootCmd.AddCommand(&cobra.Command{
|
|
|
|
Use: "db:setup",
|
|
|
|
Short: "Setup database",
|
|
|
|
Long: "This command will create, migrate and seed the database",
|
|
|
|
Run: cmdDBSetup,
|
|
|
|
})
|
|
|
|
|
|
|
|
rootCmd.AddCommand(&cobra.Command{
|
|
|
|
Use: "new APP-NAME",
|
|
|
|
Short: "Create a new application",
|
2019-09-27 08:19:24 +02:00
|
|
|
Long: "Generate all the required files to start on a new Super Graph app",
|
2019-09-28 17:34:03 +02:00
|
|
|
Run: cmdNew,
|
|
|
|
})
|
2019-09-26 06:35:31 +02:00
|
|
|
|
2019-10-06 22:28:10 +02:00
|
|
|
rootCmd.AddCommand(&cobra.Command{
|
|
|
|
Use: fmt.Sprintf("conf:dump [%s]", strings.Join(viper.SupportedExts, "|")),
|
|
|
|
Short: "Dump config to file",
|
|
|
|
Long: "Dump current config to a file in the selected format",
|
|
|
|
Run: cmdConfDump,
|
|
|
|
})
|
|
|
|
|
2019-09-26 06:35:31 +02:00
|
|
|
rootCmd.Flags().StringVar(&confPath,
|
|
|
|
"path", "./config", "path to config files")
|
|
|
|
|
|
|
|
if err := rootCmd.Execute(); err != nil {
|
|
|
|
logger.Fatal().Err(err).Send()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-09-20 06:19:11 +02:00
|
|
|
func initLog() *zerolog.Logger {
|
2019-10-03 09:08:01 +02:00
|
|
|
out := zerolog.ConsoleWriter{Out: os.Stderr}
|
|
|
|
logger := zerolog.New(out).
|
2019-09-20 06:19:11 +02:00
|
|
|
With().
|
|
|
|
Timestamp().
|
|
|
|
Caller().
|
|
|
|
Logger()
|
|
|
|
|
|
|
|
return &logger
|
|
|
|
}
|
|
|
|
|
2019-09-26 06:35:31 +02:00
|
|
|
func initConf() (*config, error) {
|
2019-10-06 22:28:10 +02:00
|
|
|
vi := newConfig()
|
2019-09-20 06:19:11 +02:00
|
|
|
|
|
|
|
if err := vi.ReadInConfig(); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
c := &config{}
|
|
|
|
|
|
|
|
if err := vi.Unmarshal(c); err != nil {
|
|
|
|
return nil, fmt.Errorf("unable to decode config, %v", err)
|
|
|
|
}
|
|
|
|
|
2019-10-06 22:28:10 +02:00
|
|
|
if len(c.Tables) == 0 {
|
|
|
|
c.Tables = c.DB.Tables
|
|
|
|
}
|
|
|
|
|
2019-09-20 06:19:11 +02:00
|
|
|
for k, v := range c.Inflections {
|
|
|
|
flect.AddPlural(k, v)
|
|
|
|
}
|
|
|
|
|
2019-10-06 22:28:10 +02:00
|
|
|
for i := range c.Tables {
|
|
|
|
t := c.Tables[i]
|
2019-09-20 06:19:11 +02:00
|
|
|
t.Name = flect.Pluralize(strings.ToLower(t.Name))
|
|
|
|
}
|
|
|
|
|
|
|
|
authFailBlock = getAuthFailBlock(c)
|
|
|
|
|
2019-09-26 06:35:31 +02:00
|
|
|
logLevel, err := zerolog.ParseLevel(c.LogLevel)
|
|
|
|
if err != nil {
|
|
|
|
logger.Error().Err(err).Msg("error setting log_level")
|
|
|
|
}
|
|
|
|
zerolog.SetGlobalLevel(logLevel)
|
|
|
|
|
2019-10-24 08:07:42 +02:00
|
|
|
for k, v := range c.DB.Vars {
|
|
|
|
c.DB.Vars[k] = sanitize(v)
|
|
|
|
}
|
|
|
|
|
|
|
|
c.RolesQuery = sanitize(c.RolesQuery)
|
|
|
|
|
|
|
|
rolesMap := make(map[string]struct{})
|
|
|
|
|
|
|
|
for i := range c.Roles {
|
|
|
|
role := &c.Roles[i]
|
|
|
|
|
|
|
|
if _, ok := rolesMap[role.Name]; ok {
|
|
|
|
logger.Fatal().Msgf("duplicate role '%s' found", role.Name)
|
|
|
|
}
|
|
|
|
role.Name = sanitize(role.Name)
|
|
|
|
role.Match = sanitize(role.Match)
|
|
|
|
rolesMap[role.Name] = struct{}{}
|
|
|
|
}
|
|
|
|
|
|
|
|
if _, ok := rolesMap["user"]; !ok {
|
|
|
|
c.Roles = append(c.Roles, configRole{Name: "user"})
|
|
|
|
}
|
|
|
|
|
|
|
|
if _, ok := rolesMap["anon"]; !ok {
|
|
|
|
c.Roles = append(c.Roles, configRole{Name: "anon"})
|
|
|
|
}
|
2019-09-20 06:19:11 +02:00
|
|
|
|
2019-10-25 06:01:22 +02:00
|
|
|
c.Validate()
|
|
|
|
|
2019-09-20 06:19:11 +02:00
|
|
|
return c, nil
|
|
|
|
}
|
|
|
|
|
2019-09-28 17:34:03 +02:00
|
|
|
func initDB(c *config, useDB bool) (*pgx.Conn, error) {
|
2019-09-26 06:35:31 +02:00
|
|
|
config, _ := pgx.ParseConfig("")
|
|
|
|
config.Host = c.DB.Host
|
|
|
|
config.Port = c.DB.Port
|
|
|
|
config.User = c.DB.User
|
|
|
|
config.Password = c.DB.Password
|
|
|
|
config.RuntimeParams = map[string]string{
|
|
|
|
"application_name": c.AppName,
|
|
|
|
"search_path": c.DB.Schema,
|
2019-09-20 06:19:11 +02:00
|
|
|
}
|
|
|
|
|
2019-09-28 17:34:03 +02:00
|
|
|
if useDB {
|
|
|
|
config.Database = c.DB.DBName
|
|
|
|
}
|
|
|
|
|
2019-09-26 06:35:31 +02:00
|
|
|
switch c.LogLevel {
|
|
|
|
case "debug":
|
|
|
|
config.LogLevel = pgx.LogLevelDebug
|
|
|
|
case "info":
|
|
|
|
config.LogLevel = pgx.LogLevelInfo
|
|
|
|
case "warn":
|
|
|
|
config.LogLevel = pgx.LogLevelWarn
|
|
|
|
case "error":
|
|
|
|
config.LogLevel = pgx.LogLevelError
|
|
|
|
default:
|
|
|
|
config.LogLevel = pgx.LogLevelNone
|
2019-09-20 06:19:11 +02:00
|
|
|
}
|
|
|
|
|
2019-10-15 08:30:19 +02:00
|
|
|
config.Logger = NewSQLLogger(*logger)
|
2019-09-20 06:19:11 +02:00
|
|
|
|
2019-09-26 06:35:31 +02:00
|
|
|
db, err := pgx.ConnectConfig(context.Background(), config)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
2019-09-20 06:19:11 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
return db, nil
|
|
|
|
}
|
|
|
|
|
2019-09-26 06:35:31 +02:00
|
|
|
func initDBPool(c *config) (*pgxpool.Pool, error) {
|
|
|
|
config, _ := pgxpool.ParseConfig("")
|
|
|
|
config.ConnConfig.Host = c.DB.Host
|
|
|
|
config.ConnConfig.Port = c.DB.Port
|
|
|
|
config.ConnConfig.Database = c.DB.DBName
|
|
|
|
config.ConnConfig.User = c.DB.User
|
|
|
|
config.ConnConfig.Password = c.DB.Password
|
|
|
|
config.ConnConfig.RuntimeParams = map[string]string{
|
|
|
|
"application_name": c.AppName,
|
|
|
|
"search_path": c.DB.Schema,
|
|
|
|
}
|
2019-09-20 06:19:11 +02:00
|
|
|
|
2019-09-26 06:35:31 +02:00
|
|
|
switch c.LogLevel {
|
|
|
|
case "debug":
|
|
|
|
config.ConnConfig.LogLevel = pgx.LogLevelDebug
|
|
|
|
case "info":
|
|
|
|
config.ConnConfig.LogLevel = pgx.LogLevelInfo
|
|
|
|
case "warn":
|
|
|
|
config.ConnConfig.LogLevel = pgx.LogLevelWarn
|
|
|
|
case "error":
|
|
|
|
config.ConnConfig.LogLevel = pgx.LogLevelError
|
|
|
|
default:
|
|
|
|
config.ConnConfig.LogLevel = pgx.LogLevelNone
|
|
|
|
}
|
2019-09-20 06:19:11 +02:00
|
|
|
|
2019-10-15 08:30:19 +02:00
|
|
|
config.ConnConfig.Logger = NewSQLLogger(*logger)
|
2019-09-20 06:19:11 +02:00
|
|
|
|
2019-09-26 06:35:31 +02:00
|
|
|
// if c.DB.MaxRetries != 0 {
|
|
|
|
// opt.MaxRetries = c.DB.MaxRetries
|
|
|
|
// }
|
2019-09-20 06:19:11 +02:00
|
|
|
|
2019-09-26 06:35:31 +02:00
|
|
|
if c.DB.PoolSize != 0 {
|
|
|
|
config.MaxConns = conf.DB.PoolSize
|
2019-09-20 06:19:11 +02:00
|
|
|
}
|
|
|
|
|
2019-09-26 06:35:31 +02:00
|
|
|
db, err := pgxpool.ConnectConfig(context.Background(), config)
|
2019-09-20 06:19:11 +02:00
|
|
|
if err != nil {
|
2019-09-26 06:35:31 +02:00
|
|
|
return nil, err
|
2019-09-20 06:19:11 +02:00
|
|
|
}
|
|
|
|
|
2019-09-26 06:35:31 +02:00
|
|
|
return db, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func initCompiler() {
|
|
|
|
var err error
|
|
|
|
|
2019-09-20 06:19:11 +02:00
|
|
|
qcompile, pcompile, err = initCompilers(conf)
|
|
|
|
if err != nil {
|
2019-09-26 06:35:31 +02:00
|
|
|
logger.Fatal().Err(err).Msg("failed to initialize compilers")
|
2019-09-20 06:19:11 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
if err := initResolvers(); err != nil {
|
|
|
|
logger.Fatal().Err(err).Msg("failed to initialized resolvers")
|
|
|
|
}
|
|
|
|
}
|