Fix duplicate column bug

Vikram Rangnekar 2019-09-29 15:20:59 -04:00
parent d715564833
commit 20ddfb26f3
5 changed files with 97 additions and 53 deletions

View File

@@ -2,9 +2,35 @@ package psql
 import (
     "encoding/json"
+    "fmt"
     "testing"
 )
 
+func simpleInsert(t *testing.T) {
+    gql := `mutation {
+        user(insert: $data) {
+            id
+        }
+    }`
+
+    sql := `WITH "users" AS (WITH "input" AS (SELECT {{data}}::json AS j) INSERT INTO users (full_name, email) SELECT full_name, email FROM input i, json_populate_record(NULL::users, i.j) t RETURNING *) SELECT json_object_agg('user', sel_json_0) FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "user_0"."id" AS "id") AS "sel_0")) AS "sel_json_0" FROM (SELECT "user"."id" FROM "users" AS "user" WHERE ((("user"."id") = {{user_id}})) LIMIT ('1') :: integer) AS "user_0" LIMIT ('1') :: integer) AS "done_1337";`
+
+    vars := map[string]json.RawMessage{
+        "data": json.RawMessage(`{"email": "reannagreenholt@orn.com", "full_name": "Flo Barton"}`),
+    }
+
+    resSQL, err := compileGQLToPSQL(gql, vars)
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    fmt.Println(">", string(resSQL))
+
+    if string(resSQL) != sql {
+        t.Fatal(errNotExpected)
+    }
+}
+
 func singleInsert(t *testing.T) {
     gql := `mutation {
         product(id: 15, insert: $insert) {
@@ -102,6 +128,7 @@ func delete(t *testing.T) {
 }
 
 func TestCompileInsert(t *testing.T) {
+    t.Run("simpleInsert", simpleInsert)
     t.Run("singleInsert", singleInsert)
     t.Run("bulkInsert", bulkInsert)
     t.Run("singleUpdate", singleUpdate)

View File

@@ -17,21 +17,20 @@ type DBTable struct {
 func GetTables(dbc *pgxpool.Conn) ([]*DBTable, error) {
     sqlStmt := `
 SELECT
     c.relname as "name",
     CASE c.relkind WHEN 'r' THEN 'table'
         WHEN 'v' THEN 'view'
         WHEN 'm' THEN 'materialized view'
         WHEN 'f' THEN 'foreign table'
     END as "type"
 FROM pg_catalog.pg_class c
     LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
 WHERE c.relkind IN ('r','v','m','f','')
     AND n.nspname <> 'pg_catalog'
     AND n.nspname <> 'information_schema'
     AND n.nspname !~ '^pg_toast'
-    AND pg_catalog.pg_table_is_visible(c.oid);
-`
+    AND pg_catalog.pg_table_is_visible(c.oid);`
 
     var tables []*DBTable
@@ -67,41 +66,39 @@ type DBColumn struct {
 func GetColumns(dbc *pgxpool.Conn, schema, table string) ([]*DBColumn, error) {
     sqlStmt := `
 SELECT
     f.attnum AS id,
     f.attname AS name,
     f.attnotnull AS notnull,
     pg_catalog.format_type(f.atttypid,f.atttypmod) AS type,
     CASE
-        WHEN p.contype = 'p' THEN true
+        WHEN p.contype = ('p'::char) THEN true
         ELSE false
     END AS primarykey,
     CASE
-        WHEN p.contype = 'u' THEN true
+        WHEN p.contype = ('u'::char) THEN true
         ELSE false
     END AS uniquekey,
     CASE
-        WHEN p.contype = 'f' THEN g.relname
+        WHEN p.contype = ('f'::char) THEN g.relname
        ELSE ''::text
     END AS foreignkey,
     CASE
-        WHEN p.contype = 'f' THEN p.confkey
+        WHEN p.contype = ('f'::char) THEN p.confkey
         ELSE ARRAY[]::int2[]
     END AS foreignkey_fieldnum
 FROM pg_attribute f
     JOIN pg_class c ON c.oid = f.attrelid
-    JOIN pg_type t ON t.oid = f.atttypid
     LEFT JOIN pg_attrdef d ON d.adrelid = c.oid AND d.adnum = f.attnum
     LEFT JOIN pg_namespace n ON n.oid = c.relnamespace
     LEFT JOIN pg_constraint p ON p.conrelid = c.oid AND f.attnum = ANY (p.conkey)
     LEFT JOIN pg_class AS g ON p.confrelid = g.oid
-WHERE c.relkind = 'r'::char
+WHERE c.relkind = ('r'::char)
     AND n.nspname = $1 -- Replace with Schema name
     AND c.relname = $2 -- Replace with table name
-    AND f.attnum > 0 ORDER BY id;
-`
-
-    var cols []*DBColumn
+    AND f.attnum > 0
+    AND f.attisdropped = false
+ORDER BY id;`
 
     rows, err := dbc.Query(context.Background(), sqlStmt, schema, table)
     if err != nil {
@@ -109,6 +106,8 @@ WHERE c.relkind = 'r'::char
     }
     defer rows.Close()
 
+    cmap := make(map[int]*DBColumn)
+
     for rows.Next() {
         c := DBColumn{}
         err = rows.Scan(&c.ID, &c.Name, &c.NotNull, &c.Type, &c.PrimaryKey, &c.UniqueKey,
@@ -117,7 +116,25 @@ WHERE c.relkind = 'r'::char
             return nil, err
         }
         c.fKeyColID.AssignTo(&c.FKeyColID)
-        cols = append(cols, &c)
+
+        if v, ok := cmap[c.ID]; ok {
+            if c.PrimaryKey {
+                v.PrimaryKey = true
+            }
+            if c.NotNull {
+                v.NotNull = true
+            }
+            if c.UniqueKey {
+                v.UniqueKey = true
+            }
+        } else {
+            cmap[c.ID] = &c
+        }
+    }
+
+    cols := make([]*DBColumn, 0, len(cmap))
+    for _, v := range cmap {
+        cols = append(cols, v)
     }
 
     return cols, nil
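
For context on why the merge above is needed: the GetColumns statement LEFT JOINs pg_constraint with f.attnum = ANY (p.conkey), so a column covered by more than one constraint (a primary key that is also part of a unique or foreign-key constraint, say) comes back as one row per constraint, and the old loop appended every row. That is the duplicate-column bug this commit fixes. Below is a minimal standalone sketch of the merge behaviour; the helper name mergeColumns and the sample data are illustrative only, not taken from the repo.

package main

import "fmt"

// DBColumn mirrors only the fields relevant to the merge; the real struct has more.
type DBColumn struct {
    ID         int
    Name       string
    NotNull    bool
    PrimaryKey bool
    UniqueKey  bool
}

// mergeColumns collapses duplicate rows (one per constraint) into a single
// entry per column ID, OR-ing the boolean flags, as the patched loop does.
func mergeColumns(rows []DBColumn) []*DBColumn {
    cmap := make(map[int]*DBColumn)
    for i := range rows {
        c := rows[i]
        if v, ok := cmap[c.ID]; ok {
            v.PrimaryKey = v.PrimaryKey || c.PrimaryKey
            v.NotNull = v.NotNull || c.NotNull
            v.UniqueKey = v.UniqueKey || c.UniqueKey
        } else {
            cmap[c.ID] = &c
        }
    }
    // Building the result from a map means iteration order is random,
    // so the output is no longer in the query's ORDER BY id order.
    cols := make([]*DBColumn, 0, len(cmap))
    for _, v := range cmap {
        cols = append(cols, v)
    }
    return cols
}

func main() {
    // "id" appears twice: once for its primary-key constraint and once
    // for a unique constraint that also covers it.
    rows := []DBColumn{
        {ID: 1, Name: "id", NotNull: true, PrimaryKey: true},
        {ID: 1, Name: "id", NotNull: true, UniqueKey: true},
        {ID: 2, Name: "email", NotNull: true},
    }
    for _, c := range mergeColumns(rows) {
        fmt.Printf("%+v\n", *c)
    }
    // Expect two entries; "id" ends up with both PrimaryKey and UniqueKey set.
}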
@@ -193,14 +210,14 @@ func (s *DBSchema) updateSchema(
     // Foreign key columns in current table
     colByID := make(map[int]*DBColumn)
     columns := make(map[string]*DBColumn, len(cols))
-    colNames := make([]string, len(cols))
+    colNames := make([]string, 0, len(cols))
 
     for i := range cols {
         c := cols[i]
         name := strings.ToLower(c.Name)
-        columns[name] = cols[i]
+        columns[name] = c
         colNames = append(colNames, name)
-        colByID[c.ID] = cols[i]
+        colByID[c.ID] = c
     }
 
     singular := strings.ToLower(flect.Singularize(t.Name))
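
One line in this hunk, colNames := make([]string, 0, len(cols)), is more than a style tweak: the old form allocated a slice that already held len(cols) empty strings, so the appends that follow produced a slice of twice the expected length with blank names at the front. A quick standalone illustration of the difference (not code from the repo):

package main

import "fmt"

func main() {
    cols := []string{"id", "full_name", "email"}

    // Buggy form: length 3 up front, so append grows the slice to 6 and
    // the first 3 entries stay as "".
    bad := make([]string, len(cols))
    for _, c := range cols {
        bad = append(bad, c)
    }

    // Fixed form: length 0, capacity 3; only the real names end up in the slice.
    good := make([]string, 0, len(cols))
    for _, c := range cols {
        good = append(good, c)
    }

    fmt.Printf("bad:  %q\n", bad)  // ["" "" "" "id" "full_name" "email"]
    fmt.Printf("good: %q\n", good) // ["id" "full_name" "email"]
}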

View File

@@ -69,7 +69,7 @@ func cmdDBCreate(cmd *cobra.Command, args []string) {
     }
     defer conn.Close(ctx)
 
-    sql := fmt.Sprintf("create database %s", conf.DB.DBName)
+    sql := fmt.Sprintf("CREATE DATABASE %s", conf.DB.DBName)
 
     _, err = conn.Exec(ctx, sql)
     if err != nil {
@@ -94,7 +94,7 @@ func cmdDBDrop(cmd *cobra.Command, args []string) {
     }
     defer conn.Close(ctx)
 
-    sql := fmt.Sprintf("drop database if exists %s", conf.DB.DBName)
+    sql := fmt.Sprintf(`DROP DATABASE IF EXISTS %s`, conf.DB.DBName)
 
     _, err = conn.Exec(ctx, sql)
     if err != nil {

View File

@@ -75,7 +75,7 @@ func cmdNew(cmd *cobra.Command, args []string) {
     })
 
     ifNotExists(path.Join(appConfigPath, "seed.js"), func(p string) error {
-        if v, err := tmpl.get("docker-compose.yml"); err == nil {
+        if v, err := tmpl.get("seed.js"); err == nil {
             return ioutil.WriteFile(p, v, 0644)
         } else {
             return err

View File

@@ -4,8 +4,8 @@ var users = [];
 for (i = 0; i < 10; i++) {
     var data = {
         full_name: fake.name(),
-        email: fake.email(),
+        email: fake.email()
     }
 
     var res = graphql(" \