BREAKING CHANGE: super-graph/core now defaults to allow all in anon role
parent 2241364d00
commit 1fb7f0e6c8
@@ -29,9 +29,9 @@ COPY --from=react-build /web/build/ ./internal/serv/web/build
RUN go mod vendor
RUN make build
RUN echo "Compressing binary, will take a bit of time..." && \
	upx --ultra-brute -qq super-graph && \
	upx -t super-graph
# RUN echo "Compressing binary, will take a bit of time..." && \
#	upx --ultra-brute -qq super-graph && \
#	upx -t super-graph
@@ -30,12 +30,10 @@ type Config struct {
	// or other database functions
	SetUserID bool `mapstructure:"set_user_id"`

	// DefaultAllow reverses the blocked-by-default behaviour for queries in
	// anonymous mode. (anon role)
	// For example, if the table `users` is not listed under the anon role then
	// access to it would be blocked by default for unauthenticated queries;
	// this option reverses that behavior (!!! Use with caution !!!!)
	DefaultAllow bool `mapstructure:"default_allow"`
	// DefaultBlock ensures that in anonymous mode (role 'anon') all tables
	// are blocked from queries and mutations. To open access to tables in
	// anonymous mode they have to be added to the 'anon' role config.
	DefaultBlock bool `mapstructure:"default_block"`

	// Vars is a map of hardcoded variables that can be leveraged in your
	// queries (eg variable admin_id will be $admin_id in the query)
@@ -57,6 +55,9 @@ type Config struct {
	// Roles contains all the configuration for all the roles you want to support.
	// `user` and `anon` are the two default roles. The `user` role is for when a user ID is
	// available and `anon` when it's not.
	//
	// If you're using the RolesQuery config to enable attribute-based access control then
	// you can add more custom roles.
	Roles []Role

	// Inflections is used to add additional singular-to-plural mappings
@@ -108,12 +109,12 @@ type Role struct {
// RoleTable struct contains role specific access control values for a database table
type RoleTable struct {
	Name     string
	ReadOnly *bool `mapstructure:"read_only"`
	ReadOnly bool  `mapstructure:"read_only"`

	Query  Query
	Insert Insert
	Update Update
	Delete Delete
	Query  *Query
	Insert *Insert
	Update *Update
	Delete *Delete
}

// Query struct contains access control values for query operations
@@ -122,7 +123,7 @@ type Query struct {
	Filters          []string
	Columns          []string
	DisableFunctions bool `mapstructure:"disable_functions"`
	Block            *bool
	Block            bool
}

// Insert struct contains access control values for insert operations
@@ -130,7 +131,7 @@ type Insert struct {
	Filters []string
	Columns []string
	Presets map[string]string
	Block   *bool
	Block   bool
}

// Update struct contains access control values for update operations
@@ -138,14 +139,59 @@ type Update struct {
	Filters []string
	Columns []string
	Presets map[string]string
	Block   *bool
	Block   bool
}

// Delete struct contains access control values for delete operations
type Delete struct {
	Filters []string
	Columns []string
	Block   *bool
	Block   bool
}

// AddRoleTable is a helper function to make it easy to add per-table
// row-level config
func (c *Config) AddRoleTable(role string, table string, conf interface{}) error {
	var r *Role

	for i := range c.Roles {
		if strings.EqualFold(c.Roles[i].Name, role) {
			r = &c.Roles[i]
			break
		}
	}
	if r == nil {
		nr := Role{Name: role}
		c.Roles = append(c.Roles, nr)
		r = &nr
	}

	var t *RoleTable
	for i := range r.Tables {
		if strings.EqualFold(r.Tables[i].Name, table) {
			t = &r.Tables[i]
			break
		}
	}
	if t == nil {
		nt := RoleTable{Name: table}
		r.Tables = append(r.Tables, nt)
		t = &nt
	}

	switch v := conf.(type) {
	case Query:
		t.Query = &v
	case Insert:
		t.Insert = &v
	case Update:
		t.Update = &v
	case Delete:
		t.Delete = &v
	default:
		return fmt.Errorf("unsupported object type: %T", v)
	}
	return nil
}

// ReadInConfig function reads in the config file for the environment specified in the GO_ENV
@@ -90,7 +90,8 @@ func (sg *SuperGraph) initCompilers() error {
	}

	sg.qc, err = qcode.NewCompiler(qcode.Config{
		Blocklist:    sg.conf.Blocklist,
		DefaultBlock: sg.conf.DefaultBlock,
		Blocklist:    sg.conf.Blocklist,
	})
	if err != nil {
		return err
core/init.go
@@ -196,7 +196,7 @@ func addForeignKey(di *psql.DBInfo, c Column, t Table) error {
func addRoles(c *Config, qc *qcode.Compiler) error {
	for _, r := range c.Roles {
		for _, t := range r.Tables {
			if err := addRole(qc, r, t, c.DefaultAllow); err != nil {
			if err := addRole(qc, r, t, c.DefaultBlock); err != nil {
				return err
			}
		}
@@ -205,67 +205,56 @@ func addRoles(c *Config, qc *qcode.Compiler) error {
	return nil
}

func addRole(qc *qcode.Compiler, r Role, t RoleTable, defaultAllow bool) error {
	ro := true // read-only
func addRole(qc *qcode.Compiler, r Role, t RoleTable, defaultBlock bool) error {
	ro := false // read-only

	if defaultAllow {
		ro = false
	if defaultBlock && r.Name == "anon" {
		ro = true
	}

	if r.Name != "anon" {
		ro = false
	if t.ReadOnly {
		ro = true
	}

	if t.ReadOnly != nil {
		ro = *t.ReadOnly
	query := qcode.QueryConfig{Block: false}
	insert := qcode.InsertConfig{Block: ro}
	update := qcode.UpdateConfig{Block: ro}
	del := qcode.DeleteConfig{Block: ro}

	if t.Query != nil {
		query = qcode.QueryConfig{
			Limit:            t.Query.Limit,
			Filters:          t.Query.Filters,
			Columns:          t.Query.Columns,
			DisableFunctions: t.Query.DisableFunctions,
			Block:            t.Query.Block,
		}
	}

	blocked := struct {
		query  bool
		insert bool
		update bool
		delete bool
	}{false, ro, ro, ro}

	if t.Query.Block != nil {
		blocked.query = *t.Query.Block
	}
	if t.Insert.Block != nil {
		blocked.insert = *t.Insert.Block
	}
	if t.Update.Block != nil {
		blocked.update = *t.Update.Block
	}
	if t.Delete.Block != nil {
		blocked.delete = *t.Delete.Block
	if t.Insert != nil {
		insert = qcode.InsertConfig{
			Filters: t.Insert.Filters,
			Columns: t.Insert.Columns,
			Presets: t.Insert.Presets,
			Block:   t.Insert.Block,
		}
	}

	query := qcode.QueryConfig{
		Limit:            t.Query.Limit,
		Filters:          t.Query.Filters,
		Columns:          t.Query.Columns,
		DisableFunctions: t.Query.DisableFunctions,
		Block:            blocked.query,
	if t.Update != nil {
		update = qcode.UpdateConfig{
			Filters: t.Update.Filters,
			Columns: t.Update.Columns,
			Presets: t.Update.Presets,
			Block:   t.Update.Block,
		}
	}

	insert := qcode.InsertConfig{
		Filters: t.Insert.Filters,
		Columns: t.Insert.Columns,
		Presets: t.Insert.Presets,
		Block:   blocked.insert,
	}

	update := qcode.UpdateConfig{
		Filters: t.Update.Filters,
		Columns: t.Update.Columns,
		Presets: t.Update.Presets,
		Block:   blocked.update,
	}

	del := qcode.DeleteConfig{
		Filters: t.Delete.Filters,
		Columns: t.Delete.Columns,
		Block:   blocked.delete,
	if t.Delete != nil {
		del = qcode.DeleteConfig{
			Filters: t.Delete.Filters,
			Columns: t.Delete.Columns,
			Block:   t.Delete.Block,
		}
	}

	return qc.AddRole(r.Name, t.Name, qcode.TRConfig{
@@ -55,19 +55,6 @@ func TestSuperGraph(t *testing.T, db *sql.DB, before func(t *testing.T)) {
	config.AllowListFile = "./allow.list"
	config.RolesQuery = `SELECT * FROM users WHERE id = $user_id`

	blockFalse := false

	config.Roles = []core.Role{
		core.Role{
			Name: "anon",
			Tables: []core.RoleTable{
				core.RoleTable{Name: "users", ReadOnly: &blockFalse, Query: core.Query{Limit: 100}},
				core.RoleTable{Name: "product", ReadOnly: &blockFalse, Query: core.Query{Limit: 100}},
				core.RoleTable{Name: "line_item", ReadOnly: &blockFalse, Query: core.Query{Limit: 100}},
			},
		},
	}

	sg, err := core.NewSuperGraph(&config, db)
	require.NoError(t, err)
	ctx := context.Background()
@@ -6,7 +6,8 @@ import (
)

type Config struct {
	Blocklist []string
	DefaultBlock bool
	Blocklist    []string
}

type QueryConfig struct {
@@ -172,6 +172,8 @@ const (
type Compiler struct {
	tr map[string]map[string]*trval
	bl map[string]struct{}

	defBlock bool
}

var expPool = sync.Pool{
@@ -179,7 +181,7 @@ var expPool = sync.Pool{
}

func NewCompiler(c Config) (*Compiler, error) {
	co := &Compiler{}
	co := &Compiler{defBlock: c.DefaultBlock}
	co.tr = make(map[string]map[string]*trval)
	co.bl = make(map[string]struct{}, len(c.Blocklist))

@@ -358,7 +360,7 @@ func (com *Compiler) compileQuery(qc *QCode, op *Operation, role string) error {
	}

	} else if role == "anon" {
		skipRender = true
		skipRender = com.defBlock
	}

	selects = append(selects, Select{
docs/website/docs/seed.md (new file)
@@ -0,0 +1,249 @@
---
id: seed
title: Database Seeding
sidebar_label: Seed Scripts
---

While developing, it's often useful to have fake data available in the database. Fake data can help with building the UI and save you time when trying to get the GraphQL query correct. Super Graph has the ability to do this for you. All you have to do is write a seed script `config/seed.js` (in Javascript) and use the `db:seed` command line option. Below is an example of the kind of things you can do in a seed script.

## Creating fake users

Since all mutations and queries are in standard GraphQL, you can use all the features available in Super Graph GraphQL.

```javascript
var users = [];

for (i = 0; i < 20; i++) {
  var data = {
    slug: util.make_slug(fake.first_name() + "-" + fake.last_name()),
    first_name: fake.first_name(),
    last_name: fake.last_name(),
    picture_url: fake.avatar_url(),
    email: fake.email(),
    bio: fake.sentence(10),
  };

  var res = graphql(" \
  mutation { \
    user(insert: $data) { \
      id \
    } \
  }", { data: data });

  users.push(res.user);
}
```
## Inserting fake blog posts for the users

Another example highlighting how the `connect` syntax of Super Graph GraphQL can be used to connect inserted posts
to random users that were previously created. For further details check out the [seed script](/seed) documentation.

```javascript
var posts = [];

for (i = 0; i < 1500; i++) {
  var user = users[Math.floor(Math.random() * 10)];

  var data = {
    slug: util.make_slug(fake.sentence(3) + i),
    body: fake.sentence(100),
    published: true,
    thread: {
      connect: { user: user.id }
    }
  };

  var res = graphql(" \
  mutation { \
    post(insert: $data) { \
      id \
    } \
  }",
  { data: data },
  { user_id: user.id });

  posts.push(res.post.slug);
}
```
## Insert a large number of rows efficiently

This feature uses the `COPY` functionality available in Postgres, which is the best way to
insert a large number of rows into a table. The `import_csv` function reads in a CSV file, using the first
line of the file as column names.

```javascript
import_csv("post_tags", "./tags.csv");
```
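
The function returns the number of rows copied, and relative paths are resolved against the config directory, so you can log the result to verify the import. A small sketch using the same helpers:

```javascript
// Import tags from a CSV file and log how many rows were copied in.
var tag_count = import_csv("post_tags", "./tags.csv");
console.log("imported post_tags rows:", tag_count);
```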
## A list of fake data functions available to you

```
// Person
person
name
name_prefix
name_suffix
first_name
last_name
gender
ssn
contact
email
phone
phone_formatted
username
password

// Address
address
city
country
country_abr
state
state_abr
street
street_name
street_number
street_prefix
street_suffix
zip
latitude
latitude_in_range
longitude
longitude_in_range

// Beer
beer_alcohol
beer_hop
beer_ibu
beer_blg
beer_malt
beer_name
beer_style
beer_yeast

// Cars
car
car_type
car_maker
car_model

// Text
word
sentence
paragraph
question
quote

// Misc
generate
boolean
uuid

// Colors
color
hex_color
rgb_color
safe_color

// Internet
url
image_url
avatar_url
domain_name
domain_suffix
ipv4_address
ipv6_address
http_method
user_agent
user_agent_firefox
user_agent_chrome
user_agent_opera
user_agent_safari

// Date / Time
date
date_range
nano_second
second
minute
hour
month
day
weekday
year
timezone
timezone_abv
timezone_full
timezone_offset

// Payment
price
credit_card
credit_card_cvv
credit_card_number
credit_card_type
currency
currency_long
currency_short

// Company
bs
buzzword
company
company_suffix
job
job_description
job_level
job_title

// Hacker
hacker_abbreviation
hacker_adjective
hacker_noun
hacker_phrase
hacker_verb

// Hipster
hipster_word
hipster_paragraph
hipster_sentence

// File
file_extension
file_mine_type

// Numbers
number
numerify
int8
int16
int32
int64
uint8
uint16
uint32
uint64
float32
float32_range
float64
float64_range
shuffle_ints
mac_address

// String
digit
letter
lexify
rand_string
numerify
```

## Some more utility functions

```
shuffle_strings(string_array)
make_slug(text)
make_slug_lang(text, lang)
```
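
The fake data and utility functions can be combined freely inside a seed script. A small sketch using only the helpers listed above:

```javascript
// Pick a random tag and build a URL-friendly slug from a fake sentence.
var tags = ["tech", "art", "travel", "science"];
var tag = fake.rand_string(tags);
var slug = util.make_slug(fake.sentence(3) + "-" + tag);

console.log("generated slug:", slug);
```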
@@ -96,179 +96,6 @@ var post_count = import_csv("posts", "posts.csv");

You can generate the following fake data for your seeding purposes. Below is the list of fake data functions supported by the built-in fake data library. For example `fake.image_url()` will generate a fake image url or `fake.shuffle_strings(['hello', 'world', 'cool'])` will generate a randomly shuffled version of that array of strings or `fake.rand_string(['hello', 'world', 'cool'])` will return a random string from the array provided.

```
// Person
person
name
name_prefix
name_suffix
first_name
last_name
gender
ssn
contact
email
phone
phone_formatted
username
password

// Address
address
city
country
country_abr
state
state_abr
status_code
street
street_name
street_number
street_prefix
street_suffix
zip
latitude
latitude_in_range
longitude
longitude_in_range

// Beer
beer_alcohol
beer_hop
beer_ibu
beer_blg
beer_malt
beer_name
beer_style
beer_yeast

// Cars
car
car_type
car_maker
car_model

// Text
word
sentence
paragraph
question
quote

// Misc
generate
boolean
uuid

// Colors
color
hex_color
rgb_color
safe_color

// Internet
url
image_url
domain_name
domain_suffix
ipv4_address
ipv6_address
simple_status_code
http_method
user_agent
user_agent_firefox
user_agent_chrome
user_agent_opera
user_agent_safari

// Date / Time
date
date_range
nano_second
second
minute
hour
month
day
weekday
year
timezone
timezone_abv
timezone_full
timezone_offset

// Payment
price
credit_card
credit_card_cvv
credit_card_number
credit_card_number_luhn
credit_card_type
currency
currency_long
currency_short

// Company
bs
buzzword
company
company_suffix
job
job_description
job_level
job_title

// Hacker
hacker_abbreviation
hacker_adjective
hacker_ingverb
hacker_noun
hacker_phrase
hacker_verb

//Hipster
hipster_word
hipster_paragraph
hipster_sentence

// File
file_extension
file_mine_type

// Numbers
number
numerify
int8
int16
int32
int64
uint8
uint16
uint32
uint64
float32
float32_range
float64
float64_range
shuffle_ints
mac_address

//String
digit
letter
lexify
shuffle_strings
numerify
```

Other utility functions

```
shuffle_strings(string_array)
make_slug(text)
make_slug_lang(text, lang)
```
### Migrations

Easy database migrations are the most important thing when building products backed by a relational database. We make it super easy to manage and migrate your database.
@@ -11,6 +11,7 @@ module.exports = {
    "security",
    "telemetry",
    "config",
    "seed",
    "deploy",
    "internals",
  ],
go.mod
@@ -41,7 +41,7 @@ require (
	go.uber.org/zap v1.14.1
	golang.org/x/crypto v0.0.0-20200414173820-0848c9571904
	golang.org/x/sys v0.0.0-20200413165638-669c56c373c4 // indirect
	golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 // indirect
	golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543
	gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect
	gopkg.in/ini.v1 v1.55.0 // indirect
)
@@ -17,6 +17,7 @@ import (
	"github.com/dop251/goja"
	"github.com/dosco/super-graph/core"
	"github.com/gosimple/slug"
	"github.com/jackc/pgx/v4"
	"github.com/spf13/cobra"
)

@@ -27,6 +28,7 @@ func cmdDBSeed(cmd *cobra.Command, args []string) {
		log.Fatalf("ERR failed to read config: %s", err)
	}
	conf.Production = false
	conf.DefaultBlock = false

	db, err = initDB(conf, true, false)
	if err != nil {
@@ -51,7 +53,7 @@ func cmdDBSeed(cmd *cobra.Command, args []string) {

	vm := goja.New()
	vm.Set("graphql", graphQLFn)
	//vm.Set("import_csv", importCSV)
	vm.Set("import_csv", importCSV)

	console := vm.NewObject()
	console.Set("log", logFunc) //nolint: errcheck
@@ -181,34 +183,42 @@ func (c *csvSource) Err() error {
	return nil
}

// func importCSV(table, filename string) int64 {
// 	if filename[0] != '/' {
// 		filename = path.Join(conf.ConfigPathUsed(), filename)
// 	}
func importCSV(table, filename string) int64 {
	if filename[0] != '/' {
		filename = path.Join(confPath, filename)
	}

// 	s, err := NewCSVSource(filename)
// 	if err != nil {
// 		log.Fatalf("ERR %s", err)
// 	}
	s, err := NewCSVSource(filename)
	if err != nil {
		log.Fatalf("ERR %v", err)
	}

// 	var cols []string
// 	colval, _ := s.Values()
	var cols []string
	colval, _ := s.Values()

// 	for _, c := range colval {
// 		cols = append(cols, c.(string))
// 	}
	for _, c := range colval {
		cols = append(cols, c.(string))
	}

// 	n, err := db.Exec(fmt.Sprintf("COPY %s FROM STDIN WITH "),
// 		cols,
// 		s)
	conn, err := acquireConn(db)
	if err != nil {
		log.Fatalf("ERR %v", err)
	}
	//nolint: errcheck
	defer releaseConn(db, conn)

// 	if err != nil {
// 		err = fmt.Errorf("%w (line no %d)", err, s.i)
// 		log.Fatalf("ERR %s", err)
// 	}
	n, err := conn.CopyFrom(
		context.Background(),
		pgx.Identifier{table},
		cols,
		s)

// 	return n
// }
	if err != nil {
		log.Fatalf("ERR %v", fmt.Errorf("%w (line no %d)", err, s.i))
	}

	return n
}

//nolint: errcheck
func logFunc(args ...interface{}) {
@@ -377,11 +387,6 @@ func setFakeFuncs(f *goja.Object) {
	f.Set("hipster_paragraph", gofakeit.HipsterParagraph)
	f.Set("hipster_sentence", gofakeit.HipsterSentence)

	//Languages
	//f.Set("language", gofakeit.Language)
	//f.Set("language_abbreviation", gofakeit.LanguageAbbreviation)
	//f.Set("language_abbreviation", gofakeit.LanguageAbbreviation)

	// File
	f.Set("file_extension", gofakeit.FileExtension)
	f.Set("file_mine_type", gofakeit.FileMimeType)
@@ -410,8 +415,6 @@ func setFakeFuncs(f *goja.Object) {
	f.Set("lexify", gofakeit.Lexify)
	f.Set("rand_string", getRandValue)
	f.Set("numerify", gofakeit.Numerify)

	//f.Set("programming_language", gofakeit.ProgrammingLanguage)
}

//nolint: errcheck
@@ -69,6 +69,8 @@ func newViper(configPath, configFile string) *viper.Viper {
	vi.SetDefault("auth_fail_block", "always")
	vi.SetDefault("seed_file", "seed.js")

	vi.SetDefault("default_block", true)

	vi.SetDefault("database.type", "postgres")
	vi.SetDefault("database.host", "localhost")
	vi.SetDefault("database.port", 5432)
@@ -15,7 +15,6 @@ import (
	"contrib.go.opencensus.io/integrations/ocsql"
	"github.com/jackc/pgx/v4"
	"github.com/jackc/pgx/v4/stdlib"
	//_ "github.com/jackc/pgx/v4/stdlib"
)

const (
internal/serv/stdlib.go (new file)
@@ -0,0 +1,67 @@
package serv

import (
	"context"
	"database/sql"
	"sync"
	"time"

	errors "golang.org/x/xerrors"

	"github.com/jackc/pgx/v4"
)

type ctxKey int

var ctxKeyFakeTx ctxKey = 0

var errNotPgx = errors.New("not pgx *sql.DB")

var (
	fakeTxMutex sync.Mutex
	fakeTxConns map[*pgx.Conn]*sql.Tx
)

func acquireConn(db *sql.DB) (*pgx.Conn, error) {
	var conn *pgx.Conn
	ctx := context.WithValue(context.Background(), ctxKeyFakeTx, &conn)
	tx, err := db.BeginTx(ctx, nil)
	if err != nil {
		return nil, err
	}
	if conn == nil {
		if err := tx.Rollback(); err != nil {
			return nil, err
		}
		return nil, errNotPgx
	}

	fakeTxMutex.Lock()
	fakeTxConns[conn] = tx
	fakeTxMutex.Unlock()

	return conn, nil
}

func releaseConn(db *sql.DB, conn *pgx.Conn) error {
	var tx *sql.Tx
	var ok bool

	if conn.PgConn().IsBusy() || conn.PgConn().TxStatus() != 'I' {
		ctx, cancel := context.WithTimeout(context.Background(), time.Second)
		defer cancel()
		conn.Close(ctx)
	}

	fakeTxMutex.Lock()
	tx, ok = fakeTxConns[conn]
	if ok {
		delete(fakeTxConns, conn)
		fakeTxMutex.Unlock()
	} else {
		fakeTxMutex.Unlock()
		return errors.Errorf("can't release conn that is not acquired")
	}

	return tx.Rollback()
}